Dataset schema (one row per repository sample; the three file-level columns are parallel sequences, one entry per file):
- repo_name: string (lengths 8–130)
- hexsha: sequence (commit SHA per file)
- file_path: sequence (path per file)
- code: sequence (file contents per file)
- apis: sequence (list of extracted API calls per file)
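To make the schema concrete, here is a minimal, self-contained sketch (plain Python, all values hypothetical) of how one row with parallel file_path/code/apis sequences might be represented and iterated:

```python
# Hypothetical row following the schema above; values are illustrative only.
row = {
    "repo_name": "example-user/example-repo",
    "hexsha": ["0123456789abcdef0123456789abcdef01234567"],
    "file_path": ["pkg/module.py"],
    "code": ["import numpy as np\nx = np.ones(3)\n"],
    "apis": [["numpy.ones"]],
}

# The three sequences are parallel: one entry per file in the sample.
for path, source, api_list in zip(row["file_path"], row["code"], row["apis"]):
    print(path, "uses", ", ".join(api_list))
    print(source)
```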
Sunshine352/adversarial-robustness-toolbox
[ "070bf751aee40eb1b723fa5e24cde55d17978f62" ]
[ "art/defences/spatial_smoothing_unittest.py" ]
[ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport numpy as np\n\nfrom art.defences.spatial_smoothing import SpatialSmoothing\n\nlogger = logging.getLogger('testLogger')\n\n\nclass TestLocalSpatialSmoothing(unittest.TestCase):\n def test_ones(self):\n m, n = 10, 2\n x = np.ones((1, m, n, 3))\n\n # Start to test\n for window_size in range(1, 20):\n preprocess = SpatialSmoothing()\n smoothed_x = preprocess(x, window_size)\n self.assertTrue((smoothed_x == 1).all())\n\n def test_fix(self):\n x = np.array([[[[1], [2], [3]], [[7], [8], [9]], [[4], [5], [6]]]])\n\n # Start to test\n preprocess = SpatialSmoothing()\n smooth_x = preprocess(x, window_size=3)\n self.assertTrue((smooth_x == np.array(\n [[[[2], [3], [3]], [[4], [5], [6]], [[5], [6], [6]]]])).all())\n\n smooth_x = preprocess(x, window_size=1)\n self.assertTrue((smooth_x == x).all())\n\n smooth_x = preprocess(x, window_size=2)\n self.assertTrue((smooth_x == np.array(\n [[[[1], [2], [3]], [[7], [7], [8]], [[7], [7], [8]]]])).all())\n\n def test_channels(self):\n x = np.arange(9).reshape(1, 1, 3, 3)\n preprocess = SpatialSmoothing(channel_index=1)\n smooth_x = preprocess(x)\n\n new_x = np.arange(9).reshape(1, 3, 3, 1)\n preprocess = SpatialSmoothing()\n new_smooth_x = preprocess(new_x)\n\n self.assertTrue((smooth_x[0, 0] == new_smooth_x[0, :, :, 0]).all())\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.arange" ] ]
brokencuph/diff_pd
[ "e491668995a163b8ff7542d99f0b4e0c0f4ed2df" ]
[ "python/example/print_quadruped_3d.py" ]
[ "import sys\nsys.path.append('../')\n\nfrom pathlib import Path\nimport pickle\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import ScalarFormatter\nfrom matplotlib.gridspec import GridSpec\nimport numpy as np\n\nfrom py_diff_pd.common.common import print_info, print_error\n\ndef format_axes(fig):\n for i, ax in enumerate(fig.axes):\n ax.text(0.5, 0.5, \"ax%d\" % (i+1), va=\"center\", ha=\"center\")\n ax.tick_params(labelbottom=False, labelleft=False)\n\nif __name__ == '__main__':\n folder = Path('quadruped_3d')\n try:\n data = pickle.load(open(folder / 'data_0008_threads.bin', 'rb'))\n except:\n print_error('Log file not found.')\n loss_l, loss_h = data['loss_range']\n # For this quadruped, loss_l is not 0 but the performance of PD.\n loss_l = data['pd_eigen'][-1]['loss']\n print_info('Loss range: {:3f}, {:3f}'.format(loss_l, loss_h))\n def normalize_loss(unnormalized_loss):\n return (unnormalized_loss - loss_l) / (loss_h - loss_l)\n\n for thread_ct in [8,]:\n data_file = folder / 'data_{:04d}_threads.bin'.format(thread_ct)\n if data_file.exists():\n print_info('Loading {}'.format(data_file))\n data = pickle.load(open(data_file, 'rb'))\n for method in ['newton_pcg', 'newton_cholesky', 'pd_eigen']:\n total_time = 0\n avg_forward = 0\n average_backward = 0\n for d in data[method]:\n d['loss'] = normalize_loss(d['loss'])\n print('loss: {:8.3f}, |grad|: {:8.3f}, forward time: {:6.3f}s, backward time: {:6.3f}s'.format(\n d['loss'], np.linalg.norm(d['grad']), d['forward_time'], d['backward_time']))\n total_time += d['forward_time'] + d['backward_time']\n average_backward += d['backward_time']\n avg_forward += d['forward_time']\n avg_forward /= len(data[method])\n average_backward /= len(data[method])\n print_info('Optimizing with {} finished in {:6.3f}s in {:d} iterations. Average Backward time: {:6.3f}s, Average Forward Time = {:6.3f}s'.format(\n method, total_time, len(data[method]), average_backward, avg_forward))\n\n plt.rc('pdf', fonttype=42)\n plt.rc('font', size=30) # Controls default text sizes.\n plt.rc('axes', titlesize=36) # Fontsize of the axes title.\n plt.rc('axes', labelsize=36) # Fontsize of the x and y labels.\n plt.rc('xtick', labelsize=36) # Fontsize of the tick labels.\n plt.rc('ytick', labelsize=36) # Fontsize of the tick labels.\n plt.rc('legend', fontsize=36) # Legend fontsize.\n plt.rc('figure', titlesize=36) # Fontsize of the figure title.\n\n acts = {}\n losses = {}\n for method in ['newton_pcg', 'newton_cholesky', 'pd_eigen']:\n acts[method] = [np.linalg.norm(d['x']) for d in data[method]]\n losses[method] = [d['loss'] for d in data[method]]\n\n fig = plt.figure(figsize=(20, 10))\n\n ax_act = fig.add_subplot(121)\n\n ax_loss= fig.add_subplot(122)\n\n titles = ['muscle actuation', 'loss']\n for title, ax, y in zip(titles, (ax_act, ax_loss), (acts, losses)):\n\n if 'muscle' in title:\n ax.set_ylabel(\"|actuation|\")\n ax.grid(True, which='both')\n else:\n ax.set_ylabel(\"loss\")\n ax.grid(True)\n ax.set_xlabel('function evaluations')\n for method, method_ref_name, color in zip(['newton_pcg', 'newton_cholesky', 'pd_eigen'],\n ['PCG', 'Cholesky', 'Ours'], ['tab:blue', 'tab:red', 'tab:green']):\n ax.plot(y[method], color=color, label=method_ref_name, linewidth=4)\n ax.set_title(title, pad=25)\n handles, labels = ax.get_legend_handles_labels()\n\n plt.subplots_adjust(bottom = 0.25, wspace=0.3)\n # Share legends.\n fig.legend(handles, labels, loc='lower center', ncol=3)#, bbox_to_anchor=(0.5, 0.17))\n\n fig.savefig(folder / 'quadruped.pdf')\n plt.show()" ]
[ [ "matplotlib.pyplot.rc", "matplotlib.pyplot.figure", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show", "numpy.linalg.norm" ] ]
weihs/hashtag_co-occurrence_network
[ "b6aaa664d4cd42fd7dfd2d2dc2350568e0c6b08e" ]
[ "inconsistency_graph_construction.py" ]
[ "import json\nimport copy\nimport yaml\nimport sys\nimport numpy as np\nimport networkx as nx\nfrom scipy import linalg\n\n\ndef merge_cooccurrence_matrix(number_of_days, origin_directory,result_directory,origin_prefix,result_filename):\n postfix='.npy'\n for i in range(1,1+number_of_days):#build combine co_occurrence matrix\n filename=origin_directory+origin_prefix+str(i)+postfix\n if i==1:\n combine_matrix=np.load(filename)\n else:\n new_matrix=np.load(filename)\n combine_matrix=linalg.block_diag(combine_matrix,new_matrix)\n\n result_file=result_directory+result_filename\n np.save(result_file,combine_matrix)\n return combine_matrix\n\n\ndef construct_graphml(number_of_days,combine_matrix,origin_directory,origin_prefix,hashtag_frequency_prefix):\n G=nx.from_numpy_matrix(combine_matrix)\n prenode=0\n for i in range(1,1+number_of_days):#add node attributes\n daily_matrix_filename=origin_directory+origin_prefix+str(i)+'.npy'#get the number of hashtag\n matrix=np.load(daily_matrix_filename)\n number_of_hashtag=matrix.shape[0]\n\n filename=origin_directory+hashtag_frequency_prefix+str(i)+'.json'#construct graph and set node attributes\n with open(filename, mode='r') as f:\n hashtag_frequency=json.load(f)\n for j in range(number_of_hashtag):\n G.node[prenode+j]['text']=hashtag_frequency[j]['_id']\n G.node[prenode+j]['frequency']=hashtag_frequency[j]['frequency']\n G.node[prenode+j]['timeinterval']=i\n prenode+=j+1\n\n\n for v in G.nodes():#connect the same node in two closet period\n text=G.node[v]['text']\n same_text_nodelist=[u for u in G.nodes() if G.node[u]['text']==text and u>v]\n if len(same_text_nodelist)==0:\n continue\n else:\n u=min(same_text_nodelist)\n G.add_edge(u,v)\n G.edge[u][v]['type']=1\n G.edge[u][v]['weight']=10\n for u,v in G.edges():# set type attributes for vertical edges and remove self-loop\n if 'type' not in G.edge[u][v]:\n G.edge[u][v]['type']=0\n if u==v:\n G.remove_edge(u,v)\n return G\n\nwith open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n\nnumber_of_days=cfg['number_of_days']\ndata_directory=cfg['data_directory']\n\nif sys.argv[1]=='without_aggregation':\n origin_prefix=cfg['origin_aggregation_matrix']\n hashtag_frequency_prefix=cfg['origin_aggregation_list']\n graphml_filename=data_directory+str(number_of_days)+cfg['without_aggregation_graphml_filename']\n result_filename=cfg['without_aggregation_combine_matrix']\nelse:\n origin_prefix=cfg['result_aggregation_matrix']\n hashtag_frequency_prefix=cfg['result_aggregation_list']\n graphml_filename=data_directory+str(number_of_days)+cfg['with_aggregation_graphml_filename']\n result_filename=cfg['with_aggregation_combine_matrix']\n\n\n\n\ncombine_matrix=merge_cooccurrence_matrix(number_of_days, data_directory, data_directory, origin_prefix, result_filename)\nG=construct_graphml(number_of_days, combine_matrix, data_directory,origin_prefix,hashtag_frequency_prefix)\n\nwith open(graphml_filename,mode='w') as f:\n nx.write_graphml(G,f)\n" ]
[ [ "numpy.save", "numpy.load", "scipy.linalg.block_diag" ] ]
lbaiao/sys-simulator-2
[ "94f00d43309fe7b56dac5099bd4024695ba317b6" ]
[ "scripts_a2c/script5.py" ]
[ "# Similar to script 1 but with discrete-value actions.\n# It uses CompleteEnvironmentA2C2\n\nfrom sys_simulator import general as gen\nfrom sys_simulator.q_learning.environments.completeEnvironmentA2C2 \\\n import CompleteEnvironmentA2C2\nfrom sys_simulator.q_learning.rewards import dis_reward_tensor\nfrom sys_simulator.parameters.parameters import EnvironmentParameters\nfrom sys_simulator.a2c.agent import Agent\nfrom sys_simulator.a2c import ActorCriticDiscrete, compute_gae_returns\nfrom torch import optim, nn\nimport torch\nimport os\nimport pickle\nimport random\n# from copy import deepcopy\n\n\ndef run():\n # environment physical parameters\n n_mues = 1 # number of mues\n n_d2d = 2 # number of d2d pairs\n n_rb = n_mues # number of RBs\n bs_radius = 500 # bs radius in m\n rb_bandwidth = 180*1e3 # rb bandwidth in Hz\n d2d_pair_distance = 50 # d2d pair distance in m\n p_max = 23 # max tx power in dBm\n noise_power = -116 # noise power per RB in dBm\n bs_gain = 17 # macro bs antenna gain in dBi\n user_gain = 4 # user antenna gain in dBi\n sinr_threshold_train = 6 # mue sinr threshold in dB for training\n mue_margin = .5e4\n # conversions from dB to pow\n p_max = p_max - 30\n p_max = gen.db_to_power(p_max)\n noise_power = noise_power - 30\n noise_power = gen.db_to_power(noise_power)\n bs_gain = gen.db_to_power(bs_gain)\n user_gain = gen.db_to_power(user_gain)\n sinr_threshold_train = gen.db_to_power(sinr_threshold_train)\n # ai training parameters\n STEPS_PER_EPISODE = 20\n MAX_NUM_EPISODES = 2700 * 1\n # STEPS_PER_EPISODE = 10\n # MAX_NUM_EPISODES = 2\n # C = 8000 # C constant for the improved reward function\n C = 80 # C constant for the improved reward function\n MAX_NUMBER_OF_AGENTS = 10\n NUM_ACTIONS = 5\n HIDDEN_SIZE = 256\n LEARNING_RATE = 3e-2\n BETA = 1e-2\n # mu = 0.82*p_max/5/2000\n # std = mu/6\n mu = 0\n std = 0.1\n # torch device\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # parameters classes initialization\n env_params = EnvironmentParameters(\n rb_bandwidth, d2d_pair_distance, p_max, noise_power,\n bs_gain, user_gain, sinr_threshold_train,\n n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin)\n # environment initialization\n reward_function = dis_reward_tensor\n environment = CompleteEnvironmentA2C2(env_params, reward_function)\n # a2c initialization\n a2c = ActorCriticDiscrete(environment.state_space_size,\n NUM_ACTIONS, HIDDEN_SIZE, mu, std)\n actor_optimizer = optim.Adam(a2c.actor.parameters(), lr=LEARNING_RATE)\n critic_optimizer = optim.Adam(a2c.critic.parameters(), lr=LEARNING_RATE)\n # training loop\n episode = 0\n d2d_spectral_effs = []\n mue_spectral_effs = []\n actions = [i*0.82*p_max/5/1000 for i in range(NUM_ACTIONS)] # best result\n while episode < MAX_NUM_EPISODES:\n # entropy = 0\n aux_range = range(MAX_NUMBER_OF_AGENTS+1)[1:]\n n_agents = random.choice(aux_range)\n agents = [Agent() for _ in range(n_agents)]\n environment.build_scenario(agents)\n obs = [environment.get_state(a) for a in agents]\n log_probs = torch.zeros((n_agents, STEPS_PER_EPISODE)).to(device)\n values = torch.zeros((n_agents, STEPS_PER_EPISODE+1)).to(device)\n rewards = torch.zeros((n_agents, STEPS_PER_EPISODE)).to(device)\n entropy = torch.zeros((n_agents, STEPS_PER_EPISODE)).to(device)\n i = 0\n done = False\n # actions = [] # used for debug purposes\n while not done and i < STEPS_PER_EPISODE:\n # agents choose their actions\n # actions_t = [] # used for debug purposes\n for j, agent in enumerate(agents):\n action_index, dist, value = 
agent.act_discrete(a2c, obs[j])\n agent.set_action(actions[action_index.item()])\n # actions_t.append(action) # used for debug purposes\n log_prob = dist.log_prob(action_index)\n # entropy += dist.entropy().mean()\n log_probs[j][i] = log_prob\n values[j][i] = value\n entropy[j][i] = dist.entropy()\n # perform a environment step\n next_obs_t, rewards_t, done = environment.step(agents)\n rewards[:, i] = torch.FloatTensor(rewards_t)\n # actions.append(actions_t) # used for debug purposes\n i += 1\n # last_states = deepcopy(obs) # used for debug purposes\n obs = next_obs_t\n # gae and returns\n next_obs_t = torch.cat(obs, 0).to(device)\n for j, agent in enumerate(agents):\n _, _, next_value_t = agents[0].act(a2c, next_obs_t[j])\n values[j][i] = next_value_t\n advantages, returns = compute_gae_returns(device, rewards, values)\n # update critic\n values_critic = values[:, :-1].reshape(1, -1).to(device)\n returns_critic = returns.view(1, -1).to(device)\n critic_loss = nn.functional.mse_loss(values_critic, returns_critic)\n critic_optimizer.zero_grad()\n critic_loss.backward()\n critic_optimizer.step()\n # update actor\n aux = torch.mul(advantages, log_probs)\n aux -= BETA * entropy\n aux = torch.sum(aux, axis=1)\n actor_loss = -torch.mean(aux)\n actor_optimizer.zero_grad()\n actor_loss.backward()\n actor_optimizer.step()\n # print training info\n episode += 1\n m_reward = torch.mean(rewards).item()\n d2d_spectral_effs.append(environment.d2d_spectral_eff)\n mue_spectral_effs.append(environment.mue_spectral_eff)\n print(\"Episode#:{} mean reward:{}\".format(\n episode, m_reward))\n # save training data into a file\n cwd = os.getcwd()\n data = {}\n data['d2d_spectral_effs'] = d2d_spectral_effs\n data['mue_spectral_effs'] = mue_spectral_effs\n filename = gen.path_leaf(__file__)\n filename = filename.split('.')[0]\n filename_model = filename\n filename = f'{cwd}/data/a2c/{filename}.pickle'\n # save the a2c models\n torch.save(\n a2c.state_dict(),\n f'{cwd}/models/a2c/{filename_model}.pt')\n with open(filename, 'wb') as f:\n pickle.dump(data, f)\n" ]
[ [ "torch.sum", "torch.nn.functional.mse_loss", "torch.FloatTensor", "torch.mul", "torch.cuda.is_available", "torch.zeros", "torch.cat", "torch.mean" ] ]
ndawlab/seqanx
[ "de44aa1baeb10646d538c185f0428d53b00db4b5" ]
[ "sisyphus/mdp/_dp.py" ]
[ "\"\"\"Dynamic programming module\"\"\"\n\nimport numpy as np\nfrom copy import deepcopy\nfrom ._misc import check_params, softmax, pessimism\nfrom warnings import warn\n\nclass ValueIteration(object):\n \"\"\"Q-value iteration algorithm.\n \n Parameters\n ----------\n policy : max | min | softmax | pessimism (default = pessimism)\n Learning rule.\n gamma : float (default = 0.9)\n Temporal discounting factor.\n beta : float (default = 10.0)\n Inverse temperature for future choice (ignored if policy not softmax).\n w : float (default = 1.0)\n Pessimism weight (ignored if policy not pessimism).\n tol : float, default: 1e-4\n Tolerance for stopping criteria.\n max_iter : int, default: 100\n Maximum number of iterations taken for the solvers to converge.\n\n References\n ----------\n 1. Sutton, R. S., & Barto, A. G. (2018). Reinforcement learning: An introduction. MIT press.\n \"\"\"\n \n def __init__(self, policy='pessimism', gamma=0.9, beta=10.0, w=1.0, tol=0.0001, max_iter=100):\n\n ## Define choice policy.\n self.policy = policy\n if policy == 'max': self._policy = np.max\n elif policy == 'min': self._policy = np.min\n elif policy == 'softmax': self._policy = lambda arr: arr @ softmax(arr * self.beta)\n elif policy == 'pessimism': self._policy = lambda arr: pessimism(arr, self.w)\n else: raise ValueError('Policy \"%s\" not valid!' %self.policy)\n \n ## Check parameters.\n self.gamma = gamma\n self.beta = beta\n self.w = w\n check_params(gamma=self.gamma, beta=self.beta, w=self.w)\n \n ## Set convergence criteria.\n self.tol = tol\n self.max_iter = max_iter\n \n def __repr__(self):\n return '<Q-value iteration>'\n \n def copy(self):\n \"\"\"Return copy of agent.\"\"\"\n return deepcopy(self)\n \n def _q_solve(self, info, Q=None):\n \"\"\"Solve for Q-values iteratively.\"\"\"\n \n ## Initialize Q-values.\n if Q is None: Q = np.zeros(info.shape[0], dtype=float)\n assert np.equal(Q.shape, info.shape[0])\n copy = info.copy()\n \n ## Main loop.\n for k in range(self.max_iter):\n \n ## Make copy.\n q = Q.copy()\n \n ## Precompute successor value. \n copy['Q'] = q\n V_prime = copy.groupby('S').Q.apply(self._policy).values\n\n ## Compute Q-values.\n for i in range(info.shape[0]):\n \n ## Update Q-value.\n Q[i] = sum(info.loc[i,\"T\"] * (info.loc[i,\"R\"] + self.gamma * V_prime[info.loc[i,\"S'\"]]))\n\n ## Compute delta.\n delta = np.abs(Q - q)\n\n ## Check for termination.\n if np.all(delta < self.tol): break\n \n return Q, k + 1\n \n def _v_solve(self, info):\n \"\"\"Compute state value from Q-table.\"\"\"\n \n ## Copy info and append Q-values.\n copy = info.copy()\n copy['Q'] = self.Q\n \n ## Identify max by state.\n return copy.groupby('S').Q.max().values\n \n def _pi_solve(self, gym):\n \"\"\"Compute policy from Q-table.\"\"\"\n \n ## Precompute optimal q(s,a).\n copy = gym.info.copy()\n copy['Q'] = self.Q\n copy = copy.iloc[copy.groupby('S').Q.idxmax().values]\n copy[\"S'\"] = copy[\"S'\"].apply(lambda arr: arr[0])\n \n ## Initialize policy from initial state.\n policy = [gym.start]\n \n ## Iterately append.\n while True:\n\n ## Termination check.\n s = policy[-1]\n if s in gym.terminal: break\n \n ## Observe successor.\n s_prime, = copy.loc[copy[\"S\"]==s, \"S'\"].values\n \n ## Terminate on loops. 
Otherwise append.\n if s_prime in policy: break\n policy.append(s_prime)\n \n return policy\n \n def fit(self, gym, Q=None, verbose=True): \n \"\"\"Solve for optimal policy.\n \n Parameters\n ----------\n gym : GridWorld instance\n Simulation environment.\n \n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n \n ## Solve for Q-values.\n self.Q, self.n_iter = self._q_solve(gym.info, Q)\n if np.equal(self.n_iter, self.max_iter) and verbose:\n warn('Reached maximum iterations.')\n \n ## Solve for values.\n self.V = self._v_solve(gym.info)\n \n ## Compute policy.\n self.pi = self._pi_solve(gym)\n \n return self" ]
[ [ "numpy.equal", "numpy.all", "numpy.abs", "numpy.zeros" ] ]
phenomax/praktipy
[ "58d1f74e6d128b0d22bfa0fcaf754c9b8b9e8c23" ]
[ "praktiplot.py" ]
[ "# Setting matplotlib layout to match tex settings \n\nimport matplotlib\nmatplotlib.use('pgf')\nimport matplotlib.pyplot as plt\nfrom os.path import dirname, abspath\nimport locale\nmatplotlib.rcParams.update({\n 'font.family': 'serif',\n 'text.usetex': True,\n 'pgf.rcfonts': False,\n 'pgf.texsystem': 'lualatex',\n 'pgf.preamble': r'\\input{'+dirname(abspath(__file__)).replace(\" \", r\"\\ \")+r'//matplotlib_header.tex'+r'}',\n})\n# use german locale settings for printing 3.4 as 3,4\ntry:\n locale.setlocale(locale.LC_ALL, 'de_DE.UTF8')\nexcept locale.Error:\n print(\"Could not set the language settings! 3.5 will not be written as 3,5! SO SAD!\")\n \nplt.ticklabel_format(useLocale=True)" ]
[ [ "matplotlib.use", "matplotlib.pyplot.ticklabel_format" ] ]
reubensgithub/covid-19-dashboard
[ "f0d8de85a989597fe38ea771439a70fc9e0bd21f" ]
[ "coursework_project_dec_2021/covid_data_handler.py" ]
[ "\"\"\"This module has various functions inside it that will allow\r\n the processing and handling of covid data, whether from a\r\n CSV file or returned from an API\"\"\"\r\nimport sched\r\nimport time\r\nimport logging\r\nimport pandas as pd\r\nfrom typing import List\r\nfrom uk_covid19 import Cov19API\r\n\r\nlogging.basicConfig(filename='covid_log.log', level=logging.DEBUG,\r\n format='%(levelname)s: %(asctime)s %(message)s')\r\n\r\ndata_list_exeter = []\r\ndata_list_england = []\r\n\r\n\r\ndef parse_csv_data(csv_filename: str) -> list:\r\n \"\"\"This function will take the csv data from the csv file and return it as a list \"\"\"\r\n dataframe = pd.read_csv(csv_filename)\r\n return dataframe.values.tolist()\r\n\r\n# parse_csv_data(\"nation_2021-10-28.csv\")\r\n\r\n\r\ndef process_covid_csv_data(covid_csv_data: object) -> int:\r\n \"\"\"This function will take the returned list of data from parse_csv_data()\r\n (converted to a dataframe here for convenience in accessing values) and will return\r\n the necessary statistics back to the user \"\"\"\r\n covid_csv_data = pd.DataFrame(covid_csv_data)\r\n num_cases_7_days = int(covid_csv_data[6].head(9).sum(axis=0, skipna=True) -\r\n covid_csv_data._get_value(1, 6, takeable=True))\r\n current_num_hosp_cases = int(covid_csv_data._get_value(0, 5, takeable=True))\r\n cum_num_deaths = int(covid_csv_data._get_value(13, 4, takeable=True))\r\n\r\n return num_cases_7_days, current_num_hosp_cases, cum_num_deaths\r\n\r\n# process_covid_csv_data(covid_csv_data=parse_csv_data(\"nation_2021-10-28.csv\"))\r\n\r\n\r\ndef covid_API_request(location: str, location_type: str) -> List[dict]:\r\n \"\"\"This function will use the Cov19API provided by\r\n Public Health England and return all of the values of the given\r\n fields, from the start date up to the current date. 
This data\r\n is returned in a JSON format.\"\"\"\r\n location_data = [\r\n 'areaType='+str(location_type),\r\n 'areaName='+str(location)\r\n ]\r\n covid_data = {\r\n \"date\": \"date\",\r\n \"areaName\": \"areaName\",\r\n \"areaCode\": \"areaCode\",\r\n \"newCasesByPublishDate\": \"newCasesByPublishDate\",\r\n \"cumCasesByPublishDate\": \"cumCasesByPublishDate\",\r\n \"hospitalCases\": \"hospitalCases\",\r\n \"newDeaths28DaysByDeathDate\": \"newDeaths28DaysByDeathDate\",\r\n \"cumDeaths28DaysByDeathDate\": \"cumDeaths28DaysByDeathDate\",\r\n \"cumDeathsByPublishDate\": \"cumDeathsByPublishDate\"\r\n }\r\n\r\n api_object = Cov19API(filters=location_data, structure=covid_data)\r\n data = api_object.get_json()\r\n return data\r\n\r\n# covid_API_request()\r\n\r\nexe = covid_API_request(location=\"Exeter\", location_type=\"ltla\")\r\neng = covid_API_request(location=\"England\", location_type=\"nation\")\r\nfor piece in exe['data']:\r\n data_list_exeter.append(piece)\r\nfor info in eng['data']:\r\n data_list_england.append(info)\r\n\r\ndf_exeter = pd.DataFrame(data_list_exeter)\r\ndf_england = pd.DataFrame(data_list_england)\r\n\r\n# print(data_list_england)\r\n\r\nnational_7day_infections = int(df_england['newCasesByPublishDate'].head(7).sum(axis=0, skipna=True))\r\nlocal_7day_infections = int(df_exeter['newCasesByPublishDate'].head(7).sum(axis=0, skipna=True))\r\n# print(national_7day_infections)\r\n# print(local_7day_infections)\r\n\r\nscheduler = sched.scheduler(time.time, time.sleep)\r\n\r\ndef schedule_covid_updates(update_interval: int, update_name: str, repeat=False) -> List[object]:\r\n \"\"\"This function allows the user to schedule updates\r\n for when they want the Cov19API to get values at.\"\"\"\r\n if not repeat:\r\n event1 = scheduler.enter(update_interval, 1, covid_API_request, kwargs=update_name)\r\n logging.info(f\"\"\"Covid update for {update_name} has been scheduled\"\"\")\r\n return event1\r\n if repeat:\r\n for i in range(100000):\r\n rep = 0\r\n event2 = scheduler.enter(update_interval + rep, 2, covid_API_request,\r\n argument=repeat, kwargs=update_name)\r\n rep += 86400\r\n i += 1\r\n logging.info(f\"\"\"Repeating covid update for update {update_name}\"\"\")\r\n return event2\r\n scheduler.run(blocking=False)\r\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
vpolisky/pymc3
[ "87cdd712c86321121c2ed3150764f3d847f5083c" ]
[ "pymc3/distributions/discrete.py" ]
[ "from functools import partial \nimport numpy as np\nimport theano\nimport theano.tensor as tt\nfrom scipy import stats\n\nfrom .dist_math import bound, factln, binomln, betaln, logpow\nfrom .distribution import Discrete, draw_values, generate_samples, reshape_sampled\n\n__all__ = ['Binomial', 'BetaBinomial', 'Bernoulli', 'DiscreteWeibull',\n 'Poisson', 'NegativeBinomial', 'ConstantDist', 'Constant',\n 'ZeroInflatedPoisson', 'ZeroInflatedNegativeBinomial',\n 'DiscreteUniform', 'Geometric', 'Categorical']\n\n\nclass Binomial(Discrete):\n R\"\"\"\n Binomial log-likelihood.\n\n The discrete probability distribution of the number of successes\n in a sequence of n independent yes/no experiments, each of which\n yields success with probability p.\n\n .. math:: f(x \\mid n, p) = \\binom{n}{x} p^x (1-p)^{n-x}\n\n ======== ==========================================\n Support :math:`x \\in \\{0, 1, \\ldots, n\\}`\n Mean :math:`n p`\n Variance :math:`n p (1 - p)`\n ======== ==========================================\n\n Parameters\n ----------\n n : int\n Number of Bernoulli trials (n >= 0).\n p : float\n Probability of success in each trial (0 < p < 1).\n \"\"\"\n\n def __init__(self, n, p, *args, **kwargs):\n super(Binomial, self).__init__(*args, **kwargs)\n self.n = n = tt.as_tensor_variable(n)\n self.p = p = tt.as_tensor_variable(p)\n self.mode = tt.cast(tt.round(n * p), self.dtype)\n\n def random(self, point=None, size=None, repeat=None):\n n, p = draw_values([self.n, self.p], point=point)\n return generate_samples(stats.binom.rvs, n=n, p=p,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n n = self.n\n p = self.p\n\n return bound(\n binomln(n, value) + logpow(p, value) + logpow(1 - p, n - value),\n 0 <= value, value <= n,\n 0 <= p, p <= 1)\n\n\nclass BetaBinomial(Discrete):\n R\"\"\"\n Beta-binomial log-likelihood.\n\n Equivalent to binomial random variable with success probability\n drawn from a beta distribution.\n\n .. math::\n\n f(x \\mid \\alpha, \\beta, n) =\n \\binom{n}{x}\n \\frac{B(x + \\alpha, n - x + \\beta)}{B(\\alpha, \\beta)}\n\n ======== =================================================================\n Support :math:`x \\in \\{0, 1, \\ldots, n\\}`\n Mean :math:`n \\dfrac{\\alpha}{\\alpha + \\beta}`\n Variance :math:`n \\dfrac{\\alpha \\beta}{(\\alpha+\\beta)^2 (\\alpha+\\beta+1)}`\n ======== =================================================================\n\n Parameters\n ----------\n n : int\n Number of Bernoulli trials (n >= 0).\n alpha : float\n alpha > 0.\n beta : float\n beta > 0.\n \"\"\"\n\n def __init__(self, alpha, beta, n, *args, **kwargs):\n super(BetaBinomial, self).__init__(*args, **kwargs)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.beta = beta = tt.as_tensor_variable(beta)\n self.n = n = tt.as_tensor_variable(n)\n self.mode = tt.cast(tt.round(alpha / (alpha + beta)), 'int8')\n\n def _random(self, alpha, beta, n, size=None):\n size = size or 1\n p = np.atleast_1d(stats.beta.rvs(a=alpha, b=beta, size=np.prod(size)))\n # Sometimes scipy.beta returns nan. 
Ugh.\n while np.any(np.isnan(p)):\n i = np.isnan(p)\n p[i] = stats.beta.rvs(a=alpha, b=beta, size=np.sum(i))\n # Sigh...\n _n, _p, _size = np.atleast_1d(n).flatten(), p.flatten(), np.prod(size)\n samples = np.reshape(stats.binom.rvs(n=_n, p=_p, size=_size), size)\n return samples\n\n def random(self, point=None, size=None, repeat=None):\n alpha, beta, n = \\\n draw_values([self.alpha, self.beta, self.n], point=point)\n return generate_samples(self._random, alpha=alpha, beta=beta, n=n,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n alpha = self.alpha\n beta = self.beta\n return bound(binomln(self.n, value)\n + betaln(value + alpha, self.n - value + beta)\n - betaln(alpha, beta),\n value >= 0, value <= self.n,\n alpha > 0, beta > 0)\n\n\nclass Bernoulli(Discrete):\n R\"\"\"Bernoulli log-likelihood\n\n The Bernoulli distribution describes the probability of successes\n (x=1) and failures (x=0).\n\n .. math:: f(x \\mid p) = p^{x} (1-p)^{1-x}\n\n ======== ======================\n Support :math:`x \\in \\{0, 1\\}`\n Mean :math:`p`\n Variance :math:`p (1 - p)`\n ======== ======================\n\n Parameters\n ----------\n p : float\n Probability of success (0 < p < 1).\n \"\"\"\n\n def __init__(self, p, *args, **kwargs):\n super(Bernoulli, self).__init__(*args, **kwargs)\n self.p = p = tt.as_tensor_variable(p)\n self.mode = tt.cast(tt.round(p), 'int8')\n\n def random(self, point=None, size=None, repeat=None):\n p = draw_values([self.p], point=point)\n return generate_samples(stats.bernoulli.rvs, p,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n p = self.p\n return bound(\n tt.switch(value, tt.log(p), tt.log(1 - p)),\n value >= 0, value <= 1,\n p >= 0, p <= 1)\n\n\nclass DiscreteWeibull(Discrete):\n R\"\"\"Discrete Weibull log-likelihood\n\n The discrete Weibull distribution is a flexible model of count data that\n can handle both over- and under-dispersion.\n\n .. math:: f(x \\mid q, \\beta) = q^{x^{\\beta}} - q^{(x + 1)^{\\beta}}\n\n ======== ======================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\mu = \\sum_{x = 1}^{\\infty} q^{x^{\\beta}}`\n Variance :math:`2 \\sum_{x = 1}^{\\infty} x q^{x^{\\beta}} - \\mu - \\mu^2`\n ======== ======================\n \"\"\"\n def __init__(self, q, beta, *args, **kwargs):\n super(DiscreteWeibull, self).__init__(*args, defaults=['median'], **kwargs)\n \n self.q = q\n self.beta = beta\n\n self.median = self._ppf(0.5)\n \n def logp(self, value):\n q = self.q\n beta = self.beta\n \n return bound(tt.log(tt.power(q, tt.power(value, beta)) - tt.power(q, tt.power(value + 1, beta))),\n 0 <= value,\n 0 < q, q < 1,\n 0 < beta)\n\n def _ppf(self, p):\n \"\"\"\n The percentile point function (the inverse of the cumulative\n distribution function) of the discrete Weibull distribution.\n \"\"\"\n q = self.q\n beta = self.beta\n\n return (tt.ceil(tt.power(tt.log(1 - p) / tt.log(q), 1. / beta)) - 1).astype('int64')\n\n def _random(self, q, beta, size=None):\n p = np.random.uniform(size=size)\n\n return np.ceil(np.power(np.log(1 - p) / np.log(q), 1. / beta)) - 1\n\n def random(self, point=None, size=None, repeat=None):\n q, beta = draw_values([self.q, self.beta], point=point)\n\n return generate_samples(self._random, q, beta,\n dist_shape=self.shape,\n size=size)\n\n\nclass Poisson(Discrete):\n R\"\"\"\n Poisson log-likelihood.\n\n Often used to model the number of events occurring in a fixed period\n of time when the times at which events occur are independent.\n\n .. 
math:: f(x \\mid \\mu) = \\frac{e^{-\\mu}\\mu^x}{x!}\n\n ======== ==========================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\mu`\n Variance :math:`\\mu`\n ======== ==========================\n\n Parameters\n ----------\n mu : float\n Expected number of occurrences during the given interval\n (mu >= 0).\n\n Notes\n -----\n The Poisson distribution can be derived as a limiting case of the\n binomial distribution.\n \"\"\"\n\n def __init__(self, mu, *args, **kwargs):\n super(Poisson, self).__init__(*args, **kwargs)\n self.mu = mu = tt.as_tensor_variable(mu)\n self.mode = tt.floor(mu).astype('int32')\n\n def random(self, point=None, size=None, repeat=None):\n mu = draw_values([self.mu], point=point)\n return generate_samples(stats.poisson.rvs, mu,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n mu = self.mu\n log_prob = bound(\n logpow(mu, value) - factln(value) - mu,\n mu >= 0, value >= 0)\n # Return zero when mu and value are both zero\n return tt.switch(1 * tt.eq(mu, 0) * tt.eq(value, 0),\n 0, log_prob)\n\n\nclass NegativeBinomial(Discrete):\n R\"\"\"\n Negative binomial log-likelihood.\n\n The negative binomial distribution describes a Poisson random variable\n whose rate parameter is gamma distributed.\n\n .. math::\n\n f(x \\mid \\mu, \\alpha) =\n \\frac{\\Gamma(x+\\alpha)}{x! \\Gamma(\\alpha)}\n (\\alpha/(\\mu+\\alpha))^\\alpha (\\mu/(\\mu+\\alpha))^x\n\n ======== ==========================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\mu`\n ======== ==========================\n\n Parameters\n ----------\n mu : float\n Poission distribution parameter (mu > 0).\n alpha : float\n Gamma distribution parameter (alpha > 0).\n \"\"\"\n\n def __init__(self, mu, alpha, *args, **kwargs):\n super(NegativeBinomial, self).__init__(*args, **kwargs)\n self.mu = mu = tt.as_tensor_variable(mu)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.mode = tt.floor(mu).astype('int32')\n\n def random(self, point=None, size=None, repeat=None):\n mu, alpha = draw_values([self.mu, self.alpha], point=point)\n g = generate_samples(stats.gamma.rvs, alpha, scale=mu / alpha,\n dist_shape=self.shape,\n size=size)\n g[g == 0] = np.finfo(float).eps # Just in case\n return reshape_sampled(stats.poisson.rvs(g), size, self.shape)\n\n def logp(self, value):\n mu = self.mu\n alpha = self.alpha\n negbinom = bound(binomln(value + alpha - 1, value)\n + logpow(mu / (mu + alpha), value)\n + logpow(alpha / (mu + alpha), alpha),\n value >= 0, mu > 0, alpha > 0)\n\n # Return Poisson when alpha gets very large.\n return tt.switch(1 * (alpha > 1e10),\n Poisson.dist(self.mu).logp(value),\n negbinom)\n\n\nclass Geometric(Discrete):\n R\"\"\"\n Geometric log-likelihood.\n\n The probability that the first success in a sequence of Bernoulli\n trials occurs on the x'th trial.\n\n .. 
math:: f(x \\mid p) = p(1-p)^{x-1}\n\n ======== =============================\n Support :math:`x \\in \\mathbb{N}_{>0}`\n Mean :math:`\\dfrac{1}{p}`\n Variance :math:`\\dfrac{1 - p}{p^2}`\n ======== =============================\n\n Parameters\n ----------\n p : float\n Probability of success on an individual trial (0 < p <= 1).\n \"\"\"\n\n def __init__(self, p, *args, **kwargs):\n super(Geometric, self).__init__(*args, **kwargs)\n self.p = p = tt.as_tensor_variable(p)\n self.mode = 1\n\n def random(self, point=None, size=None, repeat=None):\n p = draw_values([self.p], point=point)\n return generate_samples(np.random.geometric, p,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n p = self.p\n return bound(tt.log(p) + logpow(1 - p, value - 1),\n 0 <= p, p <= 1, value >= 1)\n\n\nclass DiscreteUniform(Discrete):\n R\"\"\"\n Discrete uniform distribution.\n\n .. math:: f(x \\mid lower, upper) = \\frac{1}{upper-lower}\n\n ======== ===============================================\n Support :math:`x \\in {lower, lower + 1, \\ldots, upper}`\n Mean :math:`\\dfrac{lower + upper}{2}`\n Variance :math:`\\dfrac{(upper - lower)^2}{12}`\n ======== ===============================================\n\n Parameters\n ----------\n lower : int\n Lower limit.\n upper : int\n Upper limit (upper > lower).\n \"\"\"\n\n def __init__(self, lower, upper, *args, **kwargs):\n super(DiscreteUniform, self).__init__(*args, **kwargs)\n self.lower = tt.floor(lower).astype('int32')\n self.upper = tt.floor(upper).astype('int32')\n self.mode = tt.maximum(\n tt.floor((upper - lower) / 2.).astype('int32'), self.lower)\n\n def _random(self, lower, upper, size=None):\n # This way seems to be the only to deal with lower and upper\n # as array-like.\n samples = stats.uniform.rvs(lower, upper - lower - np.finfo(float).eps,\n size=size)\n return np.floor(samples).astype('int32')\n\n def random(self, point=None, size=None, repeat=None):\n lower, upper = draw_values([self.lower, self.upper], point=point)\n return generate_samples(self._random,\n lower, upper,\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n upper = self.upper\n lower = self.lower\n return bound(-tt.log(upper - lower + 1),\n lower <= value, value <= upper)\n\n\nclass Categorical(Discrete):\n R\"\"\"\n Categorical log-likelihood.\n\n The most general discrete distribution.\n\n .. math:: f(x \\mid p) = p_x\n\n ======== ===================================\n Support :math:`x \\in \\{1, 2, \\ldots, |p|\\}`\n ======== ===================================\n\n Parameters\n ----------\n p : array of floats\n p > 0 and the elements of p must sum to 1. 
They will be automatically\n rescaled otherwise.\n \"\"\"\n\n def __init__(self, p, *args, **kwargs):\n super(Categorical, self).__init__(*args, **kwargs)\n try:\n self.k = tt.shape(p)[-1].tag.test_value\n except AttributeError:\n self.k = tt.shape(p)[-1]\n self.p = p = tt.as_tensor_variable(p)\n self.p = (p.T / tt.sum(p, -1)).T\n self.mode = tt.argmax(p)\n\n def random(self, point=None, size=None, repeat=None):\n def random_choice(k, *args, **kwargs):\n if len(kwargs['p'].shape) > 1:\n return np.asarray(\n [np.random.choice(k, p=p)\n for p in kwargs['p']]\n )\n else:\n return np.random.choice(k, *args, **kwargs)\n\n p, k = draw_values([self.p, self.k], point=point)\n return generate_samples(partial(random_choice, np.arange(k)),\n p=p,\n broadcast_shape=p.shape[:-1] or (1,),\n dist_shape=self.shape,\n size=size)\n\n def logp(self, value):\n p = self.p\n k = self.k\n\n # Clip values before using them for indexing\n value_clip = tt.clip(value, 0, k - 1)\n\n sumto1 = theano.gradient.zero_grad(\n tt.le(abs(tt.sum(p, axis=-1) - 1), 1e-5))\n\n if p.ndim > 1:\n a = tt.log(p[tt.arange(p.shape[0]), value_clip])\n else:\n a = tt.log(p[value_clip])\n\n return bound(a, value >= 0, value <= (k - 1), sumto1)\n\n\nclass Constant(Discrete):\n \"\"\"\n Constant log-likelihood.\n\n Parameters\n ----------\n value : float or int\n Constant parameter.\n \"\"\"\n\n def __init__(self, c, *args, **kwargs):\n super(Constant, self).__init__(*args, **kwargs)\n self.mean = self.median = self.mode = self.c = c = tt.as_tensor_variable(c)\n\n def random(self, point=None, size=None, repeat=None):\n c = draw_values([self.c], point=point)\n dtype = np.array(c).dtype\n\n def _random(c, dtype=dtype, size=None):\n return np.full(size, fill_value=c, dtype=dtype)\n\n return generate_samples(_random, c=c, dist_shape=self.shape,\n size=size).astype(dtype)\n\n def logp(self, value):\n c = self.c\n return bound(0, tt.eq(value, c))\n\n\ndef ConstantDist(*args, **kwargs):\n import warnings\n warnings.warn(\"ConstantDist has been deprecated. In future, use Constant instead.\",\n DeprecationWarning)\n return Constant(*args, **kwargs)\n\n\nclass ZeroInflatedPoisson(Discrete):\n R\"\"\"\n Zero-inflated Poisson log-likelihood.\n\n Often used to model the number of events occurring in a fixed period\n of time when the times at which events occur are independent.\n\n .. 
math::\n\n f(x \\mid \\theta, \\psi) = \\left\\{ \\begin{array}{l}\n (1-\\psi) + \\psi e^{-\\theta}, \\text{if } x = 0 \\\\\n \\psi \\frac{e^{-\\theta}\\theta^x}{x!}, \\text{if } x=1,2,3,\\ldots\n \\end{array} \\right.\n\n ======== ==========================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\psi\\theta`\n Variance :math:`\\theta + \\frac{1-\\psi}{\\psi}\\theta^2`\n ======== ==========================\n\n Parameters\n ----------\n theta : float\n Expected number of occurrences during the given interval\n (theta >= 0).\n psi : float\n Expected proportion of Poisson variates (0 < psi < 1)\n\n \"\"\"\n\n def __init__(self, theta, psi, *args, **kwargs):\n super(ZeroInflatedPoisson, self).__init__(*args, **kwargs)\n self.theta = theta = tt.as_tensor_variable(theta)\n self.psi = psi = tt.as_tensor_variable(psi)\n self.pois = Poisson.dist(theta)\n self.mode = self.pois.mode\n\n def random(self, point=None, size=None, repeat=None):\n theta, psi = draw_values([self.theta, self.psi], point=point)\n g = generate_samples(stats.poisson.rvs, theta,\n dist_shape=self.shape,\n size=size)\n sampled = g * (np.random.random(np.squeeze(g.shape)) < psi)\n return reshape_sampled(sampled, size, self.shape)\n\n def logp(self, value):\n return tt.switch(value > 0,\n tt.log(self.psi) + self.pois.logp(value),\n tt.log((1. - self.psi) + self.psi * tt.exp(-self.theta)))\n\n\nclass ZeroInflatedNegativeBinomial(Discrete):\n R\"\"\"\n Zero-Inflated Negative binomial log-likelihood.\n\n The Zero-inflated version of the Negative Binomial (NB).\n The NB distribution describes a Poisson random variable\n whose rate parameter is gamma distributed.\n\n .. math::\n\n f(x \\mid \\mu, \\alpha, \\psi) = \\left\\{ \\begin{array}{l}\n (1-\\psi) + \\psi \\left (\\frac{\\alpha}{\\alpha+\\mu} \\right) ^\\alpha, \\text{if } x = 0 \\\\\n \\psi \\frac{\\Gamma(x+\\alpha)}{x! \\Gamma(\\alpha)} \\left (\\frac{\\alpha}{\\mu+\\alpha} \\right)^\\alpha \\left( \\frac{\\mu}{\\mu+\\alpha} \\right)^x, \\text{if } x=1,2,3,\\ldots\n \\end{array} \\right.\n\n ======== ==========================\n Support :math:`x \\in \\mathbb{N}_0`\n Mean :math:`\\psi\\mu`\n Var :math:`\\psi\\mu + \\left (1 + \\frac{\\mu}{\\alpha} + \\frac{1-\\psi}{\\mu} \\right)`\n ======== ==========================\n\n Parameters\n ----------\n mu : float\n Poission distribution parameter (mu > 0).\n alpha : float\n Gamma distribution parameter (alpha > 0).\n psi : float\n Expected proportion of NegativeBinomial variates (0 < psi < 1)\n \"\"\"\n\n def __init__(self, mu, alpha, psi, *args, **kwargs):\n super(ZeroInflatedNegativeBinomial, self).__init__(*args, **kwargs)\n self.mu = mu = tt.as_tensor_variable(mu)\n self.alpha = alpha = tt.as_tensor_variable(alpha)\n self.psi = psi = tt.as_tensor_variable(psi)\n self.nb = NegativeBinomial.dist(mu, alpha)\n self.mode = self.nb.mode\n\n def random(self, point=None, size=None, repeat=None):\n mu, alpha, psi = draw_values(\n [self.mu, self.alpha, self.psi], point=point)\n g = generate_samples(stats.gamma.rvs, alpha, scale=mu / alpha,\n dist_shape=self.shape,\n size=size)\n g[g == 0] = np.finfo(float).eps # Just in case\n sampled = stats.poisson.rvs(g) * (np.random.random(np.squeeze(g.shape)) < psi)\n return reshape_sampled(sampled, size, self.shape)\n\n def logp(self, value):\n return tt.switch(value > 0,\n tt.log(self.psi) + self.nb.logp(value),\n tt.log((1. - self.psi) + self.psi * (self.alpha / (self.alpha + self.mu))**self.alpha))\n" ]
[ [ "numpy.random.uniform", "numpy.sum", "numpy.squeeze", "numpy.floor", "numpy.random.choice", "scipy.stats.poisson.rvs", "numpy.atleast_1d", "numpy.arange", "numpy.log", "numpy.prod", "scipy.stats.binom.rvs", "numpy.finfo", "numpy.full", "numpy.array", "numpy.isnan" ] ]
zwx8981/DBCNN-Pytorch
[ "fa29f0307aa4533c4025c688ba5301cfddf9812f", "fa29f0307aa4533c4025c688ba5301cfddf9812f" ]
[ "simple_demo.py", "loss/cross_entropy_prob.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torchvision import transforms\nfrom DBCNN import DBCNN\nfrom PIL import Image\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ntest_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))\n])\n\noptions = {'fc': True}\nscnn_root = 'your path of SCNN model'\nmodel = nn.DataParallel(DBCNN(scnn_root, options), device_ids=[0]).cuda()\nmodel_name = type(model).__name__\nprint(model)\n\nckpt = \"your path of the checkpoint file\"\nimage_name = \"your path of test image\"\ncheckpoint = torch.load(ckpt)\nmodel.load_state_dict(checkpoint)\n\nmodel.eval()\n\nI = Image.open(image_name)\nI = test_transform(I)\nI = torch.unsqueeze(I, dim=0)\nI = I.to(device)\nwith torch.no_grad():\n score = model(I)\n\nformat_str = 'Prediction = %.4f'\nprint(format_str % score)\n\n\n\n\n\n\n", "import torch\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass cross_entropy_prob(nn.Module):\n def __init__(self):\n super(cross_entropy_prob, self).__init__()\n\n def forward(self, pred, soft_targets):\n pred = F.log_softmax(pred)\n loss = torch.mean(torch.sum(- soft_targets * pred, 1))\n return loss\n" ]
[ [ "torch.unsqueeze", "torch.no_grad", "torch.cuda.is_available", "torch.load" ], [ "torch.sum", "torch.nn.functional.log_softmax" ] ]
CVxTz/keras_model_aws_ec2
[ "92a19f1c065ba7b19c0cd4e75b30f2935a7efacb" ]
[ "baseline.py" ]
[ "from glob import glob\n\nimport pandas as pd\nimport numpy as np # linear algebra\nfrom tensorflow.keras.applications.imagenet_utils import preprocess_input\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom sklearn.model_selection import train_test_split\n\nfrom models import get_model_classif_nasnet\nfrom utils import data_gen, chunker, read_image\n\n\nlabeled_files = glob('/media/ml/data_ml/dogs-vs-cats/train/*.jpg')\ntest_files = glob('/media/ml/data_ml/dogs-vs-cats/test1/*.jpg')\n\ntrain, val = train_test_split(labeled_files, test_size=0.1, random_state=101010)\n\nmodel = get_model_classif_nasnet()\n\nbatch_size = 32\nh5_path = \"model.h5\"\ncheckpoint = ModelCheckpoint(h5_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\n\n_ = model.fit_generator(\n data_gen(train, batch_size),\n validation_data=data_gen(val, batch_size),\n epochs=10, verbose=1,\n callbacks=[checkpoint],\n steps_per_epoch=len(train) // batch_size,\n validation_steps=len(val) // batch_size)\n\nmodel.load_weights(h5_path)\n\npreds = []\nids = []\n\nfor batch in chunker(test_files, batch_size):\n X = [preprocess_input(read_image(x)) for x in batch]\n X = np.array(X)\n preds_batch = model.predict(X).ravel().tolist()\n preds += preds_batch\n\ndf = pd.DataFrame({'id': test_files, 'label': preds})\ndf.to_csv(\"baseline_nasnet.csv\", index=False)\ndf.head()\n" ]
[ [ "numpy.array", "pandas.DataFrame", "tensorflow.keras.callbacks.ModelCheckpoint", "sklearn.model_selection.train_test_split" ] ]
hzxie/torch-points-kernels
[ "a52ea03bdd62e890320c592282ebd89de659534f" ]
[ "torch_points_kernels/chamfer_dist.py" ]
[ "import torch\n\nif torch.cuda.is_available():\n import torch_points_kernels.points_cuda as tpcuda\n\n\nclass ChamferFunction(torch.autograd.Function):\n @staticmethod\n def forward(ctx, xyz1, xyz2):\n if not torch.cuda.is_available():\n raise NotImplementedError(\n \"CPU version is not available for Chamfer Distance\"\n )\n\n dist1, dist2, idx1, idx2 = tpcuda.chamfer_dist(xyz1, xyz2)\n ctx.save_for_backward(xyz1, xyz2, idx1, idx2)\n\n return dist1, dist2\n\n @staticmethod\n def backward(ctx, grad_dist1, grad_dist2):\n xyz1, xyz2, idx1, idx2 = ctx.saved_tensors\n grad_xyz1, grad_xyz2 = tpcuda.chamfer_dist_grad(\n xyz1, xyz2, idx1, idx2, grad_dist1, grad_dist2\n )\n return grad_xyz1, grad_xyz2\n\n\ndef chamfer_dist(xyz1, xyz2, ignore_zeros=False):\n r\"\"\"\n Calcuates the distance between B pairs of point clouds\n\n Parameters\n ----------\n xyz1 : torch.Tensor (dtype=torch.float32)\n (B, n1, 3) B point clouds containing n1 points\n xyz2 : torch.Tensor (dtype=torch.float32)\n (B, n2, 3) B point clouds containing n2 points\n ignore_zeros : bool\n ignore the point whose coordinate is (0, 0, 0) or not\n\n Returns\n -------\n dist: torch.Tensor\n (B, ): the distances between B pairs of point clouds\n \"\"\"\n if len(xyz1.shape) != 3 or xyz1.size(2) != 3 or len(xyz2.shape) != 3 or xyz2.size(2) != 3:\n raise ValueError('The input point cloud should be of size (B, n_pts, 3)')\n\n batch_size = xyz1.size(0)\n if batch_size == 1 and ignore_zeros:\n non_zeros1 = torch.sum(xyz1, dim=2).ne(0)\n non_zeros2 = torch.sum(xyz2, dim=2).ne(0)\n xyz1 = xyz1[non_zeros1].unsqueeze(dim=0)\n xyz2 = xyz2[non_zeros2].unsqueeze(dim=0)\n\n dist1, dist2 = ChamferFunction.apply(xyz1, xyz2)\n return torch.mean(dist1) + torch.mean(dist2)\n\n" ]
[ [ "torch.sum", "torch.cuda.is_available", "torch.mean" ] ]
zaman-lab/brexitmeter-py
[ "a1ea66d7d747276679f8f4acdb2c2963517a0f04" ]
[ "test/storage_service_test.py" ]
[ "\nimport os\nimport tensorflow as tf\n\nfrom app.storage_service import weights_filepath, dictionaries_dirpath\n\ndef test_local_storage():\n local_filepaths = [\n\t\tweights_filepath(\"local\"),\n\t\tos.path.join(dictionaries_dirpath(\"local\"), \"dic.txt\"),\n\t\tos.path.join(dictionaries_dirpath(\"local\"), \"dic_s.txt\"),\n\t]\n for filepath in local_filepaths:\n assert os.path.isfile(filepath)\n\ndef test_remote_storage():\n remote_filepaths = [\n\t\tweights_filepath(\"remote\"),\n\t\tos.path.join(dictionaries_dirpath(\"remote\"), \"dic.txt\"),\n\t\tos.path.join(dictionaries_dirpath(\"remote\"), \"dic_s.txt\"),\n\t]\n for filepath in remote_filepaths:\n assert tf.io.gfile.exists(filepath)\n" ]
[ [ "tensorflow.io.gfile.exists" ] ]
NREL/Panel-Segmentation
[ "2270157fe87dc211f87d79b9ca38a4fbae967a1a" ]
[ "panel_segmentation/panel_detection.py" ]
[ "\"\"\"\nPanel detection class\n\"\"\"\n\nimport numpy as np\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.models import load_model\nimport cv2\nimport matplotlib.pyplot as plt\nfrom skimage.transform import hough_line, hough_line_peaks\nfrom matplotlib import cm\nimport requests\nfrom PIL import Image\nfrom os import path\n\npanel_seg_model_path = path.join(path.dirname(__file__), 'VGG16Net_ConvTranpose_complete.h5')\npanel_classification_model_path = path.join(path.dirname(__file__), 'VGG16_classification_model.h5')\n\nclass PanelDetection():\n '''\n A class for training a deep learning architecture, \n detecting solar arrays from a satellite image, performing spectral\n clustering, and predicting the Azimuth.\n '''\n def __init__(self, model_file_path = './VGG16Net_ConvTranpose_complete.h5', \n classifier_file_path = './VGG16_classification_model.h5'):\n \n #This is the model used for detecting if there is a panel or not\n self.classifier = load_model(classifier_file_path, \n custom_objects=None, \n compile=False)\n \n self.model = load_model(model_file_path, \n custom_objects=None, \n compile=False)\n \n \n def generateSatelliteImage(self,latitude, longitude, \n file_name_save, google_maps_api_key):\n \"\"\"\n Generates satellite image via Google Maps, using the passed lat-long coordinates.\n \n Parameters\n -----------\n latitude: Float. Latitude coordinate of the site.\n longitude: Float. Longitude coordinate of the site.\n file_name_save: String. File path that we want to save the image to. PNG file.\n google_maps_api_key: String. Google Maps API Key for automatically \n pulling satellite images.\n \n Returns\n ----------- \n Returned satellite image.\n \"\"\"\n #Check input variable for types\n if type(latitude) != float:\n raise TypeError(\"latitude variable must be of type float.\")\n if type(longitude) != float:\n raise TypeError(\"longitude variable must be of type float.\") \n if type(file_name_save) != str:\n raise TypeError(\"file_name_save variable must be of type string.\")\n if type(google_maps_api_key) != str:\n raise TypeError(\"google_maps_api_key variable must be of type string.\")\n #Build up the lat_long string from the latitude-longitude coordinates\n lat_long = str(latitude)+ \", \"+ str(longitude)\n # get method of requests module \n # return response object \n r = requests.get(\"https://maps.googleapis.com/maps/api/staticmap?maptype=satellite&center=\" + lat_long + \"&zoom=18&size=35000x35000&key=\"+google_maps_api_key,\n verify= False) \n #Raise an exception if the satellite image is not successfully returned\n if r.status_code != 200:\n raise ValueError(\"Response status code \" + str(r.status_code) + \": Image not pulled successfully from API.\")\n # wb mode is stand for write binary mode \n f = open(file_name_save, 'wb') \n # r.content gives content, \n # in this case gives image \n f.write(r.content) \n # close method of file object \n # save and close the file \n f.close()\n #Read in the image and return it via the console\n return Image.open(file_name_save) \n\n\n def diceCoeff(self,y_true, y_pred, smooth=1):\n \"\"\"\n This function is used as the metric of similarity between the \n predicted mask and ground truth. \n \n Parameters\n -----------\n y_true - (numpy array of floats) \n the true mask of the image \n y_pred - (numpy array of floats) \n the predicted mask of the data\n smooth - (int): \n a parameter to ensure we are not dividing by zero and also a smoothing parameter. 
\n For back propagation. If the prediction is hard threshold to 0 and 1, it is difficult to back\n propagate the dice loss gradient. We add this parameter to actually smooth out the loss function, \n making it differentiable.\n \n Returns\n -----------\n dice: - float: retuns the metric of similarity between prediction and ground truth\n \"\"\"\n #Ensure that the inputs are of the correct type\n if type(y_true) != np.ndarray:\n raise TypeError(\"Variable y_true should be of type np.ndarray.\")\n if type(y_pred) != np.ndarray:\n raise TypeError(\"Variable y_pred should be of type np.ndarray.\")\n if type(smooth) != int:\n raise TypeError(\"Variable smooth should be of type int.\")\n #If variable types are correct, continue with function\n intersection = K.sum(y_true * y_pred, axis=[1,2,3])\n union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3])\n dice = K.mean((2. * intersection + smooth)/(union + smooth), axis=0)\n return dice\n\n \n def diceCoeffLoss(self, y_true, y_pred):\n \"\"\"\n This function is a loss function that can be used when training the segmentation model.\n This loss function can be used in place of binary crossentropy,\n which is the current loss function in the training stage \n \n Parameters\n -----------\n y_true - (numpy array of floats) \n the true mask of the image \n y_pred - (numpy array of floats)\n the predicted mask of the data\n \n Returns\n -----------\n float: retuns the loss metric between prediction and ground truth\n \n \"\"\"\n #Ensure that the inputs are of the correct type\n if type(y_true) != np.ndarray:\n raise TypeError(\"Variable y_true should be of type np.ndarray.\")\n if type(y_pred) != np.ndarray:\n raise TypeError(\"Variable y_pred should be of type np.ndarray.\")\n return 1-self.dice_coef(y_true, y_pred)\n \n\n def testBatch(self, test_data, test_mask=None, BATCH_SIZE = 16, model =None):\n \"\"\"\n This function is used to predict the mask of a batch of test satellite images.\n Use this to test a batch of images greater than 4\n \n Parameters\n -----------\n 'test_data': (nparray float) \n the satellite images \n 'test_mask': (nparray int/float) \n the mask ground truth corresponding to the test_data\n 'batch_size': (int) \n the batch size of the test_data. \n 'model': (tf.keras.model.object)\n a custom model can be provided as input or we can use the initialized model\n \n Returns\n -----------\n 'test_res': (nparray float) \n retuns the predicted masks.\n 'accuracy': (float) \n returns the accuarcy of prediction as compared with the ground truth if provided\n \"\"\"\n #Ensure that the inputs are of the correct type\n if type(test_data) != np.ndarray:\n raise TypeError(\"Variable test_data should be of type np.ndarray.\")\n if type(BATCH_SIZE) != int:\n raise TypeError(\"Variable BATCH_SIZE should be of type int.\") \n test_datagen = image.ImageDataGenerator(rescale=1./255,dtype='float32')\n test_image_generator = test_datagen.flow(\n test_data,\n batch_size = BATCH_SIZE, shuffle=False)\n if model != None:\n test_res = model.predict(test_image_generator)\n else :\n test_res = self.model.predict(test_image_generator)\n if test_mask != None: \n test_mask = test_mask/np.max(test_mask)\n accuracy = self.dice_coef(test_mask,test_res) \n return test_res,accuracy\n else:\n return test_res\n\n def testSingle(self, test_data, test_mask=None, model =None):\n \"\"\"\n This function is used to predict the mask corresponding to a single test image. 
\n It takes as input the test_data (a required parameter) and two non-required parameters- test_mask and model\n \n Use this to test a single image.\n\n Parameters\n -----------\n 'test_data': (nparray int or float) \n the satellite image. dimension is (640,640,3) or (a,640,640,3) \n 'test_mask': (nparray int/flaot) \n the ground truth of what the mask should be \n 'model': (tf.keras model object) \n a custom model can be provided as input or we can use the initialized model\n \n Returns\n -----------\n 'test_res': (nparray float) \n retuns the predicted mask of the single image. The dimension is (640,640 or (a,640,640))\n 'accuracy': (float) \n returns the accuarcy of prediction as compared with the ground truth if provided\n \n \"\"\"\n #check that the inputs are correct\n if type(test_data) != np.ndarray:\n raise TypeError(\"Variable test_data must be of type Numpy ndarray.\")\n #Test that the input array has 2 to 3 channels\n if (len(test_data.shape) > 3) | (len(test_data.shape) < 2):\n raise ValueError(\"Numpy array test_data shape should be 2 or 3 dimensions.\")\n #Once the array passes checks, run the sequence\n test_data = test_data/255\n test_data = test_data[np.newaxis, :]\n if model != None:\n test_res = model.predict(test_data)\n else :\n test_res = self.model.predict(test_data)\n test_res = (test_res[0].reshape(640,640))\n if test_mask != None: \n test_mask = test_mask/np.max(test_mask)\n accuracy = self.dice_coef(test_mask,test_res) \n return test_res,accuracy\n else:\n return test_res \n \n\n def hasPanels(self, test_data):\n \"\"\"\n This function is used to predict if there is a panel in an image or not. \n Note that it uses a saved classifier model we have trained and not the \n segmentation model. \n \n Parameters\n -----------\n test_data: (nparray float or int) \n the satellite image. The shape should be [a,640,640,3] where \n 'a' is the number of data or (640,640,3) if it is a single image\n \n Returns\n -----------\n Boolean. Returns True if solar array is detected in an image, and False otherwise.\n \"\"\"\n #Check that the input is correct\n if type(test_data) != np.ndarray:\n raise TypeError(\"Variable test_data must be of type Numpy ndarray.\")\n #Test that the input array has 3 to 4 channels\n if (len(test_data.shape) > 4) | (len(test_data.shape) < 3):\n raise ValueError(\"Numpy array test_data shape should be 3 dimensions if a single image, or 4 dimensions if a batch of images.\") \n test_data = test_data/255\n #This ensures the first dimension is the number of test data to be predicted\n if test_data.ndim == 3:\n test_data = test_data[np.newaxis, :]\n prediction = self.classifier.predict(test_data)\n #index 0 is for no panels while index 1 is for panels\n if prediction[0][1] > prediction[0][0]:\n return True \n else:\n return False\n \n\n def detectAzimuth(self, in_img, number_lines=5):\n \"\"\"\n This function uses canny edge detection to first extract the edges of the input image. \n To use this function, you have to first predict the mask of the test image \n using testSingle function. Then use the cropPanels function to extract the solar \n panels from the input image using the predicted mask. 
Hence the input image to this \n function is the cropped image of solar panels.\n \n After edge detection, Hough transform is used to detect the most dominant lines in\n the input image and subsequently use that to predict the azimuth of a single image\n \n Parameters\n -----------\n in_img: (nparray uint8) \n The image containing the extracted solar panels with other pixels zeroed off. Dimension is [1,640,640,3]\n number_lines: (int) \n This variable tells the function the number of dominant lines it should examine.\n We currently inspect the top 10 lines.\n \n Returns\n -----------\n azimuth: (int) \n The azimuth of the panel in the image.\n \"\"\"\n #Check that the input variables are of the correct type\n if type(in_img) != np.ndarray:\n raise TypeError(\"Variable in_img must be of type Numpy ndarray.\")\n if type(number_lines) != int:\n raise TypeError(\"Variable number_lines must be of type int.\")\n #Run through the function\n edges = cv2.Canny(in_img[0],50,150,apertureSize=3)\n tested_angles = np.linspace(-np.pi / 2, np.pi / 2, 360)\n h, theta, d = hough_line(edges, theta=tested_angles)\n origin = np.array((0, edges.shape[1]))\n ind =0\n azimuth = 0\n az = np.zeros((number_lines))\n # Classic straight-line Hough transform\n # Set a precision of 0.5 degree. \n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=number_lines, threshold =0.25*np.max(h))):\n y0, y1 = (dist - origin * np.cos(angle)) / np.sin(angle)\n \n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n az[ind] = 90+deg_ang\n else:\n az[ind] = 270 + deg_ang\n ind =ind+1\n unique_elements, counts_elements = np.unique(az, return_counts=True)\n check = counts_elements[np.argmax(counts_elements)]\n if check == 1:\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=1, threshold =0.25*np.max(h))):\n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n azimuth = 90+deg_ang\n else:\n azimuth = 270 + deg_ang\n else:\n azimuth = (unique_elements[np.argmax(counts_elements)])\n return azimuth \n\n \n def cropPanels(self, test_data, test_res):\n \"\"\"\n This function basically isolates regions with solar panels in a \n satellite image using the predicted mask. It zeros out other pixels that does not \n contain a panel.\n You can use this for a single test data or multiple test data. \n \n Parameters \n ----------\n test_data: (nparray float)\n This is the input test data. This can be a single image or multiple image. Hence the \n dimension can be (640,640,3) or (a,640,640,3)\n test_res: (nparray float) \n This is the predicted mask of the test images passed as an input and used to crop out the \n solar panels. dimension is (640,640)\n \n Returns \n ----------\n new_test_res: (nparray uint8) \n This returns images here the solar panels have been cropped out and the background zeroed. \n It has the same shape as test data. 
The dimension is [a,640,640,3] where a is the number of\n input images\n \n \"\"\"\n #Check that the input variables are of the correct type\n if type(test_data) != np.ndarray:\n raise TypeError(\"Variable test_data must be of type Numpy ndarray.\")\n if type(test_res) != np.ndarray:\n raise TypeError(\"Variable test_res must be of type Numpy ndarray.\") \n #Convert the test_data array from 3D to 4D\n if test_data.ndim == 3:\n test_data = test_data[np.newaxis, :]\n new_test_res = np.uint8(np.zeros((test_data.shape[0],640,640,3)))\n for ju in np.arange(test_data.shape[0]):\n try:\n in_img = test_res[ju].reshape(640,640)\n except:\n in_img = test_res.reshape(640,640)\n in_img[in_img < 0.9] = 0\n in_img[in_img >= 0.9] = 1\n in_img = np.uint8(in_img)\n test2 = np.copy(test_data[ju])\n test2[(1-in_img).astype(bool),0] = 0\n test2[(1-in_img).astype(bool),1] = 0\n test2[(1-in_img).astype(bool),2] = 0\n new_test_res[ju] = test2 \n return new_test_res\n \n \n def plotEdgeAz(self, test_results, no_lines=5, \n no_figs=1, save_img_file_path = None,\n plot_show = False):\n \"\"\"\n This function is used to generate plots of the image with its azimuth\n It can generate three figures or one. For three figures, that include the \n input image, the hough transform space and the input images with detected lines.\n For single image, it only outputs the input image with detected lines.\n \n Parameters \n ----------\n test_results: (nparray float64 or unit8) \n 8-bit input image. This variable represents the predicted images from the segmentation model. Hence the \n dimension must be [a,b,c,d] where [a] is the number of images, [b,c] are the dimensions\n of the image - 640 x 640 in this case and [d] is 3 - RGB\n no_lines: (int) \n default is 10. This variable tells the function the number of dominant lines it should examine. \n no_figs: (int) \n 1 or 3. If the number of figs is 1. It outputs the mask with Hough lines and the predicted azimuth\n However, if the number of lines is 3, it gives three plots. \n 1. The input image,\n 2. Hough transform search space\n 3. 
Unput image with houghlines and the predicted azimuth\n \n save_img_file_path: (string) \n You can pass as input the location to save the plots\n plot_show: Boolen: If False, it will supress the plot as an output and just save the plots in a folder\n \n Returns \n ----------\n Plot of the masked image, with detected Hough Lines and azimuth estimate.\n \"\"\"\n #Check that the input variables are of the correct type\n if type(test_results) != np.ndarray:\n raise TypeError(\"Variable test_results must be of type Numpy ndarray.\")\n if type(no_lines) != int:\n raise TypeError(\"Variable no_lines must be of type int.\") \n if type(no_figs) != int:\n raise TypeError(\"Variable no_figs must be of type int.\") \n if type(plot_show) != bool:\n raise TypeError(\"Variable no_figs must be of type boolean.\") \n \n for ii in np.arange(test_results.shape[0]):\n #This changes the float64 to uint8\n if (test_results.dtype is np.dtype(np.float64)):\n in_img = test_results[ii].reshape(640,640)\n in_img[in_img < 0.9] = 0\n in_img[in_img >= 0.9] = 1\n in_img = np.uint8(in_img)\n\n in_img = test_results[ii]\n #Edge detection\n edges = cv2.Canny(in_img,50,150,apertureSize=3)\n tested_angles = np.linspace(-np.pi / 2, np.pi / 2, 360)\n h, theta, d = hough_line(edges, theta=tested_angles)\n az = np.zeros((no_lines))\n origin = np.array((0, edges.shape[1]))\n ind =0\n # Generating figure 1 \n fig, ax = plt.subplots(1, no_figs, figsize=(10, 6))\n if no_figs == 1:\n ax.imshow(edges)# cmap=cm.gray)\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=no_lines, threshold =0.25*np.max(h))):\n y0, y1 = (dist - origin * np.cos(angle)) / np.sin(angle)\n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n az[ind] = 90+deg_ang\n else:\n az[ind] = 270 + deg_ang\n ind =ind+1\n ax.plot(origin, (y0, y1), '-r')\n ax.set_xlim(origin)\n ax.set_ylim((edges.shape[0], 0))\n ax.set_axis_off()\n unique_elements, counts_elements = np.unique(az, return_counts=True)\n \n check = counts_elements[np.argmax(counts_elements)]\n \n if check == 1:\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=1, threshold =0.25*np.max(h))):\n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n azimuth = 90+deg_ang\n else:\n azimuth = 270 + deg_ang\n else:\n azimuth = (unique_elements[np.argmax(counts_elements)])\n #print(np.asarray((unique_elements, counts_elements)))\n ax.set_title('Azimuth = %i' %azimuth)\n #save the image\n if save_img_file_path != None:\n plt.savefig(save_img_file_path + '/crop_mask_az_'+str(ii),\n dpi=300)\n #Show the plot if plot_show = True\n if plot_show == True:\n plt.tight_layout()\n plt.show() \n elif no_figs == 3:\n ax = ax.ravel()\n\n ax[0].imshow(in_img, cmap=cm.gray)\n ax[0].set_title('Input image')\n ax[0].set_axis_off()\n \n\n ax[1].imshow(np.log(1 + h),\n extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]), d[-1], d[0]],\n cmap=cm.gray, aspect=1/1.5)\n ax[1].set_title('Hough transform')\n ax[1].set_xlabel('Angles (degrees)')\n ax[1].set_ylabel('Distance (pixels)')\n ax[1].axis('image')\n\n ax[2].imshow(in_img)# cmap=cm.gray)\n origin = np.array((0, edges.shape[1]))\n ind =0\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=no_lines, threshold =0.25*np.max(h))):\n y0, y1 = (dist - origin * np.cos(angle)) / np.sin(angle)\n \n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n az[ind] = 90+deg_ang\n else:\n az[ind] = 270 + deg_ang\n ind =ind+1\n ax.plot(origin, (y0, y1), '-r')\n ax[2].set_xlim(origin)\n ax[2].set_ylim((edges.shape[0], 0))\n ax[2].set_axis_off()\n 
unique_elements, counts_elements = np.unique(az, return_counts=True)\n \n check = counts_elements[np.argmax(counts_elements)]\n \n if check == 1:\n for _, angle, dist in zip(*hough_line_peaks(h, theta, d, num_peaks=1, threshold =0.25*np.max(h))):\n deg_ang = int(np.rad2deg(angle))\n if deg_ang >= 0:\n azimuth = 90+deg_ang\n else:\n azimuth = 270 + deg_ang\n else:\n azimuth = (unique_elements[np.argmax(counts_elements)])\n #print(np.asarray((unique_elements, counts_elements)))\n ax[2].set_title('Azimuth = %i' %azimuth)\n #save the image\n if save_img_file_path != None:\n plt.savefig(save_img_file_path + '/crop_mask_az_'+str(ii),\n dpi=300)\n #Show the plot if plot_show = True\n if plot_show == True:\n plt.tight_layout()\n plt.show() \n else:\n print(\"Enter valid parameters\")\n \n\n def clusterPanels(self, test_mask, fig=False):\n '''\n This function uses connected component algorithm to cluster the panels\n\n Parameters\n ----------\n test_mask : (bool) or (float)\n The predicted mask. Dimension is (640,640) or can be converted to RGB (640,640,3)\n fig : (bool)\n shows the clustering image if fig = True\n\n Returns\n -------\n (uint8)\n Masked image containing detected clusters each of dimension(640,640,3)\n \n (uint8)\n The optimal number of clusters\n '''\n #Check that the input variables are of the correct type\n if type(test_mask) != np.ndarray:\n raise TypeError(\"Variable test_mask must be of type Numpy ndarray.\")\n if type(fig) != bool:\n raise TypeError(\"Variable fig must be of type bool.\") \n #Continue running through the function if all the inputs are correct\n if (len(test_mask.shape) < 3):\n test_mask = cv2.cvtColor(test_mask,cv2.COLOR_GRAY2RGB)\n test_mask = test_mask.reshape(640,640,3) \n # Converting those pixels with values 0-0.5 to 0 and others to 1\n img = cv2.threshold(test_mask, 0.5, 1, cv2.THRESH_BINARY)[1]\n # Applying cv2.connectedComponents() \n num_labels, labels = cv2.connectedComponents(img[:,:,2].reshape(640,640)) \n # Map component labels to hue val, 0-179 is the hue range in OpenCV\n label_hue = np.uint8(179*labels/np.max(labels))\n blank_ch = 255*np.ones_like(label_hue)\n labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])\n # Converting cvt to BGR\n labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)\n # set background label to black\n labeled_img[label_hue==0] = 0\n #Initialize each clusters\n clusters = np.uint8(np.zeros((num_labels-1, 640, 640,3)))\n #starting from 1 to ignore background\n for i in np.arange(1,num_labels):\n clus = np.copy(test_mask)\n c_mask = labels==i\n #clus_label = np.zeros((640,640,3))\n clus[(1-c_mask).astype(bool),0] = 0\n clus[(1-c_mask).astype(bool),1] = 0\n clus[(1-c_mask).astype(bool),2] = 0\n #clus_label = np.stack((clus_label,)*3, axis=-1)\n clusters[i-1] = clus\n # Loop through each cluster, and detect number of non-zero values\n # in each cluster.\n clusters_list_keep = []\n for cluster_number in range(clusters.shape[0]):\n cluster = clusters[cluster_number]\n # Get the number of non-zero values as a ratio of total pixels\n pixel_count = len(cluster[cluster>0])\n total_pixels = cluster.shape[0] * cluster.shape[1] * cluster.shape[2]\n # Must greater than 3% non-zero pixels or we omit the cluster\n print(pixel_count / total_pixels)\n if (pixel_count / total_pixels) >= 0.0015:\n clusters_list_keep.append(cluster_number)\n # Filter clusters\n clusters = clusters[clusters_list_keep]\n if fig == True:\n #Showing Image after Component Labeling\n plt.figure()\n plt.imshow(cv2.cvtColor(labeled_img, 
cv2.COLOR_BGR2RGB))\n plt.axis('off')\n plt.title(\"Image after Component Labeling\")\n plt.show()\n return len(clusters),clusters\n \n \n\n \n" ]
[ [ "tensorflow.keras.backend.sum", "tensorflow.keras.preprocessing.image.ImageDataGenerator", "numpy.dtype", "matplotlib.pyplot.tight_layout", "numpy.ones_like", "numpy.copy", "numpy.log", "tensorflow.keras.backend.mean", "matplotlib.pyplot.figure", "numpy.cos", "matplotlib.pyplot.title", "numpy.linspace", "numpy.unique", "numpy.uint8", "numpy.zeros", "numpy.rad2deg", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplots", "numpy.argmax", "numpy.arange", "numpy.max", "tensorflow.keras.models.load_model", "matplotlib.pyplot.show", "numpy.array", "numpy.sin" ] ]
deka108/mars
[ "2cd39847c188bb690dd5e2d612a5cbe9f7b21eca" ]
[ "mars/web/session.py" ]
[ "# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport time\nimport logging\nimport pickle\nimport sys\nimport uuid\nfrom io import BytesIO\nfrom numbers import Integral\n\nimport numpy as np\n\nfrom ..config import options\nfrom ..core.operand import Fetch\nfrom ..errors import ResponseMalformed, ExecutionInterrupted, ExecutionFailed, \\\n ExecutionStateUnknown, ExecutionNotStopped\nfrom ..serialize import dataserializer\nfrom ..serialize.dataserializer import pyarrow\nfrom ..tensor.core import Indexes\nfrom ..utils import build_tileable_graph, sort_dataframe_result, \\\n numpy_dtype_from_descr_json, serialize_graph, serialize_serializable\n\nlogger = logging.getLogger(__name__)\n\n\nclass Session(object):\n def __init__(self, endpoint, session_id=None, req_session=None, verify_ssl=True,\n **session_kwargs):\n self._endpoint = endpoint.rstrip('/')\n self._session_id = session_id\n self._session_kwargs = session_kwargs\n # dict structure: {tileable_key -> graph_key, tileable_ids}\n # dict value is a tuple object which records graph key and tileable id\n self._executed_tileables = dict()\n\n self._serial_type = None\n self._pickle_protocol = pickle.HIGHEST_PROTOCOL\n\n if req_session:\n self._req_session = req_session\n else:\n import requests\n from requests.adapters import HTTPAdapter\n\n self._req_session = requests.Session()\n self._req_session.mount('http://stackoverflow.com', HTTPAdapter(max_retries=5))\n\n self._req_session.verify = verify_ssl\n if not verify_ssl:\n try:\n import urllib3\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n except ImportError: # pragma: no cover\n pass\n\n self._main()\n\n @property\n def session_id(self):\n return self._session_id\n\n @property\n def endpoint(self):\n return self._endpoint\n\n @endpoint.setter\n def endpoint(self, url):\n self._endpoint = url\n\n def _main(self):\n if pyarrow is None:\n self._serial_type = dataserializer.SerialType.PICKLE\n else:\n self._serial_type = dataserializer.SerialType(options.client.serial_type.lower())\n\n session_kw = self._session_kwargs.copy()\n session_kw['pyver'] = '.'.join(str(v) for v in sys.version_info[:3])\n session_kw['pickle_protocol'] = self._pickle_protocol\n if pyarrow is not None:\n session_kw['arrow_version'] = pyarrow.__version__\n\n if self._session_id is None:\n resp = self._req_session.post(self._endpoint + '/api/session', data=session_kw)\n\n if resp.status_code >= 400:\n raise SystemError('Failed to create mars session: ' + resp.reason)\n else:\n resp = self._req_session.get(\n self._endpoint + '/api/session/' + self._session_id, params=session_kw)\n if resp.status_code == 404:\n raise ValueError(f'The session with id = {self._session_id} doesn\\'t exist')\n if resp.status_code >= 400:\n raise SystemError('Failed to check mars session.')\n\n content = json.loads(resp.text)\n self._session_id = content['session_id']\n self._pickle_protocol = content.get('pickle_protocol', 
pickle.HIGHEST_PROTOCOL)\n\n # as pyarrow will use pickle.HIGHEST_PROTOCOL to pickle, we need to use\n # SerialType.PICKLE when pickle protocol between client and server\n # does not agree with each other\n if not content.get('arrow_compatible') or self._pickle_protocol != pickle.HIGHEST_PROTOCOL:\n self._serial_type = dataserializer.SerialType.PICKLE\n\n def _get_tileable_graph_key(self, tileable_key):\n return self._executed_tileables[tileable_key][0]\n\n def _set_tileable_graph_key(self, tileable, graph_key):\n tileable_key = tileable.key\n tileable_id = tileable.id\n if tileable_key in self._executed_tileables:\n self._executed_tileables[tileable_key][1].add(tileable_id)\n else:\n self._executed_tileables[tileable_key] = graph_key, {tileable_id}\n\n @staticmethod\n def _handle_json_response(resp, allow_empty=True, raises=True):\n try:\n resp_txt = resp.text\n if allow_empty:\n resp_txt = resp_txt or '{}'\n resp_json = json.loads(resp_txt)\n except json.JSONDecodeError:\n text_part = resp.text if len(resp.text) < 100 else resp.text[:100] + '...'\n raise ResponseMalformed(f'Failed to parse server response. Status={resp.status_code} '\n f'Response=\"{text_part}\"')\n\n if raises and resp.status_code >= 400:\n exc_info = pickle.loads(base64.b64decode(resp_json['exc_info']))\n raise exc_info[1].with_traceback(exc_info[2])\n return resp_json\n\n def _check_response_finished(self, graph_url, timeout=None):\n import requests\n try:\n resp = self._req_session.get(graph_url, params={'wait_timeout': timeout})\n except requests.ConnectionError as ex:\n err_msg = str(ex)\n if 'ConnectionResetError' in err_msg or 'Connection refused' in err_msg or \\\n 'Connection aborted' in err_msg:\n return False\n raise\n\n if resp.status_code == 504:\n logging.debug('Gateway Time-out, try again')\n return False\n if resp.status_code >= 400:\n raise SystemError(f'Failed to obtain execution status. 
Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n\n resp_json = self._handle_json_response(resp, raises=False)\n if resp_json['state'] == 'succeeded':\n return True\n elif resp_json['state'] in ('running', 'preparing'):\n return False\n elif resp_json['state'] in ('cancelled', 'cancelling'):\n raise ExecutionInterrupted\n elif resp_json['state'] == 'failed':\n if 'exc_info' in resp_json:\n exc_info = pickle.loads(base64.b64decode(resp_json['exc_info']))\n exc = exc_info[1].with_traceback(exc_info[2])\n raise ExecutionFailed('Graph execution failed.') from exc\n else:\n raise ExecutionFailed('Graph execution failed with unknown reason.')\n raise ExecutionStateUnknown('Unknown graph execution state ' + resp_json['state'])\n\n def run(self, *tileables, **kw):\n timeout = kw.pop('timeout', -1)\n compose = kw.pop('compose', True)\n fetch = kw.pop('fetch', True)\n name = kw.pop('name', None)\n if kw:\n raise TypeError(f'run got unexpected key arguments {kw!r}')\n\n # those executed tileables should fetch data directly, submit the others\n run_tileables = [t for t in tileables if t.key not in self._executed_tileables]\n\n if name is not None:\n if not isinstance(name, (list, tuple)):\n name = [name]\n if len(name) != len(tileables):\n raise TypeError('Name must match execute tileables')\n name = ','.join(name)\n\n graph = build_tileable_graph(run_tileables, set(self._executed_tileables.keys()))\n targets = [t.key for t in run_tileables]\n\n if len(graph) > 0:\n targets_join = ','.join(targets)\n session_url = self._endpoint + '/api/session/' + self._session_id\n serialized_graph = serialize_graph(graph)\n\n resp_json = self._submit_graph(serialized_graph, targets_join, names=name or '', compose=compose)\n graph_key = resp_json['graph_key']\n graph_url = f'{session_url}/graph/{graph_key}'\n\n exec_start_time = time.time()\n time_elapsed = 0\n check_interval = options.check_interval\n while timeout <= 0 or time_elapsed < timeout:\n timeout_val = min(check_interval, timeout - time_elapsed) if timeout > 0 else check_interval\n try:\n if self._check_response_finished(graph_url, timeout_val):\n break\n except KeyboardInterrupt:\n resp = self._req_session.delete(graph_url)\n if resp.status_code >= 400:\n raise ExecutionNotStopped(\n f'Failed to stop graph execution. 
Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n finally:\n time_elapsed = time.time() - exec_start_time\n\n if 0 < timeout < time.time() - exec_start_time:\n raise TimeoutError\n\n for t in tileables:\n self._set_tileable_graph_key(t, graph_key)\n\n if not fetch:\n return\n else:\n return self.fetch(*tileables)\n\n def _is_executed(self, tileable):\n # if tileble.key in executed tileables\n # or it's a fetch already\n return tileable.key in self._executed_tileables or \\\n isinstance(tileable.op, Fetch)\n\n def fetch(self, *tileables, **kw):\n from ..tensor.indexing import TensorIndex\n from ..dataframe.indexing.iloc import DataFrameIlocGetItem, SeriesIlocGetItem\n\n timeout = kw.pop('timeout', None)\n if kw:\n raise TypeError(f'fetch got unexpected key arguments {kw!r}')\n\n results = list()\n for tileable in tileables:\n if tileable.key not in self._executed_tileables and \\\n isinstance(tileable.op, (TensorIndex, DataFrameIlocGetItem, SeriesIlocGetItem)):\n to_fetch_tileable = tileable.inputs[0]\n indexes = tileable.op.indexes\n if not all(isinstance(ind, (slice, Integral)) for ind in indexes):\n raise ValueError('Only support fetch data slices')\n else:\n to_fetch_tileable = tileable\n indexes = []\n\n if not self._is_executed(to_fetch_tileable):\n raise ValueError('Cannot fetch the unexecuted tileable')\n\n key = to_fetch_tileable.key\n indexes_str = base64.b64encode(\n serialize_serializable(Indexes(indexes=indexes))).decode('ascii')\n\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n compression_str = ','.join(v.value for v in dataserializer.get_supported_compressions())\n params = dict(compressions=compression_str, slices=indexes_str,\n serial_type=self._serial_type.value, pickle_protocol=self._pickle_protocol)\n data_url = f'{session_url}/graph/{self._get_tileable_graph_key(key)}/data/{key}'\n resp = self._req_session.get(data_url, params=params, timeout=timeout)\n if resp.status_code >= 400:\n raise ValueError(f'Failed to fetch data from server. Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n result_data = dataserializer.loads(resp.content)\n results.append(sort_dataframe_result(tileable, result_data))\n return results\n\n @classmethod\n def _process_int_or_dict_argument(cls, argument, name, params):\n if argument is None:\n return\n if not isinstance(argument, dict):\n params[name] = argument\n else:\n params[name] = ','.join(f'{k}={v}' for k, v in argument.items())\n\n def fetch_tileable_op_logs(self, tileable_op_key, offsets=None, sizes=None):\n url = f'{self._endpoint}/api/session/{self._session_id}/op/{tileable_op_key}/log'\n params = dict()\n self._process_int_or_dict_argument(offsets, 'offsets', params)\n self._process_int_or_dict_argument(sizes, 'sizes', params)\n resp = self._req_session.get(url, params=params)\n if resp.status_code >= 400:\n raise ValueError(f'Failed to fetch log from server. 
Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n return json.loads(resp.content)\n\n def fetch_log(self, tileables, offsets=None, sizes=None):\n from ..custom_log import fetch\n\n return fetch(tileables, self, offsets=offsets, sizes=sizes)\n\n def get_named_tileable_infos(self, name):\n from ..context import TileableInfos\n\n url = f'{self._endpoint}/api/session/{self._session_id}'\n params = dict(name=name)\n resp = self._req_session.get(url, params=params)\n if resp.status_code >= 400: # pragma: no cover\n raise ValueError(f'Failed to get tileable key from server. Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n tileable_key = self._handle_json_response(resp)['tileable_key']\n nsplits, extra_meta = self._get_tileable_meta(tileable_key)\n shape = tuple(sum(s) for s in nsplits)\n return TileableInfos(tileable_key, shape, extra_meta)\n\n def create_mutable_tensor(self, name, shape, dtype, fill_value=None, chunk_size=None, *_, **__):\n from ..tensor.utils import create_mutable_tensor\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n tensor_url = f'{session_url}/mutable-tensor/{name}?action=create'\n if not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n # avoid built-in scalar dtypes are made into one-field record type.\n if dtype.fields:\n dtype_descr = dtype.descr\n else:\n dtype_descr = str(dtype)\n tensor_json = {\n 'shape': shape,\n 'dtype': dtype_descr,\n 'fill_value': fill_value,\n 'chunk_size': chunk_size,\n }\n resp = self._req_session.post(tensor_url, json=tensor_json)\n shape, dtype, chunk_size, chunk_keys, chunk_eps = self._handle_json_response(resp)\n return create_mutable_tensor(name, chunk_size, shape, numpy_dtype_from_descr_json(dtype),\n chunk_keys, chunk_eps)\n\n def get_mutable_tensor(self, name):\n from ..tensor.utils import create_mutable_tensor\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n tensor_url = f'{session_url}/mutable-tensor/{name}'\n resp = self._req_session.get(tensor_url)\n shape, dtype, chunk_size, chunk_keys, chunk_eps = self._handle_json_response(resp)\n return create_mutable_tensor(name, chunk_size, shape, numpy_dtype_from_descr_json(dtype),\n chunk_keys, chunk_eps)\n\n def write_mutable_tensor(self, tensor, index, value):\n \"\"\"\n How to serialize index and value:\n\n 1. process_index and serialize it as json\n 2. 
the payload of POST request:\n\n * a int64 value indicate the size of index json\n * ascii-encoded bytes of index json\n * pyarrow serialized bytes of `value`\n \"\"\"\n from ..tensor.core import Indexes\n from ..serialize import dataserializer\n\n index = Indexes(indexes=index)\n index_bytes = base64.b64encode(serialize_serializable(index))\n bio = BytesIO()\n bio.write(np.int64(len(index_bytes)).tobytes())\n bio.write(index_bytes)\n dataserializer.dump(value, bio)\n\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n tensor_url = f'{session_url}/mutable-tensor/{tensor.name}'\n resp = self._req_session.put(tensor_url, data=bio.getvalue(),\n headers={'Content-Type': 'application/octet-stream'})\n self._handle_json_response(resp)\n\n def seal(self, tensor):\n from ..tensor.utils import create_fetch_tensor\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n tensor_url = f'{session_url}/mutable-tensor/{tensor.name}?action=seal'\n resp = self._req_session.post(tensor_url)\n graph_key_hex, tileable_key, tensor_id, tensor_meta = self._handle_json_response(resp)\n self._executed_tileables[tileable_key] = uuid.UUID(graph_key_hex), {tensor_id}\n\n # # Construct Tensor on the fly.\n shape, dtype, chunk_size, chunk_keys, _ = tensor_meta\n return create_fetch_tensor(chunk_size, shape, numpy_dtype_from_descr_json(dtype),\n tensor_key=tileable_key, chunk_keys=chunk_keys)\n\n def _get_tileable_nsplits(self, tileable_key):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n graph_key = self._get_tileable_graph_key(tileable_key)\n url = f'{session_url}/graph/{graph_key}/data/{tileable_key}?type=nsplits'\n resp = self._req_session.get(url)\n new_nsplits = self._handle_json_response(resp)\n return new_nsplits\n\n def _get_tileable_meta(self, tileable_key):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n graph_key = self._get_tileable_graph_key(tileable_key)\n url = f'{session_url}/graph/{graph_key}/data/{tileable_key}?type=meta'\n resp = self._req_session.get(url)\n meta = self._handle_json_response(resp)\n return pickle.loads(base64.b64decode(meta)) # nosec\n\n def _update_tileable_shape(self, tileable):\n tileable_key = tileable.key\n new_nsplits = self._get_tileable_nsplits(tileable_key)\n tileable._update_shape(tuple(sum(nsplit) for nsplit in new_nsplits))\n tileable.nsplits = new_nsplits\n\n def decref(self, *keys):\n for tileable_key, tileable_id in keys:\n if tileable_key not in self._executed_tileables:\n continue\n graph_key, ids = self._executed_tileables[tileable_key]\n\n if tileable_id in ids:\n ids.remove(tileable_id)\n # for those same key tileables, do decref only when all those tileables are garbage collected\n if len(ids) != 0:\n continue\n self.delete_data(tileable_key)\n\n def delete_data(self, tileable_key, wait=False):\n if tileable_key not in self._executed_tileables:\n return\n graph_key, _ids = self._executed_tileables[tileable_key]\n data_url = f'{self._endpoint}/api/session/{self._session_id}/graph/{graph_key}' \\\n f'/data/{tileable_key}?wait={1 if wait else 0}'\n self._req_session.delete(data_url)\n self._executed_tileables.pop(tileable_key, None)\n\n def stop(self, graph_key):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n graph_url = session_url + '/graph/' + graph_key\n resp = self._req_session.delete(graph_url)\n if resp.status_code >= 400:\n raise SystemError(f'Failed to stop graph execution. 
Code: {resp.status_code}, '\n f'Reason: {resp.reason}, Content:\\n{resp.text}')\n\n def _submit_graph(self, serialized_graph, targets, names=None, compose=True):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n resp = self._req_session.post(session_url + '/graph', dict(\n graph=base64.b64encode(serialized_graph).decode('ascii'),\n target=targets,\n names=names,\n compose='1' if compose else '0'\n ))\n return self._handle_json_response(resp)\n\n def get_graph_states(self):\n resp = self._req_session.get(f'{self._endpoint}/api/session/{self._session_id}/graph')\n return self._handle_json_response(resp)\n\n def close(self):\n session_url = f'{self._endpoint}/api/session/{self._session_id}'\n for key in list(self._executed_tileables.keys()):\n self.delete_data(key, wait=True)\n resp = self._req_session.delete(session_url)\n if resp.status_code >= 400:\n raise SystemError('Failed to close mars session.')\n\n def check_service_ready(self, timeout=1):\n import requests\n try:\n resp = self._req_session.get(self._endpoint + '/api', timeout=timeout)\n except (requests.ConnectionError, requests.Timeout):\n return False\n if resp.status_code >= 400:\n return False\n return True\n\n def count_workers(self):\n resp = self._req_session.get(self._endpoint + '/api/worker?action=count', timeout=1)\n return self._handle_json_response(resp)\n\n def get_cpu_count(self):\n resp = self._req_session.get(self._endpoint + '/api/worker?action=count_cpu', timeout=1)\n return self._handle_json_response(resp)\n\n def rescale_workers(self, new_scale, min_workers=None, wait=True, timeout=None):\n data = json.dumps(dict(new_scale=new_scale, min_workers=min_workers))\n wait_req = 1 if wait else 0\n resp = self._req_session.patch(f'{self._endpoint}/api/worker?action=count&wait={wait_req}',\n data, timeout=timeout)\n return self._handle_json_response(resp)\n\n def get_workers_meta(self):\n resp = self._req_session.get(self._endpoint + '/api/worker', timeout=1)\n return self._handle_json_response(resp)\n\n def get_task_count(self):\n resp = self._req_session.get(f'{self._endpoint}/api/session/{self._session_id}/graph')\n return len(self._handle_json_response(resp))\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.close()\n" ]
[ [ "numpy.dtype" ] ]
jaryP/ContinualAI
[ "7d9b7614066d219ebd72049692da23ad6ec132b0", "7d9b7614066d219ebd72049692da23ad6ec132b0" ]
[ "continual_learning/scenarios/classification/new_classes/nc_scenarios.py", "continual_learning/scenarios/utils.py" ]
[ "from collections import defaultdict\nfrom typing import Union, List, Any, Callable, Dict\n\nimport numpy as np\n\nfrom continual_learning.datasets.base import AbstractDataset, DatasetSplitsContainer\n\nfrom continual_learning.scenarios.base import TasksGenerator\nfrom continual_learning.scenarios.classification.new_classes import \\\n NCTransformingScenario\nfrom continual_learning.scenarios.classification.utils import \\\n get_dataset_subset_using_labels\nfrom continual_learning.scenarios.tasks import TransformerTask, Task\nfrom continual_learning.scenarios.utils import ImageRotation, PixelsPermutation\n\n\nclass NCScenario(TasksGenerator):\n def __init__(self,\n *,\n tasks_n: int,\n dataset: DatasetSplitsContainer,\n # transform_factory: Callable[[Any], Callable],\n # transformation_parameters: Union[List[any],\n # Callable[[Any], Any]],\n random_state: Union[np.random.RandomState, int] = None,\n lazy_initialization: bool = True,\n labels_per_tasks: Dict[int, int] = None,\n labels_task_mapping: Dict[int, int] = None,\n shuffle_labels: bool = False,\n remap_labels_across_task: bool = False,\n remap_labels_in_task: bool = False,\n **kwargs):\n\n super().__init__(dataset,\n random_state=random_state,\n **kwargs)\n\n dataset_labels = np.asarray(dataset.classes)\n assigned_labels = []\n\n if labels_task_mapping is None:\n labels_task_mapping = {}\n\n if labels_per_tasks is None:\n # if len(labels_task_mapping) == 0:\n if len(dataset_labels) % tasks_n != 0:\n raise ValueError(\n f'Attempted to create labels_per_tasks dictionary, '\n f'but the number of labels ({len(dataset_labels)}) '\n f'cannot be distributed equally between the tasks '\n f'({tasks_n}), '\n f'because len(dataset_labels) % tasks_n != 0.')\n\n labels_per_tasks = {task: len(dataset_labels) // tasks_n\n for task in range(tasks_n)}\n\n else:\n remaining_tasks = tasks_n - len(labels_per_tasks)\n\n if remaining_tasks > 0:\n assigned_tasks = sum(labels_per_tasks.values())\n remaining_labels = len(dataset_labels) - assigned_tasks\n labels_per_remaining_task = remaining_labels // remaining_tasks\n\n tasks_map = {i: labels_per_remaining_task\n for i in range(tasks_n)\n if i not in labels_per_tasks}\n\n labels_per_tasks.update(tasks_map)\n\n if any([v == 1 for v in labels_per_tasks.values()]):\n raise ValueError('Due to the lack of tasks '\n 'in labels_per_tasks, '\n 'the dictionary has been populated, '\n 'but some task has only '\n 'one labels associated ot it. '\n 'If intended, '\n 'please force this behaviour by setting '\n f'labels_per_tasks = {labels_per_tasks}')\n\n if remap_labels_in_task and remap_labels_across_task:\n raise ValueError('Both remap_labels_in_task and '\n 'remap_labels_across_task are set to True '\n 'but are mutually exclusive. '\n 'Please set at least one to False.')\n\n if max(labels_per_tasks.keys()) >= tasks_n or min(\n labels_per_tasks.keys()) < 0:\n raise ValueError('Invalid key value in labels_per_tasks. '\n f'The keys must be in [0, {tasks_n - 1}] '\n f'({labels_per_tasks.keys()})')\n\n if min(labels_per_tasks.values()) < 0:\n raise ValueError('Invalid value in labels_per_tasks. 
'\n f'The values must be > 0'\n f'({labels_per_tasks.keys()})')\n\n sm = sum(labels_per_tasks.values())\n if sm > len(dataset_labels):\n raise ValueError(f'The total number of classes in labels_per_tasks '\n f'({sm}) exceeds the number of labels '\n f'in the dataset ({len(dataset_labels)}).')\n\n if not all(label in dataset_labels\n for label, task in labels_task_mapping.items()):\n raise ValueError(f'Some labels in labels_task_mapping are not '\n f'present in the dataset. '\n f'Dataset labels: {dataset_labels}, '\n f'given labels: {labels_task_mapping}')\n\n if len(labels_task_mapping) > 0:\n if max(labels_task_mapping.keys()) > len(dataset_labels) - 1 \\\n or min(labels_task_mapping.keys()) < 0:\n raise ValueError('Invalid key value in labels_task_mapping. '\n f'The keys must be in '\n f'[0, {len(dataset_labels) - 1}] '\n f'({labels_task_mapping.keys()})')\n\n if max(labels_task_mapping.values()) >= tasks_n \\\n or min(labels_per_tasks.values()) < 0:\n raise ValueError('Invalid value in labels_task_mapping. '\n f'The values must be in [0, {tasks_n - 1}] '\n f'({labels_task_mapping.values()})')\n\n task_labels = {k: [] for k in range(tasks_n)}\n\n for label, task in labels_task_mapping.items():\n task_labels[task].append(label)\n assigned_labels.append(label)\n\n if any([len(v) > labels_per_tasks[t]\n for t, v in task_labels.items()]):\n s = {t: len(v) for t, v in task_labels.items()}\n raise ValueError(f'After populating the tasks '\n f'using labels_task_mapping, some task has more '\n f'assigned labels ({s}) than the limit '\n f'imposed by labels_per_tasks '\n f'({labels_per_tasks}).')\n\n if shuffle_labels:\n self.random_state.shuffle(dataset_labels)\n\n for label in [l for l in dataset_labels\n if l not in assigned_labels]:\n eligible_tasks = [t for t, v in task_labels.items()\n if len(v) < labels_per_tasks[t]]\n\n selected_task = eligible_tasks[0]\n task_labels[selected_task].append(label)\n\n labels_mapping = {}\n indexes = iter(range(len(dataset_labels)))\n\n for t, vals in task_labels.items():\n if remap_labels_across_task:\n map_dict = {v: next(indexes) for v in vals}\n elif remap_labels_in_task:\n map_dict = {v: i for i, v in enumerate(vals)}\n else:\n map_dict = {v: v for v in vals}\n\n labels_mapping[t] = map_dict\n\n self.tasks_n = tasks_n\n self.labels_mapping = labels_mapping\n self.lazy_initialization = lazy_initialization\n self.task_labels = task_labels\n\n self._tasks_generated = []\n\n # self.task_wise_labels = task_wise_labels\n # self.shuffle_labels = shuffle_labels\n #\n # self.parameters = transformation_parameters\n # self.task_n = tasks_n\n # self.transform_function = transform_factory\n #\n # self._t_counter = 0\n # self._current_task = 0\n #\n # self._transform_functions = []\n # self._tasks_generated = []\n\n if not lazy_initialization:\n for _ in range(tasks_n):\n self.generate_task()\n # # self._tasks_generated.append(t)\n\n def __len__(self):\n return self.tasks_n\n\n def __getitem__(self, i: int):\n if i >= len(self._tasks_generated):\n raise ValueError(f'Attempting to get a non generated task from '\n f'the lazy created stream of tasks (index: {i})'\n f'. 
Generate the task or set '\n f'lazy_initialization=False when '\n f'instantiating this class.')\n\n return self._tasks_generated[i]\n\n def generate_task(self, **kwargs) -> Union[Task, None]:\n\n counter = len(self._tasks_generated)\n\n # if self.infinite_stream and callable(self.parameters):\n # t_parameters = self.parameters(task=counter,\n # random_state=self.random_state)\n # else:\n if counter == len(self):\n return None\n\n # t_parameters = self.parameters[counter]\n #\n # t = self.transform_function(t_parameters)\n\n labels = self.task_labels[counter]\n labels_map = self.labels_mapping[counter]\n\n dataset = get_dataset_subset_using_labels(self.dataset, labels=labels)\n task = Task(base_dataset=dataset,\n labels_mapping=labels_map,\n task_index=counter)\n\n # task = TransformerTask(base_dataset=self.dataset, transformer=t,\n # index=counter)\n\n self._tasks_generated.append(task)\n\n return task\n\n # def __next__(self):\n # self._current_task = 0\n # return self\n\n def __iter__(self):\n for i in range(self.tasks_n):\n if len(self._tasks_generated) > i:\n t = self._tasks_generated[i]\n else:\n t = self.generate_task()\n if t is None:\n return\n\n yield t\n\n\nclass ImageRotationScenario(NCTransformingScenario):\n def __init__(self,\n dataset: DatasetSplitsContainer,\n tasks_n: int,\n transformation_parameters: Union[List[any],\n Callable[[Any], Any]],\n # infinite_stream: bool = False,\n random_state: Union[np.random.RandomState, int] = None,\n lazy_initialization: bool = True,\n labels_task_mapping: Dict[int, Union[int, list]] = None,\n remap_labels_across_task: bool = False,\n\n **kwargs):\n\n transform_function = self.get_rotation\n\n super().__init__(dataset=dataset,\n tasks_n=tasks_n,\n transform_factory=transform_function,\n transformation_parameters=transformation_parameters,\n # infinite_stream=infinite_stream,\n random_state=random_state,\n lazy_initialization=lazy_initialization,\n labels_task_mapping=labels_task_mapping,\n remap_labels_across_task=remap_labels_across_task,\n **kwargs)\n\n def get_rotation(self, degree, **kwargs):\n return ImageRotation(degree)\n\n\nclass PixelsPermutationScenario(NCTransformingScenario):\n def __init__(self,\n dataset: DatasetSplitsContainer,\n tasks_n: int,\n transformation_parameters: Union[List[any],\n Callable[[Any], Any]],\n # infinite_stream: bool = False,\n random_state: Union[np.random.RandomState, int] = None,\n lazy_initialization: bool = True,\n labels_task_mapping: Dict[int, Union[int, list]] = None,\n remap_labels_across_task: bool = False,\n\n **kwargs):\n\n transform_factory = self.get_permutation\n\n super().__init__(dataset=dataset,\n tasks_n=tasks_n,\n transform_factory=transform_factory,\n transformation_parameters=transformation_parameters,\n # infinite_stream=infinite_stream,\n random_state=random_state,\n lazy_initialization=lazy_initialization,\n labels_task_mapping=labels_task_mapping,\n remap_labels_across_task=remap_labels_across_task,\n **kwargs)\n\n def get_permutation(self, permutation, **kwargs):\n return PixelsPermutation(permutation)\n\n", "from typing import Sequence, Union\n\nimport numpy as np\nfrom scipy.ndimage.interpolation import rotate as np_rotate\nfrom PIL.Image import Image\nfrom torch import Tensor, tensor\nfrom torchvision.transforms.functional import rotate\n\n\nclass ImageRotation(object):\n def __init__(self, degree):\n self.degree = degree\n\n def __call__(self, img: Union[Image, Tensor, np.ndarray]):\n if isinstance(img, np.ndarray):\n img = np_rotate(img, angle=self.degree, 
reshape=False)\n elif isinstance(img, Image):\n img = img.rotate(self.degree)\n elif isinstance(img, Tensor):\n img = rotate(img, angle=self.degree)\n else:\n raise ValueError(f'Accepted types are: '\n f'[ndarray, PIL Image, Tensor] {type(img)}')\n return img\n\n\nclass PixelsPermutation(object):\n def __init__(self, index_permutation: Sequence[int]):\n self.permutation = index_permutation\n\n def __call__(self, img: Union[Image, Tensor, np.ndarray]):\n if isinstance(img, np.ndarray):\n img = img.reshape(-1)[self.permutation].reshape(*img.shape)\n elif isinstance(img, Image):\n img = img.getdata()\n img = img.reshape(-1)[self.permutation].reshape(*img.shape)\n img = Image.fromarray(img)\n elif isinstance(img, Tensor):\n img = img.numpy()\n img = img.reshape(-1)[self.permutation].reshape(*img.shape)\n img = tensor(img)\n else:\n raise ValueError(f'Accepted types are: '\n f'[ndarray, PIL Image, Tensor] {type(img)}')\n\n return img\n\n\n" ]
[ [ "numpy.asarray" ], [ "torch.tensor", "scipy.ndimage.interpolation.rotate" ] ]
Duplums/SMLvsDL
[ "b285717bd8d8e832b4bc9e2b42d18bd96b628def" ]
[ "dl_training/main.py" ]
[ "import argparse\nfrom dl_training.training import BaseTrainer\nfrom dl_training.testing import OpenBHBTester\nimport torch\nimport logging\n\nif __name__==\"__main__\":\n\n logger = logging.getLogger(\"SMLvsDL\")\n\n parser = argparse.ArgumentParser()\n\n # Data location + saving paths\n parser.add_argument(\"--root\", type=str, required=True, help=\"Path to data root directory\")\n parser.add_argument(\"--preproc\", type=str, default='vbm', choices=['vbm', 'quasi_raw'])\n parser.add_argument(\"--checkpoint_dir\", type=str)\n parser.add_argument(\"--exp_name\", type=str, required=True)\n parser.add_argument(\"--outfile_name\", type=str, help=\"The output file name used to save the results in testing mode.\")\n\n parser.add_argument(\"--N_train_max\", type=int, default=None, help=\"Maximum number of training samples \"\n \"to be used per fold\")\n parser.add_argument(\"--pb\", type=str, choices=[\"age\", \"sex\", \"scz\", \"bipolar\", \"asd\", \"self_supervised\"])\n parser.add_argument(\"--folds\", nargs='+', type=int, help=\"Fold indexes to run during the training\")\n parser.add_argument(\"--nb_folds\", type=int, default=5)\n\n # Important: what model do we use\n parser.add_argument(\"--net\", type=str, help=\"Network to use\")\n\n # Depends on available CPU/GPU memory\n parser.add_argument(\"-b\", \"--batch_size\", type=int, required=True)\n\n\n parser.add_argument(\"--nb_epochs_per_saving\", type=int, default=5)\n parser.add_argument(\"--manual_seed\", type=int, help=\"The manual seed to give to pytorch.\")\n\n # Optimizer hyper-parameters\n parser.add_argument(\"--lr\", type=float, required=True, help=\"Initial learning rate\")\n parser.add_argument(\"--gamma_scheduler\", type=float, required=True)\n parser.add_argument(\"--nb_epochs\", type=int, default=300)\n parser.add_argument(\"--step_size_scheduler\", type=int, default=10)\n\n # Dataloader: set them\n parser.add_argument(\"--num_cpu_workers\", type=int, default=3, help=\"Number of workers assigned to do the \"\n \"preprocessing step (used by DataLoader of Pytorch)\")\n parser.add_argument(\"--sampler\", choices=[\"random\", \"weighted_random\", \"sequential\"], required=True)\n\n parser.add_argument(\"--residualize\", type=str, choices=[\"linear\", \"combat\"])\n\n # Self-sypervised learning\n parser.add_argument(\"--sigma\", type=float, help=\"Hyper-parameter for RBF kernel in self-supervised loss.\", default=5)\n\n # Transfer Learning\n parser.add_argument(\"--pretrained_path\", type=str)\n parser.add_argument(\"--load_optimizer\", action=\"store_true\", help=\"If <pretrained_path> is set, loads also the \"\n \"optimizer's weigth\")\n\n # This code can be executed on CPU or GPU\n parser.add_argument(\"--cuda\", type=bool, default=True, help=\"If True, executes the code on GPU\")\n\n # Kind of tests\n parser.add_argument(\"--train\", action=\"store_true\")\n parser.add_argument(\"--test\", action=\"store_true\")\n\n args = parser.parse_args()\n\n if not torch.cuda.is_available():\n args.cuda = False\n logger.warning(\"cuda is not available and has been disabled.\")\n\n if args.manual_seed:\n torch.manual_seed(args.manual_seed)\n\n if not args.train and not args.test:\n args.train = True\n logger.info(\"No mode specify: training mode is set automatically\")\n\n if args.train:\n trainer = BaseTrainer(args)\n trainer.run()\n # do not consider the pretrained path anymore since it will be eventually computed automatically\n args.pretrained_path = None\n\n if args.test:\n tester = OpenBHBTester(args)\n tester.run()\n\n\n\n\n\n" ]
[ [ "torch.manual_seed", "torch.cuda.is_available" ] ]
inacioMattos/DeepLearning-Cachorros-e-Gatos
[ "a1eb42308f820809b7239cca6e81c4e880f5f540" ]
[ "src/backend/model.py" ]
[ "import tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\nfrom tensorflow.keras.callbacks import TensorBoard\nimport pickle, os, time\n\n\nDATADIR=\"data/\"\nNAME=\"cachorros-gatos-cnn-128-128-128-{}\".format(int(time.time()))\n\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(NAME))\n\n\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n\ndef getData():\n\tX = pickle.load(open(DATADIR + \"X.pickle\", \"rb\"))\n\ty = pickle.load(open(DATADIR + \"y.pickle\", \"rb\"))\n\n\treturn X, y\n\n\ndef normalizeData(X):\n\treturn X/255.0\t# já que numa imagem o valor máximo é 255 para cada pixels, é só dividir por 255.\n\n\ndef saveModel(model):\n\tmodel.save(\"128-128-128-CNN-noDense.model\")\n\n\ndef trainModel(model, training_set):\n\tX, y = training_set\n\n\tmodel.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"accuracy\"])\n\tmodel.fit(X, y, batch_size=32, validation_split=0.1, epochs=7, callbacks=[tensorboard])\n\treturn model\n\n\ndef createModel(X):\n\tmodel = Sequential()\n\n\tmodel.add(Conv2D(128, (3,3), input_shape=X.shape[1:]))\n\tmodel.add(Activation(\"relu\"))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\tmodel.add(Conv2D(128, (4,4)))\n\tmodel.add(Activation(\"relu\"))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\tmodel.add(Conv2D(128, (3,3)))\n\tmodel.add(Activation(\"relu\"))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\tmodel.add(Flatten())\n\n\tmodel.add(Dense(1))\n\tmodel.add(Activation(\"sigmoid\"))\n\n\treturn model\n\n\ndef main():\n\tX, y = getData()\n\tX = normalizeData(X)\n\tmodel = createModel(X)\n\tmodel = trainModel(model, (X, y))\n\t#saveModel(model)\n\n\n\nmain()" ]
[ [ "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.GPUOptions", "tensorflow.ConfigProto" ] ]
vivivibo/pipeline
[ "2a24660ca4b53b51bde3daedde80d8489bdeb37c" ]
[ "af/analysis/analysis/analysis.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"ooni-pipeline: * -> Analysis\n\nConfigured with /etc/analysis.conf\n\nRuns as a system daemon but can also be used from command line in devel mode\n\nCreates and updates unlogged tables.\nShows confirmed correlated by country, ASN, input URL over time.\n\nInputs: Database tables:\n countries\n\nOutputs:\n Files in /var/lib/analysis\n Dedicated unlogged database tables and charts\n tables:\n\n\n\"\"\"\n\n# Compatible with Python3.7 - linted with Black\n\n# TODO:\n# Enable unused code\n# Switch print() to logging\n# Overall datapoints count per country per day\n# Add ASN to confirmed_stats and use one table only if performance is\n# acceptable.\n# Move slicing and manipulation entirely in Pandas and drop complex SQL queries\n# Support feeder.py for continuous ingestion\n# Implement a crude precision metric based on msm_count and time window\n\nfrom argparse import ArgumentParser, Namespace\nfrom configparser import ConfigParser\nfrom contextlib import contextmanager\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom urllib.parse import urlencode\nimport os\nimport time\nimport logging\nimport sys\n\nfrom analysis import backup_to_s3\n\ntry:\n from systemd.journal import JournalHandler # debdeps: python3-systemd\n import sdnotify # debdeps: python3-sdnotify\n\n has_systemd = True\nexcept ImportError:\n # this will be the case on macOS for example\n has_systemd = False\n\nfrom bottle import template # debdeps: python3-bottle\nfrom sqlalchemy import create_engine # debdeps: python3-sqlalchemy-ext\n\n# TODO: move pandas / seaborn related stuff in a dedicated script\n#import pandas as pd # debdeps: python3-pandas python3-jinja2\n#import prometheus_client as prom # debdeps: python3-prometheus-client\nimport psycopg2 # debdeps: python3-psycopg2\nfrom psycopg2.extras import RealDictCursor\n\nimport matplotlib # debdeps: python3-matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n#import seaborn as sns # debdeps: python3-seaborn\n\nfrom analysis.metrics import setup_metrics # debdeps: python3-statsd\n\nfrom analysis.citizenlab_test_lists_updater import update_citizenlab_test_lists\n\nfrom analysis.counters_table_updater import (\n update_all_counters_tables,\n update_tables_daily,\n)\n\n\n# Global conf\nconf = Namespace()\n\n# Global db connectors\ndbengine = None\nconn = None\n\nlog = logging.getLogger(\"analysis\")\nmetrics = setup_metrics(name=\"analysis\")\n\n\ndef setup_database_connection(c):\n return psycopg2.connect(\n dbname=c[\"dbname\"],\n user=c[\"dbuser\"],\n host=c[\"dbhost\"],\n password=c[\"dbpassword\"],\n port=c.get(\"dbport\", 5432),\n )\n\n\n@contextmanager\ndef database_connection(c):\n conn = setup_database_connection(c)\n try:\n yield conn\n finally:\n conn.close()\n\n\ndef setup_database_connections(c):\n conn = setup_database_connection(c)\n dbengine = create_engine(\"postgresql+psycopg2://\", creator=lambda: conn)\n return conn, dbengine\n\n\ndef gen_table(name, df, cmap=\"RdYlGn\"):\n \"\"\"Render dataframe into an HTML table and save it to file.\n Create a timestamped file <name>.<ts>.html and a symlink to it.\n \"\"\"\n if cmap is None:\n tb = df.style\n else:\n tb = df.style.background_gradient(cmap=cmap)\n # df.style.bar(subset=['A', 'B'], align='mid', color=['#d65f5f', '#5fba7d'])\n html = tb.render()\n ts = datetime.utcnow().strftime(\"%Y%m%d-%H%M\")\n outf = conf.output_directory / f\"{name}.{ts}.html\"\n log.info(f\"Rendering to {outf}\")\n with outf.open(\"w\") as 
f:\n f.write(html)\n\n symlink = conf.output_directory / f\"{name}.html\"\n try:\n symlink.unlink() # Atomic symlinking not supported\n except:\n pass\n symlink.symlink_to(f\"{name}.{ts}.html\") # (Absolute path not supported)\n\n\ndef save(name, plt):\n fn = os.path.join(conf.output_directory, name + \".png\")\n log.info(f\"Rendering to {fn}\")\n plt.get_figure().savefig(fn)\n\n\ndef gen_plot(name, df, *a, **kw):\n plt = df.plot(*a, **kw)\n save(name, plt)\n\n\ndef heatmap(name, *a, **kw):\n fn = os.path.join(conf.output_directory, name + \".png\")\n log.info(f\"Rendering to {fn}\")\n h = sns.heatmap(*a, **kw)\n h.get_figure().savefig(fn)\n\n\ndef insert_into(tablename, q):\n assert tablename in (\"confirmed_stats\", \"confirmed_stats_asn\")\n # TODO: autoreconnect\n with metrics.timer(\"insert_into.\" + tablename):\n dbengine.execute(q)\n\n\ndef query(q):\n # TODO: add a label to generate metrics\n log.info(\" \".join(q.replace(\"\\n\", \" \").split())[:300], \"...\")\n # TODO: autoreconnect\n with metrics.timer(\"query.unnamed\"):\n r = pd.read_sql_query(q, conn)\n return r\n\n\[email protected](\"populate_countries\")\ndef populate_countries():\n ## Used only once to create a persistent list of countries\n dbengine.execute(\n \"\"\"\n CREATE UNLOGGED TABLE countries (\n probe_cc CHARACTER(2) NOT NULL,\n msm_count BIGINT NOT NULL\n );\n CREATE INDEX ON countries (msm_count);\n \"\"\"\n )\n insert_into(\n \"countries\",\n \"\"\"\n INSERT INTO countries\n SELECT\n probe_cc as country,\n COUNT(*) as msm_count\n FROM measurement\n JOIN report ON report.report_no = measurement.report_no\n WHERE measurement_start_time >= current_date - interval '5 day'\n AND measurement_start_time < current_date - interval '1 day'\n GROUP BY\n country\n \"\"\",\n )\n\n\[email protected](\"append_confirmed_stats\")\ndef append_confirmed_stats():\n ## Append confirmed_stats daily\n log.info(\"Updating confirmed_stats\")\n dbengine.execute(\n \"\"\"\n CREATE UNLOGGED TABLE IF NOT EXISTS confirmed_stats (\n day TIMESTAMP NOT NULL,\n probe_cc CHARACTER(2) NOT NULL,\n target TEXT,\n msm_count BIGINT NOT NULL,\n confirmed_count BIGINT NOT NULL,\n CONSTRAINT confirmed_stats_day_cc_target_u UNIQUE (day, probe_cc, target)\n ) ;\n CREATE INDEX ON confirmed_stats (day);\n \"\"\"\n )\n insert_into(\n \"confirmed_stats\",\n \"\"\"\n INSERT INTO confirmed_stats\n SELECT\n date_trunc('day', measurement_start_time) as day,\n probe_cc,\n concat(test_name, '::', input) as target,\n COUNT(*) as msm_count,\n COALESCE(SUM(CASE WHEN confirmed = TRUE THEN 1 ELSE 0 END), 0) as confirmed_count\n FROM measurement\n JOIN input ON input.input_no = measurement.input_no\n JOIN report ON report.report_no = measurement.report_no\n JOIN autoclaved ON autoclaved.autoclaved_no = report.autoclaved_no\n WHERE measurement_start_time < current_date - interval '1 day'\n AND measurement_start_time >= current_date - interval '2 day'\n GROUP BY\n day,\n probe_cc,\n target\n ON CONFLICT DO NOTHING\n \"\"\",\n )\n\n\[email protected](\"append_confirmed_stats_asn\")\ndef append_confirmed_stats_asn():\n ## Append confirmed_stats_asn daily\n log.info(\"Updating confirmed_stats_asn\")\n dbengine.execute(\n \"\"\"\n CREATE UNLOGGED TABLE IF NOT EXISTS confirmed_stats_asn (\n day TIMESTAMP NOT NULL,\n probe_cc CHARACTER(2) NOT NULL,\n probe_asn INTEGER NOT NULL,\n target TEXT,\n msm_count BIGINT NOT NULL,\n confirmed_count BIGINT NOT NULL,\n CONSTRAINT confirmed_stats_asn_day_cc_asn_target_u UNIQUE (day, probe_cc, probe_asn, target)\n ) ;\n CREATE INDEX ON 
confirmed_stats (day);\n \"\"\"\n )\n insert_into(\n \"confirmed_stats_asn\",\n \"\"\"\n INSERT INTO confirmed_stats_asn\n SELECT\n date_trunc('day', measurement_start_time) as day,\n probe_cc,\n probe_asn,\n concat(test_name, '::', input) as target,\n COUNT(*) as msm_count,\n COALESCE(SUM(CASE WHEN confirmed = TRUE THEN 1 ELSE 0 END), 0) as confirmed_count\n FROM measurement\n JOIN input ON input.input_no = measurement.input_no\n JOIN report ON report.report_no = measurement.report_no\n JOIN autoclaved ON autoclaved.autoclaved_no = report.autoclaved_no\n WHERE measurement_start_time < current_date - interval '1 day'\n AND measurement_start_time >= current_date - interval '2 day'\n GROUP BY\n day,\n probe_cc,\n probe_asn,\n target\n ON CONFLICT DO NOTHING\n \"\"\",\n )\n\n\ndef blocked_sites_per_country_per_week_heatmap():\n # Ratio of blocked sites per country per week\n q = query(\n \"\"\"\n SELECT\n date_trunc('week', test_day) as week,\n probe_cc as country,\n SUM(confirmed_count)::decimal / SUM(msm_count) as ratio\n FROM ooexpl_wc_confirmed\n WHERE test_day > current_date - interval '1 day' - interval '10 week'\n AND test_day < current_date - interval '1 day'\n GROUP BY\n probe_cc, week\n ;\n \"\"\"\n )\n x = q.pivot_table(index=\"week\", columns=\"country\", values=\"ratio\")\n plt.figure(figsize=(26, 6))\n heatmap(\"block_ratio\", x, cmap=\"Blues\")\n\n\ndef input_per_day_per_country_density_heatmap():\n # Measure input-per-day-per-country datapoint density\n q = query(\n \"\"\"\n SELECT\n date_trunc('week', test_day) as week,\n probe_cc as country,\n SUM(msm_count) as count\n FROM ooexpl_wc_confirmed\n WHERE test_day > current_date - interval '1 day' - interval '10 week'\n AND test_day < current_date - interval '1 day'\n GROUP BY\n week, country\n ;\n \"\"\"\n )\n p = q.pivot_table(index=\"week\", columns=\"country\", values=\"count\")\n heatmap(\"input_per_day_per_country_density\", p)\n\n\ndef msm_count_per_week_high_countries_gentable():\n pop = query(\n \"\"\"\n SELECT\n probe_cc as country,\n date_trunc('week', test_day) as week,\n SUM(msm_count) as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval '1 day' - interval '6 week'\n AND test_day < current_date - interval '1 day'\n AND msm_count > 2000\n GROUP BY\n week,\n probe_cc\n ;\n \"\"\"\n )\n p2 = pop.pivot_table(index=\"country\", columns=\"week\", values=\"cnt\").fillna(0)\n gen_table(\"msm_count_per_week_high_countries\", p2)\n\n\ndef msm_count_per_week_high_countries_gentable2():\n # Number of datapoints per week in popular countries\n q = query(\n \"\"\"\n SELECT\n probe_cc as country,\n date_trunc('week', test_day) as week,\n SUM(msm_count) as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval '1 day' - interval '6 week'\n AND test_day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT probe_cc\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval '1 day' - interval '3 weeks'\n AND test_day < current_date - interval '1 day'\n GROUP BY probe_cc\n ORDER BY SUM(msm_count) DESC\n LIMIT 20\n )\n GROUP BY\n week,\n country\n ;\n \"\"\"\n )\n p = q.pivot_table(index=\"country\", columns=\"week\", values=\"cnt\").fillna(0)\n\n gen_table(\"msm_count_per_week_high_countries\", p)\n\n\ndef msm_count_per_month_high_countries():\n # Number of datapoints per day in popular countries\n q = query(\n \"\"\"\n SELECT\n probe_cc as country,\n test_day,\n SUM(msm_count) as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= 
current_date - interval \\'1 day\\' - interval \\'3 week\\'\n AND test_day < current_date - interval \\'1 day\\'\n AND probe_cc IN (\n SELECT probe_cc\n FROM countries\n WHERE msm_count > 2000\n ORDER BY msm_count DESC\n )\n GROUP BY\n test_day,\n country\n ;\n \"\"\"\n )\n p = q.pivot_table(index=\"country\", columns=\"test_day\", values=\"cnt\").fillna(0)\n\n gen_table(\"msm_count_per_month_high_countries\", p)\n\n\ndef msm_count_per_month_low_countries():\n # Number of datapoints over the last month in countries with few probes\n q = query(\n \"\"\"\n SELECT probe_cc as country,\n SUM(msm_count) as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval \\'1 day\\' - interval \\'1 months\\'\n AND test_day < current_date - interval \\'1 day\\'\n GROUP BY probe_cc\n ORDER BY cnt\n LIMIT 80\n \"\"\"\n )\n p = q.pivot_table(index=\"country\", values=\"cnt\").fillna(0)\n\n gen_table(\"msm_count_per_month_low_countries\", p)\n\n\ndef coverage_variance():\n ## Variance of number of datapoints over countries: high values mean unequal coverage\n q = query(\n \"\"\"\n SELECT\n probe_cc as country,\n test_day,\n msm_count as cnt\n FROM msm_count_by_day_by_country\n WHERE test_day >= current_date - interval '1 day' - interval '6 week'\n AND test_day < current_date - interval '1 day'\n ;\n \"\"\"\n )\n\n # pivot and fill NaN before calculating variance\n p = q.pivot_table(index=\"country\", columns=\"test_day\", values=\"cnt\")\n p = p.fillna(0)\n relvar = p.std() / p.mean()\n\n plt.figure(1)\n plt.subplot(311)\n plt.plot(p.sum())\n plt.subplot(312)\n plt.plot(relvar)\n plt.subplot(313)\n plt.plot(p.var())\n fig = plt.gcf()\n fig.savefig(\"output/msm_count_and_variance_over_countries.png\")\n\n ## Total number of datapoints and variance across countries per day\n\n\n# @metrics.timer(\"summarize_core_density\")\n# def summarize_core_density_UNUSED():\n# ## Core density\n# ## Measure coverage of citizenlab inputs on well-monitored countries\n# core = query(\n# \"\"\"\n# SELECT\n# date_trunc('day', measurement_start_time) as day,\n# probe_cc,\n# concat(test_name, '::', input) as target,\n# COUNT(*) as msm_count\n# FROM measurement\n# JOIN report ON report.report_no = measurement.report_no\n# JOIN input ON input.input_no = measurement.input_no\n# WHERE measurement_start_time >= current_date - interval '2 days'\n# AND measurement_start_time < current_date - interval '1 days'\n# AND (test_name, input) IN (\n# SELECT\n# test_name,\n# input\n# FROM interesting_inputs\n# )\n# AND probe_cc IN (\n# SELECT\n# probe_cc\n# FROM\n# countries\n# WHERE\n# msm_count > 2000\n# )\n# GROUP BY\n# probe_cc,\n# day,\n# target\n# \"\"\"\n# )\n#\n# day_slice = core.pivot_table(\n# index=\"probe_cc\", columns=\"target\", values=\"msm_count\", fill_value=0\n# )\n#\n# log.info(\"Countries: \", day_slice.shape[0], \"Targets:\", day_slice.shape[1])\n# metrics.gauge(\"countries_with_high_msm_count_1_day\", day_slice.shape[0])\n# metrics.gauge(\"targets_high_msm_countries_1_day\", day_slice.shape[1])\n#\n# area = day_slice.shape[0] * day_slice.shape[1]\n# log.info(\"Slice area:\", area)\n#\n# c1 = core[\"target\"].count() / area\n# log.info(\"Coverage-1: cells with at least one datapoint\", c1)\n# metrics.gauge(\"coverage_1_day_1dp\", c1)\n#\n# c5 = core[core[\"msm_count\"] > 5][\"target\"].count() / area\n# log.info(\"Coverage-5: cells with at least 5 datapoints\", c5)\n# metrics.gauge(\"coverage_1_day_5dp\", c1)\n\n\[email protected](\"plot_msmt_count_per_platform_over_time\")\ndef 
plot_msmt_count_per_platform_over_time(conn):\n log.info(\"COV: plot_msmt_count_per_platform_over_time\")\n sql = \"\"\"\n SELECT date_trunc('day', measurement_start_time) AS day, platform, COUNT(*) AS msm_count\n FROM fastpath\n WHERE measurement_start_time >= CURRENT_DATE - interval '60 days'\n AND measurement_start_time < CURRENT_DATE\n GROUP BY day, platform\n ORDER BY day, platform;\n \"\"\"\n q = pd.read_sql_query(sql, conn)\n p = q.pivot_table(index=\"day\", columns=\"platform\", values=\"msm_count\", fill_value=0)\n gen_plot(\"msmt_count_per_platform_over_time\", p)\n\n\[email protected](\"plot_coverage_per_platform\")\ndef plot_coverage_per_platform(conn):\n \"\"\"Measure how much each platform contributes to measurements\"\"\"\n log.info(\"COV: plot_coverage_per_platform\")\n # Consider only inputs that are listed on citizenlab\n sql = \"SELECT UPPER(cc), COUNT(*) from citizenlab GROUP BY cc\"\n with conn.cursor() as cur:\n cur.execute(sql)\n baseline = dict(cur.fetchall()) # CC -> count\n zz_cnt = baseline.pop(\"ZZ\")\n for cc in baseline:\n baseline[cc] += zz_cnt\n\n baseline[\"ZZ\"] = zz_cnt # put back the initial value\n\n # The inner query returns *one* line for each (platform, probe_cc, input)\n # that has 1 or more msmt. If an input is tested more than once in the time\n # period in a given CC we treat it as 1.\n sql = \"\"\"\n SELECT platform, probe_cc, count(*)\n FROM (\n SELECT platform, probe_cc\n FROM fastpath\n WHERE (\n (probe_cc, input) IN (SELECT UPPER(cc), url FROM citizenlab)\n OR\n input IN (SELECT url FROM citizenlab WHERE cc = 'ZZ')\n )\n AND measurement_start_time > NOW() - interval '1 days'\n AND measurement_start_time < NOW()\n AND test_name = 'web_connectivity'\n AND input IS NOT null\n GROUP BY probe_cc, input, platform\n ORDER BY probe_cc, platform) sq\n GROUP BY sq.platform, sq.probe_cc\n ORDER BY sq.probe_cc, sq.platform;\n \"\"\"\n with conn.cursor() as cur:\n cur.execute(sql)\n x = []\n for platform, probe_cc, count in cur:\n if probe_cc not in baseline:\n continue\n x.append((platform, probe_cc, count / baseline[probe_cc]))\n\n cov = pd.DataFrame(x, columns=[\"platform\", \"probe_cc\", \"ratio\"])\n cov = cov.pivot_table(\n index=\"probe_cc\", columns=\"platform\", values=\"ratio\", fill_value=0\n )\n pd.set_option(\"display.precision\", 1)\n gen_table(\"coverage_per_platform\", cov)\n\n\ndef coverage_generator(conf):\n \"\"\"Generate statistics on coverage\"\"\"\n log.info(\"COV: Started monitor_measurement_creation thread\")\n while True:\n try:\n conn, dbengine = setup_database_connections(conf.standby)\n except Exception as e:\n log.error(e, exc_info=True)\n time.sleep(30)\n continue\n\n try:\n plot_coverage_per_platform(conn)\n plot_msmt_count_per_platform_over_time(conn)\n log.info(\"COV: done. 
Sleeping\")\n\n except Exception as e:\n log.error(e, exc_info=True)\n\n finally:\n conn.close()\n\n time.sleep(3600 * 24)\n\n\ndef summarize_total_density_UNUSED():\n ## Total density\n ## Measure coverage of interesting_inputs on well-monitored countries\n core = query(\n \"\"\"\n SELECT\n day,\n probe_cc,\n target,\n msm_count\n FROM msm_count_core\n WHERE day >= current_date - interval \\'3 days\\'\n AND day < current_date - interval \\'2 days\\'\n ;\n \"\"\"\n )\n\n day_slice = core.pivot_table(\n index=\"probe_cc\", columns=\"target\", values=\"msm_count\", fill_value=0\n )\n log.info(\"Countries: \", day_slice.shape[0], \"Targets:\", day_slice.shape[1])\n metrics.gauge(\"\")\n metrics.gauge(\"\")\n area = day_slice.shape[0] * day_slice.shape[1]\n log.info(\"Slice area:\", area)\n metrics.gauge(\"\")\n c1 = core[\"target\"].count() / area\n log.info(\"Coverage-1: cells with at least one datapoint\", c1)\n metrics.gauge(\"\")\n c5 = core[core[\"msm_count\"] > 5][\"target\"].count() / area\n log.info(\"Coverage-5: cells with at least 5 datapoints\", c5)\n metrics.gauge(\"\")\n\n ## Another attempt at visualizing confirmed_states\n q = query(\n \"\"\"\n SELECT probe_cc, target, msm_count, confirmed_count FROM confirmed_stats\n WHERE day >= current_date - interval '8 day'\n AND day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT\n probe_cc\n FROM\n countries\n WHERE\n msm_count > 1000\n )\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n ;\n \"\"\"\n )\n\n msm = q.pivot_table(\n index=\"target\", columns=\"probe_cc\", values=\"msm_count\", fill_value=0\n )\n # sort targets\n msm.sort_values(ascending=False, inplace=True, by=\"RU\")\n\n # sort countries\n msm.sort_values(\n ascending=False,\n inplace=True,\n by=\"web_connectivity::https://www.ndi.org/\",\n axis=1,\n )\n\n heatmap(\n \"core_density\",\n msm,\n cbar=False,\n annot=False,\n cmap=\"RdYlGn\",\n xticklabels=True,\n yticklabels=False,\n vmax=10.0,\n )\n\n\[email protected](\"measure_blocking_globally\")\ndef measure_blocking_globally():\n ## Extract per-country blacking over time\n q = query(\n \"\"\"\n SELECT\n date_trunc('week', day),\n probe_cc,\n SUM(msm_count) as msm_count,\n SUM(confirmed_count) as confirmed_count,\n SUM(confirmed_count) / SUM(msm_count) as block_ratio\n FROM confirmed_stats\n WHERE day >= current_date - interval '1 day' - interval '6 week'\n AND day < current_date - interval '1 day'\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n GROUP BY\n day,\n probe_cc\n \"\"\"\n )\n obt = q[q[\"block_ratio\"] > 0.000001]\n oc = obt.pivot_table(\n index=\"date_trunc\", columns=\"probe_cc\", values=\"block_ratio\", fill_value=0\n )\n gen_plot(\"blocked_vs_nonblocked_by_country\", oc)\n\n\ndef create_currently_blocked_table_if_needed():\n q = \"\"\"\n CREATE UNLOGGED TABLE IF NOT EXISTS currently_blocked (\n analysis_date timestamp without time zone NOT NULL,\n probe_cc CHARACTER(2) NOT NULL,\n probe_asn integer,\n target TEXT NOT NULL,\n description TEXT NOT NULL\n ) ;\n \"\"\"\n dbengine.execute(q)\n\n\[email protected](\"detect_blocking_granularity_cc_target\")\ndef detect_blocking_granularity_cc_target(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n):\n q = query(\n \"\"\"\n SELECT\n probe_cc,\n target,\n SUM(msm_count) as msm_count,\n SUM(confirmed_count) as confirmed_count,\n SUM(confirmed_count) / 
SUM(msm_count) as block_ratio\n FROM confirmed_stats\n WHERE day >= current_date - interval '1 day' - interval '{}'\n AND day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT\n probe_cc\n FROM\n countries\n WHERE\n msm_count > 1000\n )\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n GROUP BY\n probe_cc,\n target\n \"\"\".format(\n interval\n )\n )\n r = q[\n (q[\"msm_count\"] > msm_count_threshold)\n & (q[\"block_ratio\"] > block_ratio_threshold)\n ]\n r[\"description\"] = \"by_cc_t\"\n return r\n\n\[email protected](\"detect_blocking_granularity_cc_asn_target\")\ndef detect_blocking_granularity_cc_asn_target(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n):\n q = query(\n \"\"\"\n SELECT\n probe_cc,\n probe_asn,\n target,\n SUM(msm_count) as msm_count,\n SUM(confirmed_count) as confirmed_count_with_asn,\n SUM(confirmed_count) / SUM(msm_count) as block_ratio\n FROM confirmed_stats_asn\n WHERE day >= current_date - interval '1 day' - interval '{}'\n AND day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT\n probe_cc\n FROM\n countries\n WHERE\n msm_count > 1000\n )\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n GROUP BY\n probe_cc,\n probe_asn,\n target\n \"\"\".format(\n interval\n )\n )\n r = q[\n (q[\"msm_count\"] > msm_count_threshold)\n & (q[\"block_ratio\"] > block_ratio_threshold)\n ]\n r[\"description\"] = \"by_cc_asn_t\"\n return r\n\n\[email protected](\"detect_blocking_granularity_cc\")\ndef detect_blocking_granularity_cc(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n):\n ## Overall, per-country blocking ratio\n ## Useful to detect if a country suddenly starts blocking many targets\n q = query(\n \"\"\"\n SELECT\n probe_cc,\n SUM(msm_count) as msm_count,\n SUM(confirmed_count) as confirmed_count_with_asn,\n SUM(confirmed_count) / SUM(msm_count) as block_ratio\n FROM confirmed_stats_asn\n WHERE day >= current_date - interval '1 day' - interval '{}'\n AND day < current_date - interval '1 day'\n AND probe_cc IN (\n SELECT\n probe_cc\n FROM\n countries\n WHERE\n msm_count > 1000\n )\n AND target IN (\n SELECT\n concat(test_name, '::', input) as target\n FROM interesting_inputs\n WHERE interesting_inputs.weight > 80\n )\n GROUP BY\n probe_cc\n \"\"\".format(\n interval\n )\n )\n r = q[\n (q[\"msm_count\"] > msm_count_threshold)\n & (q[\"block_ratio\"] > block_ratio_threshold)\n ]\n r[\"description\"] = \"by_cc\"\n return r\n\n\[email protected](\"detect_blocking\")\ndef detect_blocking():\n ## Detect blocking by slicing the target/CC/ASN/time cubes.\n ## Slicing is done multiple times with decreasing granularity:\n ## - target + CC + ASN\n ## - target + CC\n ## - CC\n ## Also the slicing is done over different time ranges:\n ## Short time: detect blocking quickly in countries with high msm_count\n ## Long time: detect blocking in countries with low msm_count\n\n ## Extract country-target time cylinders with enough datapoints to do reliable detection\n ## The thresold is controlled by the time interval and the total msm_count\n ## This allows adaptive detection over different sampling frequencies using multiple time windows\n\n # TODO:\n # - avoid caching tables, do everything in Pandas\n # - implement optional continuous run to prevent recreating the Cube\n\n # config params\n msm_count_threshold = 8\n block_ratio_threshold = 0.3\n # 
TODO: use different thresholds for different granularities\n # TODO: add tunable filtering by country weight and interesting_inputs\n\n # Detect by CC, ASN and target\n cc_asn_t_1d = detect_blocking_granularity_cc_asn_target(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n )\n metrics.gauge(\"cc_asn_t_1d_count\", len(cc_asn_t_1d.index))\n\n # Detect by CC and target\n cc_t_1d = detect_blocking_granularity_cc_target(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n )\n metrics.gauge(\"cc_t_1d_count\", len(cc_t_1d.index))\n cc_t_2w = detect_blocking_granularity_cc_target(\n msm_count_threshold, block_ratio_threshold, interval=\"2 weeks\"\n )\n metrics.gauge(\"cc_t_2w_count\", len(cc_t_2w.index))\n\n # Detect by CC only. Very low granularity but allows spotting very large\n # blocking events in low-coverage countries\n cc_1d = detect_blocking_granularity_cc(\n msm_count_threshold, block_ratio_threshold, interval=\"1 day\"\n )\n metrics.gauge(\"cc_1d_count\", len(cc_1d.index))\n\n # Create df of blocking events\n blocked = pd.concat((cc_asn_t_1d, cc_t_1d, cc_t_2w, cc_1d), sort=False)\n cols = [\"probe_cc\", \"probe_asn\", \"target\", \"description\"]\n blocked = blocked[cols]\n metrics.gauge(\"currently_blocked\", len(blocked.index))\n\n log.info(\"currently_blocked\", len(blocked.index))\n with metrics.timer(\"write_blocked_now\"):\n blocked.to_sql(\"blocked\", con=dbengine, if_exists=\"replace\")\n\n\ndef parse_args():\n ap = ArgumentParser(\"Analysis script \" + __doc__)\n ap.add_argument(\n \"--update-counters\", action=\"store_true\", help=\"Update counters table\"\n )\n ap.add_argument(\n \"--update-citizenlab\", action=\"store_true\", help=\"Update citizenlab test lists\"\n )\n ap.add_argument(\n \"--update-tables-daily\", action=\"store_true\", help=\"Run daily update\"\n )\n ap.add_argument(\n \"--dry-run\", action=\"store_true\", help=\"Dry run, supported only by some commands\"\n )\n ap.add_argument(\n \"--backup-db\", action=\"store_true\", help=\"Backup DB to S3\"\n )\n # ap.add_argument(\"--\", action=\"store_true\", help=\"\")\n ap.add_argument(\"--devel\", action=\"store_true\", help=\"Devel mode\")\n ap.add_argument(\"--stdout\", action=\"store_true\", help=\"Log to stdout\")\n return ap.parse_args()\n\n\ndef to_html(c):\n return f\"\"\"<html>\n <head>\n <link rel=\"stylesheet\" href=\"https://cdnjs.cloudflare.com/ajax/libs/milligram/1.3.0/milligram.css\">\n </head>\n <body>\n {c}\n </body>\n </html>\n \"\"\"\n\n\ndef to_table(colnames, rowdicts) -> str:\n tpl = \"\"\"\n <table>\n <tr>\n % for c in colnames:\n <th>{{c}}</th>\n % end\n </tr>\n % for d in rowdicts:\n <tr>\n % for c in colnames:\n <td>{{!d[c]}}</td>\n % end\n </tr>\n % end\n </table>\n \"\"\"\n return template(tpl, colnames=colnames, rowdicts=rowdicts)\n\n\ndef gen_prometheus_url(expr, range_input=\"12h\"):\n \"\"\"Generate URL to point to a metric in Prometheus\"\"\"\n baseurl = \"https://mon.ooni.nu/prometheus/graph?\"\n return baseurl + urlencode(\n {\"g0.range_input\": \"12h\", \"g0.expr\": expr, \"g0.tab\": \"0\"}\n )\n\n\ndef html_anchor(url, text):\n return f\"\"\"<a href=\"{url}\">{text}</a>\"\"\"\n\n\[email protected](\"generate_slow_query_summary\")\ndef generate_slow_query_summary(conf):\n \"\"\"Generate HTML pages with a summary of heavy queries\n for active and standby database a bit like the \"top\" command.\n Send metrics to node exporter / Prometheus\n Show links to related charts\n \"\"\"\n sql = \"\"\"\n SELECT\n calls,\n mean_time / 1000 AS mean_s,\n 
round(total_time / 1000) AS total_seconds,\n queryid,\n query\n FROM\n pg_stat_statements\n ORDER BY\n total_time DESC\n LIMIT 16;\n \"\"\"\n prom_reg = prom.CollectorRegistry()\n total_query_time_g = prom.Gauge(\n \"db_total_query_time\",\n \"DB cumulative query time\",\n labelnames=[\"db_role\", \"queryid\"],\n registry=prom_reg,\n )\n calls_cnt = prom.Gauge(\n \"db_total_query_count\",\n \"DB cumulative query count\",\n labelnames=[\"db_role\", \"queryid\"],\n registry=prom_reg,\n )\n # Monitoring URL creation\n expr_tpl = \"\"\"delta(%s{db_role=\"%s\",queryid=\"%s\"}[1h])\"\"\"\n\n for role in (\"active\", \"standby\"):\n log.info(\"Main: Connecting\")\n conn, dbengine = setup_database_connections(getattr(conf, role))\n log.debug(\"Main: Connected, running query\")\n rows = dbengine.execute(sql)\n rows = [dict(r) for r in rows]\n for r in rows:\n queryid = r[\"queryid\"]\n total_query_time_g.labels(role, queryid).set(r[\"total_seconds\"])\n calls_cnt.labels(role, queryid).set(r[\"calls\"])\n expr = expr_tpl % (\"db_total_query_time\", role, queryid)\n url = gen_prometheus_url(expr)\n r[\"total_seconds\"] = html_anchor(url, r[\"total_seconds\"])\n\n expr = expr_tpl % (\"db_total_query_count\", role, queryid)\n url = gen_prometheus_url(expr)\n r[\"calls\"] = html_anchor(url, r[\"calls\"])\n r[\"mean_s\"] = \"%.3f\" % r[\"mean_s\"]\n\n colnames = [\"queryid\", \"calls\", \"mean_s\", \"total_seconds\", \"query\"]\n tbl = to_table(colnames, rows)\n html = to_html(tbl)\n\n fi = conf.output_directory / f\"db_slow_queries_{role}.html\"\n log.info(\"Main: Writing %s\", fi)\n fi.write_text(html)\n conn.close()\n\n log.info(\"Main: Writing metrics to node exporter\")\n prom.write_to_textfile(node_exporter_path, prom_reg)\n\n\ndef _generate_stat_activity_gauge(stat_activity_gauge, conn, db_role: str) -> None:\n \"\"\"Gather pg_stat_activity counts\"\"\"\n stat_activity_sql = \"\"\"SELECT state, usename, count(*)\n FROM pg_stat_activity GROUP BY state, usename\"\"\"\n with conn.cursor() as cur:\n # columns: state, usename, count\n cur.execute(stat_activity_sql)\n for r in cur:\n m = stat_activity_gauge.labels(db_role=db_role, state=r[0], usename=r[1])\n m.set(r[2])\n\n\[email protected](\"monitor_measurement_creation\")\ndef monitor_measurement_creation(conf):\n \"\"\"Monitors measurements created by fastpath and traditional pipeline\n to detect and alert on inconsistency.\n Queries the fastpath and measurements DB tables and compare their rows\n across different time ranges and generates metrics for Prometheus.\n\n Runs in a dedicated thread and writes in its own .prom file\n\n This is the most important function, therefore it pings the SystemD watchdog\n \"\"\"\n log.info(\"MMC: Started monitor_measurement_creation thread\")\n # TODO: switch to OOID\n\n INTERVAL = 60 * 5\n if has_systemd:\n watchdog = sdnotify.SystemdNotifier()\n\n prom_reg = prom.CollectorRegistry()\n gauge_family = prom.Gauge(\n \"measurements_flow\",\n \"Measurements being created\",\n labelnames=[\"type\"],\n registry=prom_reg,\n )\n replication_deltas_gauge = prom.Gauge(\n \"replication_deltas\",\n \"Deltas between xlog values\",\n labelnames=[\"type\"],\n registry=prom_reg,\n )\n stat_activity_gauge = prom.Gauge(\n \"stat_activity_count\",\n \"Active queries counts\",\n labelnames=[\"db_role\", \"state\", \"usename\"],\n registry=prom_reg,\n )\n\n queries = dict(\n fastpath_count=\"\"\"SELECT COUNT(*)\n FROM fastpath\n WHERE measurement_start_time > %(since)s\n AND measurement_start_time <= %(until)s\n \"\"\",\n 
pipeline_count=\"\"\"SELECT COUNT(*)\n FROM measurement\n WHERE measurement_start_time > %(since)s\n AND measurement_start_time <= %(until)s\n \"\"\",\n pipeline_not_fastpath_count=\"\"\"SELECT COUNT(*)\n FROM measurement\n LEFT OUTER JOIN input ON input.input_no = measurement.input_no\n JOIN report ON report.report_no = measurement.report_no\n WHERE NOT EXISTS (\n SELECT\n FROM fastpath fp\n WHERE measurement_start_time > %(since_ext)s\n AND measurement_start_time <= %(until_ext)s\n AND fp.report_id = report.report_id\n AND fp.test_name = report.test_name\n AND COALESCE(fp.input, '') = COALESCE(input.input, '')\n )\n AND measurement_start_time > %(since)s\n AND measurement_start_time <= %(until)s\n \"\"\",\n fastpath_not_pipeline_count=\"\"\"SELECT COUNT(*)\n FROM fastpath fp\n WHERE NOT EXISTS (\n SELECT\n FROM measurement\n LEFT OUTER JOIN input ON input.input_no = measurement.input_no\n JOIN report ON report.report_no = measurement.report_no\n WHERE measurement_start_time > %(since_ext)s\n AND measurement_start_time <= %(until_ext)s\n AND fp.report_id = report.report_id\n AND fp.test_name = report.test_name\n AND COALESCE(fp.input, '') = COALESCE(input.input, '')\n )\n AND measurement_start_time > %(since)s\n AND measurement_start_time <= %(until)s\n \"\"\",\n )\n sql_replication_delay = \"SELECT now() - pg_last_xact_replay_timestamp()\"\n\n # test connection and notify systemd\n conn, _ = setup_database_connections(conf.standby)\n with conn.cursor() as cur:\n cur.execute(\"SELECT 1\")\n conn.close()\n if has_systemd:\n watchdog.notify(\"READY=1\")\n\n cycle_seconds = 0\n\n while True:\n if has_systemd:\n watchdog.notify(\"WATCHDOG=1\")\n watchdog.notify(\"STATUS=Running\")\n\n try:\n # Clear gauges\n stat_activity_gauge._metrics.clear()\n\n log.info(\"MMC: Gathering fastpath count\")\n conn, dbengine = setup_database_connections(conf.standby)\n delta = timedelta(minutes=5)\n now = datetime.utcnow()\n since = now - delta\n with conn.cursor() as cur:\n sql = queries[\"fastpath_count\"]\n cur.execute(sql, dict(since=since, until=now))\n new_fp_msmt_count = cur.fetchone()[0]\n\n gauge_family.labels(\"fastpath_new_5m\").set(new_fp_msmt_count)\n\n log.info(\"MMC: Gathering database replica status\")\n with conn.cursor() as cur:\n cur.execute(sql_replication_delay)\n delay = cur.fetchone()[0].total_seconds()\n\n log.info(\"MMC: Summarizing pg_stat_activity on standby\")\n _generate_stat_activity_gauge(stat_activity_gauge, conn, \"standby\")\n\n log.info(\"MMC: Comparing active and standby xlog location\")\n with database_connection(conf.active) as active_conn:\n # This whole block runs against the active DB\n # Replication deltas\n log.info(\"MMC: Generating replication_deltas\")\n with active_conn.cursor(cursor_factory=RealDictCursor) as cur:\n # Thanks to\n # https://blog.dataegret.com/2017/04/deep-dive-into-postgres-stats.html\n sql = \"\"\"SELECT\n (pg_xlog_location_diff(pg_current_xlog_location(),sent_location) / 1024)::bigint as pending,\n (pg_xlog_location_diff(sent_location,write_location) / 1024)::bigint as write,\n (pg_xlog_location_diff(write_location,flush_location) / 1024)::bigint as flush,\n (pg_xlog_location_diff(flush_location,replay_location) / 1024)::bigint as replay\n FROM pg_stat_replication\"\"\"\n cur.execute(sql)\n d = cur.fetchone()\n assert d\n for k, v in d.items():\n replication_deltas_gauge.labels(k).set(v)\n # End of replication deltas\n\n log.info(\"MMC: Summarizing pg_stat_activity on active\")\n _generate_stat_activity_gauge(\n stat_activity_gauge, 
active_conn, \"active\"\n )\n\n # Extract active_xlog_location to compare active VS standby\n with active_conn.cursor() as cur:\n cur.execute(\"SELECT pg_current_xlog_location()\")\n active_xlog_location = cur.fetchone()[0]\n\n with conn.cursor() as cur:\n cur.execute(\"SELECT pg_last_xlog_receive_location()\")\n standby_xlog_location = cur.fetchone()[0]\n\n gauge_family.labels(\"raw_replication_delay\").set(delay)\n\n if active_xlog_location == standby_xlog_location:\n gauge_family.labels(\"replication_delay\").set(0)\n else:\n gauge_family.labels(\"replication_delay\").set(delay)\n\n # prom.write_to_textfile(nodeexp_path, prom_reg)\n\n # The following queries are heavier\n if cycle_seconds == 0:\n log.info(\"MMC: Running extended DB metrics gathering\")\n today = datetime.utcnow().date()\n with conn.cursor() as cur:\n # Compare different days in the past: pipeline and fastpath\n # might be catching up on older data and we want to monitor\n # that.\n for age_in_days in range(3):\n d1 = timedelta(days=1)\n end = today - timedelta(days=age_in_days) + d1\n times = dict(\n until_ext=end + d1 + d1 + d1,\n until=end,\n since=end - d1,\n since_ext=end - d1 - d1 - d1 - d1,\n )\n for query_name, sql in queries.items():\n cur.execute(sql, times)\n val = cur.fetchone()[0]\n log.info(\n \"MMC: %s %s %s %d\",\n times[\"since\"],\n times[\"until\"],\n query_name,\n val,\n )\n gauge_family.labels(\n f\"{query_name}_{age_in_days}_days_ago\"\n ).set(val)\n\n # prom.write_to_textfile(nodeexp_path, prom_reg)\n\n cycle_seconds = (cycle_seconds + INTERVAL) % 3600\n\n except Exception as e:\n log.error(e, exc_info=True)\n\n finally:\n conn.close()\n log.debug(\"MMC: Done\")\n if has_systemd:\n watchdog.notify(\"STATUS=MMC Sleeping\")\n\n endtime = time.time() + INTERVAL\n while time.time() < endtime:\n if has_systemd:\n watchdog.notify(\"WATCHDOG=1\")\n time.sleep(10)\n\n\ndef domain_input_update_runner():\n \"\"\"Runs domain_input_updater\"\"\"\n conf = Namespace(dry_run=False, db_uri=None)\n with metrics.timer(\"domain_input_updater_runtime\"):\n log.info(\"domain_input_updater: starting\")\n try:\n domain_input_updater.run(conf)\n metrics.gauge(\"domain_input_updater_success\", 1)\n log.info(\"domain_input_updater: success\")\n except Exception as e:\n metrics.gauge(\"domain_input_updater_success\", 0)\n log.error(\"domain_input_updater: failure %r\", e)\n\n\ndef main():\n global conf\n log.info(\"Analysis starting\")\n cp = ConfigParser()\n with open(\"/etc/ooni/analysis.conf\") as f:\n cp.read_file(f)\n\n conf = parse_args()\n if conf.devel or conf.stdout or not has_systemd:\n format = \"%(relativeCreated)d %(process)d %(levelname)s %(name)s %(message)s\"\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=format)\n\n else:\n log.addHandler(JournalHandler(SYSLOG_IDENTIFIER=\"analysis\"))\n log.setLevel(logging.DEBUG)\n\n for role in (\"active\", \"standby\"):\n setattr(conf, role, dict(cp[role]))\n\n log.info(\"Logging started\")\n conf.output_directory = (\n Path(\"./var/lib/analysis\") if conf.devel else Path(\"/var/lib/analysis\")\n )\n os.makedirs(conf.output_directory, exist_ok=True)\n\n # monitor_measurement_creation(conf)\n\n if conf.backup_db:\n backup_to_s3.log = log\n backup_to_s3.run_backup(conf, cp)\n return\n\n try:\n if conf.update_counters:\n update_all_counters_tables(conf)\n\n if conf.update_citizenlab:\n update_citizenlab_test_lists(conf)\n\n if conf.update_tables_daily:\n update_tables_daily(conf)\n\n except Exception as e:\n log.error(str(e), exc_info=e)\n\n 
log.info(\"done\")\n # coverage_generator(conf)\n\n # generate_slow_query_summary(conf)\n\n # # Update confirmed_stats table. The update is idempotent. The table is used\n # # in the next steps.\n # if conf.no_update_confirmed_stats == False:\n # append_confirmed_stats()\n # append_confirmed_stats_asn()\n\n # measure_blocking_globally()\n # detect_blocking()\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.use" ] ]
hadrianl/qlib
[ "fa8f1cba06ba511744a0625afdf2cc3ac05302d0" ]
[ "setup.py" ]
[ "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\nimport io\nimport os\nimport numpy\n\nfrom setuptools import find_packages, setup, Extension\n\n# Package meta-data.\nNAME = \"pyqlib\"\nDESCRIPTION = \"A Quantitative-research Platform\"\nREQUIRES_PYTHON = \">=3.5.0\"\n\nVERSION = \"0.6.3.99\"\n\n# Detect Cython\ntry:\n import Cython\n\n ver = Cython.__version__\n _CYTHON_INSTALLED = ver >= \"0.28\"\nexcept ImportError:\n _CYTHON_INSTALLED = False\n\nif not _CYTHON_INSTALLED:\n print(\"Required Cython version >= 0.28 is not detected!\")\n print('Please run \"pip install --upgrade cython\" first.')\n exit(-1)\n\n# What packages are required for this module to be executed?\n# `estimator` may depend on other packages. In order to reduce dependencies, it is not written here.\nREQUIRED = [\n \"numpy>=1.12.0\",\n \"pandas>=0.25.1\",\n \"scipy>=1.0.0\",\n \"requests>=2.18.0\",\n \"sacred>=0.7.4\",\n \"python-socketio==3.1.2\",\n \"redis>=3.0.1\",\n \"python-redis-lock>=3.3.1\",\n \"schedule>=0.6.0\",\n \"cvxpy==1.0.21\",\n \"hyperopt==0.1.1\",\n \"fire>=0.3.1\",\n \"statsmodels\",\n \"xlrd>=1.0.0\",\n \"plotly==4.12.0\",\n \"matplotlib==3.1.3\",\n \"tables>=3.6.1\",\n \"pyyaml>=5.3.1\",\n \"mlflow>=1.12.1\",\n \"tqdm\",\n \"loguru\",\n \"lightgbm\",\n \"tornado\",\n \"joblib>=0.17.0\",\n \"ruamel.yaml>=0.16.12\",\n]\n\n# Numpy include\nNUMPY_INCLUDE = numpy.get_include()\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n\n# Cython Extensions\nextensions = [\n Extension(\n \"qlib.data._libs.rolling\",\n [\"qlib/data/_libs/rolling.pyx\"],\n language=\"c++\",\n include_dirs=[NUMPY_INCLUDE],\n ),\n Extension(\n \"qlib.data._libs.expanding\",\n [\"qlib/data/_libs/expanding.pyx\"],\n language=\"c++\",\n include_dirs=[NUMPY_INCLUDE],\n ),\n]\n\n# Where the magic happens:\nsetup(\n name=NAME,\n version=VERSION,\n license=\"MIT Licence\",\n url=\"https://github.com/microsoft/qlib\",\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n python_requires=REQUIRES_PYTHON,\n packages=find_packages(exclude=(\"tests\",)),\n # if your package is a single module, use this instead of 'packages':\n # py_modules=['qlib'],\n entry_points={\n # 'console_scripts': ['mycli=mymodule:cli'],\n \"console_scripts\": [\n \"qrun=qlib.workflow.cli:run\",\n ],\n },\n ext_modules=extensions,\n install_requires=REQUIRED,\n include_package_data=True,\n classifiers=[\n # Trove classifiers\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n # 'License :: OSI Approved :: MIT License',\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"License :: OSI Approved :: MIT License\",\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n" ]
[ [ "numpy.get_include" ] ]
PauloBernardo/InteligenciaComputacional
[ "f5edcc01c68b83fc4435e6669e3ebd0a32d7d8b7" ]
[ "linearRegression/main.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\n\ndef simple_example():\n X = [10, 20, 30]\n Y = [15, 19, 45]\n plt.scatter(X, Y,)\n plt.show()\n\n A = np.array([10, 1, 20, 1, 30, 1]).reshape(3, 2)\n B = np.array(Y).reshape(3, 1)\n\n a = np.linspace(10, 30)\n arr = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), Y)\n arr.tolist()\n beta, alpha = arr\n Yi = alpha + beta * a\n\n plt.scatter(X, Y)\n plt.plot(a, Yi)\n plt.show()\n\n\ndef linear_least_squares(examples):\n m, n = examples.shape\n cx = examples[0].reshape(n, 1)\n c2 = np.ones(len(cx)).reshape(n, 1)\n A = np.hstack((cx, c2))\n\n return np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), examples[1])\n\n\ndef plot_figure(x, y, alpha, beta, title, x_label, y_label):\n min_y = alpha + beta * min(x)\n max_y = alpha + beta * max(x)\n\n plt.plot([min(x), max(x)], [min_y, max_y])\n plt.scatter(x, y, color='orange')\n plt.xlabel(x_label)\n plt.title(title)\n plt.grid(True)\n plt.ylabel(y_label)\n plt.show()\n\n\ndef plot_linear_regression(examples, title='Linear Least Squares Regression Example', x_label='X', y_label='Y'):\n min_x = min(examples[0])\n max_x = max(examples[0])\n theta = linear_least_squares(examples)\n theta.tolist()\n beta, alpha = theta\n\n min_y = alpha + beta * min_x\n max_y = alpha + beta * max_x\n\n plt.plot([min(examples[0]), max(examples[0])], [min_y, max_y])\n plt.scatter(examples[0], examples[1], color='orange')\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.title(title)\n plt.grid(True)\n plt.show()\n\n\ndef simple_linear_least_squares_example():\n plot_linear_regression(np.array([[1.5, 1.6, 1.3, 1.4, 1.5, 1.7, 1.8, 1.7, 1.1, 1.2], [10, 12, 16, 13, 15, 11, 8, 10, 18, 13]]), x_label='Prices', y_label='Sales')\n\n\ndef statistic_linear_regression(x, y):\n number_of_elements = len(x)\n if number_of_elements != len(y):\n raise Exception(\"Size of x and y must be equal!\")\n mean_x, mean_y = sum(x)/number_of_elements, sum(y)/number_of_elements\n sum_x_vezes_y = sum([i * j for i, j in zip(x, y)])\n sum_x_pow_2 = sum([i ** 2 for i in x])\n sxy = sum_x_vezes_y - number_of_elements * mean_x * mean_y\n sxx = sum_x_pow_2 - number_of_elements * mean_x * mean_x\n beta = sxy / sxx\n alpha = mean_y - beta * mean_x\n return alpha, beta\n\n\ndef plot_statistic_linear_regression(x, y, title='Statistic Linear Regression Example', x_label='X', y_label='Y'):\n alpha, beta = statistic_linear_regression(x, y)\n plot_figure(x, y, alpha, beta, title, x_label, y_label)\n\n\ndef simple_statistic_linear_regression_example():\n plot_statistic_linear_regression([1.5, 1.6, 1.3, 1.4, 1.5, 1.7, 1.8, 1.7, 1.1, 1.2], [10, 12, 16, 13, 15, 11, 8, 10, 18, 13], x_label='Prices', y_label='Sales')\n\n\ndef sklearn_linear_regression(x, y):\n reg = LinearRegression().fit(x.reshape(-1, 1), y.reshape(-1, 1))\n return reg.intercept_[0], reg.coef_[0][0]\n\n\ndef plot_sklearn_linear_regression(x, y, title='Sklearn Linear Regression Example', x_label='X', y_label='Y'):\n alpha, beta = sklearn_linear_regression(x, y)\n plot_figure(x, y, alpha, beta, title, x_label, y_label)\n\n\ndef simple_sklearn_linear_regression_example():\n prices = np.array([1.5, 1.6, 1.3, 1.4, 1.5, 1.7, 1.8, 1.7, 1.1, 1.2])\n sales = np.array([10, 12, 16, 13, 15, 11, 8, 10, 18, 13])\n plot_sklearn_linear_regression(prices, sales, x_label='Prices', y_label='Sales')\n\n\nif __name__ == '__main__':\n simple_linear_least_squares_example()\n simple_statistic_linear_regression_example()\n 
simple_sklearn_linear_regression_example()\n" ]
[ [ "numpy.matmul", "matplotlib.pyplot.grid", "sklearn.linear_model.LinearRegression", "matplotlib.pyplot.title", "numpy.hstack", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.array", "matplotlib.pyplot.plot", "numpy.linspace", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter" ] ]
xkf15/faas-profiler
[ "77681daa9f5776e58d1e7dc2d38b61735d54b014" ]
[ "workload_analyzer/PerfMonAnalyzer.py" ]
[ "# Copyright (c) 2019 Princeton University\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom datetime import datetime, timedelta\nimport json\nimport os.path\nimport pandas as pd\nimport sys\n\nsys.path = ['./', '../'] + sys.path\n\n# Local\nfrom GenConfigs import *\nfrom Logger import ScriptLogger\n\nlogger = ScriptLogger(loggername='workload_analyzer/perf_mon_analyzer',\n logfile='WA.log')\n\n\ndef ReadPQOSMSRMon(pqos_msr_mon_file):\n \"\"\"\n This function parses the output of the pqos-msr-mon.\n \"\"\"\n with open(pqos_msr_mon_file) as f:\n lines = f.readlines()\n\n records = {'timestamp': [], 'Core': [], 'IPC': [],\n 'LLC Misses': [], 'LLC Util (KB)': [], 'MBL (MB/s)': []}\n tmp_records = {'timestamp': [], 'Core': [], 'IPC': [],\n 'LLC Misses': [], 'LLC Util (KB)': [], 'MBL (MB/s)': []}\n prev_timestamp, index = None, -1\n\n for line_index in range(len(lines)):\n line = lines[line_index]\n if 'TIME' in line:\n index += 1\n timestamp = datetime.strptime(line[5:-1], '%Y-%m-%d %H:%M:%S')\n if (timestamp != prev_timestamp):\n for key, value in tmp_records.items():\n if key == 'timestamp':\n for i in value:\n records[key] += [prev_timestamp +\n timedelta(seconds=1.0*i/index)]\n else:\n records[key] += value\n tmp_records = {'timestamp': [], 'Core': [], 'IPC': [\n ], 'LLC Misses': [], 'LLC Util (KB)': [], 'MBL (MB/s)': []}\n index = 0\n\n prev_timestamp = timestamp\n elif 'CORE' in line:\n pass\n else:\n tmp_records['timestamp'].append(index)\n separated = line.split(' ')\n separated = [v for v in separated if v != '']\n tmp_records['Core'].append(int(separated[0]))\n tmp_records['IPC'].append(float(separated[1]))\n tmp_records['LLC Misses'].append(int(separated[2][:-1])*1000)\n tmp_records['LLC Util (KB)'].append(float(separated[3]))\n tmp_records['MBL (MB/s)'].append(float(separated[4]))\n\n for key, value in tmp_records.items():\n if key == 'timestamp':\n for i in value:\n records[key] += [prev_timestamp +\n timedelta(seconds=1.0*i/index)]\n else:\n records[key] += value\n\n # return the records as Pandas dataframe\n records_df = pd.DataFrame(records)\n return records_df\n\n\ndef ReadPerfMon(perf_mon_file):\n \"\"\"\n This function parses the output of the Linux Perf tool.\n \"\"\"\n with open(perf_mon_file) as f:\n lines = f.readlines()\n\n records = {'timestamp': []} # more fields are added dynamically\n\n for line in lines:\n separated = line.split(' ')\n separated = [v for v in separated if v != '']\n\n try:\n if 'counted' in separated[2]:\n del separated[2]\n except:\n pass\n\n if (len(separated) < 3) or (len(separated) > 4):\n continue\n time = float(separated[0])\n field = separated[2]\n try:\n val = int(separated[1].replace(',', ''))\n except:\n val = None\n try:\n records[field].append(val)\n except:\n records[field] = [val] # first element of the list\n try:\n if records['timestamp'][-1] != time:\n records['timestamp'].append(time)\n except:\n records['timestamp'].append(time) # first append\n\n # return the records as Pandas dataframe\n return pd.DataFrame(records)\n\n\ndef AnalyzePerfMonRecords(config_file):\n \"\"\"\n This function is used to analyze the performance monitoring data after conducting the test.\n \"\"\"\n logger.info(\"Started to analyze the performance monitoring records.\")\n\n try:\n with open(FAAS_ROOT + '/' + config_file) as f:\n workload = json.load(f)\n except:\n return False\n\n records = {}\n\n # Perf Tool\n perf_mon_file = FAAS_ROOT + '/perf-mon.out'\n 
pqos_msr_mon_file = FAAS_ROOT + '/pqos-msr-mon.out'\n\n if not os.path.isfile(perf_mon_file):\n logger.error(\"The perf output file missing!\")\n else:\n records['perf_records'] = ReadPerfMon(perf_mon_file)\n\n # PQOS Mon\n if not os.path.isfile(pqos_msr_mon_file):\n logger.error(\"The PQOS output file is missing!\")\n else:\n records['pqos_records'] = ReadPQOSMSRMon(pqos_msr_mon_file)\n\n return records\n" ]
[ [ "pandas.DataFrame" ] ]
eonu/inf4-hons
[ "4b7372272860f19c0f5ea2910f122a62531d7d2e", "4b7372272860f19c0f5ea2910f122a62531d7d2e" ]
[ "sequentia/lib/sequentia/internals/validator.py", "sequentia/lib/sequentia/classifiers/hmm/topologies/left_right.py" ]
[ "import numpy as np\nfrom copy import copy\n\nclass _Validator:\n \"\"\"Performs internal validations on various input types.\"\"\"\n\n def observation_sequences(self, X, allow_single=False):\n \"\"\"Validates observation sequence(s).\n\n Parameters\n ----------\n X: numpy.ndarray or List[numpy.ndarray]\n An individual observation sequence or a list of multiple observation sequences.\n\n allow_single: bool\n Whether to allow an individual observation sequence.\n\n Returns\n -------\n X: numpy.ndarray or List[numpy.ndarray]\n The original input observation sequence(s) if valid.\n \"\"\"\n X = copy(X)\n if isinstance(X, (list, np.ndarray) if allow_single else list):\n if isinstance(X, list):\n for i, x in enumerate(X):\n if not isinstance(x, np.ndarray):\n raise TypeError('Each observation sequence must be a numpy.ndarray')\n if not x.ndim <= 2:\n raise ValueError('Each observation sequence must be at most two-dimensional')\n x = X[i] = (x if x.ndim == 2 else np.atleast_2d(x).T).astype(float)\n if not x.shape[1] == X[0].shape[1]:\n raise ValueError('Each observation sequence must have the same dimensionality')\n elif isinstance(X, np.ndarray):\n if not X.ndim <= 2:\n raise ValueError('Observation sequence must be at most two-dimensional')\n X = (X if X.ndim == 2 else np.atleast_2d(X).T).astype(float)\n else:\n if allow_single:\n raise TypeError('Expected an individual observation sequence or a list of multiple observation sequences, each of type numpy.ndarray')\n else:\n raise TypeError('Expected a list of observation sequences, each of type numpy.ndarray')\n return X\n\n def observation_sequences_and_labels(self, X, y):\n \"\"\"Validates observation sequences and corresponding labels.\n\n Parameters\n ----------\n X: List[numpy.ndarray]\n A list of multiple observation sequences.\n\n y: List[str]\n A list of labels for the observation sequences.\n\n Returns\n -------\n X: List[numpy.ndarray]\n The original input observation sequences if valid.\n\n y: List[str]\n The original input labels if valid.\n \"\"\"\n self.observation_sequences(X, allow_single=False)\n self.list_of_strings(y, desc='labels')\n if not len(X) == len(y):\n raise ValueError('Expected the same number of observation sequences and labels')\n return X, y\n\n def integer(self, item, desc):\n \"\"\"Validates an integer.\n\n Parameters\n ----------\n item: int\n The item to validate.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n item: int\n The original input item if valid.\n \"\"\"\n if not isinstance(item, int):\n raise TypeError(\"Expected {} to be an integer\".format(desc))\n return item\n\n def string(self, item, desc):\n \"\"\"Validates a string.\n\n Parameters\n ----------\n item: str\n The item to validate.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n item: str\n The original input item if valid.\n \"\"\"\n if not isinstance(item, str):\n raise TypeError(\"Expected {} to be a string\".format(desc))\n return item\n\n def boolean(self, item, desc):\n \"\"\"Validates a boolean.\n\n Parameters\n ----------\n item: bool\n The item to validate.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n item: bool\n The original input item if valid.\n \"\"\"\n if not isinstance(item, bool):\n raise TypeError(\"Expected {} to be a boolean\".format(desc))\n return item\n\n def one_of(self, item, items, desc):\n \"\"\"Validates that an item is one of some permitted values.\n\n Parameters\n ----------\n item: Any\n The item to 
validate.\n\n items: List[Any]\n The list of permitted values to check against.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n item: Any\n The original input item if valid.\n \"\"\"\n if not item in items:\n raise ValueError('Expected {} to be one of {}'.format(desc, items))\n return item\n\n def restricted_integer(self, item, condition, desc, expected):\n \"\"\"Validates an integer and checks that it satisfies some condition.\n\n Parameters\n ----------\n item: int\n The item to validate.\n\n condition: lambda\n A condition to check the item against.\n\n desc: str\n A description of the item being validated.\n\n expected: str\n A description of the condition, or expected value.\n\n Returns\n -------\n item: int\n The original input item if valid.\n \"\"\"\n if isinstance(item, int):\n if not condition(item):\n raise ValueError('Expected {} to be {}'.format(desc, expected))\n else:\n raise TypeError(\"Expected {} to be an integer\".format(desc))\n return item\n\n def restricted_float(self, item, condition, desc, expected):\n \"\"\"Validates a float and checks that it satisfies some condition.\n\n Parameters\n ----------\n item: float\n The item to validate.\n\n condition: lambda\n A condition to check the item against.\n\n desc: str\n A description of the item being validated.\n\n expected: str\n A description of the condition, or expected value.\n\n Returns\n -------\n item: float\n The original input item if valid.\n \"\"\"\n if isinstance(item, float):\n if not condition(item):\n raise ValueError('Expected {} to be {}'.format(desc, expected))\n else:\n raise TypeError(\"Expected {} to be a float\".format(desc))\n return item\n\n def list_of_strings(self, items, desc):\n \"\"\"Validates a list and checks that it consists entirely of strings.\n\n Parameters\n ----------\n items: List[str]\n The item to validate.\n\n desc: str\n A description of the item being validated.\n\n Returns\n -------\n items: List[str]\n The original input items if valid.\n \"\"\"\n if isinstance(items, list):\n if not all(isinstance(item, str) for item in items):\n raise ValueError('Expected all {} to be strings'.format(desc))\n else:\n raise TypeError('Expected {} to be a list of strings'.format(desc))\n return items\n\n def random_state(self, state):\n \"\"\"Validates a random state object or seed.\n\n Parameters\n ----------\n state: None, int, numpy.random.RandomState\n A random state object or seed.\n\n Returns\n -------\n state: numpy.random.RandomState\n A random state object.\n \"\"\"\n if state is None:\n return np.random.RandomState(seed=0)\n elif isinstance(state, int):\n return np.random.RandomState(seed=state)\n elif isinstance(state, np.random.RandomState):\n return state\n else:\n raise TypeError('Expected random state to be of type: None, int, or numpy.random.RandomState')", "import numpy as np\nfrom .topology import _Topology\n\nclass _LeftRightTopology(_Topology):\n \"\"\"Represents the topology for a left-right HMM, imposing an upper-triangular transition matrix.\n\n Parameters\n ----------\n n_states: int\n Number of states in the HMM.\n\n random_state: numpy.random.RandomState\n A random state object for reproducible randomness.\n \"\"\"\n\n def __init__(self, n_states: int, random_state: np.random.RandomState):\n super().__init__(n_states, random_state)\n\n def uniform_transitions(self) -> np.ndarray:\n \"\"\"Sets the transition matrix as uniform (equal probability of transitioning\n to all other possible states from each state) corresponding to the 
topology.\n\n Returns\n -------\n transitions: numpy.ndarray\n The uniform transition matrix of shape `(n_states, n_states)`.\n \"\"\"\n upper_ones = np.triu(np.ones((self._n_states, self._n_states)))\n upper_divisors = np.triu(np.tile(np.arange(self._n_states, 0, -1), (self._n_states, 1)).T)\n lower_ones = np.tril(np.ones(self._n_states), k=-1) # One-pad lower triangle to prevent zero division\n return upper_ones / (upper_divisors + lower_ones)\n\n def random_transitions(self) -> np.ndarray:\n \"\"\"Sets the transition matrix as random (random probability of transitioning\n to all other possible states from each state) by sampling probabilities\n from a Dirichlet distribution - according to the topology.\n\n Parameters\n ----------\n transitions: numpy.ndarray\n The random transition matrix of shape `(n_states, n_states)`.\n \"\"\"\n transitions = self._random_state.dirichlet(np.ones(self._n_states), size=self._n_states)\n lower_sums = np.sum(np.tril(transitions, k=-1), axis=1) # Amount to be redistributed per row\n quantities = np.arange(self._n_states, 0, -1) # Number of elements per row to redistribute evenly to\n upper_ones = np.triu(np.ones((self._n_states, self._n_states)))\n redist = (lower_sums / quantities).reshape(-1, 1) * upper_ones\n return np.triu(transitions) + redist\n\n def validate_transitions(self, transitions: np.ndarray) -> None:\n \"\"\"Validates a transition matrix according to the topology's restrictions.\n\n Parameters\n ----------\n transitions: numpy.ndarray\n The transition matrix to validate.\n \"\"\"\n super().validate_transitions(transitions)\n if not np.allclose(transitions, np.triu(transitions)):\n raise ValueError('Left-right transition matrix must be upper-triangular')" ]
[ [ "numpy.random.RandomState", "numpy.atleast_2d" ], [ "numpy.arange", "numpy.ones", "numpy.tril", "numpy.triu" ] ]
JianGoForIt/tensorflow
[ "12e78f07a30e5ec8d1a9baf7cd87f4f45d29b657" ]
[ "tensorflow/python/ops/array_ops.py" ]
[ "# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"## Casting\n\nTensorFlow provides several operations that you can use to cast tensor data\ntypes in your graph.\n\n@@string_to_number\n@@to_double\n@@to_float\n@@to_bfloat16\n@@to_int32\n@@to_int64\n@@cast\n@@saturate_cast\n\n## Shapes and Shaping\n\nTensorFlow provides several operations that you can use to determine the shape\nof a tensor and change the shape of a tensor.\n\n@@shape\n@@size\n@@rank\n@@reshape\n@@squeeze\n@@expand_dims\n\n## Slicing and Joining\n\nTensorFlow provides several operations to slice or extract parts of a tensor,\nor join multiple tensors together.\n\n@@slice\n@@split\n@@tile\n@@pad\n@@concat\n@@pack\n@@unpack\n@@reverse_sequence\n@@reverse\n@@transpose\n@@space_to_depth\n@@depth_to_space\n@@gather\n@@dynamic_partition\n@@dynamic_stitch\n@@boolean_mask\n@@one_hot\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import common_shapes\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_math_ops\nfrom tensorflow.python.ops import logging_ops\n# 'Constant' gets imported in the module 'array_ops'.\nfrom tensorflow.python.ops.constant_op import constant\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_array_ops import *\n# pylint: enable=wildcard-import\n\n\n# We override the 'slice' for the \"slice\" op, so we keep python's\n# existing 'slice' for later use in this module.\n_baseslice = slice\n\n\n# Aliases for some automatically-generated names.\nlistdiff = gen_array_ops.list_diff\n\n\n# DEPRECATED use init_ops.zeros_initializer\n# TODO(irving) Move it to init_ops.py\ndef zeros_initializer(shape, dtype=dtypes.float32):\n \"\"\"An adaptor for zeros() to match the Initializer spec.\"\"\"\n return zeros(shape, dtype)\n\n\n# pylint: disable=undefined-variable,protected-access\ndef _SliceHelper(tensor, slice_spec):\n \"\"\"Overload for Tensor.__getitem__.\n\n Currently the size of the slice must be statically known in each dimension,\n i.e. 
the \"stop\" of the slice must not be omitted.\n\n TODO(mrry): Support slices where the sizes are not specified.\n TODO(mrry): Support negative indices in slices with numpy/Python semantics.\n\n Args:\n tensor: An ops.Tensor object.\n slice_spec: The arguments to Tensor.__getitem__.\n\n Returns:\n The appropriate slice of \"tensor\", based on \"slice_spec\".\n\n Raises:\n ValueError: If a slice range is negative size.\n TypeError: If the slice indices aren't int, slice, or Ellipsis.\n \"\"\"\n if not isinstance(slice_spec, (list, tuple)):\n slice_spec = [slice_spec]\n indices = []\n sizes = []\n squeeze_dims = []\n for dim, s in enumerate(slice_spec):\n if isinstance(s, int):\n if s < 0:\n raise NotImplementedError(\"Negative indices are currently unsupported\")\n indices.append(s)\n sizes.append(1)\n squeeze_dims.append(dim)\n elif isinstance(s, _baseslice):\n if s.step not in (None, 1):\n raise NotImplementedError(\n \"Steps other than 1 are not currently supported\")\n start = s.start if s.start is not None else 0\n if start < 0:\n raise NotImplementedError(\n \"Negative start indices are not currently supported\")\n indices.append(start)\n if s.stop is not None and s.stop < 0:\n raise NotImplementedError(\n \"Negative stop indices are not currently supported\")\n # NOTE(mrry): If the stop is not specified, Python substitutes\n # sys.maxsize, which is typically (2 ** 63) - 1. Since Slice currently\n # supports signed DT_INT32 arguments, we use -1 to specify that all\n # elements should be captured.\n if s.stop is None or s.stop == sys.maxsize:\n sizes.append(-1)\n else:\n if start > s.stop:\n raise ValueError(\"Stop must be at least start\")\n sizes.append(s.stop - start)\n elif s is Ellipsis:\n raise NotImplementedError(\"Ellipsis is not currently supported\")\n else:\n raise TypeError(\"Bad slice index %s of type %s\" % (s, type(s)))\n sliced = slice(tensor, indices, sizes)\n if squeeze_dims:\n return squeeze(sliced, squeeze_dims=squeeze_dims)\n else:\n return sliced\n\n\ndef slice(input_, begin, size, name=None):\n \"\"\"Extracts a slice from a tensor.\n\n This operation extracts a slice of size `size` from a tensor `input` starting\n at the location specified by `begin`. The slice `size` is represented as a\n tensor shape, where `size[i]` is the number of elements of the 'i'th dimension\n of `input` that you want to slice. The starting location (`begin`) for the\n slice is represented as an offset in each dimension of `input`. In other\n words, `begin[i]` is the offset into the 'i'th dimension of `input` that you\n want to slice from.\n\n `begin` is zero-based; `size` is one-based. If `size[i]` is -1,\n all remaining elements in dimension i are included in the\n slice. 
In other words, this is equivalent to setting:\n\n `size[i] = input.dim_size(i) - begin[i]`\n\n This operation requires that:\n\n `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`\n\n For example:\n\n ```\n # 'input' is [[[1, 1, 1], [2, 2, 2]],\n # [[3, 3, 3], [4, 4, 4]],\n # [[5, 5, 5], [6, 6, 6]]]\n tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]\n tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],\n [4, 4, 4]]]\n tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],\n [[5, 5, 5]]]\n ```\n\n Args:\n input_: A `Tensor`.\n begin: An `int32` or `int64` `Tensor`.\n size: An `int32` or `int64` `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` the same type as `input`.\n \"\"\"\n return gen_array_ops._slice(input_, begin, size, name=name)\n\n\nops.Tensor._override_operator(\"__getitem__\", _SliceHelper)\n\n\ndef pack(values, name=\"pack\"):\n \"\"\"Packs a list of rank-`R` tensors into one rank-`(R+1)` tensor.\n\n Packs tensors in `values` into a tensor with rank one higher than each tensor\n in `values` and shape `[len(values)] + values[0].shape`. The output satisfies\n `output[i, ...] = values[i][...]`.\n\n This is the opposite of unpack. The numpy equivalent is\n\n tf.pack([x, y, z]) = np.asarray([x, y, z])\n\n Args:\n values: A list of `Tensor` objects with the same shape and type.\n name: A name for this operation (optional).\n\n Returns:\n output: A packed `Tensor` with the same type as `values`.\n \"\"\"\n return gen_array_ops._pack(values, name=name)\n\n\ndef unpack(value, num=None, name=\"unpack\"):\n \"\"\"Unpacks the outer dimension of a rank-`R` tensor into rank-`(R-1)` tensors.\n\n Unpacks `num` tensors from `value` along the first dimension.\n If `num` is not specified (the default), it is inferred from `value`'s shape.\n If `value.shape[0]` is not known, `ValueError` is raised.\n\n The ith tensor in `output` is the slice `value[i, ...]`. Each tensor in\n `output` has shape `value.shape[1:]`.\n\n This is the opposite of pack. The numpy equivalent is\n\n tf.unpack(x, n) = list(x)\n\n Args:\n value: A rank `R > 0` `Tensor` to be unpacked.\n num: An `int`. The first dimension of value. Automatically inferred if\n `None` (the default).\n name: A name for the operation (optional).\n\n Returns:\n The list of `Tensor` objects unpacked from `value`.\n\n Raises:\n ValueError: If `num` is unspecified and cannot be inferred.\n \"\"\"\n if num is None:\n value = ops.convert_to_tensor(value)\n shape = value.get_shape()\n num = shape[0].value\n if num is None:\n raise ValueError(\"Cannot infer num from shape %s\" % shape)\n return gen_array_ops._unpack(value, num=num, name=name)\n\n\ndef concat(concat_dim, values, name=\"concat\"):\n \"\"\"Concatenates tensors along one dimension.\n\n Concatenates the list of tensors `values` along dimension `concat_dim`. If\n `values[i].shape = [D0, D1, ... Dconcat_dim(i), ...Dn]`, the concatenated\n result has shape\n\n [D0, D1, ... 
Rconcat_dim, ...Dn]\n\n where\n\n Rconcat_dim = sum(Dconcat_dim(i))\n\n That is, the data from the input tensors is joined along the `concat_dim`\n dimension.\n\n The number of dimensions of the input tensors must match, and all dimensions\n except `concat_dim` must be equal.\n\n For example:\n\n ```python\n t1 = [[1, 2, 3], [4, 5, 6]]\n t2 = [[7, 8, 9], [10, 11, 12]]\n tf.concat(0, [t1, t2]) ==> [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]\n tf.concat(1, [t1, t2]) ==> [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]\n\n # tensor t3 with shape [2, 3]\n # tensor t4 with shape [2, 3]\n tf.shape(tf.concat(0, [t3, t4])) ==> [4, 3]\n tf.shape(tf.concat(1, [t3, t4])) ==> [2, 6]\n ```\n\n Args:\n concat_dim: 0-D `int32` `Tensor`. Dimension along which to concatenate.\n values: A list of `Tensor` objects or a single `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` resulting from concatenation of the input tensors.\n \"\"\"\n if not isinstance(values, (list, tuple)):\n values = [values]\n # TODO(mrry): Change to return values?\n if len(values) == 1: # Degenerate case of one tensor.\n return identity(values[0], name=name)\n return gen_array_ops._concat(concat_dim=concat_dim,\n values=values,\n name=name)\n\n\[email protected](\"Pack\")\ndef _PackShape(op):\n input_shape = op.inputs[0].get_shape()\n for inp in op.inputs[1:]:\n input_shape = input_shape.merge_with(inp.get_shape())\n return [tensor_shape.TensorShape([len(op.inputs)]).concatenate(input_shape)]\n\n\[email protected](\"Unpack\")\ndef _UnpackShape(op):\n input_shape = op.inputs[0].get_shape()\n return [input_shape[1:]] * op.get_attr(\"num\")\n\n\[email protected](\"Concat\")\ndef _ConcatShape(op):\n concat_dim = tensor_util.constant_value(op.inputs[0])\n if concat_dim is None:\n # Return an unknown shape with the same rank as the inputs, or an\n # unknown rank if no input's rank is known.\n rank = None\n for value in op.inputs[1:]:\n if rank is not None:\n value.get_shape().assert_has_rank(rank)\n else:\n rank = value.get_shape().ndims\n if rank == 0:\n raise ValueError(\"Can't concatenate scalars (use tf.pack instead)\")\n return [tensor_shape.unknown_shape(ndims=rank)]\n\n else:\n # Merge all the non-concat dims, and sum the concat dim to make an\n # output shape.\n concat_dim = int(concat_dim)\n output_shape = op.inputs[1].get_shape()\n for value in op.inputs[2:]:\n value_shape = value.get_shape()\n if value_shape.ndims is not None and concat_dim >= value_shape.ndims:\n raise ValueError(\"concat_dim is out of range (values rank = %d)\" %\n value_shape.ndims)\n before = output_shape[:concat_dim].merge_with(value_shape[:concat_dim])\n at = output_shape[concat_dim] + value_shape[concat_dim]\n after = output_shape[\n concat_dim + 1:].merge_with(value_shape[concat_dim + 1:])\n output_shape = before.concatenate(at).concatenate(after)\n return [output_shape]\n\n\[email protected](\"ConcatOffset\")\ndef _ConcatOffsetShape(op):\n return [x.get_shape() for x in op.inputs[1:]]\n\n\ndef boolean_mask(tensor, mask, name=\"boolean_mask\"):\n \"\"\"Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.\n\n ```python\n # 1-D example\n tensor = [0, 1, 2, 3]\n mask = [True, False, True, False]\n boolean_mask(tensor, mask) ==> [0, 2]\n ```\n\n In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match\n the first K dimensions of `tensor`'s shape. 
We then have:\n `boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`\n where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).\n\n Args:\n tensor: N-D tensor. First K dimensions can be None, which allows e.g.\n undefined batch size. Trailing dimensions must be specified.\n mask: K-D boolean tensor, K <= N.\n name: A name for this operation (optional).\n\n Returns:\n Tensor populated by entries in `tensor` corresponding to `True` values in\n `mask`.\n\n Raises:\n ValueError: If shapes do not conform.\n\n Examples:\n\n ```python\n # 2-D example\n a = [[1, 2], [3, 4], [5, 6]]\n mask = [True, False, True]\n boolean_mask(tensor, mask) ==> [[1, 2], [5, 6]]\n ```\n \"\"\"\n def _apply_mask_1d(reshaped_tensor, mask):\n \"\"\"Mask tensor along dimension 0 with a 1-D mask.\"\"\"\n indices = squeeze(where(mask), squeeze_dims=[1])\n return gather(reshaped_tensor, indices)\n\n with ops.op_scope([tensor, mask], name):\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n mask = ops.convert_to_tensor(mask, name=\"mask\")\n\n shape_mask = mask.get_shape()\n ndims_mask = shape_mask.ndims\n shape_tensor = tensor.get_shape()\n if ndims_mask == 0:\n raise ValueError(\"mask cannot be scalar.\")\n if ndims_mask is None:\n raise ValueError(\n \"mask dimensions must be specified, even if some dimensions are None\"\n \". E.g. shape=[None] is ok, but shape=None is not.\")\n shape_tensor[:ndims_mask].assert_is_compatible_with(shape_mask)\n\n tensor = reshape(tensor, [-1] + shape_tensor.as_list()[ndims_mask:])\n mask = reshape(mask, [-1])\n return _apply_mask_1d(tensor, mask)\n\n\ndef sparse_mask(a, mask_indices, name=None):\n \"\"\"Masks elements of `IndexedSlices`.\n\n Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that\n contains a subset of the slices of `a`. Only the slices at indices specified\n in `mask_indices` are returned.\n\n This is useful when you need to extract a subset of slices in an\n `IndexedSlices` object.\n\n For example:\n\n ```python\n # `a` contains slices at indices [12, 26, 37, 45] from a large tensor\n # with shape [1000, 10]\n a.indices => [12, 26, 37, 45]\n tf.shape(a.values) => [4, 10]\n\n # `b` will be the subset of `a` slices at its second and third indices, so\n # we want to mask of its first and last indices (which are at absolute\n # indices 12, 45)\n b = tf.sparse_mask(a, [12, 45])\n\n b.indices => [26, 37]\n tf.shape(b.values) => [2, 10]\n\n ```\n\n Args:\n * `a`: An `IndexedSlices` instance.\n * `mask_indices`: Indices of elements to mask.\n * `name`: A name for the operation (optional).\n\n Returns:\n The masked `IndexedSlices` instance.\n \"\"\"\n with ops.op_scope([a, mask_indices], name, \"sparse_mask\") as name:\n indices = a.indices\n out_indices, to_gather = listdiff(indices, mask_indices)\n out_values = gather(a.values, to_gather, name=name)\n return ops.IndexedSlices(out_values, out_indices, a.dense_shape)\n\n\ndef split(split_dim, num_split, value, name=\"split\"):\n \"\"\"Splits a tensor into `num_split` tensors along one dimension.\n\n Splits `value` along dimension `split_dim` into `num_split` smaller tensors.\n Requires that `num_split` evenly divide `value.shape[split_dim]`.\n\n For example:\n\n ```python\n # 'value' is a tensor with shape [5, 30]\n # Split 'value' into 3 tensors along dimension 1\n split0, split1, split2 = tf.split(1, 3, value)\n tf.shape(split0) ==> [5, 10]\n ```\n\n Args:\n split_dim: A 0-D `int32` `Tensor`. 
The dimension along which to split.\n Must be in the range `[0, rank(value))`.\n num_split: A Python integer. The number of ways to split.\n value: The `Tensor` to split.\n name: A name for the operation (optional).\n\n Returns:\n `num_split` `Tensor` objects resulting from splitting `value`.\n \"\"\"\n return gen_array_ops._split(split_dim=split_dim,\n num_split=num_split,\n value=value,\n name=name)\n\n\[email protected](\"Reverse\")\ndef _ReverseShape(op):\n dims_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(dims_shape[0])\n if input_shape.ndims is not None and input_shape.ndims > 8:\n raise ValueError(\n \"tf.reverse() does not work on tensors with more than 8 dimensions\")\n return [input_shape]\n\n\ndef transpose(a, perm=None, name=\"transpose\"):\n \"\"\"Transposes `a`. Permutes the dimensions according to `perm`.\n\n The returned tensor's dimension i will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is\n the rank of the input tensor. Hence by default, this operation performs a\n regular matrix transpose on 2-D input Tensors.\n\n For example:\n\n ```python\n # 'x' is [[1 2 3]\n # [4 5 6]]\n tf.transpose(x) ==> [[1 4]\n [2 5]\n [3 6]]\n\n # Equivalently\n tf.transpose(x, perm=[1, 0]) ==> [[1 4]\n [2 5]\n [3 6]]\n\n # 'perm' is more useful for n-dimensional tensors, for n > 2\n # 'x' is [[[1 2 3]\n # [4 5 6]]\n # [[7 8 9]\n # [10 11 12]]]\n # Take the transpose of the matrices in dimension-0\n tf.transpose(b, perm=[0, 2, 1]) ==> [[[1 4]\n [2 5]\n [3 6]]\n\n [[7 10]\n [8 11]\n [9 12]]]\n ```\n\n Args:\n a: A `Tensor`.\n perm: A permutation of the dimensions of `a`.\n name: A name for the operation (optional).\n\n Returns:\n A transposed `Tensor`.\n \"\"\"\n with ops.op_scope([a], name, \"transpose\") as name:\n if perm is None:\n rank = gen_array_ops.rank(a)\n perm = (rank - 1) - gen_math_ops._range(0, rank, 1)\n ret = gen_array_ops.transpose(a, perm, name=name)\n # NOTE(mrry): Setting the shape explicitly because\n # reverse is not handled by the shape function.\n input_shape = ret.op.inputs[0].get_shape().dims\n if input_shape is not None:\n ret.set_shape(input_shape[::-1])\n else:\n ret = gen_array_ops.transpose(a, perm, name=name)\n return ret\n\n\ndef zeros(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n This operation returns a tensor of type `dtype` with shape `shape` and\n all elements set to zero.\n\n For example:\n\n ```python\n tf.zeros([3, 4], int32) ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\n ```\n\n Args:\n shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.\n dtype: The type of an element in the resulting `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n with ops.op_scope([shape], name, \"zeros\") as name:\n if isinstance(shape, list):\n output = constant(0, shape=shape, dtype=dtype, name=name)\n else:\n shape = ops.convert_to_tensor(shape, name=\"shape\")\n output = fill(shape, constant(0, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype\n return output\n\n\ndef zeros_like(tensor, dtype=None, name=None):\n \"\"\"Creates a tensor with all elements set to zero.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the\n same type and shape as `tensor` with all elements set to zero. 
Optionally,\n you can use `dtype` to specify a new type for the returned tensor.\n\n For example:\n\n ```python\n # 'tensor' is [[1, 2, 3], [4, 5, 6]]\n tf.zeros_like(tensor) ==> [[0, 0, 0], [0, 0, 0]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,\n `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to zero.\n \"\"\"\n with ops.op_scope([tensor], name, \"zeros_like\") as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n if dtype is not None and tensor.dtype != dtype:\n ret = zeros(shape(tensor), dtype, name=name)\n ret.set_shape(tensor.get_shape())\n return ret\n else:\n return gen_array_ops._zeros_like(tensor, name=name)\n\n\ndef ones_like(tensor, dtype=None, name=None):\n \"\"\"Creates a tensor with all elements set to 1.\n\n Given a single tensor (`tensor`), this operation returns a tensor of the same\n type and shape as `tensor` with all elements set to 1. Optionally, you can\n specify a new type (`dtype`) for the returned tensor.\n\n For example:\n\n ```python\n # 'tensor' is [[1, 2, 3], [4, 5, 6]]\n tf.ones_like(tensor) ==> [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n tensor: A `Tensor`.\n dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,\n `int8`, `int16`, `int32`, `int64`, `uint8`, or `complex64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n with ops.op_scope([tensor], name, \"ones_like\") as name:\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n ones_shape = shape(tensor)\n if dtype is None:\n dtype = tensor.dtype\n ret = ones(ones_shape, dtype=dtype, name=name)\n ret.set_shape(tensor.get_shape())\n return ret\n\n\ndef ones(shape, dtype=dtypes.float32, name=None):\n \"\"\"Creates a tensor with all elements set to 1.\n\n This operation returns a tensor of type `dtype` with shape `shape` and all\n elements set to 1.\n\n For example:\n\n ```python\n tf.ones([2, 3], int32) ==> [[1, 1, 1], [1, 1, 1]]\n ```\n\n Args:\n shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.\n dtype: The type of an element in the resulting `Tensor`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with all elements set to 1.\n \"\"\"\n with ops.op_scope([shape], name, \"ones\") as name:\n if isinstance(shape, list):\n output = constant(1, shape=shape, dtype=dtype, name=name)\n else:\n shape = ops.convert_to_tensor(shape, name=\"shape\")\n output = fill(shape, constant(1, dtype=dtype), name=name)\n assert output.dtype.base_dtype == dtypes.as_dtype(dtype).base_dtype\n return output\n\n\ndef placeholder(dtype, shape=None, name=None):\n \"\"\"Inserts a placeholder for a tensor that will be always fed.\n\n **Important**: This tensor will produce an error if evaluated. Its value must\n be fed using the `feed_dict` optional argument to `Session.run()`,\n `Tensor.eval()`, or `Operation.run()`.\n\n For example:\n\n ```python\n x = tf.placeholder(tf.float32, shape=(1024, 1024))\n y = tf.matmul(x, x)\n\n with tf.Session() as sess:\n print(sess.run(y)) # ERROR: will fail because x was not fed.\n\n rand_array = np.random.rand(1024, 1024)\n print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.\n ```\n\n Args:\n dtype: The type of elements in the tensor to be fed.\n shape: The shape of the tensor to be fed (optional). 
If the shape is not\n specified, you can feed a tensor of any shape.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` that may be used as a handle for feeding a value, but not\n evaluated directly.\n \"\"\"\n shape = tensor_shape.as_shape(shape)\n if shape.is_fully_defined():\n dim_list = shape.as_list()\n else:\n dim_list = []\n ret = gen_array_ops._placeholder(\n dtype=dtype,\n shape=dim_list,\n name=name)\n ret.set_shape(shape)\n return ret\n\n\ndef pad(tensor, paddings, mode=\"CONSTANT\", name=None): # pylint: disable=invalid-name\n \"\"\"Pads a tensor.\n\n This operation pads a `tensor` according to the `paddings` you specify.\n `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of\n `tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how\n many values to add before the contents of `tensor` in that dimension, and\n `paddings[D, 1]` indicates how many values to add after the contents of\n `tensor` in that dimension. If `mode` is \"REFLECT\" then both `paddings[D, 0]`\n and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If\n `mode` is \"SYMMETRIC\" then both `paddings[D, 0]` and `paddings[D, 1]` must be\n no greater than `tensor.dim_size(D)`.\n\n The padded size of each dimension D of the output is:\n\n `paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`\n\n For example:\n\n ```python\n # 't' is [[1, 2, 3], [4, 5, 6]].\n # 'paddings' is [[1, 1,], [2, 2]].\n # rank of 't' is 2.\n pad(t, paddings, \"CONSTANT\") ==> [[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 2, 3, 0, 0],\n [0, 0, 4, 5, 6, 0, 0],\n [0, 0, 0, 0, 0, 0, 0]]\n\n pad(t, paddings, \"REFLECT\") ==> [[6, 5, 4, 5, 6, 5, 4],\n [3, 2, 1, 2, 3, 2, 1],\n [6, 5, 4, 5, 6, 5, 4],\n [3, 2, 1, 2, 3, 2, 1]]\n\n pad(t, paddings, \"SYMMETRIC\") ==> [[2, 1, 1, 2, 3, 3, 2],\n [2, 1, 1, 2, 3, 3, 2],\n [5, 4, 4, 5, 6, 6, 5],\n [5, 4, 4, 5, 6, 6, 5]]\n ```\n\n Args:\n tensor: A `Tensor`.\n paddings: A `Tensor` of type `int32`.\n mode: One of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `tensor`.\n\n Raises:\n ValueError: When mode is not one of \"CONSTANT\", \"REFLECT\", or \"SYMMETRIC\".\n \"\"\"\n\n if mode == \"CONSTANT\":\n return gen_array_ops._pad(tensor, paddings, name=name)\n if mode == \"REFLECT\":\n return gen_array_ops._mirror_pad(tensor,\n paddings,\n mode=\"REFLECT\",\n name=name)\n if mode == \"SYMMETRIC\":\n return gen_array_ops._mirror_pad(tensor,\n paddings,\n mode=\"SYMMETRIC\",\n name=name)\n raise ValueError(\"Unknown padding mode: %s\" % mode)\n\n\[email protected](\"Placeholder\")\ndef _PlaceholderShape(op):\n given_shape = tensor_util.TensorShapeProtoToList(op.get_attr(\"shape\"))\n if given_shape:\n return [tensor_shape.TensorShape(given_shape)]\n else:\n return [tensor_shape.unknown_shape()]\n\n\[email protected](\"CheckNumerics\")\[email protected](\"Identity\")\[email protected](\"RefIdentity\")\[email protected](\"StopGradient\")\ndef _UnchangedShape(op):\n return [op.inputs[0].get_shape()]\n\n\[email protected](\"Rank\")\[email protected](\"Size\")\ndef _ScalarShape(unused_op):\n return [tensor_shape.scalar()]\n\n\[email protected](\"Slice\")\ndef _SliceShape(op):\n \"\"\"Shape function for array_ops.slice.\"\"\"\n input_shape = op.inputs[0].get_shape()\n begin_shape = op.inputs[1].get_shape().with_rank(1)\n sizes_shape = op.inputs[2].get_shape().with_rank(1)\n ndims = begin_shape.merge_with(sizes_shape)[0].value\n if ndims is not None:\n input_shape.assert_has_rank(ndims)\n begin_value = tensor_util.constant_value(op.inputs[1])\n sizes_value = tensor_util.constant_value(op.inputs[2])\n if sizes_value is not None:\n returned_dims = []\n for i, slice_size in enumerate(sizes_value.ravel()):\n if slice_size != -1:\n returned_dims.append(slice_size)\n elif begin_value is not None:\n returned_dims.append(input_shape[i] - begin_value[i])\n else:\n returned_dims.append(None)\n return [tensor_shape.TensorShape(returned_dims)]\n else:\n if input_shape.ndims is not None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n elif ndims is not None:\n return [tensor_shape.unknown_shape(ndims=ndims)]\n else:\n return [tensor_shape.unknown_shape()]\n\n\[email protected](\"Gather\")\ndef _GatherShape(op):\n \"\"\"Shape function for array_ops.gather.\"\"\"\n params_shape = op.inputs[0].get_shape()\n indices_shape = op.inputs[1].get_shape()\n return [indices_shape.concatenate(params_shape[1:])]\n\n\[email protected](\"Unique\")\ndef _UniqueShape(op):\n \"\"\"Shape function for array_ops.Unique.\"\"\"\n # The output is a vector with data-dependent length.\n input_shape = op.inputs[0].get_shape()\n input_shape.assert_has_rank(1)\n return [tensor_shape.vector(None), input_shape]\n\n\[email protected](\"UniqueWithCounts\")\ndef _UniqueWithCountsShape(op):\n \"\"\"Shape function for array_ops.Unique.\"\"\"\n # The output is a vector with data-dependent length.\n input_shape = op.inputs[0].get_shape()\n input_shape.assert_has_rank(1)\n return [tensor_shape.vector(None), input_shape, tensor_shape.vector(None)]\n\n\[email protected](\"Diag\")\ndef _DiagShape(op):\n \"\"\"Shape function for array_ops.diag.\n\n This op has one input (of rank k <= 3), and one output (of rank 2k),\n where the shape of the output is the concatenation of the input\n shape with itself.\n\n Args:\n op: A Diag Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n input_shape = op.inputs[0].get_shape().with_rank_at_most(3)\n return [input_shape.concatenate(input_shape)]\n\[email protected](\"DiagPart\")\ndef 
_DiagPartShape(op):\n \"\"\"Shape function for array_ops.diag_part.\n\n This op has one input (of rank k = 2, 4, or 6), and one output (of rank k/2),\n where the shape of the output is the diagonal of the input shape.\n\n Args:\n op: A DiagPart Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If input has odd rank or greater than 6\n\n \"\"\"\n shape = op.inputs[0].get_shape()\n rank = len(shape)\n mid = rank // 2\n if rank % 2 or rank > 6:\n raise ValueError(\"Input must have even rank <= 6, input rank is \" +\n str(rank) + \".\" )\n if shape[:mid] != shape[mid:]:\n raise ValueError(\"Invalid shape, shape[:mid] \" + str(shape[:mid]) +\n \" and shape[mid:] \" + str(shape[mid:]) +\n \" do not match \")\n input_shape = shape.with_rank_at_most(6)\n return [input_shape[:len(input_shape) // 2]]\n\[email protected](\"ExpandDims\")\ndef _ExpandDimsShape(op):\n \"\"\"Determine shape for expand op's output tensor.\n\n Args:\n op: Operation for which to determine shape.\n op.inputs[0] is the input tensor.\n op.inputs[1] is the dimension in which to expand.\n Returns:\n Shape of op's output tensor.\n Raises:\n ValueError: If dim is outside of [-rank - 1, rank], where rank is the number\n of dimensions in the input tensor.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape.dims is None:\n return [tensor_shape.unknown_shape()]\n dim = tensor_util.constant_value(op.inputs[1])\n input_ndims = input_shape.ndims\n if dim < -input_ndims - 1 or dim > input_ndims:\n raise ValueError(\n \"dim %d not in [%d, %d].\" % (dim, -input_ndims, input_ndims))\n if dim < 0:\n dim += (input_ndims + 1)\n result_shape = list(input_shape.dims)\n result_shape.insert(dim, 1)\n return [tensor_shape.TensorShape(result_shape)]\n\n\[email protected](\"Squeeze\")\ndef _SqueezeShape(op):\n \"\"\"Determine shape for squeeze op's output tensor.\n\n Args:\n op: Operation for which to determine shape.\n Returns:\n Shape of op's output tensor.\n Raises:\n ValueError: if squeeze_dims includes a dimension outside of [-rank, rank),\n where rank is the number of dimensions in the input tensor. 
Or, if\n squeeze_dims includes a dimension for which input shape has a value\n not equal to 1.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape.dims is None:\n return [tensor_shape.unknown_shape()]\n\n squeeze_dims = op.get_attr(\"squeeze_dims\") or []\n wrapped_squeeze_dims = []\n input_ndims = input_shape.ndims\n for i, squeeze_dim in enumerate(squeeze_dims):\n if squeeze_dim < -input_ndims or squeeze_dim >= input_ndims:\n raise ValueError(\n \"squeeze_dims[%d]=%d not in [%d, %d).\" % (\n i, squeeze_dim, -input_ndims, input_ndims))\n if squeeze_dim < 0:\n squeeze_dim += input_ndims\n wrapped_squeeze_dims.append(squeeze_dim)\n\n result_shape = []\n for i, dim in enumerate([d.value for d in input_shape.dims]):\n is_explicit_match = i in wrapped_squeeze_dims\n if dim is None:\n if is_explicit_match:\n # Assume that the squeezed dimension will be 1 at runtime.\n continue\n if not wrapped_squeeze_dims:\n # If squeezing all 1 dimensions and we see a None, give up.\n return [tensor_shape.unknown_shape()]\n elif dim == 1:\n if is_explicit_match or not wrapped_squeeze_dims:\n continue\n elif is_explicit_match:\n raise ValueError(\n \"Can not squeeze dim[%d], expected a dimension of 1, got %d.\" % (\n i, dim))\n result_shape.append(dim)\n return [tensor_shape.TensorShape(result_shape)]\n\n\[email protected](\"Bitcast\")\ndef _BitcastShape(op):\n \"\"\"Shape function for Bitcast op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n input_type = op.inputs[0].dtype\n size_of_input = input_type.size\n output = dtypes.as_dtype(op.get_attr(\"type\"))\n size_of_output = output.size\n if size_of_input == size_of_output:\n return [tensor_shape.TensorShape(input_shape)]\n else:\n if size_of_output > size_of_input:\n new_shape = input_shape.as_list()\n last_val = new_shape[-1]\n if last_val == (size_of_output // size_of_input):\n new_shape = new_shape[:-1]\n else:\n raise ValueError(\n \"Cannot bitcast due to shape. 
%d is not evenly divisible by %d.\" %\n (new_shape[-1], size_of_input // size_of_output))\n else:\n new_shape = input_shape\n new_shape = new_shape.concatenate([size_of_input // size_of_output])\n return [tensor_shape.TensorShape(new_shape)]\n\n\[email protected](\"Reshape\")\ndef _ReshapeShape(op):\n \"\"\"Shape function for Reshape op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n if input_shape.ndims is not None:\n num_elements = tensor_shape.Dimension(1)\n for dim in input_shape.dims:\n num_elements *= dim\n else:\n num_elements = tensor_shape.Dimension(None)\n new_shape_shape = op.inputs[1].get_shape().with_rank(1)\n new_shape = tensor_util.constant_value(op.inputs[1])\n if new_shape is None:\n # Attempt to infer the rank of the output from the length of\n # new_shape.\n return [tensor_shape.unknown_shape(ndims=new_shape_shape[0].value)]\n new_shape = np.reshape(new_shape, -1).tolist()\n if -1 not in new_shape:\n # The new shape is fully defined.\n if (num_elements.value is not None\n and num_elements.value != np.prod(new_shape)):\n raise ValueError(\n \"Cannot reshape a tensor with %d elements to shape %s (%d elements)\"\n % (num_elements.value, new_shape, np.prod(new_shape)))\n return [tensor_shape.TensorShape(new_shape)]\n elif num_elements.value is not None:\n # We know the number of elements, so we can calculate the missing\n # dimension in the new_shape.\n known_elements = 1\n unknown_index = None\n for i, dim in enumerate(new_shape):\n if dim == -1:\n unknown_index = i\n else:\n known_elements *= dim\n if known_elements == 0:\n raise ValueError(\"cannot infer the missing input size for \"\n \"an empty tensor unless all specified \"\n \"input sizes are non-zero\")\n if num_elements % known_elements != 0:\n raise ValueError(\"input has %s elements, which isn't divisible by %d\" %\n (num_elements, known_elements))\n new_shape[unknown_index] = num_elements // known_elements\n return [tensor_shape.TensorShape(new_shape)]\n else:\n # We don't know the input shape, but we know n-1 of the dimensions\n # in the new shape.\n new_shape[new_shape.index(-1)] = None\n return [tensor_shape.TensorShape(new_shape)]\n\n\[email protected](\"BroadcastGradientArgs\")\ndef _BroadcastGradientArgsShape(op):\n \"\"\"Shape function for the BroadcastGradientArgs op.\"\"\"\n # TODO(mrry): Implement constant_value for BroadcastGradientArgs?\n op.inputs[0].get_shape().assert_has_rank(1)\n op.inputs[1].get_shape().assert_has_rank(1)\n return [tensor_shape.vector(None), tensor_shape.vector(None)]\n\n\[email protected](\"Fill\")\ndef _FillShape(op):\n \"\"\"Shape function for the Fill op.\n\n This op takes a vector of dimensions and a scalar, and produces a\n tensor with the given dimensions.\n\n Args:\n op: A Fill Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n dimensions_shape = op.inputs[0].get_shape().with_rank(1)\n op.inputs[1].get_shape().assert_is_compatible_with(tensor_shape.scalar())\n fill_dims = tensor_util.constant_value(op.inputs[0])\n if fill_dims is None:\n # Attempt to infer the rank of the output from the length of\n # dimensions.\n return [tensor_shape.unknown_shape(ndims=dimensions_shape[0].value)]\n else:\n return [tensor_shape.TensorShape(fill_dims.tolist())]\n\n\[email protected](\"InvertPermutation\")\ndef _InvertPermutationShape(op):\n \"\"\"Shape function for the InvertPermutation op.\"\"\"\n return [op.inputs[0].get_shape().with_rank(1)]\n\n\[email protected](\"ListDiff\")\ndef _ListDiffShape(op):\n \"\"\"Shape function for the 
ListDiff op.\"\"\"\n op.inputs[0].get_shape().assert_has_rank(1)\n op.inputs[1].get_shape().assert_has_rank(1)\n # TODO(mrry): Indicate that the length falls within an interval?\n return [tensor_shape.vector(None)] * 2\n\n\[email protected](\"Pad\")\[email protected](\"MirrorPad\")\ndef _PadShape(op):\n \"\"\"Shape function for the Pad op.\n\n This op has two inputs:\n\n * input: A rank-N tensor.\n * paddings: An N-by-2 matrix, in which the i^th row contains the\n number of padding elements to add before and after `input` in the\n i^th dimension.\n\n It has one output, which has the same rank as input, and additional\n elements according to the values in paddings.\n\n Args:\n op: A Pad Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the input shapes are incompatible.\n \"\"\"\n paddings_shape = op.inputs[1].get_shape().with_rank(2)\n input_shape = op.inputs[0].get_shape()\n input_shape = input_shape.with_rank(paddings_shape[0].value)\n paddings_shape = paddings_shape.merge_with(\n tensor_shape.matrix(input_shape.ndims, 2))\n paddings = tensor_util.constant_value(op.inputs[1])\n if paddings is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n output_dims = []\n for i, dim in enumerate(input_shape.dims):\n if paddings[i, 0] < 0 or paddings[i, 1] < 0:\n raise ValueError(\"paddings must be non-negative\")\n output_dims.append(dim + paddings[i, 0] + paddings[i, 1])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"MirrorPadGrad\")\ndef _MirrorPadGradShape(op):\n \"\"\"Shape function for the MirrorPadGrad op.\"\"\"\n paddings_shape = op.inputs[1].get_shape().with_rank(2)\n input_shape = op.inputs[0].get_shape().with_rank(paddings_shape[0].value)\n paddings_shape = paddings_shape.merge_with(tensor_shape.matrix(\n input_shape.ndims, 2))\n paddings = tensor_util.constant_value(op.inputs[1])\n if paddings is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n\n output_dims = []\n for i, dim in enumerate(input_shape.dims):\n if paddings[i, 0] < 0 or paddings[i, 1] < 0:\n raise ValueError(\"Paddings must be non-negative.\")\n if dim <= paddings[i, 0] + paddings[i, 1]:\n raise ValueError(\"Output dimension is not positive.\")\n output_dims.append(dim - paddings[i, 0] - paddings[i, 1])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"ReverseSequence\")\ndef _ReverseSequenceShape(op):\n \"\"\"Shape function for the ReverseSequence op.\n\n This op has two inputs:\n\n * input: A rank-N tensor with size B in the 0th dimension.\n * seq_lens: A vector of length B.\n\n It has one output, with the same size as input.\n\n Args:\n op: A ReverseSequence Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the input shapes are incompatible or seq_dim == batch_dim.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n seq_lens_shape = op.inputs[1].get_shape().with_rank(1)\n seq_dim = op.get_attr(\"seq_dim\")\n batch_dim = op.get_attr(\"batch_dim\")\n if batch_dim >= input_shape.ndims:\n raise ValueError(\"batch_dim must be < input.dims() (%d vs %d)\" %\n (batch_dim, input_shape.ndims))\n if seq_dim >= input_shape.ndims:\n raise ValueError(\"seq_dim must be < input.dims() (%d vs %d)\" %\n (seq_dim, input_shape.ndims))\n batch_size = input_shape[batch_dim].merge_with(seq_lens_shape[0])\n input_shape = tensor_shape.TensorShape([\n value if ix != batch_dim else batch_size\n for ix, value in 
enumerate(input_shape)])\n return [input_shape]\n\n\[email protected](\"Shape\")\[email protected](\"ShapeN\")\ndef _ShapeNShape(op):\n \"\"\"Shape function for the Shape/ShapeN op.\"\"\"\n return [tensor_shape.vector(x.get_shape().ndims) for x in op.inputs]\n\n\[email protected](\"Transpose\")\ndef _TransposeShape(op):\n \"\"\"Shape function for the Transpose op.\n\n This op takes two inputs:\n\n * input: a rank-N tensor of arbitrary shape.\n * shuffle: a length-N vector.\n\n Its output is the rank-N tensor computed by permuting the dimensions\n of input according to shuffle.\n\n Args:\n op: A Transpose op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of input and shuffle are incompatible.\n IndexError: If shuffle contains an index that is >= the rank of input.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n transpose_shape = op.inputs[1].get_shape().merge_with(tensor_shape.vector(\n input_shape.ndims))\n transpose_vec = tensor_util.constant_value(op.inputs[1])\n if transpose_vec is None:\n return [tensor_shape.unknown_shape(ndims=transpose_shape[0].value)]\n else:\n return [tensor_shape.TensorShape([input_shape[i]\n for i in transpose_vec.tolist()])]\n\n\[email protected](\"Split\")\ndef _SplitShape(op):\n \"\"\"Shape function for the Split op.\"\"\"\n split_dim = tensor_util.constant_value(op.inputs[0])\n num_split = len(op.outputs)\n input_shape = op.inputs[1].get_shape()\n if split_dim is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)] * num_split\n else:\n split_dim = int(split_dim)\n input_shape = input_shape.with_rank_at_least(split_dim + 1)\n if not (input_shape[split_dim] % num_split).is_compatible_with(0):\n raise ValueError(\n \"Number of ways to split should evenly divide the split \"\n \"dimension but got split_dim %d (size = %d) and num_split %d\" %\n (split_dim, input_shape[split_dim].value, num_split))\n prefix = input_shape[:split_dim]\n size_in_split_dim = input_shape[split_dim] // num_split\n suffix = input_shape[split_dim + 1:]\n output_shape = prefix.concatenate(size_in_split_dim).concatenate(suffix)\n return [output_shape] * num_split\n\n\[email protected](\"Tile\")\ndef _TileShape(op):\n \"\"\"Shape function for the Tile op.\n\n This op has two inputs:\n\n * input: A rank-N tensor.\n * multiples: A length-N vector, in which the i^th element contains\n the factor by which `input` will be tiled in the i^th dimension.\n\n It has one output, which has the same rank as input, and additional\n elements according to the values in multiples\n\n Args:\n op: A Tile Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n multiples_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0].value)\n multiples = tensor_util.constant_value(op.inputs[1])\n if multiples is None:\n return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n output_dims = []\n multiples = multiples.ravel()\n for i, dim in enumerate(input_shape.dims):\n output_dims.append(dim * multiples[i])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"TileGrad\")\ndef _TileGradShape(op):\n \"\"\"Shape function for the TileGrad op.\"\"\"\n multiples_shape = op.inputs[1].get_shape().with_rank(1)\n input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])\n multiples = tensor_util.constant_value(op.inputs[1])\n if multiples is None:\n return 
[tensor_shape.unknown_shape(ndims=input_shape.ndims)]\n else:\n output_dims = []\n for i, dim in enumerate(input_shape.dims):\n output_dims.append(dim // multiples[i])\n return [tensor_shape.TensorShape(output_dims)]\n\n\[email protected](\"Where\")\ndef _WhereShape(op):\n \"\"\"Shape function for the Where op.\"\"\"\n input_shape = op.inputs[0].get_shape()\n return [tensor_shape.matrix(None, input_shape.ndims)]\n\n\[email protected](\"ZerosLike\")\ndef _ZerosLikeShape(op):\n \"\"\"Shape function for the ZerosLike op.\"\"\"\n return [op.inputs[0].get_shape()]\n\n\ndef edit_distance(hypothesis, truth, normalize=True, name=\"edit_distance\"):\n \"\"\"Computes the Levenshtein distance between sequences.\n\n This operation takes variable-length sequences (`hypothesis` and `truth`),\n each provided as a `SparseTensor`, and computes the Levenshtein distance.\n You can normalize the edit distance by length of `truth` by setting\n `normalize` to true.\n\n For example, given the following input:\n\n ```python\n # 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:\n # (0,0) = [\"a\"]\n # (1,0) = [\"b\"]\n hypothesis = tf.SparseTensor(\n [[0, 0, 0],\n [1, 0, 0]],\n [\"a\", \"b\"]\n (2, 1, 1))\n\n # 'truth' is a tensor of shape `[2, 2]` with variable-length values:\n # (0,0) = []\n # (0,1) = [\"a\"]\n # (1,0) = [\"b\", \"c\"]\n # (1,1) = [\"a\"]\n truth = tf.SparseTensor(\n [[0, 1, 0],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0]]\n [\"a\", \"b\", \"c\", \"a\"],\n (2, 2, 2))\n\n normalize = True\n ```\n\n This operation would return the following:\n\n ```python\n # 'output' is a tensor of shape `[2, 2]` with edit distances normalized\n # by 'truth' lengths.\n output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis\n [0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis\n ```\n\n Args:\n hypothesis: A `SparseTensor` containing hypothesis sequences.\n truth: A `SparseTensor` containing truth sequences.\n normalize: A `bool`. If `True`, normalizes the Levenshtein distance by\n length of `truth.`\n name: A name for the operation (optional).\n\n Returns:\n A dense `Tensor` with rank `R - 1`, where R is the rank of the\n `SparseTensor` inputs `hypothesis` and `truth`.\n\n Raises:\n TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.\n \"\"\"\n if not isinstance(hypothesis, ops.SparseTensor):\n raise TypeError(\"Hypothesis must be a SparseTensor\")\n if not isinstance(truth, ops.SparseTensor):\n raise TypeError(\"Truth must be a SparseTensor\")\n\n return gen_array_ops._edit_distance(hypothesis.indices,\n hypothesis.values,\n hypothesis.shape,\n truth.indices,\n truth.values,\n truth.shape,\n normalize=normalize,\n name=name)\n\n\[email protected](\"EditDistance\")\ndef _EditDistanceShape(op):\n \"\"\"Shape function for the EditDistance op.\"\"\"\n hypothesis_shape = tensor_util.constant_value(op.inputs[2])\n truth_shape = tensor_util.constant_value(op.inputs[5])\n if hypothesis_shape is not None and truth_shape is not None:\n if len(hypothesis_shape) != len(truth_shape):\n raise ValueError(\n \"Inconsistent ranks in hypothesis and truth. 
Saw shapes: %s and %s\" %\n (str(hypothesis_shape), str(truth_shape)))\n return [tensor_shape.TensorShape(\n [max(h, t) for h, t in zip(hypothesis_shape[:-1], truth_shape[:-1])])]\n\n return [tensor_shape.unknown_shape()]\n\n\n# The remaining ops do not change the shape of their inputs.\[email protected](\"Quantize\")\[email protected](\"Dequantize\")\ndef _QuantizeDequantizeShape(op):\n unused_min_range = op.inputs[1].get_shape().merge_with(tensor_shape.scalar())\n unused_max_range = op.inputs[2].get_shape().merge_with(tensor_shape.scalar())\n return common_shapes.unchanged_shape(op)\n\n\[email protected](\"SpaceToDepth\")\ndef _SpaceToDepthShape(op):\n \"\"\"Shape function for the SpaceToDepth op.\n\n This op takes two inputs:\n\n * input: a tensor of shape like that [B, H, W, D]\n * block_size: an int.\n\n Its output is the same-rank tensor but with changed\n dimensions like that: [B, H/block_size, W/block_size, D*block_size*block_size]\n\n Args:\n op: A SpaceToDepth op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of input are not as expected.\n IndexError: If block_size does not divide W or H.\n \"\"\"\n # Check that the input tensor is of 4 dimensions.\n try:\n input_shape = op.inputs[0].get_shape().with_rank(4)\n except ValueError:\n raise ValueError(\n \"tf.space_to_depth() requires tensors with exactly 4 dimensions.\")\n\n block_size = op.get_attr(\"block_size\")\n if block_size <= 1:\n raise ValueError(\"Attribute block_size has to be > 1.\")\n\n input_height = input_shape[1]\n input_width = input_shape[2]\n\n if (input_width % block_size > 0) or (input_height % block_size > 0):\n raise IndexError(\n \"block_size needs to divide both width and height.\")\n\n width = input_width // block_size\n height = input_height // block_size\n new_depth = input_shape[3] * block_size * block_size\n\n return [tensor_shape.TensorShape(\n [input_shape[0], height, width, new_depth])]\n\n\[email protected](\"DepthToSpace\")\ndef _DepthToSpaceShape(op):\n \"\"\"Shape function for the DepthToSpace op.\n\n This op takes two inputs:\n\n * input: a tensor of shape like that [B, H, W, D]\n * block_size: an int.\n\n Its output is the same-rank tensor but with changed\n dimensions like that:\n [B, H*block_size, W*block_size, D/(block_size*block_size)]\n\n Args:\n op: A DepthToSpace op.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: If the shapes of input are not as expected.\n IndexError: If block_size*block_size does not divide D.\n \"\"\"\n # Check that the input tensor is of 4 dimensions.\n try:\n input_shape = op.inputs[0].get_shape().with_rank(4)\n except ValueError:\n raise ValueError(\n \"tf.depth_to_space() requires tensors with exactly 4 dimensions.\")\n\n block_size = op.get_attr(\"block_size\")\n if block_size <= 1:\n raise ValueError(\"Attribute block_size has to be > 1.\")\n\n input_height = input_shape[1]\n input_width = input_shape[2]\n input_depth = input_shape[3]\n\n width = input_width * block_size\n height = input_height * block_size\n\n if input_depth % (block_size * block_size) > 0:\n raise IndexError(\n \"block_size*block_size needs to divide the input depth.\")\n\n new_depth = input_depth // (block_size * block_size)\n return [tensor_shape.TensorShape(\n [input_shape[0], height, width, new_depth])]\n\n\[email protected](\"OneHot\")\ndef _OneHotShape(op):\n \"\"\"Shape function for the OneHot op.\n\n It closely follows the code in the .cc implementation.\n\n Args:\n 
op: A OneHot Operation.\n\n Returns:\n A single-element list containing the shape of the output.\n\n Raises:\n ValueError: if axis < -1.\n \"\"\"\n indices_shape = op.inputs[0].get_shape()\n indices_dims = indices_shape.ndims\n depth = tensor_util.constant_value(op.inputs[1])\n axis = op.get_attr(\"axis\")\n\n if axis < -1:\n raise ValueError(\"axis must be >= -1\")\n\n new_shape = None\n if indices_dims is not None:\n new_shape = indices_shape.as_list()\n new_shape.insert(axis % (indices_dims + 1), depth)\n\n return [tensor_shape.TensorShape(new_shape)]\n\n\[email protected](\"PlaceholderWithDefault\")\ndef _PlaceholderWithDefaultShape(op):\n \"\"\"Shape function for the PlaceholderWithDefault op.\n\n This op acts as an identity when it is not fed (passing through a\n default value), but allows the user to feed it with tensors of a\n possibly less precise shape than its default value.\n\n Args:\n op: A PlaceholderWithDefault `Operation`.\n\n Returns:\n A single-element list containing the shape of the output.\n \"\"\"\n input_shape = op.inputs[0].get_shape()\n output_shape = tensor_shape.TensorShape(op.get_attr(\"shape\"))\n # NOTE(mrry): We don't merge these shapes, because `output_shape`\n # may be *less* precise than `input_shape`.\n input_shape.assert_is_compatible_with(output_shape)\n return [output_shape]\n" ]
[ [ "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.ops.constant_op.constant", "tensorflow.python.ops.gen_array_ops._pad", "tensorflow.python.ops.gen_math_ops._range", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.gen_array_ops.rank", "tensorflow.python.ops.gen_array_ops._pack", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.framework.tensor_shape.scalar", "numpy.reshape", "tensorflow.python.framework.ops.Tensor._override_operator", "tensorflow.python.framework.tensor_shape.as_shape", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.framework.tensor_util.constant_value", "tensorflow.python.framework.ops.RegisterShape", "tensorflow.python.ops.gen_array_ops._slice", "tensorflow.python.framework.tensor_shape.Dimension", "tensorflow.python.ops.gen_array_ops.transpose", "tensorflow.python.ops.gen_array_ops._mirror_pad", "numpy.prod", "tensorflow.python.framework.ops.op_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.tensor_shape.matrix", "tensorflow.python.ops.gen_array_ops._concat", "tensorflow.python.ops.gen_array_ops._placeholder", "tensorflow.python.ops.gen_array_ops._edit_distance", "tensorflow.python.ops.common_shapes.unchanged_shape", "tensorflow.python.framework.tensor_shape.vector", "tensorflow.python.ops.gen_array_ops._zeros_like", "tensorflow.python.ops.gen_array_ops._unpack", "tensorflow.python.ops.gen_array_ops._split" ] ]
breaks-software/OSCAAR
[ "254acfccbd907b89485b9d78cff2681892a40309" ]
[ "oscaar/photometry.py" ]
[ "'''oscaar v2.0 \n Module for differential photometry\n Developed by Brett Morris, 2011-2013'''\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib.cm as cm\n\ndef phot(image, xCentroid, yCentroid, apertureRadius, plottingThings, annulusOuterRadiusFactor=2.8, annulusInnerRadiusFactor=1.40, ccdGain=1, plots=False):\n '''\n Method for aperture photometry. \n \n Parameters\n ----------\n image : numpy.ndarray\n FITS image opened with PyFITS\n \n xCentroid : float\n Stellar centroid along the x-axis (determined by trackSmooth or equivalent)\n \n yCentroid : float\n Stellar centroid along the y-axis (determined by trackSmooth or equivalent)\n \n apertureRadius : float\n Radius in pixels from centroid to use for source aperture\n \n annulusInnerRadiusFactor : float\n Measure the background for sky background subtraction fron an annulus from a factor of \n `annulusInnerRadiusFactor` bigger than the `apertureRadius` to one a factor `annulusOuterRadiusFactor` bigger.\n \n annulusOuterRadiusFactor : float\n Measure the background for sky background subtraction fron an annulus a factor of \n `annulusInnerRadiusFactor` bigger than the `apertureRadius` to one a factor `annulusOuterRadiusFactor` bigger.\n \n ccdGain : float\n Gain of your detector, used to calculate the photon noise\n \n plots : bool\n If `plots`=True, display plots showing the aperture radius and \n annulus radii overplotted on the image of the star\n \n Returns\n -------\n rawFlux : float\n The background-subtracted flux measured within the aperture\n \n rawError : float\n The photon noise (limiting statistical) Poisson uncertainty on the measurement of `rawFlux`\n \n errorFlag : bool\n Boolean corresponding to whether or not any error occured when running oscaar.phot(). If an error occured, the flag is\n True; otherwise False.\n \n Core developer: Brett Morris (NASA-GSFC)\n '''\n if plots:\n [fig,subplotsDimensions,photSubplotsOffset] = plottingThings\n if photSubplotsOffset == 0: plt.clf()\n annulusRadiusInner = annulusInnerRadiusFactor*apertureRadius \n annulusRadiusOuter = annulusOuterRadiusFactor*apertureRadius\n\n ## From the full image, cut out just the bit around the star that we're interested in\n imageCrop = image[xCentroid-annulusRadiusOuter+1:xCentroid+annulusRadiusOuter+2,yCentroid-annulusRadiusOuter+1:yCentroid+annulusRadiusOuter+2]\n [dimy,dimx] = imageCrop.shape\n XX, YY = np.meshgrid(np.arange(dimx),np.arange(dimy)) \n x = (XX - annulusRadiusOuter)**2\n y = (YY - annulusRadiusOuter)**2\n ## Assemble arrays marking the pixels marked as either source or background pixels\n sourceIndices = x + y <= apertureRadius**2\n skyIndices = (x + y <= annulusRadiusOuter**2)*(x + y >= annulusRadiusInner**2)\n \n rawFlux = np.sum(imageCrop[sourceIndices] - np.median(imageCrop[skyIndices]))*ccdGain\n rawError = np.sqrt(np.sum(imageCrop[sourceIndices]*ccdGain) + np.median(ccdGain*imageCrop[skyIndices])) ## Poisson-uncertainty\n\n if plots:\n def format_coord(x, y):\n ''' Function to also give data value on mouse over with imshow. 
'''\n col = int(x+0.5)\n row = int(y+0.5)\n try:\n return 'x=%i, y=%i, Flux=%1.1f' % (x, y, imageCrop[row,col])\n except:\n return 'x=%i, y=%i' % (x, y)\n \n med = np.median(imageCrop)\n dsig = np.std(imageCrop)\n \n ax = fig.add_subplot(subplotsDimensions+photSubplotsOffset+1)\n ax.imshow(imageCrop, cmap=cm.gray, interpolation=\"nearest\",vmin = med-0.5*dsig, vmax =med+2*dsig)\n \n theta = np.arange(0,360)*(np.pi/180)\n rcos = lambda r, theta: annulusRadiusOuter + r*np.cos(theta)\n rsin = lambda r, theta: annulusRadiusOuter + r*np.sin(theta)\n ax.plot(rcos(apertureRadius,theta),rsin(apertureRadius,theta),'m',linewidth=4)\n ax.plot(rcos(annulusRadiusInner,theta),rsin(annulusRadiusInner,theta),'r',linewidth=4)\n ax.plot(rcos(annulusRadiusOuter,theta),rsin(annulusRadiusOuter,theta),'r',linewidth=4)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_title('Aperture')\n ax.set_xlim([-.5,dimx-.5])\n ax.set_ylim([-.5,dimy-.5])\n ax.format_coord = format_coord \n plt.draw()\n return [rawFlux, rawError, False]\n \ndef multirad(image, xCentroid, yCentroid, apertureRadii, plottingThings, annulusOuterRadiusFactor=2.8, annulusInnerRadiusFactor=1.40, ccdGain=1, plots=False):\n '''\n Method for aperture photometry. \n \n Parameters\n ----------\n image : numpy.ndarray\n FITS image opened with PyFITS\n \n xCentroid : float\n Stellar centroid along the x-axis (determined by trackSmooth or equivalent)\n \n yCentroid : float\n Stellar centroid along the y-axis (determined by trackSmooth or equivalent)\n \n apertureRadii : list\n List of aperture radii (floats) to feed to phot().\n \n annulusInnerRadiusFactor : float\n Measure the background for sky background subtraction fron an annulus from a factor of \n `annulusInnerRadiusFactor` bigger than the `apertureRadius` to one a factor `annulusOuterRadiusFactor` bigger.\n \n annulusOuterRadiusFactor : float\n Measure the background for sky background subtraction fron an annulus a factor of \n `annulusInnerRadiusFactor` bigger than the `apertureRadius` to one a factor `annulusOuterRadiusFactor` bigger.\n \n ccdGain : float\n Gain of your detector, used to calculate the photon noise\n \n plots : bool\n If `plots`=True, display plots showing the aperture radius and \n annulus radii overplotted on the image of the star\n \n Returns\n -------\n rawFlux : float\n The background-subtracted flux measured within the aperture\n \n rawError : float\n The photon noise (limiting statistical) Poisson uncertainty on the measurement of `rawFlux`\n \n errorFlag : bool\n Boolean corresponding to whether or not any error occured when running oscaar.phot(). 
If an error occured, the flag is\n True; otherwise False.\n \n Core developer: Brett Morris (NASA-GSFC)\n '''\n\n #[apertureRadiusMin, apertureRadiusMax, apertureRadiusStep] = apertureRadiusSettings\n #apertureRadii = np.arange(apertureRadiusMin, apertureRadiusMax, apertureRadiusStep)\n\n fluxes = []\n errors = []\n photFlags = []\n for apertureRadius in apertureRadii:\n flux, error, photFlag = phot(image, xCentroid, yCentroid, apertureRadius, plottingThings, annulusOuterRadiusFactor=annulusOuterRadiusFactor, annulusInnerRadiusFactor=annulusInnerRadiusFactor, ccdGain=ccdGain, plots=False)\n fluxes.append(flux)\n errors.append(error)\n photFlags.append(photFlag)\n annulusRadiusOuter = annulusOuterRadiusFactor*np.max(apertureRadii)\n imageCrop = image[xCentroid-annulusRadiusOuter+1:xCentroid+annulusRadiusOuter+2,yCentroid-annulusRadiusOuter+1:yCentroid+annulusRadiusOuter+2]\n [dimy,dimx] = imageCrop.shape\n\n if plots:\n [fig,subplotsDimensions,photSubplotsOffset] = plottingThings\n if photSubplotsOffset == 0: plt.clf()\n def format_coord(x, y):\n ''' Function to also give data value on mouse over with imshow. '''\n col = int(x+0.5)\n row = int(y+0.5)\n try:\n return 'x=%i, y=%i, Flux=%1.1f' % (x, y, imageCrop[row,col])\n except:\n return 'x=%i, y=%i' % (x, y)\n \n med = np.median(imageCrop)\n dsig = np.std(imageCrop)\n \n ax = fig.add_subplot(subplotsDimensions+photSubplotsOffset+1)\n ax.imshow(imageCrop, cmap=cm.gray, interpolation=\"nearest\",vmin = med-0.5*dsig, vmax =med+2*dsig)\n \n theta = np.arange(0,360)*(np.pi/180)\n rcos = lambda r, theta: annulusRadiusOuter + r*np.cos(theta)\n rsin = lambda r, theta: annulusRadiusOuter + r*np.sin(theta)\n for apertureRadius in apertureRadii:\n ax.plot(rcos(apertureRadius,theta),rsin(apertureRadius,theta),linewidth=4)\n #ax.plot(rcos(annulusRadiusInner,theta),rsin(annulusRadiusInner,theta),'r',linewidth=4)\n #ax.plot(rcos(annulusRadiusOuter,theta),rsin(annulusRadiusOuter,theta),'r',linewidth=4)\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_title('Aperture')\n ax.set_xlim([-.5,dimx-.5])\n ax.set_ylim([-.5,dimy-.5])\n ax.format_coord = format_coord \n plt.draw() \n return fluxes, errors, photFlags\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.draw", "numpy.sin", "numpy.median", "matplotlib.pyplot.clf", "numpy.cos", "numpy.arange", "numpy.max", "numpy.std" ] ]
dreamflasher/pybo-python3
[ "803873df98640c94580ccd93b6661197acf8fc5e" ]
[ "pybo/utils.py" ]
[ "\"\"\"\nVarious utility functions.\n\"\"\"\n\n\n\n\n\nimport numpy as np\nimport re\nimport subprocess\n\n__all__ = ['rstate', 'SubprocessQuery', 'InteractiveQuery']\n\n\ndef rstate(rng=None):\n \"\"\"\n Return a RandomState object. This is just a simple wrapper such that if rng\n is already an instance of RandomState it will be passed through, otherwise\n it will create a RandomState object using rng as its seed.\n \"\"\"\n if not isinstance(rng, np.random.RandomState):\n rng = np.random.RandomState(rng)\n return rng\n\n\nclass SubprocessQuery(object):\n \"\"\"\n Class for black-boxes that should be run from the shell. Simply pass the\n shell command with variables replaced with `{}` with python string\n formatting specs inside, then call the object with inputs to replace the `{}`\n in the same order as in the provided string.\n \"\"\"\n def __init__(self, command):\n self.command = command\n\n def __call__(self, x):\n out = subprocess.check_output(self.command.format(*x), shell=True)\n out = out.splitlines()[-1] # keep last line\n out = re.compile(r'\\x1b[^m]*m').sub('', out) # strip color codes\n out = out.split('=')[-1] # strip left hand side\n return np.float(out)\n\n\nclass InteractiveQuery(object):\n \"\"\"\n Wrapper for queries which interactively query the user.\n \"\"\"\n def __init__(self, prompt='Enter value at design x = {}\\ny = '):\n self.prompt = prompt\n\n def __call__(self, x):\n y = eval(input(self.prompt.format(x)))\n if not isinstance(y, (np.int, np.long, np.float)):\n # FIXME: this should probably just re-query the user rather than\n # raising an exception.\n raise ValueError('output must be a number')\n return y\n" ]
[ [ "numpy.random.RandomState", "numpy.float" ] ]
o-Ian/X-Minerei
[ "d9f86d802787381afa4e955d7532cf9788b94962" ]
[ "Catch.Manipulation_data.py" ]
[ "import requests\nimport csv\nimport json\nimport pandas as pd\nimport os\nimport numpy as np\nimport datetime\nimport os\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium import webdriver\n\n\ndef baixar_arquivo(url, nome_arquivo):\n resultado = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '\n 'AppleWebKit/537.36 (HTML, like Gecko) '\n 'Chrome/39.0.2171.95 Safari/537.36'})\n with open(nome_arquivo, 'wb') as novo_arquivo:\n novo_arquivo.write(resultado.content)\n novo_arquivo = pd.read_csv(nome_arquivo, usecols=['Date(UTC)', 'Value'])\n novo_arquivo['Date(UTC)'] = novo_arquivo['Date(UTC)'].astype('datetime64')\n novo_arquivo['Value'] = novo_arquivo['Value'].astype('float64')\n novo_arquivo.to_csv(f'{nome_arquivo}')\n return novo_arquivo\n\n\ndef conversorjsontocsv(nome_arquivojson):\n with open(nome_arquivojson) as file:\n data = json.load(file)\n fname = 'Mineration_DATA.ETH/IPCA.csv'\n with open(fname, 'wt') as file:\n csv_file = csv.writer(file, lineterminator='\\n')\n csv_file.writerow(['Year', '%IPCA'])\n for item in data:\n csv_file.writerow([item['p'].replace('dezembro', ''), item['v']])\n os.remove(nome_arquivojson)\n file = pd.read_csv(fname)\n file['%IPCA'] = file['%IPCA']/100\n file.to_csv('Mineration_DATA.ETH/IPCA.csv')\n\n return file\n\n\ndef baixar_arquivo2(url, nome_arquivo):\n resultado = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) '\n 'AppleWebKit/537.36 (HTML, like Gecko) '\n 'Chrome/39.0.2171.95 Safari/537.36'})\n with open(nome_arquivo, 'wb') as novo_arquivo:\n novo_arquivo.write(resultado.content)\n return novo_arquivo\n\n\ndef calculateProfit(hashrate_proprio, difficulty, blockreward_dia, fees=1):\n return (((hashrate_proprio * 1000000) * (1 - (fees / 100))) / (difficulty * 1000000000000)) * blockreward_dia*3600\n\n\n# Downloading and renaming files\nETHPerDay = baixar_arquivo('https://etherscan.io/chart/blockreward?output=csv',\n 'Mineration_DATA.ETH/ETHPerDay.csv')\nETHPerDay = ETHPerDay.rename(columns={'Value': 'ETHPerDay'})\n\nNetworkDifficulty = baixar_arquivo('https://etherscan.io/chart/difficulty?output=csv',\n 'Mineration_DATA.ETH/NetworkDifficulty_TH_s.csv')\nNetworkDifficulty = NetworkDifficulty.rename(columns={'Value': 'Difficulty[TH/s]'})\n\nBlockCountDay = baixar_arquivo('https://etherscan.io/chart/blocks?output=csv',\n 'Mineration_DATA.ETH/BlockCount_Day.csv')\nBlockCountDay = BlockCountDay.rename(columns={'Value': 'Total_Blocks'})\n\nETHPriceUSD = baixar_arquivo('https://etherscan.io/chart/etherprice?output=csv',\n 'Mineration_DATA.ETH/ETHPrice_USD.csv')\nETHPriceUSD = ETHPriceUSD.rename(columns={'Value': 'ETHPrice_USD'})\n\nbaixar_arquivo2('https://servicodados.ibge.gov.br/api/v1/conjunturais?&d=s&user=ibge&t=1737&v=69&p=199512,'\n '199612,199712,199812,199912,200012,200112,200212,200312,200412,200512,200612,200712,200812,'\n '200912,201012, 201112,201212,201312,201412,201512,201612,201712,201812,201912,202012,202112,'\n '202212,202312,202412,202512,202612,202712,202812,202912,203012&ng=1(1)&c=',\n 'Mineration_DATA.ETH/IPCA.json')\nIPCA = conversorjsontocsv('Mineration_DATA.ETH/IPCA.json')\n\nDate = ETHPerDay['Date(UTC)']\n\n# Creating dataset that group all filesa\nAllData = pd.DataFrame(ETHPerDay)\n\nAllData['NetworkDifficulty[TH/s]'] = 
NetworkDifficulty['Difficulty[TH/s]']\n\nAllData['ETHPriceUSD'] = ETHPriceUSD['ETHPrice_USD']\n\nAllData['TotalBlocks'] = BlockCountDay['Total_Blocks']\n\n# Input data from user\nHashUsuario = float(input('Qual o seu hashrate [Mh/s]?: '))\nPower = int(input('Qual a potência [W]?: '))\nSuffixMult = 0.001\nPowerCoast = float(input('Qual o tarida de energia [USD]?: '))\nRS_GPUPrice = float(input('Qual o preço da placa de vídeo?: '))\n\n# Recalculating PowerCoast\nchoicelist = []\nc = datetime.date.today().year - 1\ncont = len(IPCA)-1\nAllData['PowerCoast'] = PowerCoast\nfor i in range(len(IPCA)):\n if c >= 2015:\n valor = float(IPCA.loc[i + cont, '%IPCA'])\n if c == 2020:\n novo_valor = 1 - valor\n choicelist.append(novo_valor)\n else:\n novo_valor = novo_valor - valor\n choicelist.append(novo_valor)\n cont -= 2\n c -= 1\n\n# Conditional structure for readjustment\ncondicionlist = [AllData['Date(UTC)'].dt.year == 2020,\n AllData['Date(UTC)'].dt.year == 2019,\n AllData['Date(UTC)'].dt.year == 2018,\n AllData['Date(UTC)'].dt.year == 2017,\n AllData['Date(UTC)'].dt.year == 2016,\n AllData['Date(UTC)'].dt.year == 2015]\n\nAllData['Inflação'] = np.select(condicionlist, choicelist, default=1)\nAllData['PowerCoast'] = PowerCoast * AllData['Inflação']\n\n# Putting together AllData and GPUPrice\nAllData['Date(UTC)'] = AllData['Date(UTC)'].astype('datetime64')\n\nGPUPrice = pd.read_csv('Mineration_DATA.ETH/GPUPrice.csv', index_col=0)\nGPUPrice['Date(UTC)'] = GPUPrice['Date(UTC)'].astype('datetime64')\ndel GPUPrice['R$_GPUPrice']\n\n# Activate/Deactivate when the relations between the variables must be done\nAllData = pd.merge(AllData, GPUPrice, on='Date(UTC)', how='outer')\n\n# Catching last date from AllData dataframe\nlast_date = AllData.loc[len(AllData)-1]\nlast_date = last_date['Date(UTC)']\nlast_date = str(last_date)\n\nlast_date = datetime.datetime.strptime(last_date[:10], '%Y-%m-%d').date()\nlast_date = last_date.strftime('%m-%d-%Y')\n\n# Downloading dollarPrice.csv\nbaixar_arquivo2(f\"https://olinda.bcb.gov.br/olinda/servico/PTAX/versao/v1/odata/CotacaoDolarPeriodo(dataInicial=@dataInicial,dataFinalCotacao=@dataFinalCotacao)?@dataInicial='07-30-2015'&@dataFinalCotacao='{last_date}'&$top=999999999&$format=text/csv\", r'Mineration_DATA.ETH/DollarPrice.csv')\ndollarPrice2 = pd.read_csv('Mineration_DATA.ETH/DollarPrice.csv')\ndollarPrice = pd.DataFrame(columns=['R$_DollarPrice', 'Date(UTC)'])\n\n# Converting dollarPrice columns\ndollarPrice['R$_DollarPrice'] = dollarPrice2['cotacaoCompra'].astype('string')\ndollarPrice['R$_DollarPrice'] = dollarPrice['R$_DollarPrice'].str.replace(',', '.').astype('float64')\ndollarPrice['Date(UTC)'] = dollarPrice2['dataHoraCotacao'].astype('string')\n\n# Taking out hours from Date(UTC) column\ndates = dollarPrice['Date(UTC)']\nlist_data = []\nfor date in dates:\n list_data.append(date[:10])\n\ndollarPrice['Date(UTC)'] = list_data\ndollarPrice['Date(UTC)'] = pd.to_datetime(dollarPrice['Date(UTC)'])\n\n# Putting dollarPrice with AllData dataframe\nAllData = pd.merge(AllData, dollarPrice, on='Date(UTC)', how='outer')\n\n# Replacing NaN values to last value from dollarPrice column\nAllData['R$_DollarPrice'].fillna(method='ffill', inplace=True)\n\n# Putting Price and Date(UTC) column from AllData on dollarPrice dataframe\ndollarPrice['R$_DollarPrice'] = AllData['R$_DollarPrice']\ndollarPrice['Date(UTC)'] = AllData['Date(UTC)']\nAllData['ETHPriceBRL'] = AllData['ETHPriceUSD'] * AllData['R$_DollarPrice']\n\n# Putting GPU Price from user on csv 
file\nAllData._set_value(len(AllData)-1, 'R$_GPUPrice', RS_GPUPrice)\n\n# Calculating profit\nAllData['ETH/dia'] = (calculateProfit(HashUsuario, AllData['NetworkDifficulty[TH/s]'], (AllData['ETHPerDay']/AllData['TotalBlocks']))) * 24\nAllData['USD_Revenue'] = AllData['ETHPriceBRL'] * AllData['ETH/dia']\nAllData['USD_Coast'] = Power * SuffixMult * AllData['PowerCoast'] * 24\nAllData['USD_Profit/day'] = AllData['USD_Revenue'] - AllData['USD_Coast']\nAllData['USD_Profit/month'] = AllData['USD_Profit/day'] * 30\n\nAllData['Indicador'] = AllData['USD_Coast']/AllData['USD_Revenue']\n\n# Creating new column (relation between Network Difficulty with the last Network Difficulty)\nLast_Difficulty = AllData['NetworkDifficulty[TH/s]'].iloc[-1]\nAllData['Multiple_Difficulty/LastDifficulty'] = AllData['NetworkDifficulty[TH/s]']/Last_Difficulty\n\n# Making the prevision of GPU Price\nAllData['R$_GPUPrice'] = RS_GPUPrice * AllData['Multiple_GPUPrice']\n\nfill_values = {'R$_GPUPrice': RS_GPUPrice * AllData['Multiple_Difficulty/LastDifficulty']}\nAllData.fillna(fill_values, inplace=True)\n\nAllData['R$_GPUPrice'] = RS_GPUPrice * AllData['Multiple_Difficulty/LastDifficulty']\n\n# Conditional structure to use inflation as a multiplicator when date is equal or less than 2017-09-12\ncondicionlist = [AllData['Date(UTC)'] <= '2017-09-12'\n ]\nchoicelist = [AllData['R$_GPUPrice'].loc[775] * AllData['Inflação']]\n\nAllData['R$_GPUPrice'] = np.select(condicionlist, choicelist, default=AllData['R$_GPUPrice'])\n\n# GPU Price conversion (real to dollar)\nAllData['USD_GPUPrice'] = AllData['R$_GPUPrice'] / AllData['R$_DollarPrice']\n\n# Column that calcule how much months do you need to pay your investment\nAllData['Pays_itself/months'] = AllData['R$_GPUPrice']/AllData['USD_Profit/month']\n\nAllData['Date'] = AllData['Date(UTC)']\n# Last step\ndollarPrice.to_csv('Mineration_DATA.ETH/DollarPrice.csv')\nAllData.to_csv('Mineration_DATA.ETH/AllData.csv')\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.to_datetime", "pandas.merge", "numpy.select" ] ]
shangz-ai/gluon-nlp
[ "75b3c121ac02c1bdef25a785fda2238e256246f9" ]
[ "scripts/word_embeddings/evaluation.py" ]
[ "# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Evaluation\n=============\n\nFunctions to perform evaluation of TokenEmbeddings on the datasets included in\nthe GluonNLP toolkit.\n\n\"\"\"\nimport itertools\nimport sys\nimport logging\nimport json\n\nimport mxnet as mx\nimport numpy as np\nfrom scipy import stats\n\nimport gluonnlp as nlp\n\n\ndef add_parameters(parser):\n \"\"\"Add evaluation specific parameters to parser.\"\"\"\n group = parser.add_argument_group('Evaluation arguments')\n\n group.add_argument('--eval-batch-size', type=int, default=512)\n\n # Datasets\n group.add_argument(\n '--similarity-datasets', type=str,\n default=nlp.data.word_embedding_evaluation.word_similarity_datasets,\n nargs='*',\n help='Word similarity datasets to use for intrinsic evaluation.')\n group.add_argument(\n '--similarity-functions', type=str,\n default=nlp.embedding.evaluation.list_evaluation_functions(\n 'similarity'), nargs='+',\n help='Word similarity functions to use for intrinsic evaluation.')\n group.add_argument(\n '--analogy-datasets', type=str, default=['GoogleAnalogyTestSet'],\n nargs='*',\n help='Word similarity datasets to use for intrinsic evaluation.')\n group.add_argument(\n '--analogy-functions', type=str,\n default=nlp.embedding.evaluation.list_evaluation_functions('analogy'),\n nargs='+',\n help='Word analogy functions to use for intrinsic evaluation. 
')\n\n ## Analogy evaluation specific arguments\n group.add_argument(\n '--analogy-dont-exclude-question-words', action='store_true',\n help=('Exclude input words from valid output analogies.'\n 'The performance of word embeddings on the analogy task '\n 'is around 0% accuracy if input words are not excluded.'))\n\n\ndef validate_args(args):\n \"\"\"Validate provided arguments and act on --help.\"\"\"\n # Check correctness of similarity dataset names\n for dataset_name in args.similarity_datasets:\n if dataset_name and dataset_name.lower() not in map(\n str.lower,\n nlp.data.word_embedding_evaluation.word_similarity_datasets):\n print('{} is not a supported dataset.'.format(dataset_name))\n sys.exit(1)\n\n # Check correctness of analogy dataset names\n for dataset_name in args.analogy_datasets:\n if dataset_name and dataset_name.lower() not in map(\n str.lower,\n nlp.data.word_embedding_evaluation.word_analogy_datasets):\n print('{} is not a supported dataset.'.format(dataset_name))\n sys.exit(1)\n\n\ndef iterate_similarity_datasets(args):\n \"\"\"Generator over all similarity evaluation datasets.\n\n Iterates over dataset names, keyword arguments for their creation and the\n created dataset.\n\n \"\"\"\n for dataset_name in args.similarity_datasets:\n if not dataset_name:\n continue\n parameters = nlp.data.list_datasets(dataset_name)\n for key_values in itertools.product(*parameters.values()):\n kwargs = dict(zip(parameters.keys(), key_values))\n yield dataset_name, kwargs, nlp.data.create(dataset_name, **kwargs)\n\n\ndef iterate_analogy_datasets(args):\n \"\"\"Generator over all analogy evaluation datasets.\n\n Iterates over dataset names, keyword arguments for their creation and the\n created dataset.\n\n \"\"\"\n for dataset_name in args.analogy_datasets:\n if not dataset_name:\n continue\n parameters = nlp.data.list_datasets(dataset_name)\n for key_values in itertools.product(*parameters.values()):\n kwargs = dict(zip(parameters.keys(), key_values))\n yield dataset_name, kwargs, nlp.data.create(dataset_name, **kwargs)\n\n\ndef get_similarity_task_tokens(args):\n \"\"\"Returns a set of all tokens occurring the evaluation datasets.\"\"\"\n tokens = set()\n for _, _, dataset in iterate_similarity_datasets(args):\n tokens.update(\n itertools.chain.from_iterable((d[0], d[1]) for d in dataset))\n return tokens\n\n\ndef get_analogy_task_tokens(args):\n \"\"\"Returns a set of all tokens occuring the evaluation datasets.\"\"\"\n tokens = set()\n for _, _, dataset in iterate_analogy_datasets(args):\n tokens.update(\n itertools.chain.from_iterable(\n (d[0], d[1], d[2], d[3]) for d in dataset))\n return tokens\n\n\ndef get_tokens_in_evaluation_datasets(args):\n tokens = get_similarity_task_tokens(args)\n tokens.update(get_analogy_task_tokens(args))\n return tokens\n\n\ndef evaluate_similarity(args, token_embedding, ctx, logfile=None,\n global_step=0):\n \"\"\"Evaluate on specified similarity datasets.\"\"\"\n\n results = []\n for similarity_function in args.similarity_functions:\n evaluator = nlp.embedding.evaluation.WordEmbeddingSimilarity(\n idx_to_vec=token_embedding.idx_to_vec,\n similarity_function=similarity_function)\n evaluator.initialize(ctx=ctx)\n if not args.no_hybridize:\n evaluator.hybridize()\n\n # Evaluate all datasets\n for (dataset_name, dataset_kwargs,\n dataset) in iterate_similarity_datasets(args):\n initial_length = len(dataset)\n dataset_coded = [[\n token_embedding.token_to_idx[d[0]],\n token_embedding.token_to_idx[d[1]], d[2]\n ] for d in dataset if d[0] in 
token_embedding.token_to_idx\n and d[1] in token_embedding.token_to_idx]\n num_dropped = initial_length - len(dataset_coded)\n\n # All words are unknown\n if not len(dataset_coded):\n correlation = 0\n else:\n words1, words2, scores = zip(*dataset_coded)\n pred_similarity = evaluator(\n mx.nd.array(words1, ctx=ctx), mx.nd.array(words2, ctx=ctx))\n sr = stats.spearmanr(pred_similarity.asnumpy(),\n np.array(scores))\n correlation = sr.correlation\n\n logging.info(\n 'Spearman rank correlation on %s (%s pairs) %s with %s:\\t%s',\n dataset.__class__.__name__, len(dataset_coded),\n str(dataset_kwargs), similarity_function, correlation)\n\n result = dict(\n task='similarity',\n dataset_name=dataset_name,\n dataset_kwargs=dataset_kwargs,\n similarity_function=similarity_function,\n spearmanr=correlation,\n num_dropped=num_dropped,\n global_step=global_step,\n )\n log_similarity_result(logfile, result)\n results.append(result)\n\n return results\n\n\ndef evaluate_analogy(args, token_embedding, ctx, logfile=None, global_step=0):\n \"\"\"Evaluate on specified analogy datasets.\n\n The analogy task is an open vocabulary task, make sure to pass a\n token_embedding with a sufficiently large number of supported tokens.\n\n \"\"\"\n results = []\n exclude_question_words = not args.analogy_dont_exclude_question_words\n for analogy_function in args.analogy_functions:\n evaluator = nlp.embedding.evaluation.WordEmbeddingAnalogy(\n idx_to_vec=token_embedding.idx_to_vec,\n exclude_question_words=exclude_question_words,\n analogy_function=analogy_function)\n evaluator.initialize(ctx=ctx)\n if not args.no_hybridize:\n evaluator.hybridize()\n\n for (dataset_name, dataset_kwargs,\n dataset) in iterate_analogy_datasets(args):\n initial_length = len(dataset)\n dataset_coded = [[\n token_embedding.token_to_idx[d[0]],\n token_embedding.token_to_idx[d[1]],\n token_embedding.token_to_idx[d[2]],\n token_embedding.token_to_idx[d[3]]\n ] for d in dataset if d[0] in token_embedding.token_to_idx\n and d[1] in token_embedding.token_to_idx\n and d[2] in token_embedding.token_to_idx\n and d[3] in token_embedding.token_to_idx]\n num_dropped = initial_length - len(dataset_coded)\n\n dataset_coded_batched = mx.gluon.data.DataLoader(\n dataset_coded, batch_size=args.eval_batch_size)\n\n acc = mx.metric.Accuracy()\n for batch in dataset_coded_batched:\n batch = batch.as_in_context(ctx)\n words1, words2, words3, words4 = (batch[:, 0], batch[:, 1],\n batch[:, 2], batch[:, 3])\n pred_idxs = evaluator(words1, words2, words3)\n acc.update(pred_idxs[:, 0], words4.astype(np.float32))\n\n logging.info('Accuracy on %s (%s quadruples) %s with %s:\\t%s',\n dataset.__class__.__name__, len(dataset_coded),\n str(dataset_kwargs), analogy_function,\n acc.get()[1])\n\n result = dict(\n task='analogy',\n dataset_name=dataset_name,\n dataset_kwargs=dataset_kwargs,\n analogy_function=analogy_function,\n accuracy=acc.get()[1],\n num_dropped=num_dropped,\n global_step=global_step,\n )\n log_analogy_result(logfile, result)\n results.append(result)\n return results\n\n\ndef log_similarity_result(logfile, result):\n \"\"\"Log a similarity evaluation result dictionary as TSV to logfile.\"\"\"\n assert result['task'] == 'similarity'\n\n if not logfile:\n return\n\n with open(logfile, 'a') as f:\n f.write('\\t'.join([\n str(result['global_step']),\n result['task'],\n result['dataset_name'],\n json.dumps(result['dataset_kwargs']),\n result['similarity_function'],\n str(result['spearmanr']),\n str(result['num_dropped']),\n ]))\n\n f.write('\\n')\n\n\ndef 
log_analogy_result(logfile, result):\n \"\"\"Log a analogy evaluation result dictionary as TSV to logfile.\"\"\"\n assert result['task'] == 'analogy'\n\n if not logfile:\n return\n\n with open(logfile, 'a') as f:\n f.write('\\t'.join([\n str(result['global_step']),\n result['task'],\n result['dataset_name'],\n json.dumps(result['dataset_kwargs']),\n result['analogy_function'],\n str(result['accuracy']),\n str(result['num_dropped']),\n ]))\n f.write('\\n')\n" ]
[ [ "numpy.array" ] ]
pedrogbmendes/TrimTuner
[ "a9dec1f1ac610e6ec6d54cfaf5f9c93bc4f80f97" ]
[ "trimtuner/trimtuner/trimtuner.py" ]
[ "import time\nimport sys\nimport math\nimport random\nimport george\nimport numpy as np\nimport os\n\n#acq function\nfrom trimtuner.acquisition_functions.constrained_entropy_search import Constrained_EntropySearch\nfrom trimtuner.acquisition_functions.marginalization import MarginalizationGPMCMC, MarginalizationDT\nfrom robo.acquisition_functions.ei import *\n\n#heuristics to filter\nfrom trimtuner.maximizers.random_sampling import RandomSampling\nfrom trimtuner.maximizers.cea import CEA\n#from trimtuner.maximizers.direct import Direct\n#from trimtuner.maximizers.cmaes import CMAES\n\n\n#models\nfrom trimtuner.models.trimtuner_dt import EnsembleDTs\nfrom trimtuner.models.trimtuner_gp import EnsembleGPs\nfrom robo.priors.env_priors import EnvPrior\n\n\n#bootstrapping\nfrom trimtuner.trimtuner.initial_sampling import initial_sampling_trimtuner\n#incumbent estimation\nfrom trimtuner.trimtuner.incumbent_estimation import incumbent_estimation_cea, incumbent_estimation\n\n\n\n\ndef transform(s, s_min, s_max):\n s_transform = (np.log2(s) - np.log2(s_min)) / (np.log2(s_max) - np.log2(s_min))\n return s_transform\n\n\n\ndef retransform(s_transform, s_min, s_max):\n s = np.rint(2 ** (s_transform * (np.log2(s_max) - np.log2(s_min)) + np.log2(s_min)))\n return int(s)\n\n\n\nclass Logs():\n #class to print log files\n def __init__(self, seed, initSamples, model, heuristic):\n dir = os.path.abspath(os.getcwd())\n path = dir + \"/runLogs\"\n\n self.initSamples = initSamples\n self.seed = seed\n\n if not os.path.isdir(path):\n try:\n os.mkdir(path) #create runLogs folder\n except OSError:\n print(\"Creation of the directory %s failed\" % path)\n else:\n print(\"Successfully created the directory %s \" % path)\n\n filename_orig = path + \"/trimtuner_logs_seed\" + str(seed) + \"_initSamples\" + str(initSamples) + \"_model_\" + model + \"_heuristic_\" + heuristic \n \n filename = filename_orig + \".txt\"\n counter = 1\n while os.path.isfile(filename):\n\n filename = filename_orig + \"_\" + str(counter) + \".txt\"\n counter += 1\n if counter >= 10000:\n print(\"ERROR createing the log files!!! 
Check folder \" + path)\n sys.stdout.flush() \n sys.exit(0)\n\n #filename += \".txt\" \n\n self.file_logs = open(filename, \"w\")\n self.file_logs.write(\"runID;initSamples;explorationNumber;incumbent;incTime;incAcc;incCost;configTested;Time;Acc;Cost;Overhead;CumulativeCost;\\n\")\n\n\n def printLogs(self, it, inc, incTime, incAcc, incCost, conf, confTime, confAcc, confCost, overhead, CumulativeCost):\n \n strWrite = str(self.seed) + \";\" + str(self.initSamples) + \";\" + str(it) + \";\" + str(inc) + \";\" + str(incTime) + \";\" + str(incAcc) + \";\" + str(incCost) + \";\" + str(conf) + \";\" + str(confTime) + \";\" + str(confAcc) + \";\" + str(confCost) + \";\" + str(overhead) + \";\" + str(CumulativeCost) + \"\\n\"\n self.file_logs.write(strWrite)\n \n\n def close(self):\n self.file_logs.close()\n\n\n\n\n##################################################################################\n# TrimTuner: \n# Efficient Optimization of Machine Learning Jobs in the Cloud via Sub-Sampling\n#\n##################################################################################\n\ndef trimtuner(objective_function, all_configs, constraints, seed, filterHeuristic, model,\n lower, upper, s_min, s_max, n_init=30, num_iterations=100, subsets=[60, 10, 4, 2]):\n\n # internal paramaters\n burnin=100\n chain_length=100\n n_hypers=12\n\n #percentage of unexplored configs to test in the acquisition function\n per = 0.1 \n\n np.random.seed(seed)\n rng = np.random.RandomState(np.random.randint(0, 10000))\n\n #assert n_init * len(\n assert n_init <= num_iterations, \"Number of initial points (n_init) has to be smaller than the number of iterations\" \n assert lower.shape[0] == upper.shape[0], \"Dimension miss match between upper and lower bound\"\n assert model == \"gp\" or model == \"dt\", \"ERROR: wrong model techniques. Chose 'gp' for Gaussian Processes or 'dt' for an ensemble decision tress\"\n assert filterHeuristic == \"cea\" or filterHeuristic == \"random\" or filterHeuristic == \"nofilter\", \"ERROR: wrong filtering heuristic. 
Chose 'cea', 'random', or 'nofilter'!\"\n\n costCumulative = 0\n\n n_dims = lower.shape[0]\n\n # Bookkeeping logs\n logs = Logs(seed, n_init, model, filterHeuristic)\n\n unexplored_Set = all_configs # list with all possible configurations\n training_Set = [] # traning set\n\n X = []\n y = []\n c = []\n\n if model == \"dt\":\n #ensemble of descision trees\n number_trees = 10\n model_objective = EnsembleDTs(number_trees, seed)\n model_cost = EnsembleDTs(number_trees, seed)\n\n elif model == \"gp\":\n #Gaussian Processes\n\n #kernels functions based on FABOLAS\n\n # Define model for the objective function\n cov_amp = 1 # Covariance amplitude\n kernel = cov_amp\n\n for d in range(n_dims):\n kernel *= george.kernels.Matern52Kernel(np.ones([1])*0.01, ndim=n_dims+1, axes=d)\n\n # Kernel for the environmental variable\n # We use (1-s)**2 as basis function for the Bayesian linear kernel\n env_kernel = george.kernels.BayesianLinearRegressionKernel(log_a=0.1,log_b=0.1,ndim=n_dims + 1,axes=n_dims)\n kernel *= env_kernel\n\n # Take 3 times more samples than we have hyperparameters\n if n_hypers < 2 * len(kernel):\n n_hypers = 3 * len(kernel)\n if n_hypers % 2 == 1:\n n_hypers += 1\n\n\n prior = EnvPrior(len(kernel)+1, n_ls=n_dims, n_lr=2, rng=rng)\n\n quadratic_bf = lambda x: (1 - x) ** 2\n linear_bf = lambda x: x\n\n #model for accuracy\n model_objective = EnsembleGPs(kernel,\n prior=prior,\n burnin_steps=burnin,\n chain_length=chain_length,\n n_hypers=n_hypers,\n normalize_output=False,\n basis_func=quadratic_bf,\n lower=lower,\n upper=upper,\n rng=rng)\n\n # Define model for the cost function\n cost_cov_amp = 1\n cost_kernel = cost_cov_amp\n\n for d in range(n_dims):\n cost_kernel *= george.kernels.Matern52Kernel(np.ones([1])*0.01, ndim=n_dims+1, axes=d)\n\n cost_env_kernel = george.kernels.BayesianLinearRegressionKernel(log_a=0.1,log_b=0.1,ndim=n_dims+1,axes=n_dims)\n cost_kernel *= cost_env_kernel\n\n cost_prior = EnvPrior(len(cost_kernel)+1, n_ls=n_dims, n_lr=2, rng=rng)\n\n #model for cost\n model_cost = EnsembleGPs(cost_kernel,\n prior=cost_prior,\n burnin_steps=burnin,\n chain_length=chain_length,\n n_hypers=n_hypers,\n basis_func=linear_bf,\n normalize_output=False,\n lower=lower,\n upper=upper,\n rng=rng)\n\n\n # Extend input space by task variable\n extend_lower = np.append(lower, 0)\n extend_upper = np.append(upper, 1)\n is_env = np.zeros(extend_lower.shape[0])\n is_env[-1] = 1\n\n\n acq_func = Constrained_EntropySearch(model_objective,\n model_cost,\n constraints,\n extend_lower,\n extend_upper,\n sampling_acquisition=EI,\n is_env_variable=is_env,\n n_representer=50)\n\n #if model == 'gp':\n #gps marginalization\n acquisition_func = MarginalizationGPMCMC(acq_func)\n #else:\n # acquisition_func = MarginalizationDT(acq_func)\n\n\n if filterHeuristic == 'random':\n maximizer = RandomSampling(acquisition_func, extend_lower, extend_upper, seed, per)\n\n if filterHeuristic == 'nofilter':\n maximizer = RandomSampling(acquisition_func, extend_lower, extend_upper, seed, 1)\n \n elif filterHeuristic == 'cea':\n maximizer = CEA(acquisition_func, extend_lower, extend_upper, per, constraints)\n\n # elif filterHeuristic == 'direct':\n # #CMAES\n # maximizer = Direct(acquisition_func, extend_lower, extend_upper, n_func_evals=144, n_iters=300)\n\n # elif filterHeuristic == 'cmaes':\n # #CMAES\n # maximizer = CMAES(acquisition_func, seed, extend_lower, extend_upper, n_func_evals=144) \n \n\n # Initial Design\n print(\"Initial Design\")\n sys.stdout.flush()\n counter_it = 1\n\n real_n_init = int(n_init / 
len(subsets)) \n x_init = initial_sampling_trimtuner(seed, unexplored_Set, real_n_init, s_max)\n\n for it in range(real_n_init):\n\n for subset in subsets:\n start_time_overhead = time.time()\n s = int(s_max / float(subset)) ##real_size\n\n x = x_init[it]\n print(\"Evaluate %s on subset size %d\" % (x, s))\n sys.stdout.flush()\n\n #time to select a config to test\n overhead_init = time.time() - start_time_overhead\n\n func_val, cost, runTime = objective_function(x, s)\n costCumulative += cost\n\n print(\"Configuration has an accuracy of %f with cost %f and took %f seconds\" % (1-func_val,cost,runTime))\n sys.stdout.flush()\n\n start_time_overhead = time.time()\n\n #add config tested to the training set and remove from the untested configs\n tested_config = np.copy(x)\n tested_config[-1] = s\n training_Set.append(tested_config)\n count = 0\n while count != len(unexplored_Set):\n if np.array_equal(unexplored_Set[count], tested_config):\n unexplored_Set.pop(count)\n break\n count += 1\n\n # Bookkeeping\n config = np.append(x, transform(s, s_min, s_max))\n X.append(config)\n y.append(np.log(func_val)) # Model the target function on a logarithmic scale\n c.append(np.log(cost)) # Model the cost on a logarithmic scale\n\n #time to update the training and the unexplored set\n overhead_updateSet = time.time() - start_time_overhead\n\n overhead_time = overhead_updateSet + overhead_init\n\n #write logs in the files\n logs.printLogs(counter_it, x, runTime, 1-func_val, cost, x, runTime, 1-func_val, cost, overhead_time, costCumulative)\n\n counter_it +=1\n\n #end initial sampling\n\n X = np.array(X)\n y = np.array(y)\n c = np.array(c)\n\n # Train models\n model_objective.train(X, y, do_optimize=True) #model of accuracy\n model_cost.train(X, c, do_optimize=True) #model of cost\n\n\n #start optimization\n for it in range(X.shape[0]+1, num_iterations+1):\n print(\"Start iteration %d ... 
\" % (it))\n sys.stdout.flush()\n\n start_time = time.time()\n\n acquisition_func.update(model_objective, model_cost, X, y, c)\n new_x = maximizer.maximize(X, y, c, unexplored_Set) #maximize the acquisition function\n\n s = retransform(new_x[-1], s_min, s_max) # Map s from log space to original linear space\n\n #time to compute the acquisition function\n overhead_time_acqFunc = time.time() - start_time \n\n # Evaluate the chosen configuration\n print(\"Evaluate candidate \" + str(new_x[:-1]) + \" on subset size \" + str(int(s)))\n sys.stdout.flush()\n\n new_y, new_c, new_t = objective_function(new_x[:-1], int(s))\n\n costCumulative += new_c \n\n #add config tested to the training set and remove from the untested configs\n tested_config = np.copy(new_x)\n tested_config[-1] = s\n training_Set.append(tested_config)\n count = 0\n while count != len(unexplored_Set):\n if np.array_equal(unexplored_Set[count], tested_config):\n unexplored_Set.pop(count)\n break\n count += 1\n\n print(\"Configuration has an accuracy of %.3f with cost %.3f and took %.3f seconds\" % (1-new_y,new_c,new_t))\n sys.stdout.flush()\n\n start_time = time.time() #overhead\n\n # Add new observation to the data\n X = np.concatenate((X, new_x[None, :]), axis=0)\n y = np.concatenate((y, np.log(np.array([new_y]))), axis=0) # Model the target function on a logarithmic scale\n c = np.concatenate((c, np.log(np.array([new_c]))), axis=0) # Model the cost function on a logarithmic scale\n\n # Train models\n model_objective.train(X, y, do_optimize=True) #model of accuracy\n model_cost.train(X, c, do_optimize=True) #model of cost\n\n # determine the incumbent\n inc, inc_acc, inc_cost = incumbent_estimation_cea(model_objective, model_cost, X[:, :-1], constraints)\n inc[-1] = retransform(inc[-1], s_min, s_max)\n\n print(\"Current incumbent \" + str(inc) + \" with estimated accuracy of \" + str(inc_acc) + \"%\")\n\n #time to train the models\n overhead_time_trainModels = time.time() - start_time\n\n #overhead - training models and compute the acq. func.\n total_overhead = overhead_time_trainModels + overhead_time_acqFunc\n\n print(\"Optimization overhead was %.3f seconds\" % (total_overhead))\n sys.stdout.flush()\n\n #write logs in the files\n logs.printLogs(it, inc, 0, inc_acc, inc_cost, tested_config, new_t, 1-new_y, new_c, total_overhead, costCumulative)\n\n\n logs.close()\n\n results = \"\\n The optimal configuration is \" + str(inc) + \" with estimated accuracy of \" + str(inc_acc) + \"\\0025 and a cost of \" + str(inc_cost) + \"\\n\"\n print(results)\n\n return inc" ]
[ [ "numpy.log2", "numpy.ones", "numpy.append", "numpy.zeros", "numpy.random.seed", "numpy.copy", "numpy.log", "numpy.array_equal", "numpy.array", "numpy.concatenate", "numpy.random.randint" ] ]
kurbansitterley/WaterTAP3
[ "8f4493182a39e3ba180019aba02249916dbae500" ]
[ "watertap3/watertap3/utils/financials.py" ]
[ "##############################################################################\n# Institute for the Design of Advanced Energy Systems Process Systems\n# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2020, by the\n# software owners: The Regents of the University of California, through\n# Lawrence Berkeley National Laboratory, National Technology & Engineering\n# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia\n# University Research Corporation, et al. All rights reserved.\n##############################################################################\n\nimport pandas as pd\nfrom pyomo.environ import (Block, Expression, Param, Var, NonNegativeReals, units as pyunits)\n\nfrom .ml_regression import get_linear_regression\n\n__all__ = ['SystemSpecs', 'get_complete_costing', 'get_ind_table', 'get_system_specs',\n 'get_system_costing', 'global_costing_parameters']\n\nlast_year_for_cost_indicies = 2050\n\n\nclass SystemSpecs():\n\n def __init__(self, train=None):\n basis_data = pd.read_csv('data/case_study_TEA_basis.csv', index_col='case_study')\n elec_cost = pd.read_csv('data/industrial_electricity_costs_2020.csv', index_col='location')\n elec_cost.index = elec_cost.index.str.lower()\n case_study = train['case_study']\n scenario = train['scenario']\n # print(str(case_study).replace('_', ' ').swapcase() + ':', str(scenario).replace('_', ' ').swapcase())\n self.location = basis_data[basis_data['variable'] == 'location_basis'].loc[case_study].value\n self.elec_price = float(elec_cost.loc[self.location])\n self.land_cost_percent_FCI = float(basis_data[basis_data['variable'] == 'land_cost_percent'].loc[case_study].value)\n self.working_cap_percent_FCI = float(basis_data[basis_data['variable'] == 'working_capital_percent'].loc[case_study].value)\n self.salaries_percent_FCI = float(basis_data[basis_data['variable'] == 'base_salary_per_fci'].loc[case_study].value)\n self.maintenance_costs_percent_FCI = float(basis_data[basis_data['variable'] == 'maintenance_cost_percent'].loc[case_study].value)\n self.lab_fees_percent_FCI = float(basis_data[basis_data['variable'] == 'laboratory_fees_percent'].loc[case_study].value)\n self.insurance_taxes_percent_FCI = float(basis_data[basis_data['variable'] == 'insurance_and_taxes_percent'].loc[case_study].value)\n self.benefit_percent_of_salary = float(basis_data[basis_data['variable'] == 'employee_benefits_percent'].loc[case_study].value)\n self.plant_lifetime_yrs = int(basis_data[basis_data['variable'] == 'plant_life_yrs'].loc[case_study].value)\n self.analysis_yr_cost_indices = int(basis_data[basis_data['variable'] == 'analysis_year'].loc[case_study].value)\n self.debt_interest_rate = float(basis_data[basis_data['variable'] == 'debt_interest_rate'].loc[case_study].value)\n self.plant_cap_utilization = float(basis_data[basis_data['variable'] == 'plant_cap_utilization'].loc[case_study].value)\n\n\ndef create_costing_block(unit, basis_year, tpec_or_tic):\n '''\n Function to create costing block and establish basis year and TPEC/TIC factor for each\n WaterTAP3 unit.\n\n :param unit: WaterTAP3 unit\n :type unit: str\n :param basis_year: Basis year for adjusting cost calculations\n :type basis_year: str\n :param tpec_or_tic: either 'TPEC' or 'TIC'; determines which factor to use for FCI adjustment\n (if necessary)\n :type tpec_or_tic: str\n :return:\n '''\n unit.costing = costing = Block()\n costing.basis_year = basis_year\n sys_cost_params = unit.parent_block().costing_param\n if tpec_or_tic == 'TPEC':\n costing.tpec_tic = 
unit.tpec_tic = sys_cost_params.tpec\n else:\n costing.tpec_tic = unit.tpec_tic = sys_cost_params.tic\n\n\ndef get_complete_costing(costing):\n '''\n Function to build costing block for each WaterTAP3 unit.\n\n :param costing: Costing block object from WaterTAP3 unit model.\n :type costing: object\n :return:\n '''\n unit = costing.parent_block()\n time = unit.flowsheet().config.time\n t = time.first()\n flow_in_m3yr = pyunits.convert(costing.parent_block().flow_vol_in[t], to_units=pyunits.m ** 3 / pyunits.year)\n\n costing.tci_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for TCI')\n\n costing.tci_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for TCI')\n\n costing.fci_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for FCI')\n\n costing.fci_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for FCI')\n\n costing.fixed_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Fixed O&M')\n\n costing.fixed_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Fixed O&M')\n\n costing.annual_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Annual O&M')\n\n costing.annual_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Annual O&M')\n\n costing.total_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Total O&M')\n\n costing.total_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Total O&M')\n\n costing.catchem_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Catalysts/Chemicals')\n\n costing.catchem_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Catalysts/Chemicals')\n\n costing.elect_intens_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Electricity Intensity')\n\n costing.elect_intens_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Electricity Intensity')\n\n costing.elect_cost_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Electricity Intensity')\n\n costing.elect_cost_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Electricity Intensity')\n\n costing.other_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='Reduction factor for Other capital')\n\n costing.other_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='Uncertainty for Other capital')\n\n costing.tci_reduction.fix(0)\n costing.tci_uncertainty.fix(1)\n\n costing.fci_reduction.fix(0)\n costing.fci_uncertainty.fix(1)\n\n costing.fixed_op_reduction.fix(0)\n costing.fixed_op_uncertainty.fix(1)\n\n costing.annual_op_reduction.fix(0)\n costing.annual_op_uncertainty.fix(1)\n\n costing.total_op_reduction.fix(0)\n costing.total_op_uncertainty.fix(1)\n\n costing.catchem_reduction.fix(0)\n costing.catchem_uncertainty.fix(1)\n\n costing.elect_intens_reduction.fix(0)\n costing.elect_intens_uncertainty.fix(1)\n\n costing.elect_cost_reduction.fix(0)\n costing.elect_cost_uncertainty.fix(1)\n\n costing.other_reduction.fix(0)\n costing.other_uncertainty.fix(1)\n\n basis_year = costing.basis_year\n sys_specs = 
unit.parent_block().costing_param\n\n chem_dict = unit.chem_dict\n electricity = unit.electricity\n\n\n ## COSTING INDICES\n df = get_ind_table(sys_specs.analysis_yr_cost_indices)\n costing.cap_replacement_parts = df.loc[basis_year].Capital_Factor\n costing.catalysts_chemicals = df.loc[basis_year].CatChem_Factor\n costing.labor_and_other_fixed = df.loc[basis_year].Labor_Factor\n costing.consumer_price_index = df.loc[basis_year].CPI_Factor\n\n costing.fixed_cap_inv = ((costing.fixed_cap_inv_unadjusted * costing.cap_replacement_parts) * (1 - costing.fci_reduction[t])) * costing.fci_uncertainty[t]\n if unit.parent_block().train['case_study'] == 'cherokee' and unit.unit_name == 'evaporation_pond':\n costing.land_cost = costing.fixed_cap_inv * 0\n else:\n costing.land_cost = costing.fixed_cap_inv * sys_specs.land_cost_percent_FCI\n costing.working_cap = costing.fixed_cap_inv * sys_specs.working_cap_percent_FCI\n costing.contingency = costing.fixed_cap_inv * sys_specs.contingency_cost_percent_FCI\n costing.component_replacement = costing.fixed_cap_inv * sys_specs.component_replace_percent_FCI\n costing.base_employee_salary_cost = costing.fixed_cap_inv_unadjusted * sys_specs.salaries_percent_FCI\n costing.salaries = costing.labor_and_other_fixed * costing.base_employee_salary_cost\n costing.benefits = costing.salaries * sys_specs.benefit_percent_of_salary\n costing.maintenance = costing.fixed_cap_inv * sys_specs.maintenance_costs_percent_FCI\n costing.lab = costing.fixed_cap_inv * sys_specs.lab_fees_percent_FCI\n costing.insurance_taxes = costing.fixed_cap_inv * sys_specs.insurance_taxes_percent_FCI\n\n cat_chem_df = pd.read_csv('data/chemical_costs.csv', index_col='Material')\n chem_cost_sum = 0\n for key in chem_dict.keys():\n if key == 'unit_cost':\n chem_cost_sum = chem_dict[key] * costing.fixed_cap_inv * 1E6\n else:\n chem_cost = cat_chem_df.loc[key].Price\n chem_cost_sum += costing.catalysts_chemicals * flow_in_m3yr * chem_cost * chem_dict[key] * sys_specs.plant_cap_utilization\n\n costing.cat_and_chem_cost = ((chem_cost_sum * 1E-6) * (1 - costing.catchem_reduction[t])) * costing.catchem_uncertainty[t]\n\n # if not hasattr(costing, 'electricity_cost'):\n costing.electricity_intensity = (unit.electricity * (1 - costing.elect_intens_reduction[t])) * costing.elect_intens_uncertainty[t]\n costing.electricity_cost = ((costing.electricity_intensity * flow_in_m3yr * sys_specs.electricity_price * 1E-6) * sys_specs.plant_cap_utilization) * (1 - costing.elect_cost_reduction[t]) * costing.elect_cost_uncertainty[t]\n\n if not hasattr(costing, 'other_var_cost'):\n costing.other_var_cost = 0\n\n else:\n costing.other_var_cost = (costing.other_var_cost * (1 - costing.other_reduction[t])) * costing.other_uncertainty[t]\n\n costing.total_cap_investment = (costing.fixed_cap_inv + costing.land_cost + costing.working_cap) * (1 - costing.tci_reduction[t]) * costing.tci_uncertainty[t]\n # costing.salaries = Expression(expr=costing.labor_and_other_fixed * costing.base_employee_salary_cost, doc='Salaries')\n costing.total_fixed_op_cost = ((costing.salaries + costing.benefits + costing.maintenance + costing.lab + costing.insurance_taxes) * (1 - costing.fixed_op_reduction[t])) * costing.fixed_op_uncertainty[t]\n costing.annual_op_main_cost = ((costing.cat_and_chem_cost + costing.electricity_cost + costing.other_var_cost + costing.total_fixed_op_cost) * (1 - costing.annual_op_reduction[t])) * costing.annual_op_uncertainty[t]\n costing.total_operating_cost = ((costing.total_fixed_op_cost + costing.cat_and_chem_cost + 
costing.electricity_cost + costing.other_var_cost) * (1 - costing.total_op_reduction[t])) * costing.total_op_uncertainty[t]\n\n\ndef get_ind_table(analysis_yr_cost_indices):\n '''\n Function to get costing indicies for WaterTAP3 model.\n\n :param analysis_yr_cost_indices: Year to get costing indices for.\n :type analysis_yr_cost_indices: int\n :return: Indicies DataFrame\n '''\n df = pd.read_csv('data/plant_cost_indices.csv')\n\n df1 = pd.DataFrame()\n for name in df.columns[1:]:\n a, b = get_linear_regression(list(df.Year), list(df[('%s' % name)]), name)\n new_list = []\n yr_list = []\n for yr in range(df.Year.max() + 1, last_year_for_cost_indicies + 1):\n new_list.append(a * yr + b)\n yr_list.append(yr)\n df1[name] = new_list\n df1['Year'] = yr_list\n df = pd.concat([df, df1], axis=0)\n\n new_cost_variables = ['Capital', 'CatChem', 'Labor', 'CPI']\n for variable in new_cost_variables:\n ind_name = '%s_Index' % variable\n fac_name = '%s_Factor' % variable\n df[fac_name] = (df[df.Year == analysis_yr_cost_indices][ind_name].max() / df[ind_name])\n df = df.set_index(df.Year)\n df = df.replace(1.0, 1.00000000001)\n\n return df\n\n\ndef get_system_specs(m_fs):\n '''\n Function to set costing parameters for WaterTAP3 model.\n\n\n '''\n m_fs.costing_param = Block()\n b = m_fs.costing_param\n\n b.electricity_price = Var(initialize=0.07,\n doc='Electricity cost [$/kWh]')\n b.maintenance_costs_percent_FCI = Var(initialize=0.07,\n doc='Maintenance/contingency cost as % FCI')\n b.salaries_percent_FCI = Var(initialize=0.07,\n doc='Salaries cost as % FCI')\n b.benefit_percent_of_salary = Var(initialize=0.07,\n doc='Benefits cost as % FCI')\n b.insurance_taxes_percent_FCI = Var(initialize=0.07,\n doc='Insurance/taxes cost as % FCI')\n b.lab_fees_percent_FCI = Var(initialize=0.07,\n doc='Lab cost as % FCI')\n b.land_cost_percent_FCI = Var(initialize=0.07,\n doc='Land cost as % FCI')\n b.plant_lifetime_yrs = Var(initialize=30,\n doc='Plant lifetime [years')\n b.plant_cap_utilization = Var(initialize=1,\n doc='Plant capacity utilization [%]')\n b.working_cap_percent_FCI = Var(initialize=0.008,\n doc='Working capital as % FCI')\n b.wacc = Var(initialize=0.05,\n doc='Weighted Average Cost of Capital (WACC)')\n b.contingency_cost_percent_FCI = Var(initialize=0,\n doc='Contingency costs as % FCI')\n b.component_replace_percent_FCI = Var(initialize=0,\n doc='Component replacement costs as % FCI')\n\n system_specs = SystemSpecs(m_fs.train)\n\n b.electricity_price.fix(system_specs.elec_price)\n b.salaries_percent_FCI.fix(system_specs.salaries_percent_FCI)\n b.land_cost_percent_FCI.fix(system_specs.land_cost_percent_FCI)\n b.maintenance_costs_percent_FCI.fix(system_specs.maintenance_costs_percent_FCI)\n b.lab_fees_percent_FCI.fix(system_specs.lab_fees_percent_FCI)\n b.insurance_taxes_percent_FCI.fix(system_specs.insurance_taxes_percent_FCI)\n b.plant_lifetime_yrs.fix(system_specs.plant_lifetime_yrs)\n\n b.benefit_percent_of_salary.fix(system_specs.benefit_percent_of_salary)\n b.working_cap_percent_FCI.fix(system_specs.working_cap_percent_FCI)\n b.plant_cap_utilization.fix(system_specs.plant_cap_utilization) # 1.0\n b.wacc.fix(system_specs.debt_interest_rate)\n b.contingency_cost_percent_FCI.fix(0)\n b.component_replace_percent_FCI.fix(0)\n\n b.analysis_yr_cost_indices = system_specs.analysis_yr_cost_indices\n b.location = system_specs.location\n\n b.tpec = 3.4\n b.tic = 1.65\n\n\ndef get_system_costing(m_fs):\n '''\n Function to aggregate unit model results for calculation of system costing for WaterTAP3 
model.\n\n '''\n if not hasattr(m_fs, 'costing'):\n m_fs.costing = Block()\n b = m_fs.costing\n time = m_fs.config.time\n t = time.first()\n sys_specs = m_fs.costing_param\n\n total_capital_investment_var_lst = []\n cat_and_chem_cost_lst = []\n electricity_cost_lst = []\n other_var_cost_lst = []\n total_fixed_op_cost_lst = []\n electricity_intensity_lst = []\n\n wacc = sys_specs.wacc\n\n # b.wacc = Var(initialize=sys_specs.wacc,\n # doc='Weighted average cost of capital (WACC)')\n #\n # b.wacc.fix(sys_specs.wacc)\n\n b.capital_recovery_factor = (wacc * (1 + wacc) ** sys_specs.plant_lifetime_yrs) / (\n ((1 + wacc) ** sys_specs.plant_lifetime_yrs) - 1)\n\n for b_unit in m_fs.component_objects(Block, descend_into=True):\n if hasattr(b_unit, 'costing'):\n total_capital_investment_var_lst.append(b_unit.costing.total_cap_investment)\n cat_and_chem_cost_lst.append(b_unit.costing.cat_and_chem_cost)\n electricity_cost_lst.append(b_unit.costing.electricity_cost)\n other_var_cost_lst.append(b_unit.costing.other_var_cost)\n total_fixed_op_cost_lst.append(b_unit.costing.total_fixed_op_cost)\n\n b.sys_tci_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System TCI reduction factor')\n\n b.sys_catchem_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System catalyst/chemical cost reduction factor')\n\n b.sys_elect_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System electricity cost reduction factor')\n\n b.sys_other_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System other cost reduction factor')\n\n b.sys_fixed_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System fixed O&M reduction factor')\n\n b.sys_total_op_reduction = Var(time,\n domain=NonNegativeReals,\n initialize=0,\n doc='System total O&M reduction factor')\n\n b.sys_tci_reduction.fix(0)\n b.sys_catchem_reduction.fix(0)\n b.sys_elect_reduction.fix(0)\n b.sys_other_reduction.fix(0)\n b.sys_fixed_op_reduction.fix(0)\n b.sys_total_op_reduction.fix(0)\n\n b.sys_tci_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System TCI uncertainty factor')\n\n b.sys_catchem_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System catalyst/chemical cost uncertainty factor')\n\n b.sys_elect_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System electricity cost uncertainty factor')\n\n b.sys_other_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System other cost uncertainty factor')\n\n b.sys_fixed_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System fixed O&M uncertainty factor')\n\n b.sys_total_op_uncertainty = Var(time,\n domain=NonNegativeReals,\n initialize=1,\n doc='System total O&M uncertainty factor')\n\n b.sys_tci_uncertainty.fix(1)\n b.sys_catchem_uncertainty.fix(1)\n b.sys_elect_uncertainty.fix(1)\n b.sys_other_uncertainty.fix(1)\n b.sys_fixed_op_uncertainty.fix(1)\n b.sys_total_op_uncertainty.fix(1)\n\n b.cat_and_chem_cost_annual = Expression(expr=(sum(cat_and_chem_cost_lst) * (1 - b.sys_catchem_reduction[t])) * b.sys_catchem_uncertainty[t])\n b.electricity_cost_annual = Expression(expr=(sum(electricity_cost_lst) * (1 - b.sys_elect_reduction[t])) * b.sys_elect_uncertainty[t])\n b.other_var_cost_annual = Expression(expr=(sum(other_var_cost_lst) * (1 - b.sys_other_reduction[t])) * b.sys_other_uncertainty[t])\n b.fixed_op_cost_annual = Expression(expr=(sum(total_fixed_op_cost_lst) * (1 - 
b.sys_fixed_op_reduction[t])) * b.sys_fixed_op_uncertainty[t])\n b.operating_cost_annual = Expression(expr=(b.fixed_op_cost_annual + b.cat_and_chem_cost_annual + b.electricity_cost_annual + b.other_var_cost_annual))\n #\n b.capital_investment_total = Expression(expr=(sum(total_capital_investment_var_lst) * (1 - b.sys_tci_reduction[t])) * b.sys_tci_uncertainty[t])\n b.cat_and_chem_cost_total = Expression(expr=b.cat_and_chem_cost_annual * m_fs.costing_param.plant_lifetime_yrs)\n b.electricity_cost_total = Expression(expr=b.electricity_cost_annual * m_fs.costing_param.plant_lifetime_yrs)\n b.other_var_cost_total = Expression(expr=b.other_var_cost_annual * m_fs.costing_param.plant_lifetime_yrs)\n b.fixed_op_cost_total = Expression(expr=b.fixed_op_cost_annual * m_fs.costing_param.plant_lifetime_yrs)\n b.operating_cost_total = Expression(expr=((b.fixed_op_cost_total + b.cat_and_chem_cost_total + b.electricity_cost_total + b.other_var_cost_total) * (1 - b.sys_total_op_reduction[t])) * b.sys_total_op_uncertainty[t])\n\n\n\n\n recovered_water_flow = 0\n wastewater_list = []\n\n time = m_fs.config.time.first()\n\n for b_unit in m_fs.component_objects(Block, descend_into=False):\n if hasattr(b_unit, 'outlet'):\n if len(getattr(b_unit, 'outlet').arcs()) == 0:\n if hasattr(b_unit.parent_block(), 'pfd_dict'):\n if b_unit.parent_block().pfd_dict[str(b_unit)[3:]]['Type'] == 'use':\n recovered_water_flow = recovered_water_flow + b_unit.flow_vol_out[time]\n else:\n if 'reverse_osmosis' in str(b_unit):\n recovered_water_flow = recovered_water_flow + b_unit.flow_vol_out[time]\n if 'cooling_tower' in str(b_unit):\n recovered_water_flow = recovered_water_flow + b_unit.flow_vol_out[time]\n\n b.treated_water = recovered_water_flow\n\n b.sum_of_inflow = sum_of_inflow = 0\n for key in b.parent_block().flow_in_dict.keys():\n sum_of_inflow += getattr(m_fs, key).flow_vol_in[time]\n\n b.system_recovery = b.treated_water / sum_of_inflow\n\n # LCOW for each unit\n for b_unit in m_fs.component_objects(Block, descend_into=True):\n if hasattr(b_unit, 'costing'):\n setattr(b_unit, 'LCOW', Expression(\n expr=1E6 * (b_unit.costing.total_cap_investment * b.capital_recovery_factor + b_unit.costing.annual_op_main_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_TCI', Expression(\n expr=1E6 * (b_unit.costing.total_cap_investment * b.capital_recovery_factor) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit TCI Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_elec', Expression(\n expr=1E6 * (b_unit.costing.electricity_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Electricity Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_fixed_op', Expression(\n expr=1E6 * (b_unit.costing.total_fixed_op_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Fixed Operating Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_chem', Expression(\n expr=1E6 * (b_unit.costing.cat_and_chem_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Chemical Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_other', Expression(\n expr=1E6 * (b_unit.costing.other_var_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Other O&M Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'LCOW_total_op', Expression(\n expr=1E6 * 
(b_unit.costing.total_operating_cost) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Unit Total Operating Levelized Cost of Water [$/m3]'))\n\n setattr(b_unit, 'elec_int_treated', Expression(\n expr=(b_unit.costing.electricity_cost * 1E6 / sys_specs.electricity_price) /\n (b.treated_water * 3600 * 24 * 365),\n doc='Unit Electricity Intensity [kWh/m3]'))\n\n # LCOW by cost category\n b.LCOW_TCI = Expression(expr=1E6 * (b.capital_investment_total * b.capital_recovery_factor) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_elec = Expression(expr=1E6 * (b.electricity_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_fixed_op = Expression(expr=1E6 * (b.fixed_op_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_chem = Expression(expr=1E6 * (b.cat_and_chem_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_other_onm = Expression(expr=1E6 * (b.other_var_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n b.LCOW_total_op = Expression(expr=1E6 * (b.operating_cost_annual) / (\n b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))\n\n ## GET TOTAL ELECTRICITY CONSUMPTION IN kwh/m3 of treated water\n b.electricity_intensity = Expression(\n expr=(b.electricity_cost_annual * 1E6 / sys_specs.electricity_price) /\n (b.treated_water * 3600 * 24 * 365),\n doc='Electricity Intensity [kWh/m3]')\n\n b.LCOW = Expression(\n expr=1E6 * (b.capital_investment_total * b.capital_recovery_factor + b.operating_cost_annual) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Levelized Cost of Water [$/m3]')\n\n b.LCOW_inflow = Expression(\n expr=1E6 * (b.capital_investment_total * b.capital_recovery_factor + b.operating_cost_annual) /\n (sum_of_inflow * 3600 * 24 * 365 * sys_specs.plant_cap_utilization),\n doc='Levelized Cost of Water by influent flow [$/m3]')\n\n b.elec_frac_LCOW = Expression(\n expr=((1E6 * (b.electricity_cost_annual) /\n (b.treated_water * 3600 * 24 * 365 * sys_specs.plant_cap_utilization))) / b.LCOW,\n doc='Electricity cost as fraction of LCOW')\n\n\ndef global_costing_parameters(self, year=None):\n if year is None:\n year = '2018'\n ce_index_dic = {\n '2019': 680,\n '2018': 671.1,\n '2017': 567.5,\n '2016': 541.7,\n '2015': 556.8,\n '2014': 576.1,\n '2013': 567.3,\n '2012': 584.6,\n '2011': 585.7,\n '2010': 550.8\n }\n\n self.CE_index = Param(mutable=True, initialize=ce_index_dic[year],\n doc='Chemical Engineering Plant Cost Index $ year')" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.concat" ] ]
vgaurav3011/100-Days-of-ML
[ "ec302b03fd492c459cff2592b3a4f5e38f9c9d72" ]
[ "Day 47/classifiers/neural_net.py" ]
[ "from __future__ import print_function\n\nfrom builtins import range\nfrom builtins import object\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom past.builtins import xrange\n\nclass TwoLayerNet(object):\n \"\"\"\n A two-layer fully-connected neural network. The net has an input dimension of\n N, a hidden layer dimension of H, and performs classification over C classes.\n We train the network with a softmax loss function and L2 regularization on the\n weight matrices. The network uses a ReLU nonlinearity after the first fully\n connected layer.\n\n In other words, the network has the following architecture:\n\n input - fully connected layer - ReLU - fully connected layer - softmax\n\n The outputs of the second fully-connected layer are the scores for each class.\n \"\"\"\n\n def __init__(self, input_size, hidden_size, output_size, std=1e-4):\n \"\"\"\n Initialize the model. Weights are initialized to small random values and\n biases are initialized to zero. Weights and biases are stored in the\n variable self.params, which is a dictionary with the following keys:\n\n W1: First layer weights; has shape (D, H)\n b1: First layer biases; has shape (H,)\n W2: Second layer weights; has shape (H, C)\n b2: Second layer biases; has shape (C,)\n\n Inputs:\n - input_size: The dimension D of the input data.\n - hidden_size: The number of neurons H in the hidden layer.\n - output_size: The number of classes C.\n \"\"\"\n self.params = {}\n self.params['W1'] = std * np.random.randn(input_size, hidden_size)\n self.params['b1'] = np.zeros(hidden_size)\n self.params['W2'] = std * np.random.randn(hidden_size, output_size)\n self.params['b2'] = np.zeros(output_size)\n\n def loss(self, X, y=None, reg=0.0):\n \"\"\"\n Compute the loss and gradients for a two layer fully connected neural\n network.\n\n Inputs:\n - X: Input data of shape (N, D). Each X[i] is a training sample.\n - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is\n an integer in the range 0 <= y[i] < C. This parameter is optional; if it\n is not passed then we only return scores, and if it is passed then we\n instead return the loss and gradients.\n - reg: Regularization strength.\n\n Returns:\n If y is None, return a matrix scores of shape (N, C) where scores[i, c] is\n the score for class c on input X[i].\n\n If y is not None, instead return a tuple of:\n - loss: Loss (data loss and regularization loss) for this batch of training\n samples.\n - grads: Dictionary mapping parameter names to gradients of those parameters\n with respect to the loss function; has the same keys as self.params.\n \"\"\"\n # Unpack variables from the params dictionary\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n N, D = X.shape\n\n # Compute the forward pass\n scores = None\n #############################################################################\n # TODO: Perform the forward pass, computing the class scores for the input. #\n # Store the result in the scores variable, which should be an array of #\n # shape (N, C). #\n #############################################################################\n fully_connected1 = X.dot(W1) + b1\n X2 = np.maximum(0, fully_connected1)\n scores = X2.dot(W2) + b2\n pass\n \n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # Compute the loss\n loss = None\n #############################################################################\n # TODO: Finish the forward pass, and compute the loss. 
This should include #\n # both the data loss and L2 regularization for W1 and W2. Store the result #\n # in the variable loss, which should be a scalar. Use the Softmax #\n # classifier loss. #\n #############################################################################\n scores -= np.max(scores, axis=1, keepdims=True)\n scores_exp = np.exp(scores)\n softmax_classify = scores_exp / np.sum(scores_exp, axis=1, keepdims=True)\n loss = np.sum(-np.log(softmax_classify[np.arange(N), y]))\n loss /= N\n loss += reg * (np.sum(W2 * W2) + np.sum( W1 * W1 ))\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Backward pass: compute gradients\n grads = {}\n #############################################################################\n # TODO: Compute the backward pass, computing the derivatives of the weights #\n # and biases. Store the results in the grads dictionary. For example, #\n # grads['W1'] should store the gradient on W1, and be a matrix of same size #\n #############################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n softmax_classify[np.arange(N), y] -= 1\n softmax_classify /= N\n \n dW2 = X2.T.dot(softmax_classify)\n db2 = softmax_classify.sum(axis=0)\n # W1 gradient\n dW1 = softmax_classify.dot(W2.T) # [NxC] * [CxH] = [NxH]\n dfc1 = dW1 * (fully_connected1>0) # [NxH] . [NxH] = [NxH]\n dW1 = X.T.dot(dfc1) # [DxN] * [NxH] = [DxH]\n\n # b1 gradient\n db1 = dfc1.sum(axis=0)\n\n # regularization gradient\n dW1 += reg * 2 * W1\n dW2 += reg * 2 * W2\n\n grads = {'W1':dW1, 'b1':db1, 'W2':dW2, 'b2':db2}\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return loss, grads\n\n def train(self, X, y, X_val, y_val,\n learning_rate=1e-3, learning_rate_decay=0.95,\n reg=5e-6, num_iters=100,\n batch_size=200, verbose=False):\n \"\"\"\n Train this neural network using stochastic gradient descent.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving training data.\n - y: A numpy array of shape (N,) giving training labels; y[i] = c means that\n X[i] has label c, where 0 <= c < C.\n - X_val: A numpy array of shape (N_val, D) giving validation data.\n - y_val: A numpy array of shape (N_val,) giving validation labels.\n - learning_rate: Scalar giving learning rate for optimization.\n - learning_rate_decay: Scalar giving factor used to decay the learning rate\n after each epoch.\n - reg: Scalar giving regularization strength.\n - num_iters: Number of steps to take when optimizing.\n - batch_size: Number of training examples to use per step.\n - verbose: boolean; if true print progress during optimization.\n \"\"\"\n num_train = X.shape[0]\n iterations_per_epoch = max(num_train / batch_size, 1)\n\n # Use SGD to optimize the parameters in self.model\n loss_history = []\n train_acc_history = []\n val_acc_history = []\n\n for it in range(num_iters):\n X_batch = None\n y_batch = None\n\n #########################################################################\n # TODO: Create a random minibatch of training data and labels, storing #\n # them in X_batch and y_batch respectively. 
#\n #########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n batch_indices = np.random.choice(num_train, batch_size)\n X_batch = X[batch_indices]\n y_batch = y[batch_indices]\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # Compute loss and gradients using the current minibatch\n loss, grads = self.loss(X_batch, y=y_batch, reg=reg)\n loss_history.append(loss)\n\n #########################################################################\n # TODO: Use the gradients in the grads dictionary to update the #\n # parameters of the network (stored in the dictionary self.params) #\n # using stochastic gradient descent. You'll need to use the gradients #\n # stored in the grads dictionary defined above. #\n #########################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n for key in self.params:\n self.params[key] -= learning_rate * grads[key]\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n if verbose and it % 100 == 0:\n print('iteration %d / %d: loss %f' % (it, num_iters, loss))\n\n # Every epoch, check train and val accuracy and decay learning rate.\n if it % iterations_per_epoch == 0:\n # Check accuracy\n train_acc = (self.predict(X_batch) == y_batch).mean()\n val_acc = (self.predict(X_val) == y_val).mean()\n train_acc_history.append(train_acc)\n val_acc_history.append(val_acc)\n\n # Decay learning rate\n learning_rate *= learning_rate_decay\n\n return {\n 'loss_history': loss_history,\n 'train_acc_history': train_acc_history,\n 'val_acc_history': val_acc_history,\n }\n\n def predict(self, X):\n \"\"\"\n Use the trained weights of this two-layer network to predict labels for\n data points. For each data point we predict scores for each of the C\n classes, and assign each data point to the class with the highest score.\n\n Inputs:\n - X: A numpy array of shape (N, D) giving N D-dimensional data points to\n classify.\n\n Returns:\n - y_pred: A numpy array of shape (N,) giving predicted labels for each of\n the elements of X. For all i, y_pred[i] = c means that X[i] is predicted\n to have class c, where 0 <= c < C.\n \"\"\"\n y_pred = None\n\n ###########################################################################\n # TODO: Implement this function; it should be VERY simple! #\n ###########################################################################\n y_pred = np.argmax( self.loss(X), axis=1)\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n return y_pred\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.random.randn", "numpy.random.choice", "numpy.exp", "numpy.arange", "numpy.max", "numpy.maximum" ] ]
jsa4000/OpenGL-Python
[ "62055ba0c16f54507b7ba709d6691b2e9c7bc152" ]
[ "scripts/tests/test_opengl.py" ]
[ "import os\nimport math\nimport ctypes\nimport numpy as np\nimport pandas as pd\nimport OpenGL.GL as GL\nimport OpenGL.GL.shaders\nimport pygame\nfrom PIL import Image\nfrom pyrr import Quaternion, matrix44, Matrix44, Vector3\n\n\n# https://github.com/adamlwgriffiths/Pyrr/tree/master/pyrr\n# https://www.opengl.org/discussion_boards/showthread.php/199031-How-to-sort-draw-and-shader-calls-for-multiple-models\n# https://www.khronos.org/opengl/wiki/Vertex_Specification_Best_Practices\n\n# Great useful resource to learn OpenGL and all the concepts needed for understanding\n# ligths, materials, shaders, transformations, etc..\n# URL: https://learnopengl.com/, https://open.gl/drawing\n# https://codereview.stackexchange.com/questions/92769/managing-shaders-in-opengl-a-shader-class\n# https://www.packtpub.com/books/content/tips-and-tricks-getting-started-opengl-and-glsl-40\n# http://dominium.maksw.com/articles/physically-based-rendering-pbr/pbr-part-one/ \n# http://www.opengl-tutorial.org/beginners-tutorials/tutorial-8-basic-shading/\n# https://www.tomdalling.com/blog/modern-opengl/08-even-more-lighting-directional-lights-spotlights-multiple-lights/\n\n\"\"\"\n\nhttps://gamedev.stackexchange.com/questions/92832/in-opengl-whats-quicker-lots-of-smaller-vaos-or-one-large-one-updated-each-fr\nhttps://www.opengl.org/discussion_boards/showthread.php/197893-View-and-Perspective-matrices\nhttps://www.gamedev.net/topic/609159-would-like-help-with-glulookat-and-python-code/\n\n\nhttp://pyopengl.sourceforge.net/documentation/manual-3.0/gluLookAt.html\nhttp://stackoverflow.com/questions/3380100/how-do-i-use-glulookat-properly\nhttp://stackoverflow.com/questions/15006905/how-do-i-modify-the-perspective-of-an-opengl-using-glulookat\nhttp://stackoverflow.com/questions/26949617/pyopengl-glulookat-behaviour\nhttp://stackoverflow.com/questions/19078620/rotate-cube-to-look-at-mouse-from-python-in-opengl\n\"\"\"\n\ndef Rectangle3D():\n vertices = [\n -0.5, -0.5, 0.0, 1.0,\n 0.5, -0.5, 0.0, 1.0,\n 0.5, 0.5, 0.0, 1.0,\n -0.5, 0.5, 0.0, 1.0\n ]\n indices = [\n 0, 1, 2, \n 2, 3, 0\n ]\n return (np.asarray(vertices, dtype=np.float32),np.asarray(indices, dtype=np.uint32))\n\ndef cube3D(origin = [0.0,0.0,0.0], transform = None):\n \"\"\"\n This function will return a cube using normalized units in\n worl space. You can transform the oject by performing a\n transformation later.\n Also the position of he object by default will be the origin\n of the scene, in this case [0,0,0]\n In general the position will be defined in 4D position, since the\n transformation matrix need to be in 4-dimension to allow the trans-\n lation too. The fourth value will be always 1.0.\n \"\"\"\n # In order to create a cube or any other 3D geoemtry it's needed\n # to store all the information previousl in a buffer. This buffer\n # will be created and managed by opengl so at the end will be used\n # to represent the content of the buffer into the scene. \n # I addition to this it's needed to create Shaders (vertex and\n # fragment) so the graphics card can use to know what it's needed\n # prior to represent the data.\n # At the end, we are just defining attributes. But in this \n # particular case the first attribute that will be defined it's the\n # position. After the position we can define: vertex color, pscale,\n # normals, etc...\n # A cube will have a total of 8 vertices\n # \n # 2 3\n # 1 0\n #\n vertices = [\n # First we represent the vertices on the bottom y = -1\n -0.5, -0.5, 0.5, 1.0, # right, bottom, back vertex. 
(0)\n 0.5, -0.5, 0.5, 1.0, # left, bottom, back vertex. (1)\n 0.5, 0.5, 0.5, 1.0, # left, bottom, ack vertex. (2)\n -0.5, 0.5, 0.5, 1.0, # right, bottom, back vertex. (3) \n # The same vertex positions but on the top of the cube Y= 1\n -0.5, -0.5, -0.5, 1.0, # left, bottom, front vertex. (4)\n 0.5, -0.5, -0.5, 1.0, # right, bottom, front vertex. (5)\n 0.5, 0.5, -0.5, 1.0, # right, bottom, front vertex. (6)\n -0.5, 0.5, -0.5, 1.0 # left, bottom, front vertex. (7)\n ]\n #Conver the array into a numpy array (copy vs reference)\n nvertices = np.asarray(vertices, dtype=np.float32)\n # Defne the elements, in opengl it's needed to define triangles.\n # For each triangle we need to use 3 points or three vertices.\n # In this case we are going to define the indices, that corresponds\n # with the indexes of the vertices in the previos array. \n # A cube has a total of 6 faces: 4 for the sides + top + bottom.\n # However, we have to define triangles, so each face will be divided\n # by two. At the end we need 6 * 2 = 12 triangles in total\n # The trianglulation will be made in clockwise way. This is important\n # to know where the faces will be facing for shading them (normals).\n indices = [\n 0, 1, 2, 2, 3, 0, # Bottom face\n 4, 5, 6, 6, 7, 4, # Front face\n 4, 5, 1, 1, 0, 4, # left side\n 6, 7, 3, 3, 2, 6, # back face\n 5, 6, 2, 2, 1, 5, # Right Side\n 7, 4, 0, 0, 3, 7 # Top face\n ]\n #Conver the array into a numpy array (copy vs reference)\n nindices = np.asarray(indices, dtype=np.uint32)\n # The vertices are not repeated. You can have the vertices repeated if\n # you need different attrbiutes for them, like the normals, This will\n # be used to shade the elements in different ways. In some programs\n # This is called vertex normals. An it's used to crease or decrease\n # the weight for the transition between face to face. It's like define\n # smooth areas between the hard-surface areas.\n \n # It will return a tuple with the vertices and indices.\n #return (nvertices,nindices)\n return (vertices,indices)\n\ndef empty(value):\n \"\"\"\n Ths function will return is some list or variable is empty.\n For list, dict or any other collection will check there is \n more that one element. For other variables the condition\n will check if the object is None.\n \"\"\" \n if isinstance(value, (list, dict, np.ndarray, tuple, set)):\n if len(value) > 0:\n return False\n else:\n if value is not None:\n return False\n return True\n\ndef typeGL(dtype):\n \"\"\"\n This function will convert the types supported by OpenGL from\n numpy types. 
\n If dtype is not founded into the GLtypes the function will\n return GL.GL_FLOAT as default Open GL type\n \"\"\"\n # Check for some posibilities with the input, np.int32, 'int32','np.int32'\n if isinstance(dtype, (np.dtype)):\n dtype = dtype.name\n elif not isinstance(dtype, (str)):\n dtype = dtype.__name__\n # get the second part in case it can be splitted\n if len(dtype.split(\".\")) > 1:\n dtype = dtype.split(\".\")[-1]\n #Check the type of data has to be converted\n datatypes = {\n \"int8\" : GL.GL_BYTE, \t\t\t\n \"uint8\" : GL.GL_UNSIGNED_BYTE,\t\n\t \"int16\" : GL.GL_SHORT,\t\t\t\n\t \"uint16\" : GL.GL_UNSIGNED_SHORT,\t\n\t \"int32\" : GL.GL_INT,\t\t\t\t\n\t \"uint32\" : GL.GL_UNSIGNED_INT,\t\t\n\t \"float16\" : GL.GL_HALF_FLOAT,\t\t\n\t \"float32\" : GL.GL_FLOAT,\t\t\t\n\t \"float64\" : GL.GL_DOUBLE,\n \"fixed\" : GL.GL_FIXED # More compatibility for OS (float32)\n }\t\n # Check if the current datatype exists\n if dtype in datatypes:\t\t\n return datatypes[dtype]\n # if the data type does't exit returns default GL type\n return datatypes[np.float32]\n\ndef isfile(filename):\n \"\"\"\n Check if file exists\n \"\"\"\n if os.path.isfile(filename):\n return True\n return False\n\ndef readfile(filename):\n \"\"\"\n Read the current file entirely and return a \n string variable with all the content with\n special characters like new_line, etc..\n \"\"\"\n result = None\n if isfile(filename):\n with open(filename,'r') as file:\n result = file.read()\n return result\n\n\"\"\"\n For my OpenGL I will need the following classes or objects.\n\n [DONE] Display: window that manage the 3D view and Input Events \n fom the user. Also this class will be the one that\n implement the main loop for all the render.\n \n Shader: This class will be enable the creation of shaders\n programs that will be added to the main shader program\n that will be used.\n We can create Vertex, Fragment or Geoemtry shaders. These\n will be inked and use every time we want to render the\n geometry. \n Geometry: The class will be the main container for storing\n vertices, indices (element), vertex colors, normals and\n other attributes. Also the geometry will manage the uvs\n attributes and the materials that will be used for this\n particular geoemtry.\n - Vertices/Points\n - Indices (Faces)\n - Attributes (list with the Attrbiutes)\n Default attributes like Cd, N, P Uv could be \n created automatically for each object since they\n are used by default in all the 3D applications.\n\n Material: Each geoemtry obejct could have more that one material.\n In this case we have to decide if we are going to use\n different shaders or only one for the entire geometry.\n Camera: This class will allow the creation of different cameras\n to swtich indide the progrm. The camera will configure\n the View and Projection matrix.\n\n Light: Every scene have a light to lit the objects. 
These\n lights will be passed to the shaders to the objects\n would be shaded accordingly to these lights.\n \n There are several types of lights:\n Directional lights, Aerial lights, Spot lights,\n Ambient lights, Point lights.\n \n Also there are another indirect light that will be computed\n in real-time or render time that will depend on the environment.\n Ths light will be specular lights or bouncing lights.\n\n Finally, effects like Fresnel, bump, dissplacement, sub-surface\n scattering, reflection, refraction, translucency, layers, etc..\n are a cmobination between Materials and lights\n\n Volumes (VDB): The type of geoemtry is different from the way\n that polygons are created. This type of geometry\n requires additional manipulation and pipelone.\n\n Particles/Instances: This is used to represent millions of \n GEoemtry that will be packed into points. So the \n vertices, and indices will be instances.\n\n Sprites: Sprites is used for 2D and 3D. The idea is sprites\n will alway be facing to the camera. So there is no\n distorsion or perspective transformation that affect\n to this objects.\n \n Image: Image class will be used to create Interface controls,\n dashboard elements, etc.. \n \n\n\"\"\"\n\n\"\"\"\n The new pipeline used for OpenGL is that all operations, transformations,\n etc.. will be performed in the GPU. In order to do this these operations\n must be implemented into the shaders programs instead, so the GPU will be\n able to compile those shaders and execute them in Parallel.\n\n OpenGL works using states, so for eac state we configure the buffers, arrays,\n shaders, etc.. and finally draw. We perform the sema operation for all the \n geoemtry we have. Since the geometry could have different configuration \n and attributes, and shaders we need to operate with them separately.\n\n When the entire scene is complete, and all the geoemtry all correctly renderer\n it's time to flip the buffers to start the next frame.\n\n\n\"\"\"\n\nclass DisplayMode:\n fullscreen = pygame.FULLSCREEN\t# window is fullscreen\n resizable = pygame.RESIZABLE # window is resizeable\n noframe = pygame.NOFRAME\t# window has no border or controls\n doublebuf = pygame.DOUBLEBUF\t# use double buffer - recommended for HWSURFACE or OPENGL\n hwaccel = pygame.HWSURFACE # window is hardware accelerated, only possible in combination with FULLSCREEN\n opengl = pygame.OPENGL # window is renderable by OpenGL\n\nclass Display:\n \"\"\"\n This Class will manager the Display to interact with\n openGL. It will use OpenGL and a double buffer so\n it can sweep between the buffers per frame.\n\n Also the display is going to manage the interaction\n with the user regarding the events, mouse buttons and\n keypress done.\n \"\"\"\n \n # Default Display Mode that will be used when crating the window\n # Open GL and Double Buffer are neccesary to display OpenGL\n defaultmode = DisplayMode.opengl|DisplayMode.doublebuf\n\n # Default Background Color\n defaulBGColor = [0.0, 0.0, 0.0, 1.0]\n\n def __init__(self, title, width=800, height=600, bpp=16, displaymode = DisplayMode.resizable):\n # Initialize all the variables\n self.title = title\n self.width = width\n self.height = height\n self.bpp = bpp # RGBA 8*8*8*8 = 32 bits per pixel\n self.displaymode = displaymode\n # Initiali<e variables and Window\n self._initialize()\n\n def __enter__(self):\n # Enter will always return the object itself. 
Use with With expressons\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the variables and Memory\n self._dispose()\n\n def _dispose(self):\n try:\n #Finalize pygame\n pygame.quit()\n # SEt is closed to true\n self.isClosed = True\n except:\n print(\"ERROR: Error disposing the display.\")\n\n def _initialize(self):\n # dispose and close all the windows prior to initialize\n self._dispose()\n # Initialize and open the display window\n try:\n # Initialize pygame\n pygame.init()\n # Set title bar caption\n pygame.display.set_caption(self.title)\n # Initialize the display\n screen = pygame.display.set_mode((self.width, self.height), \n Display.defaultmode|self.displaymode,\n self.bpp)\n # Enable Depth test to avoid overlaped areas\n GL.glEnable(GL.GL_DEPTH_TEST)\n # Clear the image\n self.clear()\n # Set isclosed to false\n self.isClosed = False\n except:\n print(\"ERROR: Error creating the display.\")\n \n def close(self):\n # Set close to true\n self.isClosed = True\n\n def clear(self, color = defaulBGColor):\n # Clear will clean the windows color.\n GL.glClearColor(*color)\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n def update(self):\n # With depth buffer flip is the way to update screen\n pygame.display.flip()\n # Check to close the window after update the window\n if self.isClosed:\n self._dispose()\n\nclass DrawMode:\n triangles = GL.GL_TRIANGLES\t\n points = GL.GL_POINTS\n lines = GL.GL_LINES \n quads = GL.GL_QUADS\n tfan = GL.GL_TRIANGLE_FAN\n lstrip = GL.GL_LINE_STRIP\n tstrip = GL.GL_TRIANGLE_STRIP\n\nclass UsageMode:\n stream_draw = GL.GL_STREAM_DRAW\n stream_read = GL.GL_STREAM_READ\n stream_copy = GL.GL_STREAM_COPY\n static_draw = GL.GL_STATIC_DRAW\n static_read = GL.GL_STATIC_READ\n static_copy = GL.GL_STATIC_COPY\n dynamic_draw = GL.GL_DYNAMIC_DRAW\n dynamic_read = GL.GL_DYNAMIC_READ\n dynamic_copy = GL.GL_DYNAMIC_COPY \n\nclass Geometry:\n \"\"\"\n This element will create and store all the elements needed\n to Render a Geometrt\n \"\"\"\n\n # Declare the subindex that will be used for multiple (vector) attribites\n index_cols = [\"x\",\"y\",\"z\",\"w\"]\n #Defaule type that will be used for indexing using OpenGL elements array buffer\n index_type = np.uint32\n\n def __init__(self, name=None, shader=None, mode=DrawMode.triangles, usage=UsageMode.static_draw):\n # Initialize all the variables\n self.name = name\n self.shader = shader\n self.mode = mode\n self.usage = usage\n # Create new properties\n self.transform = Transform()\n # Attributes dictionary to store the columns for each component\n self._pointAttribCols = {}\n self._primAttribCols = {}\n # Point Attributes and elements Data frames\n self._dfPoints = pd.DataFrame()\n self._dfPrims = pd.DataFrame()\n # Vertex Array Object for all the Attributtes, elements, etc.\n self._VAO = None\n # Vertex Arrays Buffers for all the Attributes\n self._VAB = {}\n # Element Array Buffers for all the Attrbiutes\n self._EAB = None\n\n # Initiali<e variables and Window\n self._initialize()\n\n def __enter__(self):\n # Enter will always return the object itself. 
Use with With expressons\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memery stored\n self._dispose()\n\n def _dispose(self):\n # Dispose all the objects and memory allocated\n GL.glDeleteVertexArrays(1,self._VAO)\n\n def _has_indices(self):\n if \"Id\" in self._primAttribCols:\n return True\n return False\n\n def _create_vertex_buffer_array(self, name, attribute_name = None):\n \"\"\"\n This function only make sense to do when working with\n points (vertex) attributes.S\n The function will return the bind attribute attached\n to the shader. This could be stored into a list to \n detach later when copy all the buffers and after unbind\n VAO object.\n \"\"\"\n # Check if not attribute name has been mapped for the bidinng\n if attribute_name is None:\n attribute_name = name\n # Get the current vertices (flatten is not needed)\n vertices = self._dfPoints[self._pointAttribCols[name]].values\n # Create the vertex array buffer and send the positions into the GPU buffers\n self._VAB[name] = GL.glGenBuffers(1)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self._VAB[name] )\n GL.glBufferData(GL.GL_ARRAY_BUFFER, vertices.nbytes, vertices, self.usage)\n\n # Bind Attribute to the current shader. \n return self.shader.bind(attribute_name, len(vertices[0]), vertices.dtype)\n\n def _copy_to_buffer(self):\n # Bind the shaders attributes for the current geometry\n if self.shader is None:\n print(\"ERROR: No shader specified\")\n \n # Create a list with the attributes created and binded\n shader_attributes = []\n\n # Create a new VAO (Vertex Array Object). Only (1) VAO.\n # Note. Using bpp > 16bits doesn't work. This depend on the Graphic Card.\n self._VAO = GL.glGenVertexArrays(1)\n # Every time we want to use VAO we just have to bind it\n GL.glBindVertexArray(self._VAO)\n\n # Create the first attribute \"position\" (location = 0) (Mandatory)\n shader_attributes.append(self._create_vertex_buffer_array(\"P\",\"position\"))\n \n # Check wether the geometry has indexes\n if self._has_indices():\n # Get the current indices (flatten)\n indices = self._dfPrims[self._primAttribCols[\"Id\"]].values\n # Create the element array buffer and send the positions into the GPU buffers\n self._EAB = GL.glGenBuffers(1)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self._EAB);\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, self.usage);\n \n # Create and bind other Attributes\n for attrib in self._pointAttribCols.keys():\n if attrib != \"P\":\n shader_attributes.append(self._create_vertex_buffer_array(attrib))\n\n # Unbind VAO from OpenGL. 
Set to None = 0\n GL.glBindVertexArray(0)\n # Remove and unbind buffers\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)\n # Unbind all the Attributes \"position\" + Additionals\n for attribute in shader_attributes:\n self.shader.unbind(attribute)\n\n def _initialize(self):\n pass\n \n def update(self):\n # Depenging on the method to update the vertices using GPU or \n # inmediate OpenGL the update will be different.\n self._copy_to_buffer()\n \n def _createAttribute(self, df, name, size=3, values=None, default=None, dtype=None):\n #Check the data type if any\n if dtype is None:\n if empty(values):\n # Assign a default value\n dtype = np.float32\n else:\n # Get the type from the values\n if not isinstance(values,(np.ndarray)):\n # If not numpy then get the numppy array \n values = np.array(values)\n #Finally get the type from the numpy array\n dtype = values.dtype \n # Check any values or default values has been provided\n if empty(values) and empty(default):\n if df.empty:\n # If nothing to add exit the function\n return None\n else:\n # Create a default value (float)\n default = np.zeros((size), dtype=dtype)\n # Check the index value depending on the size\n if size > 1:\n columns = [name + Geometry.index_cols[i] for i in range(size)]\n else:\n columns = [name]\n # Check if values has been already defined\n if (empty(values) and not df.empty):\n # create an array with the same number of rows as the current\n values = np.tile(default,(len(df.index)))\n # Reshape the values [ Maybe should be normalized and flatten]\n values = np.array(np.reshape(values, (-1, size)) ,dtype=dtype)\n # Check if the DataFrame is empty\n if df.empty:\n # Add the current data into the attributes frame\n df = pd.DataFrame(values, columns=columns)\n else:\n # Add the current data into the attributes frame\n dfvalues = pd.DataFrame(values, columns=columns)\n # Append both dataframes\n df = pd.merge(df, dfvalues, how='inner', left_index=True, right_index=True)\n # Set the columns into the the current Point attribute\n return (df, columns)\n\n def getPrimsAttrib(self, name):\n return self._dfPrims[self._primAttribCols[name]]\n\n def delPrimsAttrib(self, name):\n self._dfPrims.drop(self._primAttribCols[name], axis=1, inplace=True)\n\n def addPrimsAttrib(self, name, values=None, size=3, default=None, dtype=None):\n # Get the new attribute and dataframe\n result = self._createAttribute(self._dfPrims,name,size,values,default,dtype)\n if not empty(result):\n # Set the returned dataframe with the new attribute\n self._dfPrims = result[0]\n # Set the columns into the the current Point attribute\n self._primAttribCols[name] = result[1]\n\n def addIndices(self, values, size=3, dtype=np.uint32):\n #Add prims Attributes Elements\n self.addPrimsAttrib(\"Id\", values, size, dtype=dtype)\n \n def getPointAttrib(self, name):\n return self._dfPoints[self._pointAttribCols[name]]\n\n def delPointAttrib(self, name):\n self._dfPoints.drop(self._pointAttribCols[name], axis=1, inplace=True)\n\n def addPointAttrib(self, name, values=None, size=3, default=None, dtype=None):\n # Get the new attribute and dataframe\n result = self._createAttribute(self._dfPoints,name,size,values,default,dtype)\n if not empty(result):\n # Set the returned dataframe with the new attribute\n self._dfPoints = result[0]\n # Set the columns into the the current Point attribute\n self._pointAttribCols[name] = result[1]\n\n def addPoints(self, values, size=3, dtype=np.float32):\n #Add point Attributes Position\n self.addPointAttrib(\"P\", values, size, dtype)\n\n def 
addNormals(self, values, size=3, dtype=np.float32):\n #Add point Attributes Normals\n self.addPointAttrib(\"N\", values, size, dtype)\n\n def render(self):\n # Bind the created Vertex Array Object\n GL.glBindVertexArray(self._VAO)\n # Draw the current geoemtry. Check if indices have been added\n if self._has_indices():\n GL.glDrawElements(self.mode, len(self._dfPrims.index) * 3, \n typeGL(Geometry.index_type), ctypes.c_void_p(0))\n else:\n GL.glDrawArrays(self.mode, 0, len(self._dfPoints.index))\n # Unbind VAO from GPU\n GL.glBindVertexArray(0)\n \n# Shader typas allow and extension for the files to use\nShaderTypes = {\n \"VERTEX_SHADER\" : { \"id\":\"vs\", \"type\":GL.GL_VERTEX_SHADER }, \n \"FRAGMENT_SHADER\" : { \"id\":\"fs\", \"type\":GL.GL_FRAGMENT_SHADER },\n \"GEOMETRY_SHADER\" : { \"id\":\"gs\", \"type\":GL.GL_GEOMETRY_SHADER }\n }\n\n# Transforms types availabile in shader\nTransformTypes = {\n \"WORLD_MATRIX\" : { \"name\":\"world_matrix\", \"size\":16, \"dtype\":np.float32 }, \n \"VIEW_MATRIX\" : { \"name\":\"view_matrix\", \"size\":16, \"dtype\":np.float32 }, \n \"PROJECTION_MATRIX\" : { \"name\":\"projection_matrix\", \"size\":16, \"dtype\":np.float32 }\n }\n\n# Transforms types availabile in shader\nGeometryAttributeTypes = {\n \"TEXTURE_COORDINATES\" : { \"name\":\"v_textcoord\", \"size\":2, \"dtype\":np.float32 }, \n \"NORMAL\" : { \"name\":\"v_normal\", \"size\":3, \"dtype\":np.float32 }, \n \"POSITION\" : { \"name\":\"v_pos\", \"size\":3, \"dtype\":np.float32 }, \n \"COLOR\" : { \"name\":\"v_color\", \"size\":4, \"dtype\":np.float32 }\n }\n\nclass Shader:\n \"\"\"\n This element will create and store all the elements needed\n to create a shader.\n \"\"\"\n\n # Shaders types\n shader_types = ShaderTypes\n\n # These are the default transforms that will be used\n uniform_transforms = TransformTypes\n \n def __init__(self, name=None, filepath=\"./\"):\n # Initialize all the variables\n self.name = name\n self.filepath = filepath\n # Initial variables\n self._shaders = {}\n self._uniforms = {}\n self._program = None\n # variable to tell if the shader has been initialized correctly\n self.initialized = False\n # Initiali<e variables and Window\n self._initialize()\n\n def __enter__(self):\n # Enter will always return the object itself. 
Use with With expressons\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memery stored\n self._dispose()\n\n def _dispose(self):\n # Dispose all the object and memory allocated\n for shader in self._shaders:\n GL.glDetachShader(self._program, self._shaders[shader])\n GL.glDeleleShader(self._shaders[shader])\n # Delete Shader Program\n if self._program:\n GL.glDeleteProgram(self._program)\n # Set initialized to false\n self.initialized = False\n\n def _initialize(self):\n # Dispose previous elemens created\n self._dispose()\n # Create the variables needed for the shader program\n self._shaders = {}\n self._uniforms = {}\n\n # Set initialized to false\n self.initialized = False\n\n # Create the main shader program\n self._program = GL.glCreateProgram()\n \n # Arrach the default shaders to the current program\n self._attach_default_shaders()\n\n # Bind and mapping the default attributes variables to the shader\n # - Bind location must be done before the linking process.\n # - Get location must be done after the linking process.\n self._bind_location_attributes()\n\n # Link the current shader program\n GL.glLinkProgram(self._program)\n # Check for link errors \n if self._check_shader_error(self._program, GL.GL_LINK_STATUS,True): \n return\n \n # Validate Program\n GL.glValidateProgram(self._program)\n # Check for link errors \n if self._check_shader_error(self._program, GL.GL_VALIDATE_STATUS,True):\n return\n \n # Get location uniforms variablesfrom the shader\n self._get_location_uniforms()\n\n # if all ok then set initialized to true\n self.initialized = True\n\n def _get_location_uniforms(self):\n for key,value in Shader.uniform_transforms.items():\n # Get the location for the curren shader loaded\n self._uniforms[key] = GL.glGetUniformLocation(self._program, value[\"name\"])\n\n def _bind_location_attributes(self):\n pass\n\n def _attach_default_shaders(self):\n # Generate the main path for the shaders to load\n filename = self.filepath + \"/\" + self.name + \".\"\n # Read all shader type files and link into the progrma\n for key,value in Shader.shader_types.items():\n shader = self._load_shader(filename + value[\"id\"], value[\"type\"])\n # Check the current shader has been loaded correctly\n if shader:\n # Finally attach the current shader into the program\n GL.glAttachShader(self._program, shader)\n # Add current shader\n self._shaders[key] = shader\n\n def _load_shader(self, filename, shader_type):\n # Check if the file exists\n if isfile(filename):\n #Load current shader code-source from file\n shader_source = readfile(filename)\n # Create curent shader\n shader = GL.glCreateShader(shader_type)\n # Set the source for the current sshader\n GL.glShaderSource(shader, shader_source) \n # Compile current shadershader\n GL.glCompileShader(shader)\n # Check for compiler errors \n if self._check_shader_error(shader, GL.GL_COMPILE_STATUS):\n return None\n # Return the current shader\n return shader\n #Return None if no file exists\n return None\n\n def _check_shader_error(self,shader,status,isProgram=False):\n if isProgram:\n # Check for errors in Programs \n if GL.glGetProgramiv(shader,status) != GL.GL_TRUE:\n print('Program load failed: {}'.format(GL.glGetProgramInfoLog(shader)))\n return True\n else:\n # Check for errors in Shaders \n if GL.glGetShaderiv(shader,status) != GL.GL_TRUE:\n print('Shader load failed: {}'.format(GL.glGetShaderInfoLog(shader)))\n return True\n return False \n\n def load(self, name):\n # Set the current file and initialize\n 
self.name = name\n # Call to initialize so it will load again the program and shader\n self._initialize()\n\n def use(self,use=True):\n \"\"\"\n Function to tell Open GL to use this Shader program.\n If the shader won't be used anymore then use use=False.\n \"\"\"\n if self.initialized:\n # Tell Open GL to use/not-use the current progrma\n if use:\n GL.glUseProgram(self._program)\n else:\n GL.glUseProgram(0)\n return True\n # Not initialized\n return False\n\n def bind(self, attribute_name, size, dtype=np.float32):\n \"\"\"\n This function will allow to bind attributes from the array buffer object\n to the shader. This operation will be done per VAO since it can store this\n binding. Again when a VAO will be opened and binding to OpenGL, this\n will bind again all the bindings previously performed during the creation.\n\n After unbind the current VAO and after loading all the buffers needed\n is very convenient to unbind the attribute after.\n\n Parameters:\n attribute_name (str): \n name of the attribute to use into the shader source-code.\n size:\n size of the current attribute. This is the number of elements, not\n de number of bytes etc.. ej. vector3 will have size = 3\n dtype:\n data-type of the values for the given attribute. If the vector contains\n int, float32, unit32, etc.. This must be given using GL types. Use\n typeGL function to convert numpy types into OpenGL types\n\n \"\"\"\n if self.initialized:\n # Get the location of the 'attribute_name' in parameter of our shader and bind it.\n attribute_id = GL.glGetAttribLocation(self._program, attribute_name)\n # Check if the current attribute is in the Shader\n if attribute_id != -1:\n #Enable current attribute in the shader\n GL.glEnableVertexAttribArray(attribute_id)\n # Describe the attribute data layout in the buffer\n GL.glVertexAttribPointer(attribute_id, size, typeGL(dtype),\n False, 0, ctypes.c_void_p(0))\n # Return the attribute id\n return attribute_id\n else:\n # Attribute has been discarted for the compiler or doesn't exist.\n print (\"Warning: Current attribute {} is not in the shader\".format(attribute_name))\n # Return false is not initialized\n return False\n\n def unbind(self, attribute_id):\n \"\"\"\n This operation will be performed after unbind the VAO obhect. The parameter\n needed will be the result of the previous result that the bind function call\n returns with the attribute id.\n \"\"\"\n if self.initialized:\n # Unbind Attribute\n GL.glDisableVertexAttribArray(attribute_id)\n \n def update(self, name, value):\n # Depending on the uniform name to update we have to select the proper operator.\n GL.glUniformMatrix4fv(self._uniforms[name], 1, GL.GL_FALSE, value)\n\ndef load_image(filename, bpp=8):\n #Load the image using the path configured\n image = Image.open(filename).transpose(Image.FLIP_TOP_BOTTOM)\n if (bpp == 32):\n dtype = np.uint32\n elif (bpp == 16):\n dtype = np.uint16\n else:\n dtype = np.uint8\n # Convert the image to a numpy string. 
Converto to uint8 image.\n image_data = np.array(image.getdata(), dtype)\n return [image_data, image.size]\n\nclass Texture:\n \"\"\"\n This class will create and store all the elements needed\n to create the texture.\n The module needed to load the images is Pillow\n from PIL import Image\n \"\"\"\n # Maximun number of textures\n max_textures = 32\n\n def __init__(self, filename):\n # Initialize all the variables\n self.filename = filename\n # Create a texture variable with the pointer to the buffer\n self._texture = None\n # Initiali<e variables and Window\n self._initialize()\n\n def __enter__(self):\n # Enter will always return the object itself. Use with With expressons\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memery stored\n self._dispose()\n\n def _dispose(self):\n pass\n\n def _initialize(self):\n # Create the texture and copy into OpenGL\n self._texture = self._load_Texture(self.filename)\n\n def _load_Texture(self,filename):\n # Check if the file exists\n if isfile(filename):\n # Load the image using the path configured\n img_data, size = load_image(filename)\n width, height = size\n # Generate texture buffer to load into GPU\n texture = GL.glGenTextures(1)\n # Set initial parameters needed prior send the image to OpenGL\n GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)\n # Bind current texture buffer to load the data\n GL.glBindTexture(GL.GL_TEXTURE_2D, texture)\n # Set parameters to tell OpenGL how to draw the image\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)\n GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGB, width, height, 0,\n GL.GL_RGB, typeGL(img_data.dtype), img_data)\n # Create different Mipmaps for the current texure\n GL.glGenerateMipmap(GL.GL_TEXTURE_2D)\n return texture\n # If not exist return None\n return None\n \n def bind(self, count):\n \"\"\"\n This method will bind the current texture to be used to the graphic card\n Parameter:\n count: this is used to assign a free slot to the texture into OpenGL\n [ Some graphic cards could have a limitation in the number of ]\n [ textures that can store, depending on the memory. ]\n \"\"\"\n if self._texture and (count > 0 and count < Texture.max_textures + 1 ):\n # Following we will activate the texture in a slot \n GL.glActiveTexture(GL.GL_TEXTURE0 + count)\n GL.glBindTexture(GL.GL_TEXTURE_2D, self._texture)\n\nclass Transform:\n \"\"\"\n This class will manage the basic transformation that can be\n performed to a geometry.\n\n This class uses pyrr module that it's a packadge with many\n operations that can be used directly with OpenGL. In this class\n the selected approach will be Object Oriented because its features.\n Documentation can be founf in the following link:\n \n https://github.com/adamlwgriffiths/Pyrr\n\n Parameters:\n default position, rotation and scale can be set intially.\n \n To-Do:\n Pivot implementation. 
So it's rotate based on a point.\n Advanced transformations such as shear, bend, twist, et..\n\n \"\"\"\n def __init__(self, position=None, rotation=None, scale=None):\n # Create private members for the setters (properties)\n self.__position = self._get_Vector3(position)\n self.__rotation = self._get_Vector3(rotation)\n self.__scale = self._get_Vector3(scale)\n # Initiali<e variables and Window\n self._initialize()\n\n def _get_Vector3(self, value):\n if empty(value):\n return None\n # Check if it's already a Vector3 instance\n if isinstance(value,(Vector3)):\n return value\n else:\n return Vector3(value)\n\n def _initialize(self):\n # Create default transformations: position, rotation and scale\n if self.position is None:\n self.position = Vector3([0.0,0.0,0.0])\n if self.rotation is None:\n self.rotation = Vector3([0.0,0.0,0.0])\n if self.scale is None:\n self.scale = Vector3([1.0,1.0,1.0])\n\n @property\n def position(self):\n return self.__position\n\n @position.setter\n def position(self, value):\n self.__position = self._get_Vector3(value)\n\n @property\n def rotation(self):\n return self.__rotation\n\n @rotation.setter\n def rotation(self, value):\n self.__rotation = self._get_Vector3(value)\n\n @property\n def scale(self):\n return self.__scale\n\n @scale.setter\n def scale(self, value):\n self.__scale = self._get_Vector3(value)\n\n @property\n def model(self):\n \"\"\"\n This property will perform the current transformation and\n return a 4x4 matrix with the transformation matrix. This\n matrix could be send to the shader so it can perform the\n model-view transformation for any geometry\n \"\"\"\n # Create scale matrix transformation\n scale = Matrix44.from_scale(self.scale)\n\n #Convert the current degrees vector into radians\n rotation = np.radians(self.rotation)\n rotationY = Quaternion.from_x_rotation(rotation.x)\n rotationX = Quaternion.from_y_rotation(rotation.y)\n rotationZ = Quaternion.from_z_rotation(rotation.z)\n # compute all rotations.\n rotation = rotationX * rotationY * rotationZ\n\n # Create translation matrix transformation\n translation = Matrix44.from_translation(self.position)\n\n # Compute transformation matrix. convert to float32\n return np.array(scale * rotation * translation,dtype=np.float32)\n\n def transform(self, point):\n \"\"\"\n This function will apply the current transformation to\n the following point. \n \"\"\"\n # Get the current tranformation matrix\n matrix = self.model\n # transform our point by the matrix to model-view\n return matrix * self._get_Vector3(point)\n\n def __enter__(self):\n # Enter will always return the object itself. Use with With expressons\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memery stored\n self._dispose()\n\n def _dispose(self):\n pass\n\ndef convert(value, dtype=np.float32):\n return np.array(value,dtype)\n\nclass Camera:\n \"\"\"\n This class will manage the basic functionality that can be\n performed with a camera.\n\n This class uses pyrr module that it's a packadge with many\n operations that can be used directly with OpenGL. In this class\n the selected approach will be Object Oriented because its features.\n Documentation can be founf in the following link:\n \n https://github.com/adamlwgriffiths/Pyrr\n\n Parameters:\n default perspective, rotation and scale can be set intially.\n \n To-Do:\n Pivot implementation. 
So it's rotate based on a point.\n Advanced transformations such as shear, bend, twist, et..\n\n \"\"\"\n def __init__(self, position=[0.0,0.0,-3.0], fov=70.0, aspect=1.33, zNear=0.01, zFar=1000.0):\n # Create private members for the setters (properties)\n # View Matrix\n self.__position = Vector3(convert(position))\n self._forward = convert([0.0,0.0,1.0])\n self._up = convert([0.0,1.0,0.0])\n # Prejection Matrix\n self._fov = fov\n self._aspect = aspect\n self._zNear = zNear\n self._zFar = zFar\n \n # Initiali<e variables and Window\n self._initialize()\n\n def _initialize(self):\n pass\n\n def pan(self, value):\n \"\"\"\n Rotate using up direction\n \"\"\"\n \n\n def tilt(self, value):\n \"\"\"\n Rotate using cross product between up and forward vectors.\n \"\"\"\n pass\n\n def roll(self, value):\n \"\"\"\n Rotate using the forward direction\n \"\"\"\n pass\n\n @property\n def position(self):\n return self.__position\n\n @position.setter\n def position(self, value):\n self.__position = Vector3(convert(value))\n\n def setPerspective(fov=70.0, aspect=1.33, zNear=0.01, zFar=1000.0):\n \"\"\"\n Redefine the perspective view of the Camera\n \"\"\"\n self._fov = fov\n self._aspect = aspect\n self._zNear = zNear\n self._zFar = zFar\n \n @property\n def view(self):\n return self.lookAt(self.position,self.position + self._forward, self._up)\n\n def lookAt(self, position, target, up):\n ez = position - target\n ez = ez / np.linalg.norm(ez)\n\n ex = np.cross(up, ez)\n ex = ex / np.linalg.norm(ex)\n\n ey = np.cross(ez, ex)\n ey = ey / np.linalg.norm(ey)\n\n rmat = np.eye(4)\n rmat[0][0] = ex[0]\n rmat[0][1] = ex[1]\n rmat[0][2] = ex[2]\n\n rmat[1][0] = ey[0]\n rmat[1][1] = ey[1]\n rmat[1][2] = ey[2]\n\n rmat[2][0] = ez[0]\n rmat[2][1] = ez[1]\n rmat[2][2] = ez[2]\n\n tmat = np.eye(4)\n tmat[0][3] = -position[0]\n tmat[1][3] = -position[1]\n tmat[2][3] = -position[2]\n\n return np.dot(rmat, tmat).transpose()\n\n\n @property\n def projection(self):\n return matrix44.create_perspective_projection_matrix(self._fov,self._aspect,self._zNear,self._zFar)\n\n def __enter__(self):\n # Enter will always return the object itself. 
Use with With expressons\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n # Clean all the memery stored\n self._dispose()\n\n def _dispose(self):\n pass\n\n\ndef Triangle():\n #Create default vertices 4f\n vertices = [ -0.5, -0.5, 0.0, 1.0,\n 0.0, 0.5, 0.0, 1.0,\n 0.5, -0.5, 0.0, 1.0]\n indices = [ 0, 1, 2 ]\n color = [ 1.0, 0.0, 0.0, 1.0,\n 0.0, 1.0, 0.0, 1.0,\n 0.0, 0.0, 1.0, 1.0]\n uvs = [0.0, 0.0,\n 0.5, 1.0,\n 1.0, 0.0 ]\n return [vertices, indices, color, uvs]\n\n# Testing pourposes main function\nif __name__ == \"__main__\":\n # Create the Display with the main window\n with Display(\"Main Window\",800,600) as display:\n \n #georaw = cube3D()\n georaw = Triangle()\n # Create a Camera\n camera = Camera([0.0,0.0,-3.0],70.0,800/600,0.01,1000)\n # Create a texture\n texture = Texture(\"./assets/images/texture.png\")\n # Create the default shader\n shader = Shader(\"default_shader\", \"./assets/shaders\")\n # Create the geometry\n geo = Geometry(\"geo\",shader,mode=DrawMode.triangles)\n #geo.addPoints(georaw[0], 4)\n geo.addPointAttrib(\"P\",georaw[0], 4)\n #geo.addIndices(georaw[1])\n geo.addPointAttrib(\"Cd\",georaw[2], 4)\n geo.addPointAttrib(\"UV\",georaw[3], 2)\n #geo.addPoints(vertices, 4)\n geo.update()\n\n # Create a counter\n counter = 0\n # Start the Main loop for the program\n while not display.isClosed: \n # Manage the event from the gui\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n display.close()\n if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:\n display.close()\n if event.type == pygame.KEYUP and event.key == pygame.K_UP:\n camera.position.y += 0.1\n if event.type == pygame.KEYUP and event.key == pygame.K_DOWN:\n camera.position.y -= 0.1\n if event.type == pygame.KEYUP and event.key == pygame.K_RIGHT:\n camera.position.x += 0.1\n if event.type == pygame.KEYUP and event.key == pygame.K_LEFT:\n camera.position.x -= 0.1\n\n \"\"\"\n Get the Key-codes used in pygame\n events = [ \"QUIT\",\n \"ACTIVEEVENT\",\n \"KEYDOWN\",\n \"KEYUP\",\n \"MOUSEMOTION\",\n \"MOUSEBUTTONUP\",\n \"MOUSEBUTTONDOWN\",\n \"JOYAXISMOTION\",\n \"JOYBALLMOTION\",\n \"JOYHATMOTION\",\n \"JOYBUTTONUP\",\n \"JOYBUTTONDOWN\",\n \"USEREVENT\"]\n\n with open(\"keycodes.txt\", mode=\"w\") as file:\n for item in sorted(pygame.__dir__()):\n if str(item).startswith(\"K\"):\n line = \"{} = {}\\n\".format( str(item),getattr(pygame,item))\n file.write(line)\n for item in events:\n line = \"{} = {}\\n\".format( str(item),getattr(pygame,item))\n file.write(line)\n\n \"\"\"\n\n \n # Clear the display\n display.clear()\n \n # Render all the elements that share the same shader.\n # Use the current Shader configuration\n shader.use()\n # Use the current texture after the shader\n texture.bind(0)\n \n # # Perform some motion to the object\n # sincount = math.sin(counter)\n # coscount = math.cos(counter)\n\n # geo.transform.position.x = sincount\n # geo.transform.rotation.z = counter*50\n # geo.transform.scale = [coscount,coscount,coscount]\n\n # counter += 0.01;\n\n shader.update(\"WORLD_MATRIX\",geo.transform.model)\n shader.update(\"VIEW_MATRIX\",camera.view)\n shader.update(\"PROJECTION_MATRIX\",camera.projection)\n\n # Render the geometry\n geo.render()\n # End Use the current Shader configuration\n shader.use(False)\n\n # Update the display\n display.update()\n\n # End of the program\n\n" ]
[ [ "numpy.eye", "numpy.zeros", "pandas.DataFrame", "numpy.cross", "numpy.asarray", "numpy.reshape", "pandas.merge", "numpy.array", "numpy.dot", "numpy.linalg.norm", "numpy.radians" ] ]
fadhilmch/FaceRecognition
[ "03ecddb15db79b23ff612b119c9678f5b2ce1194" ]
[ "demo_video_face_recognition.py" ]
[ "import cv2\nimport time\nimport numpy as np\nfrom detection.FaceDetector import FaceDetector\nfrom recognition.FaceRecognition import FaceRecognition\nfrom classifier.FaceClassifier import FaceClassifier\n\nVIDEO_INPUT_FILE = './media/test_video/Zidane_1.avi'\nVIDEO_OUTPUT_FILE = './media/test_video_output/Zidane_Recognition_1.avi'\nFACE_CLASSIFIER_MODEL = './classifier/trained_classifier_lfw.pkl'\n\nface_detector = FaceDetector()\nface_recognition = FaceRecognition()\nface_classfier = FaceClassifier(FACE_CLASSIFIER_MODEL)\nvideo_capture = cv2.VideoCapture(VIDEO_INPUT_FILE)\n\nfourcc = cv2.VideoWriter_fourcc(*'XVID')\nout = cv2.VideoWriter(VIDEO_OUTPUT_FILE, fourcc, 24.0, (int(video_capture.get(3)),int(video_capture.get(4))))\n\nprint('Start Recognition!')\nprevTime = 0\nwhile video_capture.isOpened():\n ret, frame = video_capture.read()\n\n curTime = time.time() # calc fps\n find_results = []\n\n frame = frame[:, :, 0:3]\n boxes, scores = face_detector.detect(frame)\n face_boxes = boxes[np.argwhere(scores>0.3).reshape(-1)]\n face_scores = scores[np.argwhere(scores>0.3).reshape(-1)]\n print('Detected_FaceNum: %d' % len(face_boxes))\n\n if len(face_boxes) > 0:\n for i in range(len(face_boxes)):\n box = face_boxes[i]\n cropped_face = frame[box[0]:box[2], box[1]:box[3], :]\n cropped_face = cv2.resize(cropped_face, (160, 160), interpolation=cv2.INTER_AREA)\n feature = face_recognition.recognize(cropped_face)\n name = face_classfier.classify(feature)\n\n cv2.rectangle(frame, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)\n\n # plot result idx under box\n text_x = box[1]\n text_y = box[2] + 20\n cv2.putText(frame, name, (text_x, text_y), cv2.FONT_HERSHEY_COMPLEX_SMALL,\n 1, (0, 0, 255), thickness=1, lineType=2)\n else:\n print('Unable to align')\n\n sec = curTime - prevTime\n prevTime = curTime\n fps = 1 / (sec)\n str = 'FPS: %2.3f' % fps\n text_fps_x = len(frame[0]) - 150\n text_fps_y = 20\n cv2.putText(frame, str, (text_fps_x, text_fps_y),\n cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 0), thickness=1, lineType=2)\n\n out.write(frame)\n\nvideo_capture.release()\nout.release()\ncv2.destroyAllWindows()\n\n" ]
[ [ "numpy.argwhere" ] ]
SiChen-cuc/Tiyuntsong
[ "b1797b543f21d8e0cc80b2934aa1f5a70df5ee45" ]
[ "src/multi.py" ]
[ "import sabre as env\nimport math\nfrom network import Zero\nfrom tracepool import tracepool\nimport numpy as np\nfrom rules import rules\nfrom log import log\nimport os\nfrom multiprocessing import cpu_count\nimport multiprocessing as mp\n\nNUM_AGENT = 2\nUSE_CORES = cpu_count()\n\n\ndef agent(agent_id, net_params_queue, exp_queue):\n agent_list = []\n for p in range(NUM_AGENT):\n agent_list.append(Zero(str(p)))\n\n while True:\n net_params, _tracepool = net_params_queue.get()\n for i in range(NUM_AGENT):\n agent_list[i].set_params(net_params[i])\n\n _trace_result = []\n _global_history = []\n for p in range(NUM_AGENT):\n _global_history.append([])\n for _trace in _tracepool:\n agent_result = []\n for _agent in agent_list:\n total_bitrate, total_rebuffer, total_smoothness = env.execute(\n abr=_agent, trace=_trace)\n agent_result.append(\n (total_bitrate, total_rebuffer, total_smoothness))\n agent_reward = []\n for _index in range(len(agent_list[0].quality_history)):\n res = rules([agent_list[0].quality_history[_index],\n agent_list[1].quality_history[_index]])\n agent_reward.append(res)\n agent_reward = np.array(agent_reward)\n for _index, _agent in enumerate(agent_list):\n _history = _agent.get_action()\n reward = agent_reward[:, _index]\n _idx = 0\n s_batch, a_batch, r_batch, g_batch = [], [], [], []\n for (state, action, gan) in _history:\n s_batch.append(state)\n a_batch.append(action)\n r_batch.append(reward[_idx])\n g_batch.append(gan)\n _idx += 1\n _global_history[_index].append(\n (s_batch, a_batch, r_batch, g_batch))\n _trace_result.append(agent_result)\n exp_queue.put([_global_history, _trace_result])\n\n\ndef chunks(arr, m):\n if (len(arr) < m):\n m = len(arr)\n tmp, tmp_index = [], []\n idx = 0\n for i in range(m):\n tmp.append([])\n tmp_index.append([])\n for i in range(len(arr)):\n tmp[idx].append(arr[i])\n tmp_index[idx].append(i)\n idx += 1\n idx %= m\n return tmp, tmp_index\n\n\ndef central(net_params_queues, exp_queues):\n global_agent_list = []\n agent_elo = []\n\n _log = log('zero.txt')\n #log_file = open('zero.txt', 'w')\n elo_file = open('elo.txt', 'w')\n for p in range(NUM_AGENT):\n global_agent_list.append(Zero(str(p)))\n agent_elo.append(1000.0)\n\n _tracepool = tracepool(ratio=0.5)\n _split_pool, _idx_pool = chunks(_tracepool.get_list(), USE_CORES)\n while True:\n # synchronize the network parameters of work agent\n _params = []\n agent_elo = []\n global_trace_pool = []\n for p in range(len(_tracepool.get_list())):\n global_trace_pool.append([])\n\n for p in range(NUM_AGENT):\n agent_elo.append(1000.0)\n _params.append(global_agent_list[p].get_params())\n\n for i in range(USE_CORES):\n net_params_queues[i].put([_params, _split_pool[i]])\n\n _tmp = [0, 0, 0]\n for i in range(USE_CORES):\n _global_history, _trace_result = exp_queues[i].get()\n for p in range(NUM_AGENT):\n _history = _global_history[p]\n global_agent_list[p].set_action(_history)\n for p in range(len(_trace_result)):\n global_trace_pool[_idx_pool[i][p]] = _trace_result[p]\n for _trace_res in global_trace_pool: \n tmp_battle = rules(_trace_res)\n _log.write_log(_trace_res)\n _tmp[np.argmax(tmp_battle)] += 1\n _tmp[-1] += 1\n _rate, agent_elo = _tracepool.battle(agent_elo, global_trace_pool)\n _delta_array = [_tmp[0] / _tmp[-1], _tmp[1] / _tmp[-1]]\n for _agent, _d in zip(global_agent_list, _delta_array):\n _agent.learn(_d)\n _agent.clear()\n\n for p in agent_elo:\n elo_file.write(str(p) + ' ')\n elo_file.write('\\n')\n elo_file.flush()\n print(_rate)\n print(agent_elo)\n print(round(_tmp[0] * 100.0 
/ _tmp[-1], 2), '%',\n ',', round(_tmp[1] * 100.0 / _tmp[-1], 2), '%')\n\n _log.write_line()\n os.system('python draw.py')\n\n\ndef main():\n net_params_queues = []\n exp_queues = []\n for i in range(USE_CORES):\n net_params_queues.append(mp.Queue(1))\n exp_queues.append(mp.Queue(1))\n\n coordinator = mp.Process(target=central,\n args=(net_params_queues, exp_queues))\n coordinator.start()\n agents = []\n for i in range(USE_CORES):\n agents.append(mp.Process(target=agent,\n args=(i, net_params_queues[i], exp_queues[i])))\n for p in agents:\n p.start()\n\n # wait unit training is done\n coordinator.join()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.argmax" ] ]
zygn/f1tenth_gym
[ "fbb5b6a2b84bff0bd1e4eeeb62d963c74632f787" ]
[ "playground/random_obs.py" ]
[ "import random\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\n\nfile_name = 'map.png'\nimg = cv2.imread(file_name, -1)\n\ndst = img.copy()\nh, w = img.shape\nepsilon = 0.9999\n\n# 그레이스케일과 바이너리 스케일 변환\nth = cv2.bitwise_not(dst)\ncontours, _ = cv2.findContours(th, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n\ncontours.sort(key=len)\nsize_contours = []\nprint(\"i found contour(s):\", end=\"\")\nfor i in contours: size_contours.append(len(i))\nprint(size_contours)\nprint(\"epslion: \", epsilon)\n\n\n# 외곽선 검출\nin_contour = contours[0]\nout_contour = contours[-1]\n\n# 바깥쪽 라인 \ncv2.drawContours(dst, [out_contour], -1, 100, 3)\ncv2.fillPoly(dst, [out_contour], 100)\n# 안쪽 라인 \ncv2.drawContours(dst, [in_contour], -1, 200, 3)\ncv2.fillPoly(dst, [in_contour], 200)\n\n\n# 트랙안에서 점찍기\npbar = tqdm(range(w))\nfor i in pbar:\n for j in range(h):\n if np.all(dst[i][j] == np.array(100)) and random.random() >= epsilon:\n cv2.circle(img, (j,i), 2, (0,0,0), -1)\n pbar.set_description(f\"\\u001b[35mAdded Obstacle - [{j},{i}]\\u001b[0m \")\n # print(f\"added obs: [{j},{i}] \\r\", end=\"\")\nprint()\n\ncv2.imwrite(f\"obs_{file_name}\", img)\n# cv2.imshow('cont', dst)\n# cv2.imshow('obs_img', img)\n# cv2.waitKey()\n# cv2.destroyAllWindows()" ]
[ [ "numpy.array" ] ]
pmrowla/dvclive
[ "fe95917c965db210a6a11ff3d6f287c2df298330" ]
[ "tests/test_catalyst.py" ]
[ "import os\n\nimport pytest\nfrom catalyst import dl\nfrom catalyst.contrib.datasets import MNIST\nfrom catalyst.data import ToTensor\nfrom catalyst.utils.torch import get_available_engine\nfrom torch import nn, optim\nfrom torch.utils.data import DataLoader\n\nimport dvclive\nfrom dvclive.catalyst import DvcLiveCallback\n\n# pylint: disable=redefined-outer-name, unused-argument\n\n\[email protected]\ndef loaders():\n train_data = MNIST(\n os.getcwd(), train=True, download=True, transform=ToTensor()\n )\n valid_data = MNIST(\n os.getcwd(), train=False, download=True, transform=ToTensor()\n )\n return {\n \"train\": DataLoader(train_data, batch_size=32),\n \"valid\": DataLoader(valid_data, batch_size=32),\n }\n\n\[email protected]\ndef runner():\n return dl.SupervisedRunner(\n engine=get_available_engine(),\n input_key=\"features\",\n output_key=\"logits\",\n target_key=\"targets\",\n loss_key=\"loss\",\n )\n\n\ndef test_catalyst_callback(tmp_dir, runner, loaders):\n dvclive.init(\"dvc_logs\")\n\n model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.02)\n\n runner.train(\n model=model,\n criterion=criterion,\n optimizer=optimizer,\n loaders=loaders,\n num_epochs=2,\n callbacks=[\n dl.AccuracyCallback(input_key=\"logits\", target_key=\"targets\"),\n DvcLiveCallback(),\n ],\n logdir=\"./logs\",\n valid_loader=\"valid\",\n valid_metric=\"loss\",\n minimize_valid_metric=True,\n verbose=True,\n load_best_on_end=True,\n )\n\n assert os.path.exists(\"dvc_logs\")\n\n train_path = tmp_dir / \"dvc_logs/train\"\n valid_path = tmp_dir / \"dvc_logs/valid\"\n\n assert train_path.is_dir()\n assert valid_path.is_dir()\n assert (train_path / \"accuracy.tsv\").exists()\n\n\ndef test_catalyst_model_file(tmp_dir, runner, loaders):\n dvclive.init(\"dvc_logs\")\n\n model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=0.02)\n\n runner.train(\n model=model,\n engine=runner.engine,\n criterion=criterion,\n optimizer=optimizer,\n loaders=loaders,\n num_epochs=2,\n callbacks=[\n dl.AccuracyCallback(input_key=\"logits\", target_key=\"targets\"),\n DvcLiveCallback(\"model.pth\"),\n ],\n logdir=\"./logs\",\n valid_loader=\"valid\",\n valid_metric=\"loss\",\n minimize_valid_metric=True,\n verbose=True,\n load_best_on_end=True,\n )\n assert (tmp_dir / \"model.pth\").is_file()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.Flatten", "torch.nn.Linear", "torch.nn.CrossEntropyLoss" ] ]
lynshao/NoisyNN
[ "2c827dbe697f4a8d8f9b2cb8abb2aa43a749fa16" ]
[ "TrainingNoise_CIFAR10/Update.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, Dataset\n\n\nclass DatasetSplit(Dataset):\n def __init__(self, dataset, idxs):\n self.dataset = dataset\n self.idxs = list(idxs)\n\n def __len__(self):\n return len(self.idxs)\n\n def __getitem__(self, item):\n image, label = self.dataset[self.idxs[item]]\n return image, label\n\n\nclass LocalUpdate(object):\n def __init__(self, args, dataset=None, idxs=None):\n self.args = args\n self.loss_func = nn.CrossEntropyLoss()\n self.selected_clients = []\n self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), batch_size=self.args.local_bs, shuffle=True, num_workers=4)\n\n\n def train(self, net, history_dict, lrT):\n net.train()\n # train and update\n optimizer = torch.optim.SGD(net.parameters(), lr=lrT, momentum=0.9, weight_decay=5e-4)\n\n epoch_loss = []\n for iter in range(self.args.local_ep):\n batch_loss = []\n for batch_idx, (images, labels) in enumerate(self.ldr_train):\n images, labels = images.to(self.args.device), labels.to(self.args.device)\n net.zero_grad()\n log_probs = net(images)\n loss = self.loss_func(log_probs, labels)\n loss.backward()\n optimizer.step()\n batch_loss.append(loss.item())\n epoch_loss.append(sum(batch_loss)/len(batch_loss))\n \n current_dict = net.state_dict()\n\n for k in current_dict.keys():\n current_dict[k] -= history_dict[k]\n\n return current_dict, sum(epoch_loss) / len(epoch_loss)\n\n" ]
[ [ "torch.nn.CrossEntropyLoss" ] ]
smolsbs/aoc
[ "558cc68b94ead332190e14ad7a9ecd6ca5c4aa5a" ]
[ "2020/day-11/day11.py" ]
[ "#!/usr/bin/env python3\nimport numpy as np\n\ndirs = [(0,1), (1,0), (0, -1), (-1, 0), (1,1), (1,-1), (-1, 1), (-1,-1)]\n\ndef parse_seats(line):\n v = []\n for s in line:\n if s == 'L':\n v.append(0)\n else:\n v.append(-1)\n return v\n\ndef allpos(xx,yy):\n all_p = []\n for x in xx:\n for y in yy:\n all_p.append( (x,y) )\n return all_p\n\ndef part1(data):\n max_y = len(data)\n max_x = len(data[0])\n changed = 1\n new_d = np.copy(data)\n while changed:\n changed = 0\n for y in range(max_y):\n for x in range(max_x):\n if data[y,x] == -1:\n continue\n near = data[max(0,y-1):y+2, max(0, x-1):x+2]\n n_occup = np.count_nonzero(near == 1) - data[y,x]\n if data[y, x] == 0:\n if n_occup == 0:\n new_d[ y, x] = 1\n changed = 1\n elif data[y, x] == 1:\n if n_occup >= 4:\n new_d[y,x] = 0\n changed = 1\n data = np.copy(new_d)\n \n all_occ = np.count_nonzero(data == 1)\n return all_occ\n\n# SPAGHETT CODE\ndef part2(data):\n max_y = len(data)\n max_x = len(data[0])\n all_p = allpos(range(max_y), range(max_x))\n\n changed = 1\n new_d = np.copy(data)\n while changed:\n changed = 0\n for p in all_p:\n c_pos = p\n seats = 0\n for direct in dirs:\n new_p = (c_pos[0]+direct[0], c_pos[1]+direct[1])\n while (new_p[0] >= 0 and new_p[0] < max_y) and (new_p[1] >= 0 and new_p[1] < max_x):\n if data[new_p[0], new_p[1]] == 1:\n seats += 1\n break\n elif data[new_p[0], new_p[1]] == 0:\n break\n new_p = (new_p[0]+direct[0], new_p[1]+direct[1])\n if data[c_pos[0], c_pos[1]] == 0:\n if seats == 0:\n new_d[c_pos[0], c_pos[1]] = 1\n changed = 1\n elif data[c_pos[0], c_pos[1]] == 1:\n if seats >= 5:\n new_d[c_pos[0], c_pos[1]] = 0\n changed = 1\n data = np.copy(new_d)\n\n all_occ = np.count_nonzero(data == 1)\n return all_occ\n \n\ndef main():\n with open('input', 'r') as fp:\n data = np.array([parse_seats(x) for x in fp.read().split('\\n')])\n \n p1 = part1(data)\n print(\"part 1: {}\".format(p1))\n p2 = part2(data)\n print(\"part 2: {}\".format(p2))\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.copy", "numpy.count_nonzero" ] ]
m-philipps/pyPESTO
[ "4c30abfca56ba714c302141cd44a9dd366bff4bb" ]
[ "pypesto/objective/amici.py" ]
[ "import abc\nimport copy\nimport os\nimport tempfile\nfrom collections import OrderedDict\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport numpy as np\n\nfrom ..C import FVAL, MODE_FUN, MODE_RES, RDATAS\nfrom .amici_calculator import AmiciCalculator\nfrom .amici_util import (\n create_identity_parameter_mapping,\n map_par_opt_to_par_sim,\n)\nfrom .base import ObjectiveBase, ResultDict\n\ntry:\n import amici\n import amici.parameter_mapping\n import amici.petab_objective\n from amici.parameter_mapping import ParameterMapping\nexcept ImportError:\n pass\n\nAmiciModel = Union['amici.Model', 'amici.ModelPtr']\nAmiciSolver = Union['amici.Solver', 'amici.SolverPtr']\n\n\nclass AmiciObjectBuilder(abc.ABC):\n \"\"\"Allows to build AMICI model, solver, and edatas.\n\n This class is useful for pickling an :class:`pypesto.AmiciObjective`,\n which is required in some parallelization schemes. Therefore, this\n class itself must be picklable.\n \"\"\"\n\n @abc.abstractmethod\n def create_model(self) -> AmiciModel:\n \"\"\"Create an AMICI model.\"\"\"\n\n @abc.abstractmethod\n def create_solver(self, model: AmiciModel) -> AmiciSolver:\n \"\"\"Create an AMICI solver.\"\"\"\n\n @abc.abstractmethod\n def create_edatas(self, model: AmiciModel) -> Sequence['amici.ExpData']:\n \"\"\"Create AMICI experimental data.\"\"\"\n\n\nclass AmiciObjective(ObjectiveBase):\n \"\"\"Allows to create an objective directly from an amici model.\"\"\"\n\n def __init__(\n self,\n amici_model: AmiciModel,\n amici_solver: AmiciSolver,\n edatas: Union[Sequence['amici.ExpData'], 'amici.ExpData'],\n max_sensi_order: Optional[int] = None,\n x_ids: Optional[Sequence[str]] = None,\n x_names: Optional[Sequence[str]] = None,\n parameter_mapping: Optional['ParameterMapping'] = None,\n guess_steadystate: Optional[Optional[bool]] = None,\n n_threads: Optional[int] = 1,\n fim_for_hess: Optional[bool] = True,\n amici_object_builder: Optional[AmiciObjectBuilder] = None,\n calculator: Optional[AmiciCalculator] = None,\n amici_reporting: Optional['amici.RDataReporting'] = None,\n ):\n \"\"\"\n Initialize objective.\n\n Parameters\n ----------\n amici_model:\n The amici model.\n amici_solver:\n The solver to use for the numeric integration of the model.\n edatas:\n The experimental data. If a list is passed, its entries correspond\n to multiple experimental conditions.\n max_sensi_order:\n Maximum sensitivity order supported by the model. Defaults to 2 if\n the model was compiled with o2mode, otherwise 1.\n x_ids:\n Ids of optimization parameters. In the simplest case, this will be\n the AMICI model parameters (default).\n x_names:\n Names of optimization parameters.\n parameter_mapping:\n Mapping of optimization parameters to model parameters. Format\n as created by `amici.petab_objective.create_parameter_mapping`.\n The default is just to assume that optimization and simulation\n parameters coincide.\n guess_steadystate:\n Whether to guess steadystates based on previous steadystates and\n respective derivatives. This option may lead to unexpected\n results for models with conservation laws and should accordingly\n be deactivated for those models.\n n_threads:\n Number of threads that are used for parallelization over\n experimental conditions. If amici was not installed with openMP\n support this option will have no effect.\n fim_for_hess:\n Whether to use the FIM whenever the Hessian is requested. 
This only\n applies with forward sensitivities.\n With adjoint sensitivities, the true Hessian will be used,\n if available.\n FIM or Hessian will only be exposed if `max_sensi_order>1`.\n amici_object_builder:\n AMICI object builder. Allows recreating the objective for\n pickling, required in some parallelization schemes.\n calculator:\n Performs the actual calculation of the function values and\n derivatives.\n amici_reporting:\n Determines which quantities will be computed by AMICI,\n see ``amici.Solver.setReturnDataReportingMode``. Set to ``None``\n to compute only the minimum required information.\n \"\"\"\n if amici is None:\n raise ImportError(\n \"This objective requires an installation of amici \"\n \"(https://github.com/icb-dcm/amici). \"\n \"Install via `pip3 install amici`.\"\n )\n\n self.amici_model = amici_model.clone()\n self.amici_solver = amici_solver.clone()\n\n # make sure the edatas are a list of edata objects\n if isinstance(edatas, amici.amici.ExpData):\n edatas = [edatas]\n\n # set the experimental data container\n self.edatas = edatas\n\n # set the maximum sensitivity order\n self.max_sensi_order = max_sensi_order\n\n self.guess_steadystate = guess_steadystate\n\n # optimization parameter ids\n if x_ids is None:\n # use model parameter ids as ids\n x_ids = list(self.amici_model.getParameterIds())\n self.x_ids = x_ids\n\n # mapping of parameters\n if parameter_mapping is None:\n # use identity mapping for each condition\n parameter_mapping = create_identity_parameter_mapping(\n amici_model, len(edatas)\n )\n self.parameter_mapping = parameter_mapping\n\n # If supported, enable `guess_steadystate` by default. If not\n # supported, disable by default. If requested but unsupported, raise.\n if (\n self.guess_steadystate is not False\n and self.amici_model.nx_solver_reinit > 0\n ):\n if self.guess_steadystate:\n raise ValueError(\n 'Steadystate prediction is not supported '\n 'for models with conservation laws!'\n )\n self.guess_steadystate = False\n\n if (\n self.guess_steadystate is not False\n and self.amici_model.getSteadyStateSensitivityMode()\n == amici.SteadyStateSensitivityMode.integrationOnly\n ):\n if self.guess_steadystate:\n raise ValueError(\n 'Steadystate guesses cannot be enabled '\n 'when `integrationOnly` as '\n 'SteadyStateSensitivityMode!'\n )\n self.guess_steadystate = False\n\n if self.guess_steadystate is not False:\n self.guess_steadystate = True\n\n if self.guess_steadystate:\n # preallocate guesses, construct a dict for every edata for which\n # we need to do preequilibration\n self.steadystate_guesses = {\n 'fval': np.inf,\n 'data': {\n iexp: {}\n for iexp, edata in enumerate(self.edatas)\n if len(edata.fixedParametersPreequilibration)\n },\n }\n # optimization parameter names\n if x_names is None:\n # use ids as names\n x_names = x_ids\n\n self.n_threads = n_threads\n self.fim_for_hess = fim_for_hess\n self.amici_object_builder = amici_object_builder\n self.amici_reporting = amici_reporting\n\n if calculator is None:\n calculator = AmiciCalculator()\n self.calculator = calculator\n super().__init__(x_names=x_names)\n\n # Custom (condition-specific) timepoints. 
See the\n # `set_custom_timepoints` method for more information.\n self.custom_timepoints = None\n\n def get_config(self) -> dict:\n \"\"\"Return basic information of the objective configuration.\"\"\"\n info = super().get_config()\n info['x_names'] = self.x_names\n info['model_name'] = self.amici_model.getName()\n info['solver'] = str(type(self.amici_solver))\n info['sensi_order'] = self.max_sensi_order\n\n return info\n\n def initialize(self):\n \"\"\"See `ObjectiveBase` documentation.\"\"\"\n super().initialize()\n self.reset_steadystate_guesses()\n self.calculator.initialize()\n\n def __deepcopy__(self, memodict: Dict = None) -> 'AmiciObjective':\n other = self.__class__.__new__(self.__class__)\n\n for key in set(self.__dict__.keys()) - {\n 'amici_model',\n 'amici_solver',\n 'edatas',\n }:\n other.__dict__[key] = copy.deepcopy(self.__dict__[key])\n\n # copy objects that do not have __deepcopy__\n other.amici_model = self.amici_model.clone()\n other.amici_solver = self.amici_solver.clone()\n other.edatas = [amici.ExpData(data) for data in self.edatas]\n\n return other\n\n def __getstate__(self) -> Dict:\n if self.amici_object_builder is None:\n raise NotImplementedError(\n \"AmiciObjective does not support __getstate__ without \"\n \"an `amici_object_builder`.\"\n )\n\n state = {}\n for key in set(self.__dict__.keys()) - {\n 'amici_model',\n 'amici_solver',\n 'edatas',\n }:\n state[key] = self.__dict__[key]\n\n _fd, _file = tempfile.mkstemp()\n try:\n # write amici solver settings to file\n try:\n amici.writeSolverSettingsToHDF5(self.amici_solver, _file)\n except AttributeError as e:\n e.args += (\n \"Pickling the AmiciObjective requires an AMICI \"\n \"installation with HDF5 support.\",\n )\n raise\n # read in byte stream\n with open(_fd, 'rb', closefd=False) as f:\n state['amici_solver_settings'] = f.read()\n finally:\n # close file descriptor and remove temporary file\n os.close(_fd)\n os.remove(_file)\n\n state['AMICI_model_settings'] = amici.get_model_settings(\n self.amici_model\n )\n\n return state\n\n def __setstate__(self, state: Dict) -> None:\n if state['amici_object_builder'] is None:\n raise NotImplementedError(\n \"AmiciObjective does not support __setstate__ without \"\n \"an `amici_object_builder`.\"\n )\n self.__dict__.update(state)\n\n # note: attributes not defined in the builder are lost\n model = self.amici_object_builder.create_model()\n solver = self.amici_object_builder.create_solver(model)\n edatas = self.amici_object_builder.create_edatas(model)\n\n _fd, _file = tempfile.mkstemp()\n try:\n # write solver settings to temporary file\n with open(_fd, 'wb', closefd=False) as f:\n f.write(state['amici_solver_settings'])\n # read in solver settings\n try:\n amici.readSolverSettingsFromHDF5(_file, solver)\n except AttributeError as err:\n if not err.args:\n err.args = ('',)\n err.args += (\n \"Unpickling an AmiciObjective requires an AMICI \"\n \"installation with HDF5 support.\",\n )\n raise\n finally:\n # close file descriptor and remove temporary file\n os.close(_fd)\n os.remove(_file)\n\n self.amici_model = model\n self.amici_solver = solver\n self.edatas = edatas\n\n self.apply_custom_timepoints()\n amici.set_model_settings(\n self.amici_model,\n state['AMICI_model_settings'],\n )\n\n def check_sensi_orders(\n self,\n sensi_orders: Tuple[int, ...],\n mode: str,\n ) -> bool:\n \"\"\"See `ObjectiveBase` documentation.\"\"\"\n if not sensi_orders:\n return True\n sensi_order = max(sensi_orders)\n\n # dynamically obtain maximum allowed sensitivity order\n 
max_sensi_order = self.max_sensi_order\n if max_sensi_order is None:\n max_sensi_order = 1\n # check whether it is ok to request 2nd order\n sensi_mthd = self.amici_solver.getSensitivityMethod()\n mthd_fwd = amici.SensitivityMethod_forward\n if mode == MODE_FUN and (\n self.amici_model.o2mode\n or (sensi_mthd == mthd_fwd and self.fim_for_hess)\n ):\n max_sensi_order = 2\n\n # evaluate sensitivity order\n return sensi_order <= max_sensi_order\n\n def check_mode(self, mode: str) -> bool:\n \"\"\"See `ObjectiveBase` documentation.\"\"\"\n return mode in [MODE_FUN, MODE_RES]\n\n def __call__(\n self,\n x: np.ndarray,\n sensi_orders: Tuple[int, ...] = (0,),\n mode: str = MODE_FUN,\n return_dict: bool = False,\n **kwargs,\n ) -> Union[float, np.ndarray, Tuple, ResultDict]:\n \"\"\"See `ObjectiveBase` documentation.\"\"\"\n # Use AMICI full reporting if amici.ReturnDatas are returned and no\n # other reporting mode was set\n if (\n return_dict\n and self.amici_reporting is None\n and 'amici_reporting' not in kwargs\n ):\n kwargs['amici_reporting'] = amici.RDataReporting.full\n\n return super().__call__(x, sensi_orders, mode, return_dict, **kwargs)\n\n def call_unprocessed(\n self,\n x: np.ndarray,\n sensi_orders: Tuple[int, ...],\n mode: str,\n edatas: Sequence['amici.ExpData'] = None,\n parameter_mapping: 'ParameterMapping' = None,\n amici_reporting: Optional['amici.RDataReporting'] = None,\n ):\n \"\"\"\n Call objective function without pre- or post-processing and formatting.\n\n Returns\n -------\n result:\n A dict containing the results.\n \"\"\"\n x_dct = self.par_arr_to_dct(x)\n\n # only ask amici to compute required quantities\n amici_reporting = (\n self.amici_reporting\n if amici_reporting is None\n else amici_reporting\n )\n if amici_reporting is None:\n amici_reporting = (\n amici.RDataReporting.likelihood\n if mode == MODE_FUN\n else amici.RDataReporting.residuals\n )\n self.amici_solver.setReturnDataReportingMode(amici_reporting)\n\n # update steady state\n if (\n self.guess_steadystate\n and self.steadystate_guesses['fval'] < np.inf\n ):\n for data_ix in range(len(self.edatas)):\n self.apply_steadystate_guess(data_ix, x_dct)\n\n if edatas is None:\n edatas = self.edatas\n if parameter_mapping is None:\n parameter_mapping = self.parameter_mapping\n ret = self.calculator(\n x_dct=x_dct,\n sensi_orders=sensi_orders,\n mode=mode,\n amici_model=self.amici_model,\n amici_solver=self.amici_solver,\n edatas=edatas,\n n_threads=self.n_threads,\n x_ids=self.x_ids,\n parameter_mapping=parameter_mapping,\n fim_for_hess=self.fim_for_hess,\n )\n\n nllh = ret[FVAL]\n rdatas = ret[RDATAS]\n\n # check whether we should update data for preequilibration guesses\n if (\n self.guess_steadystate\n and nllh <= self.steadystate_guesses['fval']\n and nllh < np.inf\n ):\n self.steadystate_guesses['fval'] = nllh\n for data_ix, rdata in enumerate(rdatas):\n self.store_steadystate_guess(data_ix, x_dct, rdata)\n\n return ret\n\n def par_arr_to_dct(self, x: Sequence[float]) -> Dict[str, float]:\n \"\"\"Create dict from parameter vector.\"\"\"\n return OrderedDict(zip(self.x_ids, x))\n\n def apply_steadystate_guess(self, condition_ix: int, x_dct: Dict) -> None:\n \"\"\"\n Apply steady state guess to `edatas[condition_ix].x0`.\n\n Use the stored steadystate as well as the respective sensitivity (\n if available) and parameter value to approximate the steadystate at\n the current parameters using a zeroth or first order taylor\n approximation:\n x_ss(x') = x_ss(x) [+ dx_ss/dx(x)*(x'-x)]\n \"\"\"\n mapping = 
self.parameter_mapping[condition_ix].map_sim_var\n x_sim = map_par_opt_to_par_sim(mapping, x_dct, self.amici_model)\n x_ss_guess = [] # resets initial state by default\n if condition_ix in self.steadystate_guesses['data']:\n guess_data = self.steadystate_guesses['data'][condition_ix]\n if guess_data['x_ss'] is not None:\n x_ss_guess = guess_data['x_ss']\n if guess_data['sx_ss'] is not None:\n linear_update = (\n guess_data['sx_ss']\n .transpose()\n .dot(\n (x_sim - guess_data['x'])[\n np.asarray(self.edatas[condition_ix].plist)\n ]\n )\n )\n # limit linear updates to max 20 % elementwise change\n if (linear_update / (x_ss_guess + np.spacing(1))).max() < 0.2:\n x_ss_guess += linear_update\n\n self.edatas[condition_ix].x0 = tuple(x_ss_guess)\n\n def store_steadystate_guess(\n self,\n condition_ix: int,\n x_dct: Dict,\n rdata: 'amici.ReturnData',\n ) -> None:\n \"\"\"\n Store condition parameter, steadystate and steadystate sensitivity.\n\n Stored in steadystate_guesses if steadystate guesses are enabled for\n this condition.\n \"\"\"\n if condition_ix not in self.steadystate_guesses['data']:\n return\n preeq_guesses = self.steadystate_guesses['data'][condition_ix]\n\n # update parameter\n condition_map_sim_var = self.parameter_mapping[\n condition_ix\n ].map_sim_var\n x_sim = map_par_opt_to_par_sim(\n condition_map_sim_var, x_dct, self.amici_model\n )\n preeq_guesses['x'] = x_sim\n\n # update steadystates\n preeq_guesses['x_ss'] = rdata['x_ss']\n preeq_guesses['sx_ss'] = rdata['sx_ss']\n\n def reset_steadystate_guesses(self) -> None:\n \"\"\"Reset all steadystate guess data.\"\"\"\n if not self.guess_steadystate:\n return\n\n self.steadystate_guesses['fval'] = np.inf\n for condition in self.steadystate_guesses['data']:\n self.steadystate_guesses['data'][condition] = {}\n\n def apply_custom_timepoints(self) -> None:\n \"\"\"Apply custom timepoints, if applicable.\n\n See the `set_custom_timepoints` method for more information.\n \"\"\"\n if self.custom_timepoints is not None:\n for index in range(len(self.edatas)):\n self.edatas[index].setTimepoints(self.custom_timepoints[index])\n\n def set_custom_timepoints(\n self,\n timepoints: Sequence[Sequence[Union[float, int]]] = None,\n timepoints_global: Sequence[Union[float, int]] = None,\n ) -> 'AmiciObjective':\n \"\"\"\n Create a copy of this objective that is evaluated at custom timepoints.\n\n The intended use is to aid in predictions at unmeasured timepoints.\n\n Parameters\n ----------\n timepoints:\n The outer sequence should contain a sequence of timepoints for each\n experimental condition.\n timepoints_global:\n A sequence of timepoints that will be used for all experimental\n conditions.\n\n Returns\n -------\n The customized copy of this objective.\n \"\"\"\n if timepoints is None and timepoints_global is None:\n raise KeyError('Timepoints were not specified.')\n\n amici_objective = copy.deepcopy(self)\n\n if timepoints is not None:\n if len(timepoints) != len(amici_objective.edatas):\n raise ValueError(\n 'The number of condition-specific timepoints `timepoints` '\n 'does not match the number of experimental conditions.\\n'\n f'Number of provided timepoints: {len(timepoints)}. 
'\n 'Number of experimental conditions: '\n f'{len(amici_objective.edatas)}.'\n )\n custom_timepoints = timepoints\n else:\n custom_timepoints = [\n copy.deepcopy(timepoints_global)\n for _ in range(len(amici_objective.edatas))\n ]\n\n amici_objective.custom_timepoints = custom_timepoints\n amici_objective.apply_custom_timepoints()\n return amici_objective\n\n def check_gradients_match_finite_differences(\n self, x: np.ndarray = None, *args, **kwargs\n ) -> bool:\n \"\"\"Check if gradients match finite differences (FDs).\n\n Parameters\n ----------\n x: The parameters for which to evaluate the gradient.\n\n Returns\n -------\n bool\n Indicates whether gradients match (True) FDs or not (False)\n \"\"\"\n if x is None and 'petab_problem' in dir(self.amici_object_builder):\n x = self.amici_object_builder.petab_problem.x_nominal_scaled\n x_free = self.amici_object_builder.petab_problem.x_free_indices\n return super().check_gradients_match_finite_differences(\n x=x, x_free=x_free, *args, **kwargs\n )\n" ]
[ [ "numpy.spacing", "numpy.asarray" ] ]
gottaegbert/penter
[ "8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d", "8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d" ]
[ "matplotlib/gallery_python/event_handling/path_editor.py", "matplotlib/gallery_python/text_labels_and_annotations/fancytextbox_demo.py" ]
[ "\"\"\"\n===========\nPath Editor\n===========\n\nSharing events across GUIs.\n\nThis example demonstrates a cross-GUI application using Matplotlib event\nhandling to interact with and modify objects on the canvas.\n\"\"\"\nimport numpy as np\nimport matplotlib.path as mpath\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\n\nPath = mpath.Path\n\nfig, ax = plt.subplots()\n\npathdata = [\n (Path.MOVETO, (1.58, -2.57)),\n (Path.CURVE4, (0.35, -1.1)),\n (Path.CURVE4, (-1.75, 2.0)),\n (Path.CURVE4, (0.375, 2.0)),\n (Path.LINETO, (0.85, 1.15)),\n (Path.CURVE4, (2.2, 3.2)),\n (Path.CURVE4, (3, 0.05)),\n (Path.CURVE4, (2.0, -0.5)),\n (Path.CLOSEPOLY, (1.58, -2.57)),\n ]\n\ncodes, verts = zip(*pathdata)\npath = mpath.Path(verts, codes)\npatch = mpatches.PathPatch(path, facecolor='green', edgecolor='yellow', alpha=0.5)\nax.add_patch(patch)\n\n\nclass PathInteractor:\n \"\"\"\n An path editor.\n\n Key-bindings\n\n 't' toggle vertex markers on and off. When vertex markers are on,\n you can move them, delete them\n\n\n \"\"\"\n\n showverts = True\n epsilon = 5 # max pixel distance to count as a vertex hit\n\n def __init__(self, pathpatch):\n\n self.ax = pathpatch.axes\n canvas = self.ax.figure.canvas\n self.pathpatch = pathpatch\n self.pathpatch.set_animated(True)\n\n x, y = zip(*self.pathpatch.get_path().vertices)\n\n self.line, = ax.plot(x, y, marker='o', markerfacecolor='r', animated=True)\n\n self._ind = None # the active vert\n\n canvas.mpl_connect('draw_event', self.draw_callback)\n canvas.mpl_connect('button_press_event', self.button_press_callback)\n canvas.mpl_connect('key_press_event', self.key_press_callback)\n canvas.mpl_connect('button_release_event', self.button_release_callback)\n canvas.mpl_connect('motion_notify_event', self.motion_notify_callback)\n self.canvas = canvas\n\n def draw_callback(self, event):\n self.background = self.canvas.copy_from_bbox(self.ax.bbox)\n self.ax.draw_artist(self.pathpatch)\n self.ax.draw_artist(self.line)\n self.canvas.blit(self.ax.bbox)\n\n def pathpatch_changed(self, pathpatch):\n \"\"\"This method is called whenever the pathpatch object is called.\"\"\"\n # only copy the artist props to the line (except visibility)\n vis = self.line.get_visible()\n plt.Artist.update_from(self.line, pathpatch)\n self.line.set_visible(vis) # don't use the pathpatch visibility state\n\n def get_ind_under_point(self, event):\n \"\"\"\n Return the index of the point closest to the event position or *None*\n if no point is within ``self.epsilon`` to the event position.\n \"\"\"\n # display coords\n xy = np.asarray(self.pathpatch.get_path().vertices)\n xyt = self.pathpatch.get_transform().transform(xy)\n xt, yt = xyt[:, 0], xyt[:, 1]\n d = np.sqrt((xt - event.x)**2 + (yt - event.y)**2)\n ind = d.argmin()\n\n if d[ind] >= self.epsilon:\n ind = None\n\n return ind\n\n def button_press_callback(self, event):\n \"\"\"Callback for mouse button presses.\"\"\"\n if not self.showverts:\n return\n if event.inaxes is None:\n return\n if event.button != 1:\n return\n self._ind = self.get_ind_under_point(event)\n\n def button_release_callback(self, event):\n \"\"\"Callback for mouse button releases.\"\"\"\n if not self.showverts:\n return\n if event.button != 1:\n return\n self._ind = None\n\n def key_press_callback(self, event):\n \"\"\"Callback for key presses.\"\"\"\n if not event.inaxes:\n return\n if event.key == 't':\n self.showverts = not self.showverts\n self.line.set_visible(self.showverts)\n if not self.showverts:\n self._ind = None\n\n 
self.canvas.draw()\n\n def motion_notify_callback(self, event):\n \"\"\"Callback for mouse movements.\"\"\"\n if not self.showverts:\n return\n if self._ind is None:\n return\n if event.inaxes is None:\n return\n if event.button != 1:\n return\n x, y = event.xdata, event.ydata\n\n vertices = self.pathpatch.get_path().vertices\n\n vertices[self._ind] = x, y\n self.line.set_data(zip(*vertices))\n\n self.canvas.restore_region(self.background)\n self.ax.draw_artist(self.pathpatch)\n self.ax.draw_artist(self.line)\n self.canvas.blit(self.ax.bbox)\n\n\ninteractor = PathInteractor(patch)\nax.set_title('drag vertices to update path')\nax.set_xlim(-3, 4)\nax.set_ylim(-3, 4)\n\nplt.show()\n", "\"\"\"\n=================\nFancytextbox Demo\n=================\n\n\"\"\"\nimport matplotlib.pyplot as plt\n\nplt.text(0.6, 0.7, \"eggs\", size=50, rotation=30.,\n ha=\"center\", va=\"center\",\n bbox=dict(boxstyle=\"round\",\n ec=(1., 0.5, 0.5),\n fc=(1., 0.8, 0.8),\n )\n )\n\nplt.text(0.55, 0.6, \"spam\", size=50, rotation=-25.,\n ha=\"right\", va=\"top\",\n bbox=dict(boxstyle=\"square\",\n ec=(1., 0.5, 0.5),\n fc=(1., 0.8, 0.8),\n )\n )\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.path.Path", "matplotlib.patches.PathPatch", "numpy.sqrt", "matplotlib.pyplot.Artist.update_from" ], [ "matplotlib.pyplot.show" ] ]
UoS-SNe/pycoco
[ "bbcb09b6c8fde7e0c4464bfbd574a42e09dbfed2" ]
[ "pycocosn/classes.py" ]
[ "\"\"\"\nWorkhorse classes for interacting/running the CoCo templates.\n\nauthor: Rob Firth; github.com/RobFirth ; University of Southampton SN Group\n 2017\n\"\"\"\n\nfrom __future__ import print_function ## Force python3-like printing\n\nimport os\nimport re\nimport warnings\nfrom collections import OrderedDict\n\nimport astropy.units as u\nimport numpy as np\nfrom astropy.constants import c\nfrom astropy.coordinates import SkyCoord, Distance\nfrom astropy.table import Table, vstack, Row, Column\nfrom astropy.time import Time\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\nfrom scipy.integrate import simps, trapz\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nfrom scipy.interpolate import interp1d as interp1d\n\n# from .colours import *\n# from .defaults import *\n# from .errors import *\n# from .extinction import *\n# from .models import *\n# from .utils import *\nfrom . import colours\nfrom . import defaults\nfrom . import errors\nfrom . import extinction\nfrom . import models\nfrom . import utils\n# from . import kcorr\n\n__all__ = [\"BaseSpectrumClass\",\n \"BaseLightCurveClass\",\n \"BaseFilterClass\",\n \"BaseLCModelClass\",\n \"PhotometryClass\",\n \"SpectrumClass\",\n \"LCfitClass\",\n \"specfitClass\",\n \"SNClass\",\n \"FilterClass\",\n \"InfoClass\",\n \"find_specphase_spec\"]\n\n# #----------------------------------------------------------------------------# #\n# # TOOLS # #\n# #----------------------------------------------------------------------------# #\n\n# #------------------------------------# #\n# # DUMMY CODE # #\n# #------------------------------------# #\n\n# class CustomValueError(ValueError):\n# \"\"\"\n# Raise when....\n# \"\"\"\n#\n#\n# def __init__(self, *args, **kwargs):\n# ValueError.__init__(self, *args)\n\n\nclass DummyClass():\n \"\"\"\n Quick dummy class.\n\n Contains a test class variable and test class method that prints the\n variable.\n\n RF\n \"\"\"\n\n\n def __init__(self):\n self.dummy_string = 'Hello, World!'\n\n\n def print_dummy_string(self):\n print(self.test_string)\n\n\ndef dummy_function(verbose = True, *args, **kwargs):\n \"\"\"\n Quick dummy function.\n\n Prints supplied **args and **kwargs\n Issues warnings if nothing passed\n\n RF\n \"\"\"\n if verbose: print(__name__)\n warnings.simplefilter('always')\n print(args)\n print(kwargs)\n\n\n # warnings.warn(\"WARNING\")\n\n if not args and not kwargs:\n warnings.warn( \"You didn't pass any *args or **kwargs\", RuntimeWarning)\n\n else:\n if args:\n for i, arg in enumerate(args):\n print('an arg passed via *args: ', repr(arg))\n else:\n warnings.warn( \"You didn't pass any *args\", RuntimeWarning)\n\n if kwargs:\n for key, value in kwargs.items():\n print('a **kwarg: ', repr(key), ' == ' , repr(value))\n else:\n warnings.warn( \"You didn't pass any **kwargs\", RuntimeWarning)\n pass\n\n\n_somevar = 'Foo'\n\n\n# #----------------------------------------------------------------------------# #\n# # CODE # #\n# #----------------------------------------------------------------------------# #\n# #----------------------------------------------------------------------------# #\n# # Classes # #\n# #----------------------------------------------------------------------------# #\n# #------------------------------------# #\n# # Base Classes # #\n# #------------------------------------# #\n\nclass BaseSpectrumClass():\n \"\"\"\n Base class for handling Spectra.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n \"\"\"\n\n ## Initialise the class 
variables\n self._default_list_dir_path = os.path.join(defaults._default_coco_dir_path, \"lists/\")\n #\n # ## Initialise using class methods\n self.set_list_directory(self._get_list_directory())\n\n pass\n\n\n def _get_list_directory(self):\n \"\"\"\n Get the default path to the spec lists directory.\n\n Looks for the list file directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default location: '~/Code/CoCo/', with 'lists/' appended.\n \"\"\"\n\n return os.path.join(os.environ.get('COCO_ROOT_DIR', os.path.join(self._default_list_dir_path, os.pardir)), \"lists/\")\n\n\n def set_list_directory(self, list_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(list_dir_path, self._default_list_dir_path)\n if os.path.isdir(os.path.abspath(list_dir_path)):\n self.list_directory = os.path.abspath(list_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(list_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_list_dir_path, UserWarning)\n self.list_directory = self._default_list_dir_path\n\n if not os.path.isdir(self.list_directory):\n if verbose: print(os.path.isdir(self.list_directory))\n raise errors.PathError(\"The default list directory '\" + self.list_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default list directory '\" + self._default_list_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. Have\"\n + \" you messed with _default_list_dir_path?\")\n pass\n\n\n def load(self, filename, directory=False, abspath=False, fmt=\"ascii\",\n wmin=1500 * u.angstrom, wmax=11000 * u.angstrom,\n names=(\"wavelength\", \"flux\"), wavelength_u=u.angstrom,\n flux_u=u.cgs.erg / u.si.cm ** 2 / u.si.s / u.angstrom,\n convert_flux_u=u.cgs.erg / u.si.cm ** 2 / u.si.s / u.angstrom,\n verbose=False, spectrum_name = False):\n \"\"\"\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n errors.StringWarning(filename)\n\n if abspath:\n path = filename\n\n else:\n if not directory:\n ## Differentiate between the two child classes\n if hasattr(self, 'data_directory'):\n path = os.path.join(self.data_directory, filename)\n if verbose: print(\"You didn't supply a directory, so using self.data_directory\")\n\n if hasattr(self, 'recon_directory'):\n path = os.path.join(self.recon_directory, filename)\n if verbose: print(\"You didn't supply a directory, so using self.recon_directory\")\n else:\n errors.StringWarning(directory)\n utils.check_dir_path(directory)\n\n path = os.path.join(directory, filename)\n if verbose: print(path)\n\n if os.path.isfile(path):\n\n ## Some might have three columns, deal with laters - this is untidy\n try:\n if hasattr(self, \"recon_directory\"):\n names = names + (\"flux_err\",)\n spec_table = Table.read(path, format=fmt, names=names)\n\n except:\n if \"flux_err\" not in names:\n names = names + (\"flux_err\",)\n spec_table = Table.read(path, format=fmt, names=names)\n\n if verbose: print(\"Reading \" + path)\n\n spec_table.meta[\"filepath\"] = path\n spec_table.meta[\"filename\"] = path.split(\"/\")[-1]\n\n if spectrum_name:\n spec_table.meta[\"plot_label_string\"] = r\"$\\textnormal{\" + spectrum_name.replace(\"_\", \"\\_\") + \"}$\"\n else:\n spec_table.meta[\"plot_label_string\"] = 
r'$\\rm{' + spec_table.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n\n spec_table['wavelength'].unit = wavelength_u\n\n if wavelength_u != u.Angstrom:\n spec_table['wavelength'] = spec_table['wavelength'].to(u.Angstrom)\n\n spec_table['flux'].unit = flux_u\n if \"flux_err\" in spec_table.colnames:\n spec_table[\"flux_err\"].unit = flux_u\n\n # Automatically convert units?\n if flux_u != convert_flux_u:\n spec_table[\"flux\"] = spec_table[\"flux\"].to(convert_flux_u)\n if \"flux_err\" in spec_table.colnames:\n spec_table[\"flux_err\"] = spec_table[\"flux_err\"].to(convert_flux_u)\n\n flux_u = convert_flux_u\n if wmin.unit == spec_table[\"wavelength\"].unit:\n # enforce wmin and wmax\n spec_table = spec_table[np.bitwise_and(spec_table['wavelength'].data > wmin.value, spec_table['wavelength'].data < wmax.value)]\n self.min_wavelength = np.nanmin(spec_table[\"wavelength\"])\n self.max_wavelength = np.nanmax(spec_table[\"wavelength\"])\n\n # assign to class\n self.data = spec_table\n self.wavelength = spec_table[\"wavelength\"]\n self.flux = spec_table[\"flux\"]\n\n # If you got this far...\n self.success = True\n else:\n warnings.warn(path + \" is not a valid file path\")\n if verbose: print(path + ' not found')\n\n\n def load_table(self, spec_table, spectrum_name = False, path = False, trim_wavelength=False, wmin=1500 * u.angstrom,\n wmax=15000 * u.angstrom, verbose=False ):\n \"\"\"Use with care - basically assumes you have all of your ducks in a row\"\"\"\n\n if trim_wavelength:\n spec_table = spec_table[np.bitwise_and(spec_table['wavelength'] > wmin, spec_table['wavelength'] < wmax)]\n\n if path:\n spec_table.meta[\"filepath\"] = path\n spec_table.meta[\"filename\"] = path.split(\"/\")[-1]\n if spectrum_name:\n spec_table.meta[\"plot_label_string\"] = r\"$\\textnormal{\" + spectrum_name.replace(\"_\", \"\\_\") + \"}$\"\n else:\n spec_table.meta[\"plot_label_string\"] = r'$\\rm{' + spec_table.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n elif spectrum_name:\n spec_table.meta[\"plot_label_string\"] = r\"$\\textnormal{\" + spectrum_name.replace(\"_\", \"\\_\")+ \"}$\"\n else:\n spec_table.meta[\"plot_label_string\"] = r\"$\\textnormal{Spectrum from table}$\"\n\n self.min_wavelength = np.nanmin(spec_table[\"wavelength\"])\n self.max_wavelength = np.nanmax(spec_table[\"wavelength\"])\n self.data = spec_table\n self.wavelength = spec_table[\"wavelength\"]\n self.flux = spec_table[\"flux\"]\n pass\n\n def plot(self, xminorticks = 250, legend = True, plot_filters=True,\n verbose = False, compare_red = True,\n return_figure=False,\n *args, **kwargs):\n \"\"\"\n Plots spec.\n\n xminorticks : spacing for minor tick marks on x axis\n\n legend : (True) Show legend\n\n verbose : print verbose output\n\n compare_red :\n\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.95,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n\n if verbose: print(self.data.__dict__)\n # plot_label_string = r'$\\rm{' + self.data.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n plot_label_string = self.data.meta[\"plot_label_string\"]\n\n\n # ax1.plot(self.data['wavelength'], self.flux, lw = 2,\n # label = plot_label_string, color = 'C0',\n # *args, **kwargs)\n ax1.plot(self.wavelength, self.flux, lw = 2,\n label = plot_label_string, color = 'C0',\n *args, **kwargs)\n\n maxplotydata = np.nanmax(self.flux)\n 
minplotydata = np.nanmin(self.flux)\n\n if hasattr(self, 'flux_dered') and compare_red:\n ax1.plot(self.data['wavelength'], self.data['flux_dered'], lw = 2,\n label = plot_label_string, color = 'Blue',\n *args, **kwargs)\n maxplotydata = np.nanmax(np.append(maxplotydata, np.nanmax(self.data['flux_dered'])))\n minplotydata = np.nanmin(np.append(minplotydata, np.nanmin(self.data['flux_dered'])))\n if legend:\n\n plot_legend = ax1.legend(loc = 1, scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n\n\n ax1.set_ylim(minplotydata*0.98, maxplotydata*1.02)\n\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n # yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{cm}^{-2}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n\n if return_figure:\n return fig\n plt.show()\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def set_MJD_obs(self, mjd):\n \"\"\"\n Log MJD of the observation.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n self.mjd_obs = mjd\n\n pass\n\n\n def set_EBV(self, EBV):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n self.EBV = EBV\n\n\n def deredden(self, z, EBV_host, EBV_MW = False, verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(\"Foo\")\n if hasattr(self, \"EBV\") and not EBV_MW:\n EBV_MW = self.EBV\n\n self.flux_dered = extinction.deredden(self.wavelength, self.flux, z, EBV_MW = EBV_MW, EBV_host=EBV_host)\n self.data[\"flux_dered\"] = self.flux_dered\n\n else:\n warnings.warn(\"No extinction value set\")\n pass\n\n\n def use_flux_dered(self):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n self.flux_red = self.flux\n self.flux = self.data['flux_dered']\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def _spec_format_for_save(self):\n \"\"\"\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n save_table = Table()\n\n save_table['wavelength'] = self.wavelength\n save_table['flux'] = self.flux\n\n save_table['wavelength'].format = \"5.5f\"\n save_table['flux'].format = \"5.5e\"\n\n return save_table\n\n\n def save(self, filename, path = False,\n squash = False, verbose = False, *args, **kwargs):\n \"\"\"\n Output the spectrum loaded into the Class via self.load into a format\n and location recognised by CoCo.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(\"has data\")\n if not path:\n if verbose: print(\"No directory specified, assuming \" + self._default_data_dir_path)\n path = self._default_data_dir_path\n else:\n errors.StringWarning(path)\n\n outpath = os.path.join(path, filename)\n\n utils.check_dir_path(path)\n\n if os.path.isfile(outpath):\n if squash:\n print(\"Overwriting \" + outpath)\n self._spec_format_for_save().write(outpath, format = \"ascii.fast_commented_header\", overwrite=True)\n else:\n warnings.warn(\"Found existing file matching \" + os.path.join(path,\n filename) + \". 
Run with squash = True to overwrite\")\n else:\n print(\"Writing \" + outpath)\n self._spec_format_for_save().write(outpath, format = \"ascii.fast_commented_header\")\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def _add_to_overlapping_filters(self, filter_name):\n if hasattr(self, \"_overlapping_filter_list\"):\n self._overlapping_filter_list = np.append(self._overlapping_filter_list, filter_name)\n self._n_overlapping_filters = self._n_overlapping_filters + 1\n else:\n self._overlapping_filter_list = np.array(filter_name)\n self._n_overlapping_filters = 1\n pass\n\n\n def set_infile(self, filename):\n self.infile=filename\n pass\n\n\n def get_specphot(self, filter_objects, correct_for_area=True, verbose = False):\n \"\"\"\n TODO - Some duplication between this and SNClass.get_specphot()\n :param spectrum:\n :param verbose:\n :return:\n \"\"\"\n if verbose: print(type(filter_objects), filter_objects)\n\n if not hasattr(self, \"_overlapping_filter_list\"):\n self.check_overlaps(filter_objects=filter_objects, verbose=verbose)\n\n if verbose: print(type(self._overlapping_filter_list), self._overlapping_filter_list)\n\n if self._n_overlapping_filters == 1:\n if verbose: print(\"only one overlapping filter\")\n\n if isinstance(filter_objects, FilterClass):\n if verbose: print(\"FilterClass passed\")\n ## if only one filter is given\n iterator = (filter_objects.filter_name,)\n else:\n # iterator = [self._overlapping_filter_list,]\n # iterator = [i.filter_name for i in filter_objects]\n iterator = filter_objects\n\n else:\n if isinstance(filter_objects, FilterClass):\n if verbose: print(\"FilterClass passed\")\n ## if only one filter is given\n iterator = [filter_objects.filter_name,]\n if verbose: print(type(iterator), iterator)\n\n else:\n # iterator = self._overlapping_filter_list\n iterator = filter_objects\n\n if isinstance(filter_objects, FilterClass):\n if verbose: print(\"FilterClass passed\")\n ## if only one filter is given\n filter_objects = (filter_objects,)\n\n if verbose: print(type(iterator), iterator)\n\n for j, filter_name in enumerate(iterator):\n if verbose: print(j, filter_name)\n\n # if filter_name in filter_objects:\n # if filter_name in [i.filter_name for i in filter_objects]:\n # if verbose: print(\"filter_name in filter_objects\")\n\n if isinstance(filter_name, FilterClass):\n filter_obj = filter_name\n elif isinstance(filter_objects, dict):\n filter_obj = filter_objects[filter_name]\n else:\n filter_obj = filter_objects[[i.filter_name for i in filter_objects].index(filter_name)]\n\n # flux = kcorr.calc_spectrum_filter_flux(filter_object=filter_obj,\n # spectrum_object=self)\n\n if not np.array_equal(filter_obj.wavelength, self.wavelength):\n filter_obj.resample_response(new_wavelength=self.wavelength)\n\n transmitted_spec = filter_obj.throughput * self.flux\n integrated_flux = simps(transmitted_spec, self.wavelength)\n\n if correct_for_area:\n\n if not hasattr(filter_obj, \"_effective_area\"):\n filter_obj.calculate_filter_area()\n\n filter_area = filter_obj._effective_area\n flux = integrated_flux / filter_area\n\n else:\n flux = integrated_flux\n\n if verbose: print(\"flux in filter\", filter_name, \" is \", flux)\n if j == 0:\n self.specphot = Table(names=(\"lambda_effective\", \"flux\", \"filter\"), dtype=('f4', 'f4', 'S'))\n\n self.specphot.add_row((filter_obj.lambda_effective, flux, filter_name))\n\n # else:\n # warnings.warn(\"no overlapping filters - filter_name not in filter_objects\")\n\n pass\n\n\n def 
check_overlaps(self, filter_objects, verbose = False):\n \"\"\"\n TODO - based on SNClass.check_overlaps()\n\n Checks the filters that the spectrum overlaps with.\n originally used functions.filter_within_spec\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if isinstance(FilterClass, type(filter_objects)):\n ## if only one filter is given\n filter_objects = [filter_objects, ]\n\n\n for i, filter_name in enumerate(filter_objects):\n if isinstance(FilterClass, type(filter_name)):\n filter_obj = filter_name\n elif isinstance(filter_objects, dict):\n filter_obj = filter_objects[filter_name]\n else:\n filter_obj = filter_objects[i]\n\n if verbose:print(i, filter_obj)\n\n if hasattr(filter_obj, \"_lower_edge\") and \\\n hasattr(filter_obj, \"_upper_edge\") and \\\n hasattr(self, \"data\"):\n blue_bool = filter_obj._lower_edge > self.min_wavelength\n red_bool = filter_obj._upper_edge < self.max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n if verbose: print(within)\n if within:\n self._add_to_overlapping_filters(filter_name)\n else:\n warnings.warn(\"SpectrumClass.check_overlaps - something went wrong... no overlaps or data?\")\n if self._n_overlapping_filters == 1:\n self._overlapping_filter_list = [self._overlapping_filter_list,] ## added to fix issue #27\n pass\n\n\nclass BaseLightCurveClass():\n \"\"\"\n Base class for handling Lightcurves.\n \"\"\"\n def __init__(self, verbose = False):\n \"\"\"\n\n \"\"\"\n ## Initialise the class variables\n\n ## Initialise using class methods\n\n pass\n\n\n def _get_filter_directory(self):\n \"\"\"\n Get the default path to the filter directory.\n\n Looks for the filter data directory set as environment variable\n $PYCOCO_FILTER_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $PYCOCO_FILTER_DIR, or\n default datalocation, i.e.: '/Users/berto/Code/CoCo/data/filters/'.\n \"\"\"\n return os.path.abspath(os.environ.get('PYCOCO_FILTER_DIR', self._default_filter_dir_path))\n\n\n def set_filter_directory(self, filter_dir_path='', verbose=False):\n \"\"\"\n Set a new filter directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if os.path.isdir(os.path.abspath(filter_dir_path)):\n self.filter_directory = os.path.abspath(filter_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(filter_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_filter_dir_path, UserWarning)\n self.data_directory = self._default_data_dir_path\n\n if not os.path.isdir(self.filter_directory):\n if verbose: print(os.path.isdir(self.filter_directory))\n raise errors.PathError(\"The default data directory '\" + self.filter_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default filter directory '\" + self._default_filter_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
Have\"\n + \" you messed with _default_filter_dir_path?\")\n pass\n\n\n def _sort_phot(self, verbose=False):\n \"\"\"\n resorts the photometry according to effective wavelength of the filter.\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n if hasattr(self, \"data\") and hasattr(self, \"data_filters\"):\n ## This looks fugly.\n newkeys = np.array([i for i in self.data_filters.keys()])[np.argsort([self.data_filters[i].lambda_effective.value for i in self.data_filters])]\n\n sorted_data = OrderedDict()\n sorted_data_filters = OrderedDict()\n\n for newkey in newkeys:\n\n if verbose: print(newkey)\n\n sorted_data[newkey] = self.data[newkey]\n sorted_data_filters[newkey] = self.data_filters[newkey]\n\n self.data = sorted_data\n self.data_filters = sorted_data_filters\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def unpack(self, filter_file_type=\".dat\", verbose=False):\n \"\"\"\n If loading from preformatted file, then unpack the table into self.data\n OrderedDict and load FilterClass objects into self.data_filters OrderedDict\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n\n if hasattr(self, \"phot\"):\n filter_names = np.unique(self.phot[\"filter\"])\n\n self.phot.add_index('filter', unique = True)\n\n\n for filter_name in filter_names:\n\n phot_table = self.phot.loc[\"filter\", filter_name]\n filter_filename = filter_name + filter_file_type\n if verbose: print(filter_filename)\n if verbose: print(phot_table)\n if verbose: print(type(filter_name), type(filter_file_type))\n\n # phot_table.meta = {\"filter_filename\": filter_filename}\n phot_table.meta[\"filter_filename\"] = filter_filename\n if not isinstance(phot_table, Row):\n # if len(np.unique(self.phot.loc[\"filter\", filter_name][\"MJD\"])) > 1:\n indices = phot_table.argsort(\"MJD\")\n # for column_name in phot_table.colnames:\n # phot_table[column_name] = phot_table[column_name][indices]\n sorted_phot_table = Table([phot_table[column_name][indices] for column_name in phot_table.colnames])\n else:\n sorted_phot_table = phot_table\n\n filter_key = np.unique(phot_table[\"filter\"])[0]\n\n if len(np.unique(phot_table[\"filter\"])) > 1 or filter_key != filter_name:\n raise errors.FilterMismatchError(\"There is a more than one filterdata in here! 
or there is a mismatch with filename\")\n path_to_filter = os.path.join(self.filter_directory, phot_table.meta['filter_filename'])\n\n # def load_filter(path, cmap = False, verbose = False):\n #\n if utils.check_file_path(os.path.abspath(path_to_filter)):\n filter_object = FilterClass()\n filter_object.read_filter_file(os.path.abspath(path_to_filter), verbose = verbose)\n filter_object.calculate_AB_zp()\n else:\n warnings.warn(\"Couldn't load the filter\")\n\n self.data_filters[filter_key] = filter_object\n\n self.data[filter_name] = sorted_phot_table\n\n self.filter_names = filter_names\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n\n pass\n\n\n def load_table(self, phot_table, verbose=False):\n \"\"\"\n Loads a single photometry table.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n # errors.StringWarning(path)\n try:\n self.phot = phot_table\n self.unpack(verbose=verbose)\n\n ## Sort the OrderedDict\n self._sort_phot()\n except:\n raise Exception\n\n\n def load_phot_dict(self, data_dict):\n \"\"\"\n\n \"\"\"\n self.data = data_dict\n pass\n\n\n def _combine_phot(self, verbose = True):\n \"\"\"\n\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(self.data.keys())\n\n for i, phot_filter in enumerate(self.data.keys()):\n\n if verbose: print(i, phot_filter)\n\n if i == 0:\n\n full_phot = self.data[phot_filter]\n\n else:\n\n full_phot = vstack([full_phot, self.data[phot_filter]])\n\n pass\n\n self.data['full'] = full_phot\n\n else:\n warnings.warn(\"Cant find self.data\")\n\n pass\n\n\n # def _phot_format_for_save(self, filters = False, verbose = False):\n # \"\"\"\n # This is hacky - clear it up!\n #\n # Parameters\n # ----------\n # Returns\n # -------\n # \"\"\"\n #\n # if not filters:\n # ## if none specified, use all filters\n # filters = self.data.keys()\n #\n # w = np.array([])\n # for i, f in enumerate(filters):\n # w = np.append(w, np.where(self.phot[\"filter\"] == f))\n # if verbose: print(w)\n #\n # save_table = self.phot[\"MJD\", \"flux\", \"flux_err\", \"filter\"][w.astype(int)]\n # save_table['MJD'].format = \"5.5f\"\n # save_table['flux'].format = \"5.5e\"\n # save_table['flux_err'].format = \"5.5e\"\n # # save_table[save_table.argsort(\"MJD\")]\n # return save_table\n\n\n def _phot_format_for_save(self, names = ('MJD', 'flux', 'flux_err', 'filter'), formats = ('.3f','.5g', '.5g', ''),\n filters = False, verbose = False, sort=False):\n \"\"\"\n This is hacky - clear it up!\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n\n if sort:\n save_table = self.phot[names]\n save_table = save_table[save_table.argsort()]\n else:\n save_table = self.phot[names]\n\n for z in zip(names, formats):\n save_table[z[0]].format = z[1]\n\n if filters:\n save_table = save_table[np.in1d(save_table[\"filter\"], filters)]\n\n if verbose:\n print(save_table)\n return save_table\n\n\n def save(self, filename, filters = False, path = False,\n names = ('MJD', 'flux', 'flux_err', 'filter'), formats = ('.3f','.5g', '.5g', ''),\n squash = False, verbose = True, sort = False, *args, **kwargs):\n \"\"\"\n Output the photometry loaded into the SNClass via self.load_phot* into a format\n and location recognised by CoCo.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(\"has data\")\n if not path:\n if verbose: print(\"No directory specified, assuming \" + self._default_data_dir_path)\n path = self._default_data_dir_path\n else:\n errors.StringWarning(path)\n\n 
utils.check_dir_path(path)\n\n outpath = os.path.join(path, filename)\n\n if verbose: print(outpath)\n if not filters:\n ## if none specified, use all filters\n filters = list(self.data.keys())\n if verbose: print(filters)\n\n\n if os.path.isfile(outpath):\n if squash:\n print(\"Overwriting \" + outpath)\n self._phot_format_for_save(filters = filters, names = names, formats = formats,\n verbose = verbose, sort=sort).write(outpath, format = \"ascii.fast_commented_header\", overwrite = True, names=names)\n else:\n warnings.warn(\"Found existing file matching \" + outpath + \". Run with squash = True to overwrite\")\n\n else:\n print(\"Writing \" + outpath)\n self._phot_format_for_save(filters = filters).write(outpath, format = \"ascii.fast_commented_header\", names=names)\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def set_infile(self, filename):\n self.infile=filename\n pass\n\n\n def nightaverage(self, filters=False, verbose=False):\n \"\"\"\n\n :param verbose:\n :return:\n \"\"\"\n if hasattr(self, \"phot\") and hasattr(self, \"data\"):\n\n if not filters:\n filters = self.data_filters\n if type(filters) == str:\n filters = [filters]\n\n for i, filter_key in enumerate(filters):\n\n if verbose: print(i, self.data[filter_key].__dict__)\n\n dt = self.data[filter_key]\n\n dt[\"Night\"] = list(map(lambda x: np.round(x), dt[\"MJD\"]))\n\n dt.add_index(\"Night\")\n dt_grouped = dt.group_by(\"Night\")\n dt_grouped[\"weights\"] = 1.0 / ((dt_grouped[\"flux_err\"]) * (dt_grouped[\"flux_err\"]))\n\n if i == 0:\n na_table = Table(names=(\"MJD\", \"flux\", \"flux_err\", \"filter\"),\n dtype=(dt[\"MJD\"].dtype, dt[\"flux\"].dtype, dt[\"flux_err\"].dtype, dt[\"filter\"].dtype))\n for j, col in enumerate(na_table.columns):\n if verbose: print(j, col)\n na_table[col].unit = dt[col].unit\n for group in dt_grouped.groups:\n wmean = np.average(group[\"flux\"], weights=group[\"weights\"])\n wmean_err = np.sqrt(1. / np.sum(1. 
/ (group[\"flux_err\"] * group[\"flux_err\"])))\n if verbose: print(np.mean(group[\"MJD\"]), wmean, wmean_err)\n na_table.add_row((np.mean(group[\"MJD\"]), wmean, wmean_err, filter_key))\n\n if verbose: print(\"loading into phot object...\")\n\n self.load_table(phot_table=na_table)\n\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.phot)\")\n pass\n\n\nclass BaseLCModelClass():\n \"\"\"\n\n \"\"\"\n\n def __init__(self, model_name):\n if model_name in _defined_models:\n\n if model_name == \"bazin09\":\n self.function = bazin09_listarg\n self.nparams = 4\n self._paramnames = [\"a\", \"t_0\", \"t_rise\", \"t_fall\"]\n\n elif model_name == \"karpenka12\":\n self.function = karpenka12_listarg\n self.nparams = 6\n self._paramnames = [\"a\", \"b\", \"t_0\", \"t_1\", \"t_rise\", \"t_fall\"]\n\n elif model_name == \"firth17\":\n self.function = firth17_listarg\n self.nparams = 8\n self._paramnames = [\"a\", \"b\", \"t_0\", \"t_1\", \"t_2\", \"t_x\", \"t_rise\", \"t_fall\"]\n\n else:\n warnings.warn(\"Model Not Recognised.\")\n # self.fit_params = OrderedDict\n pass\n\n def load_bestfitparams(self, param_array):\n \"\"\"\n pass array where the keys are the param names.\n \"\"\"\n\n self.params = OrderedDict()\n\n for i, element in enumerate(param_array):\n self.params[self._paramnames[i]] = element\n pass\n\n def evaluate(self, t):\n\n self.fit = self.function(t, [self.params[p] for p in self.params])\n pass\n\n\nclass BaseFilterClass():\n \"\"\"\n\n \"\"\"\n\n def __init__(self, verbose = True):\n \"\"\"\n\n :param verbose:\n \"\"\"\n self._wavelength_units = u.Angstrom\n self._wavelength_units._format['latex'] = r'\\rm{\\AA}'\n self._frequency_units = u.Hertz\n # self.calculate_frequency()\n # self.calculate_effective_frequency()\n pass\n\n\n def calculate_filter_area(self, verbose=False):\n \"\"\"\n\n :return:\n \"\"\"\n if hasattr(self, \"throughput\"):\n area = simps(self.throughput, self.wavelength)\n if np.isnan(area): ## See Issue #26 on GitHub\n area = trapz(self.throughput, self.wavelength)\n self._effective_area = area\n\n\n\n def calculate_AB_zp(self, ABpath = os.path.join(defaults._default_kcorr_data_path, \"AB_pseudospectrum.dat\"), wmin = 1500 * u.angstrom, wmax=25000 * u.angstrom):\n \"\"\"\n \"\"\"\n\n\n AB = SpectrumClass()\n AB.load(ABpath, wmin=wmin, wmax=wmax)\n\n if not hasattr(self, \"lambda_effective\"):\n self.calculate_effective_wavelength()\n\n self.resample_response(new_wavelength=AB.wavelength)\n\n transmitted_spec = self.throughput * AB.flux\n integrated_flux = simps(transmitted_spec, AB.wavelength)\n\n if not hasattr(self, \"_effective_area\"):\n self.calculate_filter_area()\n\n area_corr_integrated_flux = integrated_flux / self._effective_area\n\n self.zp_AB = -2.5 * np.log10(area_corr_integrated_flux)\n pass\n\n\n def calculate_effective_wavelength(self):\n \"\"\"\n Well, what are you expecting something called `calculate_effective_wavelength`\n to do?\n \"\"\"\n\n spline_rev = interp1d((np.cumsum(self.wavelength*self.throughput)/np.sum(self.wavelength*self.throughput)), self.wavelength)\n lambda_eff = spline_rev(0.5)\n\n self.lambda_effective = lambda_eff * self._wavelength_units\n pass\n\n\n def calculate_frequency(self):\n nu = c/self.wavelength_u\n self.frequency_u = nu.to(self._frequency_units)\n self.frequency = self.frequency_u.value\n\n\n def calculate_effective_frequency(self):\n \"\"\"\n\n \"\"\"\n\n if hasattr(self, \"frequency\"):\n spline_rev = 
interp1d((np.cumsum(self.frequency*self.throughput)/np.sum(self.frequency*self.throughput)), self.frequency)\n nu_eff = spline_rev(0.5)\n\n self.nu_effective = nu_eff * self._frequency_units\n pass\n\n\n def plot(self, xminorticks = 250, yminorticks = 0.1,\n show_lims = False, small = False, cumulative = False, return_figure=False,\n *args, **kwargs):\n \"\"\"\n Plots filter throughput, so you can double check it.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n ## Check if there is something in the class to plot\n if hasattr(self, \"wavelength\") and hasattr(self, \"throughput\"):\n\n utils.setup_plot_defaults()\n if hasattr(self._wavelength_units, \"format\"):\n if \"latex\" in self._wavelength_units.format:\n xaxis_label_string = r'$\\textnormal{Wavelength, ' + self._wavelength_units.name + ' (}' + self._wavelength_units._format['latex'] +')$'\n else:\n xaxis_label_string = r'$\\textnormal{Wavelength, ' + self._wavelength_units.name + '}$'\n\n plot_label_string = r'$\\textnormal{' + self.filter_name.replace('_', '\\\\_') + '}$'\n\n yminorLocator = MultipleLocator(yminorticks)\n xminorLocator = MultipleLocator(xminorticks)\n\n if not small:\n fig = plt.figure(figsize=[8, 4])\n else:\n fig = plt.figure(figsize=[4, 2])\n plt.rcParams['font.size'] = 10\n\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n if cumulative:\n throughput = np.cumsum(self.throughput)/np.sum(self.throughput)\n yaxis_label_string = r'$\\textnormal{Cumulative Throughput}$'\n\n else:\n throughput = self.throughput\n yaxis_label_string = r'$\\textnormal{Fractional Throughput}$'\n\n\n if hasattr(self, \"_plot_colour\"):\n ax1.plot(self.wavelength, throughput, color = self._plot_colour,\n lw = 2, label = plot_label_string)\n else:\n ax1.plot(self.wavelength, throughput, lw = 2, label = plot_label_string)\n\n if show_lims:\n try:\n ax1.plot([self._upper_edge, self._upper_edge], [0,1] ,\n lw = 1.5, alpha = 0.5, ls = ':',\n color = colours.hex['batman'], zorder = 0, )\n ax1.plot([self._lower_edge, self._lower_edge], [0,1] ,\n lw = 1.5, alpha = 0.5, ls = ':',\n color = colours.hex['batman'], zorder = 0, )\n except:\n print(\"Failed\")\n\n ax1.spines['top'].set_visible(True)\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.yaxis.set_minor_locator(yminorLocator)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n ax1.legend(loc = 0)\n\n if return_figure:\n return fig\n\n plt.show()\n pass\n else:\n warnings.warn(\"Doesn't look like you have loaded a filter into the object\")\n\n\n def resample_response(self, new_wavelength = False, k = 1, verbose=False,\n *args, **kwargs):\n \"\"\"\n Bit dodgy - spline has weird results for poorly sampled filters.\n Now the order is by default 1, seems to be less likely to introduce artifacts\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"wavelength\") and hasattr(self, \"throughput\"):\n\n if verbose: print(\"resampling response\")\n\n self._wavelength_orig = self.wavelength\n self._throughput_orig = self.throughput\n\n self.wavelength = np.concatenate(([0,1], self._wavelength_orig, [24999,25000]))\n self.throughput = np.concatenate(([0,0], self._throughput_orig, [0,0]))\n\n interp_func = InterpolatedUnivariateSpline(self.wavelength, self.throughput, k = k,\n *args, **kwargs)\n self.throughput = interp_func(new_wavelength)\n self.wavelength = new_wavelength\n # self.wavelength.name = \"wavelength\"\n\n 
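# A compact standalone sketch of the two filter quantities computed above: the
# effective wavelength (the point where the cumulative lambda*T(lambda) reaches
# one half) and an AB-style zeropoint (-2.5 log10 of the response-weighted,
# area-normalised integral of a reference flux density).  The Gaussian throughput
# and constant reference flux are toy values; np.trapz stands in here for the
# simps/trapz integration used by the module itself.
import numpy as np
from scipy.interpolate import interp1d

wavelength = np.linspace(4000.0, 6000.0, 401)                      # Angstrom
throughput = np.exp(-0.5 * ((wavelength - 5000.0) / 250.0) ** 2)   # toy response

cumulative = np.cumsum(wavelength * throughput) / np.sum(wavelength * throughput)
lambda_eff = interp1d(cumulative, wavelength)(0.5)                 # ~5000 A for this toy filter

f_ref = np.full_like(wavelength, 1.0e-17)                          # erg/s/cm^2/A, illustrative
area = np.trapz(throughput, wavelength)                            # "effective area" of the response
zp = -2.5 * np.log10(np.trapz(throughput * f_ref, wavelength) / area)
print(lambda_eff, zp)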
self.throughput[np.where(self.throughput < 0.0)] = 0.0\n else:\n warning.warn(\"Doesn't look like you have loaded a filter into the object\")\n\n\n def load(self, path, directory = False, fmt = \"ascii.commented_header\",\n names = (\"wavelength\", \"throughput\"), wavelength_u = u.angstrom,\n verbose = False, name = False):\n \"\"\"\n Assumes Response function is fractional rather than %.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n\n if utils.check_file_path(os.path.abspath(path), verbose = verbose):\n\n self.data = Table.read(path, format = fmt, names = names)\n\n self.wavelength = self.data[\"wavelength\"]*wavelength_u\n self.wavelength = self.wavelength.to(u.angstrom)\n self.data[\"wavelength\"] = self.wavelength\n self.throughput = self.data[\"throughput\"]\n\n self.wavelength_u = self.wavelength * wavelength_u\n self._wavelength_units = wavelength_u\n\n self._filter_file_path = path\n\n if name:\n self.filter_name = name\n\n filename = path.split('/')[-1]\n filename_no_extension = filename.split('.')[0]\n\n else:\n warnings.warn(\"Foo\")\n\n\n def load_table(self, table, name, directory = False, wavelength_u = u.angstrom,\n verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n \"\"\"\n Assumes Response function is fractional rather than %.\n \"\"\"\n\n self.filter_name = name\n\n self.data = table\n\n if not hasattr(table[\"wavelength\"], \"unit\"):\n self.wavelength = self.data[\"wavelength\"]*wavelength_u\n else:\n self.wavelength = self.data[\"wavelength\"]\n\n self.wavelength = self.wavelength.to(u.angstrom)\n self.data[\"wavelength\"] = self.wavelength\n self.throughput = self.data[\"throughput\"]\n\n self.wavelength_u = self.wavelength.to(wavelength_u)\n self._wavelength_units = wavelength_u\n\n\n def save(self, filename, path = False,\n squash = False, verbose = True, *args, **kwargs):\n \"\"\"\n Output the filter loaded into the Class into a format\n and location recognised by CoCo.\n\n based on BaseSpectrumClass.save\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"wavelength\") and hasattr(self, \"throughput\"): ## enables resampling and wavelength conversion to be easily saved\n if verbose: print(\"has data\")\n if not path:\n if verbose: print(\"No directory specified, assuming \" + defaults._default_filter_dir_path)\n path = defaults._default_filter_dir_path\n else:\n errors.StringWarning(path)\n\n outpath = os.path.join(path, filename)\n\n utils.check_dir_path(path)\n\n if os.path.isfile(outpath):\n warnings.warn(\"Found existing file matching \" + path + \". Run with squash = True to overwrite\")\n if squash:\n print(\"Overwriting \" + outpath)\n outtable = Table([self.wavelength, self.throughput], names = [\"wavelength\", \"throughput\"])\n outtable.write(outpath, format = \"ascii.fast_commented_header\", overwrite = True)\n self._format_for_save = outtable\n\n\n else:\n print(\"Writing \" + outpath)\n outtable = Table([self.wavelength, self.throughput], names = [\"wavelength\", \"throughput\"])\n\n outtable.write(outpath, format = \"ascii.fast_commented_header\")\n self._format_for_save = outtable\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n# #------------------------------------# #\n# # Inheriting Classes # #\n# #------------------------------------# #\n\n\nclass PhotometryClass(BaseLightCurveClass):\n \"\"\"\n Inherits from BaseLightCurveClass\n\n Probably also overkill - but should be easier to store metadata etc. 
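# One plausible call sequence for the filter machinery above: load a two-column
# (wavelength, throughput) file, resample it onto a coarser grid and write it back
# out.  The file names are hypothetical, and the input file must exist in the
# format expected by load() for the calls to succeed.
import numpy as np

filt = BaseFilterClass()
filt.load("BessellB.dat", name="BessellB")             # ascii.commented_header by default
filt.calculate_effective_wavelength()
filt.resample_response(new_wavelength=np.arange(3500.0, 6500.0, 5.0))
filt.save("BessellB_resampled.dat", squash=True)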
Hopefully\n flexible enough to just be a wrapper for AP tables of phot.\n\n Photometry stored in PhotometryClass.data should have a FilterClass method\n describing the observations stored in PhotometryClass.data_filters.\n\n ## NOTE should I use properties instead of get/set? http://www.python-course.eu/python3_properties.php\n looks like only python3?\n \"\"\"\n\n def __init__(self, verbose = False):\n \"\"\"\n\n \"\"\"\n\n ## Initialise the class variables\n self._default_data_dir_path = os.path.join(defaults._default_data_dir_path, 'lc/')\n self._default_filter_dir_path = defaults._default_filter_dir_path\n self.data = OrderedDict()\n self.data_filters = OrderedDict()\n\n ## Initialise using class methods\n self.set_data_directory(self._default_data_dir_path)\n self.set_filter_directory(self._get_filter_directory())\n\n\n def _get_data_directory(self):\n \"\"\"\n Get the default path to the data directory.\n\n Looks for the data data directory set as environment variable\n $PYCOCO_DATA_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $PYCOCO_DATA_DIR, or\n default datalocation: '../testdata/', with '/lc/' appended.\n \"\"\"\n\n return self.data_directory\n\n\n def set_data_directory(self, data_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(data_dir_path, self._default_data_dir_path)\n if os.path.isdir(os.path.abspath(data_dir_path)):\n self.data_directory = os.path.abspath(data_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(data_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_data_dir_path, UserWarning)\n self.data_directory = self._default_data_dir_path\n\n if not os.path.isdir(self.data_directory):\n if verbose: print(os.path.isdir(self.data_directory))\n raise errors.PathError(\"The default data directory '\" + self.data_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_data_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
Have\"\n + \" you messed with _default_data_dir_path?\")\n pass\n\n\n def load(self, path, names = ('MJD', 'flux', 'flux_err', 'filter'),\n format = 'ascii.commented_header', verbose = False):\n \"\"\"\n Loads a single photometry file.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n errors.StringWarning(path)\n try:\n phot_table = self.load_formatted_phot(path, names = names, format = format, verbose = verbose)\n self.phot = phot_table\n self.unpack()\n\n ## Sort the OrderedDict\n self._sort_phot()\n except:\n raise Exception\n\n\n def load_formatted_phot(self, path, format = \"ascii\", names = False,\n verbose = False):\n \"\"\"\n Loads a single photometry file.\n\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n errors.StringWarning(path)\n\n if names:\n phot_table = Table.read(path, format = format, names = names)\n else:\n phot_table = Table.read(path, format = format)\n\n phot_table.meta[\"filename\"] = path\n\n phot_table[\"MJD\"].unit = u.day\n phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n phot_table[\"flux_err\"].unit = phot_table[\"flux\"].unit\n\n return phot_table\n\n\n def load_phot_from_file(self, path, names = ('MJD', 'flux', 'flux_err', 'filter'),\n format = 'ascii', verbose = False):\n \"\"\"\n For single filter data\n \"\"\"\n errors.StringWarning(path)\n try:\n # phot_table = functions.load_phot(path, names = names, format = format, verbose = verbose)\n # phot_table = ap.table.Table.read(path, format = format, names = names)\n phot_table = Table.read(path, format = format, names = names)\n\n phot_table.replace_column(\"MJD\", Time(phot_table[\"MJD\"], format = 'mjd'))\n\n phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n phot_table[\"flux_err\"].unit = phot_table[\"flux\"].unit\n\n self.data[np.unique(phot_table[\"filter\"])[0]] = phot_table\n\n ## Sort the OrderedDict\n self._sort_phot()\n except:\n raise Exception\n\n pass\n\n\n def load_phot_from_files(self, path = False, snname = False, prefix = 'SN',\n file_type = '.dat', names = ('MJD', 'flux', 'flux_err', 'filter'),\n format = 'ascii', filter_file_type = '.dat', verbose = False):\n \"\"\"\n Finds and loads in data (from file) into phot objects.\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n\n if snname:\n if not path:\n path = self._default_data_dir_path\n ## Find matching photometry\n phot_list = find_filter_phot(path = path, snname = snname, prefix = prefix,\n file_type = file_type, verbose = verbose)\n\n full_phot_table = Table()\n\n ## Loop over files (shouldn't be that many really)\n if len(phot_list) > 0:\n\n for phot_file in phot_list:\n\n if verbose: print(phot_file)\n phot_table = Table.read(phot_file, names = names, format = format)\n\n ## NOTE astropy vstack does not support mixin columns http://docs.astropy.org/en/stable/table/mixin_columns.html\n # This means I might have problems joining the tables together if I don't add together as I go along.\n\n full_phot_table = vstack([full_phot_table, phot_table])\n\n filter_string = functions.get_filter_from_filename(phot_file, snname, file_type)\n phot_table.meta = {\"filename\" : phot_file,\n \"filter\" : filter_string,\n \"filter_filename\": filter_string + filter_file_type}\n\n ## Sort out units\n phot_table.sort(\"MJD\")\n phot_table[\"t\"] = Time(phot_table[\"MJD\"], format = 'mjd')\n\n phot_table[\"MJD\"].unit = u.day\n phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n phot_table[\"flux_err\"].unit = 
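# The loaders above expect a multi-filter photometry table with the columns
# ('MJD', 'flux', 'flux_err', 'filter').  A minimal, hypothetical input file and
# the matching call; the values and filter name are illustrative, and unpack()
# assumes a corresponding filter response file can be found in the filter directory.
#
#   # MJD flux flux_err filter
#   55100.3 1.21e-15 0.05e-15 BessellB
#   55105.1 1.62e-15 0.06e-15 BessellB
#
phot = PhotometryClass()
phot.load("sn_phot.dat")      # names=('MJD', 'flux', 'flux_err', 'filter') by default
print(phot.phot)              # the combined table
print(phot.data.keys())       # one entry per filter after unpack()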
phot_table[\"flux\"].unit\n\n ## Put in dictionary - use filter from the file\n filter_key = np.unique(phot_table[\"filter\"])[0]\n if verbose: print(len(np.unique(phot_table[\"filter\"])) , phot_table.meta[\"filter\"], filter_key)\n\n if len(np.unique(phot_table[\"filter\"])) > 1 or filter_key != phot_table.meta[\"filter\"]:\n raise errors.FilterMismatchError(\"There is a mismatch between the filter filename and that in the \"\n + \"photometry file\")\n\n self.data[filter_key] = phot_table\n\n path_to_filter = os.path.join(self.filter_directory, phot_table.meta['filter_filename'])\n self.data_filters[filter_key] = functions.load_filter(path_to_filter)\n\n\n ## NOTE doing it this way because vstack doesn't like mixin columns (see above comment)\n full_phot_table.sort(\"MJD\")\n # full_phot_table[\"t\"] = Time(full_phot_table[\"MJD\"], format = 'mjd')\n full_phot_table[\"MJD\"].unit = u.day\n\n full_phot_table[\"flux\"].unit = u.cgs.erg / u.si.angstrom / u.si.cm ** 2 / u.si.s\n full_phot_table[\"flux_err\"].unit = full_phot_table[\"flux\"].unit\n\n self.phot = full_phot_table\n\n ## Sort the OrderedDict\n self._sort_phot()\n else:\n warning.warn(\"Couldn't find any photometry\")\n else:\n warnings.warn(\"Provide a SN name\")\n\n pass\n\n\n def _combine_phot(self, verbose = False):\n \"\"\"\n\n \"\"\"\n\n if hasattr(self, \"data\"):\n if verbose: print(self.data.keys())\n\n for i, phot_filter in enumerate(self.data.keys()):\n\n if verbose: print(i, phot_filter)\n\n if i == 0:\n\n full_phot = self.data[phot_filter]\n\n else:\n\n\n full_phot = vstack([full_phot, self.data[phot_filter]])\n\n pass\n\n self.data['full'] = full_phot\n\n else:\n warnings.warn(\"Cant find self.data\")\n\n pass\n\n\n def plot(self, filters=False, legend=True, xminorticks=5, enforce_zero = True,\n verbose=False, xlim=False, yaxis_max_factor=1.02, return_figure=False,\n *args, **kwargs):\n \"\"\"\n Plots phot.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n\n if not filters:\n filters = self.data_filters\n if type(filters) == str:\n filters = [filters]\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.93,\n right = 0.96, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n for i, filter_key in enumerate(filters):\n if verbose: print(i, self.data[filter_key].__dict__)\n plot_label_string = r'$\\rm{' + self.data_filters[filter_key].filter_name.replace('_', '\\\\_') + '}$'\n if filter_key in colours.hex.keys():\n self.data_filters[filter_key]._plot_colour = colours.hex[filter_key]\n else:\n warnings.warn(\"Cannot find filter in the pycocosn colours registry\")\n self.data_filters[filter_key]._plot_colour = \"C0\"\n\n ax1.errorbar(self.data[filter_key]['MJD'], self.data[filter_key]['flux'],\n yerr = self.data[filter_key]['flux_err'],\n capsize = 0, fmt = 'o', color = self.data_filters[filter_key]._plot_colour,\n label = plot_label_string, ecolor = colours.hex['batman'], mec = colours.hex[\"batman\"],\n *args, **kwargs)\n\n if legend:\n\n # plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n # numpoints = 1, frameon = False, fontsize = 12)\n plot_legend = ax1.legend(loc = 1, scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n ## Use ap table groups instead? 
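# The comment above notes that astropy's vstack does not support mixin columns
# (such as Time), which is why the per-filter tables are stacked as they are read
# and the Time column is only added per filter.  The stacking pattern on its own,
# with plain columns and illustrative values:
from astropy.table import Table, vstack

t_b = Table({"MJD": [55100.3, 55105.1], "flux": [1.2e-15, 1.6e-15], "filter": ["BessellB"] * 2})
t_v = Table({"MJD": [55101.0], "flux": [1.4e-15], "filter": ["BessellV"]})

full = vstack([t_b, t_v])
full.sort("MJD")
print(full)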
- can't; no support for mixin columns.\n if enforce_zero:\n ax1.set_ylim(0., yaxis_max_factor * np.nanmax(self.phot['flux']))\n else:\n ax1.set_ylim(np.nanmin(self.phot['flux']), yaxis_max_factor * np.nanmax(self.phot['flux']))\n\n if xlim:\n ax1.set_xlim(xlim)\n\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Time, MJD (days)}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n # yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{cm}^{-2}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.spines['top'].set_visible(True)\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n return fig\n plt.show()\n\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_filters(self, xminorticks = 250, yminorticks = 0.1,\n legend = True, use_cmap = False, verbose = False):\n \"\"\"\n Plots filters.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self, \"data_filters\"):\n\n utils.setup_plot_defaults()\n xaxis_label_string = r'$\\textnormal{Wavelength, (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Fractional Throughput}$'\n yminorLocator = MultipleLocator(yminorticks)\n xminorLocator = MultipleLocator(xminorticks)\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n ax1 = fig.add_subplot(111)\n\n ## Plot the throughput for each filter\n for i, filter_key in enumerate(self.data_filters):\n if verbose: print(i, self.data_filters[filter_key].__dict__)\n plot_label_string = r'$\\rm{' + self.data_filters[filter_key].filter_name.replace('_', '\\\\_') + '}$'\n if hasattr(self.data_filters[filter_key], \"_plot_colour\") and use_cmap:\n ax1.plot((self.data_filters[filter_key].wavelength_u).to(u.angstrom),\n self.data_filters[filter_key].throughput,\n color = self.data_filters[filter_key]._plot_colour,\n lw = 2, label = plot_label_string)\n else:\n ax1.plot((self.data_filters[filter_key].wavelength_u).to(u.angstrom),\n self.data_filters[filter_key].throughput,\n lw = 2, label = plot_label_string)\n # if hasattr(self, \"_plot_colour\"):\n # ax1.plot(self.wavelength, self.throughput, color = self._plot_colour,\n # lw = 2, label = plot_label_string)\n # else:\n # ax1.plot(self.wavelength, self.throughput, lw = 2, label = plot_label_string)\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.yaxis.set_minor_locator(yminorLocator)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if legend:\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any filters here (empty self.filter_data)\")\n pass\n\n\nclass SpectrumClass(BaseSpectrumClass):\n \"\"\"\n Class for handling Spectra.\n Inherits from BaseSpectrumClass.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n \"\"\"\n\n ## Initialise the class variables\n self._default_data_dir_path = os.path.join(defaults._default_data_dir_path, \"spec/\")\n self._default_list_dir_path = defaults._default_list_dir_path\n\n ## Initialise using class methods\n self.set_data_directory(self._default_data_dir_path)\n self.set_list_directory(self._get_list_directory())\n pass\n\n\n def _get_data_directory(self, path=False):\n \"\"\"\n Get the default path to the data directory.\n\n Looks for the data data directory 
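# Hypothetical plotting calls for the two methods above, assuming the photometry
# (and its filters) loaded as in the earlier sketch; filter keys are whatever
# appears in the 'filter' column of the input file.
phot = PhotometryClass()
phot.load("sn_phot.dat")
phot.plot(enforce_zero=True, xminorticks=10)   # light curves in every loaded filter
phot.plot(filters="BessellB")                  # a single filter
phot.plot_filters(legend=True)                 # the filter throughput curves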
set as environment variable\n $PYCOCO_DATA_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $PYCOCO_DATA_DIR, or\n default datalocation: '../testdata/', with '/spec/' appended.\n \"\"\"\n\n return self.data_directory\n\n\n def set_data_directory(self, data_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(data_dir_path, self._default_data_dir_path)\n if os.path.isdir(os.path.abspath(data_dir_path)):\n self.data_directory = os.path.abspath(data_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(data_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_data_dir_path, UserWarning)\n self.data_directory = self._default_data_dir_path\n\n if not os.path.isdir(self.data_directory):\n if verbose: print(os.path.isdir(self.data_directory))\n raise errors.PathError(\"The default data directory '\" + self.data_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_data_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. Have\"\n + \" you messed with _default_data_dir_path?\")\n pass\n\n\nclass LCfitClass(BaseLightCurveClass):\n \"\"\"\n Small class to hold the output from CoCo LCfit.\n Inherits from BaseLightCurveClass\n \"\"\"\n\n def __init__(self):\n\n ## Initialise the class variables\n self._default_recon_dir_path = os.path.join(defaults._default_coco_dir_path, \"recon/\")\n self._default_filter_dir_path = defaults._default_filter_dir_path\n\n ## Initialise using class methods\n self.set_recon_directory(self._get_recon_directory())\n self.set_filter_directory(self._get_filter_directory())\n\n ## Initialise some other stuff\n self.data = OrderedDict()\n self.data_filters = OrderedDict()\n\n pass\n\n\n def _get_recon_directory(self):\n \"\"\"\n Get the default path to the data directory.\n\n Looks for the CoCo home directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default CoCo location: '~/Code/CoCo/', with 'recon/' appended.\n \"\"\"\n\n return os.path.join(self._default_recon_dir_path, os.path.pardir, \"recon/\")\n\n\n def set_recon_directory(self, recon_dir_path = '', verbose = False):\n \"\"\"\n Set a new recon directory path.\n\n Enables the recon directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(recon_dir_path, self._default_recon_dir_path)\n if os.path.isdir(os.path.abspath(recon_dir_path)):\n self.recon_directory = os.path.abspath(recon_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(recon_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_recon_dir_path, UserWarning)\n self.recon_directory = self._default_recon_dir_path\n\n if not os.path.isdir(self.recon_directory):\n if verbose: print(os.path.isdir(self.recon_directory))\n raise errors.PathError(\"The default recon directory '\" + self.recon_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default recon directory '\" + self._default_recon_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
Have\"\n + \" you messed with _default_recon_dir_path?\")\n pass\n\n\n def load_formatted_phot(self, path, names = ('MJD', 'flux', 'flux_err', 'filter'),\n format = 'ascii', verbose = False):\n \"\"\"\n\n \"\"\"\n errors.StringWarning(path)\n\n try:\n phot_table = utils.load_formatted_phot(path, format = format, names = names,\n verbose = verbose)\n self.phot = phot_table\n\n self.phot['flux_upper'] = phot_table['flux'] + phot_table['flux_err']\n self.phot['flux_lower'] = phot_table['flux'] - phot_table['flux_err']\n\n except:\n raise Exception\n\n pass\n\n\n def plot(self, legend = True, xminorticks = 5, return_figure=False,\n verbose = False, *args, **kwargs):\n \"\"\"\n Plots phot.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n for i, filter_key in enumerate(self.data_filters):\n if verbose: print(i, self.data[filter_key].__dict__)\n plot_label_string = r'$\\rm{' + self.data_filters[filter_key].filter_name.replace('_', '\\\\_') + '}$'\n\n # ax1.errorbar(self.data[filter_key]['MJD'], self.data[filter_key]['flux'],\n # yerr = self.data[filter_key]['flux_err'],\n # capsize = 0, fmt = 'o',\n # label = plot_label_string,\n # *args, **kwargs)\n\n # ## Best Fit\n # ax1.plot(self.data[filter_key]['MJD'], self.data[filter_key]['flux'],\n # lw = 2, label = plot_label_string,\n # *args, **kwargs)\n\n ## With error\n ax1.fill_between(self.data[filter_key]['MJD'], self.data[filter_key]['flux_upper'], self.data[filter_key]['flux_lower'],\n label = plot_label_string, color = self.data_filters[filter_key]._plot_colour,\n alpha = 0.8,\n *args, **kwargs)\n if legend:\n\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n ## Use ap table groups instead? 
- can't; no support for mixin columns.\n ax1.set_ylim(np.nanmin(self.phot['flux']), np.nanmax(self.phot['flux']))\n\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Time, MJD (days)}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n return fig\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def get_fit_splines(self, verbose = False):\n \"\"\"\n\n Parameters\n ----------\n\n Returns\n -------\n\n \"\"\"\n if hasattr(self, \"data\"):\n self.spline = OrderedDict()\n\n for i, filter_key in enumerate(self.data):\n try:\n if verbose: print(filter_key)\n self.spline[filter_key] = InterpolatedUnivariateSpline(self.data[filter_key][\"MJD\"], self.data[filter_key][\"flux\"])\n self.spline[filter_key+\"_err\"] = InterpolatedUnivariateSpline(self.data[filter_key][\"MJD\"], self.data[filter_key][\"flux_err\"])\n except:\n print(\"NOPE\")\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def colour_from_model(self, filter_key1, filter_key2):\n\n return phot_1 - phot_2\n\n\nclass specfitClass(BaseSpectrumClass):\n \"\"\"\n Small class to hold the output from CoCo spec.\n Inherits from BaseSpectrumClass.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n \"\"\"\n\n ## Initialise the class variables\n self._default_recon_dir_path = os.path.join(defaults._default_coco_dir_path, \"recon/\")\n # self._default_list_dir_path = self._default_data_dir_path\n\n ## Initialise using class methods\n self.set_recon_directory(self._get_recon_directory())\n\n pass\n\n\n def _get_recon_directory(self):\n \"\"\"\n Get the default path to the recon directory.\n\n Looks for the CoCo directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default datalocation: '../testdata/', with '/spec/' appended.\n \"\"\"\n\n return os.path.join(os.environ.get('COCO_ROOT_DIR', os.path.join(self._default_recon_dir_path, os.pardir)), \"recon/\")\n\n\n def set_recon_directory(self, recon_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(recon_dir_path, self._default_recon_dir_path)\n if os.path.isdir(os.path.abspath(recon_dir_path)):\n self.recon_directory = os.path.abspath(recon_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(recon_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_recon_dir_path, UserWarning)\n self.recon_directory = self._default_recon_dir_path\n\n if not os.path.isdir(self.recon_directory):\n if verbose: print(os.path.isdir(self.recon_directory))\n raise errors.PathError(\"The default data directory '\" + self.recon_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_recon_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
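# colour_from_model above returns phot_1 - phot_2, but neither name appears to be
# defined in the method body, so as written it would likely raise a NameError.
# One way to get a colour out of the fitted light curves, using the splines built
# by get_fit_splines and hypothetical zeropoints, is sketched here:
import numpy as np

def colour_from_splines(lcfit, filter_key1, filter_key2, mjd, zp1=0.0, zp2=0.0):
    """Magnitude difference filter_key1 - filter_key2 at a given MJD (toy zeropoints)."""
    flux_1 = lcfit.spline[filter_key1](mjd)
    flux_2 = lcfit.spline[filter_key2](mjd)
    return (-2.5 * np.log10(flux_1) + zp1) - (-2.5 * np.log10(flux_2) + zp2)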
Have\"\n + \" you messed with _default_recon_dir_path?\")\n pass\n\n\n def set_orig_specpath(self, orig_specpath = False, verbose = False):\n \"\"\"\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n\n if not orig_specpath:\n self.orig_specpath = self.data.meta[\"comments\"][0].split(\"/\")[-1]\n\n else:\n self.orig_specpath = orig_specpath\n\n pass\n\n\n def plot_comparison(self, SpectrumClassInstance,\n xminorticks=250, legend=True,\n verbose=True, twoaxes=True, return_figure=False,\n *args, **kwargs):\n \"\"\"\n Plots spec.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, \"data\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.94, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n if verbose: print(self.data.__dict__)\n plot_label_string = r'$\\rm{' + self.data.meta[\"filename\"].replace('_', '\\_') + '}$'\n plot_label_string_compare = r'$\\rm{' + SpectrumClassInstance.data.meta[\"filename\"].replace('_', '\\_') + '}$'\n\n\n ax1.plot(self.data['wavelength'], self.flux, lw = 2,\n label = plot_label_string, color = 'Red',\n *args, **kwargs)\n if twoaxes:\n ax2 = ax1.twinx()\n ax2.plot(SpectrumClassInstance.data['wavelength'], SpectrumClassInstance.data['flux'],\n label = plot_label_string_compare, color = 'Blue',\n *args, **kwargs)\n\n else:\n ax1.plot(SpectrumClassInstance.data['wavelength'], SpectrumClassInstance.data['flux'],\n label = plot_label_string_compare, color = 'Blue',\n *args, **kwargs)\n\n maxplotydata = np.nanmax(np.append(self.flux, SpectrumClassInstance.data['flux']))\n minplotydata = np.nanmin(np.append(self.flux, SpectrumClassInstance.data['flux']))\n\n if legend:\n ## https://stackoverflow.com/a/10129461\n lines, labels = ax1.get_legend_handles_labels()\n lines2, labels2 = ax2.get_legend_handles_labels()\n\n # plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n ax1.legend(lines + lines2,labels + labels2, loc=0, scatterpoints=1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n ax1.set_ylim(minplotydata*0.98, maxplotydata*1.02)\n\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n return fig\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\nclass FilterClass(BaseFilterClass):\n \"\"\"Docstring for FilterClass inherits from BaseFilterClass\"\"\"\n\n def __init__(self):\n self._wavelength_units = u.Angstrom\n self._wavelength_units._format['latex'] = r'\\rm{\\AA}'\n self._frequency_units = u.Hertz\n pass\n\n\n def read_filter_file(self, path, fmt = \"ascii\",\n names = (\"wavelength\", \"throughput\"),\n wavelength_u = u.angstrom, verbose = False):\n \"\"\"\n Assumes Response function is fractional rather than %.\n \"\"\"\n if utils.check_file_path(os.path.abspath(path), verbose = verbose):\n self.data = Table.read(path, format = fmt, names = names)\n self.wavelength = self.data[\"wavelength\"] * wavelength_u\n if verbose: print(\"1\", np.nanmax(self.wavelength))\n self.wavelength = self.wavelength.to(u.angstrom)\n self.throughput = self.data[\"throughput\"]\n if verbose: print(\"2\", np.nanmax(self.wavelength))\n\n self.wavelength_u = 
self.wavelength.to(wavelength_u)\n self._filter_file_path = path\n if verbose: print(\"3\", np.nanmax(self.wavelength))\n\n filename = path.split('/')[-1]\n filename_no_extension = filename.split('.')[0]\n self.filter_name = filename_no_extension\n if verbose: print(\"4\", np.nanmax(self.wavelength))\n\n self.set_plot_colour(verbose = verbose)\n if verbose: print(\"5\", np.nanmax(self.wavelength))\n self.calculate_effective_wavelength()\n if verbose: print(\"6\", np.nanmax(self.wavelength))\n self.calculate_edges()\n if verbose: print(\"7\", np.nanmax(self.wavelength))\n self.get_zeropoint()\n if verbose: print(\"8\", np.nanmax(self.wavelength))\n\n else:\n warnings.warn(\"Foo\")\n\n\n def calculate_edges_zero(self, verbose = False):\n \"\"\"\n calculates the first and last wavelength that has non-zero and steps one\n away\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n ## calculates the first and last wavelength that has non-zero\n # w = np.where(self.throughput > 0)[0]\n # if verbose: print(w)\n # self._upper_edge = self.wavelength[w[-1]]\n # self._lower_edge = self.wavelength[w[0]]\n\n w = np.where(self.throughput > 0)[0]\n if verbose: print(w)\n if w[0] - 1 < 0:\n w_low = 0\n else:\n w_low = w[0] - 1\n\n if w[-1] + 1 == len(self.throughput):\n w_high = w[-1]\n else:\n w_high = w[-1] + 1\n\n self._upper_edge = self.wavelength[w_high]\n self._lower_edge = self.wavelength[w_low]\n\n\n def calculate_edges(self, pc = 3., verbose = False):\n \"\"\"\n calculates edges by defining the region that contains (100 - pc)% of the\n flux.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n self._cumulative_throughput = np.cumsum(self.throughput)/np.sum(self.throughput)\n self._cumulative_throughput_spline = interp1d(self._cumulative_throughput, self.wavelength)\n\n self._upper_edge = self._cumulative_throughput_spline(1.0 - 0.5*(0.01*pc))\n self._lower_edge = self._cumulative_throughput_spline(0.0 + 0.5*(0.01*pc))\n\n pass\n\n\n def calculate_plot_colour(self, colourmap_name = \"plasma\", verbose = False):\n \"\"\"\n\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if not hasattr(self, \"_colourmap\"):\n self._colourmap = plt.get_cmap(defaults._colourmap_name)\n\n if hasattr(self, 'lambda_effective'):\n\n relative_lambda = self.lambda_effective - defaults._colour_upper_lambda_limit\n relative_lambda = relative_lambda / defaults._colour_upper_lambda_limit\n\n if verbose: print(\"relative_lambda = \", relative_lambda)\n\n self._plot_colour = self._colourmap(relative_lambda)\n\n else:\n warnings.warn(\"No self.lambda_effective set.\")\n\n\n def set_plot_colour(self, colour = False, verbose = False):\n \"\"\"\n\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if colour:\n self._plot_colour = colour\n\n else:\n\n try:\n if verbose:\n if self.filter_name in colours.hex.keys:\n print(colours.hex[self.filter_name])\n self._plot_colour = colours.hex[self.filter_name]\n except:\n if verbose: print(\"Nope\")\n self.calculate_plot_colour(verbose = verbose)\n\n pass\n\n\n def get_zeropoint(self, abpath=os.path.join(defaults._default_kcorr_data_path, \"AB_pseudospectrum.dat\")):\n \"\"\"\n\n :return:\n \"\"\"\n\n if hasattr(self, \"filter_name\"):\n # self.zp_AB = self.calculate_AB_zp()\n self.calculate_AB_zp(ABpath=abpath)\n # self.zp_vega = self.calc_vega_zp(filter_name)\n else:\n warnings.warn(\"No filter name - have you loaded in a bandpass?\")\n\n pass\n\n\n# #------------------------------------# #\n# # Model Classes # #\n# 
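# calculate_edges above defines the filter edges as the wavelengths enclosing
# (100 - pc)% of the integrated throughput, i.e. it trims pc/2 percent off each
# tail of the cumulative throughput curve.  A standalone illustration with a toy
# Gaussian response and the default pc = 3:
import numpy as np
from scipy.interpolate import interp1d

wavelength = np.linspace(4000.0, 6000.0, 401)
throughput = np.exp(-0.5 * ((wavelength - 5000.0) / 200.0) ** 2)

pc = 3.0
cumulative = np.cumsum(throughput) / np.sum(throughput)
edge_spline = interp1d(cumulative, wavelength)
lower_edge = edge_spline(0.0 + 0.5 * 0.01 * pc)   # 1.5% point
upper_edge = edge_spline(1.0 - 0.5 * 0.01 * pc)   # 98.5% point
print(lower_edge, upper_edge)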
#------------------------------------# #\n#\n# class (BaseLCModelClass)\n\n\n# #------------------------------------# #\n# # Standalone Classes # #\n# #------------------------------------# #\n\n\nclass SNClass():\n \"\"\"docstring for SNClass.\"\"\"\n\n def __init__(self, snname):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n ## Initialise\n self.spec = OrderedDict()\n self.mangledspec = OrderedDict()\n # self.spec = SpectrumClass()\n self.phot = PhotometryClass()\n info = InfoClass()\n info.load()\n self.info = info.get_sn_info(snname)\n\n self.coco_directory = self._get_coco_directory()\n self.recon_directory = self._get_recon_directory()\n\n self.name = snname\n pass\n\n\n def _get_coco_directory(self):\n \"\"\"\n Get the default path to the data directory.\n\n Looks for the CoCo home directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default CoCo location: '~/Code/CoCo/', with appended.\n \"\"\"\n\n return os.path.abspath(os.environ.get('COCO_ROOT_DIR', os.path.abspath(defaults._default_coco_dir_path)))\n\n\n @staticmethod\n def _get_recon_directory():\n \"\"\"\n Get the default path to the recon directory.\n\n Looks for the CoCo directory set as environment variable\n $COCO_ROOT_DIR. if not found, returns default.\n\n returns: Absolute path in environment variable $COCO_ROOT_DIR, or\n default datalocation: '../testdata/', with '/spec/' appended.\n \"\"\"\n\n return os.path.join(os.path.abspath(os.environ.get('COCO_ROOT_DIR', os.path.join(defaults._default_recon_dir_path, os.pardir))), \"recon/\")\n\n\n def set_recon_directory(self, recon_dir_path = '', verbose = False):\n \"\"\"\n Set a new data directory path.\n\n Enables the data directory to be changed by the user.\n\n \"\"\"\n try:\n if verbose: print(recon_dir_path, self._default_recon_dir_path)\n if os.path.isdir(os.path.abspath(recon_dir_path)):\n self.recon_directory = os.path.abspath(recon_dir_path)\n pass\n else:\n warnings.warn(os.path.abspath(recon_dir_path) +\n \" is not a valid directory. Restoring default path: \" +\n self._default_recon_dir_path, UserWarning)\n self.recon_directory = self._default_recon_dir_path\n\n if not os.path.isdir(self.recon_directory):\n if verbose: print(os.path.isdir(self.recon_directory))\n raise errors.PathError(\"The default data directory '\" + self.recon_directory\n + \"' doesn't exist. Or isn't a directory. Or can't be located.\")\n else:\n pass\n except:\n if verbose: print(\"foo\")\n raise errors.PathError(\"The default data directory '\" + self._default_recon_dir_path\n + \"' doesn't exist. Or isn't a directory. Or can't be located. 
Have\"\n + \" you messed with _default_recon_dir_path?\")\n pass\n\n\n def load_phot(self, phot_table = False, snname = False, path = False, file_type = '.dat',\n verbose = False):\n \"\"\"\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if not snname:\n snname = self.name\n if phot_table:\n self.phot.load_table(phot_table=phot_table, verbose=verbose)\n else:\n if not path:\n path = os.path.join(self.phot._default_data_dir_path, snname + file_type)\n if verbose: print(path)\n self.phot.load(path, verbose = verbose)\n\n pass\n\n\n def load_list(self, path, specfiletype = \".txt\", verbose = False):\n \"\"\"\n Parameters\n ----------\n Returns\n -------\n \"\"\"\n listdata = utils.read_list_file(path, verbose=verbose)\n listdata.sort('mjd_obs')\n\n phases = []\n\n # for item in listdata[\"spec_path\"]:\n # filename = item.split(\"/\")[-1]\n # filename = filename.split(\"_\")[1:][0]\n # filename = filename.strip(specfiletype)\n # try:\n # phase = float(filename)\n # except:\n # pass\n #\n # phases.append(phase)\n # if verbose: print(phase)\n # listdata[\"phase\"] = phases\n\n self.list = listdata\n\n\n def load_spec(self, snname = False, spec_dir_path = False, verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n\n # if not snname:\n # snname = self.name\n #\n # if not spec_dir_path:\n # spec_dir_path = os.path.abspath(os.path.join(self._default_spec_data_dir_path, snname))\n #\n # if verbose: print(\"Loading spectra from: \", spec_dir_path)\n\n # spec_dir_path =\n\n\n if hasattr(self, 'coco_directory') and hasattr(self, 'list'):\n for i, path in enumerate(self.list['spec_path']):\n spec_fullpath = os.path.join(self.coco_directory, path)\n spec_filename = path.split('/')[-1]\n spec_dir_path = spec_fullpath.replace(spec_filename, '')\n if verbose: print(spec_fullpath, spec_dir_path, spec_filename)\n\n self.spec[spec_filename] = SpectrumClass()\n self.spec[spec_filename].load(spec_filename, directory = spec_dir_path,\n verbose = verbose)\n self.spec[spec_filename].set_infile(spec_filename)\n self.spec[spec_filename].set_MJD_obs(self.list['mjd_obs'][i])\n # self.spec[spec_filename].data.add_index('wavelength')\n\n else:\n warnings.warn(\"no coco or no listfile\")\n pass\n\n\n def load_mangledspec(self, snname = False, spec_dir_path = False, verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if not snname:\n snname = self.name\n\n # self._mangledspeclist = functions.find_recon_spec(snname)\n self._mangledspeclist = find_specphase_spec(self.name)\n self.mangledspec = OrderedDict()\n if verbose: print(\"loading mangledspec\")\n if hasattr(self, 'recon_directory') and hasattr(self, '_mangledspeclist') and hasattr(self, \"mangledspec\"):\n for i, spec_filename in enumerate(self._mangledspeclist):\n\n if verbose: print(i, spec_filename)\n # self.mangledspec[spec_filename] = SpectrumClass()\n self.mangledspec[spec_filename] = specfitClass()\n self.mangledspec[spec_filename].load(spec_filename, directory = self.recon_directory,\n verbose = verbose)\n\n orig_specpath = self.mangledspec[spec_filename].data.meta['comments']\n orig_specname = orig_specpath\n print(orig_specpath)\n w = np.where(self.list[\"spec_path\"] == orig_specpath)\n if verbose: print(w[0], len(w[0]))\n\n if len(w[0]) > 0:\n self.mangledspec[spec_filename].set_MJD_obs(self.list['mjd_obs'][w].data[0])\n self.mangledspec[spec_filename].data.add_index('wavelength')\n #\n else:\n warnings.warn(\"no coco or no listfile\")\n pass\n\n\n def load_sndist(self, 
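# A hypothetical end-to-end use of SNClass with the loaders above.  The SN name,
# list file and data locations are assumptions for the sketch and must exist in
# your CoCo / pycoco data directories for the calls to succeed.
sn = SNClass("SN2011dh")
sn.load_phot(verbose=False)                     # looks in the default lc/ directory
sn.load_list("lists/SN2011dh.list")             # list file with mjd_obs and spec_path columns
sn.load_spec()                                  # reads every spectrum in the list
sn.plot_lc(multiplot=True, mark_spectra=True)   # light curves with spectral epochs marked
sn.plot_spec()                                  # the spectral series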
path = defaults._default_sn_dist_path, format = \"ascii\"):\n \"\"\"\n based on functions.read_sndist and load_sndist\n \"\"\"\n\n if hasattr(self, \"name\"):\n # sndist = load_sndist(self.name)\n # self.z = sndist[\"z\"].data[0]\n # self.distmod = sndist[\"mu\"].data[0]\n\n utils.check_file_path(path)\n sndistlist = Table.read(path, format = format)\n\n try:\n w = np.where(sndistlist[\"snname\"] == snname)\n sndist = sndistlist[w]\n\n self.z = sndist[\"z\"].data[0]\n self.distmod = sndist[\"mu\"].data[0]\n except:\n warnings.warn(\"Failed to find distance info for \" + snname + \". is it in the list?\")\n else:\n if verbose: print(\"self.name not defined.\")\n\n pass\n\n\n def plot_lc(self, filters = False, legend = True, xminorticks = 10, mark_spectra = True,\n simplespecphot = False, fade = False, xlims = False, insidelegend = True,\n fit = True, enforce_zero = True, multiplot = True, yaxis_lim_multiplier = 1.1,\n lock_axis = False, xextent = False, filter_uncertainty = 10, return_figure=False,\n savepng = False, savepdf = False, outpath = False, showsnname = False,\n verbose = False, extra_phot=False, extra_phot_label=r\"$\\textnormal{Spectrophotometry}$\",\n *args, **kwargs):\n \"\"\"\n\n :param filters:\n :param legend:\n :param xminorticks:\n :param mark_spectra:\n :param simplespecphot:\n :param fade:\n :param xlims:\n :param insidelegend:\n :param fit:\n :param enforce_zero:\n :param multiplot:\n :param yaxis_lim_multiplier:\n :param lock_axis:\n :param xextent:\n :param filter_uncertainty:\n :param savepng:\n :param savepdf:\n :param outpath:\n :param showsnname:\n :param verbose:\n :param extra_phot:\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n\n if hasattr(self.phot, \"data\"):\n if not fade:\n alpha = 1.0\n else:\n alpha = 0.2\n\n if not filters:\n filters = self.phot.data_filters\n if type(filters) == str:\n filters = [filters]\n\n utils.setup_plot_defaults()\n if not multiplot:\n fig = plt.figure(figsize=[8, 4])\n else:\n fig = plt.figure(figsize=[8, len(filters)*1.5])\n\n if showsnname:\n fig.suptitle(r\"$\\textrm{\"+self.name+\"}$\")\n if verbose: print(self.name)\n\n fig.subplots_adjust(left = 0.1, bottom = 0.13, top = 0.93,\n right = 0.91, hspace=0, wspace = 0)\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Time, MJD (days)}$'\n yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{\\AA}^{-1}\\textnormal{cm}^{-2}$'\n # yaxis_label_string = r'$\\textnormal{Flux, erg s}^{-1}\\textnormal{cm}^{-2}$'\n\n\n if not multiplot:\n ax1 = fig.add_subplot(111)\n axes_list = [ax1]\n else:\n axes_list = [plt.subplot2grid((len(filters), 1), (j, 0)) for j, k in enumerate(filters)]\n\n for i, filter_key in enumerate(filters):\n if multiplot:\n ax1 = axes_list[i]\n\n if filter_key in self.phot.data:\n if verbose: print(i, self.phot.data[filter_key].__dict__)\n plot_label_string = r'$\\rm{' + self.phot.data_filters[filter_key].filter_name.replace('_', '\\\\_') + '}$'\n\n if filter_key in colours.hex.keys():\n self.phot.data_filters[filter_key]._plot_colour = colours.hex[filter_key]\n else:\n warnings.warn(\"Cannot find filter in the pycocosn colours registry\")\n self.phot.data_filters[filter_key]._plot_colour = \"C0\"\n ax1.errorbar(self.phot.data[filter_key]['MJD'], self.phot.data[filter_key]['flux'],\n yerr = self.phot.data[filter_key]['flux_err'],\n capsize = 0, fmt = 'o', color = self.phot.data_filters[filter_key]._plot_colour,\n label = plot_label_string, ecolor = colours.hex['batman'], mec = colours.hex[\"batman\"],\n alpha = alpha,\n *args, 
**kwargs)\n\n if fit and hasattr(self, 'lcfit'):\n ax1.fill_between(self.lcfit.data[filter_key]['MJD'], self.lcfit.data[filter_key]['flux_upper'], self.lcfit.data[filter_key]['flux_lower'],\n color = self.phot.data_filters[filter_key]._plot_colour,\n alpha = 0.8, zorder = 0,\n *args, **kwargs)\n\n if simplespecphot and hasattr (self, \"simplespecphot\"):\n ax1.errorbar(self.simplespecphot.data[filter_key]['MJD'], self.simplespecphot.data[filter_key]['flux'],\n yerr = self.simplespecphot.data[filter_key]['flux_err'],\n capsize = 0, fmt = 'o', color = colours.hex[\"batman\"],\n ecolor = colours.hex['batman'], mec = colours.hex[\"batman\"], label = r\"$\\textnormal{SpecPhot}$\",\n *args, **kwargs)\n\n if extra_phot:\n if hasattr(extra_phot, \"data\") and filter_key in extra_phot.data:\n ax1.errorbar(extra_phot.data[filter_key]['MJD'], extra_phot.data[filter_key]['flux'],\n yerr=extra_phot.data[filter_key]['flux_err'],\n capsize=0, fmt='x', color=self.phot.data_filters[filter_key]._plot_colour,\n label=extra_phot_label, ecolor=\"C1\",\n mec=\"C1\", alpha=alpha, zorder=99,\n *args, **kwargs)\n\n\n if legend:\n if multiplot or insidelegend:\n plot_legend = ax1.legend(loc = 'upper right', scatterpoints = 1, markerfirst = False,\n numpoints = 1, frameon = False, bbox_to_anchor=(1., 1.),\n fontsize = 12.)\n\n if i == len(axes_list)-1:\n ax1.set_xlabel(xaxis_label_string)\n\n else:\n if multiplot:\n ax1.set_xticklabels('')\n\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.spines['top'].set_visible(True)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if mark_spectra:\n\n for spec_key in self.spec:\n if verbose: print(np.nanmin(self.spec[spec_key].wavelength) - filter_uncertainty, self.phot.data_filters[filter_key]._lower_edge)\n if verbose: print(np.nanmax(self.spec[spec_key].wavelength) + filter_uncertainty, self.phot.data_filters[filter_key]._upper_edge)\n\n if verbose: print(self.spec[spec_key].data.meta[\"filename\"] )\n too_blue = self.phot.data_filters[filter_key]._lower_edge < np.nanmin(self.spec[spec_key].wavelength) - filter_uncertainty\n too_red = self.phot.data_filters[filter_key]._upper_edge > np.nanmax(self.spec[spec_key].wavelength) + filter_uncertainty\n # if self.spec[spec_key]. self.phot.data_filters[filter_key]._upper_edge and self.phot.data_filters[filter_key]._lower_edge\n if verbose: print(too_blue, too_red)\n if not too_red and not too_blue:\n ax1.plot([self.spec[spec_key].mjd_obs, self.spec[spec_key].mjd_obs],\n [0.0, np.nanmax(self.phot.phot['flux'])*1.5],\n ls = ':', color = colours.hex['batman'], zorder = 0)\n\n if enforce_zero:\n ## Use ap table groups instead? 
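# mark_spectra above only draws an epoch line in a panel when the filter sits
# inside the wavelength range of that spectrum (to within filter_uncertainty
# Angstroms).  The check, pulled out as a small standalone function with toy numbers:
import numpy as np

def filter_covered_by_spectrum(lower_edge, upper_edge, spec_wavelength, uncertainty=10.0):
    too_blue = lower_edge < np.nanmin(spec_wavelength) - uncertainty
    too_red = upper_edge > np.nanmax(spec_wavelength) + uncertainty
    return not too_blue and not too_red

print(filter_covered_by_spectrum(4500.0, 5500.0, np.linspace(3600.0, 9000.0, 100)))   # True
print(filter_covered_by_spectrum(9000.0, 10500.0, np.linspace(3600.0, 9000.0, 100)))  # False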
- can't; no support for mixin columns.\n if multiplot and not lock_axis:\n ax1.set_ylim(np.nanmin(np.append(self.phot.data[filter_key]['flux'], 0.0)), np.nanmax(self.phot.data[filter_key]['flux'])*yaxis_lim_multiplier)\n else:\n ax1.set_ylim(np.nanmin(np.append(self.phot.phot['flux'], 0.0)), np.nanmax(self.phot.phot['flux'])*yaxis_lim_multiplier)\n else:\n if multiplot and not lock_axis:\n ax1.set_ylim(np.nanmin(self.phot.data[filter_key]['flux']), np.nanmax(self.phot.data[filter_key]['flux'])*yaxis_lim_multiplier)\n else:\n ax1.set_ylim(np.nanmin(self.phot.phot['flux']), np.nanmax(self.phot.phot['flux'])*yaxis_lim_multiplier)\n\n if multiplot:\n if not xextent:\n ax1.set_xlim(np.nanmin(self.phot.phot[\"MJD\"])-10, np.nanmax(self.phot.phot[\"MJD\"]))\n if xextent:\n ax1.set_xlim(np.nanmin(self.phot.phot[\"MJD\"])-10,np.nanmin(self.phot.phot[\"MJD\"]) + xextent)\n else:\n pass\n\n if xlims:\n ax1.set_xlim(xlims)\n if verbose:\n print(\"xrange = \", ax1.get_xlim())\n print(\"yrange = \", ax1.get_ylim())\n\n else:\n if verbose: print(\"Filter '\" + filter_key + \"' not found\")\n warnings.warn(\"Filter '\" + filter_key + \"' not found\")\n\n\n\n if not multiplot:\n\n ax1.set_ylabel(yaxis_label_string)\n\n if legend and not insidelegend:\n\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n else:\n fig.text(0.0, 0.5, yaxis_label_string, va = 'center', ha = 'left', rotation = 'vertical')\n\n if return_figure:\n warnings.warn(\"Returning figure, saveargs will be ignored\")\n return fig\n else:\n if savepdf and outpath:\n fig.savefig(outpath + \".pdf\", format = 'pdf', dpi=500)\n if savepng and outpath:\n fig.savefig(outpath + \".png\", format = 'png', dpi=500)\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_spec(self, xminorticks = 250, legend = True,\n wmin = 3500, return_figure=False,\n savepng = False, savepdf = False, outpath = False,\n verbose = False, add_mjd = True,\n *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self, \"spec\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 10])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n cmap_indices = np.linspace(0,1, len(self.spec))\n\n j = 0\n for i, spec_key in enumerate(self.spec):\n # if verbose: print(self.spec[spec_key].data.__dict__)\n\n plot_label_string = r'$\\rm{' + self.spec[spec_key].data.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n\n\n v_eff = 5436.87 ##Angstrom - TODO ???\n w = np.logical_and(self.spec[spec_key].data['wavelength'] > (v_eff-100.),self.spec[spec_key].data['wavelength'] < v_eff+100.)\n\n if verbose: print(i, len(w[np.where(w == True)]), spec_key, len(self.spec[spec_key].data['wavelength']), len(self.spec[spec_key].data['flux']), len(self.spec[spec_key].flux))\n if len(w[np.where(w == True)]) > 0:\n if verbose: print(len(w), 'Foo')\n flux_norm = self.spec[spec_key].flux / np.nanmean(self.spec[spec_key].flux[w])\n\n ax1.plot(self.spec[spec_key].data['wavelength'], flux_norm - 0.5*j, lw = 2,\n label = plot_label_string, color = defaults.spec_colourmap(cmap_indices[i]),\n *args, **kwargs)\n\n maxspecxdata = np.nanmax(self.spec[spec_key].data['wavelength'])\n minspecxdata = np.nanmin(self.spec[spec_key].data['wavelength'])\n\n w = np.where(self.spec[spec_key].data['wavelength'] >= maxspecxdata - 200)\n yatmaxspecxdata = 
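# plot_spec above normalises each spectrum by its mean flux in a +/-100 A window
# around v_eff = 5436.87 A and then offsets successive spectra by 0.5.  The
# normalisation step on its own, with a toy spectrum:
import numpy as np

v_eff = 5436.87                                        # Angstrom
wavelength = np.linspace(3500.0, 9000.0, 1000)
flux = 2.0e-16 * np.exp(-0.5 * ((wavelength - 6000.0) / 1500.0) ** 2)   # illustrative

w = np.logical_and(wavelength > (v_eff - 100.0), wavelength < (v_eff + 100.0))
if len(np.where(w)[0]) > 0:                            # enough points to normalise
    flux_norm = flux / np.nanmean(flux[w])             # ~1 around v_eff by construction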
np.nanmean((flux_norm - 0.5*j)[w])\n w = np.where(self.spec[spec_key].data['wavelength'] <= minspecxdata + 200)\n yatminspecxdata = np.nanmean((flux_norm - 0.5*j)[w])\n if verbose: print(yatminspecxdata)\n # if i == 0:\n if j == 0:\n maxplotydata = np.nanmax(flux_norm - 0.5*j)\n # minplotydata = np.nanmin(flux_norm - 0.5*j)\n minplotydata = 0. - 0.5*j ## Assumes always positive flux\n\n\n maxplotxdata = maxspecxdata\n minplotxdata = np.nanmin(self.spec[spec_key].data['wavelength'])\n else:\n maxplotydata = np.nanmax(np.append(maxplotydata, np.append(yatminspecxdata, yatminspecxdata)))\n # minplotydata = np.nanmin(np.append(minplotydata, flux_norm - 0.5*j))\n minplotydata = 0. - 0.5*j ## Assumes always positive flux\n maxplotxdata = np.nanmax(np.append(maxplotxdata, np.nanmax(self.spec[spec_key].data['wavelength'])))\n minplotxdata = np.nanmin(np.append(minplotxdata, np.nanmin(self.spec[spec_key].data['wavelength'])))\n if add_mjd:\n # ax1.plot([maxspecxdata, 11000],[1 - 0.5*j, 1 - 0.5*j], ls = '--', color = hex['batman'])\n # ax1.plot([maxspecxdata, 11000],[yatmaxspecxdata, yatmaxspecxdata], ls = '--', color = hex['batman'])\n ax1.plot([2000, minspecxdata],[1 - 0.5*j, yatminspecxdata], ls = '--', color = colours.hex['batman'])\n # txt = ax1.text(1500, yatminspecxdata, r'$' + str(self.spec[spec_key].mjd_obs) + '$',\n # horizontalalignment = 'right', verticalalignment = 'center')\n txt = ax1.text(2000, 1 - 0.5*j, r'$' + str(self.spec[spec_key].mjd_obs) + '$',\n horizontalalignment = 'right', verticalalignment = 'center')\n # ax1.text(1000, 1 - 0.5*j, r'$' + str(self.spec[spec_key].mjd_obs) + '$', horizontalalignment = 'right')\n j = j + 1\n else:\n if verbose: print(\"Not enough data to normalise\")\n if legend:\n\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n ax1.set_ylim(minplotydata - 0.5, maxplotydata + 0.5)\n ax1.set_xlim(1250, maxplotxdata*1.02)\n\n if verbose: print(minplotydata, maxplotydata)\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Flux, Arbitrary}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.set_yticklabels('')\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n warnings.warn(\"Returning figure, saveargs will be ignored\")\n return fig\n else:\n if savepdf and outpath:\n fig.savefig(outpath + \".pdf\", format = 'pdf', dpi=500)\n if savepng and outpath:\n fig.savefig(outpath + \".png\", format = 'png', dpi=500)\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_spec_coverage(self, xminorticks = 250, yminorticks = 5, legend = True,\n wmin = 3500, return_figure=False,\n savepng = False, savepdf = False, outpath = False,\n verbose = False,):\n \"\"\"\n\n :param xminorticks:\n :param yminorticks:\n :param legend:\n :param wmin:\n :param return_figure:\n :param savepng:\n :param savepdf:\n :param outpath:\n :param verbose:\n :return:\n \"\"\"\n if hasattr(self, \"spec\"):\n utils.setup_plot_defaults()\n\n y = [self.spec[i].mjd_obs for i in self.spec]\n xmax = [self.spec[i].max_wavelength for i in self.spec]\n xmin = [self.spec[i].min_wavelength for i in self.spec]\n\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Time, MJD (days)}$'\n\n fig = plt.figure(figsize=[10, 3])\n fig.subplots_adjust(left=0.09, bottom=0.2, 
top=0.99,\n right=0.99, hspace=0, wspace=0)\n\n ax = fig.add_subplot(111)\n\n ax.hlines(y=y, xmin=xmin, xmax=xmax)\n\n ax.scatter(xmin, y, color=\"blue\")\n ax.scatter(xmax, y, color=\"red\")\n\n xminorLocator = MultipleLocator(xminorticks)\n ax.xaxis.set_minor_locator(xminorLocator)\n yminorLocator = MultipleLocator(yminorticks)\n ax.yaxis.set_minor_locator(yminorLocator)\n\n ax.set_xlabel(xaxis_label_string)\n ax.set_ylabel(yaxis_label_string)\n\n if return_figure:\n warnings.warn(\"Returning figure, saveargs will be ignored\")\n return fig\n else:\n if savepdf and outpath:\n fig.savefig(outpath + \".pdf\", format='pdf', dpi=500)\n if savepng and outpath:\n fig.savefig(outpath + \".png\", format='png', dpi=500)\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_spectrum(self, spec_key):\n \"\"\"\n\n :param spec_key:\n :return:\n \"\"\"\n if hasattr(self, \"spec\"):\n if spec_key in self.spec:\n print(match)\n\n pass\n\n\n def plot_mangledspec(self, xminorticks = 250, legend = True,\n wmin = 3500, return_figure=False,\n savepng = False, savepdf = False, outpath = False,\n verbose = False, add_mjd = True,\n *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self, \"mangledspec\"):\n\n utils.setup_plot_defaults()\n\n fig = plt.figure(figsize=[8, 10])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n cmap_indices = np.linspace(0,1, len(self.mangledspec))\n if verbose: print(len(cmap_indices))\n\n j = 0\n for i, spec_key in enumerate(self.mangledspec):\n if hasattr(self.mangledspec[spec_key], \"mjd_obs\"):\n # if verbose: print(self.spec[spec_key].data.__dict__)\n\n plot_label_string = r'$\\rm{' + self.mangledspec[spec_key].data.meta[\"filename\"].split('/')[-1].replace('_', '\\_') + '}$'\n\n\n v_eff = 5436.87 ##Angstrom\n w = np.logical_and(self.mangledspec[spec_key].data['wavelength'] > (v_eff-100.),self.mangledspec[spec_key].data['wavelength'] < v_eff+100.)\n\n if verbose: print(i, len(w[np.where(w == True)]), spec_key, len(self.mangledspec[spec_key].data['wavelength']), len(self.mangledspec[spec_key].data['flux']), len(self.mangledspec[spec_key].flux))\n if len(w[np.where(w == True)]) > 0:\n\n if verbose: print(len(w), 'Foo')\n\n flux_norm = self.mangledspec[spec_key].flux / np.nanmean(self.mangledspec[spec_key].flux[w])\n\n ax1.plot(self.mangledspec[spec_key].data['wavelength'], flux_norm - 0.5*j, lw = 2,\n label = plot_label_string, color = defaults.spec_colourmap(cmap_indices[j]),\n *args, **kwargs)\n\n maxspecxdata = np.nanmax(self.mangledspec[spec_key].data['wavelength'])\n minspecxdata = np.nanmin(self.mangledspec[spec_key].data['wavelength'])\n\n w = np.where(self.mangledspec[spec_key].data['wavelength'] >= maxspecxdata - 200)\n yatmaxspecxdata = np.nanmean((flux_norm - 0.5*j)[w])\n w = np.where(self.mangledspec[spec_key].data['wavelength'] <= minspecxdata + 200)\n yatminspecxdata = np.nanmean((flux_norm - 0.5*j)[w])\n if verbose: print(yatminspecxdata)\n # if i == 0:\n if j == 0:\n maxplotydata = np.nanmax(flux_norm - 0.5*j)\n # minplotydata = np.nanmin(flux_norm - 0.5*j)\n minplotydata = 0. 
- 0.5*j ## Assumes always positive flux\n\n\n maxplotxdata = maxspecxdata\n minplotxdata = np.nanmin(self.mangledspec[spec_key].data['wavelength'])\n else:\n maxplotydata = np.nanmax(np.append(maxplotydata, np.append(yatminspecxdata, yatminspecxdata)))\n # minplotydata = np.nanmin(np.append(minplotydata, flux_norm - 0.5*j))\n minplotydata = 0. - 0.5*j ## Assumes always positive flux\n maxplotxdata = np.nanmax(np.append(maxplotxdata, np.nanmax(self.mangledspec[spec_key].data['wavelength'])))\n minplotxdata = np.nanmin(np.append(minplotxdata, np.nanmin(self.mangledspec[spec_key].data['wavelength'])))\n if add_mjd:\n # ax1.plot([maxspecxdata, 11000],[1 - 0.5*j, 1 - 0.5*j], ls = '--', color = hex['batman'])\n # ax1.plot([maxspecxdata, 11000],[yatmaxspecxdata, yatmaxspecxdata], ls = '--', color = hex['batman'])\n ax1.plot([2000, minspecxdata],[1 - 0.5*j, yatminspecxdata], ls = '--', color = colours.hex['batman'])\n # txt = ax1.text(1500, yatminspecxdata, r'$' + str(self.mangledspec[spec_key].mjd_obs) + '$',\n # horizontalalignment = 'right', verticalalignment = 'center')\n txt = ax1.text(2000, 1 - 0.5*j, r'$' + str(self.mangledspec[spec_key].mjd_obs) + '$',\n horizontalalignment = 'right', verticalalignment = 'center')\n # ax1.text(1000, 1 - 0.5*j, r'$' + str(self.mangledspec[spec_key].mjd_obs) + '$', horizontalalignment = 'right')\n j = j + 1\n else:\n if verbose: print(\"Not enough data to normalise\")\n if legend:\n\n plot_legend = ax1.legend(loc = [1.,0.0], scatterpoints = 1,\n numpoints = 1, frameon = False, fontsize = 12)\n\n ax1.set_ylim(minplotydata - 0.5, maxplotydata + 0.5)\n ax1.set_xlim(1250, maxplotxdata*1.02)\n\n if verbose: print(minplotydata, maxplotydata)\n ## Label the axes\n xaxis_label_string = r'$\\textnormal{Wavelength (\\AA)}$'\n yaxis_label_string = r'$\\textnormal{Flux, Arbitrary}$'\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.set_yticklabels('')\n\n xminorLocator = MultipleLocator(xminorticks)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n if return_figure:\n warnings.warn(\"Returning figure, saveargs will be ignored\")\n return fig\n else:\n if savepdf and outpath:\n fig.savefig(outpath + \".pdf\", format = 'pdf', dpi=500)\n if savepng and outpath:\n fig.savefig(outpath + \".png\", format = 'png', dpi=500)\n\n plt.show()\n else:\n warnings.warn(\"Doesn't seem to be any data here (empty self.data)\")\n pass\n\n\n def plot_filters(self, filters = False, xminorticks = 250, yminorticks = 0.1,\n show_lims = False, return_figure=False, verbose=False,\n *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self.phot, 'data_filters'):\n\n if not filters:\n filters = self.phot.data_filters\n\n if verbose: print(filters)\n\n utils.setup_plot_defaults()\n xaxis_label_string = r'$\\textnormal{Wavelength, Angstrom }(\\AA)$'\n yaxis_label_string = r'$\\textnormal{Fractional Throughput}$'\n\n yminorLocator = MultipleLocator(yminorticks)\n xminorLocator = MultipleLocator(xminorticks)\n\n fig = plt.figure(figsize=[8, 4])\n fig.subplots_adjust(left = 0.09, bottom = 0.13, top = 0.99,\n right = 0.99, hspace=0, wspace = 0)\n\n ax1 = fig.add_subplot(111)\n\n for i, filter_key in enumerate(filters):\n ## Check if there is something in the class to plot\n if hasattr(self.phot.data_filters[filter_key], \"wavelength\") and hasattr(self.phot.data_filters[filter_key], \"throughput\"):\n\n plot_label_string = r'$\\textnormal{' + self.phot.data_filters[filter_key].filter_name.replace(\"_\", \"\\_\") + '}$'\n\n\n if 
hasattr(self.phot.data_filters[filter_key], \"_plot_colour\"):\n ax1.plot(self.phot.data_filters[filter_key].wavelength, self.phot.data_filters[filter_key].throughput, color = self.phot.data_filters[filter_key]._plot_colour,\n lw = 2, label = plot_label_string)\n else:\n ax1.plot(self.phot.data_filters[filter_key].wavelength, self.phot.data_filters[filter_key].throughput, lw = 2, label = plot_label_string)\n\n if show_lims:\n try:\n ax1.plot([self.phot.data_filters[filter_key]._upper_edge, self.phot.data_filters[filter_key]._upper_edge], [0,1] ,\n lw = 1.5, alpha = 0.5, ls = ':',\n color = self.phot.data_filters[filter_key]._plot_colour, zorder = 0, )\n ax1.plot([self.phot.data_filters[filter_key]._lower_edge, self.phot.data_filters[filter_key]._lower_edge], [0,1] ,\n lw = 1.5, alpha = 0.5, ls = ':',\n color = self.phot.data_filters[filter_key]._plot_colour, zorder = 0, )\n except:\n print(\"Failed\")\n else:\n warning.warn(\"Doesn't look like you have loaded a filter into the object\")\n\n default_xlims = ax1.get_xlim()\n ax1.plot(default_xlims, [0,0], color = colours.hex[\"black\"], ls = \":\")\n ax1.set_xlim(default_xlims)\n\n ax1.set_xlabel(xaxis_label_string)\n ax1.set_ylabel(yaxis_label_string)\n\n ax1.yaxis.set_minor_locator(yminorLocator)\n ax1.xaxis.set_minor_locator(xminorLocator)\n\n ax1.legend(loc = 0)\n\n if return_figure:\n return fig\n\n plt.show()\n pass\n\n\n\n def get_lcfit(self, path, verbose=False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n errors.StringWarning(path)\n self.lcfit = LCfitClass()\n self.lcfit.load_formatted_phot(path, verbose=verbose)\n self.lcfit.unpack(verbose=verbose)\n self.lcfit._sort_phot(verbose=verbose)\n self.lcfit.get_fit_splines(verbose=verbose)\n\n pass\n\n\n def get_specfit(self, verbose = False):\n \"\"\"\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n self.specfit = OrderedDict()\n\n if hasattr(self, \"name\"):\n specfit_list = functions.find_recon_spec(self.recon_directory, self.name, verbose = verbose)\n # if verbose: print(specfit_list)\n\n for i, specfit_file in enumerate(specfit_list):\n if verbose: print(i, specfit_file)\n self.specfit[specfit_file] = specfitClass()\n self.specfit[specfit_file].load(filename = specfit_file,\n directory = self.recon_directory, verbose = verbose)\n self.specfit[specfit_file].set_orig_specpath()\n\n else:\n warnings.warn(\"This SNClass object has no name\")\n if verbose: print(\"This SNClass object has no name\")\n\n pass\n\n\n def get_simplespecphot(self, verbose = False):\n \"\"\"\n When the SNClass has both lcfits and spec, sample the lcfits at the\n obsdate of the relevant (i.e. overlapping) spectra. 
Initially to\n recreate Fig 2 of Paper.\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n\n if hasattr(self, 'lcfit') and hasattr(self, 'spec'):\n # if verbose: print(\"Foo\")\n\n # try:\n # # self.simplespecphot = LCfitClass()\n # self.simplespecphot = PhotometryClass()\n #\n # lenstring = np.nanmax([len(i) for i in self.lcfit.data_filters.keys()]) ## object dtype is slow\n # self.simplespecphot.phot = Table(names = ('MJD', 'flux', 'flux_err', 'filter'),\n # dtype = [float, float, float, '|S'+str(lenstring)])\n #\n # for i, spectrum in enumerate(self.spec):\n #\n # for filter_name in self.spec[spectrum]._overlapping_filter_list:\n # if verbose: print(i, spectrum, filter_name)\n #\n # mjd = self.spec[spectrum].mjd_obs\n # flux = self.lcfit.spline[filter_name](mjd)\n # flux_err = self.lcfit.spline[filter_name + \"_err\"](mjd)\n # newrow = {'MJD': mjd, 'flux': flux, 'flux_err': flux_err, 'filter':filter_name}\n # self.simplespecphot.phot.add_row([mjd, flux, flux_err, filter_name])\n #\n # self.simplespecphot.unpack()\n # except:\n # warnings.warn(\"simplespecphot failed\")\n\n\n # self.simplespecphot = LCfitClass()\n self.simplespecphot = PhotometryClass()\n\n lenstring = np.nanmax([len(i) for i in self.lcfit.data_filters.keys()]) ## object dtype is slow\n # self.simplespecphot.phot = Table(names=('MJD', 'flux', 'flux_err', 'filter'),\n # dtype=[float, float, float, '|S' + str(lenstring)])\n\n mjd_list = []\n flux_list = []\n flux_err_list = []\n filter_list = []\n\n for i, spectrum in enumerate(self.spec):\n\n for filter_name in self.spec[spectrum]._overlapping_filter_list:\n if verbose: print(i, spectrum, filter_name, type(filter_name))\n\n mjd = self.spec[spectrum].mjd_obs\n flux = self.lcfit.spline[filter_name](mjd)\n flux_err = self.lcfit.spline[filter_name + \"_err\"](mjd)\n # newrow = {'MJD': mjd, 'flux': flux, 'flux_err': flux_err, 'filter': filter_name}\n # if i == 0:\n # self.simplespecphot.phot = Table(newrow)\n # else:\n # self.simplespecphot.phot.add_row([mjd, flux, flux_err, filter_name])\n\n mjd_list.append(mjd)\n flux_list.append(flux)\n flux_err_list.append(flux_err)\n filter_list.append(filter_name)\n\n self.simplespecphot.phot = Table((mjd_list, flux_list, flux_err_list, filter_list), names=('MJD', 'flux', 'flux_err', 'filter'))\n\n self.simplespecphot.unpack(verbose=verbose)\n\n pass\n\n\n def check_overlaps(self, verbose = False):\n \"\"\"\n Checks the filters that the spectrum overlaps with.\n originally used functions.filter_within_spec\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if hasattr(self.phot, \"data\") and hasattr(self, 'spec'):\n for i, spectrum in enumerate(self.spec):\n if verbose:print(i, spectrum)\n for j, filtername in enumerate(self.phot.data_filters):\n if verbose:print(j, filtername)\n\n if hasattr(self.phot.data_filters[filtername], \"_lower_edge\") and \\\n hasattr(self.phot.data_filters[filtername], \"_upper_edge\") and \\\n hasattr(self.spec[spectrum], \"data\"):\n blue_bool = self.phot.data_filters[filtername]._lower_edge > self.spec[spectrum].min_wavelength\n red_bool = self.phot.data_filters[filtername]._upper_edge < self.spec[spectrum].max_wavelength\n\n if blue_bool and red_bool:\n within = True\n else:\n within = False\n\n if verbose:print(within)\n if within:\n self.spec[spectrum]._add_to_overlapping_filters(filtername)\n else:\n warnings.warn(\"SNClass.check_overlaps - something went wrong... 
no data?\")\n pass\n\n\n def get_specphot(self, spectrum = False, filter_objects = False, verbose = False):\n \"\"\"\n\n :param spectrum:\n :param filter_objects:\n :param verbose:\n :return:\n \"\"\"\n if hasattr(self, \"spec\"):\n if spectrum:\n spec_list = [spectrum]\n else:\n spec_list = self.spec\n if not filter_objects:\n filter_objects = self.phot.data_filters\n\n for i, spec in enumerate(spec_list):\n self.spec[spec].get_specphot(filter_objects=filter_objects, verbose=verbose)\n\n else:\n warnings.warn(\"object has no spectra\")\n pass\n\n\nclass InfoClass():\n \"\"\"\n\n \"\"\"\n\n def __init__(self):\n pass\n\n def load(self, path = False):\n if not path:\n path = defaults._default_info_path\n\n self._data = Table.read(path, format = \"ascii.commented_header\")\n\n self.table = self._data\n\n self.table.meta[\"success\"] = True\n self.snname = self.table[\"snname\"]\n self.z_obs = self.table[\"z_obs\"]\n self.distmod = self.table[\"mu\"]\n self.distance = Distance(distmod = self.table[\"mu\"])\n self.table[\"z_distmod\"] = [i.z for i in self.distance]\n\n self.RA = self.table[\"RA\"]\n self.Dec = self.table[\"Dec\"]\n\n self.table[\"SkyCoords\"] = SkyCoord(self.table[\"RA\"], self.table[\"Dec\"], unit=(u.hourangle, u.deg))\n self.coords = self.table[\"SkyCoords\"]\n\n self.type = self.table[\"Type\"]\n\n def get_sn_info(self, snname):\n try:\n w = np.where(self.snname == snname)\n except:\n print(\"foo\")\n\n return self.table[w]\n\n\n\n# #----------------------------------------------------------------------------# #\n# # /CODE # #\n# #----------------------------------------------------------------------------# #\n\n## FUNCTIONS THAT ITS A PAIN TO SHIFT\n\ndef find_specphase_spec(snname, dir_path = defaults._default_specphase_dir_path, file_type = \".spec\", verbose = False):\n \"\"\"\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if verbose: print(dir_path)\n errors.StringWarning(dir_path)\n errors.StringWarning(snname)\n if type(snname) is not str and type(snname) is not np.string_:\n raise(errors.PathError)\n\n if not utils.check_dir_path(dir_path):\n print(\"utils.check_dir_path failed\")\n return False\n\n try:\n ls = np.array(os.listdir(dir_path))\n\n # wspec = np.where(np.char.find(ls, file_type, start = -len(file_type)) > -1)\n # spec_list = ls[wspec]\n spec_list = [i for i in ls if i[-5:] == \".spec\"]\n ## The last 18 chars are for the MJD and file_type\n # wsn = np.where([i[:-18] == snname for i in spec_list])\n # snmatch_list = spec_list[wsn]\n snmatch_list = [i for i in spec_list if i[:len(snname)] == snname ]\n\n if verbose:\n print(\"Found: \")\n print(ls)\n print(\"Spec:\")\n print(spec_list)\n print(\"Matched:\")\n print(snmatch_list)\n if len(snmatch_list) is 0:\n warnings.warn(\"No matches found.\")\n return snmatch_list\n\n except:\n warnings.warn(\"Something went wrong\")\n return False\n\n\ndef find_filter_phot(path = defaults._default_data_dir_path, snname = False,\n prefix = 'SN', file_type = '.dat',\n verbose = False):\n \"\"\"\n Tries to find photometry in the supplied directory.\n\n Looks in a directory for things that match SN*.dat. 
Uses regex via `re` -\n probably overkill.\n\n Parameters\n ----------\n\n path :\n\n snname :\n\n prefix :\n\n file_type :\n\n\n Returns\n -------\n\n phot_list :\n :param path:\n :param snname:\n :param prefix:\n :param file_type:\n :param verbose:\n\n \"\"\"\n # regex = re.compile(\"^SN.*.dat\")\n\n errors.StringWarning(path)\n if not utils.check_dir_path(path):\n # return False\n raise errors.PathError\n\n\n try:\n if snname:\n match_string = \"^\" + str(snname) + \".*\" + '.dat'\n else:\n match_string = \"^\" + str(prefix) + \".*\" + '.dat'\n except:\n raise TypeError\n\n regex = re.compile(match_string)\n\n ls = os.listdir(path)\n\n phot_list = [os.path.join(path, match.group(0)) for file_name in ls for match in [regex.search(file_name)] if match]\n\n if os.path.join(path, snname + file_type) in phot_list:\n phot_list.remove(os.path.join(path,snname + file_type))\n warnings.warn(\"Found \" + os.path.join(path,snname + file_type) + \" - you could just read that in.\")\n\n if verbose:\n print(\"searching for\", match_string)\n print(\"Found: \")\n print(ls)\n print(\"Matched:\")\n print(phot_list)\n if len(phot_list) is 0:\n warnings.warn(\"No matches found.\")\n return phot_list\n" ]
[ [ "numpy.sum", "scipy.interpolate.interp1d", "numpy.argsort", "numpy.append", "scipy.interpolate.InterpolatedUnivariateSpline", "matplotlib.pyplot.figure", "numpy.logical_and", "numpy.nanmean", "numpy.in1d", "numpy.log10", "matplotlib.pyplot.get_cmap", "numpy.isnan", "scipy.integrate.trapz", "numpy.where", "matplotlib.ticker.MultipleLocator", "numpy.bitwise_and", "numpy.unique", "numpy.average", "numpy.round", "numpy.mean", "numpy.cumsum", "numpy.nanmax", "scipy.integrate.simps", "numpy.nanmin", "matplotlib.pyplot.show", "numpy.array_equal", "numpy.array", "numpy.concatenate" ] ]
TXM-DOOM/B.Tech-CSE-Y2
[ "763436ae866f1f18fa8071c253d005bdf289532f" ]
[ "applied-statistics/python-revisited/libraries/numpy/sorting.py" ]
[ "import numpy as np\n\ntestArr1 = np.array([1, 20, 23, 14, 2, 1, 234, 12, 1, 3]) # Sorts in ascending order\ntestArr2 = np.array([True, False, False, True]) # False at the start of the array and then True\ntestArr3 = np.array(['C', 'A', 'Z', 'V']) # Sorts Alphabetically\n\nprint('1: {}\\n2: {}\\n3: {}'.format(np.sort(testArr1), np.sort(testArr2), np.sort(testArr3)))" ]
[ [ "numpy.array", "numpy.sort" ] ]
benjamin-work/aristo-mini
[ "4d99fa4cb9eb1e64d0d21adfea15450d626cfcba" ]
[ "aristomini/common/wordtwovec.py" ]
[ "\"\"\"\na wrapper class for the gensim Word2Vec model that has extra features we need, as well as some\nhelper functions for tokenizing and stemming and things like that.\n\"\"\"\n\nfrom functools import lru_cache\nimport math\nfrom typing import Iterable, List\n\nfrom gensim.parsing.preprocessing import STOPWORDS\nfrom gensim.parsing.porter import PorterStemmer\nfrom gensim.models import Word2Vec\nfrom gensim.utils import simple_preprocess\n\nimport numpy as np\n\nstemmer = PorterStemmer()\n\n\n@lru_cache(maxsize=1024)\ndef stem(word: str) -> str:\n \"\"\"stemming words is not cheap, so use a cache decorator\"\"\"\n return stemmer.stem(word)\n\n\ndef tokenizer(sentence: str) -> List[str]:\n \"\"\"use gensim's `simple_preprocess` and `STOPWORDS` list\"\"\"\n return [stem(token) for token in simple_preprocess(sentence) if token not in STOPWORDS]\n\n\ndef cosine_similarity(v1: np.ndarray, v2: np.ndarray) -> float:\n \"\"\"https://en.wikipedia.org/wiki/Cosine_similarity\"\"\"\n num = np.dot(v1, v2)\n d1 = np.dot(v1, v1)\n d2 = np.dot(v2, v2)\n\n if d1 > 0.0 and d2 > 0.0:\n return num / math.sqrt(d1 * d2)\n else:\n return 0.0\n\n\nclass WordTwoVec:\n \"\"\"\n a wrapper for gensim.Word2Vec with added functionality to embed phrases and compute the\n \"goodness\" of a question-answer pair based on embedding-vector similarity\n \"\"\"\n def __init__(self, model_file: str) -> None:\n if model_file.endswith(\".bin\"):\n self.model = Word2Vec.load_word2vec_format(model_file, binary=True)\n else:\n self.model = Word2Vec.load(model_file)\n\n def embed(self, words: Iterable[str]) -> np.ndarray:\n \"\"\"given a list of words, find their vector embeddings and return the vector mean\"\"\"\n # first find the vector embedding for each word\n vectors = [self.model[word] for word in words if word in self.model]\n\n if vectors:\n # if there are vector embeddings, take the vector average\n return np.average(vectors, axis=0)\n else:\n # otherwise just return a zero vector\n return np.zeros(self.model.vector_size)\n\n def goodness(self, question_stem: str, choice_text: str) -> float:\n \"\"\"how good is the choice for this question?\"\"\"\n question_words = {word for word in tokenizer(question_stem)}\n choice_words = {word for word in tokenizer(choice_text) if word not in question_words}\n\n score = cosine_similarity(self.embed(question_words), self.embed(choice_words))\n\n if \"Max is doing\" in question_stem:\n print(choice_text, score)\n\n return score\n" ]
[ [ "numpy.dot", "numpy.average", "numpy.zeros" ] ]
AishwaryaKalloli/koalas
[ "8d35a74508c1319996c8c27e2a5e24af52b9ee31" ]
[ "databricks/koalas/base.py" ]
[ "#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nBase and utility classes for Koalas objects.\n\"\"\"\nfrom abc import ABCMeta, abstractmethod\nimport datetime\nfrom functools import wraps, partial\nfrom typing import Any, Callable, Tuple, Union, cast, TYPE_CHECKING\nimport warnings\n\nimport numpy as np\nimport pandas as pd # noqa: F401\nfrom pandas.api.types import is_list_like\nfrom pyspark import sql as spark\nfrom pyspark.sql import functions as F, Window, Column\nfrom pyspark.sql.types import (\n BooleanType,\n DateType,\n DoubleType,\n FloatType,\n IntegralType,\n LongType,\n StringType,\n TimestampType,\n)\n\nfrom databricks import koalas as ks # For running doctests and reference resolution in PyCharm.\nfrom databricks.koalas import numpy_compat\nfrom databricks.koalas.internal import (\n InternalFrame,\n NATURAL_ORDER_COLUMN_NAME,\n SPARK_DEFAULT_INDEX_NAME,\n)\nfrom databricks.koalas.spark import functions as SF\nfrom databricks.koalas.spark.accessors import SparkIndexOpsMethods\nfrom databricks.koalas.typedef import as_spark_type, spark_type_to_pandas_dtype\nfrom databricks.koalas.utils import align_diff_series, same_anchor, scol_for, validate_axis\nfrom databricks.koalas.frame import DataFrame\n\nif TYPE_CHECKING:\n from databricks.koalas.indexes import Index\n from databricks.koalas.series import Series\n\n\ndef booleanize_null(left_scol, scol, f) -> Column:\n \"\"\"\n Booleanize Null in Spark Column\n \"\"\"\n comp_ops = [\n getattr(Column, \"__{}__\".format(comp_op))\n for comp_op in [\"eq\", \"ne\", \"lt\", \"le\", \"ge\", \"gt\"]\n ]\n\n if f in comp_ops:\n # if `f` is \"!=\", fill null with True otherwise False\n filler = f == Column.__ne__\n scol = F.when(scol.isNull(), filler).otherwise(scol)\n\n elif f == Column.__or__:\n scol = F.when(left_scol.isNull() | scol.isNull(), False).otherwise(scol)\n\n elif f == Column.__and__:\n scol = F.when(scol.isNull(), False).otherwise(scol)\n\n return scol\n\n\ndef column_op(f):\n \"\"\"\n A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be\n supported too. If this decorator is used for the `f` function that takes Spark Column and\n returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas\n Series.\n\n :param f: a function that takes Spark Column and returns Spark Column.\n :param self: Koalas Series\n :param args: arguments that the function `f` takes.\n \"\"\"\n\n @wraps(f)\n def wrapper(self, *args):\n # It is possible for the function `f` takes other arguments than Spark Column.\n # To cover this case, explicitly check if the argument is Koalas Series and\n # extract Spark Column. 
For other arguments, they are used as are.\n cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]\n\n if all(same_anchor(self, col) for col in cols):\n # Same DataFrame anchors\n args = [arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]\n scol = f(self.spark.column, *args)\n scol = booleanize_null(self.spark.column, scol, f)\n\n kser = self._with_new_scol(scol)\n else:\n # Different DataFrame anchors\n def apply_func(this_column, *that_columns):\n scol = f(this_column, *that_columns)\n return booleanize_null(this_column, scol, f)\n\n kser = align_diff_series(apply_func, self, *args, how=\"full\")\n\n if not all(self.name == col.name for col in cols):\n kser = kser.rename()\n\n return kser\n\n return wrapper\n\n\ndef numpy_column_op(f):\n @wraps(f)\n def wrapper(self, *args):\n # PySpark does not support NumPy type out of the box. For now, we convert NumPy types\n # into some primitive types understandable in PySpark.\n new_args = []\n for arg in args:\n # TODO: This is a quick hack to support NumPy type. We should revisit this.\n if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64):\n new_args.append(float(arg / np.timedelta64(1, \"s\")))\n else:\n new_args.append(arg)\n return column_op(f)(self, *new_args)\n\n return wrapper\n\n\nclass IndexOpsMixin(object, metaclass=ABCMeta):\n \"\"\"common ops mixin to support a unified interface / docs for Series / Index\n\n Assuming there are following attributes or properties and function.\n \"\"\"\n\n @property\n @abstractmethod\n def _internal(self) -> InternalFrame:\n pass\n\n @property\n @abstractmethod\n def _kdf(self) -> DataFrame:\n pass\n\n @abstractmethod\n def _with_new_scol(self, scol: spark.Column):\n pass\n\n @property\n @abstractmethod\n def _column_label(self) -> Tuple:\n pass\n\n @property\n @abstractmethod\n def spark(self) -> SparkIndexOpsMethods:\n pass\n\n @property\n def spark_column(self) -> Column:\n warnings.warn(\n \"Series.spark_column is deprecated as of Series.spark.column. \"\n \"Please use the API instead.\",\n FutureWarning,\n )\n return self.spark.column\n\n spark_column.__doc__ = SparkIndexOpsMethods.column.__doc__\n\n # arithmetic operators\n __neg__ = column_op(Column.__neg__)\n\n def __add__(self, other) -> Union[\"Series\", \"Index\"]:\n if not isinstance(self.spark.data_type, StringType) and (\n (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n if isinstance(self.spark.data_type, StringType):\n # Concatenate string columns\n if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType):\n return column_op(F.concat)(self, other)\n # Handle df['col'] + 'literal'\n elif isinstance(other, str):\n return column_op(F.concat)(self, F.lit(other))\n else:\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n else:\n return column_op(Column.__add__)(self, other)\n\n def __sub__(self, other) -> Union[\"Series\", \"Index\"]:\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"substraction can not be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, TimestampType):\n # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. 
pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.\n msg = (\n \"Note that there is a behavior difference of timestamp subtraction. \"\n \"The timestamp subtraction returns an integer in seconds, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, IndexOpsMixin) and isinstance(\n other.spark.data_type, TimestampType\n ):\n warnings.warn(msg, UserWarning)\n return self.astype(\"bigint\") - other.astype(\"bigint\")\n elif isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return self.astype(\"bigint\") - F.lit(other).cast(as_spark_type(\"bigint\"))\n else:\n raise TypeError(\"datetime subtraction can only be applied to datetime series.\")\n elif isinstance(self.spark.data_type, DateType):\n # Note that date subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.\n msg = (\n \"Note that there is a behavior difference of date subtraction. \"\n \"The date subtraction returns an integer in days, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, DateType):\n warnings.warn(msg, UserWarning)\n return column_op(F.datediff)(self, other).astype(\"bigint\")\n elif isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return column_op(F.datediff)(self, F.lit(other)).astype(\"bigint\")\n else:\n raise TypeError(\"date subtraction can only be applied to date series.\")\n return column_op(Column.__sub__)(self, other)\n\n def __mul__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(other, str):\n raise TypeError(\"multiplication can not be applied to a string literal.\")\n\n if (\n isinstance(self.spark.data_type, IntegralType)\n and isinstance(other, IndexOpsMixin)\n and isinstance(other.spark.data_type, StringType)\n ):\n return column_op(SF.repeat)(other, self)\n\n if isinstance(self.spark.data_type, StringType):\n if (\n isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, IntegralType)\n ) or isinstance(other, int):\n return column_op(SF.repeat)(self, other)\n else:\n raise TypeError(\n \"a string series can only be multiplied to an int series or literal\"\n )\n\n return column_op(Column.__mul__)(self, other)\n\n def __truediv__(self, other) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n __truediv__ has different behaviour between pandas and PySpark for several cases.\n 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf\n 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf\n 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf\n 4. 
When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf\n\n +-------------------------------------------+\n | dividend (divisor: 0) | PySpark | pandas |\n |-----------------------|---------|---------|\n | np.inf | null | np.inf |\n | -np.inf | null | -np.inf |\n | 10 | null | np.inf |\n | -10 | null | -np.inf |\n +-----------------------|---------|---------+\n \"\"\"\n\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def truediv(left, right):\n return F.when(F.lit(right != 0) | F.lit(right).isNull(), left.__div__(right)).otherwise(\n F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(\n F.lit(np.inf).__div__(left)\n )\n )\n\n return numpy_column_op(truediv)(self, other)\n\n def __mod__(self, other) -> Union[\"Series\", \"Index\"]:\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"modulo can not be applied on string series or literals.\")\n\n def mod(left, right):\n return ((left % right) + right) % right\n\n return column_op(mod)(self, other)\n\n def __radd__(self, other) -> Union[\"Series\", \"Index\"]:\n # Handle 'literal' + df['col']\n if not isinstance(self.spark.data_type, StringType) and isinstance(other, str):\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, StringType):\n if isinstance(other, str):\n return self._with_new_scol(F.concat(F.lit(other), self.spark.column))\n else:\n raise TypeError(\"string addition can only be applied to string series or literals.\")\n else:\n return column_op(Column.__radd__)(self, other)\n\n def __rsub__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"substraction can not be applied to string series or literals.\")\n\n if isinstance(self.spark.data_type, TimestampType):\n # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.\n msg = (\n \"Note that there is a behavior difference of timestamp subtraction. \"\n \"The timestamp subtraction returns an integer in seconds, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return -(self.astype(\"bigint\") - F.lit(other).cast(as_spark_type(\"bigint\")))\n else:\n raise TypeError(\"datetime subtraction can only be applied to datetime series.\")\n elif isinstance(self.spark.data_type, DateType):\n # Note that date subtraction casts arguments to integer. This is to mimic pandas's\n # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.\n msg = (\n \"Note that there is a behavior difference of date subtraction. 
\"\n \"The date subtraction returns an integer in days, \"\n \"whereas pandas returns 'timedelta64[ns]'.\"\n )\n if isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):\n warnings.warn(msg, UserWarning)\n return -column_op(F.datediff)(self, F.lit(other)).astype(\"bigint\")\n else:\n raise TypeError(\"date subtraction can only be applied to date series.\")\n return column_op(Column.__rsub__)(self, other)\n\n def __rmul__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(other, str):\n raise TypeError(\"multiplication can not be applied to a string literal.\")\n\n if isinstance(self.spark.data_type, StringType):\n if isinstance(other, int):\n return column_op(SF.repeat)(self, other)\n else:\n raise TypeError(\n \"a string series can only be multiplied to an int series or literal\"\n )\n\n return column_op(Column.__rmul__)(self, other)\n\n def __rtruediv__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def rtruediv(left, right):\n return F.when(left == 0, F.lit(np.inf).__div__(right)).otherwise(\n F.lit(right).__truediv__(left)\n )\n\n return numpy_column_op(rtruediv)(self, other)\n\n def __floordiv__(self, other) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n __floordiv__ has different behaviour between pandas and PySpark for several cases.\n 1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf\n 2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf\n 3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf\n 4. When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf\n\n +-------------------------------------------+\n | dividend (divisor: 0) | PySpark | pandas |\n |-----------------------|---------|---------|\n | np.inf | null | np.inf |\n | -np.inf | null | -np.inf |\n | 10 | null | np.inf |\n | -10 | null | -np.inf |\n +-----------------------|---------|---------+\n \"\"\"\n if (\n isinstance(self.spark.data_type, StringType)\n or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))\n or isinstance(other, str)\n ):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def floordiv(left, right):\n return F.when(F.lit(right is np.nan), np.nan).otherwise(\n F.when(\n F.lit(right != 0) | F.lit(right).isNull(), F.floor(left.__div__(right))\n ).otherwise(\n F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(\n F.lit(np.inf).__div__(left)\n )\n )\n )\n\n return numpy_column_op(floordiv)(self, other)\n\n def __rfloordiv__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"division can not be applied on string series or literals.\")\n\n def rfloordiv(left, right):\n return F.when(F.lit(left == 0), F.lit(np.inf).__div__(right)).otherwise(\n F.when(F.lit(left) == np.nan, np.nan).otherwise(F.floor(F.lit(right).__div__(left)))\n )\n\n return numpy_column_op(rfloordiv)(self, other)\n\n def __rmod__(self, other) -> Union[\"Series\", \"Index\"]:\n if isinstance(self.spark.data_type, StringType) or isinstance(other, str):\n raise TypeError(\"modulo can not be applied on string series or literals.\")\n\n def rmod(left, right):\n return ((right % left) + left) % left\n\n return column_op(rmod)(self, other)\n\n __pow__ = 
column_op(Column.__pow__)\n __rpow__ = column_op(Column.__rpow__)\n __abs__ = column_op(F.abs)\n\n # comparison operators\n __eq__ = column_op(Column.__eq__)\n __ne__ = column_op(Column.__ne__)\n __lt__ = column_op(Column.__lt__)\n __le__ = column_op(Column.__le__)\n __ge__ = column_op(Column.__ge__)\n __gt__ = column_op(Column.__gt__)\n\n # `and`, `or`, `not` cannot be overloaded in Python,\n # so use bitwise operators as boolean operators\n __and__ = column_op(Column.__and__)\n __or__ = column_op(Column.__or__)\n __invert__ = column_op(Column.__invert__)\n __rand__ = column_op(Column.__rand__)\n __ror__ = column_op(Column.__ror__)\n\n # NDArray Compat\n def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):\n # Try dunder methods first.\n result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(\n self, ufunc, method, *inputs, **kwargs\n )\n\n # After that, we try with PySpark APIs.\n if result is NotImplemented:\n result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(\n self, ufunc, method, *inputs, **kwargs\n )\n\n if result is not NotImplemented:\n return result\n else:\n # TODO: support more APIs?\n raise NotImplementedError(\"Koalas objects currently do not support %s.\" % ufunc)\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"Return the dtype object of the underlying data.\n\n Examples\n --------\n >>> s = ks.Series([1, 2, 3])\n >>> s.dtype\n dtype('int64')\n\n >>> s = ks.Series(list('abc'))\n >>> s.dtype\n dtype('O')\n\n >>> s = ks.Series(pd.date_range('20130101', periods=3))\n >>> s.dtype\n dtype('<M8[ns]')\n\n >>> s.rename(\"a\").to_frame().set_index(\"a\").index.dtype\n dtype('<M8[ns]')\n \"\"\"\n return spark_type_to_pandas_dtype(self.spark.data_type)\n\n @property\n def empty(self) -> bool:\n \"\"\"\n Returns true if the current object is empty. Otherwise, returns false.\n\n >>> ks.range(10).id.empty\n False\n\n >>> ks.range(0).id.empty\n True\n\n >>> ks.DataFrame({}, index=list('abc')).index.empty\n False\n \"\"\"\n return self._internal.resolved_copy.spark_frame.rdd.isEmpty()\n\n @property\n def hasnans(self) -> bool:\n \"\"\"\n Return True if it has any missing values. Otherwise, it returns False.\n\n >>> ks.DataFrame({}, index=list('abc')).index.hasnans\n False\n\n >>> ks.Series(['a', None]).hasnans\n True\n\n >>> ks.Series([1.0, 2.0, np.nan]).hasnans\n True\n\n >>> ks.Series([1, 2, 3]).hasnans\n False\n\n >>> (ks.Series([1.0, 2.0, np.nan]) + 1).hasnans\n True\n\n >>> ks.Series([1, 2, 3]).rename(\"a\").to_frame().set_index(\"a\").index.hasnans\n False\n \"\"\"\n sdf = self._internal.spark_frame\n scol = self.spark.column\n\n if isinstance(self.spark.data_type, (DoubleType, FloatType)):\n return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0]\n else:\n return sdf.select(F.max(scol.isNull())).collect()[0][0]\n\n @property\n def is_monotonic(self) -> bool:\n \"\"\"\n Return boolean if values in the object are monotonically increasing.\n\n .. note:: the current implementation of is_monotonic requires to shuffle\n and aggregate multiple times to check the order locally and globally,\n which is potentially expensive. 
In case of multi-index, all data are\n transferred to single node which can easily cause out-of-memory error currently.\n\n Returns\n -------\n is_monotonic : bool\n\n Examples\n --------\n >>> ser = ks.Series(['1/1/2018', '3/1/2018', '4/1/2018'])\n >>> ser.is_monotonic\n True\n\n >>> df = ks.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})\n >>> df.dates.is_monotonic\n False\n\n >>> df.index.is_monotonic\n True\n\n >>> ser = ks.Series([1])\n >>> ser.is_monotonic\n True\n\n >>> ser = ks.Series([])\n >>> ser.is_monotonic\n True\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.is_monotonic\n True\n\n >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])\n >>> ser.is_monotonic\n False\n\n >>> ser.index.is_monotonic\n True\n\n Support for MultiIndex\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('z', 'e')],\n )\n >>> midx.is_monotonic\n True\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('z', 'a'),\n ('z', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('x', 'e')],\n )\n >>> midx.is_monotonic\n False\n \"\"\"\n return self._is_monotonic(\"increasing\")\n\n is_monotonic_increasing = is_monotonic\n\n @property\n def is_monotonic_decreasing(self) -> bool:\n \"\"\"\n Return boolean if values in the object are monotonically decreasing.\n\n .. note:: the current implementation of is_monotonic_decreasing requires to shuffle\n and aggregate multiple times to check the order locally and globally,\n which is potentially expensive. In case of multi-index, all data are transferred\n to single node which can easily cause out-of-memory error currently.\n\n Returns\n -------\n is_monotonic : bool\n\n Examples\n --------\n >>> ser = ks.Series(['4/1/2018', '3/1/2018', '1/1/2018'])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> df = ks.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})\n >>> df.dates.is_monotonic_decreasing\n False\n\n >>> df.index.is_monotonic_decreasing\n False\n\n >>> ser = ks.Series([1])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser = ks.Series([])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.is_monotonic_decreasing\n True\n\n >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])\n >>> ser.is_monotonic_decreasing\n True\n\n >>> ser.index.is_monotonic_decreasing\n False\n\n Support for MultiIndex\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])\n >>> midx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('z', 'e')],\n )\n >>> midx.is_monotonic_decreasing\n False\n\n >>> midx = ks.MultiIndex.from_tuples(\n ... 
[('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')])\n >>> midx # doctest: +SKIP\n MultiIndex([('z', 'a'),\n ('z', 'b'),\n ('y', 'c'),\n ('y', 'd'),\n ('x', 'e')],\n )\n >>> midx.is_monotonic_decreasing\n True\n \"\"\"\n return self._is_monotonic(\"decreasing\")\n\n def _is_locally_monotonic_spark_column(self, order):\n window = (\n Window.partitionBy(F.col(\"__partition_id\"))\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(-1, -1)\n )\n\n if order == \"increasing\":\n return (F.col(\"__origin\") >= F.lag(F.col(\"__origin\"), 1).over(window)) & F.col(\n \"__origin\"\n ).isNotNull()\n else:\n return (F.col(\"__origin\") <= F.lag(F.col(\"__origin\"), 1).over(window)) & F.col(\n \"__origin\"\n ).isNotNull()\n\n def _is_monotonic(self, order):\n assert order in (\"increasing\", \"decreasing\")\n\n sdf = self._internal.spark_frame\n\n sdf = (\n sdf.select(\n F.spark_partition_id().alias(\n \"__partition_id\"\n ), # Make sure we use the same partition id in the whole job.\n F.col(NATURAL_ORDER_COLUMN_NAME),\n self.spark.column.alias(\"__origin\"),\n )\n .select(\n F.col(\"__partition_id\"),\n F.col(\"__origin\"),\n self._is_locally_monotonic_spark_column(order).alias(\n \"__comparison_within_partition\"\n ),\n )\n .groupby(F.col(\"__partition_id\"))\n .agg(\n F.min(F.col(\"__origin\")).alias(\"__partition_min\"),\n F.max(F.col(\"__origin\")).alias(\"__partition_max\"),\n F.min(F.coalesce(F.col(\"__comparison_within_partition\"), F.lit(True))).alias(\n \"__comparison_within_partition\"\n ),\n )\n )\n\n # Now we're windowing the aggregation results without partition specification.\n # The number of rows here will be as the same of partitions, which is expected\n # to be small.\n window = Window.orderBy(F.col(\"__partition_id\")).rowsBetween(-1, -1)\n if order == \"increasing\":\n comparison_col = F.col(\"__partition_min\") >= F.lag(F.col(\"__partition_max\"), 1).over(\n window\n )\n else:\n comparison_col = F.col(\"__partition_min\") <= F.lag(F.col(\"__partition_max\"), 1).over(\n window\n )\n\n sdf = sdf.select(\n comparison_col.alias(\"__comparison_between_partitions\"),\n F.col(\"__comparison_within_partition\"),\n )\n\n ret = sdf.select(\n F.min(F.coalesce(F.col(\"__comparison_between_partitions\"), F.lit(True)))\n & F.min(F.coalesce(F.col(\"__comparison_within_partition\"), F.lit(True)))\n ).collect()[0][0]\n if ret is None:\n return True\n else:\n return ret\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Return an int representing the number of array dimensions.\n\n Return 1 for Series / Index / MultiIndex.\n\n Examples\n --------\n\n For Series\n\n >>> s = ks.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])\n >>> s.ndim\n 1\n\n For Index\n\n >>> s.index.ndim\n 1\n\n For MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... 
[1, 1, 1, 1, 1, 2, 1, 2, 2]])\n >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s.index.ndim\n 1\n \"\"\"\n return 1\n\n def astype(self, dtype) -> Union[\"Index\", \"Series\"]:\n \"\"\"\n Cast a Koalas object to a specified dtype ``dtype``.\n\n Parameters\n ----------\n dtype : data type\n Use a numpy.dtype or Python type to cast entire pandas object to\n the same type.\n\n Returns\n -------\n casted : same type as caller\n\n See Also\n --------\n to_datetime : Convert argument to datetime.\n\n Examples\n --------\n >>> ser = ks.Series([1, 2], dtype='int32')\n >>> ser\n 0 1\n 1 2\n dtype: int32\n\n >>> ser.astype('int64')\n 0 1\n 1 2\n dtype: int64\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.astype('int64')\n Int64Index([1, 2], dtype='int64', name='a')\n \"\"\"\n spark_type = as_spark_type(dtype)\n if not spark_type:\n raise ValueError(\"Type {} not understood\".format(dtype))\n if isinstance(spark_type, BooleanType):\n if isinstance(self.spark.data_type, StringType):\n scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(\n F.length(self.spark.column) > 0\n )\n elif isinstance(self.spark.data_type, (FloatType, DoubleType)):\n scol = F.when(\n self.spark.column.isNull() | F.isnan(self.spark.column), F.lit(True)\n ).otherwise(self.spark.column.cast(spark_type))\n else:\n scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(\n self.spark.column.cast(spark_type)\n )\n elif isinstance(spark_type, StringType):\n scol = F.when(self.spark.column.isNull(), str(None)).otherwise(\n self.spark.column.cast(spark_type)\n )\n else:\n scol = self.spark.column.cast(spark_type)\n return self._with_new_scol(scol)\n\n def isin(self, values) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Check whether `values` are contained in Series or Index.\n\n Return a boolean Series or Index showing whether each element in the Series\n matches an element in the passed sequence of `values` exactly.\n\n Parameters\n ----------\n values : list or set\n The sequence of values to test.\n\n Returns\n -------\n isin : Series (bool dtype) or Index (bool dtype)\n\n Examples\n --------\n >>> s = ks.Series(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'], name='animal')\n >>> s.isin(['cow', 'lama'])\n 0 True\n 1 True\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n Passing a single string as ``s.isin('lama')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n 0 True\n 1 False\n 2 True\n 3 False\n 4 True\n 5 False\n Name: animal, dtype: bool\n\n >>> s.rename(\"a\").to_frame().set_index(\"a\").index.isin(['lama'])\n Index([True, False, True, False, True, False], dtype='object', name='a')\n \"\"\"\n if not is_list_like(values):\n raise TypeError(\n \"only list-like objects are allowed to be passed\"\n \" to isin(), you passed a [{values_type}]\".format(values_type=type(values).__name__)\n )\n\n return self._with_new_scol(self.spark.column.isin(list(values)))\n\n def isnull(self) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as None or numpy.NaN, gets mapped to True values.\n Everything else gets mapped to False values. 
Characters such as empty strings '' or\n numpy.inf are not considered NA values\n (unless you set pandas.options.mode.use_inf_as_na = True).\n\n Returns\n -------\n Series or Index : Mask of bool values for each element in Series\n that indicates whether an element is not an NA value.\n\n Examples\n --------\n >>> ser = ks.Series([5, 6, np.NaN])\n >>> ser.isna() # doctest: +NORMALIZE_WHITESPACE\n 0 False\n 1 False\n 2 True\n dtype: bool\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.isna()\n Index([False, False, True], dtype='object', name='a')\n \"\"\"\n from databricks.koalas.indexes import MultiIndex\n\n if isinstance(self, MultiIndex):\n raise NotImplementedError(\"isna is not defined for MultiIndex\")\n if isinstance(self.spark.data_type, (FloatType, DoubleType)):\n return self._with_new_scol(self.spark.column.isNull() | F.isnan(self.spark.column))\n else:\n return self._with_new_scol(self.spark.column.isNull())\n\n isna = isnull\n\n def notnull(self) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Detect existing (non-missing) values.\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True.\n Characters such as empty strings '' or numpy.inf are not considered NA values\n (unless you set pandas.options.mode.use_inf_as_na = True).\n NA values, such as None or numpy.NaN, get mapped to False values.\n\n Returns\n -------\n Series or Index : Mask of bool values for each element in Series\n that indicates whether an element is not an NA value.\n\n Examples\n --------\n Show which entries in a Series are not NA.\n\n >>> ser = ks.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ser.notna()\n 0 True\n 1 True\n 2 False\n dtype: bool\n\n >>> ser.rename(\"a\").to_frame().set_index(\"a\").index.notna()\n Index([True, True, False], dtype='object', name='a')\n \"\"\"\n from databricks.koalas.indexes import MultiIndex\n\n if isinstance(self, MultiIndex):\n raise NotImplementedError(\"notna is not defined for MultiIndex\")\n return (~self.isnull()).rename(\n self.name # type: ignore\n )\n\n notna = notnull\n\n # TODO: axis, skipna, and many arguments should be implemented.\n def all(self, axis: Union[int, str] = 0) -> bool:\n \"\"\"\n Return whether all elements are True.\n\n Returns True unless there at least one element within a series that is\n False or equivalent (e.g. 
zero or empty)\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n Examples\n --------\n >>> ks.Series([True, True]).all()\n True\n\n >>> ks.Series([True, False]).all()\n False\n\n >>> ks.Series([0, 1]).all()\n False\n\n >>> ks.Series([1, 2, 3]).all()\n True\n\n >>> ks.Series([True, True, None]).all()\n True\n\n >>> ks.Series([True, False, None]).all()\n False\n\n >>> ks.Series([]).all()\n True\n\n >>> ks.Series([np.nan]).all()\n True\n\n >>> df = ks.Series([True, False, None]).rename(\"a\").to_frame()\n >>> df.set_index(\"a\").index.all()\n False\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n sdf = self._internal.spark_frame.select(self.spark.column)\n col = scol_for(sdf, sdf.columns[0])\n\n # Note that we're ignoring `None`s here for now.\n # any and every was added as of Spark 3.0\n # ret = sdf.select(F.expr(\"every(CAST(`%s` AS BOOLEAN))\" % sdf.columns[0])).collect()[0][0]\n # Here we use min as its alternative:\n ret = sdf.select(F.min(F.coalesce(col.cast(\"boolean\"), F.lit(True)))).collect()[0][0]\n if ret is None:\n return True\n else:\n return ret\n\n # TODO: axis, skipna, and many arguments should be implemented.\n def any(self, axis: Union[int, str] = 0) -> bool:\n \"\"\"\n Return whether any element is True.\n\n Returns False unless there at least one element within a series that is\n True or equivalent (e.g. non-zero or non-empty).\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n Examples\n --------\n >>> ks.Series([False, False]).any()\n False\n\n >>> ks.Series([True, False]).any()\n True\n\n >>> ks.Series([0, 0]).any()\n False\n\n >>> ks.Series([0, 1, 2]).any()\n True\n\n >>> ks.Series([False, False, None]).any()\n False\n\n >>> ks.Series([True, False, None]).any()\n True\n\n >>> ks.Series([]).any()\n False\n\n >>> ks.Series([np.nan]).any()\n False\n\n >>> df = ks.Series([True, False, None]).rename(\"a\").to_frame()\n >>> df.set_index(\"a\").index.any()\n True\n \"\"\"\n axis = validate_axis(axis)\n if axis != 0:\n raise NotImplementedError('axis should be either 0 or \"index\" currently.')\n\n sdf = self._internal.spark_frame.select(self.spark.column)\n col = scol_for(sdf, sdf.columns[0])\n\n # Note that we're ignoring `None`s here for now.\n # any and every was added as of Spark 3.0\n # ret = sdf.select(F.expr(\"any(CAST(`%s` AS BOOLEAN))\" % sdf.columns[0])).collect()[0][0]\n # Here we use max as its alternative:\n ret = sdf.select(F.max(F.coalesce(col.cast(\"boolean\"), F.lit(False)))).collect()[0][0]\n if ret is None:\n return False\n else:\n return ret\n\n # TODO: add frep and axis parameter\n def shift(self, periods=1, fill_value=None) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Shift Series/Index by desired number of periods.\n\n .. note:: the current implementation of shift uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int\n Number of periods to shift. 
Can be positive or negative.\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n The default depends on the dtype of self. For numeric data, np.nan is used.\n\n Returns\n -------\n Copy of input Series/Index, shifted.\n\n Examples\n --------\n >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],\n ... 'Col2': [13, 23, 18, 33, 48],\n ... 'Col3': [17, 27, 22, 37, 52]},\n ... columns=['Col1', 'Col2', 'Col3'])\n\n >>> df.Col1.shift(periods=3)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 10.0\n 4 20.0\n Name: Col1, dtype: float64\n\n >>> df.Col2.shift(periods=3, fill_value=0)\n 0 0\n 1 0\n 2 0\n 3 13\n 4 23\n Name: Col2, dtype: int64\n\n >>> df.index.shift(periods=3, fill_value=0)\n Int64Index([0, 0, 0, 0, 1], dtype='int64')\n \"\"\"\n return self._shift(periods, fill_value)\n\n def _shift(self, periods, fill_value, part_cols=()):\n if not isinstance(periods, int):\n raise ValueError(\"periods should be an int; however, got [%s]\" % type(periods).__name__)\n\n col = self.spark.column\n window = (\n Window.partitionBy(*part_cols)\n .orderBy(NATURAL_ORDER_COLUMN_NAME)\n .rowsBetween(-periods, -periods)\n )\n lag_col = F.lag(col, periods).over(window)\n col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)\n return self._with_new_scol(col)\n\n # TODO: Update Documentation for Bins Parameter when its supported\n def value_counts(\n self, normalize=False, sort=True, ascending=False, bins=None, dropna=True\n ) -> \"Series\":\n \"\"\"\n Return a Series containing counts of unique values.\n The resulting object will be in descending order so that the\n first element is the most frequently-occurring element.\n Excludes NA values by default.\n\n Parameters\n ----------\n normalize : boolean, default False\n If True then the object returned will contain the relative\n frequencies of the unique values.\n sort : boolean, default True\n Sort by values.\n ascending : boolean, default False\n Sort in ascending order.\n bins : Not Yet Supported\n dropna : boolean, default True\n Don't include counts of NaN.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.count: Number of non-NA elements in a Series.\n\n Examples\n --------\n For Series\n\n >>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})\n >>> df.x.value_counts() # doctest: +NORMALIZE_WHITESPACE\n 1.0 3\n 0.0 2\n Name: x, dtype: int64\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> df.x.value_counts(normalize=True) # doctest: +NORMALIZE_WHITESPACE\n 1.0 0.6\n 0.0 0.4\n Name: x, dtype: float64\n\n **dropna**\n With `dropna` set to `False` we can also see NaN index values.\n\n >>> df.x.value_counts(dropna=False) # doctest: +NORMALIZE_WHITESPACE\n 1.0 3\n 0.0 2\n NaN 1\n Name: x, dtype: int64\n\n For Index\n\n >>> idx = ks.Index([3, 1, 2, 3, 4, np.nan])\n >>> idx\n Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64')\n\n >>> idx.value_counts().sort_index()\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n dtype: int64\n\n **sort**\n\n With `sort` set to `False`, the result wouldn't be sorted by number of count.\n\n >>> idx.value_counts(sort=True).sort_index()\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n dtype: int64\n\n **normalize**\n\n With `normalize` set to `True`, returns the relative frequency by\n dividing all values by the sum of values.\n\n >>> idx.value_counts(normalize=True).sort_index()\n 1.0 0.2\n 2.0 0.2\n 3.0 0.4\n 4.0 0.2\n dtype: float64\n\n **dropna**\n\n With `dropna` set to `False` we can also see NaN index 
values.\n\n >>> idx.value_counts(dropna=False).sort_index() # doctest: +SKIP\n 1.0 1\n 2.0 1\n 3.0 2\n 4.0 1\n NaN 1\n dtype: int64\n\n For MultiIndex.\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [1, 1, 1, 1, 1, 2, 1, 2, 2]])\n >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s.index # doctest: +SKIP\n MultiIndex([( 'lama', 'weight'),\n ( 'lama', 'weight'),\n ( 'lama', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'weight'),\n ( 'cow', 'length'),\n ('falcon', 'weight'),\n ('falcon', 'length'),\n ('falcon', 'length')],\n )\n\n >>> s.index.value_counts().sort_index()\n (cow, length) 1\n (cow, weight) 2\n (falcon, length) 2\n (falcon, weight) 1\n (lama, weight) 3\n dtype: int64\n\n >>> s.index.value_counts(normalize=True).sort_index()\n (cow, length) 0.111111\n (cow, weight) 0.222222\n (falcon, length) 0.222222\n (falcon, weight) 0.111111\n (lama, weight) 0.333333\n dtype: float64\n\n If Index has name, keep the name up.\n\n >>> idx = ks.Index([0, 0, 0, 1, 1, 2, 3], name='koalas')\n >>> idx.value_counts().sort_index()\n 0 3\n 1 2\n 2 1\n 3 1\n Name: koalas, dtype: int64\n \"\"\"\n from databricks.koalas.series import first_series\n\n if bins is not None:\n raise NotImplementedError(\"value_counts currently does not support bins\")\n\n if dropna:\n sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()\n else:\n sdf_dropna = self._internal.spark_frame.select(self.spark.column)\n index_name = SPARK_DEFAULT_INDEX_NAME\n column_name = self._internal.data_spark_column_names[0]\n sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()\n if sort:\n if ascending:\n sdf = sdf.orderBy(F.col(\"count\"))\n else:\n sdf = sdf.orderBy(F.col(\"count\").desc())\n\n if normalize:\n sum = sdf_dropna.count()\n sdf = sdf.withColumn(\"count\", F.col(\"count\") / F.lit(sum))\n\n internal = InternalFrame(\n spark_frame=sdf,\n index_spark_column_names=[index_name],\n column_labels=self._internal.column_labels,\n data_spark_columns=[scol_for(sdf, \"count\")],\n column_label_names=self._internal.column_label_names,\n )\n\n return first_series(DataFrame(internal))\n\n def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:\n \"\"\"\n Return number of unique elements in the object.\n Excludes NA values by default.\n\n Parameters\n ----------\n dropna : bool, default True\n Don’t include NaN in the count.\n approx: bool, default False\n If False, will use the exact algorithm and return the exact number of unique.\n If True, it uses the HyperLogLog approximate algorithm, which is significantly faster\n for large amount of data.\n Note: This parameter is specific to Koalas and is not found in pandas.\n rsd: float, default 0.05\n Maximum estimation error allowed in the HyperLogLog algorithm.\n Note: Just like ``approx`` this parameter is specific to Koalas.\n\n Returns\n -------\n int\n\n See Also\n --------\n DataFrame.nunique: Method nunique for DataFrame.\n Series.count: Count non-NA/null observations in the Series.\n\n Examples\n --------\n >>> ks.Series([1, 2, 3, np.nan]).nunique()\n 3\n\n >>> ks.Series([1, 2, 3, np.nan]).nunique(dropna=False)\n 4\n\n On big data, we recommend using the approximate algorithm to speed up this function.\n The result will be very close to the exact unique count.\n\n >>> ks.Series([1, 2, 3, np.nan]).nunique(approx=True)\n 3\n\n >>> idx = ks.Index([1, 1, 2, None])\n >>> idx\n 
Float64Index([1.0, 1.0, 2.0, nan], dtype='float64')\n\n >>> idx.nunique()\n 2\n\n >>> idx.nunique(dropna=False)\n 3\n \"\"\"\n res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])\n return res.collect()[0][0]\n\n def _nunique(self, dropna=True, approx=False, rsd=0.05):\n colname = self._internal.data_spark_column_names[0]\n count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct\n if dropna:\n return count_fn(self.spark.column).alias(colname)\n else:\n return (\n count_fn(self.spark.column)\n + F.when(\n F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1\n ).otherwise(0)\n ).alias(colname)\n\n def take(self, indices) -> Union[\"Series\", \"Index\"]:\n \"\"\"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n\n Series\n\n >>> kser = ks.Series([100, 200, 300, 400, 500])\n >>> kser\n 0 100\n 1 200\n 2 300\n 3 400\n 4 500\n dtype: int64\n\n >>> kser.take([0, 2, 4]).sort_index()\n 0 100\n 2 300\n 4 500\n dtype: int64\n\n Index\n\n >>> kidx = ks.Index([100, 200, 300, 400, 500])\n >>> kidx\n Int64Index([100, 200, 300, 400, 500], dtype='int64')\n\n >>> kidx.take([0, 2, 4]).sort_values()\n Int64Index([100, 300, 500], dtype='int64')\n\n MultiIndex\n\n >>> kmidx = ks.MultiIndex.from_tuples([(\"x\", \"a\"), (\"x\", \"b\"), (\"x\", \"c\")])\n >>> kmidx # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'b'),\n ('x', 'c')],\n )\n\n >>> kmidx.take([0, 2]) # doctest: +SKIP\n MultiIndex([('x', 'a'),\n ('x', 'c')],\n )\n \"\"\"\n if not is_list_like(indices) or isinstance(indices, (dict, set)):\n raise ValueError(\"`indices` must be a list-like except dict or set\")\n if isinstance(self, ks.Series):\n return cast(ks.Series, self.iloc[indices])\n else:\n return self._kdf.iloc[indices].index\n" ]
[ [ "pandas.api.types.is_list_like", "numpy.timedelta64" ] ]
taolon2018/Tensorflow2_Models
[ "b931b6779e8d2fc775bcaa1e9cbcad6edf0438f4" ]
[ "object_detection/YOLO_v3/model/yolov3.py" ]
[ "import tensorflow as tf\n\nfrom object_detection.YOLO_v3.backbone.darknet53 import Darknet53, ConvLayer\n\n\nclass ConvSet(tf.keras.layers.Layer):\n def __init__(self, output_dim):\n super(ConvSet, self).__init__()\n self.conv_1 = ConvLayer(output_dim, 1)\n self.conv_2 = ConvLayer(output_dim * 2, 3)\n self.conv_3 = ConvLayer(output_dim, 1)\n self.conv_4 = ConvLayer(output_dim * 2, 3)\n self.conv_5 = ConvLayer(output_dim, 1)\n\n def __call__(self, x):\n x = self.conv_1(x)\n x = self.conv_2(x)\n x = self.conv_3(x)\n x = self.conv_4(x)\n x = self.conv_5(x)\n return x\n\n\nclass Yolov3(tf.keras.Model):\n def __init__(self, predict_class_number=21):\n super(Yolov3, self).__init__()\n self.darknet53 = Darknet53()\n self.convset_1 = ConvSet(512)\n self.small_branch_conv_1 = ConvLayer(1024, 1)\n self.small_branch_conv_2 = tf.keras.layers.Conv2D(\n 3 * (predict_class_number + 5), 1, activation=None\n )\n self.conv_1 = ConvLayer(256, 1)\n self.convset_2 = ConvSet(256)\n self.medium_branch_conv_1 = ConvLayer(512, 1)\n self.medium_branch_conv_2 = tf.keras.layers.Conv2D(\n 3 * (predict_class_number + 5), 1, activation=None\n )\n self.conv_2 = ConvLayer(512, 1)\n self.convset_3 = ConvSet(128)\n self.large_branch_conv_1 = ConvLayer(256, 1)\n self.large_branch_conv_2 = tf.keras.layers.Conv2D(\n 3 * (predict_class_number + 5), 1, activation=None\n )\n self.conv_3 = ConvLayer(1024, 1)\n\n def __call__(self, x):\n input_1, input_2, input_3 = self.darknet53(x)\n x = input_3\n\n x = self.convset_1(x)\n\n output_1 = self.small_branch_conv_1(x)\n output_1 = self.small_branch_conv_2(output_1)\n\n x = self.conv_1(x)\n x = tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=\"nearest\")\n x = tf.concat([x, input_2], axis=-1)\n\n x = self.convset_2(x)\n\n output_2 = self.medium_branch_conv_1(x)\n output_2 = self.medium_branch_conv_2(output_2)\n\n x = self.conv_2(x)\n x = tf.image.resize(x, (x.shape[1] * 2, x.shape[2] * 2), method=\"nearest\")\n x = tf.concat([x, input_1], axis=-1)\n\n x = self.convset_3(x)\n\n output_3 = self.large_branch_conv_1(x)\n output_3 = self.large_branch_conv_2(output_3)\n\n return output_1, output_2, output_3\n" ]
[ [ "tensorflow.keras.layers.Conv2D", "tensorflow.image.resize", "tensorflow.concat" ] ]
jeffreyblair/ML
[ "9cf810cbe9a1720de9013740650f9d72b61bde59" ]
[ "logistic_regression/logistic.py" ]
[ "\"\"\" Methods for doing logistic regression.\"\"\"\n\nimport numpy as np\nfrom utils import sigmoid\n\ndef logistic_predict(weights, data):\n \"\"\"\n Compute the probabilities predicted by the logistic classifier.\n\n Note: N is the number of examples and\n M is the number of features per example.\n\n Inputs:\n weights: (M+1) x 1 vector of weights, where the last element\n corresponds to the bias (intercepts).\n data: N x M data matrix where each row corresponds\n to one data point.\n Outputs:\n y: :N x 1 vector of probabilities. This is the output of the classifier.\n \"\"\"\n\n ones = np.array([[1] for i in range(data.shape[0])])\n n_data = np.c_[data, ones]\n y = n_data @ weights\n return sigmoid(y)\n\ndef evaluate(targets, y):\n \"\"\"\n Compute evaluation metrics.\n Inputs:\n targets : N x 1 vector of targets.\n y : N x 1 vector of probabilities.\n Outputs:\n ce : (scalar) Cross entropy. CE(p, q) = E_p[-log q]. Here we want to compute CE(targets, y)\n frac_correct : (scalar) Fraction of inputs classified correctly.\n \"\"\"\n ce = -np.sum(targets * np.log(y) + (1-targets) * np.log(1-y))/len(targets)\n\n predictions = [1 if y_i > 0.5 else 0 for y_i in y]\n correct = [1 if predictions[i] == targets[i] else 0 for i in range(len(predictions))]\n\n frac_correct = sum(correct)/len(correct)\n\n return ce, frac_correct\n\ndef logistic(weights, data, targets, hyperparameters):\n \"\"\"\n Calculate negative log likelihood and its derivatives with respect to weights.\n Also return the predictions.\n\n Note: N is the number of examples and\n M is the number of features per example.\n\n Inputs:\n weights: (M+1) x 1 vector of weights, where the last element\n corresponds to bias (intercepts).\n data: N x M data matrix where each row corresponds\n to one data point.\n targets: N x 1 vector of targets class probabilities.\n hyperparameters: The hyperparameters dictionary.\n\n Outputs:\n f: The sum of the loss over all data points. This is the objective that we want to minimize.\n df: (M+1) x 1 vector of derivative of f w.r.t. weights.\n y: N x 1 vector of probabilities.\n \"\"\"\n\n y = logistic_predict(weights, data)\n f, frac = evaluate(targets, y)\n d = data.T @ (y - targets)\n db = np.array([sum((y - targets))])\n zero = np.array([[0]])\n df = np.r_[d, db]\n\n return f, df, y\n\n\ndef logistic_pen(weights, data, targets, hyperparameters):\n \"\"\"\n Calculate negative log likelihood and its derivatives with respect to weights.\n Also return the predictions.\n\n Note: N is the number of examples and\n M is the number of features per example.\n\n Inputs:\n weights: (M+1) x 1 vector of weights, where the last element\n corresponds to bias (intercepts).\n data: N x M data matrix where each row corresponds\n to one data point.\n targets: N x 1 vector of targets class probabilities.\n hyperparameters: The hyperparameters dictionary.\n\n Outputs:\n f: The sum of the loss over all data points. This is the objective that we want to minimize.\n df: (M+1) x 1 vector of derivative of f w.r.t. weights.\n \"\"\"\n\n lambd = hyperparameters['weight_regularization']\n f, dwb, y = logistic(weights, data, targets, hyperparameters)\n\n regularizer = hyperparameters['weight_regularization'] * weights\n regularizer[-1] = 0 # do not penalize bias\n df = dwb + regularizer\n\n return f, df, y\n" ]
[ [ "numpy.array", "numpy.log" ] ]
lukevolpatti/xarray
[ "65ca92a5c0a4143d00dd7a822bcb1d49738717f1" ]
[ "xarray/core/common.py" ]
[ "import warnings\nfrom contextlib import suppress\nfrom html import escape\nfrom textwrap import dedent\nfrom typing import (\n Any,\n Callable,\n Dict,\n Hashable,\n Iterable,\n Iterator,\n List,\n Mapping,\n Tuple,\n TypeVar,\n Union,\n)\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import dtypes, duck_array_ops, formatting, formatting_html, ops\nfrom .arithmetic import SupportsArithmetic\nfrom .npcompat import DTypeLike\nfrom .options import OPTIONS, _get_keep_attrs\nfrom .pycompat import dask_array_type\nfrom .rolling_exp import RollingExp\nfrom .utils import Frozen, either_dict_or_kwargs, is_scalar\n\n# Used as a sentinel value to indicate a all dimensions\nALL_DIMS = ...\n\n\nC = TypeVar(\"C\")\nT = TypeVar(\"T\")\n\n\nclass ImplementsArrayReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, axis=None, skipna=None, **kwargs):\n return self.reduce(func, dim, axis, skipna=skipna, **kwargs)\n\n else:\n\n def wrapped_func(self, dim=None, axis=None, **kwargs): # type: ignore\n return self.reduce(func, dim, axis, **kwargs)\n\n return wrapped_func\n\n _reduce_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `{name}` is calculated over axes.\"\"\"\n )\n\n _cum_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied.\"\"\"\n )\n\n\nclass ImplementsDatasetReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, skipna=None, **kwargs):\n return self.reduce(\n func, dim, skipna=skipna, numeric_only=numeric_only, **kwargs\n )\n\n else:\n\n def wrapped_func(self, dim=None, **kwargs): # type: ignore\n return self.reduce(func, dim, numeric_only=numeric_only, **kwargs)\n\n return wrapped_func\n\n _reduce_extra_args_docstring = dedent(\n \"\"\"\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`. By default `{name}` is\n applied over all dimensions.\n \"\"\"\n ).strip()\n\n _cum_extra_args_docstring = dedent(\n \"\"\"\n dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. 
Only one of the 'dim'\n and 'axis' arguments can be supplied.\n \"\"\"\n ).strip()\n\n\nclass AbstractArray(ImplementsArrayReduce):\n \"\"\"Shared base class for DataArray and Variable.\n \"\"\"\n\n __slots__ = ()\n\n def __bool__(self: Any) -> bool:\n return bool(self.values)\n\n def __float__(self: Any) -> float:\n return float(self.values)\n\n def __int__(self: Any) -> int:\n return int(self.values)\n\n def __complex__(self: Any) -> complex:\n return complex(self.values)\n\n def __array__(self: Any, dtype: DTypeLike = None) -> np.ndarray:\n return np.asarray(self.values, dtype=dtype)\n\n def __repr__(self) -> str:\n return formatting.array_repr(self)\n\n def _repr_html_(self):\n if OPTIONS[\"display_style\"] == \"text\":\n return f\"<pre>{escape(repr(self))}</pre>\"\n return formatting_html.array_repr(self)\n\n def _iter(self: Any) -> Iterator[Any]:\n for n in range(len(self)):\n yield self[n]\n\n def __iter__(self: Any) -> Iterator[Any]:\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\")\n return self._iter()\n\n def get_axis_num(\n self, dim: Union[Hashable, Iterable[Hashable]]\n ) -> Union[int, Tuple[int, ...]]:\n \"\"\"Return axis number(s) corresponding to dimension(s) in this array.\n\n Parameters\n ----------\n dim : str or iterable of str\n Dimension name(s) for which to lookup axes.\n\n Returns\n -------\n int or tuple of int\n Axis number or numbers corresponding to the given dimensions.\n \"\"\"\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n return tuple(self._get_axis_num(d) for d in dim)\n else:\n return self._get_axis_num(dim)\n\n def _get_axis_num(self: Any, dim: Hashable) -> int:\n try:\n return self.dims.index(dim)\n except ValueError:\n raise ValueError(f\"{dim!r} not found in array dimensions {self.dims!r}\")\n\n @property\n def sizes(self: Any) -> Mapping[Hashable, int]:\n \"\"\"Ordered mapping from dimension names to lengths.\n\n Immutable.\n\n See also\n --------\n Dataset.sizes\n \"\"\"\n return Frozen(dict(zip(self.dims, self.shape)))\n\n\nclass AttrAccessMixin:\n \"\"\"Mixin class that allows getting keys with attribute access\n \"\"\"\n\n __slots__ = ()\n\n def __init_subclass__(cls):\n \"\"\"Verify that all subclasses explicitly define ``__slots__``. 
If they don't,\n raise error in the core xarray module and a FutureWarning in third-party\n extensions.\n \"\"\"\n if not hasattr(object.__new__(cls), \"__dict__\"):\n pass\n elif cls.__module__.startswith(\"xarray.\"):\n raise AttributeError(\"%s must explicitly define __slots__\" % cls.__name__)\n else:\n cls.__setattr__ = cls._setattr_dict\n warnings.warn(\n \"xarray subclass %s should explicitly define __slots__\" % cls.__name__,\n FutureWarning,\n stacklevel=2,\n )\n\n @property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for attribute-style access\n \"\"\"\n return []\n\n @property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-autocompletion\n \"\"\"\n return []\n\n def __getattr__(self, name: str) -> Any:\n if name not in {\"__dict__\", \"__setstate__\"}:\n # this avoids an infinite loop when pickle looks for the\n # __setstate__ attribute before the xarray object is initialized\n for source in self._attr_sources:\n with suppress(KeyError):\n return source[name]\n raise AttributeError(\n \"{!r} object has no attribute {!r}\".format(type(self).__name__, name)\n )\n\n # This complicated two-method design boosts overall performance of simple operations\n # - particularly DataArray methods that perform a _to_temp_dataset() round-trip - by\n # a whopping 8% compared to a single method that checks hasattr(self, \"__dict__\") at\n # runtime before every single assignment. All of this is just temporary until the\n # FutureWarning can be changed into a hard crash.\n def _setattr_dict(self, name: str, value: Any) -> None:\n \"\"\"Deprecated third party subclass (see ``__init_subclass__`` above)\n \"\"\"\n object.__setattr__(self, name, value)\n if name in self.__dict__:\n # Custom, non-slotted attr, or improperly assigned variable?\n warnings.warn(\n \"Setting attribute %r on a %r object. Explicitly define __slots__ \"\n \"to suppress this warning for legitimate custom attributes and \"\n \"raise an error when attempting variables assignments.\"\n % (name, type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n\n def __setattr__(self, name: str, value: Any) -> None:\n \"\"\"Objects with ``__slots__`` raise AttributeError if you try setting an\n undeclared attribute. This is desirable, but the error message could use some\n improvement.\n \"\"\"\n try:\n object.__setattr__(self, name, value)\n except AttributeError as e:\n # Don't accidentally shadow custom AttributeErrors, e.g.\n # DataArray.dims.setter\n if str(e) != \"{!r} object has no attribute {!r}\".format(\n type(self).__name__, name\n ):\n raise\n raise AttributeError(\n \"cannot set attribute %r on a %r object. Use __setitem__ style\"\n \"assignment (e.g., `ds['name'] = ...`) instead of assigning variables.\"\n % (name, type(self).__name__)\n ) from e\n\n def __dir__(self) -> List[str]:\n \"\"\"Provide method name lookup and completion. 
Only provide 'public'\n methods.\n \"\"\"\n extra_attrs = [\n item\n for sublist in self._attr_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return sorted(set(dir(type(self)) + extra_attrs))\n\n def _ipython_key_completions_(self) -> List[str]:\n \"\"\"Provide method for the key-autocompletions in IPython.\n See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion\n For the details.\n \"\"\"\n item_lists = [\n item\n for sublist in self._item_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return list(set(item_lists))\n\n\ndef get_squeeze_dims(\n xarray_obj,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n axis: Union[int, Iterable[int], None] = None,\n) -> List[Hashable]:\n \"\"\"Get a list of dimensions to squeeze out.\n \"\"\"\n if dim is not None and axis is not None:\n raise ValueError(\"cannot use both parameters `axis` and `dim`\")\n if dim is None and axis is None:\n return [d for d, s in xarray_obj.sizes.items() if s == 1]\n\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n dim = list(dim)\n elif dim is not None:\n dim = [dim]\n else:\n assert axis is not None\n if isinstance(axis, int):\n axis = [axis]\n axis = list(axis)\n if any(not isinstance(a, int) for a in axis):\n raise TypeError(\"parameter `axis` must be int or iterable of int.\")\n alldims = list(xarray_obj.sizes.keys())\n dim = [alldims[a] for a in axis]\n\n if any(xarray_obj.sizes[k] > 1 for k in dim):\n raise ValueError(\n \"cannot select a dimension to squeeze out \"\n \"which has length greater than one\"\n )\n return dim\n\n\nclass DataWithCoords(SupportsArithmetic, AttrAccessMixin):\n \"\"\"Shared base class for Dataset and DataArray.\"\"\"\n\n __slots__ = ()\n\n _rolling_exp_cls = RollingExp\n\n def squeeze(\n self,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n drop: bool = False,\n axis: Union[int, Iterable[int], None] = None,\n ):\n \"\"\"Return a new object with squeezed data.\n\n Parameters\n ----------\n dim : None or Hashable or iterable of Hashable, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. 
If\n None, all length one dimensions are squeezed.\n drop : bool, optional\n If ``drop=True``, drop squeezed coordinates instead of making them\n scalar.\n axis : None or int or iterable of int, optional\n Like dim, but positional.\n\n Returns\n -------\n squeezed : same type as caller\n This object, but with with all or a subset of the dimensions of\n length 1 removed.\n\n See Also\n --------\n numpy.squeeze\n \"\"\"\n dims = get_squeeze_dims(self, dim, axis)\n return self.isel(drop=drop, **{d: 0 for d in dims})\n\n def get_index(self, key: Hashable) -> pd.Index:\n \"\"\"Get an index for a dimension, with fall-back to a default RangeIndex\n \"\"\"\n if key not in self.dims:\n raise KeyError(key)\n\n try:\n return self.indexes[key]\n except KeyError:\n # need to ensure dtype=int64 in case range is empty on Python 2\n return pd.Index(range(self.sizes[key]), name=key, dtype=np.int64)\n\n def _calc_assign_results(\n self: C, kwargs: Mapping[Hashable, Union[T, Callable[[C], T]]]\n ) -> Dict[Hashable, T]:\n return {k: v(self) if callable(v) else v for k, v in kwargs.items()}\n\n def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign new coordinates to this object.\n\n Returns a new object with all the original data in addition to the new\n coordinates.\n\n Parameters\n ----------\n coords : dict, optional\n A dict where the keys are the names of the coordinates\n with the new values to assign. If the values are callable, they are\n computed on this object and assigned to new coordinate variables.\n If the values are not callable, (e.g. a ``DataArray``, scalar, or\n array), they are simply assigned. A new coordinate can also be\n defined and attached to an existing dimension using a tuple with\n the first element the dimension name and the second element the\n values for this new coordinate.\n\n **coords_kwargs : keyword, value pairs, optional\n The keyword arguments form of ``coords``.\n One of ``coords`` or ``coords_kwargs`` must be provided.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new coordinates in addition to the existing\n data.\n\n Examples\n --------\n Convert longitude coordinates from 0-359 to -180-179:\n\n >>> da = xr.DataArray(\n ... np.random.rand(4), coords=[np.array([358, 359, 0, 1])], dims=\"lon\",\n ... )\n >>> da\n <xarray.DataArray (lon: 4)>\n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))\n <xarray.DataArray (lon: 4)>\n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n The function also accepts dictionary arguments:\n\n >>> da.assign_coords({\"lon\": (((da.lon + 180) % 360) - 180)})\n <xarray.DataArray (lon: 4)>\n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n New coordinate can also be attached to an existing dimension:\n\n >>> lon_2 = np.array([300, 289, 0, 1])\n >>> da.assign_coords(lon_2=(\"lon\", lon_2))\n <xarray.DataArray (lon: 4)>\n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n lon_2 (lon) int64 300 289 0 1\n\n Note that the same result can also be obtained with a dict e.g.\n\n >>> _ = da.assign_coords({\"lon_2\": (\"lon\", lon_2)})\n\n Notes\n -----\n Since ``coords_kwargs`` is a dictionary, the order of your arguments\n may not be preserved, and so the order of the new variables is not well\n defined. 
Assigning multiple variables within the same ``assign_coords``\n is possible, but you cannot reference other variables created within\n the same ``assign_coords`` call.\n\n See also\n --------\n Dataset.assign\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n data = self.copy(deep=False)\n results = self._calc_assign_results(coords_kwargs)\n data.coords.update(results)\n return data\n\n def assign_attrs(self, *args, **kwargs):\n \"\"\"Assign new attrs to this object.\n\n Returns a new object equivalent to ``self.attrs.update(*args, **kwargs)``.\n\n Parameters\n ----------\n args : positional arguments passed into ``attrs.update``.\n kwargs : keyword arguments passed into ``attrs.update``.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new attrs in addition to the existing data.\n\n See also\n --------\n Dataset.assign\n \"\"\"\n out = self.copy(deep=False)\n out.attrs.update(*args, **kwargs)\n return out\n\n def pipe(\n self,\n func: Union[Callable[..., T], Tuple[Callable[..., T], str]],\n *args,\n **kwargs,\n ) -> T:\n \"\"\"\n Apply ``func(self, *args, **kwargs)``\n\n This method replicates the pandas method of the same name.\n\n Parameters\n ----------\n func : function\n function to apply to this xarray object (Dataset/DataArray).\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the xarray object.\n args : positional arguments passed into ``func``.\n kwargs : a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n xarray or pandas objects, e.g., instead of writing\n\n >>> f(g(h(ds), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (ds.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c))\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (ds.pipe(h).pipe(g, arg1=a).pipe((f, \"arg2\"), arg1=a, arg3=c))\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": (\n ... (\"lat\", \"lon\"),\n ... 20 * np.random.rand(4).reshape(2, 2),\n ... ),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n >>> def adder(data, arg):\n ... return data + arg\n ...\n >>> def div(data, arg):\n ... return data / arg\n ...\n >>> def sub_mult(data, sub_arg, mult_arg):\n ... 
return (data * mult_arg) - sub_arg\n ...\n >>> x.pipe(adder, 2)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> x.pipe(adder, arg=2)\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> (\n ... x.pipe(adder, arg=2)\n ... .pipe(div, arg=2)\n ... .pipe(sub_mult, sub_arg=2, mult_arg=2)\n ... )\n <xarray.Dataset>\n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n See Also\n --------\n pandas.DataFrame.pipe\n \"\"\"\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\n \"%s is both the pipe target and a keyword \" \"argument\" % target\n )\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)\n\n def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose unique values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n\n Examples\n --------\n Calculate daily anomalies for daily data:\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 1826, num=1827),\n ... coords=[pd.date_range(\"1/1/2000\", \"31/12/2004\", freq=\"D\")],\n ... dims=\"time\",\n ... 
)\n >>> da\n <xarray.DataArray (time: 1827)>\n array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n >>> da.groupby(\"time.dayofyear\") - da.groupby(\"time.dayofyear\").mean(\"time\")\n <xarray.DataArray (time: 1827)>\n array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...\n\n See Also\n --------\n core.groupby.DataArrayGroupBy\n core.groupby.DatasetGroupBy\n \"\"\"\n # While we don't generally check the type of every arg, passing\n # multiple dimensions as multiple arguments is common enough, and the\n # consequences hidden enough (strings evaluate as true) to warrant\n # checking here.\n # A future version could make squeeze kwarg only, but would face\n # backward-compat issues.\n if not isinstance(squeeze, bool):\n raise TypeError(\n f\"`squeeze` must be True or False, but {squeeze} was supplied\"\n )\n\n return self._groupby_cls(\n self, group, squeeze=squeeze, restore_coord_dims=restore_coord_dims\n )\n\n def groupby_bins(\n self,\n group,\n bins,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n squeeze: bool = True,\n restore_coord_dims: bool = None,\n ):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Rather than using all unique values of `group`, the values are discretized\n first by applying `pandas.cut` [1]_ to `group`.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose binned values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n bins : int or array of scalars\n If bins is an int, it defines the number of equal-width bins in the\n range of x. However, in this case, the range of x is extended by .1%\n on each side to include the min or max values of x. If bins is a\n sequence it defines the bin edges allowing for non-uniform bin\n width. No extension of the range of x is done in this case.\n right : boolean, optional\n Indicates whether the bins include the rightmost edge or not. If\n right == True (the default), then the bins [1,2,3,4] indicate\n (1,2], (2,3], (3,4].\n labels : array or boolean, default None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. If False, string bin labels are assigned by\n `pandas.cut`.\n precision : int\n The precision at which to store and display the bins labels.\n include_lowest : bool\n Whether the first interval should be left-inclusive or not.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n The name of the group has the added suffix `_bins` in order to\n distinguish it from the original variable.\n\n References\n ----------\n .. 
[1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html\n \"\"\"\n return self._groupby_cls(\n self,\n group,\n squeeze=squeeze,\n bins=bins,\n restore_coord_dims=restore_coord_dims,\n cut_kwargs={\n \"right\": right,\n \"labels\": labels,\n \"precision\": precision,\n \"include_lowest\": include_lowest,\n },\n )\n\n def weighted(self, weights):\n \"\"\"\n Weighted operations.\n\n Parameters\n ----------\n weights : DataArray\n An array of weights associated with the values in this Dataset.\n Each value in the data contributes to the reduction operation\n according to its associated weight.\n\n Notes\n -----\n ``weights`` must be a DataArray and cannot contain missing values.\n Missing values can be replaced by ``weights.fillna(0)``.\n \"\"\"\n\n return self._weighted_cls(self, weights)\n\n def rolling(\n self,\n dim: Mapping[Hashable, int] = None,\n min_periods: int = None,\n center: bool = False,\n keep_attrs: bool = None,\n **window_kwargs: int,\n ):\n \"\"\"\n Rolling window object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to create the rolling iterator\n along (e.g. `time`) to its moving window size.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **window_kwargs : optional\n The keyword arguments form of ``dim``.\n One of dim or window_kwargs must be provided.\n\n Returns\n -------\n Rolling object (core.rolling.DataArrayRolling for DataArray,\n core.rolling.DatasetRolling for Dataset.)\n\n Examples\n --------\n Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 11, num=12),\n ... coords=[\n ... pd.date_range(\n ... \"15/12/1999\", periods=12, freq=pd.DateOffset(months=1),\n ... )\n ... ],\n ... dims=\"time\",\n ... 
)\n >>> da\n <xarray.DataArray (time: 12)>\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.rolling(time=3, center=True).mean()\n <xarray.DataArray (time: 12)>\n array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n\n Remove the NaNs using ``dropna()``:\n\n >>> da.rolling(time=3, center=True).mean().dropna(\"time\")\n <xarray.DataArray (time: 10)>\n array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-15 2000-02-15 2000-03-15 ...\n\n See Also\n --------\n core.rolling.DataArrayRolling\n core.rolling.DatasetRolling\n \"\"\"\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n dim = either_dict_or_kwargs(dim, window_kwargs, \"rolling\")\n return self._rolling_cls(\n self, dim, min_periods=min_periods, center=center, keep_attrs=keep_attrs\n )\n\n def rolling_exp(\n self,\n window: Mapping[Hashable, int] = None,\n window_type: str = \"span\",\n **window_kwargs,\n ):\n \"\"\"\n Exponentially-weighted moving window.\n Similar to EWM in pandas\n\n Requires the optional Numbagg dependency.\n\n Parameters\n ----------\n window : A single mapping from a dimension name to window value,\n optional\n\n dim : str\n Name of the dimension to create the rolling exponential window\n along (e.g., `time`).\n window : int\n Size of the moving window. The type of this is specified in\n `window_type`\n window_type : str, one of ['span', 'com', 'halflife', 'alpha'],\n default 'span'\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html\n **window_kwargs : optional\n The keyword arguments form of ``window``.\n One of window or window_kwargs must be provided.\n\n See Also\n --------\n core.rolling_exp.RollingExp\n \"\"\"\n window = either_dict_or_kwargs(window, window_kwargs, \"rolling_exp\")\n\n return self._rolling_exp_cls(self, window, window_type)\n\n def coarsen(\n self,\n dim: Mapping[Hashable, int] = None,\n boundary: str = \"exact\",\n side: Union[str, Mapping[Hashable, str]] = \"left\",\n coord_func: str = \"mean\",\n keep_attrs: bool = None,\n **window_kwargs: int,\n ):\n \"\"\"\n Coarsen object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to the window size.\n\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of the window size. If 'trim', the excess entries are\n dropped. If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func : function (name) that is applied to the coordinates,\n or a mapping from coordinate name to function (name).\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n Coarsen object (core.rolling.DataArrayCoarsen for DataArray,\n core.rolling.DatasetCoarsen for Dataset.)\n\n Examples\n --------\n Coarsen the long time series by averaging over every four days.\n\n >>> da = xr.DataArray(\n ... 
np.linspace(0, 364, num=364),\n ... dims=\"time\",\n ... coords={\"time\": pd.date_range(\"15/12/1999\", periods=364)},\n ... )\n >>> da\n <xarray.DataArray (time: 364)>\n array([ 0. , 1.002755, 2.00551 , ..., 361.99449 , 362.997245,\n 364. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12\n >>>\n >>> da.coarsen(time=3, boundary=\"trim\").mean()\n <xarray.DataArray (time: 121)>\n array([ 1.002755, 4.011019, 7.019284, ..., 358.986226,\n 361.99449 ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 2000-12-10\n >>>\n\n See Also\n --------\n core.rolling.DataArrayCoarsen\n core.rolling.DatasetCoarsen\n \"\"\"\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n dim = either_dict_or_kwargs(dim, window_kwargs, \"coarsen\")\n return self._coarsen_cls(\n self,\n dim,\n boundary=boundary,\n side=side,\n coord_func=coord_func,\n keep_attrs=keep_attrs,\n )\n\n def resample(\n self,\n indexer: Mapping[Hashable, str] = None,\n skipna=None,\n closed: str = None,\n label: str = None,\n base: int = 0,\n keep_attrs: bool = None,\n loffset=None,\n restore_coord_dims: bool = None,\n **indexer_kwargs: str,\n ):\n \"\"\"Returns a Resample object for performing resampling operations.\n\n Handles both downsampling and upsampling. The resampled\n dimension must be a datetime-like coordinate. If any intervals\n contain no values from the original object, they will be given\n the value ``NaN``.\n\n Parameters\n ----------\n indexer : {dim: freq}, optional\n Mapping from the dimension name to resample frequency [1]_. The\n dimension must be datetime-like.\n skipna : bool, optional\n Whether to skip missing values when aggregating in downsampling.\n closed : 'left' or 'right', optional\n Side of each interval to treat as closed.\n label : 'left or 'right', optional\n Side of each interval to use for labeling.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '24H' frequency, base could\n range from 0 through 23.\n loffset : timedelta or str, optional\n Offset used to adjust the resampled time labels. Some pandas date\n offset strings are supported.\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n **indexer_kwargs : {dim: freq}\n The keyword arguments form of ``indexer``.\n One of indexer or indexer_kwargs must be provided.\n\n Returns\n -------\n resampled : same type as caller\n This object resampled.\n\n Examples\n --------\n Downsample monthly time-series data to seasonal data:\n\n >>> da = xr.DataArray(\n ... np.linspace(0, 11, num=12),\n ... coords=[\n ... pd.date_range(\n ... \"15/12/1999\", periods=12, freq=pd.DateOffset(months=1),\n ... )\n ... ],\n ... dims=\"time\",\n ... 
)\n >>> da\n <xarray.DataArray (time: 12)>\n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.resample(time=\"QS-DEC\").mean()\n <xarray.DataArray (time: 4)>\n array([ 1., 4., 7., 10.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01\n\n Upsample monthly time-series data to daily data:\n\n >>> da.resample(time=\"1D\").interpolate(\"linear\")\n <xarray.DataArray (time: 337)>\n array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ...\n\n Limit scope of upsampling method\n\n >>> da.resample(time=\"1D\").nearest(tolerance=\"1D\")\n <xarray.DataArray (time: 337)>\n array([ 0., 0., nan, ..., nan, 11., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15\n\n See Also\n --------\n pandas.Series.resample\n pandas.DataFrame.resample\n\n References\n ----------\n\n .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases\n \"\"\"\n # TODO support non-string indexer after removing the old API.\n\n from .dataarray import DataArray\n from .resample import RESAMPLE_DIM\n from ..coding.cftimeindex import CFTimeIndex\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n # note: the second argument (now 'skipna') use to be 'dim'\n if (\n (skipna is not None and not isinstance(skipna, bool))\n or (\"how\" in indexer_kwargs and \"how\" not in self.dims)\n or (\"dim\" in indexer_kwargs and \"dim\" not in self.dims)\n ):\n raise TypeError(\n \"resample() no longer supports the `how` or \"\n \"`dim` arguments. Instead call methods on resample \"\n \"objects, e.g., data.resample(time='1D').mean()\"\n )\n\n indexer = either_dict_or_kwargs(indexer, indexer_kwargs, \"resample\")\n if len(indexer) != 1:\n raise ValueError(\"Resampling only supported along single dimensions.\")\n dim, freq = next(iter(indexer.items()))\n\n dim_name = dim\n dim_coord = self[dim]\n\n if isinstance(self.indexes[dim_name], CFTimeIndex):\n from .resample_cftime import CFTimeGrouper\n\n grouper = CFTimeGrouper(freq, closed, label, base, loffset)\n else:\n grouper = pd.Grouper(\n freq=freq, closed=closed, label=label, base=base, loffset=loffset\n )\n group = DataArray(\n dim_coord, coords=dim_coord.coords, dims=dim_coord.dims, name=RESAMPLE_DIM\n )\n resampler = self._resample_cls(\n self,\n group=group,\n dim=dim_name,\n grouper=grouper,\n resample_dim=RESAMPLE_DIM,\n restore_coord_dims=restore_coord_dims,\n )\n\n return resampler\n\n def where(self, cond, other=dtypes.NA, drop: bool = False):\n \"\"\"Filter elements from this object according to a condition.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this object's values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, these locations filled with NA.\n drop : boolean, optional\n If True, coordinate labels that only correspond to False values of\n the condition are dropped from the result. 
Mutually exclusive with\n ``other``.\n\n Returns\n -------\n Same xarray type as caller, with dtype float64.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=(\"x\", \"y\"))\n >>> a\n <xarray.DataArray (x: 5, y: 5)>\n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n Dimensions without coordinates: x, y\n\n >>> a.where(a.x + a.y < 4)\n <xarray.DataArray (x: 5, y: 5)>\n array([[ 0., 1., 2., 3., nan],\n [ 5., 6., 7., nan, nan],\n [ 10., 11., nan, nan, nan],\n [ 15., nan, nan, nan, nan],\n [ nan, nan, nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n >>> a.where(a.x + a.y < 5, -1)\n <xarray.DataArray (x: 5, y: 5)>\n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, -1],\n [10, 11, 12, -1, -1],\n [15, 16, -1, -1, -1],\n [20, -1, -1, -1, -1]])\n Dimensions without coordinates: x, y\n\n >>> a.where(a.x + a.y < 4, drop=True)\n <xarray.DataArray (x: 4, y: 4)>\n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [ 10., 11., nan, nan],\n [ 15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n >>> a.where(lambda x: x.x + x.y < 4, drop=True)\n <xarray.DataArray (x: 4, y: 4)>\n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [ 10., 11., nan, nan],\n [ 15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n See also\n --------\n numpy.where : corresponding numpy function\n where : equivalent function\n \"\"\"\n from .alignment import align\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if callable(cond):\n cond = cond(self)\n\n if drop:\n if other is not dtypes.NA:\n raise ValueError(\"cannot set `other` if drop=True\")\n\n if not isinstance(cond, (Dataset, DataArray)):\n raise TypeError(\n \"cond argument is %r but must be a %r or %r\"\n % (cond, Dataset, DataArray)\n )\n\n # align so we can use integer indexing\n self, cond = align(self, cond)\n\n # get cond with the minimal size needed for the Dataset\n if isinstance(cond, Dataset):\n clipcond = cond.to_array().any(\"variable\")\n else:\n clipcond = cond\n\n # clip the data corresponding to coordinate dims that are not used\n nonzeros = zip(clipcond.dims, np.nonzero(clipcond.values))\n indexers = {k: np.unique(v) for k, v in nonzeros}\n\n self = self.isel(**indexers)\n cond = cond.isel(**indexers)\n\n return ops.where_method(self, cond, other)\n\n def close(self: Any) -> None:\n \"\"\"Close any files linked to this object\n \"\"\"\n if self._file_obj is not None:\n self._file_obj.close()\n self._file_obj = None\n\n def isin(self, test_elements):\n \"\"\"Tests each value in the array for whether it is in test elements.\n\n Parameters\n ----------\n test_elements : array_like\n The values against which to test each value of `element`.\n This argument is flattened if an array or array_like.\n See numpy notes for behavior with non-array-like parameters.\n\n Returns\n -------\n isin : same as object, bool\n Has the same shape as this object.\n\n Examples\n --------\n\n >>> array = xr.DataArray([1, 2, 3], dims=\"x\")\n >>> array.isin([1, 3])\n <xarray.DataArray (x: 3)>\n array([ True, False, True])\n Dimensions without coordinates: x\n\n See also\n --------\n numpy.isin\n \"\"\"\n from .computation import apply_ufunc\n from .dataset import Dataset\n from .dataarray import DataArray\n from .variable import Variable\n\n if isinstance(test_elements, Dataset):\n raise TypeError(\n \"isin() argument must be convertible to an array: {}\".format(\n test_elements\n )\n )\n elif 
isinstance(test_elements, (Variable, DataArray)):\n # need to explicitly pull out data to support dask arrays as the\n # second argument\n test_elements = test_elements.data\n\n return apply_ufunc(\n duck_array_ops.isin,\n self,\n kwargs=dict(test_elements=test_elements),\n dask=\"allowed\",\n )\n\n def __enter__(self: T) -> T:\n return self\n\n def __exit__(self, exc_type, exc_value, traceback) -> None:\n self.close()\n\n def __getitem__(self, value):\n # implementations of this class should implement this method\n raise NotImplementedError()\n\n\ndef full_like(other, fill_value, dtype: DTypeLike = None):\n \"\"\"Return a new object with the same shape and type as a given object.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object in input\n fill_value : scalar\n Value to fill the new object with before returning it.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object with the same shape and type as other, with the data\n filled with fill_value. Coords will be copied from other.\n If other is based on dask, the new one will be as well, and will be\n split in the same chunks.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(\n ... np.arange(6).reshape(2, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": [1, 2], \"lon\": [0, 1, 2]},\n ... )\n >>> x\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 1)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5, dtype=np.double)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0.5, 0.5, 0.5],\n [0.5, 0.5, 0.5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, np.nan, dtype=np.double)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[nan, nan, nan],\n [nan, nan, nan]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n ones_like\n\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n from .variable import Variable\n\n if not is_scalar(fill_value):\n raise ValueError(f\"fill_value must be scalar. 
Received {fill_value} instead.\")\n\n if isinstance(other, Dataset):\n data_vars = {\n k: _full_like_variable(v, fill_value, dtype)\n for k, v in other.data_vars.items()\n }\n return Dataset(data_vars, coords=other.coords, attrs=other.attrs)\n elif isinstance(other, DataArray):\n return DataArray(\n _full_like_variable(other.variable, fill_value, dtype),\n dims=other.dims,\n coords=other.coords,\n attrs=other.attrs,\n name=other.name,\n )\n elif isinstance(other, Variable):\n return _full_like_variable(other, fill_value, dtype)\n else:\n raise TypeError(\"Expected DataArray, Dataset, or Variable\")\n\n\ndef _full_like_variable(other, fill_value, dtype: DTypeLike = None):\n \"\"\"Inner function of full_like, where other must be a variable\n \"\"\"\n from .variable import Variable\n\n if isinstance(other.data, dask_array_type):\n import dask.array\n\n if dtype is None:\n dtype = other.dtype\n data = dask.array.full(\n other.shape, fill_value, dtype=dtype, chunks=other.data.chunks\n )\n else:\n data = np.full_like(other, fill_value, dtype=dtype)\n\n return Variable(dims=other.dims, data=data, attrs=other.attrs)\n\n\ndef zeros_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of zeros with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of zeros with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(\n ... np.arange(6).reshape(2, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": [1, 2], \"lon\": [0, 1, 2]},\n ... )\n >>> x\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x, dtype=float)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0., 0., 0.],\n [0., 0., 0.]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n ones_like\n full_like\n\n \"\"\"\n return full_like(other, 0, dtype)\n\n\ndef ones_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of ones with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of ones with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(\n ... np.arange(6).reshape(2, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": [1, 2], \"lon\": [0, 1, 2]},\n ... 
)\n >>> x\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.ones_like(x)\n <xarray.DataArray (lat: 2, lon: 3)>\n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n full_like\n\n \"\"\"\n return full_like(other, 1, dtype)\n\n\ndef is_np_datetime_like(dtype: DTypeLike) -> bool:\n \"\"\"Check if a dtype is a subclass of the numpy datetime types\n \"\"\"\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)\n\n\ndef is_np_timedelta_like(dtype: DTypeLike) -> bool:\n \"\"\"Check whether dtype is of the timedelta64 dtype.\n \"\"\"\n return np.issubdtype(dtype, np.timedelta64)\n\n\ndef _contains_cftime_datetimes(array) -> bool:\n \"\"\"Check if an array contains cftime.datetime objects\n \"\"\"\n try:\n from cftime import datetime as cftime_datetime\n except ImportError:\n return False\n else:\n if array.dtype == np.dtype(\"O\") and array.size > 0:\n sample = array.ravel()[0]\n if isinstance(sample, dask_array_type):\n sample = sample.compute()\n if isinstance(sample, np.ndarray):\n sample = sample.item()\n return isinstance(sample, cftime_datetime)\n else:\n return False\n\n\ndef contains_cftime_datetimes(var) -> bool:\n \"\"\"Check if an xarray.Variable contains cftime.datetime objects\n \"\"\"\n return _contains_cftime_datetimes(var.data)\n\n\ndef _contains_datetime_like_objects(var) -> bool:\n \"\"\"Check if a variable contains datetime like objects (either\n np.datetime64, np.timedelta64, or cftime.datetime)\n \"\"\"\n return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var)\n" ]
[ [ "numpy.full_like", "numpy.dtype", "numpy.issubdtype", "numpy.asarray", "pandas.Grouper", "numpy.nonzero", "numpy.unique" ] ]
jungwoohan72/DGN_pytorch
[ "65fe7ab4df661d97725f2a72a1fdb49df1b2ea44" ]
[ "Starcraft/DGN/test.py" ]
[ "import os, sys\nimport numpy as np\nfrom smac.env import StarCraft2Env\nfrom model import DGN\nfrom buffer import ReplayBuffer\nfrom config import *\nfrom utilis import *\nimport torch\nimport torch.optim as optim\n\ntest_env = StarCraft2Env(map_name='25m')\nenv_info = test_env.get_env_info()\nn_ant = env_info[\"n_agents\"]\nn_actions = env_info[\"n_actions\"]\nobs_space = env_info[\"obs_shape\"] + n_ant\n\nmodel = DGN(n_ant,obs_space,hidden_dim,n_actions).cuda()\ntask_path = os.path.dirname(os.path.realpath(__file__))\nload_path = task_path + \"/Weights/25/full_\" + str(482139) + \".pt\"\nmodel.load_state_dict(torch.load(load_path)[\"actor_architecture_state_dict\"])\n\ntest_r, test_win = 0, 0\nfor _ in range(20):\n test_env.reset()\n test_obs = get_obs(test_env.get_obs(),n_ant)\n test_adj = test_env.get_visibility_matrix()[:,0:n_ant]*1 + np.eye(n_ant)\n test_mask = np.array([test_env.get_avail_agent_actions(i) for i in range(n_ant)])\n terminated = False\n while terminated == False:\n test_env.render()\n time.sleep(0.05)\n action=[]\n q = model(torch.Tensor(np.array([test_obs])).cuda(), torch.Tensor(np.array([test_adj])).cuda())[0]\n for i in range(n_ant):\n a = np.argmax(q[i].cpu().detach().numpy() - 9e15*(1 - test_mask[i]))\n action.append(a)\n reward, terminated, winner = test_env.step(action)\n test_r += reward\n if winner.get('battle_won') == True:\n test_win += 1\n test_obs = get_obs(test_env.get_obs(),n_ant)\n test_adj = test_env.get_visibility_matrix()[:,0:n_ant]*1 + np.eye(n_ant)\n test_mask = np.array([test_env.get_avail_agent_actions(i) for i in range(n_ant)])\n\nprint(test_r/20, test_win/20)\n" ]
[ [ "numpy.array", "numpy.eye", "torch.load" ] ]
alexjungaalto/nLassoExpFamPDSimulations
[ "c07b441fff6473d7c650e511e210aaebf7fe061b" ]
[ "get_obs_csv.py" ]
[ "import requests\nimport datetime as dt\nimport xml.etree.ElementTree as ET\nimport numpy as np\nimport re\nimport argparse\n\ndef get_param_names(url):\n \"\"\" Get parameters metadata \"\"\"\n req = requests.get(url)\n params = {}\n\n if req.status_code == 200:\n xmlstring = req.content\n tree = ET.ElementTree(ET.fromstring(xmlstring))\n for p in tree.iter(tag='{http://inspire.ec.europa.eu/schemas/omop/2.9}ObservableProperty'):\n params[p.get('{http://www.opengis.net/gml/3.2}id')] = p.find('{http://inspire.ec.europa.eu/schemas/omop/2.9}label').text\n return params\n\ndef get_params(tree):\n \"\"\" Get parameters from response xml tree \"\"\"\n\n retParams = []\n for el in tree.iter(tag='{http://www.opengis.net/om/2.0}observedProperty'):\n url = el.get('{http://www.w3.org/1999/xlink}href')\n params = re.findall(r\"(?<=param=).*,.*(?=&)\", url)[0].split(',')\n\n param_names = get_param_names(url)\n for p in params:\n retParams.append('{} ({})'.format(param_names[p], p))\n\n return retParams\n\ndef get_positions(tree):\n \"\"\"\n Function to get times and coordinates from multipointcoverage answer\n \"\"\"\n positions = []\n for el in tree.iter(tag='{http://www.opengis.net/gmlcov/1.0}positions'):\n pos = el.text.split()\n i = 0\n while len(pos) > 0:\n lat = float(pos.pop(0))\n lon = float(pos.pop(0))\n timestamp = int(pos.pop(0))\n positions.append([lat,lon,timestamp])\n return np.array(positions)\n\ndef main():\n \"\"\"\n Get data from db and save it as csv\n \"\"\"\n\n url = 'http://opendata.fmi.fi/wfs'\n daystep = 1\n\n starttime = dt.datetime.strptime(options.starttime, '%Y-%m-%d')\n endtime = dt.datetime.strptime(options.endtime, '%Y-%m-%d')\n\n start = starttime\n end = start + dt.timedelta(days=daystep)\n if end > endtime: end = endtime\n\n while end <= endtime and start < end:\n startStr = start.strftime('%Y-%m-%d')\n endStr = end.strftime('%Y-%m-%d')\n\n # Get data\n payload = {\n 'request': 'getFeature',\n 'storedquery_id': 'fmi::observations::weather::multipointcoverage',\n 'bbox': '19,59,35,75',\n 'starttime': startStr,\n 'endtime': endStr,\n }\n r = requests.get(url, params=payload)\n\n # Construct XML tree\n tree = ET.ElementTree(ET.fromstring(r.content))\n\n # Get geospatial and temporal positions of data elements\n positions = get_positions(tree)\n\n # Extract data from XML tree\n d = []\n for el in tree.iter(tag='{http://www.opengis.net/gml/3.2}doubleOrNilReasonTupleList'):\n for pos in el.text.strip().split(\"\\n\"):\n d.append(pos.strip().split(' '))\n\n # Assign data values to positions\n junk = np.append(positions, np.array(d), axis=1)\n try:\n data = np.append(data, junk, axis=0)\n except NameError:\n data = junk\n\n print('Time interval {} - {} provided {} rows'.format(startStr, endStr, junk.shape[0]))\n\n start = end\n end = start + dt.timedelta(days=daystep)\n if end > endtime: end = endtime\n\n print('Done fetching data. Final dimensions of the result: {}'.format(data.shape))\n\n # Get params from the last XML tree element (they don't change over time)\n params = ['lat', 'lon', 'timestamp'] + get_params(tree)\n\n # Save\n np.savetxt(options.filename, data.astype(np.float32), fmt='%.5f', header=';'.join(params), delimiter=\";\")\n\nif __name__=='__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--filename', type=str, default=None, help='Filename to save the data')\n parser.add_argument('--starttime', type=str, default=None, help='Starttime in format Y-m-d')\n parser.add_argument('--endtime', type=str, default=None, help='Endtime in format Y-m-d')\n\n options = parser.parse_args()\n\n main()\n" ]
[ [ "numpy.array", "numpy.append" ] ]
attackgnome/SpaceX
[ "1005f8a24bc44ee3d19cc4500d9674666f07621b" ]
[ "spacex_dash_app.py" ]
[ "# Import required libraries\nimport pandas as pd\nimport dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\n\n# Read the airline data into pandas dataframe\nspacex_df = pd.read_csv(\"spacex_launch_dash.csv\")\nmax_payload = spacex_df['Payload Mass (kg)'].max()\nmin_payload = spacex_df['Payload Mass (kg)'].min()\n\n# Create a dash application\napp = dash.Dash(__name__)\n\n# Create an app layout\napp.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard',\n style={'textAlign': 'center', 'color': '#503D36',\n 'font-size': 40}),\n # TASK 1: Add a dropdown list to enable Launch Site selection\n # The default select value is for ALL sites\n dcc.Dropdown(id='site-dropdown',\n options=[\n {'label': 'All Sites', 'value': 'All Sites'},\n {'label': 'CCAFS LC-40', 'value': 'CCAFS LC-40'},\n {'label': 'VAFB SLC-4E', 'value': 'VAFB SLC-4E'},\n {'label': 'KSC LC-39A', 'value': 'KSC LC-39A'},\n {'label': 'CCAFS SLC-40', 'value': 'CCAFS SLC-40'},\n ],\n value='All Sites',\n placeholder=\"Select Launch Site\",\n searchable=True),\n html.Br(),\n\n # TASK 2: Add a pie chart to show the total successful launches count for all sites\n # If a specific launch site was selected, show the Success vs. Failed counts for the site\n html.Div(dcc.Graph(id='success-pie-chart')),\n html.Br(),\n\n html.P(\"Payload range (Kg):\"),\n # TASK 3: Add a slider to select payload range\n dcc.RangeSlider(id='payload-slider',\n min=0, max=10000, step=1000,\n marks={0: '0',\n 1000: '1000',\n 2000: '2000',\n 3000: '3000',\n 4000: '4000',\n 5000: '5000',\n 6000: '6000',\n 7000: '7000',\n 8000: '8000',\n 9000: '9000',\n 10000: '10000'\n },\n value=[min_payload, max_payload]),\n\n # TASK 4: Add a scatter chart to show the correlation between payload and launch success\n html.Div(dcc.Graph(id='success-payload-scatter-chart')),\n ])\n\n# TASK 2:\n# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output\n#siteList = sorted(list(set(spacex_df['Launch Site']))) \n\n# Function decorator to specify function input and output\[email protected](Output(component_id='success-pie-chart', component_property='figure'),\n Input(component_id='site-dropdown', component_property='value')\n )\n\ndef get_pie_chart(entered_site):\n filtered_df = spacex_df[spacex_df['Launch Site'] == entered_site ]\n if entered_site == 'All Sites':\n data = spacex_df\n fig1 = px.pie(data, values='class', \n names='Launch Site', \n title='Successful Launches per Site')\n else:\n # return the outcomes piechart for a selected site\n data = filtered_df\n fig1 = px.pie(data, \n names='class', \n title=entered_site)\n \n return fig1\n\n# TASK 4:\n# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output\[email protected](Output(component_id='success-payload-scatter-chart', component_property='figure'),\n [Input(component_id='site-dropdown', component_property='value'), \n Input(component_id=\"payload-slider\", component_property=\"value\")])\n\ndef get_scatter_chart(entered_site, slider):\n filtered_df = spacex_df\n # low, high = slider_range\n # mask = (filtered_df['Payload Mass (kg)'] >= low) & (filtered_df['Payload Mass (kg)'] <= high)\n\n if entered_site == 'All Sites':\n fig2 = px.scatter(filtered_df, #[mask], \n x = 'Payload Mass (kg)',\n y = 'class',\n color = 'Booster Version Category', \n title = 'Correlation Across all sites') \n else:\n # return the outcomes piechart for a selected site\n data = filtered_df # [mask]\n fig2 = px.scatter(data[data[\"Launch Site\"]==entered_site], \n x = 'Payload Mass (kg)',\n y = 'class',\n color = 'Booster Version Category', \n title='Correlation For ' + entered_site)\n return fig2\n\n# Run the app\nif __name__ == '__main__':\n app.run_server()\n" ]
[ [ "pandas.read_csv" ] ]
simonwey/DecoupleNet
[ "3e9e09d512230cb0d95e9db98c5838ca9ff799da" ]
[ "lib/dataset/crowdpose.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bowen Cheng ([email protected]) and Bin Xiao ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import defaultdict\nfrom collections import OrderedDict\nimport logging\nimport os\nimport os.path\n\nimport cv2\nimport json_tricks as json\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom crowdposetools.cocoeval import COCOeval\nfrom lib.dataset.JointsDataset import JointsDataset\n\nfrom lib.utils import zipreader\nfrom lib.nms.nms import oks_nms\n\n# -------------------------------------------\ncrowdpose_sigmas = np.array([.79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, .25, .25]) / 10.0\n\n# -------------------------------------------\n\nlogger = logging.getLogger(__name__)\n\n\nclass COCODataset(JointsDataset):\n \"\"\"`CrowdPose`_ Dataset.\n\n Args:\n root (string): Root directory where dataset is located to.\n dataset (string): Dataset name(train2017, val2017, test2017).\n data_format(string): Data format for reading('jpg', 'zip')\n transform (callable, optional): A function/transform that takes in an opencv image\n and returns a transformed version. E.g, ``transforms.ToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n \"\"\"\n\n def __init__(self, cfg, image_dir, annotation_file, dataset_type, image_set, is_train, transform=None):\n super().__init__(cfg, image_dir, annotation_file, image_set, is_train, transform)\n\n\n\n # def __init__(self, root, dataset, data_format, transform=None,\n # target_transform=None):\n from crowdposetools.coco import COCO\n self.nms_thre = cfg.TEST.NMS_THRE\n self.image_thre = cfg.TEST.IMAGE_THRE\n self.soft_nms = cfg.TEST.SOFT_NMS\n self.oks_thre = cfg.TEST.OKS_THRE\n self.in_vis_thre = cfg.TEST.IN_VIS_THRE\n self.bbox_file = cfg.TEST.COCO_BBOX_FILE\n self.use_gt_bbox = cfg.TEST.USE_GT_BBOX\n self.image_width = cfg.MODEL.IMAGE_SIZE[0]\n self.image_height = cfg.MODEL.IMAGE_SIZE[1]\n self.aspect_ratio = self.image_width * 1.0 / self.image_height\n self.pixel_std = 200\n self.scale_thre = cfg.TEST.SCALE_THRE\n\n\n self.dataset_type = dataset_type\n self.coco = COCO(self._get_anno_file_keypoint())\n\n self.ids = list(self.coco.imgs.keys())\n self.transform = transform\n\n cats = [cat['name']\n for cat in self.coco.loadCats(self.coco.getCatIds())]\n self.classes = ['__background__'] + cats\n logger.info('=> classes: {}'.format(self.classes))\n self.num_classes = len(self.classes)\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))\n self._coco_ind_to_class_ind = dict(\n [\n (self._class_to_coco_ind[cls], self._class_to_ind[cls])\n for cls in self.classes[1:]\n ]\n )\n\n # load image file names\n self.image_set_index = self._load_image_set_index()\n self.num_images = len(self.image_set_index)\n logger.info('=> num_images: {}'.format(self.num_images))\n\n self.num_joints = 14\n self.flip_pairs = [[0, 1], [2, 3], [4, 5], [6, 7],\n [8, 9], [10, 11]]\n self.parent_ids = None\n self.upper_body_ids = (0, 1, 2, 3, 4, 5, 12, 13)\n self.lower_body_ids = (6, 7, 8, 9, 10, 11)\n\n self.joints_weight = np.array(\n [\n 1., 1., 1.2, 1.2,\n 1.5, 1.5, 1., 1., 
\n 1.2, 1.2, 1.5, 1.5,\n 1., 1.\n ],\n dtype=np.float32\n ).reshape((self.num_joints, 1))\n\n self.db = self._get_db()\n\n if is_train and cfg.DATASET.SELECT_DATA:\n self.db = self.select_data(self.db)\n\n logger.info('=> load {} samples'.format(len(self.db)))\n\n return\n\n def _load_image_set_index(self):\n \"\"\" image id: int \"\"\"\n image_ids = self.coco.getImgIds()\n return image_ids\n\n def _get_anno_file_keypoint(self):\n # example: root/json/crowdpose_{train,val,test}.json\n return self.annotation_file\n\n def _get_db(self):\n if self.is_train or self.use_gt_bbox:\n # use ground truth bbox\n gt_db = self._load_coco_keypoint_annotations()\n else:\n # use bbox from detection\n gt_db = self._load_coco_person_detection_results()\n return gt_db\n\n def _load_coco_keypoint_annotations(self):\n \"\"\" ground truth bbox and keypoints \"\"\"\n gt_db = []\n for index in self.image_set_index:\n gt_db.extend(self._load_coco_keypoint_annotation_kernal(index))\n return gt_db\n\n def _get_image_path(self, file_name):\n return os.path.join(self.image_dir, file_name)\n\n def _load_coco_keypoint_annotation_kernal(self, index):\n \"\"\"\n coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']\n iscrowd:\n crowd instances are handled by marking their overlaps with all categories to -1\n and later excluded in training\n bbox:\n [x1, y1, w, h]\n :param index: coco image id\n :return: db entry\n \"\"\"\n im_ann = self.coco.loadImgs(index)[0]\n width = im_ann['width']\n height = im_ann['height']\n\n annIds = self.coco.getAnnIds(imgIds=index, iscrowd=False)\n objs = self.coco.loadAnns(annIds)\n\n # sanitize bboxes\n valid_objs = []\n for obj in objs:\n x, y, w, h = obj['bbox']\n x1 = np.max((0, x))\n y1 = np.max((0, y))\n x2 = np.min((width - 1, x1 + np.max((0, w - 1))))\n y2 = np.min((height - 1, y1 + np.max((0, h - 1))))\n if x2 >= x1 and y2 >= y1:\n obj['clean_bbox'] = [x1, y1, x2-x1, y2-y1]\n valid_objs.append(obj)\n objs = valid_objs\n\n rec = []\n for obj in objs:\n cls = self._coco_ind_to_class_ind[obj['category_id']]\n if cls != 1:\n continue\n\n # ignore objs without keypoints annotation\n if max(obj['keypoints']) == 0:\n continue\n\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.zeros((self.num_joints, 3), dtype=np.float)\n for ipt in range(self.num_joints):\n joints_3d[ipt, 0] = obj['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = obj['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = obj['keypoints'][ipt * 3 + 2]\n if t_vis > 1:\n t_vis = 1\n joints_3d_vis[ipt, 0] = t_vis\n joints_3d_vis[ipt, 1] = t_vis\n joints_3d_vis[ipt, 2] = 0\n\n center, scale = self._box2cs(obj['clean_bbox'][:4])\n image_file_name = im_ann['file_name'].split('/')[-1]\n image_path = os.path.join(self.image_dir, image_file_name)\n\n rec.append({\n 'image': image_path,\n 'center': center,\n 'scale': scale,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n 'filename': '',\n 'imgnum': 0,\n 'annotation_id': obj['id']\n })\n\n return rec\n\n def _box2cs(self, box):\n x, y, w, h = box[:4]\n return self._xywh2cs(x, y, w, h)\n\n def _xywh2cs(self, x, y, w, h):\n center = np.zeros((2), dtype=np.float32)\n center[0] = x + w * 0.5\n center[1] = y + h * 0.5\n\n if w > self.aspect_ratio * h:\n h = w * 1.0 / self.aspect_ratio\n elif w < self.aspect_ratio * h:\n w = h * self.aspect_ratio\n scale = np.array(\n [w * 1.0 / self.pixel_std, h * 1.0 / self.pixel_std],\n dtype=np.float32)\n if center[0] != -1:\n # scale = scale * 1.25\n scale = scale * 
self.scale_thre\n\n return center, scale\n\n\n def _load_coco_person_detection_results(self):\n all_boxes = None\n with open(self.bbox_file, 'r') as f:\n all_boxes = json.load(f)\n\n if not all_boxes:\n logger.error('=> Load %s fail!' % self.bbox_file)\n return None\n\n logger.info('=> Total boxes: {}'.format(len(all_boxes)))\n\n image_id_to_image_path = {}\n\n for index in self.image_set_index:\n im_ann = self.coco.loadImgs(index)[0]\n img_path_val = os.path.join(self.image_dir, im_ann['file_name'])\n image_id_to_image_path[im_ann['id']] = img_path_val\n\n kpt_db = []\n num_boxes = 0\n for n_img in range(0, len(all_boxes)):\n det_res = all_boxes[n_img]\n if det_res['category_id'] != 1:\n continue\n img_name = image_id_to_image_path[det_res['image_id']]\n\n box = det_res['bbox']\n score = det_res['score']\n\n if score < self.image_thre:\n continue\n\n num_boxes = num_boxes + 1\n\n center, scale = self._box2cs(box)\n joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)\n joints_3d_vis = np.ones(\n (self.num_joints, 3), dtype=np.float)\n kpt_db.append({\n 'image': img_name,\n 'center': center,\n 'scale': scale,\n 'score': score,\n 'joints_3d': joints_3d,\n 'joints_3d_vis': joints_3d_vis,\n })\n\n logger.info('=> Total boxes after filter low score@{}: {}'.format(\n self.image_thre, num_boxes))\n return kpt_db\n # def __getitem__(self, index):\n # \"\"\"\n # Args:\n # index (int): Index\n\n # Returns:\n # tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.\n # \"\"\"\n # coco = self.coco\n # img_id = self.ids[index]\n # ann_ids = coco.getAnnIds(imgIds=img_id)\n # target = coco.loadAnns(ann_ids)\n\n # file_name = coco.loadImgs(img_id)[0]['file_name']\n\n # if self.data_format == 'zip':\n # img = zipreader.imread(\n # self._get_image_path(file_name),\n # cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION\n # )\n # else:\n # img = cv2.imread(\n # self._get_image_path(file_name),\n # cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION\n # )\n\n # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # import pdb; pdb.set_trace()\n\n\n # if self.transform is not None:\n # img = self.transform(img)\n\n # if self.target_transform is not None:\n # target = self.target_transform(target)\n\n # return img, target\n\n\n def __repr__(self):\n fmt_str = 'Dataset ' + self.__class__.__name__ + '\\n'\n fmt_str += ' Number of datapoints: {}\\n'.format(self.__len__())\n fmt_str += ' Root Location: {}\\n'.format(self.root)\n tmp = ' Transforms (if any): '\n fmt_str += '{0}{1}\\n'.format(tmp, self.transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n tmp = ' Target Transforms (if any): '\n fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\\n', '\\n' + ' ' * len(tmp)))\n return fmt_str\n\n def processKeypoints(self, keypoints):\n tmp = keypoints.copy()\n if keypoints[:, 2].max() > 0:\n p = keypoints[keypoints[:, 2] > 0][:, :2].mean(axis=0)\n num_keypoints = keypoints.shape[0]\n for i in range(num_keypoints):\n tmp[i][0:3] = [\n float(keypoints[i][0]),\n float(keypoints[i][1]),\n float(keypoints[i][2])\n ]\n\n return tmp\n\n # def evaluate(self, cfg, preds, scores, output_dir,\n # *args, **kwargs):\n def evaluate(self, cfg, preds, output_dir, all_boxes, img_path, epoch=-1,\n *args, **kwargs):\n '''\n Perform evaluation on COCO keypoint task\n :param cfg: cfg dictionary\n :param preds: prediction\n :param output_dir: output directory\n :param args: \n :param kwargs: \n :return: \n '''\n if all_boxes.shape[1] == 8:\n return self.evaluate_lambda(cfg, preds, 
output_dir, all_boxes, img_path, epoch, *args, **kwargs)\n\n res_folder = os.path.join(output_dir, 'val_results')\n if not os.path.exists(res_folder):\n try:\n os.makedirs(res_folder)\n except Exception:\n logger.error('Fail to make {}'.format(res_folder))\n \n res_file = os.path.join(\n res_folder, 'keypoints_{}_results_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n image_path_to_image_id = {}\n\n for index in self.image_set_index:\n im_ann = self.coco.loadImgs(index)[0]\n img_path_key = os.path.join(self.image_dir, im_ann['file_name'])\n image_path_to_image_id[img_path_key] = im_ann['id']\n\n # preds is a numpy array: person x (keypoints): N x 14 x 3\n # person x (keypoints)\n _kpts = []\n for idx, kpt in enumerate(preds):\n area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * (np.max(kpt[:, 1]) - np.min(kpt[:, 1]))\n kpt = self.processKeypoints(kpt)\n\n _kpts.append({\n 'keypoints': kpt[:, 0:3],\n 'center': all_boxes[idx][0:2],\n 'scale': all_boxes[idx][2:4],\n 'area': all_boxes[idx][4],\n 'score': all_boxes[idx][5],\n 'image': image_path_to_image_id[img_path[idx]],\n 'annotation_id': int(all_boxes[idx][6]),\n })\n\n # keypoints: num_joints * 4 (x, y, score, tag)\n # image x person x (keypoints)\n kpts = defaultdict(list)\n for kpt in _kpts:\n kpts[kpt['image']].append(kpt)\n\n # for idx, _kpts in enumerate(preds):\n # img_id = self.ids[idx]\n # file_name = self.coco.loadImgs(img_id)[0]['file_name']\n # for idx_kpt, kpt in enumerate(_kpts):\n # area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * (np.max(kpt[:, 1]) - np.min(kpt[:, 1]))\n # kpt = self.processKeypoints(kpt)\n \n\n # kpts[int(file_name.split('.')[0])].append(\n # {\n # 'keypoints': kpt[:, 0:3],\n # 'score': scores[idx][idx_kpt],\n # 'tags': kpt[:, 3],\n # 'image': int(file_name.split('.')[0]),\n # 'area': area\n # }\n # )\n\n # rescoring and oks nms\n num_joints = self.num_joints\n in_vis_thre = self.in_vis_thre\n oks_thre = self.oks_thre\n oks_nmsed_kpts = []\n\n # image x person x (keypoints)\n for img in kpts.keys():\n # person x (keypoints)\n img_kpts = kpts[img]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > in_vis_thre:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n n_p['box_score'] = box_score\n n_p['keypoint_score'] = kpt_score\n \n # person x (keypoints)\n # do not use nms, keep all detections\n keep = []\n if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n else:\n oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])\n\n self._write_coco_keypoint_results(\n oks_nmsed_kpts, res_file\n )\n\n # CrowdPose `test` set has annotation.\n info_str = self._do_python_keypoint_eval(\n res_file, res_folder\n )\n name_value = OrderedDict(info_str)\n return name_value, name_value['AP']\n\n # --------------------------------------------------------------------\n def evaluate_lambda(self, cfg, preds, output_dir, all_boxes, img_path, epoch=-1,\n *args, **kwargs):\n\n res_folder = os.path.join(output_dir, 'val_results')\n if not os.path.exists(res_folder):\n try:\n os.makedirs(res_folder)\n except Exception:\n logger.error('Fail to make {}'.format(res_folder))\n\n res_file = os.path.join(\n res_folder, 'keypoints_{}_results_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n res_file_mode0 = os.path.join(\n res_folder, 'keypoints_{}_results_mode0_epoch{}.json'.format(\n self.image_set, 
epoch)\n )\n\n res_file_mode1 = os.path.join(\n res_folder, 'keypoints_{}_results_mode1_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n res_file_mode2 = os.path.join(\n res_folder, 'keypoints_{}_results_mode2_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n res_file_mode3 = os.path.join(\n res_folder, 'keypoints_{}_results_mode3_epoch{}.json'.format(\n self.image_set, epoch)\n )\n\n image_path_to_image_id = {}\n\n for index in self.image_set_index:\n im_ann = self.coco.loadImgs(index)[0]\n img_path_key = os.path.join(self.image_dir, im_ann['file_name'])\n image_path_to_image_id[img_path_key] = im_ann['id']\n\n # person x (keypoints)\n _kpts = []\n for idx, kpt in enumerate(preds):\n area = (np.max(kpt[:, 0]) - np.min(kpt[:, 0])) * (np.max(kpt[:, 1]) - np.min(kpt[:, 1]))\n kpt = self.processKeypoints(kpt)\n\n _kpts.append({\n 'keypoints': kpt[:, 0:3],\n 'center': all_boxes[idx][0:2],\n 'scale': all_boxes[idx][2:4],\n 'area': all_boxes[idx][4],\n 'score': all_boxes[idx][5],\n 'image': image_path_to_image_id[img_path[idx]],\n 'annotation_id': int(all_boxes[idx][6]),\n 'mode': int(all_boxes[idx][7])\n })\n\n # image x person x (keypoints)\n kpts = defaultdict(list)\n for kpt in _kpts:\n kpts[kpt['image']].append(kpt)\n\n # rescoring and oks nms\n num_joints = self.num_joints\n in_vis_thre = self.in_vis_thre\n oks_thre = self.oks_thre\n oks_nmsed_kpts = []\n oks_nmsed_kpts_mode0 = []\n oks_nmsed_kpts_mode1 = []\n oks_nmsed_kpts_mode2 = []\n oks_nmsed_kpts_mode3 = []\n\n before_len_kps = 0\n for img in kpts:\n img_kpts = kpts[img]\n before_len_kps += len(img_kpts) \n\n for img in kpts.keys():\n img_kpts = kpts[img]\n for n_p in img_kpts:\n box_score = n_p['score']\n kpt_score = 0\n valid_num = 0\n for n_jt in range(0, num_joints):\n t_s = n_p['keypoints'][n_jt][2]\n if t_s > in_vis_thre:\n kpt_score = kpt_score + t_s\n valid_num = valid_num + 1\n if valid_num != 0:\n kpt_score = kpt_score / valid_num\n # rescoring\n n_p['score'] = kpt_score * box_score\n n_p['box_score'] = box_score\n n_p['keypoint_score'] = kpt_score\n\n img_kpts_mode0 = [img_kpts[i] for i in range(len(img_kpts)) if img_kpts[i]['mode'] == 0]\n img_kpts_mode1 = [img_kpts[i] for i in range(len(img_kpts)) if img_kpts[i]['mode'] == 1]\n img_kpts_mode2 = [img_kpts[i] for i in range(len(img_kpts)) if img_kpts[i]['mode'] == 2]\n img_kpts_mode3 = [img_kpts[i] for i in range(len(img_kpts)) if img_kpts[i]['mode'] == 3]\n\n # # # ------------------------------\n # keep_mode0 = oks_nms(img_kpts_mode0, oks_thre)\n # keep_mode1 = oks_nms(img_kpts_mode1, oks_thre)\n # keep = oks_nms(img_kpts, oks_thre)\n \n # oks_img_kpts_mode0 = [img_kpts_mode0[_keep] for _keep in keep_mode0]\n # oks_img_kpts_mode1 = [img_kpts_mode1[_keep] for _keep in keep_mode1]\n\n # oks_img_kpts_merged = oks_img_kpts_mode0 + oks_img_kpts_mode1\n # # oks_img_kpts_merged = oks_merge(kpts_db_mode0=img_kpts_mode0, kpts_db_mode1=img_kpts_mode1, min_oks_thres=0.95)\n\n # # ------------------------------\n # img_kpts_merged = img_kpts_mode0 + img_kpts_mode1\n # keep = oks_nms(img_kpts_merged, oks_thre)\n # keep = []\n # oks_img_kpts_merged = [img_kpts_merged[_keep] for _keep in keep]\n\n # keep_mode0 = oks_nms(img_kpts_mode0, oks_thre)\n # keep_mode0 = []\n # oks_img_kpts_mode0 = [img_kpts_mode0[_keep] for _keep in keep_mode0]\n\n # keep_mode1 = oks_nms(img_kpts_mode1, oks_thre)\n # keep_mode1 = []\n # oks_img_kpts_mode1 = [img_kpts_mode1[_keep] for _keep in keep_mode1]\n #\n # keep_mode2 = []\n # oks_img_kpts_mode2 = [img_kpts_mode2[_keep] for _keep in 
keep_mode2]\n #\n # keep_mode3 = []\n # oks_img_kpts_mode3 = [img_kpts_mode3[_keep] for _keep in keep_mode3]\n\n\n # ------------------------------\n # if len(keep_mode0) == 0:\n oks_nmsed_kpts_mode0.append(img_kpts_mode0)\n # else:\n # oks_nmsed_kpts_mode0.append(oks_img_kpts_mode0)\n\n # if len(keep_mode1) == 0:\n oks_nmsed_kpts_mode1.append(img_kpts_mode1)\n # else:\n # oks_nmsed_kpts_mode1.append(oks_img_kpts_mode1)\n #\n # if len(keep_mode2) == 0:\n oks_nmsed_kpts_mode2.append(img_kpts_mode2)\n # else:\n # oks_nmsed_kpts_mode2.append(oks_img_kpts_mode2)\n #\n # if len(keep_mode3) == 0:\n oks_nmsed_kpts_mode3.append(img_kpts_mode3)\n # else:\n # oks_nmsed_kpts_mode3.append(oks_img_kpts_mode3)\n #\n # # ------------------------------\n # if len(keep) == 0:\n oks_nmsed_kpts.append(img_kpts)\n # else:\n # oks_nmsed_kpts.append(oks_img_kpts_merged)\n\n oks_len_kps = sum([len(kps) for kps in oks_nmsed_kpts])\n oks_len_kps_mode0 = sum([len(kps) for kps in oks_nmsed_kpts_mode0])\n oks_len_kps_mode1 = sum([len(kps) for kps in oks_nmsed_kpts_mode1])\n oks_len_kps_mode2 = sum([len(kps) for kps in oks_nmsed_kpts_mode2])\n oks_len_kps_mode3 = sum([len(kps) for kps in oks_nmsed_kpts_mode3])\n\n print('before #kps:{}, after #kps:{}'.format(before_len_kps, oks_len_kps))\n\n ##------------------------------\n self._write_coco_keypoint_results(oks_nmsed_kpts_mode0, res_file_mode0)\n self._write_coco_keypoint_results(oks_nmsed_kpts_mode1, res_file_mode1)\n self._write_coco_keypoint_results(oks_nmsed_kpts_mode2, res_file_mode2)\n self._write_coco_keypoint_results(oks_nmsed_kpts_mode3, res_file_mode3)\n self._write_coco_keypoint_results(oks_nmsed_kpts, res_file) ## merged\n\n ##------------------------------\n # if 'test' not in self.image_set:\n info_str = self._do_python_keypoint_eval(res_file, res_folder)\n name_value = OrderedDict(info_str)\n\n info_str_mode0 = self._do_python_keypoint_eval(res_file_mode0, res_folder)\n name_value_mode0 = OrderedDict(info_str_mode0)\n\n info_str_mode1 = self._do_python_keypoint_eval(res_file_mode1, res_folder)\n name_value_mode1 = OrderedDict(info_str_mode1)\n\n if oks_len_kps_mode2 == 0:\n name_value_mode2 = {'Null': 0}\n else:\n info_str_mode2 = self._do_python_keypoint_eval(res_file_mode2, res_folder)\n name_value_mode2 = OrderedDict(info_str_mode2)\n\n if oks_len_kps_mode3 == 0:\n name_value_mode3 = {'Null': 0}\n else:\n info_str_mode3 = self._do_python_keypoint_eval(res_file_mode3, res_folder)\n name_value_mode3 = OrderedDict(info_str_mode3)\n\n return name_value, name_value_mode0, name_value_mode1, name_value_mode2, name_value_mode3, name_value['AP']\n # else:\n # return {'Null': 0}, {'Null': 0}, {'Null': 0}, {'Null': 0}, {'Null': 0}, 0\n\n # --------------------------------------------------------------------\n\n def _write_coco_keypoint_results(self, keypoints, res_file):\n data_pack = [\n {\n 'cat_id': self._class_to_coco_ind[cls],\n 'cls_ind': cls_ind,\n 'cls': cls,\n 'ann_type': 'keypoints',\n 'keypoints': keypoints\n }\n for cls_ind, cls in enumerate(self.classes) if not cls == '__background__'\n ]\n\n results = self._coco_keypoint_results_one_category_kernel(data_pack[0])\n logger.info('=> Writing results json to %s' % res_file)\n with open(res_file, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n try:\n json.load(open(res_file))\n except Exception:\n content = []\n with open(res_file, 'r') as f:\n for line in f:\n content.append(line)\n content[-1] = ']'\n with open(res_file, 'w') as f:\n for c in content:\n f.write(c)\n\n def 
_coco_keypoint_results_one_category_kernel(self, data_pack):\n cat_id = data_pack['cat_id']\n keypoints = data_pack['keypoints']\n cat_results = []\n num_joints = 14\n\n for img_kpts in keypoints:\n if len(img_kpts) == 0:\n continue\n\n _key_points = np.array(\n [img_kpts[k]['keypoints'] for k in range(len(img_kpts))]\n )\n key_points = np.zeros(\n (_key_points.shape[0], num_joints * 3),\n dtype=np.float\n )\n\n for ipt in range(num_joints):\n key_points[:, ipt * 3 + 0] = _key_points[:, ipt, 0]\n key_points[:, ipt * 3 + 1] = _key_points[:, ipt, 1]\n key_points[:, ipt * 3 + 2] = _key_points[:, ipt, 2] # keypoints score.\n\n for k in range(len(img_kpts)):\n kpt = key_points[k].reshape((num_joints, 3))\n left_top = np.amin(kpt, axis=0)\n right_bottom = np.amax(kpt, axis=0)\n\n w = right_bottom[0] - left_top[0]\n h = right_bottom[1] - left_top[1]\n\n cat_results.append({\n 'image_id': img_kpts[k]['image'],\n 'category_id': cat_id,\n 'keypoints': list(key_points[k]),\n 'score': img_kpts[k]['score'],\n 'bbox': list([left_top[0], left_top[1], w, h])\n })\n\n return cat_results\n\n def _do_python_keypoint_eval(self, res_file, res_folder):\n coco_dt = self.coco.loadRes(res_file)\n coco_eval = COCOeval(self.coco, coco_dt, 'keypoints')\n coco_eval.params.useSegm = None\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n stats_names = ['AP', 'Ap .5', 'AP .75', 'AR', 'AR .5', 'AR .75', 'AP (easy)', 'AP (medium)', 'AP (hard)']\n stats_index = [0, 1, 2, 5, 6, 7, 8, 9, 10]\n\n info_str = []\n for ind, name in enumerate(stats_names):\n info_str.append((name, coco_eval.stats[stats_index[ind]]))\n # info_str.append(coco_eval.stats[ind])\n\n return info_str" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.amax", "numpy.amin", "numpy.max", "numpy.min", "numpy.array" ] ]
bioShaun/omsCabinet
[ "741179a06cbd5200662cd03bc2e0115f4ad06917" ]
[ "bioinformatics/analysis/snp/replace_score.py" ]
[ "import fire\nimport gzip\nimport pandas as pd\nfrom pathlib import PurePath\n\n\ndef extract_vcf_header(vcf):\n if vcf.suffix == '.gz':\n vcf_inf = gzip.open(vcf)\n else:\n vcf_inf = open(vcf)\n prefix = ''\n for eachline in vcf_inf:\n if vcf.suffix == '.gz':\n eachline = eachline.decode()\n prefix += eachline\n if eachline[:6] == '#CHROM':\n return eachline.strip().split('\\t'), prefix\n\n\ndef replace_score(vcf, map_file, outfile):\n vcf = PurePath(vcf)\n vcf_header, vcf_prefix = extract_vcf_header(vcf)\n vcf_df = pd.read_csv(vcf, sep='\\t', comment='#',\n header=None, names=vcf_header)\n map_df = pd.read_csv(\n map_file, sep='\\t', header=None,\n names=['chrom', 'start', 'ref', 'alt', 'sample_id', ],\n index_col=0)\n vcf_df.loc[:, 'QUAL'] = [\n map_df.loc[each].sample_id if each in map_df.index\n else 'NA'\n for each in vcf_df.loc[:, 'ID']\n ]\n with gzip.open(outfile, 'wt') as out_inf:\n out_inf.write(vcf_prefix)\n\n vcf_df.to_csv(outfile, sep='\\t', header=False,\n compression='gzip', mode='a', index=False)\n\n\nif __name__ == '__main__':\n fire.Fire(replace_score)\n" ]
[ [ "pandas.read_csv" ] ]
bvaisvil/lefse
[ "6e325486b9d72a3d489b472691d171377d85b589" ]
[ "format_input.py" ]
[ "#!/usr/bin/env python3\n\nimport sys,os,argparse,pickle,re,numpy\n\nimport functools\n\n\n#***************************************************************************************************************\n#* Log of change *\n#* January 16, 2014 - George Weingart - [email protected] *\n#* *\n#* biom Support *\n#* Modified the program to enable it to accept biom files as input *\n#* *\n#* Added two optional input parameters: *\n#* 1. biom_c is the name of the biom metadata to be used as class *\n#* 2. biom_s is the name of the biom metadata to be used as subclass *\n#* class and subclass are used in the same context as the original *\n#* parameters class and subclass *\n#* These parameters are totally optional, the default is the program *\n#* chooses as class the first metadata received from the conversion *\n#* of the biom file into a sequential (pcl) file as generated by *\n#* breadcrumbs, and similarly, the second metadata is selected as *\n#* subclass. *\n#* The syntax or logic for the original non-biom case was NOT changed. *\n#* *\n#* <******************* IMPORTANT NOTE *************************> *\n#* The biom case requires breadcrumbs and therefore there is a *\n#* a conditional import of the breadcrumbs modules *\n#* If the User uses a biom input and breadcrumbs is not detected, *\n#* the run is abnormally ended *\n#* breadcrumbs itself needs a biom environment, so if the immport *\n#* of biom in breadcrumbs fails, the run is also abnormally\n#* ended (Only if the input file was biom) *\n#* *\n#* USAGE EXAMPLES *\n#* -------------- *\n#* Case #1: Using a sequential file as input (Old version - did not change *\n#* ./format_input.py hmp_aerobiosis_small.txt hmp_aerobiosis_small.in -c 1 -s 2 -u 3 -o 1000000 *\n#* Case #2: Using a biom file as input *\n#* ./format_input.py hmp_aerobiosis_small.biom hmp_aerobiosis_small.in -o 1000000 *\n#* Case #3: Using a biom file as input and override the class and subclass *\n#* ./format_input.py lefse.biom hmp_aerobiosis_small.in -biom_c oxygen_availability -biom_s body_site -o 1000000\n#* *\n#***************************************************************************************************************\n\ndef read_input_file(inp_file, CommonArea):\n\n if inp_file.endswith('.biom'): #* If the file format is biom:\n CommonArea = biom_processing(inp_file) #* Process in biom format\n return CommonArea #* And return the CommonArea\n\n with open(inp_file) as inp:\n CommonArea['ReturnedData'] = [[v.strip() for v in line.strip().split(\"\\t\")] for line in inp.readlines()]\n return CommonArea\n\ndef transpose(data):\n return list(zip(*data))\n\ndef read_params(args):\n parser = argparse.ArgumentParser(description='LEfSe formatting modules')\n parser.add_argument('input_file', metavar='INPUT_FILE', type=str, help=\"the input file, feature hierarchical level can be specified with | or . 
and those symbols must not be present for other reasons in the input file.\")\n parser.add_argument('output_file', metavar='OUTPUT_FILE', type=str,\n help=\"the output file containing the data for LEfSe\")\n parser.add_argument('--output_table', type=str, required=False, default=\"\",\n help=\"the formatted table in txt format\")\n parser.add_argument('-f',dest=\"feats_dir\", choices=[\"c\",\"r\"], type=str, default=\"r\",\n help=\"set whether the features are on rows (default) or on columns\")\n parser.add_argument('-c',dest=\"class\", metavar=\"[1..n_feats]\", type=int, default=1,\n help=\"set which feature use as class (default 1)\")\n parser.add_argument('-s',dest=\"subclass\", metavar=\"[1..n_feats]\", type=int, default=None,\n help=\"set which feature use as subclass (default -1 meaning no subclass)\")\n parser.add_argument('-o',dest=\"norm_v\", metavar=\"float\", type=float, default=-1.0,\n help=\"set the normalization value (default -1.0 meaning no normalization)\")\n parser.add_argument('-u',dest=\"subject\", metavar=\"[1..n_feats]\", type=int, default=None,\n help=\"set which feature use as subject (default -1 meaning no subject)\")\n parser.add_argument('-m',dest=\"missing_p\", choices=[\"f\",\"s\"], type=str, default=\"d\",\n help=\"set the policy to adopt with missin values: f removes the features with missing values, s removes samples with missing values (default f)\")\n parser.add_argument('-n',dest=\"subcl_min_card\", metavar=\"int\", type=int, default=10,\n help=\"set the minimum cardinality of each subclass (subclasses with low cardinalities will be grouped together, if the cardinality is still low, no pairwise comparison will be performed with them)\")\n\n parser.add_argument('-biom_c',dest=\"biom_class\", type=str,\n help=\"For biom input files: Set which feature use as class \")\n parser.add_argument('-biom_s',dest=\"biom_subclass\", type=str,\n help=\"For biom input files: set which feature use as subclass \")\n\n args = parser.parse_args()\n\n return vars(args)\n\ndef remove_missing(data,roc):\n if roc == \"c\": data = transpose(data)\n max_len = max([len(r) for r in data])\n to_rem = []\n for i,r in enumerate(data):\n if len([v for v in r if not( v == \"\" or v.isspace())]) < max_len: to_rem.append(i)\n if len(to_rem):\n for i in to_rem.reverse():\n data.pop(i)\n if roc == \"c\": return transpose(data)\n return data\n\n\ndef sort_by_cl(data,n,c,s,u):\n def sort_lines1(a,b):\n return int(a[c] > b[c])*2-1\n\n def sort_lines2u(a,b):\n if a[c] != b[c]:\n return int(a[c] > b[c])*2-1\n\n return int(a[u] > b[u])*2-1\n\n def sort_lines2s(a,b):\n if a[c] != b[c]:\n return int(a[c] > b[c])*2-1\n\n return int(a[s] > b[s])*2-1\n\n def sort_lines3(a,b):\n if a[c] != b[c]:\n return int(a[c] > b[c])*2-1\n\n if a[s] != b[s]:\n return int(a[s] > b[s])*2-1\n\n return int(a[u] > b[u])*2-1\n\n if n == 3:\n data.sort(key = functools.cmp_to_key(lambda a,b: sort_lines3(a,b)))\n\n if n == 2:\n if s is None:\n data.sort(key = functools.cmp_to_key(lambda a,b: sort_lines2u(a,b)))\n else:\n data.sort(key = functools.cmp_to_key(lambda a,b: sort_lines2s(a,b)))\n\n if n == 1:\n data.sort(key = functools.cmp_to_key(lambda a,b: sort_lines1(a,b)))\n\n return data\n\ndef group_small_subclasses(cls,min_subcl):\n last = \"\"\n n = 0\n repl = []\n dd = [list(cls['class']),list(cls['subclass'])]\n for d in dd:\n if d[1] != last:\n if n < min_subcl and last != \"\":\n repl.append(d[1])\n last = d[1]\n n = 1\n for i,d in enumerate(dd):\n if d[1] in repl: dd[i][1] = \"other\"\n dd[i][1] = 
str(dd[i][0])+\"_\"+str(dd[i][1])\n cls['class'] = dd[0]\n cls['subclass'] = dd[1]\n return cls\n\ndef get_class_slices(data):\n previous_class = data[0][0]\n previous_subclass = data[0][1]\n subclass_slices = []\n class_slices = []\n last_cl = 0\n last_subcl = 0\n class_hierarchy = []\n subcls = []\n for i,d in enumerate(data):\n if d[1] != previous_subclass:\n subclass_slices.append((previous_subclass,(last_subcl,i)))\n last_subcl = i\n subcls.append(previous_subclass)\n if d[0] != previous_class:\n class_slices.append((previous_class,(last_cl,i)))\n class_hierarchy.append((previous_class,subcls))\n subcls = []\n last_cl = i\n previous_subclass = d[1]\n previous_class = d[0]\n subclass_slices.append((previous_subclass,(last_subcl,i+1)))\n subcls.append(previous_subclass)\n class_slices.append((previous_class,(last_cl,i+1)))\n class_hierarchy.append((previous_class,subcls))\n return dict(class_slices), dict(subclass_slices), dict(class_hierarchy)\n\ndef numerical_values(feats,norm):\n mm = []\n for k,v in feats.items():\n feats[k] = [float(val) for val in v]\n if norm < 0.0: return feats\n tr = list(zip(*(list(feats.values()))))\n mul = []\n fk = list(feats.keys())\n hie = True if sum([k.count(\".\") for k in fk]) > len(fk) else False\n for i in range(len(list(feats.values())[0])):\n if hie: mul.append(sum([t for j,t in enumerate(tr[i]) if fk[j].count(\".\") < 1 ]))\n else: mul.append(sum(tr[i]))\n if hie and sum(mul) == 0:\n mul = []\n for i in range(len(list(feats.values())[0])):\n mul.append(sum(tr[i])) \n for i,m in enumerate(mul):\n if m == 0: mul[i] = 0.0\n else: mul[i] = float(norm) / m\n for k,v in feats.items():\n feats[k] = [val*mul[i] for i,val in enumerate(v)]\n if numpy.mean(feats[k]) and (numpy.std(feats[k])/numpy.mean(feats[k])) < 1e-10:\n feats[k] = [ float(round(kv*1e6)/1e6) for kv in feats[k]]\n return feats\n\ndef add_missing_levels2(ff):\n\n if sum( [f.count(\".\") for f in ff] ) < 1: return ff\n\n dn = {}\n\n added = True\n while added:\n added = False\n for f in ff:\n lev = f.count(\".\")\n if lev == 0: continue\n if lev not in dn: dn[lev] = [f]\n else: dn[lev].append(f)\n for fn in sorted(dn,reverse=True):\n for f in dn[fn]:\n fc = \".\".join(f.split('.')[:-1])\n if fc not in ff:\n ab_all = [ff[fg] for fg in ff if (fg.count(\".\") == 0 and fg == fc) or (fg.count(\".\") > 0 and fc == \".\".join(fg.split('.')[:-1]))]\n ab =[]\n for l in [f for f in zip(*ab_all)]:\n ab.append(sum([float(ll) for ll in l]))\n ff[fc] = ab\n added = True\n if added:\n break\n\n return ff\n\n\ndef add_missing_levels(ff):\n if sum( [f.count(\".\") for f in ff] ) < 1: return ff\n\n clades2leaves = {}\n for f in ff:\n fs = f.split(\".\")\n if len(fs) < 2:\n continue\n for l in range(len(fs)):\n n = \".\".join( fs[:l] )\n if n in clades2leaves:\n clades2leaves[n].append( f )\n else:\n clades2leaves[n] = [f]\n for k,v in clades2leaves.items():\n if k and k not in ff:\n ff[k] = [sum(a) for a in zip(*[[float(fn) for fn in ff[vv]] for vv in v])]\n return ff\n\n\ndef modify_feature_names(fn):\n ret = fn\n\n for v in [' ',r'\\$',r'\\@',r'#',r'%',r'\\^',r'\\&',r'\\*',r'\\\"',r'\\'']:\n ret = [re.sub(v,\"\",f) for f in ret]\n\n for v in [\"/\",r'\\(',r'\\)',r'-',r'\\+',r'=',r'{',r'}',r'\\[',r'\\]',\n r',',r'\\.',r';',r':',r'\\?',r'\\<',r'\\>',r'\\.',r'\\,']:\n ret = [re.sub(v,\"_\",f) for f in ret]\n\n for v in [\"\\|\"]:\n ret = [re.sub(v,\".\",f) for f in ret]\n\n ret2 = []\n for r in ret:\n if r[0] in ['0','1','2','3','4','5','6','7','8','9','_']:\n ret2.append(\"f_\"+r)\n else:\n ret2.append(r)\n\n 
return ret2\n\n\ndef rename_same_subcl(cl,subcl):\n toc = []\n for sc in set(subcl):\n if len(set([cl[i] for i in range(len(subcl)) if sc == subcl[i]])) > 1:\n toc.append(sc)\n new_subcl = []\n for i,sc in enumerate(subcl):\n if sc in toc: new_subcl.append(cl[i]+\"_\"+sc)\n else: new_subcl.append(sc)\n return new_subcl\n\n\n#*************************************************************************************\n#* Modifications by George Weingart, Jan 15, 2014 *\n#* If the input file is biom: *\n#* a. Load an AbundanceTable (Using breadcrumbs) *\n#* b. Create a sequential file from the AbundanceTable (de-facto - pcl) *\n#* c. Use that file as input to the rest of the program *\n#* d. Calculate the c,s,and u parameters, either from the values the User entered *\n#* from the meta data values in the biom file or set up defaults *\n#* <<<------------- I M P O R T A N T N O T E ------------------->> *\n#* breadcrumbs src directory must be included in the PYTHONPATH *\n#* <<<------------- I M P O R T A N T N O T E ------------------->> *\n#*************************************************************************************\ndef biom_processing(inp_file):\n CommonArea = dict() #* Set up a dictionary to return\n CommonArea['abndData'] = AbundanceTable.funcMakeFromFile(inp_file, #* Create AbundanceTable from input biom file\n cDelimiter = None,\n sMetadataID = None,\n sLastMetadataRow = None,\n sLastMetadata = None,\n strFormat = None)\n\n #****************************************************************\n #* Building the data element here *\n #****************************************************************\n ResolvedData = list() #This is the Resolved data that will be returned\n IDMetadataName = CommonArea['abndData'].funcGetIDMetadataName() #* ID Metadataname\n IDMetadata = [CommonArea['abndData'].funcGetIDMetadataName()] #* The first Row\n IDMetadata.extend([IDMetadataEntry for IDMetadataEntry in CommonArea['abndData'].funcGetMetadataCopy()[IDMetadataName]]) #* Loop on all the metadata values\n\n ResolvedData.append(IDMetadata) #Add the IDMetadata with all its values to the resolved area\n for key, value in CommonArea['abndData'].funcGetMetadataCopy().items():\n if key != IDMetadataName:\n MetadataEntry = [key] + value #* Set it up\n ResolvedData.append(MetadataEntry)\n for AbundanceDataEntry in CommonArea['abndData'].funcGetAbundanceCopy(): #* The Abundance Data\n lstAbundanceDataEntry = list(AbundanceDataEntry) #Convert tuple to list\n ResolvedData.append(lstAbundanceDataEntry) #Append the list to the metadata list\n CommonArea['ReturnedData'] = ResolvedData #Post the results\n return CommonArea\n\n\n#*******************************************************************************\n#* Check the params and override in the case of biom *\n#*******************************************************************************\ndef check_params_for_biom_case(params, CommonArea):\n CommonArea['MetadataNames'] = list() #Metadata names\n params['original_class'] = params['class'] #Save the original class\n params['original_subclass'] = params['subclass'] #Save the original subclass\n params['original_subject'] = params['subject'] #Save the original subclass\n\n\n TotalMetadataEntriesAndIDInBiomFile = len(CommonArea['abndData'].funcGetMetadataCopy()) # The number of metadata entries\n for i in range(0,TotalMetadataEntriesAndIDInBiomFile): #* Populate the meta data names table\n CommonArea['MetadataNames'].append(CommonArea['ReturnedData'][i][0]) #Add the metadata name\n\n\n 
#****************************************************\n #* Setting the params here *\n #****************************************************\n\n if TotalMetadataEntriesAndIDInBiomFile > 0: #If there is at least one entry - has to be the subject\n params['subject'] = 1\n if TotalMetadataEntriesAndIDInBiomFile == 2: #If there are 2 - The first is the subject and the second has to be the metadata, and that is the class\n params['class'] = 2\n if TotalMetadataEntriesAndIDInBiomFile == 3: #If there are 3: Set up default that the second entry is the class and the third is the subclass\n params['class'] = 2\n params['subclass'] = 3\n FlagError = False #Set up error flag\n\n if not params['biom_class'] is None and not params['biom_subclass'] is None: #Check if the User passed a valid class and subclass\n if params['biom_class'] in CommonArea['MetadataNames']:\n params['class'] = CommonArea['MetadataNames'].index(params['biom_class'])+1 #* Set up the index for that metadata\n else:\n FlagError = True\n if params['biom_subclass'] in CommonArea['MetadataNames']:\n params['subclass'] = CommonArea['MetadataNames'].index(params['biom_subclass'])+1 #* Set up the index for that metadata\n else:\n FlagError = True\n if FlagError == True: #* If the User passed an invalid class\n print(\"**Invalid biom class or subclass passed - Using defaults: First metadata=class, Second Metadata=subclass\\n\")\n params['class'] = 2\n params['subclass'] = 3\n return params\n\n\n\nif __name__ == '__main__':\n CommonArea = dict() #Build a Common Area to pass variables in the biom case\n params = read_params(sys.argv)\n\n #*************************************************************\n #* Conditionally import breadcrumbs if file is a biom file *\n #* If it is and no breadcrumbs found - abnormally exit *\n #*************************************************************\n if params['input_file'].endswith('.biom'):\n try:\n from lefsebiom.ConstantsBreadCrumbs import *\n from lefsebiom.AbundanceTable import *\n except ImportError:\n sys.stderr.write(\"************************************************************************************************************ \\n\")\n sys.stderr.write(\"* Error: Breadcrumbs libraries not detected - required to process biom files - run abnormally terminated * \\n\")\n sys.stderr.write(\"************************************************************************************************************ \\n\")\n exit(1)\n\n\n if type(params['subclass']) is int and int(params['subclass']) < 1:\n params['subclass'] = None\n if type(params['subject']) is int and int(params['subject']) < 1:\n params['subject'] = None\n\n\n CommonArea = read_input_file(sys.argv[1], CommonArea) #Pass The CommonArea to the Read\n data = CommonArea['ReturnedData'] #Select the data\n\n if sys.argv[1].endswith('biom'): #* Check if biom:\n params = check_params_for_biom_case(params, CommonArea) #Check the params for the biom case\n\n if params['feats_dir'] == \"c\":\n data = transpose(data)\n\n ncl = 1\n if not params['subclass'] is None: ncl += 1\n if not params['subject'] is None: ncl += 1\n\n first_line = list(zip(*data))[0]\n\n first_line = modify_feature_names(list(first_line))\n\n data = list(zip( first_line,\n *sort_by_cl(list(zip(*data))[1:],\n ncl,\n params['class']-1,\n params['subclass']-1 if not params['subclass'] is None else None,\n params['subject']-1 if not params['subject'] is None else None)))\n# data.insert(0,first_line)\n# data = remove_missing(data,params['missing_p'])\n cls = {}\n\n cls_i = 
[('class',params['class']-1)]\n if params['subclass'] is not None and params['subclass'] > 0:\n cls_i.append(('subclass',params['subclass']-1))\n\n if params['subject'] is not None and params['subject'] > 0:\n cls_i.append(('subject',params['subject']-1))\n\n cls_i.sort(key = functools.cmp_to_key(lambda x,y: -((x[1] > y[1]) - (x[1] < y[1]))))\n\n for v in cls_i: \n cls[v[0]] = data.pop(v[1])[1:]\n \n if params['subclass'] is None:\n cls['subclass'] = [str(cl)+\"_subcl\" for cl in cls['class']]\n\n cls['subclass'] = rename_same_subcl(cls['class'],cls['subclass'])\n# if 'subclass' in cls.keys(): cls = group_small_subclasses(cls,params['subcl_min_card'])\n class_sl,subclass_sl,class_hierarchy = get_class_slices(list(zip(cls['class'], cls['subclass'], cls['subject'])))\n\n feats = dict([(d[0],d[1:]) for d in data])\n\n feats = add_missing_levels(feats)\n\n feats = numerical_values(feats,params['norm_v'])\n out = {}\n out['feats'] = feats\n out['norm'] = params['norm_v']\n out['cls'] = cls\n out['class_sl'] = class_sl\n out['subclass_sl'] = subclass_sl\n out['class_hierarchy'] = class_hierarchy\n\n if params['output_table']:\n with open( params['output_table'], \"w\") as outf:\n if 'class' in cls: outf.write( \"\\t\".join(list([\"class\"])+list(cls['class'])) + \"\\n\" )\n if 'subclass' in cls: outf.write( \"\\t\".join(list([\"subclass\"])+list(cls['subclass'])) + \"\\n\" )\n if 'subject' in cls: outf.write( \"\\t\".join(list([\"subject\"])+list(cls['subject'])) + \"\\n\" )\n for k,v in out['feats'].items(): outf.write( \"\\t\".join([k]+[str(vv) for vv in v]) + \"\\n\" )\n\n with open(params['output_file'], 'wb') as back_file:\n pickle.dump(out,back_file)\n\n" ]
[ [ "numpy.std", "numpy.mean" ] ]
samuelwestlake/deeplodocus-dev
[ "12b283ca4eb39abf13ddc56eabc78e01e90627ff" ]
[ "deeplodocus/data/load/loader.py" ]
[ "# Python imports\nfrom typing import Optional\nfrom typing import List\nfrom typing import Union\nfrom typing import Any\nimport numpy as np\nimport mimetypes\nimport weakref\n\n# Deeplodocus imports\nfrom deeplodocus.utils.notification import Notification\nfrom deeplodocus.utils.generic_utils import get_int_or_float\nfrom deeplodocus.utils.generic_utils import is_np_array\nfrom deeplodocus.utils.generic_utils import get_corresponding_flag\n\n# Deeplodocus flags\nfrom deeplodocus.flags import *\n\n\nclass Loader(object):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Load the unloaded data after being select by the Dataset\n \"\"\"\n\n def __init__(self,\n data_entry: weakref,\n load_as: Optional[str] = None,\n cv_library: Union[str, None, Flag] = DEEP_LIB_OPENCV\n ):\n\n # Weakref of the Entry instance\n self.data_entry = data_entry\n\n # Optional type of data to load (Still highly recommended to define it)\n self.load_as = load_as\n\n # Computer Vision library\n self.warning_video = None\n self.cv_library = None\n self.set_cv_library(cv_library)\n\n # Checked\n self.checked = False\n\n def check(self):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Check the Loader\n\n PARAMETERS:\n -----------\n\n None\n\n RETURN:\n -------\n\n :return: None\n \"\"\"\n # Check the load_as argument\n self.load_as = self.__check_load_as(self.load_as)\n\n # Set self.checked as True\n self.checked = True\n\n def __check_load_as(self, load_as: Union[str, int, Flag, None]) -> Flag:\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Check the data type\n If the data type given is None we try to estimate it (errors can occur with complex types)\n Else we directly get the data type given by the user\n\n PARAMETERS:\n -----------\n\n :param load_as (Union[str, int, None]): The data type in a raw format given by the user\n\n RETURN:\n -------\n\n :return load_as(Flag): The data type of the entry\n \"\"\"\n\n if load_as is None:\n # Get an instance\n instance_example, is_loaded, _ = self.data_entry().__get_first_item()\n\n if is_loaded is True:\n load_as = None\n else:\n # Automatically check the data type\n load_as = self.__estimate_load_as(instance_example)\n else:\n load_as = get_corresponding_flag(\n flag_list=DEEP_LIST_LOAD_AS,\n info=load_as\n )\n return load_as\n\n def __estimate_load_as(self, data: str) -> Flag:\n \"\"\"\n AUTHORS:\n --------\n\n author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Find the type of the given data\n\n PARAMETERS:\n -----------\n\n :param data: The data to analyze\n\n RETURN:\n -------\n\n :return: The integer flag of the corresponding type\n \"\"\"\n\n # If we have a list of item, we check that they all contain the same type\n if isinstance(data, list):\n load_as_list = []\n # Get all the data type\n for d in data:\n dt = self.__estimate_load_as(d)\n load_as_list.append(dt)\n\n # Check the data types are all the same\n for dt in load_as_list:\n if load_as_list[0].corresponds(dt) is False:\n Notification(DEEP_NOTIF_FATAL, \"Data type in your sequence of data are not all the same\")\n\n # If all the same then return the data type\n return load_as_list[0]\n\n # If not a list\n else:\n mime = mimetypes.guess_type(data)\n if mime[0] is not None:\n mime = mime[0].split(\"/\")[0]\n\n # IMAGE\n if mime == \"image\":\n return DEEP_LOAD_AS_IMAGE\n # VIDEO\n elif mime == \"video\":\n return DEEP_LOAD_AS_VIDEO\n # FLOAT\n elif 
DEEP_LOAD_AS_FLOAT.corresponds(get_int_or_float(data)):\n return DEEP_LOAD_AS_FLOAT\n # INTEGER\n elif DEEP_LOAD_AS_INTEGER.corresponds(get_int_or_float(data)):\n return DEEP_LOAD_AS_INTEGER\n # NUMPY ARRAY\n if is_np_array(data) is True:\n return DEEP_LOAD_AS_NP_ARRAY\n # Type not handled\n else:\n Notification(DEEP_NOTIF_FATAL, DEEP_MSG_DATA_NOT_HANDLED % data)\n\n def load_from_str(self, data: Union[str, List[str], Any]) -> Union[Any, List[Any]]:\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Load a data from a string format to the actual content\n Loads either one item or a list of items\n\n PARAMETERS:\n -----------\n\n :param data(Union[str, List[str]]): The data to transform\n\n RETURN:\n -------\n\n :return loaded_data(Union[Any, List[Any]]): The loaded data\n \"\"\"\n\n loaded_data = None\n\n # Make sure the data contains something\n if data is not None:\n\n # SEQUENCE\n if isinstance(data, list):\n # If data is a sequence we use the function in a recursive fashion\n loaded_data = []\n for d in data:\n ld = self.load_from_str(data=d)\n loaded_data.append(ld)\n\n # IMAGE\n elif DEEP_LOAD_AS_IMAGE.corresponds(self.load_as):\n # Load image\n loaded_data = self.__load_image(data)\n\n # VIDEO\n elif DEEP_LOAD_AS_VIDEO.corresponds(self.load_as):\n loaded_data = self.__load_video(data)\n\n # INTEGER\n elif DEEP_LOAD_AS_INTEGER.corresponds(self.load_as):\n loaded_data = int(data)\n\n # FLOAT NUMBER\n elif DEEP_LOAD_AS_FLOAT.corresponds(self.load_as):\n loaded_data = float(data)\n\n elif DEEP_LOAD_AS_STRING.corresponds(self.load_as):\n loaded_data = str(data)\n\n # NUMPY ARRAY\n elif DEEP_LOAD_AS_NP_ARRAY.corresponds(self.load_as):\n loaded_data = np.load(data)\n\n # LOAD AS GIVEN (unchanged)\n elif DEEP_LOAD_AS_GIVEN.corresponds(self.load_as):\n loaded_data = data\n\n # Data type not recognized\n else:\n\n Notification(DEEP_NOTIF_FATAL,\n \"The following data could not be loaded because its type is not recognized : %s.\\n\"\n \"Please check the documentation online to see the supported types\" % data)\n # If the data is None\n else:\n Notification(DEEP_NOTIF_FATAL, DEEP_MSG_DATA_IS_NONE % data)\n\n return loaded_data\n\n\n \"\"\"\n \"\n \" DATA LOADERS\n \"\n \"\"\"\n\n def __load_image(self, image_path: str):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Load the image in the image_path\n\n PARAMETERS:\n -----------\n\n :param image_path(str): The path of the image to load\n\n RETURN:\n -------\n\n :return: The loaded image\n \"\"\"\n if DEEP_LIB_OPENCV.corresponds(self.cv_library):\n image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)\n elif DEEP_LIB_PIL.corresponds(self.cv_library):\n image = np.array(Image.open(image_path))\n else:\n # Notify the user of invalid cv library\n image = None\n Notification(DEEP_NOTIF_FATAL, DEEP_MSG_CV_LIBRARY_NOT_IMPLEMENTED % self.cv_library.name)\n\n # Notify the user that the image failed to load\n if image is None:\n Notification(DEEP_NOTIF_FATAL, DEEP_MSG_DATA_CANNOT_LOAD_IMAGE % (self.cv_library.name, image_path))\n\n # If image is is grayscale add a new dimension\n if image.ndim > 2:\n # If image loaded using openCV, convert to RGB(a)\n if DEEP_LIB_OPENCV.corresponds(self.cv_library):\n image = self.__convert_bgra2rgba(image)\n else:\n image = image[:, :, np.newaxis]\n\n return image\n\n @staticmethod\n def __convert_bgra2rgba(image):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Convert BGR(alpha) image to 
RGB(alpha) image\n\n PARAMETERS:\n -----------\n\n :param image: image to convert\n\n RETURN:\n -------\n\n :return: a RGB(alpha) image\n \"\"\"\n\n # Get the number of channels in the image\n _, _, channels = image.shape\n\n # Handle BGR and BGR(A) images\n if channels == 3:\n image = image[:, :, (2, 1, 0)]\n elif channels == 4:\n image = image[:, :, (2, 1, 0, 3)]\n return image\n\n def __load_video(self, video_path: str):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n Load a video\n\n PARAMETERS:\n -----------\n\n :param video_path->str: absolute path to a video\n\n RETURN:\n -------\n\n :return: a list of frame from the video\n \"\"\"\n self.__throw_warning_video()\n video = []\n # If the computer vision library selected is OpenCV\n if DEEP_LIB_OPENCV.corresponds(self.cv_library):\n # try to load the file\n cap = cv2.VideoCapture(video_path)\n while True:\n _, frame = cap.read()\n if frame is None:\n break\n video.append(self.__convert_bgra2rgba(frame))\n cap.release()\n else:\n Notification(DEEP_NOTIF_FATAL,\n \"The video could not be loaded because OpenCV is not selected as the Computer Vision library\")\n return video\n\n def __throw_warning_video(self):\n \"\"\"\n AUTHORS:\n --------\n\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Warn the user of the unstable video mode.\n\n PARAMETERS:\n -----------\n\n None\n\n RETURN:\n -------\n\n :return: None\n \"\"\"\n if self.warning_video is None:\n Notification(DEEP_NOTIF_WARNING, \"The video mode is not fully supported. \"\n \"We deeply suggest you to use sequences of images.\")\n self.warning_video = 1\n\n def set_cv_library(self, cv_library: Flag) -> None:\n \"\"\"\n AUTHORS:\n --------\n\n :author: Samuel Westlake\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Set self.cv_library to the given value and import the corresponding cv library\n\n PARAMETERS:\n -----------\n\n :param cv_library: (Flag): The flag of the computer vision library selected\n\n RETURN:\n -------\n\n None\n \"\"\"\n # Set the cv_library argument to the corresponding Flag\n self.cv_library = get_corresponding_flag(flag_list=DEEP_LIST_CV_LIB, info=cv_library)\n\n # Import globally the required CV library\n self.__import_cv_library(cv_library=cv_library)\n\n @staticmethod\n def __import_cv_library(cv_library : Flag) -> None:\n \"\"\"\n AUTHORS:\n --------\n\n :author: Samuel Westlake\n :author: Alix Leroy\n\n DESCRIPTION:\n ------------\n\n Imports either cv2 or PIL.Image dependant on the value of cv_library\n\n PARAMETERS:\n -----------\n\n None\n\n RETURN:\n -------\n\n None\n \"\"\"\n if DEEP_LIB_OPENCV.corresponds(info=cv_library):\n try:\n global cv2\n import cv2\n except ImportError as e:\n Notification(DEEP_NOTIF_ERROR, str(e))\n elif DEEP_LIB_PIL.corresponds(info=cv_library):\n try:\n global Image\n from PIL import Image\n except ImportError as e:\n Notification(DEEP_NOTIF_ERROR, str(e))\n else:\n Notification(DEEP_NOTIF_ERROR, DEEP_MSG_CV_LIBRARY_NOT_IMPLEMENTED % cv_library)" ]
[ [ "numpy.load" ] ]
fernandezdaniel/Spearmint
[ "3c9e0a4be6108c3d652606bd957f0c9ae1bfaf84" ]
[ "spearmint/kernels/matern.py" ]
[ "# -*- coding: utf-8 -*-\n# Spearmint\n#\n# Academic and Non-Commercial Research Use Software License and Terms\n# of Use\n#\n# Spearmint is a software package to perform Bayesian optimization\n# according to specific algorithms (the “Software”). The Software is\n# designed to automatically run experiments (thus the code name\n# 'spearmint') in a manner that iteratively adjusts a number of\n# parameters so as to minimize some objective in as few runs as\n# possible.\n#\n# The Software was developed by Ryan P. Adams, Michael Gelbart, and\n# Jasper Snoek at Harvard University, Kevin Swersky at the\n# University of Toronto (“Toronto”), and Hugo Larochelle at the\n# Université de Sherbrooke (“Sherbrooke”), which assigned its rights\n# in the Software to Socpra Sciences et Génie\n# S.E.C. (“Socpra”). Pursuant to an inter-institutional agreement\n# between the parties, it is distributed for free academic and\n# non-commercial research use by the President and Fellows of Harvard\n# College (“Harvard”).\n#\n# Using the Software indicates your agreement to be bound by the terms\n# of this Software Use Agreement (“Agreement”). Absent your agreement\n# to the terms below, you (the “End User”) have no rights to hold or\n# use the Software whatsoever.\n#\n# Harvard agrees to grant hereunder the limited non-exclusive license\n# to End User for the use of the Software in the performance of End\n# User’s internal, non-commercial research and academic use at End\n# User’s academic or not-for-profit research institution\n# (“Institution”) on the following terms and conditions:\n#\n# 1. NO REDISTRIBUTION. The Software remains the property Harvard,\n# Toronto and Socpra, and except as set forth in Section 4, End User\n# shall not publish, distribute, or otherwise transfer or make\n# available the Software to any other party.\n#\n# 2. NO COMMERCIAL USE. End User shall not use the Software for\n# commercial purposes and any such use of the Software is expressly\n# prohibited. This includes, but is not limited to, use of the\n# Software in fee-for-service arrangements, core facilities or\n# laboratories or to provide research services to (or in collaboration\n# with) third parties for a fee, and in industry-sponsored\n# collaborative research projects where any commercial rights are\n# granted to the sponsor. If End User wishes to use the Software for\n# commercial purposes or for any other restricted purpose, End User\n# must execute a separate license agreement with Harvard.\n#\n# Requests for use of the Software for commercial purposes, please\n# contact:\n#\n# Office of Technology Development\n# Harvard University\n# Smith Campus Center, Suite 727E\n# 1350 Massachusetts Avenue\n# Cambridge, MA 02138 USA\n# Telephone: (617) 495-3067\n# Facsimile: (617) 495-9568\n# E-mail: [email protected]\n#\n# 3. OWNERSHIP AND COPYRIGHT NOTICE. Harvard, Toronto and Socpra own\n# all intellectual property in the Software. End User shall gain no\n# ownership to the Software. End User shall not remove or delete and\n# shall retain in the Software, in any modifications to Software and\n# in any Derivative Works, the copyright, trademark, or other notices\n# pertaining to Software as provided with the Software.\n#\n# 4. DERIVATIVE WORKS. End User may create and use Derivative Works,\n# as such term is defined under U.S. copyright laws, provided that any\n# such Derivative Works shall be restricted to non-commercial,\n# internal research and academic use at End User’s Institution. 
End\n# User may distribute Derivative Works to other Institutions solely\n# for the performance of non-commercial, internal research and\n# academic use on terms substantially similar to this License and\n# Terms of Use.\n#\n# 5. FEEDBACK. In order to improve the Software, comments from End\n# Users may be useful. End User agrees to provide Harvard with\n# feedback on the End User’s use of the Software (e.g., any bugs in\n# the Software, the user experience, etc.). Harvard is permitted to\n# use such information provided by End User in making changes and\n# improvements to the Software without compensation or an accounting\n# to End User.\n#\n# 6. NON ASSERT. End User acknowledges that Harvard, Toronto and/or\n# Sherbrooke or Socpra may develop modifications to the Software that\n# may be based on the feedback provided by End User under Section 5\n# above. Harvard, Toronto and Sherbrooke/Socpra shall not be\n# restricted in any way by End User regarding their use of such\n# information. End User acknowledges the right of Harvard, Toronto\n# and Sherbrooke/Socpra to prepare, publish, display, reproduce,\n# transmit and or use modifications to the Software that may be\n# substantially similar or functionally equivalent to End User’s\n# modifications and/or improvements if any. In the event that End\n# User obtains patent protection for any modification or improvement\n# to Software, End User agrees not to allege or enjoin infringement of\n# End User’s patent against Harvard, Toronto or Sherbrooke or Socpra,\n# or any of the researchers, medical or research staff, officers,\n# directors and employees of those institutions.\n#\n# 7. PUBLICATION & ATTRIBUTION. End User has the right to publish,\n# present, or share results from the use of the Software. In\n# accordance with customary academic practice, End User will\n# acknowledge Harvard, Toronto and Sherbrooke/Socpra as the providers\n# of the Software and may cite the relevant reference(s) from the\n# following list of publications:\n#\n# Practical Bayesian Optimization of Machine Learning Algorithms\n# Jasper Snoek, Hugo Larochelle and Ryan Prescott Adams\n# Neural Information Processing Systems, 2012\n#\n# Multi-Task Bayesian Optimization\n# Kevin Swersky, Jasper Snoek and Ryan Prescott Adams\n# Advances in Neural Information Processing Systems, 2013\n#\n# Input Warping for Bayesian Optimization of Non-stationary Functions\n# Jasper Snoek, Kevin Swersky, Richard Zemel and Ryan Prescott Adams\n# Preprint, arXiv:1402.0929, http://arxiv.org/abs/1402.0929, 2013\n#\n# Bayesian Optimization and Semiparametric Models with Applications to\n# Assistive Technology Jasper Snoek, PhD Thesis, University of\n# Toronto, 2013\n#\n# 8. NO WARRANTIES. THE SOFTWARE IS PROVIDED \"AS IS.\" TO THE FULLEST\n# EXTENT PERMITTED BY LAW, HARVARD, TORONTO AND SHERBROOKE AND SOCPRA\n# HEREBY DISCLAIM ALL WARRANTIES OF ANY KIND (EXPRESS, IMPLIED OR\n# OTHERWISE) REGARDING THE SOFTWARE, INCLUDING BUT NOT LIMITED TO ANY\n# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE, OWNERSHIP, AND NON-INFRINGEMENT. HARVARD, TORONTO AND\n# SHERBROOKE AND SOCPRA MAKE NO WARRANTY ABOUT THE ACCURACY,\n# RELIABILITY, COMPLETENESS, TIMELINESS, SUFFICIENCY OR QUALITY OF THE\n# SOFTWARE. HARVARD, TORONTO AND SHERBROOKE AND SOCPRA DO NOT WARRANT\n# THAT THE SOFTWARE WILL OPERATE WITHOUT ERROR OR INTERRUPTION.\n#\n# 9. LIMITATIONS OF LIABILITY AND REMEDIES. USE OF THE SOFTWARE IS AT\n# END USER’S OWN RISK. 
IF END USER IS DISSATISFIED WITH THE SOFTWARE,\n# ITS EXCLUSIVE REMEDY IS TO STOP USING IT. IN NO EVENT SHALL\n# HARVARD, TORONTO OR SHERBROOKE OR SOCPRA BE LIABLE TO END USER OR\n# ITS INSTITUTION, IN CONTRACT, TORT OR OTHERWISE, FOR ANY DIRECT,\n# INDIRECT, SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR OTHER\n# DAMAGES OF ANY KIND WHATSOEVER ARISING OUT OF OR IN CONNECTION WITH\n# THE SOFTWARE, EVEN IF HARVARD, TORONTO OR SHERBROOKE OR SOCPRA IS\n# NEGLIGENT OR OTHERWISE AT FAULT, AND REGARDLESS OF WHETHER HARVARD,\n# TORONTO OR SHERBROOKE OR SOCPRA IS ADVISED OF THE POSSIBILITY OF\n# SUCH DAMAGES.\n#\n# 10. INDEMNIFICATION. To the extent permitted by law, End User shall\n# indemnify, defend and hold harmless Harvard, Toronto and Sherbrooke\n# and Socpra, their corporate affiliates, current or future directors,\n# trustees, officers, faculty, medical and professional staff,\n# employees, students and agents and their respective successors,\n# heirs and assigns (the \"Indemnitees\"), against any liability,\n# damage, loss or expense (including reasonable attorney's fees and\n# expenses of litigation) incurred by or imposed upon the Indemnitees\n# or any one of them in connection with any claims, suits, actions,\n# demands or judgments arising from End User’s breach of this\n# Agreement or its Institution’s use of the Software except to the\n# extent caused by the gross negligence or willful misconduct of\n# Harvard, Toronto or Sherbrooke or Socpra. This indemnification\n# provision shall survive expiration or termination of this Agreement.\n#\n# 11. GOVERNING LAW. This Agreement shall be construed and governed by\n# the laws of the Commonwealth of Massachusetts regardless of\n# otherwise applicable choice of law standards.\n#\n# 12. NON-USE OF NAME. Nothing in this License and Terms of Use shall\n# be construed as granting End Users or their Institutions any rights\n# or licenses to use any trademarks, service marks or logos associated\n# with the Software. You may not use the terms “Harvard” or\n# “University of Toronto” or “Université de Sherbrooke” or “Socpra\n# Sciences et Génie S.E.C.” (or a substantially similar term) in any\n# way that is inconsistent with the permitted uses described\n# herein. You agree not to use any name or emblem of Harvard, Toronto\n# or Sherbrooke, or any of their subdivisions for any purpose, or to\n# falsely suggest any relationship between End User (or its\n# Institution) and Harvard, Toronto and/or Sherbrooke, or in any\n# manner that would infringe or violate any of their rights.\n#\n# 13. 
End User represents and warrants that it has the legal authority\n# to enter into this License and Terms of Use on behalf of itself and\n# its Institution.\n\n\nimport numpy as np\nimport kernel_utils\n\nfrom .abstract_kernel import AbstractKernel\nfrom ..utils import priors\nfrom ..utils.param import Param as Hyperparameter\n\nSQRT_3 = np.sqrt(3.0)\nSQRT_5 = np.sqrt(5.0)\n\n\nclass Matern52(AbstractKernel):\n def __init__(self, num_dims, value=None, name='Matern52', prior=None):\n self.name = name\n self.num_dims = num_dims\n\n self.ls = Hyperparameter(\n initial_value = np.ones(self.num_dims) if value is None else value,\n prior = priors.Tophat(0.0, 10.0) if prior is None else prior,\n name = 'ls'\n )\n\n assert self.ls.value.shape[0] == self.num_dims\n\n @property\n def hypers(self):\n return self.ls\n\n def cov(self, inputs):\n return self.cross_cov(inputs, inputs)\n\n def diag_cov(self, inputs):\n return np.ones(inputs.shape[0])\n\n def cross_cov(self, inputs_1, inputs_2):\n r2 = np.abs(kernel_utils.dist2(self.ls.value, inputs_1, inputs_2))\n r = np.sqrt(r2)\n cov = (1.0 + SQRT_5*r + (5.0/3.0)*r2) * np.exp(-SQRT_5*r)\n\n return cov\n\n def cross_cov_grad_data(self, inputs_1, inputs_2):\n # NOTE: This is the gradient wrt the inputs of inputs_2\n # The gradient wrt the inputs of inputs_1 is -1 times this\n # This is sloppily coded -- the gradient that comes from kernel_utils is w.r.t. inputs_1\n # but a minus sign is dropped to make it w.r.t. inputs_2\n # oh well...\n r2 = np.abs(kernel_utils.dist2(self.ls.value, inputs_1, inputs_2))\n r = np.sqrt(r2)\n grad_r2 = (5.0/6.0)*np.exp(-SQRT_5*r)*(1 + SQRT_5*r)\n\n return grad_r2[:,:,np.newaxis] * kernel_utils.grad_dist2(self.ls.value, inputs_1, inputs_2)\n\n" ]
[ [ "numpy.sqrt", "numpy.ones", "numpy.exp" ] ]
iamabhishek0/sympy
[ "c461bd1ff9d178d1012b04fd0bf37ee3abb02cdd" ]
[ "sympy/parsing/autolev/test-examples/ruletest12.py" ]
[ "import sympy.physics.mechanics as me\nimport sympy as sm\nimport math as m\nimport numpy as np\n\nx, y = me.dynamicsymbols('x y')\na, b, r = sm.symbols('a b r', real=True)\neqn = sm.Matrix([[0]])\neqn[0] = a*x**3+b*y**2-r\neqn = eqn.row_insert(eqn.shape[0], sm.Matrix([[0]]))\neqn[eqn.shape[0]-1] = a*sm.sin(x)**2+b*sm.cos(2*y)-r**2\nmatrix_list = []\nfor i in eqn:matrix_list.append(i.subs({a:2.0, b:3.0, r:1.0}))\nprint(sm.nsolve(matrix_list,(x,y),(np.deg2rad(30),3.14)))\n" ]
[ [ "numpy.deg2rad" ] ]
ACWI-SSWD/nldi_xstool
[ "f201befc6454202042d2ed76e82c3c07edcf4c48" ]
[ "nldi_xstool/__openChannel.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 5 16:26:25 2015\n\n@author: mweier\n\"\"\"\n\nimport numpy as np\nfrom numba import jit\n\n\n@jit\ndef channelBuilder(wsDepth, rightSS, leftSS, widthBottom):\n \"\"\"\n Builds trapziodal channel station/elevation array given depth,\n right side slope, left side slope, and bottom width\n \"\"\"\n leftToe = wsDepth*1.25*leftSS\n rightToe = wsDepth*1.25*rightSS\n staElev = np.array([(0.0, wsDepth*1.25),\n (leftToe, 0.0),\n (leftToe + widthBottom, 0.0),\n (leftToe+widthBottom+rightToe, wsDepth*1.25)])\n return staElev\n\n\ndef lineIntersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n x = y = np.nan\n# print 'lines do not intersect'\n return x, y\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y\n\n\n@jit\ndef polygonArea(corners):\n area = 0.0\n for i in range(len(corners)):\n j = (i + 1) % len(corners)\n area += corners[i][0] * corners[j][1]\n area -= corners[j][0] * corners[i][1]\n area = abs(area) / 2.0\n return area\n\n\n@jit\ndef channelPerimeter(corners):\n P = 0.0\n for i in range(len(corners)-1):\n P += np.sqrt((np.power((corners[i+1][0]-corners[i][0]), 2) +\n np.power((corners[i+1][1]-corners[i][1]), 2)))\n return P\n\n\ndef flowEst(wsElev, n, slope, staElev, units):\n \"\"\"\n Estimates uniform flow using the Manning equation for\n a user defined trapziodal channel or a manually defined channel using\n a station/elevation file\n \"\"\"\n\n if units == \"m\":\n const = 1.0\n else:\n const = 1.49\n\n intersectList = []\n for i in range(0, len(staElev)):\n x, y = lineIntersection(\n (staElev[i-1], staElev[i]),\n ([staElev[0][0], wsElev], [staElev[-1][0], wsElev]))\n if x >= staElev[i-1][0] and x <= staElev[i][0] and abs(y - wsElev) < 0.01:\n # print (x,y)\n intersectList.append((x, y))\n else:\n # print ('line segments do not intersect')\n pass\n\n try:\n intersectArray = np.array(intersectList)\n intersectArray = intersectArray[intersectArray[:, 0].argsort()]\n # print 'more than two points intersect'\n staMinElev = staElev[np.where(\n staElev[:, 1] == min(staElev[:, 1]))][0][0]\n startPoint = intersectArray[np.where(\n intersectArray[:, 0] < staMinElev)][-1]\n endPoint = intersectArray[np.where(\n intersectArray[:, 0] > staMinElev)][0]\n intersectArray = np.vstack([startPoint, endPoint])\n except Exception as e:\n print(e)\n return 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n\n staMin = np.min(intersectArray[:, 0])\n staMax = np.max(intersectArray[:, 0])\n\n thalweig = staElev[np.where(staElev[:, 1] == np.min(staElev[:, 1]))]\n\n minElev = thalweig[:, 1][0]\n maxDepth = wsElev-minElev\n\n if len(intersectArray) < 2:\n return 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n\n staElevTrim = np.vstack([intersectArray[0], staElev, intersectArray[1]])\n # staElevTrim = staElevTrim[staElevTrim[:,0].argsort()]\n staElevTrim = staElevTrim[np.where(\n (staElevTrim[:, 0] >= staMin) & (staElevTrim[:, 0] <= staMax))]\n\n area = polygonArea(staElevTrim)\n R = area/channelPerimeter(staElevTrim)\n v = (const/n)*np.power(R, (2./3.0))*np.sqrt(slope)\n Q = v*area\n topWidth = staMax-staMin\n xGround = staElev[:, 0]\n yGround = staElev[:, 1]\n yGround0 = np.ones(len(xGround))*np.min(yGround)\n xWater = staElevTrim[:, 0]\n yWater = np.ones(len(xWater))*wsElev\n yWater0 = staElevTrim[:, 1]\n args = R, area, topWidth, Q, 
v, maxDepth, xGround, yGround, yGround0, xWater, yWater, yWater0\n return args\n" ]
[ [ "numpy.sqrt", "numpy.vstack", "numpy.max", "numpy.power", "numpy.min", "numpy.array", "numpy.where" ] ]
whq-hqw/detr_change
[ "142f75cc5e0b59ca6e07928ddcbed3e461816611" ]
[ "models/matcher.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nModules to compute the matching cost and solve the corresponding LSAP.\n\"\"\"\nimport torch\nfrom scipy.optimize import linear_sum_assignment\nfrom torch import nn\n\nfrom util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou\n\n\nclass HungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\" Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n out_prob = outputs[\"pred_logits\"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost. 
Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be omitted.\n # In out_prob, each entry along dim 1 is the probability that the bbox belongs to each class; with 92 classes there are 92 numbers\n # Since there are far more candidate boxes than ground-truth boxes, we don't know in advance which candidate matches which gt box\n # So we first take all tgt_ids and pick out the corresponding probabilities from out_prob, since among the many candidates one bbox must best match a given gt bbox\n # The minus sign measures the gap from the ideal probability of 1; whether or not the 1 is added makes no difference here\n cost_class = -out_prob[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost between boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]\n\n\ndef build_matcher(args):\n return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou)\n" ]
[ [ "torch.as_tensor", "torch.no_grad", "torch.cdist", "scipy.optimize.linear_sum_assignment", "torch.cat" ] ]
lupantech/InterGPS
[ "0f326027d16d7d50a9c189f897739dfb95085021" ]
[ "theorem_predict/eval_transformer.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\nimport json\nimport ast\nfrom tqdm import tqdm\n\nimport torch\nfrom transformers import BartForConditionalGeneration, BartTokenizerFast\n\n\ndef evaluate(diagram_logic_file, text_logic_file, tokenizer_name, model_name, check_point, seq_num):\n\n test_lst = range(2401, 3002)\n\n ## read logic form files\n with open(diagram_logic_file) as f:\n diagram_logic_forms = json.load(f)\n with open(text_logic_file) as f:\n text_logic_forms = json.load(f)\n\n combined_logic_forms = {}\n for pid in test_lst:\n combined_logic_forms[pid] = diagram_logic_forms[str(pid)]['diagram_logic_forms'] + \\\n text_logic_forms[str(pid)]['text_logic_forms']\n\n ## build tokenizer and model\n tokenizer = BartTokenizerFast.from_pretrained(tokenizer_name) # 'facebook/bart-base'\n model = BartForConditionalGeneration.from_pretrained(model_name).to(device) # 'facebook/bart-base'\n model.load_state_dict(torch.load(check_point))\n\n final = dict()\n for pid in tqdm(test_lst):\n input = str(combined_logic_forms[pid])\n tmp = tokenizer.encode(input)\n if len(tmp) > 1024:\n tmp = tmp[:1024]\n input = torch.LongTensor(tmp).unsqueeze(0).to(device)\n\n output = model.generate(input, bos_token_id=0, eos_token_id=2,\n max_length=20, num_beams=10, num_return_sequences=seq_num)\n # print(out.size())\n\n ## refine output sequence\n seq = []\n for j in range(seq_num):\n res = tokenizer.decode(output[j].tolist())\n res = res.replace(\"</s>\", \"\").replace(\"<s>\", \"\").replace(\"<pad>\", \"\")\n # print(res)\n try:\n res = ast.literal_eval(res) # string class to list class\n except Exception as e:\n res = []\n seq.append(res)\n\n final[str(pid)] = {\"id\": str(pid), \"num_seqs\": seq_num, \"seq\": seq}\n\n return final\n\n\nif __name__ == '__main__':\n\n diagram_logic_file = '../data/geometry3k/logic_forms/diagram_logic_forms_annot.json'\n text_logic_file = '../data/geometry3k/logic_forms/text_logic_forms_annot_dissolved.json'\n\n check_point = 'models/tp_model_best.pt'\n output_file = 'results/test/pred_seqs_test_bart_best.json'\n\n tokenizer_name = 'facebook/bart-base'\n model_name = 'facebook/bart-base'\n\n SEQ_NUM = 5\n\n device = torch.device('cuda:0')\n\n result = evaluate(diagram_logic_file, text_logic_file, tokenizer_name, model_name, check_point, SEQ_NUM)\n\n with open(output_file, 'w') as f:\n json.dump(result, f)\n\n" ]
[ [ "torch.LongTensor", "torch.device", "torch.load" ] ]
zajaczajac/metaworld
[ "4febbc4f702c3145b73b012b58b111b2c439032a" ]
[ "metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_lever_pull.py" ]
[ "import numpy as np\nfrom gym.spaces import Box\n\nfrom metaworld.envs.env_util import get_asset_full_path\nfrom metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set\n\n\nclass SawyerLeverPullEnv(SawyerXYZEnv):\n\n def __init__(self):\n\n hand_low = (-0.5, 0.40, -0.15)\n hand_high = (0.5, 1, 0.5)\n obj_low = (-0.1, 0.7, 0.05)\n obj_high = (0.1, 0.8, 0.05)\n\n super().__init__(\n self.model_name,\n hand_low=hand_low,\n hand_high=hand_high,\n )\n\n self.init_config = {\n 'obj_init_pos': np.array([0, 0.7, 0.05]),\n 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),\n }\n self.goal = np.array([0, 0.75, -0.12])\n self.obj_init_pos = self.init_config['obj_init_pos']\n self.hand_init_pos = self.init_config['hand_init_pos']\n\n goal_low = self.hand_low\n goal_high = self.hand_high\n\n \n\n self._random_reset_space = Box(\n np.array(obj_low),\n np.array(obj_high),\n )\n self.goal_space = Box(np.array(goal_low), np.array(goal_high))\n\n @property\n def model_name(self):\n return get_asset_full_path('sawyer_xyz/sawyer_lever_pull.xml')\n\n @_assert_task_is_set\n def step(self, action):\n ob = super().step(action)\n reward, reachDist, pullDist = self.compute_reward(action, ob)\n self.curr_path_length += 1\n\n info = {\n 'reachDist': reachDist,\n 'goalDist': pullDist,\n 'epRew': reward,\n 'pickRew': None,\n 'success': float(pullDist <= 0.05)\n }\n\n return ob, reward, False, info\n\n def _get_pos_objects(self):\n return self._get_site_pos('leverStart')\n\n def reset_model(self):\n self._reset_hand()\n self._target_pos = self.goal.copy()\n self.obj_init_pos = self.init_config['obj_init_pos']\n\n if self.random_init:\n goal_pos = self._get_state_rand_vec()\n self.obj_init_pos = goal_pos[:3]\n final_pos = goal_pos.copy()\n final_pos[1] += 0.05\n final_pos[2] -= 0.17\n self._target_pos = final_pos\n\n self.sim.model.body_pos[self.model.body_name2id('lever')] = self.obj_init_pos\n self.maxPullDist = np.linalg.norm(self._target_pos - self.obj_init_pos)\n\n return self._get_obs()\n\n def _reset_hand(self):\n super()._reset_hand(10)\n\n rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')\n self.init_fingerCOM = (rightFinger + leftFinger)/2\n self.reachCompleted = False\n\n def compute_reward(self, actions, obs):\n del actions\n\n objPos = obs[3:6]\n\n rightFinger, leftFinger = self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector')\n fingerCOM = (rightFinger + leftFinger)/2\n\n pullGoal = self._target_pos\n\n pullDist = np.linalg.norm(objPos - pullGoal)\n reachDist = np.linalg.norm(objPos - fingerCOM)\n reachRew = -reachDist\n\n self.reachCompleted = reachDist < 0.05\n\n def pullReward():\n c1 = 1000\n c2 = 0.01\n c3 = 0.001\n\n if self.reachCompleted:\n pullRew = 1000*(self.maxPullDist - pullDist) + c1*(np.exp(-(pullDist**2)/c2) + np.exp(-(pullDist**2)/c3))\n pullRew = max(pullRew,0)\n return pullRew\n else:\n return 0\n\n pullRew = pullReward()\n reward = reachRew + pullRew\n\n return [reward, reachDist, pullDist]\n" ]
[ [ "numpy.array", "numpy.linalg.norm", "numpy.exp" ] ]
realspacekolle/pyScattData
[ "585376761ef380c1f006bc8a0d23adaed5e9258d" ]
[ "h5_extract_write_plot/h5_extract_write_plot.py" ]
[ "import sys\nfrom pathlib import Path\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nTWOTHETA_KEYS = [\"2th\", \"2theta\", \"twotheta\"]\nQ_KEYS = [\"q\"]\nINTENSITY_KEYS = [\"i\", \"intensity\", \"int\"]\nSTACK_INDICES_KEY = \"stack_indices\"\n\nDPI = 300\nFIGSIZE = (12,4)\nFONTSIZE_LABELS = 20\nFONTSIZE_TICKS = 14\nLINEWIDTH = 1\nCOLORS = dict(bg_blue='#0B3C5D', bg_red='#B82601', bg_green='#1c6b0a',\n bg_lightblue='#328CC1', bg_darkblue='#062F4F',\n bg_yellow='#D9B310', bg_darkred='#984B43', bg_bordeaux='#76323F',\n bg_olivegreen='#626E60', bg_yellowgrey='#AB987A',\n bg_brownorange='#C09F80')\nCOLOR = COLORS[\"bg_blue\"]\n\n\ndef h5_extract_to_dict(h5_file):\n f = h5py.File(h5_file, mode=\"r\")\n d = {}\n fkeys = list(f.keys())\n if \"entry\" in fkeys:\n fkeys = list(f[\"entry\"].keys())\n for k in fkeys:\n d[k.lower()] = np.array(f[k])\n\n return d\n\n\ndef dict_to_xy_write(d, fname):\n twotheta, q, intensity = None, None, None\n dkeys = d.keys()\n for k in TWOTHETA_KEYS:\n if k in dkeys:\n twotheta = d[k]\n for k in Q_KEYS:\n if k in dkeys:\n q = d[k]\n for k in INTENSITY_KEYS:\n if k in dkeys:\n intensity = d[k]\n if STACK_INDICES_KEY in dkeys:\n stack_indices = d[STACK_INDICES_KEY]\n if isinstance(twotheta, np.ndarray) and isinstance(intensity, np.ndarray):\n if intensity.ndim > 1:\n zfill = len(str(intensity.shape[0]))\n scans_index = intensity.shape[0]\n else:\n scans_index = 1\n for i in range(scans_index):\n if STACK_INDICES_KEY in dkeys:\n print(f\"\\t\\t\\t{stack_indices[i]}\")\n else:\n print(f\"\\t\\t\\t{i}\")\n if intensity.ndim > 1:\n x, y = twotheta, intensity[i,:]\n else:\n x, y = twotheta, intensity\n xy = np.column_stack((x,y))\n h = \"2theta\\tintensity\"\n if STACK_INDICES_KEY in dkeys:\n np.savetxt(f\"xy/{fname}_{stack_indices[i]}.xy\", xy,\n encoding=\"utf-8\", header=h)\n else:\n np.savetxt(f\"xy/{fname}_{str(i).zfill(zfill)}.xy\", xy,\n encoding=\"utf-8\", header=h)\n elif isinstance(q, np.ndarray) and isinstance(intensity, np.ndarray):\n if intensity.ndim > 1:\n zfill = len(str(intensity.shape[0]))\n scans_index = intensity.shape[0]\n else:\n scans_index = 1\n for i in range(scans_index):\n if STACK_INDICES_KEY in dkeys:\n print(f\"\\t\\t\\t{stack_indices[i]}\")\n else:\n print(f\"\\t\\t\\t{i}\")\n if intensity.ndim > 1:\n x, y = q, intensity[i,:]\n else:\n x, y = q, intensity\n xy = np.column_stack((x,y))\n h = \"q\\tintensity\"\n if STACK_INDICES_KEY in dkeys:\n np.savetxt(f\"xy/{fname}_{stack_indices[i]}.xy\", xy,\n encoding=\"utf-8\", header=h)\n else:\n np.savetxt(f\"xy/{fname}_{str(i).zfill(zfill)}.xy\", xy,\n encoding=\"utf-8\", header=h)\n\n return None\n\n\ndef dict_to_plot(d, fname):\n twotheta, q, intensity = None, None, None\n dkeys = d.keys()\n for k in TWOTHETA_KEYS:\n if k in dkeys:\n twotheta = d[k]\n for k in Q_KEYS:\n if k in dkeys:\n q = d[k]\n for k in INTENSITY_KEYS:\n if k in dkeys:\n intensity = d[k]\n if STACK_INDICES_KEY in dkeys:\n stack_indices = d[STACK_INDICES_KEY]\n if isinstance(twotheta, np.ndarray) and isinstance(intensity, np.ndarray):\n if intensity.ndim > 1:\n zfill = len(str(intensity.shape[0]))\n scans_index = intensity.shape[0]\n else:\n scans_index = 1\n for i in range(scans_index):\n if STACK_INDICES_KEY in dkeys:\n print(f\"\\t\\t\\t{stack_indices[i]}\")\n else:\n print(f\"\\t\\t\\t{i}\")\n if intensity.ndim > 1:\n x, y = twotheta, intensity[i,:]\n else:\n x, y = twotheta, intensity\n plt.figure(dpi=DPI, figsize=FIGSIZE)\n plt.plot(x, y, c=COLOR, lw=LINEWIDTH)\n plt.xlim(np.amin(x), np.amax(x))\n 
plt.xlabel(r\"$2\\theta$ $[\\degree]$\", fontsize=FONTSIZE_LABELS)\n plt.ylabel(r\"$I$ $[\\mathrm{arb. u.}]$\", fontsize=FONTSIZE_LABELS)\n plt.tick_params(axis='both', which='major',\n labelsize=FONTSIZE_LABELS)\n plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n if STACK_INDICES_KEY in dkeys:\n plt.savefig(f\"png/{fname}_{stack_indices[i]}.png\",\n bbox_inches=\"tight\")\n plt.savefig(f\"pdf/{fname}_{stack_indices[i]}.pdf\",\n bbox_inches=\"tight\")\n else:\n plt.savefig(f\"png/{fname}_{str(i).zfill(zfill)}.png\",\n bbox_inches=\"tight\")\n plt.savefig(f\"pdf/{fname}_{str(i).zfill(zfill)}.pdf\",\n bbox_inches=\"tight\")\n plt.close()\n if isinstance(q, np.ndarray) and isinstance(intensity, np.ndarray):\n if intensity.ndim > 1:\n zfill = len(str(intensity.shape[0]))\n scans_index = intensity.shape[0]\n else:\n scans_index = 1\n for i in range(scans_index):\n if STACK_INDICES_KEY in dkeys:\n print(f\"\\t\\t\\t{stack_indices[i]}\")\n else:\n print(f\"\\t\\t\\t{i}\")\n if intensity.ndim > 1:\n x, y = q, intensity[i,:]\n else:\n x, y = q, intensity\n plt.figure(dpi=DPI, figsize=FIGSIZE)\n plt.plot(x, y, c=COLOR, lw=LINEWIDTH)\n plt.xlim(np.amin(x), np.amax(x))\n if np.amax(q) > 40 :\n plt.xlabel(r\"$Q$ $[\\mathrm{nm}^{-1}]$\",\n fontsize=FONTSIZE_LABELS)\n else:\n plt.xlabel(r\"$Q$ $[\\mathrm{\\AA}^{-1}]$\",\n fontsize=FONTSIZE_LABELS)\n plt.ylabel(r\"$I$ $[\\mathrm{arb. u.}]$\", fontsize=FONTSIZE_LABELS)\n plt.tick_params(axis='both', which='major',\n labelsize=FONTSIZE_LABELS)\n plt.ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n if STACK_INDICES_KEY in dkeys:\n plt.savefig(f\"png/{fname}_{stack_indices[i]}.png\",\n bbox_inches=\"tight\")\n plt.savefig(f\"pdf/{fname}_{stack_indices[i]}.pdf\",\n bbox_inches=\"tight\")\n else:\n plt.savefig(f\"png/{fname}_{str(i).zfill(zfill)}.png\",\n bbox_inches=\"tight\")\n plt.savefig(f\"pdf/{fname}_{str(i).zfill(zfill)}.pdf\",\n bbox_inches=\"tight\")\n plt.close()\n\n return None\n\n\ndef merge_dict(d):\n twotheta, q, intensity = None, None, None\n dkeys = d.keys()\n d_merged = {}\n for k in TWOTHETA_KEYS:\n if k in dkeys:\n twotheta = d[k]\n d_merged[k] = twotheta\n for k in Q_KEYS:\n if k in dkeys:\n q = d[k]\n d_merged[k] = q\n for k in INTENSITY_KEYS:\n if k in dkeys:\n intensity = d[k]\n intensity_key = k\n if isinstance(intensity, np.ndarray):\n zfill = len(str(intensity.shape[0]))\n number_of_scans = intensity.shape[0]\n scans_to_stack = int(input(\"\\t\\t\\tHow many scans should be stacked \"\n \"together?: \"))\n full_stacks = number_of_scans // scans_to_stack\n remainder_to_stack = number_of_scans % scans_to_stack\n stack_indices = []\n for i in range(full_stacks):\n stack = intensity[i*scans_to_stack, :]\n stack_indices_str = str(i*scans_to_stack).zfill(zfill)\n for j in range(1, scans_to_stack):\n stack += intensity[i*scans_to_stack+j, :]\n stack_indices.append(f\"{stack_indices_str}-\"\n f\"{str(i*scans_to_stack+j).zfill(zfill)}\")\n if i == 0:\n d_merged[intensity_key] = stack\n else:\n d_merged[intensity_key] = np.vstack((d_merged[intensity_key],\n stack))\n if remainder_to_stack != 0:\n stack = intensity[(full_stacks * scans_to_stack),:]\n stack_indices_str = str(full_stacks * scans_to_stack).zfill(zfill)\n for j in range(1, remainder_to_stack-1):\n stack = intensity[(full_stacks * scans_to_stack) + 1 + j,:]\n if remainder_to_stack == 1:\n stack_indices.append(f\"{stack_indices_str}\")\n else:\n last_scan = str((full_stacks*scans_to_stack)+1+j).zfill(zfill)\n 
stack_indices.append(f\"{stack_indices_str}-{last_scan}\")\n d_merged[intensity_key] = np.vstack((d_merged[intensity_key],\n stack))\n d_merged[STACK_INDICES_KEY] = stack_indices\n\n return d_merged\n\n\ndef main():\n h5_path = Path.cwd() / \"h5\"\n if not h5_path.exists():\n h5_path.mkdir()\n print(f\"{80*'-'}\\nA folder called 'h5' has been created. Please \"\n f\"place your .h5 files there and\\nrerun the code.\\n{80*'-'}\")\n sys.exit()\n h5_files = list(h5_path.glob(\"*.h5\"))\n if len(h5_files) == 0:\n print(f\"{80*'-'}\\nNo .h5 files were found in the 'h5' folder. Please \"\n f\"place your .h5 files there\\nand rerun the code.\\n{80*'-'}\")\n sys.exit()\n output_paths = [\"xy\", \"png\", \"pdf\"]\n for e in output_paths:\n p = Path.cwd() / e\n if not p.exists():\n p.mkdir()\n print(\"Working w. files...\")\n for h5_file in h5_files:\n try:\n print(f\"{80*'-'}\\n\\tFile: {h5_file.name}\")\n fname = h5_file.stem\n d = h5_extract_to_dict(h5_file)\n for k in INTENSITY_KEYS:\n if k in d.keys():\n print(f\"\\t\\tNumber of scans: {d[k].shape[0]}\")\n mergereq = input(\"\\t\\tDo you want to merge any of the scans? \"\n \"(y/n): \")\n while mergereq not in [\"y\", \"n\"]:\n mergereq = input(\"\\t\\tDo you want to merge any of the scans? \"\n \"(y/n): \")\n if mergereq == \"y\":\n writereq = input(\"\\t\\tDo you want to write .xy files for all \"\n \"merged scans? (y/n): \")\n while writereq not in [\"y\", \"n\"]:\n writereq = input(\"\\t\\tDo you want to write .xy files for \"\n \"all merged scans? (y/n): \")\n else:\n writereq = input(\"\\t\\tDo you want to write .xy files for all \"\n \"scans? (y/n): \")\n while writereq not in [\"y\", \"n\"]:\n writereq = input(\"\\t\\tDo you want to write .xy files for \"\n \"merged scans? (y/n): \")\n if mergereq == \"y\":\n plotreq = input(\"\\t\\tDo you want to plot all merged scans? \"\n \"(y/n): \")\n while plotreq not in [\"y\", \"n\"]:\n plotreq = input(\"\\t\\tDo you want to plot all merged scans? \"\n \"(y/n): \")\n else:\n plotreq = input(\"\\t\\tDo you want to plot all scans? (y/n): \")\n while plotreq not in [\"y\", \"n\"]:\n plotreq = input(\"\\t\\tDo you want to plot all scans? \"\n \"(y/n): \")\n if mergereq.lower() == \"y\":\n d_merged = merge_dict(d)\n if writereq == \"y\":\n print(\"\\t\\tWriting to two-column files of merged scans...\")\n dict_to_xy_write(d_merged, fname)\n print(\"\\t\\tPlotting merged scans...\")\n if plotreq == \"y\":\n dict_to_plot(d_merged, fname)\n else:\n if writereq == \"y\":\n print(\"\\t\\tWriting to two-column files for each scan...\")\n dict_to_xy_write(d, fname)\n if plotreq == \"y\":\n print(\"\\t\\tPlotting each scan...\")\n dict_to_plot(d, fname)\n except KeyError:\n print(f\"\\t\\tThis file seems to contain non-integrated data. File \"\n \"skipped.\")\n print(f\"{80*'-'}\\nDone working w. files.\\n{80*'-'}\")\n\n return None\n\n\nif __name__ == \"__main__\":\n main()\n\n# End of file.\n" ]
[ [ "numpy.vstack", "matplotlib.pyplot.ticklabel_format", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.figure", "numpy.savetxt", "matplotlib.pyplot.savefig", "numpy.column_stack", "numpy.amax", "numpy.amin", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
ilopata1/matplotlib-scalebar
[ "7e0d7b668021f01501b47b6eeecc1e8808d81c29" ]
[ "doc/example_dimension.py" ]
[ "import matplotlib.pyplot as plt\nfrom matplotlib_scalebar.scalebar import ScaleBar\nfrom matplotlib_scalebar.dimension import _Dimension, _PREFIXES_FACTORS, _LATEX_MU\n\n\nclass TimeDimension(_Dimension):\n def __init__(self):\n super().__init__(\"s\")\n for prefix, factor in _PREFIXES_FACTORS.items():\n latexrepr = None\n if prefix == \"\\u00b5\" or prefix == \"u\":\n latexrepr = _LATEX_MU + \"s\"\n self.add_units(prefix + \"s\", factor, latexrepr)\n\n\nplt.figure()\nplt.gca().add_artist(\n ScaleBar(5, units=\"ms\", dimension=TimeDimension(), location=\"lower right\")\n)\n\nplt.savefig(\"example_dimension.png\")\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.gca", "matplotlib.pyplot.savefig" ] ]
whigg/BeringSeaIce2018
[ "b5404cfbb51cb6f893be78d53b94de8092b25a7b" ]
[ "Scripts/BeringSeaIce_NSIDC_Feb.py" ]
[ "\"\"\"\nScript calculates sea ice extent in the Bering Sea from SIC fields\nNotes\n-----\n Author : Zachary Labe\n Date : 12 March 2018\n\"\"\"\n\n### Import modules\nimport numpy as np\nfrom netCDF4 import Dataset\nimport matplotlib.pyplot as plt\nimport datetime\nimport statsmodels.api as sm\n\n### Define directories\ndirectorydata2 = '/home/zlabe/Documents/Projects/BeringSeaIce2018/Data/'\ndirectoryfigure = '/home/zlabe/Documents/Projects/BeringSeaIce2018/Figures/'\n\n### Define time \nnow = datetime.datetime.now()\ncurrentmn = str(now.month)\ncurrentdy = str(now.day)\ncurrentyr = str(now.year)\ncurrenttime = currentmn + '_' + currentdy + '_' + currentyr\ntitletime = currentmn + '/' + currentdy + '/' + currentyr\nprint('\\n' '----Plotting Bering SIE - %s----' % titletime)\n\n### Define years\nyears = np.arange(1850,2018+1,1)\nyearsat = np.arange(1979,2018+1,1)\n\n### Retrieve data from NSIDC regional extent in Bering Sea\nberingoldf = np.genfromtxt(directorydata2 +'BeringSeaIce_NSIDC_Feb.txt')\nberingf = beringoldf/1e6\n\nberingoldd = np.genfromtxt(directorydata2 +'BeringSeaIce_NSIDC_Dec.txt')\nberingd = beringoldd/1e6\n\nberingoldj = np.genfromtxt(directorydata2 +'BeringSeaIce_NSIDC_Jan.txt')\nberingj = beringoldj/1e6\n\n#beringoldn = np.genfromtxt(directorydata2 +'BeringSeaIce_NSIDC_Nov.txt')\n#beringn = beringoldn/1e6\n\nbering = (beringd + beringj + beringf)/3.\n#bering = (beringn + beringd + beringj + beringf)/4.\n#bering = (beringj + beringf)/2.\n\nprint('Completed: Data read!')\n\n### Calculate loess \nsmoothed = sm.nonparametric.lowess(bering,np.arange(yearsat.shape[0]))\n\n###############################################################################\n###############################################################################\n###############################################################################\n### Plot figures\nplt.rc('text',usetex=True)\nplt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) \nplt.rc('savefig',facecolor='black')\nplt.rc('axes',edgecolor='darkgrey')\nplt.rc('xtick',color='white')\nplt.rc('ytick',color='white')\nplt.rc('axes',labelcolor='white')\nplt.rc('axes',facecolor='black')\n\nfig = plt.figure()\nax = plt.subplot(111)\n\n### Adjust axes in time series plots \ndef adjust_spines(ax, spines):\n for loc, spine in ax.spines.items():\n if loc in spines:\n spine.set_position(('outward', 5))\n else:\n spine.set_color('none') \n if 'left' in spines:\n ax.yaxis.set_ticks_position('left')\n else:\n ax.yaxis.set_ticks([])\n\n if 'bottom' in spines:\n ax.xaxis.set_ticks_position('bottom')\n else:\n ax.xaxis.set_ticks([]) \n \nax.tick_params('both',length=5.5,width=2,which='major',color='darkgrey') \nadjust_spines(ax, ['left','bottom']) \nax.spines['top'].set_color('none')\nax.spines['right'].set_color('none') \nax.spines['bottom'].set_linewidth(2)\nax.spines['left'].set_linewidth(2) \n\nplt.plot(yearsat,bering,linewidth=3.5,color='orangered',\n marker='o',markersize=6,\n label=r'\\textbf{NSIDC Sea Ice Index, Version 3}')\nplt.scatter(yearsat[-1],bering[-1],s=45,color='r',zorder=3)\nplt.text(2012.5,0.1823,r'\\textbf{2018}',color='r',fontsize=15)\n\nplt.plot(np.arange(1987,1990,2),np.array([bering[8],bering[10]]),linewidth=1.7,color='orangered',\n label=r'Missing Data',linestyle='--',\n dashes=(1, 0.4))\nxlabels = list(map(str,np.arange(1979,2021,5)))\nplt.xticks(np.arange(1979,2021,5),xlabels,rotation=0,color='darkgrey')\nplt.xlim([1979,2019])\n\nplt.yticks(np.arange(0,2.5,0.1),list(map(str,np.arange(0,2.5,0.1))),\n 
color='darkgrey')\nplt.ylim([0.1,0.8])\n\nax.yaxis.grid(zorder=1,color='w',alpha=0.35,linewidth=0.5)\n\nplt.title(r'\\textbf{DEC-FEB : \\underline{BERING} SEA ICE}',\n fontsize=26,color='darkgrey') \nplt.ylabel(r'\\textbf{Extent [$\\bf{\\times 10^{6}}$\\ \\textbf{km}$\\bf{^2}$]}',\n fontsize=17,alpha=1,color='darkgrey',rotation=90) \n\nle = plt.legend(shadow=False,fontsize=8,loc='upper center',\n bbox_to_anchor=(0.212, 0.13),fancybox=True,frameon=False,ncol=1)\nfor text in le.get_texts():\n text.set_color('darkgrey') \n\nplt.savefig(directoryfigure + 'Bering_SeaIceExtent_DecJanFeb.png',dpi=600)\n\nprint('Completed: Figure plotted!')\nprint('Completed: Script done!')" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.rc", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "numpy.arange", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.text", "matplotlib.pyplot.ylim", "numpy.array", "matplotlib.pyplot.plot", "numpy.genfromtxt", "matplotlib.pyplot.scatter" ] ]
mvaz/textacy
[ "760b96a561eb3379b3a211a0353c9bc47127e99c" ]
[ "textacy/corpus.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nA class for working with a collection of spaCy docs. Includes functionality for\neasily adding, getting, and removing documents; saving to / loading their data\nfrom disk; and tracking basic corpus statistics.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport collections\nimport itertools\nimport logging\nimport math\n\nimport numpy as np\nimport spacy\nimport srsly\nfrom cytoolz import itertoolz\nfrom thinc.neural.ops import NumpyOps\n\nfrom . import cache\nfrom . import compat\nfrom . import io as tio\nfrom . import utils\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass Corpus(object):\n \"\"\"\n An ordered collection of :class:`spacy.tokens.Doc`, all of the same language\n and sharing the same :class:`spacy.language.Language` processing pipeline\n and vocabulary, with data held *in-memory*.\n\n Initialize from a language / ``Language`` and (optionally) one or a stream\n of texts or (text, metadata) pairs:\n\n .. code-block:: pycon\n\n >>> ds = textacy.datasets.CapitolWords()\n >>> records = ds.records(limit=50)\n >>> corpus = textacy.Corpus(\"en\", data=records)\n >>> corpus\n Corpus(50 docs, 32175 tokens)\n\n Add or remove documents, with automatic updating of corpus statistics:\n\n .. code-block:: pycon\n\n >>> texts = ds.texts(congress=114, limit=25)\n >>> corpus.add(texts)\n >>> corpus.add(\"If Burton were a member of Congress, here's what he'd say.\")\n >>> corpus\n Corpus(76 docs, 55906 tokens)\n >>> corpus.remove(lambda doc: doc._.meta.get(\"speaker_name\") == \"Rick Santorum\")\n >>> corpus\n Corpus(61 docs, 48567 tokens)\n\n Get subsets of documents matching your particular use case:\n\n .. code-block:: pycon\n\n >>> match_func = lambda doc: doc._.meta.get(\"speaker_name\") == \"Bernie Sanders\"\n >>> for doc in corpus.get(match_func, limit=3):\n ... print(doc._.preview)\n Doc(159 tokens: \"Mr. Speaker, 480,000 Federal employees are work...\")\n Doc(336 tokens: \"Mr. Speaker, I thank the gentleman for yielding...\")\n Doc(177 tokens: \"Mr. Speaker, if we want to understand why in th...\")\n\n Get or remove documents by indexing, too:\n\n .. code-block:: pycon\n\n >>> corpus[0]._.preview\n 'Doc(159 tokens: \"Mr. Speaker, 480,000 Federal employees are work...\")'\n >>> [doc._.preview for doc in corpus[:3]]\n ['Doc(159 tokens: \"Mr. Speaker, 480,000 Federal employees are work...\")',\n 'Doc(219 tokens: \"Mr. Speaker, a relationship, to work and surviv...\")',\n 'Doc(336 tokens: \"Mr. Speaker, I thank the gentleman for yielding...\")']\n >>> del corpus[:5]\n >>> corpus\n Corpus(56 docs, 41573 tokens)\n\n Compute basic corpus statistics:\n\n .. code-block:: pycon\n\n >>> corpus.n_docs, corpus.n_sents, corpus.n_tokens\n (56, 1771, 41573)\n >>> word_counts = corpus.word_counts(as_strings=True)\n >>> sorted(word_counts.items(), key=lambda x: x[1], reverse=True)[:5]\n [('-PRON-', 2553), ('people', 215), ('year', 148), ('Mr.', 139), ('$', 137)]\n >>> word_doc_counts = corpus.word_doc_counts(weighting=\"freq\", as_strings=True)\n >>> sorted(word_doc_counts.items(), key=lambda x: x[1], reverse=True)[:5]\n [('-PRON-', 0.9821428571428571),\n ('Mr.', 0.7678571428571429),\n ('President', 0.5),\n ('people', 0.48214285714285715),\n ('need', 0.44642857142857145)]\n\n Save corpus data to and load from disk:\n\n .. 
code-block:: pycon\n\n >>> corpus.save(\"~/Desktop/capitol_words_sample.bin.gz\")\n >>> corpus = textacy.Corpus.load(\"en\", \"~/Desktop/capitol_words_sample.bin.gz\")\n >>> corpus\n Corpus(56 docs, 41573 tokens)\n\n Args:\n lang (str or :class:`spacy.language.Language`):\n Language with which spaCy processes (or processed) all documents\n added to the corpus, whether as ``data`` now or later.\n\n Pass a standard 2-letter language code (e.g. \"en\"),\n or the name of a spacy language pipeline (e.g. \"en_core_web_md\"),\n or an already-instantiated :class:`spacy.language.Language` object.\n\n A given / detected language string is then used to instantiate\n a corresponding ``Language`` with all default components enabled.\n data (obj or Iterable[obj]): One or a stream of texts, records,\n or :class:`spacy.tokens.Doc` s to be added to the corpus.\n\n .. seealso:: :meth:`Corpus.add()`\n\n Attributes:\n lang (str)\n spacy_lang (:class:`spacy.language.Language`)\n docs (List[:class:`spacy.tokens.Doc`])\n n_docs (int)\n n_sents (int)\n n_tokens (int)\n \"\"\"\n\n def __init__(self, lang, data=None):\n self.spacy_lang = _get_spacy_lang(lang)\n self.lang = self.spacy_lang.lang\n self.docs = []\n self._doc_ids = []\n self.n_docs = 0\n self.n_sents = 0\n self.n_tokens = 0\n if data:\n self.add(data)\n\n # dunder\n\n def __repr__(self):\n return \"Corpus({} docs, {} tokens)\".format(self.n_docs, self.n_tokens)\n\n def __len__(self):\n return self.n_docs\n\n def __iter__(self):\n for doc in self.docs:\n yield doc\n\n def __contains__(self, doc):\n return id(doc) in self._doc_ids\n\n def __getitem__(self, idx_or_slice):\n return self.docs[idx_or_slice]\n\n def __delitem__(self, idx_or_slice):\n if isinstance(idx_or_slice, int):\n self._remove_one_doc_by_index(idx_or_slice)\n elif isinstance(idx_or_slice, slice):\n start, end, step = idx_or_slice.indices(self.n_docs)\n idxs = compat.range_(start, end, step)\n self._remove_many_docs_by_index(idxs)\n else:\n raise TypeError(\n \"list indices must be integers or slices, not {}\".format(type(idx_or_slice))\n )\n\n # add documents\n\n def add(self, data, batch_size=1000):\n \"\"\"\n Add one or a stream of texts, records, or :class:`spacy.tokens.Doc` s\n to the corpus, ensuring that all processing is or has already been done\n by the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n data (obj or Iterable[obj]):\n str or Iterable[str]\n Tuple[str, dict] or Iterable[Tuple[str, dict]]\n :class:`spacy.tokens.Doc` or Iterable[:class:`spacy.tokens.Doc`]\n batch_size (int)\n\n See Also:\n * :meth:`Corpus.add_text()`\n * :meth:`Corpus.add_texts()`\n * :meth:`Corpus.add_record()`\n * :meth:`Corpus.add_records()`\n * :meth:`Corpus.add_doc()`\n * :meth:`Corpus.add_docs()`\n \"\"\"\n if isinstance(data, compat.unicode_):\n self.add_text(data)\n elif isinstance(data, spacy.tokens.Doc):\n self.add_doc(data)\n elif utils.is_record(data):\n self.add_record(data)\n elif isinstance(data, compat.Iterable):\n first, data = itertoolz.peek(data)\n if isinstance(first, compat.unicode_):\n self.add_texts(data, batch_size=batch_size)\n elif isinstance(first, spacy.tokens.Doc):\n self.add_docs(data)\n elif utils.is_record(first):\n self.add_records(data, batch_size=batch_size)\n else:\n raise TypeError(\n \"data must be one of {} or an interable thereof, not {}\".format(\n {compat.unicode_, spacy.tokens.Doc, tuple},\n type(data),\n )\n )\n else:\n raise TypeError(\n \"data must be one of {} or an interable thereof, not {}\".format(\n {compat.unicode_, spacy.tokens.Doc, tuple},\n 
type(data),\n )\n )\n\n def add_text(self, text):\n \"\"\"\n Add one text to the corpus, processing it into a :class:`spacy.tokens.Doc`\n using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n text (str)\n \"\"\"\n self._add_valid_doc(self.spacy_lang(text))\n\n def add_texts(self, texts, batch_size=1000):\n \"\"\"\n Add a stream of texts to the corpus, efficiently processing them into\n :class:`spacy.tokens.Doc` s using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n texts (Iterable[str])\n batch_size (int)\n \"\"\"\n for doc in self.spacy_lang.pipe(texts, as_tuples=False, batch_size=batch_size):\n self._add_valid_doc(doc)\n\n def add_record(self, record):\n \"\"\"\n Add one record to the corpus, processing it into a :class:`spacy.tokens.Doc`\n using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n record (Tuple[str, dict])\n \"\"\"\n doc = self.spacy_lang(record[0])\n doc._.meta = record[1]\n self._add_valid_doc(doc)\n\n def add_records(self, records, batch_size=1000):\n \"\"\"\n Add a stream of records to the corpus, efficiently processing them into\n :class:`spacy.tokens.Doc` s using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n records (Iterable[Tuple[str, dict]])\n batch_size (int)\n \"\"\"\n for doc, meta in self.spacy_lang.pipe(records, as_tuples=True, batch_size=batch_size):\n doc._.meta = meta\n self._add_valid_doc(doc)\n\n def add_doc(self, doc):\n \"\"\"\n Add one :class:`spacy.tokens.Doc` to the corpus, provided it was processed\n using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n doc (:class:`spacy.tokens.Doc`)\n \"\"\"\n if not isinstance(doc, spacy.tokens.Doc):\n raise TypeError(\n \"doc must be a {}, not {}\".format(spacy.tokens.Doc, type(doc))\n )\n if doc.vocab is not self.spacy_lang.vocab:\n raise ValueError(\n \"doc.vocab ({}) must be the same as corpus.vocab ({})\".format(\n doc.vocab, self.spacy_lang.vocab,\n )\n )\n self._add_valid_doc(doc)\n\n def add_docs(self, docs):\n \"\"\"\n Add a stream of :class:`spacy.tokens.Doc` s to the corpus, provided\n they were processed using the :attr:`Corpus.spacy_lang` pipeline.\n\n Args:\n doc (Iterable[:class:`spacy.tokens.Doc`])\n \"\"\"\n for doc in docs:\n self.add_doc(doc)\n\n def _add_valid_doc(self, doc):\n self.docs.append(doc)\n self._doc_ids.append(id(doc))\n self.n_docs += 1\n self.n_tokens += len(doc)\n if doc.is_sentenced:\n self.n_sents += itertoolz.count(doc.sents)\n\n # get documents\n\n def get(self, match_func, limit=None):\n \"\"\"\n Get all (or N <= ``limit``) docs in :class:`Corpus` for which\n ``match_func(doc)`` is True.\n\n Args:\n match_func (Callable): Function that takes a :class:`spacy.tokens.Doc`\n as input and returns a boolean value. For example::\n\n Corpus.get(lambda x: len(x) >= 100)\n\n gets all docs with at least 100 tokens. And::\n\n Corpus.get(lambda doc: doc._.meta[\"author\"] == \"Burton DeWilde\")\n\n gets all docs whose author was given as 'Burton DeWilde'.\n limit (int): Maximum number of matched docs to return.\n\n Yields:\n :class:`spacy.tokens.Doc`: Next document passing ``match_func``.\n\n .. 
tip:: To get doc(s) by index, treat :class:`Corpus` as a list and use\n Python's usual indexing and slicing: ``Corpus[0]`` gets the first\n document in the corpus; ``Corpus[:5]`` gets the first 5; etc.\n \"\"\"\n matched_docs = (doc for doc in self if match_func(doc) is True)\n for doc in itertools.islice(matched_docs, limit):\n yield doc\n\n # remove documents\n\n def remove(self, match_func, limit=None):\n \"\"\"\n Remove all (or N <= ``limit``) docs in :class:`Corpus` for which\n ``match_func(doc)`` is True. Corpus doc/sent/token counts are adjusted\n accordingly.\n\n Args:\n match_func (func): Function that takes a :class:`spacy.tokens.Doc`\n and returns a boolean value. For example::\n\n Corpus.remove(lambda x: len(x) >= 100)\n\n removes docs with at least 100 tokens. And::\n\n Corpus.remove(lambda doc: doc._.meta[\"author\"] == \"Burton DeWilde\")\n\n removes docs whose author was given as \"Burton DeWilde\".\n limit (int): Maximum number of matched docs to remove.\n\n .. tip:: To remove doc(s) by index, treat :class:`Corpus` as a list and use\n Python's usual indexing and slicing: ``del Corpus[0]`` removes the\n first document in the corpus; ``del Corpus[:5]`` removes the first\n 5; etc.\n \"\"\"\n matched_docs = (doc for doc in self if match_func(doc) is True)\n self._remove_many_docs_by_index(\n self._doc_ids.index(id(doc))\n for doc in itertools.islice(matched_docs, limit)\n )\n\n def _remove_many_docs_by_index(self, idxs):\n for idx in sorted(idxs, reverse=True):\n self._remove_one_doc_by_index(idx)\n\n def _remove_one_doc_by_index(self, idx):\n doc = self.docs[idx]\n self.n_docs -= 1\n self.n_tokens -= len(doc)\n if doc.is_sentenced:\n self.n_sents -= itertoolz.count(doc.sents)\n del self.docs[idx]\n del self._doc_ids[idx]\n\n # useful properties\n\n @property\n def vectors(self):\n \"\"\"Constituent docs' word vectors stacked in a 2d array.\"\"\"\n return np.vstack((doc.vector for doc in self))\n\n @property\n def vector_norms(self):\n \"\"\"Constituent docs' L2-normalized word vectors stacked in a 2d array.\"\"\"\n return np.vstack((doc.vector_norm for doc in self))\n\n # useful methods\n\n def word_counts(self, normalize=\"lemma\", weighting=\"count\", as_strings=False):\n \"\"\"\n Map the set of unique words in :class:`Corpus` to their counts as\n absolute, relative, or binary frequencies of occurence, similar to\n :meth:`Doc._.to_bag_of_words() <textacy.spacier.doc_extensions.to_bag_of_words>`\n but aggregated over all docs.\n\n Args:\n normalize (str): If \"lemma\", lemmatize words before counting; if\n \"lower\", lowercase words before counting; otherwise, words are\n counted using the form with which they appear.\n weighting ({\"count\", \"freq\"}): Type of weight to assign to words.\n If \"count\" (default), weights are the absolute number of\n occurrences (count) of word in corpus.\n If \"freq\", word counts are normalized by the total token count,\n giving their relative frequencies of occurrence.\n\n .. 
note:: The resulting set of frequencies won't (necessarily) sum\n to 1.0, since punctuation and stop words are filtered out after\n counts are normalized.\n\n as_strings (bool): If True, words are returned as strings; if False\n (default), words are returned as their unique integer ids.\n\n Returns:\n dict: mapping of a unique word id or string (depending on the value\n of ``as_strings``) to its absolute, relative, or binary frequency\n of occurrence (depending on the value of ``weighting``).\n\n See Also:\n * :func:`textacy.vsm.get_term_freqs() <textacy.vsm.matrix_utils.get_term_freqs>`\n \"\"\"\n word_counts_ = collections.Counter()\n for doc in self:\n word_counts_.update(\n doc._.to_bag_of_words(\n normalize=normalize, weighting=\"count\", as_strings=as_strings\n )\n )\n if weighting == \"count\":\n word_counts_ = dict(word_counts_)\n elif weighting == \"freq\":\n n_tokens = self.n_tokens\n word_counts_ = {\n word: count / n_tokens for word, count in word_counts_.items()\n }\n else:\n raise ValueError(\n \"weighting='{}' is invalid; valid values are {}\".format(\n weighting, {\"count\", \"freq\"}\n )\n )\n return word_counts_\n\n def word_doc_counts(\n self, normalize=\"lemma\", weighting=\"count\", smooth_idf=True, as_strings=False\n ):\n \"\"\"\n Map the set of unique words in :class:`Corpus` to their *document* counts\n as absolute, relative, inverse, or binary frequencies of occurence.\n\n Args:\n normalize (str): If \"lemma\", lemmatize words before counting; if\n \"lower\", lowercase words before counting; otherwise, words are\n counted using the form with which they appear.\n weighting ({\"count\", \"freq\", \"idf\"}): Type of weight to assign to words.\n If \"count\" (default), weights are the absolute number (count)\n of documents in which word appears. If \"freq\", word doc counts\n are normalized by the total document count, giving their relative\n frequencies of occurrence. 
If \"idf\", weights are the log of the\n inverse relative frequencies: ``log(n_docs / word_doc_count)``\n or (if ``smooth_idf`` is True) ``log(1 + (n_docs / word_doc_count))`` .\n smooth_idf (bool): If True, add 1 to all word doc counts when\n calculating \"idf\" weighting, equivalent to adding a single\n document to the corpus containing every unique word.\n as_strings (bool): If True, words are returned as strings; if False\n (default), words are returned as their unique integer ids\n\n Returns:\n dict: mapping of a unique word id or string (depending on the value\n of ``as_strings``) to the number of documents in which it appears\n weighted as absolute, relative, or binary frequencies (depending\n on the value of ``weighting``).\n\n See Also:\n * :func:`textacy.vsm.get_doc_freqs() <textacy.vsm.matrix_utils.get_doc_freqs>`\n \"\"\"\n word_doc_counts_ = collections.Counter()\n for doc in self:\n word_doc_counts_.update(\n doc._.to_bag_of_words(\n normalize=normalize, weighting=\"binary\", as_strings=as_strings\n )\n )\n if weighting == \"count\":\n word_doc_counts_ = dict(word_doc_counts_)\n elif weighting == \"freq\":\n n_docs = self.n_docs\n word_doc_counts_ = {\n word: count / n_docs for word, count in word_doc_counts_.items()\n }\n elif weighting == \"idf\":\n n_docs = self.n_docs\n if smooth_idf is True:\n word_doc_counts_ = {\n word: math.log(1 + (n_docs / count))\n for word, count in word_doc_counts_.items()\n }\n else:\n word_doc_counts_ = {\n word: math.log(n_docs / count)\n for word, count in word_doc_counts_.items()\n }\n else:\n raise ValueError(\n \"weighting='{}' is invalid; valid values are {}\".format(\n weighting, {\"count\", \"freq\", \"idf\"}\n )\n )\n return word_doc_counts_\n\n # file io\n\n def save(self, filepath):\n \"\"\"\n Save :class:`Corpus` to disk as binary data.\n\n Args:\n filepath (str): Full path to file on disk where :class:`Corpus` data\n will be saved as a binary file.\n\n See Also:\n :meth:`Corpus.load()`\n \"\"\"\n attrs = [\n spacy.attrs.ORTH,\n spacy.attrs.SPACY,\n spacy.attrs.LEMMA,\n spacy.attrs.ENT_IOB,\n spacy.attrs.ENT_TYPE,\n ]\n if self[0].is_tagged:\n attrs.append(spacy.attrs.TAG)\n if self[0].is_parsed:\n attrs.append(spacy.attrs.HEAD)\n attrs.append(spacy.attrs.DEP)\n else:\n attrs.append(spacy.attrs.SENT_START)\n\n tokens = []\n lengths = []\n strings = set()\n user_datas = []\n for doc in self:\n tokens.append(doc.to_array(attrs))\n lengths.append(len(doc))\n strings.update(tok.text for tok in doc)\n user_datas.append(doc.user_data)\n\n msg = {\n \"meta\": self.spacy_lang.meta,\n \"attrs\": attrs,\n \"tokens\": np.vstack(tokens).tobytes(\"C\"),\n \"lengths\": np.asarray(lengths, dtype=\"int32\").tobytes(\"C\"),\n \"strings\": list(strings),\n \"user_datas\": user_datas,\n }\n with tio.open_sesame(filepath, mode=\"wb\") as f:\n f.write(srsly.msgpack_dumps(msg))\n\n @classmethod\n def load(cls, lang, filepath):\n \"\"\"\n Load previously saved :class:`Corpus` binary data, reproduce the original\n `:class:`spacy.tokens.Doc`s tokens and annotations, and instantiate\n a new :class:`Corpus` from them.\n\n Args:\n lang (str or :class:`spacy.language.Language`)\n filepath (str): Full path to file on disk where :class:`Corpus` data\n was previously saved as a binary file.\n\n Returns:\n :class:`Corpus`\n\n See Also:\n :meth:`Corpus.save()`\n \"\"\"\n spacy_lang = _get_spacy_lang(lang)\n with tio.open_sesame(filepath, mode=\"rb\") as f:\n msg = srsly.msgpack_loads(f.read())\n if spacy_lang.meta != msg[\"meta\"]:\n LOGGER.warning(\"the spacy langs 
are different!\")\n for string in msg[\"strings\"]:\n spacy_lang.vocab[string]\n attrs = msg[\"attrs\"]\n lengths = np.frombuffer(msg[\"lengths\"], dtype=\"int32\")\n flat_tokens = np.frombuffer(msg[\"tokens\"], dtype=\"uint64\")\n flat_tokens = flat_tokens.reshape(\n (flat_tokens.size // len(attrs), len(attrs))\n )\n tokens = np.asarray(NumpyOps().unflatten(flat_tokens, lengths))\n user_datas = msg[\"user_datas\"]\n\n def _make_spacy_docs(tokens, user_datas):\n for toks, user_data in compat.zip_(tokens, user_datas):\n doc = spacy.tokens.Doc(\n spacy_lang.vocab,\n words=[spacy_lang.vocab.strings[orth] for orth in toks[:, 0]],\n spaces=np.ndarray.tolist(toks[:, 1]),\n )\n doc = doc.from_array(attrs[2:], toks[:, 2:])\n doc.user_data = user_data\n yield doc\n\n return cls(spacy_lang, data=_make_spacy_docs(tokens, user_datas))\n\n\ndef _get_spacy_lang(lang):\n if isinstance(lang, compat.unicode_):\n return cache.load_spacy_lang(lang)\n elif isinstance(lang, spacy.language.Language):\n return lang\n else:\n raise TypeError(\n \"`lang` must be {}, not {}\".format(\n {compat.unicode_, spacy.language.Language}, type(lang)\n )\n )\n" ]
[ [ "numpy.vstack", "numpy.ndarray.tolist", "numpy.asarray", "numpy.frombuffer" ] ]
TomKingsfordUoA/ResidualMaskingNetwork
[ "b77abb6e548b9a09b5c96b1592d71332b45d050e" ]
[ "rmn/models/residual_attention_network.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport functools\nfrom torch.autograd import Variable\nimport numpy as np\nfrom .basic_layers import ResidualBlock\nfrom .attention_module import AttentionModule\n\n\nclass ResidualAttentionModel(nn.Module):\n def __init__(self, in_channels=3, num_classes=1000):\n super(ResidualAttentionModel, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n )\n self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.residual_block1 = ResidualBlock(64, 256)\n self.attention_module1 = AttentionModule(256, 256, (56, 56), (28, 28), (14, 14))\n self.residual_block2 = ResidualBlock(256, 512, 2)\n self.attention_module2 = AttentionModule(512, 512, (28, 28), (14, 14), (7, 7))\n self.residual_block3 = ResidualBlock(512, 1024, 2)\n self.attention_module3 = AttentionModule(1024, 1024, (14, 14), (7, 7), (4, 4))\n self.residual_block4 = ResidualBlock(1024, 2048, 2)\n self.residual_block5 = ResidualBlock(2048, 2048)\n self.residual_block6 = ResidualBlock(2048, 2048)\n self.mpool2 = nn.Sequential(\n nn.BatchNorm2d(2048),\n nn.ReLU(inplace=True),\n nn.AvgPool2d(kernel_size=7, stride=1),\n )\n self.fc = nn.Linear(2048, num_classes)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.mpool1(out)\n # print(out.data)\n out = self.residual_block1(out)\n out = self.attention_module1(out)\n out = self.residual_block2(out)\n out = self.attention_module2(out)\n out = self.residual_block3(out)\n # print(out.data)\n out = self.attention_module3(out)\n out = self.residual_block4(out)\n out = self.residual_block5(out)\n out = self.residual_block6(out)\n out = self.mpool2(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n\n return out\n\n\ndef res_attention(in_channels=3, num_classes=1000):\n return ResidualAttentionModel(in_channels, num_classes)\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.Conv2d", "torch.nn.AvgPool2d", "torch.nn.ReLU" ] ]
kumiori/stability-bifurcation
[ "9a82bf40742a9b16122b7a476ad8aec65fe22539" ]
[ "scripts/parametric_1d.py" ]
[ "from traction_1d import *\nimport numpy as np\nfrom utils import ColorPrint\n\n# ell_list = np.linspace(.1, .5, 20)\n# ell_min = 0.1\n#ell_max = 2.\nell_list = np.logspace(np.log10(.15), np.log10(1.5), 20)\n\ndef t_stab(ell, q=2):\n\tcoeff_stab = 2.*np.pi*q/(q+1)**(3./2.)*np.sqrt(2)\n\tif 1/ell > coeff_stab:\n\t\treturn 1.\n\telse:\n\t\treturn coeff_stab*ell/1.\n\n\ndef t_bif(ell, q=2):\n\t# coeff = t_stab(ell, q)*(q+1)/(2.*q)\t\n\tcoeff_bif = 2.*np.pi*q/(q+1)**(3./2.)*np.sqrt(2)*(q+1)/(2.*q)\n\tif 1/ell > coeff_bif:\n\t\treturn 1.\n\telse:\n\t\treturn coeff_bif*ell/1.\n\nprint([t_stab(ell) for ell in ell_list])\nprint([t_bif(ell) for ell in ell_list])\nprint([3./4.*t_stab(ell) for ell in ell_list])\n\n# sys.exit()\nfor ell in ell_list:\n\t# tstab = 1./ell*4*np.pi/3.**(3./2.)\n\teps = .3\n\tell_r = ell*np.sqrt(2)\n\t# *np.sqrt(2)\n\ttstab = t_stab(ell_r, 2)\n\ttbif = t_bif(ell_r, 2)\n\tprint('tstab, tbif', tstab, tbif)\n\t# sys.exit(//z)\n\t# tstab = 1.\n\t# lmin = tstab - 1.\n\t# load_min = load_min if lmin > 0. else 0.\n\t# load_min = tstab - 1. - tstab/10 \n\tload_min = .5 \n\t# load_max = tstab + 1. + tstab/5\n\tload_max = 5.\n\t# loads = [tstab-2*eps, tstab+2*eps]\n\tColorPrint.print_info('Solving ell {}'.format(ell))\n\tColorPrint.print_info('Load: [{} {}]'.format(load_min, load_max))\n\tColorPrint.print_info('stab limit: {} '.format(tstab))\n\tColorPrint.print_info('uniq limit: {} '.format(tbif))\n\tloads = np.logspace(load_min, load_max, 50)\n\ttry:\n\t\ttraction_1d(\n\t\t\tell=ell,\n\t\t\tload_min=load_min,\n\t\t\tload_max=load_max,\n\t\t\t# loads = loads,\n\t\t\tnsteps=50,\n\t\t\tn=7,\n\t\t\t# Lx=Lx,\n\t\t\t# outdir='../output/parametric-traction-plane-stress/ell-{:2f}'.format(ell),\n\t\t\t# outdir='../output/parametric-traction-1d-validation-paper/ell-{:2f}'.format(ell),\n\t\t\t# outdir='../output/parametric-traction-1d-validation-paperdoublecheck/ell-{:2f}'.format(ell),\n\t\t\toutdir='../output/parametric-traction-1d-validation-auto/ell-{:2f}'.format(ell),\n\t\t\t# outdir='../output/parametric-traction-1d-validation-paper-auto/ell-{:2f}'.format(ell),\n\t\t\t# outdir='../output/parametric-traction-n-10/ell-{:2f}'.format(ell),\n\t\t\tbreakifunstable = True\n\t\t)\n\texcept:\n\t\tColorPrint.print_warn(\"Something went somewhere, most likely an instability\")\n\n" ]
[ [ "numpy.sqrt", "numpy.log10", "numpy.logspace" ] ]
hishamsajid/vsketch
[ "1b35b794972097b8fb5af94ea6e93f3e8c69448c" ]
[ "examples/random_flower/sketch_random_flower.py" ]
[ "import math\n\nimport numpy as np\n\nimport vsketch\n\n\nclass RandomFlowerSketch(vsketch.SketchClass):\n num_line = vsketch.Param(200, 1)\n point_per_line = vsketch.Param(100, 1)\n rdir_range = vsketch.Param(math.pi / 6)\n\n def draw(self, vsk: vsketch.Vsketch) -> None:\n vsk.size(\"a4\", landscape=True)\n vsk.scale(\"cm\")\n\n vsk.rotate(-90, degrees=True)\n\n noise_coord = np.linspace(0, 1, self.point_per_line)\n dirs = np.linspace(0, 2 * math.pi, self.num_line)\n perlin = vsk.noise(noise_coord, dirs, [0, 100])\n\n for i, direction in enumerate(dirs):\n rdir = vsk.map(\n perlin[:, i, 0], 0, 1, direction - self.rdir_range, direction + self.rdir_range\n )\n roffset = vsk.map(perlin[:, i, 1], 0, 1, 0.05, 0.12)\n\n xoffset = roffset * np.cos(rdir)\n yoffset = roffset * np.sin(rdir)\n\n vsk.polygon(np.cumsum(xoffset), np.cumsum(yoffset))\n\n def finalize(self, vsk: vsketch.Vsketch) -> None:\n vsk.vpype(\"linemerge linesimplify reloop linesort\")\n\n\nif __name__ == \"__main__\":\n RandomFlowerSketch.display()\n" ]
[ [ "numpy.cumsum", "numpy.sin", "numpy.linspace", "numpy.cos" ] ]
mpsampat/kaggle-ds-bowl-2018-baseline
[ "c769ba7b4db4d1f38f7190db9a11bf812b2b983a" ]
[ "bowl_config.py" ]
[ "from config import Config\nimport numpy as np\nclass BowlConfig(Config):\n \"\"\"Configuration for training on the toy shapes dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"bowl\"\n\n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n LEARNING_RATE = 0.00005\n # Number of classes (including background)\n NUM_CLASSES = 1 + 1 # background + nuclei\n\n # Use small images for faster training. Set the limits of the small side\n # the large side, and that determines the image shape.\n IMAGE_MIN_DIM = 256\n IMAGE_MAX_DIM = 256\n\n # Use smaller anchors because our image and objects are small\n RPN_ANCHOR_SCALES = (8, 16, 32, 64) # anchor side in pixels\n\n # Reduce training ROIs per image because the images are small and have\n # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.\n TRAIN_ROIS_PER_IMAGE = 500\n\n STEPS_PER_EPOCH = 600\n\n # use small validation steps since the epoch is small\n VALIDATION_STEPS = 10\n MEAN_PIXEL = np.array([0.0, 0.0, 0.0])\n USE_MINI_MASK = False\n MINI_MASK_SHAPE = (28, 28)\t\n MAX_GT_INSTANCES = 500\n\n DETECTION_MAX_INSTANCES = 512\n\n RESNET_ARCHITECTURE = \"resnet50\"\n\n\nbowl_config = BowlConfig()\nbowl_config.display()\n" ]
[ [ "numpy.array" ] ]
flatironinstitute/sparse_dot
[ "d04a277016ec4af4e507131a3751daca028edc1d" ]
[ "sparse_dot_mkl/tests/test_gram_matrix.py" ]
[ "import unittest\nimport numpy as np\nimport numpy.testing as npt\nfrom sparse_dot_mkl import gram_matrix_mkl\nfrom sparse_dot_mkl.tests.test_mkl import MATRIX_1\n\n\nclass TestGramMatrix(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n gram_ut = np.dot(MATRIX_1.A.T, MATRIX_1.A)\n gram_ut[np.tril_indices(gram_ut.shape[0], k=-1)] = 0.\n cls.gram_ut = gram_ut\n\n gram_ut_t = np.dot(MATRIX_1.A, MATRIX_1.A.T)\n gram_ut_t[np.tril_indices(gram_ut_t.shape[0], k=-1)] = 0.\n cls.gram_ut_t = gram_ut_t\n\n def setUp(self):\n self.mat1 = MATRIX_1.copy()\n self.mat1_d = MATRIX_1.A\n\n def test_gram_matrix_sp(self):\n mat2 = gram_matrix_mkl(self.mat1)\n npt.assert_array_almost_equal(mat2.A, self.gram_ut)\n\n with self.assertRaises(ValueError):\n gram_matrix_mkl(self.mat1, out=np.zeros((self.mat1.shape[0], self.mat1.shape[0])))\n\n def test_gram_matrix_sp_single(self):\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32))\n npt.assert_array_almost_equal(mat2.A, self.gram_ut)\n\n def test_gram_matrix_d_single(self):\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32), dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32), dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float32), out_scalar=1.)\n mat2[np.tril_indices(mat2.shape[0], k=-1)] = 0.\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n with self.assertRaises(ValueError):\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32), dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1])),\n out_scalar=1.)\n\n def test_gram_matrix_d(self):\n mat2 = gram_matrix_mkl(self.mat1, dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(self.mat1, dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float64), out_scalar=1.)\n mat2[np.tril_indices(mat2.shape[0], k=-1)] = 0.\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_sp_t(self):\n mat2 = gram_matrix_mkl(self.mat1, transpose=True)\n npt.assert_array_almost_equal(mat2.A, self.gram_ut_t)\n\n def test_gram_matrix_d_t(self):\n mat2 = gram_matrix_mkl(self.mat1, dense=True, transpose=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut_t)\n\n def test_gram_matrix_csc_sp(self):\n mat2 = gram_matrix_mkl(self.mat1.tocsc(), cast=True)\n npt.assert_array_almost_equal(mat2.A, self.gram_ut)\n\n def test_gram_matrix_csc_d(self):\n mat2 = gram_matrix_mkl(self.mat1.tocsc(), dense=True, cast=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_dd_double(self):\n mat2 = gram_matrix_mkl(self.mat1.A, dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(self.mat1.A, dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float64), out_scalar=1.)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_dd_single(self):\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32).A, dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(self.mat1.astype(np.float32).A, dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float32), out_scalar=1.)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_dd_double_F(self):\n mat2 = gram_matrix_mkl(np.asarray(self.mat1.A, order=\"F\"), dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(np.asarray(self.mat1.A, order=\"F\"), dense=True,\n 
out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float64, order=\"F\"),\n out_scalar=1.)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n def test_gram_matrix_dd_single_F(self):\n mat2 = gram_matrix_mkl(np.asarray(self.mat1.astype(np.float32).A, order=\"F\"), dense=True)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n\n mat2 = gram_matrix_mkl(np.asarray(self.mat1.astype(np.float32).A, order=\"F\"), dense=True,\n out=np.zeros((self.mat1.shape[1], self.mat1.shape[1]), dtype=np.float32, order=\"F\"),\n out_scalar=1.)\n npt.assert_array_almost_equal(mat2, self.gram_ut)\n" ]
[ [ "numpy.zeros", "numpy.tril_indices", "numpy.asarray", "numpy.testing.assert_array_almost_equal", "numpy.dot" ] ]
slamer59/awesome-panel
[ "91c30bd6d6859eadf9c65b1e143952f7e64d5290" ]
[ "application/pages/awesome_panel_express_tests/test_perspective.py" ]
[ "\"\"\"\r\n# Perspective Viewer\r\n\r\n[Perspective](https://github.com/finos/perspective#readme) is an interactive visualization\r\ncomponent for large, real-time datasets. It comes with the `perspective-viewer` web component.\r\n\r\nIt enables analysts and traders at large banks like J.P.Morgan to understand their data. But it is\r\nalso very usefull for analysts, engineers, scientists, data engineers and data scientists in\r\ngeneral.\r\n\r\n[Panel](https://panel.holoviz.org/) is a pwerful framework for creating awesome analytics apps\r\nin Python.\r\n\r\nIn this example we demonstrate how to use the `perspective-viewer` web component with Panel.\r\n\r\nIf you want Perspective supported in Panel, then go to GitHub and upvote\r\n\r\n- [Panel Feature 1107](https://github.com/holoviz/panel/issues/1107): Add Perspective widget.\r\n- [Perspective Feature 942](https://github.com/finos/perspective/issues/942): Enable Perspective in\r\nPanel.\r\n- [Panel PR 1261](https://github.com/holoviz/panel/pull/1261): Perspective-Viewer WebComponent\r\nExample.\r\n\r\n**Author:** [Marc Skov Madsen](https://datamodelanalytics.com)\r\n([awesome-panel.org](https://awesome-panel.org))\r\n\r\n**Tags:**\r\n[Perspective](https://github.com/finos/perspective#readme),\r\n[Panel](https://panel.holoviz.org/),\r\n[Python](https://www.python.org/)\r\n\r\n**Resources:**\r\n[Code](https://github.com/MarcSkovMadsen/awesome-panel/blob/master/application/pages/\\\r\nawesome_panel_express_tests/test_perspective.py),\r\n[Data](https://datahub.io/core/s-and-p-500-companies-financials)\r\n\"\"\"\r\n\r\nimport pathlib\r\n\r\nimport pandas as pd\r\nimport panel as pn\r\nfrom awesome_panel.express.components import PerspectiveViewer\r\n\r\nDARK_BACKGROUND = \"rgb(42, 44, 47)\"\r\nDARK_COLOR = \"white\"\r\nPERSPECTIVE_LOGO = \"https://perspective.finos.org/img/logo.png\"\r\nPANEL_LOGO = \"https://panel.holoviz.org/_static/logo_horizontal.png\"\r\nROOT = pathlib.Path(__file__).parent\r\n# Source: https://datahub.io/core/s-and-p-500-companies-financials\r\nDATA = ROOT / \"PerspectiveViewerData.csv\"\r\n\r\ndataframe = pd.read_csv(DATA)\r\n\r\n\r\ndef create_app(**params) -> pn.Column:\r\n \"\"\"Returns app using PerspectiveViewer\r\n\r\n Returns:\r\n pn.Column: The app\r\n \"\"\"\r\n\r\n perspective_viewer = PerspectiveViewer(sizing_mode=\"stretch_both\", data=dataframe)\r\n\r\n top_app_bar = pn.Row(\r\n pn.pane.PNG(PERSPECTIVE_LOGO, height=50, margin=(10, 25, 10, 10)),\r\n # pn.pane.PNG(PANEL_LOGO, height=40, margin=(10, 0, 10, 0)),\r\n pn.layout.HSpacer(),\r\n margin=0,\r\n background=DARK_BACKGROUND,\r\n )\r\n\r\n settings_parameters = [\r\n \"theme\",\r\n \"row_pivots\",\r\n \"plugin\",\r\n \"columns\",\r\n \"aggregates\",\r\n \"filters\",\r\n \"sort\",\r\n \"rows\",\r\n \"column_pivots\",\r\n ]\r\n\r\n settings_pane = pn.Param(\r\n perspective_viewer,\r\n parameters=settings_parameters,\r\n width=200,\r\n sizing_mode=\"stretch_height\",\r\n background=\"#9E9E9E\",\r\n )\r\n\r\n return pn.Column(\r\n pn.pane.Markdown(__doc__),\r\n top_app_bar,\r\n pn.Row(\r\n perspective_viewer,\r\n pn.layout.VSpacer(width=10),\r\n settings_pane,\r\n sizing_mode=\"stretch_both\",\r\n margin=0,\r\n background=DARK_BACKGROUND,\r\n ),\r\n pn.layout.HSpacer(height=50),\r\n **params\r\n )\r\n\r\n\r\ndef view() -> pn.Column:\r\n \"\"\"Return a PerspectiveViewer Test App for inclusion in the Gallery at awesome-panel.org\r\n\r\n Returns:\r\n pn.Column: The app\r\n \"\"\"\r\n return create_app(height=800, sizing_mode=\"stretch_width\")\r\n\r\n\r\nif 
__name__.startswith(\"bokeh\"):\r\n PerspectiveViewer.config()\r\n view().servable()\r\n" ]
[ [ "pandas.read_csv" ] ]
OscarPellicer/probreg
[ "8f1dd23dd86371b8040abad580332ff36967c078" ]
[ "tests/test_svr.py" ]
[ "import unittest\nimport numpy as np\nimport transforms3d as t3d\nimport open3d as o3\nfrom probreg import l2dist_regs\nfrom probreg import transformation as tf\n\n\nclass SVRTest(unittest.TestCase):\n def setUp(self):\n pcd = o3.io.read_point_cloud('data/horse.ply')\n pcd = pcd.voxel_down_sample(voxel_size=0.01)\n self._source = np.asarray(pcd.points)\n rot = t3d.euler.euler2mat(*np.random.uniform(0.0, np.pi / 4, 3))\n self._tf = tf.RigidTransformation(rot, np.zeros(3))\n self._target = self._tf.transform(self._source)\n\n def test_svr_registration(self):\n res = l2dist_regs.registration_svr(self._source, self._target)\n self.assertTrue(np.allclose(t3d.euler.mat2euler(res.rot),\n t3d.euler.mat2euler(self._tf.rot), atol=1.0e-1, rtol=1.0e-1))\n self.assertTrue(np.allclose(res.t, self._tf.t, atol=1.0e-2, rtol=1.0e-3))\n\nif __name__ == \"__main__\":\n unittest.main()" ]
[ [ "numpy.random.uniform", "numpy.allclose", "numpy.asarray", "numpy.zeros" ] ]
anishacharya/BGMD
[ "03dee098217d2b9a209fea5759e2e0a2237390a5" ]
[ "src/aggregation_manager/trimmed_mean.py" ]
[ "# Copyright (c) Anish Acharya.\n# Licensed under the MIT License\nimport numpy as np\nfrom .base_gar import GAR\nfrom scipy import stats\nfrom typing import List\n\"\"\"\nComputes Trimmed mean estimates\nCite: Yin, Chen, Ramchandran, Bartlett : Byzantine-Robust Distributed Learning: Towards Optimal Statistical Rates \n\"\"\"\n\n\nclass TrimmedMean(GAR):\n def __init__(self, aggregation_config):\n GAR.__init__(self, aggregation_config=aggregation_config)\n self.trimmed_mean_config = aggregation_config.get('trimmed_mean_config', {})\n self.proportion = self.trimmed_mean_config.get('proportion', 0.1)\n\n def aggregate(self, G: np.ndarray, ix: List[int] = None, axis=0) -> np.ndarray:\n agg_grad = stats.trim_mean(a=G, proportiontocut=self.proportion, axis=axis)\n if ix is not None:\n return agg_grad[ix]\n else:\n return agg_grad\n" ]
[ [ "scipy.stats.trim_mean" ] ]
guiomar/mne-python
[ "2d19800a07904cfe69c1ba290c3eaf712625c6ab" ]
[ "mne/io/nirx/tests/test_nirx.py" ]
[ "# -*- coding: utf-8 -*-\n# Authors: Robert Luke <[email protected]>\n# Eric Larson <[email protected]>\n# simplified BSD-3 license\n\nimport os.path as op\nimport shutil\nimport os\nimport datetime as dt\nimport numpy as np\n\nimport pytest\nfrom numpy.testing import assert_allclose, assert_array_equal\n\nfrom mne import pick_types\nfrom mne.datasets.testing import data_path, requires_testing_data\nfrom mne.io import read_raw_nirx, read_raw_snirf\nfrom mne.utils import requires_h5py\nfrom mne.io.tests.test_raw import _test_raw_reader\nfrom mne.preprocessing import annotate_nan\nfrom mne.transforms import apply_trans, _get_trans\nfrom mne.preprocessing.nirs import source_detector_distances,\\\n short_channels\nfrom mne.io.constants import FIFF\n\nfname_nirx_15_0 = op.join(data_path(download=False),\n 'NIRx', 'nirscout', 'nirx_15_0_recording')\nfname_nirx_15_2 = op.join(data_path(download=False),\n 'NIRx', 'nirscout', 'nirx_15_2_recording')\nfname_nirx_15_2_short = op.join(data_path(download=False),\n 'NIRx', 'nirscout',\n 'nirx_15_2_recording_w_short')\nfname_nirx_15_3_short = op.join(data_path(download=False),\n 'NIRx', 'nirscout', 'nirx_15_3_recording')\n\n\n# This file has no saturated sections\nnirsport1_wo_sat = op.join(data_path(download=False), 'NIRx', 'nirsport_v1',\n 'nirx_15_3_recording_wo_saturation')\n# This file has saturation, but not on the optode pairing in montage\nnirsport1_w_sat = op.join(data_path(download=False), 'NIRx', 'nirsport_v1',\n 'nirx_15_3_recording_w_saturation_'\n 'not_on_montage_channels')\n# This file has saturation in channels of interest\nnirsport1_w_fullsat = op.join(data_path(download=False), 'NIRx', 'nirsport_v1',\n 'nirx_15_3_recording_w_'\n 'saturation_on_montage_channels')\n\n# NIRSport2 device using Aurora software and matching snirf file\nnirsport2 = op.join(data_path(download=False), 'NIRx', 'nirsport_v2',\n 'aurora_recording _w_short_and_acc')\nnirsport2_snirf = op.join(data_path(download=False), 'SNIRF', 'NIRx',\n 'NIRSport2', '1.0.3', '2021-05-05_001.snirf')\n\nnirsport2_2021_9 = op.join(data_path(download=False), 'NIRx', 'nirsport_v2',\n 'aurora_2021_9')\nsnirf_nirsport2_20219 = op.join(data_path(download=False),\n 'SNIRF', 'NIRx', 'NIRSport2', '2021.9',\n '2021-10-01_002.snirf')\n\n\n@requires_h5py\n@requires_testing_data\[email protected]('ignore:.*Extraction of measurement.*:')\[email protected]('fname_nirx, fname_snirf', (\n [nirsport2, nirsport2_snirf],\n [nirsport2_2021_9, snirf_nirsport2_20219],\n))\ndef test_nirsport_v2_matches_snirf(fname_nirx, fname_snirf):\n \"\"\"Test NIRSport2 raw files return same data as snirf.\"\"\"\n raw = read_raw_nirx(fname_nirx, preload=True)\n raw_snirf = read_raw_snirf(fname_snirf, preload=True)\n\n assert_allclose(raw._data, raw_snirf._data)\n\n # Check the timing of annotations match (naming is different)\n assert_allclose(raw.annotations.onset, raw_snirf.annotations.onset)\n\n assert_array_equal(raw.ch_names, raw_snirf.ch_names)\n\n # This test fails as snirf encodes name incorrectly.\n # assert raw.info[\"subject_info\"][\"first_name\"] ==\n # raw_snirf.info[\"subject_info\"][\"first_name\"]\n\n\n@requires_testing_data\[email protected]('ignore:.*Extraction of measurement.*:')\ndef test_nirsport_v2():\n \"\"\"Test NIRSport2 file.\"\"\"\n raw = read_raw_nirx(nirsport2, preload=True)\n assert raw._data.shape == (40, 128)\n\n # Test distance between optodes matches values from\n # nirsite https://github.com/mne-tools/mne-testing-data/pull/86\n # figure 3\n allowed_distance_error = 0.005\n distances = 
source_detector_distances(raw.info)\n assert_allclose(distances[::2][:14],\n [0.0304, 0.0411, 0.008, 0.0400, 0.008, 0.0310, 0.0411,\n 0.008, 0.0299, 0.008, 0.0370, 0.008, 0.0404, 0.008],\n atol=allowed_distance_error)\n\n # Test location of detectors\n # The locations of detectors can be seen in the first\n # figure on this page...\n # https://github.com/mne-tools/mne-testing-data/pull/86\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D1'\n assert_allclose(\n mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][2][3:5] == 'D6'\n assert_allclose(\n mni_locs[2], [-0.0841, -0.0138, 0.0248], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][34][3:5] == 'D5'\n assert_allclose(\n mni_locs[34], [0.0845, -0.0451, -0.0123], atol=allowed_dist_error)\n\n # Test location of sensors\n # The locations of sensors can be seen in the second\n # figure on this page...\n # https://github.com/mne-tools/mne-testing-data/pull/86\n locs = [ch['loc'][3:6] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][:2] == 'S1'\n assert_allclose(\n mni_locs[0], [-0.0848, -0.0162, -0.0163], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][9][:2] == 'S2'\n assert_allclose(\n mni_locs[9], [-0.0, -0.1195, 0.0142], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][34][:2] == 'S8'\n assert_allclose(\n mni_locs[34], [0.0828, -0.046, 0.0285], atol=allowed_dist_error)\n\n assert len(raw.annotations) == 3\n assert raw.annotations.description[0] == '1.0'\n assert raw.annotations.description[2] == '6.0'\n # Lose tolerance as I am eyeballing the time differences on screen\n assert_allclose(\n np.diff(raw.annotations.onset), [2.3, 3.1], atol=0.1)\n\n mon = raw.get_montage()\n assert len(mon.dig) == 43\n\n\n@requires_testing_data\[email protected]('ignore:.*Extraction of measurement.*:')\ndef test_nirsport_v1_wo_sat():\n \"\"\"Test NIRSport1 file with no saturation.\"\"\"\n raw = read_raw_nirx(nirsport1_wo_sat, preload=True)\n\n # Test data import\n assert raw._data.shape == (26, 164)\n assert raw.info['sfreq'] == 10.416667\n\n # By default real data is returned\n assert np.sum(np.isnan(raw.get_data())) == 0\n\n raw = read_raw_nirx(nirsport1_wo_sat, preload=True, saturated='nan')\n data = raw.get_data()\n assert data.shape == (26, 164)\n assert np.sum(np.isnan(data)) == 0\n\n raw = read_raw_nirx(nirsport1_wo_sat, saturated='annotate')\n data = raw.get_data()\n assert data.shape == (26, 164)\n assert np.sum(np.isnan(data)) == 0\n\n\[email protected]('ignore:.*Extraction of measurement.*:')\n@requires_testing_data\ndef test_nirsport_v1_w_sat():\n \"\"\"Test NIRSport1 file with NaNs but not in channel of interest.\"\"\"\n raw = read_raw_nirx(nirsport1_w_sat)\n\n # Test data import\n data = raw.get_data()\n assert data.shape == (26, 176)\n assert raw.info['sfreq'] == 10.416667\n assert np.sum(np.isnan(data)) == 0\n\n raw = read_raw_nirx(nirsport1_w_sat, saturated='nan')\n data = raw.get_data()\n assert data.shape == (26, 176)\n assert np.sum(np.isnan(data)) == 0\n\n raw = read_raw_nirx(nirsport1_w_sat, saturated='annotate')\n data = raw.get_data()\n assert data.shape == (26, 176)\n assert np.sum(np.isnan(data)) == 0\n\n\[email protected]('ignore:.*Extraction of 
measurement.*:')\n@requires_testing_data\[email protected]('preload', (True, False))\ndef test_nirsport_v1_w_bad_sat(preload):\n \"\"\"Test NIRSport1 file with NaNs.\"\"\"\n fname = nirsport1_w_fullsat\n raw = read_raw_nirx(fname, preload=preload)\n data = raw.get_data()\n assert not np.isnan(data).any()\n assert len(raw.annotations) == 5\n # annotated version and ignore should have same data but different annot\n raw_ignore = read_raw_nirx(fname, saturated='ignore', preload=preload)\n assert_allclose(raw_ignore.get_data(), data)\n assert len(raw_ignore.annotations) == 2\n assert not any('NAN' in d for d in raw_ignore.annotations.description)\n # nan version should not have same data, but we can give it the same annot\n raw_nan = read_raw_nirx(fname, saturated='nan', preload=preload)\n data_nan = raw_nan.get_data()\n assert np.isnan(data_nan).any()\n assert not np.allclose(raw_nan.get_data(), data)\n raw_nan_annot = raw_ignore.copy()\n raw_nan_annot.set_annotations(annotate_nan(raw_nan))\n use_mask = np.where(raw.annotations.description == 'BAD_SATURATED')\n for key in ('onset', 'duration'):\n a = getattr(raw_nan_annot.annotations, key)[::2] # one ch in each\n b = getattr(raw.annotations, key)[use_mask] # two chs in each\n assert_allclose(a, b)\n\n\n@requires_testing_data\ndef test_nirx_hdr_load():\n \"\"\"Test reading NIRX files using path to header file.\"\"\"\n fname = fname_nirx_15_2_short + \"/NIRS-2019-08-23_001.hdr\"\n raw = read_raw_nirx(fname, preload=True)\n\n # Test data import\n assert raw._data.shape == (26, 145)\n assert raw.info['sfreq'] == 12.5\n\n\n@requires_testing_data\ndef test_nirx_missing_warn():\n \"\"\"Test reading NIRX files when missing data.\"\"\"\n with pytest.raises(FileNotFoundError, match='does not exist'):\n read_raw_nirx(fname_nirx_15_2_short + \"1\", preload=True)\n\n\n@requires_testing_data\ndef test_nirx_missing_evt(tmp_path):\n \"\"\"Test reading NIRX files when missing data.\"\"\"\n shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + \"/data/\")\n os.rename(str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.evt\",\n str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.xxx\")\n fname = str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.hdr\"\n raw = read_raw_nirx(fname, preload=True)\n assert raw.annotations.onset.shape == (0, )\n\n\n@requires_testing_data\ndef test_nirx_dat_warn(tmp_path):\n \"\"\"Test reading NIRX files when missing data.\"\"\"\n shutil.copytree(fname_nirx_15_2_short, str(tmp_path) + \"/data/\")\n os.rename(str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.dat\",\n str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.tmp\")\n fname = str(tmp_path) + \"/data\" + \"/NIRS-2019-08-23_001.hdr\"\n with pytest.raises(RuntimeWarning, match='A single dat'):\n read_raw_nirx(fname, preload=True)\n\n\n@requires_testing_data\ndef test_nirx_15_2_short():\n \"\"\"Test reading NIRX files.\"\"\"\n raw = read_raw_nirx(fname_nirx_15_2_short, preload=True)\n\n # Test data import\n assert raw._data.shape == (26, 145)\n assert raw.info['sfreq'] == 12.5\n assert raw.info['meas_date'] == dt.datetime(2019, 8, 23, 7, 37, 4, 540000,\n tzinfo=dt.timezone.utc)\n\n # Test channel naming\n assert raw.info['ch_names'][:4] == [\"S1_D1 760\", \"S1_D1 850\",\n \"S1_D9 760\", \"S1_D9 850\"]\n assert raw.info['ch_names'][24:26] == [\"S5_D13 760\", \"S5_D13 850\"]\n\n # Test frequency encoding\n assert raw.info['chs'][0]['loc'][9] == 760\n assert raw.info['chs'][1]['loc'][9] == 850\n\n # Test info import\n assert raw.info['subject_info'] == dict(sex=1, 
first_name=\"MNE\",\n middle_name=\"Test\",\n last_name=\"Recording\",\n birthday=(2014, 8, 23),\n his_id=\"MNE_Test_Recording\")\n\n # Test distance between optodes matches values from\n # nirsite https://github.com/mne-tools/mne-testing-data/pull/51\n # step 4 figure 2\n allowed_distance_error = 0.0002\n distances = source_detector_distances(raw.info)\n assert_allclose(distances[::2], [\n 0.0304, 0.0078, 0.0310, 0.0086, 0.0416,\n 0.0072, 0.0389, 0.0075, 0.0558, 0.0562,\n 0.0561, 0.0565, 0.0077], atol=allowed_distance_error)\n\n # Test which channels are short\n # These are the ones marked as red at\n # https://github.com/mne-tools/mne-testing-data/pull/51 step 4 figure 2\n is_short = short_channels(raw.info)\n assert_array_equal(is_short[:9:2], [False, True, False, True, False])\n is_short = short_channels(raw.info, threshold=0.003)\n assert_array_equal(is_short[:3:2], [False, False])\n is_short = short_channels(raw.info, threshold=50)\n assert_array_equal(is_short[:3:2], [True, True])\n\n # Test trigger events\n assert_array_equal(raw.annotations.description, ['3.0', '2.0', '1.0'])\n\n # Test location of detectors\n # The locations of detectors can be seen in the first\n # figure on this page...\n # https://github.com/mne-tools/mne-testing-data/pull/51\n # And have been manually copied below\n # These values were reported in mm, but according to this page...\n # https://mne.tools/stable/auto_tutorials/intro/plot_40_sensor_locations.html\n # 3d locations should be specified in meters, so that's what's tested below\n # Detector locations are stored in the third three loc values\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D1'\n assert_allclose(\n mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][4][3:5] == 'D3'\n assert_allclose(\n mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][8][3:5] == 'D2'\n assert_allclose(\n mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][12][3:5] == 'D4'\n assert_allclose(\n mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][16][3:5] == 'D5'\n assert_allclose(\n mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][19][3:5] == 'D6'\n assert_allclose(\n mni_locs[19], [0.0352, 0.0283, 0.0780], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][21][3:5] == 'D7'\n assert_allclose(\n mni_locs[21], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error)\n\n\n@requires_testing_data\ndef test_nirx_15_3_short():\n \"\"\"Test reading NIRX files.\"\"\"\n raw = read_raw_nirx(fname_nirx_15_3_short, preload=True)\n\n # Test data import\n assert raw._data.shape == (26, 220)\n assert raw.info['sfreq'] == 12.5\n\n # Test channel naming\n assert raw.info['ch_names'][:4] == [\"S1_D2 760\", \"S1_D2 850\",\n \"S1_D9 760\", \"S1_D9 850\"]\n assert raw.info['ch_names'][24:26] == [\"S5_D13 760\", \"S5_D13 850\"]\n\n # Test frequency encoding\n assert raw.info['chs'][0]['loc'][9] == 760\n assert raw.info['chs'][1]['loc'][9] == 850\n\n # Test info import\n assert raw.info['subject_info'] == dict(birthday=(2020, 8, 18),\n sex=0,\n first_name=\"testMontage\\\\0A\"\n \"TestMontage\",\n his_id=\"testMontage\\\\0A\"\n \"TestMontage\")\n\n # Test distance between optodes matches 
values from\n # https://github.com/mne-tools/mne-testing-data/pull/72\n allowed_distance_error = 0.001\n distances = source_detector_distances(raw.info)\n assert_allclose(distances[::2], [\n 0.0304, 0.0078, 0.0310, 0.0086, 0.0416,\n 0.0072, 0.0389, 0.0075, 0.0558, 0.0562,\n 0.0561, 0.0565, 0.0077], atol=allowed_distance_error)\n\n # Test which channels are short\n # These are the ones marked as red at\n # https://github.com/mne-tools/mne-testing-data/pull/72\n is_short = short_channels(raw.info)\n assert_array_equal(is_short[:9:2], [False, True, False, True, False])\n is_short = short_channels(raw.info, threshold=0.003)\n assert_array_equal(is_short[:3:2], [False, False])\n is_short = short_channels(raw.info, threshold=50)\n assert_array_equal(is_short[:3:2], [True, True])\n\n # Test trigger events\n assert_array_equal(raw.annotations.description, ['4.0', '2.0', '1.0'])\n\n # Test location of detectors\n # The locations of detectors can be seen in the first\n # figure on this page...\n # https://github.com/mne-tools/mne-testing-data/pull/72\n # And have been manually copied below\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D2'\n assert_allclose(\n mni_locs[0], [-0.0841, -0.0464, -0.0129], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][4][3:5] == 'D1'\n assert_allclose(\n mni_locs[4], [0.0846, -0.0142, -0.0156], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][8][3:5] == 'D3'\n assert_allclose(\n mni_locs[8], [0.0207, -0.1062, 0.0484], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][12][3:5] == 'D4'\n assert_allclose(\n mni_locs[12], [-0.0196, 0.0821, 0.0275], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][16][3:5] == 'D5'\n assert_allclose(\n mni_locs[16], [-0.0360, 0.0276, 0.0778], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][19][3:5] == 'D6'\n assert_allclose(\n mni_locs[19], [0.0388, -0.0477, 0.0932], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][21][3:5] == 'D7'\n assert_allclose(\n mni_locs[21], [-0.0394, -0.0483, 0.0928], atol=allowed_dist_error)\n\n\n@requires_testing_data\ndef test_encoding(tmp_path):\n \"\"\"Test NIRx encoding.\"\"\"\n fname = tmp_path / 'latin'\n shutil.copytree(fname_nirx_15_2, fname)\n hdr_fname = op.join(fname, 'NIRS-2019-10-02_003.hdr')\n hdr = list()\n with open(hdr_fname, 'rb') as fid:\n hdr.extend(line for line in fid)\n hdr[2] = b'Date=\"jeu. 13 f\\xe9vr. 
2020\"\\r\\n'\n with open(hdr_fname, 'wb') as fid:\n for line in hdr:\n fid.write(line)\n # smoke test\n with pytest.raises(RuntimeWarning, match='Extraction of measurement date'):\n read_raw_nirx(fname)\n\n\n@requires_testing_data\ndef test_nirx_15_2():\n \"\"\"Test reading NIRX files.\"\"\"\n raw = read_raw_nirx(fname_nirx_15_2, preload=True)\n\n # Test data import\n assert raw._data.shape == (64, 67)\n assert raw.info['sfreq'] == 3.90625\n assert raw.info['meas_date'] == dt.datetime(2019, 10, 2, 9, 8, 47, 511000,\n tzinfo=dt.timezone.utc)\n\n # Test channel naming\n assert raw.info['ch_names'][:4] == [\"S1_D1 760\", \"S1_D1 850\",\n \"S1_D10 760\", \"S1_D10 850\"]\n\n # Test info import\n assert raw.info['subject_info'] == dict(sex=1, first_name=\"TestRecording\",\n birthday=(1989, 10, 2),\n his_id=\"TestRecording\")\n\n # Test trigger events\n assert_array_equal(raw.annotations.description, ['4.0', '6.0', '2.0'])\n print(raw.annotations.onset)\n\n # Test location of detectors\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D1'\n assert_allclose(\n mni_locs[0], [-0.0292, 0.0852, -0.0142], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][15][3:5] == 'D4'\n assert_allclose(\n mni_locs[15], [-0.0739, -0.0756, -0.0075], atol=allowed_dist_error)\n\n # Old name aliases for backward compat\n assert 'fnirs_cw_amplitude' in raw\n with pytest.raises(ValueError, match='Invalid value'):\n 'fnirs_raw' in raw\n assert 'fnirs_od' not in raw\n picks = pick_types(raw.info, fnirs='fnirs_cw_amplitude')\n assert len(picks) > 0\n\n\n@requires_testing_data\ndef test_nirx_15_0():\n \"\"\"Test reading NIRX files.\"\"\"\n raw = read_raw_nirx(fname_nirx_15_0, preload=True)\n\n # Test data import\n assert raw._data.shape == (20, 92)\n assert raw.info['sfreq'] == 6.25\n assert raw.info['meas_date'] == dt.datetime(2019, 10, 27, 13, 53, 34,\n 209000,\n tzinfo=dt.timezone.utc)\n\n # Test channel naming\n assert raw.info['ch_names'][:12] == [\"S1_D1 760\", \"S1_D1 850\",\n \"S2_D2 760\", \"S2_D2 850\",\n \"S3_D3 760\", \"S3_D3 850\",\n \"S4_D4 760\", \"S4_D4 850\",\n \"S5_D5 760\", \"S5_D5 850\",\n \"S6_D6 760\", \"S6_D6 850\"]\n\n # Test info import\n assert raw.info['subject_info'] == {'birthday': (2004, 10, 27),\n 'first_name': 'NIRX',\n 'last_name': 'Test',\n 'sex': FIFF.FIFFV_SUBJ_SEX_UNKNOWN,\n 'his_id': \"NIRX_Test\"}\n\n # Test trigger events\n assert_array_equal(raw.annotations.description, ['1.0', '2.0', '2.0'])\n\n # Test location of detectors\n allowed_dist_error = 0.0002\n locs = [ch['loc'][6:9] for ch in raw.info['chs']]\n head_mri_t, _ = _get_trans('fsaverage', 'head', 'mri')\n mni_locs = apply_trans(head_mri_t, locs)\n\n assert raw.info['ch_names'][0][3:5] == 'D1'\n assert_allclose(\n mni_locs[0], [0.0287, -0.1143, -0.0332], atol=allowed_dist_error)\n\n assert raw.info['ch_names'][15][3:5] == 'D8'\n assert_allclose(\n mni_locs[15], [-0.0693, -0.0480, 0.0657], atol=allowed_dist_error)\n\n # Test distance between optodes matches values from\n allowed_distance_error = 0.0002\n distances = source_detector_distances(raw.info)\n assert_allclose(distances[::2], [\n 0.0301, 0.0315, 0.0343, 0.0368, 0.0408,\n 0.0399, 0.0393, 0.0367, 0.0336, 0.0447], atol=allowed_distance_error)\n\n\n@requires_testing_data\[email protected]('fname, boundary_decimal', (\n [fname_nirx_15_2_short, 1],\n [fname_nirx_15_2, 0],\n [fname_nirx_15_2, 0],\n 
[nirsport2_2021_9, 0]\n))\ndef test_nirx_standard(fname, boundary_decimal):\n \"\"\"Test standard operations.\"\"\"\n _test_raw_reader(read_raw_nirx, fname=fname,\n boundary_decimal=boundary_decimal) # low fs\n" ]
[ [ "numpy.diff", "numpy.testing.assert_array_equal", "numpy.isnan", "numpy.where", "numpy.testing.assert_allclose" ] ]
Miles-Ma/mmclassification
[ "b54acfd5c431bf3a15a964c9d3d9a271c197ac18" ]
[ "mmcls/models/utils/attention.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn.bricks.transformer import build_dropout\nfrom mmcv.cnn.utils.weight_init import trunc_normal_\nfrom mmcv.runner.base_module import BaseModule\n\nfrom ..builder import ATTENTION\nfrom .helpers import to_2tuple\n\n\nclass WindowMSA(BaseModule):\n \"\"\"Window based multi-head self-attention (W-MSA) module with relative\n position bias.\n\n Args:\n embed_dims (int): Number of input channels.\n window_size (tuple[int]): The height and width of the window.\n num_heads (int): Number of attention heads.\n qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.\n Defaults to True.\n qk_scale (float | None, optional): Override default qk scale of\n head_dim ** -0.5 if set. Defaults to None.\n attn_drop (float, optional): Dropout ratio of attention weight.\n Defaults to 0.\n proj_drop (float, optional): Dropout ratio of output. Defaults to 0.\n init_cfg (dict, optional): The extra config for initialization.\n Defaults to None.\n \"\"\"\n\n def __init__(self,\n embed_dims,\n window_size,\n num_heads,\n qkv_bias=True,\n qk_scale=None,\n attn_drop=0.,\n proj_drop=0.,\n init_cfg=None):\n\n super().__init__(init_cfg)\n self.embed_dims = embed_dims\n self.window_size = window_size # Wh, Ww\n self.num_heads = num_heads\n head_embed_dims = embed_dims // num_heads\n self.scale = qk_scale or head_embed_dims**-0.5\n\n # define a parameter table of relative position bias\n self.relative_position_bias_table = nn.Parameter(\n torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),\n num_heads)) # 2*Wh-1 * 2*Ww-1, nH\n\n # About 2x faster than original impl\n Wh, Ww = self.window_size\n rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)\n rel_position_index = rel_index_coords + rel_index_coords.T\n rel_position_index = rel_position_index.flip(1).contiguous()\n self.register_buffer('relative_position_index', rel_position_index)\n\n self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)\n self.attn_drop = nn.Dropout(attn_drop)\n self.proj = nn.Linear(embed_dims, embed_dims)\n self.proj_drop = nn.Dropout(proj_drop)\n\n self.softmax = nn.Softmax(dim=-1)\n\n def init_weights(self):\n super(WindowMSA, self).init_weights()\n\n trunc_normal_(self.relative_position_bias_table, std=0.02)\n\n def forward(self, x, mask=None):\n \"\"\"\n Args:\n\n x (tensor): input features with shape of (num_windows*B, N, C)\n mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww,\n Wh*Ww), value should be between (-inf, 0].\n \"\"\"\n B_, N, C = x.shape\n qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads,\n C // self.num_heads).permute(2, 0, 3, 1, 4)\n q, k, v = qkv[0], qkv[1], qkv[\n 2] # make torchscript happy (cannot use tensor as tuple)\n\n q = q * self.scale\n attn = (q @ k.transpose(-2, -1))\n\n relative_position_bias = self.relative_position_bias_table[\n self.relative_position_index.view(-1)].view(\n self.window_size[0] * self.window_size[1],\n self.window_size[0] * self.window_size[1],\n -1) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.permute(\n 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n nW = mask.shape[0]\n attn = attn.view(B_ // nW, nW, self.num_heads, N,\n N) + mask.unsqueeze(1).unsqueeze(0)\n attn = attn.view(-1, self.num_heads, N, N)\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n x = 
self.proj(x)\n x = self.proj_drop(x)\n return x\n\n @staticmethod\n def double_step_seq(step1, len1, step2, len2):\n seq1 = torch.arange(0, step1 * len1, step1)\n seq2 = torch.arange(0, step2 * len2, step2)\n return (seq1[:, None] + seq2[None, :]).reshape(1, -1)\n\n\[email protected]_module()\nclass ShiftWindowMSA(BaseModule):\n \"\"\"Shift Window Multihead Self-Attention Module.\n\n Args:\n embed_dims (int): Number of input channels.\n input_resolution (Tuple[int, int]): The resolution of the input feature\n map.\n num_heads (int): Number of attention heads.\n window_size (int): The height and width of the window.\n shift_size (int, optional): The shift step of each window towards\n right-bottom. If zero, act as regular window-msa. Defaults to 0.\n qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.\n Default: True\n qk_scale (float | None, optional): Override default qk scale of\n head_dim ** -0.5 if set. Defaults to None.\n attn_drop (float, optional): Dropout ratio of attention weight.\n Defaults to 0.0.\n proj_drop (float, optional): Dropout ratio of output. Defaults to 0.\n dropout_layer (dict, optional): The dropout_layer used before output.\n Defaults to dict(type='DropPath', drop_prob=0.).\n auto_pad (bool, optional): Auto pad the feature map to be divisible by\n window_size, Defaults to False.\n init_cfg (dict, optional): The extra config for initialization.\n Default: None.\n \"\"\"\n\n def __init__(self,\n embed_dims,\n input_resolution,\n num_heads,\n window_size,\n shift_size=0,\n qkv_bias=True,\n qk_scale=None,\n attn_drop=0,\n proj_drop=0,\n dropout_layer=dict(type='DropPath', drop_prob=0.),\n auto_pad=False,\n init_cfg=None):\n super().__init__(init_cfg)\n\n self.embed_dims = embed_dims\n self.input_resolution = input_resolution\n self.shift_size = shift_size\n self.window_size = window_size\n if min(self.input_resolution) <= self.window_size:\n # if window size is larger than input resolution, don't partition\n self.shift_size = 0\n self.window_size = min(self.input_resolution)\n\n self.w_msa = WindowMSA(embed_dims, to_2tuple(self.window_size),\n num_heads, qkv_bias, qk_scale, attn_drop,\n proj_drop)\n\n self.drop = build_dropout(dropout_layer)\n\n H, W = self.input_resolution\n # Handle auto padding\n self.auto_pad = auto_pad\n if self.auto_pad:\n self.pad_r = (self.window_size -\n W % self.window_size) % self.window_size\n self.pad_b = (self.window_size -\n H % self.window_size) % self.window_size\n self.H_pad = H + self.pad_b\n self.W_pad = W + self.pad_r\n else:\n H_pad, W_pad = self.input_resolution\n assert H_pad % self.window_size + W_pad % self.window_size == 0,\\\n f'input_resolution({self.input_resolution}) is not divisible '\\\n f'by window_size({self.window_size}). 
Please check feature '\\\n f'map shape or set `auto_pad=True`.'\n self.H_pad, self.W_pad = H_pad, W_pad\n self.pad_r, self.pad_b = 0, 0\n\n if self.shift_size > 0:\n # calculate attention mask for SW-MSA\n img_mask = torch.zeros((1, self.H_pad, self.W_pad, 1)) # 1 H W 1\n h_slices = (slice(0, -self.window_size),\n slice(-self.window_size,\n -self.shift_size), slice(-self.shift_size, None))\n w_slices = (slice(0, -self.window_size),\n slice(-self.window_size,\n -self.shift_size), slice(-self.shift_size, None))\n cnt = 0\n for h in h_slices:\n for w in w_slices:\n img_mask[:, h, w, :] = cnt\n cnt += 1\n\n # nW, window_size, window_size, 1\n mask_windows = self.window_partition(img_mask)\n mask_windows = mask_windows.view(\n -1, self.window_size * self.window_size)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0,\n float(-100.0)).masked_fill(\n attn_mask == 0, float(0.0))\n else:\n attn_mask = None\n\n self.register_buffer('attn_mask', attn_mask)\n\n def forward(self, query):\n H, W = self.input_resolution\n B, L, C = query.shape\n assert L == H * W, 'input feature has wrong size'\n query = query.view(B, H, W, C)\n\n if self.pad_r or self.pad_b:\n query = F.pad(query, (0, 0, 0, self.pad_r, 0, self.pad_b))\n\n # cyclic shift\n if self.shift_size > 0:\n shifted_query = torch.roll(\n query,\n shifts=(-self.shift_size, -self.shift_size),\n dims=(1, 2))\n else:\n shifted_query = query\n\n # nW*B, window_size, window_size, C\n query_windows = self.window_partition(shifted_query)\n # nW*B, window_size*window_size, C\n query_windows = query_windows.view(-1, self.window_size**2, C)\n\n # W-MSA/SW-MSA (nW*B, window_size*window_size, C)\n attn_windows = self.w_msa(query_windows, mask=self.attn_mask)\n\n # merge windows\n attn_windows = attn_windows.view(-1, self.window_size,\n self.window_size, C)\n\n # B H' W' C\n shifted_x = self.window_reverse(attn_windows, self.H_pad, self.W_pad)\n # reverse cyclic shift\n if self.shift_size > 0:\n x = torch.roll(\n shifted_x,\n shifts=(self.shift_size, self.shift_size),\n dims=(1, 2))\n else:\n x = shifted_x\n\n if self.pad_r or self.pad_b:\n x = x[:, :H, :W, :].contiguous()\n\n x = x.view(B, H * W, C)\n\n x = self.drop(x)\n return x\n\n def window_reverse(self, windows, H, W):\n window_size = self.window_size\n B = int(windows.shape[0] / (H * W / window_size / window_size))\n x = windows.view(B, H // window_size, W // window_size, window_size,\n window_size, -1)\n x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n return x\n\n def window_partition(self, x):\n B, H, W, C = x.shape\n window_size = self.window_size\n x = x.view(B, H // window_size, window_size, W // window_size,\n window_size, C)\n windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()\n windows = windows.view(-1, window_size, window_size, C)\n return windows\n" ]
[ [ "torch.nn.Linear", "torch.roll", "torch.nn.Softmax", "torch.nn.functional.pad", "torch.arange", "torch.zeros", "torch.nn.Dropout" ] ]
xpo0a/SpeechEnhancement
[ "2efd67c24289541e43d3943cc1a3f8989c0afeb4" ]
[ "data_preprocess.py" ]
[ "import os\nimport yaml\n\nimport librosa\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef slice_signal(file, window_size, stride, sample_rate):\n\n wav, sr = librosa.load(file, sr=None)\n\n if sr != sample_rate:\n wav = librosa.resample(wav, sr, sample_rate)\n\n wav = wav / np.max(np.abs(wav))\n\n if np.max(wav) > 1 or np.min(wav) < -1:\n print('need to norm')\n\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n slices.append(slice_sig)\n return slices\n\n\ndef process_and_serialize(data_type):\n\n stride = 0.5\n cfg_path = r'config/config.yaml'\n cfg = yaml.load(open(cfg_path, 'r'), Loader=yaml.FullLoader)\n\n root_dir = cfg['data']['root_path']\n corpus = cfg['data']['corpus']\n window_size = cfg['data']['window_size']\n sample_rate = cfg['data']['sample_rate']\n\n clean_folder = os.path.join(root_dir, corpus, data_type, 'clean')\n noisy_folder = os.path.join(root_dir, corpus, data_type, 'noise')\n serialized_folder = os.path.join(root_dir, corpus, data_type, 'serialized_data')\n\n if not os.path.exists(serialized_folder):\n os.makedirs(serialized_folder)\n\n for root, dirs, files in os.walk(clean_folder):\n if len(files) == 0:\n continue\n for filename in tqdm(files, desc='Serialize and down-sample {} audios'.format(data_type)):\n clean_file = os.path.join(clean_folder, filename)\n noisy_file = os.path.join(noisy_folder, filename)\n # slice both clean signal and noisy signal\n clean_sliced = slice_signal(clean_file, window_size, stride, sample_rate)\n noisy_sliced = slice_signal(noisy_file, window_size, stride, sample_rate)\n for idx, slice_tuple in enumerate(zip(clean_sliced, noisy_sliced)):\n pair = np.array([slice_tuple[0], slice_tuple[1]])\n np.save(os.path.join(serialized_folder, '{}_{}'.format(filename, idx)), arr=pair)\n data_verify(serialized_folder=serialized_folder, window_size=window_size)\n\n\ndef data_verify(serialized_folder, window_size):\n for root, dirs, files in os.walk(serialized_folder):\n for filename in tqdm(files, desc='Verify serialized audios'):\n data_pair = np.load(os.path.join(root, filename), allow_pickle=True)\n if data_pair.shape[1] != window_size:\n print('Snippet length not {} : {} instead'.format(window_size, data_pair.shape[1]))\n break\n\n\nif __name__ == '__main__':\n process_and_serialize('train')\n # process_and_serialize('test')\n\n" ]
[ [ "numpy.array", "numpy.max", "numpy.abs", "numpy.min" ] ]
kbrodt/clog-loss
[ "0831b3a01b079609a71490bb921633110927206c" ]
[ "src/models/resnext.py" ]
[ "import math\nfrom functools import partial\nfrom functools import partialmethod\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .resnet import conv1x1x1, Bottleneck, ResNet\n\n\ndef partialclass(cls, *args, **kwargs):\n class PartialClass(cls):\n __init__ = partialmethod(cls.__init__, *args, **kwargs)\n\n return PartialClass\n\n\ndef get_inplanes():\n return [128, 256, 512, 1024]\n\n\nclass ResNeXtBottleneck(Bottleneck):\n expansion = 2\n\n def __init__(self, in_planes, planes, cardinality, stride=1,\n downsample=None):\n super().__init__(in_planes, planes, stride, downsample)\n\n mid_planes = cardinality * planes // 32\n self.conv1 = conv1x1x1(in_planes, mid_planes)\n self.bn1 = nn.BatchNorm3d(mid_planes)\n self.conv2 = nn.Conv3d(mid_planes,\n mid_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n groups=cardinality,\n bias=False)\n self.bn2 = nn.BatchNorm3d(mid_planes)\n self.conv3 = conv1x1x1(mid_planes, planes * self.expansion)\n\n\nclass ResNeXt(ResNet):\n\n def __init__(self,\n block,\n layers,\n block_inplanes,\n n_input_channels=3,\n conv1_t_size=7,\n conv1_t_stride=1,\n no_max_pool=False,\n shortcut_type='B',\n cardinality=32,\n n_classes=400):\n block = partialclass(block, cardinality=cardinality)\n super().__init__(block, layers, block_inplanes, n_input_channels,\n conv1_t_size, conv1_t_stride, no_max_pool,\n shortcut_type, n_classes)\n\n self.fc = nn.Linear(cardinality * 32 * block.expansion, n_classes)\n\n\ndef generate_model(model_depth, **kwargs):\n assert model_depth in [50, 101, 152, 200]\n\n if model_depth == 50:\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 6, 3], get_inplanes(),\n **kwargs)\n elif model_depth == 101:\n model = ResNeXt(ResNeXtBottleneck, [3, 4, 23, 3], get_inplanes(),\n **kwargs)\n elif model_depth == 152:\n model = ResNeXt(ResNeXtBottleneck, [3, 8, 36, 3], get_inplanes(),\n **kwargs)\n elif model_depth == 200:\n model = ResNeXt(ResNeXtBottleneck, [3, 24, 36, 3], get_inplanes(),\n **kwargs)\n\n return model\n" ]
[ [ "torch.nn.Conv3d", "torch.nn.Linear", "torch.nn.BatchNorm3d" ] ]
NVIDIA/cuQuantum
[ "0f00494d4639d760228ac002e83e6d2d3dd97eca" ]
[ "python/samples/sampler.py" ]
[ "# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES\n#\n# SPDX-License-Identifier: BSD-3-Clause\n\nimport numpy as np\nimport cupy as cp\n\nimport cuquantum\nfrom cuquantum import custatevec as cusv\n\n\nnIndexBits = 3\nnSvSize = (1 << nIndexBits)\nnMaxShots = 5\nnShots = 5\n\nbitStringLen = 2;\nbitOrdering = np.asarray([0, 1], dtype=np.int32)\n\nbitStrings = np.empty((nShots,), dtype=np.int64)\nbitStrings_expected = np.asarray([0b00, 0b01, 0b10, 0b11, 0b11], dtype=np.int64)\n\nh_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j, \n 0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)\n\nd_sv = cp.asarray(h_sv)\n\n# In real appliction, random numbers in range [0, 1) will be used.\nrandnums = np.asarray([0.1, 0.8, 0.4, 0.6, 0.2], dtype=np.float64)\n\n########################################################################\n\n# cuStateVec handle initialization\nhandle = cusv.create()\n\n# create sampler and check the size of external workspace\nsampler, extraWorkspaceSizeInBytes = cusv.sampler_create(\n handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, nMaxShots)\n\n# allocate external workspace\nextraWorkspace = cp.cuda.alloc(extraWorkspaceSizeInBytes)\n\n# sample preprocess\ncusv.sampler_preprocess(\n handle, sampler, extraWorkspace.ptr, extraWorkspaceSizeInBytes)\n\n# sample bit strings\ncusv.sampler_sample(\n handle, sampler, bitStrings.ctypes.data, bitOrdering.ctypes.data, bitStringLen,\n randnums.ctypes.data, nShots, cusv.SamplerOutput.ASCENDING_ORDER)\n\n# destroy sampler\ncusv.sampler_destroy(sampler)\n\n# destroy handle\ncusv.destroy(handle)\n\nif not np.allclose(bitStrings, bitStrings_expected):\n raise ValueError(\"results mismatch\")\nprint(\"test passed\")\n" ]
[ [ "numpy.empty", "numpy.asarray", "numpy.allclose" ] ]
JeromeMutgeert/Detectron-DA-Faster-RCNN
[ "86e4fb06bf3e934c12eb0913ef4210ad61114386" ]
[ "detectron/core/test_engine.py" ]
[ "# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n\"\"\"Test a Detectron network on an imdb (image database).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nimport cv2\nimport datetime\nimport logging\nimport numpy as np\nimport os\n\nfrom caffe2.python import workspace\n\nfrom detectron.core.config import cfg\nfrom detectron.core.config import get_output_dir\nfrom detectron.core.rpn_generator import generate_rpn_on_dataset\nfrom detectron.core.rpn_generator import generate_rpn_on_range\nfrom detectron.core.test import im_detect_all\nfrom detectron.datasets import task_evaluation\nfrom detectron.datasets.json_dataset import JsonDataset\nfrom detectron.modeling import model_builder\nfrom detectron.utils.io import save_object\nfrom detectron.utils.timer import Timer\nimport detectron.utils.c2 as c2_utils\nimport detectron.utils.env as envu\nimport detectron.utils.net as net_utils\nimport detectron.utils.subprocess as subprocess_utils\nimport detectron.utils.vis as vis_utils\n\n# for loading detections.pkl if already present\nfrom detectron.utils.io import load_object\n\nlogger = logging.getLogger(__name__)\n\n\n\ndef get_eval_functions():\n # Determine which parent or child function should handle inference\n if cfg.MODEL.RPN_ONLY:\n child_func = generate_rpn_on_range\n parent_func = generate_rpn_on_dataset\n else:\n # Generic case that handles all network types other than RPN-only nets\n # and RetinaNet\n child_func = test_net\n parent_func = test_net_on_dataset\n\n return parent_func, child_func\n\n\ndef get_inference_dataset(index, is_parent=True):\n assert is_parent or len(cfg.TEST.DATASETS) == 1, \\\n 'The child inference process can only work on a single dataset'\n\n dataset_name = cfg.TEST.DATASETS[index]\n\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n assert is_parent or len(cfg.TEST.PROPOSAL_FILES) == 1, \\\n 'The child inference process can only work on a single proposal file'\n assert len(cfg.TEST.PROPOSAL_FILES) == len(cfg.TEST.DATASETS), \\\n 'If proposals are used, one proposal file must be specified for ' \\\n 'each dataset'\n proposal_file = cfg.TEST.PROPOSAL_FILES[index]\n else:\n proposal_file = None\n\n return dataset_name, proposal_file\n\n\ndef run_inference(\n weights_file, ind_range=None,\n multi_gpu_testing=False, gpu_id=0,\n check_expected_results=False,\n):\n parent_func, child_func = get_eval_functions()\n is_parent = ind_range is None\n\n def result_getter():\n if is_parent:\n # Parent case:\n # In this case we're either running inference on the entire dataset in a\n # single process or (if multi_gpu_testing is True) using this process to\n # launch subprocesses that each run inference on a range of the dataset\n all_results = {}\n \n subset_pointer = None\n if cfg.VOC_SUBSET != '':\n 
subset_pointer = result_getter #any dummy object could be used that is more advanced than 'object()' or similar builtins.\n subset_pointer.subset = np.load(cfg.VOC_SUBSET)\n print('loading subset')\n \n for i in range(len(cfg.TEST.DATASETS)):\n dataset_name, proposal_file = get_inference_dataset(i)\n output_dir = get_output_dir(dataset_name, training=False)\n print('len before',len(subset_pointer.subset))\n results = parent_func(\n weights_file,\n dataset_name,\n proposal_file,\n output_dir,\n multi_gpu=multi_gpu_testing,\n subset_pointer=subset_pointer\n )\n all_results.update(results)\n\n return all_results\n else:\n # Subprocess child case:\n # In this case test_net was called via subprocess.Popen to execute on a\n # range of inputs on a single dataset\n dataset_name, proposal_file = get_inference_dataset(0, is_parent=False)\n output_dir = get_output_dir(dataset_name, training=False)\n return child_func(\n weights_file,\n dataset_name,\n proposal_file,\n output_dir,\n ind_range=ind_range,\n gpu_id=gpu_id\n )\n\n all_results = result_getter()\n if check_expected_results and is_parent:\n task_evaluation.check_expected_results(\n all_results,\n atol=cfg.EXPECTED_RESULTS_ATOL,\n rtol=cfg.EXPECTED_RESULTS_RTOL\n )\n task_evaluation.log_copy_paste_friendly_results(all_results)\n\n return all_results\n\n\ndef coco_detects_to_voc(all_boxes):\n # coco2voc: for each of the 81 coco classes the corresponding voc class index. See coco2voc.ipynb\n coco2voc = np.array([ 0, 15, 2, 7, 14, 1, 6, 19, 0, 4, 0, 0, 0, 0, 0, 3, 8,\n 12, 13, 17, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 9, 18, 16, 0, 11, 0, 20, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int32)\n \n voc_boxes = [[] for _ in range(21)]\n for i,cls_dets in enumerate(all_boxes):\n voc_i = coco2voc[i]\n if voc_i != 0:\n voc_boxes[voc_i] = cls_dets\n \n return voc_boxes\n\n\ndef test_net_on_dataset(\n weights_file,\n dataset_name,\n proposal_file,\n output_dir,\n multi_gpu=False,\n gpu_id=0,\n subset_pointer=None\n):\n \"\"\"Run inference on a dataset.\"\"\"\n if dataset_name[:5] != 'live_':\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n test_timer.tic()\n if multi_gpu:\n num_images = len(dataset.get_roidb())\n all_boxes, all_segms, all_keyps = multi_gpu_test_net_on_dataset(\n weights_file, dataset_name, proposal_file, num_images, output_dir\n )\n else:\n all_boxes, all_segms, all_keyps = test_net(\n weights_file, dataset_name, proposal_file, output_dir, gpu_id=gpu_id,\n subset_pointer=subset_pointer\n )\n test_timer.toc()\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n \n if cfg.TEST.COCO_TO_VOC:\n all_boxes = coco_detects_to_voc(all_boxes)\n \n if dataset_name[:5] == 'live_':\n return None\n \n results = task_evaluation.evaluate_all(\n dataset, all_boxes, all_segms, all_keyps, output_dir,\n subset_pointer=subset_pointer\n )\n \n if subset_pointer is not None:\n # prune the subset for the following datasets:\n subset_pointer.subset = subset_pointer.subset[len(dataset.get_roidb()):]\n print('remains',len(subset_pointer.subset)) # should have 0 remains for the last set, voc_2012_train.\n \n return results\n\n\ndef multi_gpu_test_net_on_dataset(\n weights_file, dataset_name, proposal_file, num_images, output_dir\n):\n \"\"\"Multi-gpu inference on a dataset.\"\"\"\n binary_dir = envu.get_runtime_dir()\n binary_ext = envu.get_py_bin_ext()\n binary = os.path.join(binary_dir, 'test_net' + binary_ext)\n assert 
os.path.exists(binary), 'Binary \\'{}\\' not found'.format(binary)\n\n # Pass the target dataset and proposal file (if any) via the command line\n opts = ['TEST.DATASETS', '(\"{}\",)'.format(dataset_name)]\n opts += ['TEST.WEIGHTS', weights_file]\n if proposal_file:\n opts += ['TEST.PROPOSAL_FILES', '(\"{}\",)'.format(proposal_file)]\n\n # Run inference in parallel in subprocesses\n # Outputs will be a list of outputs from each subprocess, where the output\n # of each subprocess is the dictionary saved by test_net().\n outputs = subprocess_utils.process_in_parallel(\n 'detection', num_images, binary, output_dir, opts\n )\n\n # Collate the results from each subprocess\n all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]\n for det_data in outputs:\n all_boxes_batch = det_data['all_boxes']\n all_segms_batch = det_data['all_segms']\n all_keyps_batch = det_data['all_keyps']\n for cls_idx in range(1, cfg.MODEL.NUM_CLASSES):\n all_boxes[cls_idx] += all_boxes_batch[cls_idx]\n all_segms[cls_idx] += all_segms_batch[cls_idx]\n all_keyps[cls_idx] += all_keyps_batch[cls_idx]\n det_file = os.path.join(output_dir, 'detections.pkl')\n cfg_yaml = envu.yaml_dump(cfg)\n save_object(\n dict(\n all_boxes=all_boxes,\n all_segms=all_segms,\n all_keyps=all_keyps,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n\n return all_boxes, all_segms, all_keyps\n\n\ndef test_net(\n weights_file,\n dataset_name,\n proposal_file,\n output_dir,\n ind_range=None,\n gpu_id=0,\n subset_pointer=None\n):\n \"\"\"Run inference on all images in a dataset or over an index range of images\n in a dataset using a single GPU.\n \"\"\"\n assert not cfg.MODEL.RPN_ONLY, \\\n 'Use rpn_generate to generate proposals from RPN-only models'\n \n # determine file name\n if ind_range is not None:\n det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'detections.pkl'\n det_file = os.path.join(output_dir, det_name)\n \n # load results if already present\n if os.path.exists(det_file):\n res = load_object(det_file)\n all_boxes, all_segms, all_keyps = res['all_boxes'],res['all_segms'],res['all_keyps']\n else:\n \n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(\n dataset_name, proposal_file, ind_range\n )\n \n if subset_pointer is not None:\n voc_subset = subset_pointer.subset\n this_sub = voc_subset[:len(roidb)]\n # subset_pointer.subset = voc_subset[len(roidb):]\n \n # filter roidb:\n roidb = [roi for taking,roi in zip(this_sub,roidb) if taking]\n \n total_num_images = len(roidb)\n end_ind = total_num_images\n \n model = initialize_model_from_cfg(weights_file, gpu_id=gpu_id)\n \n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES\n \n all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)\n if cfg.TEST.COLLECT_ALL:\n all_feats = []\n all_class_weights = np.empty(shape=(num_images,num_classes),dtype=np.float32)\n \n timers = defaultdict(Timer)\n \n for i, entry in enumerate(roidb):\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n # The roidb may contain ground-truth rois (for example, if the roidb\n # comes from the training or val split). We only want to evaluate\n # detection on the *non*-ground-truth rois. 
We select only the rois\n # that have the gt_classes field set to 0, which means there's no\n # ground truth.\n box_proposals = entry['boxes'][entry['gt_classes'] == 0]\n if len(box_proposals) == 0:\n continue\n else:\n # Faster R-CNN type models generate proposals on-the-fly with an\n # in-network RPN; 1-stage models don't require proposals.\n box_proposals = None\n \n im = cv2.imread(entry['image'])\n with c2_utils.NamedCudaScope(gpu_id):\n cls_boxes_i, cls_segms_i, cls_keyps_i, sum_softmax, topk_feats = im_detect_all(\n model, im, box_proposals, timers, return_feats= cfg.TEST.COLLECT_ALL\n )\n \n # print('nfeats:', topk_feats.shape[0])\n # print(topk_feats)\n \n extend_results(i, all_boxes, cls_boxes_i)\n if cls_segms_i is not None:\n extend_results(i, all_segms, cls_segms_i)\n if cls_keyps_i is not None:\n extend_results(i, all_keyps, cls_keyps_i)\n \n if cfg.TEST.COLLECT_ALL:\n all_class_weights[i] = sum_softmax\n all_feats.append(topk_feats) # will accumulate about 9 Gb of feats on COCO train set (118K imgs)\n \n if i % 10 == 0: # Reduce log file size\n ave_total_time = np.sum([t.average_time for t in timers.values()])\n eta_seconds = ave_total_time * (num_images - i - 1)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n det_time = (\n timers['im_detect_bbox'].average_time +\n timers['im_detect_mask'].average_time +\n timers['im_detect_keypoints'].average_time\n )\n misc_time = (\n timers['misc_bbox'].average_time +\n timers['misc_mask'].average_time +\n timers['misc_keypoints'].average_time\n )\n logger.info(\n (\n 'im_detect: range [{:d}, {:d}] of {:d}: '\n '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})'\n ).format(\n start_ind + 1, end_ind, total_num_images, start_ind + i + 1,\n start_ind + num_images, det_time, misc_time, eta\n )\n )\n \n if cfg.VIS:\n im_name = os.path.splitext(os.path.basename(entry['image']))[0]\n vis_utils.vis_one_image(\n im[:, :, ::-1],\n '{:d}_{:s}'.format(i, im_name),\n os.path.join(output_dir, 'vis'),\n cls_boxes_i,\n segms=cls_segms_i,\n keypoints=cls_keyps_i,\n thresh=cfg.VIS_TH,\n box_alpha=0.8,\n dataset=dataset,\n show_class=True\n )\n \n cfg_yaml = envu.yaml_dump(cfg)\n save_object(\n dict(\n all_boxes=all_boxes,\n all_segms=all_segms,\n all_keyps=all_keyps,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n if cfg.TEST.COLLECT_ALL:\n save_object(all_class_weights,os.path.join(output_dir,'class_weights.pkl'))\n save_object(all_feats,os.path.join(output_dir,'feature_vectors.pkl'))\n logger.info('Wrote class weights and feature vectors to output folder')\n \n return all_boxes, all_segms, all_keyps\n\n\ndef initialize_model_from_cfg(weights_file, gpu_id=0):\n \"\"\"Initialize a model from the global cfg. Loads test-time weights and\n creates the networks in the Caffe2 workspace.\n \"\"\"\n model = model_builder.create(cfg.MODEL.TYPE, train=False, gpu_id=gpu_id)\n net_utils.initialize_gpu_from_weights_file(\n model, weights_file, gpu_id=gpu_id,\n )\n model_builder.add_inference_inputs(model)\n workspace.CreateNet(model.net)\n workspace.CreateNet(model.conv_body_net)\n if cfg.MODEL.MASK_ON:\n workspace.CreateNet(model.mask_net)\n if cfg.MODEL.KEYPOINTS_ON:\n workspace.CreateNet(model.keypoint_net)\n return model\n\n\ndef get_roidb_and_dataset(dataset_name, proposal_file, ind_range):\n \"\"\"Get the roidb for the dataset specified in the global cfg. 
Optionally\n restrict it to a range of indices if ind_range is a pair of integers.\n \"\"\"\n \n if dataset_name == 'live_targets':\n from detectron.datasets.live_dataset import LiveRoidb\n roidb = LiveRoidb()\n import detectron.datasets.dummy_datasets as dummy_datasets\n json_dataset = dummy_datasets.get_coco_dataset()\n if not cfg.TRAIN.USE_FLIPPED:\n logger.info('Live target data set will use flipped examples anyway!')\n logger.info('\"Loaded\" dataset: {:s}'.format('live_targets'))\n return roidb, json_dataset, 0, len(roidb), len(roidb)\n \n dataset = JsonDataset(dataset_name)\n if cfg.TEST.PRECOMPUTED_PROPOSALS:\n assert proposal_file, 'No proposal file given'\n roidb = dataset.get_roidb(\n proposal_file=proposal_file,\n proposal_limit=cfg.TEST.PROPOSAL_LIMIT\n )\n else:\n roidb = dataset.get_roidb()\n\n if ind_range is not None:\n total_num_images = len(roidb)\n start, end = ind_range\n roidb = roidb[start:end]\n else:\n start = 0\n end = len(roidb)\n total_num_images = end\n\n return roidb, dataset, start, end, total_num_images\n\n\ndef empty_results(num_classes, num_images):\n \"\"\"Return empty results lists for boxes, masks, and keypoints.\n Box detections are collected into:\n all_boxes[cls][image] = N x 5 array with columns (x1, y1, x2, y2, score)\n Instance mask predictions are collected into:\n all_segms[cls][image] = [...] list of COCO RLE encoded masks that are in\n 1:1 correspondence with the boxes in all_boxes[cls][image]\n Keypoint predictions are collected into:\n all_keyps[cls][image] = [...] list of keypoints results, each encoded as\n a 3D array (#rois, 4, #keypoints) with the 4 rows corresponding to\n [x, y, logit, prob] (See: utils.keypoints.heatmaps_to_keypoints).\n Keypoints are recorded for person (cls = 1); they are in 1:1\n correspondence with the boxes in all_boxes[cls][image].\n \"\"\"\n # Note: do not be tempted to use [[] * N], which gives N references to the\n # *same* empty list.\n all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n all_segms = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n all_keyps = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n return all_boxes, all_segms, all_keyps\n\n\ndef extend_results(index, all_res, im_res):\n \"\"\"Add results for an image to the set of all results at the specified\n index.\n \"\"\"\n # Skip cls_idx 0 (__background__)\n for cls_idx in range(1, len(im_res)):\n all_res[cls_idx][index] = im_res[cls_idx]\n" ]
[ [ "numpy.array", "numpy.load", "numpy.empty" ] ]
DamonU2/model-factory
[ "494f8b65afcd80fbedca3224e4c29a2e10cd484f" ]
[ "scripts/PSRA_combineSrcLossTable.py" ]
[ "#!/usr/bin/env python\n\nimport pandas as pd\nimport numpy as np\nimport csv\nimport glob \nimport os\nimport re\nimport sys\nimport argparse\nimport configparser\nimport logging\n\n'''\npython script to merge source loss tables for Provinces and territories\nwhere PSRA runs have een split up by economic region or sub regions\ncan be run from the command line with mandatory arguments like:\npython3 PSRA_combineSrcLossTable.py --srcLossDir=/usr/src/app/ebRisk/AB/\n'''\n\ndef main():\n args = parse_args()\n os.chdir(args.srcLossDir)\n\n for retrofit in 'b0', 'r2':\n erFileList = glob.glob('*src_loss_table_{}.csv'.format(retrofit))\n erFileList.sort()\n \n with open(erFileList[0], newline='') as f:\n reader = csv.reader(f)\n columns = next(reader)\n\n columns.append('region')\n\n dfFinal = pd.DataFrame(columns=columns)\n\n for erFile in erFileList:\n dfTemp = pd.read_csv(erFile)\n er = erFile.split('_')[1]\n #Remove the split econmic region identifiers \n #handle subregions and combined regions differently \n # For example 'QC2445-55' should remain the same\n # NB1330 should remain the same \n # BC5920A2 should be changed to BC5920 \n if len(re.split('(\\d+)',er)) == 1 or re.split('(\\d+)',er)[2] == '-':\n er = ''.join(re.split('(\\d+)',er)[0:4])\n else :\n er = ''.join(re.split('(\\d+)',er)[0:2])\n\n dfTemp['region'] = er\n dfFinal = dfFinal.append(dfTemp)\n outFileName = 'ebR_{er}_src_loss_table_{retrofit}.csv'.format(**{'er':er[0:2], 'retrofit':retrofit})\n\n if not os.path.isfile(outFileName): \n #Check if the file already exists, it should for \n #Provs/Territories that were process with a single \n #Economic region\n dfFinal.to_csv(outFileName, index=False)\n else: # else it exists, do nothing\n print('File ({}) already exists renaming original file'.format(outFileName))\n os.rename(outFileName, '{}_orginal.csv'.format(os.path.splitext(outFileName)[0]))\n dfFinal.to_csv(outFileName, index=False)\n return\n\ndef get_config_params(args):\n \"\"\"\n Parse Input/Output columns from supplied *.ini file\n \"\"\"\n configParseObj = configparser.ConfigParser()\n configParseObj.read(args)\n return configParseObj\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Combine Source Loss Tables across Economic Regions')\n parser.add_argument('--srcLossDir', type=str, help='', required=True)\n args = parser.parse_args()\n \n return args\n\nif __name__ == '__main__':\n main() " ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
bclarkson-code/search-query-classification
[ "8928faad459ef97934a6dbcf38a9347da5662415" ]
[ "gpt2_model/generate_gpt2_embeddings.py" ]
[ "import pickle\nimport torch\nfrom tqdm.auto import tqdm\nfrom gpt2_predictor import GPT2Predictor, GPT2TestSearchQueryDataModule\n\nif __name__ == '__main__':\n encoding = {\n 'Arts': 0,\n 'Business': 11,\n 'Computers': 10,\n 'Games': 12,\n 'Health': 9,\n 'Home': 6,\n 'News': 14,\n 'Recreation': 1,\n 'Reference': 13,\n 'Regional': 4,\n 'Science': 8,\n 'Shopping': 3,\n 'Society': 2,\n 'Sports': 5,\n 'World': 7\n }\n queries = GPT2TestSearchQueryDataModule(\n 'open_source.feather',\n batch_size=128,\n num_workers=0,\n tokeniser_string='gpt2',\n debug=False,\n encoding=encoding,\n )\n queries.prepare_data()\n queries.setup()\n\n model = GPT2Predictor.load_from_checkpoint(\n 'gpt2-checkpoints/model-epoch=00-valid/loss=1.86.ckpt',\n strict=False\n )\n test_data = queries.test_dataloader()\n preds = []\n with torch.no_grad():\n for batch in tqdm(test_data, desc='Predicting'):\n (input_ids, attention_mask), _ = batch\n pred = model(\n input_ids=input_ids,\n attention_mask=attention_mask\n )\n preds.append(pred)\n\n with open('test_preds.pkl', 'wb') as f:\n pickle.dump(preds)\n\n\n\n" ]
[ [ "torch.no_grad" ] ]
cpaxton/costar_plan
[ "be5c12f9d0e9d7078e6a5c283d3be059e7f3d040" ]
[ "costar_models/python/costar_models/conditional_image_costar.py" ]
[ "from __future__ import print_function\n\nimport keras.backend as K\nimport keras.losses as losses\nimport keras.optimizers as optimizers\nimport numpy as np\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers import Input, RepeatVector, Reshape\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.merge import Concatenate, Multiply\nfrom keras.losses import binary_crossentropy\nfrom keras.models import Model, Sequential\nfrom keras.optimizers import Adam\nfrom matplotlib import pyplot as plt\n\nfrom .robot_multi_models import *\nfrom .mhp_loss import *\nfrom .loss import *\nfrom .conditional_image import ConditionalImage\nfrom .multi import *\nfrom .costar import *\nfrom .callbacks import *\n\nclass ConditionalImageCostar(ConditionalImage):\n\n def __init__(self, *args, **kwargs):\n super(ConditionalImageCostar, self).__init__(*args, **kwargs)\n self.PredictorCb = ImageWithFirstCb\n\n def _makeModel(self, image, *args, **kwargs):\n\n img_shape = image.shape[1:]\n img_size = 1.\n for dim in img_shape:\n img_size *= dim\n gripper_size = 1\n arm_size = 6\n\n # =====================================================================\n # Load the image decoders\n img_in = Input(img_shape, name=\"predictor_img_in\")\n img0_in = Input(img_shape, name=\"predictor_img0_in\")\n #arm_in = Input((arm_size,))\n #gripper_in = Input((gripper_size,))\n #arm_gripper = Concatenate()([arm_in, gripper_in])\n label_in = Input((1,))\n ins = [img0_in, img_in]\n\n encoder = MakeImageEncoder(self, img_shape)\n decoder = MakeImageDecoder(self, self.hidden_shape)\n\n LoadEncoderWeights(self, encoder, decoder)\n\n # =====================================================================\n # Load the arm and gripper representation\n h = encoder([img0_in, img_in])\n\n if self.validate:\n self.loadValidationModels(arm_size, gripper_size, h0, h)\n\n next_option_in = Input((1,), name=\"next_option_in\")\n next_option_in2 = Input((1,), name=\"next_option_in2\")\n ins += [next_option_in, next_option_in2]\n\n # =====================================================================\n # Apply transforms\n y = Flatten()(OneHot(self.num_options)(next_option_in))\n y2 = Flatten()(OneHot(self.num_options)(next_option_in2))\n\n tform = self._makeTransform() if not self.dense_transform else self._makeDenseTransform()\n tform.summary()\n x = tform([h,y])\n x2 = tform([x,y2])\n\n image_out, image_out2 = decoder([x]), decoder([x2])\n\n # Compute classifier on the last transform\n if not self.no_disc:\n image_discriminator = LoadGoalClassifierWeights(self,\n make_classifier_fn=MakeCostarImageClassifier,\n img_shape=img_shape)\n #disc_out1 = image_discriminator([img0_in, image_out])\n disc_out2 = image_discriminator([img0_in, image_out2])\n\n # Create custom encoder loss\n if self.enc_loss:\n loss = EncoderLoss(self.image_encoder, self.loss)\n enc_losses = [loss, loss]\n enc_outs = [x, x2]\n enc_wts = [1e-2, 1e-2]\n img_loss_wt = 1.\n else:\n enc_losses = []\n enc_outs = []\n enc_wts = []\n img_loss_wt = 1.\n\n # Create models to train\n if self.no_disc:\n disc_wt = 0.\n else:\n disc_wt = 1e-3\n if self.no_disc:\n train_predictor = Model(ins + [label_in],\n [image_out, image_out2] + enc_outs)\n train_predictor.compile(\n loss=[self.loss, self.loss,] + enc_losses,\n loss_weights=[img_loss_wt, img_loss_wt] + enc_wts,\n optimizer=self.getOptimizer())\n else:\n train_predictor = Model(ins + [label_in],\n #[image_out, image_out2, disc_out1, disc_out2] + 
enc_outs)\n [image_out, image_out2, disc_out2] + enc_outs)\n train_predictor.compile(\n loss=[self.loss, self.loss, \"categorical_crossentropy\"] + enc_losses,\n #loss_weights=[img_loss_wt, img_loss_wt, 0.9*disc_wt, disc_wt] + enc_wts,\n loss_weights=[img_loss_wt, img_loss_wt, disc_wt] + enc_wts,\n optimizer=self.getOptimizer())\n train_predictor.summary()\n\n # Set variables\n self.predictor = None\n self.model = train_predictor\n\n\n def _getData(self, image, label, goal_idx, q, gripper, labels_to_name, *args, **kwargs):\n '''\n Parameters:\n -----------\n image: jpeg encoding of image\n label: integer code for which action is being performed\n goal_idx: index of the start of the next action\n q: joint states\n gripper: floating point gripper openness\n labels_to_name: list of high level actions (AKA options)\n '''\n\n # Null option to be set as the first option\n # Verify this to make sure we aren't loading things with different\n # numbers of available options/high-level actions\n if len(labels_to_name) != self.null_option:\n raise ValueError('labels_to_name must match the number of values in self.null_option. '\n 'self.null_option: ' + str(self.null_option) + ' ' +\n 'labels_to_name len: ' + str(len(labels_to_name)) + ' ' +\n 'labels_to_name values: ' + str(labels_to_name) + ' ' +\n 'If this is expected because you collected a dataset with new actions '\n 'or are using an old dataset, go to '\n 'costar_models/python/costar_models/util.py '\n 'and change model_instance.null_option and model_instance.num_options '\n 'accordingly in the \"costar\" features case.')\n self.null_option = len(labels_to_name)\n # Total number of options incl. null\n self.num_options = len(labels_to_name) + 1\n\n length = label.shape[0]\n prev_label = np.zeros_like(label)\n prev_label[1:] = label[:(length-1)]\n prev_label[0] = self.null_option\n\n goal_idx = np.min((goal_idx, np.ones_like(goal_idx)*(length-1)), axis=0)\n\n if not (image.shape[0] == goal_idx.shape[0]):\n print(\"Image shape:\", image.shape)\n print(\"Goal idxs:\", goal_idx.shape)\n print(label)\n print(goal_idx)\n raise RuntimeError('data type shapes did not match')\n goal_label = label[goal_idx]\n goal_image = image[goal_idx]\n goal_image2, goal_label2 = GetNextGoal(goal_image, label)\n\n # Extend image_0 to full length of sequence\n image0 = image[0]\n image0 = np.tile(np.expand_dims(image0,axis=0),[length,1,1,1])\n\n lbls_1h = np.squeeze(ToOneHot2D(label, self.num_options))\n lbls2_1h = np.squeeze(ToOneHot2D(goal_label2, self.num_options))\n if self.no_disc:\n return ([image0, image, label, goal_label, prev_label],\n [goal_image,\n goal_image2,])\n else:\n return ([image0, image, label, goal_label, prev_label],\n [goal_image,\n goal_image2,\n lbls2_1h,])\n\n" ]
[ [ "numpy.zeros_like", "numpy.expand_dims", "numpy.ones_like" ] ]
Jackwaterveg/Parakeet
[ "e75a07076ba5766206a6cd1fb2e5f82b0ba3842c" ]
[ "utils/gen_duration_from_textgrid.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nimport os\nfrom pathlib import Path\n\nimport librosa\nimport numpy as np\nfrom praatio import tgio\n\n\ndef readtg(tg_path, sample_rate=24000, n_shift=300):\n alignment = tgio.openTextgrid(tg_path, readRaw=True)\n phones = []\n ends = []\n for interval in alignment.tierDict[\"phones\"].entryList:\n phone = interval.label\n phones.append(phone)\n ends.append(interval.end)\n frame_pos = librosa.time_to_frames(ends, sr=sample_rate, hop_length=n_shift)\n durations = np.diff(frame_pos, prepend=0)\n assert len(durations) == len(phones)\n # merge \"\" and sp in the end\n if phones[-1] == \"\" and len(phones) > 1 and phones[-2] == \"sp\":\n phones = phones[:-1]\n durations[-2] += durations[-1]\n durations = durations[:-1]\n # replace the last \"sp\" with \"sil\" in MFA1.x\n phones[-1] = \"sil\" if phones[-1] == \"sp\" else phones[-1]\n # replace the edge \"\" with \"sil\", replace the inner \"\" with \"sp\"\n new_phones = []\n for i, phn in enumerate(phones):\n if phn == \"\":\n if i in {0, len(phones) - 1}:\n new_phones.append(\"sil\")\n else:\n new_phones.append(\"sp\")\n else:\n new_phones.append(phn)\n phones = new_phones\n results = \"\"\n for (p, d) in zip(phones, durations):\n results += p + \" \" + str(d) + \" \"\n return results.strip()\n\n\n# assume that the directory structure of inputdir is inputdir/speaker/*.TextGrid\n# in MFA1.x, there are blank labels(\"\") in the end, and maybe \"sp\" before it\n# in MFA2.x, there are blank labels(\"\") in the begin and the end, while no \"sp\" and \"sil\" anymore\n# we replace it with \"sil\"\ndef gen_duration_from_textgrid(inputdir, output, sample_rate=24000,\n n_shift=300):\n # key: utt_id, value: (speaker, phn_durs)\n durations_dict = {}\n list_dir = os.listdir(inputdir)\n speakers = [dir for dir in list_dir if os.path.isdir(inputdir / dir)]\n for speaker in speakers:\n subdir = inputdir / speaker\n for file in os.listdir(subdir):\n if file.endswith(\".TextGrid\"):\n tg_path = subdir / file\n name = file.split(\".\")[0]\n durations_dict[name] = (speaker, readtg(\n tg_path, sample_rate=sample_rate, n_shift=n_shift))\n with open(output, \"w\") as wf:\n for name in sorted(durations_dict.keys()):\n wf.write(name + \"|\" + durations_dict[name][0] + \"|\" +\n durations_dict[name][1] + \"\\n\")\n\n\ndef main():\n # parse config and args\n parser = argparse.ArgumentParser(\n description=\"Preprocess audio and then extract features.\")\n parser.add_argument(\n \"--inputdir\",\n default=None,\n type=str,\n help=\"directory to alignment files.\")\n parser.add_argument(\n \"--output\", type=str, required=True, help=\"output duration file.\")\n parser.add_argument(\"--sample-rate\", type=int, help=\"the sample of wavs.\")\n parser.add_argument(\n \"--n-shift\",\n type=int,\n help=\"the n_shift of time_to_freames, also called hop_length.\")\n\n args = parser.parse_args()\n\n inputdir = Path(args.inputdir).expanduser()\n output 
= Path(args.output).expanduser()\n gen_duration_from_textgrid(inputdir, output, args.sample_rate, args.n_shift)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.diff" ] ]
ufukhurriyetoglu/allennlp
[ "3f431799776dbf2a42091ba114fc3b6f38b268c8" ]
[ "allennlp/models/coreference_resolution/coref.py" ]
[ "import logging\nimport math\nfrom typing import Any, Dict, List, Optional, Tuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom overrides import overrides\n\nfrom allennlp.common import Params\nfrom allennlp.data import Vocabulary\nfrom allennlp.models.model import Model\nfrom allennlp.modules.token_embedders import Embedding\nfrom allennlp.modules import FeedForward\nfrom allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder\nfrom allennlp.nn import util, InitializerApplicator, RegularizerApplicator\nfrom allennlp.training.metrics import MentionRecall, ConllCorefScores\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\[email protected](\"coref\")\nclass CoreferenceResolver(Model):\n \"\"\"\n This ``Model`` implements the coreference resolution model described \"End-to-end Neural\n Coreference Resolution\"\n <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>\n by Lee et al., 2017.\n The basic outline of this model is to get an embedded representation of each span in the\n document. These span representations are scored and used to prune away spans that are unlikely\n to occur in a coreference cluster. For the remaining spans, the model decides which antecedent\n span (if any) they are coreferent with. The resulting coreference links, after applying\n transitivity, imply a clustering of the spans in the document.\n\n Parameters\n ----------\n vocab : ``Vocabulary``\n text_field_embedder : ``TextFieldEmbedder``\n Used to embed the ``text`` ``TextField`` we get as input to the model.\n context_layer : ``Seq2SeqEncoder``\n This layer incorporates contextual information for each word in the document.\n mention_feedforward : ``FeedForward``\n This feedforward network is applied to the span representations which is then scored\n by a linear layer.\n antecedent_feedforward: ``FeedForward``\n This feedforward network is applied to pairs of span representation, along with any\n pairwise features, which is then scored by a linear layer.\n feature_size: ``int``\n The embedding size for all the embedded features, such as distances or span widths.\n max_span_width: ``int``\n The maximum width of candidate spans.\n spans_per_word: float, required.\n A multiplier between zero and one which controls what percentage of candidate mention\n spans we retain with respect to the number of words in the document.\n max_antecedents: int, required.\n For each mention which survives the pruning stage, we consider this many antecedents.\n lexical_dropout: ``int``\n The probability of dropping out dimensions of the embedded text.\n initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)\n Used to initialize the model parameters.\n regularizer : ``RegularizerApplicator``, optional (default=``None``)\n If provided, will be used to calculate the regularization penalty during training.\n \"\"\"\n def __init__(self,\n vocab: Vocabulary,\n text_field_embedder: TextFieldEmbedder,\n context_layer: Seq2SeqEncoder,\n mention_feedforward: FeedForward,\n antecedent_feedforward: FeedForward,\n feature_size: int,\n max_span_width: int,\n spans_per_word: float,\n max_antecedents: int,\n lexical_dropout: float = 0.2,\n initializer: InitializerApplicator = InitializerApplicator(),\n regularizer: Optional[RegularizerApplicator] = None) -> None:\n super(CoreferenceResolver, self).__init__(vocab, regularizer)\n\n self._text_field_embedder 
= text_field_embedder\n self._context_layer = context_layer\n self._mention_feedforward = TimeDistributed(mention_feedforward)\n self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)\n self._mention_scorer = TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1))\n self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))\n self._head_scorer = TimeDistributed(torch.nn.Linear(context_layer.get_output_dim(), 1))\n\n # 10 possible distance buckets.\n self._num_distance_buckets = 10\n self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)\n self._span_width_embedding = Embedding(max_span_width, feature_size)\n\n self._max_span_width = max_span_width\n self._spans_per_word = spans_per_word\n self._max_antecedents = max_antecedents\n\n self._mention_recall = MentionRecall()\n self._conll_coref_scores = ConllCorefScores()\n if lexical_dropout > 0:\n self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)\n else:\n self._lexical_dropout = lambda x: x\n initializer(self)\n\n @overrides\n def forward(self, # type: ignore\n text: Dict[str, torch.LongTensor],\n span_starts: torch.IntTensor,\n span_ends: torch.IntTensor,\n span_labels: torch.IntTensor = None,\n metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:\n # pylint: disable=arguments-differ\n \"\"\"\n Parameters\n ----------\n text : ``Dict[str, torch.LongTensor]``, required.\n The output of a ``TextField`` representing the text of\n the document.\n span_starts : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans, 1), representing the start indices of\n candidate spans for mentions. Comes from a ``ListField[IndexField]`` of indices into\n the text of the document.\n span_ends : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans, 1), representing the end indices of\n candidate spans for mentions. Comes from a ``ListField[IndexField]`` of indices into\n the text of the document.\n span_labels : ``torch.IntTensor``, optional (default = None)\n A tensor of shape (batch_size, num_spans), representing the cluster ids\n of each span, or -1 for those which do not appear in any clusters.\n\n Returns\n -------\n An output dictionary consisting of:\n top_spans : ``torch.IntTensor``\n A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing\n the start and end word indices of the top spans that survived the pruning stage.\n antecedent_indices : ``torch.IntTensor``\n A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span\n the index (with respect to top_spans) of the possible antecedents the model considered.\n predicted_antecedents : ``torch.IntTensor``\n A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the\n index (with respect to antecedent_indices) of the most likely antecedent. -1 means there\n was no predicted link.\n loss : ``torch.FloatTensor``, optional\n A scalar loss to be optimised.\n \"\"\"\n # Shape: (batch_size, document_length, embedding_size)\n text_embeddings = self._lexical_dropout(self._text_field_embedder(text))\n\n document_length = text_embeddings.size(1)\n num_spans = span_starts.size(1)\n\n # Shape: (batch_size, document_length)\n text_mask = util.get_text_field_mask(text).float()\n\n # Shape: (batch_size, num_spans, 1)\n span_mask = (span_starts >= 0).float()\n # IndexFields return -1 when they are used as padding. 
As we do\n # some comparisons based on span widths when we attend over the\n # span representations that we generate from these indices, we\n # need them to be <= 0. This is only relevant in edge cases where\n # the number of spans we consider after the pruning stage is >= the\n # total number of spans, because in this case, it is possible we might\n # consider a masked span.\n span_starts = F.relu(span_starts.float()).long()\n span_ends = F.relu(span_ends.float()).long()\n\n # Shape: (batch_size, num_spans, embedding_size)\n span_embeddings = self._compute_span_representations(text_embeddings,\n text_mask,\n span_starts,\n span_ends)\n # Compute a score for whether each span is a mention,\n # making sure that masked spans have very low scores.\n # Shape: (batch_size, num_spans, 1)\n mention_scores = self._mention_scorer(self._mention_feedforward(span_embeddings))\n mention_scores += span_mask.log()\n\n # Prune based on mention scores.\n num_spans_to_keep = int(math.floor(self._spans_per_word * document_length))\n\n # Shape: (batch_size, num_spans_to_keep)\n # These are indices (with values between 0 and num_spans) into\n # the span_embeddings tensor.\n top_span_indices = self._prune_and_sort_spans(mention_scores, num_spans_to_keep)\n\n # Now that we've decided which spans are actually mentions the next\n # few steps are reformatting all of our variables to be in terms of\n # num_spans_to_keep instead of num_spans, so we don't waste computation\n # on spans that we've already discarded.\n\n # Shape: (batch_size * num_spans_to_keep)\n # torch.index_select only accepts 1D indices, but here\n # we need to select spans for each element in the batch.\n # This reformats the indices to take into account their\n # index into the batch. We precompute this here to make\n # the multiple calls to util.batched_index_select below more efficient.\n flat_top_span_indices = util.flatten_and_batch_shift_indices(top_span_indices, num_spans)\n\n # Select the span embeddings corresponding to the\n # top spans based on the mention scorer.\n # Shape: (batch_size, num_spans_to_keep, embedding_size)\n top_span_embeddings = util.batched_index_select(span_embeddings,\n top_span_indices,\n flat_top_span_indices)\n # Shape: (batch_size, num_spans_to_keep, 1)\n # TODO(Mark): If we parameterised the mention scorer to score things in (0, inf)\n # I think we could get rid of the need for this mask entirely.\n top_span_mask = util.batched_index_select(span_mask,\n top_span_indices,\n flat_top_span_indices)\n top_span_mention_scores = util.batched_index_select(mention_scores,\n top_span_indices,\n flat_top_span_indices)\n top_span_starts = util.batched_index_select(span_starts,\n top_span_indices,\n flat_top_span_indices)\n top_span_ends = util.batched_index_select(span_ends,\n top_span_indices,\n flat_top_span_indices)\n\n # Compute indices for antecedent spans to consider.\n max_antecedents = min(self._max_antecedents, num_spans_to_keep)\n\n # Now that we have our variables in terms of num_spans_to_keep, we need to\n # compare span pairs to decide each span's antecedent. Each span can only\n # have prior spans as antecedents, and we only consider up to max_antecedents\n # prior spans. So the first thing we do is construct a matrix mapping a span's\n # index to the indices of its allowed antecedents. Note that this is independent\n # of the batch dimension - it's just a function of the span's position in\n # top_spans. 
The spans are in document order, so we can just use the relative\n # index of the spans to know which other spans are allowed antecedents.\n\n # Once we have this matrix, we reformat our variables again to get embeddings\n # for all valid antecedents for each span. This gives us variables with shapes\n # like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which\n # we can use to make coreference decisions between valid span pairs.\n\n # Shapes:\n # (num_spans_to_keep, max_antecedents),\n # (1, max_antecedents),\n # (1, num_spans_to_keep, max_antecedents)\n valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \\\n self._generate_valid_antecedents(num_spans_to_keep, max_antecedents, text_mask.is_cuda)\n # Select tensors relating to the antecedent spans.\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n candidate_antecedent_embeddings = util.flattened_index_select(top_span_embeddings,\n valid_antecedent_indices)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents)\n candidate_antecedent_mention_scores = util.flattened_index_select(top_span_mention_scores,\n valid_antecedent_indices).squeeze(-1)\n # Compute antecedent scores.\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n span_pair_embeddings = self._compute_span_pair_embeddings(top_span_embeddings,\n candidate_antecedent_embeddings,\n valid_antecedent_offsets)\n\n # Shape: (batch_size, num_spans_to_keep, 1 + max_antecedents)\n coreference_scores = self._compute_coreference_scores(span_pair_embeddings,\n top_span_mention_scores,\n candidate_antecedent_mention_scores,\n valid_antecedent_log_mask)\n # Compute final predictions.\n # Shape: (batch_size, num_spans_to_keep, 2)\n top_spans = torch.cat([top_span_starts, top_span_ends], -1)\n\n # We now have, for each span which survived the pruning stage,\n # a predicted antecedent. This implies a clustering if we group\n # mentions which refer to each other in a chain.\n # Shape: (batch_size, num_spans_to_keep)\n _, predicted_antecedents = coreference_scores.max(2)\n # Subtract one here because index 0 is the \"no antecedent\" class,\n # so this makes the indices line up with actual spans if the prediction\n # is greater than -1.\n predicted_antecedents -= 1\n\n output_dict = {\"top_spans\": top_spans,\n \"antecedent_indices\": valid_antecedent_indices,\n \"predicted_antecedents\": predicted_antecedents}\n if span_labels is not None:\n # Find the gold labels for the spans which we kept.\n pruned_gold_labels = util.batched_index_select(span_labels.unsqueeze(-1),\n top_span_indices,\n flat_top_span_indices)\n\n antecedent_labels = util.flattened_index_select(pruned_gold_labels,\n valid_antecedent_indices).squeeze(-1)\n antecedent_labels += valid_antecedent_log_mask.long()\n\n # Compute labels.\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)\n gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,\n antecedent_labels)\n # Now, compute the loss using the negative marginal log-likelihood.\n # This is equal to the log of the sum of the probabilities of all antecedent predictions\n # that would be consistent with the data, in the sense that we are minimising, for a\n # given span, the negative marginal log likelihood of all antecedents which are in the\n # same gold cluster as the span we are currently considering. 
Each span i predicts a\n # single antecedent j, but there might be several prior mentions k in the same\n # coreference cluster that would be valid antecedents. Our loss is the sum of the\n # probability assigned to all valid antecedents. This is a valid objective for\n # clustering as we don't mind which antecedent is predicted, so long as they are in\n # the same coreference cluster.\n coreference_log_probs = util.last_dim_log_softmax(coreference_scores, top_span_mask)\n correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()\n negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()\n\n self._mention_recall(top_spans, metadata)\n self._conll_coref_scores(top_spans, valid_antecedent_indices, predicted_antecedents, metadata)\n\n output_dict[\"loss\"] = negative_marginal_log_likelihood\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]):\n \"\"\"\n Converts the list of spans and predicted antecedent indices into clusters\n of spans for each element in the batch.\n\n Parameters\n ----------\n output_dict : ``Dict[str, torch.Tensor]``, required.\n The result of calling :func:`forward` on an instance or batch of instances.\n\n Returns\n -------\n The same output dictionary, but with an additional ``clusters`` key:\n\n clusters : ``List[List[List[Tuple[int, int]]]]``\n A nested list, representing, for each instance in the batch, the list of clusters,\n which are in turn comprised of a list of (start, end) inclusive spans into the\n original document.\n \"\"\"\n\n # A tensor of shape (batch_size, num_spans_to_keep, 2), representing\n # the start and end indices of each span.\n batch_top_spans = output_dict[\"top_spans\"].data.cpu()\n\n # A tensor of shape (batch_size, num_spans_to_keep) representing, for each span,\n # the index into ``antecedent_indices`` which specifies the antecedent span. Additionally,\n # the index can be -1, specifying that the span has no predicted antecedent.\n batch_predicted_antecedents = output_dict[\"predicted_antecedents\"].data.cpu()\n\n # A tensor of shape (num_spans_to_keep, max_antecedents), representing the indices\n # of the predicted antecedents with respect to the 2nd dimension of ``batch_top_spans``\n # for each antecedent we considered.\n antecedent_indices = output_dict[\"antecedent_indices\"].data.cpu()\n batch_clusters: List[List[List[Tuple[int, int]]]] = []\n\n # Calling zip() on two tensors results in an iterator over their\n # first dimension. 
This is iterating over instances in the batch.\n for top_spans, predicted_antecedents in zip(batch_top_spans, batch_predicted_antecedents):\n spans_to_cluster_ids: Dict[Tuple[int, int], int] = {}\n clusters: List[List[Tuple[int, int]]] = []\n\n for i, (span, predicted_antecedent) in enumerate(zip(top_spans, predicted_antecedents)):\n if predicted_antecedent < 0:\n # We don't care about spans which are\n # not co-referent with anything.\n continue\n\n # Find the right cluster to update with this span.\n # To do this, we find the row in ``antecedent_indices``\n # corresponding to this span we are considering.\n # The predicted antecedent is then an index into this list\n # of indices, denoting the span from ``top_spans`` which is the\n # most likely antecedent.\n predicted_index = antecedent_indices[i, predicted_antecedent]\n\n antecedent_span = (top_spans[predicted_index, 0],\n top_spans[predicted_index, 1])\n # Check if we've seen the span before.\n if antecedent_span in spans_to_cluster_ids.keys():\n predicted_cluster_id: int = spans_to_cluster_ids[antecedent_span]\n else:\n # We start a new cluster.\n predicted_cluster_id = len(clusters)\n # Append a new cluster containing only this span.\n clusters.append([antecedent_span])\n # Record the new id of this span.\n spans_to_cluster_ids[antecedent_span] = predicted_cluster_id\n\n # Now add the span we are currently considering.\n span_start, span_end = span\n clusters[predicted_cluster_id].append((span_start, span_end))\n spans_to_cluster_ids[(span_start, span_end)] = predicted_cluster_id\n batch_clusters.append(clusters)\n\n output_dict[\"clusters\"] = batch_clusters\n return output_dict\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n mention_recall = self._mention_recall.get_metric(reset)\n coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)\n\n return {\"coref_precision\": coref_precision,\n \"coref_recall\": coref_recall,\n \"coref_f1\": coref_f1,\n \"mention_recall\": mention_recall}\n\n def _create_attended_span_representations(self,\n head_scores: torch.FloatTensor,\n text_embeddings: torch.FloatTensor,\n span_ends: torch.IntTensor,\n span_widths: torch.IntTensor) -> torch.FloatTensor:\n \"\"\"\n Given a tensor of unnormalized attention scores for each word in the document, compute\n distributions over every span with respect to these scores by normalising the headedness\n scores for words inside the span.\n\n Given these headedness distributions over every span, weight the corresponding vector\n representations of the words in the span by this distribution, returning a weighted\n representation of each span.\n\n Parameters\n ----------\n head_scores : ``torch.FloatTensor``, required.\n Unnormalized headedness scores for every word. This score is shared for every\n candidate. 
The only way in which the headedness scores differ over different\n spans is in the set of words over which they are normalized.\n text_embeddings: ``torch.FloatTensor``, required.\n The embeddings with shape (batch_size, document_length, embedding_size)\n over which we are computing a weighted sum.\n span_ends: ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans, 1), representing the end indices\n of each span.\n span_widths : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans, 1) representing the width of each\n span candidates.\n Returns\n -------\n attended_text_embeddings : ``torch.FloatTensor``\n A tensor of shape (batch_size, num_spans, embedding_dim) - the result of\n applying attention over all words within each candidate span.\n \"\"\"\n # Shape: (1, 1, max_span_width)\n max_span_range_indices = util.get_range_vector(self._max_span_width,\n text_embeddings.is_cuda).view(1, 1, -1)\n\n # Shape: (batch_size, num_spans, max_span_width)\n # This is a broadcasted comparison - for each span we are considering,\n # we are creating a range vector of size max_span_width, but masking values\n # which are greater than the actual length of the span.\n span_mask = (max_span_range_indices <= span_widths).float()\n raw_span_indices = span_ends - max_span_range_indices\n # We also don't want to include span indices which are less than zero,\n # which happens because some spans near the beginning of the document\n # are of a smaller width than max_span_width, so we add this to the mask here.\n span_mask = span_mask * (raw_span_indices >= 0).float()\n # Spans\n span_indices = F.relu(raw_span_indices.float()).long()\n\n # Shape: (batch_size * num_spans * max_span_width)\n flat_span_indices = util.flatten_and_batch_shift_indices(span_indices, text_embeddings.size(1))\n\n # Shape: (batch_size, num_spans, max_span_width, embedding_dim)\n span_text_embeddings = util.batched_index_select(text_embeddings, span_indices, flat_span_indices)\n\n # Shape: (batch_size, num_spans, max_span_width)\n span_head_scores = util.batched_index_select(head_scores, span_indices, flat_span_indices).squeeze(-1)\n\n # Shape: (batch_size, num_spans, max_span_width)\n span_head_weights = util.last_dim_softmax(span_head_scores, span_mask)\n\n # Do a weighted sum of the embedded spans with\n # respect to the normalised head score distributions.\n # Shape: (batch_size, num_spans, embedding_dim)\n attended_text_embeddings = util.weighted_sum(span_text_embeddings, span_head_weights)\n\n return attended_text_embeddings\n\n def _compute_span_representations(self,\n text_embeddings: torch.FloatTensor,\n text_mask: torch.FloatTensor,\n span_starts: torch.IntTensor,\n span_ends: torch.IntTensor) -> torch.FloatTensor:\n \"\"\"\n Computes an embedded representation of every candidate span. 
This is a concatenation\n of the contextualized endpoints of the span, an embedded representation of the width of\n the span and a representation of the span's predicted head.\n\n Parameters\n ----------\n text_embeddings : ``torch.FloatTensor``, required.\n The embedded document of shape (batch_size, document_length, embedding_dim)\n over which we are computing a weighted sum.\n text_mask : ``torch.FloatTensor``, required.\n A mask of shape (batch_size, document_length) representing non-padding entries of\n ``text_embeddings``.\n span_starts : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans) representing the start of each span candidate.\n span_ends : ``torch.IntTensor``, required.\n A tensor of shape (batch_size, num_spans) representing the end of each span candidate.\n Returns\n -------\n span_embeddings : ``torch.FloatTensor``\n An embedded representation of every candidate span with shape:\n (batch_size, num_spans, context_layer.get_output_dim() * 2 + embedding_size + feature_size)\n \"\"\"\n # Shape: (batch_size, document_length, encoding_dim)\n contextualized_embeddings = self._context_layer(text_embeddings, text_mask)\n\n # Shape: (batch_size, num_spans, encoding_dim)\n start_embeddings = util.batched_index_select(contextualized_embeddings, span_starts.squeeze(-1))\n end_embeddings = util.batched_index_select(contextualized_embeddings, span_ends.squeeze(-1))\n\n # Compute and embed the span_widths (strictly speaking the span_widths - 1)\n # Shape: (batch_size, num_spans, 1)\n span_widths = span_ends - span_starts\n # Shape: (batch_size, num_spans, encoding_dim)\n span_width_embeddings = self._span_width_embedding(span_widths.squeeze(-1))\n\n # Shape: (batch_size, document_length, 1)\n head_scores = self._head_scorer(contextualized_embeddings)\n\n # Shape: (batch_size, num_spans, embedding_dim)\n # Note that we used the original text embeddings, not the contextual ones here.\n attended_text_embeddings = self._create_attended_span_representations(head_scores,\n text_embeddings,\n span_ends,\n span_widths)\n # (batch_size, num_spans, context_layer.get_output_dim() * 2 + embedding_dim + feature_dim)\n span_embeddings = torch.cat([start_embeddings,\n end_embeddings,\n span_width_embeddings,\n attended_text_embeddings], -1)\n return span_embeddings\n\n @staticmethod\n def _prune_and_sort_spans(mention_scores: torch.FloatTensor,\n num_spans_to_keep: int) -> torch.IntTensor:\n \"\"\"\n The indices of the top-k scoring spans according to span_scores. We return the\n indices in their original order, not ordered by score, so that we can rely on\n the ordering to consider the previous k spans as antecedents for each span later.\n\n Parameters\n ----------\n mention_scores : ``torch.FloatTensor``, required.\n The mention score for every candidate, with shape (batch_size, num_spans, 1).\n num_spans_to_keep : ``int``, required.\n The number of spans to keep when pruning.\n Returns\n -------\n top_span_indices : ``torch.IntTensor``, required.\n The indices of the top-k scoring spans. 
Has shape (batch_size, num_spans_to_keep).\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, 1)\n _, top_span_indices = mention_scores.topk(num_spans_to_keep, 1)\n top_span_indices, _ = torch.sort(top_span_indices, 1)\n\n # Shape: (batch_size, num_spans_to_keep)\n top_span_indices = top_span_indices.squeeze(-1)\n return top_span_indices\n\n @staticmethod\n def _generate_valid_antecedents(num_spans_to_keep: int,\n max_antecedents: int,\n is_cuda: bool) -> Tuple[torch.IntTensor,\n torch.IntTensor,\n torch.FloatTensor]:\n \"\"\"\n This method generates possible antecedents per span which survived the pruning\n stage. This procedure is `generic across the batch`. The reason this is the case is\n that each span in a batch can be coreferent with any previous span, but here we\n are computing the possible `indices` of these spans. So, regardless of the batch,\n the 1st span _cannot_ have any antecedents, because there are none to select from.\n Similarly, each element can only predict previous spans, so this returns a matrix\n of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to\n (i - 1) - j if j <= i, or zero otherwise.\n\n Parameters\n ----------\n num_spans_to_keep : ``int``, required.\n The number of spans that were kept while pruning.\n max_antecedents : ``int``, required.\n The maximum number of antecedent spans to consider for every span.\n is_cuda : ``bool``, required.\n Whether the computation is being done on the GPU or not.\n\n Returns\n -------\n valid_antecedent_indices : ``torch.IntTensor``\n The indices of every antecedent to consider with respect to the top k spans.\n Has shape ``(num_spans_to_keep, max_antecedents)``.\n valid_antecedent_offsets : ``torch.IntTensor``\n The distance between the span and each of its antecedents in terms of the number\n of considered spans (i.e not the word distance between the spans).\n Has shape ``(1, max_antecedents)``.\n valid_antecedent_log_mask : ``torch.FloatTensor``\n The logged mask representing whether each antecedent span is valid. Required since\n different spans have different numbers of valid antecedents. For example, the first\n span in the document should have no valid antecedents.\n Has shape ``(1, num_spans_to_keep, max_antecedents)``.\n \"\"\"\n # Shape: (num_spans_to_keep, 1)\n target_indices = util.get_range_vector(num_spans_to_keep, is_cuda).unsqueeze(1)\n\n # Shape: (1, max_antecedents)\n valid_antecedent_offsets = (util.get_range_vector(max_antecedents, is_cuda) + 1).unsqueeze(0)\n\n # This is a broadcasted subtraction.\n # Shape: (num_spans_to_keep, max_antecedents)\n raw_antecedent_indices = target_indices - valid_antecedent_offsets\n\n # In our matrix of indices, the upper triangular part will be negative\n # because the offsets will be > the target indices. 
We want to mask these,\n # because these are exactly the indices which we don't want to predict, per span.\n # We're generating a logspace mask here because we will eventually create a\n # distribution over these indices, so we need the 0 elements of the mask to be -inf\n # in order to not mess up the normalisation of the distribution.\n # Shape: (1, num_spans_to_keep, max_antecedents)\n valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log()\n\n # Shape: (num_spans_to_keep, max_antecedents)\n valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()\n return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask\n\n def _compute_span_pair_embeddings(self,\n top_span_embeddings: torch.FloatTensor,\n antecedent_embeddings: torch.FloatTensor,\n antecedent_offsets: torch.FloatTensor):\n \"\"\"\n Computes an embedding representation of pairs of spans for the pairwise scoring function\n to consider. This includes both the original span representations, the element-wise\n similarity of the span representations, and an embedding representation of the distance\n between the two spans.\n\n Parameters\n ----------\n top_span_embeddings : ``torch.FloatTensor``, required.\n Embedding representations of the top spans. Has shape\n (batch_size, num_spans_to_keep, embedding_size).\n antecedent_embeddings : ``torch.FloatTensor``, required.\n Embedding representations of the antecedent spans we are considering\n for each top span. Has shape\n (batch_size, num_spans_to_keep, max_antecedents, embedding_size).\n antecedent_offsets : ``torch.IntTensor``, required.\n The offsets between each top span and its antecedent spans in terms\n of spans we are considering. Has shape (1, max_antecedents).\n\n Returns\n -------\n span_pair_embeddings : ``torch.FloatTensor``\n Embedding representation of the pair of spans to consider. Has shape\n (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)\n\n # Shape: (1, max_antecedents, embedding_size)\n antecedent_distance_embeddings = self._distance_embedding(\n util.bucket_values(antecedent_offsets,\n num_total_buckets=self._num_distance_buckets))\n\n # Shape: (1, 1, max_antecedents, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)\n\n expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),\n antecedent_embeddings.size(1),\n antecedent_embeddings.size(2),\n antecedent_distance_embeddings.size(-1))\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)\n span_pair_embeddings = torch.cat([target_embeddings,\n antecedent_embeddings,\n antecedent_embeddings * target_embeddings,\n antecedent_distance_embeddings], -1)\n return span_pair_embeddings\n\n @staticmethod\n def _compute_antecedent_gold_labels(top_span_labels: torch.IntTensor,\n antecedent_labels: torch.IntTensor):\n \"\"\"\n Generates a binary indicator for every pair of spans. This label is one if and\n only if the pair of spans belong to the same cluster. 
The labels are augmented\n with a dummy antecedent at the zeroth position, which represents the prediction\n that a span does not have any antecedent.\n\n Parameters\n ----------\n top_span_labels : ``torch.IntTensor``, required.\n The cluster id label for every span. The id is arbitrary,\n as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).\n antecedent_labels : ``torch.IntTensor``, required.\n The cluster id label for every antecedent span. The id is arbitrary,\n as we just care about the clustering. Has shape\n (batch_size, num_spans_to_keep, max_antecedents).\n\n Returns\n -------\n pairwise_labels_with_dummy_label : ``torch.FloatTensor``\n A binary tensor representing whether a given pair of spans belong to\n the same cluster in the gold clustering.\n Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).\n\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, max_antecedents)\n target_labels = top_span_labels.expand_as(antecedent_labels)\n same_cluster_indicator = (target_labels == antecedent_labels).float()\n non_dummy_indicator = (target_labels >= 0).float()\n pairwise_labels = same_cluster_indicator * non_dummy_indicator\n\n # Shape: (batch_size, num_spans_to_keep, 1)\n dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)\n pairwise_labels_with_dummy_label = torch.cat([dummy_labels, pairwise_labels], -1)\n return pairwise_labels_with_dummy_label\n\n def _compute_coreference_scores(self,\n pairwise_embeddings: torch.FloatTensor,\n top_span_mention_scores: torch.FloatTensor,\n antecedent_mention_scores: torch.FloatTensor,\n antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n Computes scores for every pair of spans. Additionally, a dummy label is included,\n representing the decision that the span is not coreferent with anything. For the dummy\n label, the score is always zero. For the true antecedent spans, the score consists of\n the pairwise antecedent score and the unary mention scores for the span and its\n antecedent. The factoring allows the model to blame many of the absent links on bad\n spans, enabling the pruning strategy used in the forward pass.\n\n Parameters\n ----------\n pairwise_embeddings: ``torch.FloatTensor``, required.\n Embedding representations of pairs of spans. Has shape\n (batch_size, num_spans_to_keep, max_antecedents, encoding_dim)\n top_span_mention_scores: ``torch.FloatTensor``, required.\n Mention scores for every span. Has shape\n (batch_size, num_spans_to_keep, max_antecedents).\n antecedent_mention_scores: ``torch.FloatTensor``, required.\n Mention scores for every antecedent. 
Has shape\n (batch_size, num_spans_to_keep, max_antecedents).\n antecedent_log_mask: ``torch.FloatTensor``, required.\n The log of the mask for valid antecedents.\n\n Returns\n -------\n coreference_scores: ``torch.FloatTensor``\n A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1),\n representing the unormalised score for each (span, antecedent) pair\n we considered.\n\n \"\"\"\n # Shape: (batch_size, num_spans_to_keep, max_antecedents)\n antecedent_scores = self._antecedent_scorer(\n self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)\n antecedent_scores += top_span_mention_scores + antecedent_mention_scores\n antecedent_scores += antecedent_log_mask\n\n # Shape: (batch_size, num_spans_to_keep, 1)\n shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]\n dummy_scores = Variable(antecedent_scores.data.new(*shape).fill_(0), requires_grad=False)\n\n # Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)\n coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)\n return coreference_scores\n\n @classmethod\n def from_params(cls, vocab: Vocabulary, params: Params) -> \"CoreferenceResolver\":\n embedder_params = params.pop(\"text_field_embedder\")\n text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)\n context_layer = Seq2SeqEncoder.from_params(params.pop(\"context_layer\"))\n mention_feedforward = FeedForward.from_params(params.pop(\"mention_feedforward\"))\n antecedent_feedforward = FeedForward.from_params(params.pop(\"antecedent_feedforward\"))\n\n feature_size = params.pop(\"feature_size\")\n max_span_width = params.pop(\"max_span_width\")\n spans_per_word = params.pop(\"spans_per_word\")\n max_antecedents = params.pop(\"max_antecedents\")\n lexical_dropout = params.pop(\"lexical_dropout\", 0.2)\n\n init_params = params.pop(\"initializer\", None)\n reg_params = params.pop(\"regularizer\", None)\n initializer = (InitializerApplicator.from_params(init_params)\n if init_params is not None\n else InitializerApplicator())\n regularizer = RegularizerApplicator.from_params(reg_params) if reg_params is not None else None\n\n params.assert_empty(cls.__name__)\n return cls(vocab=vocab,\n text_field_embedder=text_field_embedder,\n context_layer=context_layer,\n mention_feedforward=mention_feedforward,\n antecedent_feedforward=antecedent_feedforward,\n feature_size=feature_size,\n max_span_width=max_span_width,\n spans_per_word=spans_per_word,\n max_antecedents=max_antecedents,\n lexical_dropout=lexical_dropout,\n initializer=initializer,\n regularizer=regularizer)\n" ]
[ [ "torch.cat", "torch.nn.Dropout", "torch.sort" ] ]
civic-jabber/data-ingest
[ "bf44c6041ad947547ceede535124c5db004d2f43" ]
[ "civic_jabber_ingest/cli.py" ]
[ "import click\nimport pandas as pd\n\n\nfrom civic_jabber_ingest.external_services.newspaper import load_news\nfrom civic_jabber_ingest.external_services.open_states import get_all_people\nfrom civic_jabber_ingest.regs.va import load_va_regulations\nfrom civic_jabber_ingest.utils.config import read_config\n\n\[email protected]()\ndef main():\n pass\n\n\[email protected](\"run-ingest\")\ndef run_ingest():\n \"\"\"Runs all of the ingest commands that are current implemented. Currently, this\n includes:\n\n 1. Regulations for VA\n \"\"\"\n print(\"Loading VA regs ...\")\n load_va_regulations()\n\n\nmain.add_command(run_ingest)\n\n\[email protected](\"ingest-news\")\[email protected](\"--start\")\[email protected](\"--end\")\ndef ingest_news(start, end):\n \"\"\"Ingests news for states in the specified range. States are in alphabetical\n order.\"\"\"\n states = list(read_config(\"states\").keys())\n states = [state.lower() for state in states]\n load_news(states)\n\n\nmain.add_command(ingest_news)\n\n\[email protected](\"people-to-csv\")\[email protected](\"--state\")\[email protected](\"--outfile\")\ndef people_to_csv(state, outfile):\n \"\"\"Finds a list of legislators for a state and saves the results as a CSV file.\"\"\"\n people = get_all_people(state, per_page=25, links=True)\n\n data = {\"name\": [], \"party\": [], \"role\": [], \"district\": [], \"link\": []}\n for person in people:\n data[\"name\"].append(person[\"name\"])\n data[\"party\"].append(person[\"party\"])\n data[\"role\"].append(person[\"current_role\"][\"title\"])\n data[\"district\"].append(person[\"current_role\"][\"district\"])\n\n if person[\"links\"]:\n data[\"link\"].append(person[\"links\"][0][\"url\"])\n else:\n data[\"link\"].append(None)\n\n people_data = pd.DataFrame(data)\n people_data.to_csv(outfile, index=False)\n\n\nmain.add_command(people_to_csv)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
beaulian/fpn.pytorch
[ "49fe36711cff71e26d9bef838613577dabd02336" ]
[ "lib/datasets/pascal_voc.py" ]
[ "# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport xml.dom.minidom as minidom\n\nimport os\n# import PIL\nimport numpy as np\nimport scipy.sparse\nimport subprocess\nimport six.moves.cPickle as cPickle\nimport math\nimport glob\nimport uuid\nimport scipy.io as sio\nimport xml.etree.ElementTree as ET\n\nfrom .imdb import imdb\nfrom .imdb import ROOT_DIR\nfrom datasets import ds_utils\nfrom .voc_eval import voc_eval\n\n# TODO: make fast_rcnn irrelevant\n# >>>> obsolete, because it depends on sth outside of this project\nfrom model.utils.config import cfg\n\n\n# <<<< obsolete\n\n\nclass pascal_voc(imdb):\n def __init__(self, image_set, year, devkit_path=None):\n imdb.__init__(self, 'voc_' + year + '_' + image_set)\n self._year = year\n self._image_set = image_set\n self._devkit_path = self._get_default_path() if devkit_path is None \\\n else devkit_path\n self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)\n self._classes = ('__background__', # always index 0\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n self._image_ext = '.jpg'\n self._image_index = self._load_image_set_index()\n # Default to roidb handler\n # self._roidb_handler = self.selective_search_roidb\n self._roidb_handler = self.gt_roidb\n self._salt = str(uuid.uuid4())\n self._comp_id = 'comp4'\n\n # PASCAL specific config options\n self.config = {'cleanup': True,\n 'use_salt': True,\n 'use_diff': False,\n 'matlab_eval': False,\n 'rpn_file': None,\n 'min_size': 2}\n\n assert os.path.exists(self._devkit_path), \\\n 'VOCdevkit path does not exist: {}'.format(self._devkit_path)\n assert os.path.exists(self._data_path), \\\n 'Path does not exist: {}'.format(self._data_path)\n\n def image_path_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return self.image_path_from_index(self._image_index[i])\n\n def image_id_at(self, i):\n \"\"\"\n Return the absolute path to image i in the image sequence.\n \"\"\"\n return i\n\n def image_path_from_index(self, index):\n \"\"\"\n Construct an image path from the image's \"index\" identifier.\n \"\"\"\n image_path = os.path.join(self._data_path, 'JPEGImages',\n index + self._image_ext)\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path\n\n def _load_image_set_index(self):\n \"\"\"\n Load the indexes listed in this dataset's image set file.\n \"\"\"\n # Example path to image set file:\n # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',\n self._image_set + '.txt')\n assert os.path.exists(image_set_file), \\\n 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = [x.strip() for x in f.readlines()]\n return image_index\n\n def _get_default_path(self):\n \"\"\"\n Return the default path where PASCAL VOC is expected to be installed.\n \"\"\"\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)\n\n def gt_roidb(self):\n \"\"\"\n Return the database of ground-truth regions of interest.\n\n This function 
loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print('{} gt roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n gt_roidb = [self._load_pascal_annotation(index)\n for index in self.image_index]\n with open(cache_file, 'wb') as fid:\n cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print('wrote gt roidb to {}'.format(cache_file))\n\n return gt_roidb\n\n def selective_search_roidb(self):\n \"\"\"\n Return the database of selective search regions of interest.\n Ground-truth ROIs are also included.\n\n This function loads/saves from/to a cache file to speed up future calls.\n \"\"\"\n cache_file = os.path.join(self.cache_path,\n self.name + '_selective_search_roidb.pkl')\n\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as fid:\n roidb = cPickle.load(fid)\n print('{} ss roidb loaded from {}'.format(self.name, cache_file))\n return roidb\n\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n ss_roidb = self._load_selective_search_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, ss_roidb)\n else:\n roidb = self._load_selective_search_roidb(None)\n with open(cache_file, 'wb') as fid:\n cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)\n print('wrote ss roidb to {}'.format(cache_file))\n\n return roidb\n\n def rpn_roidb(self):\n if int(self._year) == 2007 or self._image_set != 'test':\n gt_roidb = self.gt_roidb()\n rpn_roidb = self._load_rpn_roidb(gt_roidb)\n roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)\n else:\n roidb = self._load_rpn_roidb(None)\n\n return roidb\n\n def _load_rpn_roidb(self, gt_roidb):\n filename = self.config['rpn_file']\n print('loading {}'.format(filename))\n assert os.path.exists(filename), \\\n 'rpn data not found at: {}'.format(filename)\n with open(filename, 'rb') as f:\n box_list = cPickle.load(f)\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_selective_search_roidb(self, gt_roidb):\n filename = os.path.abspath(os.path.join(cfg.DATA_DIR,\n 'selective_search_data',\n self.name + '.mat'))\n assert os.path.exists(filename), \\\n 'Selective search data not found at: {}'.format(filename)\n raw_data = sio.loadmat(filename)['boxes'].ravel()\n\n box_list = []\n for i in range(raw_data.shape[0]):\n boxes = raw_data[i][:, (1, 0, 3, 2)] - 1\n keep = ds_utils.unique_boxes(boxes)\n boxes = boxes[keep, :]\n keep = ds_utils.filter_small_boxes(boxes, self.config['min_size'])\n boxes = boxes[keep, :]\n box_list.append(boxes)\n\n return self.create_roidb_from_box_list(box_list, gt_roidb)\n\n def _load_pascal_annotation(self, index):\n \"\"\"\n Load image and bounding boxes info from XML file in the PASCAL VOC\n format.\n \"\"\"\n filename = os.path.join(self._data_path, 'Annotations', index + '.xml')\n tree = ET.parse(filename)\n objs = tree.findall('object')\n # if not self.config['use_diff']:\n # # Exclude the samples labeled as difficult\n # non_diff_objs = [\n # obj for obj in objs if int(obj.find('difficult').text) == 0]\n # # if len(non_diff_objs) != len(objs):\n # # print 'Removed {} difficult objects'.format(\n # # len(objs) - len(non_diff_objs))\n # objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # 
\"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n ishards = np.zeros((num_objs), dtype=np.int32)\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n\n diffc = obj.find('difficult')\n difficult = 0 if diffc == None else int(diffc.text)\n ishards[ix] = difficult\n\n cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_ishard': ishards,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas}\n\n def _get_comp_id(self):\n comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']\n else self._comp_id)\n return comp_id\n\n def _get_voc_results_file_template(self):\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'\n filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main')\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n path = os.path.join(filedir, filename)\n return path\n\n def _write_voc_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(self.classes):\n if cls == '__background__':\n continue\n print('Writing {} VOC results file'.format(cls))\n filename = self._get_voc_results_file_template().format(cls)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(self.image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n\n def _do_python_eval(self, output_dir='output'):\n annopath = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'Annotations',\n '{:s}.xml')\n imagesetfile = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'ImageSets',\n 'Main',\n self._image_set + '.txt')\n cachedir = os.path.join(self._devkit_path, 'annotations_cache')\n aps = []\n # The PASCAL VOC metric changed in 2010\n use_07_metric = True if int(self._year) < 2010 else False\n print('VOC07 metric? 
' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for i, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n rec, prec, ap = voc_eval(\n filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,\n use_07_metric=use_07_metric)\n aps += [ap]\n print('AP for {} = {:.4f}'.format(cls, ap))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'w') as f:\n cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print('{:.3f}'.format(ap))\n print('{:.3f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n print('-- Thanks, The Management')\n print('--------------------------------------------------------------')\n\n def _do_matlab_eval(self, output_dir='output'):\n print('-----------------------------------------------------')\n print('Computing results with the official MATLAB eval code.')\n print('-----------------------------------------------------')\n path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',\n 'VOCdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'voc_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\'); quit;\"' \\\n .format(self._devkit_path, self._get_comp_id(),\n self._image_set, output_dir)\n print('Running:\\n{}'.format(cmd))\n status = subprocess.call(cmd, shell=True)\n\n def evaluate_detections(self, all_boxes, output_dir):\n self._write_voc_results_file(all_boxes)\n self._do_python_eval(output_dir)\n if self.config['matlab_eval']:\n self._do_matlab_eval(output_dir)\n if self.config['cleanup']:\n for cls in self._classes:\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n os.remove(filename)\n\n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n\n\nif __name__ == '__main__':\n d = pascal_voc('trainval', '2007')\n res = d.roidb\n from IPython import embed;\n\n embed()\n" ]
[ [ "numpy.mean", "scipy.io.loadmat", "numpy.zeros" ] ]
supunab/Lantern
[ "f453de532da638c1f467953b32bbe49a3dedfa45" ]
[ "src/out/NIPS18evaluation/evaluationRNN/min-char-rnn-pytorch.py" ]
[ "\"\"\"\nMinimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)\nBSD License\n\"\"\"\nimport numpy as np\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\ndef run(write_to):\n\n torch.set_num_threads(1)\n\n start = time.time()\n data = open('graham.txt', 'r').read() # should be simple plain text file\n chars = list(set(data))\n data_size, vocab_size = len(data), len(chars)\n print('data has %d characters, %d unique.' % (data_size, vocab_size))\n char_to_ix = { ch:i for i,ch in enumerate(chars) }\n ix_to_char = { i:ch for i,ch in enumerate(chars) }\n\n # hyper-parameters\n hidden_size = 50 # size of hidden layer of neurons\n seq_length = 20 # number of steps to unroll the RNN for\n batch_size = 20\n learning_rate = 1e-1\n n_iter = 5000\n iter_step = 100\n\n torch.manual_seed(1)\n\n def lineToTensor(line):\n tensor = torch.zeros(seq_length, batch_size, vocab_size)\n for i in range(seq_length):\n for j in range(batch_size):\n tensor[i][j][char_to_ix[line[j * seq_length + i]]] = 1\n return tensor\n\n def lineToLongTensor(line):\n tensor = torch.LongTensor(seq_length, batch_size).zero_()\n for i in range(seq_length):\n for j in range(batch_size):\n tensor[i][j] = char_to_ix[line[j * seq_length + i]]\n return tensor\n\n class RNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(RNN, self).__init__()\n\n self.hidden_size = hidden_size\n self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n self.i2o = nn.Linear(hidden_size, output_size)\n\n def forward(self, input, hidden):\n combined = torch.cat((input, hidden), 1)\n hidden = F.tanh(self.i2h(combined))\n output = self.i2o(hidden)\n return output, hidden\n\n def initHidden(self):\n return Variable(torch.zeros(batch_size, self.hidden_size))\n\n rnn = RNN(vocab_size, hidden_size, vocab_size)\n optimizer = torch.optim.Adagrad(rnn.parameters(), lr = learning_rate)\n criterion = nn.CrossEntropyLoss()\n\n def train(output_tensor, input_tensor):\n hidden = rnn.initHidden()\n\n optimizer.zero_grad()\n\n loss = 0\n\n for i in range(input_tensor.size()[0]):\n output, hidden = rnn(input_tensor[i], hidden)\n loss += criterion(output, output_tensor[i])\n\n loss.backward()\n\n # grad clipping and stepping\n torch.nn.utils.clip_grad_norm(rnn.parameters(), 5.0, norm_type=1)\n optimizer.step()\n\n return loss.data[0]\n\n end = time.time()\n prepareTime = end-start\n\n loss_save = []\n p = -seq_length * batch_size\n start = time.time()\n for iter in range(n_iter + 1):\n p += seq_length * batch_size\n if p+seq_length * batch_size+1 >= len(data): p = 0\n\n inputs = Variable(lineToTensor(data[p:p+seq_length * batch_size]))\n targets = Variable(lineToLongTensor(data[p+1:p+seq_length * batch_size +1]))\n loss = train(targets, inputs)\n if iter % iter_step == 0:\n print('iter %d, loss: %f' % (iter, loss))\n loss_save.append(loss)\n\n end = time.time()\n loopTime = end -start\n\n with open(write_to, \"w\") as f:\n f.write(\"unit: \" + \"100 iteration\\n\")\n for loss in loss_save:\n f.write(\"{}\\n\".format(loss))\n f.write(\"run time: \" + str(prepareTime) + \" \" + str(loopTime) + \"\\n\")\n\nif __name__ == '__main__':\n import sys\n if (len(sys.argv) != 2):\n print(\"should have a file to write results to\")\n exit(0)\n run(sys.argv[1])\n" ]
[ [ "torch.nn.Linear", "torch.manual_seed", "torch.nn.CrossEntropyLoss", "torch.set_num_threads", "torch.zeros", "torch.LongTensor", "torch.cat" ] ]
mcflugen/terrainbento
[ "1b756477b8a8ab6a8f1275b1b30ec84855c840ea" ]
[ "terrainbento/derived_models/model_440_basicChSa/model_440_basicChSa.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nmodel_440_basicChSa.py: erosion model using depth-dependent cubic diffusion\nwith a soil layer, basic stream power, and discharge proportional to drainage\narea.\n\nModel 440 BasicChSa\n\nLandlab components used: FlowRouter, DepressionFinderAndRouter,\n FastscapeStreamPower, DepthDependentCubicDiffuser,\n ExponentialWeatherer\n\n@author: gtucker\n@author: Katherine Barnhart\n\"\"\"\n\nfrom terrainbento.base_class import ErosionModel\nfrom landlab.components import (FlowAccumulator, DepressionFinderAndRouter,\n FastscapeEroder, DepthDependentTaylorDiffuser,\n ExponentialWeatherer)\nimport numpy as np\n\n\nclass BasicChSa(ErosionModel):\n \"\"\"\n A BasicChSa model computes erosion using depth-dependent cubic diffusion\n with a soil layer, basic stream power, and Q~A.\n \"\"\"\n\n def __init__(self, input_file=None, params=None,\n BaselevelHandlerClass=None):\n \"\"\"Initialize the BasicChSa model.\"\"\"\n\n # Call ErosionModel's init\n super(BasicChSa, self).__init__(input_file=input_file,\n params=params,\n BaselevelHandlerClass=BaselevelHandlerClass)\n\n self.K_sp = self.get_parameter_from_exponent('K_sp')\n linear_diffusivity = (self._length_factor**2.)*self.get_parameter_from_exponent('linear_diffusivity') # has units length^2/time\n try:\n initial_soil_thickness = (self._length_factor)*self.params['initial_soil_thickness'] # has units length\n except KeyError:\n initial_soil_thickness = 1.0 # default value\n soil_transport_decay_depth = (self._length_factor)*self.params['soil_transport_decay_depth'] # has units length\n max_soil_production_rate = (self._length_factor)*self.params['max_soil_production_rate'] # has units length per time\n soil_production_decay_depth = (self._length_factor)*self.params['soil_production_decay_depth'] # has units length\n\n # Create soil thickness (a.k.a. 
depth) field\n if 'soil__depth' in self.grid.at_node:\n soil_thickness = self.grid.at_node['soil__depth']\n else:\n soil_thickness = self.grid.add_zeros('node', 'soil__depth')\n\n # Create bedrock elevation field\n if 'bedrock__elevation' in self.grid.at_node:\n bedrock_elev = self.grid.at_node['bedrock__elevation']\n else:\n bedrock_elev = self.grid.add_zeros('node', 'bedrock__elevation')\n\n soil_thickness[:] = initial_soil_thickness\n bedrock_elev[:] = self.z - initial_soil_thickness\n\n # Instantiate a FlowAccumulator with DepressionFinderAndRouter using D8 method\n self.flow_router = FlowAccumulator(self.grid,\n flow_director='D8',\n depression_finder = DepressionFinderAndRouter)\n\n # Instantiate a FastscapeEroder component\n self.eroder = FastscapeEroder(self.grid,\n K_sp=self.K_sp,\n m_sp=self.params['m_sp'],\n n_sp=self.params['n_sp'])\n\n # Instantiate a weathering component\n self.weatherer = ExponentialWeatherer(self.grid,\n max_soil_production_rate=max_soil_production_rate,\n soil_production_decay_depth=soil_production_decay_depth)\n\n # Instantiate a soil-transport component\n self.diffuser = DepthDependentTaylorDiffuser(self.grid,\n linear_diffusivity=linear_diffusivity,\n slope_crit=self.params['slope_crit'],\n soil_transport_decay_depth=soil_transport_decay_depth,\n nterms=11)\n\n def run_one_step(self, dt):\n \"\"\"\n Advance model for one time-step of duration dt.\n \"\"\"\n\n # Route flow\n self.flow_router.run_one_step()\n\n # Get IDs of flooded nodes, if any\n flooded = np.where(self.flow_router.depression_finder.flood_status==3)[0]\n\n # Do some erosion (but not on the flooded nodes)\n # (if we're varying K through time, update that first)\n if self.opt_var_precip:\n self.eroder.K = (self.K_sp\n * self.pc.get_erodibility_adjustment_factor(self.model_time))\n\n self.eroder.run_one_step(dt, flooded_nodes=flooded)\n\n # We must also now erode the bedrock where relevant. If water erosion\n # into bedrock has occurred, the bedrock elevation will be higher than\n # the actual elevation, so we simply re-set bedrock elevation to the\n # lower of itself or the current elevation.\n b = self.grid.at_node['bedrock__elevation']\n b[:] = np.minimum(b, self.grid.at_node['topographic__elevation'])\n\n # Calculate regolith-production rate\n self.weatherer.calc_soil_prod_rate()\n\n # Do some soil creep\n self.diffuser.run_one_step(dt,\n dynamic_dt=True,\n if_unstable='raise',\n courant_factor=0.1)\n\n # calculate model time\n self.model_time += dt\n\n # Lower outlet\n self.update_outlet(dt)\n\n # Check walltime\n self.check_walltime()\n\ndef main():\n \"\"\"Executes model.\"\"\"\n import sys\n\n try:\n infile = sys.argv[1]\n except IndexError:\n print('Must include input file name on command line')\n sys.exit(1)\n\n cdsp = BasicChSa(input_file=infile)\n cdsp.run()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.where", "numpy.minimum" ] ]
jdvelasq/techminer2
[ "ad64a49402749755798a18417c38a7ad10e83bad" ]
[ "techminer2/co_occurrence_matrix_cluster_mds_map.py" ]
[ "\"\"\"\nCo-occurrence Matrix / Cluster MDS Map\n===============================================================================\n\n>>> from techminer2 import *\n>>> directory = \"data/\"\n>>> file_name = \"sphinx/images/co_occurrence_matrix_cluster_mds_map.png\"\n>>> co_occurrence_matrix_cluster_mds_map(\n... 'author_keywords',\n... min_occ=2, \n... directory=directory,\n... ).savefig(file_name)\n\n.. image:: images/co_occurrence_matrix_cluster_mds_map.png\n :width: 700px\n :align: center\n\n\n\"\"\"\nfrom sklearn.manifold import MDS\n\nfrom .co_occurrence_matrix import co_occurrence_matrix\nfrom .network import network\nfrom .network_map import network_map\n\n\ndef co_occurrence_matrix_cluster_mds_map(\n column,\n min_occ=2,\n max_occ=None,\n normalization=None,\n clustering_method=\"louvain\",\n directory=\"./\",\n color_scheme=\"clusters\",\n figsize=(7, 7),\n):\n\n coc_matrix = co_occurrence_matrix(\n column=column,\n min_occ=min_occ,\n max_occ=max_occ,\n normalization=normalization,\n directory=directory,\n )\n\n manifold_method = MDS(n_components=2)\n\n network_ = network(\n matrix=coc_matrix,\n clustering_method=clustering_method,\n manifold_method=manifold_method,\n )\n\n return network_map(\n network_,\n color_scheme=color_scheme,\n figsize=figsize,\n )\n" ]
[ [ "sklearn.manifold.MDS" ] ]
Telcrome/ai-trainer
[ "54bca3252e194c054bdd3af2b94d6dde940a2a86" ]
[ "trainer/ml/utils.py" ]
[ "from enum import Enum\nfrom typing import Generator, Tuple, Iterable, Dict, List\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nfrom scipy.ndimage import label, generate_binary_structure\nfrom scipy.ndimage.morphology import distance_transform_edt as dist_trans\n\nimport trainer.lib as lib\n\n\nclass ImageNormalizations(Enum):\n UnitRange = 1\n\n\ndef duplicate_columns(data, minoccur=2):\n ind = np.lexsort(data)\n diff = np.any(data.T[ind[1:]] != data.T[ind[:-1]], axis=1)\n edges = np.where(diff)[0] + 1\n result = np.split(ind, edges)\n result = [group for group in result if len(group) >= minoccur]\n return result\n\n\ndef pad(small_arr: np.ndarray, size=(30, 30)) -> np.ndarray:\n # if small_arr.shape[0] < size[0] or small_arr.shape[1] < size[1]:\n size = max(small_arr.shape[0], size[0]), max(small_arr.shape[1], size[1])\n res = np.zeros(size, dtype=np.int32)\n res[:small_arr.shape[0], :small_arr.shape[1]] = small_arr\n return res\n # else:\n # return small_arr # There is no need for padding\n\n\ndef split_into_regions(arr: np.ndarray, mode=0) -> List[np.ndarray]:\n \"\"\"\n Splits an array into its coherent regions.\n\n :param mode: 0 for orthogonal connection, 1 for full connection\n :param arr: Numpy array with shape [W, H]\n :return: A list with length #NumberOfRegions of arrays with shape [W, H]\n \"\"\"\n res = []\n if mode == 0:\n rs, num_regions = label(arr)\n elif mode == 1:\n rs, num_regions = label(arr, structure=generate_binary_structure(2, 2))\n else:\n raise Exception(\"Please specify a valid Neighborhood mode for split_into_regions\")\n\n for i in range(1, num_regions + 1):\n res.append(rs == i)\n return res\n\n\ndef normalize_im(im: np.ndarray, norm_type=ImageNormalizations.UnitRange) -> np.ndarray:\n \"\"\"\n Currently just normalizes an image with pixel intensities in range [0, 255] to [-1, 1]\n :return: The normalized image\n \"\"\"\n if norm_type == ImageNormalizations.UnitRange:\n return (im.astype(np.float32) / 127.5) - 1\n else:\n raise Exception(\"Unknown Normalization type\")\n\n\ndef distance_transformed(mask: np.ndarray) -> np.ndarray:\n if mask.dtype != np.bool:\n mask = mask.astype(np.bool)\n return dist_trans(np.invert(mask).astype(np.float32))\n\n\ndef one_hot_to_cont(x: np.ndarray) -> np.ndarray:\n \"\"\"\n Convert a one hot encoded image into the same image with integer representations.\n\n :param x: np.ndarray with (C, W, H)\n :return: np.ndarray with (W, H)\n \"\"\"\n return np.argmax(x, axis=len(x.shape) - 3)\n\n\ndef cont_to_ont_hot(arr: np.ndarray, n_values=-1) -> np.ndarray:\n if n_values == -1:\n n_values = np.max(arr) + 1\n res = np.zeros((n_values,) + arr.shape)\n for v in np.unique(arr):\n res[v, :, :][arr == v] = 1\n return res\n\n\ndef reduce_by_attention(arr: np.ndarray, att: np.ndarray):\n \"\"\"\n Reduce an array by a field of attention, such that the result is a rectangle with the empty borders cropped.\n\n :param arr: Target array. 
The last two dimensions need to be of the same shape as the attention field\n :param att: field of attention\n :return: cropped array\n \"\"\"\n assert arr.shape[-2] == att.shape[0] and arr.shape[-1] == att.shape[1]\n ones = np.argwhere(att)\n lmost, rmost = np.min(ones[:, 0]), np.max(ones[:, 0]) + 1\n bmost, tmost = np.min(ones[:, 1]), np.max(ones[:, 1]) + 1\n grid_slice = [slice(None) for _ in range(len(arr.shape) - 2)]\n grid_slice.extend([slice(lmost, rmost), slice(bmost, tmost)])\n return arr[tuple(grid_slice)], att[lmost:rmost, bmost:tmost], (lmost, rmost, bmost, tmost)\n\n\ndef pair_augmentation(g: Iterable[Tuple[np.ndarray, np.ndarray]], aug_ls) -> Iterable[Tuple[np.ndarray, np.ndarray]]:\n import imgaug.augmenters as iaa\n seq = iaa.Sequential(aug_ls)\n for im, gt, frame_number in g:\n im_prep = im[frame_number] if im.shape[3] > 1 else im.squeeze()\n gt_prep = np.expand_dims(gt, len(gt.shape))\n images_aug = seq(images=[im_prep], segmentation_maps=[gt_prep])\n yield images_aug[0][0].astype(np.float32), images_aug[1][0][:, :, 0].astype(np.float32), frame_number\n\n\ndef insert_np_at(a1: np.ndarray, a2: np.ndarray, pos: Tuple[int, int], filter_arr=None) -> np.ndarray:\n assert len(a1.shape) == 2 and len(a2.shape) == 2\n if filter_arr is None:\n filter_arr = np.ones_like(a2).astype(np.bool)\n x, y = pos\n res = np.copy(a1)\n a1_x = slice(x, min(x + a2.shape[0], a1.shape[0]))\n a1_y = slice(y, min(y + a2.shape[1], a1.shape[1]))\n\n if x + a2.shape[0] <= a1.shape[0]:\n a2_x = slice(0, a2.shape[0])\n else:\n a2_x = slice(0, a1.shape[0] - (x + a2.shape[0]))\n\n if y + a2.shape[1] <= a1.shape[1]:\n a2_y = slice(0, a2.shape[1])\n else:\n a2_y = slice(0, a1.shape[1] - (y + a2.shape[1]))\n item_filter = filter_arr[(a2_x, a2_y)]\n assert res[(a1_x, a1_y)].shape == a2[(a2_x, a2_y)].shape\n res[(a1_x, a1_y)][item_filter] = a2[(a2_x, a2_y)][item_filter]\n return res\n\n\nif __name__ == '__main__':\n fit = insert_np_at(np.ones((10, 10)), np.ones((3, 3)) * 2, (2, 3))\n too_big1 = insert_np_at(np.ones((10, 10)), np.ones((3, 10)) * 2, (2, 3))\n too_big = insert_np_at(np.ones((10, 10)), np.ones((10, 10)) * 2, (2, 3))\n\n# def put_array(big_arr: np.ndarray, small_arr: np.ndarray, offset=(0, 0)) -> np.ndarray:\n# \"\"\"\n# Puts the small array into the big array. Ignores problems and does its best to fulfill the task\n# \"\"\"\n# b, t =\n# big_arr[]\n# big_arr = np.putmask(big_arr, )\n\n\n# if __name__ == '__main__':\n# # a = np.zeros((10, 10))\n# # b = np.random.random((4, 4))\n# # c = put_array(a, b)\n# # lib.logger.debug_var(c)\n" ]
[ [ "numpy.ones", "numpy.zeros", "numpy.argwhere", "scipy.ndimage.label", "numpy.invert", "numpy.any", "numpy.ones_like", "scipy.ndimage.generate_binary_structure", "numpy.copy", "numpy.lexsort", "numpy.max", "numpy.min", "numpy.where", "numpy.unique", "numpy.split" ] ]
sxontheway/BalanceFL
[ "43bb7539c932b7b6f7ad03f94a724452ae3855a3" ]
[ "IMU/fed.py" ]
[ "import copy\nimport time\nfrom collections import OrderedDict\n\nimport torch\nfrom data.dataloader import local_client_dataset, test_dataset\nfrom models.utils import *\nfrom utils.train_helper import validate_one_model\nfrom utils.sampling import *\n\n\nimport numpy as np\nfrom multiprocessing import Process\nimport time\n\n\ndef return_state_dict(network):\n \"\"\"\n save model to state_dict\n \"\"\"\n feat_model = {k: v.cpu() for k, v in network[\"feat_model\"].state_dict().items()}\n classifier = {k: v.cpu() for k, v in network[\"classifier\"].state_dict().items()}\n return {\"feat_model\": feat_model, \"classifier\": classifier}\n\n\ndef load_state_dict(network, state_dict):\n \"\"\"\n restore model from state_dict\n \"\"\"\n network[\"feat_model\"].load_state_dict(state_dict[\"feat_model\"])\n network[\"classifier\"].load_state_dict(state_dict[\"classifier\"])\n\n # for name, param in state_dict[\"feat_model\"].items():\n # print(name, \"\\t\", param.size())\n return network\n\n\ndef check_status(status_list, selected_idx, target_status):\n \"\"\"\n 0. original status (1st FL round)\n 1. server finished sending: server_network --> mp_list\n 2. client received, and returned the model: mp_list --> networks[i] --> local_update --> mp_list\n 3. server received: mp_list --> networks[i]\n --> 1. aggregation finished. networks[i] --> aggregate --> server_network --> mp_list, the status change to 1\n ---\n Return True: when all clients meet conditions, else False\n \"\"\"\n tmp = np.array(status_list)\n if (tmp[selected_idx] == target_status).all() == True:\n return True\n else:\n return False\n\n\ndef set_status(status_list, selected_idx, target_status):\n \"\"\"\n see function: check_status\n \"\"\"\n if type(selected_idx) is int:\n selected_idx = [selected_idx]\n for i in selected_idx:\n status_list[i] = target_status\n # print(f\"set_status {target_status}\")\n\n\ndef difference_models_norm_2(model_1, model_2):\n \"\"\"\n Return the norm 2 difference between the two model parameters. Used in FedProx. 
\n \"\"\"\n tensor_1_backbone = list(model_1[\"feat_model\"].parameters())\n tensor_1_classifier = list(model_1[\"classifier\"].parameters())\n tensor_2_backbone = list(model_2[\"feat_model\"].parameters())\n tensor_2_classifier = list(model_2[\"classifier\"].parameters())\n\n diff_list = [\n torch.sum((tensor_1_backbone[i] - tensor_2_backbone[i]) ** 2)\n for i in range(len(tensor_1_backbone))\n ]\n diff_list.extend(\n [\n torch.sum((tensor_1_classifier[i] - tensor_2_classifier[i]) ** 2)\n for i in range(len(tensor_1_classifier))\n ]\n )\n\n norm = sum(diff_list)\n return norm\n\n\nclass Fed_server(Process):\n \"\"\"\n Class for client updating and model aggregation\n \"\"\"\n\n def __init__(\n self,\n init_network,\n criterion,\n config,\n per_client_data,\n per_client_label,\n idx_per_client_train,\n test_data,\n test_label,\n state_list=None,\n state_dict_list=None,\n idx=None,\n ):\n\n super(Fed_server, self).__init__()\n\n self.local_bs = config[\"fl_opt\"][\"local_bs\"]\n self.local_ep = config[\"fl_opt\"][\"local_ep\"]\n self.num_clients = config[\"fl_opt\"][\"num_clients\"]\n self.criterion = criterion\n self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = (\n [],\n [],\n [],\n [],\n )\n self.train_loaders = [] # include dataloader or pre-loaded dataset\n self.train_loader_balanced = [] # balanced-sampling dataloader\n self.local_num_per_cls = [] # list to store local data number per class\n self.test_loaders = []\n self.status_list = state_list\n self.state_dict_list = state_dict_list\n self.client_idx = idx # physical idx of clients (hardcoded)\n\n self.config = config\n self.prefetch = False\n self.feat_aug = config[\"fl_opt\"][\"feat_aug\"]\n self.crt = config[\"fl_opt\"][\"crt\"]\n\n self.client_weights = np.array([i for i in idx_per_client_train])\n self.client_weights = self.client_weights / self.client_weights.sum()\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.server_network = copy.deepcopy(init_network)\n self.server_network[\"feat_model\"].to(self.device)\n self.server_network[\"classifier\"].to(self.device)\n\n # per-client accuracy and loss\n self.acc = [0 for i in range(self.num_clients)]\n self.losses_cls = [-1 for i in range(self.num_clients)]\n self.losses_kd = [-1 for i in range(self.num_clients)]\n\n print(f'=====> {config[\"metainfo\"][\"optimizer\"]}, Server (fed.py)\\n ')\n\n ######## init backbone, classifier, optimizer and dataloader ########\n for client_i in range(self.num_clients):\n\n backbone = copy.deepcopy(self.server_network[\"feat_model\"])\n classifier = copy.deepcopy(self.server_network[\"classifier\"])\n self.networks.append({\"feat_model\": backbone, \"classifier\": classifier})\n\n \"\"\" Server does not need\n # list of optimizer_dict. 
One optimizer for one network\n self.optimizers.append(init_optimizers(self.networks[client_i], config)) \n optim_params_dict = {'params': self.networks[client_i][\"classifier\"].parameters(), 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0} \n self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))\n\n # dataloader\n num_workers = 0\n local_dataset = \\\n local_client_dataset(per_client_data[client_i], per_client_label[client_i], config)\n self.train_loaders.append(\n torch.utils.data.DataLoader(\n local_dataset, batch_size=self.local_bs, shuffle=True, \n num_workers=num_workers, pin_memory=False)\n )\n self.train_loader_balanced.append(\n torch.utils.data.DataLoader(\n local_dataset, batch_size=self.local_bs, sampler=local_dataset.get_balanced_sampler(), \n num_workers=num_workers, pin_memory=False)\n )\n self.local_num_per_cls.append(local_dataset.class_sample_count)\n \"\"\"\n\n # centralized train dataset\n train_data_all, train_label_all = [], []\n for client_i in range(len(per_client_label)):\n train_data_all = train_data_all + per_client_data[client_i]\n train_label_all = train_label_all + per_client_label[client_i]\n self.train_dataset = local_client_dataset(\n train_data_all, train_label_all, config\n )\n self.test_dataset = test_dataset(test_data, test_label, config)\n\n def local_train(self, selected_idx):\n \"\"\"\n server-side code\n \"\"\"\n # self.server_network --> mp_list\n for i in selected_idx:\n self.state_dict_list[i] = return_state_dict(\n self.server_network\n ) # model transfer\n set_status(self.status_list, selected_idx, 1)\n\n # wait until all clients returning the model\n while check_status(self.status_list, selected_idx, 2) is False:\n time.sleep(0.1)\n\n # mp_list --> self.networks (copys of client models on the server). 
Prepare for aggregation.\n for i in selected_idx:\n load_state_dict(self.networks[i], self.state_dict_list[i]) # model transfer\n print(\"===> Local training finished\")\n\n def aggregation(self, selected_idx, mode):\n \"\"\"\n server-side code: aggregation\n \"\"\"\n if mode in [\"fedavg\", \"fedavgm\", \"fedbn\", \"fedprox\"]:\n self.aggregate_layers(selected_idx, mode, backbone_only=False)\n elif mode == \"fedavg_fs\":\n opt = self.config[\"fl_opt\"]\n backbone_only, imprint, spread_out = (\n opt[\"backbone_only\"],\n opt[\"imprint\"],\n opt[\"spread_out\"],\n )\n self.aggregate_layers(selected_idx, \"fedavg\", backbone_only=backbone_only)\n if imprint:\n self.imprint(selected_idx)\n if spread_out:\n self.spread_out()\n\n # model: self.server_network --> mp_list\n for i in selected_idx:\n self.state_dict_list[i] = return_state_dict(\n self.server_network\n ) # model transfer\n set_status(self.status_list, selected_idx, 0) # back to original\n\n print(\"===> Aggregation finished\")\n\n def aggregate_layers(self, selected_idx, mode, backbone_only):\n \"\"\"\n backbone_only: choose to only aggregate backbone\n \"\"\"\n weights_sum = self.client_weights[selected_idx].sum()\n with torch.no_grad():\n if mode in [\"fedavg\", \"fedprox\"]:\n for net_name, net in self.server_network.items():\n if net_name == \"classifier\" and backbone_only:\n pass\n else:\n for key, layer in net.state_dict().items():\n if \"num_batches_tracked\" in key:\n # num_batches_tracked is a non trainable LongTensor\n # and num_batches_tracked are the same for\n # all clients for the given datasets\n layer.data.copy_(\n self.networks[0][net_name].state_dict()[key]\n )\n else:\n temp = torch.zeros_like(layer)\n # Fedavg\n for idx in selected_idx:\n weight = self.client_weights[idx] / weights_sum\n temp += (\n weight\n * self.networks[idx][net_name].state_dict()[key]\n )\n layer.data.copy_(temp)\n # update client models\n # for idx in selected_idx:\n # self.networks[idx][net_name].state_dict()[key].data.copy_(layer)\n\n elif mode == \"fedbn\": # https://openreview.net/pdf?id=6YEQUn0QICG\n for net_name, net in self.server_network.items():\n if net_name == \"classifier\" and backbone_only:\n pass\n else:\n for key, layer in net.state_dict().items():\n if \"bn\" not in key:\n temp = torch.zeros_like(layer)\n # Fedavg\n for idx in selected_idx:\n weight = self.client_weights[idx] / weights_sum\n temp += (\n weight\n * self.networks[idx][net_name].state_dict()[key]\n )\n layer.data.copy_(temp)\n # update client models\n # for idx in selected_idx:\n # self.networks[idx][net_name].state_dict()[key].data.copy_(layer)\n elif mode == \"fedavgm\":\n raise NotImplementedError\n\n def evaluate_global(self, train_dataset=None, test_dataset=None):\n \"\"\"\n Accuracy of the global model and all classes\n \"\"\"\n # evaluate on training set\n if train_dataset is None:\n train_dataset = self.train_dataset\n if test_dataset is None:\n test_dataset = self.test_dataset\n train_loss_per_cls, train_acc_per_cls = validate_one_model(\n self.server_network, train_dataset, self.device, per_cls_acc=True\n )\n\n # evaluate on test set: per-class loss/acc\n test_loss_per_cls, test_acc_per_cls = validate_one_model(\n self.server_network, test_dataset, self.device, per_cls_acc=True\n )\n print(\"===> Evaluation finished\\n\")\n\n return (\n train_loss_per_cls,\n train_acc_per_cls,\n test_loss_per_cls,\n test_acc_per_cls,\n )\n\n def evaluate_global_all(self, train_dataset=None, test_dataset=None):\n \"\"\"\n Accuracy of models of all nodes and all 
classes\n\n Return: all_results\n shape: (4, num_client, num_cls), 4 for (train_loss, train_acc, test_loss, test_acc)\n \"\"\"\n # evaluate on training set\n if train_dataset is None:\n train_dataset = self.train_dataset\n if test_dataset is None:\n test_dataset = self.test_dataset\n\n all_results = [None for i in range(self.num_clients)]\n for idx in range(self.num_clients):\n # evaluate on test set: per-class loss/acc\n train_loss_per_cls, train_acc_per_cls = validate_one_model(\n self.networks[idx], train_dataset, self.device, per_cls_acc=True\n )\n # evaluate on test set: per-class loss/acc\n test_loss_per_cls, test_acc_per_cls = validate_one_model(\n self.networks[idx], test_dataset, self.device, per_cls_acc=True\n )\n all_results[idx] = (\n train_loss_per_cls,\n train_acc_per_cls,\n test_loss_per_cls,\n test_acc_per_cls,\n )\n\n print(f\"===> Evaluation finished{idx}\\n\")\n\n all_results = np.array(all_results).transpose(1, 0, 2)\n return all_results\n\n\nclass Fed_client(Process):\n \"\"\"\n Class for client updating and model aggregation\n \"\"\"\n\n def __init__(\n self,\n init_network,\n criterion,\n config,\n per_client_data,\n per_client_label,\n idx_per_client_train,\n test_data,\n test_label,\n state_list=None,\n state_dict_list=None,\n idx=None,\n ):\n\n super(Fed_client, self).__init__()\n\n self.local_bs = config[\"fl_opt\"][\"local_bs\"]\n self.local_ep = config[\"fl_opt\"][\"local_ep\"]\n self.num_clients = config[\"fl_opt\"][\"num_clients\"]\n self.criterion = criterion\n self.networks, self.optimizers, self.optimizers_stage2, self.schedulers = (\n [],\n [],\n [],\n [],\n )\n self.train_loaders = [] # include dataloader or pre-loaded dataset\n self.train_loader_balanced = [] # balanced-sampling dataloader\n self.local_num_per_cls = [] # list to store local data number per class\n self.test_loaders = []\n self.status_list = state_list\n self.state_dict_list = state_dict_list\n self.client_idx = idx # physical idx of clients (hardcoded)\n\n self.config = config\n self.device = config[\"device_client\"][idx]\n self.server_network = copy.deepcopy(init_network)\n self.balanced_loader = config[\"fl_opt\"][\"balanced_loader\"]\n\n self.prefetch = False\n self.feat_aug = config[\"fl_opt\"][\"feat_aug\"]\n self.crt = config[\"fl_opt\"][\"crt\"]\n\n if config[\"fl_opt\"][\"aggregation\"] == \"fedprox\":\n self.fedprox = True\n else:\n self.fedprox = False\n self.mu = 0.05\n\n self.client_weights = np.array([i for i in idx_per_client_train])\n self.client_weights = self.client_weights / self.client_weights.sum()\n\n # per-client accuracy and loss\n self.acc = [0 for i in range(self.num_clients)]\n self.losses_cls = [-1 for i in range(self.num_clients)]\n self.losses_kd = [-1 for i in range(self.num_clients)]\n\n print(f'=====> {config[\"metainfo\"][\"optimizer\"]}, Client {idx} (fed.py)\\n ')\n\n ######## init backbone, classifier, optimizer and dataloader ########\n for client_i in range(self.num_clients):\n # list of network and optimizer_dict. 
One optimizer for one network.\n if client_i != self.client_idx:\n self.networks.append(None)\n self.optimizers.append(None)\n self.optimizers_stage2.append(None)\n else:\n backbone = copy.deepcopy(self.server_network[\"feat_model\"])\n classifier = copy.deepcopy(self.server_network[\"classifier\"])\n self.networks.append({\"feat_model\": backbone, \"classifier\": classifier})\n self.optimizers.append(init_optimizers(self.networks[client_i], config))\n optim_params_dict = {\n \"params\": self.networks[client_i][\"classifier\"].parameters(),\n \"lr\": 0.001,\n \"momentum\": 0.9,\n \"weight_decay\": 0,\n }\n self.optimizers_stage2.append(torch.optim.SGD([optim_params_dict],))\n\n # dataloader\n num_workers = 0\n local_dataset = local_client_dataset(\n per_client_data[client_i], per_client_label[client_i], config\n )\n self.train_loaders.append(\n torch.utils.data.DataLoader(\n local_dataset,\n batch_size=self.local_bs,\n shuffle=True,\n num_workers=num_workers,\n pin_memory=False,\n )\n )\n self.train_loader_balanced.append(\n torch.utils.data.DataLoader(\n local_dataset,\n batch_size=self.local_bs,\n sampler=local_dataset.get_balanced_sampler(),\n num_workers=num_workers,\n pin_memory=False,\n )\n )\n self.local_num_per_cls.append(local_dataset.class_sample_count)\n\n \"\"\" clients do not need\n # centralized train dataset\n train_data_all, train_label_all = [], []\n for client_i in range(len(per_client_label)):\n train_data_all = train_data_all + per_client_data[client_i]\n train_label_all = train_label_all + per_client_label[client_i]\n self.train_dataset = local_client_dataset(train_data_all, train_label_all, config)\n self.test_dataset = test_dataset(test_data, test_label, config)\n \"\"\"\n\n def run(self):\n \"\"\"\n client-side code\n \"\"\"\n self.server_network[\"feat_model\"].to(self.device)\n self.server_network[\"classifier\"].to(self.device)\n self.networks[self.client_idx][\"feat_model\"].to(self.device)\n self.networks[self.client_idx][\"classifier\"].to(self.device)\n\n while 1:\n while check_status(self.status_list, self.client_idx, 1) is False:\n time.sleep(0.1)\n\n # model: mp_list --> server_network\n load_state_dict(\n self.server_network, self.state_dict_list[self.client_idx]\n ) # model transfer\n self.train_lt(self.client_idx) # local model updating\n\n # self.networks[i] --> mp_list\n self.state_dict_list[self.client_idx] = return_state_dict(\n self.networks[self.client_idx]\n ) # model transfer\n set_status(self.status_list, self.client_idx, 2)\n\n def train_lt(self, idx):\n \"\"\"\n client-side code\n ---\n Argus:\n - idx: the index in all clients (e.g., 50) or selected clients (e.g., 10).\n If self.prefetch is true: the index in selected clients,\n If self.prefetch is true: the index in all clients\n \"\"\"\n idx_in_all = idx\n\n # server broadcast the model to clients\n \"\"\"\n # optimizer will not work if use this, because optimizer needs the params from the model\n # self.networks[idx_in_all] = copy.deepcopy(self.server_network) \n \"\"\"\n for net_name, net in self.server_network.items(): # feat_model, classifier\n state_dict = self.networks[idx_in_all][net_name].state_dict()\n for key, layer in net.state_dict().items():\n state_dict[key].data.copy_(layer.data)\n\n for net in self.networks[idx_in_all].values():\n net.train()\n for net in self.server_network.values():\n net.train()\n teacher = self.server_network\n\n # torch.cuda.empty_cache()\n\n \"\"\"\n (Per-cls) Covariance Calculation\n \"\"\"\n if self.feat_aug:\n # probability for augmentation for 
every class\n max_num = max(self.local_num_per_cls[idx])\n prob = torch.tensor(\n [1.0 - i / max_num for i in self.local_num_per_cls[idx]]\n )\n\n # obtain features and labels under eval mode\n feat_list, label_list = [], []\n\n # self.networks[idx_in_all]['feat_model'].eval()\n\n for (imgs, labels, indexs) in self.train_loaders[idx]:\n with torch.no_grad():\n imgs = imgs.to(self.device)\n feat_list.append(teacher[\"feat_model\"](imgs).cpu())\n label_list.append(labels)\n feat_list = torch.cat(feat_list, 0)\n\n # self.networks[idx_in_all]['feat_model'].train()\n\n label_list = torch.cat(label_list, 0)\n unique_labels = list(np.unique(label_list)) # e.g., size (6, )\n transformed_label_list = torch.tensor(\n [unique_labels.index(i) for i in label_list]\n ) # e.g., size (n, )\n\n # per-cls features\n feats_per_cls = [[] for i in range(len(unique_labels))]\n for feats, label in zip(feat_list, transformed_label_list):\n feats_per_cls[label].append(feats)\n\n # calculate the variance\n sampled_data, sample_label = [], []\n per_cls_cov = []\n for feats in feats_per_cls:\n if len(feats) > 1:\n per_cls_cov.append(np.cov(torch.stack(feats, 1).numpy()))\n else:\n per_cls_cov.append(np.zeros((feats[0].shape[0], feats[0].shape[0])))\n per_cls_cov = np.array(per_cls_cov)\n # per_cls_cov = np.array([np.cov(torch.stack(feats, 1).numpy()) for feats in feats_per_cls])\n cov = np.average(\n per_cls_cov, axis=0, weights=self.local_num_per_cls[idx]\n ) # covariance for feature dimension, shape: e.g., (128, 128)\n\n # pre-generate deviation\n divider = 500\n pointer = 0\n augs = (\n torch.from_numpy(\n np.random.multivariate_normal(\n mean=np.zeros(cov.shape[0]),\n cov=cov, # covariance for feature dimension, shape: e.g., (128, 128)\n size=divider,\n )\n )\n .float()\n .to(self.device)\n )\n\n with torch.set_grad_enabled(True):\n losses_cls = 0\n losses_kd = 0\n\n ##########################\n #### stage 1 training ####\n ##########################\n for epoch in range(self.local_ep):\n\n \"\"\"\n model update\n \"\"\"\n if self.local_ep > 10: # locla training mode\n print(epoch, end=\" \")\n\n if self.balanced_loader:\n tmp_loader = self.train_loader_balanced[idx]\n else:\n tmp_loader = self.train_loaders[idx]\n for (imgs, labels, indexs) in tmp_loader:\n # to device\n imgs = imgs.to(self.device)\n\n # forward\n feat = self.networks[idx_in_all][\"feat_model\"](imgs)\n logits = self.networks[idx_in_all][\"classifier\"](feat)\n\n # do feature space augmentation with a likelihood\n if self.feat_aug:\n # prob = torch.tensor([1.0 for i in self.local_num_per_cls[idx]])\n rand_list = torch.rand(len(labels))\n mask = (\n rand_list\n < prob[\n torch.tensor([unique_labels.index(i) for i in labels])\n ]\n )\n degree = 1\n aug_num = sum(mask).item()\n if aug_num > 0:\n if pointer + aug_num >= divider:\n pointer = 0\n feat_aug = feat.clone()\n feat_aug[mask] = (\n feat[mask] + augs[pointer : pointer + aug_num] * degree\n )\n logits_aug = self.networks[idx_in_all][\"classifier\"](\n feat_aug\n )\n pointer = pointer + aug_num\n\n # teacher\n with torch.no_grad():\n feat_teacher = teacher[\"feat_model\"](imgs)\n pred_teacher = teacher[\"classifier\"](feat_teacher)\n\n # loss\n labels = labels.to(self.device)\n if self.config[\"criterions\"][\"def_file\"].find(\"LwF\") > 0:\n if self.feat_aug:\n if len(labels) != len(logits_aug):\n continue\n loss, loss_cls, loss_kd = self.criterion(\n labels, pred_teacher, logits, logits_aug\n )\n else:\n loss, loss_cls, loss_kd = self.criterion(\n labels, pred_teacher, logits\n )\n elif 
self.config[\"criterions\"][\"def_file\"].find(\"KDLoss\") > 0:\n loss, loss_cls, loss_kd = self.criterion(\n logits,\n labels,\n feat,\n feat_teacher,\n classfier_weight=self.networks[idx_in_all][\n \"classifier\"\n ].fc.weight,\n )\n\n # fedprox loss: https://epione.gitlabpages.inria.fr/flhd/federated_learning/FedAvg_FedProx_MNIST_iid_and_noniid.html#federated-training-with-fedprox\n if self.fedprox:\n prox_loss = difference_models_norm_2(\n self.networks[idx_in_all], teacher\n )\n # print(\"FedProx Loss: \", prox_loss, loss)\n loss += self.mu / 2 * prox_loss\n\n # backward\n for optimizer in self.optimizers[idx_in_all].values():\n optimizer.zero_grad()\n loss.backward()\n for optimizer in self.optimizers[idx_in_all].values():\n optimizer.step()\n\n # classifier L2-norm\n if self.networks[idx_in_all][\"classifier\"].l2_norm:\n self.networks[idx_in_all][\"classifier\"].weight_norm()\n losses_cls += loss_cls.item()\n losses_kd += loss_kd.item()\n\n self.losses_cls[idx_in_all] = (\n losses_cls / len(self.train_loaders[idx]) / self.local_ep\n )\n self.losses_kd[idx_in_all] = (\n losses_kd / len(self.train_loaders[idx]) / self.local_ep\n )\n\n ##########################\n #### stage 2 training ####\n ##########################\n if self.crt:\n self.networks[idx_in_all][\"feat_model\"].eval()\n\n if self.feat_aug:\n # obtain features and labels\n feat_list = []\n label_list = []\n for (imgs, labels, indexs) in self.train_loaders[idx]:\n imgs = imgs.to(self.device)\n with torch.no_grad():\n feat_list.append(\n self.networks[idx_in_all][\"feat_model\"](imgs).cpu()\n )\n label_list.append(labels)\n feat_list = torch.cat(feat_list, 0)\n label_list = torch.cat(label_list, 0)\n unique_labels = list(np.unique(label_list)) # e.g., size (6, )\n transformed_label_list = torch.tensor(\n [unique_labels.index(i) for i in label_list]\n ) # e.g., size (n, )\n\n # per-cls features\n feats_per_cls = [[] for i in range(len(unique_labels))]\n for feat, label in zip(feat_list, transformed_label_list):\n feats_per_cls[label].append(feat)\n\n # determine the extra sample number for every existing samples\n num_per_cls = np.array(\n [len(np.where(label_list == t)[0]) for t in unique_labels]\n ) # e.g., size (6, )\n max_num = max(num_per_cls)\n gen_nums = [\n np.array(\n [max_num // num_per_cls[i] - 1 for _ in feats_per_cls[i]]\n )\n for i in range(len(unique_labels))\n ]\n for cls_i, nums in enumerate(gen_nums):\n nums[: max_num % num_per_cls[cls_i]] = (\n nums[: max_num % num_per_cls[cls_i]] + 1\n )\n\n # generate samples\n sampled_data, sample_label = [], []\n per_cls_cov = np.array(\n [\n np.cov(torch.stack(feats, 1).numpy())\n for feats in feats_per_cls\n ]\n )\n cov = np.average(per_cls_cov, axis=0, weights=num_per_cls)\n # print([np.mean(i) for i in per_cls_cov])\n for cls_i, nums in enumerate(gen_nums):\n for sample_i, num in enumerate(nums):\n if num > 0:\n sampled_data.append(\n torch.from_numpy(\n np.random.multivariate_normal(\n mean=feats_per_cls[cls_i][sample_i],\n cov=cov, # covariance for feature dimension, shape: e.g., (128, 128)\n size=num,\n )\n ).float()\n )\n sample_label.append(torch.full((num,), cls_i).long())\n\n # add generated fetaures to training data\n feat_list = torch.cat([feat_list, *sampled_data], 0)\n label_list = torch.cat([transformed_label_list, *sample_label], 0)\n\n # build new dataloader\n feats_dataset = local_client_dataset(\n feat_list, label_list, self.config\n )\n feats_loader = torch.utils.data.DataLoader(\n feats_dataset,\n batch_size=self.local_bs,\n shuffle=True,\n 
num_workers=0,\n pin_memory=False,\n )\n\n # train classifier\n for epoch in range(5):\n for (feats, labels, indexs) in feats_loader:\n feats = feats.to(self.device)\n labels = labels.to(self.device)\n logits = self.networks[idx_in_all][\"classifier\"](feats)\n loss = torch.nn.CrossEntropyLoss()(\n logits[:, unique_labels], labels\n )\n\n self.optimizers_stage2[idx_in_all].zero_grad()\n loss.backward()\n self.optimizers_stage2[idx_in_all].step()\n # print(loss)\n\n # re-sampling without feature augmentation\n else:\n for epoch in range(5):\n for (imgs, labels, indexs) in self.train_loader_balanced[idx]:\n # to device\n imgs = imgs.to(self.device)\n # forward\n with torch.no_grad():\n feat = self.networks[idx_in_all][\"feat_model\"](imgs)\n logits = self.networks[idx_in_all][\"classifier\"](feat)\n\n pos_cls = torch.unique(labels).tolist()\n transformed_labels = torch.tensor(\n [pos_cls.index(i) for i in labels]\n ).to(self.device)\n loss = torch.nn.CrossEntropyLoss()(\n logits[:, pos_cls], transformed_labels\n )\n\n self.optimizers_stage2[idx_in_all].zero_grad()\n loss.backward()\n self.optimizers_stage2[idx_in_all].step()\n # print(loss)\n\n print(\"=> \", end=\"\")\n\n\ndef fedavg(w):\n w_avg = copy.deepcopy(w[0])\n for k in w_avg.keys():\n for i in range(1, len(w)):\n w_avg[k] += w[i][k]\n w_avg[k] = torch.div(w_avg[k] * 1.0, len(w))\n return w_avg\n\n\n# See: https://arxiv.org/abs/1909.06335\ndef fedavgm(new_ws, old_w, vel, args):\n \"\"\"\n fedavg + momentum\n - new_ws (list of OrderedDict): The new calculated global model\n - old_w (OrderedDict) : Initial state of the global model (which needs to be updated here) \n \"\"\"\n global_lr = 1\n beta1 = 0\n\n new_w = fedavg(new_ws)\n\n # For the first round: initialize old_w, create an Orderdict to store velocity\n if old_w is None:\n old_w = new_w\n new_v = OrderedDict()\n for key in old_w.keys():\n new_v[key] = torch.zeros(old_w[key].shape, dtype=old_w[key].dtype).to(\n args.device\n )\n else:\n new_v = copy.deepcopy(vel)\n\n for key in new_w.keys():\n delta_w_tmp = old_w[key] - new_w[key]\n new_v[key] = beta1 * new_v[key] + torch.mul(delta_w_tmp, global_lr)\n old_w[key] -= new_v[key]\n\n return old_w, new_v\n\n\ndef fedavgw(new_ws, old_w, args, round_i):\n \"\"\"\n fedavg + adaptive updating parameter\n - new_ws (list of OrderedDict): The new calculated global model\n - old_w (OrderedDict) : Initial state of the global model (which needs to be updated here) \n \"\"\"\n\n new_w = fedavg(new_ws)\n\n # For the first round: initialize old_w\n if old_w is None:\n old_w = new_w\n\n for key in new_w.keys():\n old_w[key] = new_w[key] * (1 / (round_i + 1)) + old_w[key] * (\n round_i / (round_i + 1)\n )\n\n # for key in new_w.keys():\n # if key == \"classifier.fc.weight\":\n # old_w[key] = new_w[key]*(1/(round_i+1)) + old_w[key]*(round_i/(round_i+1))\n # else:\n # old_w[key] = new_w[key]\n\n return old_w\n" ]
[ [ "torch.utils.data.DataLoader", "torch.stack", "torch.no_grad", "numpy.random.multivariate_normal", "torch.cuda.is_available", "torch.cat", "torch.set_grad_enabled", "torch.unique", "numpy.where", "numpy.average", "numpy.unique", "torch.optim.SGD", "numpy.zeros", "torch.tensor", "numpy.array", "torch.sum", "torch.zeros_like", "torch.full", "torch.nn.CrossEntropyLoss", "torch.mul", "torch.zeros" ] ]
alphardex/looter
[ "2be094576e31fd13123719ca94e42cb31475dffa" ]
[ "examples/baidu_index.py" ]
[ "\"\"\"\n爬取百度指数的某一时间段内的特定关键词的所有指数\n\"\"\"\nimport time\nimport looter as lt\nimport requests\nimport pandas as pd\nimport arrow\nfrom loguru import logger\n\nwords = [] # 关键词列表\nstart_date = '2018-01-29'\nend_date = '2018-12-31'\nkinds = ['all', 'pc', 'wise']\ndomain = 'http://index.baidu.com'\nheaders = {\n 'Host':\n 'index.baidu.com',\n 'Connection':\n 'keep-alive',\n 'X-Requested-With':\n 'XMLHttpRequest',\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',\n 'Cookie':\n 'BD_UPN=12314753; ORIGIN=2; ISSW=1; ISSW=1; BAIDUID=F0F664464891FF22022016FEED575109:FG=1; PSTM=1558524896; BIDUPSID=C9733DAACC84E56AF9FED0BDDAADA245; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; BDUSS=lZaZ3I2RzZnN2QtN3doRjlOcnpKMDRYOUJvVDFxVFl-WmFZODVwYTlKLW5MQ0JkSVFBQUFBJCQAAAAAAAAAAAEAAABBGFGnsOvU2MH39fwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKef-Fynn~hcQ; bdindexid=2cka9urn2rk1o4dmnsueadarc7; H_PS_PSSID=1468_21103_29237_28519_29098_29368_28832_29220; BD_HOME=1; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; delPer=0; BD_CK_SAM=1; PSINO=2; H_PS_645EC=22aaZNHp8tp6Pqs1f3AIplUyT%2F67VGrp%2B2iogcH66TNgP6TYyCWal3%2BTHPaWCW6LDeS3'\n}\ntotal = []\nname = f'popularity({start_date}-{end_date})'\nlogger.add(f'{name}.log')\n\n\ndef decrypt(key, data):\n m = list(key)\n v = data\n d = dict(zip(m[:len(m) // 2:], m[len(m) // 2::]))\n return ''.join(map(lambda x: d[x], v))\n\n\ndef crawl(word):\n try:\n url = f'{domain}/api/SearchApi/index'\n params = {\n 'word': word,\n 'startDate': arrow.get(start_date).naive,\n 'endDate': arrow.get(end_date).naive,\n 'area': 0\n }\n data = requests.get(url, params=params, headers=headers).json()\n uniqid = data['data']['uniqid']\n user_indexes = data['data']['userIndexes'][0]\n key = requests.get(f'{domain}/Interface/api/ptbk?uniqid={uniqid}', headers=headers).json()['data']\n encrypted_data = {kind: user_indexes[kind]['data'] for kind in kinds}\n decrypted_data = {kind: decrypt(key, d).split(',') for kind, d in encrypted_data.items()}\n date_range = pd.date_range(start_date, end_date).to_native_types()\n result = []\n for kind, indexes in decrypted_data.items():\n rows = [{\n 'kind': kind,\n 'date': date,\n 'index': index,\n 'keyword': word\n } for date, index in zip(date_range, indexes)]\n result.extend(rows)\n logger.info((rows[0], rows[-1]))\n total.extend(result)\n time.sleep(5)\n except Exception as e:\n logger.error(f'{word}抓取失败')\n\n\nif __name__ == '__main__':\n [crawl(word) for word in words]\n lt.save(total, name=f'{name}.csv')\n" ]
[ [ "pandas.date_range" ] ]
aiyasin/X2Paddle
[ "b37959f2ecdc09fdec7a38c01272126a7f3800e4" ]
[ "x2paddle/op_mapper/dygraph/caffe2paddle/caffe_op_mapper.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nimport numbers\nimport numpy as np\nfrom x2paddle.core.op_mapper import OpMapper\nfrom x2paddle.core.util import *\nfrom x2paddle.core.program import PaddleGraph \nfrom x2paddle.decoder.caffe_decoder import CaffeGraphNode\n\n\ndef _adjust_parameters(node):\n data = node.data\n # When using the protobuf-backend, each parameter initially has four dimensions.\n # In certain cases (like FC layers), we want to eliminate the singleton dimensions.\n # This implementation takes care of the common cases. However, it does leave the\n # potential for future issues.\n # The Caffe-backend does not suffer from this problem.\n data = list(data)\n\n squeeze_indices = [1] # Squeeze biases.\n if node.layer_type == 'InnerProduct':\n squeeze_indices.append(0) # Squeeze FC.\n\n for idx in squeeze_indices:\n if idx >= len(data):\n continue\n\n d = data[idx]\n assert len(\n d.shape\n ) == 4, 'invalid shape[%s] from caffe when adjust_parameters' % (\n str(d.shape))\n\n shape_old = d.shape\n sq_axis = None\n if idx == 0:\n sq_axis = (0, 1)\n elif idx == 1:\n sq_axis = (0, 1, 2)\n else:\n continue\n\n data[idx] = np.squeeze(d, axis=sq_axis)\n shape_new = data[idx].shape\n return data\n\ndef _get_kernel_parameters(kind, params):\n assert kind in [\"Convolution\", \"Pooling\", \"Deconvolution\", \"ConvolutionDepthwise\"]\n [k_h, k_w] = [1, 1]\n if isinstance(params.kernel_size, numbers.Number):\n [k_h, k_w] = [params.kernel_size] * 2\n elif len(params.kernel_size) > 0:\n k_h = params.kernel_h if params.kernel_h > 0 else params.kernel_size[\n 0]\n k_w = params.kernel_w if params.kernel_w > 0 else params.kernel_size[\n len(params.kernel_size) - 1]\n elif params.kernel_h > 0 or params.kernel_w > 0:\n k_h = params.kernel_h\n k_w = params.kernel_w\n [s_h, s_w] = [1, 1]\n if isinstance(params.stride, numbers.Number):\n [s_h, s_w] = [params.stride] * 2\n elif len(params.stride) > 0:\n s_h = params.stride_h if params.stride_h > 0 else params.stride[0]\n s_w = params.stride_w if params.stride_w > 0 else params.stride[len(\n params.stride) - 1]\n elif params.stride_h > 0 or params.stride_w > 0:\n s_h = params.stride_h\n s_w = params.stride_w\n [p_h, p_w] = [0, 0]\n if isinstance(params.pad, numbers.Number):\n [p_h, p_w] = [params.pad] * 2\n elif len(params.pad) > 0:\n p_h = params.pad_h if params.pad_h > 0 else params.pad[0]\n p_w = params.pad_w if params.pad_w > 0 else params.pad[len(\n params.pad) - 1]\n elif params.pad_h > 0 or params.pad_w > 0:\n p_h = params.pad_h\n p_w = params.pad_w\n dila_h = dila_w = 1\n group = 1\n c_o = 1\n if kind in [\"Convolution\", \"Deconvolution\", \"ConvolutionDepthwise\"]:\n if kind in [\"Convolution\", \"Deconvolution\"]:\n c_o = params.num_output\n dila_len = len(params.dilation)\n if dila_len == 2:\n dila_h = params.dilation[0]\n dila_w = params.dilation[1]\n elif dila_len == 1:\n dila_h = dila_w = params.dilation[0]\n else:\n assert dila_len == 0, \"invalid 
length[%s] of dilation in convolution\" % (\n dila_len)\n if kind in ['Convolution', 'Deconvolution']:\n group = params.group\n kernel = [k_h, k_w]\n stride = [s_h, s_w]\n pad = [p_h, p_w]\n dilation = [dila_h, dila_w]\n return c_o, kernel, stride, pad, dilation, group\n\n\nclass CaffeOpMapper(OpMapper):\n directly_map_ops = {\n 'Sigmoid': ['paddle.nn.layer.Sigmoid'],\n 'TanH': ['paddle.nn.Tanh'],\n }\n\n def __init__(self, decoder):\n super(CaffeOpMapper, self).__init__()\n self.graph = decoder.caffe_graph\n if not self.op_checker():\n raise Exception(\"Model is not supported yet.\")\n self.params = dict()\n self.paddle_graph = PaddleGraph(parent_layer=None, graph_type=\"dygraph\", source_type=\"caffe\")\n self.paddle_graph.outputs = self.graph.output_nodes\n self.input_index = 0 \n self.inputs_info = {}\n self.nn_name2id = {}\n print(\"Total nodes: {}\".format(\n sum([\n isinstance(node, CaffeGraphNode)\n for name, node in self.graph.node_map.items()\n ])))\n print(\"Nodes converting ...\")\n for i, node_name in enumerate(self.graph.topo_sort):\n sys.stderr.write(\"\\rConverting node {} ... \".format(i + 1))\n node = self.graph.get_node(node_name)\n op = node.layer_type\n if hasattr(self, op):\n func = getattr(self, op)\n func(node)\n elif op in self.directly_map_ops:\n self.directly_map(node)\n print(\"\\nNodes converted.\")\n self.paddle_graph.set_name(self.graph.graph_name)\n self.paddle_graph.set_parameters(self.params)\n self.paddle_graph.set_inputs_info(self.inputs_info)\n \n def op_checker(self):\n unsupported_ops = set()\n for node_name in self.graph.topo_sort:\n node = self.graph.get_node(node_name)\n op = node.layer_type\n if not hasattr(self, op) and op not in self.directly_map_ops:\n unsupported_ops.add(op)\n if len(unsupported_ops) == 0:\n return True\n else:\n if len(unsupported_ops) > 0:\n print(\"\\n========= {} OPs are not supported yet ===========\".format(\n len(unsupported_ops)))\n for op in unsupported_ops:\n print(\"========== {} ============\".format(op))\n return False\n \n def directly_map(self, node):\n inputs = node.layer.input\n assert len(inputs) == 1, 'directly_map error with multi inputs'\n op_info = self.directly_map_ops[node.layer_type]\n input = self.graph.get_input_node(node, 0)\n paddle_op = op_info[0]\n if paddle_op.startswith(\"paddle.nn\"):\n op_name = paddle_op[10:].lower()\n op_name = name_generator(op_name, self.nn_name2id)\n output_name = node.name\n layer_outputs = [op_name, output_name]\n self.paddle_graph.add_layer(\n kernel=paddle_op,\n inputs={\"x\": input.name},\n outputs=layer_outputs)\n else:\n self.paddle_graph.add_layer(\n kernel=paddle_op,\n inputs={\"x\": input.name},\n outputs=[node.name])\n\n def Input(self, node):\n self.paddle_graph.add_layer(\n \"paddle.to_tensor\",\n inputs={},\n outputs=[node.layer_name],\n data=\"x{}\".format(self.input_index))\n shape = list(node.layer.input_param.shape[0].dim)[1:]\n self.inputs_info[\"x{}\".format(self.input_index)] = [[-1] + shape, \"float32\"]\n self.input_index += 1\n \n def MemoryData(self, node):\n params = node.layer.memory_data_param\n transform_params = node.layer.transform_param\n self.paddle_graph.add_layer(\n \"paddle.to_tensor\",\n inputs={},\n outputs=[node.layer_name],\n data=\"x{}\".format(self.input_index))\n shape = list()\n shape.append(params.batch_size)\n shape.append(params.channels)\n if hasattr(transform_params, \"crop_size\"):\n shape.append(transform_params.crop_size)\n shape.append(transform_params.crop_size)\n else:\n shape.append(params.width)\n 
shape.append(params.height)\n self.inputs_info[\"x{}\".format(self.input_index)] = [shape, \"float32\"]\n self.input_index += 1\n\n def Convolution(self, node):\n conv2d_name = name_generator(\"conv\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [conv2d_name, output_name]\n data = node.data\n params = node.layer.convolution_param\n out_channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(\n node.layer_type, params)\n if data is None:\n data = []\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n data.append(\n np.zeros([out_channel, node.in_shapes[0][1], kernel[0], kernel[1]]).astype(\n 'float32'))\n data.append(np.zeros([out_channel, ]).astype('float32'))\n else:\n data = _adjust_parameters(node)\n self.params[conv2d_name + \".weight\"] = data[0]\n if len(data) == 2:\n self.params[conv2d_name + \".bias\"] = data[1]\n assert len(node.inputs\n ) == 1, \"The count of Convolution node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n layer_attrs = {\n \"in_channels\": node.in_shapes[0][1],\n \"out_channels\": out_channel,\n \"kernel_size\": kernel,\n \"stride\": stride,\n \"padding\": pad,\n \"dilation\": dilation,\n \"groups\": group\n }\n if len(data) == 1:\n layer_attrs[\"bias_attr\"] = False\n self.paddle_graph.add_layer(\n \"paddle.nn.Conv2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n \n def DepthwiseConvolution(self, node):\n node.layer_type = \"ConvolutionDepthwise\"\n self.ConvolutionDepthwise(node)\n\n def Deconvolution(self, node):\n conv2d_name = name_generator(\"conv\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [conv2d_name, output_name]\n data = node.data\n params = node.layer.convolution_param\n out_channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(\n node.layer_type, params)\n if data is None:\n data = []\n print(\n \"The parameter of {} (type is {}) is not set. 
So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n data.append(\n np.zeros([out_channel, node.in_shapes[0][1], kernel[0], kernel[1]]).astype(\n 'float32'))\n data.append(np.zeros([out_channel, ]).astype('float32'))\n else:\n data = _adjust_parameters(node)\n self.params[conv2d_name + \".weight\"] = data[0]\n if len(data) == 2:\n self.params[conv2d_name + \".bias\"] = data[1]\n assert len(node.inputs\n ) == 1, \"The count of Deconvolution node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n layer_attrs = {\n \"in_channels\": node.in_shapes[0][1],\n \"out_channels\": out_channel,\n \"kernel_size\": kernel,\n \"stride\": stride,\n \"padding\": pad,\n \"dilation\": dilation,\n \"groups\": group\n }\n if len(data) == 1:\n layer_attrs[\"bias_attr\"] = False\n self.paddle_graph.add_layer(\n \"paddle.nn.Conv2DTranspose\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n \n def ConvolutionDepthwise(self, node):\n conv2d_name = name_generator(\"conv\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [conv2d_name, output_name]\n data = node.data\n params = node.layer.convolution_param\n out_channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(\n node.layer_type, params)\n out_channel = params.num_output if params.num_output is not None else node.in_shapes[0][1]\n in_channel = node.in_shapes[0][1]\n group = int(in_channel / (in_channel / out_channel)) if in_channel > out_channel else int(in_channel /\n (out_channel / in_channel))\n if data is None:\n data = []\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n data.append(\n np.zeros([out_channel, node.in_shapes[0][1], kernel[0], kernel[1]]).astype(\n 'float32'))\n data.append(np.zeros([out_channel, ]).astype('float32'))\n else:\n data = _adjust_parameters(node)\n self.params[conv2d_name + \".weight\"] = data[0]\n if len(data) == 2:\n self.params[conv2d_name + \".bias\"] = data[1]\n assert len(node.inputs\n ) == 1, \"The count of Deconvolution node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n layer_attrs = {\n \"in_channels\": in_channel,\n \"out_channels\": out_channel,\n \"kernel_size\": kernel,\n \"stride\": stride,\n \"padding\": pad,\n \"dilation\": dilation,\n \"groups\": group\n }\n if len(data) == 1:\n layer_attrs[\"bias_attr\"] = False\n self.paddle_graph.add_layer(\n \"paddle.nn.Conv2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n\n def Pooling(self, node):\n pool2d_name = name_generator(\"pool\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [pool2d_name, output_name]\n params = node.layer.pooling_param\n ceil_mode = getattr(params, \"ceil_mode\", True)\n if not hasattr(params, 'ceil_mode'):\n ceil_mode = True if getattr(params, \"round_mode\", 0) == 0 else False\n global_pool = getattr(params, \"global_pooling\", False)\n kernel_default = [1, 1]\n channel, kernel, stride, pad, dilation, group = _get_kernel_parameters(\n node.layer_type, params)\n if params.pool == 0:\n pool_type = \"max\"\n else:\n pool_type = \"avg\"\n assert len(\n node.inputs) == 1, \"The count of Pooling node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n if global_pool:\n if kernel[0] == 0:\n kernel = [1, 1]\n if params.pool == 0:\n self.paddle_graph.add_layer(\n \"paddle.nn.AdaptiveMaxPool2D\",\n inputs={\"input\": 
input.name},\n outputs=layer_outputs,\n output_size=kernel)\n else:\n self.paddle_graph.add_layer(\n \"paddle.nn.AdaptiveAvgPool2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n output_size=kernel)\n else:\n layer_attrs = {\n 'kernel_size': kernel,\n 'stride': stride,\n 'padding': pad,\n 'ceil_mode': ceil_mode,\n }\n if params.pool == 0:\n self.paddle_graph.add_layer(\n \"paddle.nn.MaxPool2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n else:\n self.paddle_graph.add_layer(\n \"paddle.nn.AvgPool2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n\n def LRN(self, node):\n lrn_name = name_generator(\"lrn\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [lrn_name, output_name]\n assert len(node.inputs) == 1, \"The count of LRN node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.lrn_param\n assert params.local_size % 2 == 1\n alpha = params.alpha / float(params.local_size)\n layer_attrs = {\n \"n\": params.local_size,\n \"k\": params.k,\n \"alpha\": alpha,\n \"beta\": params.beta,\n }\n self.paddle_graph.add_layer(\n \"paddle.fluid.layers.lrn\", \n inputs={\"input\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n\n\n def InnerProduct(self, node):\n linear_name = name_generator(\"linear\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [linear_name, output_name]\n data = node.data\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.inner_product_param\n if data is None:\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0.\"\n .format(node.layer_name, node.layer_type))\n data = []\n data.append(\n np.zeros([node.in_shapes[0][1], params.num_output]).astype(\"float32\").astype(\n \"float32\"))\n data.append(\n np.zeros([params.num_output]).astype(\"float32\").astype(\"float32\"))\n else:\n data = _adjust_parameters(node)\n # Reshape the parameters to Paddle's ordering\n transpose_order = (1, 0)\n w = data[0]\n fc_shape = w.shape\n output_channels = fc_shape[0]\n w = w.reshape((output_channels, -1))\n w = w.transpose(transpose_order)\n data[0] = w\n\n self.params[linear_name + \".weight\"] = data[0]\n if len(data) == 2:\n self.params[linear_name + \".bias\"] = data[1]\n assert len(node.inputs\n ) == 1, \"The count of InnerProduct node\\'s input is not 1.\"\n assert params.axis == 1\n assert params.bias_term == True\n layer_attrs = {\n \"in_features\": data[0].shape[0],\n \"out_features\": params.num_output \n }\n if len(data) == 1:\n layer_attrs[\"bias\"] = False\n if node.in_shapes[0][-1] != data[0].shape[0]:\n self.paddle_graph.add_layer(\n \"paddle.reshape\",\n inputs={\"x\": input.name},\n outputs=[output_name],\n shape=[-1, data[0].shape[0]])\n self.paddle_graph.add_layer(\n \"paddle.nn.Linear\",\n inputs={\"input\": output_name},\n outputs=layer_outputs,\n **layer_attrs)\n else:\n self.paddle_graph.add_layer(\n \"paddle.nn.Linear\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n \n def AbsVal(self, node):\n assert len(\n node.inputs\n ) >= 1, \"The count of AbsVal node\\'s input is not more than 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n self.paddle_graph.add_layer(\n \"paddle.abs\",\n inputs={\"input\": input.name},\n outputs=[node.layer_name])\n\n def Softmax(self, node):\n softmax_name = name_generator(\"softmax\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = 
[softmax_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of Softmax node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.softmax_param\n axis = params.axis\n shape = node.in_shapes[0]\n dims = len(shape)\n axis = axis + dims if axis < 0 else axis\n layer_attrs = {'axis': axis}\n self.paddle_graph.add_layer(\n \"paddle.nn.Softmax\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n\n def Slice(self, node):\n assert len(\n node.inputs) == 1, \"The count of Slice node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n top_len = len(node.layer.top)\n params = node.layer.slice_param\n axis = params.axis\n slice_dim = params.slice_dim\n if slice_dim != 1 and axis == 1:\n axis = slice_dim\n output_shape = node.out_shapes\n sections_list = list()\n outputs_list = list()\n for i, s in enumerate(output_shape):\n sections_list.append(s[axis])\n outputs_list.append(\"{}_p{}\".format(node.layer_name, i))\n layer_attrs = {\n 'num_or_sections': sections_list,\n 'axis': axis,\n }\n self.paddle_graph.add_layer(\n \"paddle.split\",\n inputs={\"x\": input.name},\n outputs=outputs_list,\n **layer_attrs)\n\n def Concat(self, node):\n assert len(\n node.inputs\n ) >= 1, \"The count of Concat node\\'s input is not more than 1.\"\n inputs_list = list()\n for i in range(len(node.inputs)):\n input = self.graph.get_input_node(node, idx=i, copy=True)\n inputs_list.append(input.name)\n params = node.layer.concat_param\n axis = params.axis\n layer_attrs = {'axis': axis}\n self.paddle_graph.add_layer(\n \"paddle.concat\",\n inputs={\"x\": inputs_list},\n outputs=[node.layer_name],\n **layer_attrs)\n\n def ReLU(self, node):\n relu_name = name_generator(\"relu\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [relu_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of RelU node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.relu_param\n if params.HasField('negative_slope') and params.negative_slope != 0:\n negative_slope = float(params.negative_slope)\n\n layer_attrs = {'negative_slope': negative_slope}\n self.paddle_graph.add_layer(\n \"paddle.nn.LeakyReLU\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n else:\n self.paddle_graph.add_layer(\n \"paddle.nn.ReLU\",\n inputs={\"input\": input.name},\n outputs=layer_outputs)\n\n def PReLU(self, node):\n prelu_name = name_generator(\"prelu\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [prelu_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of PReLU node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.prelu_param\n mode_bool = params.channel_shared\n output_shape = node.out_shapes[0]\n if mode_bool:\n num_parameters = 1\n else:\n num_parameters = output_shape[1]\n data = node.data\n self.params[prelu_name + '._weight'] = np.squeeze(data[0])\n assert data is not None, \"The parameter of {} (type is {}) is not set. 
You need to use python package of caffe to set the default value.\".format(\n node.layer_name, node.layer_type)\n self.paddle_graph.add_layer(\n \"paddle.nn.PReLU\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n num_parameters=num_parameters)\n\n def Eltwise(self, node):\n assert len(\n node.inputs) == 2, \"The count of Eltwise node\\'s input is not 2.\"\n params = node.layer.eltwise_param\n mode = params.operation\n inputs = []\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n input0_name = input0.name\n input1_name = input1.name\n if mode == 0:\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = input1_name\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n elif mode == 1:\n if hasattr(params, 'coeff') and len(params.coeff) == 2:\n coeff = params.coeff\n self.paddle_graph.add_layer(\n \"paddle.scale\",\n inputs={\"x\": input0_name},\n outputs=[node.layer_name + '_mul0'],\n scale=coeff[0])\n self.paddle_graph.add_layer(\n \"paddle.scale\",\n inputs={\"x\": input1_name},\n outputs=[node.layer_name + '_mul1'],\n scale=coeff[1])\n inputs_dict = {}\n inputs_dict['x'] = node.layer_name + '_mul0'\n inputs_dict['y'] = node.layer_name + '_mul1'\n self.paddle_graph.add_layer(\n \"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n else:\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = input1_name\n self.paddle_graph.add_layer(\n \"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n else:\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = input1_name\n self.paddle_graph.add_layer(\n \"paddle.max\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n\n def BatchNorm(self, node):\n batchnorm_name = name_generator(\"batchnorm\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [batchnorm_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of BatchNorm node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.batch_norm_param\n if hasattr(params, \"eps\"):\n eps = params.eps\n else:\n eps = 1e-5\n if node.data is None or len(node.data) != 3:\n print(\n \"The parameter of {} (type is {}) is not set. 
So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n mean = np.zeros([node.in_shapes[0][1], ]).astype(\"float32\")\n variance = np.zeros([node.in_shapes[0][1], ]).astype(\"float32\")\n scale = 0\n else:\n\n node.data = [np.squeeze(i).astype(\"float32\") for i in node.data]\n mean, variance, scale = node.data\n # Prescale the stats\n scaling_factor = 1.0 / scale if scale != 0 else 0\n mean *= scaling_factor\n variance *= scaling_factor\n self.params[batchnorm_name + \"._mean\"] = mean\n self.params[batchnorm_name + '._variance'] = variance\n layer_attrs = {\n \"num_features\": node.in_shapes[0][1],\n \"epsilon\": eps,\n \"weight_attr\": False,\n \"bias_attr\": False,\n }\n if len(node.in_shapes[0]) == 2:\n self.paddle_graph.add_layer(\n \"paddle.unsqueeze\",\n inputs={\"x\": input.name},\n outputs=[input.name],\n axis=[2,3])\n self.paddle_graph.add_layer(\n \"paddle.nn.BatchNorm2D\",\n inputs={\"input\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n if len(node.in_shapes[0]) == 2:\n self.paddle_graph.add_layer(\n \"paddle.squeeze\",\n inputs={\"x\": node.layer_name},\n outputs=[node.layer_name],\n axis=[2,3])\n \n def Scale(self, node):\n if node.data is None:\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n self.params[node.layer_name + \"_cparam1\"] = np.zeros([\n node.in_shapes[0][1],\n ]).astype(\"float32\")\n self.params[node.layer_name + \"_cparam2\"] = np.zeros([\n node.in_shapes[0][1],\n ]).astype(\"float32\")\n else:\n self.params[node.layer_name + \"_cparam1\"] = np.squeeze(node.data[\n 0]).astype(\"float32\")\n if not node.layer.scale_param.bias_term:\n self.params[node.layer_name + \"_cparam2\"] = np.zeros([\n node.in_shapes[0][1],\n ]).astype(\"float32\")\n else:\n self.params[node.layer_name + \"_cparam2\"] = np.squeeze(node.data[\n 1]).astype(\"float32\")\n params = node.layer.scale_param\n axis = params.axis\n inputs = []\n if len(node.inputs) == 2:\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n input0_name = input0.name\n input1_name = input1.name\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = input1_name\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"],\n axis=1)\n else:\n self.paddle_graph.add_layer(\n \"self.create_parameter\",\n inputs={},\n outputs=[node.layer_name + \"_cparam1\"],\n shape=self.params[node.layer_name + \"_cparam1\"].shape,\n attr=string(node.layer_name + \"_cparam1\"))\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input0_name = input0.name\n inputs_dict = {}\n inputs_dict['x'] = input0_name\n inputs_dict['y'] = node.layer_name + \"_cparam1\"\n if len(node.in_shapes[0]) == 2:\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"])\n else:\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"],\n axis=axis)\n self.paddle_graph.add_layer(\n \"self.create_parameter\",\n inputs={},\n outputs=[node.layer_name + \"_cparam2\"],\n shape=self.params[node.layer_name + \"_cparam2\"].shape,\n attr=string(node.layer_name + \"_cparam2\"))\n inputs_dict = {}\n inputs_dict['x'] = node.layer_name + \"_mul\"\n inputs_dict['y'] = node.layer_name + \"_cparam2\"\n output_shape = node.out_shapes[0]\n if axis == -1:\n self.paddle_graph.add_layer(\n 
\"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n else:\n if axis < 0:\n axis = axis + len(output_shape)\n param2_shape = self.params[node.layer_name + \"_cparam2\"].shape\n param2_shape_len = len(param2_shape)\n diff_len = len(output_shape) - axis - param2_shape_len\n new_shape = list(param2_shape) + [1] * diff_len\n self.paddle_graph.add_layer(\n \"paddle.reshape\",\n inputs={\"x\": node.layer_name + \"_cparam2\"},\n outputs=[node.layer_name + \"_cparam2\"],\n shape=new_shape)\n self.paddle_graph.add_layer(\n \"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name])\n \n def Reshape(self, node):\n input = self.graph.get_input_node(node, idx=0, copy=True)\n output_shape = node.out_shapes[0]\n self.paddle_graph.add_layer(\n \"paddle.reshape\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n shape=output_shape)\n\n\n def ArgMax(self, node):\n assert len(node.inputs) == 1 and len(\n node.outputs\n ) == 1, \"The count of ArgMax node\\'s input and output is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n input_shape = node.in_shapes[0]\n params = node.layer.argmax_param\n out_max_val = params.out_max_val if hasattr(params,\n out_max_val) else False\n top_k = params.top_k if hasattr(params, top_k) else 1\n axis = params.axis if hasattr(params, axis) else -1\n if axis < 0:\n axis += len(input_shape)\n if out_max_val is True:\n self.paddle_graph.add_layer(\n \"paddle.topk\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name + \"_topk_var\", node.layer_name + \"_index_var\"],\n k=top_k)\n self.paddle_graph.add_layer(\n \"paddle.cast\",\n inputs={\"x\": node.layer_name + \"_index_var\"},\n outputs=[node.layer_name + \"_index_var\"],\n dtype=\"{}_topk_var.dtype\".format(node.layer_name))\n self.paddle_graph.add_layer(\n \"paddle.concat\",\n inputs={\"x\": [node.layer_name + \"_topk_var\", node.layer_name + \"_index_var\"]},\n outputs=[node.layer_name],\n axis=axis)\n else:\n self.paddle_graph.add_layer(\n \"paddle.topk\",\n inputs={\"x\": input.name},\n outputs=[\"_\", node.layer_name],\n k=top_k)\n \n def Axpy(self, node):\n assert len(node.inputs) == 1 and len(\n node.outputs\n ) == 1, \"The count of Axpy node\\'s input and output is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.axpy_param\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n input2 = self.graph.get_input_node(node, idx=2, copy=True)\n input0_name = input0.name\n input1_name = input1.name\n input2_name = input2.name\n inputs_dict = {}\n inputs_dict['x'] = input1_name\n inputs_dict['y'] = input0_name\n self.paddle_graph.add_layer(\n \"paddle.multiply\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"],\n axis=0)\n inputs_dict = {}\n inputs_dict['x'] = node.layer_name + \"_mul\"\n inputs_dict['y'] = input2_name\n self.paddle_graph.add_layer(\n \"paddle.add\",\n inputs=inputs_dict,\n outputs=[node.layer_name + \"_mul\"])\n \n\n def Crop(self, node):\n assert len(\n node.inputs) == 2, \"The count of Crop node\\'s input is not 2.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n example = self.graph.get_input_node(node, idx=1, copy=True)\n params = node.layer.crop_param\n axis = params.axis\n input_shape = node.in_shapes[0]\n if axis < 0:\n axis += len(input_shape)\n offset_real = [0] * len(input_shape)\n if hasattr(params, \"offset\") and len(params.offset) > 0:\n offset = list(params.offset)\n assert (len(input_shape) - 
axis\n ) == len(offset), \"invalid offset[%s] in crop layer\" % (\n str(offset))\n offset_real = [0] * axis + offset\n self.paddle_graph.add_layer(\n \"paddle.crop\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n shape=node.in_shapes[1],\n offsets=list(offset_real))\n\n def Flatten(self, node):\n assert len(\n node.\n inputs) == 1, \"The count of DetectionOutput node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n self.paddle_graph.add_layer(\n \"paddle.reshape\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n shape=node.out_shapes[0])\n\n def Power(self, node):\n assert len(\n node.inputs) == 1, \"The count of Permute node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.power_param\n layer_attrs = {\n 'scale': params.scale,\n 'bias': params.shift,\n 'bias_after_scale': True\n }\n self.paddle_graph.add_layer(\n \"paddle.scale\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n self.paddle_graph.add_layer(\n \"paddle.pow\",\n inputs={\"x\": node.layer_name},\n outputs=[node.layer_name],\n exponent=params.power)\n\n def Reduction(self, node):\n assert len(\n node.inputs) == 1, \"The count of Reduction node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.reduction_param\n operation = params.operation\n axis = params.axis\n coeff = params.coeff\n assert operation >= 1 and operation <= 4, \"reduction reduction [%s] error\" % (\n operation)\n input_len = len(node.in_shapes[0])\n if axis < 0:\n axis += input_len + 1\n dim = list(range(input_len))\n # operation = SUM\n if operation == 1: \n layer_attrs = {\n \"dim\": dim[axis:],\n \"keep_dim\": False,\n }\n self.paddle_graph.add_layer(\n \"paddle.sum\",\n inputs={\"input\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n # operation = ASUM\n elif operation == 2: \n self.paddle_graph.add_layer(\n \"paddle.abs\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name])\n layer_attrs = {\n \"dim\": dim[axis:],\n \"keep_dim\": False,\n }\n self.paddle_graph.add_layer(\n \"paddle.sum\",\n inputs={\"input\": node.layer_name},\n outputs=[node.layer_name],\n **layer_attrs)\n # operation = SUMSQ\n elif operation == 3: \n self.paddle_graph.add_layer(\n \"paddle.pow\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n exponent=2.0)\n layer_attrs = {\n \"dim\": dim[axis:],\n \"keep_dim\": False,\n }\n self.paddle_graph.add_layer(\n \"paddle.sum\",\n inputs={\"input\": node.layer_name},\n outputs=[node.layer_name],\n **layer_attrs)\n # operation = MEAN\n else: \n layer_attrs = {\n \"axis\": dim[axis:],\n \"keepdim\": False,\n }\n self.paddle_graph.add_layer(\n \"paddle.mean\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n self.paddle_graph.add_layer(\n \"paddle.scale\",\n inputs={\"x\": node.layer_name},\n outputs=[node.layer_name],\n scale=coeff)\n \n def DetectionOutput(self, node):\n detection_output_name = name_generator(\"detection_output\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [detection_output_name, output_name]\n assert len(\n node.inputs) == 3, \"The count of DetectionOutput node\\'s input is not 3.\"\n inputs_dict = dict()\n for i in range(len(node.inputs)):\n input = self.graph.get_input_node(node, idx=i, copy=True)\n if i == 1:\n input = self.graph.get_input_node(node, idx=i, copy=True)\n while input is not None \\\n and input.layer_type != 'Softmax' \\\n and 
input.layer_type != 'Sigmoid':\n input = self.graph.get_input_node(input, idx=0, copy=True)\n assert input is not None, 'This kind of DetectionOutput is not supported!'\n input = self.graph.get_input_node(input, idx=0, copy=True)\n inputs_dict[\"x{}\".format(i)] = input.name\n params = node.layer.detection_output_param\n nms_param = params.nms_param\n nms_param_dict = dict()\n nms_param_dict[\"nms_threshold\"] = nms_param.nms_threshold\n nms_param_dict[\"top_k\"] = nms_param.top_k\n nms_param_dict[\"eta\"] = nms_param.eta\n if nms_param is None:\n nms_param_dict = {\"nms_threshold\": 0.3, \"top_k\": 10, \"eta\": 1.0}\n default = {\"nms_threshold\": 0.3, \"top_k\": 10, \"eta\": 1.0}\n fields = [\"eta\", \"top_k\", \"nms_threshold\"]\n for f in default.keys():\n if f not in nms_param_dict:\n nms_param_dict[f] = default[f]\n layer_attrs = {\n \"background_label\": params.background_label_id,\n \"nms_threshold\": nms_param_dict[\"nms_threshold\"],\n \"nms_top_k\": nms_param_dict[\"top_k\"],\n \"keep_top_k\": params.keep_top_k,\n \"score_threshold\": params.confidence_threshold,\n \"nms_eta\": nms_param_dict[\"eta\"]}\n self.paddle_graph.add_layer(\n kernel=\"custom_layer:DetectionOutput\",\n inputs=inputs_dict,\n outputs=layer_outputs,\n **layer_attrs)\n \n def Normalize(self, node):\n normalize_name = name_generator(\"normalize\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [normalize_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of Normalize node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.norm_param\n param_name = node.layer_name + \"_scale\"\n if node.data is None or len(node.data) != 1:\n print(\n \"The parameter of {} (type is {}) is not set. So we set the parameters as 0\"\n .format(node.layer_name, node.layer_type))\n self.params[param_name] = \\\n np.zeros([1] if params.channel_shared else [node.in_shapes[0][1]]).astype(\"float32\")\n else:\n self.params[param_name] = _adjust_parameters(node)[0]\n \n \n self.paddle_graph.add_layer(\n \"self.create_parameter\",\n inputs={},\n outputs=[param_name],\n shape=self.params[param_name].shape,\n attr=string(param_name))\n inputs_dict = {}\n layer_attrs = {\n \"axis\": -1 if params.channel_shared else 1}\n self.paddle_graph.add_layer(\n \"custom_layer:Normalize\",\n inputs={\"x\": input.name,\n \"param\": param_name},\n outputs=layer_outputs,\n **layer_attrs)\n \n def Permute(self, node):\n assert len(\n node.inputs) == 1, \"The count of Permute node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.permute_param\n order = list(params.order) \n self.paddle_graph.add_layer(\n \"paddle.transpose\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n perm=order)\n \n def PriorBox(self, node):\n priorbox_name = name_generator(\"priorbox\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [priorbox_name, output_name]\n assert len(\n node.inputs) == 2, \"The count of PriorBox node\\'s input is not 2.\"\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n inputs_dict = {}\n inputs_dict[\"x0\"] = input0.name\n inputs_dict[\"x1\"] = input1.name\n params = node.layer.prior_box_param\n steps = tuple(params.step) if type(params.step) \\\n is list or type(params.step) is tuple \\\n else (params.step, params.step)\n layer_attrs = {\n \"min_sizes\": params.min_size,\n \"max_sizes\": params.max_size,\n 
\"aspect_ratios\": params.aspect_ratio,\n \"variance\": params.variance,\n \"flip\": params.flip,\n \"clip\": params.clip,\n \"steps\": steps,\n \"offset\": params.offset,\n \"min_max_aspect_ratios_order\": True}\n self.paddle_graph.add_layer(\n \"custom_layer:PriorBox\",\n inputs=inputs_dict,\n outputs=layer_outputs,\n **layer_attrs)\n \n def ReLU6(self, node):\n relu6_name = name_generator(\"relu6\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [relu6_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of RelU6 node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n self.paddle_graph.add_layer(\n \"paddle.nn.ReLU6\",\n inputs={\"input\": input.name},\n outputs=layer_outputs)\n \n def ROIPooling(self, node):\n roipooling_name = name_generator(\"roipooling\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [roipooling_name, output_name]\n assert len(\n node.inputs) == 2, \"The count of ROIPooling node\\'s input is not 2.\"\n input0 = self.graph.get_input_node(node, idx=0, copy=True)\n input1 = self.graph.get_input_node(node, idx=1, copy=True)\n inputs_dict = {}\n inputs_dict[\"x0\"] = input0.name\n inputs_dict[\"x1\"] = input1.name\n params = node.layer.roi_pooling_param\n layer_attrs = {\n \"pooled_height\": params.pooled_h,\n \"pooled_width\": params.pooled_w,\n \"spatial_scale\": params.spatial_scale}\n self.paddle_graph.add_layer(\n \"custom_layer:ROIPooling\",\n inputs=inputs_dict,\n outputs=layer_outputs,\n **layer_attrs)\n \n def ShuffleChannel(self, node):\n assert len(\n node.inputs) == 1, \"The count of ShuffleChannel node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.shuffle_channel_param\n self.paddle_graph.add_layer(\n \"paddle.fluid.layers.shuffle_channel\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n group=params.group)\n \n def Upsample(self, node):\n assert len(\n node.inputs) == 1, \"The count of Upsample node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n params = node.layer.upsample_param\n layer_attrs = {\n \"align_corners\": False,\n \"scale_factor\": params.scale,\n \"mode\": \"nearest\"}\n self.paddle_graph.add_layer(\n \"paddle.nn.functional.interpolate\",\n inputs={\"x\": input.name},\n outputs=[node.layer_name],\n **layer_attrs)\n \n def Select(self, node):\n select_name = name_generator(\"select\", self.nn_name2id)\n output_name = node.layer_name\n layer_outputs = [select_name, output_name]\n assert len(\n node.inputs) == 1, \"The count of Select node\\'s input is not 1.\"\n input = self.graph.get_input_node(node, idx=0, copy=True)\n input_shape = node.in_shapes[0]\n params = node.layer.select_param\n layer_attrs = {\n \"input_shape\": input_shape,\n \"point\": params.slice_point,\n \"axis\": params.axis}\n self.paddle_graph.add_layer(\n \"custom_layer:Select\",\n inputs={\"x\": input.name},\n outputs=layer_outputs,\n **layer_attrs)\n \n\n \n\n" ]
[ [ "numpy.zeros", "numpy.squeeze" ] ]
ahillbs/minimum_scan_cover
[ "e41718e5a8e0e3039d161800da70e56bd50a1b97" ]
[ "code/instance_evolver.py" ]
[ "import os\nimport subprocess\nfrom inspect import isclass\n\nimport configargparse\nimport numpy as np\nimport sqlalchemy\nimport yaml\nfrom IPython import embed\n\n\nfrom angular_solver import solve\nfrom database import Config, ConfigHolder, Graph, Task, get_session, DatabaseGraphGenome\nfrom genetic_algorithm import (GeneticAlgorithm, Genome,\n IterationTerminationConditionMet, SaveCallback,\n k_point_crossover, linear_rank_selection,\n one_point_crossover, uniform_crossover,\n uniform_wheel_selection)\nfrom instance_generation import (create_circle, create_circle_n_k,\n create_random_circle)\nfrom solver import MscColoringSolver, AngularMinSumGreedySolver\nfrom solver.min_sum_simple_solver import solve_min_sum_simple_n_gon\nfrom solver.mip import (AngularGraphScanMakespanAbsolute,\n AngularGraphScanMakespanAbsoluteReduced,\n AngularGraphScanMakespanHamilton,\n AngularGraphScanMinSumHamilton,\n AngularDependencySolver,\n AngularDependencyLocalMinSumSolver,\n AngularGraphScanLocalMinSumHamilton)\nfrom solver.cp import (ConstraintAbsSolver,\n ConstraintDependencySolver)\nfrom utils import (Multidict, visualize_graph_2d, visualize_min_sum_sol_2d,\n visualize_solution_2d)\nfrom angular_evolver import (AngularSolverFitness, CompleteGraphGenome, GraphGenome, GraphGenomeCreator,\n CompleteGraphGenomeCreator, mutate_2d_points, mutate_vertex_edge_genomes)\nfrom solver import ALL_SOLVER\n\nclass GroupedAction(configargparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n group, dest = self.dest.split('.', 2)\n groupspace = getattr(namespace, group, configargparse.Namespace())\n setattr(groupspace, dest, values)\n setattr(namespace, group, groupspace)\n\ndef string_to_callable(function_name):\n assert function_name != 'eval', \"Eval is not allowed!\"\n warning_displayed_once = getattr(StringToCallableAction, \"warning_displayed\", False)\n if not warning_displayed_once:\n print(\"WARNING: Do not use StringToCallableAction in production code! This is just a hack for faster development!\")\n setattr(StringToCallableAction, \"warning_displayed\", True)\n try:\n call = ALL_SOLVER[function_name]\n except KeyError:\n call = globals()[function_name]\n return call\n\nclass StringToCallableAction(configargparse.Action):\n def __call__(self, parser, namespace, values, option_string=None):\n \n warning_displayed_once = getattr(StringToCallableAction, \"warning_displayed\", False)\n if not warning_displayed_once:\n print(\"WARNING: Do not use StringToCallableAction in production code! This is just a hack for faster development!\")\n setattr(StringToCallableAction, \"warning_displayed\", True)\n call = globals()[values]\n if callable(call):\n setattr(namespace, self.dest, call)\n else:\n raise TypeError(f\"{values} is not callable\")\n\ndef _instantiate_callables(func_name, obj_args):\n callable_obj = string_to_callable(func_name)\n if not callable_obj:\n raise AttributeError(f\"{func_name} function is not set.\".capitalize())\n if not isclass(callable_obj):\n return callable_obj\n if not obj_args:\n obj_args = {}\n return callable_obj(**obj_args)\n\ndef _get_task_and_config(session, arg_config):\n task = None\n config = None\n if arg_config.url_path:\n if hasattr(arg_config, \"task\") and arg_config.task is not None:\n task = session.query(Task).filter(Task.id == arg_config.task).one()\n if arg_config.override_config and \\\n input(f\"Are you sure to override the configs for {task.id}? 
(y/N)\").lower() in [\"y\", \"yes\"]:\n print(f\"Override config from task {task.id})\")\n for task_config in task.configs:\n session.delete(task_config)\n arg_config.override_config = False\n config = ConfigHolder.fromNamespace(arg_config, task, [\"override_config\", \"url_path\", \"PreEvolveInteractive\", \"create_only\"])\n session.add(config)\n session.commit()\n else:\n print(\"Using config from database\")\n config = ConfigHolder(task)\n else:\n if input(\"New Task will be created (Y/n)?\").lower() in [\"\", \"yes\", \"y\"]:\n print(\"Will create a new task.\")\n task = Task(task_type=\"instance_evolver\", status=Task.STATUS_OPTIONS.CREATED, name=arg_config.name)\n session.add(task)\n session.commit()\n arg_config.task = task.id\n config = ConfigHolder.fromNamespace(arg_config, task, ignored_attributes=[\"url_path\", \"create_only\", \"name\", \"override_config\"])\n session.add_all(config.database_configs)\n session.commit()\n savepath = input(f\"Task ID is {task.id}. Type a filepath to save the ID in a config file (default: Skip save): \")\n if savepath:\n _save_task_file(savepath, config, task)\n else:\n config = arg_config\n return task, config\n\ndef _save_task_file(savepath, config, task):\n n_s = configargparse.Namespace()\n n_s.task = task.id\n parser = configargparse.Parser()\n parser.add_argument(\"--task\")\n parser.add_argument(\"--database\")\n parsed = parser.parse_args(args=[f\"--task={task.id}\", f\"--database={config.url_path}\"])\n parser.write_config_file(n_s, [savepath])\n\ndef _evolve_instances(arg_config):\n session = get_session(arg_config.url_path)\n task, config = _get_task_and_config(session, arg_config)\n\n if not arg_config.create_only:\n process_task(config, task, session)\n\ndef process_task(config, task, session):\n # First init all callable classes\n try:\n mutation = _instantiate_callables(config.mutation_func, None)\n selection = _instantiate_callables(config.selection_func, None)\n crossover = _instantiate_callables(config.crossover_func, None)\n fitness = _instantiate_callables(config.fitness_func, config.fitness_func_initargs)\n if config.term_condition == 'IterationTerminationConditionMet' and not config.term_condition_initargs:\n term_con = IterationTerminationConditionMet(max_iter=config.generations)\n else:\n term_con = _instantiate_callables(config.term_condition, config.term_condition_initargs)\n if config.callback == 'SaveCallback' and config.callback_initargs is None:\n callback = SaveCallback(config.generations, config.population_amount, task, session)\n else:\n callback = _instantiate_callables(config.callback, config.callback_initargs)\n task.status = Task.STATUS_OPTIONS.PROCESSING\n if session:\n session.commit()\n # Now load population if provided, else generate it\n starting_generation, population = _load_population(config, task, session)\n\n if config.PreEvolveInteractive:\n print(\"Config set up. 
To change the population just change the 'population' variable.\")\n print(\"For other variables just refer to the locals.\")\n embed()\n\n gen_algo = GeneticAlgorithm(\n genomes=population,\n selection=selection,\n mutation=mutation,\n fitness=fitness,\n crossover=crossover,\n callback=callback,\n termCon=term_con,\n elitism=config.elitism,\n mutationChance=config.mutation_chance_genome,\n mutationChanceGene=config.mutation_chance_gene\n )\n gen_algo.evolve(generation=starting_generation)\n task.status = Task.STATUS_OPTIONS.FINISHED\n if session:\n session.commit()\n except InterruptedError as e:\n task.status = task.STATUS_OPTIONS.INTERRUPTED\n if session:\n session.commit()\n except Exception as e:\n if session:\n task.status = Task.STATUS_OPTIONS.ERROR\n task.error_message = str(e)\n session.commit()\n print(e)\n raise e\n\ndef _load_population(config, task, session: 'Session'):\n population = []\n curr_generation = 0\n if session is not None:\n try:\n last_gen = session.query(DatabaseGraphGenome)\\\n .filter(DatabaseGraphGenome.task_id == task.id)\\\n .order_by(DatabaseGraphGenome.generation.desc())\\\n .limit(1)\\\n .one()\n curr_generation = last_gen.generation\n queue = session.query(DatabaseGraphGenome)\\\n .filter(DatabaseGraphGenome.task_id == task.id, DatabaseGraphGenome.generation == curr_generation)\\\n .order_by(DatabaseGraphGenome.generation.desc())\\\n .limit(config.population_amount)\n population = np.zeros(config.population_amount, dtype=object)\n population[:] = [genome for genome in queue]\n assert isinstance(population[0], Genome), \"Loaded data does not contain valid genomes\"\n except sqlalchemy.orm.exc.NoResultFound as e:\n pass\n\n if len(population) < config.population_amount:\n if population:\n print(\"Given population smaller than wanted. Fill with random instances\")\n temp_pop = np.zeros(config.population_amount - len(population), dtype=object)\n create_instances = _instantiate_callables(config.instance_creation_func, config.instance_creation_initargs)\n temp_pop[:] = [\n create_instances(task, generation=curr_generation)\n for i in range(config.population_amount - len(population))\n ]\n session.add_all(temp_pop.tolist())\n session.commit()\n population = np.hstack([population[:len(population)],\n temp_pop]) # ToDo: This call needs to be reworked\n elif len(population) > config.population_amount:\n print(\"Given population too large. Will slice off the end\")\n population = population[:config.population_amount]\n\n return curr_generation, population\n\n\ndef _argument_parser():\n parser = configargparse.ArgumentParser(description=\"Parser for the instance evolver\")\n parser.add_argument(\n '--config',\n type=str,\n help='Path to config file (default: inst_evo_settings.yaml)',\n default=\"inst_evo_settings.yaml\",\n is_config_file_arg=True)\n parser.add_argument(\n '--PreEvolveInteractive',\n action='store_true',\n help='Ipython interactive for instance creation (default: False)',\n default=False)\n parser.add_argument('--override-config', action=\"store_true\", default=False, help=\"Set this flag to override configuration with passed arguments\")\n parser.add_argument('--url-path', type=str, default=\"angular.db\", help=\"Path to database. Creates Database if it does not exist (Default: angular.db)\")\n parser.add_argument('--task', type=int, help=\"Id of the task that shall be continued\")\n parser.add_argument('--generations', type=int, default=200, help=\"Amount of generations evolved. 
If a save is loaded, it will only evolve the difference for the generations (default: 200)\")\n parser.add_argument('--elitism', type=float, default=0.01, help=\"Elitism rate (default: 0.01)\")\n #parser.add_argument('--genome-creator',\n parser.add_argument('--instance-creation-func', type=str, help=\"Function for initial creation of instances\")\n parser.add_argument('--instance-creation-initargs', type=yaml.safe_load, help=\"Parameter for instance creation\")\n parser.add_argument('--population-amount', type=int, default=200, help=\"Amont of genomes per generation (default: 200)\")\n parser.add_argument('--mutation-chance-genome', type=float, default=0.03, help=\"Chance a genome will be selected for mutation (default: 0.03)\")\n parser.add_argument('--mutation-chance-gene', type=float, default=0.03, help=\"Chance a gene is changed (default: 0.03)\")\n parser.add_argument('--mutation-func', type=str, help=\"Mutation callable used. Required if no safefile config is used\")\n parser.add_argument('--selection-func', type=str, help=\"Selection callable used. Required if no safefile is used\")\n parser.add_argument('--crossover-func', type=str, help=\"Crossover callable used. Required if no safefile is used\")\n parser.add_argument('--fitness-func', type=str, help=\"Fitness callable used. Required if no safefile is used\")\n parser.add_argument('--fitness-func-initargs', type=yaml.safe_load, default=None, help=\"Fitness callable init keyword arguments. Omitted when emtpy.\")\n parser.add_argument('--term-condition', type=str, default='IterationTerminationConditionMet', help=\"Termination callable used. (default: IterationTerminationConditionMet)\")\n parser.add_argument('--term-condition-initargs', type=yaml.safe_load, default=None, help=\"Keyword arguments dict for termination condition callable init. Not needed for standard term-condition.\")\n parser.add_argument('--callback', type=str, default='SaveCallback', help=\"Callback used in genetic_algorithm (default: SaveCallback)\")\n parser.add_argument('--callback-initargs', type=yaml.safe_load, default=None, help=\"Callback keyword arguments dict for init. Not needed for standard SaveCallback else omitted if not provided\")\n parser.add_argument('--create-only', action=\"store_true\", help=\"Only create task instead of also solving it\")\n parser.add_argument('--name', type=str, default=\"\", help=\"Optional name description of the task\")\n parsed = parser.parse_args()\n #parser.write_config_file()\n #print(vars(parsed))\n return parsed\n\nif __name__ == \"__main__\":\n CONFIG = _argument_parser()\n _evolve_instances(CONFIG)\n" ]
[ [ "numpy.zeros" ] ]
aounleonardo/Spread-Classification
[ "22c643252e31df367dfeb55fd1a5397dabd7f2b4" ]
[ "modeling/src/nlp_ignite_engines.py" ]
[ "import torch\n\nfrom ignite.engine.engine import Engine, State, Events\nfrom ignite.utils import convert_tensor\n\n\ndef _prepare_batch(batch, device=None, non_blocking=False):\n \"\"\"Prepare batch for training: pass to a device with options.\n\n \"\"\"\n x, attention_mask, y = batch\n return (\n convert_tensor(x, device=device, non_blocking=non_blocking),\n convert_tensor(attention_mask, device=device, non_blocking=non_blocking),\n convert_tensor(y, device=device, non_blocking=non_blocking).float(),\n )\n\n\ndef create_nlp_trainer(\n model,\n optimizer,\n loss_fn,\n device=None,\n non_blocking=False,\n prepare_batch=_prepare_batch,\n output_transform=lambda x, y, y_pred, loss: loss.item(),\n):\n \"\"\"\n Factory function for creating a trainer for nlp models.\n The only difference with the ignite create_supervised_trainer is the attention to attention_mask (pun intented).\n\n Args:\n model (`torch.nn.Module`): the model to train.\n optimizer (`torch.optim.Optimizer`): the optimizer to use.\n loss_fn (torch.nn loss function): the loss function to use.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred', 'loss' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.\n\n Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is the loss\n of the processed batch by default.\n\n Returns:\n Engine: a trainer engine with supervised update function.\n \"\"\"\n if device:\n model.to(device)\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n x, attention_mask, y = prepare_batch(\n batch, device=device, non_blocking=non_blocking\n )\n y_pred = model(x, attention_mask=attention_mask)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return output_transform(x, y, y_pred, loss)\n\n return Engine(_update)\n\n\ndef create_nlp_evaluator(\n model,\n metrics=None,\n device=None,\n non_blocking=False,\n prepare_batch=_prepare_batch,\n output_transform=lambda x, y, y_pred: (y_pred, y,),\n):\n \"\"\"\n Factory function for creating an evaluator for nlp models.\n The only difference with the ignite create_supervised_evaluator is the attention to attention_mask (pun intented).\n\n Args:\n model (`torch.nn.Module`): the model to train.\n metrics (dict of str - :class:`~ignite.metrics.Metric`): a map of metric names to Metrics.\n device (str, optional): device type specification (default: None).\n Applies to both model and batches.\n non_blocking (bool, optional): if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch (callable, optional): function that receives `batch`, `device`, `non_blocking` and outputs\n tuple of tensors `(batch_x, batch_y)`.\n output_transform (callable, optional): function that receives 'x', 'y', 'y_pred' and returns value\n to be assigned to engine's state.output after each iteration. Default is returning `(y_pred, y,)` which fits\n output expected by metrics. 
If you change it you should use `output_transform` in metrics.\n\n Note: `engine.state.output` for this engine is defind by `output_transform` parameter and is\n a tuple of `(batch_pred, batch_y)` by default.\n\n Returns:\n Engine: an evaluator engine with supervised inference function.\n \"\"\"\n metrics = metrics or {}\n\n if device:\n model.to(device)\n\n def _inference(engine, batch):\n model.eval()\n with torch.no_grad():\n x, attention_mask, y = prepare_batch(\n batch, device=device, non_blocking=non_blocking\n )\n y_pred = model(x, attention_mask=attention_mask)\n return output_transform(x, y, y_pred)\n\n engine = Engine(_inference)\n\n for name, metric in metrics.items():\n metric.attach(engine, name)\n\n return engine\n" ]
[ [ "torch.no_grad" ] ]
janfreyberg/ipyannotate
[ "b1c30fe73bfda107d4ef75945338d42bfe8e3b64" ]
[ "tests/images/test_abstract_canvas.py" ]
[ "import pathlib\nimport tempfile\nfrom typing import Tuple, Union\nfrom unittest.mock import patch\n\nimport ipywidgets as widgets\nimport numpy as np\nfrom hypothesis import assume, given, infer, settings, strategies\nfrom PIL import Image\n\nfrom ipyannotations.images.canvases.abstract_canvas import (\n AbstractAnnotationCanvas,\n)\nimport ipyannotations.images.canvases.image_utils\nfrom ipyannotations.images.canvases.image_utils import fit_image\n\n\nclass TestCanvas(AbstractAnnotationCanvas):\n \"\"\"Test canvas to test the abstract canvas.\"\"\"\n\n def init_empty_data(self):\n self.data = []\n\n\n@settings(deadline=None)\n@given(img=infer)\ndef test_that_loading_image_clears_data(\n img: Union[widgets.Image, np.ndarray, Image.Image]\n):\n\n with patch.object(\n AbstractAnnotationCanvas, \"init_empty_data\"\n ) as mock_init_empty_data:\n canvas = AbstractAnnotationCanvas()\n mock_init_empty_data.reset_mock()\n canvas.load_image(img)\n\n mock_init_empty_data.assert_called_once()\n\n\n@settings(deadline=None)\n@given(img=infer)\ndef test_that_loading_image_from_path_succeeds(img: Image.Image):\n\n with tempfile.TemporaryDirectory(dir=\".\") as tmp:\n tmp = pathlib.Path(tmp)\n tmp = tmp / \"testfile.jpg\"\n img.save(tmp)\n\n with patch.object(\n AbstractAnnotationCanvas, \"init_empty_data\"\n ) as mock_init_empty_data:\n canvas = AbstractAnnotationCanvas()\n mock_init_empty_data.reset_mock()\n canvas.load_image(tmp)\n\n mock_init_empty_data.assert_called_once()\n\n\n@given(img=infer)\ndef test_that_fit_image_always_fits_image(img: widgets.Image):\n\n with patch.object(AbstractAnnotationCanvas, \"init_empty_data\"):\n canvas = AbstractAnnotationCanvas()\n\n x0, y0, x1, y1, _, _ = fit_image(img, canvas)\n\n assert (x1, y1) < canvas.size\n\n\n@given(\n img=infer, click_x=strategies.floats(0, 1), click_y=strategies.floats(0, 1)\n)\ndef test_that_points_clicked_get_translated_correctly(\n img: widgets.Image, click_x: float, click_y: float\n):\n with patch.object(AbstractAnnotationCanvas, \"init_empty_data\"):\n canvas = AbstractAnnotationCanvas()\n canvas.load_image(img)\n\n x0, y0, width, height, img_width, img_height = fit_image(img, canvas)\n assume((img_width, img_height) > (20, 20))\n\n click_x = round(x0 + click_x * width)\n click_y = round(y0 + click_y * height)\n\n assert (\n (0, 0)\n <= canvas.canvas_to_image_coordinates((click_x, click_y))\n <= (img_width, img_height)\n )\n\n round_trip_x, round_trip_y = canvas.image_to_canvas_coordinates(\n canvas.canvas_to_image_coordinates((click_x, click_y))\n )\n assert np.isclose(round_trip_x, click_x) and np.isclose(\n round_trip_y, click_y, atol=1\n )\n\n\n@settings(deadline=None)\n@given(img=infer)\ndef test_that_images_are_adjusted(img: widgets.Image):\n with patch(\n \"ipyannotations.images.canvases.abstract_canvas.adjust\", autospec=True\n ) as mock_adjust:\n mock_adjust.return_value = img\n canvas = TestCanvas()\n canvas.image_brightness = 1.1\n canvas.image_contrast = 1.1\n canvas.load_image(img)\n\n mock_adjust.assert_called_once_with(\n img,\n contrast_factor=1.1,\n brightness_factor=1.1,\n )\n" ]
[ [ "numpy.isclose" ] ]
thomashopf/EVcouplings-1
[ "d3e4947d29b62537bd79215ce72b6eea18134850" ]
[ "evcouplings/compare/protocol.py" ]
[ "\"\"\"\nEC to 3D structure comparison protocols/workflows.\n\nAuthors:\n Thomas A. Hopf\n Anna G. Green (complex and _make_complex_contact_maps)\n\"\"\"\n\nfrom copy import deepcopy\nfrom math import ceil\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom evcouplings.align.alignment import (\n read_fasta, parse_header\n)\nfrom evcouplings.utils.config import (\n check_required, InvalidParameterError\n)\n\nfrom evcouplings.utils.system import (\n create_prefix_folders, insert_dir, verify_resources,\n)\nfrom evcouplings.couplings import Segment\nfrom evcouplings.compare.pdb import load_structures\nfrom evcouplings.compare.distances import (\n intra_dists, multimer_dists, remap_chains,\n inter_dists, remap_complex_chains\n)\nfrom evcouplings.compare.sifts import SIFTS, SIFTSResult\nfrom evcouplings.compare.ecs import (\n coupling_scores_compared, add_precision\n)\nfrom evcouplings.visualize import pairs, misc\n\n\ndef _identify_structures(**kwargs):\n \"\"\"\n Identify set of 3D structures for comparison\n\n Parameters\n ----------\n **kwargs\n See check_required in code below\n\n Returns\n -------\n SIFTSResult\n Identified structures and residue index mappings\n \"\"\"\n\n def _filter_by_id(x, id_list):\n x = deepcopy(x)\n x.hits = x.hits.loc[\n x.hits.pdb_id.isin(id_list)\n ]\n return x\n\n check_required(\n kwargs,\n [\n \"prefix\", \"pdb_ids\", \"compare_multimer\",\n \"max_num_hits\", \"max_num_structures\",\n \"pdb_mmtf_dir\",\n \"sifts_mapping_table\", \"sifts_sequence_db\",\n \"by_alignment\", \"pdb_alignment_method\",\n \"alignment_min_overlap\",\n \"sequence_id\", \"sequence_file\", \"region\",\n \"use_bitscores\", \"domain_threshold\",\n \"sequence_threshold\"\n ]\n )\n # get SIFTS mapping object/sequence DB\n s = SIFTS(\n kwargs[\"sifts_mapping_table\"],\n kwargs[\"sifts_sequence_db\"]\n )\n\n reduce_chains = not kwargs[\"compare_multimer\"]\n\n # determine if we need to find structures\n # by sequence search or just fetching\n # based on Uniprot/PDB identifier\n if kwargs[\"by_alignment\"]:\n\n # if searching by alignment, verify that\n # user selected jackhmmer or hmmsearch\n SEARCH_METHODS = [\"jackhmmer\", \"hmmsearch\"]\n\n if kwargs[\"pdb_alignment_method\"] not in SEARCH_METHODS:\n raise InvalidParameterError(\n \"Invalid pdb search method: \" +\n \"{}. 
Valid selections are: {}\".format(\n \", \".join(SEARCH_METHODS.keys())\n )\n )\n\n sifts_map = s.by_alignment(\n reduce_chains=reduce_chains,\n min_overlap=kwargs[\"alignment_min_overlap\"],\n **kwargs\n )\n else:\n sifts_map = s.by_uniprot_id(\n kwargs[\"sequence_id\"], reduce_chains=reduce_chains\n )\n\n sifts_map_full = deepcopy(sifts_map)\n\n # filter ID list down to manually selected PDB entries\n if kwargs[\"pdb_ids\"] is not None:\n pdb_ids = kwargs[\"pdb_ids\"]\n\n # make sure we have a list of PDB IDs\n if not isinstance(pdb_ids, list):\n pdb_ids = [pdb_ids]\n\n pdb_ids = [x.lower() for x in pdb_ids]\n\n sifts_map = _filter_by_id(sifts_map, pdb_ids)\n\n # limit number of hits and structures\n if kwargs[\"max_num_hits\"] is not None:\n sifts_map.hits = sifts_map.hits.iloc[:kwargs[\"max_num_hits\"]]\n\n if kwargs[\"max_num_structures\"] is not None:\n keep_ids = sifts_map.hits.pdb_id.unique()\n keep_ids = keep_ids[:kwargs[\"max_num_structures\"]]\n sifts_map = _filter_by_id(sifts_map, keep_ids)\n\n return sifts_map, sifts_map_full\n\n\ndef _make_contact_maps(ec_table, d_intra, d_multimer, **kwargs):\n \"\"\"\n Plot contact maps with all ECs above a certain probability threshold,\n or a given count of ECs\n\n Parameters\n ----------\n ec_table : pandas.DataFrame\n Full set of evolutionary couplings (all pairs)\n d_intra : DistanceMap\n Computed residue-residue distances inside chain\n d_multimer : DistanceMap\n Computed residue-residue distances between homomultimeric\n chains\n **kwargs\n Further plotting parameters, see check_required in code\n for necessary values.\n\n Returns\n -------\n cm_files : list(str)\n Paths of generated contact map files\n \"\"\"\n\n def plot_cm(ecs, output_file=None):\n \"\"\"\n Simple wrapper for contact map plotting\n \"\"\"\n with misc.plot_context(\"Arial\"):\n fig = plt.figure(figsize=(8, 8))\n if kwargs[\"scale_sizes\"]:\n ecs = ecs.copy()\n ecs.loc[:, \"size\"] = ecs.cn.values / ecs.cn.max()\n\n pairs.plot_contact_map(\n ecs, d_intra, d_multimer,\n distance_cutoff=kwargs[\"distance_cutoff\"],\n show_secstruct=kwargs[\"draw_secondary_structure\"],\n margin=5,\n boundaries=kwargs[\"boundaries\"]\n )\n\n plt.suptitle(\"{} evolutionary couplings\".format(len(ecs)), fontsize=14)\n\n if output_file is not None:\n plt.savefig(output_file, bbox_inches=\"tight\")\n plt.close(fig)\n\n check_required(\n kwargs,\n [\n \"prefix\", \"min_sequence_distance\",\n \"plot_probability_cutoffs\",\n \"boundaries\", \"plot_lowest_count\",\n \"plot_highest_count\", \"plot_increase\",\n \"draw_secondary_structure\"\n ]\n )\n prefix = kwargs[\"prefix\"]\n\n cm_files = []\n\n ecs_longrange = ec_table.query(\n \"abs(i - j) >= {}\".format(kwargs[\"min_sequence_distance\"])\n )\n\n # based on significance cutoff\n if kwargs[\"plot_probability_cutoffs\"]:\n cutoffs = kwargs[\"plot_probability_cutoffs\"]\n if not isinstance(cutoffs, list):\n cutoffs = [cutoffs]\n\n for c in cutoffs:\n ec_set = ecs_longrange.query(\"probability >= @c\")\n # only can plot if we have any significant ECs above threshold\n if len(ec_set) > 0:\n output_file = prefix + \"_significant_ECs_{}.pdf\".format(c)\n plot_cm(ec_set, output_file=output_file)\n cm_files.append(output_file)\n\n # based on number of long-range ECs\n\n # identify number of sites in EC model\n num_sites = len(\n set.union(set(ec_table.i.unique()), set(ec_table.j.unique()))\n )\n\n # transform fraction of number of sites into discrete number of ECs\n def _discrete_count(x):\n if isinstance(x, float):\n x = ceil(x * num_sites)\n 
return int(x)\n\n # range of plots to make\n lowest = _discrete_count(kwargs[\"plot_lowest_count\"])\n highest = _discrete_count(kwargs[\"plot_highest_count\"])\n step = _discrete_count(kwargs[\"plot_increase\"])\n\n # create individual plots\n for c in range(lowest, highest + 1, step):\n ec_set = ecs_longrange.iloc[:c]\n output_file = prefix + \"_{}_ECs.pdf\".format(c)\n plot_cm(ec_set, output_file=output_file)\n cm_files.append(output_file)\n\n # give back list of all contact map file names\n return cm_files\n\n\ndef _make_complex_contact_maps(ec_table, d_intra_i, d_multimer_i,\n d_intra_j, d_multimer_j,\n d_inter, first_segment_name,\n second_segment_name, **kwargs):\n \"\"\"\n Plot contact maps with all ECs above a certain probability threshold,\n or a given count of ECs\n\n Parameters\n ----------\n ec_table : pandas.DataFrame\n Full set of evolutionary couplings (all pairs)\n d_intra_i, d_intra_j: DistanceMap\n Computed residue-residue distances within chains for\n monomers i and j\n d_multimer_i, d_multimer_j : DistanceMap\n Computed residue-residue distances between homomultimeric\n chains for monomers i and j\n d_inter: DistanceMap\n Computed residue-residue distances between heteromultimeric\n chains i and j\n first_segment_name, second_segment_name: str\n Name of segment i and segment j in the ec_table\n **kwargs\n Further plotting parameters, see check_required in code\n for necessary values.\n\n Returns\n -------\n cm_files : list(str)\n Paths of generated contact map files\n \"\"\"\n\n def plot_complex_cm(ecs_i, ecs_j, ecs_inter, \n first_segment_name,\n second_segment_name, output_file=None):\n \"\"\"\n Simple wrapper for contact map plotting\n \"\"\"\n with misc.plot_context(\"Arial\"):\n if kwargs[\"scale_sizes\"]:\n # to scale sizes, combine all ecs to rescale together\n ecs = pd.concat([ecs_i, ecs_j, ecs_inter])\n ecs.loc[:, \"size\"] = ecs.cn.values / ecs.cn.max()\n\n # split back into three separate DataFrames\n ecs_i = ecs.query(\"segment_i == segment_j == @first_segment_name\")\n ecs_j = ecs.query(\"segment_i == segment_j == @second_segment_name\")\n ecs_inter = ecs.query(\"segment_i != segment_j\")\n\n # if any of these groups are entry, replace with None\n if len(ecs_i) == 0:\n ecs_i = None\n if len(ecs_j) == 0:\n ecs_j = None\n if len(ecs_inter) == 0:\n ecs_inter = None\n\n # Currently, we require at least one of the monomer \n # to have either ECs or distances in order to make a plot\n if ((ecs_i is None or ecs_i.empty) and d_intra_i is None and d_multimer_i is None) \\\n or ((ecs_j is None or ecs_j.empty) and d_intra_j is None and d_multimer_i is None):\n return False\n\n fig = plt.figure(figsize=(8, 8))\n\n # create the contact map\n pairs.complex_contact_map(\n ecs_i, ecs_j, ecs_inter,\n d_intra_i, d_multimer_i,\n d_intra_j, d_multimer_j,\n d_inter,\n margin=5,\n boundaries=kwargs[\"boundaries\"],\n scale_sizes=kwargs[\"scale_sizes\"]\n )\n\n # Add title to the plot\n if ecs_inter is None:\n ec_len = '0'\n else:\n ec_len = len(ecs_inter)\n plt.suptitle(\n \"{} inter-molecule evolutionary couplings\".format(ec_len), \n fontsize=14\n )\n\n # save to output\n if output_file is not None:\n plt.savefig(output_file, bbox_inches=\"tight\")\n plt.close(fig)\n\n return True\n\n check_required(\n kwargs,\n [\n \"prefix\", \"min_sequence_distance\",\n \"plot_probability_cutoffs\",\n \"boundaries\",\n \"draw_secondary_structure\", \"plot_lowest_count\",\n \"plot_highest_count\", \"plot_increase\",\n \"scale_sizes\"\n ]\n )\n\n prefix = kwargs[\"prefix\"]\n\n cm_files = 
[]\n\n ecs_longrange = ec_table.query(\n \"abs(i - j) >= {} or segment_i != segment_j\".format(kwargs[\"min_sequence_distance\"])\n )\n\n # create plots based on significance cutoff\n if kwargs[\"plot_probability_cutoffs\"]:\n cutoffs = kwargs[\"plot_probability_cutoffs\"]\n if not isinstance(cutoffs, list):\n cutoffs = [cutoffs]\n\n for c in cutoffs:\n ec_set = ecs_longrange.query(\"probability >= @c\")\n\n # only can plot if we have any significant ECs above threshold\n if len(ec_set) > 0:\n ec_set_i = ec_set.query(\"segment_i == segment_j == @first_segment_name\")\n ec_set_j = ec_set.query(\"segment_i == segment_j == @second_segment_name\")\n ec_set_inter = ec_set.query(\"segment_i != segment_j\")\n\n output_file = prefix + \"_significant_ECs_{}.pdf\".format(c)\n plot_completed = plot_complex_cm(\n ec_set_i, ec_set_j, ec_set_inter,\n first_segment_name, second_segment_name,\n output_file=output_file\n )\n if plot_completed:\n cm_files.append(output_file)\n\n # transform fraction of number of sites into discrete number of ECs\n def _discrete_count(x):\n if isinstance(x, float):\n num_sites = 0\n for seg_name in [first_segment_name, second_segment_name]:\n num_sites += len(\n set.union(\n set(ec_table.query(\"segment_i == @seg_name\").i.unique()),\n set(ec_table.query(\"segment_j == @seg_name\").j.unique())\n )\n )\n\n x = ceil(x * num_sites)\n\n return int(x)\n\n # range of plots to make\n lowest = _discrete_count(kwargs[\"plot_lowest_count\"])\n highest = _discrete_count(kwargs[\"plot_highest_count\"])\n step = _discrete_count(kwargs[\"plot_increase\"])\n\n for c in range(lowest, highest + 1, step):\n # get the inter ECs to plot\n ec_set_inter = ecs_longrange.query(\"segment_i != segment_j\")[0:c]\n\n # if there are no inter ecs to be plotted, continue\n if ec_set_inter.empty:\n continue\n\n # get the index of the lowest inter EC\n last_inter_index = ec_set_inter.index[-1]\n\n # take all intra-protein ECs that score higher than the lowest plotted inter-protein EC\n ec_set_i = ecs_longrange.iloc[0:last_inter_index].query(\n \"segment_i == segment_j == @first_segment_name\"\n )\n ec_set_j = ecs_longrange.iloc[0:last_inter_index].query(\n \"segment_i == segment_j == @second_segment_name\"\n )\n\n output_file = prefix + \"_{}_ECs.pdf\".format(c)\n plot_completed = plot_complex_cm(\n ec_set_i, ec_set_j, ec_set_inter,\n first_segment_name, second_segment_name,\n output_file=output_file\n )\n if plot_completed:\n cm_files.append(output_file)\n\n # give back list of all contact map file names\n return cm_files\n\n\ndef standard(**kwargs):\n \"\"\"\n Protocol:\n Compare ECs for single proteins (or domains)\n to 3D structure information\n\n Parameters\n ----------\n Mandatory kwargs arguments:\n See list below in code where calling check_required\n\n Returns\n -------\n outcfg : dict\n Output configuration of the pipeline, including\n the following fields:\n\n * ec_file_compared_all\n * ec_file_compared_all_longrange\n * pdb_structure_hits\n * distmap_monomer\n * distmap_multimer\n * contact_map_files\n * remapped_pdb_files\n \"\"\"\n check_required(\n kwargs,\n [\n \"prefix\", \"ec_file\", \"min_sequence_distance\",\n \"pdb_mmtf_dir\", \"atom_filter\", \"compare_multimer\",\n \"distance_cutoff\", \"target_sequence_file\",\n \"scale_sizes\",\n ]\n )\n\n prefix = kwargs[\"prefix\"]\n\n outcfg = {\n \"ec_compared_all_file\": prefix + \"_CouplingScoresCompared_all.csv\",\n \"ec_compared_longrange_file\": prefix + \"_CouplingScoresCompared_longrange.csv\",\n \"pdb_structure_hits_file\": prefix + 
\"_structure_hits.csv\",\n \"pdb_structure_hits_unfiltered_file\": prefix + \"_structure_hits_unfiltered.csv\",\n # cannot have the distmap files end with \"_file\" because there are\n # two files (.npy and .csv), which would cause problems with automatic\n # checking if those files exist\n \"distmap_monomer\": prefix + \"_distance_map_monomer\",\n \"distmap_multimer\": prefix + \"_distance_map_multimer\",\n }\n\n # make sure EC file exists\n verify_resources(\n \"EC file does not exist\",\n kwargs[\"ec_file\"]\n )\n\n # make sure output directory exists\n create_prefix_folders(prefix)\n\n # store auxiliary files here (too much for average user)\n aux_prefix = insert_dir(prefix, \"aux\", rootname_subdir=False)\n create_prefix_folders(aux_prefix)\n\n # Step 1: Identify 3D structures for comparison\n sifts_map, sifts_map_full = _identify_structures(**{\n **kwargs,\n \"prefix\": aux_prefix,\n })\n\n # save selected PDB hits\n sifts_map.hits.to_csv(\n outcfg[\"pdb_structure_hits_file\"], index=False\n )\n\n # also save full list of hits\n sifts_map_full.hits.to_csv(\n outcfg[\"pdb_structure_hits_unfiltered_file\"], index=False\n )\n\n # Step 2: Compute distance maps\n\n # load all structures at once\n structures = load_structures(\n sifts_map.hits.pdb_id,\n kwargs[\"pdb_mmtf_dir\"],\n raise_missing=False\n )\n\n # compute distance maps and save\n # (but only if we found some structure)\n if len(sifts_map.hits) > 0:\n d_intra = intra_dists(\n sifts_map, structures, atom_filter=kwargs[\"atom_filter\"],\n output_prefix=aux_prefix + \"_distmap_intra\"\n )\n d_intra.to_file(outcfg[\"distmap_monomer\"])\n\n # save contacts to separate file\n outcfg[\"monomer_contacts_file\"] = prefix + \"_contacts_monomer.csv\"\n d_intra.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[\"monomer_contacts_file\"], index=False\n )\n\n # compute multimer distances, if requested;\n # note that d_multimer can be None if there\n # are no structures with multiple chains\n if kwargs[\"compare_multimer\"]:\n d_multimer = multimer_dists(\n sifts_map, structures, atom_filter=kwargs[\"atom_filter\"],\n output_prefix=aux_prefix + \"_distmap_multimer\"\n )\n else:\n d_multimer = None\n\n # if we have a multimer contact mapin the end, save it\n if d_multimer is not None:\n d_multimer.to_file(outcfg[\"distmap_multimer\"])\n outcfg[\"multimer_contacts_file\"] = prefix + \"_contacts_multimer.csv\"\n\n # save contacts to separate file\n d_multimer.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[\"multimer_contacts_file\"], index=False\n )\n else:\n outcfg[\"distmap_multimer\"] = None\n\n # at this point, also create remapped structures (e.g. 
for\n # later comparison of folding results)\n verify_resources(\n \"Target sequence file does not exist\",\n kwargs[\"target_sequence_file\"]\n )\n\n # create target sequence map for remapping structure\n with open(kwargs[\"target_sequence_file\"]) as f:\n header, seq = next(read_fasta(f))\n\n seq_id, seq_start, seq_end = parse_header(header)\n seqmap = dict(zip(range(seq_start, seq_end + 1), seq))\n\n # remap structures, swap mapping index and filename in\n # dictionary so we have a list of files in the dict keys\n outcfg[\"remapped_pdb_files\"] = {\n filename: mapping_index for mapping_index, filename in\n remap_chains(sifts_map, aux_prefix, seqmap).items()\n }\n else:\n # if no structures, can not compute distance maps\n d_intra = None\n d_multimer = None\n outcfg[\"distmap_monomer\"] = None\n outcfg[\"distmap_multimer\"] = None\n outcfg[\"remapped_pdb_files\"] = None\n\n # Step 3: Compare ECs to distance maps\n\n ec_table = pd.read_csv(kwargs[\"ec_file\"])\n\n # identify number of sites in EC model\n num_sites = len(\n set.union(set(ec_table.i.unique()), set(ec_table.j.unique()))\n )\n\n for out_file, min_seq_dist in [\n (\"ec_compared_longrange_file\", kwargs[\"min_sequence_distance\"]),\n (\"ec_compared_all_file\", 0),\n ]:\n # compare ECs only if we minimally have intra distance map\n if d_intra is not None:\n coupling_scores_compared(\n ec_table, d_intra, d_multimer,\n dist_cutoff=kwargs[\"distance_cutoff\"],\n output_file=outcfg[out_file],\n min_sequence_dist=min_seq_dist\n )\n else:\n outcfg[out_file] = None\n\n # also create line-drawing script if we made the csv\n if outcfg[\"ec_compared_longrange_file\"] is not None:\n ecs_longrange = pd.read_csv(outcfg[\"ec_compared_longrange_file\"])\n\n outcfg[\"ec_lines_compared_pml_file\"] = prefix + \"_draw_ec_lines_compared.pml\"\n pairs.ec_lines_pymol_script(\n ecs_longrange.iloc[:num_sites, :],\n outcfg[\"ec_lines_compared_pml_file\"],\n distance_cutoff=kwargs[\"distance_cutoff\"]\n )\n\n # Step 4: Make contact map plots\n # if no structures available, defaults to EC-only plot\n\n outcfg[\"contact_map_files\"] = _make_contact_maps(\n ec_table, d_intra, d_multimer, **kwargs\n )\n\n return outcfg\n\n\ndef complex(**kwargs):\n \"\"\"\n Protocol:\n Compare ECs for a complex to\n 3D structure\n\n Parameters\n ----------\n Mandatory kwargs arguments:\n See list below in code where calling check_required\n\n Returns\n -------\n outcfg : dict\n Output configuration of the pipeline, including\n the following fields:\n\n * ec_file_compared_all\n * ec_file_compared_all_longrange\n * pdb_structure_hits\n * distmap_monomer\n * distmap_multimer\n * contact_map_files\n * remapped_pdb_files\n \"\"\"\n check_required(\n kwargs,\n [\n \"prefix\", \"ec_file\", \"min_sequence_distance\",\n \"pdb_mmtf_dir\", \"atom_filter\",\n \"first_compare_multimer\", \"second_compare_multimer\",\n \"distance_cutoff\", \"segments\",\n \"first_sequence_id\", \"second_sequence_id\",\n \"first_sequence_file\", \"second_sequence_file\",\n \"first_target_sequence_file\", \"second_target_sequence_file\",\n \"scale_sizes\"\n ]\n )\n\n prefix = kwargs[\"prefix\"]\n\n outcfg = {\n # initialize output EC files\n \"ec_compared_all_file\": prefix + \"_CouplingScoresCompared_all.csv\",\n \"ec_compared_longrange_file\": prefix + \"_CouplingScoresCompared_longrange.csv\",\n \"ec_compared_inter_file\": prefix + \"_CouplingScoresCompared_inter.csv\",\n\n # initialize output inter distancemap files\n \"distmap_inter\": prefix + \"_distmap_inter\",\n \"inter_contacts_file\": prefix + 
\"_inter_contacts_file\"\n }\n\n # Add PDB comparison files for first and second monomer\n for monomer_prefix in [\"first\", \"second\"]:\n outcfg = {\n **outcfg,\n monomer_prefix + \"_pdb_structure_hits_file\":\n \"{}_{}_structure_hits.csv\".format(prefix, monomer_prefix),\n monomer_prefix + \"_pdb_structure_hits_unfiltered_file\":\n \"{}_{}_structure_hits_unfitered.csv\".format(prefix, monomer_prefix),\n monomer_prefix + \"_distmap_monomer\":\n \"{}_{}_distance_map_monomer\".format(prefix, monomer_prefix),\n monomer_prefix + \"_distmap_multimer\":\n \"{}_{}_distance_map_multimer\".format(prefix, monomer_prefix),\n }\n\n # make sure EC file exists\n verify_resources(\n \"EC file does not exist\",\n kwargs[\"ec_file\"]\n )\n\n # make sure output directory exists\n create_prefix_folders(prefix)\n\n # store auxiliary files here (too much for average user)\n aux_prefix = insert_dir(prefix, \"aux\", rootname_subdir=False)\n create_prefix_folders(aux_prefix)\n\n # store auxiliary files here (too much for average user)\n first_aux_prefix = insert_dir(aux_prefix, \"first_monomer\", rootname_subdir=False)\n create_prefix_folders(first_aux_prefix)\n\n # store auxiliary files here (too much for average user)\n second_aux_prefix = insert_dir(aux_prefix, \"second_monomer\", rootname_subdir=False)\n create_prefix_folders(second_aux_prefix)\n\n # Step 1: Identify 3D structures for comparison\n def _identify_monomer_structures(name_prefix, outcfg, aux_prefix):\n # create a dictionary with kwargs for just the current monomer\n # remove the \"prefix\" kwargs so that we can replace with the \n # aux prefix when calling _identify_structures\n # only replace first occurrence of name_prefix\n monomer_kwargs = {\n k.replace(name_prefix + \"_\", \"\", 1): v for k, v in kwargs.items() if \"prefix\" not in k\n }\n\n # this field needs to be set explicitly else it gets overwritten by concatenated file\n monomer_kwargs[\"alignment_file\"] = kwargs[name_prefix + \"_alignment_file\"]\n monomer_kwargs[\"raw_focus_alignment_file\"] = kwargs[name_prefix + \"_raw_focus_alignment_file\"]\n\n # identify structures for that monomer\n sifts_map, sifts_map_full = _identify_structures(\n **monomer_kwargs,\n prefix=aux_prefix\n )\n\n # save selected PDB hits\n sifts_map.hits.to_csv(\n outcfg[name_prefix + \"_pdb_structure_hits_file\"], index=False\n )\n\n # also save full list of hits\n sifts_map_full.hits.to_csv(\n outcfg[name_prefix + \"_pdb_structure_hits_unfiltered_file\"], index=False\n )\n return outcfg, sifts_map\n\n outcfg, first_sifts_map = _identify_monomer_structures(\"first\", outcfg, first_aux_prefix)\n outcfg, second_sifts_map = _identify_monomer_structures(\"second\", outcfg, second_aux_prefix)\n\n # get the segment names from the kwargs\n segment_list = kwargs[\"segments\"]\n\n # Make sure user provided exactly two segments\n if len(segment_list) != 2:\n raise InvalidParameterError(\n \"Compare stage for protein complexes requires exactly two segments\"\n )\n\n first_segment_name = Segment.from_list(kwargs[\"segments\"][0]).segment_id\n second_segment_name = Segment.from_list(kwargs[\"segments\"][1]).segment_id\n\n first_chain_name = Segment.from_list(kwargs[\"segments\"][0]).default_chain_name()\n second_chain_name = Segment.from_list(kwargs[\"segments\"][1]).default_chain_name()\n\n # Step 2: Compute distance maps\n def _compute_monomer_distance_maps(sifts_map, name_prefix, chain_name):\n\n # prepare a sequence map to remap the structures we have found\n verify_resources(\n \"Target sequence file does not 
exist\",\n kwargs[name_prefix + \"_target_sequence_file\"]\n )\n\n # create target sequence map for remapping structure\n with open(kwargs[name_prefix + \"_target_sequence_file\"]) as f:\n header, seq = next(read_fasta(f))\n\n # create target sequence map for remapping structure\n seq_id, seq_start, seq_end = parse_header(header)\n seqmap = dict(zip(range(seq_start, seq_end + 1), seq))\n\n # compute distance maps and save\n # (but only if we found some structure)\n if len(sifts_map.hits) > 0:\n d_intra = intra_dists(\n sifts_map, structures, atom_filter=kwargs[\"atom_filter\"],\n output_prefix=aux_prefix + \"_\" + name_prefix + \"_distmap_intra\"\n )\n d_intra.to_file(outcfg[name_prefix + \"_distmap_monomer\"])\n\n # save contacts to separate file\n outcfg[name_prefix + \"_monomer_contacts_file\"] = prefix + \"_\" + name_prefix + \"_contacts_monomer.csv\"\n d_intra.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[name_prefix + \"_monomer_contacts_file\"], index=False\n )\n\n # compute multimer distances, if requested;\n # note that d_multimer can be None if there\n # are no structures with multiple chains\n if kwargs[name_prefix + \"_compare_multimer\"]:\n d_multimer = multimer_dists(\n sifts_map, structures, atom_filter=kwargs[\"atom_filter\"],\n output_prefix=aux_prefix + \"_\" + name_prefix + \"_distmap_multimer\"\n )\n else:\n d_multimer = None\n\n # if we have a multimer contact map, save it\n if d_multimer is not None:\n d_multimer.to_file(outcfg[name_prefix + \"_distmap_multimer\"])\n outcfg[name_prefix + \"_multimer_contacts_file\"] = prefix + name_prefix + \"_contacts_multimer.csv\"\n\n # save contacts to separate file\n d_multimer.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[name_prefix + \"_multimer_contacts_file\"], index=False\n )\n else:\n outcfg[name_prefix + \"_distmap_multimer\"] = None\n\n # create remapped structures (e.g. 
for\n # later comparison of folding results)\n # remap structures, swap mapping index and filename in\n # dictionary so we have a list of files in the dict keys\n outcfg[name_prefix + \"_remapped_pdb_files\"] = {\n filename: mapping_index for mapping_index, filename in\n remap_chains(\n sifts_map, aux_prefix, seqmap, chain_name=chain_name,\n raise_missing=kwargs[\"raise_missing\"]\n ).items()\n }\n\n else:\n # if no structures, cannot compute distance maps\n d_intra = None\n d_multimer = None\n outcfg[name_prefix + \"_distmap_monomer\"] = None\n outcfg[name_prefix + \"_distmap_multimer\"] = None\n outcfg[name_prefix + \"remapped_pdb_files\"] = None\n\n return d_intra, d_multimer, seqmap\n\n # load all structures for both monomers\n all_structures = set(first_sifts_map.hits.pdb_id).union(\n set(second_sifts_map.hits.pdb_id)\n )\n structures = load_structures(\n all_structures,\n kwargs[\"pdb_mmtf_dir\"],\n raise_missing=False\n )\n\n d_intra_i, d_multimer_i, seqmap_i = _compute_monomer_distance_maps(\n first_sifts_map, \"first\", first_chain_name\n )\n d_intra_j, d_multimer_j, seqmap_j = _compute_monomer_distance_maps(\n second_sifts_map, \"second\", second_chain_name\n )\n\n # compute inter distance map if sifts map for each monomer exists\n if len(first_sifts_map.hits) > 0 and len(second_sifts_map.hits) > 0:\n d_inter = inter_dists(\n first_sifts_map, second_sifts_map,\n raise_missing=kwargs[\"raise_missing\"]\n )\n # if there were overlapping PDBs, save the results\n if d_inter is not None:\n d_inter.to_file(outcfg[\"distmap_inter\"])\n\n # save contacts to separate file\n d_inter.contacts(\n kwargs[\"distance_cutoff\"]\n ).to_csv(\n outcfg[\"inter_contacts_file\"], index=False\n )\n\n else:\n outcfg[\"inter_contacts_file\"] = None\n d_inter = None\n\n # # Step 3: Compare ECs to distance maps\n ec_table = pd.read_csv(kwargs[\"ec_file\"])\n\n for out_file, min_seq_dist in [\n (\"ec_compared_longrange_file\", kwargs[\"min_sequence_distance\"]),\n (\"ec_compared_all_file\", 0),\n ]:\n\n # compare ECs only if we have an intra distance map\n # for at least one monomer - inter can't exist unless\n # we have both monomers\n if (d_intra_i is not None) or (d_intra_j is not None):\n # compare distances individually for each segment pair\n ecs_intra_i = ec_table.query(\"segment_i == segment_j == @first_segment_name\")\n if d_intra_i is not None:\n ecs_intra_i_compared = coupling_scores_compared(\n ecs_intra_i, d_intra_i, d_multimer_i,\n dist_cutoff=kwargs[\"distance_cutoff\"],\n output_file=None,\n min_sequence_dist=min_seq_dist\n )\n else:\n # If no distance map, the distance is saved as np.nan\n ecs_intra_i_compared = ecs_intra_i.assign(dist=np.nan)\n\n ecs_intra_j = ec_table.query(\"segment_i == segment_j == @second_segment_name\")\n if d_intra_j is not None:\n ecs_intra_j_compared = coupling_scores_compared(\n ecs_intra_j, d_intra_j, d_multimer_j,\n dist_cutoff=kwargs[\"distance_cutoff\"],\n output_file=None,\n min_sequence_dist=min_seq_dist\n )\n else:\n ecs_intra_j_compared = ecs_intra_j.assign(dist=np.nan)\n\n ecs_inter = ec_table.query(\"segment_i != segment_j\")\n if d_inter is not None:\n ecs_inter_compared = coupling_scores_compared(\n ecs_inter, d_inter, dist_map_multimer=None,\n dist_cutoff=kwargs[\"distance_cutoff\"],\n output_file=None,\n min_sequence_dist=None # does not apply for inter-protein ECs\n )\n else:\n ecs_inter_compared = ecs_inter.assign(dist=np.nan)\n\n # combine the tables\n ec_table_compared = pd.concat([\n ecs_inter_compared,\n ecs_intra_i_compared,\n 
ecs_intra_j_compared\n ])\n\n # rename the precision column to \"segmentwise_precision\"\n # because we calculated precision for each segment independently\n ec_table_compared = ec_table_compared.rename(\n columns={\"precision\": \"segmentwise_precision\"}\n )\n # TODO: change \"cn\" to \"score\" eventually\n ec_table_compared = ec_table_compared.sort_values(\"cn\", ascending=False)\n\n # add the total precision\n # TODO: implement different cutoffs for intra vs inter contacts\n ec_table_compared = add_precision(\n ec_table_compared,\n dist_cutoff=kwargs[\"distance_cutoff\"]\n )\n\n # save to file\n # all ecs\n ec_table_compared.to_csv(outcfg[out_file])\n\n # save the inter ECs to a file\n ecs_inter_compared.to_csv(outcfg[\"ec_compared_inter_file\"])\n\n # create the inter-ecs line drawing script\n if outcfg[\"ec_compared_inter_file\"] is not None and kwargs[\"plot_highest_count\"] is not None:\n inter_ecs = ec_table.query(\"segment_i != segment_j\")\n\n outcfg[\"ec_lines_compared_pml_file\"] = prefix + \"_draw_ec_lines_compared.pml\"\n\n pairs.ec_lines_pymol_script(\n inter_ecs.iloc[:kwargs[\"plot_highest_count\"], :],\n outcfg[\"ec_lines_compared_pml_file\"],\n distance_cutoff=kwargs[\"distance_cutoff\"],\n chain={\n first_segment_name: first_chain_name,\n second_segment_name: second_chain_name\n }\n )\n\n # Remap the complex crystal structures, if available\n if len(first_sifts_map.hits) > 0 and len(second_sifts_map.hits) > 0:\n outcfg[\"complex_remapped_pdb_files\"] = {\n filename: mapping_index for mapping_index, filename in\n remap_complex_chains(\n first_sifts_map, second_sifts_map,\n seqmap_i, seqmap_j, output_prefix=aux_prefix,\n raise_missing=kwargs[\"raise_missing\"]\n ).items()\n }\n\n # Step 4: Make contact map plots\n # if no structures available, defaults to EC-only plot\n outcfg[\"contact_map_files\"] = _make_complex_contact_maps(\n ec_table, d_intra_i, d_multimer_i,\n d_intra_j, d_multimer_j,\n d_inter, first_segment_name,\n second_segment_name, **kwargs\n )\n\n return outcfg\n\n\n# list of available EC comparison protocols\nPROTOCOLS = {\n # standard monomer comparison protocol\n \"standard\": standard,\n\n # comparison for protein complexes\n \"complex\": complex\n}\n\n\ndef run(**kwargs):\n \"\"\"\n Run inference protocol to calculate ECs from\n input sequence alignment.\n\n Parameters\n ----------\n Mandatory kwargs arguments:\n protocol: EC protocol to run\n prefix: Output prefix for all generated files\n\n Returns\n -------\n outcfg : dict\n Output configuration of stage\n (see individual protocol for fields)\n \"\"\"\n check_required(kwargs, [\"protocol\"])\n\n if kwargs[\"protocol\"] not in PROTOCOLS:\n raise InvalidParameterError(\n \"Invalid protocol selection: \" +\n \"{}. Valid protocols are: {}\".format(\n kwargs[\"protocol\"], \", \".join(PROTOCOLS.keys())\n )\n )\n\n return PROTOCOLS[kwargs[\"protocol\"]](**kwargs)\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "pandas.concat", "matplotlib.pyplot.close" ] ]
srkasuMsft/MLOpsTemplate
[ "0c90ed954c553a3936ecb882cbf35dfd03e14e9d" ]
[ "src/workshop/core/scoring/batch_score.py" ]
[ "\nimport os\nimport tempfile\nimport logging\nfrom azureml.core.model import Model\nimport pickle\nimport pandas as pd\nfrom azureml.core import Run\nimport os\nimport mlflow\n\ndef init():\n global model\n model_dir =os.getenv('AZUREML_MODEL_DIR')\n model_file = os.listdir(model_dir)[0]\n model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), model_file)\n model = mlflow.sklearn.load_model(model_path)\n\ndef run(mini_batch):\n print(f\"run method start: {__file__}, run({mini_batch})\")\n resultList = []\n\n \n # Set up logging\n\n for batch in mini_batch:\n # prepare each image\n data = pd.read_json(batch)\n predictions = model.predict(data)\n data[\"prediction\"] =predictions\n resultList.append(data)\n result = pd.concat(resultList)\n\n return result\n" ]
[ [ "pandas.concat", "pandas.read_json" ] ]
bayesxl/PB2
[ "54b94dc2ebae488ea5e2bf5250a9d10b89011852" ]
[ "run_ppo.py" ]
[ "\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport argparse\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom datetime import datetime\n\nimport ray\nfrom ray.tune import run, sample_from\nfrom ray.tune.schedulers import PopulationBasedTraining, AsyncHyperBandScheduler\n\nfrom pb2 import PB2\n\n# Postprocess the perturbed config to ensure it's still valid\ndef explore(config):\n # ensure we collect enough timesteps to do sgd\n if config[\"train_batch_size\"] < config[\"sgd_minibatch_size\"] * 2:\n config[\"train_batch_size\"] = config[\"sgd_minibatch_size\"] * 2\n # ensure we run at least one sgd iter\n if config[\"lambda\"] > 1:\n config[\"lambda\"] = 1\n config[\"train_batch_size\"] = int(config[\"train_batch_size\"])\n return config\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"--max\", type=int, default=1000000)\n parser.add_argument(\"--algo\", type=str, default='PPO')\n parser.add_argument(\"--num_workers\", type=int, default=4)\n parser.add_argument(\"--num_samples\", type=int, default=4)\n parser.add_argument(\"--freq\", type=int, default=50000)\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--horizon\", type=int, default=1600) # make this 1000 for other envs\n parser.add_argument(\"--perturb\", type=float, default=0.25)\n parser.add_argument(\"--env_name\", type=str, default=\"BipedalWalker-v2\")\n parser.add_argument(\"--criteria\", type=str, default=\"timesteps_total\") # \"training_iteration\"\n parser.add_argument(\"--net\", type=str, default=\"32_32\") # didn't play with this, but may be important for bigger tasks\n parser.add_argument(\"--batchsize\", type=str, default=\"1000_60000\")\n parser.add_argument(\"--num_sgd_iter\", type=int, default=10)\n parser.add_argument(\"--sgd_minibatch_size\", type=int, default=128)\n parser.add_argument(\"--use_lstm\", type=int, default=0) # for future, not used\n parser.add_argument(\"--filename\", type=str, default=\"\")\n parser.add_argument(\"--method\", type=str, default=\"pb2\") # ['pbt', 'pb2', 'asha']\n \n args = parser.parse_args()\n ray.init()\n \n args.dir = \"{}_{}_{}_Size{}_{}_{}\".format(args.algo, args.filename, args.method, str(args.num_samples), args.env_name, args.criteria)\n if not(os.path.exists('data/'+args.dir)):\n os.makedirs('data/'+args.dir)\n\n pbt = PopulationBasedTraining(\n time_attr= args.criteria,\n metric=\"episode_reward_mean\",\n mode=\"max\",\n perturbation_interval=args.freq,\n resample_probability=args.perturb,\n quantile_fraction = args.perturb, # copy bottom % with top %\n # Specifies the mutations of these hyperparams\n hyperparam_mutations={\n \"lambda\": lambda: random.uniform(0.9, 1.0),\n \"clip_param\": lambda: random.uniform(0.1, 0.5),\n \"lr\": lambda: random.uniform(1e-3, 1e-5),\n \"train_batch_size\": lambda: random.randint(int(args.batchsize.split(\"_\")[0]), int(args.batchsize.split(\"_\")[1])),\n },\n custom_explore_fn=explore)\n \n pb2 = PB2(\n time_attr= args.criteria,\n metric=\"episode_reward_mean\",\n mode=\"max\",\n perturbation_interval=args.freq,\n resample_probability=0,\n quantile_fraction = args.perturb, # copy bottom % with top %\n # Specifies the mutations of these hyperparams\n hyperparam_mutations={\n \"lambda\": lambda: random.uniform(0.9, 1.0),\n \"clip_param\": lambda: random.uniform(0.1, 0.5),\n \"lr\": lambda: random.uniform(1e-3, 1e-5),\n \"train_batch_size\": lambda: 
random.randint(int(args.batchsize.split(\"_\")[0]), int(args.batchsize.split(\"_\")[1])),\n },\n custom_explore_fn=explore)\n\n asha = AsyncHyperBandScheduler(\n time_attr=args.criteria,\n metric=\"episode_reward_mean\",\n mode=\"max\",\n grace_period=args.freq,\n max_t=args.max)\n \n \n methods = {'pbt': pbt,\n 'pb2': pb2,\n 'asha': asha}\n \n timelog = str(datetime.date(datetime.now())) + '_' + str(datetime.time(datetime.now()))\n \n analysis = run(\n args.algo,\n name=\"{}_{}_{}_seed{}_{}\".format(timelog, args.method, args.env_name, str(args.seed), args.filename),\n scheduler=methods[args.method],\n verbose=1,\n num_samples= args.num_samples,\n stop= {args.criteria: args.max},\n config= {\n \"env\": args.env_name,\n \"log_level\": \"INFO\",\n \"seed\": args.seed,\n \"kl_coeff\": 1.0,\n #\"monitor\": True, uncomment this for videos... it may slow it down a LOT, but hey :)\n \"num_gpus\": 0,\n \"horizon\": args.horizon,\n \"observation_filter\": \"MeanStdFilter\",\n \"model\": {'fcnet_hiddens': [int(args.net.split('_')[0]),int(args.net.split('_')[1])],\n 'free_log_std': True,\n 'use_lstm': args.use_lstm\n },\n \"num_sgd_iter\":args.num_sgd_iter,\n \"sgd_minibatch_size\":args.sgd_minibatch_size,\n \"lambda\": sample_from(\n lambda spec: random.uniform(0.9, 1.0)),\n \"clip_param\": sample_from(\n lambda spec: random.uniform(0.1, 0.5)),\n \"lr\": sample_from(\n lambda spec: random.uniform(1e-3, 1e-5)), \n \"train_batch_size\": sample_from(\n lambda spec: random.choice([1000 * i for i in range(int(int(args.batchsize.split(\"_\")[0])/1000), int(int(args.batchsize.split(\"_\")[1])/1000))]))\n }\n )\n \n all_dfs = analysis.trial_dataframes\n names = list(all_dfs.keys())\n \n results = pd.DataFrame() \n for i in range(args.num_samples):\n df = all_dfs[names[i]]\n df = df[['timesteps_total', 'time_total_s','episodes_total', 'episode_reward_mean', 'info/learner/default_policy/cur_kl_coeff']]\n df['Agent'] = i\n results = pd.concat([results, df]).reset_index(drop=True)\n \n results.to_csv(\"data/{}/seed{}.csv\".format(args.dir, str(args.seed)))\n\n\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
MerkleBros/generate-ascii-art-from-photographs
[ "966f83737d32bd7cd8858e94ac0d2b1aef24e676" ]
[ "services/image-to-ascii-api/generate_ascii_post.py" ]
[ "import base64\nimport io\nimport os\nimport json\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nfrom colour import Color\n\ndef generate_ascii_post(event, context):\n\n try:\n\n print(\"## ENVIRONMENT\")\n print(os.environ)\n print(\"## EVENT\")\n print(event)\n\n response = {\n \"statusCode\": 500,\n \"isBase64Encoded\": True,\n \"headers\": {'Content-Type': 'application/json'},\n \"body\": \"\"\n }\n # body = json.loads(event[\"body\"])\n # input_file = body[\"input_file\"]\n # HORIZONTAL_SAMPLING_RATE = body[\"HORIZONTAL_SAMPLING_RATE\"]\n # GCF = body[\"GCF\"]\n # output_file = body[\"output_file\"]\n # color1 = body[\"color1\"]\n # color2 = body[\"color2\"]\n # bgcolor = body[\"bgcolor\"]\n\n\n # TODO: Remove, hard coded to see if function works\n print(\"## RETRIEVING INPUT FILE\")\n\n input_file = event[\"body\"]\n\n print(input_file)\n\n HORIZONTAL_SAMPLING_RATE = 0.1\n GCF = 1\n color1 = \"black\"\n color2 = \"black\"\n bgcolor = \"white\"\n\n # The array of ascii symbols from white to black\n chars = np.asarray(list(' .,:irs?@9B&#'))\n\n # Load the fonts and then get the the height and width of a typical symbol\n # You can use different fonts here\n font = ImageFont.load_default()\n letter_width = font.getsize(\"x\")[0]\n letter_height = font.getsize(\"x\")[1]\n\n height_width_ratio = letter_height/letter_width\n\n #open the input file\n print(\"## BASE64 DECODING THE INPUT FILE\")\n message = base64.b64decode(input_file)\n print(message)\n print(\"## IMAGE FILE TO BUFFER\")\n buffer = io.BytesIO(message)\n buffer.seek(0)\n print(\"## GET IMAGE FROM BUFFER\")\n img = Image.open(buffer)\n\n #Calculate how many ascii letters are needed on the width and height\n width_by_letter = round(img.size[0]*HORIZONTAL_SAMPLING_RATE*height_width_ratio)\n height_by_letter = round(img.size[1]*HORIZONTAL_SAMPLING_RATE)\n letter_size = (width_by_letter, height_by_letter)\n\n #Resize the image based on the symbol width and height\n print(\"## RESIZING IMAGE\")\n img = img.resize(letter_size)\n\n #Get the RGB color values of each sampled pixel and convert them to graycolor using average.\n #https://www.johndcook.com/blog/2009/08/24/algorithms-convert-color-grayscale/\n img = np.sum(np.asarray(img), axis=2)\n\n # Normalize the results, enhance and reduce the brightness contrast.\n # Map grayscale values to bins of symbols\n img -= img.min()\n img = (1.0 - img/img.max())**GCF*(chars.size-1)\n\n # Generate the ascii art symbols\n lines = (\"\\n\".join((\"\".join(r) for r in chars[img.astype(int)]))).split(\"\\n\")\n\n # Create gradient color bins\n nbins = len(lines)\n color_range = list(Color(color1).range_to(Color(color2), nbins))\n\n #Create an image object, set its width and height\n new_image_width = letter_width *width_by_letter\n new_image_height = letter_height * height_by_letter\n new_image = Image.new(\"RGBA\", (new_image_width, new_image_height), bgcolor)\n draw = ImageDraw.Draw(new_image)\n\n # Print symbols to image\n left_padding = 0\n y = 0\n line_index = 0\n for line in lines:\n color = color_range[line_index]\n line_index += 1\n\n draw.text((left_padding, y), line, color.hex, font=font)\n y += letter_height\n\n print(\"## FINISHED PRINTING ASCII IMAGE\")\n\n # Save the image file\n print(\"## RETRIEVING IMAGE FROM BUFFER\")\n buffered = io.BytesIO()\n print(\"## SAVING IMAGE as PNG\")\n new_image.save(buffered, format=\"PNG\")\n print(\"## BASE 64 ENCODING IMAGE\")\n image_string = base64.b64encode(buffered.getvalue()).decode('ascii')\n\n print(\"## base64 
image_string:\")\n print(image_string)\n\n response = {\n \"statusCode\": 200,\n \"isBase64Encoded\": True,\n \"headers\": {'Content-Type': 'image/png'},\n \"body\": image_string\n }\n\n print(\"## response:\")\n print(response)\n\n return response\n\n except Exception as err:\n\n print(\"## ERROR\")\n print(\"Error: {}\".format(err))\n\n response[\"body\"] = \"Error {}\".format(err)\n return response\n" ]
[ [ "numpy.asarray" ] ]
zjzh/vega
[ "aa6e7b8c69024262fc483ee06113b4d1bd5156d8", "aa6e7b8c69024262fc483ee06113b4d1bd5156d8" ]
[ "vega/networks/pytorch/customs/modnas/arch_space/construct/torch/torch.py", "vega/networks/pytorch/customs/modnas/arch_space/torch/resnet.py" ]
[ "# -*- coding:utf-8 -*-\n\n# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Torch constructors.\"\"\"\nimport torch\nfrom modnas.registry.construct import register\nfrom modnas.arch_space.slot import Slot\nfrom modnas.arch_space import ops\nfrom modnas.core.param_space import ParamSpace\nfrom modnas.utils.logging import get_logger\nfrom modnas import backend\n\n\nlogger = get_logger('construct')\n\n\ndef parse_device(device):\n \"\"\"Return device ids from config.\"\"\"\n if isinstance(device, int):\n device = str(device)\n if not isinstance(device, str):\n return []\n device = device.lower()\n if device in ['cpu', 'nil', 'none']:\n return []\n if device == 'all':\n return list(range(torch.cuda.device_count()))\n else:\n return [int(s) for s in device.split(',')]\n\n\ndef configure_ops(new_config):\n \"\"\"Set global operator config.\"\"\"\n config = ops.config\n config.update(new_config)\n if isinstance(config.ops_order, str):\n config.ops_order = config.ops_order.split('_')\n if config.ops_order[-1] == 'bn':\n config.conv.bias = False\n if config.ops_order[0] == 'act':\n config.act.inplace = False\n logger.info('ops config: {}'.format(config.to_dict()))\n\n\n@register\nclass TorchInitConstructor():\n \"\"\"Constructor that initializes the architecture space.\"\"\"\n\n def __init__(self, seed=None, device=None, ops_conf=None):\n self.seed = seed\n self.device = device\n self.ops_conf = ops_conf\n\n def __call__(self, model):\n \"\"\"Run constructor.\"\"\"\n Slot.reset()\n ParamSpace().reset()\n seed = self.seed\n if seed:\n backend.init_device(self.device, seed)\n configure_ops(self.ops_conf or {})\n return model\n\n\n@register\nclass TorchToDevice():\n \"\"\"Constructor that moves model to some device.\"\"\"\n\n def __init__(self, device='all', data_parallel=True):\n device_ids = parse_device(device) or [None]\n self.device_ids = device_ids\n self.data_parallel = data_parallel\n\n def __call__(self, model):\n \"\"\"Run constructor.\"\"\"\n if model is None:\n return\n device_ids = self.device_ids\n backend.set_device(device_ids[0])\n if device_ids[0] is not None:\n torch.cuda.set_device(device_ids[0])\n model.to(device=device_ids[0])\n if self.data_parallel and len(device_ids) > 1:\n model = torch.nn.DataParallel(model, device_ids=device_ids)\n return model\n\n\n@register\nclass TorchCheckpointLoader():\n \"\"\"Constructor that loads model checkpoints.\"\"\"\n\n def __init__(self, path):\n logger.info('Loading torch checkpoint from {}'.format(path))\n self.chkpt = torch.load(path)\n\n def __call__(self, model):\n \"\"\"Run constructor.\"\"\"\n model.load_state_dict(self.chkpt)\n return model\n", "# -*- coding:utf-8 -*-\n\n# This file is adapted from the torchvision library at\n# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\n# 2020.6.29-Changed for Modular-NAS search space.\n# Huawei Technologies Co., Ltd. 
<[email protected]>\n# Copyright 2020 Huawei Technologies Co., Ltd.\n\n\"\"\"ResNet architectures.\"\"\"\n\nfrom functools import partial\nimport torch.nn as nn\nfrom modnas.registry.construct import DefaultSlotTraversalConstructor\nfrom modnas.registry.construct import register as register_constructor\nfrom modnas.registry.arch_space import register\nfrom ..ops import Identity\nfrom ..slot import Slot\n\n\ndef conv3x3(in_planes, out_planes, stride=1, groups=1):\n \"\"\"Return 3x3 convolution with padding.\"\"\"\n return Slot(_chn_in=in_planes, _chn_out=out_planes, _stride=stride, groups=groups)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"Return 1x1 convolution.\"\"\"\n return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(out_planes))\n\n\nclass BasicBlock(nn.Module):\n \"\"\"Basic Block class.\"\"\"\n\n expansion = 1\n chn_init = 16\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=None, norm_layer=None):\n super(BasicBlock, self).__init__()\n del base_width\n self.conv1 = conv3x3(inplanes, planes, stride, groups)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=False)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n \"\"\"Compute network output.\"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n \"\"\"Bottleneck block class.\"\"\"\n\n expansion = 4\n chn_init = 16\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=None, norm_layer=None):\n super(Bottleneck, self).__init__()\n width = int(planes * (1. 
* base_width / self.chn_init)) * groups\n self.conv1 = conv1x1(inplanes, width)\n self.conv2 = conv3x3(width, width, stride, groups)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.relu = nn.ReLU(inplace=False)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n \"\"\"Compute network output.\"\"\"\n identity = x\n\n out = self.conv1(x)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n \"\"\"ResNet architecture class.\"\"\"\n\n def __init__(self,\n chn_in,\n chn,\n block,\n layers,\n n_classes,\n zero_init_residual=False,\n groups=1,\n width_per_group=None,\n use_bn=False,\n expansion=None):\n super(ResNet, self).__init__()\n if use_bn:\n norm_layer = nn.BatchNorm2d\n else:\n norm_layer = Identity\n self.use_bn = use_bn\n if expansion is not None:\n block.expansion = expansion\n block.chn_init = chn\n\n self.chn = chn\n self.groups = groups\n self.base_width = chn // groups if width_per_group is None else width_per_group\n self.conv1 = self.get_stem(chn_in, chn, nn.BatchNorm2d)\n\n self.layers = nn.Sequential(*[\n self._make_layer(block, (2**i) * chn, layers[i], stride=(1 if i == 0 else 2), norm_layer=norm_layer)\n for i in range(len(layers))\n ])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(self.chn, n_classes)\n self.zero_init_residual = zero_init_residual\n\n def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None):\n downsample = None\n if stride != 1 or self.chn != planes * block.expansion:\n downsample = nn.Sequential(conv1x1(\n self.chn,\n planes * block.expansion,\n stride,\n ), )\n\n layers = []\n layers.append(block(self.chn, planes, stride, downsample, self.groups, self.base_width, norm_layer=norm_layer))\n self.chn = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.chn, planes, 1, None, self.groups, self.base_width, norm_layer=norm_layer))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n \"\"\"Compute network output.\"\"\"\n x = self.conv1(x)\n\n x = self.layers(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\n@register_constructor\nclass ResNetPredefinedConstructor(DefaultSlotTraversalConstructor):\n \"\"\"ResNet original network constructor.\"\"\"\n\n def __init__(self, use_bn=False):\n super().__init__()\n self.use_bn = use_bn\n\n def convert(self, slot):\n \"\"\"Convert slot to module.\"\"\"\n return nn.Sequential(\n nn.Conv2d(slot.chn_in, slot.chn_out, 3, stride=slot.stride, padding=1, bias=False, **slot.kwargs),\n nn.BatchNorm2d(slot.chn_out) if self.use_bn else Identity(),\n )\n\n\nclass ImageNetResNet(ResNet):\n \"\"\"ResNet for ImageNet dataset.\"\"\"\n\n def get_stem(self, chn_in, chn, norm_layer):\n \"\"\"Return stem layers.\"\"\"\n return nn.Sequential(\n nn.Conv2d(chn_in, chn, kernel_size=7, stride=2, padding=3, bias=False),\n norm_layer(chn),\n nn.ReLU(inplace=False),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n )\n\n\nclass CIFARResNet(ResNet):\n \"\"\"ResNet for CIFAR dataset.\"\"\"\n\n def get_stem(self, chn_in, chn, norm_layer):\n \"\"\"Return stem layers.\"\"\"\n return nn.Sequential(\n nn.Conv2d(chn_in, chn, kernel_size=3, stride=1, padding=1, bias=False),\n norm_layer(chn),\n nn.ReLU(inplace=False),\n )\n\n\ndef resnet10(resnet_cls, 
**kwargs):\n    \"\"\"Construct a ResNet-10 model.\"\"\"\n    return resnet_cls(block=BasicBlock, layers=[1, 1, 1, 1], **kwargs)\n\n\ndef resnet18(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNet-18 model.\"\"\"\n    return resnet_cls(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)\n\n\ndef resnet32(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNet-32 model.\"\"\"\n    return resnet_cls(block=BasicBlock, layers=[5, 5, 5], **kwargs)\n\n\ndef resnet34(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNet-34 model.\"\"\"\n    return resnet_cls(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)\n\n\ndef resnet50(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNet-50 model.\"\"\"\n    return resnet_cls(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)\n\n\ndef resnet56(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNet-56 model.\"\"\"\n    return resnet_cls(block=BasicBlock, layers=[9, 9, 9], **kwargs)\n\n\ndef resnet101(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNet-101 model.\"\"\"\n    return resnet_cls(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)\n\n\ndef resnet110(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNet-110 model.\"\"\"\n    return resnet_cls(block=BasicBlock, layers=[18, 18, 18], **kwargs)\n\n\ndef resnet152(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNet-152 model.\"\"\"\n    return resnet_cls(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)\n\n\ndef resnext50_32x4d(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNeXt-50 32x4d model.\"\"\"\n    return resnet_cls(block=Bottleneck, layers=[3, 4, 6, 3], groups=32, width_per_group=4, **kwargs)\n\n\ndef resnext101_32x8d(resnet_cls, **kwargs):\n    \"\"\"Construct a ResNeXt-101 32x8d model.\"\"\"\n    return resnet_cls(block=Bottleneck, layers=[3, 4, 23, 3], groups=32, width_per_group=8, **kwargs)\n\n\ndef resnet(resnet_cls, bottleneck=False, **kwargs):\n    \"\"\"Construct a ResNet model.\"\"\"\n    block = Bottleneck if bottleneck else BasicBlock\n    return resnet_cls(block=block, **kwargs)\n\n\nfor net_cls in [CIFARResNet, ImageNetResNet]:\n    name = 'CIFAR-' if net_cls == CIFARResNet else 'ImageNet-'\n    register(partial(resnet10, net_cls), name + 'ResNet-10')\n    register(partial(resnet18, net_cls), name + 'ResNet-18')\n    register(partial(resnet32, net_cls), name + 'ResNet-32')\n    register(partial(resnet34, net_cls), name + 'ResNet-34')\n    register(partial(resnet50, net_cls), name + 'ResNet-50')\n    register(partial(resnet56, net_cls), name + 'ResNet-56')\n    register(partial(resnet101, net_cls), name + 'ResNet-101')\n    register(partial(resnet152, net_cls), name + 'ResNet-152')\n    register(partial(resnext50_32x4d, net_cls), name + 'ResNeXt-50')\n    register(partial(resnext101_32x8d, net_cls), name + 'ResNeXt-101')\n    register(partial(resnet, net_cls), name + 'ResNet')\n" ]
[ [ "torch.nn.DataParallel", "torch.load", "torch.cuda.device_count", "torch.cuda.set_device" ], [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU" ] ]
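The resnet.py code in this entry builds every 3x3 convolution as a Slot placeholder, so a network only becomes runnable after a constructor fills the slots in. Below is a minimal usage sketch for the CIFAR-style ResNet-56 defined above. The import path is an assumption based on the file layout shown in this entry (inside vega the package prefix may differ), and it assumes ResNetPredefinedConstructor can be called directly on the model and returns it, the same pattern the constructors in torch.py above follow.

import torch
# Assumed import path; derived from the file_path listed in this entry.
from modnas.arch_space.torch.resnet import (
    BasicBlock, CIFARResNet, ResNetPredefinedConstructor)

# CIFAR ResNet-56: three stages of 9 BasicBlocks each, 16 base channels,
# matching the registered 'CIFAR-ResNet-56' entry.
model = CIFARResNet(chn_in=3, chn=16, block=BasicBlock,
                    layers=[9, 9, 9], n_classes=10, use_bn=True)

# conv3x3 produced Slot placeholders; this traversal constructor is expected
# to replace each slot with a concrete Conv2d (+ BatchNorm2d) module.
model = ResNetPredefinedConstructor(use_bn=True)(model)

out = model(torch.randn(2, 3, 32, 32))
print(out.shape)  # expected: torch.Size([2, 10])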
CyberZHG/keras-global-self-attention
[ "f3bf21dbb1f3251b5417a8bb254dd91807b1aec5" ]
[ "keras_self_attention/seq_self_attention.py" ]
[ "from tensorflow import keras\nfrom tensorflow.keras import backend as K\n\n\nclass SeqSelfAttention(keras.layers.Layer):\n\n ATTENTION_TYPE_ADD = 'additive'\n ATTENTION_TYPE_MUL = 'multiplicative'\n\n def __init__(self,\n units=32,\n attention_width=None,\n attention_type=ATTENTION_TYPE_ADD,\n return_attention=False,\n history_only=False,\n kernel_initializer='glorot_normal',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n use_additive_bias=True,\n use_attention_bias=True,\n attention_activation=None,\n attention_regularizer_weight=0.0,\n **kwargs):\n \"\"\"Layer initialization.\n\n For additive attention, see: https://arxiv.org/pdf/1806.01264.pdf\n\n :param units: The dimension of the vectors that used to calculate the attention weights.\n :param attention_width: The width of local attention.\n :param attention_type: 'additive' or 'multiplicative'.\n :param return_attention: Whether to return the attention weights for visualization.\n :param history_only: Only use historical pieces of data.\n :param kernel_initializer: The initializer for weight matrices.\n :param bias_initializer: The initializer for biases.\n :param kernel_regularizer: The regularization for weight matrices.\n :param bias_regularizer: The regularization for biases.\n :param kernel_constraint: The constraint for weight matrices.\n :param bias_constraint: The constraint for biases.\n :param use_additive_bias: Whether to use bias while calculating the relevance of inputs features\n in additive mode.\n :param use_attention_bias: Whether to use bias while calculating the weights of attention.\n :param attention_activation: The activation used for calculating the weights of attention.\n :param attention_regularizer_weight: The weights of attention regularizer.\n :param kwargs: Parameters for parent class.\n \"\"\"\n super(SeqSelfAttention, self).__init__(**kwargs)\n self.supports_masking = True\n self.units = units\n self.attention_width = attention_width\n self.attention_type = attention_type\n self.return_attention = return_attention\n self.history_only = history_only\n if history_only and attention_width is None:\n self.attention_width = int(1e9)\n\n self.use_additive_bias = use_additive_bias\n self.use_attention_bias = use_attention_bias\n self.kernel_initializer = keras.initializers.get(kernel_initializer)\n self.bias_initializer = keras.initializers.get(bias_initializer)\n self.kernel_regularizer = keras.regularizers.get(kernel_regularizer)\n self.bias_regularizer = keras.regularizers.get(bias_regularizer)\n self.kernel_constraint = keras.constraints.get(kernel_constraint)\n self.bias_constraint = keras.constraints.get(bias_constraint)\n self.attention_activation = keras.activations.get(attention_activation)\n self.attention_regularizer_weight = attention_regularizer_weight\n self._backend = keras.backend.backend()\n\n if attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:\n self.Wx, self.Wt, self.bh = None, None, None\n self.Wa, self.ba = None, None\n elif attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:\n self.Wa, self.ba = None, None\n else:\n raise NotImplementedError('No implementation for attention type : ' + attention_type)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'attention_width': self.attention_width,\n 'attention_type': self.attention_type,\n 'return_attention': self.return_attention,\n 'history_only': self.history_only,\n 'use_additive_bias': self.use_additive_bias,\n 'use_attention_bias': 
self.use_attention_bias,\n 'kernel_initializer': keras.initializers.serialize(self.kernel_initializer),\n 'bias_initializer': keras.initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': keras.regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': keras.regularizers.serialize(self.bias_regularizer),\n 'kernel_constraint': keras.constraints.serialize(self.kernel_constraint),\n 'bias_constraint': keras.constraints.serialize(self.bias_constraint),\n 'attention_activation': keras.activations.serialize(self.attention_activation),\n 'attention_regularizer_weight': self.attention_regularizer_weight,\n }\n base_config = super(SeqSelfAttention, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def build(self, input_shape):\n if self.attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:\n self._build_additive_attention(input_shape)\n elif self.attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:\n self._build_multiplicative_attention(input_shape)\n super(SeqSelfAttention, self).build(input_shape)\n\n def _build_additive_attention(self, input_shape):\n feature_dim = int(input_shape[2])\n\n self.Wt = self.add_weight(shape=(feature_dim, self.units),\n name='{}_Add_Wt'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n self.Wx = self.add_weight(shape=(feature_dim, self.units),\n name='{}_Add_Wx'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_additive_bias:\n self.bh = self.add_weight(shape=(self.units,),\n name='{}_Add_bh'.format(self.name),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n self.Wa = self.add_weight(shape=(self.units, 1),\n name='{}_Add_Wa'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_attention_bias:\n self.ba = self.add_weight(shape=(1,),\n name='{}_Add_ba'.format(self.name),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n def _build_multiplicative_attention(self, input_shape):\n feature_dim = int(input_shape[2])\n\n self.Wa = self.add_weight(shape=(feature_dim, feature_dim),\n name='{}_Mul_Wa'.format(self.name),\n initializer=self.kernel_initializer,\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_attention_bias:\n self.ba = self.add_weight(shape=(1,),\n name='{}_Mul_ba'.format(self.name),\n initializer=self.bias_initializer,\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n\n def call(self, inputs, mask=None, **kwargs):\n input_len = K.shape(inputs)[1]\n\n if self.attention_type == SeqSelfAttention.ATTENTION_TYPE_ADD:\n e = self._call_additive_emission(inputs)\n elif self.attention_type == SeqSelfAttention.ATTENTION_TYPE_MUL:\n e = self._call_multiplicative_emission(inputs)\n\n if self.attention_activation is not None:\n e = self.attention_activation(e)\n if self.attention_width is not None:\n if self.history_only:\n lower = K.arange(0, input_len) - (self.attention_width - 1)\n else:\n lower = K.arange(0, input_len) - self.attention_width // 2\n lower = K.expand_dims(lower, axis=-1)\n upper = lower + self.attention_width\n indices = K.expand_dims(K.arange(0, input_len), axis=0)\n e -= 10000.0 * (1.0 - K.cast(lower <= indices, K.floatx()) * 
K.cast(indices < upper, K.floatx()))\n if mask is not None:\n mask = K.expand_dims(K.cast(mask, K.floatx()), axis=-1)\n e -= 10000.0 * ((1.0 - mask) * (1.0 - K.permute_dimensions(mask, (0, 2, 1))))\n\n # a_{t} = \\text{softmax}(e_t)\n e = K.exp(e - K.max(e, axis=-1, keepdims=True))\n a = e / K.sum(e, axis=-1, keepdims=True)\n\n # l_t = \\sum_{t'} a_{t, t'} x_{t'}\n v = K.batch_dot(a, inputs)\n if self.attention_regularizer_weight > 0.0:\n self.add_loss(self._attention_regularizer(a))\n\n if self.return_attention:\n return [v, a]\n return v\n\n def _call_additive_emission(self, inputs):\n input_shape = K.shape(inputs)\n batch_size, input_len = input_shape[0], input_shape[1]\n\n # h_{t, t'} = \\tanh(x_t^T W_t + x_{t'}^T W_x + b_h)\n q = K.expand_dims(K.dot(inputs, self.Wt), 2)\n k = K.expand_dims(K.dot(inputs, self.Wx), 1)\n if self.use_additive_bias:\n h = K.tanh(q + k + self.bh)\n else:\n h = K.tanh(q + k)\n\n # e_{t, t'} = W_a h_{t, t'} + b_a\n if self.use_attention_bias:\n e = K.reshape(K.dot(h, self.Wa) + self.ba, (batch_size, input_len, input_len))\n else:\n e = K.reshape(K.dot(h, self.Wa), (batch_size, input_len, input_len))\n return e\n\n def _call_multiplicative_emission(self, inputs):\n # e_{t, t'} = x_t^T W_a x_{t'} + b_a\n e = K.batch_dot(K.dot(inputs, self.Wa), K.permute_dimensions(inputs, (0, 2, 1)))\n if self.use_attention_bias:\n e += self.ba[0]\n return e\n\n def compute_output_shape(self, input_shape):\n output_shape = input_shape\n if self.return_attention:\n attention_shape = (input_shape[0], output_shape[1], input_shape[1])\n return [output_shape, attention_shape]\n return output_shape\n\n def compute_mask(self, inputs, mask=None):\n if self.return_attention:\n return [mask, None]\n return mask\n\n def _attention_regularizer(self, attention):\n batch_size = K.cast(K.shape(attention)[0], K.floatx())\n input_len = K.shape(attention)[-1]\n indices = K.expand_dims(K.arange(0, input_len), axis=0)\n diagonal = K.expand_dims(K.arange(0, input_len), axis=-1)\n eye = K.cast(K.equal(indices, diagonal), K.floatx())\n return self.attention_regularizer_weight * K.sum(K.square(K.batch_dot(\n attention,\n K.permute_dimensions(attention, (0, 2, 1))) - eye)) / batch_size\n\n @staticmethod\n def get_custom_objects():\n return {'SeqSelfAttention': SeqSelfAttention}\n" ]
[ [ "tensorflow.keras.activations.serialize", "tensorflow.keras.backend.sum", "tensorflow.keras.constraints.get", "tensorflow.keras.backend.batch_dot", "tensorflow.keras.backend.expand_dims", "tensorflow.keras.constraints.serialize", "tensorflow.keras.backend.floatx", "tensorflow.keras.backend.permute_dimensions", "tensorflow.keras.backend.arange", "tensorflow.keras.regularizers.serialize", "tensorflow.keras.initializers.serialize", "tensorflow.keras.activations.get", "tensorflow.keras.backend.tanh", "tensorflow.keras.backend.shape", "tensorflow.keras.backend.max", "tensorflow.keras.regularizers.get", "tensorflow.keras.backend.dot", "tensorflow.keras.initializers.get", "tensorflow.keras.backend.backend", "tensorflow.keras.backend.equal" ] ]
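For the SeqSelfAttention layer in this entry, a minimal usage sketch follows: a toy binary classifier over random integer sequences. The surrounding layers, data shapes, and vocabulary size are illustrative assumptions; only the SeqSelfAttention arguments and the module path come from the entry itself.

import numpy as np
from tensorflow import keras
# Module path taken from the file_path listed in this entry.
from keras_self_attention.seq_self_attention import SeqSelfAttention

# Toy data (made up): 8 sequences of length 20 over a vocabulary of 100 tokens.
x = np.random.randint(0, 100, size=(8, 20))
y = np.random.randint(0, 2, size=(8, 1))

model = keras.models.Sequential([
    keras.layers.Embedding(input_dim=100, output_dim=32),
    keras.layers.Bidirectional(keras.layers.LSTM(units=16, return_sequences=True)),
    # Additive self-attention over the full sequence (attention_width=None).
    SeqSelfAttention(units=32,
                     attention_type=SeqSelfAttention.ATTENTION_TYPE_ADD,
                     attention_activation='sigmoid'),
    keras.layers.GlobalMaxPooling1D(),
    keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(x, y, epochs=1, verbose=0)

# Reloading a saved model requires registering the custom layer, e.g.:
# keras.models.load_model('model.h5', custom_objects=SeqSelfAttention.get_custom_objects())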