repo_name
stringlengths
6
130
hexsha
sequence
file_path
sequence
code
sequence
apis
sequence
possible_versions
list
SABS-R3-Epidemiology/branching-process
[ "d7dd5f612c45b280b0b369e8e0391ee6dcd84459" ]
[ "branchpro/data_library/covid_hainan/parse_data.py" ]
[ "#\n# This file is part of BRANCHPRO\n# (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released\n# under the BSD 3-clause license. See accompanying LICENSE.md for copyright\n# notice and full license details.\n#\n\n\"\"\"Processing script for Hainan, China data from [1]_.\nIt rewrites the data file into the format expected by our app.\nReferences\n----------\n.. [1] National Risk Management Area Classification and Prevention and Control\n Measures, (updated February 01 2022). http://wst.hainan.gov.cn/yqfk/\n\"\"\"\n\nimport datetime\nimport os\nimport pandas\nfrom collections import defaultdict\n\n\ndef main():\n \"\"\"\n Rewrite a new csv file for the data in the desired format.\n We combine the daily import-related and imported cases as the imported\n case, and we add the daily under investigation to the daily locally\n acquired cases (with unknown source and epidemiologically linked)\n \"\"\"\n # Read the original data\n data = pandas.read_csv(\n os.path.join(os.path.dirname(__file__), 'cases.csv'))\n # Initialize a dictionary for the new data\n new_data = defaultdict(lambda: [0, 0])\n\n for i, row in data.iterrows():\n date = row['date']\n day, month, year = date.split('/')\n date = '{}-{:02d}-{:02d}'.format(year, int(month), int(day))\n\n # Select imported cases\n new_data[date][1] += row['imported']\n\n # Select locally acquired cases\n new_data[date][0] += row['local']\n\n all_dates = sorted(list(new_data.keys()))\n\n # Create a pandas DataFrame for the data\n data = pandas.DataFrame()\n data['Incidence Number'] = [new_data[d][0] for d in all_dates]\n data['Imported Cases'] = [new_data[d][1] for d in all_dates]\n data['date'] = all_dates\n\n start_date = all_dates[0]\n\n start = datetime.date(*map(int, start_date.split('-')))\n data['Time'] = [(datetime.date(*map(int, x.split('-'))) - start).days + 1\n for x in data['date']]\n\n # Name the columns\n data = data[['Time', 'Incidence Number', 'Imported Cases', 'date']]\n\n # Convert the file 
to csv\n data.to_csv(\n os.path.join(os.path.dirname(__file__), '{}.csv'.format('HN')),\n index=False)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
louislva/minihex
[ "c8d0fea1e341d1e061f06f2ec39f99857d20c00e" ]
[ "minihex/HexGame.py" ]
[ "import gym\nfrom gym import spaces\nimport numpy as np\nfrom enum import IntEnum\n\n\nclass player(IntEnum):\n BLACK = 0\n WHITE = 1\n EMPTY = 2\n\n\nclass HexGame(object):\n \"\"\"\n Hex Game Environment.\n \"\"\"\n\n def __init__(self, active_player, board,\n focus_player, connected_stones=None, debug=False):\n self.board = board\n # track number of empty feelds for speed\n self.empty_fields = np.count_nonzero(board == player.EMPTY)\n\n if debug:\n self.make_move = self.make_move_debug\n else:\n self.make_move = self.fast_move\n\n # self.special_moves = IntEnum(\"SpecialMoves\", {\n # \"RESIGN\": self.board_size ** 2,\n # \"SWAP\": self.board_size ** 2 + 1\n # })\n\n if connected_stones is None:\n self.regions = np.stack([\n np.pad(np.zeros_like(self.board), 1),\n np.pad(np.zeros_like(self.board), 1)\n ], axis=0)\n self.regions[player.WHITE][:, 0] = 1\n self.regions[player.BLACK][0, :] = 1\n self.regions[player.WHITE][:, self.board_size + 1] = 2\n self.regions[player.BLACK][self.board_size + 1, :] = 2\n else:\n self.regions = connected_stones\n\n self.region_counter = np.zeros(2)\n self.region_counter[player.BLACK] = np.max(self.regions[player.BLACK]) + 1\n self.region_counter[player.WHITE] = np.max(self.regions[player.WHITE]) + 1\n\n if connected_stones is None:\n for y, row in enumerate(board):\n for x, value in enumerate(row):\n if value == player.BLACK:\n self.active_player = player.BLACK\n self.flood_fill((y, x))\n elif value == player.WHITE:\n self.active_player = player.WHITE\n self.flood_fill((y, x))\n\n self.active_player = active_player\n self.player = focus_player\n self.done = False\n self.winner = None\n\n self.actions = np.arange(self.board_size ** 2)\n\n @property\n def board_size(self):\n return self.board.shape[1]\n\n def is_valid_move(self, action):\n coords = self.action_to_coordinate(action)\n return self.board[coords[0], coords[1]] == player.EMPTY\n\n def make_move_debug(self, action):\n if not self.is_valid_move(action):\n raise 
IndexError((\"Illegal move \"\n f\"{self.action_to_coordinate(action)}\"))\n\n return self.fast_move(action)\n\n def fast_move(self, action):\n # # currently resigning is not a possible option\n # if action == self.special_moves.RESIGN:\n # self.done = True\n # self.winner = (self.active_player + 1) % 2\n # return (self.active_player + 1) % 2\n\n y, x = self.action_to_coordinate(action)\n self.board[y, x] = self.active_player\n self.empty_fields -= 1\n\n self.flood_fill((y, x))\n\n winner = None\n regions = self.regions[self.active_player]\n if regions[-1, -1] == 1:\n self.done = True\n winner = player(self.active_player)\n self.winner = winner\n elif self.empty_fields <= 0:\n self.done = True\n winner = None\n\n self.active_player = (self.active_player + 1) % 2\n return winner\n\n def coordinate_to_action(self, coords):\n return np.ravel_multi_index(coords, (self.board_size, self.board_size))\n\n def action_to_coordinate(self, action):\n y = action // self.board_size\n x = action - self.board_size * y\n return (y, x)\n\n def get_possible_actions(self):\n return self.actions[self.board.flatten() == player.EMPTY]\n\n def flood_fill(self, position):\n regions = self.regions[self.active_player]\n\n y, x = (position[0] + 1, position[1] + 1)\n neighborhood = regions[(y - 1):(y + 2), (x - 1):(x + 2)].copy()\n neighborhood[0, 0] = 0\n neighborhood[2, 2] = 0\n adjacent_regions = sorted(set(neighborhood.flatten().tolist()))\n\n # region label = 0 is always present, but not a region\n adjacent_regions.pop(0)\n\n if len(adjacent_regions) == 0:\n regions[y, x] = self.region_counter[self.active_player]\n self.region_counter[self.active_player] += 1\n else:\n new_region_label = adjacent_regions.pop(0)\n regions[y, x] = new_region_label\n for label in adjacent_regions:\n regions[regions == label] = new_region_label\n\n\nclass HexEnv(gym.Env):\n \"\"\"\n Hex environment. 
Play against a fixed opponent.\n \"\"\"\n\n metadata = {\"render.modes\": [\"ansi\"]}\n\n def __init__(self, opponent_policy,\n player_color=player.BLACK,\n active_player=player.BLACK,\n board=None,\n regions=None,\n board_size=5,\n debug=False):\n self.opponent_policy = opponent_policy\n\n if board is None:\n board = player.EMPTY * np.ones((board_size, board_size))\n\n self.initial_board = board\n self.active_player = active_player\n self.player = player_color\n self.simulator = None\n self.winner = None\n self.previous_opponent_move = None\n self.debug = debug\n\n # cache initial connection matrix (approx +100 games/s)\n self.initial_regions = regions\n\n @property\n def opponent(self):\n return player((self.player + 1) % 2)\n\n def reset(self):\n if self.initial_regions is None:\n self.simulator = HexGame(self.active_player,\n self.initial_board.copy(),\n self.player,\n debug=self.debug)\n regions = self.simulator.regions.copy()\n self.initial_regions = regions\n else:\n regions = self.initial_regions.copy()\n self.simulator = HexGame(self.active_player,\n self.initial_board.copy(),\n self.player,\n connected_stones=regions,\n debug=self.debug)\n\n self.previous_opponent_move = None\n\n if self.player != self.active_player:\n info_opponent = {\n 'state': self.simulator.board,\n 'last_move_opponent': None,\n 'last_move_player': None\n }\n self.opponent_move(info_opponent)\n\n info = {\n 'state': self.simulator.board,\n 'last_move_opponent': self.previous_opponent_move,\n 'last_move_player': None\n }\n\n return (self.simulator.board, self.active_player), info\n\n def step(self, action):\n if not self.simulator.done:\n self.winner = self.simulator.make_move(action)\n\n opponent_action = None\n\n if not self.simulator.done:\n info_opponent = {\n 'state': self.simulator.board,\n 'last_move_opponent': action,\n 'last_move_player': self.previous_opponent_move\n }\n opponent_action = self.opponent_move(info_opponent)\n\n if self.winner == self.player:\n reward = 1\n 
elif self.winner == self.opponent:\n reward = -1\n else:\n reward = 0\n\n info = {\n 'state': self.simulator.board,\n 'last_move_opponent': opponent_action,\n 'last_move_player': action\n }\n\n return ((self.simulator.board, self.active_player), reward,\n self.simulator.done, info)\n\n def render(self, mode='ansi', close=False):\n board = self.simulator.board\n print(\" \" * 6, end=\"\")\n for j in range(board.shape[1]):\n print(\" \", j + 1, \" \", end=\"\")\n print(\"|\", end=\"\")\n print(\"\")\n print(\" \" * 5, end=\"\")\n print(\"-\" * (board.shape[1] * 6 - 1), end=\"\")\n print(\"\")\n for i in range(board.shape[1]):\n print(\" \" * (1 + i * 3), i + 1, \" \", end=\"\")\n print(\"|\", end=\"\")\n for j in range(board.shape[1]):\n if board[i, j] == player.EMPTY:\n print(\" O \", end=\"\")\n elif board[i, j] == player.BLACK:\n print(\" B \", end=\"\")\n else:\n print(\" W \", end=\"\")\n print(\"|\", end=\"\")\n print(\"\")\n print(\" \" * (i * 3 + 1), end=\"\")\n print(\"-\" * (board.shape[1] * 7 - 1), end=\"\")\n print(\"\")\n\n def opponent_move(self, info):\n opponent_action = self.opponent_policy(self.simulator.board,\n self.opponent,\n info)\n self.winner = self.simulator.make_move(opponent_action)\n self.previous_opponent_move = opponent_action\n return opponent_action\n" ]
[ [ "numpy.arange", "numpy.ones", "numpy.max", "numpy.zeros_like", "numpy.count_nonzero", "numpy.ravel_multi_index", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ZikangXiong/ToyRLAlgorithms
[ "6b54a3a845fdc227b1fb619f4a682859a36060fc" ]
[ "algorithms/refactor/learning/rewards.py" ]
[ "from typing import Tuple\n\nimport numpy as np\nimport torch as th\n\n\nclass GAE:\n\n def __init__(self, n_workers: int, worker_steps: int, gamma: float, lambda_: float, device: str):\n \"\"\"\n https://nn.labml.ai/rl/ppo/gae.html\n :param n_workers:\n :param worker_steps:\n :param gamma:\n :param lambda_:\n \"\"\"\n self.lambda_ = lambda_\n self.gamma = gamma\n self.worker_steps = worker_steps\n self.n_workers = n_workers\n self.device = device\n\n def __call__(self, done: th.Tensor, rewards: th.Tensor, values: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n advantages = th.zeros(self.n_workers, self.worker_steps).float().to(self.device)\n last_advantage = 0\n last_value = values[:, -1]\n values_ests = []\n\n for t in reversed(range(self.worker_steps)):\n mask = th.logical_not(done[:, t])\n last_value = last_value * mask\n last_advantage = last_advantage * mask\n\n values_ests.append(rewards[:, t] + self.gamma * last_value)\n delta = values_ests[-1] - values[:, t]\n last_advantage = delta + self.gamma * self.lambda_ * last_advantage\n advantages[:, t] = last_advantage\n last_value = values[:, t]\n\n values_ests = th.tensor(values_ests).float().to(done.device)\n\n return advantages, values_ests\n\n\ndef single_worker_gae(values: th.Tensor,\n dones: th.Tensor,\n rewards: th.Tensor,\n gamma: float, lam: float, device: str) -> Tuple[th.Tensor, th.Tensor]:\n gae = GAE(1, len(values), gamma, lam, device)\n\n values = values.view((1,) + values.shape)\n dones = dones.view((1,) + dones.shape)\n rewards = rewards.view((1,) + rewards.shape)\n ret = gae(dones, rewards, values)\n return ret[0][0], ret[1][0]\n\n\ndef mc_reward_estimation(rewards, dones, gamma) -> np.ndarray:\n value_est = []\n discounted_reward = 0\n\n for reward, is_terminal in zip(reversed(rewards), reversed(dones)):\n if is_terminal:\n discounted_reward = 0\n discounted_reward = reward + (gamma * discounted_reward)\n value_est.insert(0, discounted_reward)\n\n return np.array(value_est)\n" ]
[ [ "torch.tensor", "numpy.array", "torch.zeros", "torch.logical_not" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nikhil-garg/Reservoir-Computing-framework-for-multivariate-time-series-classification
[ "99d59a8defb5581604a2662c368751f92879309b" ]
[ "code/modules.py" ]
[ "# General imports\nimport numpy as np\nimport time\nfrom sklearn.linear_model import Ridge\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\nfrom scipy.spatial.distance import pdist, cdist, squareform\n\n# Custom imports\nfrom reservoir import Reservoir\nfrom tensorPCA import tensorPCA\n\n\ndef compute_test_scores(pred_class, Yte):\n \"\"\"\n Wrapper to compute classification accuracy and F1 score\n \"\"\"\n \n true_class = np.argmax(Yte, axis=1)\n \n accuracy = accuracy_score(true_class, pred_class)\n if Yte.shape[1] > 2:\n f1 = f1_score(true_class, pred_class, average='weighted')\n else:\n f1 = f1_score(true_class, pred_class, average='binary')\n\n return accuracy, f1\n\n \nclass RC_model(object):\n \n def __init__(self,\n # reservoir\n reservoir=None, \n n_internal_units=None,\n spectral_radius=None,\n leak=None,\n connectivity=None,\n input_scaling=None,\n noise_level=None,\n n_drop=None,\n bidir=False,\n circle=False,\n # dim red\n dimred_method=None, \n n_dim=None,\n # representation\n mts_rep=None,\n w_ridge_embedding=None,\n # readout\n readout_type=None, \n w_ridge=None, \n mlp_layout=None,\n num_epochs=None,\n w_l2=None,\n nonlinearity=None, \n svm_gamma=1.0,\n svm_C=1.0,\n ):\n \"\"\"\n Build and evaluate a RC-based classifier.\n The training and test MTS are multidimensional arrays of shape [N,T,V], with\n - N = number of samples\n - T = number of time steps in each sample\n - V = number of variables in each sample\n Training and test labels have shape [N,C], with C the number of classes\n \n The dataset consists of:\n X, Y = training data and respective labels\n Xte, Yte = test data and respective labels\n \n Reservoir parameters:\n reservoir = precomputed reservoir (oject of class 'Reservoir');\n if None, the following structural hyperparameters must be specified\n n_internal_units = processing units in the reservoir\n 
spectral_radius = largest eigenvalue of the reservoir matrix of connection weights\n leak = amount of leakage in the reservoir state update (optional)\n connectivity = percentage of nonzero connection weights\n input_scaling = scaling of the input connection weights\n noise_level = deviation of the Gaussian noise injected in the state update\n n_drop = number of transient states to drop\n bidir = use a bidirectional reservoir (True or false)\n \n Dimensionality reduction parameters:\n dimred_method = procedure for reducing the number of features in the sequence of reservoir states;\n possible options are: None (no dimensionality reduction), 'pca' or 'tenpca'\n n_dim = number of resulting dimensions after the dimensionality reduction procedure\n \n Representation parameters:\n mts_rep = type of MTS representation. It can be 'last' (last state), 'output' (output model space),\n or 'reservoir' (reservoir model space)\n w_ridge_embedding = regularization parameter of the ridge regression in the output model space\n and reservoir model space representation; ignored if mts_rep == None\n \n Readout parameters:\n readout_type = type of readout used for classification. It can be 'lin' (ridge regression), \n 'mlp' (multiplayer perceptron), 'svm' (support vector machine), or None.\n If None, the input representations will be saved instead: this is useful for clustering and visualization.\n w_ridge = regularization parameter of the ridge regression readout (only for readout_type=='lin') \n mlp_layout = tuple with the sizes of MLP layers, e.g. (20, 10) defines a MLP with 2 layers \n of 20 and 10 units respectively. 
(only for readout_type=='mlp')\n num_epochs = number of iterations during the optimization (only for readout_type=='mlp')\n w_l2 = weight of the L2 regularization (only for readout_type=='mlp')\n nonlinearity = type of activation function {'relu', 'tanh', 'logistic', 'identity'} (only for readout_type=='mlp')\n svm_gamma = bandwith of the RBF kernel (only for readout_type=='svm')\n svm_C = regularization for SVM hyperplane (only for readout_type=='svm')\n \"\"\"\n self.n_drop=n_drop\n self.bidir=bidir\n self.dimred_method=dimred_method\n self.mts_rep=mts_rep\n self.readout_type=readout_type\n self.svm_gamma=svm_gamma\n \n # Initialize reservoir\n if reservoir is None:\n self._reservoir = Reservoir(n_internal_units=n_internal_units,\n spectral_radius=spectral_radius,\n leak=leak,\n connectivity=connectivity,\n input_scaling=input_scaling,\n noise_level=noise_level,\n circle=circle)\n else:\n self._reservoir = reservoir\n \n # Initialize dimensionality reduction method\n if dimred_method is not None:\n if dimred_method.lower() == 'pca':\n self._dim_red = PCA(n_components=n_dim) \n elif dimred_method.lower() == 'tenpca':\n self._dim_red = tensorPCA(n_components=n_dim)\n else:\n raise RuntimeError('Invalid dimred method ID')\n \n # Initialize ridge regression model\n if mts_rep=='output' or mts_rep=='reservoir':\n self._ridge_embedding = Ridge(alpha=w_ridge_embedding, fit_intercept=True)\n \n # Initialize readout type \n if self.readout_type is not None:\n \n if self.readout_type == 'lin': # Ridge regression\n self.readout = Ridge(alpha=w_ridge) \n elif self.readout_type == 'svm': # SVM readout\n self.readout = SVC(C=svm_C, kernel='precomputed') \n elif readout_type == 'mlp': # MLP (deep readout) \n # pass\n self.readout = MLPClassifier(\n hidden_layer_sizes=mlp_layout, \n activation=nonlinearity, \n alpha=w_l2,\n batch_size=32, \n learning_rate='adaptive', # 'constant' or 'adaptive'\n learning_rate_init=0.001, \n max_iter=num_epochs, \n early_stopping=False, # if 
True, set validation_fraction > 0\n validation_fraction=0.0 # used for early stopping\n )\n else:\n raise RuntimeError('Invalid readout type') \n \n \n def train(self, X, Y=None):\n \n time_start = time.time()\n \n # ============ Compute reservoir states ============ \n res_states = self._reservoir.get_states(X, n_drop=self.n_drop, bidir=self.bidir)\n \n # ============ Dimensionality reduction of the reservoir states ============ \n if self.dimred_method.lower() == 'pca':\n # matricize\n N_samples = res_states.shape[0]\n res_states = res_states.reshape(-1, res_states.shape[2]) \n # ..transform..\n red_states = self._dim_red.fit_transform(res_states) \n # ..and put back in tensor form\n red_states = red_states.reshape(N_samples,-1,red_states.shape[1]) \n elif self.dimred_method.lower() == 'tenpca':\n red_states = self._dim_red.fit_transform(res_states) \n else: # Skip dimensionality reduction\n red_states = res_states\n\n # ============ Generate representation of the MTS ============\n coeff_tr = []\n biases_tr = [] \n \n # Output model space representation\n if self.mts_rep=='output':\n if self.bidir:\n X = np.concatenate((X,X[:, ::-1, :]),axis=2) \n \n for i in range(X.shape[0]):\n self._ridge_embedding.fit(red_states[i, 0:-1, :], X[i, self.n_drop+1:, :])\n coeff_tr.append(self._ridge_embedding.coef_.ravel())\n biases_tr.append(self._ridge_embedding.intercept_.ravel())\n input_repr = np.concatenate((np.vstack(coeff_tr), np.vstack(biases_tr)), axis=1)\n \n # Reservoir model space representation\n elif self.mts_rep=='reservoir':\n for i in range(X.shape[0]):\n self._ridge_embedding.fit(red_states[i, 0:-1, :], red_states[i, 1:, :])\n coeff_tr.append(self._ridge_embedding.coef_.ravel())\n biases_tr.append(self._ridge_embedding.intercept_.ravel())\n input_repr = np.concatenate((np.vstack(coeff_tr), np.vstack(biases_tr)), axis=1)\n \n # Last state representation \n elif self.mts_rep=='last':\n input_repr = red_states[:, -1, :]\n \n # Mean state representation \n elif 
self.mts_rep=='mean':\n input_repr = np.mean(red_states, axis=1)\n \n else:\n raise RuntimeError('Invalid representation ID') \n \n # ============ Apply readout ============\n if self.readout_type == None: # Just store the input representations\n self.input_repr = input_repr\n \n elif self.readout_type == 'lin': # Ridge regression\n self.readout.fit(input_repr, Y) \n \n elif self.readout_type == 'svm': # SVM readout\n Ktr = squareform(pdist(input_repr, metric='sqeuclidean')) \n Ktr = np.exp(-self.svm_gamma*Ktr)\n self.readout.fit(Ktr, np.argmax(Y,axis=1))\n self.input_repr_tr = input_repr # store them to build test kernel\n \n elif self.readout_type == 'mlp': # MLP (deep readout)\n self.readout.fit(input_repr, Y)\n \n tot_time = (time.time()-time_start)/60\n return tot_time\n\n \n def test(self, Xte, Yte):\n\n # ============ Compute reservoir states ============\n res_states_te = self._reservoir.get_states(Xte, n_drop=self.n_drop, bidir=self.bidir) \n \n # ============ Dimensionality reduction of the reservoir states ============ \n if self.dimred_method.lower() == 'pca':\n # matricize\n N_samples_te = res_states_te.shape[0]\n res_states_te = res_states_te.reshape(-1, res_states_te.shape[2]) \n # ..transform..\n red_states_te = self._dim_red.transform(res_states_te) \n # ..and put back in tensor form\n red_states_te = red_states_te.reshape(N_samples_te,-1,red_states_te.shape[1]) \n elif self.dimred_method.lower() == 'tenpca':\n red_states_te = self._dim_red.transform(res_states_te) \n else: # Skip dimensionality reduction\n red_states_te = res_states_te \n \n # ============ Generate representation of the MTS ============\n coeff_te = []\n biases_te = [] \n \n # Output model space representation\n if self.mts_rep=='output':\n if self.bidir:\n Xte = np.concatenate((Xte,Xte[:, ::-1, :]),axis=2) \n \n for i in range(Xte.shape[0]):\n self._ridge_embedding.fit(red_states_te[i, 0:-1, :], Xte[i, self.n_drop+1:, :])\n coeff_te.append(self._ridge_embedding.coef_.ravel())\n 
biases_te.append(self._ridge_embedding.intercept_.ravel())\n input_repr_te = np.concatenate((np.vstack(coeff_te), np.vstack(biases_te)), axis=1)\n \n # Reservoir model space representation\n elif self.mts_rep=='reservoir': \n for i in range(Xte.shape[0]):\n self._ridge_embedding.fit(red_states_te[i, 0:-1, :], red_states_te[i, 1:, :])\n coeff_te.append(self._ridge_embedding.coef_.ravel())\n biases_te.append(self._ridge_embedding.intercept_.ravel())\n input_repr_te = np.concatenate((np.vstack(coeff_te), np.vstack(biases_te)), axis=1)\n \n # Last state representation \n elif self.mts_rep=='last':\n input_repr_te = red_states_te[:, -1, :]\n \n # Mean state representation \n elif self.mts_rep=='mean':\n input_repr_te = np.mean(red_states_te, axis=1)\n \n else:\n raise RuntimeError('Invalid representation ID') \n \n # ============ Apply readout ============\n if self.readout_type == 'lin': # Ridge regression \n logits = self.readout.predict(input_repr_te)\n pred_class = np.argmax(logits, axis=1)\n \n elif self.readout_type == 'svm': # SVM readout\n Kte = cdist(input_repr_te, self.input_repr_tr, metric='sqeuclidean')\n Kte = np.exp(-self.svm_gamma*Kte)\n pred_class = self.readout.predict(Kte)\n \n elif self.readout_type == 'mlp': # MLP (deep readout)\n pred_class = self.readout.predict(input_repr_te)\n pred_class = np.argmax(pred_class, axis=1)\n \n accuracy, f1 = compute_test_scores(pred_class, Yte)\n return accuracy, f1" ]
[ [ "sklearn.neural_network.MLPClassifier", "scipy.spatial.distance.cdist", "numpy.concatenate", "sklearn.linear_model.Ridge", "numpy.argmax", "numpy.mean", "scipy.spatial.distance.pdist", "sklearn.svm.SVC", "sklearn.metrics.f1_score", "numpy.exp", "sklearn.decomposition.PCA", "numpy.vstack", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
jklarson/volttron-applications
[ "159e7ca12e3a7935c7882a29b4d9c720e1c8b689" ]
[ "pnnl/TCM2Agent/tcm2/agent.py" ]
[ "# -*- coding: utf-8 -*- {{{\n# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:\n#\n# Copyright (c) 2015, Battelle Memorial Institute\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# The views and conclusions contained in the software and documentation are those\n# of the authors and should not be interpreted as representing official policies,\n# either expressed or implied, of the FreeBSD Project.\n#\n\n# This material was prepared as an account of work sponsored by an\n# agency of the United States Government. 
Neither the United States\n# Government nor the United States Department of Energy, nor Battelle,\n# nor any of their employees, nor any jurisdiction or organization\n# that has cooperated in the development of these materials, makes\n# any warranty, express or implied, or assumes any legal liability\n# or responsibility for the accuracy, completeness, or usefulness or\n# any information, apparatus, product, software, or process disclosed,\n# or represents that its use would not infringe privately owned rights.\n#\n# Reference herein to any specific commercial product, process, or\n# service by trade name, trademark, manufacturer, or otherwise does\n# not necessarily constitute or imply its endorsement, recommendation,\n# r favoring by the United States Government or any agency thereof,\n# or Battelle Memorial Institute. The views and opinions of authors\n# expressed herein do not necessarily state or reflect those of the\n# United States Government or any agency thereof.\n#\n# PACIFIC NORTHWEST NATIONAL LABORATORY\n# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY\n# under Contract DE-AC05-76RL01830\n\n#}}}\n\nimport os\nimport sys\nimport logging\nimport datetime\nfrom dateutil import parser\n\nfrom volttron.platform.vip.agent import Agent, Core, PubSub, RPC, compat\nfrom volttron.platform.agent import utils\nfrom volttron.platform.agent.utils import (get_aware_utc_now,\n format_timestamp)\n\nimport pandas as pd\nimport statsmodels.formula.api as sm\n\nutils.setup_logging()\n_log = logging.getLogger(__name__)\n\n\nclass TCMAgent(Agent):\n def __init__(self, config_path, **kwargs):\n super(TCMAgent, self).__init__(**kwargs)\n self.config = utils.load_config(config_path)\n self.site = self.config.get('campus')\n self.building = self.config.get('building')\n self.unit = self.config.get('unit')\n\n self.static_pressure_name = self.config.get('static_pressure_name')\n self.air_flow_rate_name = self.config.get('air_flow_rate_name')\n 
self.aggregate_in_min = self.config.get('aggregate_in_min')\n self.aggregate_freq = str(self.aggregate_in_min) + 'Min'\n self.ts_name = self.config.get('ts_name')\n self.fan_power_name = self.config.get('fan_power_name')\n\n self.window_size_in_day = int(self.config.get('window_size_in_day'))\n self.min_required_window_size_in_percent = float(self.config.get('min_required_window_size_in_percent'))\n self.interval_in_min = int(self.config.get('interval_in_min'))\n self.no_of_recs_needed = self.window_size_in_day * 24 * (60 / self.interval_in_min)\n self.min_no_of_records_needed_after_aggr = int(self.min_required_window_size_in_percent/100 *\n self.no_of_recs_needed/self.aggregate_in_min)\n self.schedule_run_in_sec = int(self.config.get('schedule_run_in_day')) * 86400\n\n # Testing\n #self.no_of_recs_needed = 200\n #self.min_no_of_records_needed_after_aggr = self.no_of_recs_needed/self.aggregate_in_min\n\n\n @Core.receiver('onstart')\n def onstart(self, sender, **kwargs):\n self.core.periodic(self.schedule_run_in_sec, self.calculate_latest_coeffs)\n\n def calculate_latest_coeffs(self):\n unit_topic_tmpl = \"{campus}/{building}/{unit}/{point}\"\n unit_points = [self.fan_power_name, self.static_pressure_name, self.air_flow_rate_name]\n df = None\n\n for point in unit_points:\n unit_topic = unit_topic_tmpl.format(campus=self.site,\n building=self.building,\n unit=self.unit,\n point=point)\n result = self.vip.rpc.call('platform.historian',\n 'query',\n topic=unit_topic,\n count=self.no_of_recs_needed,\n order=\"LAST_TO_FIRST\").get(timeout=1000)\n df2 = pd.DataFrame(result['values'], columns=[self.ts_name, point])\n self.convert_units_to_SI(df2, point, result['metadata']['units'])\n df2[self.ts_name] = pd.to_datetime(df2[self.ts_name])\n df2 = df2.groupby([pd.TimeGrouper(key=self.ts_name, freq=self.aggregate_freq)]).mean()\n #df2[self.ts_name] = df2[self.ts_name].apply(lambda dt: dt.replace(second=0, microsecond=0))\n df = df2 if df is None else pd.merge(df, df2, 
how='outer', left_index=True, right_index=True)\n\n #print(df)\n coeffs = self.calculate_coeffs(df)\n # Publish coeffs to store\n if coeffs is not None:\n self.save_coeffs(coeffs)\n\n def convert_units_to_SI(self, df, point, unit):\n if unit == 'kilowatts':\n df[point] = df[point] * 1000\n if unit == 'inchesOfWater':\n df[point] = df[point] * 249.088908333\n if unit == 'cubicFeetPerMinute':\n df[point] = df[point] * 0.00047194745\n\n def calculate_coeffs(self, df):\n # check if there is enough data\n l = len(df.index)\n if l < self.min_no_of_records_needed_after_aggr:\n _log.exception('Not enough data to process')\n return None\n\n df = df.dropna(subset=[self.air_flow_rate_name, self.static_pressure_name])\n\n df[self.air_flow_rate_name+'2'] = (df[self.air_flow_rate_name] * df[self.air_flow_rate_name])\n df[self.air_flow_rate_name+'3'] = (df[self.air_flow_rate_name+'2'] * df[self.air_flow_rate_name])\n df[self.static_pressure_name+'2'] = (df[self.static_pressure_name] * df[self.static_pressure_name])\n\n # calculate model coefficients\n formula = \"{power} ~ {flow} + {flow2} + {flow3} + {pressure} + {pressure2}\".format(\n power=self.fan_power_name,\n flow=self.air_flow_rate_name,\n flow2=self.air_flow_rate_name+'2',\n flow3=self.air_flow_rate_name+'3',\n pressure=self.static_pressure_name,\n pressure2=self.static_pressure_name+'2'\n )\n coeffs = sm.ols(formula=formula, data=df).fit()\n\n return coeffs\n\n def save_coeffs(self, coeffs):\n topic_tmpl = \"analysis/TCM2/{campus}/{building}/{unit}/\"\n topic = topic_tmpl.format(campus=self.site,\n building=self.building,\n unit=self.unit)\n\n headers = {'Date': format_timestamp(get_aware_utc_now())}\n for idx in range(0,6):\n FP_topic = topic + \"FanPower_c\" + str(idx)\n self.vip.pubsub.publish(\n 'pubsub', FP_topic, headers, coeffs.params[idx])\n\n _log.debug(coeffs.params)\n\n\ndef main(argv=sys.argv):\n '''Main method called by the eggsecutable.'''\n try:\n utils.vip_main(TCMAgent)\n except Exception as e:\n 
_log.exception('unhandled exception')\n\n\ndef test_ols():\n '''To compare result of pandas and R's linear regression'''\n import os\n\n test_csv = '../test_data/tcm_ZONE_VAV_150_data.csv'\n df = pd.read_csv(test_csv)\n\n config_path = os.environ.get('AGENT_CONFIG')\n tcm = TCMAgent(config_path)\n coeffs = tcm.calculate_coeffs(df)\n if coeffs is not None:\n T_coeffs = coeffs[\"T_fit\"]\n Q_coeffs = coeffs[\"Q_fit\"]\n _log.debug(T_coeffs.params)\n _log.debug(Q_coeffs.params)\n\n\ndef test_api():\n '''To test Volttron APIs'''\n import os\n\n topic_tmpl = \"{campus}/{building}/{unit}/{subdevice}/{point}\"\n tcm = TCMAgent(os.environ.get('AGENT_CONFIG'))\n\n topic1 = topic_tmpl.format(campus='PNNL',\n building='SEB',\n unit='AHU1',\n subdevice='VAV123A',\n point='MaximumZoneAirFlow')\n result = tcm.vip.rpc.call('platform.historian',\n 'query',\n topic=topic1,\n count=20,\n order=\"LAST_TO_FIRST\").get(timeout=100)\n assert result is not None\n\nif __name__ == '__main__':\n # Entry point for script\n sys.exit(main())\n #test_api()\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.to_datetime", "pandas.TimeGrouper", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
VictorJiangXin/Linear-CRF
[ "37c6737b573ecc7cfc48075baa23035b4c7a96d0" ]
[ "src/crf.py" ]
[ "# MIT License\n# \n# Copyright (c) 2019 Jiang Xin, [email protected]\n# \n\n\nimport os\nimport sys\nimport time\n\nimport pickle\nimport codecs\nimport numpy as np\nfrom scipy import optimize\n\n\nclass LinearCRF(object):\n \"\"\"Simple implementation of linear-chain CRF for Chinese word segmentation task.\n \n This class is a simple implementation of linear-chain conditional random field\n for Chinese word segementation task. So many function are designed for this\n particular task.\n There are two types of feature templates:\n Unigram template: first character, 'U'\n ('U', pos, word, tag)\n Bigram template: first character, 'B'\n ('B', tag_pre, tag_now)\n You can get more information from my blog (PS. the blog is in Chinese)\n https://victorjiangxin.github.io/Chinese-Word-Segmentation/\n \"\"\"\n def __init__(self):\n super(LinearCRF, self).__init__()\n self.ntags = 4 # {'B', 'I', 'E', 'S'}\n self.index_tag = {} # {0:'B', 1:'I', 2:'E', 3:'S'}\n self.tag_index = {} # {'B':0, 'I':1, 'E':2, 'S':3}\n\n self.start_tag = 'S'\n self.end_tag = 'S'\n\n self.U_feature_pos = [-2, -1, 0, 1, 2]\n\n self.index_feature = {} # {0 : ('U', -2, word, tag)}\n self.feature_index = {} # {('U', -2, word, tag)}\n\n self.nweights = 0\n self.weights = np.zeros(self.nweights)\n self.theta = 1e-4 # theta should in the range of (1e-6 ~ 1e-3)\n\n\n def get_word(self, x, i):\n \"\"\"Return x[i]\n \"\"\"\n if i == -1:\n return '_B-1'\n elif i == -2:\n return '_B-2'\n elif i == len(x):\n return '_B+1'\n elif i == len(x) + 1:\n return '_B+2'\n else:\n return x[i]\n\n\n def feature_at(self, k, x, y_pre, y_now, i):\n \"\"\"Get f_k(yt_1, yt, x, t).\n\n Args:\n k: (int) the Kth feature\n x: str word list [word_index['<START'>], word_index['今'],]\n yi_1: tag of y_[i-1]\n yi: tag of yi\n i: (int) index\n\n Return:\n 1 or 0\n \"\"\"\n if k < self.nweights:\n feature = self.index_feature[k]\n if feature[0] == 'U':\n _, pos, word, tag = feature\n if y_now == tag and self.get_word(x, i + pos) == 
word:\n return 1\n elif feature[0] == 'B':\n _, tag_pre, tag_now = feature\n if tag_pre == y_pre and tag_now == y_now:\n return 1\n\n return 0\n\n\n def log_M_at(self, x, y_pre, y_now, i):\n \"\"\"Calc log M(yi_1, yi|x) = W.F_i(yi_1, yi|x)\n \"\"\"\n nwords = len(x)\n activate_feature = []\n if i == 0 and y_pre != self.tag_index[self.start_tag]:\n return float('-inf')\n elif i == nwords and y_now != self.tag_index[self.end_tag]:\n return float('-inf')\n elif i == nwords and y_now == self.tag_index[self.end_tag]:\n return 0\n\n # U feature\n for pos in self.U_feature_pos:\n feature = ('U', pos, self.get_word(x, i + pos), y_now)\n if feature in self.feature_index:\n activate_feature.append(self.feature_index[feature])\n\n # B feature\n feature = ('B', y_pre, y_now)\n if feature in self.feature_index:\n activate_feature.append(self.feature_index[feature])\n\n return self.weights[activate_feature].sum()\n\n\n def log_M(self, x):\n \"\"\"Get log probablity matrix M(x)\n\n Return:\n M(x): tensor(nwords_x+1, ntags, ntags)\n \"\"\"\n nwords = len(x)\n M = np.ones((nwords + 1, self.ntags, self.ntags)) * float('-inf')\n for i in range(nwords + 1):\n for tag_pre in range(self.ntags):\n for tag_now in range(self.ntags):\n M[i, tag_pre, tag_now] = self.log_M_at(x, tag_pre, tag_now, i)\n return M\n\n\n def log_sum_exp(self, a, b):\n \"\"\"\n a = [a1, a2, a3]\n b = [b1, b2, b3]\n return log(e^a1*e^b1+e^a2*e^b2+e^a3*e^b3)\n \"\"\"\n bound = float('-inf')\n if bound in a or bound in b: \n return np.log(np.sum(np.exp(a) * np.exp(b)))\n c = a + b\n max_value = np.max(c)\n return max_value + np.log(np.sum(np.exp(c - max_value)))\n\n\n def log_alpha(self, x, M=None):\n \"\"\"Get forward probablity log a(i, x).\n \n a(i, x, Yt) = sum_{y}a(i-1, x, y)*M(i-1, x, y, Yt)\n so log(a(i, tag)) = log sum exp(log a(i-1) + log M(, :, tag))\n\n Args:\n x: sequence\n M: log potential matrix M(x)\n\n Return:\n alpha: tensor(nwords_x+1, ntags)\n \"\"\"\n nwords = len(x)\n alpha = np.ones((nwords + 1, 
self.ntags)) * float('-inf')\n\n if M is None:\n M = self.log_M(x)\n\n alpha[0] = M[0, self.tag_index[self.start_tag], :]\n for i in range(1, nwords + 1):\n for tag in range(self.ntags):\n alpha[i, tag] = self.log_sum_exp(alpha[i - 1], \n M[i, :, tag])\n return alpha\n\n\n def log_beta(self, x, M=None):\n \"\"\"Get backward probablity log b(i, x)\n\n b(i, x, Yt) = sum_{y}M(i, x, Yt, y)b(i+1, x, y)\n Warnning: because beta[len(x)] = [0, 0, 0, 1] is certain\n so we use beta[len(x)] to store beta[-1]\n \n Args:\n x: sequence\n M: log potential matrix M(x)\n\n Return:\n beta: tensor(nwords_x+1, ntags)\n \"\"\"\n nwords = len(x)\n beta = np.ones((nwords + 1, self.ntags)) * float('-inf')\n\n if M is None:\n M = self.log_M(x)\n\n beta[nwords - 1] = 0 # because the last one must be 'S'\n for i in range(nwords - 2, -2, -1):\n for tag in range(self.ntags):\n beta[i, tag] = self.log_sum_exp(beta[i + 1], \n M[i + 1, tag, :])\n return beta\n\n\n def log_z(self, x, M=None, alpha=None):\n \"\"\"Get log Z(x)\n \"\"\"\n nwords = len(x)\n\n if M is None:\n M = self.log_M(x)\n\n if alpha is None:\n alpha = self.log_alpha(x, M)\n\n return alpha[nwords, self.tag_index[self.end_tag]]\n\n\n def log_potential(self, x, y, M=None, alpha=None):\n \"\"\"Calculate log p(y|x).\n\n log p(y|x) = log exp(sum(W.Feature)) - log Z(x)\n \"\"\"\n nwords = len(x)\n\n if M is None:\n M = self.log_M(x)\n if alpha is None:\n alpha = self.log_alpha(x, M)\n\n log_p = 0\n for i in range(nwords):\n if i == 0:\n log_p += self.log_M_at(x, self.tag_index[self.start_tag], \n y[i], i)\n else:\n log_p += self.log_M_at(x, y[i - 1], y[i], i)\n\n z = self.log_z(x, M, alpha)\n log_p -= z\n\n return log_p\n\n\n def inference_viterbi(self, x, M=None):\n \"\"\"Inference tags of x\n\n Return:\n y_char: ['B', 'S', ..., ] in char not in int\n \"\"\"\n nwords = len(x)\n if nwords < 1:\n return 'S'\n delta = np.zeros((nwords, self.ntags))\n trace = np.zeros((nwords, self.ntags), dtype='int')\n\n if M is None:\n M = 
self.log_M(x)\n\n delta[0] = M[0, self.tag_index[self.start_tag], :]\n for i in range(1, nwords):\n for tag in range(self.ntags):\n delta[i, tag] = np.max(delta[i - 1] + M[i, :, tag])\n trace[i, tag] = np.argmax(delta[i-1] + M[i, :, tag])\n\n y_char = nwords * [self.start_tag]\n best = np.argmax(delta[nwords - 1])\n y_char[nwords - 1] = self.index_tag[best]\n\n for i in range(nwords - 2, -1, -1):\n best = trace[i + 1, best]\n y_char[i] = self.index_tag[best]\n\n return y_char\n\n\n def model_gradient_x(self, x, M=None, alpha=None, beta=None):\n \"\"\"Get sum_y p(y|x)C_k(y, x).\n \n log P(yi_1, yi|x) = log alpha(i-1, yi_1) + log M(i, yi_1, yi, x) + log beta(i, yi) - log z(x)\n One item in gradient, get more information from\n https://victorjiangxin.github.io/Chinese-Word-Segmentation/\n \"\"\"\n nwords = len(x)\n\n if M is None:\n M = self.log_M(x)\n if alpha is None:\n alpha = self.log_alpha(x, M)\n if beta is None:\n beta = self.log_beta(x, M)\n\n z = self.log_z(x, M, alpha)\n P = np.zeros((nwords, self.ntags, self.ntags))\n gradient = np.zeros(self.nweights)\n\n for i in range(nwords):\n for y_pre in range(self.ntags):\n for y_now in range(self.ntags):\n if i == 0 and y_pre != self.tag_index[self.start_tag]:\n pass\n elif i == 0 and y_pre == self.tag_index[self.start_tag]:\n P[i, y_pre, y_now] = M[i, y_pre, y_now] + beta[i, y_now] - z\n else:\n P[i, y_pre, y_now] = alpha[i - 1, y_pre] +\\\n M[i, y_pre, y_now] + beta[i, y_now] - z\n\n P = np.exp(P)\n for i in range(nwords):\n for y_pre in range(self.ntags):\n for y_now in range(self.ntags):\n activate_feature = []\n # U feature\n for pos in self.U_feature_pos:\n feature = ('U', pos, self.get_word(x, i + pos), y_now)\n if feature in self.feature_index:\n activate_feature.append(self.feature_index[feature])\n\n # B feature\n if i == 0 and y_pre != self.tag_index[self.start_tag]:\n pass\n else:\n feature = ('B', y_pre, y_now)\n if feature in self.feature_index:\n activate_feature.append(self.feature_index[feature])\n 
gradient[activate_feature] += P[i, y_pre, y_now]\n\n return gradient\n\n\n def neg_likelihood_and_gradient(self, weights, prior_feature_count, train_data):\n \"\"\"Return -L(x), f'(L)\n \"\"\"\n self.weights = weights\n likelihood = 0\n gradient = np.zeros(self.nweights)\n for x, y in train_data:\n M = self.log_M(x)\n alpha = self.log_alpha(x, M)\n beta = self.log_beta(x, M)\n likelihood += self.log_potential(x, y, M, alpha)\n gradient += self.model_gradient_x(x, M, alpha, beta)\n # add regulariser\n likelihood = likelihood - np.dot(self.weights, self.weights) * self.theta / 2\n gradient = prior_feature_count - gradient - self.weights * self.theta\n\n return -likelihood, -gradient\n\n\n def train(self, file_name):\n \"\"\"Train this model\n\n Args:\n file_name: corpus file\n \"\"\"\n print('Start training!')\n self.ntags = 4\n self.index_tag = {0:'B', 1:'I', 2:'E', 3:'S'}\n self.tag_index = {'B':0, 'I':1, 'E':2, 'S':3}\n\n sentences = []\n labels = []\n\n f = codecs.open(file_name, 'r', encoding='utf-8')\n lines = f.readlines()\n f.close()\n\n words = []\n sentence = []\n label = []\n for line in lines:\n if len(line) < 2:\n # sentence end\n if len(sentence) > 3:\n sentences.append(sentence)\n labels.append(label)\n sentence = []\n label = []\n else:\n char, tag = line.split()\n sentence.append(char)\n label.append(tag)\n if char not in words:\n words.append(char)\n\n print(\"Total sentences is {}\".format(len(sentences)))\n print(\"Total words in corpus is {}\".format(len(words)))\n print(\"sentence[0]:{} labels[0]:{}\".format(''.join(sentences[0]), ''.join(labels[0])))\n\n labels = [[self.tag_index[tag] for tag in label] for label in labels]\n train_data = []\n for x, y in zip(sentences, labels):\n train_data.append((x, y))\n\n del sentences\n del labels\n\n # construct features\n # B features\n feature_id = 0\n for tag_pre in range(self.ntags):\n for tag_now in range(self.ntags):\n feature = ('B', tag_pre, tag_now)\n self.feature_index[feature] = feature_id\n 
self.index_feature[feature_id] = feature\n feature_id += 1\n\n # U features\n for x, _ in train_data:\n nwords = len(x)\n for i in range(nwords):\n for pos in self.U_feature_pos:\n for tag in range(self.ntags):\n feature = ('U', pos, self.get_word(x, i + pos), tag)\n if feature not in self.feature_index:\n self.feature_index[feature] = feature_id\n self.index_feature[feature_id] = feature\n feature_id += 1\n\n self.nweights = len(self.feature_index)\n self.weights = np.random.randn(self.nweights)\n print('Total features is {}'.format(self.nweights))\n print('Feature[0]={}, Feature[16]={}'.format(self.index_feature[0], self.index_feature[16]))\n print('Statistic Count of feature k ....')\n prior_feature_count = np.zeros(self.nweights)\n for x, y in train_data:\n nwords = len(x)\n for i in range(nwords):\n activate_feature = []\n # U feature\n for pos in self.U_feature_pos:\n feature = ('U', pos, self.get_word(x, i + pos), y[i])\n activate_feature.append(self.feature_index[feature])\n # B feature\n if i == 0:\n feature = ('B', self.tag_index[self.start_tag], y[i])\n else:\n feature = ('B', y[i - 1], y[i])\n activate_feature.append(self.feature_index[feature])\n prior_feature_count[activate_feature] += 1\n print(\"prior_feature_count[2]: {} {}\".format(self.index_feature[2], prior_feature_count[2]))\n\n print(\"Start training!\")\n func = lambda weights : self.neg_likelihood_and_gradient(weights, prior_feature_count, train_data)\n start_time = time.time()\n res = optimize.fmin_l_bfgs_b(func, self.weights, iprint=0, disp=1, maxiter=300, maxls=100)\n\n print(\"Training time:{}s\".format(time.time() - start_time))\n\n self.save()\n\n\n def save(self, file_path='linear_crf.model'):\n save_dict = {}\n save_dict['ntags'] = self.ntags\n save_dict['index_tag'] = self.index_tag\n save_dict['tag_index'] = self.tag_index\n save_dict['feature_index'] = self.feature_index\n save_dict['index_feature'] = self.index_feature\n save_dict['nweights'] = self.nweights\n 
save_dict['weights'] = self.weights\n with open(file_path, 'wb') as f:\n pickle.dump(save_dict, f)\n print(\"Save model successful!\")\n\n\n def load(self, file_path):\n with open(file_path, 'rb') as f:\n save_dict = pickle.load(f)\n\n self.ntags = save_dict['ntags']\n self.index_tag = save_dict['index_tag']\n self.tag_index = save_dict['tag_index']\n self.feature_index = save_dict['feature_index']\n self.index_feature = save_dict['index_feature'] \n self.nweights = save_dict['nweights']\n self.weights = save_dict['weights']\n\n print(\"Load model successful!\")\n\n\n def load_crfpp_model(self, model_path):\n \"\"\"Load model which is trained by crf++\n \"\"\"\n with open(model_path, 'r') as f:\n lines = f.readlines()\n\n tags_id = 0\n\n i = 0\n # print plus information\n while i < len(lines) and lines[i] != '\\n':\n line = lines[i].strip()\n print(line)\n i += 1\n\n i += 1\n # get tags \n while i < len(lines) and lines[i] != '\\n':\n line = lines[i].strip()\n self.tag_index[line] = tags_id\n self.index_tag[tags_id] = line\n tags_id += 1\n i += 1\n\n self.ntags = len(self.tag_index)\n print(self.tag_index)\n\n i += 1\n # map\n feature_map = {} # {'U00', -2}\n self.U_feature_pos = []\n while i < len(lines) and lines[i] != '\\n':\n line = lines[i].strip()\n if line != 'B':\n feature_template = line.split(':')[0]\n pos = line.split('[')[1].split(',')[0]\n feature_map[feature_template] = int(pos)\n self.U_feature_pos.append(int(pos))\n i += 1\n print('self.U_feature_pos', self.U_feature_pos)\n print('feature_map:', feature_map)\n\n\n i += 1\n # construct feature\n feature_id = 0\n feature_id_weight_index = {} # in model.txt weight are not in \n while i < len(lines) and lines[i] != '\\n':\n weight_index = int(lines[i].strip().split()[0])\n line = lines[i].strip().split()[1]\n if line == 'B':\n for tag_pre in range(self.ntags):\n for tag_now in range(self.ntags):\n feature = ('B', tag_pre, tag_now)\n self.feature_index[feature] = feature_id\n 
self.index_feature[feature_id] = feature\n feature_id_weight_index[feature_id] = weight_index\n weight_index += 1\n feature_id += 1\n else:\n feature_template = line.split(':')[0]\n word = line.split(':')[1]\n pos = feature_map[feature_template]\n for tag in range(self.ntags):\n feature = ('U', pos, word, tag)\n self.feature_index[feature] = feature_id\n self.index_feature[feature_id] = feature\n feature_id_weight_index[feature_id] = weight_index\n weight_index += 1\n feature_id += 1\n i += 1\n\n print('Total features:', len(self.feature_index))\n i += 1\n # read weights\n self.nweights = len(self.feature_index)\n self.weights = np.zeros(self.nweights)\n weights_in_file = []\n while i < len(lines) and lines[i] != '\\n':\n line = lines[i].strip()\n weights_in_file.append(float(line))\n i += 1\n\n for feature_id in feature_id_weight_index:\n self.weights[feature_id] = weights_in_file[feature_id_weight_index[feature_id]]\n print('Record weights = ', feature_id)\n print(\"The last feature is {}, it's weight is {}\".format(\n self.index_feature[feature_id-1], self.weights[feature_id-1]))\n self.save()\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.dot", "scipy.optimize.fmin_l_bfgs_b", "numpy.ones", "numpy.max", "numpy.argmax", "numpy.random.randn", "numpy.exp", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
abhishek2024/Chatbot-Mem2Seq-Knowledge-Base
[ "7ad9fb8c2e70b39ebfcbea659d755d0ee9c2bbf5" ]
[ "models/Mem2Seq.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.optim import lr_scheduler\nfrom torch import optim\nimport torch.nn.functional as F\nfrom utils.masked_cross_entropy import *\nfrom utils.config import *\nimport random\nimport numpy as np\nimport datetime\nfrom utils.measures import wer, moses_multi_bleu\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport nltk\nimport os\nfrom sklearn.metrics import f1_score\nimport json\nfrom utils.until_temp import entityList\n\nclass Mem2Seq(nn.Module):\n def __init__(self, hidden_size, max_len, max_r, lang, path, task, lr, n_layers, dropout, unk_mask):\n super(Mem2Seq, self).__init__()\n self.name = \"Mem2Seq\"\n self.task = task\n self.input_size = lang.n_words\n self.output_size = lang.n_words\n self.hidden_size = hidden_size\n self.max_len = max_len ## max input\n self.max_r = max_r ## max responce len \n self.lang = lang\n self.lr = lr\n self.n_layers = n_layers\n self.dropout = dropout\n self.unk_mask = unk_mask\n \n if path:\n if USE_CUDA:\n logging.info(\"MODEL {} LOADED\".format(str(path)))\n self.encoder = torch.load(str(path)+'/enc.th')\n self.decoder = torch.load(str(path)+'/dec.th')\n else:\n logging.info(\"MODEL {} LOADED\".format(str(path)))\n self.encoder = torch.load(str(path)+'/enc.th',lambda storage, loc: storage)\n self.decoder = torch.load(str(path)+'/dec.th',lambda storage, loc: storage)\n else:\n self.encoder = EncoderMemNN(lang.n_words, hidden_size, n_layers, self.dropout, self.unk_mask)\n self.decoder = DecoderMemNN(lang.n_words, hidden_size, n_layers, self.dropout, self.unk_mask)\n # Initialize optimizers and criterion\n self.encoder_optimizer = optim.Adam(self.encoder.parameters(), lr=lr)\n self.decoder_optimizer = optim.Adam(self.decoder.parameters(), lr=lr)\n self.scheduler = lr_scheduler.ReduceLROnPlateau(self.decoder_optimizer,mode='max',factor=0.5,patience=1,min_lr=0.0001, verbose=True)\n self.criterion = 
nn.MSELoss()\n self.loss = 0\n self.loss_ptr = 0\n self.loss_vac = 0\n self.print_every = 1\n self.batch_size = 0\n # Move models to GPU\n if USE_CUDA:\n self.encoder.cuda()\n self.decoder.cuda()\n\n def print_loss(self): \n print_loss_avg = self.loss / self.print_every\n print_loss_ptr = self.loss_ptr / self.print_every\n print_loss_vac = self.loss_vac / self.print_every\n self.print_every += 1 \n return 'L:{:.2f}, VL:{:.2f}, PL:{:.2f}'.format(print_loss_avg,print_loss_vac,print_loss_ptr)\n \n def save_model(self, dec_type):\n name_data = \"KVR/\" if self.task=='' else \"BABI/\"\n directory = 'save/mem2seq-'+name_data+str(self.task)+'HDD'+str(self.hidden_size)+'BSZ'+str(args['batch'])+'DR'+str(self.dropout)+'L'+str(self.n_layers)+'lr'+str(self.lr)+str(dec_type) \n if not os.path.exists(directory):\n os.makedirs(directory)\n torch.save(self.encoder, directory+'/enc.th')\n torch.save(self.decoder, directory+'/dec.th')\n \n def train_batch(self, input_batches, input_lengths, target_batches, \n target_lengths, target_index, target_gate, batch_size, clip,\n teacher_forcing_ratio, reset): \n\n if reset:\n self.loss = 0\n self.loss_ptr = 0\n self.loss_vac = 0\n self.print_every = 1\n\n self.batch_size = batch_size\n # Zero gradients of both optimizers\n self.encoder_optimizer.zero_grad()\n self.decoder_optimizer.zero_grad()\n loss_Vocab,loss_Ptr= 0,0\n\n # Run words through encoder\n decoder_hidden = self.encoder(input_batches).unsqueeze(0)\n self.decoder.load_memory(input_batches.transpose(0,1))\n\n # Prepare input and output variables\n decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))\n \n max_target_length = max(target_lengths)\n all_decoder_outputs_vocab = Variable(torch.zeros(max_target_length, batch_size, self.output_size))\n all_decoder_outputs_ptr = Variable(torch.zeros(max_target_length, batch_size, input_batches.size(0)))\n\n # Move new Variables to CUDA\n if USE_CUDA:\n all_decoder_outputs_vocab = all_decoder_outputs_vocab.cuda()\n 
all_decoder_outputs_ptr = all_decoder_outputs_ptr.cuda()\n decoder_input = decoder_input.cuda()\n\n # Choose whether to use teacher forcing\n use_teacher_forcing = random.random() < teacher_forcing_ratio\n \n if use_teacher_forcing: \n # Run through decoder one time step at a time\n for t in range(max_target_length):\n decoder_ptr, decoder_vacab, decoder_hidden = self.decoder.ptrMemDecoder(decoder_input, decoder_hidden)\n all_decoder_outputs_vocab[t] = decoder_vacab\n all_decoder_outputs_ptr[t] = decoder_ptr\n decoder_input = target_batches[t]# Chosen word is next input\n if USE_CUDA: decoder_input = decoder_input.cuda() \n else:\n for t in range(max_target_length):\n decoder_ptr, decoder_vacab, decoder_hidden = self.decoder.ptrMemDecoder(decoder_input, decoder_hidden)\n _, toppi = decoder_ptr.data.topk(1)\n _, topvi = decoder_vacab.data.topk(1)\n all_decoder_outputs_vocab[t] = decoder_vacab\n all_decoder_outputs_ptr[t] = decoder_ptr\n ## get the correspective word in input\n top_ptr_i = torch.gather(input_batches[:,:,0], 0, Variable(toppi.view(1, -1))).transpose(0,1)\n next_in = [top_ptr_i[i].item() if (toppi[i].item() < input_lengths[i]-1) else topvi[i].item() for i in range(batch_size)]\n\n decoder_input = Variable(torch.LongTensor(next_in)) # Chosen word is next input\n if USE_CUDA: decoder_input = decoder_input.cuda()\n \n #Loss calculation and backpropagation\n loss_Vocab = masked_cross_entropy(\n all_decoder_outputs_vocab.transpose(0, 1).contiguous(), # -> batch x seq\n target_batches.transpose(0, 1).contiguous(), # -> batch x seq\n target_lengths\n )\n loss_Ptr = masked_cross_entropy(\n all_decoder_outputs_ptr.transpose(0, 1).contiguous(), # -> batch x seq\n target_index.transpose(0, 1).contiguous(), # -> batch x seq\n target_lengths\n )\n\n loss = loss_Vocab + loss_Ptr\n loss.backward()\n \n # Clip gradient norms\n ec = torch.nn.utils.clip_grad_norm(self.encoder.parameters(), clip)\n dc = torch.nn.utils.clip_grad_norm(self.decoder.parameters(), clip)\n # 
Update parameters with optimizers\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n self.loss += loss.item()\n self.loss_ptr += loss_Ptr.item()\n self.loss_vac += loss_Vocab.item()\n \n def evaluate_batch(self,batch_size,input_batches, input_lengths, target_batches, target_lengths, target_index,target_gate,src_plain): \n # Set to not-training mode to disable dropout\n self.encoder.train(False)\n self.decoder.train(False) \n # Run words through encoder\n decoder_hidden = self.encoder(input_batches).unsqueeze(0)\n self.decoder.load_memory(input_batches.transpose(0,1))\n\n # Prepare input and output variables\n decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))\n\n decoded_words = []\n all_decoder_outputs_vocab = Variable(torch.zeros(self.max_r, batch_size, self.output_size))\n all_decoder_outputs_ptr = Variable(torch.zeros(self.max_r, batch_size, input_batches.size(0)))\n #all_decoder_outputs_gate = Variable(torch.zeros(self.max_r, batch_size))\n # Move new Variables to CUDA\n\n if USE_CUDA:\n all_decoder_outputs_vocab = all_decoder_outputs_vocab.cuda()\n all_decoder_outputs_ptr = all_decoder_outputs_ptr.cuda()\n #all_decoder_outputs_gate = all_decoder_outputs_gate.cuda()\n decoder_input = decoder_input.cuda()\n \n p = []\n for elm in src_plain:\n elm_temp = [ word_triple[0] for word_triple in elm ]\n p.append(elm_temp) \n \n self.from_whichs = []\n acc_gate,acc_ptr,acc_vac = 0.0, 0.0, 0.0\n # Run through decoder one time step at a time\n for t in range(self.max_r):\n decoder_ptr,decoder_vacab, decoder_hidden = self.decoder.ptrMemDecoder(decoder_input, decoder_hidden)\n all_decoder_outputs_vocab[t] = decoder_vacab\n topv, topvi = decoder_vacab.data.topk(1)\n all_decoder_outputs_ptr[t] = decoder_ptr\n topp, toppi = decoder_ptr.data.topk(1)\n top_ptr_i = torch.gather(input_batches[:,:,0], 0, Variable(toppi.view(1, -1))).transpose(0,1)\n next_in = [top_ptr_i[i].item() if (toppi[i].item() < input_lengths[i]-1) else topvi[i].item() for i 
in range(batch_size)]\n\n decoder_input = Variable(torch.LongTensor(next_in)) # Chosen word is next input\n if USE_CUDA: decoder_input = decoder_input.cuda()\n\n temp = []\n from_which = []\n for i in range(batch_size):\n if(toppi[i].item() < len(p[i])-1 ):\n temp.append(p[i][toppi[i].item()])\n from_which.append('p')\n else:\n ind = topvi[i].item()\n if ind == EOS_token:\n temp.append('<EOS>')\n else:\n temp.append(self.lang.index2word[ind])\n from_which.append('v')\n decoded_words.append(temp)\n self.from_whichs.append(from_which)\n self.from_whichs = np.array(self.from_whichs)\n\n # indices = torch.LongTensor(range(target_gate.size(0)))\n # if USE_CUDA: indices = indices.cuda()\n\n # ## acc pointer\n # y_ptr_hat = all_decoder_outputs_ptr.topk(1)[1].squeeze()\n # y_ptr_hat = torch.index_select(y_ptr_hat, 0, indices)\n # y_ptr = target_index \n # acc_ptr = y_ptr.eq(y_ptr_hat).sum()\n # acc_ptr = acc_ptr.data[0]/(y_ptr_hat.size(0)*y_ptr_hat.size(1))\n # ## acc vocab\n # y_vac_hat = all_decoder_outputs_vocab.topk(1)[1].squeeze()\n # y_vac_hat = torch.index_select(y_vac_hat, 0, indices) \n # y_vac = target_batches \n # acc_vac = y_vac.eq(y_vac_hat).sum()\n # acc_vac = acc_vac.data[0]/(y_vac_hat.size(0)*y_vac_hat.size(1))\n\n # Set back to training mode\n self.encoder.train(True)\n self.decoder.train(True)\n return decoded_words #, acc_ptr, acc_vac\n\n\n def evaluate(self,dev,avg_best,BLEU=False):\n logging.info(\"STARTING EVALUATION\")\n acc_avg = 0.0\n wer_avg = 0.0\n bleu_avg = 0.0\n acc_P = 0.0\n acc_V = 0.0\n microF1_PRED,microF1_PRED_cal,microF1_PRED_nav,microF1_PRED_wet = 0, 0, 0, 0\n microF1_TRUE,microF1_TRUE_cal,microF1_TRUE_nav,microF1_TRUE_wet = 0, 0, 0, 0\n ref = []\n hyp = []\n ref_s = \"\"\n hyp_s = \"\"\n dialog_acc_dict = {}\n\n if args['dataset'] == 'kvr':\n with open('data/KVR/kvret_entities.json') as f:\n global_entity = json.load(f)\n global_entity_list = []\n for key in global_entity.keys():\n if key != 'poi':\n global_entity_list += 
[item.lower().replace(' ', '_') for item in global_entity[key]]\n else:\n for item in global_entity['poi']:\n global_entity_list += [item[k].lower().replace(' ', '_') for k in item.keys()]\n global_entity_list = list(set(global_entity_list))\n else:\n if int(args[\"task\"])!=6:\n global_entity_list = entityList('data/dialog-bAbI-tasks/dialog-babi-kb-all.txt',int(args[\"task\"]))\n else:\n global_entity_list = entityList('data/dialog-bAbI-tasks/dialog-babi-task6-dstc2-kb.txt',int(args[\"task\"]))\n\n pbar = tqdm(enumerate(dev),total=len(dev))\n for j, data_dev in pbar: \n if args['dataset']=='kvr':\n words = self.evaluate_batch(len(data_dev[1]),data_dev[0],data_dev[1],\n data_dev[2],data_dev[3],data_dev[4],data_dev[5],data_dev[6]) \n else:\n words = self.evaluate_batch(len(data_dev[1]),data_dev[0],data_dev[1],\n data_dev[2],data_dev[3],data_dev[4],data_dev[5],data_dev[6]) \n\n acc=0\n w = 0 \n temp_gen = []\n\n for i, row in enumerate(np.transpose(words)):\n st = ''\n for e in row:\n if e== '<EOS>': break\n else: st+= e + ' '\n temp_gen.append(st)\n correct = data_dev[7][i] \n ### compute F1 SCORE \n st = st.lstrip().rstrip()\n correct = correct.lstrip().rstrip()\n if args['dataset']=='kvr':\n f1_true,count = self.compute_prf(data_dev[8][i], st.split(), global_entity_list, data_dev[14][i])\n microF1_TRUE += f1_true\n microF1_PRED += count\n f1_true,count = self.compute_prf(data_dev[9][i], st.split(), global_entity_list, data_dev[14][i])\n microF1_TRUE_cal += f1_true\n microF1_PRED_cal += count \n f1_true,count = self.compute_prf(data_dev[10][i], st.split(), global_entity_list, data_dev[14][i])\n microF1_TRUE_nav += f1_true\n microF1_PRED_nav += count \n f1_true, count = self.compute_prf(data_dev[11][i], st.split(), global_entity_list, data_dev[14][i]) \n microF1_TRUE_wet += f1_true\n microF1_PRED_wet += count\n elif args['dataset']=='babi' and int(args[\"task\"])==6:\n f1_true,count = self.compute_prf(data_dev[10][i], st.split(), global_entity_list, 
data_dev[12][i])\n microF1_TRUE += f1_true\n microF1_PRED += count\n\n if args['dataset']=='babi':\n if data_dev[11][i] not in dialog_acc_dict.keys():\n dialog_acc_dict[data_dev[11][i]] = []\n if (correct == st):\n acc+=1\n dialog_acc_dict[data_dev[11][i]].append(1)\n else:\n dialog_acc_dict[data_dev[11][i]].append(0)\n else:\n if (correct == st):\n acc+=1\n # print(\"Correct:\"+str(correct))\n # print(\"\\tPredict:\"+str(st))\n # print(\"\\tFrom:\"+str(self.from_whichs[:,i]))\n\n w += wer(correct,st)\n ref.append(str(correct))\n hyp.append(str(st))\n ref_s+=str(correct)+ \"\\n\"\n hyp_s+=str(st) + \"\\n\"\n\n acc_avg += acc/float(len(data_dev[1]))\n wer_avg += w/float(len(data_dev[1])) \n pbar.set_description(\"R:{:.4f},W:{:.4f}\".format(acc_avg/float(len(dev)),\n wer_avg/float(len(dev))))\n\n # dialog accuracy\n if args['dataset']=='babi':\n dia_acc = 0\n for k in dialog_acc_dict.keys():\n if len(dialog_acc_dict[k])==sum(dialog_acc_dict[k]):\n dia_acc += 1\n logging.info(\"Dialog Accuracy:\\t\"+str(dia_acc*1.0/len(dialog_acc_dict.keys())))\n\n if args['dataset']=='kvr':\n logging.info(\"F1 SCORE:\\t{}\".format(microF1_TRUE/float(microF1_PRED)))\n logging.info(\"\\tCAL F1:\\t{}\".format(microF1_TRUE_cal/float(microF1_PRED_cal))) \n logging.info(\"\\tWET F1:\\t{}\".format(microF1_TRUE_wet/float(microF1_PRED_wet))) \n logging.info(\"\\tNAV F1:\\t{}\".format(microF1_TRUE_nav/float(microF1_PRED_nav))) \n elif args['dataset']=='babi' and int(args[\"task\"])==6:\n logging.info(\"F1 SCORE:\\t{}\".format(microF1_TRUE/float(microF1_PRED)))\n \n bleu_score = moses_multi_bleu(np.array(hyp), np.array(ref), lowercase=True) \n logging.info(\"BLEU SCORE:\"+str(bleu_score)) \n if (BLEU): \n if (bleu_score >= avg_best):\n self.save_model(str(self.name)+str(bleu_score))\n logging.info(\"MODEL SAVED\") \n return bleu_score\n else:\n acc_avg = acc_avg/float(len(dev))\n if (acc_avg >= avg_best):\n self.save_model(str(self.name)+str(acc_avg))\n logging.info(\"MODEL SAVED\")\n return 
acc_avg\n\n def compute_prf(self, gold, pred, global_entity_list, kb_plain):\n local_kb_word = [k[0] for k in kb_plain]\n TP, FP, FN = 0, 0, 0\n if len(gold)!= 0:\n count = 1\n for g in gold:\n if g in pred:\n TP += 1\n else:\n FN += 1\n for p in set(pred):\n if p in global_entity_list or p in local_kb_word:\n if p not in gold:\n FP += 1\n precision = TP / float(TP+FP) if (TP+FP)!=0 else 0\n recall = TP / float(TP+FN) if (TP+FN)!=0 else 0\n F1 = 2 * precision * recall / float(precision + recall) if (precision+recall)!=0 else 0\n else:\n precision, recall, F1, count = 0, 0, 0, 0\n return F1, count\n\n\nclass EncoderMemNN(nn.Module):\n def __init__(self, vocab, embedding_dim, hop, dropout, unk_mask):\n super(EncoderMemNN, self).__init__()\n self.num_vocab = vocab\n self.max_hops = hop\n self.embedding_dim = embedding_dim\n self.dropout = dropout\n self.unk_mask = unk_mask\n for hop in range(self.max_hops+1):\n C = nn.Embedding(self.num_vocab, embedding_dim, padding_idx=PAD_token)\n C.weight.data.normal_(0, 0.1)\n self.add_module(\"C_{}\".format(hop), C)\n self.C = AttrProxy(self, \"C_\")\n self.softmax = nn.Softmax(dim=1)\n \n def get_state(self,bsz):\n \"\"\"Get cell states and hidden states.\"\"\"\n if USE_CUDA:\n return Variable(torch.zeros(bsz, self.embedding_dim)).cuda()\n else:\n return Variable(torch.zeros(bsz, self.embedding_dim))\n\n\n def forward(self, story):\n story = story.transpose(0,1)\n story_size = story.size() # b * m * 3 \n if self.unk_mask:\n if(self.training):\n ones = np.ones((story_size[0],story_size[1],story_size[2]))\n rand_mask = np.random.binomial([np.ones((story_size[0],story_size[1]))],1-self.dropout)[0]\n ones[:,:,0] = ones[:,:,0] * rand_mask\n a = Variable(torch.Tensor(ones))\n if USE_CUDA: a = a.cuda()\n story = story*a.long()\n u = [self.get_state(story.size(0))]\n for hop in range(self.max_hops):\n embed_A = self.C[hop](story.contiguous().view(story.size(0), -1).long()) # b * (m * s) * e\n embed_A = 
embed_A.view(story_size+(embed_A.size(-1),)) # b * m * s * e\n m_A = torch.sum(embed_A, 2).squeeze(2) # b * m * e\n\n u_temp = u[-1].unsqueeze(1).expand_as(m_A)\n prob = self.softmax(torch.sum(m_A*u_temp, 2)) \n embed_C = self.C[hop+1](story.contiguous().view(story.size(0), -1).long())\n embed_C = embed_C.view(story_size+(embed_C.size(-1),)) \n m_C = torch.sum(embed_C, 2).squeeze(2)\n\n prob = prob.unsqueeze(2).expand_as(m_C)\n o_k = torch.sum(m_C*prob, 1)\n u_k = u[-1] + o_k\n u.append(u_k) \n return u_k\n\nclass DecoderMemNN(nn.Module):\n def __init__(self, vocab, embedding_dim, hop, dropout, unk_mask):\n super(DecoderMemNN, self).__init__()\n self.num_vocab = vocab\n self.max_hops = hop\n self.embedding_dim = embedding_dim\n self.dropout = dropout\n self.unk_mask = unk_mask\n for hop in range(self.max_hops+1):\n C = nn.Embedding(self.num_vocab, embedding_dim, padding_idx=PAD_token)\n C.weight.data.normal_(0, 0.1)\n self.add_module(\"C_{}\".format(hop), C)\n self.C = AttrProxy(self, \"C_\")\n self.softmax = nn.Softmax(dim=1)\n self.W = nn.Linear(embedding_dim,1)\n self.W1 = nn.Linear(2*embedding_dim,self.num_vocab)\n self.gru = nn.GRU(embedding_dim, embedding_dim, dropout=dropout)\n\n def load_memory(self, story):\n story_size = story.size() # b * m * 3 \n if self.unk_mask:\n if(self.training):\n ones = np.ones((story_size[0],story_size[1],story_size[2]))\n rand_mask = np.random.binomial([np.ones((story_size[0],story_size[1]))],1-self.dropout)[0]\n ones[:,:,0] = ones[:,:,0] * rand_mask\n a = Variable(torch.Tensor(ones))\n if USE_CUDA:\n a = a.cuda()\n story = story*a.long()\n self.m_story = []\n for hop in range(self.max_hops):\n embed_A = self.C[hop](story.contiguous().view(story.size(0), -1))#.long()) # b * (m * s) * e\n embed_A = embed_A.view(story_size+(embed_A.size(-1),)) # b * m * s * e\n embed_A = torch.sum(embed_A, 2).squeeze(2) # b * m * e\n m_A = embed_A \n embed_C = self.C[hop+1](story.contiguous().view(story.size(0), -1).long())\n embed_C = 
embed_C.view(story_size+(embed_C.size(-1),)) \n embed_C = torch.sum(embed_C, 2).squeeze(2)\n m_C = embed_C\n self.m_story.append(m_A)\n self.m_story.append(m_C)\n\n def ptrMemDecoder(self, enc_query, last_hidden):\n embed_q = self.C[0](enc_query) # b * e\n output, hidden = self.gru(embed_q.unsqueeze(0), last_hidden)\n temp = []\n u = [hidden[0].squeeze()] \n for hop in range(self.max_hops):\n m_A = self.m_story[hop]\n if(len(list(u[-1].size()))==1): u[-1] = u[-1].unsqueeze(0) ## used for bsz = 1.\n u_temp = u[-1].unsqueeze(1).expand_as(m_A)\n prob_lg = torch.sum(m_A*u_temp, 2)\n prob_ = self.softmax(prob_lg)\n m_C = self.m_story[hop+1]\n temp.append(prob_)\n prob = prob_.unsqueeze(2).expand_as(m_C)\n o_k = torch.sum(m_C*prob, 1)\n if (hop==0):\n p_vocab = self.W1(torch.cat((u[0], o_k),1))\n u_k = u[-1] + o_k\n u.append(u_k)\n p_ptr = prob_lg \n return p_ptr, p_vocab, hidden\n\n\nclass AttrProxy(object):\n \"\"\"\n Translates index lookups into attribute lookups.\n To implement some trick which able to use list of nn.Module in a nn.Module\n see https://discuss.pytorch.org/t/list-of-nn-module-in-a-nn-module/219/2\n \"\"\"\n def __init__(self, module, prefix):\n self.module = module\n self.prefix = prefix\n\n def __getitem__(self, i):\n return getattr(self.module, self.prefix + str(i))\n" ]
[ [ "torch.nn.Softmax", "torch.LongTensor", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.Tensor", "torch.zeros", "torch.cat", "torch.nn.GRU", "torch.sum", "torch.nn.Embedding", "numpy.ones", "torch.nn.Linear", "numpy.transpose", "numpy.array", "torch.nn.MSELoss", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Wentzell/libdlr
[ "69906bed4f4619e9e685af9f2c15943fa8b5f398" ]
[ "pydlr/kernel_fortran.py" ]
[ "\"\"\"Python wrapper module for dlrcode \n\nCopyright 2021 Hugo U.R. Strand\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\nor implied. See the License for the specific language governing\npermissions and limitations under the License.\"\"\"\n\n\nimport os\nimport glob\n\nlibname = glob.glob(os.path.dirname(__file__) + '/../lib/libdlr_c.*')[0]\n\n# -- CFFI\n\nfrom cffi import FFI\n\nffi = FFI()\nffi.cdef(\"void c_ccfine_init(double *lambda, int *p, int *npt, int *npo, int *nt, int *no);\")\nffi.cdef(\"void c_ccfine(double *lambda, int *p, int *npt, int *npo, double *t, double *om);\")\nffi.cdef(\"void c_dlr_kfine(double *lambda, int *p, int *npt, int *npo, double *t, double *om, double *kmat, double *err);\")\nffi.cdef(\"void c_dlr_rf(double *lambda, double *eps, int *nt, int *no, double *om, double *kmat, int *rank, double *dlrrf, int *oidx);\")\nffi.cdef(\"void c_dlr_it(double *lambda, int *nt, int *no, double *t, double *kmat, int *rank, int *oidx, double* dlrit, int *tidx);\")\nffi.cdef(\"void c_dlr_cf2it_init(int *rank, double *dlrrf, double *dlrit, double *cf2it);\")\nffi.cdef(\"void c_dlr_it2cf_init(int *rank, double *dlrrf, double *dlrit, double *dlrit2cf, int *it2cfpiv);\")\nffi.cdef(\"void c_dlr_mf(int *nmax, int *rank, double *dlrrf, int *xi, int *dlrmf);\")\nffi.cdef(\"void c_dlr_cf2mf_init(int *rank, double *dlrrf,int *dlrmf, int *xi, double _Complex *cf2mf);\")\nffi.cdef(\"void c_dlr_mf2cf_init(int *nmax, int *rank, double *dlrrf,int *dlrmf, int *xi, double _Complex *dlrmf2cf, int *mf2cfpiv);\")\n\nlib = ffi.dlopen(libname)\n\n\nimport numpy as np\n\n\ndef get_P(piv):\n \"\"\" 
Permutation matrix corresponding to Lapack piv index vector \"\"\"\n P = np.eye(len(piv))\n for i, p in enumerate(piv):\n a = P[:, i].copy()\n b = P[:, p].copy()\n P[:, i], P[:, p] = b, a\n return P\n\n\ndef get_idx(piv):\n \"\"\" Numpy index vector corresponding to Lapack piv index vector \"\"\"\n P = get_P(piv)\n idx = np.zeros_like(piv)\n for i in range(len(piv)):\n idx[i] = np.argwhere(P==1)\n\n return idx\n\n\ndef get_A(lu, piv):\n L, U = np.tril(lu, k=-1) + np.eye(lu.shape[0]), np.triu(lu)\n P = get_P(piv)\n A = P @ L @ U\n return A\n\n\nclass KernelInterpolativeDecopositionFortran:\n\n def __init__(self, lamb, eps=1e-15, xi=-1,\n max_rank=500, nmax=None, verbose=False):\n\n print('--> Fortran driver')\n \n self.xi = xi\n self.lamb = lamb\n self.eps = eps\n\n if verbose:\n print(f'xi = {self.xi}')\n print(f'lambda = {self.lamb}')\n print(f'eps = {self.eps}')\n\n if nmax is None: nmax = int(lamb)\n \n # -- Determine kernel discretization from heuristics\n\n lamb = ffi.new('double *', lamb)\n res = [ ffi.new('int [1]') for n in range(5) ]\n\n lib.c_ccfine_init(lamb, *res)\n\n p, npt, npo, nt, no = [ x for x in res ]\n self.p, self.npt, self.npo, self.nt, self.no = [ x[0] for x in res ]\n \n if verbose:\n print(f'p = {self.p}')\n print(f'npt = {self.npt}')\n print(f'npo = {self.npo}')\n print(f'nt = {self.nt}')\n print(f'no = {self.no}')\n\n # -- Build analytical continuation kernel\n t = ffi.new(f'double [{self.nt}]')\n om = ffi.new(f'double [{self.no}]')\n\n lib.c_ccfine(lamb, p, npt, npo, t, om)\n\n self.t = np.frombuffer(ffi.buffer(t), dtype=np.float)\n self.om = np.frombuffer(ffi.buffer(om), dtype=np.float)\n\n if verbose:\n print(f't.shape = {self.t.shape}')\n print(f'om.shape = {self.om.shape}') \n\n kmat = ffi.new(f'double [{self.nt*self.no}]')\n err = ffi.new('double [2]')\n\n lib.c_dlr_kfine(lamb, p, npt, npo, t, om, kmat, err)\n\n self.kmat = np.frombuffer(ffi.buffer(kmat), dtype=np.float).reshape((self.no, self.nt)).T\n self.err = 
np.frombuffer(ffi.buffer(err), dtype=np.float)\n \n if verbose:\n print(f'kmat.shape = {self.kmat.shape}')\n print(f'err.shape = {self.err.shape}')\n print(f'err = {self.err}')\n\n # -- Select real frequency points\n\n eps = ffi.new('double *', eps)\n rank = ffi.new('int *', max_rank)\n oidx = ffi.new(f'int [{rank[0]}]')\n dlrrf = ffi.new(f'double [{rank[0]}]')\n\n lib.c_dlr_rf(lamb, eps, nt, no, om, kmat, rank, dlrrf, oidx)\n\n self.rank = rank[0]\n self.oidx = np.frombuffer(ffi.buffer(oidx), dtype=np.int32)[:self.rank] - 1\n self.dlrrf = np.frombuffer(ffi.buffer(dlrrf), dtype=np.float)[:self.rank]\n\n if verbose:\n print(f'rank = {self.rank}')\n print(f'oidx = {self.oidx}')\n print(f'dlrrf = {self.dlrrf}')\n\n # -- Select imaginary time points\n\n tidx = ffi.new(f'int [{self.rank}]')\n dlrit = ffi.new(f'double [{self.rank}]')\n\n lib.c_dlr_it(lamb, nt, no, t, kmat, rank, oidx, dlrit, tidx)\n\n self.tidx = np.frombuffer(ffi.buffer(tidx), dtype=np.int32) - 1\n self.dlrit = np.frombuffer(ffi.buffer(dlrit), dtype=np.float)\n self.dlrit = (self.dlrit > 0) * self.dlrit + (self.dlrit < 0) * (1 + self.dlrit)\n\n if verbose:\n print(f'tidx = {self.tidx}')\n print(f'dlrit = {self.dlrit}')\n \n # -- Transform matrix (LU-decomposed)\n\n it2cfpiv = ffi.new(f'int [{self.rank}]')\n dlrit2cf = ffi.new(f'double [{self.rank**2}]')\n\n lib.c_dlr_it2cf_init(rank,dlrrf,dlrit,dlrit2cf,it2cfpiv)\n\n self.it2cfpiv = np.frombuffer(ffi.buffer(it2cfpiv), dtype=np.int32) - 1\n self.dlrit2cf = np.frombuffer(\n ffi.buffer(dlrit2cf), dtype=np.float).reshape((self.rank, self.rank)).T\n \n if verbose:\n print(f'it2cfpiv = {self.it2cfpiv}')\n #print(f'dlrit2cf = \\n{self.dlrit2cf}')\n\n cf2it = ffi.new(f'double [{self.rank**2}]')\n\n lib.c_dlr_cf2it_init(rank,dlrrf,dlrit,cf2it)\n\n self.cf2it = np.frombuffer(\n ffi.buffer(cf2it), dtype=np.float).reshape((self.rank, self.rank)).T\n \n # -- Matsubara frequency points\n\n if nmax < self.rank: nmax = self.rank\n \n if verbose:\n print(f'nmax = 
{nmax}')\n \n nmax = ffi.new('int *', nmax)\n xi = ffi.new('int *', int(xi))\n dlrmf = ffi.new(f'int [{self.rank}]')\n\n lib.c_dlr_mf(nmax,rank,dlrrf,xi,dlrmf)\n\n self.nmax = nmax[0]\n self.dlrmf = np.frombuffer(ffi.buffer(dlrmf), dtype=np.int32)\n \n if verbose:\n print(f'nmax = {self.nmax}')\n print(f'dlrmf = {self.dlrmf}')\n\n mf2cfpiv = ffi.new(f'int [{self.rank}]')\n dlrmf2cf = ffi.new(f'double _Complex [{self.rank**2}]')\n\n lib.c_dlr_mf2cf_init(nmax,rank,dlrrf,dlrmf,xi,dlrmf2cf,mf2cfpiv)\n\n self.mf2cfpiv = np.frombuffer(ffi.buffer(mf2cfpiv), dtype=np.int32) - 1\n self.dlrmf2cf = np.frombuffer(ffi.buffer(dlrmf2cf), dtype=np.complex).reshape((self.rank, self.rank)).T\n \n if verbose:\n print(f'mf2cfpiv = {self.mf2cfpiv}')\n #print(f'dlrmf2cf = \\n{self.dlrmf2cf}')\n\n cf2mf = ffi.new(f'double _Complex [{self.rank**2}]')\n\n lib.c_dlr_cf2mf_init(rank,dlrrf,dlrmf,xi,cf2mf)\n\n self.cf2mf = np.frombuffer(ffi.buffer(cf2mf), dtype=np.complex).reshape((self.rank, self.rank)).T\n \n #self.T_lx = get_A(self.dlrit2cf, self.it2cfpiv)\n #self.T_qx = get_A(self.dlrmf2cf, self.mf2cfpiv)\n\n self.T_lx = self.cf2it\n self.T_qx = self.cf2mf\n \n" ]
[ [ "numpy.eye", "numpy.argwhere", "numpy.zeros_like", "numpy.triu", "numpy.tril" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MichalisLazarou/pytorch-meta
[ "96ad5703414ef983b56d3c396a944829707ff076" ]
[ "torchmeta/transforms/categorical.py" ]
[ "import torch\nfrom torchmeta.transforms.utils import apply_wrapper\nfrom collections import defaultdict\n\n\nclass Categorical(object):\n \"\"\"Target transform to return labels in `[0, num_classes)`.\n\n Parameters\n ----------\n num_classes : int, optional\n Number of classes. If `None`, then the number of classes is inferred\n from the number of individual labels encountered.\n\n Examples\n --------\n >>> dataset = Omniglot('data', num_classes_per_task=5, meta_train=True)\n >>> task = dataset.sample_task()\n >>> task[0]\n (<PIL.Image.Image image mode=L size=105x105 at 0x11EC797F0>,\n ('images_evaluation/Glagolitic/character12', None))\n\n >>> dataset = Omniglot('data', num_classes_per_task=5, meta_train=True,\n ... target_transform=Categorical(5))\n >>> task = dataset.sample_task()\n >>> task[0]\n (<PIL.Image.Image image mode=L size=105x105 at 0x11ED3F668>, 2)\n \"\"\"\n def __init__(self, num_classes=None):\n self.num_classes = num_classes\n self._classes = None\n self._labels = None\n\n def reset(self):\n self._classes = None\n self._labels = None\n\n @property\n def classes(self):\n if self._classes is None:\n self._classes = defaultdict(None)\n if self.num_classes is None:\n default_factory = lambda: len(self._classes)\n else:\n default_factory = lambda: self.labels[len(self._classes)]\n self._classes.default_factory = default_factory\n if (self.num_classes is not None) and (len(self._classes) > self.num_classes):\n raise ValueError('The number of individual labels ({0}) is greater '\n 'than the number of classes defined by `num_classes` '\n '({1}).'.format(len(self._classes), self.num_classes))\n return self._classes\n\n @property\n def labels(self):\n if (self._labels is None) and (self.num_classes is not None):\n # TODO: Replace torch.randperm with seed-friendly counterpart\n self._labels = torch.randperm(self.num_classes).tolist()\n return self._labels\n\n def __call__(self, target):\n return self.classes[target]\n\n def __repr__(self):\n return 
'{0}({1})'.format(self.__class__.__name__, self.num_classes or '')\n\n\nclass FixedCategory(object):\n def __init__(self, transform=None):\n self.transform = transform\n\n def __call__(self, index):\n return (index, self.transform)\n\n def __repr__(self):\n return ('{0}({1})'.format(self.__class__.__name__, self.transform))\n" ]
[ [ "torch.randperm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tinabd14/TrackR-CNN
[ "68fbd2aaf4feaf078651b7229c9bec9bc20bb0df" ]
[ "datasets/KITTI/segtrack/KITTI_segtrack.py" ]
[ "import glob\nimport tensorflow as tf\n\nfrom datasets.DetectionDataset import MapillaryLikeDetectionFileListDataset\nfrom datasets.KITTI.segtrack.KITTI_MOTS_info import SEQ_IDS_TRAIN, SEQ_IDS_VAL\nfrom datasets.Loader import register_dataset\nfrom datasets.util.Util import username\nfrom datasets import DataKeys\nfrom shutil import copytree\nfrom random import randint\nfrom core.Log import log\nimport os\n\nNAME = \"KITTI_segtrack\"\nNAME_DETECTION = \"KITTI_segtrack_detection\"\nDEFAULT_PATH = \"C:/Users/Tunar Mahmudov/Desktop/TrackR-CNN/data/KITTI_MOTS/train\"\nID_DIVISOR = 1000\nCLASS_IDS_WITH_INSTANCES = [1, 2]\nCROWD_ID = 10\nNUM_CLASSES = 3 # background, car, pedestrian\nN_MAX_DETECTIONS = 100\n\n\n# used for detection on individual images\n@register_dataset(NAME_DETECTION)\nclass KittiSegtrackDetectionDataset(MapillaryLikeDetectionFileListDataset):\n def __init__(self, config, subset, name=NAME, default_path=\"C:/Users/Tunar Mahmudov/Desktop/TrackR-CNN/data/KITTI_MOTS/train\"):\n self.seq_ids_train = SEQ_IDS_TRAIN\n self.seq_ids_val = SEQ_IDS_VAL\n self.imgs_are_pngs = config.bool(\"imgs_are_pngs\", True)\n t = config.string_list(\"seq_ids_train\", [])\n if t:\n self.seq_ids_train = t\n v = config.string_list(\"seq_ids_val\", [])\n if v:\n self.seq_ids_val = v\n super().__init__(config, name, subset, default_path, NUM_CLASSES, N_MAX_DETECTIONS, CLASS_IDS_WITH_INSTANCES,\n ID_DIVISOR, crowd_id=CROWD_ID)\n self.copy_dataset_to_tmp = config.bool(\"copy_dataset_to_tmp\", False)\n if self.copy_dataset_to_tmp:\n print(\"Copying dataset to $TMP!\", file=log.v1)\n new_path = \"$TMP/\" + str(randint(1, 100000))\n new_path = os.path.expandvars(new_path)\n os.makedirs(new_path)\n print(\"Copying images...\", file=log.v1)\n copytree(self.data_dir + \"/images\", new_path + \"/images\")\n print(\"Copying instances...\", file=log.v1)\n copytree(self.data_dir + \"/instances\", new_path + \"/instances\")\n self.data_dir = new_path\n\n def read_inputfile_lists(self):\n 
seq_ids = self.seq_ids_train if self.subset == \"train\" else self.seq_ids_val\n anns = []\n for seq_id in seq_ids:\n anns += sorted(glob.glob(self.data_dir + \"/instances/\" + seq_id + \"/*.png\"))\n\n imgs = [x.replace(\"/instances/\", \"/images/\") for x in anns]\n if not self.imgs_are_pngs:\n imgs = [x.replace(\".png\", \".jpg\") for x in imgs]\n return imgs, anns\n\n\n# used for training on chunks of video\n@register_dataset(NAME)\nclass KittiSegtrackDataset(KittiSegtrackDetectionDataset):\n def __init__(self, config, subset, name=NAME, default_path=\"C:/Users/Tunar Mahmudov/Desktop/TrackR-CNN/data/KITTI_MOTS/train\"):\n # batch size here is the number of time steps considered in a chunk\n # TODO: what do we do at test time?\n self._batch_size = config.int(\"batch_size\")\n assert self._batch_size > 1, \"use KittiSegtrackDetectionDataset for single image training\"\n super().__init__(config, subset, name, default_path)\n\n def read_inputfile_lists(self):\n seq_ids = self.seq_ids_train if self.subset == \"train\" else self.seq_ids_val\n anns = []\n for seq_id in seq_ids:\n anns_vid = sorted(glob.glob(self.data_dir + \"/instances/\" + seq_id + \"/*.png\"))\n starting_points = anns_vid[:-(self._batch_size - 1)]\n anns += starting_points\n\n imgs = [x.replace(\"/instances/\", \"/images/\") for x in anns]\n return imgs, anns\n\n def _batch(self, tfdata, batch_size):\n return tfdata\n\n def load_example(self, input_filenames):\n examples = []\n for delta_t in range(self._batch_size):\n input_filenames_t = [successor_frame_filename(fn, delta_t) for fn in input_filenames]\n raw_example = self.load_raw_example(*input_filenames_t)\n examples.append(raw_example)\n\n # cf process_raw_example\n # here we need to do it jointly to synchronize the augmentation (e.g. 
flipping)\n examples = [self.postproc_example_initial(example) for example in examples]\n examples = self.jointly_augment_examples_before_resize(examples)\n examples = [self.postproc_example_before_resize(example) for example in examples]\n examples = [self.resize_example(example) for example in examples]\n examples = self.jointly_augment_examples_after_resize(examples)\n examples = [self.postproc_example_before_assembly(example) for example in examples]\n examples = [self.assemble_example(example) for example in examples]\n\n # stack everything together\n examples_stacked = {}\n for key in examples[0].keys():\n if key == DataKeys.SKIP_EXAMPLE:\n stacked = tf.reduce_any([example[key] for example in examples])\n else:\n stacked = tf.stack([example[key] for example in examples], axis=0)\n examples_stacked[key] = stacked\n return examples_stacked\n\n def n_examples_per_epoch(self):\n n_examples = super().n_examples_per_epoch()\n if n_examples == len(self.inputfile_lists[0]):\n return n_examples * self._batch_size\n\n\ndef successor_frame_filename(filename, offset):\n if offset == 0:\n return filename\n else:\n return tf.py_func(successor_frame_filename_np, [filename, offset], tf.string)\n\n\ndef successor_frame_filename_np(filename, offset):\n filename = filename.decode(\"utf-8\")\n sp = filename.replace(\"\\\\\",\"/\").split(\"/\")\n png = \".png\" in sp[-1]\n t = int(sp[-1].replace(\".png\", \"\").replace(\".jpg\", \"\"))\n if png:\n filename = \"/\".join(sp[:-1]) + \"/%06d\" % (t + offset) + \".png\"\n else:\n filename = \"/\".join(sp[:-1]) + \"/%06d\" % (t + offset) + \".jpg\"\n return filename.encode(\"utf-8\")\n" ]
[ [ "tensorflow.reduce_any", "tensorflow.stack", "tensorflow.py_func" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
dbogatov/embedding-tests
[ "1d4d32c9c3aa5fe9f3d35ff456c030591fb8e855" ]
[ "data/mimic.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport os\nimport tqdm\n\nimport numpy as np\nfrom common import DATA_DIR\nfrom nltk.tokenize import word_tokenize\nfrom joblib import Parallel, delayed\n\nNOTE_DIR = os.path.join(DATA_DIR, 'patient_notes')\nDIAGNOSIS_DIR = os.path.join(DATA_DIR, 'diagnosis')\n\nif not os.path.exists(DIAGNOSIS_DIR):\n os.makedirs(DIAGNOSIS_DIR)\n\n\ndef remove_private(text):\n text = re.sub(r'\\[\\*\\*[^\\]]*\\*\\*]', '', text)\n text = text.replace('M.D.', '')\n text = text.replace('Dr.', '')\n return text.strip()\n\n\ndef remove_titles(text, titles):\n titles = r'|'.join(titles + [':'])\n text = re.sub(titles, '', text)\n return remove_private(text)\n\n\ndef process_note(note):\n title_pat = r\"^.*?\\s*([a-zA-Z',\\.\\-\\*\\[\\]\\(\\) ]+):\"\n diag_titles = ['discharge diagnosis', 'discharge diagnoses',\n 'final diagnosis', 'final diagnoses']\n sub_diag_titles = ['diagnosis', 'diagnoses', 'primary', 'secondary']\n\n def is_diag_section(t):\n return any(diag_title in t for diag_title in diag_titles)\n\n def is_sub_diag_section(t):\n return any(diag_title in t for diag_title in sub_diag_titles)\n\n with open(os.path.join(NOTE_DIR, note), 'rb') as f:\n text = f.read().lower()\n diagnosis_descriptions = []\n start_append = False\n for line in text.split('\\n'):\n m = re.search(title_pat, line, re.I)\n\n if m and 
len(diagnosis_descriptions):\n temp_title = line[m.span()[0]: m.span()[1] - 1]\n if not is_sub_diag_section(temp_title):\n break\n else:\n line = remove_titles(line, sub_diag_titles)\n if line:\n diagnosis_descriptions.append(line)\n elif start_append:\n line = remove_private(line)\n if len(line) > 1:\n diagnosis_descriptions.append(line)\n\n if is_diag_section(line):\n start_append = True\n line = remove_titles(line, diag_titles + sub_diag_titles)\n if len(line) > 1:\n diagnosis_descriptions.append(line)\n\n if len(diagnosis_descriptions):\n text = ' '.join(diagnosis_descriptions)\n text = re.sub(r\"[0-9]+([.)])\", '', text)\n text = re.sub(r\"([!@*&#$^_().,;:\\'\\\"\\[\\]?/\\\\><+]+|[-]+ | [-]+|--)\",\n '', text)\n text = word_tokenize(text)\n text = [w for w in text if w != '-']\n if len(text):\n with open(os.path.join(DIAGNOSIS_DIR, note), 'wb') as f:\n f.write(' '.join(text))\n else:\n print(note)\n print(diagnosis_descriptions)\n\n\ndef read_all_patient_notes():\n all_notes = os.listdir(NOTE_DIR)\n Parallel(n_jobs=16)(delayed(process_note)(note)\n for note in tqdm.tqdm(all_notes))\n\n\ndef load_diagnosis(note, max_len=30):\n with open(os.path.join(DIAGNOSIS_DIR, note)) as f:\n text = f.read()\n return text.split()[:max_len]\n\n\ndef load_all_diagnosis(train_size=0.5, split_word=True, seed=12345):\n all_notes = os.listdir(DIAGNOSIS_DIR)\n all_notes = sorted(all_notes)\n texts = []\n for note in tqdm.tqdm(all_notes):\n text = load_diagnosis(note)\n if not split_word:\n text = ' '.join(text)\n texts.append(text)\n\n n = len(texts)\n n_train = int(train_size * n)\n np.random.seed(seed)\n texts = np.asarray(texts)\n np.random.shuffle(texts)\n return texts[:n_train], texts[n_train:]\n\n\nif __name__ == '__main__':\n load_all_diagnosis()\n" ]
[ [ "numpy.asarray", "numpy.random.shuffle", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dykestra/menpo
[ "3a4690f991003a8706028ddb898493ac5b53418e", "3a4690f991003a8706028ddb898493ac5b53418e" ]
[ "menpo/math/test/decomposition_base_test.py", "menpo/transform/piecewiseaffine/base.py" ]
[ "import numpy as np\nfrom numpy.testing import assert_almost_equal\nfrom menpo.math import eigenvalue_decomposition, pca, ipca\n\n# Positive semi-definite matrix\ncov_matrix = np.array([[3, 1], [1, 3]])\n# Data values taken from:\n# http://www.cs.otago.ac.nz/cosc453/student_tutorials/principal_components.pdf\n# Tested values are equal\nlarge_samples_data_matrix = np.array([[2.5, 2.4],\n [0.5, 0.7],\n [2.2, 2.9],\n [1.9, 2.2],\n [3.1, 3.0],\n [2.3, 2.7],\n [2.0, 1.6],\n [1.0, 1.1],\n [1.5, 1.6],\n [1.1, 0.9]])\ncentered_eigenvectors_s = np.array([[0.6778734, 0.73517866],\n [-0.73517866, 0.6778734]])\nnon_centered_eigenvectors_s = np.array([[0.68647784, 0.72715072],\n [-0.72715072, 0.68647784]])\nmean_vector_s = np.array([1.81, 1.91])\neigenvalues_no_centre_s = np.array([8.97738481, 0.04928186])\neigenvalues_centered_s = np.array([1.28402771, 0.0490834])\n\ncentered_eigenvectors_f = np.array([[-0.09901475, 0.19802951, 0.69310328,\n 0.29704426, -0.09901475, 0.39605902,\n -0.39605902, 0.09901475, 0.09901475,\n -0.19802951]])\nnon_centered_eigenvectors_f = np.array(\n [[0.38507927, 0.09481302, 0.40261598, 0.32307722, 0.4794398, 0.39407387,\n 0.28217662, 0.16535718, 0.24399096, 0.15681507],\n [-0.25575629, 0.17561812, 0.58718113, 0.19319469, -0.29239933, 0.27184299,\n -0.5344514, 0.04200527, 0.01146941, -0.27333287]])\nmean_vector_f = np.array([2.45, 0.6, 2.55, 2.05, 3.05,\n 2.5, 1.8, 1.05, 1.55, 1.])\neigenvalues_no_centre_f = np.array([80.79646326, 0.44353674])\neigenvalues_centered_f = np.array([0.51])\n\n\ndef pcd_samples_nocentre_test():\n output = pca(large_samples_data_matrix, centre=False)\n eigenvectors, eigenvalues, mean_vector = output\n\n assert_almost_equal(eigenvalues, eigenvalues_no_centre_s)\n assert_almost_equal(eigenvectors, non_centered_eigenvectors_s)\n assert_almost_equal(mean_vector, [0.0, 0.0])\n\n\ndef pcd_samples_yescentre_test():\n output = pca(large_samples_data_matrix, centre=True)\n eigenvectors, eigenvalues, mean_vector = output\n\n 
assert_almost_equal(eigenvalues, eigenvalues_centered_s)\n assert_almost_equal(eigenvectors, centered_eigenvectors_s)\n assert_almost_equal(mean_vector, mean_vector_s)\n\n\ndef pcd_features_nocentre_test():\n output = pca(large_samples_data_matrix.T, centre=False)\n eigenvectors, eigenvalues, mean_vector = output\n\n assert_almost_equal(eigenvalues, eigenvalues_no_centre_f)\n assert_almost_equal(eigenvectors, non_centered_eigenvectors_f)\n assert_almost_equal(mean_vector, np.zeros(10))\n\n\ndef pcd_features_nocentre_inplace_test():\n # important to copy as this will now destructively effect the input data\n # matrix (due to inplace)\n output = pca(large_samples_data_matrix.T.copy(), centre=False,\n inplace=True)\n eigenvectors, eigenvalues, mean_vector = output\n\n assert_almost_equal(eigenvalues, eigenvalues_no_centre_f)\n assert_almost_equal(eigenvectors, non_centered_eigenvectors_f)\n assert_almost_equal(mean_vector, np.zeros(10))\n\n\ndef pcd_features_yescentre_test():\n output = pca(large_samples_data_matrix.T, centre=True)\n eigenvectors, eigenvalues, mean_vector = output\n\n assert_almost_equal(eigenvalues, eigenvalues_centered_f)\n assert_almost_equal(eigenvectors, centered_eigenvectors_f)\n assert_almost_equal(mean_vector, mean_vector_f)\n\n\ndef eigenvalue_decomposition_default_epsilon_test():\n pos_eigenvectors, pos_eigenvalues = eigenvalue_decomposition(cov_matrix)\n\n assert_almost_equal(pos_eigenvalues, [4.0, 2.0])\n sqrt_one_over_2 = np.sqrt(2.0) / 2.0\n assert_almost_equal(pos_eigenvectors, [[sqrt_one_over_2, -sqrt_one_over_2],\n [sqrt_one_over_2, sqrt_one_over_2]])\n\n\ndef eigenvalue_decomposition_large_epsilon_test():\n pos_eigenvectors, pos_eigenvalues = eigenvalue_decomposition(cov_matrix,\n eps=0.5)\n\n assert_almost_equal(pos_eigenvalues, [4.0])\n sqrt_one_over_2 = np.sqrt(2.0) / 2.0\n assert_almost_equal(pos_eigenvectors,\n [[sqrt_one_over_2], [sqrt_one_over_2]])\n\n\ndef ipca_samples_yescentre_test():\n n_a = 
large_samples_data_matrix.shape[0] / 2\n A = large_samples_data_matrix[:n_a, :]\n U_a, l_a, m_a = pca(A, centre=True)\n\n B = large_samples_data_matrix[n_a:, :]\n i_U, i_l, i_m = ipca(B, U_a, l_a, n_a, m_a=m_a)\n\n b_U, b_l, b_m = pca(large_samples_data_matrix, centre=True)\n\n assert_almost_equal(np.abs(i_U), np.abs(b_U))\n assert_almost_equal(i_l, b_l)\n assert_almost_equal(i_m, b_m)\n\n\ndef ipca_samples_nocentre_test():\n n_a = large_samples_data_matrix.shape[0] / 2\n A = large_samples_data_matrix[:n_a, :]\n U_a, l_a, m_a = pca(A, centre=False)\n\n B = large_samples_data_matrix[n_a:, :]\n i_U, i_l, i_m = ipca(B, U_a, l_a, n_a, m_a=m_a)\n\n b_U, b_l, b_m = pca(large_samples_data_matrix, centre=False)\n\n assert_almost_equal(np.abs(i_U), np.abs(b_U))\n assert_almost_equal(i_l, b_l)\n assert_almost_equal(i_m, b_m)\n\n\ndef ipca_features_yescentre_test():\n C = np.vstack((large_samples_data_matrix.T, large_samples_data_matrix.T))\n\n n_a = C.shape[0] / 2\n A = C[:n_a, :]\n U_a, l_a, m_a = pca(A, centre=True)\n\n B = C[n_a:, :]\n i_U, i_l, i_m = ipca(B, U_a, l_a, n_a, m_a=m_a)\n\n b_U, b_l, b_m = pca(C, centre=True)\n\n assert_almost_equal(np.abs(i_U), np.abs(b_U))\n assert_almost_equal(i_l, b_l)\n assert_almost_equal(i_m, b_m)\n\n\ndef ipca_features_nocentre_test():\n C = np.vstack((large_samples_data_matrix.T, large_samples_data_matrix.T))\n\n n_a = C.shape[0] / 2\n A = C[:n_a, :]\n U_a, l_a, m_a = pca(A, centre=False)\n\n B = C[n_a:, :]\n i_U, i_l, i_m = ipca(B, U_a, l_a, n_a, m_a=m_a)\n\n b_U, b_l, b_m = pca(C, centre=False)\n\n assert_almost_equal(np.abs(i_U), np.abs(b_U))\n assert_almost_equal(i_l, b_l)\n assert_almost_equal(i_m, b_m)\n", "import numpy as np\nfrom copy import deepcopy\nfrom menpo.base import Copyable\nfrom menpo.transform.base import Alignment, Invertible, Transform\nfrom .fastpwa import CLookupPWA\n# TODO View is broken for PWA (TriangleContainmentError)\n\n\nclass TriangleContainmentError(Exception):\n r\"\"\"\n Exception that is thrown when 
an attempt is made to map a point with a\n PWATransform that does not lie in a source triangle.\n\n points_outside_source_domain : ``(d,)`` `ndarray`\n A `bool` value for the ``d`` points that were attempted to be applied.\n If ``True```, the point was outside of the domain.\n \"\"\"\n def __init__(self, points_outside_source_domain):\n super(TriangleContainmentError, self).__init__()\n self.points_outside_source_domain = points_outside_source_domain\n\n\ndef containment_from_alpha_beta(alpha, beta):\n r\"\"\"\n Check `alpha` and `beta` are within a triangle (``alpha >= 0``,\n ``beta >= 0``, ``alpha + beta <= 1``). Returns the indices of the triangles\n that are `alpha` and `beta` are in. If any of the points are not contained\n in a triangle, raises a `TriangleContainmentError`.\n\n Parameters\n ----------\n alpha : ``(K, n_tris)`` `ndarray`\n Alpha for each point and triangle being tested.\n beta : ``(K, n_tris)`` `ndarray`\n Beta for each point and triangle being tested.\n\n Returns\n -------\n tri_index : ``(L,)`` `ndarray`\n Triangle index for each `points`, assigning each\n point in a triangle to the triangle index.\n\n Raises\n ------\n TriangleContainmentError\n All `points` must be contained in a source triangle. Check\n `error.points_outside_source_domain` to handle this case.\n \"\"\"\n # (K, n_tris), boolean for whether a given triangle contains a given\n # point\n point_containment = np.logical_and(np.logical_and(\n alpha >= 0, beta >= 0),\n alpha + beta <= 1)\n # is each point in a triangle?\n point_in_a_triangle = np.any(point_containment, axis=1)\n if np.any(~point_in_a_triangle):\n raise TriangleContainmentError(~point_in_a_triangle)\n point_index, tri_index = np.nonzero(point_containment)\n # don't want duplicates! 
ensure that here:\n index = np.zeros(alpha.shape[0])\n index[point_index] = tri_index\n return index.astype(np.uint32)\n\n\ndef alpha_beta(i, ij, ik, points):\n r\"\"\"\n Calculates the `alpha` and `beta` values (barycentric coordinates) for each\n triangle for all points provided. Note that this does not raise a\n `TriangleContainmentError`.\n\n Parameters\n ----------\n i : ``(n_tris, 2)`` `ndarray`\n The coordinate of the i'th point of each triangle\n ij : ``(n_tris, 2)`` `ndarray`\n The vector between the i'th point and the j'th point of each\n triangle\n ik : ``(n_tris, 2)`` `ndarray`\n The vector between the i'th point and the k'th point of each\n triangle\n points : ``(n_points, 2)`` `ndarray`\n Points to calculate the barycentric coordinates for.\n\n Returns\n -------\n alpha : ``(n_points, n_tris)`` `ndarray`\n The `alpha` for each point and triangle. Alpha can be interpreted\n as the contribution of the `ij` vector to the position of the point in\n question.\n beta : ``(n_points, n_tris)`` `ndarray`\n The beta for each point and triangle. Beta can be interpreted as\n the contribution of the ik vector to the position of the point in\n question.\n \"\"\"\n ip = points[..., None] - i\n dot_jj = np.einsum('dt, dt -> t', ij, ij)\n dot_kk = np.einsum('dt, dt -> t', ik, ik)\n dot_jk = np.einsum('dt, dt -> t', ij, ik)\n dot_pj = np.einsum('vdt, dt -> vt', ip, ij)\n dot_pk = np.einsum('vdt, dt -> vt', ip, ik)\n\n d = 1.0/(dot_jj * dot_kk - dot_jk * dot_jk)\n alpha = (dot_kk * dot_pj - dot_jk * dot_pk) * d\n beta = (dot_jj * dot_pk - dot_jk * dot_pj) * d\n return alpha, beta\n\n\ndef index_alpha_beta(i, ij, ik, points):\n \"\"\"\n Finds for each input point the index of it's bounding triangle and the\n `alpha` and `beta` value for that point in the triangle. 
Note this means\n that the following statements will always be true::\n\n alpha + beta <= 1\n alpha >= 0\n beta >= 0\n\n for each triangle result.\n\n Trying to map a point that does not exist in a triangle throws a\n `TriangleContainmentError`.\n\n Parameters\n ----------\n i : ``(n_tris, 2)`` `ndarray`\n The coordinate of the i'th point of each triangle\n ij : ``(n_tris, 2)`` `ndarray`\n The vector between the i'th point and the j'th point of each\n triangle\n ik : ``(n_tris, 2)`` `ndarray`\n The vector between the i'th point and the k'th point of each\n triangle\n points : ``(n_points, 2)`` `ndarray`\n Points to calculate the barycentric coordinates for.\n\n Returns\n -------\n tri_index : ``(n_tris,)`` `ndarray`\n Triangle index for each of the `points`, assigning each point to its\n containing triangle.\n alpha : ``(n_tris,)`` `ndarray`\n Alpha for containing triangle of each point.\n beta : ``(n_tris,)`` `ndarray`\n Beta for containing triangle of each point.\n\n Raises\n ------\n TriangleContainmentError\n All `points` must be contained in a source triangle. Check\n `error.points_outside_source_domain` to handle this case.\n \"\"\"\n alpha, beta = alpha_beta(i, ij, ik, points)\n each_point = np.arange(points.shape[0])\n index = containment_from_alpha_beta(alpha, beta)\n return index, alpha[each_point, index], beta[each_point, index]\n\n\ndef barycentric_vectors(points, trilist):\n r\"\"\"\n Compute the affine transformation between each triangle in the `source`\n and `target`. 
This is calculated analytically.\n\n Parameters\n ----------\n points : ``(n_points, 2)`` `ndarray`\n Points to calculate the barycentric coordinates for.\n trilist: ``(n_tris, 3)`` `ndarray`\n The 0-based index triangulation joining the points.\n\n Returns\n -------\n i : ``(n_tris, 2)`` `ndarray`\n The coordinate of the i'th point of each triangle\n ij : ``(n_tris, 2)`` `ndarray`\n The vector between the i'th point and the j'th point of each\n triangle\n ik : ``(n_tris, 2)`` `ndarray`\n The vector between the i'th point and the k'th point of each\n triangle\n \"\"\"\n # we permute the axes of the indexed point set to have shape\n # [3, n_dims, n_tris] for ease of indexing in.\n x = np.transpose(points[trilist], axes=[1, 2, 0])\n return x[0], x[1] - x[0], x[2] - x[0]\n\n\n# Note we inherit from Alignment first to get it's n_dims behavior\nclass AbstractPWA(Alignment, Transform, Invertible):\n r\"\"\"\n A piecewise affine transformation.\n\n This is composed of a number of triangles defined be a set of `source` and\n `target` vertices. These vertices are related by a common triangle `list`.\n No limitations on the nature of the triangle `list` are imposed. Points can\n then be mapped via barycentric coordinates from the `source` to the `target`\n space. Trying to map points that are not contained by any source triangle\n throws a `TriangleContainmentError`, which contains diagnostic information.\n\n Parameters\n ----------\n source : :map:`PointCloud` or :map:`TriMesh`\n The source points. If a TriMesh is provided, the triangulation on\n the TriMesh is used. If a PointCloud is provided, a Delaunay\n triangulation of the source is performed automatically.\n target : :map:`PointCloud`\n The target points. Note that the trilist is entirely decided by the\n source.\n\n Raises\n ------\n ValueError\n Source and target must both be 2D.\n TriangleContainmentError\n All points to apply must be contained in a source triangle. 
Check\n `error.points_outside_source_domain` to handle this case.\n \"\"\"\n def __init__(self, source, target):\n from menpo.shape import TriMesh # to avoid circular import\n if not isinstance(source, TriMesh):\n source = TriMesh(source.points)\n Alignment.__init__(self, source, target)\n if self.n_dims != 2:\n raise ValueError(\"source and target must be 2 \"\n \"dimensional\")\n self.ti, self.tij, self.tik = None, None, None\n self._rebuild_target_vectors()\n\n @property\n def n_tris(self):\n r\"\"\"\n The number of triangles in the triangle list.\n\n :type: `int`\n \"\"\"\n return self.source.n_tris\n\n @property\n def trilist(self):\n r\"\"\"\n The triangle list.\n\n :type: ``(n_tris, 3)`` `ndarray`\n \"\"\"\n return self.source.trilist\n\n def _rebuild_target_vectors(self):\n r\"\"\"\n Rebuild the vectors that are used in the apply method. This needs to\n be called whenever the target is changed.\n \"\"\"\n t = self.target.points[self.trilist]\n # get vectors ij ik for the target\n self.tij, self.tik = t[:, 1] - t[:, 0], t[:, 2] - t[:, 0]\n # target i'th vertex positions\n self.ti = t[:, 0]\n\n def _sync_state_from_target(self):\n r\"\"\"\n PWA is particularly efficient to sync from target - we don't have to\n do much at all, just rebuild the target vectors.\n \"\"\"\n self._rebuild_target_vectors()\n\n def _apply(self, x, **kwargs):\n \"\"\"\n Applies this transform to a new set of vectors.\n\n Parameters\n ----------\n x : ``(K, 2)`` `ndarray`\n Points to apply this transform to.\n\n Returns\n -------\n transformed : ``(K, 2)`` `ndarray`\n The transformed array.\n \"\"\"\n tri_index, alpha, beta = self.index_alpha_beta(x)\n return (self.ti[tri_index] +\n alpha[:, None] * self.tij[tri_index] +\n beta[:, None] * self.tik[tri_index])\n\n def _apply_batched(self, x, batch_size, **kwargs):\n # This is a rare case where we need to override the batched apply\n # method. 
In this case, we override it because we want to the\n # possibly raised TriangleContainmentError to contain ALL the points\n # that were considered, and not just the first batch of points.\n if batch_size is None:\n return self._apply(x, **kwargs)\n else:\n outputs = []\n points_outside_source_domain = []\n n_points = x.shape[0]\n exception_thrown = False\n for lo_ind in range(0, n_points, batch_size):\n try:\n hi_ind = lo_ind + batch_size\n outputs.append(self._apply(x[lo_ind:hi_ind], **kwargs))\n except TriangleContainmentError as e:\n exception_thrown = True\n points_outside_source_domain.append(\n e.points_outside_source_domain)\n else:\n # No exception was thrown, so all points were inside\n points_outside_source_domain.append(\n np.zeros(batch_size, dtype=np.bool))\n\n if exception_thrown:\n raise TriangleContainmentError(\n np.hstack(points_outside_source_domain))\n else:\n return np.vstack(outputs)\n\n def index_alpha_beta(self, points):\n \"\"\"\n Finds for each input point the index of its bounding triangle and the\n `alpha` and `beta` value for that point in the triangle. Note this\n means that the following statements will always be true::\n\n alpha + beta <= 1\n alpha >= 0\n beta >= 0\n\n for each triangle result.\n\n Trying to map a point that does not exist in a triangle throws a\n `TriangleContainmentError`.\n\n Parameters\n ----------\n points : ``(K, 2)`` `ndarray`\n Points to test.\n\n Returns\n -------\n tri_index : ``(L,)`` `ndarray`\n Triangle index for each of the `points`, assigning each\n point to it's containing triangle.\n alpha : ``(L,)`` `ndarray`\n Alpha for containing triangle of each point.\n beta : ``(L,)`` `ndarray`\n Beta for containing triangle of each point.\n\n Raises\n ------\n TriangleContainmentError\n All `points` must be contained in a source triangle. 
Check\n `error.points_outside_source_domain` to handle this case.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def has_true_inverse(self):\n \"\"\"\n The inverse is true.\n\n :type: ``True``\n \"\"\"\n return True\n\n def pseudoinverse(self):\n r\"\"\"\n The pseudoinverse of the transform - that is, the transform that\n results from swapping `source` and `target`, or more formally, negating\n the transforms parameters. If the transform has a true inverse this\n is returned instead.\n\n :type: ``type(self)``\n \"\"\"\n from menpo.shape import PointCloud, TriMesh # to avoid circular import\n new_source = TriMesh(self.target.points, self.source.trilist)\n new_target = PointCloud(self.source.points)\n return type(self)(new_source, new_target)\n\n\nclass PythonPWA(AbstractPWA):\n\n def __init__(self, source, target):\n super(PythonPWA, self).__init__(source, target)\n si, sij, sik = barycentric_vectors(self.source.points, self.trilist)\n self.s, self.sij, self.sik = si, sij, sik\n\n def index_alpha_beta(self, points):\n return index_alpha_beta(self.s, self.sij, self.sik, points)\n\n\nclass CachedPWA(PythonPWA):\n\n def __init__(self, source, target):\n super(CachedPWA, self).__init__(source, target)\n self._applied_points, self._iab = None, None\n\n def index_alpha_beta(self, points):\n if (self._applied_points is None or not\n np.all(points == self._applied_points)):\n self._applied_points = points\n self._iab = PythonPWA.index_alpha_beta(self, points)\n return self._iab\n\n\nclass CythonPWA(AbstractPWA):\n r\"\"\"\n A piecewise affine transformation.\n\n The apply method in this case involves dotting the triangle vectors with\n the values of alpha and beta found. The calculation of alpha and beta is\n done in C, and a hash map is used to cache lookup values.\n\n Parameters\n ----------\n source : :class:`menpo.shape.PointCloud` or :class:`menpo.shape.TriMesh`\n The source points. If a TriMesh is provided, the triangulation on\n the TriMesh is used. 
If a :class:`menpo.shape.PointCloud`\n is provided, a Delaunay triangulation of the source is performed\n automatically.\n target : :class:`PointCloud`\n The target points. Note that the trilist is entirely decided by\n the source.\n\n Raises\n ------\n ValueError\n Source and target must both be 2D.\n\n TriangleContainmentError\n All points to apply must be contained in a source triangle. Check\n `error.points_outside_source_domain` to handle this case.\n \"\"\"\n def __init__(self, source, target):\n super(CythonPWA, self).__init__(source, target)\n # make sure the source and target satisfy the c requirements\n source_c = np.require(self.source.points, dtype=np.float64,\n requirements=['C'])\n trilist_c = np.require(self.trilist, dtype=np.uint32,\n requirements=['C'])\n # build the cython wrapped C object and store it locally\n self._fastpwa = CLookupPWA(source_c, trilist_c)\n\n def copy(self):\n new = Copyable.copy(self)\n new._fastpwa = deepcopy(self._fastpwa)\n return new\n\n def index_alpha_beta(self, points):\n points_c = np.require(points, dtype=np.float64, requirements=['C'])\n index, alpha, beta = self._fastpwa.index_alpha_beta(points_c)\n if np.any(index < 0):\n raise TriangleContainmentError(index < 0)\n else:\n return index, alpha, beta\n" ]
[ [ "numpy.sqrt", "numpy.abs", "numpy.testing.assert_almost_equal", "numpy.array", "numpy.zeros", "numpy.vstack" ], [ "numpy.hstack", "numpy.einsum", "numpy.nonzero", "numpy.arange", "numpy.all", "numpy.any", "numpy.require", "numpy.transpose", "numpy.logical_and", "numpy.zeros", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
matinaghaei/Stock-Trading-ActorCriticRL
[ "8876eacb6d2ba09f3f80792c82fe6fb6e4ffda29" ]
[ "algorithms/ddpg/ddpg.py" ]
[ "from env.environment import PortfolioEnv\nfrom algorithms.ddpg.agent import Agent\nimport numpy as np\nfrom plot import add_curve, add_hline, save_plot\nimport os\nimport pandas as pd\nfrom pyfolio import timeseries\n\n\nclass DDPG:\n\n def __init__(self, load=False, alpha=0.000025, beta=0.00025, tau=0.001,\n batch_size=64, layer1_size=400, layer2_size=300,\n state_type='only prices', djia_year=2019, repeat=0):\n\n self.figure_dir = 'plots/ddpg'\n self.checkpoint_dir = 'checkpoints/ddpg'\n os.makedirs(self.figure_dir, exist_ok=True)\n os.makedirs(self.checkpoint_dir, exist_ok=True)\n self.repeat = repeat\n\n self.env = PortfolioEnv(action_scale=1000, state_type=state_type, djia_year=djia_year)\n if djia_year == 2019:\n self.intervals = self.env.get_intervals(train_ratio=0.7, valid_ratio=0.15, test_ratio=0.15)\n elif djia_year == 2012:\n self.intervals = self.env.get_intervals(train_ratio=0.9, valid_ratio=0.05, test_ratio=0.05)\n self.agent = Agent(alpha=alpha, beta=beta, input_dims=self.env.state_shape(), \n action_dims=self.env.action_shape(), tau=tau, batch_size=batch_size, \n layer1_size=layer1_size, layer2_size=layer2_size)\n if load:\n self.agent.load_models(self.checkpoint_dir)\n\n np.random.seed(0)\n\n def train(self, verbose=False):\n training_history = []\n validation_history = []\n iteration = 1\n max_wealth = 0\n\n while True:\n observation = self.env.reset(*self.intervals['training'])\n done = False\n while not done:\n action = self.agent.choose_action(observation)\n observation_, reward, done, info, wealth = self.env.step(action)\n self.agent.remember(observation, action, reward, observation_, int(done))\n self.agent.learn()\n observation = observation_\n if verbose:\n print(f\"DDPG training - Date: {info.date()},\\tBalance: {int(self.env.get_balance())},\\t\"\n f\"Cumulative Return: {int(wealth) - 1000000},\\tShares: {self.env.get_shares()}\")\n self.agent.memory.clear_buffer()\n\n print(f\"DDPG training - Iteration: {iteration},\\tCumulative 
Return: {int(wealth) - 1000000}\")\n training_history.append(wealth - 1000000)\n\n validation_wealth = self.validate(verbose)\n print(f\"DDPG validating - Iteration: {iteration},\\tCumulative Return: {int(validation_wealth) - 1000000}\")\n validation_history.append(validation_wealth - 1000000)\n if validation_wealth > max_wealth:\n self.agent.save_models(self.checkpoint_dir)\n max_wealth = max(max_wealth, validation_wealth)\n if validation_history[-5:].count(max_wealth - 1000000) != 1:\n break\n if iteration == 30:\n break\n iteration += 1\n\n self.agent.load_models(self.checkpoint_dir)\n\n buy_hold_history = self.env.buy_hold_history(*self.intervals['training'])\n buy_hold_final = (buy_hold_history[-1] / buy_hold_history[0] - 1) * 1000000\n add_hline(buy_hold_final, 'Buy&Hold')\n add_curve(training_history, 'DDPG')\n save_plot(filename=self.figure_dir + f'/{self.repeat}0_training.png',\n title=f\"Training - {self.intervals['training'][0].date()} to {self.intervals['training'][1].date()}\",\n x_label='Iteration', y_label='Cumulative Return (Dollars)')\n\n buy_hold_history = self.env.buy_hold_history(*self.intervals['validation'])\n buy_hold_final = (buy_hold_history[-1] / buy_hold_history[0] - 1) * 1000000\n add_hline(buy_hold_final, 'Buy&Hold')\n add_curve(validation_history, 'DDPG')\n save_plot(filename=self.figure_dir + f'/{self.repeat}1_validation.png',\n title=f\"Validation - {self.intervals['validation'][0].date()} to {self.intervals['validation'][1].date()}\",\n x_label='Iteration', y_label='Cumulative Return (Dollars)')\n\n def validate(self, verbose=False):\n observation = self.env.reset(*self.intervals['validation'])\n done = False\n while not done:\n action = self.agent.choose_action(observation)\n observation_, reward, done, info, wealth = self.env.step(action)\n observation = observation_\n if verbose:\n print(f\"DDPG validation - Date: {info.date()},\\tBalance: {int(self.env.get_balance())},\\t\"\n f\"Cumulative Return: {int(wealth) - 
1000000},\\tShares: {self.env.get_shares()}\")\n return wealth\n\n def test(self, verbose=True):\n return_history = [0]\n buy_hold_history = self.env.buy_hold_history(*self.intervals['testing'])\n add_curve((buy_hold_history / buy_hold_history[0] - 1) * 1000000, 'Buy&Hold')\n\n observation = self.env.reset(*self.intervals['testing'])\n wealth_history = [self.env.get_wealth()]\n done = False\n while not done:\n action = self.agent.choose_action(observation)\n observation_, reward, done, info, wealth = self.env.step(action)\n self.agent.remember(observation, action, reward, observation_, int(done))\n self.agent.learn()\n observation = observation_\n if verbose:\n print(f\"DDPG testing - Date: {info.date()},\\tBalance: {int(self.env.get_balance())},\\t\"\n f\"Cumulative Return: {int(wealth) - 1000000},\\tShares: {self.env.get_shares()}\")\n return_history.append(wealth - 1000000)\n wealth_history.append(wealth)\n self.agent.memory.clear_buffer()\n\n add_curve(return_history, 'DDPG')\n save_plot(self.figure_dir + f'/{self.repeat}2_testing.png',\n title=f\"Testing - {self.intervals['testing'][0].date()} to {self.intervals['testing'][1].date()}\",\n x_label='Days', y_label='Cumulative Return (Dollars)')\n \n returns = pd.Series(wealth_history, buy_hold_history.index).pct_change().dropna()\n stats = timeseries.perf_stats(returns)\n stats.to_csv(self.figure_dir + f'/{self.repeat}3_perf.csv')\n" ]
[ [ "pandas.Series", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Asif54321/pynwb
[ "e484bfd0208a9777bc6c45e44a4cfa3a900cffe3" ]
[ "docs/gallery/general/object_id.py" ]
[ "\"\"\"\n.. _object_ids:\n\nObject IDs in NWB\n=================\n\nThis example focuses on how to access object IDs from NWB container objects and NWB container objects by\nobject ID. Every NWB container object has an object ID that is a UUID_ string, such as\n\"123e4567-e89b-12d3-a456-426655440000\". These IDs have a non-zero probability of being duplicated, but are practically\nunique and used widely across computing platforms as if they are unique.\n\nThe object ID of an NWB container object can be accessed using the\n:py:meth:`~hdmf.container.AbstractContainer.object_id` method.\n\n.. _UUID: https://en.wikipedia.org/wiki/Universally_unique_identifier\n\n\"\"\"\n\n# sphinx_gallery_thumbnail_path = 'figures/gallery_thumbnails_objectid.png'\nfrom pynwb import NWBFile, TimeSeries\nfrom datetime import datetime\nfrom dateutil.tz import tzlocal\nimport numpy as np\n\n# set up the NWBFile\nstart_time = datetime(2019, 4, 3, 11, tzinfo=tzlocal())\nnwbfile = NWBFile(session_description='demonstrate NWB object IDs',\n identifier='NWB456',\n session_start_time=start_time)\n\n# make some fake data\ntimestamps = np.linspace(0, 100, 1024)\ndata = np.sin(0.333 * timestamps) + np.cos(0.1 * timestamps) + np.random.randn(len(timestamps))\ntest_ts = TimeSeries(name='raw_timeseries', data=data, unit='m', timestamps=timestamps)\n\n# add it to the NWBFile\nnwbfile.add_acquisition(test_ts)\n\n# print the object ID of the NWB file\nprint(nwbfile.object_id)\n\n# print the object ID of the TimeSeries\nprint(test_ts.object_id)\n\n####################\n# The :py:class:`~pynwb.file.NWBFile` class has the :py:meth:`~pynwb.file.NWBFile.objects` property, which provides a\n# dictionary of all neurodata_type objects in the `NWBFile`, indexed by each object's object ID.\n\nprint(nwbfile.objects)\n\n####################\n# You can iterate through the `objects` dictionary as with any other Python dictionary.\n\nfor oid in nwbfile.objects:\n print(nwbfile.objects[oid])\n\nfor obj in 
nwbfile.objects.values():\n print('%s: %s \"%s\"' % (obj.object_id, obj.neurodata_type, obj.name))\n\n####################\n# If you have stored the object ID of a particular NWB container object, you can use it as a key on `NWBFile.objects` to\n# get the object.\n\nts_id = test_ts.object_id\nmy_ts = nwbfile.objects[ts_id] # test_ts == my_ts\n\n####################\n#\n# .. note::\n# It is important to note that the object ID is NOT a unique hash of the data. If the contents of an NWB container\n# change, the object ID remains the same.\n#\n" ]
[ [ "numpy.cos", "numpy.linspace", "numpy.sin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vzinche/inferno
[ "91b22dfcd1b6a9ec415f0bbb6ae66caea42f4034" ]
[ "inferno/io/transform/generic.py" ]
[ "import numpy as np\nimport torch\nfrom scipy.stats import median_absolute_deviation\nfrom .base import Transform, DTypeMapping\nfrom ...utils.exceptions import assert_, DTypeError\n\n\nclass Normalize(Transform):\n \"\"\"Normalizes input to zero mean unit variance.\"\"\"\n def __init__(self, eps=1e-4, mean=None, std=None, ignore_value=None, **super_kwargs):\n \"\"\"\n Parameters\n ----------\n eps : float\n A small epsilon for numerical stability.\n mean : list or float or numpy.ndarray\n Global dataset mean for all channels.\n std : list or float or numpy.ndarray\n Global dataset std for all channels.\n super_kwargs : dict\n Kwargs to the superclass `inferno.io.transform.base.Transform`.\n \"\"\"\n super(Normalize, self).__init__(**super_kwargs)\n self.eps = eps\n self.mean = np.asarray(mean) if mean is not None else None\n self.std = np.asarray(std) if std is not None else None\n self.ignore_value = ignore_value\n\n def tensor_function(self, tensor):\n # if we have a background value that we don't want to normalize\n mask = None if self.ignore_value is None else (tensor != self.ignore_value)\n if mask is None:\n mean = np.asarray(tensor.mean()) if self.mean is None else self.mean\n std = np.asarray(tensor.std()) if self.std is None else self.std\n else:\n mean = np.asarray(tensor[mask].mean()) if self.mean is None else self.mean\n std = np.asarray(tensor[mask].std()) if self.std is None else self.std\n # Figure out how to reshape mean and std\n reshape_as = [-1] + [1] * (tensor.ndim - 1)\n # Normalize\n if mask is None:\n tensor = (tensor - mean.reshape(*reshape_as)) / (std.reshape(*reshape_as) + self.eps)\n else:\n # if tensor is int, the normalized tensor will be in int as well\n tensor = tensor.astype('float32')\n tensor[mask] = ((tensor - mean.reshape(*reshape_as)) \\\n / (std.reshape(*reshape_as) + self.eps))[mask]\n return tensor\n\n\nclass NormalizeMedian(Transform):\n \"\"\"Normalizes input using median and median absolute deviation.\"\"\"\n def 
__init__(self, eps=1e-4, ignore_value=None, clip_min=None, clip_max=None, **super_kwargs):\n \"\"\"\n Parameters\n ----------\n eps : float\n A small epsilon for numerical stability.\n ignore_value: float\n A background value to ignore while computing median and MAD\n clip_min / clip_max: float\n Clip everything below/above this value\n super_kwargs : dict\n Kwargs to the superclass `inferno.io.transform.base.Transform`.\n \"\"\"\n super().__init__(**super_kwargs)\n self.eps = eps\n self.ignore_value = ignore_value\n self.min = clip_min\n self.max = clip_max\n\n def tensor_function(self, tensor):\n if self.ignore_value is not None:\n mask = (tensor != self.ignore_value)\n median = np.median(tensor[mask])\n mad = median_absolute_deviation(tensor[mask], axis=None)\n tensor[mask] = ((tensor - median) / (mad + self.eps))[mask]\n else:\n median = np.median(tensor)\n mad = median_absolute_deviation(tensor, axis=None)\n tensor = (tensor - median) / (mad + self.eps)\n if self.min is not None or self.max is not None:\n tensor = np.clip(tensor, a_min=self.min, a_max=self.max)\n return tensor\n\n\nclass NormalizeRange(Transform):\n \"\"\"Normalizes input by a constant.\"\"\"\n def __init__(self, normalize_by=255., **super_kwargs):\n \"\"\"\n Parameters\n ----------\n normalize_by : float or int\n Scalar to normalize by.\n super_kwargs : dict\n Kwargs to the superclass `inferno.io.transform.base.Transform`.\n \"\"\"\n super(NormalizeRange, self).__init__(**super_kwargs)\n self.normalize_by = float(normalize_by)\n\n def tensor_function(self, tensor):\n return tensor / self.normalize_by\n\n\nclass Project(Transform):\n \"\"\"\n Given a projection mapping (i.e. 
a dict) and an input tensor, this transform replaces\n all values in the tensor that equal a key in the mapping with the value corresponding to\n the key.\n \"\"\"\n def __init__(self, projection, **super_kwargs):\n \"\"\"\n Parameters\n ----------\n projection : dict\n The projection mapping.\n super_kwargs : dict\n Keywords to the super class.\n \"\"\"\n super(Project, self).__init__(**super_kwargs)\n self.projection = dict(projection)\n\n def tensor_function(self, tensor):\n output = np.zeros_like(tensor)\n for source, target in self.projection.items():\n output[tensor == source] = target\n return output\n\n\nclass Label2OneHot(Transform, DTypeMapping):\n \"\"\"Convert integer labels to one-hot vectors for arbitrary dimensional data.\"\"\"\n def __init__(self, num_classes, dtype='float', **super_kwargs):\n \"\"\"\n Parameters\n ----------\n num_classes : int\n Number of classes.\n dtype : str\n Datatype of the output.\n super_kwargs : dict\n Keyword arguments to the superclass.\n \"\"\"\n super(Label2OneHot, self).__init__(**super_kwargs)\n self.num_classes = num_classes\n self.dtype = self.DTYPE_MAPPING.get(dtype)\n\n def tensor_function(self, tensor):\n reshaped_arange = np.arange(self.num_classes).reshape(-1, *(1,)*tensor.ndim)\n output = np.equal(reshaped_arange, tensor).astype(self.dtype)\n # output = np.zeros(shape=(self.num_classes,) + tensor.shape, dtype=self.dtype)\n # # Optimizing for simplicity and memory efficiency, because one would usually\n # # spawn multiple workers\n # for class_num in range(self.num_classes):\n # output[class_num] = tensor == class_num\n return output\n\n\nclass Cast(Transform, DTypeMapping):\n \"\"\"Casts inputs to a specified datatype.\"\"\"\n def __init__(self, dtype='float', **super_kwargs):\n \"\"\"\n Parameters\n ----------\n dtype : {'float16', 'float32', 'float64', 'half', 'float', 'double'}\n Datatype to cast to.\n super_kwargs : dict\n Kwargs to the superclass `inferno.io.transform.base.Transform`.\n \"\"\"\n 
super(Cast, self).__init__(**super_kwargs)\n assert dtype in self.DTYPE_MAPPING.keys()\n self.dtype = self.DTYPE_MAPPING.get(dtype)\n\n def tensor_function(self, tensor):\n return getattr(np, self.dtype)(tensor)\n\n\nclass AsTorchBatch(Transform):\n \"\"\"Converts a given numpy array to a torch batch tensor.\n\n The result is a torch tensor __without__ the leading batch axis. For example,\n if the input is an image of shape `(100, 100)`, the output is a batch of shape\n `(1, 100, 100)`. The collate function will add the leading batch axis to obtain\n a tensor of shape `(N, 1, 100, 100)`, where `N` is the batch-size.\n \"\"\"\n def __init__(self, dimensionality, add_channel_axis_if_necessary=True, **super_kwargs):\n \"\"\"\n Parameters\n ----------\n dimensionality : {1, 2, 3}\n Dimensionality of the data: 1 if vector, 2 if image, 3 if volume.\n add_channel_axis_if_necessary : bool\n Whether to add a channel axis where necessary. For example, if `dimensionality = 2`\n and the input temperature has 2 dimensions (i.e. an image), setting\n `add_channel_axis_if_necessary` to True results in the output being a 3 dimensional\n tensor, where the leading dimension is a singleton and corresponds to `channel`.\n super_kwargs : dict\n Kwargs to the superclass `inferno.io.transform.base.Transform`.\n \"\"\"\n super(AsTorchBatch, self).__init__(**super_kwargs)\n assert dimensionality in [1, 2, 3]\n self.dimensionality = dimensionality\n self.add_channel_axis_if_necessary = bool(add_channel_axis_if_necessary)\n\n def _to_batch(self, tensor):\n assert_(isinstance(tensor, np.ndarray),\n \"Expected numpy array, got %s\" % type(tensor),\n DTypeError)\n # some of the np functions return view, and then tensor[None, ...] complains\n tensor = tensor.copy()\n if self.dimensionality == 3:\n # We're dealing with a volume. 
tensor can either be 3D or 4D\n assert tensor.ndim in [3, 4]\n if tensor.ndim == 3 and self.add_channel_axis_if_necessary:\n # Add channel axis\n return torch.from_numpy(tensor[None, ...])\n else:\n # Channel axis is in already\n return torch.from_numpy(tensor)\n elif self.dimensionality == 2:\n # We're dealing with an image. tensor can either be 2D or 3D\n assert tensor.ndim in [2, 3]\n if tensor.ndim == 2 and self.add_channel_axis_if_necessary:\n # Add channel axis\n return torch.from_numpy(tensor[None, ...])\n else:\n # Channel axis is in already\n return torch.from_numpy(tensor)\n elif self.dimensionality == 1:\n # We're dealing with a vector - it has to be 1D\n assert tensor.ndim == 1\n return torch.from_numpy(tensor)\n else:\n raise NotImplementedError\n\n def tensor_function(self, tensor):\n assert_(isinstance(tensor, (list, np.ndarray)),\n \"Expected numpy array or list, got %s\" % type(tensor),\n DTypeError)\n if isinstance(tensor, np.ndarray):\n return self._to_batch(tensor)\n else:\n return [self._to_batch(elem) for elem in tensor]\n" ]
[ [ "numpy.clip", "numpy.asarray", "numpy.arange", "numpy.median", "torch.from_numpy", "scipy.stats.median_absolute_deviation", "numpy.zeros_like", "numpy.equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.4", "1.5", "1.7", "1.3", "1.8" ], "tensorflow": [] } ]
laureho/arboretum
[ "b011dc5b5cf54340a06925d0420985444358df86" ]
[ "arboretum/io.py" ]
[ "import numpy as np\nimport btrack\n\n\n\nclass TrackerFrozenState:\n \"\"\" Capture the Tracker state at the end of tracking \"\"\"\n def __init__(self):\n self.objects = None\n self.tracks = None\n self.lbep = None\n self.refs = None\n self.dummies = None\n\n def set(self, tracker):\n for key in self.__dict__.keys():\n setattr(self, key, getattr(tracker, key))\n\n\n\nclass ArboretumHDFHandler(btrack.dataio.HDF5FileHandler):\n \"\"\" ArboretumHDFHandler\n\n Extend the default btrack HDF handler to deal with writing out segmentation,\n objects and tracks\n\n Generic HDF5 file hander for reading and writing datasets. This is\n inter-operable between segmentation, tracking and analysis code.\n\n LBEPR is a modification of the LBEP format to also include the root node\n of the tree.\n\n I - number of objects\n J - number of frames\n K - number of tracks\n\n Added generic filtering to object retrieval, e.g.\n obj = handler.filtered_objects('flag==1')\n retrieves all objects if there is an object['flag'] == 1\n\n Basic format of the HDF file is:\n segmentation/\n images - (J x h x w) uint8 images of the segmentation\n objects/\n obj_type_1/\n coords - (I x 5) [t, x, y, z, object_type]\n labels - (I x D) [label, (softmax scores ...)]\n map - (J x 2) [start_index, end_index] -> coords array\n ...\n tracks/\n obj_type_1/\n tracks - (I x 1) [index into coords]\n dummies - similar to coords, but for dummy objects\n map - (K x 2) [start_index, end_index] -> tracks array\n LBEPR - (K x 5) [L, B, E, P, R, G]\n fates - (K x n) [fate_from_tracker, ...future_expansion]\n ...\n\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n @property\n @btrack.dataio.h5check_property_exists('segmentation')\n def segmentation(self):\n return self._hdf['segmentation']['images'][:].astype(np.uint16)\n\n def write_segmentation(self,\n segmentation: np.ndarray,\n obj_type='obj_type_1'):\n\n \"\"\" write out the segmentation to an HDF file \"\"\"\n\n # write the 
segmentation out\n grp = self._hdf.create_group('segmentation')\n grp.create_dataset(f'images',\n data=segmentation,\n dtype='uint16',\n compression='gzip',\n compression_opts=7)\n\n def write_objects(self,\n tracker_state: TrackerFrozenState,\n obj_type='obj_type_1'):\n\n #TODO(arl): make sure that the objects are ordered in time\n\n self._hdf.create_group('objects')\n grp = self._hdf['objects'].create_group(obj_type)\n\n n_objects = len(tracker_state.objects)\n n_frames = np.max([o.t for o in tracker_state.objects]) + 1\n\n txyz = np.zeros((n_objects, 5), dtype=np.float32)\n labels = np.zeros((n_objects, 1), dtype=np.uint8)\n fmap = np.zeros((n_frames, 2), dtype=np.uint32)\n\n # convert the btrack objects into a numpy array\n for i, obj in enumerate(tracker_state.objects):\n txyz[i,:] = [obj.t, obj.x, obj.y, obj.z, 1]\n labels[i,:] = obj.label\n\n # update the frame map\n t = int(obj.t)\n fmap[t, 1] = np.max([fmap[t, 1], i])\n\n fmap[1:,0] = fmap[:-1,1]\n\n grp.create_dataset('coords', data=txyz, dtype='float32')\n grp.create_dataset('labels', data=labels, dtype='float32')\n grp.create_dataset('map', data=fmap, dtype='uint32')\n" ]
[ [ "numpy.max", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Patabu2/extended-semi-siamese-unet
[ "e20d36abeaffe0c2d856fbe757a142f75380c446" ]
[ "models/u_net_ext_semi_siam_3_channels.py" ]
[ "import tensorflow as tf\n\nfrom tensorflow.keras import Model\n\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, UpSampling2D, Concatenate, Dropout, BatchNormalization, ReLU, Add\n\n\n#--------------------------------------------------------------------------------\n# Encoding block\n#--------------------------------------------------------------------------------\nclass DownBlock(Model):\n def __init__(self, filters, kernel_size, padding, strides):\n super(DownBlock, self).__init__()\n\n self.conv_1 = Conv2D(filters, kernel_size, padding = padding, strides = strides, activation = 'relu')\n self.conv_2 = Conv2D(filters, kernel_size, padding = padding, strides = strides, activation = 'relu')\n \n self.max_pool = MaxPooling2D( pool_size = (2,2), strides = (2,2) )\n\n # Dropout layer\n self.dropout = Dropout(0.2)\n\n\n def call(self, inputs):\n conv = self.conv_1(inputs)\n conv = self.conv_2(conv)\n dropout = self.dropout(conv)\n max_pool = self.max_pool(dropout)\n\n return conv, max_pool\n\n#------------------------------------------------------------------\n# Upsampling Block\n#------------------------------------------------------------------\nclass UpBlock(Model):\n def __init__(self, filters, kernel_size, padding, strides):\n super(UpBlock, self).__init__()\n\n self.upsampling = UpSampling2D((2,2))\n self.concat = Concatenate()\n self.conv_1 = Conv2D(filters, kernel_size, padding = padding, strides = strides, activation = 'relu')\n self.conv_2 = Conv2D(filters, kernel_size, padding = padding, strides = strides, activation = 'relu')\n\n # Dropout layer\n self.dropout = Dropout(0.2)\n\n def call(self, inputs, skip):\n x = self.upsampling(inputs)\n x = self.concat([x, skip])\n x = self.conv_1(x)\n x = self.conv_2(x)\n x = self.dropout(x)\n return x\n\n\ndef sigmoid_correcter(x):\n return tf.where(x > 0.5, 1.0, 0.0)\n\n#----------------------------------------------------------------------------------------\n# U-Net 
model\n#----------------------------------------------------------------------------------------\nclass ExpandedUNet(Model):\n def __init__(self, filters, kernel_size, padding, strides):\n super(ExpandedUNet, self).__init__()\n\n # Create the encoding blocks\n self.down_block_1 = DownBlock(filters[0], kernel_size, padding, strides)\n self.down_block_2 = DownBlock(filters[1], kernel_size, padding, strides)\n self.down_block_3 = DownBlock(filters[2], kernel_size, padding, strides)\n self.down_block_4 = DownBlock(filters[3], kernel_size, padding, strides)\n\n #--------------------------------\n # Create the upsampling blocks\n #--------------------------------\n # Path 1\n self.up_block_1_1 = UpBlock(filters[3], kernel_size, padding, strides)\n self.up_block_1_2 = UpBlock(filters[2], kernel_size, padding, strides)\n self.up_block_1_3 = UpBlock(filters[1], kernel_size, padding, strides)\n self.up_block_1_4_segm = UpBlock(filters[0], kernel_size, padding, strides)\n self.segmenter = Conv2D( 1, (1,1), padding = 'same', strides =1, activation = 'sigmoid', name='segmenter_1')\n\n # Path 2\n self.up_block_2_1 = UpBlock(filters[3], kernel_size, padding, strides)\n self.up_block_2_2 = UpBlock(filters[2], kernel_size, padding, strides)\n self.up_block_2_3 = UpBlock(filters[1], kernel_size, padding, strides)\n self.up_block_2_4_segm = UpBlock(filters[0], kernel_size, padding, strides)\n self.segmenter_2 = Conv2D( 1, (1,1), padding = 'same', strides =1, activation = 'sigmoid', name = 'segmenter_2')\n\n # Path 3\n self.up_block_3_1 = UpBlock(filters[3], kernel_size, padding, strides)\n self.up_block_3_2 = UpBlock(filters[2], kernel_size, padding, strides)\n self.up_block_3_3 = UpBlock(filters[1], kernel_size, padding, strides)\n self.up_block_3_4_segm = UpBlock(filters[0], kernel_size, padding, strides)\n self.segmenter_3 = Conv2D( 1, (1,1), padding = 'same', strides =1, activation = 'sigmoid', name = 'segmenter_3')\n\n # Path 4\n self.up_block_4_1 = UpBlock(filters[3], 
kernel_size, padding, strides)\n self.up_block_4_2 = UpBlock(filters[2], kernel_size, padding, strides)\n self.up_block_4_3 = UpBlock(filters[1], kernel_size, padding, strides)\n self.up_block_4_4_segm = UpBlock(filters[0], kernel_size, padding, strides)\n self.segmenter_4 = Conv2D( 1, (1,1), padding = 'same', strides =1, activation = 'sigmoid', name = 'segmenter_4')\n \n # Create convolutional blocks for the bottom part\n self.conv_1 = Conv2D(filters[4], kernel_size, padding = padding, strides = strides, activation = 'relu')\n self.conv_2 = Conv2D(filters[4], kernel_size, padding = padding, strides = strides, activation = 'relu')\n\n\n def call(self, inputs):\n # Down sampling part\n c1, a1 = self.down_block_1(inputs)\n c2, a2 = self.down_block_2(a1)\n c3, a3 = self.down_block_3(a2)\n c4, a4 = self.down_block_4(a3)\n\n # The bottom of the U\n bottom = self.conv_1(a4)\n bottom = self.conv_2(bottom)\n\n #-----------------------\n # Upsampling parts\n #-----------------------\n # Path 1\n u1_1 = self.up_block_1_1(bottom, c4)\n u1_2 = self.up_block_1_2(u1_1, c3)\n u1_3 = self.up_block_1_3(u1_2, c2)\n u1_4_segm = self.up_block_1_4_segm(u1_3, c1)\n segm = self.segmenter(u1_4_segm)\n\n # Path 2\n u2_1 = self.up_block_2_1(bottom, c4)\n u2_2 = self.up_block_2_2(u2_1, c3)\n u2_3 = self.up_block_2_3(u2_2, c2)\n u2_4_segm = self.up_block_2_4_segm(u2_3, c1)\n segm_2 = self.segmenter_2(u2_4_segm)\n\n # Path 3\n u3_1 = self.up_block_3_1(bottom, c4)\n u3_2 = self.up_block_3_2(u3_1, c3)\n u3_3 = self.up_block_3_3(u3_2, c2)\n u3_4_segm = self.up_block_3_4_segm(u3_3, c1)\n segm_3 = self.segmenter_3(u3_4_segm)\n\n # Path 4\n u4_1 = self.up_block_4_1(bottom, c4)\n u4_2 = self.up_block_4_2(u4_1, c3)\n u4_3 = self.up_block_4_3(u4_2, c2)\n u4_4_segm = self.up_block_4_4_segm(u4_3, c1)\n segm_4 = self.segmenter_4(u4_4_segm)\n\n return segm, segm_2, segm_3, segm_4\n" ]
[ [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.UpSampling2D", "tensorflow.where", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.MaxPooling2D" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
SpiritedAwayCN/AIintro-GiveMeSomeCredit
[ "c73288c8eebd6ea7122bf5ab70be23766bc7647b" ]
[ "utils/draw.py" ]
[ "# 画图\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as trs\nimport math\nimport seaborn\n\ndata = pd.read_csv('cs-training.csv', index_col=0)\ndata.drop_duplicates(inplace=True)\n\ndata.boxplot(column=['NumberOfTime30-59DaysPastDueNotWorse',\n 'NumberOfTime60-89DaysPastDueNotWorse', 'NumberOfTimes90DaysLate'])\nplt.title('Before Cleaning', fontproperties='Times New Roman', size=16)\nplt.xticks([1, 2, 3], ['30-59Days', '60-89Days', '90Days'],\n fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.savefig('001_NumberOfTime_before_cleaning.png', dpi=1200)\nplt.close()\n\ndata = data[(data['NumberOfTime30-59DaysPastDueNotWorse'] < 80) &\n (data['NumberOfTime60-89DaysPastDueNotWorse'] < 80) & (data['NumberOfTimes90DaysLate'] < 80)]\n\ndata.boxplot(column=['NumberOfTime30-59DaysPastDueNotWorse',\n 'NumberOfTime60-89DaysPastDueNotWorse', 'NumberOfTimes90DaysLate'])\nplt.title('After Cleaning', fontproperties='Times New Roman', size=16)\nplt.xticks([1, 2, 3], ['30-59Days', '60-89Days', '90Days'],\n fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.savefig('002_NumberOfTime_after_cleaning.png', dpi=1200)\nplt.close()\n\ndata.boxplot(column='age', figsize=(5, 5))\nplt.title('Age', fontproperties='Times New Roman', size=16)\nplt.xticks(fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.savefig('003_Age_box.png', dpi=1200)\nplt.close()\n\ndata = data[data['age'] != 0]\ndata.hist(column='age', bins=data['age'].max()-data['age'].min())\nplt.title('Age', fontproperties='Times New Roman', size=16)\nplt.xticks(fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.savefig('004_Age_hist.png', dpi=1200)\nplt.close()\n\ndata['LogDebtRatio'] = data['DebtRatio'].apply(\n lambda x: math.log(x) if x else 
np.nan)\nplt.subplots(figsize=(8, 5))\nplt.subplot(121)\ndata.boxplot(column='DebtRatio')\nplt.xticks([1], ['Original'], fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.subplot(122)\ndata.boxplot(column='LogDebtRatio')\nplt.xticks([1], ['Logarithmic'], fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.suptitle('Before and After Logarithmic Comparison\\n(DebtRatio)',\n fontproperties='Times New Roman', size=16)\nplt.savefig('005_DebtRatio_box_Logarithmic_Comparison.png', dpi=1200)\nplt.close()\n\ndata['LogRevolvingUtilizationOfUnsecuredLines'] = data['RevolvingUtilizationOfUnsecuredLines'].apply(\n lambda x: math.log(x) if x else np.nan)\nplt.subplots(figsize=(8, 5))\nplt.subplot(121)\ndata.boxplot(column='RevolvingUtilizationOfUnsecuredLines')\nplt.xticks([1], ['Original'], fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.subplot(122)\ndata.boxplot(column='LogRevolvingUtilizationOfUnsecuredLines')\nplt.xticks([1], ['Logarithmic'], fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.suptitle('Before and After Logarithmic Comparison\\n(RevolvingUtilizationOfUnsecuredLines)',\n fontproperties='Times New Roman', size=16)\nplt.savefig('006_RUOUL_box_Logarithmic_Comparison.png', dpi=1200)\nplt.close()\n\ndata['LogMonthlyIncome'] = data['MonthlyIncome'].apply(\n lambda x: math.log(x) if x else np.nan)\nplt.subplots(figsize=(8, 5))\nplt.subplot(121)\ndata.boxplot(column='MonthlyIncome')\nplt.xticks([1], ['Original'], fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.subplot(122)\ndata.boxplot(column='LogMonthlyIncome')\nplt.xticks([1], ['Logarithmic'], fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.suptitle('Before and After Logarithmic 
Comparison\\n(MonthlyIncome)',\n fontproperties='Times New Roman', size=16)\nplt.savefig('007_MonthlyIncome_box_Logarithmic_Comparison.png', dpi=1200)\nplt.close()\n\ndata['LogDebtRatio'].hist(bins=100)\nplt.title('Logarithmic DebtRatio', fontproperties='Times New Roman', size=16)\nplt.xticks(fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.savefig('008_Logarithmic_DebtRatio_hist.png', dpi=1200)\nplt.close()\n\ndata['LogRevolvingUtilizationOfUnsecuredLines'].hist(bins=100)\nplt.title('Logarithmic RevolvingUtilizationOfUnsecuredLines',\n fontproperties='Times New Roman', size=14)\nplt.xticks(fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.savefig('009_Logarithmic_RUOUL_hist.png', dpi=1200)\nplt.close()\n\ndata['LogMonthlyIncome'].hist(bins=100)\nplt.title('Logarithmic MonthlyIncome',\n fontproperties='Times New Roman', size=16)\nplt.xticks(fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.savefig('010_Logarithmic_MonthlyIncome_hist.png', dpi=1200)\nplt.close()\n\n# 以上是数据预处理以及直观可视化\n\ndata['LowIncome'] = data['MonthlyIncome'] < 180\ndata['NormalIncome'] = data['MonthlyIncome'] >= 180\ndata[['LowIncome', 'NormalIncome']].sum().plot(kind='bar')\nplt.xticks(rotation=0, fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.title('MonthlyIncome',\n fontproperties='Times New Roman', size=16)\nplt.savefig('011_Income_binning.png', dpi=1200)\nplt.close()\n\ndata['YoungAge'] = data['age'] < 24\ndata['OldAge'] = data['age'] > 65\ndata['NormalAge'] = (data['age'] <= 65) & (data['age'] >= 24)\ndata[['YoungAge', 'NormalAge', 'OldAge']].sum().plot(kind='bar')\nplt.xticks(rotation=0, fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.title('Age',\n fontproperties='Times New Roman', 
size=16)\nplt.savefig('012_Age_binning.png', dpi=1200)\nplt.close()\n\n# LogAge图像比较分立就弃了\n# data=data[data['age']!=0]\n#data['LogAge'] = np.log(data['age'])\n# data.hist(column='LogAge',bins=100)\n# plt.show()\n\ndata['LogIncomePerPerson'] = data['LogMonthlyIncome'] / \\\n data['NumberOfDependents']\ndata.loc[~np.isfinite(data['LogIncomePerPerson']),\n 'LogIncomePerPerson'] = np.nan\ndata['LogIncomePerPerson'].hist(bins=100)\nplt.title('LogIncomePerPerson',\n fontproperties='Times New Roman', size=16)\nplt.xticks(fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.savefig('013_LogIncomePerPerson.png', dpi=1200)\nplt.close()\n\ndata['LogDebt'] = np.log(data['DebtRatio'] * data['LogMonthlyIncome'])\ndata.loc[~np.isfinite(data['LogDebt']), 'LogDebt'] = np.nan\ndata['LogDebt'].hist(bins=100)\nplt.title('LogDebt',\n fontproperties='Times New Roman', size=16)\nplt.xticks(fontproperties='Times New Roman', size=14)\nplt.yticks(fontproperties='Times New Roman', size=14)\nplt.savefig('014_LogDebt.png', dpi=1200)\nplt.close()\n\n# 以上为新增特征\n\noriginal_data = pd.read_csv('cs-training.csv', index_col=0)\noriginal_data = original_data[original_data['age'] != 0]\noriginal_data = original_data[original_data['NumberOfTime30-59DaysPastDueNotWorse'] < 80]\nplt.subplots(figsize=(15, 15))\nseaborn.heatmap(original_data.corr(), annot=True,\n vmax=1, square=True, cmap='Blues')\nplt.title('Heatmap', size=20)\nplt.savefig('015_Heatmap.png', dpi=1200,bbox_inches=trs.Bbox([[-2,-1],[13,14]]))\nplt.close()\n\n# 以上为变量间相关关系\nprint('done')\n" ]
[ [ "matplotlib.transforms.Bbox", "numpy.log", "pandas.read_csv", "matplotlib.pyplot.title", "numpy.isfinite", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.yticks", "matplotlib.pyplot.suptitle", "matplotlib.pyplot.xticks" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
lebr0nli/ncu-schedule2ics
[ "fdf8e8adf6965b29494486f5688b658d5007f83c" ]
[ "main.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 13 22:00:49 2020\n\n@author: AlanLi\n\"\"\"\n\nimport pandas as pd\nfrom datetime import datetime, timedelta\nimport re\nfrom icalendar import Calendar, Event, Alarm\nimport requests\nimport configparser\n\nlogin_url = \"https://cis.ncu.edu.tw/Course/main/login\" # login url\nmy_form_url = \"https://cis.ncu.edu.tw/Course/main/personal/A4Crstable\" # schedule url\nconfig = configparser.ConfigParser()\nconfig.read('./config.ini')\nuser_info = {\"account\": config['login']['username'],\n \"passwd\": config['login']['password']}\n\n# import config for ics\nstart_time = config['start_time']\nstart_time = {x: int(y) for x, y in start_time.items()}\nend_time = config['end_time']\nend_time = {x: int(y) for x, y in end_time.items()}\nannounce_time = int(config['announcement']['announce_time'])\n\n# login\nmain_request = requests.session()\nlogin_respond = main_request.post(login_url, user_info)\nif \"Login successfully\" in login_respond.text:\n print(\"Login successfully\")\nelse:\n raise ValueError(\"Error! Check your login config! 
Or issue the bug for me, thanks!\")\n\n# init Dataframe\ndf = pd.read_html(main_request.get(my_form_url).text)[2]\ndf = df.drop(index=[14, 15])\nstart_date = '2019'\ndf = df.drop(columns=['Unnamed: 0'])\nprint(df)\n\n# init ics\nc = Calendar()\nc.add('prodid', 'Alan Li')\nc['summary'] = 'NCU'\nevent_list = []\nprevious_class = 'nan'\n\n# since new web page use javascript to generate table , so i need to copy this ugly html, sorr :(\nwith open('./building_code.html') as f:\n raw_html = ''.join(f.readlines())\nlocationCode = pd.read_html(raw_html)[0].iloc[5:, 1:3]\nlocationCode = locationCode.dropna().T.reset_index(drop=True).T.reset_index(drop=True)\nlocationCode = locationCode.set_index([0], drop=True).to_dict(orient='index')\n\nfor day in range(0, 7): # day of week\n for class_time in range(0, 14): # 14 class time\n e = Event()\n # print(day, class_time)\n # print(df.iloc[class_time, day])\n\n if previous_class != df.iloc[class_time, day]: # 這堂課變了\n if str(previous_class) != 'nan': # 上一個課程不是nan,結束上個活動\n if class_time != 0:\n event_list[-1].add('dtend',\n datetime(start_time['year'], start_time[\"month\"], start_time[\"day\"] + day,\n class_time + 7, 50, 0))\n else:\n event_list[-1].add('dtend',\n datetime(start_time['year'], start_time[\"month\"], start_time[\"day\"] + day, 21,\n 50, 0))\n if str(df.iloc[class_time, day]) != 'nan': # 新的課不是nan\n regex = re.compile(r'/ \\((.+)\\)')\n rawClassData = regex.search(str(df.iloc[class_time, day]))\n # print(rawLocation)\n location = rawClassData.group(1)\n regex = re.compile(r'(.{1,2})-')\n code_name = regex.search(location).group(1)\n location = locationCode[code_name][1] + ' ' + location\n class_summary = str(df.iloc[class_time, day]).replace(rawClassData.group(0), \"\")\n # print(f\"summary:{classSummary}\")\n e.add('summary', class_summary)\n e.add('location', location)\n e.add('dtstart',\n datetime(start_time['year'], start_time[\"month\"], start_time[\"day\"] + day, class_time + 8, 0, 0))\n alarm = Alarm()\n 
alarm.add(name='action', value='DISPLAY')\n alarm.add(name='trigger', value=timedelta(minutes=-announce_time))\n e.add('rrule', {'freq': 'weekly',\n 'until': datetime(end_time[\"year\"], end_time[\"month\"], end_time[\"day\"], class_time + 7,\n 0, 0)})\n e.add_component(alarm)\n event_list.append(e)\n # else:\n # print('pass')\n previous_class = df.iloc[class_time, day]\nfor event in event_list: # combine all event\n c.add_component(event)\nprint(c)\nwith open('my_schedule.ics', 'wb+') as f:\n f.write(c.to_ical())\n" ]
[ [ "pandas.read_html" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
toxidol/MLSP-FA2021
[ "3109e62f00d6f16e410f4eae341df2e027ee9cc3" ]
[ "calc_snr.py" ]
[ "\"\"\"\nInstall pysepm as follows:\n pip3 install https://github.com/schmiph2/pysepm/archive/master.zip\n\"\"\"\n\nimport soundfile as sf\nimport pysepm\nimport numpy as np\n\n\nif __name__ == '__main__':\n clean_speech, fs = sf.read('eval/SA1_clean.wav')\n noisy_speech, fs = sf.read('eval/SA1_noisy.wav')\n enhanced_speech, fs = sf.read('eval/SA1_enh.wav')\n\n # ensure all signals have the same length\n N = min([len(enhanced_speech), len(clean_speech), len(noisy_speech)])\n clean_speech = clean_speech[:N]\n noisy_speech = noisy_speech[:N]\n enhanced_speech = enhanced_speech[:N]\n\n # normalize all signals\n clean_speech = clean_speech / np.abs(clean_speech).max()\n noisy_speech = noisy_speech / np.abs(noisy_speech).max()\n enhanced_speech = enhanced_speech / np.abs(enhanced_speech).max()\n\n snr_before_enh = pysepm.SNRseg(clean_speech, noisy_speech, fs)\n snr_after_enh = pysepm.SNRseg(clean_speech, enhanced_speech, fs)\n\n print(f'Segmental SNR before enhancement: {snr_before_enh:.4f}')\n print(f'Segmental SNR after enhancement: {snr_after_enh:.4f}')\n" ]
[ [ "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mskinner5278/lsdc
[ "7f8aec813c7f2b3a49f152aac66a06fcb688b453" ]
[ "lsdcGui.py" ]
[ "\"\"\"\nThe GUI for the LSDC system\n\"\"\"\nimport sys\nimport os\nimport string\nimport math\nimport urllib\nimport urllib.request\nfrom io import BytesIO\nfrom epics import PV\nfrom qtpy import QtWidgets\nfrom qtpy import QtCore\nfrom qtpy import QtGui\nfrom qtpy.QtCore import * \nfrom qtpy.QtGui import * \nQString = str\nimport db_lib\nfrom qt_epics.QtEpicsMotorEntry import *\nfrom qt_epics.QtEpicsMotorLabel import *\nfrom qt_epics.QtEpicsPVLabel import *\nfrom qt_epics.QtEpicsPVEntry import *\nimport cv2\nfrom cv2 import *\nfrom PIL import Image\nfrom PIL import ImageQt\nimport daq_utils\nfrom daq_utils import getBlConfig, setBlConfig\nfrom config_params import *\nimport albulaUtils\nimport functools\nfrom QPeriodicTable import *\nfrom PyMca5.PyMcaGui.pymca.McaWindow import McaWindow, ScanWindow\nfrom PyMca5.PyMcaGui.physics.xrf.McaAdvancedFit import McaAdvancedFit\nfrom PyMca5.PyMcaPhysics.xrf import Elements\nfrom element_info import element_info\nimport numpy as np\nimport _thread #TODO python document suggests using threading! 
make this chance once stable\nimport lsdcOlog\n\nimport socket\nhostname = socket.gethostname()\nws_split = hostname.split('ws')\nlogging_file = 'lsdcGuiLog.txt'\n\nimport logging\nfrom logging import handlers\nlogger = logging.getLogger()\nlogging.getLogger().setLevel(logging.INFO)\nlogging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARNING)\nhandler1 = handlers.RotatingFileHandler(logging_file, maxBytes=5000000, backupCount=100)\n#handler2 = handlers.RotatingFileHandler('/var/log/dama/%slsdcGuiLog.txt' % os.environ['BEAMLINE_ID'], maxBytes=50000000)\nmyformat = logging.Formatter('%(asctime)s %(name)-8s %(levelname)-8s %(message)s')\nhandler1.setFormatter(myformat)\n#handler2.setFormatter(myformat)\nlogger.addHandler(handler1)\n#logger.addHandler(handler2)\ntry:\n import ispybLib\nexcept Exception as e:\n logger.error(\"lsdcGui: ISPYB import error, %s\" % e)\nimport raddoseLib\n\nglobal sampleNameDict\nsampleNameDict = {}\n\nglobal containerDict\ncontainerDict = {}\n\ncryostreamTempPV = {'amx': 'AMX:cs700:gasT-I', 'fmx': 'FMX:cs700:gasT-I'}\n\nVALID_EXP_TIMES = {'amx':{'min':0.005, 'max':1, 'digits':3}, 'fmx':{'min':0.01, 'max':10, 'digits':3}}\nVALID_DET_DIST = {'amx':{'min': 100, 'max':500, 'digits':3}, 'fmx':{'min':137, 'max':2000, 'digits':2}}\nVALID_TOTAL_EXP_TIMES = {'amx':{'min':0.005, 'max':300, 'digits':3}, 'fmx':{'min':0.01, 'max':300, 'digits':3}}\nVALID_PREFIX_LENGTH = 25 #TODO centralize with spreadsheet validation?\nVALID_PREFIX_NAME = '[0-9a-zA-Z-_]{0,%s}' % VALID_PREFIX_LENGTH\n\nclass SnapCommentDialog(QtWidgets.QDialog):\n def __init__(self,parent = None):\n QtWidgets.QDialog.__init__(self,parent)\n self.setWindowTitle(\"Snapshot Comment\")\n self.setModal(False)\n vBoxColParams1 = QtWidgets.QVBoxLayout()\n hBoxColParams1 = QtWidgets.QHBoxLayout()\n self.textEdit = QtWidgets.QPlainTextEdit()\n vBoxColParams1.addWidget(self.textEdit)\n self.ologCheckBox = QCheckBox(\"Save to Olog\")\n 
self.ologCheckBox.setChecked(False)\n vBoxColParams1.addWidget(self.ologCheckBox) \n commentButton = QtWidgets.QPushButton(\"Add Comment\") \n commentButton.clicked.connect(self.commentCB)\n cancelButton = QtWidgets.QPushButton(\"Cancel\") \n cancelButton.clicked.connect(self.cancelCB)\n \n hBoxColParams1.addWidget(commentButton)\n hBoxColParams1.addWidget(cancelButton)\n vBoxColParams1.addLayout(hBoxColParams1)\n self.setLayout(vBoxColParams1)\n\n \n def cancelCB(self):\n self.comment = \"\"\n self.useOlog = False\n self.reject()\n\n def commentCB(self):\n self.comment = self.textEdit.toPlainText()\n self.useOlog = self.ologCheckBox.isChecked()\n self.accept()\n \n @staticmethod\n def getComment(parent = None):\n dialog = SnapCommentDialog(parent)\n result = dialog.exec_()\n return (dialog.comment, dialog.useOlog,result == QDialog.Accepted)\n\nclass RasterExploreDialog(QtWidgets.QDialog):\n def __init__(self):\n QDialog.__init__(self)\n self.setModal(False)\n self.setWindowTitle(\"Raster Explore\")\n vBoxParams1 = QtWidgets.QVBoxLayout()\n hBoxParams1 = QtWidgets.QHBoxLayout()\n hBoxParams2 = QtWidgets.QHBoxLayout()\n hBoxParams3 = QtWidgets.QHBoxLayout()\n spotCountLabel = QtWidgets.QLabel('Spot Count:')\n spotCountLabel.setFixedWidth(120)\n self.spotCount_ledit = QtWidgets.QLabel()\n self.spotCount_ledit.setFixedWidth(60)\n hBoxParams1.addWidget(spotCountLabel)\n hBoxParams1.addWidget(self.spotCount_ledit)\n intensityLabel = QtWidgets.QLabel('Total Intensity:')\n intensityLabel.setFixedWidth(120)\n self.intensity_ledit = QtWidgets.QLabel()\n self.intensity_ledit.setFixedWidth(60)\n hBoxParams2.addWidget(intensityLabel)\n hBoxParams2.addWidget(self.intensity_ledit)\n resoLabel = QtWidgets.QLabel('Resolution:')\n resoLabel.setFixedWidth(120)\n self.reso_ledit = QtWidgets.QLabel()\n self.reso_ledit.setFixedWidth(60)\n hBoxParams3.addWidget(resoLabel)\n hBoxParams3.addWidget(self.reso_ledit)\n\n self.buttons = QDialogButtonBox(\n QDialogButtonBox.Cancel,\n 
Qt.Horizontal, self)\n self.buttons.buttons()[0].clicked.connect(self.rasterExploreCancelCB)\n vBoxParams1.addLayout(hBoxParams1)\n vBoxParams1.addLayout(hBoxParams2)\n vBoxParams1.addLayout(hBoxParams3)\n vBoxParams1.addWidget(self.buttons)\n self.setLayout(vBoxParams1)\n\n\n def setSpotCount(self,val):\n self.spotCount_ledit.setText(str(val))\n\n def setTotalIntensity(self,val):\n self.intensity_ledit.setText(str(val))\n\n def setResolution(self,val):\n self.reso_ledit.setText(str(val))\n\n def rasterExploreCancelCB(self):\n self.done(QDialog.Rejected)\n\n\nclass StaffScreenDialog(QFrame): \n def __init__(self,parent = None):\n self.parent=parent\n QFrame.__init__(self)\n self.setWindowTitle(\"Staff Only\")\n self.spotNodeCount = 8\n self.fastDPNodeCount = 4\n self.cpuCount = 28\n vBoxColParams1 = QtWidgets.QVBoxLayout()\n hBoxColParams0 = QtWidgets.QHBoxLayout() \n hBoxColParams1 = QtWidgets.QHBoxLayout() \n hBoxColParams2 = QtWidgets.QHBoxLayout()\n hBoxColParams3 = QtWidgets.QHBoxLayout()\n hBoxFastDP = QtWidgets.QHBoxLayout()\n hBoxSpotfinder = QtWidgets.QHBoxLayout()\n puckToDewarButton = QtWidgets.QPushButton(\"Puck to Dewar...\")\n puckToDewarButton.clicked.connect(self.parent.puckToDewarCB)\n removePuckButton = QtWidgets.QPushButton(\"Remove Puck...\")\n removePuckButton.clicked.connect(self.parent.removePuckCB)\n hBoxColParams0.addWidget(puckToDewarButton)\n hBoxColParams0.addWidget(removePuckButton ) \n self.robotOnCheckBox = QCheckBox(\"Robot (On)\")\n if (getBlConfig(\"robot_online\") == 1):\n self.robotOnCheckBox.setChecked(True)\n else:\n self.robotOnCheckBox.setChecked(False) \n self.robotOnCheckBox.stateChanged.connect(self.robotOnCheckCB)\n self.topViewCheckOnCheckBox = QCheckBox(\"TopViewCheck (On)\")\n if (getBlConfig(TOP_VIEW_CHECK) == 1):\n self.topViewCheckOnCheckBox.setChecked(True)\n else:\n self.topViewCheckOnCheckBox.setChecked(False) \n self.topViewCheckOnCheckBox.stateChanged.connect(self.topViewOnCheckCB)\n self.queueCollectOnCheckBox 
= QCheckBox(\"Queue Collect\")\n hBoxColParams1.addWidget(self.queueCollectOnCheckBox)\n if (getBlConfig(\"queueCollect\") == 1):\n self.queueCollectOnCheckBox.setChecked(True)\n else:\n self.queueCollectOnCheckBox.setChecked(False) \n self.queueCollectOnCheckBox.stateChanged.connect(self.queueCollectOnCheckCB)\n self.vertRasterOnCheckBox = QCheckBox(\"Vert. Raster\")\n hBoxColParams1.addWidget(self.vertRasterOnCheckBox) \n if (getBlConfig(\"vertRasterOn\") == 1):\n self.vertRasterOnCheckBox.setChecked(True)\n else:\n self.vertRasterOnCheckBox.setChecked(False) \n self.vertRasterOnCheckBox.stateChanged.connect(self.vertRasterOnCheckCB)\n self.procRasterOnCheckBox = QCheckBox(\"Process Raster\")\n hBoxColParams1.addWidget(self.procRasterOnCheckBox) \n if (getBlConfig(\"rasterProcessFlag\") == 1):\n self.procRasterOnCheckBox.setChecked(True)\n else:\n self.procRasterOnCheckBox.setChecked(False) \n self.procRasterOnCheckBox.stateChanged.connect(self.procRasterOnCheckCB)\n self.guiRemoteOnCheckBox = QCheckBox(\"GUI Remote\")\n hBoxColParams1.addWidget(self.guiRemoteOnCheckBox) \n if (getBlConfig(\"omegaMonitorPV\") == \"VAL\"):\n self.guiRemoteOnCheckBox.setChecked(True)\n else:\n self.guiRemoteOnCheckBox.setChecked(False) \n self.guiRemoteOnCheckBox.stateChanged.connect(self.guiRemoteOnCheckCB)\n self.enableMountCheckBox = QCheckBox(\"Enable Mount\")\n if (getBlConfig(\"mountEnabled\") == 1):\n self.enableMountCheckBox.setChecked(True)\n else:\n self.enableMountCheckBox.setChecked(False) \n self.enableMountCheckBox.stateChanged.connect(self.enableMountCheckCB)\n self.unmountColdButton = QtWidgets.QPushButton(\"Unmount Cold\")\n self.unmountColdButton.clicked.connect(self.unmountColdCB)\n self.openPort1Button = QtWidgets.QPushButton(\"Open Port 1\")\n self.openPort1Button.clicked.connect(self.openPort1CB)\n self.closePortsButton = QtWidgets.QPushButton(\"Close Ports\")\n self.closePortsButton.clicked.connect(self.closePortsCB)\n self.warmupButton = 
QtWidgets.QPushButton(\"Dry Gripper\") \n self.warmupButton.clicked.connect(self.parent.dryGripperCB)\n self.enableTScreenButton = QtWidgets.QPushButton(\"Enable Dewar Tscreen\") \n self.enableTScreenButton.clicked.connect(self.parent.enableTScreenGripperCB)\n self.parkButton = QtWidgets.QPushButton(\"Park Gripper\") \n self.parkButton.clicked.connect(self.parent.parkGripperCB)\n self.homePinsButton = QtWidgets.QPushButton(\"Home Pins\")\n self.homePinsButton.clicked.connect(self.homePinsCB)\n self.clearMountedSampleButton = QtWidgets.QPushButton(\"Clear Mounted Sample\")\n self.clearMountedSampleButton.clicked.connect(self.clearMountedSampleCB)\n hBoxColParams2.addWidget(self.openPort1Button)\n hBoxColParams2.addWidget(self.closePortsButton) \n hBoxColParams2.addWidget(self.unmountColdButton)\n hBoxColParams2.addWidget(self.warmupButton)\n hBoxColParams2.addWidget(self.enableTScreenButton)\n hBoxColParams2.addWidget(self.parkButton) \n hBoxColParams2.addWidget(self.clearMountedSampleButton)\n hBoxColParams1.addWidget(self.homePinsButton) \n self.setFastDPNodesButton = QtWidgets.QPushButton(\"Set FastDP Nodes\")\n self.setFastDPNodesButton.clicked.connect(self.setFastDPNodesCB)\n hBoxFastDP.addWidget(self.setFastDPNodesButton) \n self.fastDPNodeEntryList = []\n nodeList = self.getFastDPNodeList() \n for i in range (0,self.fastDPNodeCount):\n self.fastDPNodeEntryList.append(QtWidgets.QLineEdit())\n self.fastDPNodeEntryList[i].setFixedWidth(30)\n self.fastDPNodeEntryList[i].setText(str(nodeList[i]))\n hBoxFastDP.addWidget(self.fastDPNodeEntryList[i])\n self.setBeamcenterButton = QtWidgets.QPushButton(\"Set Beamcenter\")\n self.setBeamcenterButton.clicked.connect(self.setBeamcenterCB)\n hBoxFastDP.addWidget(self.setBeamcenterButton)\n self.beamcenterX_ledit = QtWidgets.QLineEdit()\n self.beamcenterX_ledit.setText(str(self.parent.beamCenterX_pv.get())) \n self.beamcenterY_ledit = QtWidgets.QLineEdit()\n 
self.beamcenterY_ledit.setText(str(self.parent.beamCenterY_pv.get()))\n hBoxFastDP.addWidget(self.beamcenterX_ledit)\n hBoxFastDP.addWidget(self.beamcenterY_ledit) \n self.setSpotNodesButton = QtWidgets.QPushButton(\"Set Spotfinder Nodes\")\n self.setSpotNodesButton.clicked.connect(self.setSpotNodesCB)\n self.lockGuiButton = QtWidgets.QPushButton(\"Lock\")\n self.lockGuiButton.clicked.connect(self.lockGuiCB)\n self.unLockGuiButton = QtWidgets.QPushButton(\"unLock\")\n self.unLockGuiButton.clicked.connect(self.unLockGuiCB)\n hBoxSpotfinder.addWidget(self.lockGuiButton)\n hBoxSpotfinder.addWidget(self.unLockGuiButton) \n hBoxSpotfinder.addWidget(self.setSpotNodesButton) \n self.spotNodeEntryList = []\n nodeList = self.getSpotNodeList() \n for i in range (0,self.spotNodeCount):\n self.spotNodeEntryList.append(QtWidgets.QLineEdit())\n self.spotNodeEntryList[i].setFixedWidth(30)\n self.spotNodeEntryList[i].setText(str(nodeList[i])) \n hBoxSpotfinder.addWidget(self.spotNodeEntryList[i])\n robotGB = QtWidgets.QGroupBox()\n robotGB.setTitle(\"Robot\")\n hBoxRobot1 = QtWidgets.QHBoxLayout()\n vBoxRobot1 = QtWidgets.QVBoxLayout()\n self.recoverRobotButton = QtWidgets.QPushButton(\"Recover Robot\")\n self.recoverRobotButton.clicked.connect(self.recoverRobotCB)\n self.rebootEMBLButton = QtWidgets.QPushButton(\"Reboot EMBL\")\n self.rebootEMBLButton.clicked.connect(self.rebootEMBL_CB)\n self.restartEMBLButton = QtWidgets.QPushButton(\"Start EMBL\")\n self.restartEMBLButton.clicked.connect(self.restartEMBL_CB)\n self.openGripperButton = QtWidgets.QPushButton(\"Open Gripper\")\n self.openGripperButton.clicked.connect(self.openGripper_CB)\n self.closeGripperButton = QtWidgets.QPushButton(\"Close Gripper\")\n self.closeGripperButton.clicked.connect(self.closeGripper_CB)\n hBoxRobot1.addWidget(self.robotOnCheckBox)\n hBoxRobot1.addWidget(self.topViewCheckOnCheckBox)\n hBoxRobot1.addWidget(self.enableMountCheckBox) \n hBoxRobot1.addWidget(self.recoverRobotButton)\n 
hBoxRobot1.addWidget(self.rebootEMBLButton)\n hBoxRobot1.addWidget(self.restartEMBLButton)\n hBoxRobot1.addWidget(self.openGripperButton)\n hBoxRobot1.addWidget(self.closeGripperButton) \n vBoxRobot1.addLayout(hBoxRobot1)\n vBoxRobot1.addLayout(hBoxColParams2) \n robotGB.setLayout(vBoxRobot1)\n self.buttons = QDialogButtonBox(\n QDialogButtonBox.Ok,\n Qt.Horizontal, self)\n self.buttons.buttons()[0].clicked.connect(self.screenDefaultsOKCB)\n vBoxColParams1.addLayout(hBoxColParams0)\n vBoxColParams1.addLayout(hBoxColParams1)\n vBoxColParams1.addLayout(hBoxFastDP)\n vBoxColParams1.addLayout(hBoxSpotfinder) \n vBoxColParams1.addWidget(robotGB)\n vBoxColParams1.addWidget(self.buttons)\n self.setLayout(vBoxColParams1) \n self.show()\n\n\n def getSpotNodeList(self):\n nodeList = []\n for i in range (0,self.spotNodeCount):\n nodeList.append(int(getBlConfig(\"spotNode\"+str(i+1)).split('-')[1]))\n return nodeList\n \n \n def getFastDPNodeList(self):\n nodeList = []\n for i in range (0,self.fastDPNodeCount):\n nodeList.append(int(getBlConfig(\"fastDPNode\"+str(i+1)).split('-')[1]))\n return nodeList\n\n def setFastDPNodesCB(self):\n comm_s = \"fastDPNodes(\"\n for i in range (0,self.fastDPNodeCount):\n comm_s = comm_s+str(self.fastDPNodeEntryList[i].text())\n if (i==self.fastDPNodeCount-1):\n comm_s = comm_s+\")\"\n else:\n comm_s = comm_s+\",\"\n logger.info(comm_s)\n self.parent.send_to_server(comm_s)\n\n def lockGuiCB(self):\n self.parent.send_to_server(\"lockControl\")\n\n def unLockGuiCB(self):\n self.parent.send_to_server(\"unlockControl\")\n \n def setSpotNodesCB(self):\n comm_s = \"spotNodes(\"\n for i in range (0,self.spotNodeCount):\n comm_s = comm_s+str(self.spotNodeEntryList[i].text())\n if (i==self.spotNodeCount-1):\n comm_s = comm_s+\")\"\n else:\n comm_s = comm_s+\",\"\n logger.info(comm_s)\n self.parent.send_to_server(comm_s) \n\n \n def unmountColdCB(self):\n self.parent.send_to_server(\"unmountCold()\")\n\n def openPort1CB(self):\n 
        self.parent.send_to_server("openPort(1)")

    def setBeamcenterCB(self):
        # Send both beam-center coordinates from the two line edits.
        self.parent.send_to_server("set_beamcenter (" + str(self.beamcenterX_ledit.text()) + "," + str(self.beamcenterY_ledit.text()) + ")")

    def closePortsCB(self):
        self.parent.send_to_server("closePorts()")

    def clearMountedSampleCB(self):
        self.parent.send_to_server("clearMountedSample()")

    def recoverRobotCB(self):
        # Uses the auxiliary server channel, unlike most commands here.
        self.parent.aux_send_to_server("recoverRobot()")

    def rebootEMBL_CB(self):
        self.parent.aux_send_to_server("rebootEMBL()")

    def restartEMBL_CB(self):
        self.parent.send_to_server("restartEMBL()")

    def openGripper_CB(self):
        self.parent.send_to_server("openGripper()")

    def closeGripper_CB(self):
        self.parent.send_to_server("closeGripper()")

    def homePinsCB(self):
        self.parent.send_to_server("homePins()")

    def robotOnCheckCB(self,state):
        # Persist checkbox state (1/0) in the beamline config.
        if state == QtCore.Qt.Checked:
            setBlConfig("robot_online",1)
        else:
            setBlConfig("robot_online",0)

    def topViewOnCheckCB(self,state):
        if state == QtCore.Qt.Checked:
            setBlConfig(TOP_VIEW_CHECK,1)
        else:
            setBlConfig(TOP_VIEW_CHECK,0)

    def vertRasterOnCheckCB(self,state):
        if state == QtCore.Qt.Checked:
            setBlConfig("vertRasterOn",1)
        else:
            setBlConfig("vertRasterOn",0)

    def procRasterOnCheckCB(self,state):
        if state == QtCore.Qt.Checked:
            setBlConfig("rasterProcessFlag",1)
        else:
            setBlConfig("rasterProcessFlag",0)

    def guiRemoteOnCheckCB(self,state):
        # Checked -> monitor omega via the .VAL field, else .RBV.
        if state == QtCore.Qt.Checked:
            setBlConfig("omegaMonitorPV","VAL")
        else:
            setBlConfig("omegaMonitorPV","RBV")

    def queueCollectOnCheckCB(self,state):
        if state == QtCore.Qt.Checked:
            setBlConfig("queueCollect",1)
        else:
            setBlConfig("queueCollect",0)

    def enableMountCheckCB(self,state):
        if state == QtCore.Qt.Checked:
            setBlConfig("mountEnabled",1)
        else:
            setBlConfig("mountEnabled",0)

    def screenDefaultsCancelCB(self):
        # Cancel just hides this non-modal dialog.
        self.hide()

    def screenDefaultsOKCB(self):
        self.hide()


class UserScreenDialog(QFrame):
    """'User Extras' panel: governor state, robot, zebra, detector and beam
    controls, backed by the parent GUI's server connection and PVs."""
    def __init__(self,parent = None):
        self.parent=parent
        QFrame.__init__(self)
        self.setWindowTitle("User Extras")
        vBoxColParams1 = QtWidgets.QVBoxLayout()
        hBoxColParams1 = QtWidgets.QHBoxLayout()
        hBoxColParams2 = QtWidgets.QHBoxLayout()
        hBoxColParams25 = QtWidgets.QHBoxLayout()
        hBoxColParams3 = QtWidgets.QHBoxLayout()
        # Governor state buttons.
        govLabel = QtWidgets.QLabel('Set Governor State:')
        self.SEbutton = QtWidgets.QPushButton("SE")
        self.SEbutton.clicked.connect(self.SEgovCB)
        self.SAbutton = QtWidgets.QPushButton("SA")
        self.SAbutton.clicked.connect(self.SAgovCB)
        self.DAbutton = QtWidgets.QPushButton("DA")
        self.DAbutton.clicked.connect(self.DAgovCB)
        self.BLbutton = QtWidgets.QPushButton("BL")
        self.BLbutton.clicked.connect(self.BLgovCB)
        hBoxColParams1.addWidget(govLabel)
        hBoxColParams1.addWidget(self.SEbutton)
        hBoxColParams1.addWidget(self.SAbutton)
        hBoxColParams1.addWidget(self.DAbutton)
        hBoxColParams1.addWidget(self.BLbutton)
        govLabel2 = QtWidgets.QLabel('Current Governor State:')
        self.governorMessage = QtEpicsPVLabel(daq_utils.pvLookupDict["governorMessage"],self,140,highlight_on_change=False)
        hBoxColParams2.addWidget(govLabel2)
        hBoxColParams2.addWidget(self.governorMessage.getEntry())

        self.openShutterButton = QtWidgets.QPushButton("Open Photon Shutter")
        self.openShutterButton.clicked.connect(self.parent.openPhotonShutterCB)
        hBoxColParams25.addWidget(self.openShutterButton)
        # Robot group.
        robotGB = QtWidgets.QGroupBox()
        robotGB.setTitle("Robot")

        self.unmountColdButton = QtWidgets.QPushButton("Unmount Cold")
        self.unmountColdButton.clicked.connect(self.unmountColdCB)
        self.testRobotButton = QtWidgets.QPushButton("Test Robot")
        self.testRobotButton.clicked.connect(self.testRobotCB)
        self.recoverRobotButton = QtWidgets.QPushButton("Recover Robot")
        self.recoverRobotButton.clicked.connect(self.recoverRobotCB)
        self.dryGripperButton = QtWidgets.QPushButton("Dry Gripper")
        self.dryGripperButton.clicked.connect(self.dryGripperCB)

        hBoxColParams3.addWidget(self.unmountColdButton)
        hBoxColParams3.addWidget(self.testRobotButton)
        hBoxColParams3.addWidget(self.recoverRobotButton)
        hBoxColParams3.addWidget(self.dryGripperButton)
        robotGB.setLayout(hBoxColParams3)

        # Zebra (timing) and Eiger detector groups.
        zebraGB = QtWidgets.QGroupBox()
        detGB = QtWidgets.QGroupBox()
        zebraGB.setTitle("Zebra (Timing)")
        detGB.setTitle("Eiger Detector")
        hBoxDet1 = QtWidgets.QHBoxLayout()
        hBoxDet2 = QtWidgets.QHBoxLayout()
        vBoxDet1 = QtWidgets.QVBoxLayout()
        self.stopDetButton = QtWidgets.QPushButton("Stop")
        self.stopDetButton.clicked.connect(self.stopDetCB)
        self.rebootDetIocButton = QtWidgets.QPushButton("Reboot Det IOC")
        self.rebootDetIocButton.clicked.connect(self.rebootDetIocCB)
        detStatLabel = QtWidgets.QLabel('Detector Status:')
        self.detMessage_ledit = QtWidgets.QLabel()
        hBoxDet1.addWidget(self.stopDetButton)
        hBoxDet1.addWidget(self.rebootDetIocButton)
        hBoxDet2.addWidget(detStatLabel)
        hBoxDet2.addWidget(self.detMessage_ledit)

        # Beam group; slit-gap widgets exist only on the FMX beamline.
        beamGB = QtWidgets.QGroupBox()
        beamGB.setTitle("Beam")
        hBoxBeam1 = QtWidgets.QHBoxLayout()
        hBoxBeam2 = QtWidgets.QHBoxLayout()
        hBoxBeam3 = QtWidgets.QHBoxLayout()
        vBoxBeam = QtWidgets.QVBoxLayout()
        if (daq_utils.beamline == "fmx"):
            slit1XLabel = QtWidgets.QLabel('Slit 1 X Gap:')
            slit1XLabel.setAlignment(QtCore.Qt.AlignCenter)
            slit1XRBLabel = QtWidgets.QLabel("Readback:")
            self.slit1XRBVLabel = QtEpicsPVLabel(daq_utils.motor_dict["slit1XGap"] + ".RBV",self,70)
            slit1XSPLabel = QtWidgets.QLabel("SetPoint:")
            self.slit1XMotor_ledit = QtWidgets.QLineEdit()
            self.slit1XMotor_ledit.returnPressed.connect(self.setSlit1XCB)
            self.slit1XMotor_ledit.setText(str(self.parent.slit1XGapSP_pv.get()))

            slit1YLabel = QtWidgets.QLabel('Slit 1 Y Gap:')
            slit1YLabel.setAlignment(QtCore.Qt.AlignCenter)
            slit1YRBLabel = QtWidgets.QLabel("Readback:")
            self.slit1YRBVLabel = QtEpicsPVLabel(daq_utils.motor_dict["slit1YGap"] + ".RBV",self,70)
            slit1YSPLabel = QtWidgets.QLabel("SetPoint:")
            self.slit1YMotor_ledit = QtWidgets.QLineEdit()
            self.slit1YMotor_ledit.setText(str(self.parent.slit1YGapSP_pv.get()))
            self.slit1YMotor_ledit.returnPressed.connect(self.setSlit1YCB)

        sampleFluxLabelDesc = QtWidgets.QLabel("Sample Flux:")
        sampleFluxLabelDesc.setFixedWidth(80)
        self.sampleFluxLabel = QtWidgets.QLabel()
        self.sampleFluxLabel.setText('%E' % self.parent.sampleFluxPV.get())
        hBoxBeam3.addWidget(sampleFluxLabelDesc)
        hBoxBeam3.addWidget(self.sampleFluxLabel)

        if (daq_utils.beamline == "fmx"):
            hBoxBeam1.addWidget(slit1XLabel)
            hBoxBeam1.addWidget(slit1XRBLabel)
            hBoxBeam1.addWidget(self.slit1XRBVLabel.getEntry())
            hBoxBeam1.addWidget(slit1XSPLabel)
            hBoxBeam1.addWidget(self.slit1XMotor_ledit)
            hBoxBeam2.addWidget(slit1YLabel)
            hBoxBeam2.addWidget(slit1YRBLabel)
            hBoxBeam2.addWidget(self.slit1YRBVLabel.getEntry())
            hBoxBeam2.addWidget(slit1YSPLabel)
            hBoxBeam2.addWidget(self.slit1YMotor_ledit)
        # hBoxBeam1/2 are empty off-FMX, so adding them unconditionally is harmless.
        vBoxBeam.addLayout(hBoxBeam1)
        vBoxBeam.addLayout(hBoxBeam2)
        vBoxBeam.addLayout(hBoxBeam3)
        beamGB.setLayout(vBoxBeam)

        vBoxDet1.addLayout(hBoxDet1)
        vBoxDet1.addLayout(hBoxDet2)
        detGB.setLayout(vBoxDet1)
        hBoxColParams4 = QtWidgets.QHBoxLayout()
        vBoxZebraParams4 = QtWidgets.QVBoxLayout()
        self.resetZebraButton = QtWidgets.QPushButton("Reset Zebra")
        self.resetZebraButton.clicked.connect(self.resetZebraCB)
        self.rebootZebraButton = QtWidgets.QPushButton("Reboot Zebra IOC")
        self.rebootZebraButton.clicked.connect(self.rebootZebraIOC_CB)
        hBoxColParams5 = QtWidgets.QHBoxLayout()
        # Disabled checkboxes act as read-only zebra status indicators.
        self.zebraArmCheckBox = QCheckBox("Arm")
        self.zebraArmCheckBox.setEnabled(False)
        self.zebraPulseCheckBox = QCheckBox("Pulse")
        self.zebraPulseCheckBox.setEnabled(False)
        self.zebraDownloadCheckBox = QCheckBox("Downloading")
        self.zebraDownloadCheckBox.setEnabled(False)
        self.zebraSentTriggerCheckBox = QCheckBox("Trigger Sent")
        self.zebraSentTriggerCheckBox.setEnabled(False)
        self.zebraReturnedTriggerCheckBox = QCheckBox("Trigger Returned")
        self.zebraReturnedTriggerCheckBox.setEnabled(False)
        hBoxColParams4.addWidget(self.resetZebraButton)
        hBoxColParams4.addWidget(self.rebootZebraButton)
        hBoxColParams5.addWidget(self.zebraArmCheckBox)
        hBoxColParams5.addWidget(self.zebraPulseCheckBox)
        hBoxColParams5.addWidget(self.zebraDownloadCheckBox)
        hBoxColParams5.addWidget(self.zebraSentTriggerCheckBox)
        hBoxColParams5.addWidget(self.zebraReturnedTriggerCheckBox)
        vBoxZebraParams4.addLayout(hBoxColParams4)
        vBoxZebraParams4.addLayout(hBoxColParams5)
        zebraGB.setLayout(vBoxZebraParams4)

        # Single OK button closes (hides) the panel.
        self.buttons = QDialogButtonBox(
            QDialogButtonBox.Ok,
            Qt.Horizontal, self)
        self.buttons.buttons()[0].clicked.connect(self.userScreenOKCB)

        vBoxColParams1.addLayout(hBoxColParams1)
        vBoxColParams1.addLayout(hBoxColParams2)
        vBoxColParams1.addLayout(hBoxColParams25)
        vBoxColParams1.addWidget(robotGB)
        vBoxColParams1.addWidget(zebraGB)
        vBoxColParams1.addWidget(detGB)
        vBoxColParams1.addWidget(beamGB)

        vBoxColParams1.addWidget(self.buttons)
        self.setLayout(vBoxColParams1)

    def setSlit1XCB(self):
        # Send the slit-1 X gap setpoint typed into the line edit.
        comm_s = "setSlit1X(" + str(self.slit1XMotor_ledit.text()) + ")"
        self.parent.send_to_server(comm_s)

    def setSlit1YCB(self):
        comm_s = "setSlit1Y(" + str(self.slit1YMotor_ledit.text()) + ")"
        self.parent.send_to_server(comm_s)

    def unmountColdCB(self):
        self.parent.send_to_server("unmountCold()")

    def testRobotCB(self):
        self.parent.send_to_server("testRobot()")

    def recoverRobotCB(self):
        self.parent.send_to_server("recoverRobot()")

    def dryGripperCB(self):
        self.parent.send_to_server("dryGripper()")

    def stopDetCB(self):
        logger.info('stopping detector')
        self.parent.stopDet_pv.put(0)

    def rebootDetIocCB(self):
        logger.info('rebooting detector IOC')
        self.parent.rebootDetIOC_pv.put(1) # no differences visible, but zebra IOC reboot works, this doesn't!

    def resetZebraCB(self):
        logger.info('resetting zebra')
        self.parent.resetZebra_pv.put(1)

    def rebootZebraIOC_CB(self):
        logger.info('rebooting zebra IOC')
        self.parent.rebootZebraIOC_pv.put(1)

    def SEgovCB(self):
        self.parent.send_to_server("setGovRobot('SE')")

    def SAgovCB(self):
        self.parent.send_to_server("setGovRobot('SA')")

    def DAgovCB(self):
        self.parent.send_to_server("setGovRobot('DA')")

    def BLgovCB(self):
        self.parent.send_to_server("setGovRobot('BL')")

    def userScreenOKCB(self):
        self.hide()

    def screenDefaultsCancelCB(self):
        # NOTE(review): this class derives from QFrame, which has no done();
        # these two handlers look like QDialog leftovers — confirm they are unused.
        self.done(QDialog.Rejected)

    def screenDefaultsOKCB(self):
        self.done(QDialog.Accepted)


class ScreenDefaultsDialog(QtWidgets.QDialog):
    """Non-modal 'Raster Params' dialog: raster collection defaults plus
    dozor/dials spotfinder tuning, persisted via setBlConfig on OK."""
    def __init__(self,parent = None):
        QtWidgets.QDialog.__init__(self,parent)
        self.parent=parent
        self.setModal(False)
        self.setWindowTitle("Raster Params")

        vBoxColParams1 = QtWidgets.QVBoxLayout()

        collectionGB = QtWidgets.QGroupBox()
        collectionGB.setTitle('Collection parameters')

        hBoxColParams2 = QtWidgets.QHBoxLayout()
        colRangeLabel = QtWidgets.QLabel('Oscillation Width:')
        colRangeLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.osc_range_ledit = QtWidgets.QLineEdit() # note, this is for rastering! same name used for data collections
        self.setGuiValues({'osc_range':getBlConfig("rasterDefaultWidth")})
        self.osc_range_ledit.returnPressed.connect(self.screenDefaultsOKCB)
        colExptimeLabel = QtWidgets.QLabel('ExposureTime:')
        colExptimeLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.exp_time_ledit = QtWidgets.QLineEdit()
        self.setGuiValues({'exp_time':getBlConfig("rasterDefaultTime")})
        self.exp_time_ledit.returnPressed.connect(self.screenDefaultsOKCB)
        # Validator bounds come from the per-beamline VALID_EXP_TIMES table.
        self.exp_time_ledit.setValidator(QtGui.QDoubleValidator(VALID_EXP_TIMES[daq_utils.beamline]['min'], VALID_EXP_TIMES[daq_utils.beamline]['max'], VALID_EXP_TIMES[daq_utils.beamline]['digits']))
        self.exp_time_ledit.textChanged.connect(self.checkEntryState)

        colTransLabel = QtWidgets.QLabel('Transmission (0.0-1.0):')
        colTransLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.trans_ledit = QtWidgets.QLineEdit()
        self.setGuiValues({'transmission':getBlConfig("rasterDefaultTrans")})
        self.trans_ledit.returnPressed.connect(self.screenDefaultsOKCB)
        hBoxColParams2.addWidget(colRangeLabel)
        hBoxColParams2.addWidget(self.osc_range_ledit)
        hBoxColParams2.addWidget(colExptimeLabel)
        hBoxColParams2.addWidget(self.exp_time_ledit)
        hBoxColParams2.addWidget(colTransLabel)
        hBoxColParams2.addWidget(self.trans_ledit)
        collectionGB.setLayout(hBoxColParams2)

        dozorGB = QtWidgets.QGroupBox()
        dozorGB.setTitle('Dozor Parameter')
        hBoxColParams2a = QtWidgets.QHBoxLayout()
        dozorSpotLevelLabel = QtWidgets.QLabel('Dozor Spot Level\n(Applies immediately)')
        self.dozorSpotLevel = QComboBox()
        self.dozorSpotLevel.addItems(['5', '6', '7', '8'])
        self.dozorSpotLevel.currentIndexChanged.connect(self.dozorSpotLevelChangedCB)
        hBoxColParams2a.addWidget(dozorSpotLevelLabel)
        hBoxColParams2a.addWidget(self.dozorSpotLevel)
        dozorGB.setLayout(hBoxColParams2a)

        dialsGB = QtWidgets.QGroupBox()
        dialsGB.setTitle('Dials Parameters')
        vBoxDialsParams = QtWidgets.QVBoxLayout()
        hBoxColParams2b = QtWidgets.QHBoxLayout()
        colMinSpotLabel = QtWidgets.QLabel('Min Spot Size:')
        colMinSpotLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.minSpot_ledit = QtWidgets.QLineEdit()
        self.minSpot_ledit.setText(str(getBlConfig("rasterDefaultMinSpotSize")))
        self.minSpot_ledit.returnPressed.connect(self.screenDefaultsOKCB)
        hBoxColParams2b.addWidget(colMinSpotLabel)
        hBoxColParams2b.addWidget(self.minSpot_ledit)

        # Raster tuning row: resolution constraint + ice-ring exclusion.
        self.hBoxRasterLayout2 = QtWidgets.QHBoxLayout()
        rasterTuneLabel = QtWidgets.QLabel('Raster\nTuning')  # NOTE(review): never added to a layout — confirm intent
        self.rasterResoCheckBox = QCheckBox("Constrain Resolution")
        self.rasterResoCheckBox.stateChanged.connect(self.rasterResoCheckCB)
        rasterLowResLabel = QtWidgets.QLabel('LowRes:')
        self.rasterLowRes = QtWidgets.QLineEdit()
        self.rasterLowRes.setText(str(getBlConfig(RASTER_TUNE_LOW_RES)))
        self.rasterLowRes.returnPressed.connect(self.screenDefaultsOKCB)
        rasterHighResLabel = QtWidgets.QLabel('HighRes:')
        self.rasterHighRes = QtWidgets.QLineEdit()
        self.rasterHighRes.setText(str(getBlConfig(RASTER_TUNE_HIGH_RES)))
        self.rasterHighRes.returnPressed.connect(self.screenDefaultsOKCB)
        if (getBlConfig(RASTER_TUNE_RESO_FLAG) == 1):
            resoFlag = True
        else:
            resoFlag = False
            self.rasterHighRes.setEnabled(False)
            self.rasterLowRes.setEnabled(False)
        self.rasterResoCheckBox.setChecked(resoFlag)
        self.rasterIceRingCheckBox = QCheckBox("Ice Ring")
        self.rasterIceRingCheckBox.setChecked(False)
        self.rasterIceRingCheckBox.stateChanged.connect(self.rasterIceRingCheckCB)
        self.rasterIceRingWidth = QtWidgets.QLineEdit()
        self.rasterIceRingWidth.setText(str(getBlConfig(RASTER_TUNE_ICE_RING_WIDTH)))
        self.rasterIceRingWidth.returnPressed.connect(self.screenDefaultsOKCB)
        self.rasterIceRingWidth.setEnabled(False)
        if (getBlConfig(RASTER_TUNE_ICE_RING_FLAG) == 1):
            iceRingFlag = True
        else:
            iceRingFlag = False
        self.rasterIceRingCheckBox.setChecked(iceRingFlag)
        self.hBoxRasterLayout2.addWidget(self.rasterResoCheckBox)
        self.hBoxRasterLayout2.addWidget(rasterLowResLabel)
        self.hBoxRasterLayout2.addWidget(self.rasterLowRes)
        self.hBoxRasterLayout2.addWidget(rasterHighResLabel)
        self.hBoxRasterLayout2.addWidget(self.rasterHighRes)
        self.hBoxRasterLayout2.addWidget(self.rasterIceRingCheckBox)
        self.hBoxRasterLayout2.addWidget(self.rasterIceRingWidth)

        # Threshold-tuning row (kernel size + background/strong sigmas).
        self.hBoxRasterLayout3 = QtWidgets.QHBoxLayout()
        self.rasterThreshCheckBox = QCheckBox("Tune Threshold")
        if (getBlConfig("rasterThreshFlag") == 1):
            threshFlag = True
        else:
            threshFlag = False
        self.rasterThreshCheckBox.setChecked(threshFlag)
        self.rasterThreshCheckBox.stateChanged.connect(self.rasterThreshCheckCB)

        rasterThreshKernSizeLabel = QtWidgets.QLabel('KernelSize')
        self.rasterThreshKernSize = QtWidgets.QLineEdit()
        self.rasterThreshKernSize.setText(str(getBlConfig("rasterThreshKernSize")))
        self.rasterThreshKernSize.returnPressed.connect(self.screenDefaultsOKCB)
        rasterThreshSigBckLabel = QtWidgets.QLabel('SigmaBkrnd')
        self.rasterThreshSigBckrnd = QtWidgets.QLineEdit()
        self.rasterThreshSigBckrnd.setText(str(getBlConfig("rasterThreshSigBckrnd")))
        self.rasterThreshSigBckrnd.returnPressed.connect(self.screenDefaultsOKCB)
        rasterThreshSigStrongLabel = QtWidgets.QLabel('SigmaStrong')
        self.rasterThreshSigStrong = QtWidgets.QLineEdit()
        self.rasterThreshSigStrong.setText(str(getBlConfig("rasterThreshSigStrong")))
        self.rasterThreshSigStrong.returnPressed.connect(self.screenDefaultsOKCB)
        # Threshold entries editable only while tuning is enabled.
        self.rasterThreshKernSize.setEnabled(threshFlag)
        self.rasterThreshSigBckrnd.setEnabled(threshFlag)
        self.rasterThreshSigStrong.setEnabled(threshFlag)
        self.hBoxRasterLayout3.addWidget(self.rasterThreshCheckBox)
        self.hBoxRasterLayout3.addWidget(rasterThreshKernSizeLabel)
        self.hBoxRasterLayout3.addWidget(self.rasterThreshKernSize)
        self.hBoxRasterLayout3.addWidget(rasterThreshSigBckLabel)
        self.hBoxRasterLayout3.addWidget(self.rasterThreshSigBckrnd)
        self.hBoxRasterLayout3.addWidget(rasterThreshSigStrongLabel)
        self.hBoxRasterLayout3.addWidget(self.rasterThreshSigStrong)

        vBoxDialsParams.addLayout(hBoxColParams2b)
        vBoxDialsParams.addLayout(self.hBoxRasterLayout2)
        vBoxDialsParams.addLayout(self.hBoxRasterLayout3)
        dialsGB.setLayout(vBoxDialsParams)

        reprocessRasterButton = QtWidgets.QPushButton("ReProcessRaster")
        reprocessRasterButton.clicked.connect(self.reprocessRasterRequestCB)
        # NOTE(review): verify which of buttons()[0]/[1] is Apply vs Cancel here;
        # the handler wiring depends on QDialogButtonBox's button ordering.
        self.buttons = QDialogButtonBox(
            QDialogButtonBox.Apply | QDialogButtonBox.Cancel,
            Qt.Horizontal, self)
        self.buttons.buttons()[1].clicked.connect(self.screenDefaultsOKCB)
        self.buttons.buttons()[0].clicked.connect(self.screenDefaultsCancelCB)
        vBoxColParams1.addWidget(collectionGB)
        vBoxColParams1.addWidget(dozorGB)
        vBoxColParams1.addWidget(dialsGB)
        vBoxColParams1.addWidget(reprocessRasterButton)
        vBoxColParams1.addWidget(self.buttons)
        self.setLayout(vBoxColParams1)

    def setGuiValues(self, values):
        """Populate the raster-default line edits from a {name: value} dict."""
        for item, value in values.items():
            logger.info('resetting %s to %s' % (item, value))
            if item == 'osc_range':
                self.osc_range_ledit.setText('%.3f' % float(value))
            elif item == 'exp_time':
                self.exp_time_ledit.setText('%.3f' % float(value))
            elif item == 'transmission':
                self.trans_ledit.setText('%.3f' % float(value))
            else:
                logger.error('setGuiValues unknown item: %s value: %s' % (item, value))

    def reprocessRasterRequestCB(self):
        # Redraw and re-run processing for the currently selected raster request.
        self.parent.eraseCB()
        try:
            reqID = self.parent.selectedSampleRequest["uid"]
            self.parent.drawPolyRaster(db_lib.getRequestByID(reqID))
            self.parent.send_to_server("reprocessRaster(\""+str(reqID)+"\")")
        except:  # NOTE(review): bare except silently swallows all errors (incl. no selection)
            pass

    def screenDefaultsCancelCB(self):
        self.done(QDialog.Rejected)

    def dozorSpotLevelChangedCB(self, i):
        # Applied immediately, unlike the other settings which save on OK.
        setBlConfig(RASTER_DOZOR_SPOT_LEVEL, int(self.dozorSpotLevel.itemText(i)))

    def screenDefaultsOKCB(self):
        """Persist every raster default from the dialog to the beamline config."""
        setBlConfig("rasterDefaultWidth",float(self.osc_range_ledit.text()))
        setBlConfig("rasterDefaultTime",float(self.exp_time_ledit.text()))
        setBlConfig("rasterDefaultTrans",float(self.trans_ledit.text()))
        setBlConfig("rasterDefaultMinSpotSize",float(self.minSpot_ledit.text()))
        setBlConfig(RASTER_TUNE_LOW_RES,float(self.rasterLowRes.text()))
        setBlConfig(RASTER_TUNE_HIGH_RES,float(self.rasterHighRes.text()))
        setBlConfig(RASTER_TUNE_ICE_RING_WIDTH,float(self.rasterIceRingWidth.text()))
        setBlConfig("rasterThreshKernSize",float(self.rasterThreshKernSize.text()))
        setBlConfig("rasterThreshSigBckrnd",float(self.rasterThreshSigBckrnd.text()))
        setBlConfig("rasterThreshSigStrong",float(self.rasterThreshSigStrong.text()))
        if (self.rasterIceRingCheckBox.isChecked()):
            setBlConfig(RASTER_TUNE_ICE_RING_FLAG,1)
        else:
            setBlConfig(RASTER_TUNE_ICE_RING_FLAG,0)
        if (self.rasterResoCheckBox.isChecked()):
            setBlConfig(RASTER_TUNE_RESO_FLAG,1)
        else:
            setBlConfig(RASTER_TUNE_RESO_FLAG,0)

    def rasterIceRingCheckCB(self,state):
        # Ice-ring width editable only while the box is checked.
        if state == QtCore.Qt.Checked:
            self.rasterIceRingWidth.setEnabled(True)
        else:
            self.rasterIceRingWidth.setEnabled(False)

    def rasterResoCheckCB(self,state):
        # Writes the flag immediately and toggles the resolution limit fields.
        if state == QtCore.Qt.Checked:
            setBlConfig(RASTER_TUNE_RESO_FLAG,1)
            self.rasterLowRes.setEnabled(True)
            self.rasterHighRes.setEnabled(True)
        else:
            setBlConfig(RASTER_TUNE_RESO_FLAG,0)
            self.rasterLowRes.setEnabled(False)
            self.rasterHighRes.setEnabled(False)

    def rasterThreshCheckCB(self,state):
        # Writes the flag immediately and toggles the threshold fields.
        if state == QtCore.Qt.Checked:
            setBlConfig("rasterThreshFlag",1)
            self.rasterThreshKernSize.setEnabled(True)
            self.rasterThreshSigBckrnd.setEnabled(True)
            self.rasterThreshSigStrong.setEnabled(True)
        else:
            setBlConfig("rasterThreshFlag",0)
            self.rasterThreshKernSize.setEnabled(False)
            self.rasterThreshSigBckrnd.setEnabled(False)
            self.rasterThreshSigStrong.setEnabled(False)

    #code below and its application from: https://snorfalorpagus.net/blog/2014/08/09/validating-user-input-in-pyqt4-using-qvalidator/
    def checkEntryState(self, *args, **kwargs):
        # Color the sending line edit by its validator state:
        # yellow = intermediate, red = invalid, white = acceptable.
        sender = self.sender()
        validator = sender.validator()
        state = validator.validate(sender.text(), 0)[0]
        if state == QtGui.QValidator.Intermediate:
            color = '#fff79a' # yellow
        elif state == QtGui.QValidator.Invalid:
            color = '#f6989d' # red
        else:
            color = '#ffffff' # white
        sender.setStyleSheet('QLineEdit { background-color: %s }' % color)


class PuckDialog(QtWidgets.QDialog):
    """Pick a puck, by name, that is not already in the primary dewar."""
    def __init__(self, parent = None):
        super(PuckDialog, self).__init__(parent)
        self.initData()
        self.initUI()

    def initData(self):
        # Build a model of the owner's pucks that are not currently in the dewar.
        puckListUnsorted = db_lib.getAllPucks(daq_utils.owner)
        puckList = sorted(puckListUnsorted,key=lambda i: i['name'],reverse=False)
        dewarObj = db_lib.getPrimaryDewar(daq_utils.beamline)
        pucksInDewar = dewarObj['content']
        data = []
#if you have to, you could store the puck_id in the item data
        for i in range(len(puckList)):
            if (puckList[i]["uid"] not in pucksInDewar):
                data.append(puckList[i]["name"])
        self.model = QtGui.QStandardItemModel()
        labels = QStringList(("Name"))  # NOTE(review): ("Name") is a plain string, not a tuple — confirm QStringList accepts it
        self.model.setHorizontalHeaderLabels(labels)
        for i in range(len(data)):
            name = QtGui.QStandardItem(data[i])
            self.model.appendRow(name)

    def initUI(self):
        # List view with OK/Cancel; double-click acts as OK.
        self.tv = QtWidgets.QListView(self)
        self.tv.setModel(self.model)
        self.tv.doubleClicked[QModelIndex].connect(self.containerOKCB)
        behavior = QtWidgets.QAbstractItemView.SelectRows
        self.tv.setSelectionBehavior(behavior)

        self.label = QtWidgets.QLabel(self)
        self.buttons = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
            Qt.Horizontal, self)
        self.buttons.buttons()[0].clicked.connect(self.containerOKCB)
        self.buttons.buttons()[1].clicked.connect(self.containerCancelCB)
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.tv)
        layout.addWidget(self.label)
        layout.addWidget(self.buttons)
        self.setLayout(layout)
\n self.tv.clicked.connect(self.onClicked)\n \n def containerOKCB(self):\n selmod = self.tv.selectionModel()\n selection = selmod.selection()\n indexes = selection.indexes()\n if (indexes != []):\n i = 0\n item = self.model.itemFromIndex(indexes[i])\n text = str(item.text())\n self.label.setText(text) \n self.accept()\n self.puckName = text\n else:\n text = \"\"\n self.reject()\n self.puckName = text\n \n\n def containerCancelCB(self):\n text = \"\"\n self.reject()\n self.puckName = text\n\n \n def onClicked(self, idx):\n item = self.model.itemFromIndex(idx) \n text = str(item.text())\n\n @staticmethod\n def getPuckName(parent = None):\n dialog = PuckDialog(parent)\n result = dialog.exec_()\n return (dialog.puckName, result == QDialog.Accepted)\n\n\nclass DewarDialog(QtWidgets.QDialog):\n def __init__(self, parent = None,action=\"add\"):\n super(DewarDialog, self).__init__(parent)\n self.pucksPerDewarSector = 3\n self.dewarSectors = 8\n self.action = action\n self.parent=parent\n\n self.initData()\n self.initUI()\n\n def initData(self):\n dewarObj = db_lib.getPrimaryDewar(daq_utils.beamline)\n puckLocs = dewarObj['content']\n self.data = []\n for i in range(len(puckLocs)):\n if (puckLocs[i] != \"\"):\n owner = db_lib.getContainerByID(puckLocs[i])[\"owner\"]\n self.data.append(db_lib.getContainerNameByID(puckLocs[i]))\n else:\n self.data.append(\"Empty\")\n logger.info(self.data)\n\n\n def initUI(self):\n layout = QtWidgets.QVBoxLayout()\n headerLabelLayout = QtWidgets.QHBoxLayout()\n aLabel = QtWidgets.QLabel(\"A\")\n aLabel.setFixedWidth(15)\n headerLabelLayout.addWidget(aLabel)\n bLabel = QtWidgets.QLabel(\"B\")\n bLabel.setFixedWidth(10)\n headerLabelLayout.addWidget(bLabel)\n cLabel = QtWidgets.QLabel(\"C\")\n cLabel.setFixedWidth(10)\n headerLabelLayout.addWidget(cLabel)\n layout.addLayout(headerLabelLayout)\n self.allButtonList = [None]*(self.dewarSectors*self.pucksPerDewarSector)\n for i in range (0,self.dewarSectors):\n rowLayout = QtWidgets.QHBoxLayout()\n 
            numLabel = QtWidgets.QLabel(str(i+1))
            rowLayout.addWidget(numLabel)
            for j in range (0,self.pucksPerDewarSector):
                dataIndex = (i*self.pucksPerDewarSector)+j
                self.allButtonList[dataIndex] = QtWidgets.QPushButton((str(self.data[dataIndex])))
                # Bind the flat slot index to the click handler.
                self.allButtonList[dataIndex].clicked.connect(functools.partial(self.on_button,str(dataIndex)))
                rowLayout.addWidget(self.allButtonList[dataIndex])
            layout.addLayout(rowLayout)
        cancelButton = QtWidgets.QPushButton("Done")
        cancelButton.clicked.connect(self.containerCancelCB)
        layout.addWidget(cancelButton)
        self.setLayout(layout)

    def on_button(self, n):
        """Handle a slot click; n is the flat slot index as a string."""
        if (self.action == "remove"):
            # Remove mode: clear the slot in the DB and on the button, then
            # nudge the tree-refresh PV; dialog stays open for more removals.
            self.dewarPos = n
            db_lib.removePuckFromDewar(daq_utils.beamline,int(n))
            self.allButtonList[int(n)].setText("Empty")
            self.parent.treeChanged_pv.put(1)
        else:
            # Add mode: report the chosen position to the caller.
            self.dewarPos = n
            self.accept()

    def containerCancelCB(self):
        self.dewarPos = 0
        self.reject()

    @staticmethod
    def getDewarPos(parent = None,action="add"):
        """Run the dialog modally; return (dewarPos, accepted)."""
        dialog = DewarDialog(parent,action)
        result = dialog.exec_()
        return (dialog.dewarPos, result == QDialog.Accepted)


class DewarTree(QtWidgets.QTreeView):
    """Tree view of the dewar: pucks -> samples -> collection requests."""
    def __init__(self, parent=None):
        super(DewarTree, self).__init__(parent)
        self.pucksPerDewarSector = 3
        self.dewarSectors = 8
        self.parent=parent
        self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
        self.setAnimated(True)
        self.model = QtGui.QStandardItemModel()
        # Checking/unchecking a request item queues/dequeues it.
        self.model.itemChanged.connect(self.queueSelectedSample)
        self.isExpanded = 1

    def keyPressEvent(self, event):
        # Delete/Backspace removes the selected entries.
        if (event.key() == Qt.Key_Delete or event.key() == Qt.Key_Backspace):
            self.deleteSelectedCB(0)
        else:
            super(DewarTree,self).keyPressEvent(event)

    def refreshTree(self):
        self.parent.dewarViewToggleCheckCB()

    def refreshTreeDewarView(self):
        """Rebuild the model in dewar order: puck position -> samples -> requests.

        Item data roles: 32 holds the payload id (puck name / sample id /
        request uid), 33 holds the node type string.
        """
        startTime = time.time()
        selectedIndex = None
        mountedIndex = None
        selectedSampleIndex = None
        puck = ""
        collectionRunning = False
        self.model.clear()
        st = time.time()
        dewarContents = db_lib.getContainerByName(daq_utils.primaryDewarName,daq_utils.beamline)['content']
        for i in range (0,len(dewarContents)): #dewar contents is the list of puck IDs
            parentItem = self.model.invisibleRootItem()
            if (dewarContents[i]==""):
                puck = ""
                puckName = ""
            else:
                st = time.time()
                # containerDict memoizes puck lookups across refreshes.
                if (dewarContents[i] not in containerDict):
                    puck = db_lib.getContainerByID(dewarContents[i])
                    containerDict[dewarContents[i]] = puck
                else:
                    puck = containerDict[dewarContents[i]]
                puckName = puck["name"]
            # Position label like "1A".  NOTE(review): '/' is float division on
            # Py3; %d truncates so the sector number is still correct.
            index_s = "%d%s" % ((i)/self.pucksPerDewarSector+1,chr(((i)%self.pucksPerDewarSector)+ord('A')))
            item = QtGui.QStandardItem(QtGui.QIcon(":/trolltech/styles/commonstyle/images/file-16.png"), QString(index_s + " " + puckName))
            item.setData(puckName,32)
            item.setData("container",33)
            parentItem.appendRow(item)
            parentItem = item
            if (puck != "" and puckName != "private"):
                puckContents = puck['content']
                puckSize = len(puckContents)
                for j in range (0,len(puckContents)):#should be the list of samples
                    if (puckContents[j] != ""):
                        st = time.time()
                        # sampleNameDict memoizes sample-name lookups.
                        if (puckContents[j] not in sampleNameDict):
                            sampleName = db_lib.getSampleNamebyID(puckContents[j])
                            sampleNameDict[puckContents[j]] = sampleName
                        else:
                            sampleName = sampleNameDict[puckContents[j]]
                        position_s = str(j+1) + "-" + sampleName
                        item = QtGui.QStandardItem(QtGui.QIcon(":/trolltech/styles/commonstyle/images/file-16.png"), QString(position_s))
                        item.setData(puckContents[j],32) #just stuck sampleID there, but negate it to diff from reqID
                        item.setData("sample",33)
                        # Mounted pin rendered red + italic/overline/underline.
                        if (puckContents[j] == self.parent.mountedPin_pv.get()):
                            item.setForeground(QtGui.QColor('red'))
                            font = QtGui.QFont()
                            font.setItalic(True)
                            font.setOverline(True)
                            font.setUnderline(True)
                            item.setFont(font)
                        parentItem.appendRow(item)
                        if (puckContents[j] == self.parent.mountedPin_pv.get()):
                            mountedIndex = self.model.indexFromItem(item)
                        if (puckContents[j] == self.parent.selectedSampleID): #looking for the selected item
                            logger.info("found " + str(self.parent.SelectedItemData))
                            selectedSampleIndex = self.model.indexFromItem(item)
                        st = time.time()
                        sampleRequestList = db_lib.getRequestsBySampleID(puckContents[j])
                        for k in range(len(sampleRequestList)):
                            if not ("protocol" in sampleRequestList[k]["request_obj"]):
                                continue
                            col_item = QtGui.QStandardItem(QtGui.QIcon(":/trolltech/styles/commonstyle/images/file-16.png"), QString(sampleRequestList[k]["request_obj"]["file_prefix"]+"_"+sampleRequestList[k]["request_obj"]["protocol"]))
                            col_item.setData(sampleRequestList[k]["uid"],32)
                            col_item.setData("request",33)
                            col_item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsSelectable)
                            # Priority encodes state: 99999 = collecting (green),
                            # >0 = queued (checked), <0 = done (cyan), 0 = unqueued.
                            if (sampleRequestList[k]["priority"] == 99999):
                                col_item.setCheckState(Qt.Checked)
                                col_item.setBackground(QtGui.QColor('green'))
                                selectedIndex = self.model.indexFromItem(col_item) ##attempt to leave it on the request after collection
                                collectionRunning = True
                                self.parent.refreshCollectionParams(sampleRequestList[k])
                            elif (sampleRequestList[k]["priority"] > 0):
                                col_item.setCheckState(Qt.Checked)
                                col_item.setBackground(QtGui.QColor('white'))
                            elif (sampleRequestList[k]["priority"]< 0):
                                col_item.setCheckable(False)
                                col_item.setBackground(QtGui.QColor('cyan'))
                            else:
                                col_item.setCheckState(Qt.Unchecked)
                                col_item.setBackground(QtGui.QColor('white'))
                            item.appendRow(col_item)
                            if (sampleRequestList[k]["uid"] == self.parent.SelectedItemData): #looking for the selected item, this is a request
                                selectedIndex = self.model.indexFromItem(col_item)
                    else : #this is an empty spot, no sample
                        position_s = str(j+1)
                        item = QtGui.QStandardItem(QtGui.QIcon(":/trolltech/styles/commonstyle/images/file-16.png"), QString(position_s))
                        item.setData("",32)
                        parentItem.appendRow(item)
        self.setModel(self.model)
        # Restore selection/highlighting after the rebuild.
        if (selectedSampleIndex != None and collectionRunning == False):
            self.setCurrentIndex(selectedSampleIndex)
            if (mountedIndex != None):
                self.model.itemFromIndex(mountedIndex).setForeground(QtGui.QColor('red'))
                font = QtGui.QFont()
                font.setUnderline(True)
                font.setItalic(True)
                font.setOverline(True)
                self.model.itemFromIndex(mountedIndex).setFont(font)
            self.parent.row_clicked(selectedSampleIndex)
        elif (selectedSampleIndex == None and collectionRunning == False):
            if (mountedIndex != None):
                self.setCurrentIndex(mountedIndex)
                self.model.itemFromIndex(mountedIndex).setForeground(QtGui.QColor('red'))
                font = QtGui.QFont()
                font.setUnderline(True)
                font.setItalic(True)
                font.setOverline(True)
                self.model.itemFromIndex(mountedIndex).setFont(font)
                self.parent.row_clicked(mountedIndex)
            else:
                pass
        if (selectedIndex != None and collectionRunning == False):
            self.setCurrentIndex(selectedIndex)
            self.parent.row_clicked(selectedIndex)
        if (collectionRunning == True):
            if (mountedIndex != None):
                self.setCurrentIndex(mountedIndex)
        if (self.isExpanded):
            self.expandAll()
        else:
            self.collapseAll()
        self.scrollTo(self.currentIndex(),QAbstractItemView.PositionAtCenter)
        logger.info("refresh time = " + str(time.time()-startTime))

    def refreshTreePriorityView(self): #"item" is a sample, "col_items" are requests which are children of samples.
        """Rebuild the model in request-priority order (only samples that
        have queued requests appear)."""
        collectionRunning = False
        selectedIndex = None
        mountedIndex = None
        selectedSampleIndex = None
        self.model.clear()
        self.orderedRequests = db_lib.getOrderedRequestList(daq_utils.beamline)
        dewarContents = db_lib.getContainerByName(daq_utils.primaryDewarName,daq_utils.beamline)['content']
        maxPucks = len(dewarContents)  # NOTE(review): unused
        requestedSampleList = []
        mountedPin = self.parent.mountedPin_pv.get()
        # Collect the distinct samples referenced by the ordered requests.
        for i in range(len(self.orderedRequests)): # I need a list of samples for parent nodes
            if (self.orderedRequests[i]["sample"] not in requestedSampleList):
                requestedSampleList.append(self.orderedRequests[i]["sample"])
for i in range(len(requestedSampleList)):\n sample = db_lib.getSampleByID(requestedSampleList[i])\n owner = sample[\"owner\"]\n parentItem = self.model.invisibleRootItem()\n nodeString = QString(str(db_lib.getSampleNamebyID(requestedSampleList[i])))\n item = QtGui.QStandardItem(QtGui.QIcon(\":/trolltech/styles/commonstyle/images/file-16.png\"), nodeString)\n item.setData(requestedSampleList[i],32)\n item.setData(\"sample\",33) \n if (requestedSampleList[i] == mountedPin):\n item.setForeground(QtGui.QColor('red')) \n font = QtGui.QFont()\n font.setItalic(True)\n font.setOverline(True)\n font.setUnderline(True)\n item.setFont(font)\n parentItem.appendRow(item)\n if (requestedSampleList[i] == mountedPin):\n mountedIndex = self.model.indexFromItem(item)\n if (requestedSampleList[i] == self.parent.selectedSampleID): #looking for the selected item\n selectedSampleIndex = self.model.indexFromItem(item)\n parentItem = item\n for k in range(len(self.orderedRequests)):\n if (self.orderedRequests[k][\"sample\"] == requestedSampleList[i]):\n col_item = QtGui.QStandardItem(QtGui.QIcon(\":/trolltech/styles/commonstyle/images/file-16.png\"), QString(self.orderedRequests[k][\"request_obj\"][\"file_prefix\"]+\"_\"+self.orderedRequests[k][\"request_obj\"][\"protocol\"]))\n col_item.setData(self.orderedRequests[k][\"uid\"],32)\n col_item.setData(\"request\",33) \n col_item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsSelectable)\n if (self.orderedRequests[k][\"priority\"] == 99999):\n col_item.setCheckState(Qt.Checked)\n col_item.setBackground(QtGui.QColor('green'))\n collectionRunning = True\n self.parent.refreshCollectionParams(self.orderedRequests[k])\n\n elif (self.orderedRequests[k][\"priority\"] > 0):\n col_item.setCheckState(Qt.Checked)\n col_item.setBackground(QtGui.QColor('white'))\n elif (self.orderedRequests[k][\"priority\"]< 0):\n col_item.setCheckable(False) \n col_item.setBackground(QtGui.QColor('cyan'))\n else:\n 
col_item.setCheckState(Qt.Unchecked)\n col_item.setBackground(QtGui.QColor('white'))\n item.appendRow(col_item)\n if (self.orderedRequests[k][\"uid\"] == self.parent.SelectedItemData): #looking for the selected item\n selectedIndex = self.model.indexFromItem(col_item)\n self.setModel(self.model)\n if (selectedSampleIndex != None and collectionRunning == False):\n self.setCurrentIndex(selectedSampleIndex)\n self.parent.row_clicked(selectedSampleIndex)\n elif (selectedSampleIndex == None and collectionRunning == False):\n if (mountedIndex != None):\n self.setCurrentIndex(mountedIndex)\n self.parent.row_clicked(mountedIndex)\n else:\n pass\n\n if (selectedIndex != None and collectionRunning == False):\n self.setCurrentIndex(selectedIndex)\n self.parent.row_clicked(selectedIndex)\n self.scrollTo(self.currentIndex(),QAbstractItemView.PositionAtCenter)\n self.expandAll()\n\n\n def queueSelectedSample(self,item):\n reqID = str(item.data(32))\n checkedSampleRequest = db_lib.getRequestByID(reqID) #line not needed???\n if (item.checkState() == Qt.Checked):\n db_lib.updatePriority(reqID,5000)\n else:\n db_lib.updatePriority(reqID,0)\n item.setBackground(QtGui.QColor('white'))\n self.parent.treeChanged_pv.put(self.parent.processID) #the idea is touch the pv, but have this gui instance not refresh\n\n\n def queueAllSelectedCB(self):\n selmod = self.selectionModel()\n selection = selmod.selection()\n indexes = selection.indexes()\n for i in range(len(indexes)):\n item = self.model.itemFromIndex(indexes[i])\n itemData = str(item.data(32))\n itemDataType = str(item.data(33))\n if (itemDataType == \"request\"): \n selectedSampleRequest = db_lib.getRequestByID(itemData)\n db_lib.updatePriority(itemData,5000)\n self.parent.treeChanged_pv.put(1)\n\n\n def deQueueAllSelectedCB(self):\n selmod = self.selectionModel()\n selection = selmod.selection()\n indexes = selection.indexes()\n for i in range(len(indexes)):\n item = self.model.itemFromIndex(indexes[i])\n itemData = 
str(item.data(32))\n itemDataType = str(item.data(33))\n if (itemDataType == \"request\"): \n selectedSampleRequest = db_lib.getRequestByID(itemData)\n db_lib.updatePriority(itemData,0)\n self.parent.treeChanged_pv.put(1)\n\n\n def confirmDelete(self):\n quit_msg = \"Are you sure you want to delete all requests?\"\n self.parent.timerHutch.stop()\n self.parent.timerSample.stop() \n reply = QtWidgets.QMessageBox.question(self, 'Message',quit_msg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)\n self.parent.timerSample.start(SAMPLE_TIMER_DELAY) \n self.parent.timerHutch.start(HUTCH_TIMER_DELAY) \n if reply == QtWidgets.QMessageBox.Yes:\n return(1)\n else:\n return(0)\n \n\n def deleteSelectedCB(self,deleteAll):\n if (deleteAll):\n if (not self.confirmDelete()):\n return \n self.selectAll() \n selmod = self.selectionModel()\n selection = selmod.selection()\n indexes = selection.indexes()\n progressInc = 100.0/float(len(indexes))\n self.parent.progressDialog.setWindowTitle(\"Deleting Requests\")\n self.parent.progressDialog.show()\n for i in range(len(indexes)):\n self.parent.progressDialog.setValue(int((i+1)*progressInc))\n item = self.model.itemFromIndex(indexes[i])\n itemData = str(item.data(32))\n itemDataType = str(item.data(33))\n if (itemDataType == \"request\"): \n selectedSampleRequest = db_lib.getRequestByID(itemData)\n self.selectedSampleID = selectedSampleRequest[\"sample\"]\n db_lib.deleteRequest(selectedSampleRequest[\"uid\"])\n if (selectedSampleRequest[\"request_obj\"][\"protocol\"] == \"raster\" or selectedSampleRequest[\"request_obj\"][\"protocol\"] == \"stepRaster\" or selectedSampleRequest[\"request_obj\"][\"protocol\"] == \"specRaster\"):\n for i in range(len(self.parent.rasterList)):\n if (self.parent.rasterList[i] != None):\n if (self.parent.rasterList[i][\"uid\"] == selectedSampleRequest[\"uid\"]):\n self.parent.scene.removeItem(self.parent.rasterList[i][\"graphicsItem\"])\n self.parent.rasterList[i] = None\n if 
(selectedSampleRequest[\"request_obj\"][\"protocol\"] == \"vector\" or selectedSampleRequest[\"request_obj\"][\"protocol\"] == \"stepVector\"):\n self.parent.clearVectorCB()\n self.parent.progressDialog.close()\n self.parent.treeChanged_pv.put(1)\n \n\n def expandAllCB(self):\n self.expandAll()\n self.isExpanded = 1\n\n def collapseAllCB(self):\n self.collapseAll()\n self.isExpanded = 0\n\n\n\nclass DataLocInfo(QtWidgets.QGroupBox):\n\n def __init__(self,parent=None):\n QGroupBox.__init__(self,parent)\n self.parent = parent\n self.setTitle(\"Data Location\")\n self.vBoxDPathParams1 = QtWidgets.QVBoxLayout()\n self.hBoxDPathParams1 = QtWidgets.QHBoxLayout()\n self.basePathLabel = QtWidgets.QLabel('Base Path:')\n self.base_path_ledit = QtWidgets.QLabel() #leave editable for now\n self.base_path_ledit.setText(os.getcwd())\n self.base_path_ledit.setTextInteractionFlags(QtCore.Qt.TextSelectableByMouse)\n #self.base_path_ledit.textChanged[str].connect(self.basePathTextChanged)\n self.browseBasePathButton = QtWidgets.QPushButton(\"Browse...\") \n self.browseBasePathButton.setEnabled(False)\n #self.browseBasePathButton.clicked.connect(self.parent.popBaseDirectoryDialogCB)\n self.hBoxDPathParams1.addWidget(self.basePathLabel)\n self.hBoxDPathParams1.addWidget(self.base_path_ledit)\n self.hBoxDPathParams1.addWidget(self.browseBasePathButton)\n self.hBoxDPathParams2 = QtWidgets.QHBoxLayout()\n self.dataPrefixLabel = QtWidgets.QLabel('Data Prefix:\\n(%s Char Limit)' % VALID_PREFIX_LENGTH)\n self.prefix_ledit = QtWidgets.QLineEdit()\n self.prefix_ledit.textChanged[str].connect(self.prefixTextChanged)\n self.prefix_ledit.setValidator(QRegExpValidator(QRegExp(VALID_PREFIX_NAME), self.prefix_ledit))\n self.hBoxDPathParams2.addWidget(self.dataPrefixLabel)\n self.hBoxDPathParams2.addWidget(self.prefix_ledit)\n self.dataNumstartLabel = QtWidgets.QLabel('File Number Start:')\n self.file_numstart_ledit = QtWidgets.QLineEdit()\n self.file_numstart_ledit.setFixedWidth(50)\n 
self.hBoxDPathParams3 = QtWidgets.QHBoxLayout()\n self.dataPathLabel = QtWidgets.QLabel('Data Path:')\n self.dataPath_ledit = QtWidgets.QLineEdit()\n self.dataPath_ledit.setFrame(False)\n self.dataPath_ledit.setReadOnly(True)\n self.hBoxDPathParams3.addWidget(self.dataPathLabel)\n self.hBoxDPathParams3.addWidget(self.dataPath_ledit)\n self.hBoxDPathParams2.addWidget(self.dataNumstartLabel)\n self.hBoxDPathParams2.addWidget(self.file_numstart_ledit)\n self.vBoxDPathParams1.addLayout(self.hBoxDPathParams1)\n self.vBoxDPathParams1.addLayout(self.hBoxDPathParams2)\n self.vBoxDPathParams1.addLayout(self.hBoxDPathParams3)\n self.setLayout(self.vBoxDPathParams1)\n\n\n def basePathTextChanged(self,text):\n prefix = self.prefix_ledit.text()\n self.setDataPath_ledit(text+\"/\" + str(daq_utils.getVisitName()) + \"/\"+prefix+\"/#/\")\n\n def prefixTextChanged(self,text):\n prefix = self.prefix_ledit.text()\n try:\n runNum = db_lib.getSampleRequestCount(self.parent.selectedSampleID)\n except KeyError:\n logger.error('just setting a value of 1 for now')\n runNum = 1\n try:\n (puckPosition,samplePositionInContainer,containerID) = db_lib.getCoordsfromSampleID(daq_utils.beamline,self.parent.selectedSampleID)\n except IndexError:\n logger.error('IndexError returning')\n return\n self.setDataPath_ledit(self.base_path_ledit.text()+\"/\"+ str(daq_utils.getVisitName()) + \"/\"+prefix+\"/\"+str(runNum+1)+\"/\"+db_lib.getContainerNameByID(containerID)+\"_\"+str(samplePositionInContainer+1)+\"/\") \n\n def setFileNumstart_ledit(self,s):\n self.file_numstart_ledit.setText(s)\n\n def setFilePrefix_ledit(self,s):\n self.prefix_ledit.setText(s)\n\n def setBasePath_ledit(self,s):\n self.base_path_ledit.setText(s)\n\n def setDataPath_ledit(self,s):\n self.dataPath_ledit.setText(s)\n\n\n\nclass RasterCell(QtWidgets.QGraphicsRectItem):\n\n def __init__(self,x,y,w,h,topParent):\n super(RasterCell,self).__init__(x,y,w,h,None)\n self.topParent = topParent\n\n\ndef isInCell(position, item):\n if 
item.contains(position):\n return True\n return False\n\nclass RasterGroup(QtWidgets.QGraphicsItemGroup):\n def __init__(self,parent = None):\n super(RasterGroup, self).__init__()\n self.parent=parent\n self.setAcceptHoverEvents(True)\n\n\n def mousePressEvent(self, e):\n super(RasterGroup, self).mousePressEvent(e)\n logger.info(\"mouse pressed on group\")\n for i in range(len(self.parent.rasterList)):\n if (self.parent.rasterList[i] != None):\n if (self.parent.rasterList[i][\"graphicsItem\"].isSelected()):\n logger.info(\"found selected raster\")\n self.parent.SelectedItemData = self.parent.rasterList[i][\"uid\"]\n self.parent.treeChanged_pv.put(1)\n if (self.parent.vidActionRasterExploreRadio.isChecked()):\n for cell in self.childItems():\n if isInCell(e.pos(), cell):\n if (cell.data(0) != None):\n spotcount = cell.data(0)\n filename = cell.data(1)\n d_min = cell.data(2)\n intensity = cell.data(3)\n if (self.parent.albulaDispCheckBox.isChecked()):\n if (filename != \"empty\"):\n albulaUtils.albulaDispFile(filename)\n if not (self.parent.rasterExploreDialog.isVisible()):\n self.parent.rasterExploreDialog.show()\n self.parent.rasterExploreDialog.setSpotCount(spotcount)\n self.parent.rasterExploreDialog.setTotalIntensity(intensity)\n self.parent.rasterExploreDialog.setResolution(d_min)\n groupList = self.childItems()\n for i in range (0,len(groupList)):\n groupList[i].setPen(self.parent.redPen)\n cell.setPen(self.parent.yellowPen)\n\n else:\n super(RasterGroup, self).mousePressEvent(e)\n\n\n\n def mouseMoveEvent(self, e):\n\n if e.buttons() == QtCore.Qt.LeftButton:\n pass\n if e.buttons() == QtCore.Qt.RightButton:\n pass\n\n super(RasterGroup, self).mouseMoveEvent(e)\n logger.info(\"pos \" + str(self.pos()))\n\n def mouseReleaseEvent(self, e):\n super(RasterGroup, self).mouseReleaseEvent(e)\n if e.button() == QtCore.Qt.LeftButton:\n pass\n if e.button() == QtCore.Qt.RightButton:\n pass\n\n def hoverMoveEvent(self, e):\n super(RasterGroup, self).hoverEnterEvent(e)\n 
for cell in self.childItems():\n if isInCell(e.scenePos(), cell):\n if (cell.data(0) != None):\n spotcount = cell.data(0)\n d_min = cell.data(2)\n intensity = cell.data(3)\n if not (self.parent.rasterExploreDialog.isVisible()):\n self.parent.rasterExploreDialog.show()\n self.parent.rasterExploreDialog.setSpotCount(spotcount)\n self.parent.rasterExploreDialog.setTotalIntensity(intensity)\n self.parent.rasterExploreDialog.setResolution(d_min)\n\n\nclass ControlMain(QtWidgets.QMainWindow):\n#1/13/15 - are these necessary?\n Signal = QtCore.Signal()\n refreshTreeSignal = QtCore.Signal()\n serverMessageSignal = QtCore.Signal(str)\n serverPopupMessageSignal = QtCore.Signal(str)\n programStateSignal = QtCore.Signal(str)\n pauseButtonStateSignal = QtCore.Signal(str) \n\n\n xrecRasterSignal = QtCore.Signal(str)\n choochResultSignal = QtCore.Signal(str)\n energyChangeSignal = QtCore.Signal(float)\n mountedPinSignal = QtCore.Signal(int)\n beamSizeSignal = QtCore.Signal(float)\n controlMasterSignal = QtCore.Signal(int)\n zebraArmStateSignal = QtCore.Signal(int)\n govRobotSeReachSignal = QtCore.Signal(int)\n govRobotSaReachSignal = QtCore.Signal(int)\n govRobotDaReachSignal = QtCore.Signal(int)\n govRobotBlReachSignal = QtCore.Signal(int)\n detMessageSignal = QtCore.Signal(str)\n sampleFluxSignal = QtCore.Signal(float)\n zebraPulseStateSignal = QtCore.Signal(int)\n stillModeStateSignal = QtCore.Signal(int)\n zebraDownloadStateSignal = QtCore.Signal(int)\n zebraSentTriggerStateSignal = QtCore.Signal(int)\n zebraReturnedTriggerStateSignal = QtCore.Signal(int)\n fastShutterSignal = QtCore.Signal(float)\n gripTempSignal = QtCore.Signal(float)\n ringCurrentSignal = QtCore.Signal(float)\n beamAvailableSignal = QtCore.Signal(float)\n sampleExposedSignal = QtCore.Signal(float)\n sampMoveSignal = QtCore.Signal(int, str)\n roiChangeSignal = QtCore.Signal(str)\n highMagCursorChangeSignal = QtCore.Signal(str)\n lowMagCursorChangeSignal = QtCore.Signal(str)\n cryostreamTempSignal = 
    # Qt signals used to marshal EPICS monitor callbacks (which arrive on
    # non-GUI threads) onto the GUI thread.
    #1/13/15 - are these necessary?
    Signal = QtCore.Signal()
    refreshTreeSignal = QtCore.Signal()
    serverMessageSignal = QtCore.Signal(str)
    serverPopupMessageSignal = QtCore.Signal(str)
    programStateSignal = QtCore.Signal(str)
    pauseButtonStateSignal = QtCore.Signal(str)


    xrecRasterSignal = QtCore.Signal(str)
    choochResultSignal = QtCore.Signal(str)
    energyChangeSignal = QtCore.Signal(float)
    mountedPinSignal = QtCore.Signal(int)
    beamSizeSignal = QtCore.Signal(float)
    controlMasterSignal = QtCore.Signal(int)
    zebraArmStateSignal = QtCore.Signal(int)
    govRobotSeReachSignal = QtCore.Signal(int)
    govRobotSaReachSignal = QtCore.Signal(int)
    govRobotDaReachSignal = QtCore.Signal(int)
    govRobotBlReachSignal = QtCore.Signal(int)
    detMessageSignal = QtCore.Signal(str)
    sampleFluxSignal = QtCore.Signal(float)
    zebraPulseStateSignal = QtCore.Signal(int)
    stillModeStateSignal = QtCore.Signal(int)
    zebraDownloadStateSignal = QtCore.Signal(int)
    zebraSentTriggerStateSignal = QtCore.Signal(int)
    zebraReturnedTriggerStateSignal = QtCore.Signal(int)
    fastShutterSignal = QtCore.Signal(float)
    gripTempSignal = QtCore.Signal(float)
    ringCurrentSignal = QtCore.Signal(float)
    beamAvailableSignal = QtCore.Signal(float)
    sampleExposedSignal = QtCore.Signal(float)
    sampMoveSignal = QtCore.Signal(int, str)
    roiChangeSignal = QtCore.Signal(str)
    highMagCursorChangeSignal = QtCore.Signal(str)
    lowMagCursorChangeSignal = QtCore.Signal(str)
    cryostreamTempSignal = QtCore.Signal(str)

    def __init__(self):
        """Build the main control window: basic state, UI, EPICS PV
        connections, callbacks, and initial sample-tree population.
        Order matters: initUI() must run before the PVs/widgets below
        that it creates are referenced.
        """
        super(ControlMain, self).__init__()
        self.SelectedItemData = "" #attempt to know what row is selected
        self.popUpMessageInit = 1 # I hate these next two, but I don't want to catch old messages. Fix later, maybe.
        self.textWindowMessageInit = 1
        self.processID = os.getpid()
        self.popupMessage = QtWidgets.QErrorMessage(self)
        self.popupMessage.setStyleSheet("background-color: red")
        self.popupMessage.setModal(False)
        self.groupName = "skinner"
        self.scannerType = getBlConfig("scannerType")
        self.vectorStart = None
        self.vectorEnd = None
        self.staffScreenDialog = None
        # geometry of the center-marker character drawn on the video view
        self.centerMarkerCharSize = 20
        self.centerMarkerCharOffsetX = 12
        self.centerMarkerCharOffsetY = 18
        self.currentRasterCellList = []
        self.redPen = QtGui.QPen(QtCore.Qt.red)
        self.bluePen = QtGui.QPen(QtCore.Qt.blue)
        self.yellowPen = QtGui.QPen(QtCore.Qt.yellow)
        self.initUI()
        # EPICS PV connections (names resolved through daq_utils lookup dicts)
        self.zoom1FrameRatePV = PV(daq_utils.pvLookupDict["zoom1FrameRate"])
        self.zoom2FrameRatePV = PV(daq_utils.pvLookupDict["zoom2FrameRate"])
        self.zoom3FrameRatePV = PV(daq_utils.pvLookupDict["zoom3FrameRate"])
        self.zoom4FrameRatePV = PV(daq_utils.pvLookupDict["zoom4FrameRate"])
        self.sampleFluxPV = PV(daq_utils.pvLookupDict["sampleFlux"])
        self.beamFlux_pv = PV(daq_utils.pvLookupDict["flux"])
        self.stillMode_pv = PV(daq_utils.pvLookupDict["stillMode"])
        self.standardMode_pv = PV(daq_utils.pvLookupDict["standardMode"])
        self.lowMagCursorX_pv = PV(daq_utils.pvLookupDict["lowMagCursorX"])
        self.lowMagCursorY_pv = PV(daq_utils.pvLookupDict["lowMagCursorY"])
        self.highMagCursorX_pv = PV(daq_utils.pvLookupDict["highMagCursorX"])
        self.highMagCursorY_pv = PV(daq_utils.pvLookupDict["highMagCursorY"])
        self.fastShutterOpenPos_pv = PV(daq_utils.pvLookupDict["fastShutterOpenPos"])
        self.gripTemp_pv = PV(daq_utils.pvLookupDict["gripTemp"])
        self.cryostreamTemp_pv = PV(cryostreamTempPV[daq_utils.beamline])
        if (daq_utils.beamline == "fmx"):
            # slit 1 gap setpoints exist only at FMX
            self.slit1XGapSP_pv = PV(daq_utils.motor_dict["slit1XGap"] + ".VAL")
            self.slit1YGapSP_pv = PV(daq_utils.motor_dict["slit1YGap"] + ".VAL")
        ringCurrentPvName = "SR:C03-BI{DCCT:1}I:Real-I"
        self.ringCurrent_pv = PV(ringCurrentPvName)

        self.beamAvailable_pv = PV(daq_utils.pvLookupDict["beamAvailable"])
        self.sampleExposed_pv = PV(daq_utils.pvLookupDict["exposing"])

        self.beamSize_pv = PV(daq_utils.beamlineComm + "size_mode")
        self.energy_pv = PV(daq_utils.motor_dict["energy"]+".RBV")
        # raster step sizes in microns -- TODO confirm units
        self.rasterStepDefs = {"Coarse":20.0,"Fine":10.0,"VFine":5.0}
        self.createSampleTab()

        self.initCallbacks()
        # PI-type scanners add fine-positioner axes to the cached motor state
        if (self.scannerType != "PI"):
            self.motPos = {"x":self.sampx_pv.get(),"y":self.sampy_pv.get(),"z":self.sampz_pv.get(),"omega":self.omega_pv.get()}
        else:
            self.motPos = {"x":self.sampx_pv.get(),"y":self.sampy_pv.get(),"z":self.sampz_pv.get(),"omega":self.omega_pv.get(),"fineX":self.sampFineX_pv.get(),"fineY":self.sampFineY_pv.get(),"fineZ":self.sampFineZ_pv.get()}
        self.dewarTree.refreshTreeDewarView()
        # seed the mountedPin PV from the database if it is empty
        if (self.mountedPin_pv.get() == ""):
            mountedPin = db_lib.beamlineInfo(daq_utils.beamline, 'mountedSample')["sampleID"]
            self.mountedPin_pv.put(mountedPin)
        self.rasterExploreDialog = RasterExploreDialog()
        self.userScreenDialog = UserScreenDialog(self)
        self.detDistMotorEntry.getEntry().setText(self.detDistRBVLabel.getEntry().text()) #this is to fix the current val being overwritten by reso
        self.proposalID = -999999
        # "master" on the command line makes this instance the control master
        if (len(sys.argv)>1):
            if (sys.argv[1] == "master"):
                self.changeControlMasterCB(1)
                self.controlMasterCheckBox.setChecked(True)
        self.XRFInfoDict = self.parseXRFTable() #I don't like this
self.osc_end_ledit.setText('%.3f' % float(value))\n elif item == 'osc_range':\n self.osc_range_ledit.setText('%.3f' % float(value))\n elif item == 'img_width':\n self.img_width_ledit.setText('%.3f' % float(value))\n elif item == 'exp_time':\n self.exp_time_ledit.setText('%.3f' % float(value))\n elif item == 'transmission':\n self.transmission_ledit.setText('%.3f' % float(value))\n elif item == 'resolution':\n self.resolution_ledit.setText('%.2f' % float(value))\n else:\n logger.error('setGuiValues unknown item: %s value: %s' % (item, value))\n\n def parseXRFTable(self):\n XRFFile = open(os.environ[\"CONFIGDIR\"] + \"/XRF-AMX_simple.txt\")\n XRFInfoDict = {}\n for line in XRFFile.readlines():\n tokens = line.split()\n XRFInfoDict[tokens[0]] = int(float(tokens[5])*100)\n XRFFile.close()\n return XRFInfoDict\n \n\n\n def closeEvent(self, evnt):\n evnt.accept()\n sys.exit() #doing this to close any windows left open\n\n \n def initVideo2(self,frequency):\n self.captureHighMag=cv2.VideoCapture(daq_utils.highMagCamURL)\n logger.debug('highMagCamURL: \"' + daq_utils.highMagCamURL + '\"')\n\n def initVideo4(self,frequency):\n self.captureHighMagZoom=cv2.VideoCapture(daq_utils.highMagZoomCamURL)\n logger.debug('highMagZoomCamURL: \"' + daq_utils.highMagZoomCamURL + '\"')\n\n def initVideo3(self,frequency):\n self.captureLowMagZoom=cv2.VideoCapture(daq_utils.lowMagZoomCamURL)\n logger.debug('lowMagZoomCamURL: \"' + daq_utils.lowMagZoomCamURL + '\"')\n \n def createSampleTab(self):\n\n sampleTab= QtWidgets.QWidget() \n splitter1 = QtWidgets.QSplitter(Qt.Horizontal)\n vBoxlayout= QtWidgets.QVBoxLayout()\n self.dewarTreeFrame = QFrame()\n vBoxDFlayout= QtWidgets.QVBoxLayout()\n self.selectedSampleRequest = {}\n self.selectedSampleID = \"\"\n self.dewarTree = DewarTree(self)\n self.dewarTree.clicked[QModelIndex].connect(self.row_clicked)\n treeSelectBehavior = QtWidgets.QAbstractItemView.SelectItems\n treeSelectMode = QtWidgets.QAbstractItemView.ExtendedSelection\n 
self.dewarTree.setSelectionMode(treeSelectMode)\n self.dewarTree.setSelectionBehavior(treeSelectBehavior)\n hBoxRadioLayout1= QtWidgets.QHBoxLayout() \n self.viewRadioGroup=QtWidgets.QButtonGroup()\n self.priorityViewRadio = QtWidgets.QRadioButton(\"PriorityView\")\n self.priorityViewRadio.toggled.connect(functools.partial(self.dewarViewToggledCB,\"priorityView\"))\n self.viewRadioGroup.addButton(self.priorityViewRadio)\n self.dewarViewRadio = QtWidgets.QRadioButton(\"DewarView\")\n self.dewarViewRadio.setChecked(True) \n self.dewarViewRadio.toggled.connect(functools.partial(self.dewarViewToggledCB,\"dewarView\"))\n hBoxRadioLayout1.addWidget(self.dewarViewRadio) \n hBoxRadioLayout1.addWidget(self.priorityViewRadio)\n self.viewRadioGroup.addButton(self.dewarViewRadio)\n vBoxDFlayout.addLayout(hBoxRadioLayout1)\n vBoxDFlayout.addWidget(self.dewarTree)\n queueSelectedButton = QtWidgets.QPushButton(\"Queue All Selected\") \n queueSelectedButton.clicked.connect(self.dewarTree.queueAllSelectedCB)\n deQueueSelectedButton = QtWidgets.QPushButton(\"deQueue All Selected\") \n deQueueSelectedButton.clicked.connect(self.dewarTree.deQueueAllSelectedCB)\n runQueueButton = QtWidgets.QPushButton(\"Collect Queue\")\n runQueueButton.setStyleSheet(\"background-color: yellow\")\n runQueueButton.clicked.connect(self.collectQueueCB)\n stopRunButton = QtWidgets.QPushButton(\"Stop Collection\")\n stopRunButton.setStyleSheet(\"background-color: red\")\n stopRunButton.clicked.connect(self.stopRunCB) #immediate stop everything\n puckToDewarButton = QtWidgets.QPushButton(\"Puck to Dewar...\") \n mountSampleButton = QtWidgets.QPushButton(\"Mount Sample\") \n mountSampleButton.clicked.connect(self.mountSampleCB)\n unmountSampleButton = QtWidgets.QPushButton(\"Unmount Sample\") \n unmountSampleButton.clicked.connect(self.unmountSampleCB)\n puckToDewarButton.clicked.connect(self.puckToDewarCB)\n removePuckButton = QtWidgets.QPushButton(\"Remove Puck...\") \n 
removePuckButton.clicked.connect(self.removePuckCB)\n expandAllButton = QtWidgets.QPushButton(\"Expand All\") \n expandAllButton.clicked.connect(self.dewarTree.expandAllCB)\n collapseAllButton = QtWidgets.QPushButton(\"Collapse All\") \n collapseAllButton.clicked.connect(self.dewarTree.collapseAllCB)\n self.pauseQueueButton = QtWidgets.QPushButton(\"Pause\")\n self.pauseQueueButton.clicked.connect(self.stopQueueCB) \n emptyQueueButton = QtWidgets.QPushButton(\"Empty Queue\")\n emptyQueueButton.clicked.connect(functools.partial(self.dewarTree.deleteSelectedCB,1))\n warmupButton = QtWidgets.QPushButton(\"Warmup Gripper\") \n warmupButton.clicked.connect(self.warmupGripperCB)\n self.openShutterButton = QtWidgets.QPushButton(\"Open Photon Shutter\") \n self.openShutterButton.clicked.connect(self.openPhotonShutterCB)\n self.popUserScreen = QtWidgets.QPushButton(\"User Screen...\")\n self.popUserScreen.clicked.connect(self.popUserScreenCB)\n self.closeShutterButton = QtWidgets.QPushButton(\"Close Photon Shutter\") \n self.closeShutterButton.clicked.connect(self.closePhotonShutterCB)\n hBoxTreeButtsLayout = QtWidgets.QHBoxLayout()\n vBoxTreeButtsLayoutLeft = QtWidgets.QVBoxLayout()\n vBoxTreeButtsLayoutRight = QtWidgets.QVBoxLayout()\n vBoxTreeButtsLayoutLeft.addWidget(runQueueButton)\n vBoxTreeButtsLayoutLeft.addWidget(mountSampleButton)\n vBoxTreeButtsLayoutLeft.addWidget(self.pauseQueueButton)\n vBoxTreeButtsLayoutLeft.addWidget(queueSelectedButton)\n vBoxTreeButtsLayoutLeft.addWidget(self.popUserScreen) \n vBoxTreeButtsLayoutLeft.addWidget(warmupButton) \n vBoxTreeButtsLayoutRight.addWidget(stopRunButton)\n vBoxTreeButtsLayoutRight.addWidget(unmountSampleButton) \n vBoxTreeButtsLayoutRight.addWidget(self.closeShutterButton)\n vBoxTreeButtsLayoutRight.addWidget(deQueueSelectedButton) \n vBoxTreeButtsLayoutRight.addWidget(emptyQueueButton)\n hBoxTreeButtsLayout.addLayout(vBoxTreeButtsLayoutLeft)\n hBoxTreeButtsLayout.addLayout(vBoxTreeButtsLayoutRight)\n 
vBoxDFlayout.addLayout(hBoxTreeButtsLayout)\n self.dewarTreeFrame.setLayout(vBoxDFlayout)\n splitter1.addWidget(self.dewarTreeFrame)\n splitter11 = QtWidgets.QSplitter(Qt.Horizontal)\n self.mainSetupFrame = QFrame()\n self.mainSetupFrame.setFixedHeight(890)\n vBoxMainSetup = QtWidgets.QVBoxLayout()\n self.mainToolBox = QtWidgets.QToolBox()\n self.mainToolBox.setMinimumWidth(750)\n self.mainColFrame = QFrame()\n vBoxMainColLayout= QtWidgets.QVBoxLayout()\n colParamsGB = QtWidgets.QGroupBox()\n colParamsGB.setTitle(\"Acquisition\")\n vBoxColParams1 = QtWidgets.QVBoxLayout()\n hBoxColParams1 = QtWidgets.QHBoxLayout()\n colStartLabel = QtWidgets.QLabel('Oscillation Start:')\n colStartLabel.setFixedWidth(140)\n colStartLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.osc_start_ledit = QtWidgets.QLineEdit()\n self.osc_start_ledit.setFixedWidth(60)\n self.colEndLabel = QtWidgets.QLabel('Oscillation Range:')\n self.colEndLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.colEndLabel.setFixedWidth(140)\n self.osc_end_ledit = QtWidgets.QLineEdit()\n self.setGuiValues({'osc_end':\"180.0\"})\n self.osc_end_ledit.setFixedWidth(60)\n self.osc_end_ledit.textChanged[str].connect(functools.partial(self.totalExpChanged,\"oscEnd\")) \n hBoxColParams1.addWidget(colStartLabel)\n hBoxColParams1.addWidget(self.osc_start_ledit)\n hBoxColParams1.addWidget(self.colEndLabel)\n hBoxColParams1.addWidget(self.osc_end_ledit)\n hBoxColParams2 = QtWidgets.QHBoxLayout()\n colRangeLabel = QtWidgets.QLabel('Oscillation Width:')\n colRangeLabel.setFixedWidth(140)\n colRangeLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.osc_range_ledit = QtWidgets.QLineEdit()\n self.osc_range_ledit.setFixedWidth(60)\n self.stillModeCheckBox = QCheckBox(\"Stills\")\n self.stillModeCheckBox.setEnabled(False)\n if (self.stillModeStatePV.get()):\n self.stillModeCheckBox.setChecked(True)\n self.setGuiValues({'osc_range':\"0.0\"})\n else:\n self.stillModeCheckBox.setChecked(False) \n colExptimeLabel = 
QtWidgets.QLabel('ExposureTime:')\n self.stillModeCheckBox.clicked.connect(self.stillModeUserPushCB) \n self.osc_range_ledit.textChanged[str].connect(functools.partial(self.totalExpChanged,\"oscRange\"))\n colExptimeLabel.setFixedWidth(140)\n colExptimeLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.exp_time_ledit = QtWidgets.QLineEdit()\n self.exp_time_ledit.setFixedWidth(60)\n self.exp_time_ledit.textChanged[str].connect(self.totalExpChanged) \n self.exp_time_ledit.setValidator(QtGui.QDoubleValidator(VALID_EXP_TIMES[daq_utils.beamline]['min'], VALID_EXP_TIMES[daq_utils.beamline]['max'], VALID_EXP_TIMES[daq_utils.beamline]['digits']))\n self.exp_time_ledit.textChanged.connect(self.checkEntryState)\n hBoxColParams2.addWidget(colRangeLabel)\n hBoxColParams2.addWidget(self.osc_range_ledit)\n\n hBoxColParams2.addWidget(colExptimeLabel)\n hBoxColParams2.addWidget(self.exp_time_ledit)\n hBoxColParams25 = QtWidgets.QHBoxLayout()\n hBoxColParams25.addWidget(self.stillModeCheckBox) \n totalExptimeLabel = QtWidgets.QLabel('Total Exposure Time (s):')\n totalExptimeLabel.setFixedWidth(155)\n totalExptimeLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.totalExptime_ledit = QtWidgets.QLineEdit() \n self.totalExptime_ledit.setReadOnly(True)\n self.totalExptime_ledit.setFrame(False)\n self.totalExptime_ledit.setFixedWidth(60)\n self.totalExptime_ledit.setValidator(QtGui.QDoubleValidator(VALID_TOTAL_EXP_TIMES[daq_utils.beamline]['min'],\n VALID_TOTAL_EXP_TIMES[daq_utils.beamline]['max'], VALID_TOTAL_EXP_TIMES[daq_utils.beamline]['digits']))\n self.totalExptime_ledit.textChanged.connect(self.checkEntryState)\n\n sampleLifetimeLabel = QtWidgets.QLabel('Estimated Sample Lifetime (s): ') \n if (daq_utils.beamline == \"amx\"): \n self.sampleLifetimeReadback = QtEpicsPVLabel(daq_utils.pvLookupDict[\"sampleLifetime\"],self,70,2)\n self.sampleLifetimeReadback_ledit = self.sampleLifetimeReadback.getEntry()\n else:\n calcLifetimeButton = QtWidgets.QPushButton(\"Calc. 
Lifetime\")\n calcLifetimeButton.clicked.connect(self.calcLifetimeCB)\n self.sampleLifetimeReadback_ledit = QtWidgets.QLabel()\n self.calcLifetimeCB()\n hBoxColParams25.addWidget(totalExptimeLabel)\n hBoxColParams25.addWidget(self.totalExptime_ledit)\n if (daq_utils.beamline == \"fmx\"):\n hBoxColParams25.addWidget(calcLifetimeButton)\n hBoxColParams25.addWidget(sampleLifetimeLabel)\n hBoxColParams25.addWidget(self.sampleLifetimeReadback_ledit)\n hBoxColParams22 = QtWidgets.QHBoxLayout()\n if (daq_utils.beamline == \"fmx\"):\n if (getBlConfig(\"attenType\") == \"RI\"):\n self.transmissionReadback = QtEpicsPVLabel(daq_utils.pvLookupDict[\"RI_Atten_SP\"],self,60,3)\n self.transmissionSetPoint = QtEpicsPVEntry(daq_utils.pvLookupDict[\"RI_Atten_SP\"],self,60,3)\n colTransmissionLabel = QtWidgets.QLabel('Transmission (RI) (0.0-1.0):') \n else:\n self.transmissionReadback = QtEpicsPVLabel(daq_utils.pvLookupDict[\"transmissionRBV\"],self,60,3)\n self.transmissionSetPoint = QtEpicsPVEntry(daq_utils.pvLookupDict[\"transmissionSet\"],self,60,3)\n colTransmissionLabel = QtWidgets.QLabel('Transmission (BCU) (0.0-1.0):') \n else:\n self.transmissionReadback = QtEpicsPVLabel(daq_utils.pvLookupDict[\"transmissionRBV\"],self,60,3)\n self.transmissionSetPoint = QtEpicsPVEntry(daq_utils.pvLookupDict[\"transmissionSet\"],self,60,3)\n colTransmissionLabel = QtWidgets.QLabel('Transmission (0.0-1.0):') \n self.transmissionReadback_ledit = self.transmissionReadback.getEntry()\n\n colTransmissionLabel.setAlignment(QtCore.Qt.AlignCenter) \n colTransmissionLabel.setFixedWidth(190)\n \n transmisionSPLabel = QtWidgets.QLabel(\"SetPoint:\")\n\n self.transmission_ledit = self.transmissionSetPoint.getEntry()\n self.setGuiValues({'transmission':getBlConfig(\"stdTrans\")})\n self.transmission_ledit.returnPressed.connect(self.setTransCB) \n setTransButton = QtWidgets.QPushButton(\"Set Trans\")\n setTransButton.clicked.connect(self.setTransCB)\n beamsizeLabel = QtWidgets.QLabel(\"BeamSize:\") \n 
beamSizeOptionList = [\"V0H0\",\"V0H1\",\"V1H0\",\"V1H1\"]\n self.beamsizeComboBox = QtWidgets.QComboBox(self)\n self.beamsizeComboBox.addItems(beamSizeOptionList)\n self.beamsizeComboBox.setCurrentIndex(int(self.beamSize_pv.get()))\n self.beamsizeComboBox.activated[str].connect(self.beamsizeComboActivatedCB)\n if (daq_utils.beamline == \"amx\" or self.energy_pv.get() < 9000):\n self.beamsizeComboBox.setEnabled(False)\n hBoxColParams3 = QtWidgets.QHBoxLayout()\n colEnergyLabel = QtWidgets.QLabel('Energy (eV):')\n colEnergyLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.energyMotorEntry = QtEpicsPVLabel(daq_utils.motor_dict[\"energy\"]+ \".RBV\",self,70,2)\n self.energyReadback = self.energyMotorEntry.getEntry()\n energySPLabel = QtWidgets.QLabel(\"SetPoint:\")\n self.energyMoveLedit = QtEpicsPVEntry(daq_utils.motor_dict[\"energy\"] + \".VAL\",self,75,2)\n self.energy_ledit = self.energyMoveLedit.getEntry()\n self.energy_ledit.returnPressed.connect(self.moveEnergyCB) \n moveEnergyButton = QtWidgets.QPushButton(\"Move Energy\")\n moveEnergyButton.clicked.connect(self.moveEnergyCB) \n hBoxColParams3.addWidget(colEnergyLabel)\n hBoxColParams3.addWidget(self.energyReadback)\n hBoxColParams3.addWidget(energySPLabel) \n hBoxColParams3.addWidget(self.energy_ledit)\n hBoxColParams22.addWidget(colTransmissionLabel)\n hBoxColParams22.addWidget(self.transmissionReadback_ledit)\n hBoxColParams22.addWidget(transmisionSPLabel)\n hBoxColParams22.addWidget(self.transmission_ledit)\n hBoxColParams22.insertSpacing(5,100)\n hBoxColParams22.addWidget(beamsizeLabel)\n hBoxColParams22.addWidget(self.beamsizeComboBox) \n hBoxColParams4 = QtWidgets.QHBoxLayout()\n colBeamWLabel = QtWidgets.QLabel('Beam Width:')\n colBeamWLabel.setFixedWidth(140)\n colBeamWLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.beamWidth_ledit = QtWidgets.QLineEdit()\n self.beamWidth_ledit.setFixedWidth(60)\n colBeamHLabel = QtWidgets.QLabel('Beam Height:')\n colBeamHLabel.setFixedWidth(140)\n 
colBeamHLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.beamHeight_ledit = QtWidgets.QLineEdit()\n self.beamHeight_ledit.setFixedWidth(60)\n hBoxColParams4.addWidget(colBeamWLabel)\n hBoxColParams4.addWidget(self.beamWidth_ledit)\n hBoxColParams4.addWidget(colBeamHLabel)\n hBoxColParams4.addWidget(self.beamHeight_ledit)\n hBoxColParams5 = QtWidgets.QHBoxLayout()\n colResoLabel = QtWidgets.QLabel('Edge Resolution:')\n colResoLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.resolution_ledit = QtWidgets.QLineEdit()\n self.resolution_ledit.setFixedWidth(60)\n self.resolution_ledit.textEdited[str].connect(self.resoTextChanged)\n detDistLabel = QtWidgets.QLabel('Detector Dist.')\n detDistLabel.setAlignment(QtCore.Qt.AlignCenter) \n detDistRBLabel = QtWidgets.QLabel(\"Readback:\")\n self.detDistRBVLabel = QtEpicsPVLabel(daq_utils.motor_dict[\"detectorDist\"] + \".RBV\",self,70) \n detDistSPLabel = QtWidgets.QLabel(\"SetPoint:\")\n self.detDistMotorEntry = QtEpicsPVEntry(daq_utils.motor_dict[\"detectorDist\"] + \".VAL\",self,70,2)\n self.detDistMotorEntry.getEntry().setValidator(QtGui.QDoubleValidator(VALID_DET_DIST[daq_utils.beamline]['min'],\n VALID_DET_DIST[daq_utils.beamline]['max'], VALID_DET_DIST[daq_utils.beamline]['digits']))\n self.detDistMotorEntry.getEntry().textChanged[str].connect(self.detDistTextChanged)\n self.detDistMotorEntry.getEntry().textChanged[str].connect(self.checkEntryState)\n self.detDistMotorEntry.getEntry().returnPressed.connect(self.moveDetDistCB) \n self.moveDetDistButton = QtWidgets.QPushButton(\"Move Detector\")\n self.moveDetDistButton.clicked.connect(self.moveDetDistCB)\n hBoxColParams3.addWidget(detDistLabel)\n hBoxColParams3.addWidget(self.detDistRBVLabel.getEntry())\n hBoxColParams3.addWidget(detDistSPLabel) \n hBoxColParams3.addWidget(self.detDistMotorEntry.getEntry())\n hBoxColParams6 = QtWidgets.QHBoxLayout()\n hBoxColParams6.setAlignment(QtCore.Qt.AlignLeft) \n hBoxColParams7 = QtWidgets.QHBoxLayout()\n 
hBoxColParams7.setAlignment(QtCore.Qt.AlignLeft) \n centeringLabel = QtWidgets.QLabel('Sample Centering:')\n centeringLabel.setFixedWidth(140) \n centeringOptionList = [\"Interactive\",\"AutoLoop\",\"AutoRaster\",\"Testing\"]\n self.centeringComboBox = QtWidgets.QComboBox(self)\n self.centeringComboBox.addItems(centeringOptionList)\n protoLabel = QtWidgets.QLabel('Protocol:')\n font = QtGui.QFont()\n font.setBold(True)\n protoLabel.setFont(font)\n protoLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.protoRadioGroup=QtWidgets.QButtonGroup()\n self.protoStandardRadio = QtWidgets.QRadioButton(\"standard\")\n self.protoStandardRadio.setChecked(True)\n self.protoStandardRadio.toggled.connect(functools.partial(self.protoRadioToggledCB,\"standard\"))\n self.protoStandardRadio.pressed.connect(functools.partial(self.protoRadioToggledCB,\"standard\")) \n self.protoRadioGroup.addButton(self.protoStandardRadio)\n self.protoRasterRadio = QtWidgets.QRadioButton(\"raster\")\n self.protoRasterRadio.toggled.connect(functools.partial(self.protoRadioToggledCB,\"raster\"))\n self.protoRasterRadio.pressed.connect(functools.partial(self.protoRadioToggledCB,\"raster\")) \n self.protoRadioGroup.addButton(self.protoRasterRadio)\n self.protoVectorRadio = QtWidgets.QRadioButton(\"vector\")\n self.protoRasterRadio.toggled.connect(functools.partial(self.protoRadioToggledCB,\"vector\"))\n self.protoRasterRadio.pressed.connect(functools.partial(self.protoRadioToggledCB,\"vector\")) \n self.protoRadioGroup.addButton(self.protoVectorRadio)\n self.protoOtherRadio = QtWidgets.QRadioButton(\"other\")\n self.protoOtherRadio.setEnabled(False)\n self.protoRadioGroup.addButton(self.protoOtherRadio)\n protoOptionList = [\"standard\",\"screen\",\"raster\",\"vector\",\"burn\",\"eScan\",\"rasterScreen\",\"stepRaster\",\"stepVector\",\"multiCol\",\"characterize\",\"ednaCol\",\"specRaster\"] # these should probably come from db\n self.protoComboBox = QtWidgets.QComboBox(self)\n 
self.protoComboBox.addItems(protoOptionList)\n self.protoComboBox.activated[str].connect(self.protoComboActivatedCB) \n hBoxColParams6.addWidget(protoLabel)\n hBoxColParams6.addWidget(self.protoStandardRadio)\n hBoxColParams6.addWidget(self.protoRasterRadio)\n hBoxColParams6.addWidget(self.protoVectorRadio) \n hBoxColParams6.addWidget(self.protoComboBox)\n hBoxColParams7.addWidget(centeringLabel)\n hBoxColParams7.addWidget(self.centeringComboBox)\n hBoxColParams7.addWidget(colResoLabel)\n hBoxColParams7.addWidget(self.resolution_ledit)\n self.processingOptionsFrame = QFrame()\n self.hBoxProcessingLayout1= QtWidgets.QHBoxLayout() \n self.hBoxProcessingLayout1.setAlignment(QtCore.Qt.AlignLeft) \n procOptionLabel = QtWidgets.QLabel('Processing Options:')\n procOptionLabel.setFixedWidth(200)\n self.autoProcessingCheckBox = QCheckBox(\"AutoProcessing On\")\n self.autoProcessingCheckBox.setChecked(True)\n self.autoProcessingCheckBox.stateChanged.connect(self.autoProcessingCheckCB)\n self.fastDPCheckBox = QCheckBox(\"FastDP\")\n self.fastDPCheckBox.setChecked(False)\n self.fastEPCheckBox = QCheckBox(\"FastEP\")\n self.fastEPCheckBox.setChecked(False)\n self.fastEPCheckBox.setEnabled(False)\n self.dimpleCheckBox = QCheckBox(\"Dimple\")\n self.dimpleCheckBox.setChecked(True) \n self.xia2CheckBox = QCheckBox(\"Xia2\")\n self.xia2CheckBox.setChecked(False)\n self.hBoxProcessingLayout1.addWidget(self.autoProcessingCheckBox) \n self.hBoxProcessingLayout1.addWidget(self.fastDPCheckBox)\n self.hBoxProcessingLayout1.addWidget(self.fastEPCheckBox)\n self.hBoxProcessingLayout1.addWidget(self.dimpleCheckBox) \n self.processingOptionsFrame.setLayout(self.hBoxProcessingLayout1)\n self.rasterParamsFrame = QFrame()\n self.vBoxRasterParams = QtWidgets.QVBoxLayout()\n self.hBoxRasterLayout1= QtWidgets.QHBoxLayout() \n self.hBoxRasterLayout1.setAlignment(QtCore.Qt.AlignLeft) \n self.hBoxRasterLayout2= QtWidgets.QHBoxLayout() \n self.hBoxRasterLayout2.setAlignment(QtCore.Qt.AlignLeft) \n 
rasterStepLabel = QtWidgets.QLabel('Raster Step')\n rasterStepLabel.setFixedWidth(110)\n self.rasterStepEdit = QtWidgets.QLineEdit(str(self.rasterStepDefs[\"Coarse\"]))\n self.rasterStepEdit.textChanged[str].connect(self.rasterStepChanged) \n self.rasterStepEdit.setFixedWidth(60)\n self.rasterGrainRadioGroup=QtWidgets.QButtonGroup()\n self.rasterGrainCoarseRadio = QtWidgets.QRadioButton(\"Coarse\")\n self.rasterGrainCoarseRadio.setChecked(False)\n self.rasterGrainCoarseRadio.toggled.connect(functools.partial(self.rasterGrainToggledCB,\"Coarse\"))\n self.rasterGrainRadioGroup.addButton(self.rasterGrainCoarseRadio)\n self.rasterGrainFineRadio = QtWidgets.QRadioButton(\"Fine\")\n self.rasterGrainFineRadio.setChecked(False)\n self.rasterGrainFineRadio.toggled.connect(functools.partial(self.rasterGrainToggledCB,\"Fine\"))\n self.rasterGrainRadioGroup.addButton(self.rasterGrainFineRadio)\n self.rasterGrainVFineRadio = QtWidgets.QRadioButton(\"VFine\")\n self.rasterGrainVFineRadio.setChecked(False)\n self.rasterGrainVFineRadio.toggled.connect(functools.partial(self.rasterGrainToggledCB,\"VFine\"))\n self.rasterGrainRadioGroup.addButton(self.rasterGrainVFineRadio)\n self.rasterGrainCustomRadio = QtWidgets.QRadioButton(\"Custom\")\n self.rasterGrainCustomRadio.setChecked(True)\n self.rasterGrainCustomRadio.toggled.connect(functools.partial(self.rasterGrainToggledCB,\"Custom\"))\n self.rasterGrainRadioGroup.addButton(self.rasterGrainCustomRadio)\n rasterEvalLabel = QtWidgets.QLabel('Raster\\nEvaluate By:')\n rasterEvalOptionList = [\"Spot Count\",\"Resolution\",\"Intensity\"]\n self.rasterEvalComboBox = QtWidgets.QComboBox(self)\n self.rasterEvalComboBox.addItems(rasterEvalOptionList)\n self.rasterEvalComboBox.setCurrentIndex(db_lib.beamlineInfo(daq_utils.beamline,'rasterScoreFlag')[\"index\"])\n self.rasterEvalComboBox.activated[str].connect(self.rasterEvalComboActivatedCB)\n self.hBoxRasterLayout1.addWidget(rasterStepLabel)\n 
self.hBoxRasterLayout1.addWidget(self.rasterStepEdit)\n self.hBoxRasterLayout1.addWidget(self.rasterGrainCoarseRadio)\n self.hBoxRasterLayout1.addWidget(self.rasterGrainFineRadio)\n self.hBoxRasterLayout1.addWidget(self.rasterGrainVFineRadio) \n self.hBoxRasterLayout1.addWidget(self.rasterGrainCustomRadio)\n self.hBoxRasterLayout1.addWidget(rasterEvalLabel)\n self.hBoxRasterLayout1.addWidget(self.rasterEvalComboBox)\n self.vBoxRasterParams.addLayout(self.hBoxRasterLayout1)\n self.vBoxRasterParams.addLayout(self.hBoxRasterLayout2) \n self.rasterParamsFrame.setLayout(self.vBoxRasterParams)\n self.multiColParamsFrame = QFrame() #something for criteria to decide on which hotspots to collect on for multi-xtal\n self.hBoxMultiColParamsLayout1 = QtWidgets.QHBoxLayout()\n self.hBoxMultiColParamsLayout1.setAlignment(QtCore.Qt.AlignLeft)\n multiColCutoffLabel = QtWidgets.QLabel('Diffraction Cutoff')\n multiColCutoffLabel.setFixedWidth(110)\n self.multiColCutoffEdit = QtWidgets.QLineEdit(\"320\") #may need to store this in DB at some point, it's a silly number for now\n self.multiColCutoffEdit.setFixedWidth(60)\n self.hBoxMultiColParamsLayout1.addWidget(multiColCutoffLabel)\n self.hBoxMultiColParamsLayout1.addWidget(self.multiColCutoffEdit)\n self.multiColParamsFrame.setLayout(self.hBoxMultiColParamsLayout1)\n self.characterizeParamsFrame = QFrame()\n vBoxCharacterizeParams1 = QtWidgets.QVBoxLayout()\n self.hBoxCharacterizeLayout1= QtWidgets.QHBoxLayout() \n self.characterizeTargetLabel = QtWidgets.QLabel('Characterization Targets') \n characterizeResoLabel = QtWidgets.QLabel('Resolution')\n characterizeResoLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.characterizeResoEdit = QtWidgets.QLineEdit(\"3.0\")\n characterizeISIGLabel = QtWidgets.QLabel('I/Sigma')\n characterizeISIGLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.characterizeISIGEdit = QtWidgets.QLineEdit(\"2.0\")\n self.characterizeAnomCheckBox = QCheckBox(\"Anomolous\")\n 
self.characterizeAnomCheckBox.setChecked(False)\n self.hBoxCharacterizeLayout2 = QtWidgets.QHBoxLayout() \n characterizeCompletenessLabel = QtWidgets.QLabel('Completeness')\n characterizeCompletenessLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.characterizeCompletenessEdit = QtWidgets.QLineEdit(\"0.99\")\n characterizeMultiplicityLabel = QtWidgets.QLabel('Multiplicity')\n characterizeMultiplicityLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.characterizeMultiplicityEdit = QtWidgets.QLineEdit(\"auto\")\n characterizeDoseLimitLabel = QtWidgets.QLabel('Dose Limit')\n characterizeDoseLimitLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.characterizeDoseLimitEdit = QtWidgets.QLineEdit(\"100\")\n characterizeSpaceGroupLabel = QtWidgets.QLabel('Space Group')\n characterizeSpaceGroupLabel.setAlignment(QtCore.Qt.AlignCenter) \n self.characterizeSpaceGroupEdit = QtWidgets.QLineEdit(\"P1\")\n self.hBoxCharacterizeLayout1.addWidget(characterizeResoLabel)\n self.hBoxCharacterizeLayout1.addWidget(self.characterizeResoEdit)\n self.hBoxCharacterizeLayout1.addWidget(characterizeISIGLabel)\n self.hBoxCharacterizeLayout1.addWidget(self.characterizeISIGEdit)\n self.hBoxCharacterizeLayout1.addWidget(characterizeSpaceGroupLabel)\n self.hBoxCharacterizeLayout1.addWidget(self.characterizeSpaceGroupEdit) \n self.hBoxCharacterizeLayout1.addWidget(self.characterizeAnomCheckBox) \n self.hBoxCharacterizeLayout2.addWidget(characterizeCompletenessLabel)\n self.hBoxCharacterizeLayout2.addWidget(self.characterizeCompletenessEdit)\n self.hBoxCharacterizeLayout2.addWidget(characterizeMultiplicityLabel)\n self.hBoxCharacterizeLayout2.addWidget(self.characterizeMultiplicityEdit)\n self.hBoxCharacterizeLayout2.addWidget(characterizeDoseLimitLabel)\n self.hBoxCharacterizeLayout2.addWidget(self.characterizeDoseLimitEdit)\n vBoxCharacterizeParams1.addWidget(self.characterizeTargetLabel)\n vBoxCharacterizeParams1.addLayout(self.hBoxCharacterizeLayout1)\n 
vBoxCharacterizeParams1.addLayout(self.hBoxCharacterizeLayout2)\n self.characterizeParamsFrame.setLayout(vBoxCharacterizeParams1)\n self.vectorParamsFrame = QFrame()\n hBoxVectorLayout1= QtWidgets.QHBoxLayout() \n setVectorStartButton = QtWidgets.QPushButton(\"Vector\\nStart\") \n setVectorStartButton.clicked.connect(self.setVectorStartCB)\n setVectorEndButton = QtWidgets.QPushButton(\"Vector\\nEnd\") \n setVectorEndButton.clicked.connect(self.setVectorEndCB)\n vectorFPPLabel = QtWidgets.QLabel(\"Number of Wedges\")\n self.vectorFPP_ledit = QtWidgets.QLineEdit(\"1\")\n vecLenLabel = QtWidgets.QLabel(\" Length(microns):\")\n self.vecLenLabelOutput = QtWidgets.QLabel(\"---\")\n vecSpeedLabel = QtWidgets.QLabel(\" Speed(microns/s):\")\n self.vecSpeedLabelOutput = QtWidgets.QLabel(\"---\")\n hBoxVectorLayout1.addWidget(setVectorStartButton)\n hBoxVectorLayout1.addWidget(setVectorEndButton)\n hBoxVectorLayout1.addWidget(vectorFPPLabel)\n hBoxVectorLayout1.addWidget(self.vectorFPP_ledit)\n hBoxVectorLayout1.addWidget(vecLenLabel)\n hBoxVectorLayout1.addWidget(self.vecLenLabelOutput)\n hBoxVectorLayout1.addWidget(vecSpeedLabel)\n hBoxVectorLayout1.addWidget(self.vecSpeedLabelOutput) \n self.vectorParamsFrame.setLayout(hBoxVectorLayout1)\n vBoxColParams1.addLayout(hBoxColParams1)\n vBoxColParams1.addLayout(hBoxColParams2)\n vBoxColParams1.addLayout(hBoxColParams25) \n vBoxColParams1.addLayout(hBoxColParams22) \n vBoxColParams1.addLayout(hBoxColParams3)\n vBoxColParams1.addLayout(hBoxColParams7)\n vBoxColParams1.addLayout(hBoxColParams6) \n vBoxColParams1.addWidget(self.rasterParamsFrame)\n vBoxColParams1.addWidget(self.multiColParamsFrame)\n vBoxColParams1.addWidget(self.vectorParamsFrame)\n vBoxColParams1.addWidget(self.characterizeParamsFrame)\n vBoxColParams1.addWidget(self.processingOptionsFrame)\n self.rasterParamsFrame.hide()\n self.multiColParamsFrame.hide()\n self.characterizeParamsFrame.hide()\n colParamsGB.setLayout(vBoxColParams1)\n self.dataPathGB = 
DataLocInfo(self)\n hBoxDisplayOptionLayout= QtWidgets.QHBoxLayout() \n self.albulaDispCheckBox = QCheckBox(\"Display Data (Albula)\")\n self.albulaDispCheckBox.setChecked(False)\n hBoxDisplayOptionLayout.addWidget(self.albulaDispCheckBox)\n vBoxMainColLayout.addWidget(colParamsGB)\n vBoxMainColLayout.addWidget(self.dataPathGB)\n vBoxMainColLayout.addLayout(hBoxDisplayOptionLayout)\n self.mainColFrame.setLayout(vBoxMainColLayout)\n self.mainToolBox.addItem(self.mainColFrame,\"Collection Parameters\") \n editSampleButton = QtWidgets.QPushButton(\"Apply Changes\") \n editSampleButton.clicked.connect(self.editSelectedRequestsCB)\n cloneRequestButton = QtWidgets.QPushButton(\"Clone Raster Request\") \n cloneRequestButton.clicked.connect(self.cloneRequestCB)\n hBoxPriorityLayout1= QtWidgets.QHBoxLayout() \n priorityEditLabel = QtWidgets.QLabel(\"Priority Edit\")\n priorityTopButton = QtWidgets.QPushButton(\" >> \")\n priorityUpButton = QtWidgets.QPushButton(\" > \")\n priorityDownButton = QtWidgets.QPushButton(\" < \")\n priorityBottomButton=QtWidgets.QPushButton(\" << \")\n priorityTopButton.clicked.connect(self.topPriorityCB)\n priorityBottomButton.clicked.connect(self.bottomPriorityCB)\n priorityUpButton.clicked.connect(self.upPriorityCB)\n priorityDownButton.clicked.connect(self.downPriorityCB)\n hBoxPriorityLayout1.addWidget(priorityEditLabel)\n hBoxPriorityLayout1.addWidget(priorityBottomButton)\n hBoxPriorityLayout1.addWidget(priorityDownButton)\n hBoxPriorityLayout1.addWidget(priorityUpButton)\n hBoxPriorityLayout1.addWidget(priorityTopButton)\n queueSampleButton = QtWidgets.QPushButton(\"Add Requests to Queue\") \n queueSampleButton.clicked.connect(self.addRequestsToAllSelectedCB)\n deleteSampleButton = QtWidgets.QPushButton(\"Delete Requests\") \n deleteSampleButton.clicked.connect(functools.partial(self.dewarTree.deleteSelectedCB,0))\n editScreenParamsButton = QtWidgets.QPushButton(\"Edit Raster Params...\") \n 
editScreenParamsButton.clicked.connect(self.editScreenParamsCB)\n vBoxMainSetup.addWidget(self.mainToolBox)\n vBoxMainSetup.addLayout(hBoxPriorityLayout1)\n vBoxMainSetup.addWidget(queueSampleButton)\n vBoxMainSetup.addWidget(editSampleButton)\n vBoxMainSetup.addWidget(cloneRequestButton)\n\n vBoxMainSetup.addWidget(editScreenParamsButton)\n self.mainSetupFrame.setLayout(vBoxMainSetup)\n self.VidFrame = QFrame()\n self.VidFrame.setFixedWidth(680)\n vBoxVidLayout= QtWidgets.QVBoxLayout()\n self.captureLowMag = None\n self.captureHighMag = None\n self.captureHighMagZoom = None \n self.captureLowMagZoom = None \n if (daq_utils.has_xtalview):\n if (self.zoom3FrameRatePV.get() != 0): \n _thread.start_new_thread(self.initVideo2,(.25,)) #highMag\n if (self.zoom4FrameRatePV.get() != 0): \n _thread.start_new_thread(self.initVideo4,(.25,)) #this sets up highMagDigiZoom\n if (self.zoom2FrameRatePV.get() != 0): \n _thread.start_new_thread(self.initVideo3,(.25,)) #this sets up lowMagDigiZoom\n if (self.zoom1FrameRatePV.get() != 0):\n self.captureLowMag=cv2.VideoCapture(daq_utils.lowMagCamURL)\n logger.debug('lowMagCamURL: \"' + daq_utils.lowMagCamURL + '\"')\n self.capture = self.captureLowMag\n self.timerHutch = QTimer()\n self.timerHutch.timeout.connect(self.timerHutchRefresh)\n self.timerHutch.start(HUTCH_TIMER_DELAY)\n\n self.timerSample = QTimer()\n self.timerSample.timeout.connect(self.timerSampleRefresh)\n self.timerSample.start(SAMPLE_TIMER_DELAY)\n self.centeringMarksList = []\n self.rasterList = []\n self.rasterDefList = []\n self.polyPointItems = []\n self.rasterPoly = None\n self.measureLine = None\n self.scene = QtWidgets.QGraphicsScene(0,0,640,512,self)\n hBoxHutchVidsLayout= QtWidgets.QHBoxLayout()\n self.sceneHutchCorner = QtWidgets.QGraphicsScene(0,0,320,180,self)\n self.sceneHutchTop = QtWidgets.QGraphicsScene(0,0,320,180,self) \n self.scene.keyPressEvent = self.sceneKey\n self.view = QtWidgets.QGraphicsView(self.scene)\n self.viewHutchCorner = 
QtWidgets.QGraphicsView(self.sceneHutchCorner)\n self.viewHutchTop = QtWidgets.QGraphicsView(self.sceneHutchTop) \n self.pixmap_item = QtWidgets.QGraphicsPixmapItem(None)\n self.scene.addItem(self.pixmap_item)\n self.pixmap_item_HutchCorner = QtWidgets.QGraphicsPixmapItem(None)\n self.sceneHutchCorner.addItem(self.pixmap_item_HutchCorner)\n self.pixmap_item_HutchTop = QtWidgets.QGraphicsPixmapItem(None)\n self.sceneHutchTop.addItem(self.pixmap_item_HutchTop)\n\n self.pixmap_item.mousePressEvent = self.pixelSelect\n centerMarkBrush = QtGui.QBrush(QtCore.Qt.blue) \n centerMarkPen = QtGui.QPen(centerMarkBrush,2.0)\n self.centerMarker = QtWidgets.QGraphicsSimpleTextItem(\"+\")\n self.centerMarker.setZValue(10.0)\n self.centerMarker.setBrush(centerMarkBrush)\n font = QtGui.QFont('DejaVu Sans Light', self.centerMarkerCharSize,weight=0)\n self.centerMarker.setFont(font) \n self.scene.addItem(self.centerMarker)\n self.centerMarker.setPos(daq_utils.screenPixCenterX-self.centerMarkerCharOffsetX,daq_utils.screenPixCenterY-self.centerMarkerCharOffsetY)\n self.zoomRadioGroup=QtWidgets.QButtonGroup()\n self.zoom1Radio = QtWidgets.QRadioButton(\"Mag1\")\n self.zoom1Radio.setChecked(True)\n self.zoom1Radio.toggled.connect(functools.partial(self.zoomLevelToggledCB,\"Zoom1\"))\n self.zoomRadioGroup.addButton(self.zoom1Radio)\n self.zoom2Radio = QtWidgets.QRadioButton(\"Mag2\")\n self.zoom2Radio.toggled.connect(functools.partial(self.zoomLevelToggledCB,\"Zoom2\"))\n self.zoomRadioGroup.addButton(self.zoom2Radio)\n self.zoom3Radio = QtWidgets.QRadioButton(\"Mag3\")\n self.zoom3Radio.toggled.connect(functools.partial(self.zoomLevelToggledCB,\"Zoom3\"))\n self.zoomRadioGroup.addButton(self.zoom3Radio)\n self.zoom4Radio = QtWidgets.QRadioButton(\"Mag4\")\n self.zoom4Radio.toggled.connect(functools.partial(self.zoomLevelToggledCB,\"Zoom4\"))\n self.zoomRadioGroup.addButton(self.zoom4Radio)\n beamOverlayPen = QtGui.QPen(QtCore.Qt.red)\n self.tempBeamSizeXMicrons = 30\n 
self.tempBeamSizeYMicrons = 30 \n self.beamSizeXPixels = self.screenXmicrons2pixels(self.tempBeamSizeXMicrons)\n self.beamSizeYPixels = self.screenYmicrons2pixels(self.tempBeamSizeYMicrons)\n self.overlayPosOffsetX = self.centerMarkerCharOffsetX-1\n self.overlayPosOffsetY = self.centerMarkerCharOffsetY-1 \n self.beamSizeOverlay = QtWidgets.QGraphicsRectItem(self.centerMarker.x()-self.overlayPosOffsetX,self.centerMarker.y()-self.overlayPosOffsetY,self.beamSizeXPixels,self.beamSizeYPixels)\n self.beamSizeOverlay.setPen(beamOverlayPen)\n self.scene.addItem(self.beamSizeOverlay)\n self.beamSizeOverlay.setVisible(False)\n self.beamSizeOverlay.setRect(self.overlayPosOffsetX+self.centerMarker.x()-(self.beamSizeXPixels/2),self.overlayPosOffsetY+self.centerMarker.y()-(self.beamSizeYPixels/2),self.beamSizeXPixels,self.beamSizeYPixels)\n scaleBrush = QtGui.QBrush(QtCore.Qt.blue) \n scalePen = QtGui.QPen(scaleBrush,2.0)\n scaleTextPen = QtGui.QPen(scaleBrush,1.0)\n self.imageScaleLineLen = 50\n self.imageScale = self.scene.addLine(10,daq_utils.screenPixY-30,10+self.imageScaleLineLen, daq_utils.screenPixY-30, scalePen)\n self.imageScaleText = self.scene.addSimpleText(\"50 microns\",font=QtGui.QFont(\"Times\", 13)) \n self.imageScaleText.setPen(scaleTextPen)\n self.imageScaleText.setPos(10,450)\n self.click_positions = []\n self.vectorStartFlag = 0\n hBoxHutchVidsLayout.addWidget(self.viewHutchTop)\n hBoxHutchVidsLayout.addWidget(self.viewHutchCorner) \n vBoxVidLayout.addLayout(hBoxHutchVidsLayout)\n vBoxVidLayout.addWidget(self.view) \n hBoxSampleOrientationLayout = QtWidgets.QHBoxLayout()\n setDC2CPButton = QtWidgets.QPushButton(\"SetStart\")\n setDC2CPButton.clicked.connect(self.setDCStartCB) \n omegaLabel = QtWidgets.QLabel(\"Omega:\")\n omegaMonitorPV = str(getBlConfig(\"omegaMonitorPV\"))\n self.sampleOmegaRBVLedit = QtEpicsPVLabel(daq_utils.motor_dict[\"omega\"] + \".\" + omegaMonitorPV,self,70) \n omegaSPLabel = QtWidgets.QLabel(\"SetPoint:\")\n self.sampleOmegaMoveLedit 
= QtEpicsPVEntry(daq_utils.motor_dict[\"omega\"] + \".VAL\",self,70,2)\n self.sampleOmegaMoveLedit.getEntry().returnPressed.connect(self.moveOmegaCB)\n moveOmegaButton = QtWidgets.QPushButton(\"Move\")\n moveOmegaButton.clicked.connect(self.moveOmegaCB)\n omegaTweakNegButtonFine = QtWidgets.QPushButton(\"-5\") \n omegaTweakNegButton = QtWidgets.QPushButton(\"<\")\n omegaTweakNegButton.clicked.connect(self.omegaTweakNegCB)\n omegaTweakNegButtonFine.clicked.connect(functools.partial(self.omegaTweakCB,-5))\n self.omegaTweakVal_ledit = QtWidgets.QLineEdit()\n self.omegaTweakVal_ledit.setFixedWidth(60)\n self.omegaTweakVal_ledit.setText(\"90\")\n omegaTweakPosButtonFine = QtWidgets.QPushButton(\"+5\") \n omegaTweakPosButton = QtWidgets.QPushButton(\">\")\n omegaTweakPosButton.clicked.connect(self.omegaTweakPosCB)\n omegaTweakPosButtonFine.clicked.connect(functools.partial(self.omegaTweakCB,5))\n hBoxSampleOrientationLayout.addWidget(setDC2CPButton)\n hBoxSampleOrientationLayout.addWidget(omegaLabel)\n hBoxSampleOrientationLayout.addWidget(self.sampleOmegaRBVLedit.getEntry())\n hBoxSampleOrientationLayout.addWidget(omegaSPLabel)\n hBoxSampleOrientationLayout.addWidget(self.sampleOmegaMoveLedit.getEntry())\n spacerItem = QtWidgets.QSpacerItem(100, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n hBoxSampleOrientationLayout.insertSpacing(6,100)\n hBoxSampleOrientationLayout.addWidget(omegaTweakNegButtonFine)\n hBoxSampleOrientationLayout.addWidget(omegaTweakNegButton) \n hBoxSampleOrientationLayout.addWidget(self.omegaTweakVal_ledit)\n hBoxSampleOrientationLayout.addWidget(omegaTweakPosButton)\n hBoxSampleOrientationLayout.addWidget(omegaTweakPosButtonFine) \n hBoxSampleOrientationLayout.addStretch(1)\n hBoxVidControlLayout = QtWidgets.QHBoxLayout()\n lightLevelLabel = QtWidgets.QLabel(\"Light\")\n lightLevelLabel.setAlignment(QtCore.Qt.AlignRight|Qt.AlignVCenter) \n sampleBrighterButton = QtWidgets.QPushButton(\"+\")\n 
sampleBrighterButton.setFixedWidth(30)\n sampleBrighterButton.clicked.connect(self.lightUpCB)\n sampleDimmerButton = QtWidgets.QPushButton(\"-\")\n sampleDimmerButton.setFixedWidth(30)\n sampleDimmerButton.clicked.connect(self.lightDimCB)\n focusLabel = QtWidgets.QLabel(\"Focus\")\n focusLabel.setAlignment(QtCore.Qt.AlignRight|Qt.AlignVCenter) \n focusPlusButton = QtWidgets.QPushButton(\"+\")\n focusPlusButton.setFixedWidth(30)\n focusPlusButton.clicked.connect(functools.partial(self.focusTweakCB,5)) \n focusMinusButton = QtWidgets.QPushButton(\"-\")\n focusMinusButton.setFixedWidth(30)\n focusMinusButton.clicked.connect(functools.partial(self.focusTweakCB,-5))\n annealButton = QtWidgets.QPushButton(\"Anneal\")\n annealButton.clicked.connect(self.annealButtonCB)\n annealTimeLabel = QtWidgets.QLabel(\"Time\")\n self.annealTime_ledit = QtWidgets.QLineEdit()\n self.annealTime_ledit.setFixedWidth(40)\n self.annealTime_ledit.setText(\"0.5\")\n magLevelLabel = QtWidgets.QLabel(\"Vid:\")\n snapshotButton = QtWidgets.QPushButton(\"SnapShot\") \n snapshotButton.clicked.connect(self.saveVidSnapshotButtonCB)\n self.hideRastersCheckBox = QCheckBox(\"Hide\\nRasters\")\n self.hideRastersCheckBox.setChecked(False)\n self.hideRastersCheckBox.stateChanged.connect(self.hideRastersCB)\n hBoxVidControlLayout.addWidget(self.zoom1Radio)\n hBoxVidControlLayout.addWidget(self.zoom2Radio)\n hBoxVidControlLayout.addWidget(self.zoom3Radio)\n hBoxVidControlLayout.addWidget(self.zoom4Radio)\n hBoxVidControlLayout.addWidget(focusLabel)\n hBoxVidControlLayout.addWidget(focusPlusButton)\n hBoxVidControlLayout.addWidget(focusMinusButton) \n hBoxVidControlLayout.addWidget(lightLevelLabel)\n hBoxVidControlLayout.addWidget(sampleBrighterButton)\n hBoxVidControlLayout.addWidget(sampleDimmerButton)\n hBoxVidControlLayout.addWidget(annealButton)\n hBoxVidControlLayout.addWidget(annealTimeLabel)\n hBoxVidControlLayout.addWidget(self.annealTime_ledit) \n hBoxSampleAlignLayout = QtWidgets.QHBoxLayout()\n 
centerLoopButton = QtWidgets.QPushButton(\"Center\\nLoop\")\n centerLoopButton.clicked.connect(self.autoCenterLoopCB)\n measureButton = QtWidgets.QPushButton(\"Measure\")\n measureButton.clicked.connect(self.measurePolyCB)\n loopShapeButton = QtWidgets.QPushButton(\"Add Raster\\nto Queue\")\n loopShapeButton.clicked.connect(self.drawInteractiveRasterCB)\n runRastersButton = QtWidgets.QPushButton(\"Run\\nRaster\")\n runRastersButton.clicked.connect(self.runRastersCB)\n clearGraphicsButton = QtWidgets.QPushButton(\"Clear\")\n clearGraphicsButton.clicked.connect(self.eraseCB)\n self.click3Button = QtWidgets.QPushButton(\"3-Click\\nCenter\")\n self.click3Button.clicked.connect(self.center3LoopCB)\n self.threeClickCount = 0\n saveCenteringButton = QtWidgets.QPushButton(\"Save\\nCenter\")\n saveCenteringButton.clicked.connect(self.saveCenterCB)\n selectAllCenteringButton = QtWidgets.QPushButton(\"Select All\\nCenterings\")\n selectAllCenteringButton.clicked.connect(self.selectAllCenterCB)\n hBoxSampleAlignLayout.addWidget(centerLoopButton)\n hBoxSampleAlignLayout.addWidget(clearGraphicsButton)\n hBoxSampleAlignLayout.addWidget(saveCenteringButton)\n hBoxSampleAlignLayout.addWidget(selectAllCenteringButton)\n hBoxSampleAlignLayout.addWidget(self.click3Button)\n hBoxSampleAlignLayout.addWidget(snapshotButton)\n hBoxSampleAlignLayout.addWidget(self.hideRastersCheckBox) \n hBoxRadioLayout100= QtWidgets.QHBoxLayout()\n vidActionLabel = QtWidgets.QLabel(\"Video Click Mode:\") \n self.vidActionRadioGroup=QtWidgets.QButtonGroup()\n self.vidActionC2CRadio = QtWidgets.QRadioButton(\"C2C\")\n self.vidActionC2CRadio.setChecked(True)\n self.vidActionC2CRadio.toggled.connect(self.vidActionToggledCB)\n self.vidActionRadioGroup.addButton(self.vidActionC2CRadio) \n self.vidActionDefineCenterRadio = QtWidgets.QRadioButton(\"Define Center\")\n self.vidActionDefineCenterRadio.setChecked(False)\n self.vidActionDefineCenterRadio.setEnabled(False) \n 
self.vidActionDefineCenterRadio.toggled.connect(self.vidActionToggledCB)\n self.vidActionRadioGroup.addButton(self.vidActionDefineCenterRadio)\n self.vidActionRasterExploreRadio = QtWidgets.QRadioButton(\"Raster Explore\")\n self.vidActionRasterExploreRadio.setChecked(False)\n self.vidActionRasterExploreRadio.toggled.connect(self.vidActionToggledCB)\n self.vidActionRadioGroup.addButton(self.vidActionRasterExploreRadio)\n self.vidActionRasterSelectRadio = QtWidgets.QRadioButton(\"Raster Select\")\n self.vidActionRasterSelectRadio.setChecked(False)\n self.vidActionRasterSelectRadio.toggled.connect(self.vidActionToggledCB)\n self.vidActionRasterDefRadio = QtWidgets.QRadioButton(\"Define Raster\")\n self.vidActionRasterDefRadio.setChecked(False)\n self.vidActionRasterDefRadio.setEnabled(False)\n self.vidActionRasterDefRadio.toggled.connect(self.vidActionToggledCB)\n self.vidActionRadioGroup.addButton(self.vidActionRasterDefRadio)\n hBoxRadioLayout100.addWidget(vidActionLabel)\n hBoxRadioLayout100.addWidget(self.vidActionC2CRadio)\n hBoxRadioLayout100.addWidget(self.vidActionRasterExploreRadio)\n hBoxRadioLayout100.addWidget(self.vidActionRasterDefRadio)\n hBoxRadioLayout100.addWidget(self.vidActionDefineCenterRadio) \n vBoxVidLayout.addLayout(hBoxSampleOrientationLayout)\n vBoxVidLayout.addLayout(hBoxVidControlLayout)\n vBoxVidLayout.addLayout(hBoxSampleAlignLayout)\n vBoxVidLayout.addLayout(hBoxRadioLayout100)\n self.VidFrame.setLayout(vBoxVidLayout)\n splitter11.addWidget(self.mainSetupFrame)\n self.colTabs= QtWidgets.QTabWidget() \n self.energyFrame = QFrame()\n vBoxEScanFull = QtWidgets.QVBoxLayout()\n hBoxEScan = QtWidgets.QHBoxLayout()\n vBoxEScan = QtWidgets.QVBoxLayout()\n self.periodicTable = QPeriodicTable(butSize=20)\n self.periodicTable.elementClicked(\"Se\")\n vBoxEScan.addWidget(self.periodicTable)\n self.EScanDataPathGB = DataLocInfo(self)\n vBoxEScan.addWidget(self.EScanDataPathGB)\n hBoxEScanParams = QtWidgets.QHBoxLayout()\n hBoxEScanButtons = 
QtWidgets.QHBoxLayout() \n tempPlotButton = QtWidgets.QPushButton(\"Queue Requests\") \n tempPlotButton.clicked.connect(self.queueEnScanCB)\n clearEnscanPlotButton = QtWidgets.QPushButton(\"Clear\") \n clearEnscanPlotButton.clicked.connect(self.clearEnScanPlotCB) \n hBoxEScanButtons.addWidget(clearEnscanPlotButton)\n hBoxEScanButtons.addWidget(tempPlotButton)\n escanStepsLabel = QtWidgets.QLabel(\"Steps\") \n self.escan_steps_ledit = QtWidgets.QLineEdit()\n self.escan_steps_ledit.setText(\"41\")\n escanStepsizeLabel = QtWidgets.QLabel(\"Stepsize (EVs)\") \n self.escan_stepsize_ledit = QtWidgets.QLineEdit()\n self.escan_stepsize_ledit.setText(\"1\")\n hBoxEScanParams.addWidget(escanStepsLabel)\n hBoxEScanParams.addWidget(self.escan_steps_ledit)\n hBoxEScanParams.addWidget(escanStepsizeLabel)\n hBoxEScanParams.addWidget(self.escan_stepsize_ledit)\n hBoxChoochResults = QtWidgets.QHBoxLayout()\n hBoxChoochResults2 = QtWidgets.QHBoxLayout() \n choochResultsLabel = QtWidgets.QLabel(\"Chooch Results\")\n choochInflLabel = QtWidgets.QLabel(\"Infl\")\n self.choochInfl = QtWidgets.QLabel(\"\")\n self.choochInfl.setFixedWidth(70) \n choochPeakLabel = QtWidgets.QLabel(\"Peak\")\n self.choochPeak = QtWidgets.QLabel(\"\")\n self.choochPeak.setFixedWidth(70)\n choochInflFPrimeLabel = QtWidgets.QLabel(\"fPrimeInfl\")\n self.choochFPrimeInfl = QtWidgets.QLabel(\"\")\n self.choochFPrimeInfl.setFixedWidth(70) \n choochInflF2PrimeLabel = QtWidgets.QLabel(\"f2PrimeInfl\")\n self.choochF2PrimeInfl = QtWidgets.QLabel(\"\")\n self.choochF2PrimeInfl.setFixedWidth(70) \n choochPeakFPrimeLabel = QtWidgets.QLabel(\"fPrimePeak\")\n self.choochFPrimePeak = QtWidgets.QLabel(\"\")\n self.choochFPrimePeak.setFixedWidth(70) \n choochPeakF2PrimeLabel = QtWidgets.QLabel(\"f2PrimePeak\")\n self.choochF2PrimePeak = QtWidgets.QLabel(\"\")\n self.choochF2PrimePeak.setFixedWidth(70) \n hBoxChoochResults.addWidget(choochResultsLabel)\n hBoxChoochResults.addWidget(choochInflLabel)\n 
hBoxChoochResults.addWidget(self.choochInfl) \n hBoxChoochResults.addWidget(choochPeakLabel)\n hBoxChoochResults.addWidget(self.choochPeak) \n hBoxChoochResults2.addWidget(choochInflFPrimeLabel)\n hBoxChoochResults2.addWidget(self.choochFPrimeInfl)\n hBoxChoochResults2.addWidget(choochInflF2PrimeLabel) \n hBoxChoochResults2.addWidget(self.choochF2PrimeInfl) \n hBoxChoochResults2.addWidget(choochPeakFPrimeLabel)\n hBoxChoochResults2.addWidget(self.choochFPrimePeak)\n hBoxChoochResults2.addWidget(choochPeakF2PrimeLabel) \n hBoxChoochResults2.addWidget(self.choochF2PrimePeak) \n vBoxEScan.addLayout(hBoxEScanParams)\n vBoxEScan.addLayout(hBoxEScanButtons)\n vBoxEScan.addLayout(hBoxChoochResults)\n vBoxEScan.addLayout(hBoxChoochResults2) \n hBoxEScan.addLayout(vBoxEScan)\n verticalLine = QFrame()\n verticalLine.setFrameStyle(QFrame.VLine)\n self.EScanGraph = ScanWindow(self.energyFrame)\n hBoxEScan.addWidget(verticalLine)\n hBoxEScan.addWidget(self.EScanGraph)\n vBoxEScanFull.addLayout(hBoxEScan)\n self.choochGraph = ScanWindow(self.energyFrame) #TODO should be another type? 
need to be able to add curves\n vBoxEScanFull.addWidget(self.choochGraph)\n self.energyFrame.setLayout(vBoxEScanFull)\n splitter11.addWidget(self.VidFrame)\n self.colTabs.addTab(splitter11,\"Sample Control\")\n self.colTabs.addTab(self.energyFrame,\"Energy Scan\")\n splitter1.addWidget(self.colTabs)\n vBoxlayout.addWidget(splitter1)\n self.lastFileLabel2 = QtWidgets.QLabel('File:')\n self.lastFileLabel2.setFixedWidth(60)\n if (daq_utils.beamline == \"amx\"): \n self.lastFileRBV2 = QtEpicsPVLabel(\"XF:17IDB-ES:AMX{Det:Eig9M}cam1:FullFileName_RBV\",self,0) \n else:\n self.lastFileRBV2 = QtEpicsPVLabel(\"XF:17IDC-ES:FMX{Det:Eig16M}cam1:FullFileName_RBV\",self,0) \n fileHBoxLayout = QtWidgets.QHBoxLayout()\n fileHBoxLayout2 = QtWidgets.QHBoxLayout() \n self.controlMasterCheckBox = QCheckBox(\"Control Master\")\n self.controlMasterCheckBox.stateChanged.connect(self.changeControlMasterCB)\n self.controlMasterCheckBox.setChecked(False)\n fileHBoxLayout.addWidget(self.controlMasterCheckBox) \n self.statusLabel = QtEpicsPVLabel(daq_utils.beamlineComm+\"program_state\",self,150,highlight_on_change=False)\n fileHBoxLayout.addWidget(self.statusLabel.getEntry())\n self.shutterStateLabel = QtWidgets.QLabel('Shutter State:')\n governorMessageLabel = QtWidgets.QLabel('Governor Message:')\n self.governorMessage = QtEpicsPVLabel(daq_utils.pvLookupDict[\"governorMessage\"],self,140,highlight_on_change=False)\n ringCurrentMessageLabel = QtWidgets.QLabel('Ring(mA):')\n self.ringCurrentMessage = QtWidgets.QLabel(str(self.ringCurrent_pv.get()))\n beamAvailable = self.beamAvailable_pv.get()\n if (beamAvailable):\n self.beamAvailLabel = QtWidgets.QLabel(\"Beam Available\")\n self.beamAvailLabel.setStyleSheet(\"background-color: #99FF66;\") \n else:\n self.beamAvailLabel = QtWidgets.QLabel(\"No Beam\")\n self.beamAvailLabel.setStyleSheet(\"background-color: red;\") \n sampleExposed = self.sampleExposed_pv.get()\n if (sampleExposed):\n self.sampleExposedLabel = QtWidgets.QLabel(\"Sample 
Exposed\")\n self.sampleExposedLabel.setStyleSheet(\"background-color: red;\") \n else:\n self.sampleExposedLabel = QtWidgets.QLabel(\"Sample Not Exposed\")\n self.sampleExposedLabel.setStyleSheet(\"background-color: #99FF66;\") \n gripperLabel = QtWidgets.QLabel('Gripper Temp:')\n self.gripperTempLabel = QtWidgets.QLabel('%.1f' % self.gripTemp_pv.get())\n cryostreamLabel = QtWidgets.QLabel('Cryostream Temp:')\n self.cryostreamTempLabel = QtWidgets.QLabel(str(self.cryostreamTemp_pv.get()))\n\n fileHBoxLayout.addWidget(gripperLabel)\n fileHBoxLayout.addWidget(self.gripperTempLabel)\n fileHBoxLayout.addWidget(cryostreamLabel)\n fileHBoxLayout.addWidget(self.cryostreamTempLabel)\n fileHBoxLayout.addWidget(ringCurrentMessageLabel)\n fileHBoxLayout.addWidget(self.ringCurrentMessage)\n fileHBoxLayout.addWidget(self.beamAvailLabel)\n fileHBoxLayout.addWidget(self.sampleExposedLabel) \n fileHBoxLayout.addWidget(governorMessageLabel)\n fileHBoxLayout.addWidget(self.governorMessage.getEntry())\n fileHBoxLayout2.addWidget(self.lastFileLabel2)\n fileHBoxLayout2.addWidget(self.lastFileRBV2.getEntry())\n vBoxlayout.addLayout(fileHBoxLayout)\n vBoxlayout.addLayout(fileHBoxLayout2) \n sampleTab.setLayout(vBoxlayout) \n self.XRFTab = QtWidgets.QFrame() \n XRFhBox = QtWidgets.QHBoxLayout()\n self.mcafit = McaAdvancedFit(self.XRFTab)\n XRFhBox.addWidget(self.mcafit)\n self.XRFTab.setLayout(XRFhBox)\n self.tabs.addTab(sampleTab,\"Collect\")\n#12/19 - uncomment this to expose the PyMCA XRF interface. It's not connected to anything. \n self.tabs.addTab(self.XRFTab,\"XRF Spectrum\")\n self.zoomLevelToggledCB(\"Zoom1\") \n\n def albulaCheckCB(self,state):\n if state != QtCore.Qt.Checked:\n albulaUtils.albulaClose()\n else:\n albulaUtils.albulaOpen() #TODO there is no albulaOpen method! 
remove?\n\n def annealButtonCB(self):\n try:\n ftime=float(self.annealTime_ledit.text())\n if (ftime >= 0.1 and ftime <= 5.0):\n comm_s = \"anneal(\" + str(ftime) + \")\"\n logger.info(comm_s)\n self.send_to_server(comm_s)\n else:\n self.popupServerMessage(\"Anneal time must be between 0.1 and 5.0 seconds.\") \n except:\n pass\n \n\n def hideRastersCB(self,state):\n if state == QtCore.Qt.Checked:\n self.eraseRastersCB()\n else:\n self.refreshCollectionParams(self.selectedSampleRequest) \n\n def stillModeUserPushCB(self,state):\n logger.info(\"still checkbox state \" + str(state))\n if (self.controlEnabled()):\n if (state):\n self.stillMode_pv.put(1)\n self.setGuiValues({'osc_range':\"0.0\"})\n else:\n self.standardMode_pv.put(1)\n else:\n self.popupServerMessage(\"You don't have control\")\n if (self.stillModeStatePV.get()):\n self.stillModeCheckBox.setChecked(True)\n else:\n self.stillModeCheckBox.setChecked(False) \n \n \n \n def autoProcessingCheckCB(self,state):\n if state == QtCore.Qt.Checked:\n self.fastDPCheckBox.setEnabled(True)\n self.dimpleCheckBox.setEnabled(True)\n self.xia2CheckBox.setEnabled(True) \n else:\n self.fastDPCheckBox.setEnabled(False) \n self.fastEPCheckBox.setEnabled(False)\n self.dimpleCheckBox.setEnabled(False)\n self.xia2CheckBox.setEnabled(False) \n\n \n\n def rasterGrainToggledCB(self,identifier):\n if (identifier == \"Coarse\" or identifier == \"Fine\" or identifier == \"VFine\"):\n cellSize = self.rasterStepDefs[identifier] \n self.rasterStepEdit.setText(str(cellSize))\n self.beamWidth_ledit.setText(str(cellSize))\n self.beamHeight_ledit.setText(str(cellSize)) \n\n\n\n def vidActionToggledCB(self):\n if (len(self.rasterList) > 0):\n if (self.vidActionRasterSelectRadio.isChecked()):\n for i in range(len(self.rasterList)):\n if (self.rasterList[i] != None):\n self.rasterList[i][\"graphicsItem\"].setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable, True) \n else:\n for i in range(len(self.rasterList)):\n if (self.rasterList[i] != None):\n 
self.rasterList[i][\"graphicsItem\"].setFlag(QtWidgets.QGraphicsItem.ItemIsMovable, False)\n self.rasterList[i][\"graphicsItem\"].setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable, False)\n if (self.vidActionRasterDefRadio.isChecked()):\n self.click_positions = []\n self.showProtParams()\n if (self.vidActionC2CRadio.isChecked()): \n self.click_positions = []\n if (self.protoComboBox.findText(str(\"raster\")) == self.protoComboBox.currentIndex() or self.protoComboBox.findText(str(\"stepRaster\")) == self.protoComboBox.currentIndex() or self.protoComboBox.findText(str(\"specRaster\")) == self.protoComboBox.currentIndex()):\n self.protoComboBox.setCurrentIndex(self.protoComboBox.findText(str(\"standard\")))\n self.protoComboActivatedCB(\"standard\") \n self.showProtParams()\n\n\n\n def adjustGraphics4ZoomChange(self,fov):\n imageScaleMicrons = int(round(self.imageScaleLineLen * (fov[\"x\"]/daq_utils.screenPixX)))\n self.imageScaleText.setText(str(imageScaleMicrons) + \" microns\")\n if (self.rasterList != []):\n saveRasterList = self.rasterList\n self.eraseDisplayCB()\n for i in range(len(saveRasterList)):\n if (saveRasterList[i] == None): \n self.rasterList.append(None)\n else:\n rasterXPixels = float(saveRasterList[i][\"graphicsItem\"].x())\n rasterYPixels = float(saveRasterList[i][\"graphicsItem\"].y())\n self.rasterXmicrons = rasterXPixels * (fov[\"x\"]/daq_utils.screenPixX)\n self.rasterYmicrons = rasterYPixels * (fov[\"y\"]/daq_utils.screenPixY)\n if (not self.hideRastersCheckBox.isChecked()):\n self.drawPolyRaster(db_lib.getRequestByID(saveRasterList[i][\"uid\"]),saveRasterList[i][\"coords\"][\"x\"],saveRasterList[i][\"coords\"][\"y\"],saveRasterList[i][\"coords\"][\"z\"])\n self.fillPolyRaster(db_lib.getRequestByID(saveRasterList[i][\"uid\"]))\n self.processSampMove(self.sampx_pv.get(),\"x\")\n self.processSampMove(self.sampy_pv.get(),\"y\")\n self.processSampMove(self.sampz_pv.get(),\"z\")\n if (self.vectorStart != None):\n 
self.processSampMove(self.sampx_pv.get(),\"x\")\n self.processSampMove(self.sampy_pv.get(),\"y\")\n self.processSampMove(self.sampz_pv.get(),\"z\")\n if (self.centeringMarksList != []): \n self.processSampMove(self.sampx_pv.get(),\"x\")\n self.processSampMove(self.sampy_pv.get(),\"y\")\n self.processSampMove(self.sampz_pv.get(),\"z\")\n\n def flushBuffer(self,vidStream):\n if (vidStream == None):\n return\n for i in range (0,1000):\n stime = time.time() \n vidStream.grab()\n etime = time.time()\n commTime = etime-stime\n if (commTime>.01):\n return\n\n \n def zoomLevelToggledCB(self,identifier):\n fov = {}\n zoomedCursorX = daq_utils.screenPixCenterX-self.centerMarkerCharOffsetX\n zoomedCursorY = daq_utils.screenPixCenterY-self.centerMarkerCharOffsetY\n if (self.zoom2Radio.isChecked()):\n self.flushBuffer(self.captureLowMagZoom)\n self.capture = self.captureLowMagZoom\n fov[\"x\"] = daq_utils.lowMagFOVx/2.0\n fov[\"y\"] = daq_utils.lowMagFOVy/2.0\n unzoomedCursorX = self.lowMagCursorX_pv.get()-self.centerMarkerCharOffsetX\n unzoomedCursorY = self.lowMagCursorY_pv.get()-self.centerMarkerCharOffsetY\n if (unzoomedCursorX*2.0<daq_utils.screenPixCenterX):\n zoomedCursorX = unzoomedCursorX*2.0\n if (unzoomedCursorY*2.0<daq_utils.screenPixCenterY):\n zoomedCursorY = unzoomedCursorY*2.0\n if (unzoomedCursorX-daq_utils.screenPixCenterX>daq_utils.screenPixCenterX/2):\n zoomedCursorX = (unzoomedCursorX*2.0) - daq_utils.screenPixX\n if (unzoomedCursorY-daq_utils.screenPixCenterY>daq_utils.screenPixCenterY/2):\n zoomedCursorY = (unzoomedCursorY*2.0) - daq_utils.screenPixY\n self.centerMarker.setPos(zoomedCursorX,zoomedCursorY)\n self.beamSizeXPixels = self.screenXmicrons2pixels(self.tempBeamSizeXMicrons)\n self.beamSizeYPixels = self.screenYmicrons2pixels(self.tempBeamSizeYMicrons)\n 
self.beamSizeOverlay.setRect(self.overlayPosOffsetX+self.centerMarker.x()-(self.beamSizeXPixels/2),self.overlayPosOffsetY+self.centerMarker.y()-(self.beamSizeYPixels/2),self.beamSizeXPixels,self.beamSizeYPixels)\n elif (self.zoom1Radio.isChecked()):\n self.flushBuffer(self.captureLowMag)\n self.capture = self.captureLowMag\n fov[\"x\"] = daq_utils.lowMagFOVx\n fov[\"y\"] = daq_utils.lowMagFOVy\n self.centerMarker.setPos(self.lowMagCursorX_pv.get()-self.centerMarkerCharOffsetX,self.lowMagCursorY_pv.get()-self.centerMarkerCharOffsetY)\n self.beamSizeXPixels = self.screenXmicrons2pixels(self.tempBeamSizeXMicrons)\n self.beamSizeYPixels = self.screenYmicrons2pixels(self.tempBeamSizeYMicrons)\n self.beamSizeOverlay.setRect(self.overlayPosOffsetX+self.centerMarker.x()-(self.beamSizeXPixels/2),self.overlayPosOffsetY+self.centerMarker.y()-(self.beamSizeYPixels/2),self.beamSizeXPixels,self.beamSizeYPixels)\n elif (self.zoom4Radio.isChecked()):\n self.flushBuffer(self.captureHighMagZoom)\n self.capture = self.captureHighMagZoom\n fov[\"x\"] = daq_utils.highMagFOVx/2.0\n fov[\"y\"] = daq_utils.highMagFOVy/2.0\n unzoomedCursorX = self.highMagCursorX_pv.get()-self.centerMarkerCharOffsetX\n unzoomedCursorY = self.highMagCursorY_pv.get()-self.centerMarkerCharOffsetY\n if (unzoomedCursorX*2.0<daq_utils.screenPixCenterX):\n zoomedCursorX = unzoomedCursorX*2.0\n if (unzoomedCursorY*2.0<daq_utils.screenPixCenterY):\n zoomedCursorY = unzoomedCursorY*2.0\n if (unzoomedCursorX-daq_utils.screenPixCenterX>daq_utils.screenPixCenterX/2):\n zoomedCursorX = (unzoomedCursorX*2.0) - daq_utils.screenPixX\n if (unzoomedCursorY-daq_utils.screenPixCenterY>daq_utils.screenPixCenterY/2):\n zoomedCursorY = (unzoomedCursorY*2.0) - daq_utils.screenPixY\n self.centerMarker.setPos(zoomedCursorX,zoomedCursorY)\n self.beamSizeXPixels = self.screenXmicrons2pixels(self.tempBeamSizeXMicrons)\n self.beamSizeYPixels = self.screenYmicrons2pixels(self.tempBeamSizeYMicrons)\n 
self.beamSizeOverlay.setRect(self.overlayPosOffsetX+self.centerMarker.x()-(self.beamSizeXPixels/2),self.overlayPosOffsetY+self.centerMarker.y()-(self.beamSizeYPixels/2),self.beamSizeXPixels,self.beamSizeYPixels)\n elif (self.zoom3Radio.isChecked()):\n self.flushBuffer(self.captureHighMag)\n self.capture = self.captureHighMag\n fov[\"x\"] = daq_utils.highMagFOVx\n fov[\"y\"] = daq_utils.highMagFOVy\n self.centerMarker.setPos(self.highMagCursorX_pv.get()-self.centerMarkerCharOffsetX,self.highMagCursorY_pv.get()-self.centerMarkerCharOffsetY)\n self.beamSizeXPixels = self.screenXmicrons2pixels(self.tempBeamSizeXMicrons)\n self.beamSizeYPixels = self.screenYmicrons2pixels(self.tempBeamSizeYMicrons)\n self.beamSizeOverlay.setRect(self.overlayPosOffsetX+self.centerMarker.x()-(self.beamSizeXPixels/2),self.overlayPosOffsetY+self.centerMarker.y()-(self.beamSizeYPixels/2),self.beamSizeXPixels,self.beamSizeYPixels)\n self.adjustGraphics4ZoomChange(fov)\n \n\n def saveVidSnapshotButtonCB(self): \n comment,useOlog,ok = SnapCommentDialog.getComment()\n if (ok):\n self.saveVidSnapshotCB(comment,useOlog)\n\n\n def saveVidSnapshotCB(self,comment=\"\",useOlog=False,reqID=None,rasterHeatJpeg=None):\n if (not os.path.exists(\"snapshots\")):\n os.system(\"mkdir snapshots\")\n width=640\n height=512\n targetrect = QRectF(0, 0, width, height)\n sourcerect = QRectF(0, 0, width, height)\n pix = QtGui.QPixmap(width, height)\n painter = QtGui.QPainter(pix)\n self.scene.render(painter, targetrect,sourcerect)\n painter.end()\n now = time.time()\n if (rasterHeatJpeg == None):\n if (reqID != None):\n filePrefix = db_lib.getRequestByID(reqID)[\"request_obj\"][\"file_prefix\"]\n imagePath = os.getcwd()+\"/snapshots/\"+filePrefix+str(int(now))+\".jpg\"\n else:\n if (self.dataPathGB.prefix_ledit.text() != \"\"): \n imagePath = os.getcwd()+\"/snapshots/\"+str(self.dataPathGB.prefix_ledit.text())+str(int(now))+\".jpg\" \n else:\n imagePath = os.getcwd()+\"/snapshots/capture\"+str(int(now))+\".jpg\"\n 
else:\n imagePath = rasterHeatJpeg\n logger.info(\"saving \" + imagePath)\n pix.save(imagePath, \"JPG\")\n if (useOlog):\n lsdcOlog.toOlogPicture(imagePath,str(comment))\n resultObj = {}\n imgRef = imagePath #for now, just the path, might want to use filestore later, if they really do facilitate moving files\n resultObj[\"data\"] = imgRef\n resultObj[\"comment\"] = str(comment)\n if (reqID != None): #assuming raster here, but will probably need to check the type\n db_lib.addResultforRequest(\"rasterJpeg\",reqID,owner=daq_utils.owner,result_obj=resultObj,proposalID=daq_utils.getProposalID(),beamline=daq_utils.beamline)\n else: # the user pushed the snapshot button on the gui\n mountedSampleID = self.mountedPin_pv.get()\n if (mountedSampleID != \"\"): \n db_lib.addResulttoSample(\"snapshotResult\",mountedSampleID,owner=daq_utils.owner,result_obj=resultObj,proposalID=daq_utils.getProposalID(),beamline=daq_utils.beamline) \n else: #beamline result, no sample mounted\n db_lib.addResulttoBL(\"snapshotResult\",daq_utils.beamline,owner=daq_utils.owner,result_obj=resultObj,proposalID=daq_utils.getProposalID()) \n\n \n\n def changeControlMasterCB(self, state, processID=os.getpid()): #when someone touches checkbox, either through interaction or code\n logger.info(\"change control master\")\n logger.info(processID)\n currentMaster = self.controlMaster_pv.get()\n if (currentMaster < 0):\n self.controlMaster_pv.put(currentMaster) #this makes sure if things are locked, and someone tries to get control, their checkbox will uncheck itself\n self.popupServerMessage(\"Control is locked by staff. 
Please stand by.\") \n return\n if (state == QtCore.Qt.Checked):\n self.controlMaster_pv.put(processID)\n if len(self.osc_range_ledit.text()) == 0 or abs(float(self.osc_range_ledit.text())) > 0:\n self.standardMode_pv.put(1)\n elif(float(self.osc_range_ledit.text()) == 0):\n self.stillMode_pv.put(1)\n else:\n self.userScreenDialog.hide()\n if (self.staffScreenDialog != None):\n self.staffScreenDialog.hide()\n\n \n\n def calculateNewYCoordPos(self,startYX,startYY):\n startY_pixels = 0\n zMotRBV = self.motPos[\"y\"]\n yMotRBV = self.motPos[\"z\"]\n if (self.scannerType == \"PI\"): \n fineYRBV = self.motPos[\"fineY\"]\n fineZRBV = self.motPos[\"fineZ\"] \n deltaYX = startYX-zMotRBV-fineZRBV\n deltaYY = startYY-yMotRBV-fineYRBV\n else:\n deltaYX = startYX-zMotRBV \n deltaYY = startYY-yMotRBV\n omegaRad = math.radians(self.motPos[\"omega\"])\n newYY = (float(startY_pixels-(self.screenYmicrons2pixels(deltaYY))))*math.sin(omegaRad)\n newYX = (float(startY_pixels-(self.screenYmicrons2pixels(deltaYX))))*math.cos(omegaRad)\n newY = newYX + newYY\n return newY\n\n\n def processROIChange(self,posRBV,ID):\n pass\n\n\n def processLowMagCursorChange(self,posRBV,ID):\n zoomedCursorX = daq_utils.screenPixCenterX-self.centerMarkerCharOffsetX\n zoomedCursorY = daq_utils.screenPixCenterY-self.centerMarkerCharOffsetY\n if (self.zoom2Radio.isChecked()): #lowmagzoom\n unzoomedCursorX = self.lowMagCursorX_pv.get()-self.centerMarkerCharOffsetX\n unzoomedCursorY = self.lowMagCursorY_pv.get()-self.centerMarkerCharOffsetY\n if (unzoomedCursorX*2.0<daq_utils.screenPixCenterX):\n zoomedCursorX = unzoomedCursorX*2.0\n if (unzoomedCursorY*2.0<daq_utils.screenPixCenterY):\n zoomedCursorY = unzoomedCursorY*2.0\n if (unzoomedCursorX-daq_utils.screenPixCenterX>daq_utils.screenPixCenterX/2):\n zoomedCursorX = (unzoomedCursorX*2.0) - daq_utils.screenPixX\n if (unzoomedCursorY-daq_utils.screenPixCenterY>daq_utils.screenPixCenterY/2): \n zoomedCursorY = (unzoomedCursorY*2.0) - daq_utils.screenPixY\n 
self.centerMarker.setPos(zoomedCursorX,zoomedCursorY)\n self.beamSizeXPixels = self.screenXmicrons2pixels(self.tempBeamSizeXMicrons)\n self.beamSizeYPixels = self.screenYmicrons2pixels(self.tempBeamSizeYMicrons)\n self.beamSizeOverlay.setRect(self.overlayPosOffsetX+self.centerMarker.x()-(self.beamSizeXPixels/2),self.overlayPosOffsetY+self.centerMarker.y()-(self.beamSizeYPixels/2),self.beamSizeXPixels,self.beamSizeYPixels)\n else:\n self.centerMarker.setPos(self.lowMagCursorX_pv.get()-self.centerMarkerCharOffsetX,self.lowMagCursorY_pv.get()-self.centerMarkerCharOffsetY)\n self.beamSizeXPixels = self.screenXmicrons2pixels(self.tempBeamSizeXMicrons)\n self.beamSizeYPixels = self.screenYmicrons2pixels(self.tempBeamSizeYMicrons)\n self.beamSizeOverlay.setRect(self.overlayPosOffsetX+self.centerMarker.x()-(self.beamSizeXPixels/2),self.overlayPosOffsetY+self.centerMarker.y()-(self.beamSizeYPixels/2),self.beamSizeXPixels,self.beamSizeYPixels)\n\n\n def processHighMagCursorChange(self,posRBV,ID):\n zoomedCursorX = daq_utils.screenPixCenterX-self.centerMarkerCharOffsetX\n zoomedCursorY = daq_utils.screenPixCenterY-self.centerMarkerCharOffsetY\n if (self.zoom4Radio.isChecked()): #highmagzoom\n unzoomedCursorX = self.highMagCursorX_pv.get()-self.centerMarkerCharOffsetX\n unzoomedCursorY = self.highMagCursorY_pv.get()-self.centerMarkerCharOffsetY\n if (unzoomedCursorX*2.0<daq_utils.screenPixCenterX):\n zoomedCursorX = unzoomedCursorX*2.0\n if (unzoomedCursorY*2.0<daq_utils.screenPixCenterY):\n zoomedCursorY = unzoomedCursorY*2.0\n if (unzoomedCursorX-daq_utils.screenPixCenterX>daq_utils.screenPixCenterX/2):\n zoomedCursorX = (unzoomedCursorX*2.0) - daq_utils.screenPixX\n if (unzoomedCursorY-daq_utils.screenPixCenterY>daq_utils.screenPixCenterY/2): \n zoomedCursorY = (unzoomedCursorY*2.0) - daq_utils.screenPixY\n self.centerMarker.setPos(zoomedCursorX,zoomedCursorY)\n self.beamSizeXPixels = self.screenXmicrons2pixels(self.tempBeamSizeXMicrons)\n self.beamSizeYPixels = 
self.screenYmicrons2pixels(self.tempBeamSizeYMicrons)\n self.beamSizeOverlay.setRect(self.overlayPosOffsetX+self.centerMarker.x()-(self.beamSizeXPixels/2),self.overlayPosOffsetY+self.centerMarker.y()-(self.beamSizeYPixels/2),self.beamSizeXPixels,self.beamSizeYPixels)\n else:\n self.centerMarker.setPos(self.highMagCursorX_pv.get()-self.centerMarkerCharOffsetX,self.highMagCursorY_pv.get()-self.centerMarkerCharOffsetY)\n self.beamSizeXPixels = self.screenXmicrons2pixels(self.tempBeamSizeXMicrons)\n self.beamSizeYPixels = self.screenYmicrons2pixels(self.tempBeamSizeYMicrons)\n self.beamSizeOverlay.setRect(self.overlayPosOffsetX+self.centerMarker.x()-(self.beamSizeXPixels/2),self.overlayPosOffsetY+self.centerMarker.y()-(self.beamSizeYPixels/2),self.beamSizeXPixels,self.beamSizeYPixels)\n\n \n def processSampMove(self,posRBV,motID):\n# print \"new \" + motID + \" pos=\" + str(posRBV)\n self.motPos[motID] = posRBV\n if (len(self.centeringMarksList)>0):\n for i in range(len(self.centeringMarksList)):\n if (self.centeringMarksList[i] != None):\n centerMarkerOffsetX = self.centeringMarksList[i][\"centerCursorX\"]-self.centerMarker.x()\n centerMarkerOffsetY = self.centeringMarksList[i][\"centerCursorY\"]-self.centerMarker.y()\n if (motID == \"x\"):\n startX = self.centeringMarksList[i][\"sampCoords\"][\"x\"]\n delta = startX-posRBV\n newX = float(self.screenXmicrons2pixels(delta))\n self.centeringMarksList[i][\"graphicsItem\"].setPos(newX-centerMarkerOffsetX,self.centeringMarksList[i][\"graphicsItem\"].y())\n if (motID == \"y\" or motID == \"z\" or motID == \"omega\"):\n startYY = self.centeringMarksList[i][\"sampCoords\"][\"z\"]\n startYX = self.centeringMarksList[i][\"sampCoords\"][\"y\"]\n newY = self.calculateNewYCoordPos(startYX,startYY)\n self.centeringMarksList[i][\"graphicsItem\"].setPos(self.centeringMarksList[i][\"graphicsItem\"].x(),newY-centerMarkerOffsetY)\n if (len(self.rasterList)>0):\n for i in range(len(self.rasterList)):\n if (self.rasterList[i] != None):\n 
if (motID == \"x\"):\n startX = self.rasterList[i][\"coords\"][\"x\"]\n delta = startX-posRBV\n newX = float(self.screenXmicrons2pixels(delta))\n self.rasterList[i][\"graphicsItem\"].setPos(newX,self.rasterList[i][\"graphicsItem\"].y())\n if (motID == \"y\" or motID == \"z\"):\n startYY = self.rasterList[i][\"coords\"][\"z\"]\n startYX = self.rasterList[i][\"coords\"][\"y\"]\n newY = self.calculateNewYCoordPos(startYX,startYY)\n self.rasterList[i][\"graphicsItem\"].setPos(self.rasterList[i][\"graphicsItem\"].x(),newY)\n\n if (motID == \"fineX\"):\n startX = self.rasterList[i][\"coords\"][\"x\"]\n delta = startX-posRBV-self.motPos[\"x\"]\n newX = float(self.screenXmicrons2pixels(delta))\n self.rasterList[i][\"graphicsItem\"].setPos(newX,self.rasterList[i][\"graphicsItem\"].y()) \n if (motID == \"fineY\" or motID == \"fineZ\"):\n startYY = self.rasterList[i][\"coords\"][\"z\"]\n startYX = self.rasterList[i][\"coords\"][\"y\"]\n newY = self.calculateNewYCoordPos(startYX,startYY)\n self.rasterList[i][\"graphicsItem\"].setPos(self.rasterList[i][\"graphicsItem\"].x(),newY)\n \n if (motID == \"omega\"):\n if (abs(posRBV-self.rasterList[i][\"coords\"][\"omega\"])%360.0 > 5.0): \n self.rasterList[i][\"graphicsItem\"].setVisible(False)\n else:\n self.rasterList[i][\"graphicsItem\"].setVisible(True) \n startYY = self.rasterList[i][\"coords\"][\"z\"]\n startYX = self.rasterList[i][\"coords\"][\"y\"]\n newY = self.calculateNewYCoordPos(startYX,startYY)\n self.rasterList[i][\"graphicsItem\"].setPos(self.rasterList[i][\"graphicsItem\"].x(),newY)\n \n if (self.vectorStart != None):\n centerMarkerOffsetX = self.vectorStart[\"centerCursorX\"]-self.centerMarker.x()\n centerMarkerOffsetY = self.vectorStart[\"centerCursorY\"]-self.centerMarker.y()\n \n if (motID == \"omega\"):\n startYY = self.vectorStart[\"coords\"][\"z\"]\n startYX = self.vectorStart[\"coords\"][\"y\"]\n newY = self.calculateNewYCoordPos(startYX,startYY)\n 
self.vectorStart[\"graphicsitem\"].setPos(self.vectorStart[\"graphicsitem\"].x(),newY-centerMarkerOffsetY)\n if (motID == \"x\"):\n startX = self.vectorStart[\"coords\"][\"x\"]\n delta = startX-posRBV\n newX = float(self.screenXmicrons2pixels(delta))\n self.vectorStart[\"graphicsitem\"].setPos(newX-centerMarkerOffsetX,self.vectorStart[\"graphicsitem\"].y())\n if (motID == \"y\" or motID == \"z\"):\n startYX = self.vectorStart[\"coords\"][\"y\"]\n startYY = self.vectorStart[\"coords\"][\"z\"]\n newY = self.calculateNewYCoordPos(startYX,startYY)\n self.vectorStart[\"graphicsitem\"].setPos(self.vectorStart[\"graphicsitem\"].x(),newY-centerMarkerOffsetY)\n if (self.vectorEnd != None):\n centerMarkerOffsetX = self.vectorEnd[\"centerCursorX\"]-self.centerMarker.x()\n centerMarkerOffsetY = self.vectorEnd[\"centerCursorY\"]-self.centerMarker.y()\n\n if (motID == \"omega\"):\n startYX = self.vectorEnd[\"coords\"][\"y\"]\n startYY = self.vectorEnd[\"coords\"][\"z\"]\n newY = self.calculateNewYCoordPos(startYX,startYY)\n self.vectorEnd[\"graphicsitem\"].setPos(self.vectorEnd[\"graphicsitem\"].x(),newY-centerMarkerOffsetY)\n if (motID == \"x\"):\n startX = self.vectorEnd[\"coords\"][\"x\"]\n delta = startX-posRBV\n newX = float(self.screenXmicrons2pixels(delta))\n self.vectorEnd[\"graphicsitem\"].setPos(newX-centerMarkerOffsetX,self.vectorEnd[\"graphicsitem\"].y())\n if (motID == \"y\" or motID == \"z\"):\n startYX = self.vectorEnd[\"coords\"][\"y\"]\n startYY = self.vectorEnd[\"coords\"][\"z\"]\n newY = self.calculateNewYCoordPos(startYX,startYY)\n self.vectorEnd[\"graphicsitem\"].setPos(self.vectorEnd[\"graphicsitem\"].x(),newY-centerMarkerOffsetY)\n\n\n if (self.vectorStart != None and self.vectorEnd != None):\n 
self.vecLine.setLine(self.vectorStart[\"graphicsitem\"].x()+self.vectorStart[\"centerCursorX\"]+self.centerMarkerCharOffsetX,self.vectorStart[\"graphicsitem\"].y()+self.vectorStart[\"centerCursorY\"]+self.centerMarkerCharOffsetY,self.vectorEnd[\"graphicsitem\"].x()+self.vectorStart[\"centerCursorX\"]+self.centerMarkerCharOffsetX,self.vectorEnd[\"graphicsitem\"].y()+self.vectorStart[\"centerCursorY\"]+self.centerMarkerCharOffsetY)\n\n def queueEnScanCB(self):\n self.protoComboBox.setCurrentIndex(self.protoComboBox.findText(str(\"eScan\"))) \n self.addRequestsToAllSelectedCB()\n self.treeChanged_pv.put(1) \n\n def clearEnScanPlotCB(self):\n self.EScanGraph.removeCurves() # get list of all curves to provide to method?\n self.choochGraph.removeCurves()\n\n def displayXrecRaster(self,xrecRasterFlag):\n self.xrecRasterFlag_pv.put(\"0\")\n if (xrecRasterFlag==\"100\"):\n for i in range(len(self.rasterList)):\n if (self.rasterList[i] != None):\n self.scene.removeItem(self.rasterList[i][\"graphicsItem\"])\n else:\n logger.info(\"xrecrasterflag = %s\" % xrecRasterFlag)\n try:\n rasterReq = db_lib.getRequestByID(xrecRasterFlag)\n except IndexError:\n logger.error('bad xrecRasterFlag: %s' % xrecRasterFlag)\n return\n rasterDef = rasterReq[\"request_obj\"][\"rasterDef\"]\n if (rasterDef[\"status\"] == RasterStatus.DRAWN.value):\n self.drawPolyRaster(rasterReq)\n elif (rasterDef[\"status\"] == RasterStatus.READY_FOR_FILL.value):\n self.fillPolyRaster(\n rasterReq,\n waitTime=getBlConfig(RASTER_GUI_XREC_FILL_DELAY)\n )\n logger.info(\"polyraster filled by displayXrecRaster\")\n elif (rasterDef[\"status\"] == RasterStatus.READY_FOR_SNAPSHOT.value):\n if (self.controlEnabled()): \n self.takeRasterSnapshot(rasterReq)\n logger.info(\"raster snapshot taken\")\n self.vidActionRasterExploreRadio.setChecked(True) \n self.selectedSampleID = rasterReq[\"sample\"]\n self.treeChanged_pv.put(1) #not sure about this\n elif (rasterDef[\"status\"] == RasterStatus.READY_FOR_REPROCESS.value):\n 
self.fillPolyRaster(rasterReq)\n logger.info(\"reprocessed polyraster filled by displayXrecraster\")\n if (self.controlEnabled()):\n self.takeRasterSnapshot(rasterReq)\n logger.info(\"reprocessed raster snapshot taken\") \n self.vidActionRasterExploreRadio.setChecked(True) \n self.selectedSampleID = rasterReq[\"sample\"]\n self.treeChanged_pv.put(1) #not sure about this\n else:\n pass\n\n\n def processMountedPin(self,mountedPinPos):\n self.eraseCB() \n self.treeChanged_pv.put(1)\n\n def processFastShutter(self,shutterVal):\n if (round(shutterVal)==round(self.fastShutterOpenPos_pv.get())):\n self.shutterStateLabel.setText(\"Shutter State:Open\")\n self.shutterStateLabel.setStyleSheet(\"background-color: red;\") \n else:\n self.shutterStateLabel.setText(\"Shutter State:Closed\")\n self.shutterStateLabel.setStyleSheet(\"background-color: #99FF66;\") \n\n def processGripTemp(self,gripVal):\n self.gripperTempLabel.setText('%.1f' % gripVal)\n if (int(gripVal) > -170):\n self.gripperTempLabel.setStyleSheet(\"background-color: red;\") \n else:\n self.gripperTempLabel.setStyleSheet(\"background-color: #99FF66;\") \n\n def processCryostreamTemp(self,cryostreamVal):\n self.cryostreamTempLabel.setText(str(cryostreamVal))\n\n def processRingCurrent(self,ringCurrentVal):\n self.ringCurrentMessage.setText(str(int(ringCurrentVal)))\n if (int(ringCurrentVal) < 390):\n self.ringCurrentMessage.setStyleSheet(\"background-color: red;\") \n else:\n self.ringCurrentMessage.setStyleSheet(\"background-color: #99FF66;\") \n \n def processBeamAvailable(self,beamAvailVal):\n if (int(beamAvailVal) == 1):\n self.beamAvailLabel.setText(\"Beam Available\")\n self.beamAvailLabel.setStyleSheet(\"background-color: #99FF66;\") \n else:\n self.beamAvailLabel.setText(\"No Beam\") \n self.beamAvailLabel.setStyleSheet(\"background-color: red;\") \n\n def processSampleExposed(self,sampleExposedVal):\n if (int(sampleExposedVal) == 1):\n self.sampleExposedLabel.setText(\"Sample Exposed\")\n 
self.sampleExposedLabel.setStyleSheet(\"background-color: red;\") \n else:\n self.sampleExposedLabel.setText(\"Sample Not Exposed\") \n self.sampleExposedLabel.setStyleSheet(\"background-color: #99FF66;\") \n \n \n def processBeamSize(self,beamSizeFlag):\n self.beamsizeComboBox.setCurrentIndex(beamSizeFlag)\n\n def processEnergyChange(self,energyVal):\n if (energyVal<9000):\n self.beamsizeComboBox.setEnabled(False)\n else:\n self.beamsizeComboBox.setEnabled(True)\n \n def processControlMaster(self,controlPID):\n logger.info(\"in callback controlPID = \" + str(controlPID))\n if (abs(int(controlPID)) == self.processID):\n self.controlMasterCheckBox.setChecked(True)\n else:\n self.controlMasterCheckBox.setChecked(False) \n\n def processZebraArmState(self,state):\n if (int(state)):\n self.userScreenDialog.zebraArmCheckBox.setChecked(True)\n else:\n self.userScreenDialog.zebraArmCheckBox.setChecked(False) \n\n def processGovRobotSeReach(self,state):\n if (int(state)):\n self.userScreenDialog.SEbutton.setEnabled(True)\n else:\n self.userScreenDialog.SEbutton.setEnabled(False)\n\n def processGovRobotSaReach(self,state):\n if (int(state)):\n self.userScreenDialog.SAbutton.setEnabled(True)\n else:\n self.userScreenDialog.SAbutton.setEnabled(False)\n \n def processGovRobotDaReach(self,state):\n if (int(state)):\n self.userScreenDialog.DAbutton.setEnabled(True)\n else:\n self.userScreenDialog.DAbutton.setEnabled(False)\n \n def processGovRobotBlReach(self,state):\n if (int(state)):\n self.userScreenDialog.BLbutton.setEnabled(True)\n else:\n self.userScreenDialog.BLbutton.setEnabled(False)\n \n\n def processDetMessage(self,state):\n self.userScreenDialog.detMessage_ledit.setText(str(state))\n\n def processSampleFlux(self,state):\n self.userScreenDialog.sampleFluxLabel.setText('%E' % state)\n\n \n def processZebraPulseState(self,state):\n if (int(state)):\n self.userScreenDialog.zebraPulseCheckBox.setChecked(True)\n else:\n 
        # (tail of a checkbox-sync callback that begins before this chunk)
        self.userScreenDialog.zebraPulseCheckBox.setChecked(False) 

    def processStillModeState(self,state):
        """PV monitor callback: mirror the still-mode PV onto its checkbox."""
        if (int(state)):
            self.stillModeCheckBox.setChecked(True)
        else:
            self.stillModeCheckBox.setChecked(False) 

    def processZebraDownloadState(self,state):
        """PV monitor callback: mirror the Zebra download PV onto its checkbox."""
        if (int(state)):
            self.userScreenDialog.zebraDownloadCheckBox.setChecked(True)
        else:
            self.userScreenDialog.zebraDownloadCheckBox.setChecked(False) 
    
    def processZebraSentTriggerState(self,state):
        """PV monitor callback: mirror the Zebra sent-trigger PV onto its checkbox."""
        if (int(state)):
            self.userScreenDialog.zebraSentTriggerCheckBox.setChecked(True)
        else:
            self.userScreenDialog.zebraSentTriggerCheckBox.setChecked(False) 

    def processZebraReturnedTriggerState(self,state):
        """PV monitor callback: mirror the Zebra returned-trigger PV onto its checkbox."""
        if (int(state)):
            self.userScreenDialog.zebraReturnedTriggerCheckBox.setChecked(True)
        else:
            self.userScreenDialog.zebraReturnedTriggerCheckBox.setChecked(False) 
    

    def processControlMasterNew(self,controlPID):
        """PV monitor callback: if another process (different PID) has taken
        control of the beamline, uncheck our control-master checkbox.
        The PV carries a signed PID; abs() strips the sign before comparing."""
        logger.info("in callback controlPID = " + str(controlPID))
        if (abs(int(controlPID)) != self.processID):
            self.controlMasterCheckBox.setChecked(False) 

    def processChoochResult(self,choochResultFlag):
        """PV monitor callback fired when a CHOOCH energy-scan result is ready.

        choochResultFlag is a result key ("0" is the idle/sentinel value).
        Fetches the result from the database, plots raw counts and the CHOOCH
        spline/f' curves, fills in the inflection/peak text fields, clears the
        flag PV, and switches the protocol back to "standard".
        """
        if (choochResultFlag == "0"):
            return
        choochResult = db_lib.getResult(choochResultFlag)
        choochResultObj = choochResult["result_obj"]
        graph_x = choochResultObj["choochInXAxis"]
        graph_y = choochResultObj["choochInYAxis"]
        self.EScanGraph.name = "Chooch PLot"
        try:
            self.EScanGraph.addCurve(graph_x, graph_y, 'Raw counts vs. energy')
            self.EScanGraph.replot()
        except TypeError as e:
            logger.error('Problems with data type going into energy scan plot: %s' % (e))
        chooch_graph_x = choochResultObj["choochOutXAxis"]
        chooch_graph_y1 = choochResultObj["choochOutY1Axis"]
        chooch_graph_y2 = choochResultObj["choochOutY2Axis"] 
        self.choochGraph.name = "Chooch PLot"
        try:
            self.choochGraph.addCurve(chooch_graph_x, chooch_graph_y1, legend='spline')
            self.choochGraph.addCurve(chooch_graph_x, chooch_graph_y2, legend='fp')
            self.choochGraph.replot()
            self.choochInfl.setText(str(choochResultObj["infl"]))
            self.choochPeak.setText(str(choochResultObj["peak"]))
            self.choochFPrimeInfl.setText(str(choochResultObj["fprime_infl"]))
            self.choochFPrimePeak.setText(str(choochResultObj["fprime_peak"]))
            self.choochF2PrimeInfl.setText(str(choochResultObj["f2prime_infl"]))
            self.choochF2PrimePeak.setText(str(choochResultObj["f2prime_peak"]))
            # reset the flag PV so the next scan can signal completion
            self.choochResultFlag_pv.put("0")
            self.protoComboBox.setCurrentIndex(self.protoComboBox.findText(str("standard")))
            self.protoComboActivatedCB("standard")
        except TypeError as e:
            logger.error('Chooch plotting failed - check whether scan had a strong signal or not: %s' % (e))
        


# seems like we should be able to do an aggregate query to mongo for max/min :(
    def getMaxPriority(self):
        """Return the highest priority among all queued requests (0 if none)."""
        orderedRequests = db_lib.getOrderedRequestList(daq_utils.beamline) 
        priorityMax = 0
        for i in range(len(orderedRequests)):
            if (orderedRequests[i]["priority"] > priorityMax):
                priorityMax = orderedRequests[i]["priority"]
        return priorityMax

    def getMinPriority(self):
        """Return the lowest positive priority among queued requests
        (10000000 sentinel if none qualify)."""
        orderedRequests = db_lib.getOrderedRequestList(daq_utils.beamline) 
        priorityMin = 10000000
        for i in range(len(orderedRequests)):
            if ((orderedRequests[i]["priority"] < priorityMin) and orderedRequests[i]["priority"]>0):
                priorityMin = orderedRequests[i]["priority"]
        return priorityMin


    def showProtParams(self):
        """Show only the parameter frames relevant to the selected protocol.

        Hides the raster/characterize/processing/multiCol frames, re-enables
        the oscillation start/end fields, then re-shows per protocol.
        NOTE(review): vectorParamsFrame is shown for vector protocols but is
        never hidden at the top like the other frames - confirm intended.
        """
        protocol = str(self.protoComboBox.currentText())
        self.rasterParamsFrame.hide()
        self.characterizeParamsFrame.hide()
        self.processingOptionsFrame.hide()
        self.multiColParamsFrame.hide()
        self.osc_start_ledit.setEnabled(True)
        self.osc_end_ledit.setEnabled(True)
        if (protocol == "raster" or protocol == "rasterScreen"):
            self.rasterParamsFrame.show()
            self.osc_start_ledit.setEnabled(False)
            self.osc_end_ledit.setEnabled(False)
        elif (protocol == "stepRaster" or protocol == "specRaster"):
            self.rasterParamsFrame.show()
            self.processingOptionsFrame.show() 
        elif (protocol == "multiCol" or protocol == "multiColQ"):
            self.rasterParamsFrame.show()
            self.multiColParamsFrame.show()
        elif (protocol == "screen"):
            pass
        elif (protocol == "vector" or protocol == "stepVector"):
            self.vectorParamsFrame.show()
            self.processingOptionsFrame.show() 
        elif (protocol == "characterize" or protocol == "ednaCol"):
            self.characterizeParamsFrame.show()
            self.processingOptionsFrame.show() 
        elif (protocol == "standard" or protocol == "burn"):
            self.processingOptionsFrame.show()
        else:
            pass 

    def rasterStepChanged(self,text):
        """Keep beam width/height line edits in lockstep with the raster step."""
        self.beamWidth_ledit.setText(text)
        self.beamHeight_ledit.setText(text)


    def updateVectorLengthAndSpeed(self):
        """Recompute vector-collect length and speed from the stored start/end
        points and the current osc/exposure fields; update the readout labels.

        Returns (x_vec, y_vec, z_vec, trans_total).
        Raises ZeroDivisionError if osc_range is 0 - callers such as
        totalExpChanged wrap this call in try/except.
        """
        x_vec_end = self.vectorEnd["coords"]["x"]
        y_vec_end = self.vectorEnd["coords"]["y"]
        z_vec_end = self.vectorEnd["coords"]["z"]
        x_vec_start = self.vectorStart["coords"]["x"]
        y_vec_start = self.vectorStart["coords"]["y"]
        z_vec_start = self.vectorStart["coords"]["z"]
        x_vec = x_vec_end - x_vec_start
        y_vec = y_vec_end - y_vec_start
        z_vec = z_vec_end - z_vec_start
        trans_total = math.sqrt(x_vec**2 + y_vec**2 + z_vec**2)
        self.vecLenLabelOutput.setText(str(int(trans_total)))
        totalExpTime =(float(self.osc_end_ledit.text())/float(self.osc_range_ledit.text()))*float(self.exp_time_ledit.text()) #(range/inc)*exptime
        speed = trans_total/totalExpTime
        self.vecSpeedLabelOutput.setText(str(int(speed)))
        return x_vec, y_vec, z_vec, trans_total

    def totalExpChanged(self,text):
        """Recompute the total exposure time readout whenever an acquisition
        field changes.

        text identifies the edited field ("oscRange", "oscEnd", ...).  A zero
        oscillation range means still/step collection: total time is then
        images*exptime; otherwise (range/increment)*exptime.  Also toggles
        still/standard mode PVs (if we hold control), relabels the end field,
        refreshes vector speed, and auto-checks fastDP for wedges > 4.9.
        """
        if (text == "oscEnd" and daq_utils.beamline == "fmx"):
            # a changed wedge invalidates the displayed lifetime estimate
            self.sampleLifetimeReadback_ledit.setStyleSheet("color : red"); 
        try:
            if (float(str(self.osc_range_ledit.text())) == 0):
                if (text == "oscRange"):
                    if (self.controlEnabled()):
                        self.stillMode_pv.put(1)
                    self.colEndLabel.setText("Number of Images: ")
                if (str(self.protoComboBox.currentText()) != "standard" and str(self.protoComboBox.currentText()) != "vector"):
                    self.totalExptime_ledit.setText("----")
                else:
                    try:
                        totalExptime = (float(self.osc_end_ledit.text())*float(self.exp_time_ledit.text()))
                    except ValueError:
                        totalExptime = 0.0
                    except TypeError:
                        totalExptime = 0.0
                    except ZeroDivisionError:
                        totalExptime = 0.0
                    self.totalExptime_ledit.setText('%.3f' % totalExptime)
                return
            else:
                if (text == "oscRange"): 
                    if (self.controlEnabled()):
                        self.standardMode_pv.put(1)
                    self.colEndLabel.setText("Oscillation Range:")
        except ValueError:
            return
        
        if (str(self.protoComboBox.currentText()) != "standard" and str(self.protoComboBox.currentText()) != "vector"):
            self.totalExptime_ledit.setText("----")
        else:
            try:
                totalExptime = (float(self.osc_end_ledit.text())/(float(self.osc_range_ledit.text())))*float(self.exp_time_ledit.text())
            except ValueError:
                totalExptime = 0.0
            except TypeError:
                totalExptime = 0.0
            except ZeroDivisionError:
                totalExptime = 0.0
            self.totalExptime_ledit.setText('%.3f' % totalExptime)
        if (str(self.protoComboBox.currentText()) == "vector"):
            try:
                self.updateVectorLengthAndSpeed()
            except:
                pass
        
        try:
            if (float(self.osc_end_ledit.text()) > 4.9):
                self.fastDPCheckBox.setChecked(True)
            else:
                self.fastDPCheckBox.setChecked(False) 
        except:
            pass
        

    def resoTextChanged(self,text):
        """When the resolution field changes, derive the matching detector
        distance and push it into the detector-distance entry (falls back to
        the current readback on parse failure)."""
        try:
            dist_s = "%.2f" % (daq_utils.distance_from_reso(daq_utils.det_radius,float(text),daq_utils.energy2wave(float(self.energy_ledit.text())),0))
        except ValueError:
            dist_s = self.detDistRBVLabel.getEntry().text() 
        self.detDistMotorEntry.getEntry().setText(dist_s)

    def detDistTextChanged(self,text):
        """When the detector-distance field changes, derive the corner
        resolution and update the GUI ("50.0" fallback on parse failure)."""
        try:
            reso_s = "%.2f" % (daq_utils.calc_reso(daq_utils.det_radius,float(text),daq_utils.energy2wave(float(self.energy_ledit.text())),0))
        except ValueError:
            reso_s = "50.0"
        except TypeError:
            reso_s = "50.0"
        self.setGuiValues({'resolution':reso_s})
    
    def energyTextChanged(self,text):
        """When the energy field changes, recompute the detector distance that
        keeps the requested resolution.  NOTE(review): no try/except here -
        a non-numeric energy would raise ValueError; confirm validator covers it."""
        dist_s = "%.2f" % (daq_utils.distance_from_reso(daq_utils.det_radius,float(self.resolution_ledit.text()),float(text),0))
        self.detDistMotorEntry.getEntry().setText(dist_s)

    #code below and its application from: https://snorfalorpagus.net/blog/2014/08/09/validating-user-input-in-pyqt4-using-qvalidator/
    def checkEntryState(self, *args, **kwargs):
        """Color the sending QLineEdit by its validator state
        (yellow=intermediate, red=invalid, white=acceptable)."""
        sender = self.sender()
        validator = sender.validator()
        state = validator.validate(sender.text(), 0)[0]
        if state == QtGui.QValidator.Intermediate:
            color = '#fff79a' # yellow
        elif state == QtGui.QValidator.Invalid:
            color = '#f6989d' # red
        else:
            color = '#ffffff' # white
        sender.setStyleSheet('QLineEdit { background-color: %s }' % color)

    def validateAllFields(self):
        """Validate the exposure-time, detector-distance and total-exposure
        fields against their configured ranges; return True if all pass."""
        fields_dict = {self.exp_time_ledit: {'name': 'exposure time', 'minmax': VALID_EXP_TIMES},
                       self.detDistMotorEntry.getEntry(): {'name': 'detector distance', 'minmax': VALID_DET_DIST},
                       self.totalExptime_ledit: {'name': 'total exposure time', 'minmax': VALID_TOTAL_EXP_TIMES}}

        return self.validateFields(fields_dict)

    def validateFields(self, field_values_dict):
        """Run each widget's QValidator over its current text; pop a message
        and return False on the first unacceptable value ("----" is skipped
        as the not-calculated marker).  Returns True when all fields pass."""
        for field, value in field_values_dict.items():
            values = value['minmax']
            field_name = value['name']
            logger.info('validateFields: %s %s %s' % (field_name, field.text(), values))
            try:
                val = float(field.text())
                # NOTE(review): this debug log always reads the 'fmx' limits,
                # while the actual check below uses daq_utils.beamline
                logger.info('>= min: %s <= max: %s' % (val >= values['fmx']['min'], val <= values['fmx']['max']))
            except: #total exposure time is '----' for rasters, so just ignore
                pass
            if field.text() == '----': #special case: total exp time not calculated for non-standard, non-vector experiments
                continue
            if field.validator().validate(field.text(),0)[0] != QtGui.QValidator.Acceptable:
                self.popupServerMessage('Invalid value for field %s! must be between %s and %s' % (field_name, values[daq_utils.beamline]["min"], values[daq_utils.beamline]["max"]))
                return False
        return True

    def protoRadioToggledCB(self, text):
        """Keep the protocol combo box in sync with the standard/raster/vector
        radio buttons and re-run the combo activation logic."""
        if (self.protoStandardRadio.isChecked()):
            self.protoComboBox.setCurrentIndex(self.protoComboBox.findText("standard"))
            self.protoComboActivatedCB(text) 
        elif (self.protoRasterRadio.isChecked()):
            self.protoComboBox.setCurrentIndex(self.protoComboBox.findText("raster")) 
            self.protoComboActivatedCB(text)
        elif (self.protoVectorRadio.isChecked()):
            self.protoComboBox.setCurrentIndex(self.protoComboBox.findText("vector")) 
            self.protoComboActivatedCB(text)
        else:
            pass

    def beamsizeComboActivatedCB(self, text):
        """Send a set_beamsize command built from the first two and next two
        characters of the combo text (e.g. "V0H0" style codes)."""
        comm_s = "set_beamsize(\"" + str(text[0:2]) + "\",\"" + str(text[2:4]) + "\")"
        logger.info(comm_s)
        self.send_to_server(comm_s) 

    def protoComboActivatedCB(self, text):
        """Apply per-protocol GUI defaults when the protocol combo changes:
        sync radio buttons, choose the video click action, load default
        osc/exposure/transmission values from beamline config, and refresh
        the total exposure readout."""
        self.showProtParams()
        protocol = str(self.protoComboBox.currentText())
        if (protocol == "raster" or protocol == "stepRaster" or protocol == "rasterScreen" or protocol == "specRaster"):
            self.vidActionRasterDefRadio.setChecked(True)
        else:
            self.vidActionC2CRadio.setChecked(True)
        if (protocol == "raster"):
            self.protoRasterRadio.setChecked(True)
            self.osc_start_ledit.setEnabled(False)
            self.osc_end_ledit.setEnabled(False)
            self.setGuiValues({'osc_range':getBlConfig("rasterDefaultWidth"), 'exp_time':getBlConfig("rasterDefaultTime"), 'transmission':getBlConfig("rasterDefaultTrans")})
        elif (protocol == "rasterScreen"):
            self.osc_start_ledit.setEnabled(False)
            self.osc_end_ledit.setEnabled(False)
            self.setGuiValues({'osc_range':getBlConfig("rasterDefaultWidth"), 'exp_time':getBlConfig("rasterDefaultTime"), 'transmission':getBlConfig("rasterDefaultTrans")})
            self.protoOtherRadio.setChecked(True) 
        elif (protocol == "standard"):
            self.protoStandardRadio.setChecked(True)
            self.setGuiValues({'osc_range':getBlConfig("screen_default_width"), 'exp_time':getBlConfig("screen_default_time"), 'transmission':getBlConfig("stdTrans")})
            self.osc_start_ledit.setEnabled(True)
            self.osc_end_ledit.setEnabled(True)
        elif (protocol == "burn"):
            self.fastDPCheckBox.setChecked(False) 
            self.setGuiValues({'osc_range':"0.0", 'exp_time':getBlConfig("burnDefaultTime"), 'transmission':getBlConfig("burnDefaultTrans")})
            # with osc_range 0 the end field means number of frames
            screenWidth = float(getBlConfig("burnDefaultNumFrames"))
            self.setGuiValues({'osc_end':screenWidth})
            self.osc_start_ledit.setEnabled(True)
            self.osc_end_ledit.setEnabled(True)
        elif (protocol == "vector"):
            self.setGuiValues({'osc_range':getBlConfig("screen_default_width"), 'exp_time':getBlConfig("screen_default_time"), 'transmission':getBlConfig("stdTrans")})
            self.osc_start_ledit.setEnabled(True)
            self.osc_end_ledit.setEnabled(True)
            self.protoVectorRadio.setChecked(True)
        else:
            self.protoOtherRadio.setChecked(True)
        self.totalExpChanged("")
        

    def rasterEvalComboActivatedCB(self, text):
        """Persist the selected raster scoring option (spot count / intensity /
        resolution) to beamline info and re-color any drawn raster."""
        db_lib.beamlineInfo(daq_utils.beamline,'rasterScoreFlag',info_dict={"index":self.rasterEvalComboBox.findText(str(text))})
        if (self.currentRasterCellList != []):
            self.reFillPolyRaster()


    def popBaseDirectoryDialogCB(self):
        """Let the user pick the base data directory via a file dialog."""
        fname = QtWidgets.QFileDialog.getExistingDirectory(self, 'Choose Directory', '',QtWidgets.QFileDialog.DontUseNativeDialog) 
        if (fname != ""):
            self.dataPathGB.setBasePath_ledit(fname)


    def popImportDialogCB(self):
        """Pick an Excel spreadsheet and ask the server to import it.
        GUI poll timers are paused while the modal dialog is open."""
        self.timerHutch.stop()
        self.timerSample.stop() 
        fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Choose Spreadsheet File', '',filter="*.xls *.xlsx",options=QtWidgets.QFileDialog.DontUseNativeDialog)
        self.timerSample.start(SAMPLE_TIMER_DELAY) 
        self.timerHutch.start(HUTCH_TIMER_DELAY) 
        if (fname != ""):
            logger.info(fname)
            # getOpenFileName returns a (path, filter) tuple; element 0 is the path
            comm_s = "importSpreadsheet(\""+str(fname[0])+"\")"
            logger.info(comm_s)
            self.send_to_server(comm_s)
    
    def setUserModeCB(self):
        """User mode: disable the define-center video action."""
        self.vidActionDefineCenterRadio.setEnabled(False)

    def setExpertModeCB(self):
        """Expert mode: enable the define-center video action."""
        self.vidActionDefineCenterRadio.setEnabled(True)
    

    def upPriorityCB(self): #neither of these are very elegant, and might even be glitchy if overused
        """Raise the selected request's priority to midway between the two
        requests above it (or jump to top if it is already near the top)."""
        currentPriority = self.selectedSampleRequest["priority"]
        if (currentPriority<1):
            return
        orderedRequests = db_lib.getOrderedRequestList(daq_utils.beamline)
        for i in range(len(orderedRequests)):
            if (orderedRequests[i]["sample"] == self.selectedSampleRequest["sample"]):
                if (i<2):
                    self.topPriorityCB()
                else:
                    priority = (orderedRequests[i-2]["priority"] + orderedRequests[i-1]["priority"])/2
                    if (currentPriority == priority):
                        priority = priority+20
                    db_lib.updatePriority(self.selectedSampleRequest["uid"],priority)
        # poke the tree-changed PV so all clients refresh their queue view
        self.treeChanged_pv.put(1)
    
    
    def downPriorityCB(self):
        """Lower the selected request's priority to midway between the two
        requests below it (or jump to bottom if already near the bottom)."""
        currentPriority = self.selectedSampleRequest["priority"]
        if (currentPriority<1):
            return
        orderedRequests = db_lib.getOrderedRequestList(daq_utils.beamline)
        for i in range(len(orderedRequests)):
            if (orderedRequests[i]["sample"] == self.selectedSampleRequest["sample"]):
                if ((len(orderedRequests)-i) < 3):
                    self.bottomPriorityCB()
                else:
                    priority = (orderedRequests[i+1]["priority"] + orderedRequests[i+2]["priority"])/2
                    if (currentPriority == priority):
                        priority = priority-20
                    db_lib.updatePriority(self.selectedSampleRequest["uid"],priority)
        self.treeChanged_pv.put(1)


    def topPriorityCB(self):
        """Move the selected request above everything else (max priority+100)."""
        currentPriority = self.selectedSampleRequest["priority"]
        if (currentPriority<1):
            return
        priority = int(self.getMaxPriority())
        priority = priority+100
        db_lib.updatePriority(self.selectedSampleRequest["uid"],priority)
        self.treeChanged_pv.put(1)


    def bottomPriorityCB(self):
        """Move the selected request below everything else (min priority-100)."""
        currentPriority = self.selectedSampleRequest["priority"]
        if (currentPriority<1):
            return
        priority = int(self.getMinPriority())
        priority = priority-100
        db_lib.updatePriority(self.selectedSampleRequest["uid"],priority)
        self.treeChanged_pv.put(1)
    

    def dewarViewToggledCB(self,identifier):
        """Refresh the dewar tree in dewar or priority layout depending on
        which view's radio button just toggled."""
        self.selectedSampleRequest = {}
#should probably clear textfields here too
        if (identifier == "dewarView"):
            if (self.dewarViewRadio.isChecked()):
                self.dewarTree.refreshTreeDewarView()
        else:
            if (self.priorityViewRadio.isChecked()):
                self.dewarTree.refreshTreePriorityView()

    def dewarViewToggleCheckCB(self):
        """Refresh the dewar tree according to the currently checked view."""
        if (self.dewarViewRadio.isChecked()):
            self.dewarTree.refreshTreeDewarView()
        else:
            self.dewarTree.refreshTreePriorityView()

    def moveOmegaCB(self):
        """Send an absolute omega move to the server from the omega entry."""
        comm_s = "mvaDescriptor(\"omega\"," + str(self.sampleOmegaMoveLedit.getEntry().text()) + ")"
        logger.info(comm_s)
        self.send_to_server(comm_s)
    

    def moveEnergyCB(self):
        """Send an energy move to the server, refusing changes > 10 eV from
        the current readback."""
        energyRequest = float(str(self.energy_ledit.text()))
        if (abs(energyRequest-self.energy_pv.get()) > 10.0):
            self.popupServerMessage("Energy change must be less than 10 ev")
            return
        else: 
            comm_s = "mvaDescriptor(\"energy\"," + str(self.energy_ledit.text()) + ")"
            logger.info(comm_s) 
            self.send_to_server(comm_s)

    def calcLifetimeCB(self):
        """Estimate sample lifetime (time to 10 MGy) with RADDOSE-3D from the
        current energy readback, flux, vector length and wedge, and show it in
        green.  First call sets up the 2vb1.pdb link and rd3d work directory."""
        if (not os.path.exists("2vb1.pdb")):
            os.system("ln -s $CONFIGDIR/2vb1.pdb .")
            os.system("mkdir rd3d")
            os.system("chmod 777 rd3d") 
        
        energyReadback = self.energy_pv.get()/1000.0
        sampleFlux = self.sampleFluxPV.get()
        logger.info("sample flux = " + str(sampleFlux)) 
        try:
            vecLen_s = self.vecLenLabelOutput.text()
            if (vecLen_s != "---"):
                vecLen = float(vecLen_s)
            else:
                vecLen = 0
        except:
            vecLen = 0
        wedge = float(self.osc_end_ledit.text())
        try:
            lifeTime = raddoseLib.fmx_expTime_to_10MGy(beamsizeV = 3.0, beamsizeH = 5.0, vectorL = vecLen, energy = energyReadback, wedge = wedge, flux = sampleFlux, verbose = True) 
            lifeTime_s = "%.2f" % (lifeTime)
        except:
            lifeTime_s = "0.00"
        self.sampleLifetimeReadback_ledit.setText(lifeTime_s)
        self.sampleLifetimeReadback_ledit.setStyleSheet("color : green");
    

    def setTransCB(self):
        """Send the transmission setpoint to the server after range-checking
        it against 0.001-1.0 (fractional transmission)."""
        if (float(self.transmission_ledit.text()) > 1.0 or float(self.transmission_ledit.text()) < 0.001):
            self.popupServerMessage("Transmission must be 0.001-1.0")
            return
        comm_s = "setTrans(" + str(self.transmission_ledit.text()) + ")"
        logger.info(comm_s)
        self.send_to_server(comm_s)

    def setDCStartCB(self):
        """Copy the current omega readback (mod 360) into the osc start field."""
        currentPos = float(self.sampleOmegaRBVLedit.getEntry().text())%360.0
        self.setGuiValues({'osc_start':currentPos})
    
    
    def moveDetDistCB(self):
        """Send an absolute detector-distance move to the server."""
        comm_s = "mvaDescriptor(\"detectorDist\"," + str(self.detDistMotorEntry.getEntry().text()) + ")"
        logger.info(comm_s)
        self.send_to_server(comm_s)

    def omegaTweakNegCB(self):
        """Tweak omega by minus the tweak-value field (needs control master)."""
        tv = float(self.omegaTweakVal_ledit.text())
        tweakVal = 0.0-tv
        if (self.controlEnabled()):
            self.omegaTweak_pv.put(tweakVal)
        else:
            self.popupServerMessage("You don't have control")
    
    def omegaTweakPosCB(self):
        """Tweak omega by plus the tweak-value field (needs control master)."""
        tv = float(self.omegaTweakVal_ledit.text())
        if (self.controlEnabled()):
            self.omegaTweak_pv.put(tv)
        else:
            self.popupServerMessage("You don't have control")

    def focusTweakCB(self,tv):
        """Tweak sample focus by tv: project the requested focus move onto the
        sampy/sampz axes using the current omega angle."""
        tvf = float(tv) 
        if (self.controlEnabled()):
            tvY = tvf*(math.cos(math.radians(90.0 + self.motPos["omega"]))) #these are opposite C2C
            tvZ = tvf*(math.sin(math.radians(90.0 + self.motPos["omega"])))
            self.sampyTweak_pv.put(tvY)
            self.sampzTweak_pv.put(tvZ) 
        else:
            self.popupServerMessage("You don't have control")

    def omegaTweakCB(self,tv):
        """Tweak omega by tv degrees; short sleep throttles repeated events."""
        tvf = float(tv)
        if (self.controlEnabled()):
            self.omegaTweak_pv.put(tvf)
            time.sleep(0.05)
        else:
            self.popupServerMessage("You don't have control")

    def autoCenterLoopCB(self):
        """Ask the server to auto-center the loop using xrec."""
        logger.info("auto center loop")
        self.send_to_server("loop_center_xrec()")
    
    def autoRasterLoopCB(self):
        """Ask the server to run an automatic raster on the selected sample."""
        self.selectedSampleID = self.selectedSampleRequest["sample"]
        comm_s = "autoRasterLoop(" + str(self.selectedSampleID) + ")"
        self.send_to_server(comm_s)


    def runRastersCB(self):
        """Ask the server to run the selected raster request (snake pattern)."""
        comm_s = "snakeRaster(" + str(self.selectedSampleRequest["uid"]) + ")"
        self.send_to_server(comm_s)
    
    def drawInteractiveRasterCB(self): # any polygon for now, interactive or from xrec
        """Turn the user's clicked points into a raster definition.

        Two clicks define a single row/column (a thin 2-pixel-wide rectangle
        is synthesized); three or more clicks define a polygon.  The bounding
        box and the raster-step field are converted to pixels and handed to
        definePolyRaster().
        """
        for i in range(len(self.polyPointItems)):
            self.scene.removeItem(self.polyPointItems[i])
        polyPointItems = []
        pen = QtGui.QPen(QtCore.Qt.red)
        brush = QtGui.QBrush(QtCore.Qt.red)
        points = []
        polyPoints = [] 
        if (self.click_positions != []): #use the user clicks
            if (len(self.click_positions) == 2): #draws a single row or column
                logger.info("2-click raster")
                polyPoints.append(self.click_positions[0])
                point = QtCore.QPointF(self.click_positions[0].x(),self.click_positions[1].y())
                polyPoints.append(point)
                point = QtCore.QPointF(self.click_positions[0].x()+2,self.click_positions[1].y())
                polyPoints.append(point) 
                point = QtCore.QPointF(self.click_positions[0].x()+2,self.click_positions[0].y())
                polyPoints.append(point)
                self.rasterPoly = QtWidgets.QGraphicsPolygonItem(QtGui.QPolygonF(polyPoints))
            else:
                self.rasterPoly = QtWidgets.QGraphicsPolygonItem(QtGui.QPolygonF(self.click_positions))
        else:
            return
        self.polyBoundingRect = self.rasterPoly.boundingRect()
        raster_w = int(self.polyBoundingRect.width())
        raster_h = int(self.polyBoundingRect.height())
        center_x = int(self.polyBoundingRect.center().x())
        center_y = int(self.polyBoundingRect.center().y())
        stepsizeXPix = self.screenXmicrons2pixels(float(self.rasterStepEdit.text()))
        stepsizeYPix = self.screenYmicrons2pixels(float(self.rasterStepEdit.text())) 
        self.click_positions = []
        self.definePolyRaster(raster_w,raster_h,stepsizeXPix,stepsizeYPix,center_x,center_y)


    def measurePolyCB(self):
        """Measure the on-screen distance between two clicked points, draw the
        measurement line, and log its length converted to microns."""
        for i in range(len(self.polyPointItems)):
            self.scene.removeItem(self.polyPointItems[i])
        if (self.measureLine != None):
            self.scene.removeItem(self.measureLine)
        self.polyPointItems = []
        
        pen = QtGui.QPen(QtCore.Qt.red)
        brush = QtGui.QBrush(QtCore.Qt.red)
        points = []
        if (self.click_positions != []): #use the user clicks
            if (len(self.click_positions) == 2): #draws a single row or column
                self.measureLine = self.scene.addLine(self.click_positions[0].x(),self.click_positions[0].y(),self.click_positions[1].x(),self.click_positions[1].y(), pen)
                length = self.measureLine.line().length()
                fov = self.getCurrentFOV()
                # pixels -> microns using the horizontal field of view
                lineMicronsX = int(round(length * (fov["x"]/daq_utils.screenPixX)))
                logger.info("linelength = " + str(lineMicronsX)) 
        self.click_positions = []

    
    def center3LoopCB(self):
        """Start 3-click centering: reset the click counter, highlight the
        button, and drive omega to 0."""
        logger.info("3-click center loop")
        self.threeClickCount = 1
        self.click3Button.setStyleSheet("background-color: yellow")
        self.send_to_server("mvaDescriptor(\"omega\",0)")
    

    def fillPolyRaster(self,rasterReq,waitTime=1): #at this point I should have a drawn polyRaster
        """Color a drawn raster with its analysis results.

        Looks up the rasterResult for rasterReq in the database, builds an
        array of the selected score (spot count / intensity / d_min) in
        on-screen cell order - compensating for snake-pattern row reversal -
        then normalizes to a green color scale and stores score/filename/
        d_min/intensity on each cell item for later tooltips/selection.
        waitTime gives the database a moment to settle before reading.
        """
        time.sleep(waitTime)
        logger.info("filling poly for " + str(rasterReq["uid"]))
        resultCount = len(db_lib.getResultsforRequest(rasterReq["uid"]))
        rasterResults = db_lib.getResultsforRequest(rasterReq["uid"])
        rasterResult = {}
        for i in range (0,len(rasterResults)):
            if (rasterResults[i]['result_type'] == 'rasterResult'):
                rasterResult = rasterResults[i]
                break
        try:
            rasterDef = rasterReq["request_obj"]["rasterDef"]
        except KeyError:
            # malformed request - drop it rather than crash the GUI
            db_lib.deleteRequest(rasterReq["uid"])
            return
        rasterListIndex = 0
        for i in range(len(self.rasterList)):
            if (self.rasterList[i] != None):
                if (self.rasterList[i]["uid"] == rasterReq["uid"]):
                    rasterListIndex = i
        if (rasterResult == {}):
            return

        try:
            currentRasterGroup = self.rasterList[rasterListIndex]["graphicsItem"]
        except IndexError as e:
            logger.error('IndexError while getting raster group: %s' % e) 
            return
        self.currentRasterCellList = currentRasterGroup.childItems()
        cellResults = rasterResult["result_obj"]["rasterCellResults"]['resultObj']
        numLines = len(cellResults)
        cellResults_array = [{} for i in range(numLines)]
        my_array = np.zeros(numLines)
        spotLineCounter = 0
        cellIndex=0
        rowStartIndex = 0
        rasterEvalOption = str(self.rasterEvalComboBox.currentText())
        lenX = abs(rasterDef["rowDefs"][0]["end"]["x"] - rasterDef["rowDefs"][0]["start"]["x"]) #ugly for tile flip/noflip
        for i in range(len(rasterDef["rowDefs"])): #this is building up "my_array" with the rasterEvalOption result, and numpy can then be run against the array. 2/16, I think cellResultsArray not needed
            rowStartIndex = spotLineCounter
            numsteps = rasterDef["rowDefs"][i]["numsteps"]
            for j in range(numsteps):
                try:
                    cellResult = cellResults[spotLineCounter]
                except IndexError:
                    logger.error("caught index error #1")
                    logger.error("numlines = " + str(numLines))
                    logger.error("expected: " + str(len(rasterDef["rowDefs"])*numsteps))
                    return #means a raster failure, and not enough data to cover raster, caused a gui crash
                try:
                    spotcount = cellResult["spot_count_no_ice"]
                    filename = cellResult["image"] 
                except TypeError:
                    spotcount = 0
                    filename = "empty"

                if (lenX > 180 and self.scannerType == "PI"): #this is trying to figure out row direction
                    cellIndex = spotLineCounter
                else:
                    if (i%2 == 0): #this is trying to figure out row direction 
                        cellIndex = spotLineCounter
                    else:
                        # odd (reversed) snake row: mirror the index within the row
                        cellIndex = rowStartIndex + ((numsteps-1)-j)
                try:
                    if (rasterEvalOption == "Spot Count"):
                        my_array[cellIndex] = spotcount 
                    elif (rasterEvalOption == "Intensity"):
                        my_array[cellIndex] = cellResult["total_intensity"]
                    else:
                        if (float(cellResult["d_min"]) == -1):
                            my_array[cellIndex] = 50.0
                        else:
                            my_array[cellIndex] = float(cellResult["d_min"])
                except IndexError:
                    logger.error("caught index error #2")
                    logger.error("numlines = " + str(numLines))
                    logger.error("expected: " + str(len(rasterDef["rowDefs"])*numsteps))
                    return #means a raster failure, and not enough data to cover raster, caused a gui crash
                cellResults_array[cellIndex] = cellResult #instead of just grabbing filename, get everything. Not sure why I'm building my own list of results. How is this different from cellResults?
#I don't think cellResults_array is different from cellResults, could maybe test that below by subtituting one for the other. It may be a remnant of trying to store less than the whole result set. 
                spotLineCounter+=1
        floor = np.amin(my_array)
        ceiling = np.amax(my_array)
        cellCounter = 0 
        for i in range(len(rasterDef["rowDefs"])):
            rowCellCount = 0
            for j in range(rasterDef["rowDefs"][i]["numsteps"]):
                cellResult = cellResults_array[cellCounter]
                try:
                    spotcount = int(cellResult["spot_count_no_ice"])
                    cellFilename = cellResult["image"]
                    d_min = float(cellResult["d_min"])
                    if (d_min == -1):
                        d_min = 50.0 #trying to handle frames with no spots
                    total_intensity = int(cellResult["total_intensity"])
                except TypeError:
                    spotcount = 0
                    cellFilename = "empty"
                    d_min = 50.0
                    total_intensity = 0
                
                if (rasterEvalOption == "Spot Count"):
                    param = spotcount 
                elif (rasterEvalOption == "Intensity"):
                    param = total_intensity
                else:
                    param = d_min
                # map score to 0-255; resolution scales inverted (lower d_min = better)
                if (ceiling == 0):
                    color_id = 255
                elif ceiling == floor:
                    if rasterEvalOption == "Resolution":
                        color_id = 0
                    else:
                        color_id = 255
                elif (rasterEvalOption == "Resolution"):
                    color_id = int(255.0*(float(param-floor)/float(ceiling-floor)))
                else:
                    color_id = int(255-(255.0*(float(param-floor)/float(ceiling-floor))))
                self.currentRasterCellList[cellCounter].setBrush(QtGui.QBrush(QtGui.QColor(0,255-color_id,0,127)))
                self.currentRasterCellList[cellCounter].setData(0,spotcount)
                self.currentRasterCellList[cellCounter].setData(1,cellFilename)
                self.currentRasterCellList[cellCounter].setData(2,d_min)
                self.currentRasterCellList[cellCounter].setData(3,total_intensity)
                cellCounter+=1
    
    def takeRasterSnapshot(self,rasterReq):
        """Save a heat-map snapshot of a finished raster into the visit's
        jpegs directory and record the result in ISPyB."""
        request_obj = rasterReq["request_obj"] 
        directory = request_obj["directory"]
        filePrefix = request_obj['file_prefix']
        basePath = request_obj["basePath"]
        visitName = daq_utils.getVisitName()
        jpegDirectory = visitName + "/jpegs/" + directory[directory.find(visitName)+len(visitName):len(directory)] 
        fullJpegDirectory = basePath + "/" + jpegDirectory
        if (not os.path.exists(fullJpegDirectory)):
            os.system("mkdir -p " + fullJpegDirectory)
        jpegImagePrefix = fullJpegDirectory+"/"+filePrefix 
        jpegImageFilename = jpegImagePrefix+".jpg"
        # NOTE(review): jpegImageThumbFilename is computed but never used here
        jpegImageThumbFilename = jpegImagePrefix+"t.jpg"
        logger.info("saving raster snapshot")
        # NOTE(review): saveVidSnapshotCB is called twice with identical
        # arguments - possibly a deliberate retry, possibly a paste error;
        # confirm before changing.
        self.saveVidSnapshotCB("Raster Result from sample " + str(rasterReq["request_obj"]["file_prefix"]),useOlog=False,reqID=rasterReq["uid"],rasterHeatJpeg=jpegImageFilename)
        self.saveVidSnapshotCB("Raster Result from sample " + str(rasterReq["request_obj"]["file_prefix"]),useOlog=False,reqID=rasterReq["uid"],rasterHeatJpeg=jpegImageFilename)
        try:
            ispybLib.insertRasterResult(rasterReq,visitName)
        except Exception as e:
            logger.error(f'Exception while writing raster result: {e}')



    def reFillPolyRaster(self): 
        """Re-color every drawn raster from the scores already cached on the
        cell items (data slots 0/2/3), e.g. after the scoring option changes.
        NOTE(review): the inner loops rebind the outer loop variable 'i';
        harmless in Python but confusing - the outer iterator is unaffected.
        """
        rasterEvalOption = str(self.rasterEvalComboBox.currentText())
        for i in range(len(self.rasterList)):
            if (self.rasterList[i] != None):
                currentRasterGroup = self.rasterList[i]["graphicsItem"]
                currentRasterCellList = currentRasterGroup.childItems() 
                my_array = np.zeros(len(currentRasterCellList))
                for i in range (0,len(currentRasterCellList)): #first loop is to get floor and ceiling
                    cellIndex = i
                    if (rasterEvalOption == "Spot Count"):
                        spotcount = currentRasterCellList[i].data(0)
                        if not isinstance(spotcount, int):
                            spotcount = int(spotcount)
                        my_array[cellIndex] = spotcount 
                    elif (rasterEvalOption == "Intensity"):
                        total_intensity = currentRasterCellList[i].data(3)
                        if not isinstance(total_intensity, int):
                            total_intensity = int(total_intensity)
                        my_array[cellIndex] = total_intensity
                    else:
                        d_min = currentRasterCellList[i].data(2)
                        if not isinstance(d_min, float):
                            d_min = float(d_min)
                        if (d_min == -1):
                            d_min = 50.0 #trying to handle frames with no spots
                        my_array[cellIndex] = d_min
                floor = np.amin(my_array)
                ceiling = np.amax(my_array)
                for i in range (0,len(currentRasterCellList)):
                    if (rasterEvalOption == "Spot Count") or (rasterEvalOption == "Intensity"):
                        param = my_array[i] 
                    else:
                        d_min = my_array[i]
                        if (d_min == -1):
                            d_min = 50.0 #trying to handle frames with no spots
                        param = d_min
                    # same green color mapping as fillPolyRaster
                    if (ceiling == 0):
                        color_id = 255
                    elif ceiling == floor:
                        if rasterEvalOption == "Resolution":
                            color_id = 0
                        else:
                            color_id = 255
                    elif (rasterEvalOption == "Resolution"):
                        color_id = int(255.0*(float(param-floor)/float(ceiling-floor)))
                    else:
                        color_id = int(255-(255.0*(float(param-floor)/float(ceiling-floor))))
                    currentRasterCellList[i].setBrush(QtGui.QBrush(QtGui.QColor(0,255-color_id,0,127)))

    
    
    def saveCenterCB(self):
        """Drop a magenta marker at the center cursor and remember the current
        sample stage coordinates so this centering can be recalled later."""
        pen = QtGui.QPen(QtCore.Qt.magenta)
        brush = QtGui.QBrush(QtCore.Qt.magenta)
        markWidth = 10
        marker = self.scene.addEllipse(self.centerMarker.x()-(markWidth/2.0)-1+self.centerMarkerCharOffsetX,self.centerMarker.y()-(markWidth/2.0)-1+self.centerMarkerCharOffsetY,markWidth,markWidth,pen,brush)
        marker.setFlag(QtWidgets.QGraphicsItem.ItemIsSelectable, True) 
        self.centeringMark = {"sampCoords":{"x":self.sampx_pv.get(),"y":self.sampy_pv.get(),"z":self.sampz_pv.get()},"graphicsItem":marker,"centerCursorX":self.centerMarker.x(),"centerCursorY":self.centerMarker.y()}
        self.centeringMarksList.append(self.centeringMark)

    def selectAllCenterCB(self):
        """Select every saved centering marker in the scene."""
        logger.info("select all center")
        for i in range(len(self.centeringMarksList)):
            self.centeringMarksList[i]["graphicsItem"].setSelected(True) 


    def lightUpCB(self):
        """Ask the server to brighten the backlight."""
        self.send_to_server("backlightBrighter()") 

    def lightDimCB(self):
        """Ask the server to dim the backlight."""
        self.send_to_server("backlightDimmer()")
    
    def eraseRastersCB(self):
        """Remove all drawn raster graphics and clear the raster bookkeeping lists."""
        if (self.rasterList != []):
            for i in range(len(self.rasterList)):
                if (self.rasterList[i] != None):
                    self.scene.removeItem(self.rasterList[i]["graphicsItem"])
            self.rasterList = []
            self.rasterDefList = []
            self.currentRasterCellList = []
    

    def eraseCB(self):
        """Clear every overlay from the video scene: clicks, measurement line,
        centering marks, polygon points, rasters, vector markers, and the
        pending raster polygon."""
        self.click_positions = []
        if (self.measureLine != None):
            self.scene.removeItem(self.measureLine)
        for i in range(len(self.centeringMarksList)):
            self.scene.removeItem(self.centeringMarksList[i]["graphicsItem"]) 
        self.centeringMarksList = []
        for i in range(len(self.polyPointItems)):
            self.scene.removeItem(self.polyPointItems[i])
        self.polyPointItems = []
        if (self.rasterList != []):
            for i in range(len(self.rasterList)):
                if (self.rasterList[i] != None):
                    self.scene.removeItem(self.rasterList[i]["graphicsItem"])
            self.rasterList = []
            self.rasterDefList = []
            self.currentRasterCellList = []
        self.clearVectorCB()
        if (self.rasterPoly != None): 
            self.scene.removeItem(self.rasterPoly)
        self.rasterPoly = None


    def eraseDisplayCB(self): #use this for things like zoom change. This is not the same as getting rid of all rasters.
        """Remove raster graphics (or the pending polygon) from the display
        only - definitions/results elsewhere are untouched."""
        if (self.rasterList != []):
            for i in range(len(self.rasterList)):
                if (self.rasterList[i] != None):
                    self.scene.removeItem(self.rasterList[i]["graphicsItem"])
            self.rasterList = []
            return #short circuit
        if (self.rasterPoly != None): 
            self.scene.removeItem(self.rasterPoly)


    def getCurrentFOV(self):
        """Return the current camera field of view {"x":..., "y":...} based on
        the checked zoom radio button.  Units match daq_utils.lowMagFOVx etc.
        (presumably microns - confirm against daq_utils)."""
        fov = {"x":0.0,"y":0.0}
        if (self.zoom2Radio.isChecked()): #lowmagzoom 
            fov["x"] = daq_utils.lowMagFOVx/2.0
            fov["y"] = daq_utils.lowMagFOVy/2.0
        elif (self.zoom1Radio.isChecked()):
            fov["x"] = daq_utils.lowMagFOVx
            fov["y"] = daq_utils.lowMagFOVy
        elif (self.zoom4Radio.isChecked()): 
            fov["x"] = daq_utils.highMagFOVx/2.0
            fov["y"] = daq_utils.highMagFOVy/2.0
        else:
            fov["x"] = daq_utils.highMagFOVx
            fov["y"] = daq_utils.highMagFOVy
        return fov


    def screenXPixels2microns(self,pixels):
        """Convert horizontal screen pixels to microns at the current zoom."""
        fov = self.getCurrentFOV()
        fovX = fov["x"]
        return float(pixels)*(fovX/daq_utils.screenPixX)

    def screenYPixels2microns(self,pixels):
        """Convert vertical screen pixels to microns at the current zoom."""
        fov = self.getCurrentFOV()
        fovY = fov["y"]
        return float(pixels)*(fovY/daq_utils.screenPixY)

    def screenXmicrons2pixels(self,microns):
        """Convert microns to horizontal screen pixels at the current zoom."""
        fov = self.getCurrentFOV()
        fovX = fov["x"]
        return int(round(microns*(daq_utils.screenPixX/fovX)))

    def screenYmicrons2pixels(self,microns):
        """Convert microns to vertical screen pixels at the current zoom."""
        fov = self.getCurrentFOV()
        fovY = fov["y"]
        return int(round(microns*(daq_utils.screenPixY/fovY)))



    def definePolyRaster(self,raster_w,raster_h,stepsizeXPix,stepsizeYPix,point_x,point_y): #all come in as pixels, raster_w and raster_h are bounding box of drawn graphic
#raster status - 0=nothing done, 1=run, 2=displayed
        """Build a rasterDef dict from a drawn bounding box (all args pixels),
        convert rows to micron coordinates relative to the center marker,
        persist the current GUI values as raster defaults, and submit it as a
        sample request.  Rejects stage speeds above 2 mm/s.  Rows run
        vertically when the shape is taller than wide (and vertRasterOn) or a
        single column; otherwise horizontally."""
        stepTime = float(self.exp_time_ledit.text())
        stepsize =float(self.rasterStepEdit.text())
        if ((stepsize/1000.0)/stepTime > 2.0):
            self.popupServerMessage("Stage speed exceeded. Increase exposure time, or decrease step size. Limit is 2mm/s.")
            self.eraseCB() 
            return
        
        try:
            beamWidth = float(self.beamWidth_ledit.text())
            beamHeight = float(self.beamHeight_ledit.text())
        except ValueError:
            logger.error('bad value for beam width or beam height')
            self.popupServerMessage('bad value for beam width or beam height')
            return
        if (self.scannerType == "PI"):
            # PI fine-stage scanner: fold the fine-stage offsets into the base position
            rasterDef = {"rasterType":"normal","beamWidth":beamWidth,"beamHeight":beamHeight,"status":RasterStatus.NEW.value,"x":self.sampx_pv.get()+self.sampFineX_pv.get(),"y":self.sampy_pv.get()+self.sampFineY_pv.get(),"z":self.sampz_pv.get()+self.sampFineZ_pv.get(),"omega":self.omega_pv.get(),"stepsize":stepsize,"rowDefs":[]} #just storing step as microns, not using her
        else:
            rasterDef = {"rasterType":"normal","beamWidth":beamWidth,"beamHeight":beamHeight,"status":RasterStatus.NEW.value,"x":self.sampx_pv.get(),"y":self.sampy_pv.get(),"z":self.sampz_pv.get(),"omega":self.omega_pv.get(),"stepsize":stepsize,"rowDefs":[]} #just storing step as microns, not using here 
        numsteps_h = int(raster_w/stepsizeXPix) #raster_w = width,goes to numsteps horizonatl
        numsteps_v = int(raster_h/stepsizeYPix)
        if (numsteps_h == 2):
            numsteps_h = 1 #fix slop in user single line attempt
        if (numsteps_h%2 == 0): # make odd numbers of rows and columns
            numsteps_h = numsteps_h + 1
        if (numsteps_v%2 == 0):
            numsteps_v = numsteps_v + 1
        rasterDef["numCells"] = numsteps_h * numsteps_v
        point_offset_x = -(numsteps_h*stepsizeXPix)/2
        point_offset_y = -(numsteps_v*stepsizeYPix)/2
        if ((numsteps_h == 1) or (numsteps_v > numsteps_h and getBlConfig("vertRasterOn"))): #vertical raster
            for i in range(numsteps_h):
                rowCellCount = 0
                for j in range(numsteps_v):
                    newCellX = point_x+(i*stepsizeXPix)+point_offset_x
                    newCellY = point_y+(j*stepsizeYPix)+point_offset_y
                    if (rowCellCount == 0): #start of a new row
                        rowStartX = newCellX
                        rowStartY = newCellY
                    rowCellCount = rowCellCount+1
                if (rowCellCount != 0): #test for no points in this row of the bounding rect are in the poly?
                    vectorStartX = self.screenXPixels2microns(rowStartX-self.centerMarker.x()-self.centerMarkerCharOffsetX)
                    vectorEndX = vectorStartX 
                    vectorStartY = self.screenYPixels2microns(rowStartY-self.centerMarker.y()-self.centerMarkerCharOffsetY)
                    vectorEndY = vectorStartY + self.screenYPixels2microns(rowCellCount*stepsizeYPix)
                    newRowDef = {"start":{"x": vectorStartX,"y":vectorStartY},"end":{"x":vectorEndX,"y":vectorEndY},"numsteps":rowCellCount}
                    rasterDef["rowDefs"].append(newRowDef)
        else: #horizontal raster
            for i in range(numsteps_v):
                rowCellCount = 0
                for j in range(numsteps_h):
                    newCellX = point_x+(j*stepsizeXPix)+point_offset_x
                    newCellY = point_y+(i*stepsizeYPix)+point_offset_y
                    if (rowCellCount == 0): #start of a new row
                        rowStartX = newCellX
                        rowStartY = newCellY
                    rowCellCount = rowCellCount+1
                if (rowCellCount != 0): #testing for no points in this row of the bounding rect are in the poly?
                    vectorStartX = self.screenXPixels2microns(rowStartX-self.centerMarker.x()-self.centerMarkerCharOffsetX)
                    vectorEndX = vectorStartX + self.screenXPixels2microns(rowCellCount*stepsizeXPix) #this looks better
                    vectorStartY = self.screenYPixels2microns(rowStartY-self.centerMarker.y()-self.centerMarkerCharOffsetY)
                    vectorEndY = vectorStartY
                    newRowDef = {"start":{"x": vectorStartX,"y":vectorStartY},"end":{"x":vectorEndX,"y":vectorEndY},"numsteps":rowCellCount}
                    rasterDef["rowDefs"].append(newRowDef)
        # remember the last-used raster parameters as the new defaults
        setBlConfig("rasterDefaultWidth",float(self.osc_range_ledit.text()))
        setBlConfig("rasterDefaultTime",float(self.exp_time_ledit.text()))
        setBlConfig("rasterDefaultTrans",float(self.transmission_ledit.text()))
        
        self.addSampleRequestCB(rasterDef)
        return #short circuit


    def rasterIsDrawn(self,rasterReq):
        """Return True if a graphics item for this request's uid is on screen."""
        for i in range(len(self.rasterList)):
            if (self.rasterList[i] != None):
                if (self.rasterList[i]["uid"] == rasterReq["uid"]):
                    return True
        return False
    


    def drawPolyRaster(self,rasterReq,x=-1,y=-1,z=-1): #rasterDef in microns,offset from center, need to convert to pixels to draw, mainly this is for displaying autoRasters, but also called in zoom change
        """Draw the cells of a stored rasterDef (micron coordinates relative
        to the center marker) as a RasterGroup of RasterCell items.
        (Definition continues beyond this chunk.)"""
        try:
            rasterDef = rasterReq["request_obj"]["rasterDef"]
        except KeyError:
            return
        beamSize = self.screenXmicrons2pixels(rasterDef["beamWidth"])
        stepsizeX = self.screenXmicrons2pixels(rasterDef["stepsize"])
        stepsizeY = self.screenYmicrons2pixels(rasterDef["stepsize"]) 
        pen = QtGui.QPen(QtCore.Qt.red)
        newRasterCellList = []
        try:
            if (rasterDef["rowDefs"][0]["start"]["y"] == rasterDef["rowDefs"][0]["end"]["y"]): #this is a horizontal raster
                rasterDir = "horizontal"
            else:
                rasterDir = "vertical"
        except IndexError:
            return
        for i in range(len(rasterDef["rowDefs"])):
            rowCellCount = 0
            for j in range(rasterDef["rowDefs"][i]["numsteps"]):
                if (rasterDir == "horizontal"):
                    newCellX = self.screenXmicrons2pixels(rasterDef["rowDefs"][i]["start"]["x"])+(j*stepsizeX)+self.centerMarker.x()+self.centerMarkerCharOffsetX
                    newCellY = self.screenYmicrons2pixels(rasterDef["rowDefs"][i]["start"]["y"])+self.centerMarker.y()+self.centerMarkerCharOffsetY
                else:
                    newCellX = self.screenXmicrons2pixels(rasterDef["rowDefs"][i]["start"]["x"])+self.centerMarker.x()+self.centerMarkerCharOffsetX
                    newCellY = self.screenYmicrons2pixels(rasterDef["rowDefs"][i]["start"]["y"])+(j*stepsizeY)+self.centerMarker.y()+self.centerMarkerCharOffsetY
                if (rowCellCount == 0): #start of a new row
                    rowStartX = newCellX
                    rowStartY = newCellY
                newCellX = int(newCellX)
                newCellY = int(newCellY)
                newCell = RasterCell(newCellX,newCellY,stepsizeX, stepsizeY, self)
                newRasterCellList.append(newCell)
                newCell.setPen(pen)
                rowCellCount = rowCellCount+1 #really just for test of new row
        newItemGroup = RasterGroup(self)
        self.scene.addItem(newItemGroup)
        for i in
range(len(newRasterCellList)):\n newItemGroup.addToGroup(newRasterCellList[i])\n newRasterGraphicsDesc = {\"uid\":rasterReq[\"uid\"],\"coords\":{\"x\":rasterDef[\"x\"],\"y\":rasterDef[\"y\"],\"z\":rasterDef[\"z\"],\"omega\":rasterDef[\"omega\"]},\"graphicsItem\":newItemGroup}\n self.rasterList.append(newRasterGraphicsDesc)\n\n\n def timerHutchRefresh(self):\n try:\n # instead of the previous StringIO, use BytesIO:\n # https://stackoverflow.com/questions/41340296/how-can-pillow-open-uploaded-image-file-from-stringio-directly\n file = BytesIO(urllib.request.urlopen(getBlConfig(\"hutchCornerCamURL\")).read())\n img = Image.open(file)\n qimage = ImageQt.ImageQt(img)\n pixmap_orig = QtGui.QPixmap.fromImage(qimage)\n self.pixmap_item_HutchCorner.setPixmap(pixmap_orig) \n except Exception as e:\n logger.error('Exception during hutch corner cam handling: %s URL: %s' % (e, getBlConfig('hutchCornerCamURL')))\n try:\n file = BytesIO(urllib.request.urlopen(getBlConfig(\"hutchTopCamURL\")).read())\n img = Image.open(file)\n qimage = ImageQt.ImageQt(img)\n pixmap_orig = QtGui.QPixmap.fromImage(qimage)\n self.pixmap_item_HutchTop.setPixmap(pixmap_orig)\n except Exception as e:\n logger.error('Exception during hutch top cam handling: %s URL: %s' % (e, getBlConfig('hutchTopCamURL')))\n \n\n def timerSampleRefresh(self):\n if self.capture is None:\n return \n retval,self.currentFrame = self.capture.read()\n if self.currentFrame is None:\n logger.warning('no frame read from stream URL - ensure the URL does not end with newline and that the filename is correct')\n return #maybe stop the timer also???\n height,width=self.currentFrame.shape[:2]\n qimage=QtGui.QImage(self.currentFrame,width,height,3*width,QtGui.QImage.Format_RGB888)\n qimage = qimage.rgbSwapped()\n pixmap_orig = QtGui.QPixmap.fromImage(qimage)\n self.pixmap_item.setPixmap(pixmap_orig)\n\n\n def sceneKey(self, event):\n if (event.key() == QtCore.Qt.Key_Delete or event.key() == QtCore.Qt.Key_Backspace):\n for i in 
range(len(self.rasterList)):\n if (self.rasterList[i] != None):\n if (self.rasterList[i][\"graphicsItem\"].isSelected()):\n try:\n sceneReq = db_lib.getRequestByID(self.rasterList[i][\"uid\"])\n if (sceneReq != None):\n self.selectedSampleID = sceneReq[\"sample\"]\n db_lib.deleteRequest(sceneReq)[\"uid\"]\n except AttributeError:\n pass\n self.scene.removeItem(self.rasterList[i][\"graphicsItem\"])\n self.rasterList[i] = None\n self.treeChanged_pv.put(1)\n for i in range(len(self.centeringMarksList)):\n if (self.centeringMarksList[i] != None):\n if (self.centeringMarksList[i][\"graphicsItem\"].isSelected()):\n self.scene.removeItem(self.centeringMarksList[i][\"graphicsItem\"]) \n self.centeringMarksList[i] = None\n \n\n def pixelSelect(self, event):\n super(QtWidgets.QGraphicsPixmapItem, self.pixmap_item).mousePressEvent(event)\n x_click = float(event.pos().x())\n y_click = float(event.pos().y())\n penGreen = QtGui.QPen(QtCore.Qt.green)\n penRed = QtGui.QPen(QtCore.Qt.red)\n if (self.vidActionDefineCenterRadio.isChecked()):\n self.vidActionC2CRadio.setChecked(True) #because it's easy to forget defineCenter is on\n if (self.zoom4Radio.isChecked()): \n comm_s = \"changeImageCenterHighMag(\" + str(x_click) + \",\" + str(y_click) + \",1)\"\n elif (self.zoom3Radio.isChecked()):\n comm_s = \"changeImageCenterHighMag(\" + str(x_click) + \",\" + str(y_click) + \",0)\" \n if (self.zoom2Radio.isChecked()): \n comm_s = \"changeImageCenterLowMag(\" + str(x_click) + \",\" + str(y_click) + \",1)\"\n elif (self.zoom1Radio.isChecked()):\n comm_s = \"changeImageCenterLowMag(\" + str(x_click) + \",\" + str(y_click) + \",0)\" \n self.send_to_server(comm_s)\n return\n if (self.vidActionRasterDefRadio.isChecked()):\n self.click_positions.append(event.pos())\n self.polyPointItems.append(self.scene.addEllipse(x_click, y_click, 4, 4, penRed))\n if (len(self.click_positions) == 4):\n self.drawInteractiveRasterCB()\n return\n fov = self.getCurrentFOV()\n correctedC2C_x = 
daq_utils.screenPixCenterX + (x_click - (self.centerMarker.x()+self.centerMarkerCharOffsetX))\n correctedC2C_y = daq_utils.screenPixCenterY + (y_click - (self.centerMarker.y()+self.centerMarkerCharOffsetY)) \n if (self.threeClickCount > 0): #3-click centering\n self.threeClickCount = self.threeClickCount + 1\n comm_s = 'center_on_click(' + str(correctedC2C_x) + \",\" + str(correctedC2C_y) + \",\" + str(fov[\"x\"]) + \",\" + str(fov[\"y\"]) + \",\" + '\"screen\",jog=90)' \n else:\n comm_s = 'center_on_click(' + str(correctedC2C_x) + \",\" + str(correctedC2C_y) + \",\" + str(fov[\"x\"]) + \",\" + str(fov[\"y\"]) + \",\" + '\"screen\",0)'\n if (not self.vidActionRasterExploreRadio.isChecked()):\n self.aux_send_to_server(comm_s)\n if (self.threeClickCount == 4):\n self.threeClickCount = 0\n self.click3Button.setStyleSheet(\"background-color: None\") \n return \n\n\n def editScreenParamsCB(self):\n self.screenDefaultsDialog = ScreenDefaultsDialog(self)\n self.screenDefaultsDialog.show()\n\n\n def editSelectedRequestsCB(self):\n selmod = self.dewarTree.selectionModel()\n selection = selmod.selection()\n indexes = selection.indexes()\n singleRequest = 1\n for i in range(len(indexes)):\n item = self.dewarTree.model.itemFromIndex(indexes[i])\n itemData = str(item.data(32))\n itemDataType = str(item.data(33))\n if (itemDataType == \"request\"): \n self.selectedSampleRequest = db_lib.getRequestByID(itemData)\n self.editSampleRequestCB(singleRequest)\n singleRequest = 0\n self.treeChanged_pv.put(1)\n\n\n\n def editSampleRequestCB(self,singleRequest):\n colRequest=self.selectedSampleRequest\n reqObj = colRequest[\"request_obj\"]\n if not self.validateAllFields():\n return\n reqObj[\"sweep_start\"] = float(self.osc_start_ledit.text())\n reqObj[\"sweep_end\"] = float(self.osc_end_ledit.text())+float(self.osc_start_ledit.text())\n reqObj[\"img_width\"] = float(self.osc_range_ledit.text())\n reqObj[\"exposure_time\"] = float(self.exp_time_ledit.text())\n reqObj[\"detDist\"] = 
float(self.detDistMotorEntry.getEntry().text()) \n reqObj[\"resolution\"] = float(self.resolution_ledit.text())\n if (singleRequest == 1): # a touch kludgy, but I want to be able to edit parameters for multiple requests w/o screwing the data loc info\n reqObj[\"file_prefix\"] = str(self.dataPathGB.prefix_ledit.text())\n reqObj[\"basePath\"] = str(self.dataPathGB.base_path_ledit.text())\n reqObj[\"directory\"] = str(self.dataPathGB.dataPath_ledit.text())\n reqObj[\"file_number_start\"] = int(self.dataPathGB.file_numstart_ledit.text())\n reqObj[\"attenuation\"] = float(self.transmission_ledit.text())\n reqObj[\"slit_width\"] = float(self.beamWidth_ledit.text())\n reqObj[\"slit_height\"] = float(self.beamHeight_ledit.text())\n reqObj[\"energy\"] = float(self.energy_ledit.text())\n wave = daq_utils.energy2wave(float(self.energy_ledit.text()))\n reqObj[\"wavelength\"] = wave\n reqObj[\"fastDP\"] =(self.fastDPCheckBox.isChecked() or self.fastEPCheckBox.isChecked() or self.dimpleCheckBox.isChecked())\n reqObj[\"fastEP\"] =self.fastEPCheckBox.isChecked()\n reqObj[\"dimple\"] =self.dimpleCheckBox.isChecked() \n reqObj[\"xia2\"] =self.xia2CheckBox.isChecked()\n reqObj[\"protocol\"] = str(self.protoComboBox.currentText())\n if (reqObj[\"protocol\"] == \"vector\" or reqObj[\"protocol\"] == \"stepVector\"):\n reqObj[\"vectorParams\"][\"fpp\"] = int(self.vectorFPP_ledit.text())\n colRequest[\"request_obj\"] = reqObj\n db_lib.updateRequest(colRequest)\n self.treeChanged_pv.put(1)\n\n def addRequestsToAllSelectedCB(self):\n if (self.protoComboBox.currentText() == \"raster\" or self.protoComboBox.currentText() == \"stepRaster\" or self.protoComboBox.currentText() == \"specRaster\"): #it confused people when they didn't need to add rasters explicitly\n return\n selmod = self.dewarTree.selectionModel()\n selection = selmod.selection()\n indexes = selection.indexes()\n try:\n progressInc = 100.0/float(len(indexes))\n except ZeroDivisionError:\n self.popupServerMessage(\"Select a 
sample to perform the request on!\")\n return\n self.progressDialog.setWindowTitle(\"Creating Requests\")\n self.progressDialog.show()\n for i in range(len(indexes)):\n self.progressDialog.setValue(int((i+1)*progressInc))\n item = self.dewarTree.model.itemFromIndex(indexes[i])\n itemData = str(item.data(32))\n itemDataType = str(item.data(33)) \n if (itemDataType == \"sample\"): \n self.selectedSampleID = itemData\n if (getBlConfig(\"queueCollect\") == 0):\n if (self.mountedPin_pv.get() != self.selectedSampleID): \n self.popupServerMessage(\"You can only add requests to a mounted sample, for now.\")\n self.progressDialog.close() \n return\n\n try:\n self.selectedSampleRequest = daq_utils.createDefaultRequest(self.selectedSampleID) #7/21/15 - not sure what this does, b/c I don't pass it, ahhh probably the commented line for prefix\n except KeyError:\n self.popupServerMessage(\"Please select a sample!\")\n self.progressDialog.close()\n return\n if (len(indexes)>1):\n self.dataPathGB.setFilePrefix_ledit(str(self.selectedSampleRequest[\"request_obj\"][\"file_prefix\"]))\n self.dataPathGB.setDataPath_ledit(str(self.selectedSampleRequest[\"request_obj\"][\"directory\"]))\n self.EScanDataPathGB.setFilePrefix_ledit(str(self.selectedSampleRequest[\"request_obj\"][\"file_prefix\"]))\n self.EScanDataPathGB.setDataPath_ledit(str(self.selectedSampleRequest[\"request_obj\"][\"directory\"]))\n if (itemDataType != \"container\"):\n self.addSampleRequestCB(selectedSampleID=self.selectedSampleID)\n self.progressDialog.close()\n self.treeChanged_pv.put(1)\n\n\n def addSampleRequestCB(self,rasterDef=None,selectedSampleID=None):\n if (self.selectedSampleID != None):\n try:\n sample = db_lib.getSampleByID(self.selectedSampleID)\n propNum = sample[\"proposalID\"]\n except KeyError:\n propNum = 999999\n if (propNum == None):\n propNum = 999999 \n if (propNum != daq_utils.getProposalID()):\n logger.info(\"setting proposal in add request\")\n 
daq_utils.setProposalID(propNum,createVisit=True)\n\n if (getBlConfig(\"queueCollect\") == 0):\n if (self.mountedPin_pv.get() != self.selectedSampleID): \n self.popupServerMessage(\"You can only add requests to a mounted sample, for now.\")\n return\n \n if not self.validateAllFields():\n return\n#skinner, not pretty below the way stuff is duplicated.\n if ((float(self.osc_end_ledit.text()) < float(self.osc_range_ledit.text())) and str(self.protoComboBox.currentText()) != \"eScan\"):\n self.popupServerMessage(\"Osc range less than Osc width\")\n return\n\n if (self.periodicTable.isVisible()):\n if (self.periodicTable.eltCurrent != None):\n symbol = self.periodicTable.eltCurrent.symbol\n targetEdge = element_info[symbol][2]\n if (daq_utils.beamline == \"fmx\"): \n mcaRoiLo = element_info[symbol][4]\n mcaRoiHi = element_info[symbol][5]\n else:\n mcaRoiLo = self.XRFInfoDict[symbol]-25\n mcaRoiHi = self.XRFInfoDict[symbol]+25\n targetEnergy = Elements.Element[symbol][\"binding\"][targetEdge]\n colRequest = daq_utils.createDefaultRequest(self.selectedSampleID)\n sampleName = str(db_lib.getSampleNamebyID(colRequest[\"sample\"]))\n runNum = db_lib.incrementSampleRequestCount(colRequest[\"sample\"])\n (puckPosition,samplePositionInContainer,containerID) = db_lib.getCoordsfromSampleID(daq_utils.beamline,colRequest[\"sample\"])\n reqObj = get_request_object_escan(colRequest[\"request_obj\"], self.periodicTable.eltCurrent.symbol, runNum, self.EScanDataPathGB.prefix_ledit.text(),\n self.EScanDataPathGB.base_path_ledit.text(), sampleName, containerID, samplePositionInContainer,\n self.EScanDataPathGB.file_numstart_ledit.text(), self.exp_time_ledit.text(), targetEnergy, self.escan_steps_ledit.text(),\n self.escan_stepsize_ledit.text())\n reqObj[\"detDist\"] = float(self.detDistMotorEntry.getEntry().text())\n reqObj[\"attenuation\"] = float(self.transmission_ledit.text()) \n reqObj[\"mcaRoiLo\"] = mcaRoiLo\n reqObj[\"mcaRoiHi\"] = mcaRoiHi\n\n colRequest[\"request_obj\"] = 
reqObj\n newSampleRequestID = db_lib.addRequesttoSample(self.selectedSampleID,reqObj[\"protocol\"],daq_utils.owner,reqObj,priority=5000,proposalID=daq_utils.getProposalID())\n#attempt here to select a newly created request. \n self.SelectedItemData = newSampleRequestID\n \n if (selectedSampleID == None): #this is a temp kludge to see if this is called from addAll\n self.treeChanged_pv.put(1)\n else:\n logger.info(\"choose an element and try again\")\n return \n\n# I don't like the code duplication, but one case is the mounted sample and selected centerings - so it's in a loop for multiple reqs, the other requires autocenter.\n if ((self.mountedPin_pv.get() == self.selectedSampleID) and (len(self.centeringMarksList) != 0)): \n selectedCenteringFound = 0\n for i in range(len(self.centeringMarksList)):\n if (self.centeringMarksList[i][\"graphicsItem\"].isSelected()):\n selectedCenteringFound = 1\n colRequest = daq_utils.createDefaultRequest(self.selectedSampleID)\n sampleName = str(db_lib.getSampleNamebyID(colRequest[\"sample\"]))\n runNum = db_lib.incrementSampleRequestCount(colRequest[\"sample\"])\n (puckPosition,samplePositionInContainer,containerID) = db_lib.getCoordsfromSampleID(daq_utils.beamline,colRequest[\"sample\"]) \n reqObj = colRequest[\"request_obj\"]\n reqObj[\"runNum\"] = runNum\n reqObj[\"sweep_start\"] = float(self.osc_start_ledit.text())\n reqObj[\"sweep_end\"] = float(self.osc_end_ledit.text())+float(self.osc_start_ledit.text())\n reqObj[\"img_width\"] = float(self.osc_range_ledit.text())\n setBlConfig(\"screen_default_width\",float(self.osc_range_ledit.text()))\n setBlConfig(\"screen_default_time\",float(self.exp_time_ledit.text()))\n setBlConfig(\"stdTrans\",float(self.transmission_ledit.text()))\n setBlConfig(\"screen_default_dist\",float(self.detDistMotorEntry.getEntry().text()))\n reqObj[\"exposure_time\"] = float(self.exp_time_ledit.text())\n reqObj[\"resolution\"] = float(self.resolution_ledit.text())\n reqObj[\"file_prefix\"] = 
str(self.dataPathGB.prefix_ledit.text()+\"_C\"+str(i+1))\n reqObj[\"basePath\"] = str(self.dataPathGB.base_path_ledit.text())\n reqObj[\"directory\"] = str(self.dataPathGB.base_path_ledit.text())+\"/\"+ str(daq_utils.getVisitName()) + \"/\"+sampleName+\"/\" + str(runNum) + \"/\"+db_lib.getContainerNameByID(containerID)+\"_\"+str(samplePositionInContainer+1)+\"/\" \n reqObj[\"file_number_start\"] = int(self.dataPathGB.file_numstart_ledit.text())\n reqObj[\"attenuation\"] = float(self.transmission_ledit.text())\n reqObj[\"slit_width\"] = float(self.beamWidth_ledit.text())\n reqObj[\"slit_height\"] = float(self.beamHeight_ledit.text())\n reqObj[\"energy\"] = float(self.energy_ledit.text()) \n wave = daq_utils.energy2wave(float(self.energy_ledit.text()))\n reqObj[\"wavelength\"] = wave\n reqObj[\"detDist\"] = float(self.detDistMotorEntry.getEntry().text()) \n reqObj[\"protocol\"] = str(self.protoComboBox.currentText())\n reqObj[\"pos_x\"] = float(self.centeringMarksList[i][\"sampCoords\"][\"x\"])\n reqObj[\"pos_y\"] = float(self.centeringMarksList[i][\"sampCoords\"][\"y\"])\n reqObj[\"pos_z\"] = float(self.centeringMarksList[i][\"sampCoords\"][\"z\"])\n reqObj[\"fastDP\"] = (self.fastDPCheckBox.isChecked() or self.fastEPCheckBox.isChecked() or self.dimpleCheckBox.isChecked())\n reqObj[\"fastEP\"] =self.fastEPCheckBox.isChecked()\n reqObj[\"dimple\"] =self.dimpleCheckBox.isChecked() \n reqObj[\"xia2\"] =self.xia2CheckBox.isChecked()\n if (reqObj[\"protocol\"] == \"characterize\" or reqObj[\"protocol\"] == \"ednaCol\"):\n characterizationParams = {\"aimed_completeness\":float(self.characterizeCompletenessEdit.text()),\"aimed_multiplicity\":str(self.characterizeMultiplicityEdit.text()),\"aimed_resolution\":float(self.characterizeResoEdit.text()),\"aimed_ISig\":float(self.characterizeISIGEdit.text())}\n reqObj[\"characterizationParams\"] = characterizationParams\n colRequest[\"request_obj\"] = reqObj \n newSampleRequestID = 
db_lib.addRequesttoSample(self.selectedSampleID,reqObj[\"protocol\"],daq_utils.owner,reqObj,priority=5000,proposalID=daq_utils.getProposalID())\n#attempt here to select a newly created request. \n self.SelectedItemData = newSampleRequestID\n if (selectedCenteringFound == 0):\n message = QtWidgets.QErrorMessage(self)\n message.setModal(False)\n message.showMessage(\"You need to select a centering.\")\n else: #autocenter or interactive\n colRequest=self.selectedSampleRequest\n try:\n sampleName = str(db_lib.getSampleNamebyID(colRequest[\"sample\"]))\n except KeyError:\n logger.error('no sample selected')\n self.popupServerMessage('no sample selected')\n return\n (puckPosition,samplePositionInContainer,containerID) = db_lib.getCoordsfromSampleID(daq_utils.beamline,colRequest[\"sample\"]) \n runNum = db_lib.incrementSampleRequestCount(colRequest[\"sample\"])\n reqObj = colRequest[\"request_obj\"]\n centeringOption = str(self.centeringComboBox.currentText())\n reqObj[\"centeringOption\"] = centeringOption \n if ((centeringOption == \"Interactive\" and self.mountedPin_pv.get() == self.selectedSampleID) or centeringOption == \"Testing\"): #user centered manually\n reqObj[\"pos_x\"] = float(self.sampx_pv.get())\n reqObj[\"pos_y\"] = float(self.sampy_pv.get())\n reqObj[\"pos_z\"] = float(self.sampz_pv.get())\n reqObj[\"runNum\"] = runNum\n reqObj[\"sweep_start\"] = float(self.osc_start_ledit.text())\n reqObj[\"sweep_end\"] = float(self.osc_end_ledit.text())+float(self.osc_start_ledit.text())\n reqObj[\"img_width\"] = float(self.osc_range_ledit.text())\n reqObj[\"exposure_time\"] = float(self.exp_time_ledit.text())\n if (rasterDef == None and reqObj[\"protocol\"] != \"burn\"): \n setBlConfig(\"screen_default_width\",float(self.osc_range_ledit.text()))\n setBlConfig(\"screen_default_time\",float(self.exp_time_ledit.text()))\n setBlConfig(\"stdTrans\",float(self.transmission_ledit.text()))\n setBlConfig(\"screen_default_dist\",float(self.detDistMotorEntry.getEntry().text())) 
\n reqObj[\"resolution\"] = float(self.resolution_ledit.text())\n reqObj[\"directory\"] = str(self.dataPathGB.base_path_ledit.text())+ \"/\" + str(daq_utils.getVisitName()) + \"/\" +str(self.dataPathGB.prefix_ledit.text())+\"/\" + str(runNum) + \"/\"+db_lib.getContainerNameByID(containerID)+\"_\"+str(samplePositionInContainer+1)+\"/\"\n reqObj[\"basePath\"] = str(self.dataPathGB.base_path_ledit.text())\n reqObj[\"file_prefix\"] = str(self.dataPathGB.prefix_ledit.text())\n reqObj[\"file_number_start\"] = int(self.dataPathGB.file_numstart_ledit.text())\n if (abs(reqObj[\"sweep_end\"]-reqObj[\"sweep_start\"])<5.0):\n reqObj[\"fastDP\"] = False\n reqObj[\"fastEP\"] = False\n reqObj[\"dimple\"] = False \n else:\n reqObj[\"fastDP\"] = (self.fastDPCheckBox.isChecked() or self.fastEPCheckBox.isChecked() or self.dimpleCheckBox.isChecked())\n reqObj[\"fastEP\"] =self.fastEPCheckBox.isChecked()\n reqObj[\"dimple\"] =self.dimpleCheckBox.isChecked() \n reqObj[\"xia2\"] =self.xia2CheckBox.isChecked()\n reqObj[\"attenuation\"] = float(self.transmission_ledit.text())\n reqObj[\"slit_width\"] = float(self.beamWidth_ledit.text())\n reqObj[\"slit_height\"] = float(self.beamHeight_ledit.text())\n reqObj[\"energy\"] = float(self.energy_ledit.text()) \n try: \n wave = daq_utils.energy2wave(float(self.energy_ledit.text()))\n except ValueError:\n wave = 1.1\n\n reqObj[\"wavelength\"] = wave\n reqObj[\"protocol\"] = str(self.protoComboBox.currentText())\n try:\n reqObj[\"detDist\"] = float(self.detDistMotorEntry.getEntry().text())\n except ValueError:\n new_distance = 502.0\n logger.error(\"set dist to %s in exception handler 1\" % new_distance)\n reqObj[\"detDist\"] = new_distance\n if (reqObj[\"protocol\"] == \"multiCol\" or reqObj[\"protocol\"] == \"multiColQ\"):\n reqObj[\"gridStep\"] = float(self.rasterStepEdit.text())\n reqObj[\"diffCutoff\"] = float(self.multiColCutoffEdit.text())\n if (reqObj[\"protocol\"] == \"rasterScreen\"):\n reqObj[\"gridStep\"] = 
float(self.rasterStepEdit.text())\n if (rasterDef != None):\n reqObj[\"rasterDef\"] = rasterDef\n reqObj[\"gridStep\"] = float(self.rasterStepEdit.text())\n if (reqObj[\"protocol\"] == \"characterize\" or reqObj[\"protocol\"] == \"ednaCol\"):\n characterizationParams = {\"aimed_completeness\":float(self.characterizeCompletenessEdit.text()),\"aimed_multiplicity\":str(self.characterizeMultiplicityEdit.text()),\"aimed_resolution\":float(self.characterizeResoEdit.text()),\"aimed_ISig\":float(self.characterizeISIGEdit.text())}\n reqObj[\"characterizationParams\"] = characterizationParams\n if (reqObj[\"protocol\"] == \"vector\" or reqObj[\"protocol\"] == \"stepVector\"):\n if (float(self.osc_end_ledit.text()) < 5.0): \n self.popupServerMessage(\"Vector oscillation must be at least 5.0 degrees.\")\n return\n selectedCenteringFound = 1 \n try:\n x_vec, y_vec, z_vec, trans_total = self.updateVectorLengthAndSpeed()\n framesPerPoint = int(self.vectorFPP_ledit.text())\n vectorParams={\"vecStart\":self.vectorStart[\"coords\"],\"vecEnd\":self.vectorEnd[\"coords\"],\"x_vec\":x_vec,\"y_vec\":y_vec,\"z_vec\":z_vec,\"trans_total\":trans_total,\"fpp\":framesPerPoint}\n reqObj[\"vectorParams\"] = vectorParams\n except Exception as e:\n if self.vectorStart == None:\n self.popupServerMessage(\"Vector start must be defined.\")\n return\n elif self.vectorEnd == None:\n self.popupServerMessage(\"Vector end must be defined.\")\n return\n logger.error('Exception while getting vector parameters: %s' % e)\n pass\n colRequest[\"request_obj\"] = reqObj\n newSampleRequestID = db_lib.addRequesttoSample(self.selectedSampleID,reqObj[\"protocol\"],daq_utils.owner,reqObj,priority=5000,proposalID=daq_utils.getProposalID())\n#attempt here to select a newly created request. 
\n self.SelectedItemData = newSampleRequestID\n newSampleRequest = db_lib.getRequestByID(newSampleRequestID)\n if (rasterDef != None):\n self.rasterDefList.append(newSampleRequest)\n self.drawPolyRaster(newSampleRequest)\n if (selectedSampleID == None): #this is a temp kludge to see if this is called from addAll\n self.treeChanged_pv.put(1)\n\n\n def cloneRequestCB(self):\n self.eraseCB()\n colRequest=self.selectedSampleRequest\n reqObj = colRequest[\"request_obj\"]\n rasterDef = reqObj[\"rasterDef\"]\n self.addSampleRequestCB(rasterDef) \n\n\n \n def collectQueueCB(self):\n currentRequest = db_lib.popNextRequest(daq_utils.beamline)\n if (currentRequest == {}):\n self.addRequestsToAllSelectedCB() \n logger.info(\"running queue\")\n self.send_to_server(\"runDCQueue()\")\n\n def warmupGripperCB(self):\n self.send_to_server(\"warmupGripper()\") \n\n def dryGripperCB(self):\n self.send_to_server(\"dryGripper()\") \n\n def enableTScreenGripperCB(self):\n self.send_to_server(\"enableDewarTscreen()\") \n\n def parkGripperCB(self):\n self.send_to_server(\"parkGripper()\") \n \n def openPhotonShutterCB(self):\n self.photonShutterOpen_pv.put(1)\n\n def popUserScreenCB(self):\n if (self.controlEnabled()): \n self.userScreenDialog.show()\n else:\n self.popupServerMessage(\"You don't have control\") \n \n \n\n def closePhotonShutterCB(self):\n self.photonShutterClose_pv.put(1) \n\n \n\n def removePuckCB(self):\n self.timerHutch.stop()\n self.timerSample.stop() \n dewarPos, ok = DewarDialog.getDewarPos(parent=self,action=\"remove\")\n self.timerSample.start(SAMPLE_TIMER_DELAY) \n self.timerHutch.start(HUTCH_TIMER_DELAY) \n \n\n def getVectorObject(self):\n pen = QtGui.QPen(QtCore.Qt.blue)\n brush = QtGui.QBrush(QtCore.Qt.blue)\n markWidth = 10\n vecMarker = self.scene.addEllipse(self.centerMarker.x()-(markWidth/2.0)-1+self.centerMarkerCharOffsetX,self.centerMarker.y()-(markWidth/2.0)-1+self.centerMarkerCharOffsetY,markWidth,markWidth,pen,brush)\n vectorCoords = 
{\"x\":self.sampx_pv.get(),\"y\":self.sampy_pv.get(),\"z\":self.sampz_pv.get()}\n return {\"coords\":vectorCoords,\"graphicsitem\":vecMarker,\"centerCursorX\":self.centerMarker.x(),\"centerCursorY\":self.centerMarker.y()}\n \n def setVectorStartCB(self): #save sample x,y,z\n if (self.vectorStart != None):\n self.scene.removeItem(self.vectorStart[\"graphicsitem\"])\n try:\n self.scene.removeItem(self.vecLine)\n except AttributeError: # liekly due to vecLine not being defined yet\n pass\n self.vectorStart = None\n self.vectorStart = self.getVectorObject()\n\n if self.vectorStart and self.vectorEnd:\n self.drawVector()\n\n def drawVector(self):\n pen = QtGui.QPen(QtCore.Qt.blue)\n brush = QtGui.QBrush(QtCore.Qt.blue)\n try:\n self.updateVectorLengthAndSpeed()\n except:\n pass\n self.protoVectorRadio.setChecked(True)\n self.vecLine = self.scene.addLine(self.centerMarker.x()+self.vectorStart[\"graphicsitem\"].x()+self.centerMarkerCharOffsetX,\n self.centerMarker.y()+self.vectorStart[\"graphicsitem\"].y()+self.centerMarkerCharOffsetY,\n self.centerMarker.x()+self.vectorEnd[\"graphicsitem\"].x()+self.centerMarkerCharOffsetX,\n self.centerMarker.y()+self.vectorEnd[\"graphicsitem\"].y()+self.centerMarkerCharOffsetY, pen)\n self.vecLine.setFlag(QtWidgets.QGraphicsItem.ItemIsMovable, True)\n\n def setVectorEndCB(self): #save sample x,y,z\n if (self.vectorEnd != None):\n self.scene.removeItem(self.vectorEnd[\"graphicsitem\"])\n try:\n self.scene.removeItem(self.vecLine)\n except AttributeError: # likely due to self.vecLine not being defined yet\n pass\n self.vectorEnd = None\n self.vectorEnd = self.getVectorObject()\n\n if self.vectorStart and self.vectorEnd:\n self.drawVector() \n\n def clearVectorCB(self):\n if (self.vectorStart != None):\n self.scene.removeItem(self.vectorStart[\"graphicsitem\"])\n self.vectorStart = None\n if (self.vectorEnd != None):\n self.scene.removeItem(self.vectorEnd[\"graphicsitem\"])\n self.vectorEnd = None\n 
self.vecLenLabelOutput.setText(\"---\")\n self.vecSpeedLabelOutput.setText(\"---\") \n try:\n if self.vecLine != None:\n self.scene.removeItem(self.vecLine)\n except AttributeError: # likely due to self.vecLine not defined yet\n pass\n\n def puckToDewarCB(self):\n while (1):\n self.timerHutch.stop()\n self.timerSample.stop() \n puckName, ok = PuckDialog.getPuckName()\n self.timerSample.start(SAMPLE_TIMER_DELAY) \n self.timerHutch.start(HUTCH_TIMER_DELAY) \n if (ok):\n self.timerHutch.stop()\n self.timerSample.stop() \n dewarPos, ok = DewarDialog.getDewarPos(parent=self,action=\"add\")\n self.timerSample.start(SAMPLE_TIMER_DELAY) \n self.timerHutch.start(HUTCH_TIMER_DELAY) \n ipos = int(dewarPos)+1\n if (ok):\n db_lib.insertIntoContainer(daq_utils.primaryDewarName,daq_utils.beamline,ipos,db_lib.getContainerIDbyName(puckName,daq_utils.owner))\n self.treeChanged_pv.put(1)\n else:\n break\n\n\n def stopRunCB(self):\n logger.info(\"stopping collection\")\n self.aux_send_to_server(\"stopDCQueue(1)\")\n\n def stopQueueCB(self):\n logger.info(\"stopping queue\")\n if (self.pauseQueueButton.text() == \"Continue\"):\n self.aux_send_to_server(\"continue_data_collection()\") \n else:\n self.aux_send_to_server(\"stopDCQueue(2)\")\n\n def mountSampleCB(self):\n if (getBlConfig(\"mountEnabled\") == 0):\n self.popupServerMessage(\"Mounting disabled!! 
Call staff!\")\n return\n logger.info(\"mount selected sample\")\n self.eraseCB() \n try:\n self.selectedSampleID = self.selectedSampleRequest[\"sample\"]\n except KeyError as e:\n logger.error('unable to get sample')\n return\n self.send_to_server(\"mountSample(\\\"\"+str(self.selectedSampleID)+\"\\\")\")\n self.zoom1Radio.setChecked(True) \n self.zoomLevelToggledCB(\"Zoom1\")\n self.protoComboBox.setCurrentIndex(self.protoComboBox.findText(str(\"standard\")))\n self.protoComboActivatedCB(\"standard\")\n\n def unmountSampleCB(self):\n logger.info(\"unmount sample\")\n self.eraseCB() \n self.send_to_server(\"unmountSample()\")\n\n\n def refreshCollectionParams(self,selectedSampleRequest):\n reqObj = selectedSampleRequest[\"request_obj\"]\n self.protoComboBox.setCurrentIndex(self.protoComboBox.findText(str(reqObj[\"protocol\"])))\n protocol = str(reqObj[\"protocol\"])\n if (protocol == \"raster\"):\n self.protoRasterRadio.setChecked(True)\n elif (protocol == \"standard\"):\n self.protoStandardRadio.setChecked(True)\n elif (protocol == \"vector\"):\n self.protoVectorRadio.setChecked(True)\n else:\n self.protoOtherRadio.setChecked(True)\n \n logger.info('osc range')\n self.setGuiValues({'osc_start':reqObj[\"sweep_start\"], 'osc_end':reqObj[\"sweep_end\"]-reqObj[\"sweep_start\"], 'osc_range':reqObj[\"img_width\"], 'exp_time':reqObj[\"exposure_time\"], 'resolution':reqObj[\"resolution\"], 'transmission':reqObj[\"attenuation\"]})\n self.dataPathGB.setFileNumstart_ledit(str(reqObj[\"file_number_start\"]))\n self.beamWidth_ledit.setText(str(reqObj[\"slit_width\"]))\n self.beamHeight_ledit.setText(str(reqObj[\"slit_height\"]))\n if (\"fastDP\" in reqObj):\n self.fastDPCheckBox.setChecked((reqObj[\"fastDP\"] or reqObj[\"fastEP\"] or reqObj[\"dimple\"]))\n if (\"fastEP\" in reqObj):\n self.fastEPCheckBox.setChecked(reqObj[\"fastEP\"])\n if (\"dimple\" in reqObj):\n self.dimpleCheckBox.setChecked(reqObj[\"dimple\"]) \n if (\"xia2\" in reqObj):\n 
self.xia2CheckBox.setChecked(reqObj[\"xia2\"])\n reqObj[\"energy\"] = float(self.energy_ledit.text())\n self.energy_ledit.setText(str(reqObj[\"energy\"])) \n energy_s = str(daq_utils.wave2energy(reqObj[\"wavelength\"]))\n dist_s = str(reqObj[\"detDist\"])\n self.detDistMotorEntry.getEntry().setText(str(dist_s))\n self.dataPathGB.setFilePrefix_ledit(str(reqObj[\"file_prefix\"]))\n self.dataPathGB.setBasePath_ledit(str(reqObj[\"basePath\"]))\n self.dataPathGB.setDataPath_ledit(str(reqObj[\"directory\"]))\n if (str(reqObj[\"protocol\"]) == \"characterize\" or str(reqObj[\"protocol\"]) == \"ednaCol\"): \n prefix_long = str(reqObj[\"directory\"])+\"/ref-\"+str(reqObj[\"file_prefix\"])\n else:\n prefix_long = str(reqObj[\"directory\"])+\"/\"+str(reqObj[\"file_prefix\"])\n fnumstart=reqObj[\"file_number_start\"]\n\n if (str(reqObj[\"protocol\"]) == \"characterize\" or str(reqObj[\"protocol\"]) == \"ednaCol\" or str(reqObj[\"protocol\"]) == \"standard\" or str(reqObj[\"protocol\"]) == \"vector\"):\n if (\"priority\" in selectedSampleRequest):\n if (selectedSampleRequest[\"priority\"] < 0 and self.albulaDispCheckBox.isChecked()):\n firstFilename = daq_utils.create_filename(prefix_long,fnumstart) \n albulaUtils.albulaDispFile(firstFilename) \n self.rasterStepEdit.setText(str(reqObj[\"gridStep\"]))\n if (reqObj[\"gridStep\"] == self.rasterStepDefs[\"Coarse\"]):\n self.rasterGrainCoarseRadio.setChecked(True)\n elif (reqObj[\"gridStep\"] == self.rasterStepDefs[\"Fine\"]):\n self.rasterGrainFineRadio.setChecked(True)\n elif (reqObj[\"gridStep\"] == self.rasterStepDefs[\"VFine\"]):\n self.rasterGrainVFineRadio.setChecked(True)\n else:\n self.rasterGrainCustomRadio.setChecked(True) \n rasterStep = int(reqObj[\"gridStep\"])\n if (not self.hideRastersCheckBox.isChecked() and (str(reqObj[\"protocol\"])== \"raster\" or str(reqObj[\"protocol\"])== \"stepRaster\" or str(reqObj[\"protocol\"])== \"specRaster\")):\n if (not self.rasterIsDrawn(selectedSampleRequest)):\n 
self.drawPolyRaster(selectedSampleRequest)\n self.fillPolyRaster(selectedSampleRequest)\n self.processSampMove(self.sampx_pv.get(),\"x\")\n self.processSampMove(self.sampy_pv.get(),\"y\")\n self.processSampMove(self.sampz_pv.get(),\"z\")\n if (abs(selectedSampleRequest[\"request_obj\"][\"rasterDef\"][\"omega\"]-self.omega_pv.get()) > 5.0):\n comm_s = \"mvaDescriptor(\\\"omega\\\",\" + str(selectedSampleRequest[\"request_obj\"][\"rasterDef\"][\"omega\"]) + \")\"\n self.send_to_server(comm_s)\n if (str(reqObj[\"protocol\"])== \"eScan\"):\n try:\n self.escan_steps_ledit.setText(str(reqObj[\"steps\"]))\n self.escan_stepsize_ledit.setText(str(reqObj[\"stepsize\"]))\n self.EScanDataPathGB.setBasePath_ledit(reqObj[\"basePath\"])\n self.EScanDataPathGB.setDataPath_ledit(reqObj[\"directory\"])\n self.EScanDataPathGB.setFileNumstart_ledit(str(reqObj[\"file_number_start\"])) \n self.EScanDataPathGB.setFilePrefix_ledit(str(reqObj[\"file_prefix\"])) \n self.periodicTable.elementClicked(reqObj[\"element\"])\n except KeyError: \n pass\n elif (str(reqObj[\"protocol\"])== \"characterize\" or str(reqObj[\"protocol\"])== \"ednaCol\"):\n characterizationParams = reqObj[\"characterizationParams\"]\n self.characterizeCompletenessEdit.setText(str(characterizationParams[\"aimed_completeness\"]))\n self.characterizeISIGEdit.setText(str(characterizationParams[\"aimed_ISig\"]))\n self.characterizeResoEdit.setText(str(characterizationParams[\"aimed_resolution\"]))\n self.characterizeMultiplicityEdit.setText(str(characterizationParams[\"aimed_multiplicity\"]))\n else: #for now, erase the rasters if a non-raster is selected, need to rationalize later\n pass\n self.showProtParams()\n \n\n\n def row_clicked(self,index): #I need \"index\" here? 
seems like I get it from selmod, but sometimes is passed\n selmod = self.dewarTree.selectionModel()\n selection = selmod.selection()\n indexes = selection.indexes()\n if (len(indexes)==0):\n return\n i = 0\n item = self.dewarTree.model.itemFromIndex(indexes[i])\n parent = indexes[i].parent()\n try:\n puck_name = parent.data()\n except AttributeError as e:\n logger.error('attribute error in row_clicked: %s', e)\n return\n itemData = str(item.data(32))\n itemDataType = str(item.data(33))\n self.SelectedItemData = itemData # an attempt to know what is selected and preserve it when refreshing the tree\n if (itemData == \"\"):\n logger.info(\"nothing there\")\n return\n elif (itemDataType == \"container\"):\n logger.info(\"I'm a puck\")\n return\n elif (itemDataType == \"sample\"):\n self.selectedSampleID = itemData\n sample = db_lib.getSampleByID(self.selectedSampleID)\n owner = sample[\"owner\"]\n sample_name = db_lib.getSampleNamebyID(self.selectedSampleID)\n logger.info(\"sample in pos \" + str(itemData))\n if (self.osc_start_ledit.text() == \"\"):\n self.selectedSampleRequest = daq_utils.createDefaultRequest(itemData,createVisit=False)\n self.refreshCollectionParams(self.selectedSampleRequest)\n if (self.stillModeStatePV.get()):\n self.setGuiValues({'osc_range':\"0.0\"})\n reqObj = self.selectedSampleRequest[\"request_obj\"]\n self.dataPathGB.setFilePrefix_ledit(str(reqObj[\"file_prefix\"])) \n self.dataPathGB.setBasePath_ledit(reqObj[\"basePath\"])\n self.dataPathGB.setDataPath_ledit(reqObj[\"directory\"])\n self.EScanDataPathGB.setFilePrefix_ledit(str(reqObj[\"file_prefix\"])) \n self.EScanDataPathGB.setBasePath_ledit(reqObj[\"basePath\"])\n self.EScanDataPathGB.setDataPath_ledit(reqObj[\"directory\"])\n self.EScanDataPathGB.setFileNumstart_ledit(str(reqObj[\"file_number_start\"])) \n if (self.vidActionRasterDefRadio.isChecked()):\n self.protoComboBox.setCurrentIndex(self.protoComboBox.findText(str(\"raster\")))\n self.showProtParams()\n elif 
(str(self.protoComboBox.currentText()) == \"screen\"):\n self.selectedSampleRequest = daq_utils.createDefaultRequest(itemData,createVisit=False)\n self.refreshCollectionParams(self.selectedSampleRequest)\n if (self.stillModeStatePV.get()):\n self.setGuiValue({'osc_range':\"0.0\"})\n else:\n self.selectedSampleRequest = daq_utils.createDefaultRequest(itemData,createVisit=False)\n reqObj = self.selectedSampleRequest[\"request_obj\"]\n self.dataPathGB.setFilePrefix_ledit(str(reqObj[\"file_prefix\"])) \n self.dataPathGB.setBasePath_ledit(reqObj[\"basePath\"])\n self.dataPathGB.setDataPath_ledit(reqObj[\"directory\"])\n self.EScanDataPathGB.setFilePrefix_ledit(str(reqObj[\"file_prefix\"])) \n self.EScanDataPathGB.setBasePath_ledit(reqObj[\"basePath\"])\n self.EScanDataPathGB.setDataPath_ledit(reqObj[\"directory\"])\n self.EScanDataPathGB.setFileNumstart_ledit(str(reqObj[\"file_number_start\"])) \n else: #request\n self.selectedSampleRequest = db_lib.getRequestByID(itemData)\n reqObj = self.selectedSampleRequest[\"request_obj\"]\n reqID = self.selectedSampleRequest[\"uid\"]\n self.selectedSampleID = self.selectedSampleRequest[\"sample\"] \n sample = db_lib.getSampleByID(self.selectedSampleID)\n owner = sample[\"owner\"]\n if (reqObj[\"protocol\"] == \"eScan\"):\n try:\n if (reqObj[\"runChooch\"]):\n resultList = db_lib.getResultsforRequest(reqID)\n if (len(resultList) > 0):\n lastResult = resultList[-1]\n if (db_lib.getResult(lastResult['uid'])[\"result_type\"] == \"choochResult\"): \n resultID = lastResult['uid']\n logger.info(\"plotting chooch\")\n self.processChoochResult(resultID)\n except KeyError:\n logger.error('KeyError - ignoring chooch-related items, perhaps from a bad energy scan')\n self.refreshCollectionParams(self.selectedSampleRequest)\n\n\n def processXrecRasterCB(self,value=None, char_value=None, **kw):\n xrecFlag = value\n if (xrecFlag != \"0\"):\n self.xrecRasterSignal.emit(xrecFlag)\n\n def processChoochResultsCB(self,value=None, char_value=None, 
**kw):\n choochFlag = value\n if (choochFlag != \"0\"):\n self.choochResultSignal.emit(choochFlag)\n\n def processEnergyChangeCB(self,value=None, char_value=None, **kw):\n energyVal = value\n self.energyChangeSignal.emit(energyVal)\n\n def mountedPinChangedCB(self,value=None, char_value=None, **kw):\n mountedPinPos = value\n self.mountedPinSignal.emit(mountedPinPos)\n\n def beamSizeChangedCB(self,value=None, char_value=None, **kw):\n beamSizeFlag = value\n self.beamSizeSignal.emit(beamSizeFlag)\n \n def controlMasterChangedCB(self,value=None, char_value=None, **kw):\n controlMasterPID = value\n self.controlMasterSignal.emit(controlMasterPID)\n\n def zebraArmStateChangedCB(self,value=None, char_value=None, **kw):\n armState = value\n self.zebraArmStateSignal.emit(armState)\n \n def govRobotSeReachChangedCB(self,value=None, char_value=None, **kw):\n armState = value\n self.govRobotSeReachSignal.emit(armState)\n\n def govRobotSaReachChangedCB(self,value=None, char_value=None, **kw):\n armState = value\n self.govRobotSaReachSignal.emit(armState)\n\n def govRobotDaReachChangedCB(self,value=None, char_value=None, **kw):\n armState = value\n self.govRobotDaReachSignal.emit(armState)\n\n def govRobotBlReachChangedCB(self,value=None, char_value=None, **kw):\n armState = value\n self.govRobotBlReachSignal.emit(armState)\n\n\n def detMessageChangedCB(self,value=None, char_value=None, **kw):\n state = char_value\n self.detMessageSignal.emit(state)\n \n def sampleFluxChangedCB(self,value=None, char_value=None, **kw):\n state = value\n self.sampleFluxSignal.emit(state)\n \n def zebraPulseStateChangedCB(self,value=None, char_value=None, **kw):\n state = value\n self.zebraPulseStateSignal.emit(state)\n\n def stillModeStateChangedCB(self,value=None, char_value=None, **kw):\n state = value\n self.stillModeStateSignal.emit(state)\n\n def zebraDownloadStateChangedCB(self,value=None, char_value=None, **kw):\n state = value\n self.zebraDownloadStateSignal.emit(state)\n\n def 
zebraSentTriggerStateChangedCB(self,value=None, char_value=None, **kw):\n state = value\n self.zebraSentTriggerStateSignal.emit(state)\n \n def zebraReturnedTriggerStateChangedCB(self,value=None, char_value=None, **kw):\n state = value\n self.zebraReturnedTriggerStateSignal.emit(state)\n \n def shutterChangedCB(self,value=None, char_value=None, **kw):\n shutterVal = value \n self.fastShutterSignal.emit(shutterVal)\n \n def gripTempChangedCB(self,value=None, char_value=None, **kw):\n gripVal = value \n self.gripTempSignal.emit(gripVal)\n\n def cryostreamTempChangedCB(self, value=None, char_value=None, **kw):\n cryostreamTemp = value\n self.cryostreamTempSignal.emit(cryostreamTemp)\n\n def ringCurrentChangedCB(self,value=None, char_value=None, **kw):\n ringCurrentVal = value \n self.ringCurrentSignal.emit(ringCurrentVal)\n\n def beamAvailableChangedCB(self,value=None, char_value=None, **kw):\n beamAvailableVal = value \n self.beamAvailableSignal.emit(beamAvailableVal)\n\n def sampleExposedChangedCB(self,value=None, char_value=None, **kw):\n sampleExposedVal = value \n self.sampleExposedSignal.emit(sampleExposedVal)\n \n def processSampMoveCB(self,value=None, char_value=None, **kw):\n posRBV = value\n motID = kw[\"motID\"]\n self.sampMoveSignal.emit(posRBV,motID)\n\n def processROIChangeCB(self,value=None, char_value=None, **kw):\n posRBV = value\n ID = kw[\"ID\"]\n self.roiChangeSignal.emit(posRBV,ID)\n \n\n def processHighMagCursorChangeCB(self,value=None, char_value=None, **kw):\n posRBV = value\n ID = kw[\"ID\"]\n self.highMagCursorChangeSignal.emit(posRBV,ID)\n \n def processLowMagCursorChangeCB(self,value=None, char_value=None, **kw):\n posRBV = value\n ID = kw[\"ID\"]\n self.lowMagCursorChangeSignal.emit(posRBV,ID)\n \n\n def treeChangedCB(self,value=None, char_value=None, **kw):\n if (self.processID != self.treeChanged_pv.get()):\n self.refreshTreeSignal.emit()\n\n def serverMessageCB(self,value=None, char_value=None, **kw):\n serverMessageVar = char_value\n 
self.serverMessageSignal.emit(serverMessageVar)\n\n def serverPopupMessageCB(self,value=None, char_value=None, **kw):\n serverMessageVar = char_value\n self.serverPopupMessageSignal.emit(serverMessageVar)\n\n \n def programStateCB(self, value=None, char_value=None, **kw):\n programStateVar = value\n self.programStateSignal.emit(programStateVar)\n\n def pauseButtonStateCB(self, value=None, char_value=None, **kw):\n pauseButtonStateVar = value\n self.pauseButtonStateSignal.emit(pauseButtonStateVar)\n\n \n def initUI(self): \n self.tabs= QtWidgets.QTabWidget()\n self.comm_pv = PV(daq_utils.beamlineComm + \"command_s\")\n self.immediate_comm_pv = PV(daq_utils.beamlineComm + \"immediate_command_s\")\n self.stillModeStatePV = PV(daq_utils.pvLookupDict[\"stillModeStatus\"]) \n self.progressDialog = QtWidgets.QProgressDialog()\n self.progressDialog.setCancelButtonText(QString())\n self.progressDialog.setModal(False)\n tab1= QtWidgets.QWidget()\n vBoxlayout1= QtWidgets.QVBoxLayout()\n splitter1 = QtWidgets.QSplitter(QtCore.Qt.Vertical,self)\n splitter1.addWidget(self.tabs)\n self.setCentralWidget(splitter1)\n splitterSizes = [600,100]\n importAction = QtWidgets.QAction('Import Spreadsheet...', self)\n importAction.triggered.connect(self.popImportDialogCB)\n modeGroup = QActionGroup(self);\n modeGroup.setExclusive(True) \n self.userAction = QtWidgets.QAction('User Mode', self,checkable=True)\n self.userAction.triggered.connect(self.setUserModeCB)\n self.userAction.setChecked(True)\n self.expertAction = QtWidgets.QAction('Expert Mode', self,checkable=True)\n self.expertAction.triggered.connect(self.setExpertModeCB)\n self.staffAction = QtWidgets.QAction('Staff Panel...', self)\n self.staffAction.triggered.connect(self.popStaffDialogCB)\n modeGroup.addAction(self.userAction)\n modeGroup.addAction(self.expertAction)\n exitAction = QtWidgets.QAction(QtGui.QIcon('exit24.png'), 'Exit', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Exit application')\n 
exitAction.triggered.connect(self.closeAll)\n self.statusBar()\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&File')\n fileMenu.addAction(importAction)\n fileMenu.addAction(self.userAction)\n fileMenu.addAction(self.expertAction)\n fileMenu.addAction(self.staffAction) \n fileMenu.addAction(exitAction)\n self.setGeometry(300, 300, 1550, 1000) #width and height here. \n self.setWindowTitle('LSDC on %s' % daq_utils.beamline)\n self.show()\n\n def popStaffDialogCB(self):\n if (self.controlEnabled()):\n self.staffScreenDialog = StaffScreenDialog(self)\n else:\n self.popupServerMessage(\"You don't have control\") \n \n\n def closeAll(self):\n QtWidgets.QApplication.closeAllWindows()\n\n\n def initCallbacks(self):\n\n self.beamSizeSignal.connect(self.processBeamSize)\n self.beamSize_pv.add_callback(self.beamSizeChangedCB) \n\n self.treeChanged_pv = PV(daq_utils.beamlineComm + \"live_q_change_flag\")\n self.refreshTreeSignal.connect(self.dewarTree.refreshTree)\n self.treeChanged_pv.add_callback(self.treeChangedCB) \n self.mountedPin_pv = PV(daq_utils.beamlineComm + \"mounted_pin\")\n self.mountedPinSignal.connect(self.processMountedPin)\n self.mountedPin_pv.add_callback(self.mountedPinChangedCB)\n det_stop_pv = daq_utils.pvLookupDict[\"stopEiger\"]\n logger.info('setting stop Eiger detector PV: %s' % det_stop_pv)\n self.stopDet_pv = PV(det_stop_pv)\n det_reboot_pv = daq_utils.pvLookupDict[\"eigerIOC_reboot\"]\n logger.info('setting detector ioc reboot PV: %s' % det_reboot_pv)\n self.rebootDetIOC_pv = PV(daq_utils.beamlineComm + \"eigerIOC_reboot\") \n rz_pv = daq_utils.pvLookupDict[\"zebraReset\"]\n logger.info('setting zebra reset PV: %s' % rz_pv)\n self.resetZebra_pv = PV(rz_pv)\n rz_reboot_pv = daq_utils.pvLookupDict[\"zebraRebootIOC\"]\n logger.info('setting zebra reboot ioc PV: %s' % rz_reboot_pv)\n self.rebootZebraIOC_pv = PV(rz_reboot_pv) \n self.zebraArmedPV = PV(daq_utils.pvLookupDict[\"zebraArmStatus\"])\n 
self.zebraArmStateSignal.connect(self.processZebraArmState)\n self.zebraArmedPV.add_callback(self.zebraArmStateChangedCB)\n\n self.govRobotSeReachPV = PV(daq_utils.pvLookupDict[\"govRobotSeReach\"])\n self.govRobotSeReachSignal.connect(self.processGovRobotSeReach)\n self.govRobotSeReachPV.add_callback(self.govRobotSeReachChangedCB)\n\n self.govRobotSaReachPV = PV(daq_utils.pvLookupDict[\"govRobotSaReach\"])\n self.govRobotSaReachSignal.connect(self.processGovRobotSaReach)\n self.govRobotSaReachPV.add_callback(self.govRobotSaReachChangedCB)\n\n self.govRobotDaReachPV = PV(daq_utils.pvLookupDict[\"govRobotDaReach\"])\n self.govRobotDaReachSignal.connect(self.processGovRobotDaReach)\n self.govRobotDaReachPV.add_callback(self.govRobotDaReachChangedCB)\n\n self.govRobotBlReachPV = PV(daq_utils.pvLookupDict[\"govRobotBlReach\"])\n self.govRobotBlReachSignal.connect(self.processGovRobotBlReach)\n self.govRobotBlReachPV.add_callback(self.govRobotBlReachChangedCB)\n \n self.detectorMessagePV = PV(daq_utils.pvLookupDict[\"eigerStatMessage\"])\n self.detMessageSignal.connect(self.processDetMessage)\n self.detectorMessagePV.add_callback(self.detMessageChangedCB)\n\n\n self.sampleFluxSignal.connect(self.processSampleFlux)\n self.sampleFluxPV.add_callback(self.sampleFluxChangedCB)\n \n self.stillModeStateSignal.connect(self.processStillModeState)\n self.stillModeStatePV.add_callback(self.stillModeStateChangedCB) \n\n self.zebraPulsePV = PV(daq_utils.pvLookupDict[\"zebraPulseStatus\"])\n self.zebraPulseStateSignal.connect(self.processZebraPulseState)\n self.zebraPulsePV.add_callback(self.zebraPulseStateChangedCB)\n\n self.zebraDownloadPV = PV(daq_utils.pvLookupDict[\"zebraDownloading\"])\n self.zebraDownloadStateSignal.connect(self.processZebraDownloadState)\n self.zebraDownloadPV.add_callback(self.zebraDownloadStateChangedCB)\n\n self.zebraSentTriggerPV = PV(daq_utils.pvLookupDict[\"zebraSentTriggerStatus\"])\n 
self.zebraSentTriggerStateSignal.connect(self.processZebraSentTriggerState)\n self.zebraSentTriggerPV.add_callback(self.zebraSentTriggerStateChangedCB)\n\n self.zebraReturnedTriggerPV = PV(daq_utils.pvLookupDict[\"zebraTriggerReturnStatus\"])\n self.zebraReturnedTriggerStateSignal.connect(self.processZebraReturnedTriggerState)\n self.zebraReturnedTriggerPV.add_callback(self.zebraReturnedTriggerStateChangedCB)\n \n self.controlMaster_pv = PV(daq_utils.beamlineComm + \"zinger_flag\")\n self.controlMasterSignal.connect(self.processControlMaster)\n self.controlMaster_pv.add_callback(self.controlMasterChangedCB)\n\n self.beamCenterX_pv = PV(daq_utils.pvLookupDict[\"beamCenterX\"])\n self.beamCenterY_pv = PV(daq_utils.pvLookupDict[\"beamCenterY\"]) \n\n self.choochResultFlag_pv = PV(daq_utils.beamlineComm + \"choochResultFlag\")\n self.choochResultSignal.connect(self.processChoochResult)\n self.choochResultFlag_pv.add_callback(self.processChoochResultsCB) \n self.xrecRasterFlag_pv = PV(daq_utils.beamlineComm + \"xrecRasterFlag\")\n self.xrecRasterFlag_pv.put(\"0\")\n self.xrecRasterSignal.connect(self.displayXrecRaster)\n self.xrecRasterFlag_pv.add_callback(self.processXrecRasterCB) \n self.message_string_pv = PV(daq_utils.beamlineComm + \"message_string\") \n self.serverMessageSignal.connect(self.printServerMessage)\n self.message_string_pv.add_callback(self.serverMessageCB) \n self.popup_message_string_pv = PV(daq_utils.beamlineComm + \"gui_popup_message_string\") \n self.serverPopupMessageSignal.connect(self.popupServerMessage)\n self.popup_message_string_pv.add_callback(self.serverPopupMessageCB) \n self.program_state_pv = PV(daq_utils.beamlineComm + \"program_state\") \n self.programStateSignal.connect(self.colorProgramState)\n self.program_state_pv.add_callback(self.programStateCB) \n self.pause_button_state_pv = PV(daq_utils.beamlineComm + \"pause_button_state\") \n self.pauseButtonStateSignal.connect(self.changePauseButtonState)\n 
self.pause_button_state_pv.add_callback(self.pauseButtonStateCB) \n\n self.energyChangeSignal.connect(self.processEnergyChange)\n self.energy_pv.add_callback(self.processEnergyChangeCB,motID=\"x\")\n\n self.sampx_pv = PV(daq_utils.motor_dict[\"sampleX\"]+\".RBV\") \n self.sampMoveSignal.connect(self.processSampMove)\n self.sampx_pv.add_callback(self.processSampMoveCB,motID=\"x\")\n self.sampy_pv = PV(daq_utils.motor_dict[\"sampleY\"]+\".RBV\")\n self.sampy_pv.add_callback(self.processSampMoveCB,motID=\"y\")\n self.sampz_pv = PV(daq_utils.motor_dict[\"sampleZ\"]+\".RBV\")\n self.sampz_pv.add_callback(self.processSampMoveCB,motID=\"z\")\n\n if (self.scannerType == \"PI\"):\n self.sampFineX_pv = PV(daq_utils.motor_dict[\"fineX\"]+\".RBV\")\n self.sampFineX_pv.add_callback(self.processSampMoveCB,motID=\"fineX\")\n self.sampFineY_pv = PV(daq_utils.motor_dict[\"fineY\"]+\".RBV\")\n self.sampFineY_pv.add_callback(self.processSampMoveCB,motID=\"fineY\")\n self.sampFineZ_pv = PV(daq_utils.motor_dict[\"fineZ\"]+\".RBV\")\n self.sampFineZ_pv.add_callback(self.processSampMoveCB,motID=\"fineZ\")\n \n \n self.omega_pv = PV(daq_utils.motor_dict[\"omega\"] + \".VAL\")\n self.omegaTweak_pv = PV(daq_utils.motor_dict[\"omega\"] + \".RLV\")\n self.sampyTweak_pv = PV(daq_utils.motor_dict[\"sampleY\"] + \".RLV\")\n self.sampzTweak_pv = PV(daq_utils.motor_dict[\"sampleZ\"] + \".RLV\") \n self.omegaRBV_pv = PV(daq_utils.motor_dict[\"omega\"] + \".RBV\")\n self.omegaRBV_pv.add_callback(self.processSampMoveCB,motID=\"omega\") #I think monitoring this allows for the textfield to monitor val and this to deal with the graphics. 
Else next line has two callbacks on same thing.\n self.photonShutterOpen_pv = PV(daq_utils.pvLookupDict[\"photonShutterOpen\"])\n self.photonShutterClose_pv = PV(daq_utils.pvLookupDict[\"photonShutterClose\"]) \n self.fastShutterRBV_pv = PV(daq_utils.motor_dict[\"fastShutter\"] + \".RBV\")\n self.fastShutterSignal.connect(self.processFastShutter)\n self.fastShutterRBV_pv.add_callback(self.shutterChangedCB)\n self.gripTempSignal.connect(self.processGripTemp)\n self.gripTemp_pv.add_callback(self.gripTempChangedCB)\n self.cryostreamTempSignal.connect(self.processCryostreamTemp)\n self.cryostreamTemp_pv.add_callback(self.cryostreamTempChangedCB)\n self.ringCurrentSignal.connect(self.processRingCurrent) \n self.ringCurrent_pv.add_callback(self.ringCurrentChangedCB)\n self.beamAvailableSignal.connect(self.processBeamAvailable) \n self.beamAvailable_pv.add_callback(self.beamAvailableChangedCB)\n self.sampleExposedSignal.connect(self.processSampleExposed) \n self.sampleExposed_pv.add_callback(self.sampleExposedChangedCB)\n self.highMagCursorChangeSignal.connect(self.processHighMagCursorChange)\n self.highMagCursorX_pv.add_callback(self.processHighMagCursorChangeCB,ID=\"x\")\n self.highMagCursorY_pv.add_callback(self.processHighMagCursorChangeCB,ID=\"y\") \n self.lowMagCursorChangeSignal.connect(self.processLowMagCursorChange)\n self.lowMagCursorX_pv.add_callback(self.processLowMagCursorChangeCB,ID=\"x\")\n self.lowMagCursorY_pv.add_callback(self.processLowMagCursorChangeCB,ID=\"y\") \n \n\n \n\n def popupServerMessage(self,message_s):\n\n if (self.popUpMessageInit):\n self.popUpMessageInit = 0\n return \n self.popupMessage.done(1)\n if (message_s == \"killMessage\"):\n return\n else:\n self.popupMessage.showMessage(message_s)\n\n\n def printServerMessage(self,message_s):\n if (self.textWindowMessageInit):\n self.textWindowMessageInit = 0\n return \n logger.info(message_s)\n print(message_s)\n\n\n def colorProgramState(self,programState_s):\n if 
(programState_s.find(\"Ready\") == -1):\n self.statusLabel.setColor(\"yellow\")\n else:\n self.statusLabel.setColor(\"#99FF66\") \n\n def changePauseButtonState(self,buttonState_s):\n self.pauseQueueButton.setText(buttonState_s)\n if (buttonState_s.find(\"Pause\") != -1):\n self.pauseQueueButton.setStyleSheet(\"background-color: None\") \n else:\n self.pauseQueueButton.setStyleSheet(\"background-color: yellow\") \n\n\n def controlEnabled(self):\n return (self.processID == abs(int(self.controlMaster_pv.get())) and self.controlMasterCheckBox.isChecked())\n \n def send_to_server(self,s):\n if (s == \"lockControl\"):\n self.controlMaster_pv.put(0-self.processID)\n return\n if (s == \"unlockControl\"):\n self.controlMaster_pv.put(self.processID)\n return\n if (self.controlEnabled()):\n\n time.sleep(.01)\n logger.info('send_to_server: %s' % s)\n self.comm_pv.put(s)\n else:\n self.popupServerMessage(\"You don't have control\")\n \n\n\n def aux_send_to_server(self,s):\n if (self.controlEnabled()):\n time.sleep(.01)\n logger.info('aux_send_to_server: %s' % s)\n self.immediate_comm_pv.put(s)\n else:\n self.popupServerMessage(\"You don't have control\")\n\n\ndef get_request_object_escan(reqObj, symbol, runNum, file_prefix, base_path, sampleName, containerID, samplePositionInContainer,\n file_number_start, exposure_time, targetEnergy, steps, stepsize):\n reqObj[\"element\"] = symbol\n reqObj[\"runNum\"] = runNum\n reqObj[\"file_prefix\"] = str(file_prefix)\n reqObj[\"basePath\"] = str(base_path)\n reqObj[\"directory\"] = str(base_path) + \"/\" + str(\n daq_utils.getVisitName()) + \"/\" + sampleName + \"/\" + str(runNum) + \"/\" + db_lib.getContainerNameByID(\n containerID) + \"_\" + str(samplePositionInContainer + 1) + \"/\"\n try:\n reqObj[\"file_number_start\"] = int(file_number_start)\n except ValueError as e:\n logger.error('Problem with a value passed in - %s' % e)\n reqObj[\"file_number_start\"] = 1\n reqObj[\"exposure_time\"] = float(exposure_time)\n 
reqObj[\"protocol\"] = \"eScan\"\n reqObj[\"scanEnergy\"] = targetEnergy\n reqObj[\"runChooch\"] = True # just hardcode for now\n reqObj[\"steps\"] = int(steps)\n reqObj[\"stepsize\"] = int(stepsize)\n return reqObj\n\ndef main():\n daq_utils.init_environment()\n daq_utils.readPVDesc() \n app = QtWidgets.QApplication(sys.argv)\n ex = ControlMain()\n sys.exit(app.exec_())\n\n#skinner - I think Matt did a lot of what's below and I have no idea what it is. \nif __name__ == '__main__':\n if '-pc' in sys.argv or '-p' in sys.argv:\n logger.info('cProfile not working yet :(')\n #print 'starting cProfile profiler...'\n #import cProfile, pstats, io\n #pr = cProfile.Profile()\n #pr.enable()\n\n elif '-py' in sys.argv:\n logger.info('starting yappi profiler...')\n import yappi\n yappi.start(True)\n\n try:\n main() \n\n finally:\n if '-pc' in sys.argv or '-p' in sys.argv:\n pass\n #pr.disable()\n #s = StringIO()\n #sortby = 'cumulative'\n #ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n #ps.print_stats() # dies here, expected unicode, got string, need unicode io stream?\n #logger.info(s.getvalue())\n\n elif '-py' in sys.argv:\n # stop profiler and print results\n yappi.stop()\n yappi.get_func_stats().print_all()\n yappi.get_thread_stats().print_all()\n logger.info('memory usage: {0}'.format(yappi.get_mem_usage()))\n" ]
[ [ "numpy.amin", "numpy.amax", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
andrasormos/A3C_Keras_FlappyBird
[ "9b3fb805653e628a37c730eb707dbf4af392654a" ]
[ "test.py" ]
[ "import numpy as np\nimport sys\n\nsys.path.append(\"game/\")\n\nimport pygame\nimport wrapped_flappy_bird as game\n\nimport skimage\nfrom skimage import transform, color, exposure\n\nimport keras\nfrom keras.models import Sequential, Model, load_model\nfrom keras.layers.core import Dense, Flatten, Activation\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.optimizers import RMSprop\nimport keras.backend as K\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nBETA = 0.01\nconst = 1e-5\n\n\n# loss function for policy output\ndef logloss(y_true, y_pred): # policy loss\n return -K.sum(K.log(y_true * y_pred + (1 - y_true) * (1 - y_pred) + const), axis=-1)\n # BETA * K.sum(y_pred * K.log(y_pred + const) + (1-y_pred) * K.log(1-y_pred + const)) #regularisation term\n\n\n# loss function for critic output\ndef sumofsquares(y_true, y_pred): # critic loss\n return K.sum(K.square(y_pred - y_true), axis=-1)\n\n\ndef preprocess(image):\n image = skimage.color.rgb2gray(image)\n image = skimage.transform.resize(image, (84, 84), mode='constant')\n image = skimage.exposure.rescale_intensity(image, out_range=(0, 255))\n\n image = exposure.rescale_intensity(image, in_range=(1, 2))\n image = skimage.exposure.rescale_intensity(image, out_range=(0, 255))\n\n image = image.reshape(1, image.shape[0], image.shape[1], 1)\n return image\n\n\ngame_state = game.GameState(30)\n\ncurrentScore = 0\ntopScore = 0\na_t = [1, 0]\nFIRST_FRAME = True\nterminal = False\nr_t = 0\nmyCount = 1\n\n\n# -------------- code for checking performance of saved models by finding average scores for 10 runs------------------\n\nevalGamer = pd.DataFrame(columns=['model','evalScore'])\nlogCnt = 0\n\nmodels = [\"\"]\n\nfileName = \"saved_models/model_updates\"\nmodelName = 6000\n\nfor i in range(1, 120):\n modelName += 50\n fileName = \"saved_models/model_updates\" + str(modelName)\n model = load_model(fileName, custom_objects={'logloss': logloss, 'sumofsquares': sumofsquares})\n score = 0\n 
counter = 0\n\n while counter < 1:\n x_t, r_t, terminal = game_state.frame_step(a_t)\n score += 1\n if r_t == -1:\n counter += 1\n\n x_t = preprocess(x_t)\n\n if FIRST_FRAME:\n s_t = np.concatenate((x_t, x_t, x_t, x_t), axis=3)\n\n else:\n s_t = np.append(x_t, s_t[:, :, :, :3], axis=3)\n\n y = model.predict(s_t)\n no = np.random.random()\n\n #print(y)\n if FIRST_FRAME:\n a_t = [0, 1]\n FIRST_FRAME = False\n else:\n no = np.random.rand()\n a_t = [0, 1] if no < y[0] else [1, 0]\n # a_t = [0,1] if 0.5 <y[0] else [1,0]\n\n if r_t == -1:\n FIRST_FRAME = True\n\n if score % 200 == 0:\n evalGamer.loc[logCnt] = modelName, score\n evalGamer.to_csv(\"evalGamer.csv\", index=True)\n\n if terminal == True:\n print(\"DIED\", \"SCORE:\", score, \"Model:\", modelName)\n logCnt += 1\n evalGamer.loc[logCnt] = modelName, score\n evalGamer.to_csv(\"evalGamer.csv\", index=True)\n\n\n\n" ]
[ [ "numpy.random.random", "pandas.DataFrame", "numpy.concatenate", "numpy.append", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
starimeL/PytorchConverter
[ "75fbdb3d52da9ee64db509ecdf221dd102402579" ]
[ "code/ReplaceDenormals.py" ]
[ "import numpy as np\nimport torch\n\n\ndef ReplaceDenormals(net):\n for name, param in net.named_parameters():\n np_arr = param.data.numpy()\n for x in np.nditer(np_arr, op_flags=['readwrite']):\n if abs(x) < 1e-30:\n x[...] = 1e-30\n param.data = torch.from_numpy(np_arr)\n" ]
[ [ "torch.from_numpy", "numpy.nditer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
peterhan91/Throax_GAN
[ "739dd4d7885d73b651ef13f9336f6a46a4b9f56f" ]
[ "ProGAN/train.py" ]
[ "# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\nimport os\nimport time\nimport numpy as np\nimport tensorflow as tf\n\nimport config\nimport tfutil\nimport dataset\nimport misc\n\n#----------------------------------------------------------------------------\n# Choose the size and contents of the image snapshot grids that are exported\n# periodically during training.\n\ndef setup_snapshot_image_grid(G, training_set,\n size = '1080p', # '1080p' = to be viewed on 1080p display, '4k' = to be viewed on 4k display.\n layout = 'random'): # 'random' = grid contents are selected randomly, 'row_per_class' = each row corresponds to one class label.\n\n # Select size.\n gw = 1; gh = 1\n if size == '1080p':\n gw = np.clip(1920 // G.output_shape[3], 3, 32)\n gh = np.clip(1080 // G.output_shape[2], 2, 32)\n if size == '4k':\n gw = np.clip(3840 // G.output_shape[3], 7, 32)\n gh = np.clip(2160 // G.output_shape[2], 4, 32)\n\n # Fill in reals and labels.\n reals = np.zeros([gw * gh] + training_set.shape, dtype=training_set.dtype)\n labels = np.zeros([gw * gh, training_set.label_size], dtype=training_set.label_dtype)\n print('the training set labels have the shape of: ' + str(training_set.label_size))\n for idx in range(gw * gh):\n x = idx % gw; y = idx // gw\n while True:\n real, label = training_set.get_minibatch_np(1)\n if layout == 'row_per_class' and training_set.label_size > 0:\n if label[0, y % training_set.label_size] == 0.0:\n continue\n reals[idx] = real[0]\n labels[idx] = label[0]\n break\n\n # Generate latents.\n latents = misc.random_latents(gw * gh, G)\n return (gw, gh), reals, labels, 
latents\n\n#----------------------------------------------------------------------------\n# Just-in-time processing of training images before feeding them to the networks.\n\ndef process_reals(x, lod, mirror_augment, drange_data, drange_net):\n with tf.name_scope('ProcessReals'):\n with tf.name_scope('DynamicRange'):\n x = tf.cast(x, tf.float32)\n x = misc.adjust_dynamic_range(x, drange_data, drange_net)\n if mirror_augment:\n with tf.name_scope('MirrorAugment'):\n s = tf.shape(x)\n mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)\n mask = tf.tile(mask, [1, s[1], s[2], s[3]])\n x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))\n with tf.name_scope('FadeLOD'): # Smooth crossfade between consecutive levels-of-detail.\n s = tf.shape(x)\n y = tf.reshape(x, [-1, s[1], s[2]//2, 2, s[3]//2, 2])\n y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)\n y = tf.tile(y, [1, 1, 1, 2, 1, 2])\n y = tf.reshape(y, [-1, s[1], s[2], s[3]])\n x = tfutil.lerp(x, y, lod - tf.floor(lod))\n with tf.name_scope('UpscaleLOD'): # Upscale to match the expected input/output size of the networks.\n s = tf.shape(x)\n factor = tf.cast(2 ** tf.floor(lod), tf.int32)\n x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])\n x = tf.tile(x, [1, 1, 1, factor, 1, factor])\n x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])\n return x\n\n#----------------------------------------------------------------------------\n# Class for evaluating and storing the values of time-varying training parameters.\n\nclass TrainingSchedule:\n def __init__(\n self,\n cur_nimg,\n training_set,\n lod_initial_resolution = 4, # Image resolution used at the beginning.\n lod_training_kimg = 600, # Thousands of real images to show before doubling the resolution.\n lod_transition_kimg = 600, # Thousands of real images to show when fading in new layers.\n minibatch_base = 16, # Maximum minibatch size, divided evenly among GPUs.\n minibatch_dict = {}, # Resolution-specific overrides.\n max_minibatch_per_gpu = {}, # 
Resolution-specific maximum minibatch size per GPU.\n G_lrate_base = 0.001, # Learning rate for the generator.\n G_lrate_dict = {}, # Resolution-specific overrides.\n D_lrate_base = 0.001, # Learning rate for the discriminator.\n D_lrate_dict = {}, # Resolution-specific overrides.\n tick_kimg_base = 160, # Default interval of progress snapshots.\n tick_kimg_dict = {4: 160, 8:140, 16:120, 32:100, 64:80, 128:60, 256:40, 512:20, 1024:10}): # Resolution-specific overrides.\n\n # Training phase.\n self.kimg = cur_nimg / 1000.0\n phase_dur = lod_training_kimg + lod_transition_kimg\n phase_idx = int(np.floor(self.kimg / phase_dur)) if phase_dur > 0 else 0\n phase_kimg = self.kimg - phase_idx * phase_dur\n\n # Level-of-detail and resolution.\n self.lod = training_set.resolution_log2\n self.lod -= np.floor(np.log2(lod_initial_resolution))\n self.lod -= phase_idx\n if lod_transition_kimg > 0:\n self.lod -= max(phase_kimg - lod_training_kimg, 0.0) / lod_transition_kimg\n self.lod = max(self.lod, 0.0)\n self.resolution = 2 ** (training_set.resolution_log2 - int(np.floor(self.lod)))\n\n # Minibatch size.\n self.minibatch = minibatch_dict.get(self.resolution, minibatch_base)\n self.minibatch -= self.minibatch % config.num_gpus\n if self.resolution in max_minibatch_per_gpu:\n self.minibatch = min(self.minibatch, max_minibatch_per_gpu[self.resolution] * config.num_gpus)\n\n # Other parameters.\n self.G_lrate = G_lrate_dict.get(self.resolution, G_lrate_base)\n self.D_lrate = D_lrate_dict.get(self.resolution, D_lrate_base)\n self.tick_kimg = tick_kimg_dict.get(self.resolution, tick_kimg_base)\n\n#----------------------------------------------------------------------------\n# Main training script.\n# To run, comment/uncomment appropriate lines in config.py and launch train.py.\n\ndef train_progressive_gan(\n G_smoothing = 0.999, # Exponential running average of generator weights.\n D_repeats = 1, # How many times the discriminator is trained per G iteration.\n minibatch_repeats = 4, 
# Number of minibatches to run before adjusting training parameters.\n reset_opt_for_new_lod = True, # Reset optimizer internal state (e.g. Adam moments) when new layers are introduced?\n total_kimg = 15000, # Total length of the training, measured in thousands of real images.\n mirror_augment = False, # Enable mirror augment?\n drange_net = [-1,1], # Dynamic range used when feeding image data to the networks.\n image_snapshot_ticks = 1, # How often to export image snapshots?\n network_snapshot_ticks = 10, # How often to export network snapshots?\n save_tf_graph = False, # Include full TensorFlow computation graph in the tfevents file?\n save_weight_histograms = False, # Include weight histograms in the tfevents file?\n resume_run_id = None, # Run ID or network pkl to resume training from, None = start from scratch.\n resume_snapshot = None, # Snapshot index to resume training from, None = autodetect.\n resume_kimg = 0.0, # Assumed training progress at the beginning. Affects reporting and training schedule.\n resume_time = 0.0): # Assumed wallclock time at the beginning. Affects reporting.\n\n maintenance_start_time = time.time()\n training_set = dataset.load_dataset(data_dir=config.data_dir, verbose=True, **config.dataset)\n\n # Construct networks.\n with tf.device('/gpu:0'):\n if resume_run_id is not None:\n network_pkl = misc.locate_network_pkl(resume_run_id, resume_snapshot)\n print('Loading networks from \"%s\"...' 
% network_pkl)\n G, D, Gs = misc.load_pkl(network_pkl)\n else:\n print('Constructing networks...')\n G = tfutil.Network('G', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **config.G)\n D = tfutil.Network('D', num_channels=training_set.shape[0], resolution=training_set.shape[1], label_size=training_set.label_size, **config.D)\n Gs = G.clone('Gs')\n Gs_update_op = Gs.setup_as_moving_average_of(G, beta=G_smoothing)\n G.print_layers(); D.print_layers()\n\n print('Building TensorFlow graph...')\n with tf.name_scope('Inputs'):\n lod_in = tf.placeholder(tf.float32, name='lod_in', shape=[])\n lrate_in = tf.placeholder(tf.float32, name='lrate_in', shape=[])\n minibatch_in = tf.placeholder(tf.int32, name='minibatch_in', shape=[])\n minibatch_split = minibatch_in // config.num_gpus\n reals, labels = training_set.get_minibatch_tf()\n reals_split = tf.split(reals, config.num_gpus)\n labels_split = tf.split(labels, config.num_gpus)\n G_opt = tfutil.Optimizer(name='TrainG', learning_rate=lrate_in, **config.G_opt)\n D_opt = tfutil.Optimizer(name='TrainD', learning_rate=lrate_in, **config.D_opt)\n for gpu in range(config.num_gpus):\n with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu):\n G_gpu = G if gpu == 0 else G.clone(G.name + '_shadow')\n D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow')\n lod_assign_ops = [tf.assign(G_gpu.find_var('lod'), lod_in), tf.assign(D_gpu.find_var('lod'), lod_in)]\n reals_gpu = process_reals(reals_split[gpu], lod_in, mirror_augment, training_set.dynamic_range, drange_net)\n labels_gpu = labels_split[gpu]\n with tf.name_scope('G_loss'), tf.control_dependencies(lod_assign_ops):\n G_loss = tfutil.call_func_by_name(G=G_gpu, D=D_gpu, opt=G_opt, training_set=training_set, minibatch_size=minibatch_split, **config.G_loss)\n with tf.name_scope('D_loss'), tf.control_dependencies(lod_assign_ops):\n D_loss = tfutil.call_func_by_name(G=G_gpu, D=D_gpu, opt=D_opt, training_set=training_set, 
minibatch_size=minibatch_split, reals=reals_gpu, labels=labels_gpu, **config.D_loss)\n G_opt.register_gradients(tf.reduce_mean(G_loss), G_gpu.trainables)\n D_opt.register_gradients(tf.reduce_mean(D_loss), D_gpu.trainables)\n G_train_op = G_opt.apply_updates()\n D_train_op = D_opt.apply_updates()\n\n print('Setting up snapshot image grid...')\n grid_size, grid_reals, grid_labels, grid_latents = setup_snapshot_image_grid(G, training_set, **config.grid)\n sched = TrainingSchedule(total_kimg * 1000, training_set, **config.sched)\n grid_fakes = Gs.run(grid_latents, grid_labels, minibatch_size=sched.minibatch//config.num_gpus)\n\n print('Setting up result dir...')\n result_subdir = misc.create_result_subdir(config.result_dir, config.desc)\n misc.save_image_grid(grid_reals, os.path.join(result_subdir, 'reals.png'), drange=training_set.dynamic_range, grid_size=grid_size)\n misc.save_image_grid(grid_fakes, os.path.join(result_subdir, 'fakes%06d.png' % 0), drange=drange_net, grid_size=grid_size)\n summary_log = tf.summary.FileWriter(result_subdir)\n if save_tf_graph:\n summary_log.add_graph(tf.get_default_graph())\n if save_weight_histograms:\n G.setup_weight_histograms(); D.setup_weight_histograms()\n\n print('Training...')\n cur_nimg = int(resume_kimg * 1000)\n cur_tick = 0\n tick_start_nimg = cur_nimg\n tick_start_time = time.time()\n train_start_time = tick_start_time - resume_time\n prev_lod = -1.0\n while cur_nimg < total_kimg * 1000:\n\n # Choose training parameters and configure training ops.\n sched = TrainingSchedule(cur_nimg, training_set, **config.sched)\n training_set.configure(sched.minibatch, sched.lod)\n if reset_opt_for_new_lod:\n if np.floor(sched.lod) != np.floor(prev_lod) or np.ceil(sched.lod) != np.ceil(prev_lod):\n G_opt.reset_optimizer_state(); D_opt.reset_optimizer_state()\n prev_lod = sched.lod\n\n # Run training ops.\n for repeat in range(minibatch_repeats):\n for _ in range(D_repeats):\n tfutil.run([D_train_op, Gs_update_op], {lod_in: sched.lod, 
lrate_in: sched.D_lrate, minibatch_in: sched.minibatch})\n cur_nimg += sched.minibatch\n tfutil.run([G_train_op], {lod_in: sched.lod, lrate_in: sched.G_lrate, minibatch_in: sched.minibatch})\n\n # Perform maintenance tasks once per tick.\n done = (cur_nimg >= total_kimg * 1000)\n if cur_nimg >= tick_start_nimg + sched.tick_kimg * 1000 or done:\n cur_tick += 1\n cur_time = time.time()\n tick_kimg = (cur_nimg - tick_start_nimg) / 1000.0\n tick_start_nimg = cur_nimg\n tick_time = cur_time - tick_start_time\n total_time = cur_time - train_start_time\n maintenance_time = tick_start_time - maintenance_start_time\n maintenance_start_time = cur_time\n\n # Report progress.\n print('tick %-5d kimg %-8.1f lod %-5.2f minibatch %-4d time %-12s sec/tick %-7.1f sec/kimg %-7.2f maintenance %.1f' % (\n tfutil.autosummary('Progress/tick', cur_tick),\n tfutil.autosummary('Progress/kimg', cur_nimg / 1000.0),\n tfutil.autosummary('Progress/lod', sched.lod),\n tfutil.autosummary('Progress/minibatch', sched.minibatch),\n misc.format_time(tfutil.autosummary('Timing/total_sec', total_time)),\n tfutil.autosummary('Timing/sec_per_tick', tick_time),\n tfutil.autosummary('Timing/sec_per_kimg', tick_time / tick_kimg),\n tfutil.autosummary('Timing/maintenance_sec', maintenance_time)))\n tfutil.autosummary('Timing/total_hours', total_time / (60.0 * 60.0))\n tfutil.autosummary('Timing/total_days', total_time / (24.0 * 60.0 * 60.0))\n tfutil.save_summaries(summary_log, cur_nimg)\n\n # Save snapshots.\n if cur_tick % image_snapshot_ticks == 0 or done:\n grid_fakes = Gs.run(grid_latents, grid_labels, minibatch_size=sched.minibatch//config.num_gpus)\n misc.save_image_grid(grid_fakes, os.path.join(result_subdir, 'fakes%06d.png' % (cur_nimg // 1000)), drange=drange_net, grid_size=grid_size)\n if cur_tick % network_snapshot_ticks == 0 or done:\n misc.save_pkl((G, D, Gs), os.path.join(result_subdir, 'network-snapshot-%06d.pkl' % (cur_nimg // 1000)))\n\n # Record start time of the next tick.\n 
tick_start_time = time.time()\n\n # Write final results.\n misc.save_pkl((G, D, Gs), os.path.join(result_subdir, 'network-final.pkl'))\n summary_log.close()\n open(os.path.join(result_subdir, '_training-done.txt'), 'wt').close()\n\n#----------------------------------------------------------------------------\n# Main entry point.\n# Calls the function indicated in config.py.\n\nif __name__ == \"__main__\":\n misc.init_output_logging()\n np.random.seed(config.random_seed)\n print('Initializing TensorFlow...')\n os.environ.update(config.env)\n tfutil.init_tf(config.tf_config)\n print('Running %s()...' % config.train['func'])\n tfutil.call_func_by_name(**config.train)\n print('Exiting...')\n\n#----------------------------------------------------------------------------\n" ]
[ [ "tensorflow.device", "tensorflow.control_dependencies", "tensorflow.cast", "tensorflow.get_default_graph", "numpy.clip", "tensorflow.floor", "numpy.ceil", "tensorflow.name_scope", "numpy.zeros", "tensorflow.tile", "tensorflow.reverse", "tensorflow.shape", "tensorflow.placeholder", "numpy.floor", "tensorflow.split", "numpy.log2", "tensorflow.summary.FileWriter", "numpy.random.seed", "tensorflow.reduce_mean", "tensorflow.reshape", "tensorflow.random_uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
TRex22/vector-AI-ML-term_project
[ "70d1cab20966c9c71e38a2f471d60628396c8e5a" ]
[ "src/ann.py" ]
[ "# Jason Chalom 711985 May 2017\n# Artificial Neural Network helper functions for vector AI\n\nimport numpy as np\nimport utils as u\nimport tic_tac_toe as game\n\ndef determine_xy(output_layer, b_s):\n\tsize = output_layer.shape[0]\n\t# print(output_layer)\n\tmax = -1;\n\tidex = -1;\n\tfor i in range(size):\n\t\tif(output_layer[i] > max):\n\t\t\tmax = output_layer[i];\n\t\t\tidex = i;\n\tif (idex == -1):\n\t\treturn -1;\n\tx = idex/b_s\n\ty = idex%b_s # idex%size-1\n\treturn x,y;\n\ndef auto_encode_3(world):\n\tdata_dat = np.load('data/autoencoder_3.dat.npz')\n\t\n\tdata1 = np.vstack((data_dat['data1']))\n\tx = np.hstack(([1.0], u.flattenWorld(world)))\n\tlayer1 = np.dot(x, data1)\n\tlayer1 = 1.0/(1.0 + np.exp(-layer1))\n\n\tdata2 = np.vstack((data_dat['data2']))\n\tlayer1 = np.hstack(([1.0], layer1))\n\tlayer2 = np.dot(layer1, data2)\n\tlayer2 = 1.0/(1.0 + np.exp(-layer2))\n\n\tdata3 = np.vstack((data_dat['data3']))\n\tlayer2 = np.hstack(([1.0], layer2))\n\tlayer3 = np.dot(layer2, data3)\n\tlayer3 = 1.0/(1.0 + np.exp(-layer3))\n\n\tdata4 = np.vstack((data_dat['data4']))\n\tlayer3 = np.hstack(([1.0], layer3))\n\tlayer4 = np.dot(layer3, data4)\n\tlayer4 = 1.0/(1.0 + np.exp(-layer4))\n\n\treturn layer4;\n\ndef auto_decode_3(layer4):\n\tdata_dat = np.load('data/autoencoder_3.dat.npz')\n\tdata5 = np.vstack((data_dat['data5']))\n\tlayer4 = np.hstack(([1.0], layer4))\n\tlayer5 = np.dot(layer4, data5)\n\tlayer5 = 1.0/(1.0 + np.exp(-layer5))\n\n\tdata6 = np.vstack((data_dat['data6']))\n\tlayer5 = np.hstack(([1.0], layer5))\n\tlayer6 = np.dot(layer5, data6)\n\tlayer6 = 1.0/(1.0 + np.exp(-layer6))\n\n\treturn layer6\n\ndef auto_encode_5(world):\n\tdata_dat = np.load('data/autoencoder_5.dat.npz')\n\t\n\tdata1 = np.vstack((data_dat['data1']))\n\tx = np.hstack(([1.0], u.flattenWorld(world)))\n\tlayer1 = np.dot(x, data1)\n\tlayer1 = 1.0/(1.0 + np.exp(-layer1))\n\n\tdata2 = np.vstack((data_dat['data2']))\n\tlayer1 = np.hstack(([1.0], layer1))\n\tlayer2 = np.dot(layer1, 
data2)\n\tlayer2 = 1.0/(1.0 + np.exp(-layer2))\n\n\tdata3 = np.vstack((data_dat['data3']))\n\tlayer2 = np.hstack(([1.0], layer2))\n\tlayer3 = np.dot(layer2, data3)\n\tlayer3 = 1.0/(1.0 + np.exp(-layer3))\n\n\tdata4 = np.vstack((data_dat['data4']))\n\tlayer3 = np.hstack(([1.0], layer3))\n\tlayer4 = np.dot(layer3, data4)\n\tlayer4 = 1.0/(1.0 + np.exp(-layer4))\n\n\treturn layer4;\n\ndef auto_decode_5(layer4):\n\tdata_dat = np.load('data/autoencoder_5.dat.npz')\n\tdata5 = np.vstack((data_dat['data5']))\n\tlayer4 = np.hstack(([1.0], layer4))\n\tlayer5 = np.dot(layer4, data5)\n\tlayer5 = 1.0/(1.0 + np.exp(-layer5))\n\n\tdata6 = np.vstack((data_dat['data6']))\n\tlayer5 = np.hstack(([1.0], layer5))\n\tlayer6 = np.dot(layer5, data6)\n\tlayer6 = 1.0/(1.0 + np.exp(-layer6))\n\n\treturn layer6\n\ndef NN3(world, player):\n\tsize = world.shape[0]\n\tmovesLeft = game.numberMovesLeft(world)\n\tif movesLeft == 0:\n\t\treturn world\n\n\tmadeMove = False\n\tdata_dat = np.load('data/NN_natural_3_3.dat.npz')\n\ttempWorld = world.copy()\n\titerations = 0\n\n\twhile madeMove == False and iterations < 1000:\n\t\tdata1 = np.vstack((data_dat['data1']))\n\t\tx = np.hstack(([1.0], u.flattenWorld(tempWorld)))\n\t\tlayer1 = np.dot(x, data1)\n\t\tlayer1 = 1.0/(1.0 + np.exp(-layer1))\n\n\t\tdata2 = np.vstack((data_dat['data2']))\n\t\tlayer1 = np.hstack(([1.0], layer1))\n\t\tlayer2 = np.dot(layer1, data2)\n\t\tlayer2 = 1.0/(1.0 + np.exp(-layer2))\n\n\t\tdata3 = np.vstack((data_dat['data3']))\n\t\tlayer2 = np.hstack(([1.0], layer2))\n\t\tlayer3 = np.dot(layer2, data3)\n\t\tlayer3 = 1.0/(1.0 + np.exp(-layer3))\n\n\t\tx,y = determine_xy(layer3, size)\n\t\t# tempWorld[x][y] = player\n\t\tmadeMove = game.checkMove(tempWorld, x, y)\n\t\t# tempWorld[x][y] = 0\n\t\titerations = iterations+1\n\n\tif(iterations >= 1000):\n\t\tworld, x, y = game.rndMoveXY(world, 1)\n\telse:\n\t\tworld[x][y] = player\n\t\n\treturn world,x,y\n\n# 'data/NN_natural_5_5.dat.npz'\ndef NN5(world, player):\n\tsize = 
world.shape[0]\n\tmovesLeft = game.numberMovesLeft(world)\n\tif movesLeft == 0:\n\t\treturn world\n\n\tmadeMove = False\n\tdata_dat = np.load('data/NN_natural_5_5.dat.npz')\n\ttempWorld = world.copy()\n\titerations = 0\n\n\twhile madeMove == False and iterations < 1000:\n\t\tdata1 = np.vstack((data_dat['data1']))\n\t\tx = np.hstack(([1.0], u.flattenWorld(tempWorld)))\n\t\tlayer1 = np.dot(x, data1)\n\t\tlayer1 = 1.0/(1.0 + np.exp(-layer1))\n\n\t\tdata2 = np.vstack((data_dat['data2']))\n\t\tlayer1 = np.hstack(([1.0], layer1))\n\t\tlayer2 = np.dot(layer1, data2)\n\t\tlayer2 = 1.0/(1.0 + np.exp(-layer2))\n\n\t\tdata3 = np.vstack((data_dat['data3']))\n\t\tlayer2 = np.hstack(([1.0], layer2))\n\t\tlayer3 = np.dot(layer2, data3)\n\t\tlayer3 = 1.0/(1.0 + np.exp(-layer3))\n\n\t\tx,y = determine_xy(layer3, size)\n\t\t# tempWorld[x][y] = player\n\t\tmadeMove = game.checkMove(tempWorld, x, y)\n\t\t# tempWorld[x][y] = 0\n\t\titerations = iterations+1\n\n\tif(iterations >= 1000):\n\t\tworld, x, y = game.rndMoveXY(world, 1)\n\telse:\n\t\tworld[x][y] = player\n\t\n\treturn world,x,y\n\ndef rndVsNN(board_size, tprint=False):\n\tworld = game.initGameWorld(board_size)\n\tmovesLeft = game.numberMovesLeft(world)\n\thasWon = False\n\tplayer1won = False\n\tplayer2won = False\n\tmoveCount = 0\n\n\twhile(movesLeft > 0) and (hasWon == False):\n\t\t# player 1\n\t\tif (movesLeft > 0) and (hasWon == False):\n\t\t\tnewWorld, x, y = game.rndMoveXY(world, -1)\n\t\t\thasWon = game.checkWin(newWorld, 1) \n\t\t\t\n\t\t\tif hasWon:\n\t\t\t\tplayer1won = True\n\n\t\t\tmoveCount = moveCount+1\t\n \n\t\tif (movesLeft > 0):\n\t\t\tmovesLeft = game.numberMovesLeft(world)\n \n\t\t# player 2\n\t\tif (movesLeft > 0) and (hasWon == False):\n\t\t\tif board_size == 3:\n\t\t\t\tnewWorld, x, y = NN3(world, 1)\n\t\t\telif board_size == 5:\n\t\t\t\tnewWorld, x, y = NN5(world, 1)\n\t\t\telse:\n\t\t\t\tnewWorld, x, y = game.rndMoveXY(world, 1)\n\n\t\t\thasWon = game.checkWin(newWorld, 1)\n\n\t\t\tif hasWon and not 
player1won:\n\t\t\t\tplayer2won = True\n\t\t\t# print(newWorld)\n\t\t\tworld = newWorld\n\t\t\tmoveCount = moveCount+1\t\t\t\n\n\t\tif (movesLeft > 0):\n\t\t\t# print(world)\n\t\t\t# game.printWorld(world)\n\t\t\tmovesLeft = game.numberMovesLeft(world)\n \n\tif(tprint):\n\t\tif(game.checkDraw(world, moveCount)):\n\t\t\tprint(\"It's a draw!\")\n\n\t\telse:\n\t\t\tif player1won:\n\t\t\t\tprint(\"player 1 wins!\")\n\t\t\telse:\n\t\t\t\tprint(\"player 2 wins!\")\n\t\tgame.printWorld(world)\n\n\tif(game.checkDraw(world, moveCount)):\n\t\treturn 0\n\n\telse:\n\t\tif player1won:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn -1\n\ndef NNVsRnd(board_size, tprint=False):\n\tworld = game.initGameWorld(board_size)\n\tmovesLeft = game.numberMovesLeft(world)\n\thasWon = False\n\tplayer1won = False\n\tplayer2won = False\n\tmoveCount = 0\n\n\twhile(movesLeft > 0) and (hasWon == False):\n\t\t# player 1\n\t\tif (movesLeft > 0) and (hasWon == False):\n\t\t\tif board_size == 3:\n\t\t\t\tnewWorld, x, y = NN3(world, 1)\n\t\t\telif board_size == 5:\n\t\t\t\tnewWorld, x, y = NN5(world, 1)\n\t\t\telse:\n\t\t\t\tnewWorld, x, y = game.rndMoveXY(world, 1)\n\n\t\t\thasWon = game.checkWin(world, 1) \n\t\t\t\n\t\t\tif hasWon:\n\t\t\t\tplayer1won = True\n\n\t\t\tmoveCount = moveCount+1\t\n \n\t\tif (movesLeft > 0):\n\t\t\tmovesLeft = game.numberMovesLeft(world)\n \n\t\t# player 2\n\t\tif (movesLeft > 0) and (hasWon == False):\n\t\t\tnewWorld, x, y = game.rndMoveXY(world, -1)\n\t\t\thasWon = game.checkWin(world, -1)\n\n\t\t\tif hasWon and not player1won:\n\t\t\t\tplayer2won = True\n\n\t\t\tworld = newWorld\n\t\t\tmoveCount = moveCount+1\t\t\t\n\n\t\tif (movesLeft > 0):\n\t\t\tmovesLeft = game.numberMovesLeft(world)\n \n\tif(tprint):\n\t\tif(game.checkDraw(world, moveCount)):\n\t\t\tprint(\"It's a draw!\")\n\n\t\telse:\n\t\t\tif player1won:\n\t\t\t\tprint(\"player 1 wins!\")\n\t\t\telse:\n\t\t\t\tprint(\"player 2 wins!\")\n\t\tgame.printWorld(world)\n\n\tif(game.checkDraw(world, moveCount)):\n\t\treturn 
0\n\n\telse:\n\t\tif player1won:\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn -1" ]
[ [ "numpy.dot", "numpy.hstack", "numpy.load", "numpy.exp", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
augustoolucas/iCaRL
[ "90ac1be39c9e055d9dd2fa1b679c0cfb8cf7335a" ]
[ "iCaRL-Tensorflow/utils_resnet.py" ]
[ "import tensorflow as tf\nimport numpy as np\ntry:\n import cPickle\nexcept:\n import _pickle as cPickle\n\n\ndef relu(x, name, alpha):\n if alpha > 0:\n return tf.maximum(alpha * x, x, name=name)\n else:\n return tf.nn.relu(x, name=name)\n\n\ndef get_variable(name, shape, dtype, initializer, trainable=True, regularizer=None):\n with tf.device('/cpu:0'):\n var = tf.get_variable(name, shape=shape, dtype=dtype,\n initializer=initializer, regularizer=regularizer, trainable=trainable,\n collections=[tf.GraphKeys.WEIGHTS, tf.GraphKeys.GLOBAL_VARIABLES])\n return var\n\n\ndef conv(inp, name, size, out_channels, strides=[1, 1, 1, 1],\n dilation=None, padding='SAME', apply_relu=True, alpha=0.0,bias=True,\n initializer=tf.contrib.layers.xavier_initializer_conv2d()):\n \n batch_size = inp.get_shape().as_list()[0]\n res1 = inp.get_shape().as_list()[1]\n res2 = inp.get_shape().as_list()[1]\n in_channels = inp.get_shape().as_list()[3]\n \n with tf.variable_scope(name):\n W = get_variable(\"W\", shape=[size, size, in_channels, out_channels], dtype=tf.float32,\n initializer=initializer, regularizer=tf.nn.l2_loss)\n b = get_variable(\"b\", shape=[1, 1, 1, out_channels], dtype=tf.float32,\n initializer=tf.zeros_initializer(),trainable=bias)\n \n if dilation:\n assert(strides == [1, 1, 1, 1])\n out = tf.add(tf.nn.atrous_conv2d(inp, W, rate=dilation, padding=padding), b, name='convolution')\n out.set_shape([batch_size, res1, res2, out_channels])\n else:\n out = tf.add(tf.nn.conv2d(inp, W, strides=strides, padding=padding), b, name='convolution')\n \n if apply_relu:\n out = relu(out, alpha=alpha, name='relu')\n \n return out\n\n\ndef softmax(target, axis, name=None):\n max_axis = tf.reduce_max(target, axis, keep_dims=True)\n target_exp = tf.exp(target - max_axis)\n normalize = tf.reduce_sum(target_exp, axis, keep_dims=True)\n softmax = target_exp / normalize\n return softmax\n\n\ndef batch_norm(inp, name, phase, decay=0.9):\n \n channels = inp.get_shape().as_list()[3]\n \n with 
tf.variable_scope(name):\n moving_mean = get_variable(\"mean\", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(0.0), trainable=False)\n moving_variance = get_variable(\"var\", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(1.0), trainable=False)\n \n offset = get_variable(\"offset\", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(0.0))\n scale = get_variable(\"scale\", shape=[channels], dtype=tf.float32, initializer=tf.constant_initializer(1.0), regularizer=tf.nn.l2_loss)\n \n mean, variance = tf.nn.moments(inp, axes=[0, 1, 2], shift=moving_mean)\n \n mean_op = moving_mean.assign(decay * moving_mean + (1 - decay) * mean)\n var_op = moving_variance.assign(decay * moving_variance + (1 - decay) * variance)\n \n assert(phase in ['train', 'test'])\n if phase == 'train':\n with tf.control_dependencies([mean_op, var_op]):\n return tf.nn.batch_normalization(inp, mean, variance, offset, scale, 0.01, name='norm')\n else:\n return tf.nn.batch_normalization(inp, moving_mean, moving_variance, offset, scale, 0.01, name='norm')\n\n\ndef pool(inp, name, kind, size, stride, padding='SAME'):\n \n assert kind in ['max', 'avg']\n \n strides = [1, stride, stride, 1]\n sizes = [1, size, size, 1]\n \n with tf.variable_scope(name):\n if kind == 'max':\n out = tf.nn.max_pool(inp, sizes, strides=strides, padding=padding, name=kind)\n else:\n out = tf.nn.avg_pool(inp, sizes, strides=strides, padding=padding, name=kind)\n \n return out\n\n\ndef ResNet18(inp, phase, num_outputs=1000, alpha=0.0):\n def residual_block(inp, phase, alpha=0.0,nom='a',increase_dim=False,last=False):\n input_num_filters = inp.get_shape().as_list()[3]\n if increase_dim:\n first_stride = [1, 2, 2, 1]\n out_num_filters = input_num_filters*2\n else:\n first_stride = [1, 1, 1, 1]\n out_num_filters = input_num_filters\n \n layer = conv(inp, 'resconv1'+nom, size=3, strides=first_stride, out_channels=out_num_filters, alpha=alpha, padding='SAME')\n 
layer = batch_norm(layer, 'batch_norm_resconv1'+nom, phase=phase)\n layer = conv(layer, 'resconv2'+nom, size=3, strides=[1, 1, 1, 1], out_channels=out_num_filters, apply_relu=False,alpha=alpha, padding='SAME')\n layer = batch_norm(layer, 'batch_norm_resconv2'+nom, phase=phase)\n \n if increase_dim:\n projection = conv(inp, 'projconv'+nom, size=1, strides=[1, 2, 2, 1], out_channels=out_num_filters, alpha=alpha, apply_relu=False,padding='SAME',bias=False)\n projection = batch_norm(projection, 'batch_norm_projconv'+nom, phase=phase)\n if last:\n block = layer + projection\n else:\n block = layer + projection\n block = tf.nn.relu(block, name='relu')\n else:\n if last:\n block = layer + inp\n else:\n block = layer + inp\n block = tf.nn.relu(block, name='relu')\n \n return block\n \n # First conv\n #layer = batch_norm(inp, 'batch_norm_0', phase=phase)\n layer = conv(inp,\"conv1\",size=7,strides=[1, 2, 2, 1], out_channels=64, alpha=alpha, padding='SAME')\n layer = batch_norm(layer, 'batch_norm_1', phase=phase)\n layer = pool(layer, 'pool1', 'max', size=3, stride=2)\n \n # First stack of residual blocks\n for letter in 'ab':\n layer = residual_block(layer, phase, alpha=0.0,nom=letter)\n \n # Second stack of residual blocks\n layer = residual_block(layer, phase, alpha=0.0,nom='c',increase_dim=True)\n for letter in 'd':\n layer = residual_block(layer, phase, alpha=0.0,nom=letter)\n \n # Third stack of residual blocks\n layer = residual_block(layer, phase, alpha=0.0,nom='e',increase_dim=True)\n for letter in 'f':\n layer = residual_block(layer, phase, alpha=0.0,nom=letter)\n \n # Fourth stack of residual blocks\n layer = residual_block(layer, phase, alpha=0.0,nom='g',increase_dim=True)\n layer = residual_block(layer, phase, alpha=0.0,nom='h',increase_dim=False,last=True)\n \n layer = pool(layer, 'pool_last', 'avg', size=7, stride=1,padding='VALID')\n layer = conv(layer, name='fc', size=1, out_channels=num_outputs, padding='VALID', apply_relu=False, alpha=alpha)[:, 0, 0, :]\n 
\n return layer\n\n\ndef get_weight_initializer(params):\n \n initializer = []\n \n scope = tf.get_variable_scope()\n scope.reuse_variables()\n for layer, value in params.items():\n op = tf.get_variable('%s' % layer).assign(value)\n initializer.append(op)\n return initializer\n\n\ndef save_model(name, scope, sess):\n variables = tf.get_collection(tf.GraphKeys.WEIGHTS, scope=scope)\n d = [(v.name.split(':')[0], sess.run(v)) for v in variables]\n \n cPickle.dump(d, open(name, 'wb'))\n" ]
[ [ "tensorflow.device", "tensorflow.get_variable", "tensorflow.control_dependencies", "tensorflow.nn.max_pool", "tensorflow.reduce_sum", "tensorflow.nn.atrous_conv2d", "tensorflow.nn.conv2d", "tensorflow.get_collection", "tensorflow.nn.moments", "tensorflow.nn.batch_normalization", "tensorflow.zeros_initializer", "tensorflow.exp", "tensorflow.nn.avg_pool", "tensorflow.contrib.layers.xavier_initializer_conv2d", "tensorflow.nn.relu", "tensorflow.reduce_max", "tensorflow.maximum", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.get_variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
naveednilawfar/DroidCC
[ "68cd9cf369a223767513278b9780b35c2b92aeb9" ]
[ "code/droidcc/model_train/train_dbn.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/3/30 13:52\n\n@file: train_dbn.py\n@author: Qingyu Mao\n\"\"\"\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix, classification_report, roc_auc_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics.classification import accuracy_score\nimport pickle\nimport pandas as pd\nfrom dbn.tensorflow import SupervisedDBNClassification\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\nimport sys\n\nnp.random.seed(1337) # for reproducibility\n\ndataset = pd.read_csv(\"../data/sample-3000.csv\")\n\nX = dataset.drop(\"label\", 1).values\nY = dataset[\"label\"].values\n\n# Splitting data\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.5, random_state=0)\n\n# Training\n\"\"\"\n :param hidden_layers_structure: 隐层大小\n :param learning_rate_rbm: 预训练阶段的学习率\n :param learning_rate: 微调阶段的学习率\n :param n_epochs_rbm: 进行预训练的迭代次数\n :param n_iter_backprop: 进行训练的迭代次数\n :param batch_size: minibatch的大小\n :param activation_function: 激励函数\n :param dropout_p:\n\"\"\"\nclassifier = SupervisedDBNClassification(hidden_layers_structure=[200, 200, 200, 200, 200],\n learning_rate_rbm=0.05,\n learning_rate=0.1,\n n_epochs_rbm=10,\n n_iter_backprop=100,\n batch_size=100,\n activation_function='relu',\n dropout_p=0)\nclassifier.fit(X_train, Y_train)\n\n# Test\nY_pred = classifier.predict(X_test)\n# print(Y_test)\n# print(Y_pred)\nprint('Done.\\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))\n\nprint(\"=== Detailed Accuracy By Class ===\\n\")\nprint(classification_report(Y_test, Y_pred, digits=6))\nprint(\"=== Confusion Matrix ===\\n\")\nprint(confusion_matrix(Y_test, Y_pred))\nprint(roc_auc_score(Y_test, Y_pred))\n\n# # y_test:实际的标签, dataset_pred:预测的概率值。\n# fpr, tpr, thresholds = roc_curve(Y_test, Y_pred)\n# roc_auc = auc(fpr, tpr)\n# # 画图,只需要plt.plot(fpr,tpr),变量roc_auc只是记录auc的值,通过auc()函数能计算出来\n# plt.plot(fpr, tpr, lw=1, label='ROC(area = %0.3f)' % (roc_auc))\n# 
plt.xlabel(\"FPR (False Positive Rate)\")\n# plt.ylabel(\"TPR (True Positive Rate)\")\n# plt.title(\"Receiver Operating Characteristic, ROC(AUC = %0.2f)\" % (roc_auc))\n# plt.show()\n" ]
[ [ "sklearn.metrics.roc_auc_score", "pandas.read_csv", "numpy.random.seed", "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "sklearn.metrics.classification.accuracy_score", "sklearn.metrics.classification_report" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
maxpark/PaddleSlim
[ "f6b827fca5f3d9cc467426b8ef30e3a6d2b012b9" ]
[ "demo/dygraph/pruning/imagenet.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport math\nimport random\nimport numpy as np\nfrom PIL import Image\n\nfrom paddle.vision.datasets import DatasetFolder\nfrom paddle.vision.transforms import transforms\n\n\nclass ImageNetDataset(DatasetFolder):\n def __init__(self,\n path,\n mode='train',\n image_size=224,\n resize_short_size=256):\n super(ImageNetDataset, self).__init__(path)\n self.mode = mode\n\n self.samples = []\n list_file = \"train_list.txt\" if self.mode == \"train\" else \"val_list.txt\"\n with open(os.path.join(path, list_file), 'r') as f:\n for line in f:\n _image, _label = line.strip().split(\" \")\n _image = os.path.join(path, _image)\n self.samples.append((_image, int(_label)))\n normalize = transforms.Normalize(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])\n if self.mode == 'train':\n self.transform = transforms.Compose([\n transforms.RandomResizedCrop(image_size),\n transforms.RandomHorizontalFlip(), transforms.Transpose(),\n normalize\n ])\n else:\n self.transform = transforms.Compose([\n transforms.Resize(resize_short_size),\n transforms.CenterCrop(image_size), transforms.Transpose(),\n normalize\n ])\n\n def __getitem__(self, idx):\n img_path, label = self.samples[idx]\n img = Image.open(img_path).convert('RGB')\n label = np.array([label]).astype(np.int64)\n return self.transform(img), label\n\n def __len__(self):\n return 
len(self.samples)\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jelly6489/Stock-Proj
[ "3e7b1ad5cddc5b142f0069e024199fe969c7c7e8" ]
[ "com_blacktensor/cop/emo/model/emotion_dfo.py" ]
[ "import requests\nimport pandas as pd\nimport codecs\nimport numpy as np\nimport re\nfrom bs4 import BeautifulSoup\nfrom konlpy.tag import Twitter\nfrom collections import Counter\n\n# from sqlalchemy import Column, Integer, String, Date\n# # from sqlalchemy import create_engine\n\nfrom com_blacktensor.util.file_handler import FileHandler\nfrom com_blacktensor.cop.emo.model.emotion_kdd import keyword\n# # ============================================================\n# # ================== =====================\n# # ================== Preprocessing =====================\n# # ================== =====================\n# # ============================================================\nclass EmotionDfo(object):\n def __init__(self):\n print('-----------emotionDfo--------------')\n print(keyword)\n self.fileReader = FileHandler() \n\n def data_pro(self, keyword):\n # def data_pro(self, keyword):\n print('-----------emotionDfo--------------')\n\n word = []\n positive_word = []\n negative_word = []\n noun_list =[]\n poflag = []\n neflag = []\n\n po_key = []\n ne_key = []\n po_val = []\n ne_val = []\n\n\n file = open('{}.csv'.format(keyword), 'r', encoding='utf-8-sig')\n\n lists = file.readlines()\n file.close()\n \n twitter = Twitter()\n morphs = []\n\n for sentence in lists:\n morphs.append(twitter.pos(sentence))\n\n # print(morphs)\n\n pos = codecs.open('positive_words_self.txt', 'rb', encoding='utf-8-sig')\n\n while True:\n line = pos.readline()\n line = line.replace('\\n', '')\n positive_word.append(line)\n # keyword_text.append(line)\n\n if not line: break\n pos.close()\n\n neg = codecs.open('negative_words_self.txt', 'rb', encoding='utf-8-sig')\n\n while True:\n line = neg.readline()\n line = line.replace('\\n', '')\n negative_word.append(line)\n # keyword_text.append(line)\n\n if not line: break\n neg.close()\n\n for sentence in morphs : \n for word, text_tag in sentence :\n if text_tag in ['Noun']:\n noun_list.append(word)\n for x in positive_word:\n if x == word: 
\n poflag.append(x)\n \n for y in negative_word:\n if y == word:\n neflag.append(y)\n\n # print(\"부정적 :\", y)\n # if text_tag in ['Noun'] and (\"것\" not in word) and (\"내\" not in word) and (\"첫\" not in word) and \\\n # (\"나\" not in word) and (\"와\" not in word) and (\"식\" not in word) and (\"수\" not in word) and \\\n # (\"게\" not in word) and (\"말\" not in word):\n # noun_list.append(word)\n \n # if text_tag in ['Noun'] and (\"갑질\" not in word) and (\"논란\" not in word) and (\"폭리\" not in word) and \\\n # (\"허위\" not in word) and (\"과징금\" not in word) and (\"눈물\" not in word) and (\"피해\" not in word) and \\\n # (\"포화\" not in word) and (\"우롱\" not in word) and (\"위반\" not in word) and (\"리스크\" not in word) and \\\n # (\"사퇴\" not in word) and (\"급락\" not in word) and (\"하락\" not in word) and (\"폐업\" not in word) and \\\n # (\"불만\" not in word) and (\"산재\" not in word) and (\"닫아\" not in word) and (\"손해배상\" not in word) and \\\n # (\"구설수\" not in word) and (\"적발\" not in word) and (\"침해\" not in word) and (\"빨간불\" not in word) and \\\n # (\"취약\" not in word) and (\"불명예\" not in word) and (\"구형\" not in word) and (\"기소\" not in word) and \\\n # (\"반토막\" not in word) and (\"호소\" not in word) and (\"불매\" not in word) and (\"냉담\" not in word) and \\\n # (\"문제\" not in word) and (\"직격탄\" not in word) and (\"한숨\" not in word) and (\"불똥\" not in word) and \\\n # (\"항의\" not in word) and (\"싸늘\" not in word) and (\"일탈\" not in word) and (\"파문\" not in word) and \\\n # (\"횡령\" not in word) and (\"사과문\" not in word) and (\"여파\" not in word) and (\"울상\" not in word) and \\\n # (\"초토화\" not in word) and (\"급감\" not in word) and (\"우려\" not in word) and (\"중단\" not in word) and \\\n # (\"퇴출\" not in word) and (\"해지\" not in word) and (\"일베\" not in word) and (\"이물질\" not in word) and \\\n # (\"엉망\" not in word) and (\"소송\" not in word) and (\"하락\" not in word) and (\"매출하락\" not in word) and \\\n # (\"혐의\" not in word) and (\"부채\" not in word) and (\"과징금\" not in word) and 
(\"포기\" not in word) and \\\n # (\"약세\" not in word) and (\"최악\" not in word) and (\"손실\" not in word) and (\"의혹\" not in word):\n # positive_word.append(word)\n\n # elif text_tag in ['Noun'] and (\"MOU\" not in word) and (\"제휴\" not in word) and (\"주목\" not in word) and \\\n # (\"호응\" not in word) and (\"돌파\" not in word) and (\"이목\" not in word) and (\"수상\" not in word) and \\\n # (\"입점\" not in word) and (\"인기\" not in word) and (\"열풍\" not in word) and (\"진화\" not in word) and \\\n # (\"대박\" not in word) and (\"순항\" not in word) and (\"유치\" not in word) and (\"1위\" not in word) and \\\n # (\"출시\" not in word) and (\"오픈\" not in word) and (\"돌풍\" not in word) and (\"인싸\" not in word) and \\\n # (\"줄서서\" not in word) and (\"대세\" not in word) and (\"트렌드\" not in word) and (\"불티\" not in word) and \\\n # (\"진출\" not in word) and (\"체결\" not in word) and (\"증가\" not in word) and (\"기부\" not in word) and \\\n # (\"신제품\" not in word) and (\"신상\" not in word) and (\"최고\" not in word) and (\"새로운\" not in word) and \\\n # (\"착한\" not in word) and (\"신기록\" not in word) and (\"전망\" not in word) and (\"협력\" not in word) and \\\n # (\"역대\" not in word) and (\"상승\" not in word) and (\"늘어\" not in word) and (\"승인\" not in word):\n # negative_word.append(word)\n\n count_po = Counter(poflag)\n count_ne = Counter(neflag)\n po_words = dict(count_po.most_common())\n ne_words = dict(count_ne.most_common())\n\n # 워드클라우드로 명사만 추출\n '''\n ['창립', '주년', '삼성', '전자', '이건희', '회장', '도전', '혁신', '삼성', '전자', '삼성', '포럼', '개최', '김기남', '대표', \n '핵심', '기술', '발전', '현', '코스피', '코스닥', '장', '동반', '상승', '덕성', '시스', '웍', '한국', '컴퓨터', '삼성', '전자\n ', '창립', '주년', '기념', '개최', '이재용', '부회장', '불참', '롯데', '하이마트', '온라인', '오늘', '역대', '빅', '하트', ' \n 일', '시작', '손연기', '칼럼', '차', '산업혁명', '시대', '문제', '일자리', '삼성', '전자', '모바일', '신제품', '엑시노스', ' \n ...\n '멘토', '체험', '활동', '김기남', '삼성', '부회장', '로', '코로나', '해결', '위해', '전세계', '연구자', '협력', '순위', '주식\n ', '부자', '위', '눈앞', '이재용', '뉴', '파워', '프라', '마', '규모', '유상증자', '결정', '삼성', 
'전자', '창립', '주념', ' \n 기념', '회장', '도전', '혁신', '계승', '삼성', '전자', '창립', '주년', '기념', '개최']\n '''\n\n po_key = po_words.keys()\n po_val = po_words.values()\n\n ne_key = ne_words.keys()\n ne_val = ne_words.values()\n\n print(\"\\n긍정적인 단어 :\", po_key, po_val)\n print(\"부정적인 단어 :\", ne_key, ne_val)\n \n po_df = pd.DataFrame(list(po_words.items()), columns=['positive', 'pos_count'])\n ne_df = pd.DataFrame(list(ne_words.items()), columns=['negative', 'neg_count'])\n\n df = pd.concat([po_df,ne_df], axis=1)\n\n df.loc[:, 'keyword'] = keyword\n\n#\n df.fillna(0, inplace=True)\n#\n\n print(df.head())\n df.to_csv(keyword + '_word.csv', encoding='utf-8-sig')\n\n\n\n '''\n 긍정적인 단어 : {'상승': 141, '인기': 66, '출시': 60, '전망': 36, '오픈': 30, \n '돌파': 19, '트렌드': 12, '체결': 12, '증가': 12, '역대': 11, '협력': 11, \n '주목': 11, '미소': 8, '기부': 8, '승인': 6, '최고': 6, '대세': 5, '유치': 4, \n '수상': 4, '불티': 2, '부상': 2, '순항': 2, '호응': 1, '진출': 1}\n 부정적인 단어 : {'급감': 233, '여파': 163, '하락': 162, '피해': 115, \n '직격탄': 83, '논란': 61, '중단': 41, '손실': 39, '반토 막': 34, '최악': 33, \n '포기': 32, '폐업': 25, '급락': 25, '우려': 24, '불매': 14, '눈물': 13, '\n 매각': 10, '호소': 9, '울상': 7, '문제': 6, '불만': 6, '약세': 5, '한숨': 5, \n '일베': 4, '해지': 4, '초토화': 3, '참혹': 3, '폐점': 2, '파문': 2, \n '과징금': 2, '항의': 1, '소송': 1, '불명예': 1, '리스크': 1, '갑질': 1, \n '침해': 1, '발끈': 1}\n '''\n print('---------------EmotionDfo Success----------------')\n return df\n\n\n\n def get_df(self, keyword):\n # file = open('{}.csv'.format(keyword), 'r', encoding='utf-8-sig')\n\n news_df = pd.read_csv('{}.csv'.format(keyword), index_col=[0], encoding='utf-8-sig')\n # C:/Users/Admin/VscProject/BlackTensor_Test/\n\n news_df.rename( columns={'Unnamed: 0':'name'}, inplace=True )\n news_df.to_csv(keyword + '.csv', encoding='utf-8-sig')\n print('-----------------get_df------------------')\n print(news_df)\n return news_df\n # return pd.DataFrame(data, columns=self.colums)\n data_pro(0, keyword)\n # get_df(0, keyword)" ]
[ [ "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
martinsbruveris/pytorch-image-models
[ "65419f60cc389f197ed555e1fc3b7f986708a683" ]
[ "timm/models/resnet.py" ]
[ "\"\"\"PyTorch ResNet\n\nThis started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with\nadditional dropout and dynamic global avg/max pool.\n\nResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman\nCopyright 2020 Ross Wightman\n\"\"\"\nimport math\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD\nfrom .helpers import build_model_with_cfg\nfrom .layers import DropBlock2d, DropPath, AvgPool2dSame, BlurPool2d, create_attn, get_attn, create_classifier\nfrom .registry import register_model\n\n__all__ = ['ResNet', 'BasicBlock', 'Bottleneck'] # model_registry will add each entrypoint fn to this\n\n\ndef _cfg(url='', **kwargs):\n return {\n 'url': url,\n 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),\n 'crop_pct': 0.875, 'interpolation': 'bilinear',\n 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,\n 'first_conv': 'conv1', 'classifier': 'fc',\n **kwargs\n }\n\n\ndefault_cfgs = {\n # ResNet and Wide ResNet\n 'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'),\n 'resnet18d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth',\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnet34': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'),\n 'resnet34d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth',\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnet26': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth',\n interpolation='bicubic'),\n 'resnet26d': _cfg(\n 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth',\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnet26t': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/resnet26t_256_ra2-6f6fa748.pth',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94),\n 'resnet50': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet50_a1_0-14fe96d1.pth',\n interpolation='bicubic', crop_pct=0.95),\n 'resnet50d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth',\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnet50t': _cfg(\n url='',\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnet101': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet101_a1h-36d3f2aa.pth',\n interpolation='bicubic', crop_pct=0.95),\n 'resnet101d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),\n crop_pct=1.0, test_input_size=(3, 320, 320)),\n 'resnet152': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rsb-weights/resnet152_a1h-dc400468.pth',\n interpolation='bicubic', crop_pct=0.95),\n 'resnet152d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),\n crop_pct=1.0, test_input_size=(3, 320, 320)),\n 'resnet200': _cfg(url='', interpolation='bicubic'),\n 'resnet200d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth',\n 
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),\n crop_pct=1.0, test_input_size=(3, 320, 320)),\n 'tv_resnet34': _cfg(url='https://download.pytorch.org/models/resnet34-333f7ec4.pth'),\n 'tv_resnet50': _cfg(url='https://download.pytorch.org/models/resnet50-19c8e357.pth'),\n 'tv_resnet101': _cfg(url='https://download.pytorch.org/models/resnet101-5d3b4d8f.pth'),\n 'tv_resnet152': _cfg(url='https://download.pytorch.org/models/resnet152-b121ed2d.pth'),\n 'wide_resnet50_2': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth',\n interpolation='bicubic'),\n 'wide_resnet101_2': _cfg(url='https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth'),\n\n # ResNeXt\n 'resnext50_32x4d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50_32x4d_ra-d733960d.pth',\n interpolation='bicubic'),\n 'resnext50d_32x4d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnext50d_32x4d-103e99f8.pth',\n interpolation='bicubic',\n first_conv='conv1.0'),\n 'resnext101_32x4d': _cfg(url=''),\n 'resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth'),\n 'resnext101_64x4d': _cfg(url=''),\n 'tv_resnext50_32x4d': _cfg(url='https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth'),\n\n # ResNeXt models - Weakly Supervised Pretraining on Instagram Hashtags\n # from https://github.com/facebookresearch/WSL-Images\n # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only.\n 'ig_resnext101_32x8d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x8-c38310e5.pth'),\n 'ig_resnext101_32x16d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x16-c6f796b0.pth'),\n 'ig_resnext101_32x32d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x32-e4b90b00.pth'),\n 
'ig_resnext101_32x48d': _cfg(url='https://download.pytorch.org/models/ig_resnext101_32x48-3e41cc8a.pth'),\n\n # Semi-Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models\n # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only.\n 'ssl_resnet18': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet18-d92f0530.pth'),\n 'ssl_resnet50': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnet50-08389792.pth'),\n 'ssl_resnext50_32x4d': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext50_32x4-ddb3e555.pth'),\n 'ssl_resnext101_32x4d': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x4-dc43570a.pth'),\n 'ssl_resnext101_32x8d': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x8-2cfe2f8b.pth'),\n 'ssl_resnext101_32x16d': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_supervised_resnext101_32x16-15fffa57.pth'),\n\n # Semi-Weakly Supervised ResNe*t models from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models\n # Please note the CC-BY-NC 4.0 license on theses weights, non-commercial use only.\n 'swsl_resnet18': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet18-118f1556.pth'),\n 'swsl_resnet50': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnet50-16a12f1b.pth'),\n 'swsl_resnext50_32x4d': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext50_32x4-72679e44.pth'),\n 'swsl_resnext101_32x4d': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x4-3f87e46b.pth'),\n 
'swsl_resnext101_32x8d': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x8-b4712904.pth'),\n 'swsl_resnext101_32x16d': _cfg(\n url='https://dl.fbaipublicfiles.com/semiweaksupervision/model_files/semi_weakly_supervised_resnext101_32x16-f3559a9c.pth'),\n\n # Squeeze-Excitation ResNets, to eventually replace the models in senet.py\n 'seresnet18': _cfg(\n url='',\n interpolation='bicubic'),\n 'seresnet34': _cfg(\n url='',\n interpolation='bicubic'),\n 'seresnet50': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth',\n interpolation='bicubic'),\n 'seresnet50t': _cfg(\n url='',\n interpolation='bicubic',\n first_conv='conv1.0'),\n 'seresnet101': _cfg(\n url='',\n interpolation='bicubic'),\n 'seresnet152': _cfg(\n url='',\n interpolation='bicubic'),\n 'seresnet152d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),\n crop_pct=1.0, test_input_size=(3, 320, 320)\n ),\n 'seresnet200d': _cfg(\n url='',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),\n 'seresnet269d': _cfg(\n url='',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),\n\n\n # Squeeze-Excitation ResNeXts, to eventually replace the models in senet.py\n 'seresnext26d_32x4d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26d_32x4d-80fa48a3.pth',\n interpolation='bicubic',\n first_conv='conv1.0'),\n 'seresnext26t_32x4d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26tn_32x4d-569cb627.pth',\n interpolation='bicubic',\n first_conv='conv1.0'),\n 'seresnext50_32x4d': _cfg(\n 
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext50_32x4d_racm-a304a460.pth',\n interpolation='bicubic'),\n 'seresnext101_32x4d': _cfg(\n url='',\n interpolation='bicubic'),\n 'seresnext101_32x8d': _cfg(\n url='',\n interpolation='bicubic'),\n 'senet154': _cfg(\n url='',\n interpolation='bicubic',\n first_conv='conv1.0'),\n\n # Efficient Channel Attention ResNets\n 'ecaresnet26t': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet26t_ra2-46609757.pth',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),\n crop_pct=0.95, test_input_size=(3, 320, 320)),\n 'ecaresnetlight': _cfg(\n url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth',\n interpolation='bicubic'),\n 'ecaresnet50d': _cfg(\n url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth',\n interpolation='bicubic',\n first_conv='conv1.0'),\n 'ecaresnet50d_pruned': _cfg(\n url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth',\n interpolation='bicubic',\n first_conv='conv1.0'),\n 'ecaresnet50t': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet50t_ra2-f7ac63c4.pth',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),\n crop_pct=0.95, test_input_size=(3, 320, 320)),\n 'ecaresnet101d': _cfg(\n url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth',\n interpolation='bicubic', first_conv='conv1.0'),\n 'ecaresnet101d_pruned': _cfg(\n url='https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth',\n interpolation='bicubic',\n 
first_conv='conv1.0'),\n 'ecaresnet200d': _cfg(\n url='',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), crop_pct=0.94, pool_size=(8, 8)),\n 'ecaresnet269d': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecaresnet269d_320_ra2-7baa55cb.pth',\n interpolation='bicubic', first_conv='conv1.0', input_size=(3, 320, 320), pool_size=(10, 10),\n crop_pct=1.0, test_input_size=(3, 352, 352)),\n\n # Efficient Channel Attention ResNeXts\n 'ecaresnext26t_32x4d': _cfg(\n url='',\n interpolation='bicubic', first_conv='conv1.0'),\n 'ecaresnext50t_32x4d': _cfg(\n url='',\n interpolation='bicubic', first_conv='conv1.0'),\n\n # ResNets with anti-aliasing blur pool\n 'resnetblur18': _cfg(\n interpolation='bicubic'),\n 'resnetblur50': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth',\n interpolation='bicubic'),\n\n # ResNet-RS models\n 'resnetrs50': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs50_ema-6b53758b.pth',\n input_size=(3, 160, 160), pool_size=(5, 5), crop_pct=0.91, test_input_size=(3, 224, 224),\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnetrs101': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs101_i192_ema-1509bbf6.pth',\n input_size=(3, 192, 192), pool_size=(6, 6), crop_pct=0.94, test_input_size=(3, 288, 288),\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnetrs152': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs152_i256_ema-a9aff7f9.pth',\n input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320),\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnetrs200': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs200_ema-623d2f59.pth',\n 
input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 320, 320),\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnetrs270': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs270_ema-b40e674c.pth',\n input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=1.0, test_input_size=(3, 352, 352),\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnetrs350': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs350_i256_ema-5a1aa8f1.pth',\n input_size=(3, 288, 288), pool_size=(9, 9), crop_pct=1.0, test_input_size=(3, 384, 384),\n interpolation='bicubic', first_conv='conv1.0'),\n 'resnetrs420': _cfg(\n url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-rs-weights/resnetrs420_ema-972dee69.pth',\n input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=1.0, test_input_size=(3, 416, 416),\n interpolation='bicubic', first_conv='conv1.0'),\n}\n\n\ndef get_padding(kernel_size, stride, dilation=1):\n padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2\n return padding\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,\n reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,\n attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):\n super(BasicBlock, self).__init__()\n\n assert cardinality == 1, 'BasicBlock only supports cardinality of 1'\n assert base_width == 64, 'BasicBlock does not support changing base width'\n first_planes = planes // reduce_first\n outplanes = planes * self.expansion\n first_dilation = first_dilation or dilation\n use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)\n\n self.conv1 = nn.Conv2d(\n inplanes, first_planes, kernel_size=3, stride=1 if use_aa else stride, padding=first_dilation,\n dilation=first_dilation, 
bias=False)\n self.bn1 = norm_layer(first_planes)\n self.act1 = act_layer(inplace=True)\n self.aa = aa_layer(channels=first_planes, stride=stride) if use_aa else None\n\n self.conv2 = nn.Conv2d(\n first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False)\n self.bn2 = norm_layer(outplanes)\n\n self.se = create_attn(attn_layer, outplanes)\n\n self.act2 = act_layer(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n self.drop_block = drop_block\n self.drop_path = drop_path\n\n def zero_init_last_bn(self):\n nn.init.zeros_(self.bn2.weight)\n\n def forward(self, x):\n shortcut = x\n\n x = self.conv1(x)\n x = self.bn1(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n x = self.act1(x)\n if self.aa is not None:\n x = self.aa(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n\n if self.se is not None:\n x = self.se(x)\n\n if self.drop_path is not None:\n x = self.drop_path(x)\n\n if self.downsample is not None:\n shortcut = self.downsample(shortcut)\n x += shortcut\n x = self.act2(x)\n\n return x\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64,\n reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,\n attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):\n super(Bottleneck, self).__init__()\n\n width = int(math.floor(planes * (base_width / 64)) * cardinality)\n first_planes = width // reduce_first\n outplanes = planes * self.expansion\n first_dilation = first_dilation or dilation\n use_aa = aa_layer is not None and (stride == 2 or first_dilation != dilation)\n\n self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=1, bias=False)\n self.bn1 = norm_layer(first_planes)\n self.act1 = act_layer(inplace=True)\n\n self.conv2 = nn.Conv2d(\n first_planes, width, kernel_size=3, stride=1 
if use_aa else stride,\n padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False)\n self.bn2 = norm_layer(width)\n self.act2 = act_layer(inplace=True)\n self.aa = aa_layer(channels=width, stride=stride) if use_aa else None\n\n self.conv3 = nn.Conv2d(width, outplanes, kernel_size=1, bias=False)\n self.bn3 = norm_layer(outplanes)\n\n self.se = create_attn(attn_layer, outplanes)\n\n self.act3 = act_layer(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.dilation = dilation\n self.drop_block = drop_block\n self.drop_path = drop_path\n\n def zero_init_last_bn(self):\n nn.init.zeros_(self.bn3.weight)\n\n def forward(self, x):\n shortcut = x\n\n x = self.conv1(x)\n x = self.bn1(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n x = self.act1(x)\n\n x = self.conv2(x)\n x = self.bn2(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n x = self.act2(x)\n if self.aa is not None:\n x = self.aa(x)\n\n x = self.conv3(x)\n x = self.bn3(x)\n if self.drop_block is not None:\n x = self.drop_block(x)\n\n if self.se is not None:\n x = self.se(x)\n\n if self.drop_path is not None:\n x = self.drop_path(x)\n\n if self.downsample is not None:\n shortcut = self.downsample(shortcut)\n x += shortcut\n x = self.act3(x)\n\n return x\n\n\ndef downsample_conv(\n in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):\n norm_layer = norm_layer or nn.BatchNorm2d\n kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size\n first_dilation = (first_dilation or dilation) if kernel_size > 1 else 1\n p = get_padding(kernel_size, stride, first_dilation)\n\n return nn.Sequential(*[\n nn.Conv2d(\n in_channels, out_channels, kernel_size, stride=stride, padding=p, dilation=first_dilation, bias=False),\n norm_layer(out_channels)\n ])\n\n\ndef downsample_avg(\n in_channels, out_channels, kernel_size, stride=1, dilation=1, first_dilation=None, norm_layer=None):\n norm_layer = norm_layer 
or nn.BatchNorm2d\n avg_stride = stride if dilation == 1 else 1\n if stride == 1 and dilation == 1:\n pool = nn.Identity()\n else:\n avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d\n pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)\n\n return nn.Sequential(*[\n pool,\n nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0, bias=False),\n norm_layer(out_channels)\n ])\n\n\ndef drop_blocks(drop_block_rate=0.):\n return [\n None, None,\n DropBlock2d(drop_block_rate, 5, 0.25) if drop_block_rate else None,\n DropBlock2d(drop_block_rate, 3, 1.00) if drop_block_rate else None]\n\n\ndef make_blocks(\n block_fn, channels, block_repeats, inplanes, reduce_first=1, output_stride=32,\n down_kernel_size=1, avg_down=False, drop_block_rate=0., drop_path_rate=0., **kwargs):\n stages = []\n feature_info = []\n net_num_blocks = sum(block_repeats)\n net_block_idx = 0\n net_stride = 4\n dilation = prev_dilation = 1\n for stage_idx, (planes, num_blocks, db) in enumerate(zip(channels, block_repeats, drop_blocks(drop_block_rate))):\n stage_name = f'layer{stage_idx + 1}' # never liked this name, but weight compat requires it\n stride = 1 if stage_idx == 0 else 2\n if net_stride >= output_stride:\n dilation *= stride\n stride = 1\n else:\n net_stride *= stride\n\n downsample = None\n if stride != 1 or inplanes != planes * block_fn.expansion:\n down_kwargs = dict(\n in_channels=inplanes, out_channels=planes * block_fn.expansion, kernel_size=down_kernel_size,\n stride=stride, dilation=dilation, first_dilation=prev_dilation, norm_layer=kwargs.get('norm_layer'))\n downsample = downsample_avg(**down_kwargs) if avg_down else downsample_conv(**down_kwargs)\n\n block_kwargs = dict(reduce_first=reduce_first, dilation=dilation, drop_block=db, **kwargs)\n blocks = []\n for block_idx in range(num_blocks):\n downsample = downsample if block_idx == 0 else None\n stride = stride if block_idx == 0 else 1\n block_dpr = drop_path_rate * 
            net_block_idx / (net_num_blocks - 1)  # stochastic depth linear decay rule
            blocks.append(block_fn(
                inplanes, planes, stride, downsample, first_dilation=prev_dilation,
                drop_path=DropPath(block_dpr) if block_dpr > 0. else None, **block_kwargs))
            prev_dilation = dilation
            # each block outputs planes * expansion channels; next block consumes them
            inplanes = planes * block_fn.expansion
            net_block_idx += 1

        stages.append((stage_name, nn.Sequential(*blocks)))
        feature_info.append(dict(num_chs=inplanes, reduction=net_stride, module=stage_name))

    return stages, feature_info


class ResNet(nn.Module):
    """ResNet / ResNeXt / SE-ResNeXt / SE-Net

    This class implements all variants of ResNet, ResNeXt, SE-ResNeXt, and SENet that
      * have > 1 stride in the 3x3 conv layer of bottleneck
      * have conv-bn-act ordering

    This ResNet impl supports a number of stem and downsample options based on the v1c, v1d, v1e, and v1s
    variants included in the MXNet Gluon ResNetV1b model. The C and D variants are also discussed in the
    'Bag of Tricks' paper: https://arxiv.org/pdf/1812.01187. The B variant is equivalent to torchvision default.

    ResNet variants (the same modifications can be used in SE/ResNeXt models as well):
      * normal, b - 7x7 stem, stem_width = 64, same as torchvision ResNet, NVIDIA ResNet 'v1.5', Gluon v1b
      * c - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64)
      * d - 3 layer deep 3x3 stem, stem_width = 32 (32, 32, 64), average pool in downsample
      * e - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128), average pool in downsample
      * s - 3 layer deep 3x3 stem, stem_width = 64 (64, 64, 128)
      * t - 3 layer deep 3x3 stem, stem width = 32 (24, 48, 64), average pool in downsample
      * tn - 3 layer deep 3x3 stem, stem width = 32 (24, 32, 64), average pool in downsample

    ResNeXt
      * normal - 7x7 stem, stem_width = 64, standard cardinality and base widths
      * same c,d, e, s variants as ResNet can be enabled

    SE-ResNeXt
      * normal - 7x7 stem, stem_width = 64
      * same c, d, e, s variants as ResNet can be enabled

    SENet-154 - 3 layer deep 3x3 stem (same as v1c-v1s), stem_width = 64, cardinality=64,
    reduction by 2 on width of first bottleneck convolution, 3x3 downsample convs after first block

    Parameters
    ----------
    block : Block
        Class for the residual block. Options are BasicBlockGl, BottleneckGl.
    layers : list of int
        Numbers of layers in each block
    num_classes : int, default 1000
        Number of classification classes.
    in_chans : int, default 3
        Number of input (color) channels.
    cardinality : int, default 1
        Number of convolution groups for 3x3 conv in Bottleneck.
    base_width : int, default 64
        Factor determining bottleneck channels. `planes * base_width / 64 * cardinality`
    stem_width : int, default 64
        Number of channels in stem convolutions
    stem_type : str, default ''
        The type of stem:
          * '', default - a single 7x7 conv with a width of stem_width
          * 'deep' - three 3x3 convolution layers of widths stem_width, stem_width, stem_width * 2
          * 'deep_tiered' - three 3x3 conv layers of widths stem_width//4 * 3, stem_width, stem_width * 2
    block_reduce_first: int, default 1
        Reduction factor for first convolution output width of residual blocks,
        1 for all archs except senets, where 2
    down_kernel_size: int, default 1
        Kernel size of residual block downsampling path, 1x1 for most archs, 3x3 for senets
    avg_down : bool, default False
        Whether to use average pooling for projection skip connection between stages/downsample.
    output_stride : int, default 32
        Set the output stride of the network, 32, 16, or 8. Typically used in segmentation.
    act_layer : nn.Module, activation layer
    norm_layer : nn.Module, normalization layer
    aa_layer : nn.Module, anti-aliasing layer
    drop_rate : float, default 0.
        Dropout probability before classifier, for training
    global_pool : str, default 'avg'
        Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax'
    """

    def __init__(self, block, layers, num_classes=1000, in_chans=3,
                 cardinality=1, base_width=64, stem_width=64, stem_type='', replace_stem_pool=False,
                 output_stride=32, block_reduce_first=1, down_kernel_size=1, avg_down=False,
                 act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None, drop_rate=0.0, drop_path_rate=0.,
                 drop_block_rate=0., global_pool='avg', zero_init_last_bn=True, block_args=None):
        block_args = block_args or dict()
        assert output_stride in (8, 16, 32)
        self.num_classes = num_classes
        self.drop_rate = drop_rate
        super(ResNet, self).__init__()

        # Stem: either a single 7x7 conv or ('deep' variants) three stacked 3x3 convs.
        deep_stem = 'deep' in stem_type
        inplanes = stem_width * 2 if deep_stem else 64
        if deep_stem:
            stem_chs = (stem_width, stem_width)
            if 'tiered' in stem_type:
                # tiered stem narrows the first conv to 3/4 of stem_width
                stem_chs = (3 * (stem_width // 4), stem_width)
            self.conv1 = nn.Sequential(*[
                nn.Conv2d(in_chans, stem_chs[0], 3, stride=2, padding=1, bias=False),
                norm_layer(stem_chs[0]),
                act_layer(inplace=True),
                nn.Conv2d(stem_chs[0], stem_chs[1], 3, stride=1, padding=1, bias=False),
                norm_layer(stem_chs[1]),
                act_layer(inplace=True),
                nn.Conv2d(stem_chs[1], inplanes, 3, stride=1, padding=1, bias=False)])
        else:
            self.conv1 = nn.Conv2d(in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(inplanes)
        self.act1 = act_layer(inplace=True)
        self.feature_info = [dict(num_chs=inplanes, reduction=2, module='act1')]

        # Stem Pooling: optionally replace max-pool with a strided conv (ResNet-RS style);
        # when an anti-aliasing layer is supplied it takes over the stride-2 downsample.
        if replace_stem_pool:
            self.maxpool = nn.Sequential(*filter(None, [
                nn.Conv2d(inplanes, inplanes, 3, stride=1 if aa_layer else 2, padding=1, bias=False),
                aa_layer(channels=inplanes, stride=2) if aa_layer else None,
                norm_layer(inplanes),
                act_layer(inplace=True)
            ]))
        else:
            if aa_layer is not None:
                self.maxpool = nn.Sequential(*[
                    nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
                    aa_layer(channels=inplanes, stride=2)])
            else:
                self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Feature Blocks: four residual stages built by make_blocks().
        channels = [64, 128, 256, 512]
        stage_modules, stage_feature_info = make_blocks(
            block, channels, layers, inplanes, cardinality=cardinality, base_width=base_width,
            output_stride=output_stride, reduce_first=block_reduce_first, avg_down=avg_down,
            down_kernel_size=down_kernel_size, act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer,
            drop_block_rate=drop_block_rate, drop_path_rate=drop_path_rate, **block_args)
        for stage in stage_modules:
            self.add_module(*stage)  # layer1, layer2, etc
        self.feature_info.extend(stage_feature_info)

        # Head (Pooling and Classifier)
        self.num_features = 512 * block.expansion
        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

        self.init_weights(zero_init_last_bn=zero_init_last_bn)

    def init_weights(self, zero_init_last_bn=True):
        """Initialize conv weights (Kaiming) and BN affine params; optionally zero the
        last BN in each residual block (via each block's `zero_init_last_bn` hook)."""
        for n, m in self.named_modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
        if zero_init_last_bn:
            for m in self.modules():
                if hasattr(m, 'zero_init_last_bn'):
                    m.zero_init_last_bn()

    def get_classifier(self):
        """Return the classifier (final fully-connected) module."""
        return self.fc

    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace the pooling + classifier head for a new number of classes."""
        self.num_classes = num_classes
        self.global_pool, self.fc = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)

    def forward_features(self, x):
        """Run the stem and the four residual stages, returning the feature map
        (before global pooling / classification)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        """Full forward pass: features -> global pool -> (optional dropout) -> logits."""
        x = self.forward_features(x)
        x = self.global_pool(x)
        if self.drop_rate:
            x = F.dropout(x, p=float(self.drop_rate), training=self.training)
        x = self.fc(x)
        return x


def _create_resnet(variant, pretrained=False, **kwargs):
    # Shared factory helper: builds a ResNet for the named config variant.
    return build_model_with_cfg(
        ResNet,
        variant, pretrained,
        default_cfg=default_cfgs[variant],
        **kwargs)


# ---- Registered model factories. Each builds a kwargs dict describing the
# architecture and delegates to _create_resnet() with the registry variant name.

@register_model
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.
    """
    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
    return _create_resnet('resnet18', pretrained, **model_args)


@register_model
def resnet18d(pretrained=False, **kwargs):
    """Constructs a ResNet-18-D model.
    """
    model_args = dict(
        block=BasicBlock, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
    return _create_resnet('resnet18d', pretrained, **model_args)


@register_model
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.
    """
    model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)
    return _create_resnet('resnet34', pretrained, **model_args)


@register_model
def resnet34d(pretrained=False, **kwargs):
    """Constructs a ResNet-34-D model.
    """
    model_args = dict(
        block=BasicBlock, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
    return _create_resnet('resnet34d', pretrained, **model_args)


@register_model
def resnet26(pretrained=False, **kwargs):
    """Constructs a ResNet-26 model.
    """
    model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], **kwargs)
    return _create_resnet('resnet26', pretrained, **model_args)


@register_model
def resnet26t(pretrained=False, **kwargs):
    """Constructs a ResNet-26-T model.
    """
    model_args = dict(
        block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs)
    return _create_resnet('resnet26t', pretrained, **model_args)


@register_model
def resnet26d(pretrained=False, **kwargs):
    """Constructs a ResNet-26-D model.
    """
    model_args = dict(block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
    return _create_resnet('resnet26d', pretrained, **model_args)


@register_model
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
    return _create_resnet('resnet50', pretrained, **model_args)


@register_model
def resnet50d(pretrained=False, **kwargs):
    """Constructs a ResNet-50-D model.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
    return _create_resnet('resnet50d', pretrained, **model_args)


@register_model
def resnet50t(pretrained=False, **kwargs):
    """Constructs a ResNet-50-T model.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True, **kwargs)
    return _create_resnet('resnet50t', pretrained, **model_args)


@register_model
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
    return _create_resnet('resnet101', pretrained, **model_args)


@register_model
def resnet101d(pretrained=False, **kwargs):
    """Constructs a ResNet-101-D model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
    return _create_resnet('resnet101d', pretrained, **model_args)


@register_model
def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)
    return _create_resnet('resnet152', pretrained, **model_args)


@register_model
def resnet152d(pretrained=False, **kwargs):
    """Constructs a ResNet-152-D model.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
    return _create_resnet('resnet152d', pretrained, **model_args)


@register_model
def resnet200(pretrained=False, **kwargs):
    """Constructs a ResNet-200 model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 24, 36, 3], **kwargs)
    return _create_resnet('resnet200', pretrained, **model_args)


@register_model
def resnet200d(pretrained=False, **kwargs):
    """Constructs a ResNet-200-D model.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True, **kwargs)
    return _create_resnet('resnet200d', pretrained, **model_args)


@register_model
def tv_resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model with original Torchvision weights.
    """
    model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], **kwargs)
    return _create_resnet('tv_resnet34', pretrained, **model_args)


@register_model
def tv_resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model with original Torchvision weights.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
    return _create_resnet('tv_resnet50', pretrained, **model_args)


@register_model
def tv_resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model w/ Torchvision pretrained weights.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
    return _create_resnet('tv_resnet101', pretrained, **model_args)


@register_model
def tv_resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model w/ Torchvision pretrained weights.
    """
    model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], **kwargs)
    return _create_resnet('tv_resnet152', pretrained, **model_args)


@register_model
def wide_resnet50_2(pretrained=False, **kwargs):
    """Constructs a Wide ResNet-50-2 model.
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. 
    last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], base_width=128, **kwargs)
    return _create_resnet('wide_resnet50_2', pretrained, **model_args)


@register_model
def wide_resnet101_2(pretrained=False, **kwargs):
    """Constructs a Wide ResNet-101-2 model.
    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], base_width=128, **kwargs)
    return _create_resnet('wide_resnet101_2', pretrained, **model_args)


@register_model
def resnext50_32x4d(pretrained=False, **kwargs):
    """Constructs a ResNeXt50-32x4d model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
    return _create_resnet('resnext50_32x4d', pretrained, **model_args)


@register_model
def resnext50d_32x4d(pretrained=False, **kwargs):
    """Constructs a ResNeXt50d-32x4d model. ResNext50 w/ deep stem & avg pool downsample
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
        stem_width=32, stem_type='deep', avg_down=True, **kwargs)
    return _create_resnet('resnext50d_32x4d', pretrained, **model_args)


@register_model
def resnext101_32x4d(pretrained=False, **kwargs):
    """Constructs a ResNeXt-101 32x4d model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
    return _create_resnet('resnext101_32x4d', pretrained, **model_args)


@register_model
def resnext101_32x8d(pretrained=False, **kwargs):
    """Constructs a ResNeXt-101 32x8d model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
    return _create_resnet('resnext101_32x8d', pretrained, **model_args)


@register_model
def resnext101_64x4d(pretrained=False, **kwargs):
    """Constructs a ResNeXt101-64x4d model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=64, base_width=4, **kwargs)
    return _create_resnet('resnext101_64x4d', pretrained, **model_args)


@register_model
def tv_resnext50_32x4d(pretrained=False, **kwargs):
    """Constructs a ResNeXt50-32x4d model with original Torchvision weights.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
    return _create_resnet('tv_resnext50_32x4d', pretrained, **model_args)


# NOTE: the ig_/ssl_/swsl_ factories below default pretrained=True — the whole
# point of these variants is their externally trained weights.

@register_model
def ig_resnext101_32x8d(pretrained=True, **kwargs):
    """Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
    and finetuned on ImageNet from Figure 5 in
    `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
    Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
    return _create_resnet('ig_resnext101_32x8d', pretrained, **model_args)


@register_model
def ig_resnext101_32x16d(pretrained=True, **kwargs):
    """Constructs a ResNeXt-101 32x16 model pre-trained on weakly-supervised data
    and finetuned on ImageNet from Figure 5 in
    `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
    Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
    return _create_resnet('ig_resnext101_32x16d', pretrained, **model_args)


@register_model
def ig_resnext101_32x32d(pretrained=True, **kwargs):
    """Constructs a ResNeXt-101 32x32 model pre-trained on weakly-supervised data
    and finetuned on ImageNet from Figure 5 in
    `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
    Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=32, **kwargs)
    return _create_resnet('ig_resnext101_32x32d', pretrained, **model_args)


@register_model
def ig_resnext101_32x48d(pretrained=True, **kwargs):
    """Constructs a ResNeXt-101 32x48 model pre-trained on weakly-supervised data
    and finetuned on ImageNet from Figure 5 in
    `"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
    Weights from https://pytorch.org/hub/facebookresearch_WSL-Images_resnext/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=48, **kwargs)
    return _create_resnet('ig_resnext101_32x48d', pretrained, **model_args)


@register_model
def ssl_resnet18(pretrained=True, **kwargs):
    """Constructs a semi-supervised ResNet-18 model pre-trained on YFCC100M dataset and finetuned on ImageNet
    `"Billion-scale Semi-Supervised Learning for Image Classification" 
    <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
    return _create_resnet('ssl_resnet18', pretrained, **model_args)


@register_model
def ssl_resnet50(pretrained=True, **kwargs):
    """Constructs a semi-supervised ResNet-50 model pre-trained on YFCC100M dataset and finetuned on ImageNet
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
    return _create_resnet('ssl_resnet50', pretrained, **model_args)


@register_model
def ssl_resnext50_32x4d(pretrained=True, **kwargs):
    """Constructs a semi-supervised ResNeXt-50 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
    return _create_resnet('ssl_resnext50_32x4d', pretrained, **model_args)


@register_model
def ssl_resnext101_32x4d(pretrained=True, **kwargs):
    """Constructs a semi-supervised ResNeXt-101 32x4 model pre-trained on YFCC100M dataset and finetuned on ImageNet
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
    return _create_resnet('ssl_resnext101_32x4d', pretrained, **model_args)


@register_model
def ssl_resnext101_32x8d(pretrained=True, **kwargs):
    """Constructs a semi-supervised ResNeXt-101 32x8 model pre-trained on YFCC100M dataset and finetuned on ImageNet
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
    return _create_resnet('ssl_resnext101_32x8d', pretrained, **model_args)


@register_model
def ssl_resnext101_32x16d(pretrained=True, **kwargs):
    """Constructs a semi-supervised ResNeXt-101 32x16 model pre-trained on YFCC100M dataset and finetuned on ImageNet
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
    return _create_resnet('ssl_resnext101_32x16d', pretrained, **model_args)


@register_model
def swsl_resnet18(pretrained=True, **kwargs):
    """Constructs a semi-weakly supervised Resnet-18 model pre-trained on 1B weakly supervised
    image dataset and finetuned on ImageNet.
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], **kwargs)
    return _create_resnet('swsl_resnet18', pretrained, **model_args)


@register_model
def swsl_resnet50(pretrained=True, **kwargs):
    """Constructs a semi-weakly supervised ResNet-50 model pre-trained on 1B weakly supervised
    image dataset and finetuned on ImageNet.
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
    return _create_resnet('swsl_resnet50', pretrained, **model_args)


@register_model
def swsl_resnext50_32x4d(pretrained=True, **kwargs):
    """Constructs a semi-weakly supervised ResNeXt-50 32x4 model pre-trained on 1B weakly supervised
    image dataset and finetuned on ImageNet.
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4, **kwargs)
    return _create_resnet('swsl_resnext50_32x4d', pretrained, **model_args)


@register_model
def swsl_resnext101_32x4d(pretrained=True, **kwargs):
    """Constructs a semi-weakly supervised ResNeXt-101 32x4 model pre-trained on 1B weakly supervised
    image dataset and finetuned on ImageNet.
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4, **kwargs)
    return _create_resnet('swsl_resnext101_32x4d', pretrained, **model_args)


@register_model
def swsl_resnext101_32x8d(pretrained=True, **kwargs):
    """Constructs a semi-weakly supervised ResNeXt-101 32x8 model pre-trained on 1B weakly supervised
    image dataset and finetuned on ImageNet.
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8, **kwargs)
    return _create_resnet('swsl_resnext101_32x8d', pretrained, **model_args)


@register_model
def swsl_resnext101_32x16d(pretrained=True, **kwargs):
    """Constructs a semi-weakly supervised ResNeXt-101 32x16 model pre-trained on 1B weakly supervised
    image dataset and finetuned on ImageNet.
    `"Billion-scale Semi-Supervised Learning for Image Classification" <https://arxiv.org/abs/1905.00546>`_
    Weights from https://github.com/facebookresearch/semi-supervised-ImageNet1K-models/
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=16, **kwargs)
    return _create_resnet('swsl_resnext101_32x16d', pretrained, **model_args)


@register_model
def ecaresnet26t(pretrained=False, **kwargs):
    """Constructs an ECA-ResNeXt-26-T model.
    This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
    in the deep stem and ECA attn.
    """
    model_args = dict(
        block=Bottleneck, layers=[2, 2, 2, 2], stem_width=32,
        stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnet26t', pretrained, **model_args)


@register_model
def ecaresnet50d(pretrained=False, **kwargs):
    """Constructs a ResNet-50-D model with eca.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnet50d', pretrained, **model_args)


@register_model
def resnetrs50(pretrained=False, **kwargs):
    """Constructs a ResNet-RS-50 model.
    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
    """
    # ResNet-RS uses SE attention with a 0.25 reduction ratio and a conv stem pool
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
        avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
    return _create_resnet('resnetrs50', pretrained, **model_args)


@register_model
def resnetrs101(pretrained=False, **kwargs):
    """Constructs a ResNet-RS-101 model.
    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
    """
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
        avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
    return _create_resnet('resnetrs101', pretrained, **model_args)


@register_model
def resnetrs152(pretrained=False, **kwargs):
    """Constructs a ResNet-RS-152 model.
    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
    """
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(
        block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
        avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
    return _create_resnet('resnetrs152', pretrained, **model_args)


@register_model
def resnetrs200(pretrained=False, **kwargs):
    """Constructs a ResNet-RS-200 model.
    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
    """
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(
        block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', replace_stem_pool=True,
        avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
    return _create_resnet('resnetrs200', pretrained, **model_args)


@register_model
def resnetrs270(pretrained=False, **kwargs):
    """Constructs a ResNet-RS-270 model.
    Paper: Revisiting ResNets - 
    https://arxiv.org/abs/2103.07579
    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
    """
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(
        block=Bottleneck, layers=[4, 29, 53, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
        avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
    return _create_resnet('resnetrs270', pretrained, **model_args)


@register_model
def resnetrs350(pretrained=False, **kwargs):
    """Constructs a ResNet-RS-350 model.
    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
    """
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(
        block=Bottleneck, layers=[4, 36, 72, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
        avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
    return _create_resnet('resnetrs350', pretrained, **model_args)


@register_model
def resnetrs420(pretrained=False, **kwargs):
    """Constructs a ResNet-RS-420 model
    Paper: Revisiting ResNets - https://arxiv.org/abs/2103.07579
    Pretrained weights from https://github.com/tensorflow/tpu/tree/bee9c4f6/models/official/resnet/resnet_rs
    """
    attn_layer = partial(get_attn('se'), rd_ratio=0.25)
    model_args = dict(
        block=Bottleneck, layers=[4, 44, 87, 4], stem_width=32, stem_type='deep', replace_stem_pool=True,
        avg_down=True, block_args=dict(attn_layer=attn_layer), **kwargs)
    return _create_resnet('resnetrs420', pretrained, **model_args)


@register_model
def ecaresnet50d_pruned(pretrained=False, **kwargs):
    """Constructs a ResNet-50-D model pruned with eca.
    The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnet50d_pruned', pretrained, pruned=True, **model_args)


@register_model
def ecaresnet50t(pretrained=False, **kwargs):
    """Constructs an ECA-ResNet-50-T model.
    Like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels in the deep stem and ECA attn.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32,
        stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnet50t', pretrained, **model_args)


@register_model
def ecaresnetlight(pretrained=False, **kwargs):
    """Constructs a ResNet-50-D light model with eca.
    """
    model_args = dict(
        block=Bottleneck, layers=[1, 1, 11, 3], stem_width=32, avg_down=True,
        block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnetlight', pretrained, **model_args)


@register_model
def ecaresnet101d(pretrained=False, **kwargs):
    """Constructs a ResNet-101-D model with eca.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnet101d', pretrained, **model_args)


@register_model
def ecaresnet101d_pruned(pretrained=False, **kwargs):
    """Constructs a ResNet-101-D model pruned with eca.
    The pruning has been obtained using https://arxiv.org/pdf/2002.08258.pdf
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 23, 3], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnet101d_pruned', pretrained, pruned=True, **model_args)


@register_model
def ecaresnet200d(pretrained=False, **kwargs):
    """Constructs a ResNet-200-D model with ECA.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnet200d', pretrained, **model_args)


@register_model
def ecaresnet269d(pretrained=False, **kwargs):
    """Constructs a ResNet-269-D model with ECA.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnet269d', pretrained, **model_args)


@register_model
def ecaresnext26t_32x4d(pretrained=False, **kwargs):
    """Constructs an ECA-ResNeXt-26-T model.
    This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
    in the deep stem. This model replaces SE module with the ECA module
    """
    model_args = dict(
        block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
        stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnext26t_32x4d', pretrained, **model_args)


@register_model
def ecaresnext50t_32x4d(pretrained=False, **kwargs):
    """Constructs an ECA-ResNeXt-50-T model.
    This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
    in the deep stem. This model replaces SE module with the ECA module
    """
    # NOTE(review): layers [2, 2, 2, 2] duplicate the 26t def above despite the '50t'
    # name — confirm the intended depth for this variant.
    model_args = dict(
        block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
        stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='eca'), **kwargs)
    return _create_resnet('ecaresnext50t_32x4d', pretrained, **model_args)


@register_model
def resnetblur18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model with blur anti-aliasing
    """
    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], aa_layer=BlurPool2d, **kwargs)
    return _create_resnet('resnetblur18', pretrained, **model_args)


@register_model
def resnetblur50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model with blur anti-aliasing
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], aa_layer=BlurPool2d, **kwargs)
    return _create_resnet('resnetblur50', pretrained, **model_args)


@register_model
def seresnet18(pretrained=False, **kwargs):
    """Constructs a SE-ResNet-18 model.
    """
    model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2], block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet18', pretrained, **model_args)


@register_model
def seresnet34(pretrained=False, **kwargs):
    """Constructs a SE-ResNet-34 model.
    """
    model_args = dict(block=BasicBlock, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet34', pretrained, **model_args)


@register_model
def seresnet50(pretrained=False, **kwargs):
    """Constructs a SE-ResNet-50 model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 6, 3], block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet50', pretrained, **model_args)


@register_model
def seresnet50t(pretrained=False, **kwargs):
    """Constructs a SE-ResNet-50-T model (tiered deep stem, avg-pool downsample).
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 6, 3], stem_width=32, stem_type='deep_tiered', avg_down=True,
        block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet50t', pretrained, **model_args)


@register_model
def seresnet101(pretrained=False, **kwargs):
    """Constructs a SE-ResNet-101 model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 4, 23, 3], block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet101', pretrained, **model_args)


@register_model
def seresnet152(pretrained=False, **kwargs):
    """Constructs a SE-ResNet-152 model.
    """
    model_args = dict(block=Bottleneck, layers=[3, 8, 36, 3], block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet152', pretrained, **model_args)


@register_model
def seresnet152d(pretrained=False, **kwargs):
    """Constructs a SE-ResNet-152-D model.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 8, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet152d', pretrained, **model_args)


@register_model
def seresnet200d(pretrained=False, **kwargs):
    """Constructs a ResNet-200-D model with SE attn.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 24, 36, 3], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet200d', pretrained, **model_args)


@register_model
def seresnet269d(pretrained=False, **kwargs):
    """Constructs a ResNet-269-D model with SE attn.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 30, 48, 8], stem_width=32, stem_type='deep', avg_down=True,
        block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnet269d', pretrained, **model_args)


@register_model
def seresnext26d_32x4d(pretrained=False, **kwargs):
    """Constructs a SE-ResNeXt-26-D model.
    This is technically a 28 layer ResNet, using the 'D' modifier from Gluon / bag-of-tricks for
    combination of deep stem and avg_pool in downsample.
    """
    model_args = dict(
        block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
        stem_type='deep', avg_down=True, block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnext26d_32x4d', pretrained, **model_args)


@register_model
def seresnext26t_32x4d(pretrained=False, **kwargs):
    """Constructs a SE-ResNeXt-26-T model.
    This is technically a 28 layer ResNet, like a 'D' bag-of-tricks model but with tiered 24, 32, 64 channels
    in the deep stem.
    """
    model_args = dict(
        block=Bottleneck, layers=[2, 2, 2, 2], cardinality=32, base_width=4, stem_width=32,
        stem_type='deep_tiered', avg_down=True, block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnext26t_32x4d', pretrained, **model_args)


@register_model
def seresnext26tn_32x4d(pretrained=False, **kwargs):
    """Constructs a SE-ResNeXt-26-T model.
    NOTE I deprecated previous 't' model defs and replaced 't' with 'tn', this was the only tn model of note
    so keeping this def for backwards compat with any uses out there. Old 't' model is lost.
    """
    return seresnext26t_32x4d(pretrained=pretrained, **kwargs)


@register_model
def seresnext50_32x4d(pretrained=False, **kwargs):
    """Constructs a SE-ResNeXt50-32x4d model.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 6, 3], cardinality=32, base_width=4,
        block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnext50_32x4d', pretrained, **model_args)


@register_model
def seresnext101_32x4d(pretrained=False, **kwargs):
    """Constructs a SE-ResNeXt-101 32x4d model.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=4,
        block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnext101_32x4d', pretrained, **model_args)


@register_model
def seresnext101_32x8d(pretrained=False, **kwargs):
    """Constructs a SE-ResNeXt-101 32x8d model.
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 4, 23, 3], cardinality=32, base_width=8,
        block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('seresnext101_32x8d', pretrained, **model_args)


@register_model
def senet154(pretrained=False, **kwargs):
    """Constructs a SENet-154 model (deep stem, cardinality 64, 3x3 downsample convs).
    """
    model_args = dict(
        block=Bottleneck, layers=[3, 8, 36, 3], cardinality=64, base_width=4, stem_type='deep',
        down_kernel_size=3, block_reduce_first=2, block_args=dict(attn_layer='se'), **kwargs)
    return _create_resnet('senet154', 
pretrained, **model_args)\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Identity", "torch.nn.init.ones_", "torch.nn.init.zeros_", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ngailapdi/LWF
[ "7b7a87db3d80e25cfe8590b4135a5ee25c1f2707" ]
[ "data_loader.py" ]
[ "from torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\nimport numpy as np\nimport torch\nfrom PIL import Image\nimport cv2\nimport time\n\nclass cifar10(CIFAR10):\n\tdef __init__(self, root,\n\t\t\t\t classes=range(10),\n\t\t\t\t train=True,\n\t\t\t\t transform=None,\n\t\t\t\t target_transform=None,\n\t\t\t\t download=False,\n\t\t\t\t mean_image=None):\n\t\tsuper(cifar10, self).__init__(root,\n\t\t\t\t\t\t\t\t\t train=train,\n\t\t\t\t\t\t\t\t\t transform=transform,\n\t\t\t\t\t\t\t\t\t target_transform=target_transform,\n\t\t\t\t\t\t\t\t\t download=download)\n\t\t\n\t\tself.tensorTranform = transforms.ToTensor()\n\t\tself.train = train\n\t\tself.img_size = 224\n\t\tif mean_image is not None:\n\t\t\tmean_image = mean_image.transpose(1,2,0)\n\t\t\tself.mean_image = cv2.resize(mean_image, (self.img_size, self.img_size))\n\t\t\tself.mean_image = self.mean_image.transpose(2,0,1)\n\n\t\t# Select subset of classes\n\t\tif self.train:\n\t\t\tself.train_data = self.data\n\t\t\tself.train_labels = self.targets\n\t\t\ttrain_data = []\n\t\t\ttrain_labels = []\n\n\t\t\tfor i in range(len(self.train_data)):\n\t\t\t\tif self.train_labels[i] in classes:\n\t\t\t\t\tcurr_img = cv2.resize(self.train_data[i], (self.img_size, self.img_size))\n\t\t\t\t\tcurr_img = curr_img.transpose(2,0,1)\n\t\t\t\t\tif mean_image is None:\n\t\t\t\t\t\ttrain_data.append(curr_img/255.)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttrain_data.append(curr_img/255. 
- self.mean_image)\n\n\t\t\t\t\ttrain_labels.append(int(self.train_labels[i]))\n\t\t\t\t\t\n\t\t\tself.train_data = np.array(train_data, dtype = np.float32)\n\t\t\tself.train_labels = np.array(train_labels)\n\n\t\t\t\n\t\t\tif mean_image is None:\n\t\t\t\tself.mean_image = np.mean(self.train_data, axis=0)\n\n\t\telse:\n\t\t\tself.test_data = self.data\n\t\t\tself.test_labels = self.targets\n\t\t\ttest_data = []\n\t\t\ttest_labels = []\n\n\t\t\tfor i in range(len(self.test_data)):\n\t\t\t\tif self.test_labels[i] in classes:\n\t\t\t\t\tcurr_img = cv2.resize(self.test_data[i], (self.img_size, self.img_size))\n\t\t\t\t\tcurr_img = curr_img.transpose(2,0,1)\n\t\t\t\t\ttest_data.append(curr_img/255. - self.mean_image)\n\t\t\t\t\ttest_labels.append(int(self.test_labels[i]))\n\t\t\t\t\t\n\t\t\tself.test_data = np.array(test_data, dtype = np.float32)\n\t\t\tself.test_labels = test_labels\n\n\n\tdef __getitem__(self, index):\n\t\tif self.train:\n\t\t\timage = self.train_data[index]\n\t\t\trandom_cropped = np.zeros(image.shape, dtype=np.float32)\n\t\t\tpadded = np.pad(image,((0,0),(4,4),(4,4)),mode='constant')\n\t\t\tcrops = np.random.random_integers(0,high=8,size=(1,2))\n\t\t\t# Cropping and possible flipping\n\t\t\tif (np.random.randint(2) > 0):\n\t\t\t\trandom_cropped[:,:,:] = padded[:,crops[0,0]:(crops[0,0]+self.img_size),crops[0,1]:(crops[0,1]+self.img_size)]\n\t\t\telse:\n\t\t\t\trandom_cropped[:,:,:] = padded[:,crops[0,0]:(crops[0,0]+self.img_size),crops[0,1]:(crops[0,1]+self.img_size)][:,:,::-1]\n\t\t\timage = torch.FloatTensor(random_cropped)\n\t\t\ttarget = self.train_labels[index]\n\t\telse:\n\t\t\timage, target = self.test_data[index], self.test_labels[index]\n\n\t\timage = torch.FloatTensor(image)\n\t\t\n\t\treturn index, image, target\n\n\tdef __len__(self):\n\t\tif self.train:\n\t\t\treturn len(self.train_data)\n\t\telse:\n\t\t\treturn len(self.test_data)\n\t\t\n\nclass cifar100(cifar10):\n\tbase_folder = 'cifar-100-python'\n\turl = 
\"http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz\"\n\tfilename = \"cifar-100-python.tar.gz\"\n\ttgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'\n\ttrain_list = [\n\t\t['train', '16019d7e3df5f24257cddd939b257f8d'],\n\t]\n\ttest_list = [\n\t\t['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],\n\t]\n\tmeta = {\n\t\t'filename': 'meta',\n\t\t'key': 'fine_label_names',\n\t\t'md5': '7973b15100ade9c7d40fb424638fde48',\n }\n" ]
[ [ "numpy.pad", "torch.FloatTensor", "numpy.random.random_integers", "numpy.mean", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rishabh-jha101/Character-level-rnn-trained-on-Tolstoy-s-Anna-Karenina
[ "9cee2b8d7c89a8b9b65831a9e354405add97bc18" ]
[ "get_batches.py" ]
[ "import numpy as np\n\ndef get_batches(arr, batch_size, seq_length):\n '''Create a generator that returns batches of size\n batch_size x seq_length from arr.\n\n Arguments\n ---------\n arr: Array you want to make batches from\n batch_size: Batch size, the number of sequences per batch\n seq_length: Number of encoded chars in a sequence\n '''\n\n batch_size_total = batch_size * seq_length\n # total number of batches we can make\n n_batches = len(arr)//batch_size_total\n\n # Keep only enough characters to make full batches\n arr = arr[:n_batches * batch_size_total]\n # Reshape into batch_size rows\n arr = arr.reshape((batch_size, -1))\n\n # iterate through the array, one sequence at a time\n for n in range(0, arr.shape[1], seq_length):\n # The features\n x = arr[:, n:n+seq_length]\n # The targets, shifted by one\n y = np.zeros_like(x)\n try:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length]\n except IndexError:\n y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]\n yield x, y\n" ]
[ [ "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Open-Catalyst-Project/baselines
[ "5a664501d2eff24d362b6f1ea504300705c481ac" ]
[ "ocpmodels/common/utils.py" ]
[ "\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\nThis source code is licensed under the MIT license found in the\nLICENSE file in the root directory of this source tree.\n\"\"\"\n\nimport ast\nimport collections\nimport copy\nimport glob\nimport importlib\nimport itertools\nimport json\nimport logging\nimport os\nimport sys\nimport time\nfrom bisect import bisect\nfrom functools import wraps\nfrom itertools import product\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nimport torch_geometric\nimport yaml\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom torch_geometric.data import Data\nfrom torch_geometric.utils import remove_self_loops\nfrom torch_scatter import segment_coo, segment_csr\n\n\ndef pyg2_data_transform(data: Data):\n # if we're on the new pyg (2.0 or later), we need to convert the data to the new format\n if torch_geometric.__version__ >= \"2.0\":\n return Data(\n **{k: v for k, v in data.__dict__.items() if v is not None}\n )\n\n return data\n\n\ndef save_checkpoint(\n state, checkpoint_dir=\"checkpoints/\", checkpoint_file=\"checkpoint.pt\"\n):\n filename = os.path.join(checkpoint_dir, checkpoint_file)\n torch.save(state, filename)\n\n\nclass Complete(object):\n def __call__(self, data):\n device = data.edge_index.device\n\n row = torch.arange(data.num_nodes, dtype=torch.long, device=device)\n col = torch.arange(data.num_nodes, dtype=torch.long, device=device)\n\n row = row.view(-1, 1).repeat(1, data.num_nodes).view(-1)\n col = col.repeat(data.num_nodes)\n edge_index = torch.stack([row, col], dim=0)\n\n edge_attr = None\n if data.edge_attr is not None:\n idx = data.edge_index[0] * data.num_nodes + data.edge_index[1]\n size = list(data.edge_attr.size())\n size[0] = data.num_nodes * data.num_nodes\n edge_attr = data.edge_attr.new_zeros(size)\n edge_attr[idx] = data.edge_attr\n\n edge_index, edge_attr = remove_self_loops(edge_index, edge_attr)\n 
data.edge_attr = edge_attr\n data.edge_index = edge_index\n\n return data\n\n\ndef warmup_lr_lambda(current_step, optim_config):\n \"\"\"Returns a learning rate multiplier.\n Till `warmup_steps`, learning rate linearly increases to `initial_lr`,\n and then gets multiplied by `lr_gamma` every time a milestone is crossed.\n \"\"\"\n\n # keep this block for older configs that have warmup_epochs instead of warmup_steps\n # and lr_milestones are defined in epochs\n if (\n any(x < 100 for x in optim_config[\"lr_milestones\"])\n or \"warmup_epochs\" in optim_config\n ):\n raise Exception(\n \"ConfigError: please define lr_milestones in steps not epochs and define warmup_steps instead of warmup_epochs\"\n )\n\n if current_step <= optim_config[\"warmup_steps\"]:\n alpha = current_step / float(optim_config[\"warmup_steps\"])\n return optim_config[\"warmup_factor\"] * (1.0 - alpha) + alpha\n else:\n idx = bisect(optim_config[\"lr_milestones\"], current_step)\n return pow(optim_config[\"lr_gamma\"], idx)\n\n\ndef print_cuda_usage():\n print(\"Memory Allocated:\", torch.cuda.memory_allocated() / (1024 * 1024))\n print(\n \"Max Memory Allocated:\",\n torch.cuda.max_memory_allocated() / (1024 * 1024),\n )\n print(\"Memory Cached:\", torch.cuda.memory_cached() / (1024 * 1024))\n print(\"Max Memory Cached:\", torch.cuda.max_memory_cached() / (1024 * 1024))\n\n\ndef conditional_grad(dec):\n \"Decorator to enable/disable grad depending on whether force/energy predictions are being made\"\n # Adapted from https://stackoverflow.com/questions/60907323/accessing-class-property-as-decorator-argument\n def decorator(func):\n @wraps(func)\n def cls_method(self, *args, **kwargs):\n f = func\n if self.regress_forces and not getattr(self, \"direct_forces\", 0):\n f = dec(func)\n return f(self, *args, **kwargs)\n\n return cls_method\n\n return decorator\n\n\ndef plot_histogram(data, xlabel=\"\", ylabel=\"\", title=\"\"):\n assert isinstance(data, list)\n\n # Preset\n fig = Figure(figsize=(5, 
4), dpi=150)\n canvas = FigureCanvas(fig)\n ax = fig.gca()\n\n # Plot\n ax.hist(data, bins=20, rwidth=0.9, zorder=3)\n\n # Axes\n ax.grid(color=\"0.95\", zorder=0)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_title(title)\n fig.tight_layout(pad=2)\n\n # Return numpy array\n canvas.draw()\n image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n image_from_plot = image_from_plot.reshape(\n fig.canvas.get_width_height()[::-1] + (3,)\n )\n\n return image_from_plot\n\n\n# Override the collation method in `pytorch_geometric.data.InMemoryDataset`\ndef collate(data_list):\n keys = data_list[0].keys\n data = data_list[0].__class__()\n\n for key in keys:\n data[key] = []\n slices = {key: [0] for key in keys}\n\n for item, key in product(data_list, keys):\n data[key].append(item[key])\n if torch.is_tensor(item[key]):\n s = slices[key][-1] + item[key].size(\n item.__cat_dim__(key, item[key])\n )\n elif isinstance(item[key], int) or isinstance(item[key], float):\n s = slices[key][-1] + 1\n else:\n raise ValueError(\"Unsupported attribute type\")\n slices[key].append(s)\n\n if hasattr(data_list[0], \"__num_nodes__\"):\n data.__num_nodes__ = []\n for item in data_list:\n data.__num_nodes__.append(item.num_nodes)\n\n for key in keys:\n if torch.is_tensor(data_list[0][key]):\n data[key] = torch.cat(\n data[key], dim=data.__cat_dim__(key, data_list[0][key])\n )\n else:\n data[key] = torch.tensor(data[key])\n slices[key] = torch.tensor(slices[key], dtype=torch.long)\n\n return data, slices\n\n\ndef add_edge_distance_to_graph(\n batch,\n device=\"cpu\",\n dmin=0.0,\n dmax=6.0,\n num_gaussians=50,\n):\n # Make sure x has positions.\n if not all(batch.pos[0][:] == batch.x[0][-3:]):\n batch.x = torch.cat([batch.x, batch.pos.float()], dim=1)\n # First set computations to be tracked for positions.\n batch.x = batch.x.requires_grad_(True)\n # Then compute Euclidean distance between edge endpoints.\n pdist = torch.nn.PairwiseDistance(p=2.0)\n distances = 
pdist(\n batch.x[batch.edge_index[0]][:, -3:],\n batch.x[batch.edge_index[1]][:, -3:],\n )\n # Expand it using a gaussian basis filter.\n gdf_filter = torch.linspace(dmin, dmax, num_gaussians)\n var = gdf_filter[1] - gdf_filter[0]\n gdf_filter, var = gdf_filter.to(device), var.to(device)\n gdf_distances = torch.exp(\n -((distances.view(-1, 1) - gdf_filter) ** 2) / var**2\n )\n # Reassign edge attributes.\n batch.edge_weight = distances\n batch.edge_attr = gdf_distances.float()\n return batch\n\n\n# Copied from https://github.com/facebookresearch/mmf/blob/master/mmf/utils/env.py#L89.\ndef setup_imports():\n from ocpmodels.common.registry import registry\n\n # First, check if imports are already setup\n has_already_setup = registry.get(\"imports_setup\", no_warning=True)\n if has_already_setup:\n return\n # Automatically load all of the modules, so that\n # they register with registry\n root_folder = registry.get(\"ocpmodels_root\", no_warning=True)\n\n if root_folder is None:\n root_folder = os.path.dirname(os.path.abspath(__file__))\n root_folder = os.path.join(root_folder, \"..\")\n\n trainer_folder = os.path.join(root_folder, \"trainers\")\n trainer_pattern = os.path.join(trainer_folder, \"**\", \"*.py\")\n datasets_folder = os.path.join(root_folder, \"datasets\")\n datasets_pattern = os.path.join(datasets_folder, \"*.py\")\n model_folder = os.path.join(root_folder, \"models\")\n model_pattern = os.path.join(model_folder, \"*.py\")\n task_folder = os.path.join(root_folder, \"tasks\")\n task_pattern = os.path.join(task_folder, \"*.py\")\n\n importlib.import_module(\"ocpmodels.common.logger\")\n\n files = (\n glob.glob(datasets_pattern, recursive=True)\n + glob.glob(model_pattern, recursive=True)\n + glob.glob(trainer_pattern, recursive=True)\n + glob.glob(task_pattern, recursive=True)\n )\n\n for f in files:\n for key in [\"/trainers\", \"/datasets\", \"/models\", \"/tasks\"]:\n if f.find(key) != -1:\n splits = f.split(os.sep)\n file_name = splits[-1]\n 
module_name = file_name[: file_name.find(\".py\")]\n importlib.import_module(\n \"ocpmodels.%s.%s\" % (key[1:], module_name)\n )\n\n experimental_folder = os.path.join(root_folder, \"../experimental/\")\n if os.path.exists(experimental_folder):\n experimental_files = glob.glob(\n experimental_folder + \"**/*py\",\n recursive=True,\n )\n # Ignore certain directories within experimental\n ignore_file = os.path.join(experimental_folder, \".ignore\")\n if os.path.exists(ignore_file):\n ignored = []\n with open(ignore_file) as f:\n for line in f.read().splitlines():\n ignored += glob.glob(\n experimental_folder + line + \"/**/*py\", recursive=True\n )\n for f in ignored:\n experimental_files.remove(f)\n for f in experimental_files:\n splits = f.split(os.sep)\n file_name = \".\".join(splits[-splits[::-1].index(\"..\") :])\n module_name = file_name[: file_name.find(\".py\")]\n importlib.import_module(module_name)\n\n registry.register(\"imports_setup\", True)\n\n\ndef dict_set_recursively(dictionary, key_sequence, val):\n top_key = key_sequence.pop(0)\n if len(key_sequence) == 0:\n dictionary[top_key] = val\n else:\n if top_key not in dictionary:\n dictionary[top_key] = {}\n dict_set_recursively(dictionary[top_key], key_sequence, val)\n\n\ndef parse_value(value):\n \"\"\"\n Parse string as Python literal if possible and fallback to string.\n \"\"\"\n try:\n return ast.literal_eval(value)\n except (ValueError, SyntaxError):\n # Use as string if nothing else worked\n return value\n\n\ndef create_dict_from_args(args: list, sep: str = \".\"):\n \"\"\"\n Create a (nested) dictionary from console arguments.\n Keys in different dictionary levels are separated by sep.\n \"\"\"\n return_dict = {}\n for arg in args:\n arg = arg.strip(\"--\")\n keys_concat, val = arg.split(\"=\")\n val = parse_value(val)\n key_sequence = keys_concat.split(sep)\n dict_set_recursively(return_dict, key_sequence, val)\n return return_dict\n\n\ndef load_config(path: str, previous_includes: list = []):\n 
path = Path(path)\n if path in previous_includes:\n raise ValueError(\n f\"Cyclic config include detected. {path} included in sequence {previous_includes}.\"\n )\n previous_includes = previous_includes + [path]\n\n direct_config = yaml.safe_load(open(path, \"r\"))\n\n # Load config from included files.\n if \"includes\" in direct_config:\n includes = direct_config.pop(\"includes\")\n else:\n includes = []\n if not isinstance(includes, list):\n raise AttributeError(\n \"Includes must be a list, '{}' provided\".format(type(includes))\n )\n\n config = {}\n duplicates_warning = []\n duplicates_error = []\n\n for include in includes:\n include_config, inc_dup_warning, inc_dup_error = load_config(\n include, previous_includes\n )\n duplicates_warning += inc_dup_warning\n duplicates_error += inc_dup_error\n\n # Duplicates between includes causes an error\n config, merge_dup_error = merge_dicts(config, include_config)\n duplicates_error += merge_dup_error\n\n # Duplicates between included and main file causes warnings\n config, merge_dup_warning = merge_dicts(config, direct_config)\n duplicates_warning += merge_dup_warning\n\n return config, duplicates_warning, duplicates_error\n\n\ndef build_config(args, args_override):\n config, duplicates_warning, duplicates_error = load_config(args.config_yml)\n if len(duplicates_warning) > 0:\n logging.warning(\n f\"Overwritten config parameters from included configs \"\n f\"(non-included parameters take precedence): {duplicates_warning}\"\n )\n if len(duplicates_error) > 0:\n raise ValueError(\n f\"Conflicting (duplicate) parameters in simultaneously \"\n f\"included configs: {duplicates_error}\"\n )\n\n # Check for overridden parameters.\n if args_override != []:\n overrides = create_dict_from_args(args_override)\n config, _ = merge_dicts(config, overrides)\n\n # Some other flags.\n config[\"mode\"] = args.mode\n config[\"identifier\"] = args.identifier\n config[\"timestamp_id\"] = args.timestamp_id\n config[\"seed\"] = args.seed\n 
config[\"is_debug\"] = args.debug\n config[\"run_dir\"] = args.run_dir\n config[\"print_every\"] = args.print_every\n config[\"amp\"] = args.amp\n config[\"checkpoint\"] = args.checkpoint\n config[\"cpu\"] = args.cpu\n # Submit\n config[\"submit\"] = args.submit\n config[\"summit\"] = args.summit\n # Distributed\n config[\"local_rank\"] = args.local_rank\n config[\"distributed_port\"] = args.distributed_port\n config[\"world_size\"] = args.num_nodes * args.num_gpus\n config[\"distributed_backend\"] = args.distributed_backend\n config[\"noddp\"] = args.no_ddp\n\n return config\n\n\ndef create_grid(base_config, sweep_file):\n def _flatten_sweeps(sweeps, root_key=\"\", sep=\".\"):\n flat_sweeps = []\n for key, value in sweeps.items():\n new_key = root_key + sep + key if root_key else key\n if isinstance(value, collections.MutableMapping):\n flat_sweeps.extend(_flatten_sweeps(value, new_key).items())\n else:\n flat_sweeps.append((new_key, value))\n return collections.OrderedDict(flat_sweeps)\n\n def _update_config(config, keys, override_vals, sep=\".\"):\n for key, value in zip(keys, override_vals):\n key_path = key.split(sep)\n child_config = config\n for name in key_path[:-1]:\n child_config = child_config[name]\n child_config[key_path[-1]] = value\n return config\n\n sweeps = yaml.safe_load(open(sweep_file, \"r\"))\n flat_sweeps = _flatten_sweeps(sweeps)\n keys = list(flat_sweeps.keys())\n values = list(itertools.product(*flat_sweeps.values()))\n\n configs = []\n for i, override_vals in enumerate(values):\n config = copy.deepcopy(base_config)\n config = _update_config(config, keys, override_vals)\n config[\"identifier\"] = config[\"identifier\"] + f\"_run{i}\"\n configs.append(config)\n return configs\n\n\ndef save_experiment_log(args, jobs, configs):\n log_file = args.logdir / \"exp\" / time.strftime(\"%Y-%m-%d-%I-%M-%S%p.log\")\n log_file.parent.mkdir(exist_ok=True, parents=True)\n with open(log_file, \"w\") as f:\n for job, config in zip(jobs, configs):\n 
print(\n json.dumps(\n {\n \"config\": config,\n \"slurm_id\": job.job_id,\n \"timestamp\": time.strftime(\"%I:%M:%S%p %Z %b %d, %Y\"),\n }\n ),\n file=f,\n )\n return log_file\n\n\ndef get_pbc_distances(\n pos,\n edge_index,\n cell,\n cell_offsets,\n neighbors,\n return_offsets=False,\n return_distance_vec=False,\n):\n row, col = edge_index\n\n distance_vectors = pos[row] - pos[col]\n\n # correct for pbc\n neighbors = neighbors.to(cell.device)\n cell = torch.repeat_interleave(cell, neighbors, dim=0)\n offsets = cell_offsets.float().view(-1, 1, 3).bmm(cell.float()).view(-1, 3)\n distance_vectors += offsets\n\n # compute distances\n distances = distance_vectors.norm(dim=-1)\n\n # redundancy: remove zero distances\n nonzero_idx = torch.arange(len(distances))[distances != 0]\n edge_index = edge_index[:, nonzero_idx]\n distances = distances[nonzero_idx]\n\n out = {\n \"edge_index\": edge_index,\n \"distances\": distances,\n }\n\n if return_distance_vec:\n out[\"distance_vec\"] = distance_vectors[nonzero_idx]\n\n if return_offsets:\n out[\"offsets\"] = offsets[nonzero_idx]\n\n return out\n\n\ndef radius_graph_pbc(data, radius, max_num_neighbors_threshold):\n device = data.pos.device\n batch_size = len(data.natoms)\n\n # position of the atoms\n atom_pos = data.pos\n\n # Before computing the pairwise distances between atoms, first create a list of atom indices to compare for the entire batch\n num_atoms_per_image = data.natoms\n num_atoms_per_image_sqr = (num_atoms_per_image**2).long()\n\n # index offset between images\n index_offset = (\n torch.cumsum(num_atoms_per_image, dim=0) - num_atoms_per_image\n )\n\n index_offset_expand = torch.repeat_interleave(\n index_offset, num_atoms_per_image_sqr\n )\n num_atoms_per_image_expand = torch.repeat_interleave(\n num_atoms_per_image, num_atoms_per_image_sqr\n )\n\n # Compute a tensor containing sequences of numbers that range from 0 to num_atoms_per_image_sqr for each image\n # that is used to compute indices for the pairs of 
atoms. This is a very convoluted way to implement\n # the following (but 10x faster since it removes the for loop)\n # for batch_idx in range(batch_size):\n # batch_count = torch.cat([batch_count, torch.arange(num_atoms_per_image_sqr[batch_idx], device=device)], dim=0)\n num_atom_pairs = torch.sum(num_atoms_per_image_sqr)\n index_sqr_offset = (\n torch.cumsum(num_atoms_per_image_sqr, dim=0) - num_atoms_per_image_sqr\n )\n index_sqr_offset = torch.repeat_interleave(\n index_sqr_offset, num_atoms_per_image_sqr\n )\n atom_count_sqr = (\n torch.arange(num_atom_pairs, device=device) - index_sqr_offset\n )\n\n # Compute the indices for the pairs of atoms (using division and mod)\n # If the systems get too large this apporach could run into numerical precision issues\n index1 = (\n atom_count_sqr // num_atoms_per_image_expand\n ) + index_offset_expand\n index2 = (\n atom_count_sqr % num_atoms_per_image_expand\n ) + index_offset_expand\n # Get the positions for each atom\n pos1 = torch.index_select(atom_pos, 0, index1)\n pos2 = torch.index_select(atom_pos, 0, index2)\n\n # Calculate required number of unit cells in each direction.\n # Smallest distance between planes separated by a1 is\n # 1 / ||(a2 x a3) / V||_2, since a2 x a3 is the area of the plane.\n # Note that the unit cell volume V = a1 * (a2 x a3) and that\n # (a2 x a3) / V is also the reciprocal primitive vector\n # (crystallographer's definition).\n cross_a2a3 = torch.cross(data.cell[:, 1], data.cell[:, 2], dim=-1)\n cell_vol = torch.sum(data.cell[:, 0] * cross_a2a3, dim=-1, keepdim=True)\n inv_min_dist_a1 = torch.norm(cross_a2a3 / cell_vol, p=2, dim=-1)\n rep_a1 = torch.ceil(radius * inv_min_dist_a1)\n\n cross_a3a1 = torch.cross(data.cell[:, 2], data.cell[:, 0], dim=-1)\n inv_min_dist_a2 = torch.norm(cross_a3a1 / cell_vol, p=2, dim=-1)\n rep_a2 = torch.ceil(radius * inv_min_dist_a2)\n\n if radius >= 20:\n # Cutoff larger than the vacuum layer of 20A\n cross_a1a2 = torch.cross(data.cell[:, 0], data.cell[:, 1], 
dim=-1)\n inv_min_dist_a3 = torch.norm(cross_a1a2 / cell_vol, p=2, dim=-1)\n rep_a3 = torch.ceil(radius * inv_min_dist_a3)\n else:\n rep_a3 = data.cell.new_zeros(1)\n # Take the max over all images for uniformity. This is essentially padding.\n # Note that this can significantly increase the number of computed distances\n # if the required repetitions are very different between images\n # (which they usually are). Changing this to sparse (scatter) operations\n # might be worth the effort if this function becomes a bottleneck.\n max_rep = [rep_a1.max(), rep_a2.max(), rep_a3.max()]\n\n # Tensor of unit cells\n cells_per_dim = [\n torch.arange(-rep, rep + 1, device=device, dtype=torch.float)\n for rep in max_rep\n ]\n unit_cell = torch.cat(torch.meshgrid(cells_per_dim), dim=-1).reshape(-1, 3)\n num_cells = len(unit_cell)\n unit_cell_per_atom = unit_cell.view(1, num_cells, 3).repeat(\n len(index2), 1, 1\n )\n unit_cell = torch.transpose(unit_cell, 0, 1)\n unit_cell_batch = unit_cell.view(1, 3, num_cells).expand(\n batch_size, -1, -1\n )\n\n # Compute the x, y, z positional offsets for each cell in each image\n data_cell = torch.transpose(data.cell, 1, 2)\n pbc_offsets = torch.bmm(data_cell, unit_cell_batch)\n pbc_offsets_per_atom = torch.repeat_interleave(\n pbc_offsets, num_atoms_per_image_sqr, dim=0\n )\n\n # Expand the positions and indices for the 9 cells\n pos1 = pos1.view(-1, 3, 1).expand(-1, -1, num_cells)\n pos2 = pos2.view(-1, 3, 1).expand(-1, -1, num_cells)\n index1 = index1.view(-1, 1).repeat(1, num_cells).view(-1)\n index2 = index2.view(-1, 1).repeat(1, num_cells).view(-1)\n # Add the PBC offsets for the second atom\n pos2 = pos2 + pbc_offsets_per_atom\n\n # Compute the squared distance between atoms\n atom_distance_sqr = torch.sum((pos1 - pos2) ** 2, dim=1)\n atom_distance_sqr = atom_distance_sqr.view(-1)\n\n # Remove pairs that are too far apart\n mask_within_radius = torch.le(atom_distance_sqr, radius * radius)\n # Remove pairs with the same atoms 
(distance = 0.0)\n mask_not_same = torch.gt(atom_distance_sqr, 0.0001)\n mask = torch.logical_and(mask_within_radius, mask_not_same)\n index1 = torch.masked_select(index1, mask)\n index2 = torch.masked_select(index2, mask)\n unit_cell = torch.masked_select(\n unit_cell_per_atom.view(-1, 3), mask.view(-1, 1).expand(-1, 3)\n )\n unit_cell = unit_cell.view(-1, 3)\n atom_distance_sqr = torch.masked_select(atom_distance_sqr, mask)\n\n mask_num_neighbors, num_neighbors_image = get_max_neighbors_mask(\n natoms=data.natoms,\n index=index1,\n atom_distance=atom_distance_sqr,\n max_num_neighbors_threshold=max_num_neighbors_threshold,\n )\n\n if not torch.all(mask_num_neighbors):\n # Mask out the atoms to ensure each atom has at most max_num_neighbors_threshold neighbors\n index1 = torch.masked_select(index1, mask_num_neighbors)\n index2 = torch.masked_select(index2, mask_num_neighbors)\n unit_cell = torch.masked_select(\n unit_cell.view(-1, 3), mask_num_neighbors.view(-1, 1).expand(-1, 3)\n )\n unit_cell = unit_cell.view(-1, 3)\n\n edge_index = torch.stack((index2, index1))\n\n return edge_index, unit_cell, num_neighbors_image\n\n\ndef get_max_neighbors_mask(\n natoms, index, atom_distance, max_num_neighbors_threshold\n):\n \"\"\"\n Give a mask that filters out edges so that each atom has at most\n `max_num_neighbors_threshold` neighbors.\n Assumes that `index` is sorted.\n \"\"\"\n device = natoms.device\n num_atoms = natoms.sum()\n\n # Get number of neighbors\n # segment_coo assumes sorted index\n ones = index.new_ones(1).expand_as(index)\n num_neighbors = segment_coo(ones, index, dim_size=num_atoms)\n max_num_neighbors = num_neighbors.max()\n num_neighbors_thresholded = num_neighbors.clamp(\n max=max_num_neighbors_threshold\n )\n\n # Get number of (thresholded) neighbors per image\n image_indptr = torch.zeros(\n natoms.shape[0] + 1, device=device, dtype=torch.long\n )\n image_indptr[1:] = torch.cumsum(natoms, dim=0)\n num_neighbors_image = 
segment_csr(num_neighbors_thresholded, image_indptr)\n\n # If max_num_neighbors is below the threshold, return early\n if (\n max_num_neighbors <= max_num_neighbors_threshold\n or max_num_neighbors_threshold <= 0\n ):\n mask_num_neighbors = torch.tensor(\n [True], dtype=bool, device=device\n ).expand_as(index)\n return mask_num_neighbors, num_neighbors_image\n\n # Create a tensor of size [num_atoms, max_num_neighbors] to sort the distances of the neighbors.\n # Fill with infinity so we can easily remove unused distances later.\n distance_sort = torch.full(\n [num_atoms * max_num_neighbors], np.inf, device=device\n )\n\n # Create an index map to map distances from atom_distance to distance_sort\n # index_sort_map assumes index to be sorted\n index_neighbor_offset = torch.cumsum(num_neighbors, dim=0) - num_neighbors\n index_neighbor_offset_expand = torch.repeat_interleave(\n index_neighbor_offset, num_neighbors\n )\n index_sort_map = (\n index * max_num_neighbors\n + torch.arange(len(index), device=device)\n - index_neighbor_offset_expand\n )\n distance_sort.index_copy_(0, index_sort_map, atom_distance)\n distance_sort = distance_sort.view(num_atoms, max_num_neighbors)\n\n # Sort neighboring atoms based on distance\n distance_sort, index_sort = torch.sort(distance_sort, dim=1)\n # Select the max_num_neighbors_threshold neighbors that are closest\n distance_sort = distance_sort[:, :max_num_neighbors_threshold]\n index_sort = index_sort[:, :max_num_neighbors_threshold]\n\n # Offset index_sort so that it indexes into index\n index_sort = index_sort + index_neighbor_offset.view(-1, 1).expand(\n -1, max_num_neighbors_threshold\n )\n # Remove \"unused pairs\" with infinite distances\n mask_finite = torch.isfinite(distance_sort)\n index_sort = torch.masked_select(index_sort, mask_finite)\n\n # At this point index_sort contains the index into index of the\n # closest max_num_neighbors_threshold neighbors per atom\n # Create a mask to remove all pairs not in index_sort\n 
mask_num_neighbors = torch.zeros(len(index), device=device, dtype=bool)\n mask_num_neighbors.index_fill_(0, index_sort, True)\n\n return mask_num_neighbors, num_neighbors_image\n\n\ndef get_pruned_edge_idx(edge_index, num_atoms=None, max_neigh=1e9):\n assert num_atoms is not None\n\n # removes neighbors > max_neigh\n # assumes neighbors are sorted in increasing distance\n _nonmax_idx = []\n for i in range(num_atoms):\n idx_i = torch.arange(len(edge_index[1]))[(edge_index[1] == i)][\n :max_neigh\n ]\n _nonmax_idx.append(idx_i)\n _nonmax_idx = torch.cat(_nonmax_idx)\n\n return _nonmax_idx\n\n\ndef merge_dicts(dict1: dict, dict2: dict):\n \"\"\"Recursively merge two dictionaries.\n Values in dict2 override values in dict1. If dict1 and dict2 contain a dictionary as a\n value, this will call itself recursively to merge these dictionaries.\n This does not modify the input dictionaries (creates an internal copy).\n Additionally returns a list of detected duplicates.\n Adapted from https://github.com/TUM-DAML/seml/blob/master/seml/utils.py\n\n Parameters\n ----------\n dict1: dict\n First dict.\n dict2: dict\n Second dict. 
Values in dict2 will override values from dict1 in case they share the same key.\n\n Returns\n -------\n return_dict: dict\n Merged dictionaries.\n \"\"\"\n if not isinstance(dict1, dict):\n raise ValueError(f\"Expecting dict1 to be dict, found {type(dict1)}.\")\n if not isinstance(dict2, dict):\n raise ValueError(f\"Expecting dict2 to be dict, found {type(dict2)}.\")\n\n return_dict = copy.deepcopy(dict1)\n duplicates = []\n\n for k, v in dict2.items():\n if k not in dict1:\n return_dict[k] = v\n else:\n if isinstance(v, dict) and isinstance(dict1[k], dict):\n return_dict[k], duplicates_k = merge_dicts(dict1[k], dict2[k])\n duplicates += [f\"{k}.{dup}\" for dup in duplicates_k]\n else:\n return_dict[k] = dict2[k]\n duplicates.append(k)\n\n return return_dict, duplicates\n\n\nclass SeverityLevelBetween(logging.Filter):\n def __init__(self, min_level, max_level):\n super().__init__()\n self.min_level = min_level\n self.max_level = max_level\n\n def filter(self, record):\n return self.min_level <= record.levelno < self.max_level\n\n\ndef setup_logging():\n root = logging.getLogger()\n\n # Perform setup only if logging has not been configured\n if not root.hasHandlers():\n root.setLevel(logging.INFO)\n\n log_formatter = logging.Formatter(\n \"%(asctime)s (%(levelname)s): %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n\n # Send INFO to stdout\n handler_out = logging.StreamHandler(sys.stdout)\n handler_out.addFilter(\n SeverityLevelBetween(logging.INFO, logging.WARNING)\n )\n handler_out.setFormatter(log_formatter)\n root.addHandler(handler_out)\n\n # Send WARNING (and higher) to stderr\n handler_err = logging.StreamHandler(sys.stderr)\n handler_err.setLevel(logging.WARNING)\n handler_err.setFormatter(log_formatter)\n root.addHandler(handler_err)\n\n\ndef compute_neighbors(data, edge_index):\n # Get number of neighbors\n # segment_coo assumes sorted index\n ones = edge_index[1].new_ones(1).expand_as(edge_index[1])\n num_neighbors = segment_coo(\n ones, 
edge_index[1], dim_size=data.natoms.sum()\n )\n\n # Get number of neighbors per image\n image_indptr = torch.zeros(\n data.natoms.shape[0] + 1, device=data.pos.device, dtype=torch.long\n )\n image_indptr[1:] = torch.cumsum(data.natoms, dim=0)\n neighbors = segment_csr(num_neighbors, image_indptr)\n return neighbors\n\n\ndef check_traj_files(batch, traj_dir):\n if traj_dir is None:\n return False\n traj_dir = Path(traj_dir)\n traj_files = [traj_dir / f\"{id}.traj\" for id in batch[0].sid.tolist()]\n return all(fl.exists() for fl in traj_files)\n" ]
[ [ "torch.all", "torch.transpose", "torch.zeros", "torch.cat", "torch.sum", "torch.le", "torch.repeat_interleave", "torch.nn.PairwiseDistance", "torch.logical_and", "torch.save", "torch.norm", "torch.cumsum", "matplotlib.backends.backend_agg.FigureCanvasAgg", "torch.tensor", "torch.meshgrid", "torch.isfinite", "torch.bmm", "torch.sort", "torch.arange", "torch.masked_select", "torch.index_select", "torch.cuda.memory_cached", "torch.linspace", "torch.full", "torch.is_tensor", "torch.cuda.max_memory_cached", "torch.stack", "torch.ceil", "matplotlib.figure.Figure", "torch.cuda.max_memory_allocated", "torch.gt", "torch.cuda.memory_allocated", "torch.cross" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yenchenlin/pix2pix-flow
[ "c3536b4f9d676c1780fb59dbc77aec3c6435a65d" ]
[ "data_loaders/get_edges_shoes_joint.py" ]
[ "import numpy as np\nimport os\n\n\ndef downsample(x, resolution):\n assert x.dtype == np.float32\n assert x.shape[1] % resolution == 0\n assert x.shape[2] % resolution == 0\n if x.shape[1] == x.shape[2] == resolution:\n return x\n s = x.shape\n x = np.reshape(x, [s[0], resolution, s[1] // resolution,\n resolution, s[2] // resolution, s[3]])\n x = np.mean(x, (2, 4))\n return x\n\n\ndef x_to_uint8(x):\n x = np.clip(np.floor(x), 0, 255)\n return x.astype(np.uint8)\n\n\ndef shard(data, shards, rank):\n # Determinisitc shards\n x, y = data\n assert x.shape[0] == y.shape[0]\n assert x.shape[0] % shards == 0\n assert 0 <= rank < shards\n size = x.shape[0] // shards\n ind = rank*size\n return x[ind:ind+size], y[ind:ind+size]\n\n\ndef get_data(problem, shards, rank, data_augmentation_level, n_batch_train,\n n_batch_test, n_batch_init, resolution, flip_color=False,\n code_path=None):\n if problem == 'edges2shoes' or problem == 'facades':\n DIR = '../'\n x_train_A = np.load(\n os.path.join(DIR, '{}_32/train/A.npy'.format(problem)))\n x_test_A = np.load(\n os.path.join(DIR, '{}_32/val/A.npy'.format(problem)))\n\n x_train_B = np.load(\n os.path.join(DIR, '{}_32/train/B.npy'.format(problem)))\n x_test_B = np.load(\n os.path.join(DIR, '{}_32/val/B.npy'.format(problem)))\n\n # Discard last example if dataset size is not even for\n # distributed training\n if x_train_A.shape[0] % 2 != 0:\n x_train_A = x_train_A[:-1, :, :, :]\n x_train_B = x_train_B[:-1, :, :, :]\n\n y_train = np.zeros((x_train_A.shape[0]))\n y_test = np.zeros((x_test_A.shape[0]))\n y_train = np.reshape(y_train, [-1])\n y_test = np.reshape(y_test, [-1])\n y_train_A, y_train_B = y_train, y_train # Set up dummy y\n y_test_A, y_test_B = y_test, y_test # Set up dummy y\n else:\n raise Exception()\n\n\n print('n_train:', x_train_A.shape[0], 'n_test:', x_test_A.shape[0])\n\n # Shard before any shuffling\n x_train_A, y_train_A = shard((x_train_A, y_train_A), shards, rank)\n x_test_A, y_test_A = shard((x_test_A, 
y_test_A), shards, rank)\n\n x_train_B, y_train_B = shard((x_train_B, y_train_B), shards, rank)\n x_test_B, y_test_B = shard((x_test_B, y_test_B), shards, rank)\n\n print('n_shard_train:', x_train_A.shape[0], 'n_shard_test:', x_test_A.shape[0])\n\n from keras.preprocessing.image import ImageDataGenerator\n datagen_test = ImageDataGenerator()\n if data_augmentation_level == 0:\n datagen_train = ImageDataGenerator()\n else:\n if problem == 'edges2shoes':\n datagen_train = ImageDataGenerator(\n width_shift_range=0.1,\n height_shift_range=0.1\n )\n elif problem == 'facades':\n datagen_train = ImageDataGenerator(\n width_shift_range=0.1,\n height_shift_range=0.1\n )\n else:\n raise Exception()\n\n seed = 420\n datagen_train.fit(x_train_A, seed=seed)\n datagen_test.fit(x_test_A, seed=seed)\n train_flow_A = datagen_train.flow(x_train_A, y_train_A, n_batch_train, seed=seed)\n test_flow_A = datagen_test.flow(x_test_A, y_test_A, n_batch_test, shuffle=False, seed=seed)\n\n datagen_train.fit(x_train_B, seed=seed)\n datagen_test.fit(x_test_B, seed=seed)\n train_flow_B = datagen_train.flow(x_train_B, y_train_B, n_batch_train, seed=seed)\n test_flow_B = datagen_test.flow(x_test_B, y_test_B, n_batch_test, shuffle=False, seed=seed)\n\n def make_iterator(flow, resolution, code_path=None):\n def iterator():\n x_full, yz = flow.next()\n x_full = x_full.astype(np.float32)\n x = downsample(x_full, resolution)\n x = x_to_uint8(x)\n if code_path != None:\n y = np.squeeze(yz[:, :1])\n z = yz[:, 1:]\n return x, y, z\n else:\n y = yz\n return x, y\n\n return iterator\n\n #init_iterator = make_iterator(train_flow, resolution)\n train_iterator_A = make_iterator(train_flow_A, resolution, code_path)\n test_iterator_A = make_iterator(test_flow_A, resolution, code_path)\n\n train_iterator_B = make_iterator(train_flow_B, resolution, code_path)\n test_iterator_B = make_iterator(test_flow_B, resolution, code_path)\n\n # Get data for initialization\n data_init_A = make_batch(train_iterator_A, 
n_batch_train, n_batch_init, code_path=code_path)\n data_init_B = make_batch(train_iterator_B, n_batch_train, n_batch_init, code_path=code_path)\n\n return train_iterator_A, test_iterator_A, data_init_A, train_iterator_B, test_iterator_B, data_init_B\n\n\ndef make_batch(iterator, iterator_batch_size, required_batch_size, code_path=None):\n ib, rb = iterator_batch_size, required_batch_size\n #assert rb % ib == 0\n k = int(np.ceil(rb / ib))\n xs, ys, codes = [], [], []\n for i in range(k):\n if code_path != None:\n x, y, code = iterator()\n codes.append(code)\n else:\n x, y = iterator()\n xs.append(x)\n ys.append(y)\n x, y = np.concatenate(xs)[:rb], np.concatenate(ys)[:rb]\n if code_path != None:\n code = np.concatenate(codes)[:rb]\n return {'x': x, 'y': y, 'code': code}\n else:\n return {'x': x, 'y': y}\n" ]
[ [ "numpy.reshape", "numpy.squeeze", "numpy.concatenate", "numpy.ceil", "numpy.mean", "numpy.floor", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kent1325/Python_Crash_Course_Book_Projects
[ "1fd108139b79660f5f089bf2aba807cd54168fb2" ]
[ "data_visualization/death_valley_weather.py" ]
[ "import csv\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nfilename = 'data/death_valley_2018_simple.csv'\nwith open(filename) as f:\n reader = csv.reader(f)\n header_row = next(reader)\n\n # Get high temperatures from this file.\n dates, highs, lows = [], [], []\n for row in reader:\n current_date = datetime.strptime(row[2], '%Y-%m-%d')\n try:\n high = int(row[4])\n low = int(row[5])\n except ValueError:\n print(f\"Missing data for {current_date}\")\n else:\n dates.append(current_date)\n highs.append(high)\n lows.append(low)\n\n# Plot the high and low temperatures.\nplt.style.use('seaborn')\nfig, ax = plt.subplots()\nax.plot(dates, highs, c='red', alpha=0.5)\nax.plot(dates, lows, c='blue', alpha=0.5)\nplt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)\n\n# Format plot.\ntitle = \"Daily high and low temperatures - 2018\\nDeath Valley, CA\"\nplt.title(title, fontsize=24)\nplt.xlabel('', fontsize=16)\nfig.autofmt_xdate()\nplt.ylabel(\"Temperature (F)\", fontsize=16)\nplt.tick_params(axis='both', which='major', labelsize=16)\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.style.use", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rhullcsa/RunestoneServer
[ "db1ad6078f855a058e735bdbc6c1f130f1a34ee7" ]
[ "controllers/admin.py" ]
[ "# *******************************\n# |docname| - route to a textbook\n# *******************************\n# This controller provides routes to admin functions\n#\n# Imports\n# =======\n# These are listed in the order prescribed by `PEP 8\n# <http://www.python.org/dev/peps/pep-0008/#imports>`_.\n#\n# Standard library\n# ----------------\nimport csv\nimport datetime\nimport io\nimport json\nimport logging\nimport re\nimport uuid\nfrom collections import OrderedDict, Counter\nfrom random import randint\n\n# Third Party library\n# -------------------\nfrom dateutil.parser import parse\nfrom rs_grading import _get_assignment, send_lti_grades\nfrom runestone import cmap\nimport pandas as pd\nimport altair as alt\n\nfrom rs_practice import _get_qualified_questions\n\nlogger = logging.getLogger(settings.logger)\nlogger.setLevel(settings.log_level)\n\nadmin_logger(logger)\n\nALL_AUTOGRADE_OPTIONS = [\"manual\", \"all_or_nothing\", \"pct_correct\", \"interact\"]\nAUTOGRADE_POSSIBLE_VALUES = dict(\n actex=ALL_AUTOGRADE_OPTIONS,\n activecode=ALL_AUTOGRADE_OPTIONS,\n clickablearea=[\"manual\", \"all_or_nothing\", \"interact\"],\n codelens=ALL_AUTOGRADE_OPTIONS,\n datafile=[],\n dragndrop=[\"manual\", \"all_or_nothing\", \"interact\"],\n external=[],\n fillintheblank=ALL_AUTOGRADE_OPTIONS,\n khanex=ALL_AUTOGRADE_OPTIONS,\n lp_build=ALL_AUTOGRADE_OPTIONS,\n mchoice=ALL_AUTOGRADE_OPTIONS,\n page=[\"interact\"],\n parsonsprob=ALL_AUTOGRADE_OPTIONS,\n poll=[\"interact\"],\n quizly=ALL_AUTOGRADE_OPTIONS,\n reveal=[],\n selectquestion=ALL_AUTOGRADE_OPTIONS,\n shortanswer=ALL_AUTOGRADE_OPTIONS,\n showeval=[\"interact\"],\n video=[\"interact\"],\n youtube=[\"interact\"],\n)\n\nAUTOGRADEABLE = set(\n [\n \"clickablearea\",\n \"dragndrop\",\n \"fillintheblank\",\n \"khanex\",\n \"mchoice\",\n \"parsonsprob\",\n \"quizly\",\n \"selectquestion\",\n ]\n)\n\nALL_WHICH_OPTIONS = [\"first_answer\", \"last_answer\", \"best_answer\"]\nWHICH_TO_GRADE_POSSIBLE_VALUES = dict(\n 
actex=ALL_WHICH_OPTIONS,\n activecode=ALL_WHICH_OPTIONS,\n clickablearea=ALL_WHICH_OPTIONS,\n codelens=ALL_WHICH_OPTIONS,\n datafile=[],\n dragndrop=ALL_WHICH_OPTIONS,\n external=[],\n fillintheblank=ALL_WHICH_OPTIONS,\n khanex=ALL_WHICH_OPTIONS,\n lp_build=ALL_WHICH_OPTIONS,\n mchoice=ALL_WHICH_OPTIONS,\n page=ALL_WHICH_OPTIONS,\n parsonsprob=ALL_WHICH_OPTIONS,\n poll=[],\n quizly=ALL_WHICH_OPTIONS,\n reveal=[],\n selectquestion=ALL_WHICH_OPTIONS,\n shortanswer=ALL_WHICH_OPTIONS,\n showeval=ALL_WHICH_OPTIONS,\n video=[],\n youtube=[],\n)\n\n\[email protected]_login()\ndef index():\n redirect(URL(\"admin\", \"admin\"))\n\n\[email protected]_login()\ndef doc():\n response.title = \"Documentation\"\n return dict(course_id=auth.user.course_name, course=get_course_row(db.courses.ALL))\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef assignments():\n \"\"\"\n This is called for the assignments tab on the instructor interface\n When an assignment is selected get_assignment is called to gather the details\n for that assignment.\n \"\"\"\n response.title = \"Assignments\"\n cur_assignments = db(db.assignments.course == auth.user.course_id).select(\n orderby=db.assignments.duedate\n )\n assigndict = OrderedDict()\n for row in cur_assignments:\n assigndict[row.id] = row.name\n\n tags = []\n tag_query = db(db.tags).select()\n for tag in tag_query:\n tags.append(tag.tag_name)\n\n course = get_course_row(db.courses.ALL)\n base_course = course.base_course\n chapter_labels = []\n chapters_query = db(db.chapters.course_id == base_course).select(\n db.chapters.chapter_label\n )\n for row in chapters_query:\n chapter_labels.append(row.chapter_label)\n\n # See `models/db_ebook.py` for course_attributes table\n set_latex_preamble(course.base_course)\n\n return dict(\n coursename=auth.user.course_name,\n confirm=False,\n course_id=auth.user.course_name,\n assignments=assigndict,\n tags=tags,\n 
chapters=chapter_labels,\n toc=_get_toc_and_questions(), # <-- This Gets the readings and questions\n course=course,\n is_instructor=True,\n )\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef practice():\n response.title = \"Practice\"\n course = db(db.courses.id == auth.user.course_id).select().first()\n course_start_date = course.term_start_date\n\n start_date = course_start_date + datetime.timedelta(days=13)\n end_date = \"\"\n max_practice_days = 50\n max_practice_questions = 500\n day_points = 2\n question_points = 0.2\n questions_to_complete_day = 10\n flashcard_creation_method = 0\n graded = 1\n spacing = 0\n interleaving = 0\n error_start_date = 0\n error_end_date = 0\n error_max_practice_days = 0\n error_max_practice_questions = 0\n error_day_points = 0\n error_question_points = 0\n error_questions_to_complete_day = 0\n error_flashcard_creation_method = 0\n error_graded = 0\n\n already_exists = 0\n any_practice_settings = db(db.course_practice.auth_user_id == auth.user.id)\n practice_settings = any_practice_settings(\n db.course_practice.course_name == course.course_name\n )\n # If the instructor has created practice for other courses, don't randomize spacing and interleaving for the new\n # course.\n if not any_practice_settings.isempty():\n any_practice_settings = any_practice_settings.select().first()\n spacing = any_practice_settings.spacing\n interleaving = any_practice_settings.interleaving\n\n # Now checking to see if there are practice settings for this course.\n # If not, stick with the defaults.\n if (\n not practice_settings.isempty()\n and practice_settings.select().first().end_date is not None\n and practice_settings.select().first().end_date != \"\"\n ):\n practice_setting = practice_settings.select().first()\n start_date = practice_setting.start_date\n end_date = practice_setting.end_date\n max_practice_days = practice_setting.max_practice_days\n max_practice_questions = 
practice_setting.max_practice_questions\n day_points = practice_setting.day_points\n question_points = practice_setting.question_points\n questions_to_complete_day = practice_setting.questions_to_complete_day\n flashcard_creation_method = practice_setting.flashcard_creation_method\n graded = practice_setting.graded\n spacing = practice_setting.spacing\n interleaving = practice_setting.interleaving\n already_exists = 1\n else:\n if randint(0, 1) == 1:\n spacing = 1\n if randint(0, 1) == 1:\n interleaving = 1\n if practice_settings.isempty():\n db.course_practice.insert(\n auth_user_id=auth.user.id,\n course_name=course.course_name,\n start_date=start_date,\n end_date=end_date,\n max_practice_days=max_practice_days,\n max_practice_questions=max_practice_questions,\n day_points=day_points,\n question_points=question_points,\n questions_to_complete_day=questions_to_complete_day,\n flashcard_creation_method=flashcard_creation_method,\n graded=graded,\n spacing=spacing,\n interleaving=interleaving,\n )\n practice_settings = db(\n (db.course_practice.auth_user_id == auth.user.id)\n & (db.course_practice.course_name == course.course_name)\n )\n\n toc = \"''\"\n if flashcard_creation_method == 2:\n toc = _get_toc_and_questions()\n\n # If the GET request is to open the page for the first time (they're not submitting the form):\n if not (\n \"StartDate\" in request.vars\n or \"EndDate\" in request.vars\n or \"maxPracticeDays\" in request.vars\n or \"maxPracticeQuestions\" in request.vars\n or \"pointsPerDay\" in request.vars\n or \"pointsPerQuestion\" in request.vars\n or \"questionsPerDay\" in request.vars\n or \"flashcardsCreationType\" in request.vars\n or \"question_points\" in request.vars\n or \"graded\" in request.vars\n ):\n return dict(\n course_start_date=course_start_date,\n start_date=start_date,\n end_date=end_date,\n max_practice_days=max_practice_days,\n max_practice_questions=max_practice_questions,\n day_points=day_points,\n question_points=question_points,\n 
questions_to_complete_day=questions_to_complete_day,\n flashcard_creation_method=flashcard_creation_method,\n graded=graded,\n spacing=spacing,\n interleaving=interleaving,\n toc=toc,\n error_start_date=error_start_date,\n error_end_date=error_end_date,\n error_max_practice_days=error_max_practice_days,\n error_max_practice_questions=error_max_practice_questions,\n error_day_points=error_day_points,\n error_question_points=error_question_points,\n error_questions_to_complete_day=error_questions_to_complete_day,\n error_flashcard_creation_method=error_flashcard_creation_method,\n error_graded=error_graded,\n complete=already_exists,\n course=course,\n )\n else:\n try:\n start_date = datetime.datetime.strptime(\n request.vars.get(\"StartDate\", None), \"%Y-%m-%d\"\n ).date()\n if start_date < course_start_date:\n error_start_date = 1\n except Exception:\n error_start_date = 1\n try:\n end_date = datetime.datetime.strptime(\n request.vars.get(\"EndDate\", None), \"%Y-%m-%d\"\n ).date()\n if end_date < start_date:\n error_end_date = 1\n except Exception:\n error_end_date = 1\n if spacing == 1:\n try:\n max_practice_days = int(request.vars.get(\"maxPracticeDays\", None))\n except Exception:\n error_max_practice_days = 1\n else:\n try:\n max_practice_questions = int(\n request.vars.get(\"maxPracticeQuestions\", None)\n )\n except Exception:\n error_max_practice_questions = 1\n if spacing == 1:\n try:\n day_points = float(request.vars.get(\"pointsPerDay\", None))\n except Exception:\n error_day_points = 1\n else:\n try:\n question_points = float(request.vars.get(\"pointsPerQuestion\", None))\n except Exception:\n error_question_points = 1\n if spacing == 1:\n try:\n questions_to_complete_day = int(\n request.vars.get(\"questionsPerDay\", None)\n )\n except Exception:\n error_questions_to_complete_day = 1\n try:\n flashcard_creation_method = int(\n request.vars.get(\"flashcardsCreationType\", None)\n )\n except Exception:\n error_flashcard_creation_method = 1\n try:\n 
graded = int(request.vars.get(\"graded\", None))\n except Exception:\n error_graded = 1\n\n no_error = 0\n if (\n error_start_date == 0\n and error_end_date == 0\n and error_max_practice_days == 0\n and error_max_practice_questions == 0\n and error_day_points == 0\n and error_question_points == 0\n and error_questions_to_complete_day == 0\n and error_flashcard_creation_method == 0\n and error_graded == 0\n ):\n no_error = 1\n if no_error == 1:\n practice_settings.update(\n start_date=start_date,\n end_date=end_date,\n max_practice_days=max_practice_days,\n max_practice_questions=max_practice_questions,\n day_points=day_points,\n question_points=question_points,\n questions_to_complete_day=questions_to_complete_day,\n flashcard_creation_method=flashcard_creation_method,\n graded=graded,\n spacing=spacing,\n interleaving=interleaving,\n )\n\n toc = \"''\"\n if flashcard_creation_method == 2:\n toc = _get_toc_and_questions()\n return dict(\n course_id=auth.user.course_name,\n course_start_date=course_start_date,\n start_date=start_date,\n end_date=end_date,\n max_practice_days=max_practice_days,\n max_practice_questions=max_practice_questions,\n day_points=day_points,\n question_points=question_points,\n questions_to_complete_day=questions_to_complete_day,\n flashcard_creation_method=flashcard_creation_method,\n graded=graded,\n spacing=spacing,\n interleaving=interleaving,\n error_graded=error_graded,\n toc=toc,\n error_start_date=error_start_date,\n error_end_date=error_end_date,\n error_max_practice_days=error_max_practice_days,\n error_max_practice_questions=error_max_practice_questions,\n error_day_points=error_day_points,\n error_question_points=error_question_points,\n error_questions_to_complete_day=error_questions_to_complete_day,\n error_flashcard_creation_method=error_flashcard_creation_method,\n complete=no_error,\n course=course,\n )\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef 
add_practice_items():\n response.title = \"Add Practice Items\"\n course = db(db.courses.course_name == auth.user.course_name).select().first()\n data = json.loads(request.vars.data)\n\n # Was for Python 2.x\n # string_data = [x.encode('UTF8') for x in data]\n # Is for Python 3.x\n string_data = data\n\n now = datetime.datetime.utcnow()\n now_local = now - datetime.timedelta(hours=float(session.timezoneoffset))\n\n students = db((db.auth_user.course_name == auth.user.course_name)).select()\n chapters = db((db.chapters.course_id == course.base_course)).select()\n for chapter in chapters:\n subchapters = db((db.sub_chapters.chapter_id == chapter.id)).select()\n for subchapter in subchapters:\n subchapterTaught = db(\n (db.sub_chapter_taught.course_name == auth.user.course_name)\n & (db.sub_chapter_taught.chapter_label == chapter.chapter_label)\n & (\n db.sub_chapter_taught.sub_chapter_label\n == subchapter.sub_chapter_label\n )\n )\n questions = _get_qualified_questions(\n course.base_course,\n chapter.chapter_label,\n subchapter.sub_chapter_label,\n db,\n )\n if (\n \"{}/{}\".format(chapter.chapter_name, subchapter.sub_chapter_name)\n in string_data\n ):\n if subchapterTaught.isempty() and len(questions) > 0:\n db.sub_chapter_taught.insert(\n course_name=auth.user.course_name,\n chapter_label=chapter.chapter_label,\n sub_chapter_label=subchapter.sub_chapter_label,\n teaching_date=now_local.date(),\n )\n for student in students:\n flashcards = db(\n (db.user_topic_practice.user_id == student.id)\n & (db.user_topic_practice.course_name == course.course_name)\n & (\n db.user_topic_practice.chapter_label\n == chapter.chapter_label\n )\n & (\n db.user_topic_practice.sub_chapter_label\n == subchapter.sub_chapter_label\n )\n )\n if flashcards.isempty():\n db.user_topic_practice.insert(\n user_id=student.id,\n course_name=course.course_name,\n chapter_label=chapter.chapter_label,\n sub_chapter_label=subchapter.sub_chapter_label,\n question_name=questions.first().name,\n 
i_interval=0,\n e_factor=2.5,\n q=0,\n next_eligible_date=now_local.date(),\n # add as if yesterday, so can practice right away\n last_presented=now.date() - datetime.timedelta(1),\n last_completed=now.date() - datetime.timedelta(1),\n creation_time=now,\n timezoneoffset=float(session.timezoneoffset),\n )\n else:\n if not subchapterTaught.isempty():\n subchapterTaught.delete()\n db(\n (db.user_topic_practice.course_name == course.course_name)\n & (\n db.user_topic_practice.chapter_label\n == chapter.chapter_label\n )\n & (\n db.user_topic_practice.sub_chapter_label\n == subchapter.sub_chapter_label\n )\n ).delete()\n return json.dumps(dict(complete=True))\n\n\n# This is the primary controller when the instructor goes to the admin page.\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef admin():\n response.title = \"Admin\"\n sidQuery = db(db.courses.course_name == auth.user.course_name).select().first()\n # Now get the start date\n dateQuery = db(db.courses.course_name == auth.user.course_name).select()\n date = dateQuery[0].term_start_date\n date = date.strftime(\"%m/%d/%Y\")\n my_build = \"\"\n my_vers = 0\n mst_vers = 0\n\n cur_instructors = db(db.course_instructor.course == auth.user.course_id).select(\n db.course_instructor.instructor\n )\n instructordict = {}\n for row in cur_instructors:\n name = db(db.auth_user.id == row.instructor).select(\n db.auth_user.first_name, db.auth_user.last_name\n )\n for person in name:\n instructordict[str(row.instructor)] = (\n person.first_name + \" \" + person.last_name\n )\n\n cur_students = db(\n (db.user_courses.course_id == auth.user.course_id)\n & (db.user_courses.user_id == db.auth_user.id)\n ).select(\n db.user_courses.user_id,\n orderby=db.auth_user.last_name | db.auth_user.first_name,\n )\n\n studentdict = OrderedDict()\n for row in cur_students:\n person = db(db.auth_user.id == row.user_id).select(\n db.auth_user.username, db.auth_user.first_name, 
db.auth_user.last_name\n )\n for identity in person:\n name = (\n identity.first_name\n + \" \"\n + identity.last_name\n + \" (\"\n + identity.username\n + \")\"\n )\n if row.user_id not in instructordict:\n studentdict[row.user_id] = name\n\n course = db(db.courses.course_name == auth.user.course_name).select().first()\n course_attrs = getCourseAttributesDict(course.id)\n\n instructor_course_list = db(\n (db.course_instructor.instructor == auth.user.id)\n & (db.courses.id == db.course_instructor.course)\n & (db.courses.base_course == course.base_course)\n & (db.courses.course_name != course.course_name)\n ).select(db.courses.course_name, db.courses.id)\n base_course_id = (\n db(db.courses.course_name == course.base_course).select(db.courses.id).first()\n )\n base_course_id = base_course_id.id\n curr_start_date = course.term_start_date.strftime(\"%m/%d/%Y\")\n downloads_enabled = \"true\" if sidQuery.downloads_enabled else \"false\"\n allow_pairs = \"true\" if sidQuery.allow_pairs else \"false\"\n keys = (\n db(\n (db.course_lti_map.course_id == auth.user.course_id)\n & (db.lti_keys.id == db.course_lti_map.lti_id)\n )\n .select()\n .first()\n )\n if keys:\n consumer = keys.lti_keys.consumer\n secret = keys.lti_keys.secret\n else:\n consumer = \"\"\n secret = \"\"\n # valid exams to show are:\n # Exams the instructor has created for their course\n # Or exams embedded in the base course. 
Embedded exams will have from_source\n # set to True and will have the base course id instead of this courses id.\n exams = db(\n (db.assignments.is_timed == True)\n & (\n (db.assignments.course == course.id)\n | (\n (db.assignments.from_source == True)\n & (db.assignments.course == base_course_id)\n )\n )\n ).select()\n exams = [x.name for x in exams]\n try:\n motd = open(\"applications/runestone/static/motd.html\").read()\n except Exception:\n motd = \"You can cusomize this mesage by editing /static/motd.html\"\n return dict(\n startDate=date,\n coursename=auth.user.course_name,\n course_id=auth.user.course_name,\n instructors=instructordict,\n students=studentdict,\n curr_start_date=curr_start_date,\n confirm=True,\n build_info=my_build,\n my_vers=my_vers,\n mst_vers=mst_vers,\n course=sidQuery,\n downloads_enabled=downloads_enabled,\n allow_pairs=allow_pairs,\n instructor_course_list=instructor_course_list,\n motd=motd,\n consumer=consumer,\n secret=secret,\n examlist=exams,\n is_instructor=True,\n **course_attrs,\n )\n\n\n# Called in admin.js from courseStudents to populate the list of students\n# eBookConfig.getCourseStudentsURL\[email protected]_login()\ndef course_students():\n response.headers[\"content-type\"] = \"application/json\"\n cur_students = db(\n (db.user_courses.course_id == auth.user.course_id)\n & (db.auth_user.id == db.user_courses.user_id)\n ).select(\n db.auth_user.username,\n db.auth_user.first_name,\n db.auth_user.last_name,\n db.auth_user.id,\n orderby=db.auth_user.last_name | db.auth_user.first_name,\n )\n searchdict = OrderedDict()\n for row in cur_students:\n if not verifyInstructorStatus(auth.user.course_id, row.id):\n name = row.first_name + \" \" + row.last_name\n username = row.username\n searchdict[str(username)] = name\n return json.dumps(searchdict)\n\n\n# Called when an instructor clicks on the grading tab\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef 
# Called when an instructor clicks on the grading tab
@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def grading():
    """Render the grading page.

    Collects, for the current course: every assignment and its questions (in
    assignment order), per-question point values, ISO-formatted due dates,
    the non-instructor student roster, and the chapter/question structure of
    the base course, then returns them (mostly JSON-encoded) to the view.
    """
    response.title = "Grading"
    assignments = {}
    assignments_query = db(db.assignments.course == auth.user.course_id).select()

    assignmentids = {}
    assignment_deadlines = {}
    question_points = {}

    for row in assignments_query:
        assignmentids[row.name] = int(row.id)
        # Retrieve relevant info for each question, ordering them based on their
        # order in the assignment.
        assignment_questions = db(
            (db.assignment_questions.assignment_id == int(row.id))
            & (db.assignment_questions.question_id == db.questions.id)
        ).select(
            db.assignment_questions.question_id,
            db.assignment_questions.points,
            db.questions.name,
            db.questions.question_type,
            db.questions.autograde,
            orderby=db.assignment_questions.sorting_priority,
        )
        questions = []
        if row.name not in question_points:
            question_points[row.name] = {}
        for q in assignment_questions:
            # A "+" suffix marks questions the autograder can score.
            if (
                q.questions.question_type in AUTOGRADEABLE
                or q.questions.autograde == "unittest"
            ):
                name_suff = "+"
            else:
                name_suff = ""
            questions.append(q.questions.name + name_suff)
            question_points[row.name][q.questions.name] = q.assignment_questions.points

        assignments[row.name] = questions
        # Serialize the due date as a UTC ISO-8601 string for the client.
        assignment_deadlines[row.name] = row.duedate.replace(
            tzinfo=datetime.timezone.utc
        ).isoformat()

    cur_students = db(db.user_courses.course_id == auth.user.course_id).select(
        db.user_courses.user_id
    )
    # TODO: investigate why this search dict is overriden by a call to course_students
    # on the grading page load????
    searchdict = {}
    for row in cur_students:
        isinstructor = verifyInstructorStatus(auth.user.course_id, row.user_id)
        logger.debug(f"User {row.user_id} instructor status {isinstructor}")
        if not isinstructor:
            person = (
                db(db.auth_user.id == row.user_id)
                .select(
                    db.auth_user.username,
                    db.auth_user.first_name,
                    db.auth_user.last_name,
                )
                .first()
            )
            name = person.first_name + " " + person.last_name
            username = person.username
            searchdict[username] = name
            logger.debug(f"Added {username} to searchdict")

    course = db(db.courses.id == auth.user.course_id).select().first()
    base_course = course.base_course
    # Map each chapter label of the base course to its (non-page) question names.
    chapter_labels = {}
    chapters_query = db(db.chapters.course_id == base_course).select()
    for row in chapters_query:
        q_list = []
        chapter_questions = db(
            (db.questions.chapter == row.chapter_label)
            & (db.questions.base_course == base_course)
            & (db.questions.question_type != "page")
        ).select(orderby=db.questions.name)
        for chapter_q in chapter_questions:
            q_list.append(chapter_q.name)
        chapter_labels[row.chapter_label] = q_list

    set_latex_preamble(base_course)
    return dict(
        assignmentinfo=json.dumps(assignments),
        students=searchdict,
        chapters=json.dumps(chapter_labels),
        gradingUrl=URL("assignments", "get_problem"),
        autogradingUrl=URL("assignments", "autograde"),
        gradeRecordingUrl=URL("assignments", "record_grade"),
        calcTotalsURL=URL("assignments", "calculate_totals"),
        setTotalURL=URL("assignments", "record_assignment_score"),
        sendLTIGradeURL=URL("assignments", "send_assignment_score_via_LTI"),
        getCourseStudentsURL=URL("admin", "course_students"),
        get_assignment_release_statesURL=URL("admin", "get_assignment_release_states"),
        course_id=auth.user.course_name,
        assignmentids=json.dumps(assignmentids),
        assignment_deadlines=json.dumps(assignment_deadlines),
        question_points=json.dumps(question_points),
        is_instructor=True,
        course=course,
    )
@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def removeStudents():
    """
    Remove one or more students from the current course
    The query string should contain the key studentList which can be either
    a single id from auth_user or it could be a list of ids.

    This does not remove a student from the database but rather marks them as inactive in
    the database and moves them to the basecourse if they are not already enrolled in it.
    """

    baseCourseName = (
        db(db.courses.course_name == auth.user.course_name)
        .select(db.courses.base_course)[0]
        .base_course
    )
    baseCourseID = (
        db(db.courses.course_name == baseCourseName).select(db.courses.id)[0].id
    )
    # Per-question answer tables whose rows are re-homed to the base course.
    answer_tables = [
        "mchoice_answers",
        "clickablearea_answers",
        "codelens_answers",
        "dragndrop_answers",
        "fitb_answers",
        "parsons_answers",
        "shortanswer_answers",
    ]

    # Normalize request.vars["studentList"] into a list of id strings.
    if not isinstance(request.vars["studentList"], str):
        # Multiple ids selected
        studentList = request.vars["studentList"]
    elif request.vars["studentList"] == "None":
        # No id selected
        session.flash = T("No valid students were selected")
        return redirect("/%s/admin/admin" % (request.application))
    else:
        # One id selected
        studentList = [request.vars["studentList"]]

    for studentID in studentList:
        logger.warning(
            "{} has requested the removal of {}".format(auth.user.username, studentID)
        )
        # Guard: ids must be numeric, and an instructor cannot remove themselves.
        if studentID.isdigit() and int(studentID) != auth.user.id:
            # NOTE(review): sid is a Row (single username column), not a plain
            # string, yet it is compared below against string columns such as
            # db.useinfo.sid -- confirm pyDAL coerces this as intended.
            sid = (
                db(db.auth_user.id == int(studentID))
                .select(db.auth_user.username)
                .first()
            )
            # Drop the enrollment record for the current course.
            db(
                (db.user_courses.user_id == int(studentID))
                & (db.user_courses.course_id == auth.user.course_id)
            ).delete()

            baseCourseEnrollment = (
                db(
                    (db.user_courses.user_id == int(studentID))
                    & (db.user_courses.course_id == int(baseCourseID))
                )
                .select(db.user_courses.id)
                .first()
            )
            if baseCourseEnrollment is not None:
                logger.debug(
                    "{} is already enrolled in base course {}".format(
                        studentID, baseCourseName
                    )
                )
            else:
                logger.debug(
                    "moving {} into base course {}".format(studentID, baseCourseName)
                )
                db.user_courses.insert(user_id=int(studentID), course_id=baseCourseID)
            # Mark the student inactive and point them at the base course.
            db(db.auth_user.id == int(studentID)).update(
                course_id=baseCourseID, course_name=baseCourseName, active="F"
            )
            # Re-home the student's activity and answer rows to the base course.
            db(
                (db.useinfo.sid == sid)
                & (db.useinfo.course_id == auth.user.course_name)
            ).update(course_id=baseCourseName)
            for tbl in answer_tables:
                db(
                    (db[tbl].sid == sid)
                    & (db[tbl].course_name == auth.user.course_name)
                ).update(course_name=baseCourseName)
            db(
                (db.code.sid == sid) & (db.code.course_id == auth.user.course_id)
            ).update(course_id=baseCourseID)
            # leave user_chapter_progress and user_sub_chapter_progress alone for now.

    session.flash = T("You have successfully removed students")
    return redirect("/%s/admin/admin" % (request.application))
@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def removeinstructor():
    """
    admin/removeinstructor/<int>

    Remove the instructor whose auth_user id is request.args[0] from the
    current course.  Returns a JSON list with a single boolean indicating
    success; an instructor cannot remove themselves.
    """
    removed = []
    if request.args[0] != str(auth.user.id):
        db(
            (db.course_instructor.instructor == request.args[0])
            & (db.course_instructor.course == auth.user.course_id)
        ).delete()
        removed.append(True)
        return json.dumps(removed)
    else:
        session.flash = T("You cannot remove yourself as an instructor.")
        removed.append(False)
        return json.dumps(removed)


@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def addinstructor():
    """
    admin/addinstructor/<int>

    Promote the existing user whose auth_user id is request.args(0) to
    instructor of the current course.  Returns a JSON status string.
    """
    response.headers["content-type"] = "application/json"
    instructor = request.args(0)
    res = db(db.auth_user.id == instructor).select().first()
    if res:
        db.course_instructor.insert(course=auth.user.course_id, instructor=instructor)
        retval = "Success"
    else:
        # Refuse to create a dangling course_instructor row for an unknown user.
        retval = "Cannot add non-existent user as instructor"
        logger.error("Trying to add non-user {} as instructor".format(instructor))

    return json.dumps(retval)
@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def deletecourse():
    """Delete the current course (only if the requester is one of its
    instructors): move its students back to the base course, drop the
    enrollments, and remove the course row.  Redirects to the index page."""
    course_name = auth.user.course_name
    cset = db(db.courses.course_name == course_name)
    if not cset.isempty():
        res = cset.select(db.courses.id, db.courses.base_course).first()
        courseid = res.id
        basecourse = res.base_course
        # NOTE(review): bcid is a Row (single id column), not a bare id --
        # confirm pyDAL extracts the id when used in update() below.
        bcid = db(db.courses.course_name == basecourse).select(db.courses.id).first()
        qset = db(
            (db.course_instructor.course == courseid)
            & (db.course_instructor.instructor == auth.user.id)
        )
        if not qset.isempty():
            # Remove the instructor link, re-home students to the base course,
            # drop enrollments, then delete the course row itself.
            qset.delete()
            students = db(db.auth_user.course_id == courseid)
            students.update(course_id=bcid)
            uset = db(db.user_courses.course_id == courseid)
            uset.delete()
            db(db.courses.id == courseid).delete()
            try:
                session.clear()
            except Exception:
                session.flash = "Error, %s does not appear to exist" % course_name
        else:
            session.flash = "You are not the instructor of %s" % course_name
    else:
        session.flash = "course, %s, not found" % course_name

    redirect(URL("default", "index"))


@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def removeassign():
    """Delete the assignment whose id is request.vars["assignid"].
    Returns the string "Success" or "Error"."""
    response.headers["content-type"] = "application/json"

    try:
        assignment_id = int(request.vars["assignid"])
    except Exception as e:
        logger.error(
            "Could not remove assignment {} error {}".format(
                request.vars["assignid"], e
            )
        )
        session.flash = "Cannot remove assignment with id of {}".format(
            request.vars["assignid"]
        )
        logger.error("Cannot Remove Assignment {}".format(request.args(0)))
        return "Error"

    logger.debug("Removing assignment {}".format(assignment_id))
    ct = db(db.assignments.id == assignment_id).delete()

    # delete() returns the number of rows removed; exactly one means success.
    if ct == 1:
        return "Success"
    else:
        return "Error"
#
# This is only called by the create button in the popup where you give the assignment
# its initial name. We might be able to refactor save_assignment to work in all cases.
@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def createAssignment():
    """Create a new (possibly duplicated) assignment for the current course.

    Request vars:
        name      -- name for the new assignment (required; must be unique
                     within the course)
        duplicate -- optional id of an existing assignment whose settings
                     and questions should be copied

    Returns JSON: ``{name: new_assignment_id}`` on success, or one of the
    strings ``"ERROR"`` / ``"EXISTS"``.
    """
    response.headers["content-type"] = "application/json"
    name = ""

    if "name" in request.vars and len(request.vars["name"]) > 0:
        name = request.vars["name"]
    else:
        return json.dumps("ERROR")

    course = auth.user.course_id
    logger.debug("Adding new assignment {} for course: {}".format(name, course))
    name_existsQ = len(
        db((db.assignments.name == name) & (db.assignments.course == course)).select()
    )
    if name_existsQ > 0:
        return json.dumps("EXISTS")

    duplicatesource = request.vars["duplicate"]
    if not duplicatesource:
        # Brand-new assignment: due in a week, hidden until released.
        try:
            newassignID = db.assignments.insert(
                course=course,
                name=name,
                duedate=datetime.datetime.utcnow() + datetime.timedelta(days=7),
                released=False,
                visible=False,
                from_source=False,
            )
            db.commit()
        except Exception as ex:
            # BUG FIX: ``ex`` was passed as a %-format argument with no
            # placeholder in the message, so logging raised an internal
            # formatting error and the exception detail was lost.
            logger.error("ERROR CREATING ASSIGNMENT %s", ex)
            return json.dumps("ERROR")
    else:
        # Duplicate: copy the assignment-level settings and every question
        # row from the source assignment.
        old_assignment = db(db.assignments.id == int(duplicatesource)).select().first()
        try:
            newassignID = db.assignments.insert(
                course=course,
                name=name,
                points=old_assignment.points,
                threshold_pct=old_assignment.threshold_pct,
                duedate=old_assignment.duedate,
                allow_self_autograde=old_assignment.allow_self_autograde,
                visible=old_assignment.visible,
                enforce_due=old_assignment.enforce_due,
                is_timed=old_assignment.is_timed,
                time_limit=old_assignment.time_limit,
                nofeedback=old_assignment.nofeedback,
                nopause=old_assignment.nopause,
                description=old_assignment.description,
            )
            old_questions = db(
                db.assignment_questions.assignment_id == old_assignment.id
            ).select()
            for q in old_questions:
                dq = q.as_dict()
                # Point the copied question row at the new assignment; drop the
                # primary key so the insert allocates a fresh one.
                dq["assignment_id"] = newassignID
                del dq["id"]
                db.assignment_questions.insert(**dq)
            db.commit()
        except Exception as ex:
            # BUG FIX: same missing %s placeholder as above.
            logger.error("ERROR DUPLICATING ASSIGNMENT %s", ex)
            return json.dumps("ERROR")

    returndict = {name: newassignID}
    return json.dumps(returndict)
logger.error(\"ERROR DUPLICATING ASSIGNMENT\", ex)\n return json.dumps(\"ERROR\")\n\n returndict = {name: newassignID}\n return json.dumps(returndict)\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef renameAssignment():\n response.headers[\"content-type\"] = \"application/json\"\n try:\n logger.debug(\n \"Renaming {} to {} for course {}.\".format(\n request.vars[\"original\"], request.vars[\"name\"], auth.user.course_id\n )\n )\n assignment_id = request.vars[\"original\"]\n name = request.vars[\"name\"]\n course = auth.user.course_id\n name_existsQ = len(\n db(\n (db.assignments.name == name) & (db.assignments.course == course)\n ).select()\n )\n if name_existsQ > 0:\n return json.dumps(\"EXISTS\")\n db(db.assignments.id == assignment_id).update(name=name)\n except Exception as ex:\n logger.error(ex)\n return json.dumps(\"ERROR\")\n try:\n returndict = {name: assignment_id}\n return json.dumps(returndict)\n except Exception as ex:\n logger.error(ex)\n return json.dumps(\"ERROR\")\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef questionBank():\n \"\"\"called by the questionBank function in admin.js\n Unpack all of the search criteria and then query the questions table\n to find matching questions.\n\n Returns:\n JSON: A list of questions that match the search criteria\n \"\"\"\n response.headers[\"Content-Type\"] = \"application/json\"\n\n row = (\n db(db.courses.id == auth.user.course_id)\n .select(db.courses.course_name, db.courses.base_course)\n .first()\n )\n base_course = row.base_course\n query_clauses = []\n\n # should we search the question by term?\n if request.vars.term:\n term_list = [x.strip() for x in request.vars.term.split()]\n query_clauses.append(db.questions.question.contains(term_list, all=True))\n\n if request.vars[\"chapter\"]:\n chapter_label = (\n db(db.chapters.chapter_label == 
request.vars[\"chapter\"])\n .select(db.chapters.chapter_label)\n .first()\n .chapter_label\n )\n chapterQ = db.questions.chapter == chapter_label\n query_clauses.append(chapterQ)\n\n if request.vars.min_difficulty:\n query_clauses.append(\n db.questions.difficulty > float(request.vars.min_difficulty)\n )\n if request.vars.max_difficulty:\n query_clauses.append(\n db.questions.difficulty < float(request.vars.max_difficulty)\n )\n\n if request.vars[\"author\"]:\n query_clauses.append(db.questions.author == request.vars[\"author\"])\n\n if request.vars[\"constrainbc\"] == \"true\":\n query_clauses.append(db.questions.base_course == base_course)\n\n my_name = f\"{auth.user.first_name} {auth.user.last_name}\"\n privacy_clause = (db.questions.is_private == False) | (\n db.questions.author == my_name\n )\n query_clauses.append(privacy_clause)\n\n is_join = False\n if request.vars.competency:\n is_join = True\n comp_clause = (db.competency.competency == request.vars.competency) & (\n db.competency.question == db.questions.id\n )\n if request.vars.isprim == \"true\":\n comp_clause = comp_clause & (db.competency.is_primary == True)\n\n query_clauses.append(comp_clause)\n\n myquery = query_clauses[0]\n for clause in query_clauses[1:]:\n myquery = myquery & clause\n\n print(myquery)\n rows = db(myquery).select()\n\n questions = []\n for q_row in rows:\n if is_join:\n questions.append((q_row.questions.name, q_row.questions.id))\n else:\n questions.append((q_row.name, q_row.id))\n\n return json.dumps(questions)\n\n\n# Deprecated; use add__or_update_assignment_question instead\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef addToAssignment():\n return add__or_update_assignment_question()\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef getQuestionInfo():\n \"\"\"\n called by the questionBank search interface\n Request Vars required:\n * 
@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def getQuestionInfo():
    """
    called by the questionBank search interface
    Request Vars required:
    * assignment -- integer assignment id
    * question -- the name of the question

    Returns JSON with the question's source, html, author, difficulty and
    tag names, or an empty object when the question is not found.
    """
    question_name = request.vars["question"]
    constrainbc = request.vars.constrainbc

    base_course = (
        db(db.courses.course_name == auth.user.course_name).select().first().base_course
    )
    query = db.questions.name == question_name
    # Optionally restrict the lookup to this course's base course.
    if constrainbc == "true":
        query = query & (db.questions.base_course == base_course)

    row = db(query).select().first()

    if row:
        question_code = row.question
        htmlsrc = row.htmlsrc
        question_author = row.author
        question_difficulty = row.difficulty
        question_id = row.id
    else:
        # Unknown question: hand back an empty JSON object.
        return json.dumps({})

    # Collect the human-readable tag names attached to this question.
    tags = []
    question_tags = db((db.question_tags.question_id == question_id)).select()
    for row in question_tags:
        tag_id = row.tag_id
        tag_name = db((db.tags.id == tag_id)).select(db.tags.tag_name).first().tag_name
        tags.append(" " + str(tag_name))
    # difficulty may be NULL in the db; only coerce to int when present.
    if question_difficulty is not None:
        returnDict = {
            "code": question_code,
            "htmlsrc": htmlsrc,
            "author": question_author,
            "difficulty": int(question_difficulty),
            "tags": tags,
        }
    else:
        returnDict = {
            "code": question_code,
            "htmlsrc": htmlsrc,
            "author": question_author,
            "difficulty": None,
            "tags": tags,
        }

    return json.dumps(returnDict)
@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def edit_question():
    """
    Called to save an updated version of an existing question
    1. Can only be updated by the original author (or an editor); renaming
       onto another author's question name is also refused.

    Returns a JSON status string describing success or the failure reason.
    """
    vars = request.vars
    old_qname = vars["question"]
    new_qname = vars["name"]
    try:
        difficulty = int(vars["difficulty"])
    except Exception:
        difficulty = 0
    tags = vars["tags"]
    base_course = (
        db(db.courses.id == auth.user.course_id)
        .select(db.courses.base_course)
        .first()
        .base_course
    )
    old_question = (
        db((db.questions.name == old_qname) & (db.questions.base_course == base_course))
        .select()
        .first()
    )

    if not old_question:
        return json.dumps("Could not find question {} to update".format(old_qname))

    author = auth.user.first_name + " " + auth.user.last_name
    timestamp = datetime.datetime.utcnow()
    # Chapter/subchapter/type are carried over from the existing question.
    chapter = old_question.chapter
    question_type = old_question.question_type
    subchapter = old_question.subchapter

    question = vars["questiontext"]
    htmlsrc = vars["htmlsrc"]
    private = True if vars["isprivate"] == "true" else False
    # BUG FIX: debugging ``print`` replaced with a logger call so the value
    # goes to the log instead of stdout.
    logger.debug("PRIVATE = %s", private)

    # Overwriting a question in place is only allowed for its author or an editor.
    if (
        old_qname == new_qname
        and old_question.author != author
        and not is_editor(auth.user.id)
    ):
        return json.dumps(
            "You do not own this question and are not an editor. Please assign a new unique id"
        )

    if old_qname != new_qname:
        newq = db(db.questions.name == new_qname).select().first()
        if newq and newq.author != author:
            return json.dumps(
                "Name taken, you cannot replace a question you did not author"
            )

    # Scan the reST source for directives that affect grading/practice.
    autograde = ""
    if re.search(r":autograde:\s+unittest", question):
        autograde = "unittest"
    practice = ""
    topic = None
    if re.search(r":practice:\s+T", question):
        practice = "T"
        topic = "{}/{}".format(chapter, subchapter)

    try:
        new_qid = db.questions.update_or_insert(
            (db.questions.name == new_qname)
            & (db.questions.base_course == base_course),
            difficulty=difficulty,
            question=question,
            name=new_qname,
            author=author,
            base_course=base_course,
            timestamp=timestamp,
            chapter=chapter,
            subchapter=subchapter,
            question_type=question_type,
            htmlsrc=htmlsrc,
            autograde=autograde,
            practice=practice,
            is_private=private,
            topic=topic,
            from_source=False,
        )
        if tags and tags != "null":
            tags = tags.split(",")
            for tag in tags:
                # BUG FIX: this is normal control flow, not an error; it was
                # being logged at ERROR level.
                logger.debug("TAG = %s", tag)
                tag_id = db(db.tags.tag_name == tag).select(db.tags.id).first().id
                db.question_tags.insert(question_id=new_qid, tag_id=tag_id)
        return json.dumps("Success - Edited Question Saved")
    except Exception as ex:
        logger.error(ex)
        return json.dumps("An error occurred saving your question {}".format(str(ex)))
Please assign a new unique id\"\n )\n\n if old_qname != new_qname:\n newq = db(db.questions.name == new_qname).select().first()\n if newq and newq.author != author:\n return json.dumps(\n \"Name taken, you cannot replace a question you did not author\"\n )\n\n autograde = \"\"\n if re.search(r\":autograde:\\s+unittest\", question):\n autograde = \"unittest\"\n practice = \"\"\n topic = None\n if re.search(r\":practice:\\s+T\", question):\n practice = \"T\"\n topic = \"{}/{}\".format(chapter, subchapter)\n\n try:\n new_qid = db.questions.update_or_insert(\n (db.questions.name == new_qname)\n & (db.questions.base_course == base_course),\n difficulty=difficulty,\n question=question,\n name=new_qname,\n author=author,\n base_course=base_course,\n timestamp=timestamp,\n chapter=chapter,\n subchapter=subchapter,\n question_type=question_type,\n htmlsrc=htmlsrc,\n autograde=autograde,\n practice=practice,\n is_private=private,\n topic=topic,\n from_source=False,\n )\n if tags and tags != \"null\":\n tags = tags.split(\",\")\n for tag in tags:\n logger.error(\"TAG = %s\", tag)\n tag_id = db(db.tags.tag_name == tag).select(db.tags.id).first().id\n db.question_tags.insert(question_id=new_qid, tag_id=tag_id)\n return json.dumps(\"Success - Edited Question Saved\")\n except Exception as ex:\n logger.error(ex)\n return json.dumps(\"An error occurred saving your question {}\".format(str(ex)))\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef question_text():\n qname = request.vars.question_name\n constrainbc = request.vars.constrainbc\n base_course = (\n db(db.courses.id == auth.user.course_id)\n .select(db.courses.base_course)\n .first()\n .base_course\n )\n query = db.questions.name == qname\n if constrainbc == \"true\":\n query = query & (db.questions.base_course == base_course)\n try:\n q_text = db(query).select(db.questions.question).first().question\n except Exception:\n q_text = \"Error: Could not find 
source for {} in the database\".format(qname)\n\n if (\n q_text[0:2] == \"\\\\x\"\n ): # workaround Python2/3 SQLAlchemy/DAL incompatibility with text\n q_text = q_text[2:].decode(\"hex\")\n logger.debug(q_text)\n return json.dumps(q_text)\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef gettemplate():\n template = request.args[0]\n returndict = {}\n base = \"\"\n\n returndict[\"template\"] = base + cmap.get(template, \"\").__doc__\n\n base_course = (\n db(db.courses.id == auth.user.course_id)\n .select(db.courses.base_course)\n .first()\n .base_course\n )\n chapters = []\n chaptersrow = db(db.chapters.course_id == base_course).select(\n db.chapters.chapter_name, db.chapters.chapter_label\n )\n for row in chaptersrow:\n chapters.append((row[\"chapter_label\"], row[\"chapter_name\"]))\n logger.debug(chapters)\n returndict[\"chapters\"] = chapters\n\n return json.dumps(returndict)\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef createquestion():\n \"\"\"\n called from the questionBank interface when an instructor adds a new question to\n an assignment by writing it themselves\n request.vars parameters include\n * template - The kind of question\n * name - the unique identifier\n * question - rst source for the question\n * difficulty 0-5\n * tags\n * chapter\n * subchapter 'Exercises' by default\n * isprivate is this question shared with everyone?\n * tab\n * assignmentid': assignmentid\n * points integer number of points\n * timed- is this part of a timed exam\n * htmlsrc htmlsrc from the previewer\n \"\"\"\n row = (\n db(db.courses.id == auth.user.course_id)\n .select(db.courses.course_name, db.courses.base_course)\n .first()\n )\n base_course = row.base_course\n aid = request.vars[\"assignmentid\"]\n if aid == \"undefined\":\n logger.error(\n \"undefined assignmentid by {} for name {} subchap {} question {}\".format(\n 
@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def createquestion():
    """
    called from the questionBank interface when an instructor adds a new question to
    an assignment by writing it themselves
    request.vars parameters include
    * template - The kind of question
    * name - the unique identifier
    * question - rst source for the question
    * difficulty 0-5
    * tags
    * chapter
    * subchapter 'Exercises' by default
    * isprivate is this question shared with everyone?
    * tab
    * assignmentid': assignmentid
    * points integer number of points
    * timed- is this part of a timed exam
    * htmlsrc htmlsrc from the previewer

    Returns JSON: {name: new_question_id, "timed": ..., "points": ...} on
    success, or the string "ERROR".
    """
    row = (
        db(db.courses.id == auth.user.course_id)
        .select(db.courses.course_name, db.courses.base_course)
        .first()
    )
    base_course = row.base_course
    aid = request.vars["assignmentid"]
    # The client sends the literal string "undefined" when no assignment is set.
    if aid == "undefined":
        logger.error(
            "undefined assignmentid by {} for name {} subchap {} question {}".format(
                auth.user.username,
                request.vars.name,
                request.vars.subchapter,
                request.vars.question,
            )
        )
        return json.dumps("ERROR")

    assignmentid = int(aid)
    # Default to 1 point when none is supplied.
    points = int(request.vars["points"]) if request.vars["points"] else 1
    timed = request.vars["timed"]
    unittest = None
    practice = False
    topic = None
    # Scan the reST source for directives that affect grading/practice.
    if re.search(r":autograde:\s+unittest", request.vars.question):
        unittest = "unittest"
    if re.search(r":practice:\s+T", request.vars.question):
        practice = True
        topic = "{}/{}".format(request.vars.chapter, request.vars.subchapter)

    question_type = request.vars["template"]
    # Trust the directive found in the source over the submitted template name.
    g = re.search(r"^\s*.. (\w+)::", request.vars.question)
    if g:
        question_type = g.group(1)
        if question_type != request.vars["template"]:
            logger.error(f"question mismatch for question type {question_type}")

    try:
        newqID = db.questions.insert(
            base_course=base_course,
            name=request.vars["name"].strip(),
            chapter=request.vars["chapter"],
            subchapter=request.vars["subchapter"],
            author=auth.user.first_name + " " + auth.user.last_name,
            autograde=unittest,
            difficulty=request.vars["difficulty"],
            question=request.vars["question"],
            timestamp=datetime.datetime.utcnow(),
            question_type=question_type,
            is_private=request.vars["isprivate"],
            practice=practice,
            from_source=False,
            topic=topic,
            htmlsrc=request.vars["htmlsrc"],
        )

        if request.vars["template"] == "datafile":
            # datafiles are not questions, but we would like instructors to be able
            # to add their own datafiles for projects or exercises. So we store
            # the datafile contents in the database instead of adding a question
            # to the assignment.
            divid = request.vars["name"].strip()
            q = request.vars["question"].lstrip()
            q = q.split("\n")
            # Everything after the first blank line is the file's contents.
            first_blank = q.index("")
            q = "\n".join([x.lstrip() for x in q[first_blank + 1 :]])
            db.source_code.update_or_insert(
                (db.source_code.acid == divid)
                & (db.source_code.course_id == base_course),
                main_code=q,
                course_id=base_course,
                acid=divid,
            )
        else:
            db.assignment_questions.insert(
                assignment_id=assignmentid,
                question_id=newqID,
                timed=timed,
                points=points,
                autograde=unittest or "pct_correct",
                which_to_grade="best_answer",
                sorting_priority=0,
            )

        returndict = {request.vars["name"]: newqID, "timed": timed, "points": points}

        return json.dumps(returndict)
    except Exception as ex:
        logger.error(ex)
        return json.dumps("ERROR")
# @auth.requires(
#     lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
#     requires_login=True,
# )
# replacing the above to allow any logged in account to access getToggleSrc and preview function
@auth.requires_login()
def htmlsrc():
    """
    Get the html source for a question. If just the divid is included then assume that
    the question must come from the current base course. If an assignment_id is provided
    then that question could come from any base course and so make sure it is part of the
    current assignment_questions set.
    """
    acid = request.vars["acid"]
    assignment_id = request.vars.assignmentId
    studentId = request.vars.sid
    htmlsrc = ""
    if assignment_id:
        logger.debug(f"assignment_id = {assignment_id}")
        res = (
            db(
                (db.questions.name == acid)
                & (db.assignment_questions.question_id == db.questions.id)
                & (db.assignment_questions.assignment_id == assignment_id)
            )
            .select(db.questions.htmlsrc, db.questions.question_type)
            .first()
        )
    else:
        res = (
            db(
                (db.questions.name == acid)
                & (db.questions.base_course == db.courses.base_course)
                & (db.courses.course_name == auth.user.course_name)
            )
            .select(db.questions.htmlsrc, db.questions.question_type)
            .first()
        )
    if res and (res.htmlsrc or res.question_type == "selectquestion"):
        if res.question_type == "selectquestion" and studentId:
            # Check the selected_questions table to see which actual question was chosen
            # then get that question.
            realq = (
                db(
                    (db.selected_questions.selector_id == acid)
                    & (db.selected_questions.sid == studentId)
                    & (db.selected_questions.selected_id == db.questions.name)
                )
                .select(db.questions.htmlsrc)
                .first()
            )
            if realq:
                htmlsrc = realq.htmlsrc
        else:
            htmlsrc = res.htmlsrc
    else:
        logger.error(
            "HTML Source not found for %s in course %s", acid, auth.user.course_name
        )
        htmlsrc = "<p>No preview available</p>"
    if (
        htmlsrc and htmlsrc[0:2] == "\\x"
    ):  # Workaround Python3/Python2 SQLAlchemy/DAL incompatibility with text columns
        # NOTE(review): str has no .decode on Python 3 -- confirm this branch
        # is dead or that htmlsrc can actually be bytes here.
        htmlsrc = htmlsrc.decode("hex")
    return json.dumps(htmlsrc)
@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def getGradeComments():
    """Return (as JSON) the saved score and comment for one student (sid)
    on one question (acid) in the current course, or "Error" if none."""

    acid = request.vars["acid"]
    sid = request.vars["sid"]

    c = (
        db(
            (db.question_grades.sid == sid)
            & (db.question_grades.div_id == acid)
            & (db.question_grades.course_name == auth.user.course_name)
        )
        .select()
        .first()
    )
    if c is not None:
        return json.dumps({"grade": c.score, "comments": c.comment})
    else:
        return json.dumps("Error")


def _get_lti_record(oauth_consumer_key):
    # Look up the LTI key row for this consumer; returns None when the key is
    # falsy or unknown.
    if oauth_consumer_key:
        return db(db.lti_keys.consumer == oauth_consumer_key).select().first()


@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def releasegrades():
    """Set the released flag on an assignment and, when releasing, push the
    grades out via LTI if this session came from an LTI consumer.
    Returns "Success" or "ERROR"."""
    try:
        assignmentid = request.vars["assignmentid"]
        released = request.vars["released"] == "yes"
        assignment = db(db.assignments.id == assignmentid).select().first()
        assignment.update_record(released=released)

    except Exception as ex:
        logger.error(ex)
        return "ERROR"

    if released:
        # send lti grades
        assignment = _get_assignment(assignmentid)
        lti_record = _get_lti_record(session.oauth_consumer_key)
        if assignment and lti_record:
            send_lti_grades(
                assignment.id, assignment.points, auth.user.course_id, lti_record, db
            )
    return "Success"


@auth.requires(
    lambda: verifyInstructorStatus(auth.user.course_id, auth.user),
    requires_login=True,
)
def get_assignment_release_states():
    # return a dictionary with the release status of whether grades have been
    # released for each of the assignments for the current course
    try:
        assignments_query = db(db.assignments.course == auth.user.course_id).select()
        return json.dumps({row.name: row.released for row in assignments_query})
    except Exception as ex:
        # NOTE(review): consider logger.error(ex) instead of print here.
        print(ex)
        return json.dumps({})
def _get_toc_and_questions():
    # return a dictionary with a nested dictionary representing everything the
    # picker will need in the instructor's assignment authoring tab

    # Format is documented at https://www.jstree.com/docs/json/

    # try:
    course_row = get_course_row()
    base_course = course_row.base_course

    # First get the chapters associated with the current course, and insert them into the tree
    # Recurse, with each chapter:
    # -- get the subchapters associated with it, and insert into the subdictionary
    # -- Recurse; with each subchapter:
    # -- get the divs associated with it, and insert into the sub-sub-dictionary

    question_picker = []
    # This one doesn't include the questions, but otherwise the same
    reading_picker = []
    # This one is similar to reading_picker, but does not include sub-chapters with no practice question.
    practice_picker = []
    subchapters_taught_query = db(
        (db.sub_chapter_taught.course_name == auth.user.course_name)
        & (db.chapters.course_id == base_course)
        & (db.chapters.chapter_label == db.sub_chapter_taught.chapter_label)
        & (db.sub_chapters.chapter_id == db.chapters.id)
        & (db.sub_chapters.sub_chapter_label == db.sub_chapter_taught.sub_chapter_label)
    ).select(db.chapters.chapter_name, db.sub_chapters.sub_chapter_name)
    chapters_and_subchapters_taught = [
        (row.chapters.chapter_name, row.sub_chapters.sub_chapter_name)
        for row in subchapters_taught_query
    ]
    # All practice-enabled questions for this course's base course.
    topic_query = db(
        (db.courses.course_name == auth.user.course_name)
        & (db.questions.base_course == db.courses.base_course)
        & (db.questions.practice == True)  # noqa: E712
    ).select(
        db.questions.topic,
        db.questions.chapter,
        db.questions.subchapter,
        orderby=db.questions.id,
    )
    for q in topic_query:
        # We know chapter_name and sub_chapter_name include spaces.
        # So we cannot directly use the labels retrieved from q.topic as chapter_name and
        # sub_chapter_name and we need to query the corresponding chapter_name and sub_chapter_name from the
        # corresponding tables.
        topic_not_found = True
        if q.topic is not None:
            topic_not_found = False
            try:
                chap, subch = q.topic.split("/")
            except Exception:
                # a badly formed "topic" for the question; just ignore it
                logger.info("Bad Topic: {}".format(q.topic))
                topic_not_found = True
            try:
                chapter = db(
                    (db.chapters.course_id == base_course)
                    & (db.chapters.chapter_label == chap)
                ).select()[0]

                sub_chapter_name = (
                    db(
                        (db.sub_chapters.chapter_id == chapter.id)
                        & (db.sub_chapters.sub_chapter_label == subch)
                    )
                    .select()[0]
                    .sub_chapter_name
                )
            except Exception:
                # topic's chapter and subchapter are not in the book; ignore this topic
                logger.info(
                    "Missing Chapter {} or Subchapter {} for topic {}".format(
                        chap, subch, q.topic
                    )
                )
                topic_not_found = True

        if topic_not_found:
            # Fall back to the question's own chapter/subchapter labels.
            topic_not_found = False
            chap = q.chapter
            subch = q.subchapter
            try:
                chapter = db(
                    (db.chapters.course_id == base_course)
                    & (db.chapters.chapter_label == chap)
                ).select()[0]

                sub_chapter_name = (
                    db(
                        (db.sub_chapters.chapter_id == chapter.id)
                        & (db.sub_chapters.sub_chapter_label == subch)
                    )
                    .select()[0]
                    .sub_chapter_name
                )
            except Exception:
                # topic's chapter and subchapter are not in the book; ignore this topic
                logger.info("Missing Chapter {} or Subchapter {}".format(chap, subch))
                topic_not_found = True

        if not topic_not_found:
            chapter_name = chapter.chapter_name
            # Find the item in practice picker for this chapter
            p_ch_info = None
            for ch_info in practice_picker:
                if ch_info["text"] == chapter_name:
                    p_ch_info = ch_info
            if not p_ch_info:
                # if there isn't one, add one
                p_ch_info = {}
                practice_picker.append(p_ch_info)
                p_ch_info["text"] = chapter_name
                p_ch_info["children"] = []
            # add the subchapter
            p_sub_ch_info = {}
            if sub_chapter_name not in [
                child["text"] for child in p_ch_info["children"]
            ]:
                p_ch_info["children"].append(p_sub_ch_info)
                p_sub_ch_info["id"] = "{}/{}".format(chapter_name, sub_chapter_name)
                p_sub_ch_info["text"] = sub_chapter_name
                # checked if
                p_sub_ch_info["state"] = {
                    "checked": (chapter_name, sub_chapter_name)
                    in chapters_and_subchapters_taught
                }

    # chapters are associated base_course.
    chapters_query = db((db.chapters.course_id == base_course)).select(
        orderby=db.chapters.chapter_num
    )
    # Sort practice chapters into book order using chapter_num.
    ids = {row.chapter_name: row.chapter_num for row in chapters_query}
    practice_picker.sort(key=lambda d: ids[d["text"]])

    for ch in chapters_query:
        q_ch_info = {}
        question_picker.append(q_ch_info)
        q_ch_info["text"] = ch.chapter_name
        q_ch_info["children"] = []
        # Copy the same stuff for reading picker.
        r_ch_info = {}
        reading_picker.append(r_ch_info)
        r_ch_info["text"] = ch.chapter_name
        r_ch_info["children"] = []
        # practice_questions = db((db.questions.chapter == ch.chapter_label) & \
        #     (db.questions.practice == True))
        # if not practice_questions.isempty():
        #     # Copy the same stuff for practice picker.
        #     p_ch_info = {}
        #     practice_picker.append(p_ch_info)
        #     p_ch_info['text'] = ch.chapter_name
        #     p_ch_info['children'] = []
        # todo: check the chapters attribute to see if its available for readings
        subchapters_query = db(db.sub_chapters.chapter_id == ch.id).select(
            orderby=[db.sub_chapters.sub_chapter_num, db.sub_chapters.sub_chapter_name]
        )
        for sub_ch in subchapters_query:
            q_sub_ch_info = {}
            q_ch_info["children"].append(q_sub_ch_info)
            q_sub_ch_info["text"] = sub_ch.sub_chapter_name
            # Make the Exercises sub-chapters easy to access, since user-written problems will be added there.
            if sub_ch.sub_chapter_name == "Exercises":
                q_sub_ch_info["id"] = ch.chapter_name + " Exercises"
            q_sub_ch_info["children"] = []
            # Copy the same stuff for reading picker.
            if (
                sub_ch.skipreading == "F"
                or sub_ch.skipreading == False  # noqa: E712
                or sub_ch.skipreading == None
            ):
                r_sub_ch_info = {}
                r_ch_info["children"].append(r_sub_ch_info)
                r_sub_ch_info["id"] = "{}/{}".format(
                    ch.chapter_name, sub_ch.sub_chapter_name
                )
                r_sub_ch_info["text"] = sub_ch.sub_chapter_name

            # Only questions authored by this user or marked public appear.
            author = auth.user.first_name + " " + auth.user.last_name
            questions_query = db(
                (db.courses.course_name == auth.user.course_name)
                & (db.questions.base_course == db.courses.base_course)
                & (db.questions.chapter == ch.chapter_label)
                & (db.questions.question_type != "page")
                & (db.questions.subchapter == sub_ch.sub_chapter_label)
                & ((db.questions.author == author) | (db.questions.is_private == "F"))
            ).select(orderby=db.questions.id)
            for question in questions_query:
                if question.questions.qnumber:
                    qlabel = question.questions.qnumber
                else:
                    qlabel = question.questions.name
                q_info = dict(
                    text=qlabel + _add_q_meta_info(question),
                    id=question.questions.name,
                )
                q_sub_ch_info["children"].append(q_info)
    return json.dumps(
        {
            "reading_picker": reading_picker,
            "practice_picker": practice_picker,
            "question_picker": question_picker,
        }
    )
(db.questions.base_course == db.courses.base_course)\n & (db.questions.chapter == ch.chapter_label)\n & (db.questions.question_type != \"page\")\n & (db.questions.subchapter == sub_ch.sub_chapter_label)\n & ((db.questions.author == author) | (db.questions.is_private == \"F\"))\n ).select(orderby=db.questions.id)\n for question in questions_query:\n if question.questions.qnumber:\n qlabel = question.questions.qnumber\n else:\n qlabel = question.questions.name\n q_info = dict(\n text=qlabel + _add_q_meta_info(question),\n id=question.questions.name,\n )\n q_sub_ch_info[\"children\"].append(q_info)\n return json.dumps(\n {\n \"reading_picker\": reading_picker,\n \"practice_picker\": practice_picker,\n \"question_picker\": question_picker,\n }\n )\n\n\n# This is the place to add meta information about questions for the\n# assignment builder\ndef _add_q_meta_info(qrow):\n qt = {\n \"mchoice\": \"Mchoice ✓\",\n \"clickablearea\": \"Clickable ✓\",\n \"youtube\": \"Video\",\n \"activecode\": \"ActiveCode\",\n \"poll\": \"Poll\",\n \"showeval\": \"ShowEval\",\n \"video\": \"Video\",\n \"dragndrop\": \"Matching ✓\",\n \"parsonsprob\": \"Parsons ✓\",\n \"codelens\": \"CodeLens\",\n \"lp_build\": \"LP ✓\",\n \"shortanswer\": \"ShortAns\",\n \"actex\": \"ActiveCode\",\n \"fillintheblank\": \"FillB ✓\",\n \"quizly\": \"Quizly ✓\",\n \"khanex\": \"KhanAcademy ✓\",\n }\n qt = qt.get(qrow.questions.question_type, \"\")\n\n if qrow.questions.autograde:\n ag = \" ✓\"\n else:\n ag = \"\"\n\n if qrow.questions.from_source:\n book = \"📘\"\n else:\n book = \"🏫\"\n\n name = qrow.questions.name\n\n res = \"\"\" <span style=\"color: green\">[{} {} {}\n </span> <span style=\"color: mediumblue\">({})</span>]\n <span>{}...</span>\"\"\".format(\n book, qt, ag, name, qrow.questions.description\n )\n\n return res\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef get_assignment():\n try:\n assignment_id = 
int(request.vars.assignmentid)\n except (TypeError, ValueError):\n assignment_row = None\n else:\n assignment_row = db(db.assignments.id == assignment_id).select().first()\n # Assemble the assignment-level properties\n if not assignment_row:\n logger.error(\n \"UNDEFINED assignment {} {} {}\".format(\n request.vars.assignmentid, auth.user.course_name, auth.user.username\n )\n )\n session.flash = \"Error: assignment ID {} does not exist\".format(\n request.vars.assignmentid\n )\n return redirect(URL(\"assignments\", \"chooseAssignment.html\"))\n\n _set_assignment_max_points(assignment_id)\n assignment_data = {}\n assignment_data[\"assignment_points\"] = assignment_row.points\n try:\n assignment_data[\"due_date\"] = assignment_row.duedate.strftime(\"%Y/%m/%d %H:%M\")\n except Exception as ex:\n logger.error(ex)\n assignment_data[\"due_date\"] = None\n assignment_data[\"description\"] = assignment_row.description\n assignment_data[\"visible\"] = assignment_row.visible\n assignment_data[\"enforce_due\"] = assignment_row.enforce_due\n assignment_data[\"is_timed\"] = assignment_row.is_timed\n assignment_data[\"time_limit\"] = assignment_row.time_limit\n assignment_data[\"from_source\"] = assignment_row.from_source\n assignment_data[\"nofeedback\"] = assignment_row.nofeedback\n assignment_data[\"nopause\"] = assignment_row.nopause\n assignment_data[\"is_peer\"] = assignment_row.is_peer\n\n # Still need to get:\n # -- timed properties of assignment\n # (See https://github.com/RunestoneInteractive/RunestoneServer/issues/930)\n base_course = (\n db(db.courses.id == auth.user.course_id)\n .select(db.courses.base_course)\n .first()\n .base_course\n )\n # Assemble the readings (subchapters) that are part of the assignment\n a_q_rows = db(\n (db.assignment_questions.assignment_id == assignment_id)\n & (db.assignment_questions.question_id == db.questions.id)\n & (db.questions.question_type == \"page\")\n ).select(orderby=db.assignment_questions.sorting_priority)\n pages_data = 
[]\n for row in a_q_rows:\n if row.questions.question_type == \"page\":\n # get the count of 'things to do' in this chap/subchap\n activity_count = db(\n (db.questions.chapter == row.questions.chapter)\n & (db.questions.subchapter == row.questions.subchapter)\n & (db.questions.from_source == \"T\")\n & (\n (db.questions.optional == False) | (db.questions.optional == None)\n ) # noqa #711\n & (db.questions.base_course == base_course)\n ).count()\n\n pages_data.append(\n dict(\n name=row.questions.name,\n points=row.assignment_questions.points,\n autograde=row.assignment_questions.autograde,\n activity_count=activity_count,\n activities_required=row.assignment_questions.activities_required,\n which_to_grade=row.assignment_questions.which_to_grade,\n autograde_possible_values=AUTOGRADE_POSSIBLE_VALUES[\n row.questions.question_type\n ],\n which_to_grade_possible_values=WHICH_TO_GRADE_POSSIBLE_VALUES[\n row.questions.question_type\n ],\n )\n )\n\n # Assemble the questions that are part of the assignment\n a_q_rows = db(\n (db.assignment_questions.assignment_id == assignment_id)\n & (db.assignment_questions.question_id == db.questions.id)\n & (db.assignment_questions.reading_assignment == None) # noqa: E711\n ).select(orderby=db.assignment_questions.sorting_priority)\n # return json.dumps(db._lastsql)\n questions_data = []\n for row in a_q_rows:\n logger.debug(row.questions.question_type)\n if row.questions.question_type != \"page\":\n questions_data.append(\n dict(\n name=row.questions.name,\n points=row.assignment_questions.points,\n autograde=row.assignment_questions.autograde,\n which_to_grade=row.assignment_questions.which_to_grade,\n autograde_possible_values=AUTOGRADE_POSSIBLE_VALUES[\n row.questions.question_type\n ],\n which_to_grade_possible_values=WHICH_TO_GRADE_POSSIBLE_VALUES[\n row.questions.question_type\n ],\n )\n )\n\n return json.dumps(\n dict(\n assignment_data=assignment_data,\n pages_data=pages_data,\n questions_data=questions_data,\n )\n 
)\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef save_assignment():\n # This endpoint is for saving (updating) an assignment's top-level information, without any\n # questions or readings that might be part of the assignment\n # Should return the id of the assignment, if one is not passed in\n\n # The following fields must be provided in request.vars (see modesl/grouped_assignments.py for model definition):\n # -- assignment_id (if it's an existing assignment; if none provided, then we insert a new assignment)\n # -- description\n # -- duedate\n\n assignment_id = request.vars.get(\"assignment_id\")\n isVisible = request.vars[\"visible\"]\n isEnforced = request.vars[\"enforce_due\"]\n is_timed = request.vars[\"is_timed\"]\n time_limit = request.vars[\"timelimit\"]\n nofeedback = request.vars[\"nofeedback\"]\n nopause = request.vars[\"nopause\"]\n is_peer = request.vars[\"is_peer\"]\n try:\n d_str = request.vars[\"due\"]\n format_str = \"%Y/%m/%d %H:%M\"\n due = datetime.datetime.strptime(d_str, format_str)\n except Exception:\n logger.error(\"Bad Date format for assignment: {}\".format(d_str))\n due = datetime.datetime.utcnow() + datetime.timedelta(7)\n try:\n total = _set_assignment_max_points(assignment_id)\n db(db.assignments.id == assignment_id).update(\n course=auth.user.course_id,\n description=request.vars[\"description\"],\n points=total,\n duedate=due,\n is_timed=is_timed,\n visible=isVisible,\n enforce_due=isEnforced,\n time_limit=time_limit,\n nofeedback=nofeedback,\n nopause=nopause,\n is_peer=is_peer,\n current_index=0,\n )\n return json.dumps({request.vars[\"name\"]: assignment_id, \"status\": \"success\"})\n except Exception as ex:\n logger.error(ex)\n return json.dumps(\"ERROR\")\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef add__or_update_assignment_question():\n # This endpoint is for adding a question 
to an assignment, or updating an existing assignment_question\n\n # The following fields should be provided in request.vars:\n # -- assignment (an integer)\n # -- question (the question_name)\n # -- questionid\n # -- points\n # -- autograde\n # -- which_to_grade\n # -- reading_assignment (boolean, true if it's a page to visit rather than a directive to interact with)\n # -- sort_position (optional)\n if request.vars.assignment == \"undefined\":\n session.flash = (\n \"Error: Unable to update assignment in DB. No assignment is selected\"\n )\n return redirect(URL(\"admin\", \"assignments\"))\n\n assignment_id = int(request.vars[\"assignment\"])\n question_name = request.vars[\"question\"]\n question_id = request.vars.question_id\n if question_id:\n question_id = int(question_id)\n\n logger.debug(\n \"adding or updating assign id {} question_name {}\".format(\n assignment_id, question_name\n )\n )\n # This assumes that question will always be in DB already, before an assignment_question is created\n logger.debug(\"course_id %s\", auth.user.course_id)\n if not question_id:\n question_id = _get_question_id(question_name, auth.user.course_id)\n if question_id is None:\n logger.error(\n \"Question Not found for name = {} course = {}\".format(\n question_name, auth.user.course_id\n )\n )\n session.flash = \"Error: Cannot find question {} in the database\".format(\n question_name\n )\n return redirect(URL(\"admin\", \"assignments\"))\n\n base_course = (\n db(db.courses.id == auth.user.course_id)\n .select(db.courses.base_course)\n .first()\n .base_course\n )\n logger.debug(\"base course %s\", base_course)\n question_type = db.questions[question_id].question_type\n chapter = db.questions[question_id].chapter\n subchapter = db.questions[question_id].subchapter\n auto_grade = db.questions[question_id].autograde\n\n # Get the current sorting priority for a question, if its there.\n # otherwise assign it to the end of the list.\n tmpSp = request.vars[\"sort_position\"]\n if not 
tmpSp:\n tmpSp = _get_question_sorting_priority(assignment_id, question_id)\n\n if tmpSp is None:\n tmpSp = _get_max_sorting_priority(assignment_id) or 0\n sp = 1 + tmpSp\n else:\n sp = tmpSp\n\n activity_count = 0\n if question_type == \"page\":\n reading_assignment = \"T\"\n # get the count of 'things to do' in this chap/subchap\n activity_count = db(\n (db.questions.chapter == chapter)\n & (db.questions.subchapter == subchapter)\n & (db.questions.from_source == \"T\")\n & (db.questions.base_course == base_course)\n ).count()\n try:\n activities_required = int(request.vars.get(\"activities_required\"))\n if activities_required == -1:\n activities_required = max(int(activity_count * 0.8), 1)\n except Exception:\n logger.error(\"No Activities set for RA %s\", question_name)\n activities_required = None\n\n else:\n reading_assignment = None\n activities_required = None\n\n # Have to use try/except here instead of request.vars.get in case the points is '',\n # which doesn't convert to int\n try:\n points = int(request.vars[\"points\"])\n except Exception:\n points = activity_count\n\n # If no autograde type is provided, use ``interact`` as a fallback, since this works for all questions types.\n autograde = request.vars.get(\"autograde\", \"interact\")\n # Use ``best_answer`` as a safe fallback (see ``WHICH_TO_GRADE_POSSIBLE_VALUES`` in this file. 
)\n which_to_grade = request.vars.get(\"which_to_grade\", \"best_answer\")\n # Make sure the defaults are set correctly for activecode Qs\n if (\n question_type in (\"activecode\", \"actex\") and auto_grade != \"unittest\"\n ): # No unit tests for this question\n if autograde and autograde not in (\"manual\", \"interact\"):\n autograde = \"manual\"\n which_to_grade = \"\"\n if autograde is None:\n autograde = \"pct_correct\"\n try:\n # save the assignment_question\n db.assignment_questions.update_or_insert(\n (db.assignment_questions.assignment_id == assignment_id)\n & (db.assignment_questions.question_id == question_id),\n assignment_id=assignment_id,\n question_id=question_id,\n activities_required=activities_required,\n points=points,\n autograde=autograde,\n which_to_grade=which_to_grade,\n reading_assignment=reading_assignment,\n sorting_priority=sp,\n )\n total = _set_assignment_max_points(assignment_id)\n return json.dumps(\n dict(\n total=total,\n activity_count=activity_count,\n activities_required=activities_required,\n autograde_possible_values=AUTOGRADE_POSSIBLE_VALUES[question_type],\n which_to_grade_possible_values=WHICH_TO_GRADE_POSSIBLE_VALUES[\n question_type\n ],\n status=\"success\",\n question_id=question_name,\n points=points,\n autograde=autograde,\n which_to_grade=which_to_grade,\n assign_type=request.vars.assign_type,\n )\n )\n except Exception as ex:\n logger.error(ex)\n return json.dumps(\"Error\")\n\n\n# As we move toward a question bank model for questions, this relaxes the\n# question belongs to a course idea by first simply searching for the question\n# by name. If there is only one match then no problem. 
If there is more than one\n# then the base course of the current user should be preferred to ensure\n# backward compatibility.\ndef _get_question_id(question_name, course_id):\n # first try to just get the question by name.\n question = db((db.questions.name == question_name)).select(db.questions.id)\n # if there is more than one then use the course_id\n if len(question) > 1:\n question = (\n db(\n (db.questions.name == question_name)\n & (db.questions.base_course == db.courses.base_course)\n & (db.courses.id == course_id)\n )\n .select(db.questions.id)\n .first()\n )\n else:\n question = question[0]\n\n if question:\n return int(question.id)\n else:\n # Hmmm, what should we do if not found?\n return None\n\n # return int(db((db.questions.name == question_name) &\n # (db.questions.base_course == db.courses.base_course) &\n # (db.courses.id == course_id)\n # ).select(db.questions.id).first().id)\n\n\ndef _get_max_sorting_priority(assignment_id):\n max = db.assignment_questions.sorting_priority.max()\n return (\n db((db.assignment_questions.assignment_id == assignment_id))\n .select(max)\n .first()[max]\n )\n\n\ndef _get_question_sorting_priority(assignment_id, question_id):\n res = (\n db(\n (db.assignment_questions.assignment_id == assignment_id)\n & (db.assignment_questions.question_id == question_id)\n )\n .select(db.assignment_questions.sorting_priority)\n .first()\n )\n if res is not None:\n return res[\"sorting_priority\"]\n else:\n return res\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef delete_assignment_question():\n ## Deletes one assignment_question\n try:\n question_name = request.vars[\"name\"]\n assignment_id = int(request.vars[\"assignment_id\"])\n question_id = _get_question_id(question_name, auth.user.course_id)\n logger.debug(\"DELETEING A: %s Q:%s \", assignment_id, question_id)\n db(\n (db.assignment_questions.assignment_id == assignment_id)\n & 
(db.assignment_questions.question_id == question_id)\n ).delete()\n total = _set_assignment_max_points(assignment_id)\n return json.dumps({\"total\": total})\n except Exception as ex:\n logger.error(ex)\n return json.dumps(\"Error\")\n\n\[email protected]_membership(\"editor\")\ndef delete_question():\n qname = request.vars[\"name\"]\n base_course = request.vars[\"base_course\"]\n\n try:\n db(\n (db.questions.name == qname) & (db.questions.base_course == base_course)\n ).delete()\n return json.dumps({\"status\": \"Success\"})\n except Exception as ex:\n logger.error(ex)\n return json.dumps({\"status\": \"Error\"})\n\n\ndef _set_assignment_max_points(assignment_id):\n \"\"\"Called after a change to assignment questions.\n Recalculate the total, save it in the assignment row\n and return it.\"\"\"\n sum_op = db.assignment_questions.points.sum()\n total = (\n db(db.assignment_questions.assignment_id == assignment_id)\n .select(sum_op)\n .first()[sum_op]\n )\n if total is None:\n total = 0\n db(db.assignments.id == assignment_id).update(points=total)\n return total\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef reorder_assignment_questions():\n \"\"\"Called when the questions are reordered in the instructor assignments interface.\n request.vars must include:\n -- names: a list of strings for question_names\n -- assignment_id: a database record id\n\n The names list should be a list of *all* assignment_questions of that type (i.e., all that have the\n boolean reading_assignment flag set to True, or all that have it set to False).\n We will reassign sorting_priorities to all of them.\n \"\"\"\n question_names = request.vars[\"names[]\"] # a list of question_names\n assignment_id = int(request.vars[\"assignment_id\"])\n i = 0\n for name in question_names:\n i += 1\n question_id = _get_question_id(name, auth.user.course_id)\n db(\n (db.assignment_questions.question_id == question_id)\n & 
(db.assignment_questions.assignment_id == assignment_id)\n ).update(sorting_priority=i)\n\n return json.dumps(\"Reordered in DB\")\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef copy_assignment():\n \"\"\"\n vars:\n - oldassignment id or todo (-1 for all assignments)\n - course\n \"\"\"\n\n res = None\n if not verifyInstructorStatus(request.vars[\"course\"], auth.user):\n return \"Error: Not Authorized\"\n else:\n if request.vars.oldassignment == \"-1\":\n assignments = db(\n (db.assignments.course == db.courses.id)\n & (db.courses.course_name == request.vars[\"course\"])\n ).select()\n for a in assignments:\n print(\"A = {}\".format(a))\n res = _copy_one_assignment(request.vars[\"course\"], a.assignments[\"id\"])\n if res != \"success\":\n break\n else:\n res = _copy_one_assignment(\n request.vars[\"course\"], request.vars[\"oldassignment\"]\n )\n if res is None:\n return \"Error: No Assignments to copy\"\n else:\n return res\n\n\ndef _copy_one_assignment(course, oldid):\n old_course = db(db.courses.course_name == course).select().first()\n this_course = db(db.courses.course_name == auth.user.course_name).select().first()\n old_assignment = db(db.assignments.id == int(oldid)).select().first()\n due_delta = old_assignment.duedate.date() - old_course.term_start_date\n due_date = this_course.term_start_date + due_delta\n try:\n newassign_id = db.assignments.insert(\n course=auth.user.course_id,\n name=old_assignment.name,\n duedate=due_date,\n description=old_assignment.description,\n points=old_assignment.points,\n threshold_pct=old_assignment.threshold_pct,\n )\n except Exception as e:\n return f\"failed: {str(e)}\"\n\n old_questions = db(\n db.assignment_questions.assignment_id == old_assignment.id\n ).select()\n for q in old_questions:\n dq = q.as_dict()\n dq[\"assignment_id\"] = newassign_id\n del dq[\"id\"]\n db.assignment_questions.insert(**dq)\n\n return \"success\"\n\n\[email 
protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef courselog():\n thecourse = db(db.courses.id == auth.user.course_id).select().first()\n course = auth.user.course_name\n\n data = pd.read_sql_query(\n \"\"\"\n select sid, useinfo.timestamp, event, act, div_id, chapter, subchapter\n from useinfo left outer join questions on div_id = name and questions.base_course = '{}'\n where course_id = '{}'\n order by useinfo.id\n \"\"\".format(\n thecourse.base_course, course\n ),\n settings.database_uri.replace(\"postgres://\", \"postgresql://\"),\n )\n data = data[~data.sid.str.contains(r\"^\\d{38,38}@\")]\n\n response.headers[\"Content-Type\"] = \"application/vnd.ms-excel\"\n response.headers[\n \"Content-Disposition\"\n ] = \"attachment; filename=data_for_{}.csv\".format(auth.user.course_name)\n return data.to_csv(na_rep=\" \")\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef codelog():\n course = auth.user.course_name\n\n data = pd.read_sql_query(\n \"\"\"\n select * from code where course_id = {}\n \"\"\".format(\n auth.user.course_id\n ),\n settings.database_uri.replace(\"postgres://\", \"postgresql://\"),\n )\n data = data[~data.sid.str.contains(r\"^\\d{38,38}@\")]\n\n response.headers[\"Content-Type\"] = \"application/vnd.ms-excel\"\n response.headers[\n \"Content-Disposition\"\n ] = \"attachment; filename=data_for_{}.csv\".format(auth.user.course_name)\n return data.to_csv(na_rep=\" \")\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef update_course():\n response.headers[\"Content-Type\"] = \"application/json\"\n\n thecourse = db(db.courses.id == auth.user.course_id).select().first()\n if thecourse:\n if \"new_date\" in request.vars:\n new_date = request.vars[\"new_date\"]\n try:\n new_date = str(parse(new_date).date())\n db(db.courses.id == 
thecourse.id).update(term_start_date=new_date)\n except ValueError:\n logger.error(\"Bad Date in update_course {}\".format(new_date))\n return json.dumps(dict(status=\"failed\"))\n if \"allow_pairs\" in request.vars:\n db(db.courses.id == thecourse.id).update(\n allow_pairs=(request.vars[\"allow_pairs\"] == \"true\")\n )\n if \"downloads_enabled\" in request.vars:\n print(\"DOWNLOADS = \", request.vars.enable_downloads)\n db(db.courses.id == thecourse.id).update(\n downloads_enabled=(request.vars[\"downloads_enabled\"] == \"true\")\n )\n if \"enable_compare_me\" in request.vars:\n db.course_attributes.update_or_insert(\n (db.course_attributes.course_id == thecourse.id)\n & (db.course_attributes.attr == \"enable_compare_me\"),\n course_id=thecourse.id,\n attr=\"enable_compare_me\",\n value=request.vars.enable_compare_me,\n )\n return json.dumps(dict(status=\"success\"))\n\n return json.dumps(dict(status=\"failed\"))\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef flag_question():\n qname = request.vars[\"question_name\"]\n\n base_course = (\n db(db.courses.id == auth.user.course_id)\n .select(db.courses.base_course)\n .first()\n .base_course\n )\n db((db.questions.name == qname) & (db.questions.base_course == base_course)).update(\n review_flag=\"T\"\n )\n\n return json.dumps(dict(status=\"success\"))\n\n\[email protected]_membership(\"editor\")\ndef clear_flag():\n qname = request.vars[\"question_name\"]\n base_course = request.vars[\"basecourse\"]\n\n db((db.questions.name == qname) & (db.questions.base_course == base_course)).update(\n review_flag=\"F\"\n )\n\n return json.dumps(dict(status=\"success\"))\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef enroll_students():\n if \"students\" not in request.vars:\n session.flash = \"please choose a CSV file with student data\"\n return redirect(URL(\"admin\", \"admin\"))\n 
students = request.vars.students\n the_course = db(db.courses.course_name == auth.user.course_name).select().first()\n try:\n # use utf-8-sig because it will work with files from excel that have\n # the byte order marker BOM set as an invisible first character in the file\n strfile = io.TextIOWrapper(students.file, encoding=\"utf-8-sig\")\n logger.debug(type(students.file))\n student_reader = csv.reader(strfile)\n validfile = io.TextIOWrapper(students.file, encoding=\"utf-8-sig\")\n validation_reader = csv.reader(validfile)\n except Exception as e:\n session.flash = \"please choose a CSV file with student data\"\n logger.error(e)\n return redirect(URL(\"admin\", \"admin\"))\n messages = []\n line = 0\n for row in validation_reader:\n line += 1\n if len(row) == 6:\n res = _validateUser(row[0], row[4], row[2], row[3], row[1], row[5], line)\n else:\n res = [f\"Error on line {line} you should have 6 fields\"]\n if res:\n messages.extend(res)\n\n if messages:\n return dict(\n coursename=auth.user.course_name,\n course_id=auth.user.course_name,\n course=the_course,\n messages=messages,\n )\n counter = 0\n success = True\n students.file.seek(0) # reset the file pointer for underlying data\n try:\n for row in student_reader:\n logger.debug(f\"ROW = {row}\")\n if len(row) < 6 or (len(row) > 6 and row[6] != \"\"):\n raise ValueError(\"CSV must provide six values for each user\")\n # CSV: username, email, fname, lname, password, course_name, db\n # Params: username, password, fname, lname, email, course_name,\n # If there are more than 6 values they are likey empty colums\n # we will ignore them. 
If it runs out wrong then there will\n # be some kind of error in the rest of the processing\n if row[0] != \"\":\n createUser(row[0], row[4], row[2], row[3], row[1], row[5])\n counter += 1\n else:\n logger.error(\"Skipping empty records in CSV\")\n except Exception as e:\n logger.error(e)\n db.rollback()\n counter = 0\n session.flash = \"Error creating users: {}\".format(e)\n success = False\n\n if success:\n db.commit()\n session.flash = \"created {} new users\".format(counter)\n\n return redirect(URL(\"admin\", \"admin\"))\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef resetpw():\n sid = int(request.vars.sid)\n newpw = request.vars.newpass\n user = db(db.auth_user.id == sid).select().first()\n logger.warning(\n \"Attempted password reset for {} by {}\".format(\n user.username, auth.user.username\n )\n )\n cl = db(db.user_courses.user_id == user.id).select(db.user_courses.course_id)\n course_list = [row.course_id for row in cl]\n if user.id == auth.user.id:\n res = {\"status\": \"fail\", \"message\": \"Sorry you cannot update your own password\"}\n return json.dumps(res)\n if user.course_id == auth.user.course_id or (auth.user.course_id in course_list):\n pw = CRYPT(auth.settings.hmac_key)(newpw)[0]\n db(db.auth_user.id == sid).update(password=pw)\n res = {\n \"status\": \"success\",\n \"message\": \"Success Reset password for {} {} ({})\".format(\n user.first_name, user.last_name, user.username\n ),\n }\n else:\n logger.error(\"Password reset not authorized for {}\".format(user.username))\n res = {\"status\": \"fail\", \"message\": \"You are not authorized for this user\"}\n\n return json.dumps(res)\n\n\[email protected]_membership(\"editor\")\ndef manage_exercises():\n books = db(db.editor_basecourse.editor == auth.user).select()\n the_course = db(db.courses.course_name == auth.user.course_name).select().first()\n qlist = []\n chapinfo = {}\n for book in books:\n questions = db(\n 
(db.questions.review_flag == \"T\")\n & (db.questions.base_course == book.base_course)\n & (\n (db.questions.from_source == \"F\") | (db.questions.from_source == None)\n ) # noqa: E711\n ).select(\n db.questions.htmlsrc,\n db.questions.difficulty,\n db.questions.name,\n db.questions.base_course,\n db.questions.chapter,\n )\n for q in questions:\n qlist.append(q)\n\n chapters = db(db.chapters.course_id == book.base_course).select(\n db.chapters.chapter_name,\n db.chapters.chapter_label,\n db.chapters.course_id,\n orderby=db.chapters.chapter_num,\n )\n chapinfo[book.base_course] = {}\n for chap in chapters:\n chapinfo[book.base_course][chap.chapter_label] = {\n \"title\": chap.chapter_name,\n \"basecourse\": book.base_course,\n }\n\n return dict(\n questioninfo=qlist,\n course=the_course,\n gradingUrl=URL(\"assignments\", \"get_problem\"),\n autogradingUrl=URL(\"assignments\", \"autograde\"),\n gradeRecordingUrl=URL(\"assignments\", \"record_grade\"),\n calcTotalsURL=URL(\"assignments\", \"calculate_totals\"),\n setTotalURL=URL(\"assignments\", \"record_assignment_score\"),\n sendLTIGradeURL=URL(\"assignments\", \"send_assignment_score_via_LTI\"),\n getCourseStudentsURL=URL(\"admin\", \"course_students\"),\n get_assignment_release_statesURL=URL(\"admin\", \"get_assignment_release_states\"),\n course_id=auth.user.course_name,\n tags=[],\n chapdict=chapinfo,\n )\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef get_assignment_list():\n course_name = request.vars.course_name\n course = db(db.courses.course_name == course_name).select().first()\n assign_list = db(db.assignments.course == course.id).select(\n db.assignments.id, db.assignments.name, orderby=db.assignments.duedate\n )\n res = []\n for assign in assign_list:\n res.append({\"id\": assign.id, \"name\": assign.name})\n\n return json.dumps(dict(assignments=res))\n\n\n# Create LTI Keys\n# ---------------\[email protected](\n lambda: 
verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef create_lti_keys():\n \"\"\"\n Generate a consumer and a secret key. Store them in the database\n and associate this key with the course of the instructor.\n The course_lti_mamp may look a little superflous now, but I think it will grow.\n There is no real magic about the keys so using a UUID seems like just as good\n a solution as anything.\n\n This API is triggered by the generateLTIKeys() function in admin.js and is\n one panel of the main admin page.\n\n Returns:\n JSON: A JSON object with the keys\n \"\"\"\n consumer = auth.user.course_name + \"-\" + str(uuid.uuid1())\n secret = str(uuid.uuid4())\n\n ltid = db.lti_keys.insert(consumer=consumer, secret=secret, application=\"runestone\")\n db.course_lti_map.insert(course_id=auth.user.course_id, lti_id=ltid)\n\n return json.dumps(dict(consumer=consumer, secret=secret))\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef simulate_exam():\n \"\"\"Simulate the distribution of questions on an exam\"\"\"\n\n # select * from assignment_questions join questions on question_id = questions.id where assignment_id =24;\n assignment_id = request.vars.assignment_id\n questions = db(\n (db.assignment_questions.question_id == db.questions.id)\n & (db.assignment_questions.assignment_id == assignment_id)\n ).select()\n\n proflist = []\n qsel = {}\n for q in questions:\n m = re.search(r\":proficiency:\\s+(\\w+)\", q.questions.question or \"\")\n if m:\n proflist.append(m.group(1))\n m = re.search(r\":fromid:\\s+(.*?)\\n\", q.questions.question or \"\", re.DOTALL)\n if m:\n qlist = m.group(1).split(\",\")\n qlist = [x.strip() for x in qlist]\n qsel[q.questions.name] = qlist\n\n logger.debug(f\"proficiency list {proflist}\")\n logger.debug(f\"questions {qsel}\")\n\n selections = {}\n for i in range(100):\n selections[i] = []\n for comp in proflist:\n q = 
find_question_for_prof(comp)\n selections[i].append(q)\n for k in qsel:\n selections[i].append(get_id_from_qname(random.choice(qsel[k])))\n\n logger.debug(f\"selected questions = {selections}\")\n\n all_p_profs = []\n all_s_profs = []\n for student in selections:\n for q in selections[student]:\n p_profs, s_profs = get_proficiencies_for_qid(q)\n all_p_profs.extend(p_profs)\n all_s_profs.extend(s_profs)\n\n pc = Counter(all_p_profs)\n sc = Counter(all_s_profs)\n df1 = pd.DataFrame({\"comp\": list(pc.keys()), \"freq\": list(pc.values())})\n df1[\"kind\"] = \"primary\"\n df2 = pd.DataFrame({\"comp\": list(sc.keys()), \"freq\": list(sc.values())})\n df2[\"kind\"] = \"secondary\"\n df = pd.concat([df1, df2])\n df[\"exam\"] = assignment_id\n\n bar_order = alt.EncodingSortField(field=\"freq\", op=\"sum\", order=\"descending\")\n c = (\n alt.Chart(df)\n .mark_bar()\n .encode(x=\"freq\", y=alt.Y(\"comp\", sort=bar_order), tooltip=\"freq\", color=\"kind\")\n )\n hmdata = c.to_json()\n tblhtml = df.to_html()\n\n return dict(\n course_id=auth.user.course_name,\n course=get_course_row(db.courses.ALL),\n hmdata=hmdata,\n tblhtml=tblhtml,\n )\n\n\ndef find_question_for_prof(prof):\n questionlist = []\n res = db(\n (db.competency.competency == prof) & (db.competency.question == db.questions.id)\n ).select(db.questions.id)\n if res:\n questionlist = [row.id for row in res]\n # logger.debug(questionlist)\n\n return random.choice(questionlist)\n\n\ndef get_id_from_qname(name):\n res = db(db.questions.name == name).select(db.questions.id).first()\n if res:\n logger.debug(res)\n return res.id\n\n\ndef get_proficiencies_for_qid(qid):\n res = db(db.competency.question == qid).select()\n\n plist = [p.competency for p in res if p.is_primary]\n slist = [p.competency for p in res if not p.is_primary]\n return plist, slist\n\n\[email protected](\n lambda: verifyInstructorStatus(auth.user.course_id, auth.user),\n requires_login=True,\n)\ndef reset_exam():\n sid = request.vars.student_id\n 
assignment_name = request.vars.exam_name\n\n res = db(db.auth_user.id == sid).select().first()\n if res:\n username = res.username\n else:\n return json.dumps({\"status\": \"Failed\", \"mess\": \"Unknown Student\"})\n\n # Remove records from the timed exam table\n num_del = db(\n (db.timed_exam.div_id == assignment_name) & (db.timed_exam.sid == username)\n ).delete()\n if num_del == 0:\n return json.dumps({\"status\": \"Failed\", \"mess\": \"Nothing saved\"})\n\n exam_qs = db(\n (db.assignments.name == assignment_name)\n & (db.assignments.course == auth.user.course_id)\n & (db.assignments.id == db.assignment_questions.assignment_id)\n & (db.questions.id == db.assignment_questions.question_id)\n ).select(db.questions.name)\n\n for q in exam_qs:\n num = db(\n (db.selected_questions.selector_id == q.name)\n & (db.selected_questions.sid == username)\n ).delete()\n if num > 0:\n logger.debug(f\"deleted {q.name} for {username} {num}\")\n\n return json.dumps({\"status\": \"Success\", \"mess\": \"Successfully Reset Exam\"})\n\n\ndef killer():\n print(routes_onerror)\n x = 5 / 0 # noqa: F841\n return \"ERROR\"\n" ]
[ [ "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
makr3la/regression
[ "7c402d7c044d6ed1d2b0c04e60134bb668acb0e7" ]
[ "tests/test_regression.py" ]
[ "from io import BytesIO\nimport os\nimport subprocess\n\nimport numpy as np\nimport pandas as pd\n\nfrom regression import app\nfrom regression import processing as lib\n\n\ndef test_lib():\n alfa, beta, n = np.random.normal(), np.random.normal(), 50\n noise = np.random.normal(0, 3, n)\n X = np.linspace(-100, 100)\n category = np.random.choice(['a', 'b'], n)\n y = alfa * X + beta + noise\n df = pd.DataFrame({'X': X, 'category': category, 'y': y})\n df.to_csv(os.path.join(os.path.dirname(__file__), 'test.csv'))\n df.to_excel(os.path.join(os.path.dirname(__file__), 'test.xlsx'))\n\n files = lib.find_files(os.path.dirname(__file__))\n assert [lib._allowed_file(f) for f in files]\n\n element = np.random.randint(0, n)\n form = {'X': f'{X[element]}', 'category': f'{category[element]}'}\n actual = y[element]\n X, y, sample = lib.process_data(files['test.xlsx'], form)\n prediction = lib.predict_ols(X, y, sample, n - 1)['mean']\n assert np.isclose(actual, prediction, atol=10)\n\n assert not lib._is_float('string')\n\n\ndef test_app():\n app.testing = True\n file = os.path.join(os.path.dirname(__file__), 'test.csv')\n with app.test_client() as c:\n responses = [\n c.get('/'),\n c.post('/', content_type='multipart/form-data',\n data=dict(file=(BytesIO(b'content'), 'upload.csv'))),\n c.post('/process', data={'file': f\"('test', '{file}')\"}),\n c.post('/result', data={'name': 'test', 'path': file,\n 'X': '', 'category': ''}),\n c.post('/result', data={'name': 'test', 'path': file,\n 'X': '120', 'category': 'a'})\n ]\n assert all([r.status_code in [200, 302] for r in responses])\n\n\ndef test_main():\n subprocess.check_call(['regression'])\n" ]
[ [ "numpy.linspace", "numpy.random.choice", "numpy.isclose", "pandas.DataFrame", "numpy.random.normal", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
sharique1006/Gaussian-Discriminant-Analysis
[ "4cbc707b5d9f9372f35d713980428113d6ec8d2c" ]
[ "Q4b.py" ]
[ "import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport math\nimport sys\nimport os\n\ndata_dir = sys.argv[1]\nout_dir = sys.argv[2]\ndataX = os.path.join(sys.argv[1], 'q4x.dat')\ndataY = os.path.join(sys.argv[1], 'q4y.dat')\n\n# 4. Gaussian and Discriminant Analysis\ntrainX = np.loadtxt(dataX)\ntrainY = np.loadtxt(dataY, dtype=str)\n\ndef normalize(X):\n\tmu = np.mean(X)\n\tstd = np.std(X)\n\treturn (X - mu)/std\n\nX = normalize(trainX)\nY = np.array([0 if y == 'Alaska' else 1 for y in trainY]).reshape(-1, 1)\nm = len(Y)\n\ndef plotData():\n\tx1 = np.array([X[i,:] for i in np.where(Y == 0)[0]])\n\tx2 = np.array([X[i,:] for i in np.where(Y == 1)[0]])\n\n\tplt.plot(x1[:,0], x1[:,1], 'ro', marker = '.', label = 'Alaska')\n\tplt.plot(x2[:,0], x2[:,1], 'bo', marker = '+', label = 'Canada')\n\tplt.xlabel(r'$X_1$')\n\tplt.ylabel(r'$X_2$')\n\tplt.title('Training Data')\n\tplt.legend()\n\ttraining_data = os.path.join(sys.argv[2], \"Q4bTrainingData.png\")\n\tplt.savefig(training_data)\n\tplt.close()\n\nplotData()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.use", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "numpy.std", "numpy.mean", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.where", "numpy.loadtxt", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gronki/slezanbear
[ "f3d6203036fa664c0f1bec4b0fdca59605ee416e" ]
[ "data/plotmodel.py" ]
[ "# coding: utf-8\nimport gdal\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sys import argv\nfrom re import match, sub\n\ni2m = lambda i: 5.5 - 2.5 * np.log10(i)\nm2i = lambda m: 10 ** ((5.5 - m) / 2.5)\n\ncolors = [\n (18.000, '#FFFFFF'),\n (19.400, '#D2613D'),\n (20.200, '#FFDE00'),\n (20.800, '#476100'),\n (21.100, '#2E98A6'),\n (21.450, '#0E0036'),\n (21.600, '#3A0047'),\n (21.700, '#1C1C1C'),\n (22.000, '#000000'),\n]\n\n# ccolors = [ c for l,c in colors ]\n# clevels = [ l for l,c in colors ]\n\nsg = { 'red': [], 'green': [], 'blue': [] }\nfor l,c in colors:\n cr = int(c[1:3], 16)\n cg = int(c[3:5], 16)\n cb = int(c[5:7], 16)\n sg['red' ].append(( (l - 18.0) / (22.0 - 18.0), cr / 255.0, cr / 255.0))\n sg['green'].append(( (l - 18.0) / (22.0 - 18.0), cg / 255.0, cg / 255.0))\n sg['blue' ].append(( (l - 18.0) / (22.0 - 18.0), cb / 255.0, cb / 255.0))\n\nclevels = np.concatenate([\n np.arange(18.0, 20.0, 0.250),\n np.arange(20.0, 20.8, 0.125),\n np.arange(20.8, 21.5, 0.050),\n np.arange(21.5, 21.8, 0.025),\n np.array([22.0]) ])\n\nfrom matplotlib.colors import LinearSegmentedColormap\ncmap = LinearSegmentedColormap('sky', sg)\n\nfor fn in argv[1:]:\n\n if not match(r'.*\\.tif$', fn): continue\n\n gd = gdal.Open(fn)\n gt = gd.GetGeoTransform()\n nx = gd.RasterXSize\n ny = gd.RasterYSize\n arr = gd.ReadAsArray()\n hobs = arr[0,:,:]\n sky = arr[1:,:,:]\n\n fs = ( 13 * np.sqrt(nx/float(ny)), 12 / np.sqrt(nx/float(ny)) )\n\n ex = [ gt[0], gt[0] + nx * gt[1],\n gt[3] + ny * gt[5], gt[3] ]\n aspect = 1 / np.cos((gt[3] + (ny - 1) / 2 * gt[5]) * np.pi / 180)\n\n\n x = np.linspace(gt[0], gt[0] + nx * gt[1], nx)\n y = np.linspace(gt[3], gt[3] + ny * gt[5], ny)\n\n #---------------------------------------------------------------------------\n\n fig,ax = plt.subplots(figsize = fs)\n m = ax.imshow(hobs, extent = ex, cmap = 'BrBG_r', vmin = 0, vmax = 1000)\n ax.set_aspect(aspect)\n\n plt.colorbar(m, ax = ax)\n plt.savefig(sub(r'\\.tif$','.el.png',fn), dpi = 144, 
interpolation = 'none')\n\n #---------------------------------------------------------------------------\n\n fig,ax = plt.subplots(figsize = fs)\n m = ax.contourf(x, y, i2m(sky[3,:,:]), levels = clevels, cmap = cmap)\n plt.colorbar(m, ax = ax)\n ax.set_aspect(aspect)\n\n plt.savefig(sub(r'\\.tif$','.png',fn), dpi = 144)\n\n #---------------------------------------------------------------------------\n\n fig,ax = plt.subplots(figsize = fs)\n m = ax.contourf(x, y, i2m(sky[2,:,:]), levels = clevels, cmap = cmap)\n # m = ax.imshow(i2m(sky[2,:,:]), cmap = cmap, vmin = 18.0, vmax = 22.0)\n ax.set_aspect(aspect)\n plt.colorbar(m, ax = ax)\n plt.savefig(sub(r'\\.tif$','.noat.png',fn), dpi = 144)\n\n #---------------------------------------------------------------------------\n\n fig,ax = plt.subplots(figsize = fs)\n m = ax.imshow(-2.5 * np.log10(sky[3,:,:] / sky[2,:,:]),\n cmap = 'inferno_r', vmin = 0.0, vmax = 0.1,\n interpolation = 'none', extent = ex)\n ax.set_aspect(aspect)\n plt.colorbar(m, ax = ax)\n plt.savefig(sub(r'\\.tif$','.atdiff.png',fn), dpi = 144)\n\n #---------------------------------------------------------------------------\n\n fig,ax = plt.subplots(figsize = fs)\n m = ax.imshow(-2.5 * np.log10(sky[2,:,:] / sky[1,:,:]),\n cmap = 'RdGy', vmin = -0.1, vmax = 0.1, interpolation = 'none',\n extent = ex)\n ax.set_aspect(aspect)\n plt.colorbar(m, ax = ax)\n plt.savefig(sub(r'\\.tif$','.phdiff.png',fn), dpi = 144)\n" ]
[ [ "numpy.linspace", "numpy.arange", "matplotlib.colors.LinearSegmentedColormap", "matplotlib.pyplot.subplots", "numpy.cos", "matplotlib.pyplot.colorbar", "numpy.log10", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JonathanDHarris/skypy
[ "08b4406a54fbc8ac89b6316544a57e8952e3b2bd" ]
[ "skypy/galaxy/_spectrum_loaders.py" ]
[ "'''Implementations for spectrum loaders.'''\n\nimport numpy as np\nimport specutils\nimport astropy.utils.data\nimport astropy.table\nfrom astropy import __version__ as astropy_version\nfrom astropy import units\n\nimport os\nimport urllib\nfrom pkg_resources import resource_filename\n\n\ndef download_file(url, cache=True):\n '''download_file with some specific settings'''\n if astropy_version.startswith('3.'): # pragma: no cover\n extra_kwargs = {}\n else:\n extra_kwargs = {'pkgname': 'skypy'}\n return astropy.utils.data.download_file(\n url, cache=cache, show_progress=False, **extra_kwargs)\n\n\ndef combine_spectra(a, b):\n '''combine two spectra'''\n if a is None or b is None:\n return a or b\n\n if isinstance(a, specutils.SpectrumList) or isinstance(b, specutils.SpectrumList):\n a = a if isinstance(a, specutils.SpectrumList) else specutils.SpectrumList([a])\n b = b if isinstance(b, specutils.SpectrumList) else specutils.SpectrumList([b])\n return specutils.SpectrumList(a + b)\n\n if (len(a.spectral_axis) == len(b.spectral_axis)\n and np.allclose(a.spectral_axis, b.spectral_axis, atol=0, rtol=1e-10)\n and a.flux.unit.is_equivalent(b.flux.unit)):\n flux_a = np.atleast_2d(a.flux.value)\n flux_b = np.atleast_2d(b.flux.to_value(a.flux.unit))\n if flux_a.shape[1:] == flux_b.shape[1:]:\n return specutils.Spectrum1D(spectral_axis=a.spectral_axis,\n flux=np.concatenate([flux_a, flux_b])*a.flux.unit)\n\n return specutils.SpectrumList([a, b])\n\n\ndef file_loader(*filenames):\n '''load a file'''\n spectra = []\n for filename in filenames:\n spectra.append(specutils.Spectrum1D.read(filename))\n return spectra[0] if len(spectra) == 1 else specutils.SpectrumList(spectra)\n\n\ndef skypy_data_loader(module, name, *tags):\n '''load data from the skypy data package'''\n\n # result is spectrum or list of spectra\n spectra = None\n\n # load each tag separately\n for tag in tags:\n\n # get resource filename from module, name, and tag\n try:\n filename = 
resource_filename(f'skypy-data.{module}', f'{name}_{tag}.ecsv')\n except ModuleNotFoundError as exc:\n message = str(\"No module named 'skypy-data'. To install:\\n\"\n \"pip install skypy-data@https://github.com/\"\n \"skypyproject/skypy-data/archive/master.tar.gz\")\n raise ModuleNotFoundError(message) from exc\n\n # load the data file\n data = astropy.table.Table.read(filename, format='ascii.ecsv')\n\n # get the spectral axis\n spectral_axis = data['spectral_axis'].quantity\n\n # load all templates\n flux_unit = data['flux_0'].unit\n fluxes = []\n while 'flux_%d' % len(fluxes) in data.colnames:\n fluxes.append(data['flux_%d' % len(fluxes)].quantity.to_value(flux_unit))\n fluxes = np.squeeze(fluxes)*flux_unit\n\n # construct the Spectrum1D\n spectrum = specutils.Spectrum1D(spectral_axis=spectral_axis, flux=fluxes)\n\n # combine with existing\n spectra = combine_spectra(spectra, spectrum)\n\n return spectra\n\n\ndef decam_loader(*bands):\n '''load DECam bandpass filters'''\n\n # download DECam filter data\n filename = download_file(\n 'http://www.ctio.noao.edu/noao/sites/default/files/DECam/STD_BANDPASSES_DR1.fits')\n\n # load the data file\n data = astropy.table.Table.read(filename, format='fits')\n\n # set units\n data['LAMBDA'].unit = units.angstrom\n\n # get the spectral axis\n spectral_axis = data['LAMBDA'].quantity\n\n # load requested bands\n throughput = []\n for band in bands:\n throughput.append(data[band])\n throughput = np.squeeze(throughput)*units.dimensionless_unscaled\n\n # return the bandpasses as Spectrum1D\n return specutils.Spectrum1D(spectral_axis=spectral_axis, flux=throughput)\n\n\nspectrum_loaders = [\n # bandpasses\n ('(Johnson)_(U)?(B)?(V)?', skypy_data_loader, 'bandpasses'),\n ('(Cousins)_(R)?(I)?', skypy_data_loader, 'bandpasses'),\n ('DECam_(g)?(r)?(i)?(z)?(Y)?', decam_loader),\n\n # spectrum templates\n ('(kcorrect)_((?:raw)?spec(?:_nl)?(?:_nd)?)', skypy_data_loader, 'spectrum_templates'),\n\n # catchall file loader\n ('(.*)', 
file_loader),\n]\n" ]
[ [ "numpy.concatenate", "numpy.atleast_2d", "numpy.squeeze", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Neil0406/Flask-echart
[ "54ffb3797b995889dd52e56bc4488f9a00318598" ]
[ "test.py" ]
[ "import pymysql\nimport pandas as pd\n\nMYSQL_HOST = 'localhost'\nMYSQL_DB = 'neildb'\nMYSQL_USER = 'root'\nMYSQL_PASS = 'root'\n\ndef connect_mysql(): #連線資料庫\n global connect, cursor\n connect = pymysql.connect(host = MYSQL_HOST, db = MYSQL_DB, user = MYSQL_USER, password = MYSQL_PASS,\n charset = 'utf8', use_unicode = True)\n cursor = connect.cursor()\n\n\n# def hiskio():\nconnect_mysql() #呼叫連線資料庫函式\ndf = pd.read_sql('SELECT * FROM hiskio', con = connect) #使用connect指定的Mysql獲取資料\ndata = df.to_dict('recode')\n\nresult = [] #將 PRICE 變成 int\n\n\nfor i in data:\n if i['price'] != '免費' and i['price'] != 'NaN':\n i['price'] = int(i['price'].replace('NT$ ',''))\n result.append(i)\n else :\n i['price'] = 0\n# d = {}\n# for i in result:\n# d[i['category']] = ''\n\n# title_num = [] #每種類課程總和 [45, 74, 10, 12, 45, 5, 25]\n# for j in d:\n# t = []\n# for i in result:\n# count = 0\n# if i['category'] == j:\n# count += 1\n# t.append(count)\n# title_num.append(sum(t))\n \n# title = [] #課程分類 ['網站前端', '網站後端', '物聯網 IOT', '手機應用', '數據分析', '遊戲開發', '微軟應用']\n# price = [] #每個種類課程的\"價格\"總和 [109686, 281049, 21090, 35899, 211530, 15860, 151690]\n# free = [] #免費課程的數量 [7, 13, 4, 1, 11, 0, 0]\n\n# for k in d :\n# title.append(k)\n# p = []\n# f = []\n# count = 0\n# for i in result:\n# if i['category'] == k and i['price'] != 0:\n# p.append(i['PRICE'])\n# if i['category'] == k and i['price'] == 0:\n# count =+ 1\n# f.append(count)\n# price.append(sum(p))\n# free.append(sum(f))\n\n# #price / (title_num - free) 計算平均 (扣掉免費課程的平均)\n\n# avg = [] # [2886, 4607, 3515, 3264, 6221, 3172, 6068]\n# for i in range(len(title)):\n# p = price[i] / (title_num[i] - free[i])\n# avg.append(int(f'{p:.0f}')) \n \n # return title, avg" ]
[ [ "pandas.read_sql" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
bkalita-git/nn_seq2seq_transformer
[ "b394bb2b5d6f167c2d4d5c582d1cf5d260841688" ]
[ "attention1.py" ]
[ "import torch\nimport loader1\nimport pickle\nembedding_size = 10\nencoder_file = \"encoder\"\ndecoder_file = \"decoder\"\nencoder_max_len = 6\ndecoder_max_len = 7\nnum_lines = 6\nbatch_size = 3\nnum_layers = 4\nld = loader1.load(encoder_file,encoder_max_len,decoder_file,decoder_max_len,num_lines,batch_size)\nclass Attention(torch.nn.Module):\n\tdef __init__(self,embedding):\n\t\tsuper(Attention,self).__init__()\n\t\tself.embedding\t= embedding\n\t\tself.emb1\t= torch.nn.Embedding(len(ld.e_data.vocab)+1 , self.embedding)\n\t\tself.lstm1\t= torch.nn.LSTM(num_layers=num_layers,input_size=self.embedding,hidden_size=self.embedding,batch_first=True)\n\t\tself.lin1\t= torch.nn.Linear(ld.e_max_len,ld.e_max_len)\n\t\tself.soft\t= torch.nn.Softmax(dim=1)\n\n\t\tself.emb2\t= torch.nn.Embedding(len(ld.d_data.vocab)+1,self.embedding,padding_idx=0)\n\t\tself.lin2\t= torch.nn.Linear(ld.e_max_len+self.embedding,self.embedding)\n\t\tself.lstm2\t= torch.nn.LSTM(num_layers=num_layers,input_size=self.embedding,hidden_size=self.embedding,batch_first=True)\n\t\tself.lin3\t= torch.nn.Linear(self.embedding,len(ld.d_data.vocab))\n\t\t\n\tdef forward(self,e,d):\n\t\tloss = 0\n\t\t#print(d[0])\n\t\tdx = d[0][:,:-1]\n\t\t#print(dx)\n\t\tdy = d[0][:,1:]\n\t\t#print(dy)\n\t\tdx = dx.t()\n\t\t#print(dx)\n\t\tdy = dy.t()\n\t\t#print(dy)\n\t\t\n\t\tpe = torch.nn.utils.rnn.pack_padded_sequence(e[0],e[1],batch_first=True)\n\t\t#print(pe)\n\t\t#print(e)\n\t\tepe = self.emb1(pe[0])\n\t\t#print(epe)\n\t\tepe = torch.nn.utils.rnn.PackedSequence(epe,pe[1])\n\t\t#print(epe)\n\t\tepo,(last_hid,last_state) = self.lstm1(epe)\n\t\t\n\t\tupo = torch.nn.utils.rnn.pad_packed_sequence(epo,batch_first=True,total_length=ld.e_max_len)[0]\n\t\t#print(upo)\n\t\t#print(last_hid)\n\t\t#------------------------------------------------------------------------------------------#\n\t\tfor d,l in zip(dx,dy):\n\t\t\t#modified hidden for multi_layer purpose+++++++++\n\t\t\tlast_hid_last = 
last_hid[-1][None]\n\t\t\t#last_hid_last = last_hid\n\t\t\n\t\t\tc = (last_hid_last[-1,:,None]*upo).sum(dim=2)\n\t\t\t#print(c)\n\t\t\tc = self.lin1(c)\n\t\t\t#print(c)\n\t\t\tc = self.soft(c)\n\t\t\t#print(c)\n\t\t\tc = (c[:,:,None]*upo).sum(dim=2)\n\t\t\t#print(c)\n\t\t\td = d[:,None]\n\t\t\t#print(d)\n\t\t\t#print(l)\n\t\t\tde = self.emb2(d)\n\t\t\t#print(de)\n\t\t\tc = c[:,None]\n\t\t\t#print(c)\n\t\t\tde = torch.cat([c,de],dim=2)\n\t\t\t#print(de)\n\t\t\tde = self.lin2(de)\n\t\t\t#print(de)\n\t\t\tdout,(last_hid,last_state) = self.lstm2(de,(last_hid,last_state))\n\t\t\t\n\t\t\t#print(dout)\n\t\t\t#print(last_hid)\n\t\t\t#print(dout)\n\t\t\tnon_zero_l_pos = l.nonzero().view(-1)\n\t\t\tl = l[non_zero_l_pos]\n\t\t\tdout = dout[non_zero_l_pos]\n\t\t\tif len(dout)>0:\n\t\t\t\tdout = self.lin3(dout)\n\t\t\t\t#print(dout)\n\t\t\t\tdout = dout[:,-1,:]\n\t\t\t\tl = l-1\n\t\t\t\t#print(dout)\n\t\t\t\t#print(l)\n\t\t\t\tloss += error(dout,l)\n\t\t\telse:\n\t\t\t\tcontinue\n\t\treturn loss\n\t\t#no problem-----------------------------------------------------------------------------------#\n\t\t\n\t\n\n\nattention = Attention(embedding_size)\nerror\t = torch.nn.CrossEntropyLoss()\noptim\t = torch.optim.Adam(attention.parameters(),lr=0.01)\nfor epoch in range(2000):\n\tfor e,d in zip(ld.e,ld.d):\n\t\toptim.zero_grad()\n\t\tloss = attention(e,d)\n\t\tloss.backward()\n\t\toptim.step()\n\t\t#print(loss)\nprint(loss)\ntorch.save(attention.state_dict(),'attention.pt')\nvocab = {\"e_vocab\":ld.e_data.vocab,\"d_vocab\":ld.d_data.vocab,\"embedding\":10,\"e_max_len\":ld.e_max_len,\"d_max_len\":ld.d_max_len,\"num_layers\":num_layers}\nf = open(\"vocab\",\"wb\")\npickle.dump(vocab,f,pickle.HIGHEST_PROTOCOL)\n" ]
[ [ "torch.nn.Softmax", "torch.nn.CrossEntropyLoss", "torch.nn.LSTM", "torch.cat", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.Linear", "torch.nn.utils.rnn.PackedSequence", "torch.nn.utils.rnn.pad_packed_sequence" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Virginiaxu/DeepMosaic
[ "fe5bf9b98e36f0b9ef6bb88345d4afaa55054e96" ]
[ "deepmosaic/canvasPainter.py" ]
[ "import numpy as np\n\nMAX_DP = 500\nWIDTH = 300\n\ndef strand_to_index(reverse=False):\n if reverse:\n return 100\n else:\n return 200\n\ndef base_to_index(base):\n if base == \"A\":\n return 50\n elif base == \"C\":\n return 100\n elif base == \"G\":\n return 150\n elif base == \"T\":\n return 200\n elif base == \"N\":\n return 250\n\n\n\n#image with 3 channel\ndef paint_canvas(reads, pos):\n canvas = np.zeros([MAX_DP, WIDTH, 3], dtype=np.uint8)\n pos = pos - 1\n start_pos = pos - WIDTH/2\n end_pos = pos + WIDTH/2\n for i, read in enumerate(reads):\n if read.reference_start < start_pos:\n for pos in read.get_reference_positions():\n if pos >= start_pos:\n start = pos\n break\n offset = 0\n else:\n start = read.reference_start\n offset = int(read.reference_start - start_pos)\n read_sequence = read.query_sequence\n qualities = read.query_qualities\n ref_positions = read.get_reference_positions(full_length=len(read_sequence))\n strand_value = strand_to_index(read.is_reverse)\n for j, pos in enumerate(ref_positions):\n if pos==None or pos < start:\n continue\n canvas_index = pos-start+offset\n if canvas_index >= WIDTH:\n break\n base = read_sequence[j]\n canvas[i, canvas_index, 0] = base_to_index(base)\n canvas[i, canvas_index, 1] = qualities[j]\n canvas[i, canvas_index, 2] = strand_value\n return canvas\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Anonymous502/siamfda-for-eccv
[ "72dff5c174b7ebe30c59a6e21bb6f06fdb06c3fb" ]
[ "SiamFDA/utils/lr_scheduler.py" ]
[ "# Copyright (c) SenseTime. All Rights Reserved.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport math\n\nimport numpy as np\nfrom torch.optim.lr_scheduler import _LRScheduler\n\nfrom SiamFDA.core.config import cfg\n\n\nclass LRScheduler(_LRScheduler):\n def __init__(self, optimizer, last_epoch=-1):\n if 'lr_spaces' not in self.__dict__:\n raise Exception('lr_spaces must be set in \"LRSchduler\"')\n super(LRScheduler, self).__init__(optimizer, last_epoch)\n\n def get_cur_lr(self):\n return self.lr_spaces[self.last_epoch]\n\n def get_lr(self):\n epoch = self.last_epoch\n return [self.lr_spaces[epoch] * pg['initial_lr'] / self.start_lr\n for pg in self.optimizer.param_groups]\n\n def __repr__(self):\n return \"({}) lr spaces: \\n{}\".format(self.__class__.__name__,\n self.lr_spaces)\n\n\nclass LogScheduler(LRScheduler):\n def __init__(self, optimizer, start_lr=0.03, end_lr=5e-4,\n epochs=50, last_epoch=-1, **kwargs):\n self.start_lr = start_lr\n self.end_lr = end_lr\n self.epochs = epochs\n self.lr_spaces = np.logspace(math.log10(start_lr),\n math.log10(end_lr),\n epochs)\n\n super(LogScheduler, self).__init__(optimizer, last_epoch)\n\n\nclass StepScheduler(LRScheduler):\n def __init__(self, optimizer, start_lr=0.01, end_lr=None,\n step=10, mult=0.1, epochs=50, last_epoch=-1, **kwargs):\n if end_lr is not None:\n if start_lr is None:\n start_lr = end_lr / (mult ** (epochs // step))\n else: # for warm up policy\n mult = math.pow(end_lr/start_lr, 1. 
/ (epochs // step))\n self.start_lr = start_lr\n self.lr_spaces = self.start_lr * (mult**(np.arange(epochs) // step))\n self.mult = mult\n self._step = step\n\n super(StepScheduler, self).__init__(optimizer, last_epoch)\n\n\nclass MultiStepScheduler(LRScheduler):\n def __init__(self, optimizer, start_lr=0.01, end_lr=None,\n steps=[10, 20, 30, 40], mult=0.5, epochs=50,\n last_epoch=-1, **kwargs):\n if end_lr is not None:\n if start_lr is None:\n start_lr = end_lr / (mult ** (len(steps)))\n else:\n mult = math.pow(end_lr/start_lr, 1. / len(steps))\n self.start_lr = start_lr\n self.lr_spaces = self._build_lr(start_lr, steps, mult, epochs)\n self.mult = mult\n self.steps = steps\n\n super(MultiStepScheduler, self).__init__(optimizer, last_epoch)\n\n def _build_lr(self, start_lr, steps, mult, epochs):\n lr = [0] * epochs\n lr[0] = start_lr\n for i in range(1, epochs):\n lr[i] = lr[i-1]\n if i in steps:\n lr[i] *= mult\n return np.array(lr, dtype=np.float32)\n\n\nclass LinearStepScheduler(LRScheduler):\n def __init__(self, optimizer, start_lr=0.01, end_lr=0.005,\n epochs=50, last_epoch=-1, **kwargs):\n self.start_lr = start_lr\n self.end_lr = end_lr\n self.lr_spaces = np.linspace(start_lr, end_lr, epochs)\n super(LinearStepScheduler, self).__init__(optimizer, last_epoch)\n\n\nclass CosStepScheduler(LRScheduler):\n def __init__(self, optimizer, start_lr=0.01, end_lr=0.005,\n epochs=50, last_epoch=-1, **kwargs):\n self.start_lr = start_lr\n self.end_lr = end_lr\n self.lr_spaces = self._build_lr(start_lr, end_lr, epochs)\n\n super(CosStepScheduler, self).__init__(optimizer, last_epoch)\n\n def _build_lr(self, start_lr, end_lr, epochs):\n index = np.arange(epochs).astype(np.float32)\n lr = end_lr + (start_lr - end_lr) * \\\n (1. 
+ np.cos(index * np.pi / epochs)) * 0.5\n return lr.astype(np.float32)\n\n\nclass WarmUPScheduler(LRScheduler):\n def __init__(self, optimizer, warmup, normal, epochs=50, last_epoch=-1):\n warmup = warmup.lr_spaces # [::-1]\n normal = normal.lr_spaces\n self.lr_spaces = np.concatenate([warmup, normal])\n self.start_lr = normal[0]\n\n super(WarmUPScheduler, self).__init__(optimizer, last_epoch)\n\n\nLRs = {\n 'log': LogScheduler,\n 'step': StepScheduler,\n 'multi-step': MultiStepScheduler,\n 'linear': LinearStepScheduler,\n 'cos': CosStepScheduler}\n\n\ndef _build_lr_scheduler(optimizer, config, epochs=50, last_epoch=-1):\n return LRs[config.TYPE](optimizer, last_epoch=last_epoch,\n epochs=epochs, **config.KWARGS)\n\n\ndef _build_warm_up_scheduler(optimizer, epochs=50, last_epoch=-1):\n warmup_epoch = cfg.TRAIN.LR_WARMUP.EPOCH\n sc1 = _build_lr_scheduler(optimizer, cfg.TRAIN.LR_WARMUP,\n warmup_epoch, last_epoch)\n sc2 = _build_lr_scheduler(optimizer, cfg.TRAIN.LR,\n epochs - warmup_epoch, last_epoch)\n return WarmUPScheduler(optimizer, sc1, sc2, epochs, last_epoch)\n\n\ndef build_lr_scheduler(optimizer, epochs=50, last_epoch=-1):\n if cfg.TRAIN.LR_WARMUP.WARMUP:\n return _build_warm_up_scheduler(optimizer, epochs, last_epoch)\n else:\n return _build_lr_scheduler(optimizer, cfg.TRAIN.LR,\n epochs, last_epoch)\n\n\nif __name__ == '__main__':\n import torch.nn as nn\n from torch.optim import SGD\n\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv = nn.Conv2d(10, 10, kernel_size=3)\n net = Net().parameters()\n optimizer = SGD(net, lr=0.01)\n\n # test1\n step = {\n 'type': 'step',\n 'start_lr': 0.01,\n 'step': 10,\n 'mult': 0.1\n }\n lr = build_lr_scheduler(optimizer, step)\n print(lr)\n\n log = {\n 'type': 'log',\n 'start_lr': 0.03,\n 'end_lr': 5e-4,\n }\n lr = build_lr_scheduler(optimizer, log)\n\n print(lr)\n\n log = {\n 'type': 'multi-step',\n \"start_lr\": 0.01,\n \"mult\": 0.1,\n \"steps\": [10, 15, 20]\n }\n lr = 
build_lr_scheduler(optimizer, log)\n print(lr)\n\n cos = {\n \"type\": 'cos',\n 'start_lr': 0.01,\n 'end_lr': 0.0005,\n }\n lr = build_lr_scheduler(optimizer, cos)\n print(lr)\n\n step = {\n 'type': 'step',\n 'start_lr': 0.001,\n 'end_lr': 0.03,\n 'step': 1,\n }\n\n warmup = log.copy()\n warmup['warmup'] = step\n warmup['warmup']['epoch'] = 5\n lr = build_lr_scheduler(optimizer, warmup, epochs=55)\n print(lr)\n\n lr.step()\n print(lr.last_epoch)\n\n lr.step(5)\n print(lr.last_epoch)\n" ]
[ [ "numpy.linspace", "numpy.arange", "torch.nn.Conv2d", "numpy.cos", "numpy.concatenate", "torch.optim.SGD", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ayasyrev/model_constructor
[ "3759a02dd9f7aa1ca3e6a4a5aefe72380886207e" ]
[ "model_constructor/yaresnet.py" ]
[ "# YaResBlock - former NewResBlock.\n# Yet another ResNet.\n\nimport torch.nn as nn\nfrom functools import partial\nfrom collections import OrderedDict\nfrom .layers import SEModule, ConvLayer, act_fn, noop, SimpleSelfAttention\nfrom .net import Net\nfrom torch.nn import Mish\n\n\n__all__ = ['YaResBlock', 'yaresnet_parameters', 'yaresnet34', 'yaresnet50']\n\n\nclass YaResBlock(nn.Module):\n '''YaResBlock. Reduce by pool instead of stride 2'''\n\n def __init__(self, expansion, ni, nh, stride=1,\n conv_layer=ConvLayer, act_fn=act_fn, zero_bn=True, bn_1st=True,\n pool=nn.AvgPool2d(2, ceil_mode=True), sa=False, sym=False,\n groups=1, dw=False, div_groups=None,\n se_module=SEModule, se=False, se_reduction=16\n ):\n super().__init__()\n nf, ni = nh * expansion, ni * expansion\n if div_groups is not None: # check if grops != 1 and div_groups\n groups = int(nh / div_groups)\n self.reduce = noop if stride == 1 else pool\n layers = [(\"conv_0\", conv_layer(ni, nh, 3, stride=1, act_fn=act_fn, bn_1st=bn_1st,\n groups=nh if dw else groups)),\n (\"conv_1\", conv_layer(nh, nf, 3, zero_bn=zero_bn, act=False, bn_1st=bn_1st))\n ] if expansion == 1 else [\n (\"conv_0\", conv_layer(ni, nh, 1, act_fn=act_fn, bn_1st=bn_1st)),\n (\"conv_1\", conv_layer(nh, nh, 3, stride=1, act_fn=act_fn, bn_1st=bn_1st,\n groups=nh if dw else groups)),\n (\"conv_2\", conv_layer(nh, nf, 1, zero_bn=zero_bn, act=False, bn_1st=bn_1st))\n ]\n if se:\n layers.append(('se', se_module(nf, se_reduction)))\n if sa:\n layers.append(('sa', SimpleSelfAttention(nf, ks=1, sym=sym)))\n self.convs = nn.Sequential(OrderedDict(layers))\n self.idconv = noop if ni == nf else conv_layer(ni, nf, 1, act=False)\n self.merge = act_fn\n\n def forward(self, x):\n o = self.reduce(x)\n return self.merge(self.convs(o) + self.idconv(o))\n\n\nyaresnet_parameters = {'block': YaResBlock, 'stem_sizes': [3, 32, 64, 64], 'act_fn': Mish(), 'stem_stride_on': 1}\nyaresnet34 = partial(Net, name='YaResnet34', expansion=1, layers=[3, 4, 6, 3], 
**yaresnet_parameters)\nyaresnet50 = partial(Net, name='YaResnet50', expansion=4, layers=[3, 4, 6, 3], **yaresnet_parameters)\n" ]
[ [ "torch.nn.Mish", "torch.nn.AvgPool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ersince76/derinogrenme_giris
[ "69790997622c957e40c2baf9faccfd3b658d5a40" ]
[ "ikili_metin_siniflandirma/binary_classification_birgun_karar_u1.py" ]
[ "\"\"\"\nGuncelleme 1, \nStandardScaler ve Pipeline\n\"\"\"\n\nimport preprocess_util\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedKFold\n\n#upgrade1\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\n\n# seed 1923 ile sabitlenir\nseed = 1923\nnp.random.seed(seed)\n\n# veri seti yukle\ntriple_dictionary, text, label = preprocess_util.load_trigram_dataset()\n\n\n# girdi X ve Y olarak tanımlanıyor\nX = text #(4388, 1, 7254)\nX = X.reshape(4388,7254)\n\nencoder = LabelEncoder()\nencoder.fit(label)\nencoded_Y = encoder.transform(label)\n\n# temel model\ndef create_baseline():\n # model olustur\n model = Sequential()\n model.add(Dense(100, input_dim=len(triple_dictionary)+1, kernel_initializer='normal', activation='relu'))\n model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n # model derle\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n#%%\nestimators = []\nestimators.append(('standardize', StandardScaler()))\nestimators.append(('mlp', KerasClassifier(build_fn=create_baseline, epochs=100,\nbatch_size=5, verbose=1)))\npipeline = Pipeline(estimators)\nkfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\nresults = cross_val_score(pipeline, X, encoded_Y, cv=kfold)\nprint(\"Standart Ölçek: %.2f%% (%.2f%%)\" % (results.mean()*100, results.std()*100))\n\n\n" ]
[ [ "sklearn.model_selection.cross_val_score", "numpy.random.seed", "sklearn.pipeline.Pipeline", "sklearn.model_selection.StratifiedKFold", "sklearn.preprocessing.StandardScaler", "sklearn.preprocessing.LabelEncoder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
packetChor/Intracity-Fare-Estimation
[ "2ca3e477dac6af48015bc568ff20988bd51498e4" ]
[ "notebooks_&_scripts/Driver.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom sklearn import neural_network\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import r2_score\nfrom sklearn.model_selection import train_test_split\n# from sklearn.model_selection import GridSearchCV\n\n\ndef testRUN_crCsv(X, y, ann):\n scaler.fit(X)\n X = scaler.transform(X)\n ann.fit(X, y)\n # Hyperparameter tuning\n # gs.fit(X, y)\n # print(\"best estimator :\\n\")\n # print(gs.best_estimator_)\n # print(\"Best parameters :\\n\")\n # print(gs.best_params_)\n # print(\"CV RESULTS : \\n\")\n # print(gs.cv_results_)\n predict = ann.predict(scaler.transform(test))\n predict = predict.round(decimals=2)\n predict = predict.reshape(predict.shape[0], 1)\n predict = np.concatenate([id_vec, predict], axis=1)\n predict = pd.DataFrame(data=predict, columns=['ID', 'FARE'])\n predict.to_csv(\"../answer.csv\", index=False, header=True, sep=',')\n\n\npass\n\n\ndef crossValidate(X, y, ann):\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n X_test = scaler.transform(X_test)\n ann.fit(X_train, y_train)\n prediction = ann.predict(X_test)\n print(\"CV performance\")\n print(200 * r2_score(y_test, prediction))\n print(\"Train performance\")\n print(200 * r2_score(y_train, ann.predict(X_train)))\n\n\n# Seperating target variables from feature matrix\ntrain = pd.read_csv('../data/processed_train.csv')\ntest = pd.read_csv('../data/processed_test.csv')\nid_vec = np.array(test.loc[:, test.columns == 'ID'])\nlabel_drop = ['ID', 'cooling','bus','mean_lat', 'mean_long', 'TIME_AM',\n 'YEAR','DAY','TIMESTAMP']\ntrain.drop(label_drop, axis=1, inplace=True)\ntest.drop(label_drop, axis=1, inplace=True)\n\nX = train.drop(['FARE'], axis=1)\ny = train['FARE']\n\n\n# Creating classifier objects\nscaler = StandardScaler()\nann = neural_network.MLPRegressor(shuffle=True,\n alpha=2,\n hidden_layer_sizes=(150,150,150),\n max_iter=10000,\n random_state=1000,\n 
verbose=True)\n\n# gs = GridSearchCV(ann, param_grid={\n# 'hidden_layer_sizes': [(8), (10),\n# (10, 10, 10), (70, 50, 20), (15, 15, 15), (40, 40, 40)],\n# 'random_state': [100, 1000, 10000],\n# 'alpha': [0.01, 0.1, 1.0]},\n# n_jobs=-1,\n# scoring=make_scorer(r2_score),\n# verbose=10)\n\n# CROSS VALIDATION code\ncrossValidate(X, y, ann)\n# Real testing\ntestRUN_crCsv(X, y, ann)\n#########################################################\n" ]
[ [ "pandas.read_csv", "sklearn.metrics.r2_score", "sklearn.model_selection.train_test_split", "pandas.DataFrame", "numpy.concatenate", "sklearn.preprocessing.StandardScaler", "numpy.array", "sklearn.neural_network.MLPRegressor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
numagic/lumos
[ "f729354613fec84957384323da6d0b69e00ed7cc" ]
[ "lumos/models/tree_utils.py" ]
[ "from __future__ import annotations # Cyclic referencing of types\n\nfrom copy import deepcopy\nfrom typing import Any, Dict\nfrom jax.tree_util import register_pytree_node_class\n\nimport numpy as np\nimport casadi as cas\n\n\"\"\"\nNOTE:\n- we chose not to use existing 3rd party tree library yet, because it doesn't seem to\nbring benefit for the current design. (maybe with a redesign, then it would be). For \nexample, with anytree, the syntax isn't really much better.\n\nfrom anytree import Resolver\nresolver = Resolver(\"name\")\nresolver.get(changed_params, \"aero/base\").data[\"alpha\"] = alpha\n\nTODO:\n- audit where we should copy and how to avoid redunddant copying. Also if it really\ngets large, we need to find a way to improve efficiency.\n- clean-up dispatch vs backend\n\"\"\"\n\n\nclass Tree:\n def __init__(self, data, data_type: type, children: Dict[str, Tree] = {}):\n \"\"\"General tree type data structure.\n\n Currently it is mainly used to represent two types of structure:\n\n ConfigTree: model configuration that specifies what submodel types a model\n contains. The leaf node data type is string (of the submodel type)\n\n ParameterTree: tree structure for parameters that specify the parameters of the\n model including its submodels. 
The leaf node data type is a flat dictionary\n of parameters.\n \"\"\"\n # The dictionary actually holding the tree data\n self._data = data\n self._data_type = data_type\n self._children = {}\n\n for k, v in children.items():\n self.add_child(name=k, subtree=v)\n\n @property\n def data(self):\n return self._data\n\n def add_child(self, name: str, subtree: Tree):\n # FIXME: check name collision\n self._children[name] = subtree\n\n def get_child(self, name) -> Tree:\n return self._children[name]\n\n def _get_subtree(self, path, delimiter=\".\") -> Tree:\n if path:\n child_names = path.split(delimiter)\n t = self\n for n in child_names:\n t = t.get_child(n)\n return t\n else:\n return self\n\n def to_dict(self):\n\n # Ensure \"children\" key appears regardless whether a child exists\n d = {\n \"data\": self._data,\n \"children\": {},\n }\n\n if self._children:\n d[\"children\"] = {k: v.to_dict() for k, v in self._children.items()}\n\n # Note that the dictionary will contain mutable items, so we deep copy\n # such that modifing the dict can't modify the tree.\n return deepcopy(d)\n\n @classmethod\n def from_dict(cls, d):\n # Build from a copy of the dict so that changing the dict won't change the tree\n d = deepcopy(d)\n tree = cls(d[\"data\"])\n\n for k, v in d[\"children\"].items():\n tree.add_child(name=k, subtree=cls.from_dict(v))\n\n return tree\n\n def _set_leaf(self, path, value, delimiter=\".\"):\n if self._data_type == str:\n subtree = self._get_subtree(path, delimiter)\n subtree._data = value\n elif self._data_type == dict:\n *path_names, param_key = path.split(delimiter)\n subtree = self._get_subtree(delimiter.join(path_names))\n subtree._data[param_key] = value\n else:\n raise TypeError(\"Only str and dict supported for Tree at the moment\")\n\n def replace_subtree(self, path, subtree: Tree, delimiter=\".\"):\n \"\"\"Replace the subtree (with its children) with the new subtree\"\"\"\n if not isinstance(subtree, Tree):\n raise TypeError(\n f\"subtree input 
should be of type 'Tree', but got '{type(subtree)}'\"\n )\n *path_to_parent, child_name = path.split(delimiter)\n path_to_parent = delimiter.join(path_to_parent)\n parent = self._get_subtree(path_to_parent)\n\n # Ensure the child to be replaced does actually exist\n if child_name in parent._children:\n parent._children[child_name] = subtree\n else:\n raise KeyError(f\"{path_to_parent} does not contain a child: '{child_name}'\")\n\n def to_flat_dict(self):\n return self._make_flat_dict(), self._make_tree_def()\n\n def _make_flat_dict(self, prefix=\"\") -> Dict[str, Any]:\n # FIXME: This is not really very readable logic.\n # 1) on the first call, add_prefix returns elf.data itself, which is a dict,\n # which we later keep 'updating' -> so we need to deep copy when returning itself\n # 2) this works for config tree as well, but it's not realy very readable.\n\n if self._data_type == dict:\n flat_d = _add_prefix(self.data, prefix)\n else:\n # For str data type, the root would just add {\"\": root_data)\n # -> {\"\": root_data, \"child1\": child1_data, ...}\n flat_d = {prefix: self.data}\n\n for name, child in self._children.items():\n # NOTE: here add_prefix always operates on a dict\n flat_d.update(_add_prefix(child._make_flat_dict(prefix=name), prefix))\n\n return deepcopy(flat_d)\n\n def _make_tree_def(self, dummy_var=\"*\"):\n flat_d = self._make_flat_dict()\n\n t = self.__class__.from_dict(self.to_dict())\n for k in flat_d:\n # NOTE: here we make the treedef bascially a tree with data replaced by some\n # arbitrary value\n t._set_leaf(k, dummy_var)\n return t.to_dict()\n\n @classmethod\n def from_flat_dict(cls, tree_def, flat_dict):\n # TODO: it should be acutally possible to build from flat_dict without tree_def\n # right?\n flat_dict = deepcopy(flat_dict)\n unflattened = cls.from_dict(tree_def)\n for k, v in flat_dict.items():\n unflattened._set_leaf(k, v)\n\n return unflattened\n\n def __repr__(self) -> str:\n return 
str(self.to_dict())\n\n\n@register_pytree_node_class\nclass ParameterTree(Tree):\n def __init__(self, data, children: Dict[str, ConfigTree] = {}):\n super().__init__(data, data_type=dict, children=children)\n\n def get_param(self, path, delimiter=\".\"):\n *path_names, param_key = path.split(delimiter)\n t = self._get_subtree(delimiter.join(path_names))\n return t._data[param_key]\n\n def set_param(self, path, value, delimiter=\".\"):\n self._set_leaf(path, value, delimiter)\n\n # Methods required to register jax pytree\n def tree_flatten(self):\n flat_dict, tree_def = self.to_flat_dict()\n # NOTE: the aux variables must support equality comparison. Otherwise we would have\n # lots of recompilation due to cache-miss\n return list(flat_dict.values()), (tree_def, list(flat_dict.keys()))\n\n @classmethod\n def tree_unflatten(cls, aux, children):\n tree_def, flat_keys = aux\n flat_dict = dict(zip(flat_keys, children))\n return cls.from_flat_dict(tree_def, flat_dict)\n\n def tree_ravel(self):\n flat_list, aux = self.tree_flatten()\n\n tree_def, flat_keys = aux\n\n # FIXME: we could directly use the flat_list here, but we use flat_dict for now\n # because we know it works\n flat_dict = dict(zip(flat_keys, flat_list))\n\n flat, unravel = _ravel_flat_dict(flat_dict)\n unravel_pytree = lambda flat: self.__class__.tree_unflatten(\n aux, list(unravel(flat).values())\n )\n\n return flat, unravel_pytree\n\n\nclass ConfigTree(Tree):\n def __init__(self, data, children: Dict[str, ConfigTree] = {}):\n super().__init__(data, data_type=str, children=children)\n\n\ndef _add_prefix(d: Dict[str, Any], prefix: str, delimiter: str = \".\"):\n \"\"\"add prefix to keys\"\"\"\n assert isinstance(d, dict)\n if prefix:\n return {prefix + delimiter + n: v for n, v in d.items()}\n else:\n # We use a deepcopy here so add_prefix would alwasy return a new dictionary\n # which we could safely modify\n return deepcopy(d)\n\n\ndef _is_casadi_instance(x):\n return isinstance(x, (cas.SX, cas.MX, 
cas.DM))\n\n\ndef _size(x):\n # This is more like a dispatch rather (so operation depending on data type)\n # rather than backend selection\n if _is_casadi_instance(x):\n return x.numel()\n else:\n return np.size(x)\n\n\ndef _shape(x):\n if _is_casadi_instance(x):\n return x.shape\n else:\n return np.shape(x)\n\n\ndef _size_and_shape(x):\n return _size(x), _shape(x)\n\n\ndef _ravel(x):\n if _is_casadi_instance(x):\n return x.reshape((x.numel(), 1))\n else:\n return np.ravel(x)\n\n\ndef _split(arr, indices):\n if _is_casadi_instance(arr):\n indices = [0, *indices, arr.numel()]\n return cas.vertsplit(arr, indices)\n else:\n return np.split(arr, indices)\n\n\ndef _concat(lst):\n if _is_casadi_instance(lst[0]):\n return cas.vertcat(*lst)\n else:\n return np.hstack(lst)\n\n\ndef _unzip2(xys):\n xs = []\n ys = []\n for x, y in xys:\n xs.append(x)\n ys.append(y)\n return tuple(xs), tuple(ys)\n\n\ndef _ravel_flat_dict(flat_tree):\n if not flat_tree:\n return np.array([], np.double), lambda _: {}\n\n keys, values = flat_tree.keys(), flat_tree.values()\n sizes, shapes = _unzip2(_size_and_shape(x) for x in values)\n\n indices = np.cumsum(sizes)\n\n def unravel(arr):\n chunks = _split(arr, indices[:-1])\n reconstructed_values = [\n chunk.reshape(shape) if _size(chunk) > 1 else chunk[0]\n for chunk, shape in zip(chunks, shapes)\n ]\n\n return dict(zip(keys, reconstructed_values))\n\n raveled = _concat([_ravel(e) for e in values])\n\n return raveled, unravel\n" ]
[ [ "numpy.hstack", "numpy.split", "numpy.cumsum", "numpy.size", "numpy.shape", "numpy.ravel", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Rested/mutualism-visualisation
[ "b1223afebd688a7b1b8226dc5e56abb348a6dc16" ]
[ "core/maths.py" ]
[ "import numpy as np\n\n\ndef dX_dt(X, t=0, **params):\n animal, plant = X\n return np.array(\n [\n (params[\"r1\"] + (params[\"b12\"] * animal)) * plant\n - (\n (params[\"alpha1\"] + (params[\"c1\"] * params[\"b12\"] * animal))\n * (plant ** 2)\n ),\n (params[\"r2\"] + (params[\"b21\"] * plant)) * animal\n - (\n (params[\"alpha2\"] + (params[\"c2\"] * params[\"b21\"] * plant))\n * (animal ** 2)\n ),\n ]\n )\n\n\ndef get_fixed_points(**params):\n X_f0 = np.array([0.0, 0.0])\n K1 = params[\"r1\"] / params[\"alpha1\"]\n K2 = params[\"r2\"] / params[\"alpha2\"]\n X_f1 = np.array([K1, 0.0])\n X_f2 = np.array([0.0, K2])\n fixed_points = [X_f0]\n print(dX_dt(list(reversed(X_f1)), **params), dX_dt(list(reversed(X_f2)), **params))\n if all(dX_dt(list(reversed(X_f1)), **params) == 0):\n fixed_points.append(X_f1)\n if all(dX_dt(list(reversed(X_f2)), **params) == 0):\n fixed_points.append(X_f2)\n\n if params[\"r1\"] > 0 or params[\"r2\"] > 0:\n print(\n f\"There are partial extinction points at {fixed_points[1]} and {fixed_points[2]}\"\n )\n print(\"Trivial fixed points:\", fixed_points)\n return fixed_points\n\n\ndef get_non_trivial_fixed_points(r1, r2, alpha1, alpha2, b12, b21, c1, c2):\n A = (c2 * b21 * alpha1) + (c1 * b12 * b21)\n B = (alpha1 * alpha2) + (c1 * b12 * r2) - (c2 * b21 * r1) - b12 * b21\n C = -r1 * alpha2 - b12 * r2\n\n plant_values = np.roots([A, B, C])\n\n animal_values = list(\n map(lambda x: (r2 + (b21 * x)) / (alpha2 + (c2 * b21 * x)), plant_values)\n )\n\n non_trivial_fixed_points = list(zip(plant_values, animal_values))\n\n print(\"non trivial fixed points\", non_trivial_fixed_points)\n\n for fp in non_trivial_fixed_points:\n print(\n \"d/dt close to 0\",\n dX_dt(\n reversed(list(fp)),\n r1=r1,\n r2=r2,\n alpha1=alpha1,\n alpha2=alpha2,\n b12=b12,\n b21=b21,\n c1=c1,\n c2=c2,\n ),\n )\n\n return non_trivial_fixed_points\n" ]
[ [ "numpy.roots", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ethereum-ninja/statistical_analysis_toolkit
[ "6ebf6dc0093b85e555a6ca8de4eb73e57d4bc70b" ]
[ "statistical_analysis_toolkit.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[ ]:\n\n\ndef strength_of_evidence(p_value, alpha=0.05):\n assert(p_value == abs(p_value)) # should be positive\n if p_value > 0.1: return 'None/Weak'\n if p_value > 0.05 and p_value <= 0.1: return 'Moderate'\n if p_value > 0.01 and p_value <= 0.5: return 'Strong'\n if p_value <= 0.01: return 'Very Strong'\n return 'WTF'\n\n\n# In[ ]:\n\ndef critical_t(alpha, df):\n return stats.t.ppf(1 - (alpha / 2), df=df)\n\ndef critical_norm(alpha):\n return stats.norm.ppf(1 - (alpha / 2))\n\ndef critical_binomial(alpha):\n return stats.binom.ppf(1 - (alpha / 2))\n\ndef p_value_from_t(observed_t, df, sides=2.0):\n return sides * (1 - stats.norm.cdf(observed_t))\n\ndef p_value_from_binomial(observed_t, df, sides=2.0):\n return sides * (1 - stats.binom.cdf(observed_t))\n\ndef analyze_distribution(data, alpha=0.05, h0=0.0, color='b', is_log_data=False):\n #calc confidence level\n cl = 1-alpha\n \n #flatten data into one array\n all_values = data.values.flatten()\n #drop na values \n all_values = all_values[~np.isnan(all_values)]\n sample_mean = all_values.mean()\n pop_std = all_values.std(ddof=1) #ddof = 1 implies pop std()\n \n df = len(all_values) - len(data.columns) #degrees of freedom = number of observations - number of classes\n \n t_stat = critical_t(alpha, df) #get critical value(s)\n \n #plotting stuff\n hist_fig, hist_ax = plt.subplots(len(data.columns), 1, sharex=True)\n quant_fig, quant_ax = plt.subplots(1, len(data.columns))\n box_fig, box_ax = plt.subplots(1, 1)\n box_fig.suptitle('Box Plot')\n data.boxplot(vert=False)\n hist_fig.suptitle('Group Histograms')\n quant_fig.suptitle('Quantile Plots (Raw)')\n group_summaries = []\n \n if not is_log_data: #if we aren't looking at the log data - we should add it to a Q-Q plot to test for normality\n 
quantlog_fig, quantlog_ax = plt.subplots(1, len(data.columns))\n quantlog_fig.suptitle('Quantile Plots (Log)')\n \n i = 0\n for x in data:\n #drop any shitty data\n clean_data = data[x].dropna()\n #run some group stats\n group_se = clean_data.std()/len(clean_data)**0.5\n group_mean = clean_data.mean()\n group_std = clean_data.std()\n \n #plot a histogram\n group_df = len(clean_data) - 1\n group_t_stat = stats.t.ppf(1-(alpha/2),df=group_df)\n hist_ax[i].hist(clean_data, density=True)\n hist_ax[i].set_title(x)\n \n #plt a normal with same std and mean on top of hist\n x_lim = hist_ax[i].get_xlim()\n x_vals = np.linspace(x_lim[0], x_lim[1], 100)\n p = stats.norm.pdf(x_vals, group_mean, group_std)\n hist_ax[i].plot(x_vals, p, 'k', linewidth=2)\n \n #plot quantile plots of data and log x-form where appropriate\n stats.probplot(data[x].dropna(), plot=quant_ax[i])\n quant_ax[i].set_title(x)\n \n if not is_log_data:\n log_data = np.log(clean_data[clean_data != 0])\n quantlog_ax[i].set_title(x)\n stats.probplot(log_data, plot=quantlog_ax[i])\n\n \n #calc CL of std dev from X2 distribution\n #calc CL of mean from t-dist\n x2 = stats.chi2.ppf((1-alpha/2), df=group_df)\n lower_std = ((group_df*group_std**2)/x2)**0.5\n x2 = stats.chi2.ppf(alpha/2, df=group_df)\n upper_std = ((group_df*group_std**2)/x2)**0.5\n lower_mean = group_mean-group_t_stat*group_se\n upper_mean = group_mean+group_t_stat*group_se\n #lower, upper\n \n group_summaries.append(\n {\n 'summary_type': f'Group-{x}',\n 'p_value_perm':np.nan,\n 'p_value_t_test':np.nan,\n 'number_of_obs': len(clean_data),\n 'mean':group_mean,\n 'std_dev':group_std, \n 'std_err':group_se,\n 'min':clean_data.min(),\n 'max':clean_data.max(),\n 'observed_diff': np.nan,\n f'{cl}_cl_mean':[round(lower_mean, 4), round(upper_mean, 4)],\n f'{cl}_cl_std':[round(lower_std, 4), round(upper_std, 4)],\n 't_value': group_t_stat,\n 't_value_perm': np.nan,\n 'observed_t': np.nan,\n 'df':group_df\n }\n )\n i+=1\n\n \n #lets assume the std err as 
sigma = s/sqrt(n)\n if len(data.columns) == 1:\n std_err = pop_std/(len(all_values)**0.5)\n mean = all_values.mean()\n chunk_size = int(round(len(all_values)/2, 0))\n observed_t = (mean - h0)/pop_std/len(all_values)**0.5\n ower_mean = mean-t_stat*std_err\n upper_mean = mean+t_stat*std_err\n else: #pooled SD\n group_1 = data.iloc[:,0].dropna()\n group_2 = data.iloc[:,1].dropna()\n group_1_std = group_1.std(ddof=1)\n group_2_std = group_2.std(ddof=1)\n n1 = len(group_1) - 1\n n2 = len(group_2) - 1\n mean = group_1.mean() - group_2.mean()\n pop_std = (((n1*group_1_std**2)+(n2*group_2_std**2))/(n1+n2))**0.5\n chunk_size = len(group_1) if len(group_1) < len(group_2) else len(group_2)\n std_err = pop_std * ((1/len(group_1)) + (1/len(group_2)))**0.5\n observed_t = (mean-h0)/(pop_std *((1/len(group_1)) + (1/len(group_2)))**0.5)\n lower_mean = mean-t_stat*std_err\n upper_mean = mean+t_stat*std_err\n x2 = stats.chi2.ppf((1-alpha/2), df=df)\n lower_std = ((df*pop_std**2)/x2)**0.5\n x2 = stats.chi2.ppf(alpha/2, df=df)\n upper_std = ((df*pop_std**2)/x2)**0.5\n \n p_perm, t_perm, diff = perm_test(data, h0, mean, 1000, len(all_values), chunk_size)\n p_value = 2.0*(1 - stats.t.cdf(observed_t, df=df))\n #TODO: recalc CL based on 1-sided vs 2 sided\n pooled_summary = pd.DataFrame.from_dict(\n {\n 'summary_type': 'POOLED',\n 'p_value_perm':p_perm,\n 'p_value_t_test':p_value,\n 'number_of_obs': len(all_values),\n 'mean':mean,\n 'std_dev':pop_std, \n 'std_err':std_err,\n 'min':all_values.min(),\n 'max':all_values.max(),\n 'observed_diff': round(diff, 4),\n f'{cl}_cl_mean':[round(lower_mean, 4), round(upper_mean, 4)],\n f'{cl}_cl_std':[round(lower_std, 4), round(upper_std, 4)],\n 't_value': t_stat,\n 't_value_perm': t_perm,\n 'observed_t': observed_t,\n 'df':df\n }, orient='index').T.set_index(['summary_type'])\n group_summaries = [pd.DataFrame.from_dict(s, orient='index').T.set_index(['summary_type']) for s in group_summaries]\n group_summaries.append(pooled_summary)\n summary = 
pd.concat(group_summaries)\n \n #plot and shade critical regions\n norm_fig, norm_ax = plt.subplots(1, 1)\n band_size = 5 #could be param\n plot_min = mean - band_size * pop_std\n plot_max = mean + band_size * pop_std\n x = np.linspace(plot_min, plot_max, 1000)\n iq = stats.norm(h0, pop_std)\n low = h0 - (t_stat*pop_std)\n high = h0 + (t_stat*pop_std)\n normal = iq.pdf(x)\n norm_ax.plot(x, normal, 'r-', lw=3, label=f'Norm x={round(mean, 4)} std={round(pop_std, 4)}') #plot the norm\n norm_ax.plot([mean, mean], [0, normal.max()], 'g-', lw=3, label=f'Sample Mean={round(mean, 4)}') #plot the sample mean\n norm_ax.plot([h0, h0], [0, normal.max()], 'y-', lw=3, label=f'H0={round(h0, 4)}') #plot the pop mean\n norm_ax.plot([low, low], [0, iq.pdf(low)], 'c-*', lw=3) #plot the observed t\n norm_ax.plot([high, high], [0, iq.pdf(high)], 'c-*', lw=3) #plot the observed t\n norm_ax.legend(bbox_to_anchor=(1,-0.05), loc=\"lower right\", \n bbox_transform=norm_fig.transFigure, ncol=3)\n \n norm_fig.suptitle(f'2 Sided T-Test Alpha={alpha} T-Stat={round(t_stat, 4)} p-value (AUC)={round(p_value, 4)}')\n p_low = x[np.logical_and(x <= low, x >= plot_min)]\n p_high = x[np.logical_and(x >= high, x <= plot_max)]\n norm_ax.fill_between(\n p_low,\n iq.pdf(p_low),\n color=color,\n alpha=0.5,\n linewidth=0,\n )\n norm_ax.fill_between(\n p_high,\n iq.pdf(p_high),\n color=color,\n alpha=0.5,\n linewidth=0,\n )\n return summary\n\n\n# In[ ]:\n\n\ndef perm_test(data, expected, observed, number_of_permutations=1000.0, sample_size=30, chunk_size=10):\n #flatten data into one array\n all_values = pd.DataFrame(data.values.flatten())\n #drop na values \n all_values = all_values[~np.isnan(all_values)]\n assert(sample_size <= len(all_values))\n xbarholder = []\n counter = 0.0\n observed_diff = abs(expected - observed)\n for x in range(1, number_of_permutations):\n scramble = all_values.sample(sample_size)\n random_1 = scramble[0:chunk_size]\n random_2 = scramble[chunk_size:len(all_values)]\n 
assert(len(random_1) + len(random_2) == sample_size)\n diff = random_1.mean() - random_2.mean()\n xbarholder.append(diff.values[0])\n if abs(diff.values[0]) > observed_diff:\n counter += 1\n p_value = counter/number_of_permutations\n t_value = stats.t.ppf(p_value/2, df=number_of_permutations) #assume two-sided\n permutations = pd.DataFrame(pd.Series(xbarholder))\n #return another table of stats and histogram a-la-sas\n return p_value, t_value, observed_diff\n\n" ]
[ [ "scipy.stats.chi2.ppf", "scipy.stats.norm.ppf", "pandas.concat", "numpy.log", "pandas.Series", "numpy.linspace", "scipy.stats.norm.pdf", "scipy.stats.norm.cdf", "numpy.isnan", "matplotlib.pyplot.subplots", "scipy.stats.t.ppf", "scipy.stats.norm", "scipy.stats.probplot", "scipy.stats.binom.cdf", "scipy.stats.binom.ppf", "scipy.stats.t.cdf", "pandas.DataFrame.from_dict", "numpy.logical_and" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ArjunNarayanan/DeepEdgeFlip
[ "d849c06a37ca861ee2cfc0b68fb02745643e2c88" ]
[ "ValueFunction/test_gcnconv.py" ]
[ "import torch\nfrom torch.nn import ReLU\nfrom torch_geometric.nn import GCNConv, Sequential, global_add_pool\nimport torch.optim as optim\nimport load_graphs\nimport numpy as np\n\n\nclass GCN(torch.nn.Module):\n def __init__(self, arch):\n super().__init__()\n self.gcn = make_network(arch)\n\n def forward(self, batch):\n x, edge_index = batch.x, batch.edge_index\n x = self.gcn(x, edge_index)\n\n return global_add_pool(x, batch.batch)\n\n\ndef make_network(arch):\n gcn = []\n for idx in range(len(arch) - 2):\n gcn.append((GCNConv(arch[idx], arch[idx + 1]), \"x, edge_index -> x\"))\n gcn.append(ReLU())\n\n gcn.append((GCNConv(arch[-2], arch[-1]), \"x, edge_index -> x\"))\n model = Sequential(\"x, edge_index\", gcn)\n return model\n\n\ndef train_model(model, optimizer, train_set, test_set, num_epochs=50):\n loss_function = torch.nn.MSELoss()\n\n train_loss_history = np.zeros([num_epochs, 1])\n test_loss_history = np.zeros([num_epochs, 1])\n test_accuracy = np.zeros([num_epochs, 1])\n\n for epoch in range(num_epochs):\n model.train()\n train_loss = 0\n for idx, batch in enumerate(train_set):\n optimizer.zero_grad()\n\n prediction = model(batch)\n loss = loss_function(prediction, batch.y)\n train_loss += loss.item()\n\n loss.backward()\n optimizer.step()\n\n train_loss_history[epoch] = train_loss / len(train_set)\n\n model.eval()\n\n with torch.no_grad():\n batch = next(iter(test_set))\n test_pred = model(batch)\n test_loss_history[epoch] = loss_function(test_pred, batch.y).item()\n\n test_pred = test_pred.numpy()\n ground_truth = batch.y.numpy()\n accuracy = measure_accuracy(test_pred, ground_truth)\n test_accuracy[epoch] = accuracy\n\n print(\"epoch = %d \\t train loss = %.3e \\t test loss = %.3e \\t accuracy = %.1f\" % (\n epoch, train_loss_history[epoch], test_loss_history[epoch], accuracy))\n\n return train_loss_history, test_loss_history, test_accuracy\n\n\ndef measure_accuracy(prediction, truth):\n n = len(prediction)\n assert len(truth) == n\n\n flags = 
np.zeros([n, n], dtype=bool)\n for i in range(n):\n for j in range(i, n):\n pi = prediction[i, 0]\n pj = prediction[j, 0]\n\n ti = truth[i, 0]\n tj = truth[j, 0]\n\n if (pi < pj and ti < tj) or (pi > pj and ti > tj) or (pi == pj and ti == tj):\n flags[i, j] = True\n\n num_correct = np.count_nonzero(flags)\n total_comparisons = n * (n + 1) / 2\n accuracy = num_correct / total_comparisons * 100\n return accuracy\n\n\ndef plot_loss(train,test,filename=\"\",title=\"\"):\n fig, ax = plt.subplots()\n ax.plot(train_loss, label=\"Train Loss\")\n ax.set_yscale(\"log\")\n ax.plot(test_loss, label=\"Test Loss\")\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(\"Loss\")\n ax.set_title(title)\n ax.legend()\n fig.tight_layout()\n if len(filename) > 0:\n fig.savefig(filename)\n\ndef plot_accuracy(accuracy,title=\"\",filename=\"\"):\n fig, ax = plt.subplots()\n ax.plot(test_accuracy, color=\"orange\")\n ax.set_xlabel(\"Epochs\")\n ax.set_ylabel(\"Test accuracy\")\n ax.set_ylim([0, 100])\n ax.set_title(title)\n fig.tight_layout()\n if len(filename) > 0:\n fig.savefig(filename)\n\n\nnum_meshes = 2000\ntrain_ratio = 0.8\n\n# train_loader, test_loader = load_graphs.load_all_graphs(num_meshes, batch_size=32, train_ratio=train_ratio)\n\narch = [1,4,4,1]\nlr = 0.01\nnum_epochs = 50\nmodel = GCN(arch)\noptimizer = optim.Adam(model.parameters(), lr=lr)\n\ntrain_loss, test_loss, test_accuracy = train_model(model, optimizer, train_loader, test_loader, num_epochs=num_epochs)\n\n# import matplotlib.pyplot as plt\n# architecture = \"1-4-1\"\n# foldername = \"results\\\\\"\n\n" ]
[ [ "torch.no_grad", "numpy.count_nonzero", "torch.nn.ReLU", "numpy.zeros", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neumannd/compliance-checker
[ "58262b23b6b138c656ef3e10b75306b700b9a71f" ]
[ "compliance_checker/cf/cf.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport sqlite3\nimport sys\n\nfrom collections import OrderedDict, defaultdict\nfrom functools import wraps\nfrom warnings import warn\n\nimport numpy as np\nimport pyproj\nimport regex\n\nfrom cf_units import Unit\n\nfrom compliance_checker import cfutil\nfrom compliance_checker.base import BaseCheck, BaseNCCheck, Result, TestCtx\nfrom compliance_checker.cf import util\nfrom compliance_checker.cf.appendix_d import (\n dimless_vertical_coordinates_1_6,\n dimless_vertical_coordinates_1_7,\n no_missing_terms,\n)\nfrom compliance_checker.cf.appendix_e import cell_methods16, cell_methods17\nfrom compliance_checker.cf.appendix_f import (\n ellipsoid_names17,\n grid_mapping_attr_types16,\n grid_mapping_attr_types17,\n grid_mapping_dict16,\n grid_mapping_dict17,\n horizontal_datum_names17,\n prime_meridian_names17,\n)\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef print_exceptions(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n from traceback import print_exc\n\n print_exc()\n\n return wrapper\n\n\n# helper to see if we should do DSG tests\ndef is_likely_dsg(func):\n @wraps(func)\n def _dec(s, ds):\n if hasattr(ds, \"featureType\"):\n return func(s, ds)\n\n # @TODO: skips if we have formalized skips\n return None\n\n return _dec\n\n\nclass CFBaseCheck(BaseCheck):\n \"\"\"\n CF Convention Checker Base\n \"\"\"\n\n def __init__(self, options=None):\n # The compliance checker can be run on multiple datasets in a single\n # instantiation, so caching values has be done by the unique identifier\n # for each dataset loaded.\n\n # Each default dict is a key, value mapping from the dataset object to\n # a list of variables\n super(CFBaseCheck, self).__init__(options)\n self._coord_vars = defaultdict(list)\n self._ancillary_vars = defaultdict(list)\n self._clim_vars = defaultdict(list)\n self._metadata_vars = defaultdict(list)\n 
self._boundary_vars = defaultdict(list)\n self._geophysical_vars = defaultdict(list)\n self._aux_coords = defaultdict(list)\n\n self._std_names = util.StandardNameTable()\n\n self.section_titles = { # dict of section headers shared by grouped checks\n \"2.2\": \"§2.2 Data Types\",\n \"2.3\": \"§2.3 Naming Conventions\",\n \"2.4\": \"§2.4 Dimensions\",\n \"2.5\": \"§2.5 Variables\",\n \"2.6\": \"§2.6 Attributes\",\n \"3.1\": \"§3.1 Units\",\n \"3.2\": \"§3.2 Long Name\",\n \"3.3\": \"§3.3 Standard Name\",\n \"3.4\": \"§3.4 Ancillary Data\",\n \"3.5\": \"§3.5 Flags\",\n \"4\": \"§4 Coordinate Types\",\n \"4.1\": \"§4.1 Latitude Coordinate\",\n \"4.2\": \"§4.2 Longitude Coordinate\",\n \"4.3\": \"§4.3 Vertical Coordinate\",\n \"4.4\": \"§4.4 Time Coordinate\",\n \"4.5\": \"§4.5 Discrete Axis\",\n \"5\": \"§5 Coordinate Systems\",\n \"5.1\": \"§5.1 Independent Latitude, Longitude, Vertical, and Time Axes\",\n \"5.2\": \"§5.2 2-D Latitude, Longitude, Coordinate Variables\",\n \"5.3\": \"§5.3 Reduced Horizontal Grid\",\n \"5.4\": \"§5.4 Timeseries of Station Data\",\n \"5.5\": \"§5.5 Trajectories\",\n \"5.6\": \"§5.6 Horizontal Coorindate Reference Systems, Grid Mappings, Projections\",\n \"5.7\": \"§5.7 Scalar Coorindate Variables\",\n \"6.1\": \"§6.1 Labels\",\n \"6.2\": \"§6.2 Alternative Coordinates\",\n \"7.1\": \"§7.1 Cell Boundaries\",\n \"7.2\": \"§7.2 Cell Measures\",\n \"7.3\": \"§7.3 Cell Methods\",\n \"7.4\": \"§7.4 Climatological Statistics\",\n \"8.1\": \"§8.1 Packed Data\",\n \"8.2\": \"§8.2 Compression by Gathering\",\n \"9.1\": \"§9.1 Features and feature types\",\n \"9.2\": \"§9.2 Collections, instances, and elements\",\n \"9.3\": \"§9.3 Representations of Collections of features in data variables\",\n \"9.4\": \"§9.4 The featureType attribute\",\n \"9.5\": \"§9.5 Coordinates and metadata\",\n \"9.6\": \"§9.6 Missing Data\",\n }\n\n ################################################################################\n # Helper Methods - var classifications, 
etc\n ################################################################################\n\n def setup(self, ds):\n \"\"\"\n Initialize various special variable types within the class.\n Mutates a number of instance variables.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n \"\"\"\n self.coord_vars = self._find_coord_vars(ds)\n self._find_aux_coord_vars(ds)\n self._find_ancillary_vars(ds)\n self._find_clim_vars(ds)\n self._find_boundary_vars(ds)\n self._find_metadata_vars(ds)\n self._find_cf_standard_name_table(ds)\n self._find_geophysical_vars(ds)\n coord_containing_vars = ds.get_variables_by_attributes(\n coordinates=lambda val: isinstance(val, str)\n )\n\n # coordinate data variables\n\n # Excerpt from \"§1.3 Overview\" on coordinate data\n # There are two methods used to identify variables that contain\n # coordinate data. The first is to use the NUG-defined \"coordinate\n # variables.\" The use of coordinate variables is required for all\n # dimensions that correspond to one dimensional space or time\n # coordinates . In cases where coordinate variables are not applicable,\n # the variables containing coordinate data are identified by the\n # coordinates attribute.\n\n # first read in variables referred to in coordinates which exist\n # in the dataset\n self.coord_data_vars = set()\n for var in coord_containing_vars:\n for coord_var_name in var.coordinates.strip().split(\" \"):\n if coord_var_name in ds.variables:\n self.coord_data_vars.add(coord_var_name)\n # then add in the NUG coordinate variables -- single dimension with\n # dimension name the same as coordinates\n self.coord_data_vars.update(self.coord_vars)\n\n def check_grid_mapping(self, ds):\n \"\"\"\n 5.6 When the coordinate variables for a horizontal grid are not\n longitude and latitude, it is required that the true latitude and\n longitude coordinates be supplied via the coordinates attribute. 
If in\n addition it is desired to describe the mapping between the given\n coordinate variables and the true latitude and longitude coordinates,\n the attribute grid_mapping may be used to supply this description.\n\n This attribute is attached to data variables so that variables with\n different mappings may be present in a single file. The attribute takes\n a string value which is the name of another variable in the file that\n provides the description of the mapping via a collection of attached\n attributes. This variable is called a grid mapping variable and is of\n arbitrary type since it contains no data. Its purpose is to act as a\n container for the attributes that define the mapping.\n\n The one attribute that all grid mapping variables must have is\n grid_mapping_name which takes a string value that contains the mapping's\n name. The other attributes that define a specific mapping depend on the\n value of grid_mapping_name. The valid values of grid_mapping_name along\n with the attributes that provide specific map parameter values are\n described in Appendix F, Grid Mappings.\n\n When the coordinate variables for a horizontal grid are longitude and\n latitude, a grid mapping variable with grid_mapping_name of\n latitude_longitude may be used to specify the ellipsoid and prime\n meridian.\n\n\n In order to make use of a grid mapping to directly calculate latitude\n and longitude values it is necessary to associate the coordinate\n variables with the independent variables of the mapping. This is done by\n assigning a standard_name to the coordinate variable. 
The appropriate\n values of the standard_name depend on the grid mapping and are given in\n Appendix F, Grid Mappings.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n\n ret_val = OrderedDict()\n grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)\n\n # Check the grid_mapping attribute to be a non-empty string and that its reference exists\n for variable in ds.get_variables_by_attributes(\n grid_mapping=lambda x: x is not None\n ):\n grid_mapping = getattr(variable, \"grid_mapping\", None)\n defines_grid_mapping = self.get_test_ctx(\n BaseCheck.HIGH, self.section_titles[\"5.6\"], variable.name\n )\n defines_grid_mapping.assert_true(\n (isinstance(grid_mapping, str) and grid_mapping),\n \"{}'s grid_mapping attribute must be a \"\n + \"space-separated non-empty string\".format(variable.name),\n )\n if isinstance(grid_mapping, str):\n # TODO (badams): refactor functionality to split functionality\n # into requisite classes\n if \":\" in grid_mapping and self._cc_spec_version >= \"1.7\":\n colon_count = grid_mapping.count(\":\")\n re_all = regex.findall(\n r\"(\\w+):\\s*((?:\\w+\\s+)*(?:\\w+)(?![\\w:]))\", grid_mapping\n )\n if colon_count != len(re_all):\n defines_grid_mapping.out_of += 1\n defines_grid_mapping.messages.append(\n \"Could not consume entire grid_mapping expression, please check for well-formedness\"\n )\n else:\n for grid_var_name, coord_var_str in re_all:\n defines_grid_mapping.assert_true(\n grid_var_name in ds.variables,\n \"grid mapping variable {} must exist in this dataset\".format(\n grid_var_name\n ),\n )\n for ref_var in coord_var_str.split():\n defines_grid_mapping.assert_true(\n ref_var in ds.variables,\n \"Coordinate-related variable {} referenced by grid_mapping variable {} must exist in this dataset\".format(\n ref_var, grid_var_name\n ),\n )\n\n else:\n for grid_var_name in grid_mapping.split():\n defines_grid_mapping.assert_true(\n grid_var_name in ds.variables,\n 
\"grid mapping variable {} must exist in this dataset\".format(\n grid_var_name\n ),\n )\n ret_val[variable.name] = defines_grid_mapping.to_result()\n\n # Check the grid mapping variables themselves\n for grid_var_name in grid_mapping_variables:\n valid_grid_mapping = self.get_test_ctx(\n BaseCheck.HIGH, self.section_titles[\"5.6\"], grid_var_name\n )\n grid_var = ds.variables[grid_var_name]\n\n grid_mapping_name = getattr(grid_var, \"grid_mapping_name\", None)\n\n # Grid mapping name must be in appendix F\n valid_grid_mapping.assert_true(\n grid_mapping_name in self.grid_mapping_dict,\n \"{} is not a valid grid_mapping_name.\".format(grid_mapping_name)\n + \" See Appendix F for valid grid mappings\",\n )\n\n # The self.grid_mapping_dict has a values of:\n # - required attributes\n # - optional attributes (can't check)\n # - required standard_names defined\n # - at least one of these attributes must be defined\n\n # We can't do any of the other grid mapping checks if it's not a valid grid mapping name\n if grid_mapping_name not in self.grid_mapping_dict:\n ret_val[grid_mapping_name] = valid_grid_mapping.to_result()\n continue\n\n grid_mapping = self.grid_mapping_dict[grid_mapping_name]\n required_attrs = grid_mapping[0]\n # Make sure all the required attributes are defined\n for req in required_attrs:\n valid_grid_mapping.assert_true(\n hasattr(grid_var, req),\n \"{} is a required attribute for grid mapping {}\".format(\n req, grid_mapping_name\n ),\n )\n\n # Make sure that exactly one of the exclusive attributes exist\n if len(grid_mapping) == 4:\n at_least_attr = grid_mapping[3]\n number_found = 0\n for attr in at_least_attr:\n if hasattr(grid_var, attr):\n number_found += 1\n valid_grid_mapping.assert_true(\n number_found == 1,\n \"grid mapping {}\".format(grid_mapping_name)\n + \"must define exactly one of these attributes: \"\n + \"{}\".format(\" or \".join(at_least_attr)),\n )\n\n # Make sure that exactly one variable is defined for each of the required 
standard_names\n expected_std_names = grid_mapping[2]\n for expected_std_name in expected_std_names:\n found_vars = ds.get_variables_by_attributes(\n standard_name=expected_std_name\n )\n valid_grid_mapping.assert_true(\n len(found_vars) == 1,\n \"grid mapping {} requires exactly\".format(grid_mapping_name)\n + \"one variable with standard_name \"\n + \"{} to be defined\".format(expected_std_name),\n )\n\n ret_val[grid_var_name] = valid_grid_mapping.to_result()\n\n return ret_val\n\n def check_conventions_version(self, ds):\n \"\"\"\n CF §2.6.1 the NUG defined global attribute Conventions to the string\n value \"CF-<version_number>\"; check the Conventions attribute contains\n the appropriate string.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: compliance_checker.base.Result\n \"\"\"\n\n valid = False\n reasoning = []\n correct_version_string = \"{}-{}\".format(\n self._cc_spec, self._cc_spec_version\n ).upper()\n if hasattr(ds, \"Conventions\"):\n conventions = regex.split(r\",|\\s+\", getattr(ds, \"Conventions\", \"\"))\n for convention in conventions:\n if convention == correct_version_string:\n valid = True\n break\n else:\n reasoning = [\n \"§2.6.1 Conventions global attribute does not contain \"\n '\"{}\"'.format(correct_version_string)\n ]\n else:\n valid = False\n reasoning = [\"§2.6.1 Conventions field is not present\"]\n return Result(\n BaseCheck.MEDIUM, valid, self.section_titles[\"2.6\"], msgs=reasoning\n )\n\n def _check_dimensionless_vertical_coordinates(\n self,\n ds,\n deprecated_units,\n version_specific_check,\n version_specific_dimless_vertical_coord_dict,\n ):\n \"\"\"\n Check the validity of dimensionless coordinates under CF\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :param list deprecated_units: list of string names of deprecated units\n :param function version_specific_check: version-specific implementation to check dimensionless vertical coord\n :param dict version_specific_dimless_coord_dict: 
version-specific dict of dimensionless vertical coords and computed standard names
        :return: List of results
        """
        ret_val = []

        z_variables = cfutil.get_z_variables(ds)

        # call version-specific implementation
        # (the callee appends its results to ret_val in place)
        for name in z_variables:
            version_specific_check(
                ds,
                name,
                deprecated_units,
                ret_val,
                version_specific_dimless_vertical_coord_dict,
            )

        return ret_val

    def _check_formula_terms(self, ds, coord, dimless_coords_dict):
        """
        Checks a dimensionless vertical coordinate contains valid formula_terms

        - formula_terms is a non-empty string
        - formula_terms is well-formed (each entry matches ``term: variable``)
        - every variable defined in formula_terms exists

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param str coord: name of the dimensionless vertical coordinate
                          variable being checked
        :param dict dimless_coords_dict: mapping of recognized standard_names
                                         to their expected formula terms
        :rtype: compliance_checker.base.Result
        """
        variable = ds.variables[coord]
        standard_name = getattr(variable, "standard_name", None)
        formula_terms = getattr(variable, "formula_terms", None)
        valid_formula_terms = TestCtx(BaseCheck.HIGH, self.section_titles["4.3"])

        valid_formula_terms.assert_true(
            isinstance(formula_terms, str) and formula_terms,
            "§4.3.2: {}'s formula_terms is a required attribute and must be a non-empty string"
            "".format(coord),
        )
        # We can't check any more
        if not formula_terms:
            return valid_formula_terms.to_result()

        # check that the formula_terms are well formed and are present
        # The pattern for formula terms is always component: variable_name
        # the regex grouping always has component names in even positions and
        # the corresponding variable name in odd positions.
        matches = regex.findall(
            r"([A-Za-z][A-Za-z0-9_]*: )([A-Za-z][A-Za-z0-9_]*)", variable.formula_terms
        )
        # m[0] includes the trailing ": "; strip those two characters to get
        # the bare term name
        terms = set(m[0][:-2] for m in matches)
        # get the variables named in the formula terms and check if any
        # are not present in the dataset
        missing_vars = sorted(set(m[1] for m in matches) - set(ds.variables))
        missing_fmt = "The following variable(s) referenced in {}:formula_terms are not present in 
the dataset: {}"
        valid_formula_terms.assert_true(
            len(missing_vars) == 0, missing_fmt.format(coord, ", ".join(missing_vars))
        )
        # try to reconstruct formula_terms by adding space in between the regex
        # matches.  If it doesn't exactly match the original, the formatting
        # of the attribute is incorrect
        reconstructed_formula = " ".join(m[0] + m[1] for m in matches)
        valid_formula_terms.assert_true(
            reconstructed_formula == formula_terms,
            "Attribute formula_terms is not well-formed",
        )

        valid_formula_terms.assert_true(
            standard_name in dimless_coords_dict,
            "unknown standard_name '{}' for dimensionless vertical coordinate {}"
            "".format(standard_name, coord),
        )
        # without a recognized standard_name we cannot validate the terms
        if standard_name not in dimless_coords_dict:
            return valid_formula_terms.to_result()

        valid_formula_terms.assert_true(
            no_missing_terms(standard_name, terms, dimless_coords_dict),
            "{}'s formula_terms are invalid for {}, please see appendix D of CF 1.6"
            "".format(coord, standard_name),
        )

        return valid_formula_terms.to_result()

    def _check_grid_mapping_attr_condition(self, attr, attr_name, ret_val):
        """
        Evaluate a condition (or series of conditions) for a particular
        attribute.  Designed to be overloaded in subclass implementations.

        :param attr: attribute to test condition for
        :param str attr_name: name of the attribute
        :param list ret_val: list of results to append to
        :rtype: None
        :return: None
        """
        raise NotImplementedError

    def _dims_in_order(self, dimension_order):
        """
        :param list dimension_order: A list of axes
        :rtype: bool
        :return: Returns True if the dimensions are in order U*, T, Z, Y, X,
                 False otherwise
        """
        # any number of non-T/Z/Y/X axes may precede, then at most one each
        # of T, Z, Y, X in that order
        regx = regex.compile(r"^[^TZYX]*T?Z?Y?X?$")
        dimension_string = "".join(dimension_order)
        return regx.match(dimension_string) is not None

    def _parent_var_attr_type_check(self, attr_name, var, ctx):
        """
        Checks that an attribute has an equivalent value to a parent variable.
        Takes an attribute name, variable, and test context on which to operate.
        :param str attr_name: The name of the attribute to be checked
        :param netCDF4.Variable var: The variable against which to be checked
        :param compliance_checker.base.TestCtx ctx: The associated test context to modify
        :rtype: None
        :return: None
        """
        attr_val = var.getncattr(attr_name)

        if isinstance(attr_val, (str, bytes)):
            # NOTE(review): a string-valued attribute only matches char-typed
            # ("S" kind) variables here — confirm whether vlen-string ("U"
            # kind) variables should also be accepted
            type_match = var.dtype.kind == "S"
            val_type = type(attr_val)
        else:
            val_type = attr_val.dtype.type
            type_match = val_type == var.dtype.type

        ctx.assert_true(
            type_match,
            "Attribute '{}' (type: {}) and parent variable '{}' (type: {}) "
            "must have equivalent datatypes".format(
                attr_name, val_type, var.name, var.dtype.type
            ),
        )

    def _find_aux_coord_vars(self, ds, refresh=False):
        """
        Returns a list of auxiliary coordinate variables

        An auxiliary coordinate variable is any netCDF variable that contains
        coordinate data, but is not a coordinate variable (in the sense of the term
        defined by CF).

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param bool refresh: if refresh is set to True, the cache is
                             invalidated.
        :rtype: list
        :return: List of 
variable names (str) that are defined to be auxiliary
                 coordinate variables.
        """
        # return the cached value unless a refresh is requested
        if self._aux_coords.get(ds, None) and refresh is False:
            return self._aux_coords[ds]

        self._aux_coords[ds] = cfutil.get_auxiliary_coordinate_variables(ds)
        return self._aux_coords[ds]

    def _find_boundary_vars(self, ds, refresh=False):
        """
        Returns dictionary of boundary variables mapping the variable instance
        to the name of the variable acting as a boundary variable.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param bool refresh: if refresh is set to True, the cache is
                             invalidated.
        :rtype: list
        :return: A list containing strings with boundary variable names.
        """
        if self._boundary_vars.get(ds, None) and refresh is False:
            return self._boundary_vars[ds]

        self._boundary_vars[ds] = cfutil.get_cell_boundary_variables(ds)

        return self._boundary_vars[ds]

    def _find_ancillary_vars(self, ds, refresh=False):
        """
        Returns a list of variable names that are defined as ancillary
        variables in the dataset ds.

        An ancillary variable generally is a metadata container and referenced
        from other variables via a string reference in an attribute.

        - via ancillary_variables (3.4)
        - "grid mapping var" (5.6)
        - TODO: more?

        The result is cached by the passed in dataset object inside of this
        checker.  Pass refresh=True to redo the cached value.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param bool refresh: if refresh is set to True, the cache is
                             invalidated.
        :rtype: list
        :return: List of variable names (str) that are defined as ancillary
                 variables in the dataset ds.
        """

        # Used the cached version if it exists and is not empty
        if self._ancillary_vars.get(ds, None) and refresh is False:
            return self._ancillary_vars[ds]

        # Invalidate the cache at all costs
        self._ancillary_vars[ds] = []

        for name, var in ds.variables.items():
            if hasattr(var, "ancillary_variables"):
                # names not present in the dataset (including empty strings
                # produced by repeated spaces) are skipped
                for anc_name in var.ancillary_variables.split(" "):
                    if anc_name in ds.variables:
                        self._ancillary_vars[ds].append(anc_name)

            if hasattr(var, "grid_mapping"):
                gm_name = var.grid_mapping
                if gm_name in ds.variables:
                    self._ancillary_vars[ds].append(gm_name)

        return self._ancillary_vars[ds]

    def _find_clim_vars(self, ds, refresh=False):
        """
        Returns a list of variables that are likely to be climatology variables based on CF §7.4

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param bool refresh: if refresh is set to True, the cache is
                             invalidated.
        :rtype: list
        :return: A list containing strings with geophysical variable
                 names.
        """

        if self._clim_vars.get(ds, None) and refresh is False:
            return self._clim_vars[ds]

        climatology_variable = cfutil.get_climatology_variable(ds)
        if climatology_variable:
            # NOTE(review): assumes self._clim_vars[ds] already exists (e.g.
            # a defaultdict(list) set up elsewhere); .append would raise
            # KeyError on a plain dict — confirm initialization
            self._clim_vars[ds].append(climatology_variable)

        return self._clim_vars[ds]

    def _find_cf_standard_name_table(self, ds):
        """
        Parse out the `standard_name_vocabulary` attribute and download that
        version of the cf standard name table.  If the standard name table has
        already been downloaded, use the cached version.  Modifies `_std_names`
        attribute to store standard names.  Returns True if the file exists and
        False if it fails to download.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: bool
        """
        # Get the standard name vocab
        standard_name_vocabulary = getattr(ds, "standard_name_vocabulary", "")

        # Try to parse this attribute to get version
        version = None
        try:
            if "cf standard name table" in standard_name_vocabulary.lower():
                # strip common decorations (parens, leading 'v', commas) off
                # each whitespace-separated token
                version = [
                    s.strip("(").strip(")").strip("v").strip(",")
                    for s in standard_name_vocabulary.split()
                ]
                # This assumes that table version number won't start with 0.
                version = [
                    s
                    for s in version
                    if s.isdigit() and len(s) <= 2 and not s.startswith("0")
                ]
                # more than one candidate token is ambiguous; give up
                if len(version) > 1:
                    return False
                else:
                    try:
                        version = version[0]
                    except IndexError:
                        warn(
                            "Cannot extract CF standard name version number "
                            "from standard_name_vocabulary string"
                        )
                        return False
            else:
                # Can't parse the attribute, use the packaged version
                return False
        # usually raised from .lower() with an incompatible (non-string)
        # data type
        except AttributeError:
            warn(
                "Cannot convert standard name table to lowercase.  This can "
                "occur if a non-string standard_name_vocabulary global "
                "attribute is supplied"
            )
            return False

        if version.startswith("v"):  # i.e 'v34' -> '34' drop the v
            version = version[1:]

        # If the packaged version is what we're after, then we're good
        if version == self._std_names._version:
            print(
                "Using packaged standard name table v{0}".format(version),
                file=sys.stderr,
            )
            return False

        # Try to download the version specified
        try:
            data_directory = util.create_cached_data_dir()
            location = os.path.join(
                data_directory, "cf-standard-name-table-test-{0}.xml".format(version)
            )
            # Did we already download this before?
            if not os.path.isfile(location):
                util.download_cf_standard_name_table(version, location)
                print(
                    "Using downloaded standard name table v{0}".format(version),
                    file=sys.stderr,
                )
            else:
                print(
                    "Using cached standard name table v{0} from {1}".format(
                        version, location
                    ),
                    file=sys.stderr,
                )

            self._std_names = util.StandardNameTable(location)
            return True
        except Exception as e:
            # There was an error downloading the CF table. That's ok, we'll just use the packaged version
            warn(
                "Problem fetching standard name table:\n{0}\n"
                "Using packaged v{1}".format(e, self._std_names._version)
            )
            return False

    def _find_coord_vars(self, ds, refresh=False):
        """
        Returns a list of variable names that identify as coordinate variables.

        The result is cached by the passed in dataset object inside of this
        checker. 
Pass refresh=True to redo the cached value.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param bool refresh: if refresh is set to True, the cache is
                             invalidated.
        :rtype: list
        :return: A list of variables names (str) that are defined as coordinate
                 variables in the dataset ds.
        """
        if ds in self._coord_vars and refresh is False:
            return self._coord_vars[ds]

        self._coord_vars[ds] = cfutil.get_coordinate_variables(ds)

        return self._coord_vars[ds]

    def _find_geophysical_vars(self, ds, refresh=False):
        """
        Returns a list of geophysical variables.  Modifies
        `self._geophysical_vars`

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param bool refresh: if refresh is set to True, the cache is
                             invalidated.
        :rtype: list
        :return: A list containing strings with geophysical variable
                 names.
        """
        if self._geophysical_vars.get(ds, None) and refresh is False:
            return self._geophysical_vars[ds]

        self._geophysical_vars[ds] = cfutil.get_geophysical_variables(ds)

        return self._geophysical_vars[ds]

    def _find_metadata_vars(self, ds, refresh=False):
        """
        Returns a list of netCDF variable instances for those that are likely metadata variables

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param bool refresh: if refresh is set to True, the cache is
                             invalidated.
        :rtype: list
        :return: List of variable names (str) that are likely metadata
                 variable candidates.

        """
        if self._metadata_vars.get(ds, None) and refresh is False:
            return self._metadata_vars[ds]

        self._metadata_vars[ds] = []
        for name, var in ds.variables.items():

            # ancillary and coordinate variables are never metadata candidates
            if name in self._find_ancillary_vars(ds) or name in self._find_coord_vars(
                ds
            ):
                continue

            # well-known platform/station identifier names
            if name in (
                "platform_name",
                "station_name",
                "instrument_name",
                "station_id",
                "platform_id",
                "surface_altitude",
            ):
                self._metadata_vars[ds].append(name)

            # any variable carrying a non-empty cf_role attribute
            elif getattr(var, "cf_role", "") != "":
                self._metadata_vars[ds].append(name)

            # dimensionless variables without a standard_name
            elif (
                getattr(var, "standard_name", None) is None and len(var.dimensions) == 0
            ):
                self._metadata_vars[ds].append(name)

        return self._metadata_vars[ds]

    def _get_coord_axis_map(self, ds):
        """
        Returns a dictionary mapping each coordinate to a letter identifier
        describing the _kind_ of coordinate.

        :param netCDF4.Dataset ds: An open netCDF dataset

        :rtype: dict
        :return: A dictionary with variable names mapped to axis abbreviations,
                 i.e. {'longitude': 'X', ... 'pressure': 'Z'}
        """
        expected = ["T", "Z", "Y", "X"]
        coord_vars = self._find_coord_vars(ds)
        coord_axis_map = {}

        # L - Unlimited Coordinates
        # T - Time coordinates
        # Z - Depth/Altitude Coordinate
        # Y - Y-Coordinate (latitude)
        # X - X-Coordinate (longitude)
        # C - Compression Coordinate
        # A - Auxiliary Coordinate
        # I - Instance Coordinate
        # U - Unknown Coordinate

        time_variables = cfutil.get_time_variables(ds)
        lat_variables = cfutil.get_latitude_variables(ds)
        lon_variables = cfutil.get_longitude_variables(ds)
        z_variables = cfutil.get_z_variables(ds)

        for coord_name in coord_vars:
            coord_var = ds.variables[coord_name]
            axis = getattr(coord_var, "axis", None)
            standard_name = getattr(coord_var, "standard_name", None)

            # Unlimited dimensions must come first
            if ds.dimensions[coord_name].isunlimited():
                coord_axis_map[coord_name] = "L"
            # axis takes precedence over standard_name
            elif axis in expected:
                coord_axis_map[coord_name] = axis
            elif standard_name == "time":
                coord_axis_map[coord_name] = "T"
            elif standard_name == "longitude":
                coord_axis_map[coord_name] = "X"
            elif standard_name == "latitude":
                coord_axis_map[coord_name] = "Y"
            elif standard_name in ["height", "depth", "altitude"]:
                coord_axis_map[coord_name] = "Z"
            elif cfutil.is_compression_coordinate(ds, coord_name):
                coord_axis_map[coord_name] = "C"
            # fall back to heuristic classification by variable name
            elif coord_name in time_variables:
                coord_axis_map[coord_name] = "T"
            elif coord_name 
in z_variables:
                coord_axis_map[coord_name] = "Z"
            elif coord_name in lat_variables:
                coord_axis_map[coord_name] = "Y"
            elif coord_name in lon_variables:
                coord_axis_map[coord_name] = "X"
            else:
                # mark the coordinate variable as unknown
                coord_axis_map[coord_name] = "U"

        for dimension in self._get_instance_dimensions(ds):
            if dimension not in coord_axis_map:
                coord_axis_map[dimension] = "I"

        # Dimensions of auxiliary coordinate variables will be marked with A.
        # This is useful to help determine if the dimensions are used like a
        # mapping from grid coordinates to physical lat/lon
        for coord_name in self._find_aux_coord_vars(ds):
            coord_var = ds.variables[coord_name]
            # Skip label auxiliary coordinates
            if coord_var.dtype.char == "S":
                continue
            for dimension in coord_var.dimensions:
                if dimension not in coord_axis_map:
                    coord_axis_map[dimension] = "A"

        # If a dimension does not have a coordinate variable mark it as unknown
        # 'U'
        for dimension in ds.dimensions:
            if dimension not in coord_axis_map:
                coord_axis_map[dimension] = "U"

        return coord_axis_map

    def _get_coord_vars(self, ds):
        """
        Returns names of NUG coordinate variables: variables whose single
        dimension has the same name as the variable itself.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of coordinate variable names (str)
        """
        coord_vars = []
        for name, var in ds.variables.items():
            if (name,) == var.dimensions:
                coord_vars.append(name)
        return coord_vars

    def _get_dimension_order(self, ds, name, coord_axis_map):
        """
        Returns a list of strings corresponding to the named axis of the dimensions for a variable.

        Example::
            self._get_dimension_order(ds, 'temperature', coord_axis_map)
            --> ['T', 'Y', 'X']

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param str name: Name of the variable
        :param dict coord_axis_map: A dictionary mapping each coordinate variable and dimension to a named axis

        :rtype: list
        :return: A list of strings corresponding to the named axis of the dimensions for a variable
        """

        retval = []
        variable = ds.variables[name]
        for dim in variable.dimensions:
            retval.append(coord_axis_map[dim])
        return retval

    def _get_instance_dimensions(self, ds):
        """
        Returns a list of dimensions marked as instance dimensions

        :param netCDF4.Dataset ds: An open netCDF dataset

        :rtype: list
        :returns: A list of variable dimensions
        """
        ret_val = []
        # a dimension is an instance dimension if it is the first dimension
        # of a variable bearing a string cf_role attribute
        for variable in ds.get_variables_by_attributes(
            cf_role=lambda x: isinstance(x, str)
        ):
            if variable.ndim > 0:
                ret_val.append(variable.dimensions[0])
        return ret_val

    def _get_pretty_dimension_order(self, ds, name):
        """
        Returns a comma separated string of the dimensions for a specified
        variable

        :param netCDF4.Dataset ds: An open netCDF dataset
        :param str name: A string with a valid NetCDF variable name for the
                         dataset
        :rtype: str
        :return: A comma separated string of the variable's dimensions
        """
        dim_names = []
        for dim in ds.variables[name].dimensions:
            dim_name = dim
            if ds.dimensions[dim].isunlimited():
                dim_name += " (Unlimited)"
            dim_names.append(dim_name)
        return ", ".join(dim_names)

    def _is_station_var(self, var):
        """
        Returns True if the NetCDF variable is associated with a station, False
        otherwise.

        :param netCDF4.Variable var: a variable in an existing NetCDF dataset
        :rtype: bool
        :return: Status of whether variable appears to be associated with a
                 station
        """

        if getattr(var, "standard_name", None) in (
            "platform_name",
            "station_name",
            "instrument_name",
        ):
            return True
        return False

    def _split_standard_name(self, standard_name):
        """
        Returns a tuple of the standard_name and standard_name modifier

        Nones are used to represent the absence of a modifier or standard_name

        :rtype: tuple
        :return: 2-tuple of standard_name and modifier as strings
        """

        if isinstance(standard_name, str) and " " in standard_name:
            return standard_name.split(" ", 1)
        # if this isn't a string, then it doesn't make sense to split
        # -- treat value as standard 
name with no modifier
        else:
            return standard_name, None

    def check_appendix_a(self, ds):
        """
        Validates a CF dataset against the contents of its Appendix A table for
        attribute types and locations.  Returns a list of results with the
        outcomes of the Appendix A validation results against the existing
        attributes in the docstring.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: A list of results corresponding to the results returned
        """
        # if 'enable_appendix_a_checks' isn't specified in the checks,
        # don't do anything on this check
        results = []
        if "enable_appendix_a_checks" not in self.options:
            return results
        # only Appendix A attributes that actually appear as global
        # attributes on this dataset
        possible_global_atts = set(ds.ncattrs()).intersection(self.appendix_a.keys())
        attr_location_ident = {
            "G": "global attributes",
            "C": "coordinate data",
            "D": "non-coordinate data",
        }

        def att_loc_print_helper(att_letter):
            """
            Returns a string corresponding to attr_location ident in
            human-readable form. E.g. an input of 'G' will return
            "global attributes (G)"

            :param str att_letter: An attribute letter corresponding to the
                                   "Use" column in CF Appendix A
            :rtype: str
            :return: A string with a human-readable name followed by the input
                     letter specified
            """

            return "{} ({})".format(
                attr_location_ident.get(att_letter, "other"), att_letter
            )

        def _att_loc_msg(att_loc):
            """
            Helper method for formatting an error message when an attribute
            appears in the improper location corresponding to the "Use" column
            in CF Appendix A.

            :param set att_loc: A set with the possible valid locations of the
                                attribute corresponding to the "Use" column
                                in CF Appendix A
            :rtype: str
            :return: A human-readable string with the possible valid locations
                     of the attribute
            """
            att_loc_len = len(att_loc)
            # this is a fallback in case an empty att_loc is passed
            # it generally should not occur
            valid_loc = "no locations in the dataset"
            loc_sort = sorted(att_loc)
            if att_loc_len == 1:
                valid_loc = att_loc_print_helper(loc_sort[0])
            elif att_loc_len == 2:
                valid_loc = "{} and {}".format(
                    att_loc_print_helper(loc_sort[0]), att_loc_print_helper(loc_sort[1])
                )
            # shouldn't be reached under normal circumstances, as any attribute
            # should be either G, C, or D but if another
            # category is added, this will be useful.
            else:
                valid_loc = ", ".join(loc_sort[:-1]) + ", and {}".format(
                    att_loc_print_helper(loc_sort[-1])
                )
            return "This attribute may only appear in {}.".format(valid_loc)

        # first pass: validate global attributes against their allowed
        # locations and declared types
        for global_att_name in possible_global_atts:
            global_att = ds.getncattr(global_att_name)
            att_dict = self.appendix_a[global_att_name]
            att_loc = att_dict["attr_loc"]
            valid_loc_warn = _att_loc_msg(att_loc)
            if att_dict["cf_section"] is not None:
                # reduce e.g. "4.3.2" to "4.3" to look up the section title
                subsection_test = ".".join(att_dict["cf_section"].split(".")[:2])

                section_loc = self.section_titles.get(
                    subsection_test, att_dict["cf_section"]
                )
            else:
                section_loc = None
            test_ctx = TestCtx(BaseCheck.HIGH, section_loc)

            test_ctx.out_of += 1
            if "G" not in att_loc:
                test_ctx.messages.append(
                    '[Appendix A] Attribute "{}" should not be present in global (G) '
                    "attributes. {}".format(global_att_name, valid_loc_warn)
                )
            else:
                result = self._handle_dtype_check(global_att, global_att_name, att_dict)
                if not result[0]:
                    test_ctx.messages.append(result[1])
                else:
                    test_ctx.score += 1
            results.append(test_ctx.to_result())

        # second pass: coordinate ("C") and non-coordinate ("D") data
        # variables, each checked against the same Appendix A table
        noncoord_vars = set(ds.variables) - set(self.coord_data_vars)
        for var_set, coord_letter in (
            (self.coord_data_vars, "C"),
            (noncoord_vars, "D"),
        ):
            for var_name in var_set:
                var = ds.variables[var_name]
                possible_attrs = set(var.ncattrs()).intersection(self.appendix_a.keys())
                for att_name in possible_attrs:
                    att_dict = self.appendix_a[att_name]
                    if att_dict["cf_section"] is not None:
                        subsection_test = ".".join(
                            att_dict["cf_section"].split(".")[:2]
                        )

                        section_loc = self.section_titles.get(
                            subsection_test, att_dict["cf_section"]
                        )
                    else:
                        section_loc = None
                    test_ctx = TestCtx(BaseCheck.HIGH, section_loc, variable=var_name)
                    att_loc = att_dict["attr_loc"]
                    valid_loc_warn = _att_loc_msg(att_loc)
                    att = var.getncattr(att_name)
                    test_ctx.out_of += 1
                    if coord_letter not in att_loc:
                        test_ctx.messages.append(
                            '[Appendix A] Attribute "{}" should not be present in {} '
                            'variable "{}". {}'.format(
                                att_name,
                                att_loc_print_helper(coord_letter),
                                var_name,
                                valid_loc_warn,
                            )
                        )
                    else:
                        result = self._handle_dtype_check(att, att_name, att_dict, var)
                        if not result[0]:
                            test_ctx.messages.append(result[1])
                        else:
                            test_ctx.score += 1
                    results.append(test_ctx.to_result())

        return results

    def _check_attr_type(self, attr_name, attr_type, attribute, variable=None):
        """
        Check if an attribute `attr` is of the type `attr_type`. 
Upon getting\n a data type of 'D', the attr must have the same data type as the\n variable it is assigned to.\n\n Attributes designated type 'S' must be of type `str`. 'N' require\n numeric types, and 'D' requires the attribute type match the type\n of the variable it is assigned to.\n\n :param str attr_name: name of attr being checked (to format message)\n :param str attr_type: the correct type of the attribute\n :param attribute: attribute to check\n :param variable: if given, type should match attr\n :rtype tuple\n :return A two-tuple that contains pass/fail status as a boolean and\n a message string (or None if unset) as the second element.\n \"\"\"\n\n if attr_type == \"S\":\n if not isinstance(attribute, str):\n return [False, \"{} must be a string\".format(attr_name)]\n else:\n # if it's not a string, it should have a numpy dtype\n underlying_dtype = getattr(attribute, \"dtype\", None)\n\n # TODO check for np.nan separately\n if underlying_dtype is None:\n return [False, \"{} must be a numeric type\".format(attr_name)]\n\n # both D and N should be some kind of numeric value\n is_numeric = np.issubdtype(underlying_dtype, np.number)\n if attr_type == \"N\":\n if not is_numeric:\n return [False, \"{} must be a numeric type\".format(attr_name)]\n elif attr_type == \"D\":\n # TODO: handle edge case where variable is unset here\n temp_ctx = TestCtx()\n self._parent_var_attr_type_check(attr_name, variable, temp_ctx)\n var_dtype = getattr(variable, \"dtype\", None)\n if temp_ctx.messages:\n return (\n False,\n \"{} must be numeric and must be equivalent to {} dtype\".format(\n attr_name, var_dtype\n ),\n )\n else:\n # If we reached here, we fell off with an unrecognized type\n return (\n False,\n \"{} has unrecognized type '{}'\".format(attr_name, attr_type),\n )\n # pass if all other possible failure conditions have been evaluated\n return (True, None)\n\n def _handle_dtype_check(self, attribute, attr_name, attr_dict, variable=None):\n \"\"\"\n Helper function for 
class CFNCCheck(BaseNCCheck, CFBaseCheck):
    """Checker class combining netCDF support with the CF base checks.

    The base-class order is significant: BaseNCCheck must precede
    CFBaseCheck in the MRO, otherwise class attributes supplied by
    BaseNCCheck (such as ``supported_ds``) are not inherited by
    CFNCCheck.
    """
# CF Appendix A attribute table (base set shared by CF versions).
#
# Keys are attribute names; each value describes how the attribute is
# validated by the Appendix A machinery (see _handle_dtype_check /
# _check_attr_type above):
#   "Type":       "S" = must be a str, "N" = must be numeric,
#                 "D" = must match the dtype of the variable it is
#                 attached to.
#   "attr_loc":   set of location codes where the attribute may appear;
#                 "G" = global attribute (per _handle_dtype_check).
#                 "D" and "C" presumably mean data variable and
#                 coordinate variable respectively — TODO confirm
#                 against the consumer of attr_loc.
#   "cf_section": CF conventions document section the attribute is
#                 defined in, or None when not tied to one section.
appendix_a_base = {
    "Conventions": {"Type": "S", "attr_loc": {"G"}, "cf_section": None},
    "_FillValue": {"Type": "D", "attr_loc": {"D", "C"}, "cf_section": None},
    "add_offset": {"Type": "N", "attr_loc": {"D"}, "cf_section": "8.1"},
    "ancillary_variables": {"Type": "S", "attr_loc": {"D"}, "cf_section": "3.4"},
    "axis": {"Type": "S", "attr_loc": {"C"}, "cf_section": "4"},
    "bounds": {"Type": "S", "attr_loc": {"C"}, "cf_section": "7.1"},
    "calendar": {"Type": "S", "attr_loc": {"C"}, "cf_section": "4.4.1"},
    "cell_measures": {"Type": "S", "attr_loc": {"D"}, "cf_section": "7.2"},
    "cell_methods": {"Type": "S", "attr_loc": {"D"}, "cf_section": "7.3"},
    # cf_role type is "C" in document, which does not correspond
    # to types used, replaced with "S"
    "cf_role": {"Type": "S", "attr_loc": {"C"}, "cf_section": "9.5"},
    "climatology": {"Type": "S", "attr_loc": {"C"}, "cf_section": "7.4"},
    # comment was removed in this implementation
    "compress": {"Type": "S", "attr_loc": {"C"}, "cf_section": "8.2"},
    "coordinates": {"Type": "S", "attr_loc": {"D"}, "cf_section": "5"},
    # featureType type is "C" in document, which does not
    # correspond to types used, replaced with "S"
    "featureType": {"Type": "S", "attr_loc": {"G"}, "cf_section": "9.4"},
    "flag_masks": {"Type": "D", "attr_loc": {"D"}, "cf_section": "3.5"},
    "flag_meanings": {"Type": "S", "attr_loc": {"D"}, "cf_section": "3.5"},
    "flag_values": {"Type": "D", "attr_loc": {"D"}, "cf_section": "3.5"},
    "formula_terms": {"Type": "S", "attr_loc": {"C"}, "cf_section": "4.3.2"},
    "grid_mapping": {"Type": "S", "attr_loc": {"D"}, "cf_section": "5.6"},
    "history": {"Type": "S", "attr_loc": {"G"}, "cf_section": None},
    #'instance_dimension': {'Type': 'N', 'attr_loc': {'D'}, 'cf_section': '9.3'},
    "institution": {"Type": "S", "attr_loc": {"G", "D"}, "cf_section": "2.6.2"},
    "leap_month": {"Type": "N", "attr_loc": {"C"}, "cf_section": "4.4.1"},
    "leap_year": {"Type": "N", "attr_loc": {"C"}, "cf_section": "4.4.1"},
    "long_name": {"Type": "S", "attr_loc": {"D", "C"}, "cf_section": "3.2"},
    "missing_value": {"Type": "D", "attr_loc": {"D", "C"}, "cf_section": "2.5.1"},
    "month_lengths": {"Type": "N", "attr_loc": {"C"}, "cf_section": "4.4.1"},
    "positive": {"Type": "S", "attr_loc": {"C"}, "cf_section": None},
    "references": {"Type": "S", "attr_loc": {"G", "D"}, "cf_section": "2.6.2"},
    #'sample_dimension': {'Type': 'N', 'attr_loc': {'D'}, 'cf_section': '9.3'},
    "scale_factor": {"Type": "N", "attr_loc": {"D"}, "cf_section": "8.1"},
    "source": {"Type": "S", "attr_loc": {"G", "D"}, "cf_section": "2.6.2"},
    "standard_error_multiplier": {"Type": "N", "attr_loc": {"D"}, "cf_section": None},
    "standard_name": {"Type": "S", "attr_loc": {"D", "C"}, "cf_section": "3.3"},
    "title": {"Type": "S", "attr_loc": {"G"}, "cf_section": None},
    "units": {"Type": "S", "attr_loc": {"D", "C"}, "cf_section": "3.1"},
    "valid_max": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": None},
    "valid_min": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": None},
    "valid_range": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": None},
}
http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.6/cf-conventions.html\n http://cf-pcmdi.llnl.gov/conformance/requirements-and-recommendations/1.6/\"\"\"\n\n register_checker = True\n _cc_spec = \"cf\"\n _cc_spec_version = \"1.6\"\n _cc_description = \"Climate and Forecast Conventions (CF)\"\n _cc_url = \"http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html\"\n _cc_display_headers = {3: \"Errors\", 2: \"Warnings\", 1: \"Info\"}\n appendix_a = appendix_a_base\n\n def __init__(self, options=None): # initialize with parent methods and data\n super(CF1_6Check, self).__init__(options)\n\n self.cell_methods = cell_methods16\n self.grid_mapping_dict = grid_mapping_dict16\n self.grid_mapping_attr_types = grid_mapping_attr_types16\n\n ###############################################################################\n # Chapter 2: NetCDF Files and Components\n ###############################################################################\n\n def check_data_types(self, ds):\n \"\"\"\n Checks the data type of all netCDF variables to ensure they are valid\n data types under CF.\n\n CF §2.2 The netCDF data types char, byte, short, int, float or real, and\n double are all acceptable\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: compliance_checker.base.Result\n \"\"\"\n fails = []\n total = len(ds.variables)\n\n for k, v in ds.variables.items():\n if v.dtype.kind != \"S\" and all(\n v.dtype.type != t\n for t in (\n np.character,\n np.dtype(\"|S1\"),\n np.dtype(\"b\"),\n np.dtype(\"i2\"),\n np.dtype(\"i4\"),\n np.float32,\n np.double,\n )\n ):\n fails.append(\n \"The variable {} failed because the datatype is {}\".format(\n k, v.datatype\n )\n )\n return Result(\n BaseCheck.HIGH,\n (total - len(fails), total),\n self.section_titles[\"2.2\"],\n msgs=fails,\n )\n\n def check_child_attr_data_types(self, ds):\n \"\"\"\n For any variables which contain any of the following attributes:\n - valid_min/valid_max\n - valid_range\n - scale_factor\n - 
add_offset\n - _FillValue\n the data type of the attribute must match the type of its parent variable as specified in the\n NetCDF User Guide (NUG) https://www.unidata.ucar.edu/software/netcdf/docs/attribute_conventions.html,\n referenced in the CF Conventions in Section 2.5.2\n (http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html#missing-data)\n\n :param netCDF4.Dataset ds: open netCDF dataset object\n :rtype: compliance_checker.base.Result\n \"\"\"\n\n ctx = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"2.5\"])\n special_attrs = {\n \"actual_range\",\n \"valid_min\",\n \"valid_max\",\n \"valid_range\",\n \"scale_factor\",\n \"add_offset\",\n \"_FillValue\",\n }\n\n for var_name, var in ds.variables.items():\n for att_name in special_attrs.intersection(var.ncattrs()):\n self._parent_var_attr_type_check(att_name, var, ctx)\n return ctx.to_result()\n\n def check_naming_conventions(self, ds):\n \"\"\"\n Checks the variable names to ensure they are valid CF variable names under CF.\n\n CF §2.3 Variable, dimension and attribute names should begin with a letter\n and be composed of letters, digits, and underscores.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: compliance_checker.base.Result\n \"\"\"\n ret_val = []\n variable_naming = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"2.3\"])\n dimension_naming = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"2.3\"])\n attribute_naming = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"2.3\"])\n\n ignore_attributes = [\n \"_FillValue\",\n \"DODS\",\n \"_ChunkSizes\",\n \"_Coordinate\",\n \"_Unsigned\",\n ]\n\n rname = regex.compile(\"^[A-Za-z][A-Za-z0-9_]*$\")\n\n for name, variable in ds.variables.items():\n variable_naming.assert_true(\n rname.match(name) is not None,\n \"variable {} should begin with a letter and be composed of \"\n \"letters, digits, and underscores\".format(name),\n )\n\n # Keep track of all the attributes, we'll need to check them\n for attr in 
variable.ncattrs():\n if attr in ignore_attributes:\n continue\n # Special attributes made by THREDDS\n if attr.startswith(\"DODS\"):\n continue\n # Ignore model produced attributes\n if attr.startswith(\"_Coordinate\"):\n continue\n attribute_naming.assert_true(\n rname.match(attr) is not None,\n \"attribute {}:{} should begin with a letter and be composed of \"\n \"letters, digits, and underscores\".format(name, attr),\n )\n\n ret_val.append(variable_naming.to_result())\n\n for dimension in ds.dimensions:\n dimension_naming.assert_true(\n rname.match(dimension) is not None,\n \"dimension {} should begin with a latter and be composed of \"\n \"letters, digits, and underscores\".format(dimension),\n )\n ret_val.append(dimension_naming.to_result())\n\n for global_attr in ds.ncattrs():\n # Special attributes made by THREDDS\n if global_attr.startswith(\"DODS\"):\n continue\n if global_attr.startswith(\"EXTRA_DIMENSION\"):\n continue\n attribute_naming.assert_true(\n rname.match(global_attr) is not None,\n \"global attribute {} should begin with a letter and be composed of \"\n \"letters, digits, and underscores\".format(global_attr),\n )\n ret_val.append(attribute_naming.to_result())\n\n return ret_val\n\n def check_names_unique(self, ds):\n \"\"\"\n Checks the variable names for uniqueness regardless of case.\n\n CF §2.3 names should not be distinguished purely by case, i.e., if case\n is disregarded, no two names should be the same.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: compliance_checker.base.Result\n \"\"\"\n fails = []\n total = len(ds.variables)\n names = defaultdict(int)\n\n for k in ds.variables:\n names[k.lower()] += 1\n\n fails = [\n \"Variables are not case sensitive. 
Duplicate variables named: %s\" % k\n for k, v in names.items()\n if v > 1\n ]\n return Result(\n BaseCheck.MEDIUM,\n (total - len(fails), total),\n self.section_titles[\"2.3\"],\n msgs=fails,\n )\n\n def check_dimension_names(self, ds):\n \"\"\"\n Checks variables contain no duplicate dimension names.\n\n CF §2.4 A variable may have any number of dimensions, including zero,\n and the dimensions must all have different names.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: compliance_checker.base.Result\n \"\"\"\n fails = []\n total = len(ds.variables)\n\n for k, v in ds.variables.items():\n dims = defaultdict(int)\n for d in v.dimensions:\n dims[d] += 1\n\n for dimension, count in dims.items():\n if count > 1:\n fails.append(\n \"%s has two or more dimensions named %s\" % (k, dimension)\n )\n\n return Result(\n BaseCheck.HIGH,\n (total - len(fails), total),\n self.section_titles[\"2.4\"],\n msgs=fails,\n )\n\n def check_dimension_order(self, ds):\n \"\"\"\n Checks each variable's dimension order to ensure that the order is\n consistent and in order under CF §2.4\n\n CF §2.4 If any or all of the dimensions of a variable have the\n interpretations of \"date or time\" (T), \"height or depth\" (Z),\n \"latitude\" (Y), or \"longitude\" (X) then we recommend, those dimensions\n to appear in the relative order T, then Z, then Y, then X in the CDL\n definition corresponding to the file. 
    def check_dimension_order(self, ds):
        """
        Checks each variable's dimension order to ensure that the order is
        consistent and in order under CF §2.4

        CF §2.4 If any or all of the dimensions of a variable have the
        interpretations of "date or time" (T), "height or depth" (Z),
        "latitude" (Y), or "longitude" (X) then we recommend, those dimensions
        to appear in the relative order T, then Z, then Y, then X in the CDL
        definition corresponding to the file. All other dimensions should,
        whenever possible, be placed to the left of the spatiotemporal
        dimensions.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: compliance_checker.base.Result
        """
        valid_dimension_order = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.4"])
        # Build a map from coordinate variable to axis
        coord_axis_map = self._get_coord_axis_map(ds)

        # Check each variable's dimension order, excluding climatology and
        # bounds variables
        any_clim = cfutil.get_climatology_variable(ds)
        any_bounds = cfutil.get_cell_boundary_variables(ds)
        for name, variable in ds.variables.items():
            # Skip bounds/climatology variables, as they should implicitly
            # have the same order except for the bounds specific dimension.
            # This is tested later in the respective checks
            if name in any_bounds or name == any_clim:
                continue

            # Skip strings/labels: either a char-array dtype ("S") or a
            # variable-length str dtype counts as a label here.
            if hasattr(variable.dtype, "char") and variable.dtype.char == "S":
                continue
            elif variable.dtype == str:
                continue

            # Scalar (dimensionless) variables have no order to validate.
            if variable.dimensions:
                dimension_order = self._get_dimension_order(ds, name, coord_axis_map)
                valid_dimension_order.assert_true(
                    self._dims_in_order(dimension_order),
                    "{}'s dimensions are not in the recommended order "
                    "T, Z, Y, X. They are {}"
                    "".format(name, self._get_pretty_dimension_order(ds, name)),
                )
        return valid_dimension_order.to_result()
    def check_fill_value_outside_valid_range(self, ds):
        """
        Checks each variable's _FillValue to ensure that it's in valid_range or
        between valid_min and valid_max according to CF §2.5.1

        CF §2.5.1 The _FillValue should be outside the range specified by
        valid_range (if used) for a variable.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of Results
        """
        valid_fill_range = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.5"])

        for name, variable in ds.variables.items():
            # If the variable doesn't have a defined _FillValue don't check it.

            if not hasattr(variable, "_FillValue"):
                continue

            fill_value = variable._FillValue

            attrs = variable.ncattrs()

            # Determine the valid range: valid_range takes priority over the
            # valid_min/valid_max pair; a string-valued range attribute is
            # itself a failure and the range comparison is skipped.
            if "valid_range" in attrs:
                if isinstance(variable.valid_range, str):
                    m = "§2.5.1 Fill Values should be outside the range specified by valid_range"  # subsection message
                    valid_fill_range.assert_true(
                        False,
                        "{};\n\t{}:valid_range must be a numeric type not a string".format(
                            m, name
                        ),
                    )
                    continue
                rmin, rmax = variable.valid_range
                spec_by = "valid_range"

            elif "valid_min" in attrs and "valid_max" in attrs:
                if isinstance(variable.valid_min, str):
                    valid_fill_range.assert_true(
                        False,
                        "{}:valid_min must be a numeric type not a string".format(name),
                    )
                if isinstance(variable.valid_max, str):
                    valid_fill_range.assert_true(
                        False,
                        "{}:valid_max must be a numeric type not a string".format(name),
                    )
                # either bound being a string makes the range unusable
                if isinstance(variable.valid_min, str) or isinstance(
                    variable.valid_max, str
                ):
                    continue
                rmin = variable.valid_min
                rmax = variable.valid_max
                spec_by = "valid_min/valid_max"
            else:
                # no range specified at all; nothing to compare against
                continue

            # NaN fill values trivially satisfy "outside the range" since
            # they compare unequal/unordered to everything.
            if np.isnan(fill_value):
                valid = True
            else:
                valid = fill_value < rmin or fill_value > rmax

            valid_fill_range.assert_true(
                valid,
                "{}:_FillValue ({}) should be outside the range specified by {} ({}, {})"
                "".format(name, fill_value, spec_by, rmin, rmax),
            )

        return valid_fill_range.to_result()
should be outside the range specified by {} ({}, {})\"\n \"\".format(name, fill_value, spec_by, rmin, rmax),\n )\n\n return valid_fill_range.to_result()\n\n def check_convention_globals(self, ds):\n \"\"\"\n Check the common global attributes are strings if they exist.\n\n CF §2.6.2 title/history global attributes, must be strings. Do not need\n to exist.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of Results\n \"\"\"\n attrs = [\"title\", \"history\"]\n\n valid_globals = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"2.6\"])\n\n for attr in attrs:\n dataset_attr = getattr(ds, attr, None)\n is_string = isinstance(dataset_attr, str)\n valid_globals.assert_true(\n is_string and len(dataset_attr),\n \"§2.6.2 global attribute {} should exist and be a non-empty string\" # subsection message\n \"\".format(attr),\n )\n return valid_globals.to_result()\n\n def check_convention_possibly_var_attrs(self, ds):\n \"\"\"\n Check variable and global attributes are strings for recommended attributes under CF §2.6.2\n\n CF §2.6.2 institution, source, references, and comment, either global\n or assigned to individual variables. When an attribute appears both\n globally and as a variable attribute, the variable's version has\n precedence. 
Must be strings.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of Results\n \"\"\"\n # The attrs are optional and only needs to be a string and non-empty if it\n # exists.\n attrs = [\"institution\", \"source\", \"references\", \"comment\"]\n\n valid_attributes = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"2.6\"])\n\n attr_bin = set()\n # If the attribute is defined for any variable, check it and mark in\n # the set that we've seen it at least once.\n for name, variable in ds.variables.items():\n for attribute in variable.ncattrs():\n varattr = getattr(variable, attribute)\n if attribute in attrs:\n is_string = isinstance(varattr, str)\n valid_attributes.assert_true(\n is_string and len(varattr) > 0,\n \"§2.6.2 {}:{} should be a non-empty string\"\n \"\".format(name, attribute),\n )\n attr_bin.add(attribute)\n\n # Check all the global attributes too and mark if we've seen them\n for attribute in ds.ncattrs():\n dsattr = getattr(ds, attribute)\n if attribute in attrs:\n is_string = isinstance(dsattr, str)\n valid_attributes.assert_true(\n is_string and len(dsattr) > 0,\n \"§2.6.2 {} global attribute should be a non-empty string\"\n \"\".format(attribute),\n )\n attr_bin.add(attribute)\n return valid_attributes.to_result()\n\n ###############################################################################\n # Chapter 3: Description of the Data\n ###############################################################################\n\n def check_units(self, ds):\n \"\"\"\n Check the units attribute for all variables to ensure they are CF\n compliant under CF §3.1\n\n CF §3.1 The units attribute is required for all variables that represent dimensional quantities\n (except for boundary variables defined in Section 7.1, \"Cell Boundaries\" and climatology variables\n defined in Section 7.4, \"Climatological Statistics\").\n\n Units are not required for dimensionless quantities. 
    def check_units(self, ds):
        """
        Check the units attribute for all variables to ensure they are CF
        compliant under CF §3.1

        CF §3.1 The units attribute is required for all variables that represent dimensional quantities
        (except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables
        defined in Section 7.4, "Climatological Statistics").

        Units are not required for dimensionless quantities. A variable with no units attribute is assumed
        to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be
        included.

        - units required
        - type must be recognized by udunits
        - if standard name specified, must be consistent with standard name table, must also be consistent with a
          specified cell_methods attribute if present

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """

        ret_val = []

        # Units are required for coordinate, auxiliary coordinate, and
        # geophysical variables; the set() below also deduplicates names
        # appearing in more than one category.
        coordinate_variables = self._find_coord_vars(ds)
        auxiliary_coordinates = self._find_aux_coord_vars(ds)
        geophysical_variables = self._find_geophysical_vars(ds)
        unit_required_variables = (
            coordinate_variables + auxiliary_coordinates + geophysical_variables
        )

        for name in set(unit_required_variables):
            # For reduced horizontal grids, the compression index variable does
            # not require units.
            if cfutil.is_compression_coordinate(ds, name):
                continue

            variable = ds.variables[name]

            # Skip instance coordinate variables
            if getattr(variable, "cf_role", None) is not None:
                continue

            # Skip labels
            if variable.dtype.char == "S":
                continue

            standard_name = getattr(variable, "standard_name", None)
            standard_name, standard_name_modifier = self._split_standard_name(
                standard_name
            )

            units = getattr(variable, "units", None)

            valid_units = self._check_valid_cf_units(ds, name)
            ret_val.append(valid_units)

            units_attr_is_string = TestCtx(BaseCheck.MEDIUM, self.section_titles["3.1"])

            # side effects, but better than teasing out the individual result
            # (only run the udunits check when units is actually a string)
            if units_attr_is_string.assert_true(
                isinstance(units, str),
                "units ({}) attribute of '{}' must be a string compatible with UDUNITS".format(
                    units, variable.name
                ),
            ):
                valid_udunits = self._check_valid_udunits(ds, name)
                ret_val.append(valid_udunits)
            ret_val.append(units_attr_is_string.to_result())

            # units vs. standard name consistency only makes sense when a
            # standard_name string is present
            if isinstance(standard_name, str):
                valid_standard_units = self._check_valid_standard_units(ds, name)
                ret_val.append(valid_standard_units)

        return ret_val
self._check_valid_standard_units(ds, name)\n ret_val.append(valid_standard_units)\n\n return ret_val\n\n def _check_valid_cf_units(self, ds, variable_name):\n \"\"\"\n Checks that the variable contains units attribute, the attribute is a\n string and the value is not deprecated by CF\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :param str variable_name: Name of the variable to be checked\n :rtype:\n :return: List of results\n \"\"\"\n\n # This list is straight from section 3\n deprecated = [\"level\", \"layer\", \"sigma_level\"]\n variable = ds.variables[variable_name]\n\n units = getattr(variable, \"units\", None)\n standard_name_full = getattr(variable, \"standard_name\", None)\n standard_name, standard_name_modifier = self._split_standard_name(\n standard_name_full\n )\n std_name_units_dimensionless = cfutil.is_dimensionless_standard_name(\n self._std_names._root, standard_name\n )\n # Is this even in the database? also, if there is no standard_name,\n # there's no way to know if it is dimensionless.\n should_be_dimensionless = (\n variable.dtype.char == \"S\"\n or std_name_units_dimensionless\n or standard_name is None\n )\n\n # 1) Units must exist\n valid_units = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.1\"])\n valid_units.assert_true(\n should_be_dimensionless or units is not None,\n \"units attribute is required for {} when variable is not a dimensionless quantity\".format(\n variable_name\n ),\n )\n\n # Don't bother checking the rest\n if units is None and not should_be_dimensionless:\n return valid_units.to_result()\n # 2) units attribute must be a string\n valid_units.assert_true(\n should_be_dimensionless or isinstance(units, str),\n \"units attribute for {} needs to be a string\".format(variable_name),\n )\n\n # 3) units are not deprecated\n valid_units.assert_true(\n units not in deprecated,\n 'units for {}, \"{}\" are deprecated by CF 1.6'.format(variable_name, units),\n )\n\n return valid_units.to_result()\n\n def 
_check_valid_udunits(self, ds, variable_name):\n \"\"\"\n Checks that the variable's units are contained in UDUnits\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :param str variable_name: Name of the variable to be checked\n \"\"\"\n variable = ds.variables[variable_name]\n\n units = getattr(variable, \"units\", None)\n standard_name = getattr(variable, \"standard_name\", None)\n standard_name, standard_name_modifier = self._split_standard_name(standard_name)\n std_name_units_dimensionless = cfutil.is_dimensionless_standard_name(\n self._std_names._root, standard_name\n )\n\n # If the variable is supposed to be dimensionless, it automatically passes\n should_be_dimensionless = (\n variable.dtype.char == \"S\" or std_name_units_dimensionless\n )\n\n valid_udunits = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.1\"])\n are_udunits = units is not None and util.units_known(units)\n valid_udunits.assert_true(\n should_be_dimensionless or are_udunits,\n 'units for {}, \"{}\" are not recognized by UDUNITS'.format(\n variable_name, units\n ),\n )\n return valid_udunits.to_result()\n\n def _check_valid_standard_units(self, ds, variable_name):\n \"\"\"\n Checks that the variable's units are appropriate for the standard name\n according to the CF standard name table and coordinate sections in CF\n 1.6\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :param str variable_name: Name of the variable to be checked\n \"\"\"\n variable = ds.variables[variable_name]\n units = getattr(variable, \"units\", None)\n standard_name = getattr(variable, \"standard_name\", None)\n\n valid_standard_units = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.1\"])\n\n # If the variable is supposed to be dimensionless, it automatically passes\n std_name_units_dimensionless = cfutil.is_dimensionless_standard_name(\n self._std_names._root, standard_name\n )\n\n standard_name, standard_name_modifier = self._split_standard_name(standard_name)\n\n standard_entry = 
self._std_names.get(standard_name, None)\n if standard_entry is not None:\n canonical_units = standard_entry.canonical_units\n else:\n # Any unit comparisons with None returns False\n canonical_units = None\n\n # Other standard_name modifiers have the same units as the\n # unmodified standard name or are not checked for units.\n\n if standard_name_modifier == \"number_of_observations\":\n canonical_units = \"1\"\n\n # This section represents the different cases where simple udunits\n # comparison isn't comprehensive enough to determine if the units are\n # appropriate under CF\n\n # UDUnits accepts \"s\" as a unit of time but it should be <unit> since <epoch>\n if standard_name == \"time\":\n valid_standard_units.assert_true(\n util.units_convertible(units, \"seconds since 1970-01-01\"),\n \"time must be in a valid units format <unit> since <epoch> \"\n \"not {}\".format(units),\n )\n\n # UDunits can't tell the difference between east and north facing coordinates\n elif standard_name == \"latitude\":\n # degrees is allowed if using a transformed grid\n allowed_units = cfutil.VALID_LAT_UNITS | {\"degrees\"}\n valid_standard_units.assert_true(\n units.lower() in allowed_units,\n 'variables defining latitude (\"{}\") must use degrees_north '\n \"or degrees if defining a transformed grid. Currently \"\n \"{}\".format(variable_name, units),\n )\n # UDunits can't tell the difference between east and north facing coordinates\n elif standard_name == \"longitude\":\n # degrees is allowed if using a transformed grid\n allowed_units = cfutil.VALID_LON_UNITS | {\"degrees\"}\n valid_standard_units.assert_true(\n units.lower() in allowed_units,\n 'variables defining longitude (\"{}\") must use degrees_east '\n \"or degrees if defining a transformed grid. 
Currently \"\n \"{}\".format(variable_name, units),\n )\n # Standard Name table agrees the unit should be dimensionless\n elif std_name_units_dimensionless:\n valid_standard_units.assert_true(True, \"\")\n\n elif canonical_units is not None:\n valid_standard_units.assert_true(\n util.units_convertible(canonical_units, units),\n \"units for variable {} must be convertible to {} \"\n \"currently they are {}\".format(variable_name, canonical_units, units),\n )\n\n return valid_standard_units.to_result()\n\n def check_standard_name(self, ds):\n \"\"\"\n Check a variables's standard_name attribute to ensure that it meets CF\n compliance.\n\n CF §3.3 A standard name is associated with a variable via the attribute\n standard_name which takes a string value comprised of a standard name\n optionally followed by one or more blanks and a standard name modifier\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n\n coord_vars = self._find_coord_vars(ds)\n aux_coord_vars = self._find_aux_coord_vars(ds)\n axis_vars = cfutil.get_axis_variables(ds)\n flag_vars = cfutil.get_flag_variables(ds)\n geophysical_vars = self._find_geophysical_vars(ds)\n\n variables_requiring_standard_names = (\n coord_vars + aux_coord_vars + axis_vars + flag_vars + geophysical_vars\n )\n for name in set(variables_requiring_standard_names):\n # Compression indices used in reduced horizontal grids or\n # compression schemes do not require attributes other than compress\n if cfutil.is_compression_coordinate(ds, name):\n continue\n\n ncvar = ds.variables[name]\n\n # §9 doesn't explicitly allow instance variables as coordinates but\n # it's loosely implied. 
Just in case, skip it.\n if hasattr(ncvar, \"cf_role\"):\n continue\n\n # Unfortunately, §6.1 allows for string types to be listed as\n # coordinates.\n if ncvar.dtype.char == \"S\":\n continue\n\n standard_name = getattr(ncvar, \"standard_name\", None)\n standard_name, standard_name_modifier = self._split_standard_name(\n standard_name\n )\n long_name = getattr(ncvar, \"long_name\", None)\n long_or_std_name = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.3\"])\n if long_name is not None:\n long_name_present = True\n long_or_std_name.assert_true(\n isinstance(long_name, str),\n \"Attribute long_name for variable {} must be a string\".format(name),\n )\n else:\n long_name_present = False\n # §1.3 The long_name and standard_name attributes are used to\n # describe the content of each variable. For backwards\n # compatibility with COARDS neither is required, but use of at\n # least one of them is strongly recommended.\n\n # If standard_name is not defined but long_name is, don't continue\n # the check for this variable\n if standard_name is not None:\n standard_name_present = True\n valid_std_name = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.3\"])\n valid_std_name.assert_true(\n isinstance(standard_name, str),\n \"Attribute standard_name for variable {} must be a string\".format(\n name\n ),\n )\n if isinstance(standard_name, str):\n valid_std_name.assert_true(\n standard_name in self._std_names,\n \"standard_name {} is not defined in Standard Name Table v{}\".format(\n standard_name or \"undefined\", self._std_names._version\n ),\n )\n\n ret_val.append(valid_std_name.to_result())\n\n # 2) optional - if modifiers, should be in table\n if standard_name_modifier is not None:\n valid_modifier = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.3\"])\n allowed = [\n \"detection_minimum\",\n \"number_of_observations\",\n \"standard_error\",\n \"status_flag\",\n ]\n valid_modifier.assert_true(\n standard_name_modifier in allowed,\n \"standard_name modifier {} for 
variable {} is not a valid modifier "
                    "according to appendix C".format(standard_name_modifier, name),
                )

                ret_val.append(valid_modifier.to_result())
        else:
            standard_name_present = False

            long_or_std_name.assert_true(
                long_name_present or standard_name_present,
                "Attribute long_name or/and standard_name is highly recommended for variable {}".format(
                    name
                ),
            )
            ret_val.append(long_or_std_name.to_result())
        return ret_val

    def check_ancillary_variables(self, ds):
        """
        Checks the ancillary_variable attribute for all variables to ensure
        they are CF compliant.

        CF §3.4 It is a string attribute whose value is a blank separated list
        of variable names. The nature of the relationship between variables
        associated via ancillary_variables must be determined by other
        attributes. The variables listed by the ancillary_variables attribute
        will often have the standard name of the variable which points to them
        including a modifier (Appendix C, Standard Name Modifiers) to indicate
        the relationship.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """
        ret_val = []

        # Only visit variables that actually declare ancillary_variables.
        for ncvar in ds.get_variables_by_attributes(
            ancillary_variables=lambda x: x is not None
        ):
            name = ncvar.name
            valid_ancillary = TestCtx(BaseCheck.HIGH, self.section_titles["3.4"])
            ancillary_variables = ncvar.ancillary_variables

            valid_ancillary.assert_true(
                isinstance(ancillary_variables, str),
                "ancillary_variables attribute defined by {} "
                "should be string".format(name),
            )

            # Can't perform the second check if it's not a string
            if not isinstance(ancillary_variables, str):
                ret_val.append(valid_ancillary.to_result())
                continue

            # CF §3.4: blank-separated list — each token must name a variable
            # present in this dataset.
            for ancillary_variable in ancillary_variables.split():
                valid_ancillary.assert_true(
                    ancillary_variable in ds.variables,
                    "{} is not a variable in this dataset".format(ancillary_variable),
                )

            ret_val.append(valid_ancillary.to_result())

        return ret_val

    def check_flags(self, ds):
        """
        Check the flag_values, flag_masks and flag_meanings attributes for
        variables to ensure they are CF compliant.

        CF §3.5 The attributes flag_values, flag_masks and flag_meanings are
        intended to make variables that contain flag values self describing.
        Status codes and Boolean (binary) condition flags may be expressed with
        different combinations of flag_values and flag_masks attribute
        definitions.

        The flag_values and flag_meanings attributes describe a status flag
        consisting of mutually exclusive coded values.

        The flag_meanings attribute is a string whose value is a blank
        separated list of descriptive words or phrases, one for each flag
        value. Each word or phrase should consist of characters from the
        alphanumeric set and the following five: '_', '-', '.', '+', '@'.

        The flag_masks and flag_meanings attributes describe a number of
        independent Boolean conditions using bit field notation by setting
        unique bits in each flag_masks value.

        The flag_masks, flag_values and flag_meanings attributes, used
        together, describe a blend of independent Boolean conditions and
        enumerated status codes. 
A flagged condition is identified by a bitwise\n AND of the variable value and each flag_masks value; a result that\n matches the flag_values value indicates a true condition.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n\n for name in cfutil.get_flag_variables(ds):\n variable = ds.variables[name]\n flag_values = getattr(variable, \"flag_values\", None)\n flag_masks = getattr(variable, \"flag_masks\", None)\n\n valid_flags_var = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.5\"])\n # Check that the variable defines mask or values\n valid_flags_var.assert_true(\n flag_values is not None or flag_masks is not None,\n \"{} does not define either flag_masks or flag_values\".format(name),\n )\n ret_val.append(valid_flags_var.to_result())\n\n valid_meanings = self._check_flag_meanings(ds, name)\n ret_val.append(valid_meanings)\n\n # check flag_values\n if flag_values is not None:\n valid_values = self._check_flag_values(ds, name)\n ret_val.append(valid_values)\n\n # check flag_masks\n if flag_masks is not None:\n valid_masks = self._check_flag_masks(ds, name)\n ret_val.append(valid_masks)\n\n if flag_values is not None and flag_masks is not None:\n allv = list(\n map(lambda a, b: a & b == a, list(zip(flag_values, flag_masks)))\n )\n\n allvr = Result(BaseCheck.MEDIUM, all(allv), self.section_titles[\"3.5\"])\n if not allvr.value:\n allvr.msgs = [\n \"flag masks and flag values for '{}' combined don't equal flag value\".format(\n name\n )\n ]\n\n ret_val.append(allvr)\n\n return ret_val\n\n def _check_flag_values(self, ds, name):\n \"\"\"\n Checks a variable's flag_values attribute for compliance under CF\n\n - flag_values exists as an array\n - unique elements in flag_values\n - flag_values si the same dtype as the variable\n - flag_values is the same length as flag_meanings\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :param str name: Name of variable to check\n :rtype: 
compliance_checker.base.Result\n \"\"\"\n variable = ds.variables[name]\n\n flag_values = getattr(variable, \"flag_values\", None)\n flag_meanings = getattr(variable, \"flag_meanings\", None)\n valid_values = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.5\"])\n\n # flag_values must be a list of values, not a string or anything else\n valid_values.assert_true(\n isinstance(flag_values, np.ndarray),\n \"{}'s flag_values must be an array of values not {}\".format(\n name, type(flag_values)\n ),\n )\n\n # We can't perform any more checks\n if not isinstance(flag_values, np.ndarray):\n return valid_values.to_result()\n\n # the flag values must be independent, no repeating values\n flag_set = set(flag_values)\n valid_values.assert_true(\n len(flag_set) == len(flag_values),\n \"{}'s flag_values must be independent and can not be repeated\".format(name),\n )\n\n # the data type for flag_values should be the same as the variable\n valid_values.assert_true(\n variable.dtype.type == flag_values.dtype.type,\n \"flag_values ({}) must be the same data type as {} ({})\"\n \"\".format(flag_values.dtype.type, name, variable.dtype.type),\n )\n\n if isinstance(flag_meanings, str):\n flag_meanings = flag_meanings.split()\n valid_values.assert_true(\n len(flag_meanings) == len(flag_values),\n \"{}'s flag_meanings and flag_values should have the same number \".format(\n name\n )\n + \"of elements.\",\n )\n\n return valid_values.to_result()\n\n def _check_flag_masks(self, ds, name):\n \"\"\"\n Check a variable's flag_masks attribute for compliance under CF\n\n - flag_masks exists as an array\n - flag_masks is the same dtype as the variable\n - variable's dtype can support bit-field\n - flag_masks is the same length as flag_meanings\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :param str name: Variable name\n :rtype: compliance_checker.base.Result\n \"\"\"\n variable = ds.variables[name]\n\n flag_masks = variable.flag_masks\n flag_meanings = getattr(ds, \"flag_meanings\", 
None)\n\n valid_masks = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.5\"])\n\n valid_masks.assert_true(\n isinstance(flag_masks, np.ndarray),\n \"{}'s flag_masks must be an array of values not {}\".format(\n name, type(flag_masks).__name__\n ),\n )\n\n if not isinstance(flag_masks, np.ndarray):\n return valid_masks.to_result()\n\n valid_masks.assert_true(\n variable.dtype.type == flag_masks.dtype.type,\n \"flag_masks ({}) mustbe the same data type as {} ({})\"\n \"\".format(flag_masks.dtype.type, name, variable.dtype.type),\n )\n\n type_ok = (\n np.issubdtype(variable.dtype, np.integer)\n or np.issubdtype(variable.dtype, \"S\")\n or np.issubdtype(variable.dtype, \"b\")\n )\n\n valid_masks.assert_true(\n type_ok,\n \"{}'s data type must be capable of bit-field expression\".format(name),\n )\n\n if isinstance(flag_meanings, str):\n flag_meanings = flag_meanings.split()\n valid_masks.assert_true(\n len(flag_meanings) == len(flag_masks),\n \"{} flag_meanings and flag_masks should have the same number \".format(\n name\n )\n + \"of elements.\",\n )\n\n return valid_masks.to_result()\n\n def _check_flag_meanings(self, ds, name):\n \"\"\"\n Check a variable's flag_meanings attribute for compliance under CF\n\n - flag_meanings exists\n - flag_meanings is a string\n - flag_meanings elements are valid strings\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :param str name: Variable name\n :rtype: compliance_checker.base.Result\n \"\"\"\n variable = ds.variables[name]\n flag_meanings = getattr(variable, \"flag_meanings\", None)\n valid_meanings = TestCtx(BaseCheck.HIGH, self.section_titles[\"3.5\"])\n\n valid_meanings.assert_true(\n flag_meanings is not None,\n \"{}'s flag_meanings attribute is required for flag variables\".format(name),\n )\n\n valid_meanings.assert_true(\n isinstance(flag_meanings, str),\n \"{}'s flag_meanings attribute must be a string\".format(name),\n )\n\n # We can't perform any additional checks if it's not a string\n if not 
isinstance(flag_meanings, str):
            return valid_meanings.to_result()

        valid_meanings.assert_true(
            len(flag_meanings) > 0, "{}'s flag_meanings can't be empty".format(name)
        )

        # CF §3.5: each meaning may use alphanumerics plus the five characters
        # '_', '-', '.', '+', '@'.
        flag_regx = regex.compile(r"^[0-9A-Za-z_\-.+@]+$")
        meanings = flag_meanings.split()
        for meaning in meanings:
            if flag_regx.match(meaning) is None:
                valid_meanings.assert_true(
                    False,
                    "{}'s flag_meanings attribute defined an illegal flag meaning ".format(
                        name
                    )
                    + "{}".format(meaning),
                )
        return valid_meanings.to_result()

    ###############################################################################
    # Chapter 4: Coordinate Types
    ###############################################################################

    def check_coordinate_types(self, ds):
        """
        Check the axis attribute of coordinate variables

        CF §4 The attribute axis may be attached to a coordinate variable and
        given one of the values X, Y, Z or T which stand for a longitude,
        latitude, vertical, or time axis respectively. Alternatively the
        standard_name attribute may be used for direct identification.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """
        ret_val = []

        for variable in ds.get_variables_by_attributes(axis=lambda x: x is not None):
            name = variable.name
            # Coordinate compressions should not be checked as a valid
            # coordinate, which they are not. 
They are a mechanism to project\n # an array of indices onto a 2-d grid containing valid coordinates.\n if cfutil.is_compression_coordinate(ds, name):\n continue\n\n variable = ds.variables[name]\n # Even though it's not allowed in CF 1.6, it is allowed in CF 1.7\n # and we see people do it, often.\n if hasattr(variable, \"cf_role\"):\n continue\n\n # §6.1 allows for labels to be referenced as auxiliary coordinate\n # variables, which should not be checked like the rest of the\n # coordinates.\n if variable.dtype.char == \"S\":\n continue\n\n axis = getattr(variable, \"axis\", None)\n\n if axis is not None:\n valid_axis = self._check_axis(ds, name)\n ret_val.append(valid_axis)\n\n return ret_val\n\n def _check_axis(self, ds, name):\n \"\"\"\n Checks that the axis attribute is a string and an allowed value, namely\n one of 'T', 'X', 'Y', or 'Z'.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :param str name: Name of the variable\n :rtype: compliance_checker.base.Result\n \"\"\"\n allowed_axis = [\"T\", \"X\", \"Y\", \"Z\"]\n variable = ds.variables[name]\n axis = variable.axis\n\n valid_axis = TestCtx(BaseCheck.HIGH, self.section_titles[\"4\"])\n axis_is_string = (isinstance(axis, str),)\n valid_axis.assert_true(\n axis_is_string and len(axis) > 0,\n \"{}'s axis attribute must be a non-empty string\".format(name),\n )\n\n # If axis isn't a string we can't continue any checks\n if not axis_is_string or len(axis) == 0:\n return valid_axis.to_result()\n\n valid_axis.assert_true(\n axis in allowed_axis,\n \"{}'s axis attribute must be T, X, Y, or Z, \".format(name)\n + \"currently {}\".format(axis),\n )\n\n return valid_axis.to_result()\n\n def check_latitude(self, ds):\n \"\"\"\n Check variable(s) that define latitude and are defined correctly according to CF.\n\n CF §4.1 Variables representing latitude must always explicitly include\n the units attribute; there is no default value. The recommended unit\n of latitude is degrees_north. 
Also acceptable are degree_north,
        degree_N, degrees_N, degreeN, and degreesN.

        Optionally, the latitude type may be indicated additionally by
        providing the standard_name attribute with the value latitude, and/or
        the axis attribute with the value Y.

        - Four checks per latitude variable
        - (H) latitude has units attribute
        - (M) latitude has an allowed units attribute
        - (L) latitude uses degrees_north (if not in rotated pole)
        - (M) latitude defines either standard_name or axis

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """
        ret_val = []

        allowed_lat_units = [
            "degrees_north",
            "degree_north",
            "degree_n",
            "degrees_n",
            "degreen",
            "degreesn",
        ]

        # Determine the grid mappings in this dataset
        # NOTE(review): grid_mapping is collected here but never referenced in
        # the remainder of this method — candidate for removal.
        grid_mapping = []
        grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
        for name in grid_mapping_variables:
            variable = ds.variables[name]
            grid_mapping_name = getattr(variable, "grid_mapping_name", None)
            if grid_mapping_name:
                grid_mapping.append(grid_mapping_name)

        latitude_variables = cfutil.get_latitude_variables(ds)
        for latitude in latitude_variables:
            variable = ds.variables[latitude]
            units = getattr(variable, "units", None)
            units_is_string = isinstance(units, str)
            standard_name = getattr(variable, "standard_name", None)
            axis = getattr(variable, "axis", None)

            # Check that latitude defines units
            valid_latitude = TestCtx(BaseCheck.HIGH, self.section_titles["4.1"])
            valid_latitude.assert_true(
                units is not None,
                "latitude variable '{}' must define units".format(latitude),
            )
            ret_val.append(valid_latitude.to_result())

            # Check that latitude uses allowed units
            allowed_units = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.1"])
            if standard_name == "grid_latitude":
                e_n_units = cfutil.VALID_LAT_UNITS | cfutil.VALID_LON_UNITS
                # check that the units aren't in east and north degrees units,
                # but are 
convertible to angular units
                allowed_units.assert_true(
                    units not in e_n_units and Unit(units) == Unit("degree"),
                    "Grid latitude variable '{}' should use degree equivalent units without east or north components. "
                    "Current units are {}".format(latitude, units),
                )
            else:
                allowed_units.assert_true(
                    units_is_string and units.lower() in allowed_lat_units,
                    "latitude variable '{}' should define valid units for latitude"
                    "".format(latitude),
                )
            ret_val.append(allowed_units.to_result())

            # Check that latitude uses degrees_north
            if standard_name == "latitude" and units != "degrees_north":
                # This is only a recommendation and we won't penalize but we
                # will include a recommended action.
                msg = (
                    "CF recommends latitude variable '{}' to use units degrees_north"
                    "".format(latitude)
                )
                recommended_units = Result(
                    BaseCheck.LOW, (1, 1), self.section_titles["4.1"], [msg]
                )
                ret_val.append(recommended_units)

            y_variables = ds.get_variables_by_attributes(axis="Y")
            # Check that latitude defines either standard_name or axis
            # NOTE(review): the y_variables clause passes when ANY variable in
            # the dataset carries axis='Y', not just this latitude — confirm
            # this is the intended leniency.
            definition = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.1"])
            definition.assert_true(
                standard_name == "latitude" or axis == "Y" or y_variables != [],
                "latitude variable '{}' should define standard_name='latitude' or axis='Y'"
                "".format(latitude),
            )
            ret_val.append(definition.to_result())

        return ret_val

    def check_longitude(self, ds):
        """
        Check variable(s) that define longitude and are defined correctly according to CF.

        CF §4.2 Variables representing longitude must always explicitly include
        the units attribute; there is no default value. The recommended unit
        of longitude is degrees_east. Also acceptable are degree_east,
        degree_E, degrees_E, degreeE, and degreesE.

        Optionally, the longitude type may be indicated additionally by
        providing the standard_name attribute with the value longitude, and/or
        the axis attribute with the value X.

        - Four checks per longitude variable
        - (H) longitude has units attribute
        - (M) longitude has an allowed units attribute
        - (L) longitude uses degrees_east (if not in rotated pole)
        - (M) longitude defines either standard_name or axis

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """

        # TODO we already have a check_latitude... I'm sure we can make DRYer

        ret_val = []
        allowed_lon_units = [
            "degrees_east",
            "degree_east",
            "degree_e",
            "degrees_e",
            "degreee",
            "degreese",
        ]

        # Determine the grid mappings in this dataset
        # NOTE(review): grid_mapping appears unused below (mirrors
        # check_latitude) — confirm and consider removing.
        grid_mapping = []
        grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
        for name in grid_mapping_variables:
            variable = ds.variables[name]
            grid_mapping_name = getattr(variable, "grid_mapping_name", None)
            if grid_mapping_name:
                grid_mapping.append(grid_mapping_name)

        longitude_variables = cfutil.get_longitude_variables(ds)
        for longitude in longitude_variables:
            variable = ds.variables[longitude]
            units = getattr(variable, "units", None)
            units_is_string = isinstance(units, str)
            standard_name = getattr(variable, "standard_name", None)
            axis = getattr(variable, "axis", None)

            # NOTE see docstring--should below be 4.1 or 4.2?
            # Check that longitude defines units
            valid_longitude = TestCtx(BaseCheck.HIGH, self.section_titles["4.2"])
            valid_longitude.assert_true(
                units is not None,
                "longitude variable '{}' must define units".format(longitude),
            )
            ret_val.append(valid_longitude.to_result())

            # Check that longitude uses allowed units
            allowed_units = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.2"])
            if standard_name == "grid_longitude":
                
e_n_units = cfutil.VALID_LAT_UNITS | cfutil.VALID_LON_UNITS
                # check that the units aren't in east and north degrees units,
                # but are convertible to angular units
                allowed_units.assert_true(
                    units not in e_n_units and Unit(units) == Unit("degree"),
                    "Grid longitude variable '{}' should use degree equivalent units without east or north components. "
                    "Current units are {}".format(longitude, units),
                )
            else:
                allowed_units.assert_true(
                    units_is_string and units.lower() in allowed_lon_units,
                    "longitude variable '{}' should define valid units for longitude"
                    "".format(longitude),
                )
            ret_val.append(allowed_units.to_result())

            # Check that longitude uses degrees_east
            if standard_name == "longitude" and units != "degrees_east":
                # This is only a recommendation and we won't penalize but we
                # will include a recommended action.
                msg = (
                    "CF recommends longitude variable '{}' to use units degrees_east"
                    "".format(longitude)
                )
                recommended_units = Result(
                    BaseCheck.LOW, (1, 1), self.section_titles["4.2"], [msg]
                )
                ret_val.append(recommended_units)

            x_variables = ds.get_variables_by_attributes(axis="X")
            # Check that longitude defines either standard_name or axis
            # NOTE(review): the x_variables clause passes when ANY variable in
            # the dataset carries axis='X' — confirm intended leniency.
            definition = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.2"])
            definition.assert_true(
                standard_name == "longitude" or axis == "X" or x_variables != [],
                "longitude variable '{}' should define standard_name='longitude' or axis='X'"
                "".format(longitude),
            )
            ret_val.append(definition.to_result())

        return ret_val

    def check_dimensional_vertical_coordinate(
        self, ds, dimless_vertical_coordinates=dimless_vertical_coordinates_1_6
    ):
        """
        Check units for variables defining vertical position are valid under
        CF.

        CF §4.3.1 The units attribute for dimensional coordinates will be a string
        formatted as per the udunits.dat file.

        The acceptable units for vertical (depth or height) coordinate variables
        are:
        - units of pressure as listed in the file udunits.dat. For vertical axes
          the most commonly used of these include bar, millibar,
          decibar, atmosphere (atm), pascal (Pa), and hPa.
        - units of length as listed in the file udunits.dat. For vertical axes
          the most commonly used of these include meter (metre, m), and
          kilometer (km).
        - other units listed in the file udunits.dat that may under certain
          circumstances reference vertical position such as units of density or
          temperature.

        Plural forms are also acceptable.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """
        ret_val = []
        z_variables = cfutil.get_z_variables(ds)
        # dimless_standard_names = [name for name, regx in dimless_vertical_coordinates]
        for name in z_variables:
            variable = ds.variables[name]
            standard_name = getattr(variable, "standard_name", None)
            units = getattr(variable, "units", None)
            positive = getattr(variable, "positive", None)
            # Skip the variable if it's dimensionless
            if (
                hasattr(variable, "formula_terms")
                or standard_name in dimless_vertical_coordinates
            ):
                continue

            valid_vertical_coord = TestCtx(BaseCheck.HIGH, self.section_titles["4.3"])
            valid_vertical_coord.assert_true(
                isinstance(units, str) and units,
                "§4.3.1 {}'s units must be defined for vertical coordinates, "
                "there is no default".format(name),
            )

            if not util.units_convertible("bar", units):
                valid_vertical_coord.assert_true(
                    positive in ("up", "down"),
                    "{}: vertical coordinates not defining pressure must include "
                    "a positive attribute that is either 'up' or 'down'".format(name),
                )

            # _check_valid_standard_units, part of the Chapter 3 checks,
            # already verifies that this coordinate has valid units

            ret_val.append(valid_vertical_coord.to_result())

        return ret_val

    def _check_dimensionless_vertical_coordinate_1_6(
        self, ds, vname, deprecated_units, ret_val, 
dim_vert_coords_dict
    ):
        """
        Check that a dimensionless vertical coordinate variable is valid under
        CF-1.6.

        :param netCDF4.Dataset ds: open netCDF4 dataset
        :param str vname: variable name
        :param list ret_val: array to append Results to
        :rtype: None
        """
        variable = ds.variables[vname]
        standard_name = getattr(variable, "standard_name", None)
        units = getattr(variable, "units", None)
        formula_terms = getattr(variable, "formula_terms", None)
        # Skip the variable if it's dimensional
        if formula_terms is None and standard_name not in dim_vert_coords_dict:
            return

        is_not_deprecated = TestCtx(BaseCheck.LOW, self.section_titles["4.3"])

        is_not_deprecated.assert_true(
            units not in deprecated_units,
            "§4.3.2: units are deprecated by CF in variable {}: {}"
            "".format(vname, units),
        )

        # check the vertical coordinates
        ret_val.append(is_not_deprecated.to_result())
        ret_val.append(self._check_formula_terms(ds, vname, dim_vert_coords_dict))

    def check_dimensionless_vertical_coordinates(self, ds):
        """
        Check the validity of dimensionless coordinates under CF

        CF §4.3.2 The units attribute is not required for dimensionless
        coordinates.

        The standard_name attribute associates a coordinate with its definition
        from Appendix D, Dimensionless Vertical Coordinates. The definition
        provides a mapping between the dimensionless coordinate values and
        dimensional values that can positively and uniquely indicate the
        location of the data.

        A new attribute, formula_terms, is used to associate terms in the
        definitions with variables in a netCDF file. To maintain backwards
        compatibility with COARDS the use of these attributes is not required,
        but is strongly recommended.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """
        ret_val = []

        # NOTE(review): z_variables is unused here — the helper below derives
        # its own variable list. Candidate for removal.
        z_variables = cfutil.get_z_variables(ds)
        deprecated_units = ["level", "layer", "sigma_level"]

        ret_val.extend(
            self._check_dimensionless_vertical_coordinates(
                ds,
                deprecated_units,
                self._check_dimensionless_vertical_coordinate_1_6,
                dimless_vertical_coordinates_1_6,
            )
        )

        return ret_val

    def check_time_coordinate(self, ds):
        """
        Check variables defining time are valid under CF

        CF §4.4 Variables representing time must always explicitly include the
        units attribute; there is no default value.

        The units attribute takes a string value formatted as per the
        recommendations in the Udunits package.

        The acceptable units for time are listed in the udunits.dat file. The
        most commonly used of these strings (and their abbreviations) includes
        day (d), hour (hr, h), minute (min) and second (sec, s). Plural forms
        are also acceptable. The reference time string (appearing after the
        identifier since) may include date alone; date and time; or date, time,
        and time zone. The reference time is required. A reference time in year
        0 has a special meaning (see Section 7.4, "Climatological Statistics").

        Recommend that the unit year be used with caution. It is not a calendar
        year. 
For similar reasons the unit month should also be used with
        caution.

        A time coordinate is identifiable from its units string alone.
        Optionally, the time coordinate may be indicated additionally by
        providing the standard_name attribute with an appropriate value, and/or
        the axis attribute with the value T.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """

        ret_val = []
        for name in cfutil.get_time_variables(ds):
            variable = ds.variables[name]
            # Has units
            has_units = hasattr(variable, "units")
            if not has_units:
                result = Result(
                    BaseCheck.HIGH,
                    False,
                    self.section_titles["4.4"],
                    ["%s does not have units" % name],
                )
                ret_val.append(result)
                continue
            # Correct and identifiable units
            # A passing "has units" Result is recorded first; unit
            # correctness is scored as a separate Result below.
            result = Result(BaseCheck.HIGH, True, self.section_titles["4.4"])
            ret_val.append(result)
            correct_units = util.units_temporal(variable.units)
            reasoning = None
            if not correct_units:
                reasoning = ["%s does not have correct time units" % name]
            result = Result(
                BaseCheck.HIGH, correct_units, self.section_titles["4.4"], reasoning
            )
            ret_val.append(result)

        return ret_val

    def check_calendar(self, ds):
        """
        Check the calendar attribute for variables defining time and ensure it
        is a valid calendar prescribed by CF.

        CF §4.4.1 In order to calculate a new date and time given a base date, base
        time and a time increment one must know what calendar to use.

        The values currently defined for calendar are:
        - gregorian or standard
        - proleptic_gregorian
        - noleap or 365_day
        - all_leap or 366_day
        - 360_day
        - julian
        - none

        The calendar attribute may be set to none in climate experiments that
        simulate a fixed time of year.
        The time of year is indicated by the date in the reference time of the
        units attribute.

        If none of the calendars defined above applies, a non-standard calendar
        can be defined. The lengths of each month are explicitly defined with
        the month_lengths attribute of the time axis.

        If leap years are included, then two other attributes of the time axis
        should also be defined:

        leap_year, leap_month

        The calendar attribute is not required when a non-standard calendar is
        being used. It is sufficient to define the calendar using the
        month_lengths attribute, along with leap_year, and leap_month as
        appropriate. However, the calendar attribute is allowed to take
        non-standard values and in that case defining the non-standard calendar
        using the appropriate attributes is required.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """
        # Calendars accepted by CF §4.4.1 (see docstring above).
        valid_calendars = [
            "gregorian",
            "standard",
            "proleptic_gregorian",
            "noleap",
            "365_day",
            "all_leap",
            "366_day",
            "360_day",
            "julian",
            "none",
        ]

        ret_val = []

        # if has a calendar, check that it is within the valid values
        # otherwise no calendar is valid
        for time_var in ds.get_variables_by_attributes(
            calendar=lambda c: c is not None
        ):
            reasoning = None
            valid_calendar = time_var.calendar in valid_calendars

            if not valid_calendar:
                reasoning = [
                    "§4.4.1 Variable %s should have a valid calendar: '%s' is not a valid calendar"
                    % (time_var.name, time_var.calendar)
                ]

            # passes if the calendar is valid, otherwise notify of invalid
            # calendar

            result = Result(
                BaseCheck.LOW, valid_calendar, self.section_titles["4.4"], reasoning
            )
            ret_val.append(result)

        return ret_val

    ###############################################################################
    # Chapter 5: Coordinate Systems
    ###############################################################################

    def check_aux_coordinates(self, ds):
        """
        Chapter 5 paragraph 3

        The dimensions of an auxiliary coordinate variable must be a subset of
        the dimensions of the variable with which the coordinate 
is associated,
        with two exceptions. First, string-valued coordinates (Section 6.1,
        "Labels") have a dimension for maximum string length. Second, in the
        ragged array representations of data (Chapter 9, Discrete Sampling
        Geometries), special methods are needed to connect the data and
        coordinates.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """

        ret_val = []
        geophysical_variables = self._find_geophysical_vars(ds)
        for name in geophysical_variables:
            variable = ds.variables[name]
            coordinates = getattr(variable, "coordinates", None)
            # We use a set so we can assert
            dim_set = set(variable.dimensions)
            # No auxiliary coordinates, no check
            if not isinstance(coordinates, str) or coordinates == "":
                continue

            valid_aux_coords = TestCtx(BaseCheck.HIGH, self.section_titles["5"])

            for aux_coord in coordinates.split():
                valid_aux_coords.assert_true(
                    aux_coord in ds.variables,
                    "{}'s auxiliary coordinate specified by the coordinates attribute, {}, "
                    "is not a variable in this dataset"
                    "".format(name, aux_coord),
                )
                if aux_coord not in ds.variables:
                    continue

                # §6.1 Allows for "labels" to be referenced as coordinates
                if ds.variables[aux_coord].dtype.char == "S":
                    continue

                aux_coord_dims = set(ds.variables[aux_coord].dimensions)
                valid_aux_coords.assert_true(
                    aux_coord_dims.issubset(dim_set),
                    "dimensions for auxiliary coordinate variable {} ({}) "
                    "are not a subset of dimensions for variable {} ({})"
                    "".format(
                        aux_coord, ", ".join(aux_coord_dims), name, ", ".join(dim_set)
                    ),
                )
            ret_val.append(valid_aux_coords.to_result())
        return ret_val

    def check_duplicate_axis(self, ds):
        """
        Checks that no variable contains two coordinates defining the same
        axis.

        Chapter 5 paragraph 6

        If an axis attribute is attached to an auxiliary coordinate variable,
        it can be used by applications in the same way the `axis` attribute
        attached to a coordinate variable is used. However, it is not
        permissible for a [geophysical variable] to have both a coordinate
        variable and an auxiliary coordinate variable, or more than one of
        either type of variable, having an `axis` attribute with any given
        value e.g. there must be no more than one axis attribute for X for any
        [geophysical variable].

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: compliance_checker.base.Result
        :return: List of results
        """

        ret_val = []
        geophysical_variables = self._find_geophysical_vars(ds)
        for name in geophysical_variables:
            no_duplicates = TestCtx(BaseCheck.HIGH, self.section_titles["5"])
            axis_map = cfutil.get_axis_map(ds, name)
            # NOTE(review): 'axes' is initialized but never used in this
            # method — candidate for removal.
            axes = []
            # For every coordinate associated with this variable, keep track of
            # which coordinates define an axis and assert that there are no
            # duplicate axis attributes defined in the set of associated
            # coordinates. axis_map includes coordinates that don't actually have
            # an axis attribute, so we need to ignore those here.
            for axis, coords in axis_map.items():
                coords = [c for c in coords if hasattr(ds.variables[c], "axis")]
                no_duplicates.assert_true(
                    len(coords) <= 1,
                    "'{}' has duplicate axis {} defined by [{}]".format(
                        name, axis, ", ".join(sorted(coords))
                    ),
                )

            ret_val.append(no_duplicates.to_result())

        return ret_val

    def check_multi_dimensional_coords(self, ds):
        """
        Checks that no multidimensional coordinate shares a name with its
        dimensions.

        Chapter 5 paragraph 4

        We recommend that the name of a [multidimensional coordinate] should
        not match the name of any of its dimensions.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """
        ret_val = []

        # This can only apply to auxiliary coordinate variables
        for coord in self._find_aux_coord_vars(ds):
            variable = ds.variables[coord]
            if variable.ndim < 2:
                continue
            
not_matching = TestCtx(BaseCheck.MEDIUM, self.section_titles["5"])

            not_matching.assert_true(
                coord not in variable.dimensions,
                "{} shares the same name as one of its dimensions" "".format(coord),
            )
            ret_val.append(not_matching.to_result())

        return ret_val

    def check_grid_coordinates(self, ds):
        """
        5.6 When the coordinate variables for a horizontal grid are not
        longitude and latitude, it is required that the true latitude and
        longitude coordinates be supplied via the coordinates attribute.

        :param netCDF4.Dataset ds: An open netCDF dataset
        :rtype: list
        :return: List of results
        """
        ret_val = []
        latitudes = cfutil.get_true_latitude_variables(ds)
        longitudes = cfutil.get_true_longitude_variables(ds)

        # NOTE(review): 'check_featues' is a typo for 'check_features'
        # (local name only, so renaming is safe in a follow-up).
        check_featues = [
            "2d-regular-grid",
            "2d-static-grid",
            "3d-regular-grid",
            "3d-static-grid",
            "mapped-grid",
            "reduced-grid",
        ]

        # This one is tricky because there's a very subtle difference between
        # latitude as defined in Chapter 4 and "true" latitude as defined in
        # chapter 5.

        # For each geophysical variable that defines a grid, assert it is
        # associated with a true latitude or longitude coordinate.

        for variable in self._find_geophysical_vars(ds):
            # We use a set so we can do set-wise comparisons with coordinate
            # dimensions
            dimensions = set(ds.variables[variable].dimensions)
            # If it's not a grid, skip it
            if cfutil.guess_feature_type(ds, variable) not in check_featues:
                continue
            has_coords = TestCtx(BaseCheck.HIGH, self.section_titles["5.6"])

            # axis_map is a defaultdict(list) mapping the axis to a list of
            # coordinate names. For example:
            # {'X': ['lon'], 'Y':['lat'], 'Z':['lev']}
            # The mapping comes from the dimensions of the variable and the
            # contents of the `coordinates` attribute only.
            axis_map = cfutil.get_axis_map(ds, variable)

            msg = (
                '{}\'s coordinate variable "{}" is not one of the variables identifying true '
                + "latitude/longitude and its dimensions are not a subset of {}'s dimensions"
            )

            alt = (
                "{} has no coordinate associated with a variable identified as true latitude/longitude; "
                + "its coordinate variable should also share a subset of {}'s dimensions"
            )

            # Make sure we can find latitude and its dimensions are a subset
            _lat = None
            found_lat = False
            for lat in axis_map["Y"]:
                _lat = lat
                is_subset_dims = set(ds.variables[lat].dimensions).issubset(dimensions)

                if is_subset_dims and lat in latitudes:
                    found_lat = True
                    break
            if _lat:
                has_coords.assert_true(found_lat, msg.format(variable, _lat, variable))
            else:
                has_coords.assert_true(found_lat, alt.format(variable, variable))

            # Make sure we can find longitude and its dimensions are a subset
            _lon = None
            found_lon = False
            for lon in axis_map["X"]:
                _lon = lon
                is_subset_dims = set(ds.variables[lon].dimensions).issubset(dimensions)

                if is_subset_dims and lon in longitudes:
                    found_lon = True
                    break
            if _lon:
                has_coords.assert_true(found_lon, msg.format(variable, _lon, variable))
            else:
                has_coords.assert_true(found_lon, alt.format(variable, variable))

            ret_val.append(has_coords.to_result())
        return ret_val

    def check_reduced_horizontal_grid(self, ds):
        """
        5.3 A "reduced" longitude-latitude grid is one in which the points are
        arranged along constant latitude lines with the number of points on a
        latitude line decreasing toward the poles.

        Recommend that this type of gridded data be stored using the compression
        scheme described in Section 8.2, "Compression by Gathering". 
The\n compressed latitude and longitude auxiliary coordinate variables are\n identified by the coordinates attribute.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n # Create a set of coordinate variables defining `compress`\n lats = set(cfutil.get_latitude_variables(ds))\n lons = set(cfutil.get_longitude_variables(ds))\n\n for name in self._find_geophysical_vars(ds):\n coords = getattr(ds.variables[name], \"coordinates\", None)\n axis_map = cfutil.get_axis_map(ds, name)\n # If this variable has no coordinate that defines compression\n if \"C\" not in axis_map:\n continue\n\n valid_rgrid = TestCtx(BaseCheck.HIGH, self.section_titles[\"5.3\"])\n # Make sure reduced grid features define coordinates\n valid_rgrid.assert_true(\n isinstance(coords, str) and coords,\n \"reduced grid feature {} must define coordinates attribute\"\n \"\".format(name),\n )\n # We can't check anything else if there are no defined coordinates\n if not isinstance(coords, str) and coords:\n continue\n\n coord_set = set(coords.split())\n\n # Make sure it's associated with valid lat and valid lon\n valid_rgrid.assert_true(\n len(coord_set.intersection(lons)) > 0,\n \"{} must be associated with a valid longitude coordinate\".format(name),\n )\n valid_rgrid.assert_true(\n len(coord_set.intersection(lats)) > 0,\n \"{} must be associated with a valid latitude coordinate\".format(name),\n )\n valid_rgrid.assert_true(\n len(axis_map[\"C\"]) == 1,\n \"{} can not be associated with more than one compressed coordinates: \"\n \"({})\".format(name, \", \".join(axis_map[\"C\"])),\n )\n\n for compressed_coord in axis_map[\"C\"]:\n coord = ds.variables[compressed_coord]\n compress = getattr(coord, \"compress\", None)\n valid_rgrid.assert_true(\n isinstance(compress, str) and compress,\n \"compress attribute for compression coordinate {} must be a non-empty string\"\n \"\".format(compressed_coord),\n )\n if not isinstance(compress, 
str):\n continue\n for dim in compress.split():\n valid_rgrid.assert_true(\n dim in ds.dimensions,\n \"dimension {} referenced by {}:compress must exist\"\n \"\".format(dim, compressed_coord),\n )\n ret_val.append(valid_rgrid.to_result())\n\n return ret_val\n\n def _check_grid_mapping_attr_condition(self, attr, attr_name):\n \"\"\"\n Evaluate a condition (or series of conditions) for a particular\n attribute. Implementation for CF-1.6.\n\n :param attr: attribute to teset condition for\n :param str attr_name: name of the attribute\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n if attr_name == \"latitude_of_projection_origin\":\n return self._evaluate_latitude_of_projection_origin(attr)\n\n elif attr_name == \"longitude_of_projection_origin\":\n return self._evaluate_longitude_of_projection_origin(attr)\n\n elif attr_name == \"longitude_of_central_meridian\":\n return self._evaluate_longitude_of_central_meridian(attr)\n\n elif attr_name == \"longitude_of_prime_meridian\":\n return self._evaluate_longitude_of_prime_meridian(attr)\n\n elif attr_name == \"scale_factor_at_central_meridian\":\n return self._evaluate_scale_factor_at_central_meridian(attr)\n\n elif attr_name == \"scale_factor_at_projection_origin\":\n return self._evaluate_scale_factor_at_projection_origin(attr)\n\n elif attr_name == \"standard_parallel\":\n return self._evaluate_standard_parallel(attr)\n\n elif attr_name == \"straight_vertical_longitude_from_pole\":\n return self._evaluate_straight_vertical_longitude_from_pole(attr)\n\n else:\n raise NotImplementedError(\n \"Evaluation for {} not yet implemented\".format(attr_name)\n )\n\n def _evaluate_latitude_of_projection_origin(self, val):\n \"\"\"\n Evaluate the condition for `latitude_of_projection_origin` attribute.\n Return result. 
Value must be -90 <= x <= 90.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple (bool, msg)\n \"\"\"\n\n return (\n (val >= -90.0) and (val <= 90.0),\n \"latitude_of_projection_origin must satisfy (-90 <= x <= 90)\",\n )\n\n def _evaluate_longitude_of_projection_origin(self, val):\n \"\"\"\n Evaluate the condition for `longitude_of_projection_origin` attribute.\n Return result.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple (bool, msg)\n \"\"\"\n\n return (\n (val >= -180.0) and (val <= 180.0),\n \"longitude_of_projection_origin must satisfy (-180 <= x <= 180)\",\n )\n\n def _evaluate_longitude_of_central_meridian(self, val):\n \"\"\"\n Evaluate the condition for `longitude_of_central_meridian` attribute.\n Return result.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple (bool, msg)\n \"\"\"\n\n return (\n (val >= -180.0) and (val <= 180.0),\n \"longitude_of_central_meridian must satisfy (-180 <= x <= 180)\",\n )\n\n def _evaluate_longitude_of_prime_meridian(self, val):\n \"\"\"\n Evaluate the condition for `longitude_of_prime_meridian` attribute.\n Return result.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple (bool, msg)\n \"\"\"\n\n return (\n (val >= -180.0) and (val <= 180.0),\n \"longitude_of_prime_meridian must satisfy (-180 <= x <= 180)\",\n )\n\n def _evaluate_scale_factor_at_central_meridian(self, val):\n \"\"\"\n Evaluate the condition for `scale_factor_at_central_meridian` attribute.\n Return result.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple (bool, msg)\n \"\"\"\n\n return (val > 0.0, \"scale_factor_at_central_meridian must be > 0.0\")\n\n def _evaluate_scale_factor_at_projection_origin(self, val):\n \"\"\"\n Evaluate the condition for `scale_factor_at_projection_origin` attribute.\n Return result.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple (bool, msg)\n \"\"\"\n\n return (val > 0.0, 
\"scale_factor_at_projection_origin must be > 0.0\")\n\n def _evaluate_standard_parallel(self, val):\n \"\"\"\n Evaluate the condition for `standard_parallel` attribute. Return result.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple (bool, msg)\n \"\"\"\n\n return (\n (val >= -90.0) and (val <= 90),\n \"standard_parallel must satisfy (-90 <= x <= 90)\",\n )\n\n def _evaluate_straight_vertical_longitude_from_pole(self, val):\n \"\"\"\n Evaluate the condition for `straight_vertical_longitude_from_pole`\n attribute. Return result.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple (bool, msg)\n \"\"\"\n\n return (\n (val >= -180.0) and (val <= 180),\n \"straight_vertical_longitude_from_pole must satisfy (-180 <= x <= 180)\",\n )\n\n ###############################################################################\n # Chapter 6: Labels and Alternative Coordinates\n ###############################################################################\n\n def check_geographic_region(self, ds):\n \"\"\"\n 6.1.1 When data is representative of geographic regions which can be identified by names but which have complex\n boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled\n axis should be used to identify the regions.\n\n Recommend that the names be chosen from the list of standardized region names whenever possible. 
To indicate\n that the label values are standardized the variable that contains the labels must be given the standard_name\n attribute with the value region.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n region_list = [ # TODO maybe move this (and other info like it) into a config file?\n \"africa\",\n \"antarctica\",\n \"arabian_sea\",\n \"aral_sea\",\n \"arctic_ocean\",\n \"asia\",\n \"atlantic_ocean\",\n \"australia\",\n \"baltic_sea\",\n \"barents_opening\",\n \"barents_sea\",\n \"beaufort_sea\",\n \"bellingshausen_sea\",\n \"bering_sea\",\n \"bering_strait\",\n \"black_sea\",\n \"canadian_archipelago\",\n \"caribbean_sea\",\n \"caspian_sea\",\n \"central_america\",\n \"chukchi_sea\",\n \"contiguous_united_states\",\n \"denmark_strait\",\n \"drake_passage\",\n \"east_china_sea\",\n \"english_channel\",\n \"eurasia\",\n \"europe\",\n \"faroe_scotland_channel\",\n \"florida_bahamas_strait\",\n \"fram_strait\",\n \"global\",\n \"global_land\",\n \"global_ocean\",\n \"great_lakes\",\n \"greenland\",\n \"gulf_of_alaska\",\n \"gulf_of_mexico\",\n \"hudson_bay\",\n \"iceland_faroe_channel\",\n \"indian_ocean\",\n \"indonesian_throughflow\",\n \"indo_pacific_ocean\",\n \"irish_sea\",\n \"lake_baykal\",\n \"lake_chad\",\n \"lake_malawi\",\n \"lake_tanganyika\",\n \"lake_victoria\",\n \"mediterranean_sea\",\n \"mozambique_channel\",\n \"north_america\",\n \"north_sea\",\n \"norwegian_sea\",\n \"pacific_equatorial_undercurrent\",\n \"pacific_ocean\",\n \"persian_gulf\",\n \"red_sea\",\n \"ross_sea\",\n \"sea_of_japan\",\n \"sea_of_okhotsk\",\n \"south_america\",\n \"south_china_sea\",\n \"southern_ocean\",\n \"taiwan_luzon_straits\",\n \"weddell_sea\",\n \"windward_passage\",\n \"yellow_sea\",\n ]\n\n for var in ds.get_variables_by_attributes(standard_name=\"region\"):\n valid_region = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"6.1\"])\n region = var[:]\n if np.ma.isMA(region):\n region = 
region.data\n valid_region.assert_true(\n \"\".join(region.astype(str)).lower() in region_list,\n \"6.1.1 '{}' specified by '{}' is not a valid region\".format(\n \"\".join(region.astype(str)), var.name\n ),\n )\n ret_val.append(valid_region.to_result())\n return ret_val\n\n ###############################################################################\n # Chapter 7: Data Representative of Cells\n ###############################################################################\n\n def check_cell_boundaries(self, ds):\n \"\"\"\n Checks the dimensions of cell boundary variables to ensure they are CF compliant.\n\n 7.1 To represent cells we add the attribute bounds to the appropriate coordinate variable(s). The value of bounds\n is the name of the variable that contains the vertices of the cell boundaries. We refer to this type of variable as\n a \"boundary variable.\" A boundary variable will have one more dimension than its associated coordinate or auxiliary\n coordinate variable. The additional dimension should be the most rapidly varying one, and its size is the maximum\n number of cell vertices.\n\n Applications that process cell boundary data often times need to determine whether or not adjacent cells share an\n edge. In order to facilitate this type of processing the following restrictions are placed on the data in boundary\n variables:\n\n Bounds for 1-D coordinate variables\n\n For a coordinate variable such as lat(lat) with associated boundary variable latbnd(x,2), the interval endpoints\n must be ordered consistently with the associated coordinate, e.g., for an increasing coordinate, lat(1) > lat(0)\n implies latbnd(i,1) >= latbnd(i,0) for all i\n\n If adjacent intervals are contiguous, the shared endpoint must be represented identically in each instance where\n it occurs in the boundary variable. 
For example, if the intervals that contain grid points lat(i) and lat(i+1) are\n contiguous, then latbnd(i+1,0) = latbnd(i,1).\n\n Bounds for 2-D coordinate variables with 4-sided cells\n\n In the case where the horizontal grid is described by two-dimensional auxiliary coordinate variables in latitude\n lat(n,m) and longitude lon(n,m), and the associated cells are four-sided, then the boundary variables are given\n in the form latbnd(n,m,4) and lonbnd(n,m,4), where the trailing index runs over the four vertices of the cells.\n\n Bounds for multi-dimensional coordinate variables with p-sided cells\n\n In all other cases, the bounds should be dimensioned (...,n,p), where (...,n) are the dimensions of the auxiliary\n coordinate variables, and p the number of vertices of the cells. The vertices must be traversed anticlockwise in the\n lon-lat plane as viewed from above. The starting vertex is not specified.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n\n # Note that test does not check monotonicity\n ret_val = []\n reasoning = []\n for variable_name, boundary_variable_name in cfutil.get_cell_boundary_map(\n ds\n ).items():\n variable = ds.variables[variable_name]\n valid = True\n reasoning = []\n if boundary_variable_name not in ds.variables:\n valid = False\n reasoning.append(\n \"Boundary variable {} referenced by {} not \".format(\n boundary_variable_name, variable.name\n )\n + \"found in dataset variables\"\n )\n else:\n boundary_variable = ds.variables[boundary_variable_name]\n # The number of dimensions in the bounds variable should always be\n # the number of dimensions in the referring variable + 1\n if boundary_variable.ndim < 2:\n valid = False\n reasoning.append(\n \"Boundary variable {} specified by {}\".format(\n boundary_variable.name, variable.name\n )\n + \" should have at least two dimensions to enclose the base \"\n + \"case of a one dimensionsal variable\"\n )\n if boundary_variable.ndim != 
variable.ndim + 1:\n valid = False\n reasoning.append(\n \"The number of dimensions of the variable %s is %s, but the \"\n \"number of dimensions of the boundary variable %s is %s. The boundary variable \"\n \"should have %s dimensions\"\n % (\n variable.name,\n variable.ndim,\n boundary_variable.name,\n boundary_variable.ndim,\n variable.ndim + 1,\n )\n )\n if variable.dimensions[:] != boundary_variable.dimensions[: variable.ndim]:\n valid = False\n reasoning.append(\n u\"Boundary variable coordinates (for {}) are in improper order: {}. Bounds-specific dimensions should be last\"\n \"\".format(variable.name, boundary_variable.dimensions)\n )\n\n # ensure p vertices form a valid simplex given previous a...n\n # previous auxiliary coordinates\n if (\n ds.dimensions[boundary_variable.dimensions[-1]].size\n < len(boundary_variable.dimensions[:-1]) + 1\n ):\n valid = False\n reasoning.append(\n \"Dimension {} of boundary variable (for {}) must have at least {} elements to form a simplex/closed cell with previous dimensions {}.\".format(\n boundary_variable.name,\n variable.name,\n len(variable.dimensions) + 1,\n boundary_variable.dimensions[:-1],\n )\n )\n result = Result(\n BaseCheck.MEDIUM, valid, self.section_titles[\"7.1\"], reasoning\n )\n ret_val.append(result)\n return ret_val\n\n def check_cell_measures(self, ds):\n \"\"\"\n 7.2 To indicate extra information about the spatial properties of a\n variable's grid cells, a cell_measures attribute may be defined for a\n variable. This is a string attribute comprising a list of\n blank-separated pairs of words of the form \"measure: name\". \"area\" and\n \"volume\" are the only defined measures.\n\n The \"name\" is the name of the variable containing the measure values,\n which we refer to as a \"measure variable\". 
The dimensions of the\n measure variable should be the same as or a subset of the dimensions of\n the variable to which they are related, but their order is not\n restricted.\n\n The variable must have a units attribute and may have other attributes\n such as a standard_name.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n reasoning = []\n variables = ds.get_variables_by_attributes(\n cell_measures=lambda c: c is not None\n )\n for var in variables:\n search_str = r\"^(?:area|volume): (\\w+)$\"\n search_res = regex.search(search_str, var.cell_measures)\n if not search_res:\n valid = False\n reasoning.append(\n \"The cell_measures attribute for variable {} \"\n \"is formatted incorrectly. It should take the\"\n \" form of either 'area: cell_var' or \"\n \"'volume: cell_var' where cell_var is the \"\n \"variable describing the cell measures\".format(var.name)\n )\n else:\n valid = True\n cell_meas_var_name = search_res.groups()[0]\n # TODO: cache previous results\n if cell_meas_var_name not in ds.variables:\n valid = False\n reasoning.append(\n \"Cell measure variable {} referred to by \"\n \"{} is not present in dataset variables\".format(\n cell_meas_var_name, var.name\n )\n )\n else:\n cell_meas_var = ds.variables[cell_meas_var_name]\n if not hasattr(cell_meas_var, \"units\"):\n valid = False\n reasoning.append(\n \"Cell measure variable {} is required \"\n \"to have units attribute defined.\".format(\n cell_meas_var_name\n )\n )\n if not set(cell_meas_var.dimensions).issubset(var.dimensions):\n valid = False\n reasoning.append(\n \"Cell measure variable {} must have \"\n \"dimensions which are a subset of \"\n \"those defined in variable {}.\".format(\n cell_meas_var_name, var.name\n )\n )\n\n result = Result(\n BaseCheck.MEDIUM, valid, (self.section_titles[\"7.2\"]), reasoning\n )\n ret_val.append(result)\n\n return ret_val\n\n def check_cell_methods(self, ds):\n \"\"\"\n 7.3 To describe 
the characteristic of a field that is represented by cell values, we define the cell_methods attribute\n of the variable. This is a string attribute comprising a list of blank-separated words of the form \"name: method\". Each\n \"name: method\" pair indicates that for an axis identified by name, the cell values representing the field have been\n determined or derived by the specified method.\n\n name can be a dimension of the variable, a scalar coordinate variable, a valid standard name, or the word \"area\"\n\n values of method should be selected from the list in Appendix E, Cell Methods, which includes point, sum, mean, maximum,\n minimum, mid_range, standard_deviation, variance, mode, and median. Case is not significant in the method name. Some\n methods (e.g., variance) imply a change of units of the variable, as is indicated in Appendix E, Cell Methods.\n\n Because the default interpretation for an intensive quantity differs from that of an extensive quantity and because this\n distinction may not be understood by some users of the data, it is recommended that every data variable include for each\n of its dimensions and each of its scalar coordinate variables the cell_methods information of interest (unless this\n information would not be meaningful). 
It is especially recommended that cell_methods be explicitly specified for each\n spatio-temporal dimension and each spatio-temporal scalar coordinate variable.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n\n ret_val = []\n psep = regex.compile(\n r\"(?P<vars>\\w+: )+(?P<method>\\w+) ?(?P<where>where (?P<wtypevar>\\w+) \"\n r\"?(?P<over>over (?P<otypevar>\\w+))?| ?)(?:\\((?P<paren_contents>[^)]*)\\))?\"\n )\n\n for var in ds.get_variables_by_attributes(cell_methods=lambda x: x is not None):\n if not getattr(var, \"cell_methods\", \"\"):\n continue\n\n method = getattr(var, \"cell_methods\", \"\")\n\n valid_attribute = TestCtx(\n BaseCheck.HIGH, self.section_titles[\"7.3\"]\n ) # changed from 7.1 to 7.3\n valid_attribute.assert_true(\n regex.match(psep, method) is not None,\n '\"{}\" is not a valid format for cell_methods attribute of \"{}\"'\n \"\".format(method, var.name),\n )\n ret_val.append(valid_attribute.to_result())\n\n valid_cell_names = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"7.3\"])\n\n # check that the name is valid\n for match in regex.finditer(psep, method):\n # it is possible to have \"var1: var2: ... 
varn: ...\", so handle\n # that case\n for var_raw_str in match.captures(\"vars\"):\n # strip off the ' :' at the end of each match\n var_str = var_raw_str[:-2]\n if (\n var_str in var.dimensions\n or var_str == \"area\"\n or var_str in getattr(var, \"coordinates\", \"\")\n ):\n\n valid = True\n else:\n valid = False\n\n valid_cell_names.assert_true(\n valid,\n \"{}'s cell_methods name component {} does not match a dimension, \"\n \"area or auxiliary coordinate\".format(var.name, var_str),\n )\n\n ret_val.append(valid_cell_names.to_result())\n\n # Checks if the method value of the 'name: method' pair is acceptable\n valid_cell_methods = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"7.3\"])\n\n for match in regex.finditer(psep, method):\n # CF section 7.3 - \"Case is not significant in the method name.\"\n valid_cell_methods.assert_true(\n match.group(\"method\").lower() in self.cell_methods,\n \"{}:cell_methods contains an invalid method: {}\"\n \"\".format(var.name, match.group(\"method\")),\n )\n\n ret_val.append(valid_cell_methods.to_result())\n\n for match in regex.finditer(psep, method):\n if match.group(\"paren_contents\") is not None:\n # split along spaces followed by words with a colon\n # not sure what to do if a comment contains a colon!\n ret_val.append(\n self._check_cell_methods_paren_info(\n match.group(\"paren_contents\"), var\n ).to_result()\n )\n\n return ret_val\n\n def _check_cell_methods_paren_info(self, paren_contents, var):\n \"\"\"\n Checks that the spacing and/or comment info contained inside the\n parentheses in cell_methods is well-formed\n \"\"\"\n valid_info = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"7.3\"])\n # if there are no colons, this is a simple comment\n # TODO: are empty comments considered valid?\n if \":\" not in paren_contents:\n valid_info.out_of += 1\n valid_info.score += 1\n return valid_info\n # otherwise, split into k/v pairs\n kv_pair_pat = r\"(\\S+:)\\s+(.*(?=\\s+\\w+:)|[^:]+$)\\s*\"\n # otherwise, we must 
split further with intervals coming\n # first, followed by non-standard comments\n # we need the count of the matches, and re.findall() only returns\n # groups if they are present and we wish to see if the entire match\n # object concatenated together is the same as the original string\n pmatches = [m for m in regex.finditer(kv_pair_pat, paren_contents)]\n for i, pmatch in enumerate(pmatches):\n keyword, val = pmatch.groups()\n if keyword == \"interval:\":\n valid_info.out_of += 2\n interval_matches = regex.match(\n r\"^\\s*(?P<interval_number>\\S+)\\s+(?P<interval_units>\\S+)\\s*$\", val\n )\n # attempt to get the number for the interval\n if not interval_matches:\n valid_info.messages.append(\n '§7.3.3 {}:cell_methods contains an interval specification that does not parse: \"{}\". Should be in format \"interval: <number> <units>\"'.format(\n var.name, val\n )\n )\n else:\n try:\n float(interval_matches.group(\"interval_number\"))\n except ValueError:\n valid_info.messages.append(\n '§7.3.3 {}:cell_methods contains an interval value that does not parse as a numeric value: \"{}\".'.format(\n var.name, interval_matches.group(\"interval_number\")\n )\n )\n else:\n valid_info.score += 1\n\n # then the units\n try:\n Unit(interval_matches.group(\"interval_units\"))\n except ValueError:\n valid_info.messages.append(\n '§7.3.3 {}:cell_methods interval units \"{}\" is not parsable by UDUNITS.'.format(\n var.name, interval_matches.group(\"interval_units\")\n )\n )\n else:\n valid_info.score += 1\n elif keyword == \"comment:\":\n # comments can't really be invalid, except\n # if they come first or aren't last, and\n # maybe if they contain colons embedded in the\n # comment string\n valid_info.out_of += 1\n if len(pmatches) == 1:\n valid_info.messages.append(\n \"§7.3.3 If there is no standardized information, the keyword comment: should be omitted for variable {}\".format(\n var.name\n )\n )\n # otherwise check that the comment is the last\n # item in the parentheses\n 
elif i != len(pmatches) - 1:\n valid_info.messages.append(\n '§7.3.3 The non-standard \"comment:\" element must come after any standard elements in cell_methods for variable {}'.format(\n var.name\n )\n )\n #\n else:\n valid_info.score += 1\n else:\n valid_info.out_of += 1\n valid_info.messages.append(\n '§7.3.3 Invalid cell_methods keyword \"{}\" for variable {}. Must be one of [interval, comment]'.format(\n keyword, var.name\n )\n )\n\n # Ensure concatenated reconstructed matches are the same as the\n # original string. If they're not, there's likely a formatting error\n valid_info.assert_true(\n \"\".join(m.group(0) for m in pmatches) == paren_contents,\n \"§7.3.3 Parenthetical content inside {}:cell_methods is not well formed: {}\".format(\n var.name, paren_contents\n ),\n )\n\n return valid_info\n\n def check_climatological_statistics(self, ds):\n \"\"\"\n 7.4 A climatological time coordinate variable does not have a bounds attribute. Instead, it has a climatology\n attribute, which names a variable with dimensions (n,2), n being the dimension of the climatological time axis.\n Using the units and calendar of the time coordinate variable, element (i,0) of the climatology variable specifies\n the beginning of the first subinterval and element (i,1) the end of the last subinterval used to evaluate the\n climatological statistics with index i in the time dimension. The time coordinates should be values that are\n representative of the climatological time intervals, such that an application which does not recognise climatological\n time will nonetheless be able to make a reasonable interpretation.\n\n A climatological axis may use different statistical methods to measure variation among years, within years, and within\n days. The methods which can be specified are those listed in Appendix E, Cell Methods and each entry in the cell_methods\n attribute may also contain non-standardised information in parentheses after the method. 
The value of the cell_method\n attribute must be in one of the following forms:\n - time: method1 within years time: method2 over years\n - time: method1 within days time: method2 over days\n - time: method1 within days time: method2 over days time: method3 over years\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n\n reasoning = []\n ret_val = []\n total_climate_count = 0\n valid_climate_count = 0\n all_clim_coord_var_names = []\n\n methods = [\n \"point\", # TODO change to appendix import once cf1.7 merged\n \"sum\",\n \"mean\",\n \"maximum\",\n \"minimum\",\n \"mid_range\",\n \"standard_deviation\",\n \"variance\",\n \"mode\",\n \"median\",\n ]\n\n # find any climatology axies variables; any variables which contain climatological stats will use\n # these variables as coordinates\n clim_time_coord_vars = ds.get_variables_by_attributes(\n climatology=lambda s: s is not None\n )\n\n # first, to determine whether or not we have a valid climatological time\n # coordinate variable, we need to make sure it has the attribute \"climatology\",\n # but not the attribute \"bounds\"\n for clim_coord_var in clim_time_coord_vars:\n if hasattr(clim_coord_var, \"bounds\"):\n reasoning.append(\n \"Variable {} has a climatology attribute and cannot also have a bounds attribute.\".format(\n clim_coord_var.name\n )\n )\n result = Result(\n BaseCheck.MEDIUM, False, (self.section_titles[\"7.4\"]), reasoning\n )\n ret_val.append(result)\n return ret_val\n\n # make sure the climatology variable referenced actually exists\n elif clim_coord_var.climatology not in ds.variables:\n reasoning.append(\n \"Variable {} referenced in time's climatology attribute does not exist\".format(\n ds.variables[\"time\"].climatology\n )\n )\n result = Result(\n BaseCheck.MEDIUM, False, (self.section_titles[\"7.4\"]), reasoning\n )\n ret_val.append(result)\n return ret_val\n\n # check that coordinate bounds are in the proper order.\n # make sure last 
elements are boundary variable specific dimensions\n if (\n clim_coord_var.dimensions[:]\n != ds.variables[clim_coord_var.climatology].dimensions[\n : clim_coord_var.ndim\n ]\n ):\n reasoning.append(\n u\"Climatology variable coordinates are in improper order: {}. Bounds-specific dimensions should be last\".format(\n ds.variables[clim_coord_var.climatology].dimensions\n )\n )\n return ret_val\n\n elif (\n ds.dimensions[\n ds.variables[clim_coord_var.climatology].dimensions[-1]\n ].size\n != 2\n ):\n reasoning.append(\n u\"Climatology dimension {} should only contain two elements\".format(\n boundary_variable.dimensions\n )\n )\n\n # passed all these checks, so we can add this clim_coord_var to our total list\n all_clim_coord_var_names.append(clim_coord_var.name)\n\n # for any variables which use a climatology time coordinate variable as a coordinate,\n # if they have a cell_methods attribute, it must comply with the form:\n # time: method1 within years time: method2 over years\n # time: method1 within days time: method2 over days\n # time: method1 within days time: method2 over days time: method3 over years\n # optionally followed by parentheses for explaining additional\n # info, e.g.\n # \"time: method1 within years time: method2 over years (sidereal years)\"\n\n meth_regex = \"(?:{})\".format(\n \"|\".join(methods)\n ) # \"or\" comparison for the methods\n re_string = (\n r\"^time: {0} within (years|days)\" # regex string to test\n r\" time: {0} over \\1(?<=days)(?: time: {0} over years)?\"\n r\"(?: \\([^)]+\\))?$\".format(meth_regex)\n )\n\n # find any variables with a valid climatological cell_methods\n for cell_method_var in ds.get_variables_by_attributes(\n cell_methods=lambda s: s is not None\n ):\n if any(\n [dim in all_clim_coord_var_names for dim in cell_method_var.dimensions]\n ):\n total_climate_count += 1\n if not regex.search(re_string, cell_method_var.cell_methods):\n reasoning.append(\n 'The \"time: method within years/days over years/days\" format 
is not correct in variable {}.'.format(\n cell_method_var.name\n )\n )\n else:\n valid_climate_count += 1\n\n result = Result(\n BaseCheck.MEDIUM,\n (valid_climate_count, total_climate_count),\n (self.section_titles[\"7.4\"]),\n reasoning,\n )\n ret_val.append(result)\n\n return ret_val\n\n ###############################################################################\n # Chapter 8: Reduction of Dataset Size\n ###############################################################################\n\n def check_packed_data(self, ds):\n \"\"\"\n 8.1 Simple packing may be achieved through the use of the optional NUG defined attributes scale_factor and\n add_offset. After the data values of a variable have been read, they are to be multiplied by the scale_factor,\n and have add_offset added to them.\n\n The units of a variable should be representative of the unpacked data.\n\n If the scale_factor and add_offset attributes are of the same data type as the associated variable, the unpacked\n data is assumed to be of the same data type as the packed data. However, if the scale_factor and add_offset\n attributes are of a different data type from the variable (containing the packed data) then the unpacked data\n should match the type of these attributes, which must both be of type float or both be of type double. An additional\n restriction in this case is that the variable containing the packed data must be of type byte, short or int. 
It is\n not advised to unpack an int into a float as there is a potential precision loss.\n\n When data to be packed contains missing values the attributes that indicate missing values (_FillValue, valid_min,\n valid_max, valid_range) must be of the same data type as the packed data.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n for name, var in ds.variables.items():\n\n add_offset = getattr(var, \"add_offset\", None)\n scale_factor = getattr(var, \"scale_factor\", None)\n if not (add_offset or scale_factor):\n continue\n\n valid = True\n reasoning = []\n\n # if only one of these attributes is defined, assume they\n # are the same type (value doesn't matter here)\n if not add_offset:\n add_offset = scale_factor\n if not scale_factor:\n scale_factor = add_offset\n\n if type(add_offset) != type(scale_factor):\n valid = False\n reasoning.append(\n \"Attributes add_offset and scale_factor have different data type.\"\n )\n elif type(scale_factor) != var.dtype.type:\n # Check both attributes are type float or double\n if not isinstance(scale_factor, (float, np.floating)):\n valid = False\n reasoning.append(\n \"Attributes add_offset and scale_factor are not of type float or double.\"\n )\n else:\n # Check variable type is byte, short or int\n if var.dtype.type not in [\n np.int,\n np.int8,\n np.int16,\n np.int32,\n np.int64,\n ]:\n valid = False\n reasoning.append(\"Variable is not of type byte, short, or int.\")\n\n result = Result(\n BaseCheck.MEDIUM, valid, self.section_titles[\"8.1\"], reasoning\n )\n ret_val.append(result)\n reasoning = []\n\n valid = True\n # test further with _FillValue , valid_min , valid_max , valid_range\n if hasattr(var, \"_FillValue\"):\n if var._FillValue.dtype.type != var.dtype.type:\n valid = False\n reasoning.append(\n \"Type of %s:_FillValue attribute (%s) does not match variable type (%s)\"\n % (name, var._FillValue.dtype.name, var.dtype.name)\n )\n if 
hasattr(var, \"valid_min\"):\n if var.valid_min.dtype.type != var.dtype.type:\n valid = False\n reasoning.append(\n \"Type of %svalid_min attribute (%s) does not match variable type (%s)\"\n % (name, var.valid_min.dtype.name, var.dtype.name)\n )\n if hasattr(var, \"valid_max\"):\n if var.valid_max.dtype.type != var.dtype.type:\n valid = False\n reasoning.append(\n \"Type of %s:valid_max attribute (%s) does not match variable type (%s)\"\n % (name, var.valid_max.dtype.name, var.dtype.name)\n )\n if hasattr(var, \"valid_range\"):\n if var.valid_range.dtype.type != var.dtype.type:\n valid = False\n reasoning.append(\n \"Type of %s:valid_range attribute (%s) does not match variable type (%s)\"\n % (name, var.valid_range.dtype.name, var.dtype.name)\n )\n\n result = Result(\n BaseCheck.MEDIUM, valid, self.section_titles[\"8.1\"], reasoning\n )\n ret_val.append(result)\n\n return ret_val\n\n def check_compression_gathering(self, ds):\n \"\"\"\n At the current time the netCDF interface does not provide for packing\n data. However a simple packing may be achieved through the use of the\n optional NUG defined attributes scale_factor and add_offset . After the\n data values of a variable have been read, they are to be multiplied by\n the scale_factor , and have add_offset added to them. If both\n attributes are present, the data are scaled before the offset is added.\n When scaled data are written, the application should first subtract the\n offset and then divide by the scale factor. The units of a variable\n should be representative of the unpacked data.\n\n This standard is more restrictive than the NUG with respect to the use\n of the scale_factor and add_offset attributes; ambiguities and\n precision problems related to data type conversions are resolved by\n these restrictions. If the scale_factor and add_offset attributes are\n of the same data type as the associated variable, the unpacked data is\n assumed to be of the same data type as the packed data. 
However, if the\n scale_factor and add_offset attributes are of a different data type\n from the variable (containing the packed data) then the unpacked data\n should match the type of these attributes, which must both be of type\n float or both be of type double . An additional restriction in this\n case is that the variable containing the packed data must be of type\n byte , short or int . It is not advised to unpack an int into a float\n as there is a potential precision loss.\n\n When data to be packed contains missing values the attributes that\n indicate missing values ( _FillValue , valid_min , valid_max ,\n valid_range ) must be of the same data type as\n the packed data. See Section 2.5.1, “Missing Data” for a discussion of\n how applications should treat variables that have attributes indicating\n both missing values and transformations defined by a scale and/or\n offset.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n for compress_var in ds.get_variables_by_attributes(\n compress=lambda s: s is not None\n ):\n valid = True\n reasoning = []\n # puts the referenced variable being compressed into a set\n compress_set = set(compress_var.compress.split(\" \"))\n if compress_var.ndim != 1:\n valid = False\n reasoning.append(\n \"Compression variable {} may only have one dimension\".format(\n compress_var.name\n )\n )\n # ensure compression variable is a proper index, and thus is an\n # signed or unsigned integer type of some sort\n if compress_var.dtype.kind not in {\"i\", \"u\"}:\n valid = False\n reasoning.append(\n \"Compression variable {} must be an integer type to form a proper array index\".format(\n compress_var.name\n )\n )\n # make sure all the variables referred to are contained by the\n # variables.\n if not compress_set.issubset(ds.dimensions):\n not_in_dims = sorted(compress_set.difference(ds.dimensions))\n valid = False\n reasoning.append(\n \"The following dimensions 
referenced by the compress attribute of variable {} do not exist: {}\".format(\n compress_var.name, not_in_dims\n )\n )\n\n result = Result(\n BaseCheck.MEDIUM, valid, self.section_titles[\"8.2\"], reasoning\n )\n ret_val.append(result)\n\n return ret_val\n\n ###############################################################################\n # Chapter 9: Discrete Sampling Geometries\n ###############################################################################\n\n def check_all_features_are_same_type(self, ds):\n \"\"\"\n Check that the feature types in a dataset are all the same.\n\n 9.1 The features contained within a collection must always be of the same type; and all the collections in a CF file\n must be of the same feature type.\n\n point, timeSeries, trajectory, profile, timeSeriesProfile, trajectoryProfile.\n\n The space-time coordinates that are indicated for each feature are mandatory. However a featureType may also include\n other space-time coordinates which are not mandatory (notably the z coordinate).\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: compliance_checker.base.Result\n \"\"\"\n all_the_same = TestCtx(BaseCheck.HIGH, self.section_titles[\"9.1\"])\n feature_types_found = defaultdict(list)\n for name in self._find_geophysical_vars(ds):\n feature = cfutil.guess_feature_type(ds, name)\n # If we can't figure out the feature type, penalize. Originally,\n # it was not penalized. 
However, this led to the issue that the\n # message did not appear in the output of compliance checker if\n # no other error/warning/information was printed out for section\n # 9.1.\n found = False\n if feature is not None:\n feature_types_found[feature].append(name)\n found = True\n all_the_same.assert_true(\n found, \"Unidentifiable feature for variable {}\" \"\".format(name)\n )\n feature_description = \", \".join(\n [\n \"{} ({})\".format(ftr, \", \".join(vrs))\n for ftr, vrs in feature_types_found.items()\n ]\n )\n\n all_the_same.assert_true(\n len(feature_types_found) < 2,\n \"Different feature types discovered in this dataset: {}\"\n \"\".format(feature_description),\n )\n\n return all_the_same.to_result()\n\n def check_feature_type(self, ds):\n \"\"\"\n Check the global attribute featureType for valid CF featureTypes\n\n 9.4 A global attribute, featureType, is required for all Discrete Geometry representations except the orthogonal\n multidimensional array representation, for which it is highly recommended.\n\n The value assigned to the featureType attribute is case-insensitive.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: compliance_checker.base.Result\n \"\"\"\n # Due to case insensitive requirement, we list the possible featuretypes\n # in lower case and check using the .lower() method\n feature_list = [\n \"point\",\n \"timeseries\",\n \"trajectory\",\n \"profile\",\n \"timeseriesprofile\",\n \"trajectoryprofile\",\n ]\n\n feature_type = getattr(ds, \"featureType\", None)\n valid_feature_type = TestCtx(\n BaseCheck.HIGH, \"§9.1 Dataset contains a valid featureType\"\n )\n valid_feature_type.assert_true(\n feature_type is None or feature_type.lower() in feature_list,\n \"{} is not a valid CF featureType. 
It must be one of {}\"\n \"\".format(feature_type, \", \".join(feature_list)),\n )\n return valid_feature_type.to_result()\n\n def check_cf_role(self, ds):\n \"\"\"\n Check variables defining cf_role for legal cf_role values.\n\n §9.5 The only acceptable values of cf_role for Discrete Geometry CF\n data sets are timeseries_id, profile_id, and trajectory_id\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: compliance_checker.base.Result\n \"\"\"\n valid_roles = [\"timeseries_id\", \"profile_id\", \"trajectory_id\"]\n variable_count = 0\n for variable in ds.get_variables_by_attributes(cf_role=lambda x: x is not None):\n variable_count += 1\n name = variable.name\n valid_cf_role = TestCtx(BaseCheck.HIGH, self.section_titles[\"9.5\"])\n cf_role = variable.cf_role\n valid_cf_role.assert_true(\n cf_role in valid_roles,\n \"{} is not a valid cf_role value. It must be one of {}\"\n \"\".format(name, \", \".join(valid_roles)),\n )\n if variable_count > 0:\n m = (\n \"§9.5 The only acceptable values of cf_role for Discrete Geometry CF\"\n + \" data sets are timeseries_id, profile_id, and trajectory_id\"\n )\n valid_cf_role.assert_true(variable_count < 3, m)\n return valid_cf_role.to_result()\n\n def check_variable_features(self, ds):\n \"\"\"\n Checks the variable feature types match the dataset featureType attribute\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n feature_list = [\n \"point\",\n \"timeSeries\",\n \"trajectory\",\n \"profile\",\n \"timeSeriesProfile\",\n \"trajectoryProfile\",\n ]\n # Don't bother checking if it's not a legal featureType\n feature_type = getattr(ds, \"featureType\", None)\n if feature_type not in feature_list:\n return []\n\n feature_type_map = {\n \"point\": [\"point\"],\n \"timeSeries\": [\n \"timeseries\",\n \"multi-timeseries-orthogonal\",\n \"multi-timeseries-incomplete\",\n ],\n \"trajectory\": [\"cf-trajectory\", \"single-trajectory\",],\n 
\"profile\": [\"profile-orthogonal\", \"profile-incomplete\"],\n \"timeSeriesProfile\": [\n \"timeseries-profile-single-station\",\n \"timeseries-profile-multi-station\",\n \"timeseries-profile-single-ortho-time\",\n \"timeseries-profile-multi-ortho-time\",\n \"timeseries-profile-ortho-depth\",\n \"timeseries-profile-incomplete\",\n ],\n \"trajectoryProfile\": [\n \"trajectory-profile-orthogonal\",\n \"trajectory-profile-incomplete\",\n ],\n }\n for name in self._find_geophysical_vars(ds):\n variable_feature = cfutil.guess_feature_type(ds, name)\n # If we can't figure it out, don't check it.\n if variable_feature is None:\n continue\n matching_feature = TestCtx(BaseCheck.MEDIUM, self.section_titles[\"9.1\"])\n matching_feature.assert_true(\n variable_feature in feature_type_map[feature_type],\n \"{} is not a {}, it is detected as a {}\"\n \"\".format(name, feature_type, variable_feature),\n )\n ret_val.append(matching_feature.to_result())\n\n return ret_val\n\n def check_hints(self, ds):\n \"\"\"\n Checks for potentially mislabeled metadata and makes suggestions for how to correct\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n\n ret_val.extend(self._check_hint_bounds(ds))\n\n return ret_val\n\n def _check_hint_bounds(self, ds):\n \"\"\"\n Checks for variables ending with _bounds, if they are not cell methods,\n make the recommendation\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n boundary_variables = cfutil.get_cell_boundary_variables(ds)\n for name in ds.variables:\n if name.endswith(\"_bounds\") and name not in boundary_variables:\n msg = (\n \"{} might be a cell boundary variable but there are no variables that define it \"\n \"as a boundary using the `bounds` attribute.\".format(name)\n )\n result = Result(BaseCheck.LOW, True, self.section_titles[\"7.1\"], [msg])\n ret_val.append(result)\n\n return 
ret_val\n\n\nclass CF1_7Check(CF1_6Check):\n \"\"\"Implementation for CF v1.7. Inherits from CF1_6Check as most of the\n checks are the same.\"\"\"\n\n # things that are specific to 1.7\n _cc_spec_version = \"1.7\"\n _cc_url = \"http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html\"\n\n appendix_a = appendix_a_base.copy()\n appendix_a.update(\n {\n \"actual_range\": {\n \"Type\": \"N\",\n \"attr_loc\": {\"D\", \"C\"},\n \"cf_section\": \"2.5.1\",\n },\n \"comment\": {\n \"Type\": \"S\",\n \"attr_loc\": {\"G\", \"D\", \"C\"},\n \"cf_section\": \"2.6.2\",\n },\n \"external_variables\": {\n \"Type\": \"S\",\n \"attr_loc\": {\"G\"},\n \"cf_section\": \"2.6.3\",\n },\n \"actual_range\": {\n \"Type\": \"N\",\n \"attr_loc\": {\"D\", \"C\"},\n \"cf_section\": \"2.5.1\",\n },\n \"scale_factor\": {\"Type\": \"N\", \"attr_loc\": {\"D\", \"C\"}, \"cf_section\": \"8.1\"},\n }\n )\n\n def __init__(self, options=None):\n super(CF1_7Check, self).__init__(options)\n\n self.cell_methods = cell_methods17\n self.grid_mapping_dict = grid_mapping_dict17\n self.grid_mapping_attr_types = grid_mapping_attr_types17\n\n def check_actual_range(self, ds):\n \"\"\"Check the actual_range attribute of variables. As stated in\n section 2.5.1 of version 1.7, this convention defines a two-element\n vector attribute designed to describe the actual minimum and actual\n maximum values of variables containing numeric data. Conditions:\n - the fist value of the two-element vector must be equal to the\n minimum of the data, and the second element equal to the maximum\n - if the data is packed, the elements of actual_range should have\n the same data type as the *unpacked* data\n - if valid_range is specified, both elements of actual_range should\n be within valid_range\n\n If a variable does not have an actual_range attribute, let it pass;\n including this attribute is only suggested. 
However, if the user is\n specifying the actual_range, the Result will be considered\n high-priority.\"\"\"\n\n ret_val = []\n\n for name, variable in ds.variables.items():\n msgs = []\n score = 0\n out_of = 0\n\n if not hasattr(variable, \"actual_range\"):\n continue # having this attr is only suggested, no Result needed\n else:\n\n if variable.mask: # remove mask\n variable.set_auto_mask(False)\n\n out_of += 1\n try:\n if (\n len(variable.actual_range) != 2\n ): # TODO is the attr also a numpy array? if so, .size\n msgs.append(\n \"actual_range of '{}' must be 2 elements\".format(name)\n )\n ret_val.append(\n Result( # putting result into list\n BaseCheck.HIGH,\n (score, out_of),\n self.section_titles[\"2.5\"],\n msgs,\n )\n )\n continue # no need to keep checking if already completely wrong\n else:\n score += 1\n except TypeError: # in case it's just a single number\n msgs.append(\"actual_range of '{}' must be 2 elements\".format(name))\n ret_val.append(\n Result( # putting result into list\n BaseCheck.HIGH,\n (score, out_of),\n self.section_titles[\"2.5\"],\n msgs,\n )\n )\n continue\n\n # check equality to existing min/max values\n # NOTE this is a data check\n out_of += 1\n if (variable.actual_range[0] != variable[:].min()) or (\n variable.actual_range[1] != variable[:].max()\n ):\n msgs.append(\n \"actual_range elements of '{}' inconsistent with its min/max values\".format(\n name\n )\n )\n else:\n score += 1\n\n # check that the actual range is within the valid range\n out_of += 1\n if hasattr(variable, \"valid_range\"): # check within valid_range\n if (variable.actual_range[0] < variable.valid_range[0]) or (\n variable.actual_range[1] > variable.valid_range[1]\n ):\n msgs.append(\n '\"{}\"\\'s actual_range must be within valid_range'.format(\n name\n )\n )\n else:\n score += 1\n\n # check the elements of the actual range have the appropriate\n # relationship to the valid_min and valid_max\n out_of += 2\n if hasattr(variable, \"valid_min\"):\n if 
variable.actual_range[0] < variable.valid_min:\n msgs.append(\n '\"{}\"\\'s actual_range first element must be >= valid_min ({})'.format(\n name, variable.valid_min\n )\n )\n else:\n score += 1\n if hasattr(variable, \"valid_max\"):\n if variable.actual_range[1] > variable.valid_max:\n msgs.append(\n '\"{}\"\\'s actual_range second element must be <= valid_max ({})'.format(\n name, variable.valid_max\n )\n )\n else:\n score += 1\n\n ret_val.append(\n Result( # putting result into list\n BaseCheck.HIGH, (score, out_of), self.section_titles[\"2.5\"], msgs\n )\n )\n return ret_val\n\n def check_cell_boundaries(self, ds):\n \"\"\"\n Checks the dimensions of cell boundary variables to ensure they are CF compliant\n per section 7.1.\n\n This method extends the CF1_6Check method; please see the original method for the\n complete doc string.\n\n If any variable contains both a formula_terms attribute *and* a bounding variable,\n that bounds variable must also have a formula_terms attribute.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :returns list: List of results\n \"\"\"\n\n # Note that test does not check monotonicity\n ret_val = []\n reasoning = []\n for variable_name, boundary_variable_name in cfutil.get_cell_boundary_map(\n ds\n ).items():\n variable = ds.variables[variable_name]\n valid = True\n reasoning = []\n if boundary_variable_name not in ds.variables:\n valid = False\n reasoning.append(\n \"Boundary variable {} referenced by {} not \".format(\n boundary_variable_name, variable.name\n )\n + \"found in dataset variables\"\n )\n else:\n boundary_variable = ds.variables[boundary_variable_name]\n # The number of dimensions in the bounds variable should always be\n # the number of dimensions in the referring variable + 1\n if boundary_variable.ndim < 2:\n valid = False\n reasoning.append(\n \"Boundary variable {} specified by {}\".format(\n boundary_variable.name, variable.name\n )\n + \" should have at least two dimensions to enclose the base \"\n + 
\"case of a one dimensionsal variable\"\n )\n if boundary_variable.ndim != variable.ndim + 1:\n valid = False\n reasoning.append(\n \"The number of dimensions of the variable %s is %s, but the \"\n \"number of dimensions of the boundary variable %s is %s. The boundary variable \"\n \"should have %s dimensions\"\n % (\n variable.name,\n variable.ndim,\n boundary_variable.name,\n boundary_variable.ndim,\n variable.ndim + 1,\n )\n )\n if variable.dimensions[:] != boundary_variable.dimensions[: variable.ndim]:\n valid = False\n reasoning.append(\n u\"Boundary variable coordinates (for {}) are in improper order: {}. Bounds-specific dimensions should be last\"\n \"\".format(variable.name, boundary_variable.dimensions)\n )\n\n # ensure p vertices form a valid simplex given previous a...n\n # previous auxiliary coordinates\n if (\n ds.dimensions[boundary_variable.dimensions[-1]].size\n < len(boundary_variable.dimensions[:-1]) + 1\n ):\n valid = False\n reasoning.append(\n \"Dimension {} of boundary variable (for {}) must have at least {} elements to form a simplex/closed cell with previous dimensions {}.\".format(\n boundary_variable.name,\n variable.name,\n len(variable.dimensions) + 1,\n boundary_variable.dimensions[:-1],\n )\n )\n\n # check if formula_terms is present in the var; if so,\n # the bounds variable must also have a formula_terms attr\n if hasattr(variable, \"formula_terms\"):\n if not hasattr(boundary_variable, \"formula_terms\"):\n valid = False\n reasoning.append(\n \"'{}' has 'formula_terms' attr, bounds variable '{}' must also have 'formula_terms'\".format(\n variable_name, boundary_variable_name\n )\n )\n\n result = Result(\n BaseCheck.MEDIUM, valid, self.section_titles[\"7.1\"], reasoning\n )\n ret_val.append(result)\n return ret_val\n\n def check_cell_measures(self, ds):\n \"\"\"\n A method to over-ride the CF1_6Check method. 
In CF 1.7, it is specified\n that variable referenced by cell_measures must be in the dataset OR\n referenced by the global attribute \"external_variables\", which represent\n all the variables used in the dataset but not found in the dataset.\n\n 7.2 To indicate extra information about the spatial properties of a\n variable's grid cells, a cell_measures attribute may be defined for a\n variable. This is a string attribute comprising a list of\n blank-separated pairs of words of the form \"measure: name\". \"area\" and\n \"volume\" are the only defined measures.\n\n The \"name\" is the name of the variable containing the measure values,\n which we refer to as a \"measure variable\". The dimensions of the\n measure variable should be the same as or a subset of the dimensions of\n the variable to which they are related, but their order is not\n restricted.\n\n The variable must have a units attribute and may have other attributes\n such as a standard_name.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n reasoning = []\n variables = ds.get_variables_by_attributes(\n cell_measures=lambda c: c is not None\n )\n for var in variables:\n search_str = r\"^(?:area|volume): (\\w+)$\"\n search_res = regex.search(search_str, var.cell_measures)\n if not search_res:\n valid = False\n reasoning.append(\n \"The cell_measures attribute for variable {} \"\n \"is formatted incorrectly. 
It should take the\"\n \" form of either 'area: cell_var' or \"\n \"'volume: cell_var' where cell_var is the \"\n \"variable describing the cell measures\".format(var.name)\n )\n else:\n valid = True\n cell_meas_var_name = search_res.groups()[0]\n # TODO: cache previous results\n\n # if the dataset has external_variables, get it\n try:\n external_variables = ds.getncattr(\"external_variables\")\n except AttributeError:\n external_variables = []\n if cell_meas_var_name not in ds.variables:\n if cell_meas_var_name not in external_variables:\n valid = False\n reasoning.append(\n \"Cell measure variable {} referred to by {} is not present in dataset variables\".format(\n cell_meas_var_name, var.name\n )\n )\n else:\n valid = True\n\n # make Result\n result = Result(\n BaseCheck.MEDIUM, valid, (self.section_titles[\"7.2\"]), reasoning\n )\n ret_val.append(result)\n continue # can't test anything on an external var\n\n else:\n cell_meas_var = ds.variables[cell_meas_var_name]\n if not hasattr(cell_meas_var, \"units\"):\n valid = False\n reasoning.append(\n \"Cell measure variable {} is required \"\n \"to have units attribute defined.\".format(\n cell_meas_var_name\n )\n )\n if not set(cell_meas_var.dimensions).issubset(var.dimensions):\n valid = False\n reasoning.append(\n \"Cell measure variable {} must have \"\n \"dimensions which are a subset of \"\n \"those defined in variable {}.\".format(\n cell_meas_var_name, var.name\n )\n )\n\n result = Result(\n BaseCheck.MEDIUM, valid, (self.section_titles[\"7.2\"]), reasoning\n )\n ret_val.append(result)\n\n return ret_val\n\n def _check_grid_mapping_attr_condition(self, attr, attr_name):\n \"\"\"\n Evaluate a condition (or series of conditions) for a particular\n attribute. 
Implementation for CF-1.7.\n\n :param attr: attribute to teset condition for\n :param str attr_name: name of the attribute\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n if attr_name == \"geographic_crs_name\":\n return self._evaluate_geographic_crs_name(attr)\n\n elif attr_name == \"geoid_name\":\n return self._evaluate_geoid_name(attr)\n\n elif attr_name == \"geopotential_datum_name\":\n return self._evaluate_geopotential_datum_name(attr)\n\n elif attr_name == \"horizontal_datum_name\":\n return self._evaluate_horizontal_datum_name(attr)\n\n elif attr_name == \"prime_meridian_name\":\n return self._evaluate_prime_meridian_name(attr)\n\n elif attr_name == \"projected_crs_name\":\n return self._evaluate_projected_crs_name(attr)\n\n elif attr_name == \"reference_ellipsoid_name\":\n return self._evaluate_reference_ellipsoid_name(attr)\n\n elif attr_name == \"towgs84\":\n return self._evaluate_towgs84(attr)\n\n else: # invoke method from 1.6, as these names are all still valid\n return super(CF1_7Check, self)._check_grid_mapping_attr_condition(\n attr, attr_name\n )\n\n def _check_gmattr_existence_condition_geoid_name_geoptl_datum_name(self, var):\n \"\"\"\n Check to see if both geoid_name and geopotential_datum_name exist as attributes\n for `var`. 
They should not.\n\n :param netCDF4.Variable var\n :rtype tuple\n :return two-tuple (bool, str)\n \"\"\"\n\n msg = \"Both geoid_name and geopotential_datum_name cannot exist\"\n\n if (\"geoid_name\" in var.ncattrs()) and (\n \"geopotential_datum_name\" in var.ncattrs()\n ):\n return (False, msg)\n\n else:\n return (True, msg)\n\n def _check_gmattr_existence_condition_ell_pmerid_hdatum(self, var):\n \"\"\"\n If one of reference_ellipsoid_name, prime_meridian_name, or\n horizontal_datum_name are defined as grid_mapping attributes,\n they must all be defined.\n\n :param netCDF4.Variable var\n :rtype tuple\n :return two-tuple (bool, str)\n \"\"\"\n\n msg = (\n \"If any of reference_ellipsoid_name, prime_meridian_name, \"\n \"or horizontal_datum_name are defined, all must be defined.\"\n )\n\n _ncattrs = set(var.ncattrs())\n\n if any(\n [\n x in _ncattrs\n for x in [\n \"reference_ellipsoid_name\",\n \"prime_meridian_name\",\n \"horizontal_datum_name\",\n ]\n ]\n ) and (\n not set(\n [\n \"reference_ellipsoid_name\",\n \"prime_meridian_name\",\n \"horizontal_datum_name\",\n ]\n ).issubset(_ncattrs)\n ):\n return (False, msg)\n\n else:\n return (True, msg)\n\n def _get_projdb_conn(self):\n \"\"\"\n Return a SQLite Connection to the PROJ database.\n\n Returns:\n sqlite3.Connection\n \"\"\"\n\n proj_db_path = os.path.join(pyproj.datadir.get_data_dir(), \"proj.db\")\n return sqlite3.connect(proj_db_path)\n\n def _exec_query_str_with_params(self, qstr, argtuple):\n \"\"\"\n Execute a query string in a database connection with the given argument\n tuple. 
Return a result set.\n\n :param str qstr: desired query to be executed\n :param tuple argtuple: tuple of arguments to be supplied to query\n :rtype set\n \"\"\"\n\n conn = self._get_projdb_conn()\n return conn.execute(qstr, argtuple)\n\n def _evaluate_geographic_crs_name(self, val):\n \"\"\"\n Evaluate the condition for the geographic_crs_name attribute.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n query_str = (\n \"SELECT 1 FROM geodetic_crs WHERE name = ? \"\n \"UNION ALL \" # need union in case contained in other tables\n \"SELECT 1 FROM alias_name WHERE alt_name = ? \"\n \"AND table_name = 'geodetic_crs' LIMIT 1\"\n )\n\n # try to find the value in the database\n res_set = self._exec_query_str_with_params(query_str, (val, val))\n\n # does it exist? if so, amt returned be > 1\n return (\n len(res_set.fetchall()) > 0,\n \"geographic_crs_name must correspond to a valid OGC WKT GEOGCS name\",\n )\n\n def _evaluate_geoid_name(self, val):\n \"\"\"\n Evaluate the condition for the geod_name attribute.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n query_str = (\n \"SELECT 1 FROM vertical_datum WHERE name = ? \"\n \"UNION ALL \"\n \"SELECT 1 FROM alias_name WHERE alt_name = ? \"\n \"AND table_name = 'vertical_datum' LIMIT 1\"\n )\n\n # try to find the value in the database\n res_set = self._exec_query_str_with_params(query_str, (val, val))\n\n return (\n len(res_set.fetchall()) > 0,\n \"geoid_name must correspond to a valid OGC WKT VERT_DATUM name\",\n )\n\n def _evaluate_geopotential_datum_name(self, val):\n \"\"\"\n Evaluate the condition for the geogpotential_datum_name attribute.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n query_str = (\n \"SELECT 1 FROM vertical_datum WHERE name = ? \"\n \"UNION ALL \"\n \"SELECT 1 FROM alias_name WHERE alt_name = ? 
\"\n \"AND table_name = 'vertical_datum' LIMIT 1\"\n )\n\n # try to find the value in the database\n res_set = self._exec_query_str_with_params(query_str, (val, val))\n\n return (\n len(res_set.fetchall()) > 0,\n \"geopotential_datum_name must correspond to a valid OGC WKT VERT_DATUM name\",\n )\n\n def _evaluate_horizontal_datum_name(self, val):\n \"\"\"\n Evaluate the condition for the horizontal_datum_name attribute.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n return (\n val in horizontal_datum_names17,\n (\n \"{} must be a valid Horizontal Datum Name; \"\n \"see https://github.com/cf-convention/cf-conventions/wiki/Mapping-from-CF-Grid-Mapping-Attributes-to-CRS-WKT-Elements.\"\n ),\n )\n\n def _evaluate_prime_meridian_name(self, val):\n \"\"\"\n Evaluate the condition for the prime_meridian_name.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n return (\n val in prime_meridian_names17,\n (\n \"{} must be a valid Prime Meridian name; \"\n \"see https://github.com/cf-convention/cf-conventions/wiki/csv/prime_meridian.csv.\"\n ),\n )\n\n def _evaluate_projected_crs_name(self, val):\n \"\"\"\n Evaluate the condition for the projected_crs attribute.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n query_str = (\n \"SELECT 1 FROM projected_crs WHERE name = ? \"\n \"UNION ALL \"\n \"SELECT 1 FROM alias_name WHERE alt_name = ? 
\"\n \"AND table_name = 'projected_crs' LIMIT 1\"\n )\n\n # try to find the value in the database\n res_set = self._exec_query_str_with_params(query_str, (val, val))\n\n return (\n len(res_set.fetchall()) > 0,\n \"projected_crs_name must correspond to a valid OGC WKT PROJCS name\",\n )\n\n def _evaluate_reference_ellipsoid_name(self, val):\n \"\"\"\n Evaluate the condition for the reference_ellipsoid_name attribute.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n return (\n val in ellipsoid_names17,\n (\n \"{} must be a valid Ellipsoid Name; \"\n \"see https://github.com/cf-convention/cf-conventions/wiki/csv/ellipsoid.csv.\"\n ),\n )\n\n def _evaluate_towgs84(self, val):\n \"\"\"\n Evaluate the condition for the towgs84 attribute.\n\n :param val: value to be tested\n :rtype tuple\n :return two-tuple of (bool, str)\n \"\"\"\n\n msg = (\n \"towgs84 must be an array of length 3, 6, or 7 of double-precision\"\n \" and correspond to anm OGC WKT TOWGS84 node\"\n )\n\n # if not numpy type, return false\n if not getattr(val, \"dtype\", None):\n return (False, msg)\n\n # must be double-precision array\n elif val.dtype != np.float64:\n return (False, msg)\n\n # must be of length 3, 6, or 7\n elif not val.shape: # single value\n return (False, msg)\n\n elif not (val.size in (3, 6, 7)):\n return (False, msg)\n\n else:\n return (True, msg)\n\n def check_grid_mapping(self, ds):\n __doc__ = super(CF1_7Check, self).check_grid_mapping.__doc__\n prev_return = super(CF1_7Check, self).check_grid_mapping(ds)\n ret_val = []\n grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)\n for var_name in sorted(grid_mapping_variables):\n var = ds.variables[var_name]\n test_ctx = self.get_test_ctx(\n BaseCheck.HIGH, self.section_titles[\"5.6\"], var.name\n )\n\n # TODO: check cases where crs_wkt provides part of a necessary\n # grid_mapping attribute, or where a grid_mapping attribute\n # overrides what has been provided in crs_wkt.\n # 
attempt to parse crs_wkt if it is present\n if \"crs_wkt\" in var.ncattrs():\n crs_wkt = var.crs_wkt\n if not isinstance(crs_wkt, str):\n test_ctx.messages.append(\"crs_wkt attribute must be a string\")\n test_ctx.out_of += 1\n else:\n try:\n pyproj.CRS.from_wkt(crs_wkt)\n except pyproj.exceptions.CRSError as crs_error:\n test_ctx.messages.append(\n \"Cannot parse crs_wkt attribute to CRS using Proj4. Proj4 error: {}\".format(\n str(crs_error)\n )\n )\n else:\n test_ctx.score += 1\n test_ctx.out_of += 1\n\n # existence_conditions\n exist_cond_1 = self._check_gmattr_existence_condition_geoid_name_geoptl_datum_name(\n var\n )\n test_ctx.assert_true(exist_cond_1[0], exist_cond_1[1])\n exist_cond_2 = self._check_gmattr_existence_condition_ell_pmerid_hdatum(var)\n test_ctx.assert_true(exist_cond_2[0], exist_cond_2[1])\n\n # handle vertical datum related grid_mapping attributes\n vert_datum_attrs = {}\n possible_vert_datum_attrs = {\"geoid_name\", \"geopotential_datum_name\"}\n vert_datum_attrs = possible_vert_datum_attrs.intersection(var.ncattrs())\n len_vdatum_name_attrs = len(vert_datum_attrs)\n # check that geoid_name and geopotential_datum_name are not both\n # present in the grid_mapping variable\n if len_vdatum_name_attrs == 2:\n test_ctx.out_of += 1\n test_ctx.messages.append(\n \"Cannot have both 'geoid_name' and \"\n \"'geopotential_datum_name' attributes in \"\n \"grid mapping variable '{}'\".format(var.name)\n )\n elif len_vdatum_name_attrs == 1:\n # should be one or zero attrs\n proj_db_path = os.path.join(pyproj.datadir.get_data_dir(), \"proj.db\")\n try:\n with sqlite3.connect(proj_db_path) as conn:\n v_datum_attr = next(iter(vert_datum_attrs))\n v_datum_value = getattr(var, v_datum_attr)\n v_datum_str_valid = self._process_v_datum_str(\n v_datum_value, conn\n )\n\n invalid_msg = (\n \"Vertical datum value '{}' for \"\n \"attribute '{}' in grid mapping \"\n \"variable '{}' is not valid\".format(\n v_datum_value, v_datum_attr, var.name\n )\n )\n 
test_ctx.assert_true(v_datum_str_valid, invalid_msg)\n except sqlite3.Error as e:\n # if we hit an error, skip the check\n warn(\n \"Error occurred while trying to query \"\n \"Proj4 SQLite database at {}: {}\".format(proj_db_path, str(e))\n )\n prev_return[var.name] = test_ctx.to_result()\n\n return prev_return\n\n def _process_v_datum_str(self, v_datum_str, conn):\n vdatum_query = \"\"\"SELECT 1 FROM alias_name WHERE\n table_name = 'vertical_datum' AND\n alt_name = ?\n UNION ALL\n SELECT 1 FROM vertical_datum WHERE\n name = ?\n LIMIT 1\"\"\"\n res_set = conn.execute(vdatum_query, (v_datum_str, v_datum_str))\n return len(res_set.fetchall()) > 0\n\n def _check_dimensionless_vertical_coordinate_1_7(\n self, ds, vname, deprecated_units, ret_val, dim_vert_coords_dict\n ):\n \"\"\"\n Check that a dimensionless vertical coordinate variable is valid under\n CF-1.7.\n\n :param netCDF4.Dataset ds: open netCDF4 dataset\n :param str name: variable name\n :param list ret_val: array to append Results to\n :rtype None\n \"\"\"\n variable = ds.variables[vname]\n standard_name = getattr(variable, \"standard_name\", None)\n units = getattr(variable, \"units\", None)\n formula_terms = getattr(variable, \"formula_terms\", None)\n # Skip the variable if it's dimensional\n if formula_terms is None and standard_name not in dim_vert_coords_dict:\n return\n\n # assert that the computed_standard_name is maps to the standard_name correctly\n correct_computed_std_name_ctx = TestCtx(\n BaseCheck.MEDIUM, self.section_titles[\"4.3\"]\n )\n _comp_std_name = dim_vert_coords_dict[standard_name][1]\n correct_computed_std_name_ctx.assert_true(\n getattr(variable, \"computed_standard_name\", None) in _comp_std_name,\n \"§4.3.3 The standard_name of `{}` must map to the correct computed_standard_name, `{}`\".format(\n vname, _comp_std_name\n ),\n )\n ret_val.append(correct_computed_std_name_ctx.to_result())\n\n def check_dimensionless_vertical_coordinates(self, ds):\n \"\"\"\n Check the validity of 
dimensionless coordinates under CF\n\n CF §4.3.2 The units attribute is not required for dimensionless\n coordinates.\n\n The standard_name attribute associates a coordinate with its definition\n from Appendix D, Dimensionless Vertical Coordinates. The definition\n provides a mapping between the dimensionless coordinate values and\n dimensional values that can positively and uniquely indicate the\n location of the data.\n\n A new attribute, formula_terms, is used to associate terms in the\n definitions with variables in a netCDF file. To maintain backwards\n compatibility with COARDS the use of these attributes is not required,\n but is strongly recommended.\n\n :param netCDF4.Dataset ds: An open netCDF dataset\n :rtype: list\n :return: List of results\n \"\"\"\n ret_val = []\n\n z_variables = cfutil.get_z_variables(ds)\n deprecated_units = [\"level\", \"layer\", \"sigma_level\"]\n\n # compose this function to use the results from the CF-1.6 check\n # and then extend it using a CF-1.7 addition\n ret_val.extend(\n self._check_dimensionless_vertical_coordinates(\n ds,\n deprecated_units,\n self._check_dimensionless_vertical_coordinate_1_6,\n dimless_vertical_coordinates_1_7,\n )\n )\n\n ret_val.extend(\n self._check_dimensionless_vertical_coordinates(\n ds,\n deprecated_units,\n self._check_dimensionless_vertical_coordinate_1_7,\n dimless_vertical_coordinates_1_7,\n )\n )\n\n return ret_val\n\n\nclass CFNCCheck(BaseNCCheck, CFBaseCheck):\n @classmethod\n def beliefs(cls): # @TODO\n return {}\n" ]
[ [ "numpy.isnan", "numpy.ma.isMA", "numpy.issubdtype", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
stuart-knock/bokeh
[ "b609e8a458701c246e8f3102c3dce45692b54fe4" ]
[ "bokeh/server/blaze/views.py" ]
[ "from __future__ import absolute_import\n\nimport datetime as dt\n\nimport pandas as pd\nimport numpy as np\nfrom blaze import into\nfrom flask import request\nfrom six import iteritems\n\nfrom ..app import bokeh_app\nfrom ... import protocol\nfrom ...transforms import line_downsample\nfrom ...transforms import image_downsample\nfrom ...transforms import ar_downsample\nfrom ...models.ranges import Range1d\nfrom ..serverbb import prune\nfrom ..views.backbone import init_bokeh\nfrom ..views import make_json\n\nfrom mbs.views import _compserver\nfrom mbs.app import mbsbp\n\n_span = ar_downsample._span\n\ndef _make_range(r):\n \"\"\"Create a range from the start/end values passed.\n This function is required because some BokehJS Range objects\n have ids but some don't and some have docs but some don't...\n so this is sort of a #Hack....\n\n This may be removed when a better plot_state mechanism is created.\n \"\"\"\n return Range1d(start=r['start'], end=r['end'])\n\[email protected](\"/render/<docid>/<datasourceid>/<glyphid>\", methods=['GET', 'POST'])\ndef render(docid, datasourceid, glyphid):\n #load bokeh document\n clientdoc = bokeh_app.backbone_storage.get_document(docid)\n prune(clientdoc)\n #\n\n #init plotting.py\n init_bokeh(clientdoc)\n serverdatasource = clientdoc._models[datasourceid]\n glyph = clientdoc._models[glyphid]\n parameters = serverdatasource.transform\n json_data = request.json\n json_data['expr'] = serverdatasource.expr\n json_data['namespace'] = serverdatasource.namespace\n plot_state = json_data['plot_state']\n render_state = json_data.get('render_state', None)\n auto_bounds = json_data.get('auto_bounds', False)\n\n #convert json objects into actual range objects (hacky!)\n plot_state=dict([(k, _make_range(r)) for k,r in iteritems(plot_state)])\n\n #compute blaze data using the blaze server blueprint\n expr, result = _compserver(json_data)\n\n #convert blaze server output into other dataframe or numpy\n data_type = parameters.get('type', 
'DataFrame')\n if data_type == 'DataFrame':\n data = into(pd.DataFrame, result)\n elif data_type == 'ndarray':\n data = into(np.ndarray, result)\n\n #call downsampling\n resample_op = serverdatasource.transform['resample']\n if resample_op == 'abstract rendering':\n result = ar_downsample.downsample(\n data,\n serverdatasource,\n glyph,\n plot_state,\n render_state,\n auto_bounds,\n )\n elif resample_op == 'line1d':\n result = line1d_downsample(\n data,\n serverdatasource,\n glyph,\n plot_state,\n render_state,\n auto_bounds,\n )\n elif resample_op == 'heatmap':\n result = heatmap_downsample(\n data,\n serverdatasource,\n glyph,\n plot_state,\n render_state,\n auto_bounds,\n )\n\n\n #return results\n result = make_json(protocol.serialize_json(result))\n return result\n\ndef convert_range_to_time(range_obj):\n #assume millis from javascript\n if isinstance(range_obj.start, int):\n range_obj.start = dt.datetime.fromtimestamp(range_obj.start / 1000.0)\n if isinstance(range_obj.end, int):\n range_obj.end = dt.datetime.fromtimestamp(range_obj.end / 1000.0)\n\n\n\ndef line1d_downsample(raw_data, data_source, glyph, plot_state,\n render_state, auto_bounds):\n domain_name = glyph.x['field']\n range_name = glyph.y['field']\n domain_col = raw_data[domain_name]\n range_col = raw_data[range_name]\n\n if auto_bounds:\n plot_state['data_x'].start = domain_col.min()\n plot_state['data_x'].end = domain_col.max()\n plot_state['data_y'].start = range_col.min()\n plot_state['data_y'].end = range_col.max()\n if domain_col.dtype.kind == \"M\":\n convert_range_to_time(plot_state['data_x'])\n if range_col.dtype.kind == \"M\":\n convert_range_to_time(plot_state['data_y'])\n if data_source.transform.get('direction', 'x') == 'x':\n domain_r = plot_state['data_x']\n range_r = plot_state['data_y']\n domain_screen_r = plot_state['screen_x']\n else:\n raise NotImplementedError\n screen_d_span = _span(domain_screen_r)\n data_r_span = _span(range_r)\n domain_limit = [domain_r.start, 
domain_r.end]\n if domain_col.dtype.kind == \"M\":\n domain_limit = np.array(domain_limit).astype('datetime64[ms]')\n raw_data = raw_data[(domain_col > domain_limit[0]) & (domain_col < domain_limit[1])]\n result = line_downsample.downsample(raw_data.to_records(),\n domain_name,\n range_name,\n domain_limit,\n data_r_span,\n screen_d_span,\n 'minmax')\n result['x_range'] = {'start': plot_state['data_x'].start,\n 'end': plot_state['data_x'].end}\n result['y_range'] = {'start': plot_state['data_y'].start,\n 'end': plot_state['data_y'].end}\n return result\n\ndef heatmap_downsample(raw_data, data_source, glyph, plot_state,\n render_state, auto_bounds):\n \n screen_x_r = plot_state['screen_x']\n screen_y_r = plot_state['screen_x']\n x_resolution = float(_span(screen_x_r))\n y_resolution = float(_span(screen_y_r))\n\n global_x_range = data_source.transform['global_x_range']\n global_y_range = data_source.transform['global_y_range']\n \n image_x_axis = np.linspace(global_x_range[0],\n global_x_range[1],\n raw_data.shape[1])\n image_y_axis = np.linspace(global_y_range[0],\n global_y_range[1],\n raw_data.shape[0])\n result = image_downsample.downsample(\n raw_data, image_x_axis, image_y_axis,\n plot_state['data_x'], plot_state['data_y'], x_resolution,\n y_resolution)\n output = result\n return output\n" ]
[ [ "numpy.array", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
victorlifan/msds689
[ "d1c45bec1c5e00a6892316eaac9421d514afb96d" ]
[ "projects/iforest/savecancer.py" ]
[ "from sklearn.datasets import load_breast_cancer\nimport pandas as pd\n\ncancer = load_breast_cancer()\ndf = pd.DataFrame(data=cancer.data, columns=cancer.feature_names)\ndf['diagnosis'] = cancer.target\ndf.loc[df.diagnosis==0,'diagnosis'] = -1\ndf.loc[df.diagnosis==1,'diagnosis'] = 0\ndf.loc[df.diagnosis==-1,'diagnosis'] = 1\ndf.to_csv(\"cancer.csv\", index=False)\n" ]
[ [ "sklearn.datasets.load_breast_cancer", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
LoremasterLH/MatlabToPython
[ "bdfd827d7fc143332e3945fc980e915c6324eb6c" ]
[ "1-Vzorcenje/ukazi.py" ]
[ "# Author: Martin Konečnik\n# Contact: [email protected]\n# Licenced under MIT\n\n# 1-Vzorčenje\n# Predlagan IDE PyCharm Community Edition (na voljo za Windows, macOS in Linux)\n# https://www.jetbrains.com/pycharm/download\n# Navodila za pridobitev potrebnih knjižnic:\n# https://www.jetbrains.com/help/pycharm/installing-uninstalling-and-upgrading-packages.html\n# Kratka navodila. Znotraj GUI knjižnice dodamo prek File->Settings->Project: Name->Project Interpreter.\n# V tem oknu na desni strani kliknemo na plus in vpišemo ime knjižnice.\n# Privzeta bližnjica za zagon izbora je Alt+Shift+E\n\nimport numpy as np\nimport scipy.signal\nfrom matplotlib import cm # color mapping\nimport pylab as pylab\nimport matplotlib.pyplot as plt\nimport sounddevice as sd\nfrom pathlib import Path\nfrom PIL import Image\nfrom mpl_toolkits.mplot3d import axes3d\nfrom mpl_toolkits.mplot3d import art3d\n\n# ----------------------------------------------------------------------------------------\n# Vzorčenje in Nyquistov teorem;\nFvz = 100 # Frekvenca vzorčenja (v Hz)\nT = 1 # dolžina signala (v s)\ni = np.arange(float(T) * Fvz) / Fvz # vektor časovnih indeksov\nf1 = 5 # frekvenca sinusoide\nA1 = 1 # amplituda sinusoide\nfaza1 = 0.0 # faza sinusoide\n\n# ukazi.m:10 -- NOTE: Enak rezultat kot v Matlab 2017b\n# izris sinuside pri razlicnih fazah\nplt.figure() # Ta vrstica ni nujna, če odpremo le eno okno.\nfor faza1 in np.arange(0, 6.1, 0.1):\n plt.cla()\n # pri množenju z matriko je potrebno uporabiti numpy.np.dot(...)\n s = np.dot(A1, np.sin(np.dot(2 * np.pi * f1, i) + faza1 * np.pi))\n plt.plot(i, s)\n setattr(plt.gca, 'YLim', [-1, 1])\n plt.title('Fvz = {0} Hz, Frekvenca = {1} Hz, faza = {2} $\\pi$'.format(Fvz, f1, round(faza1, 1)))\n plt.xlabel('Čas (s)')\n plt.ylabel('Amplituda (dB)')\n plt.tight_layout()\n plt.waitforbuttonpress()\n\n# ukazi.m:23 -- NOTE: Preverjeno z Matlab\n# izris sinusid pri različnih frekvencah\nfaza1 = 0.0\nplt.figure()\nfor f1 in np.arange(Fvz + 1):\n 
plt.cla() # počistimo graf za naslednjo iteracijo\n s = np.dot(A1, np.sin(np.dot(2 * np.pi * f1, i) + faza1 * np.pi))\n plt.plot(i, s)\n plt.ylim(-1, 1)\n plt.title('Fvz = {0} Hz, Frekvenca = {1} Hz, faza = {2} $\\pi$'.format(Fvz, f1, faza1))\n plt.xlabel('Čas (s)')\n plt.ylabel('Amplituda (dB)')\n plt.pause(0.025)\n\n# ukazi.m:37 -- NOTE: Preverjeno z Matlab\n# izris sinusoid s frekvenco f1 in Fvz-f1\nf1 = 1\ns1 = np.sin(np.dot(2 * np.pi * f1, i) + faza1 * np.pi)\nplt.figure()\nplt.plot(i, s1, 'b')\nf2 = Fvz - f1\ns2 = np.sin(np.dot(np.dot(np.dot(2, np.pi), f2), i) + np.dot(faza1, np.pi))\nplt.plot(i, s2, 'r')\nplt.xlabel('Čas (s)')\nplt.ylabel('Amplituda')\nplt.title('Fvz = {0} Hz, Frekvenca1 = {1} Hz, Frekvenca2 = {2} Hz, faza = {3} $\\pi$'.format(Fvz, f1, f2, faza1))\nplt.tight_layout()\n# ----------------------------------------------------------------------------------------\n\n# glej tudi primera v Mathematici: (WheelIllusion.nbp in SamplingTheorem.nbp)\n\n# V Z O R Č E N J E Z V O K A\n# -----------------------------------------------------------------------------------------\n# ukazi.m:56 -- NOTE: Primerljiv rezultat v Matlab\n# vzorčenje zvoka\nFs = 44100 # vzorčevalna frekvenca\nbits = 16 # bitna ločljivost\nnchans = 1 # 1 (mono), 2 (stereo).\nposnetek = sd.rec(5 * Fs, Fs, nchans, blocking=True)\n\nplt.figure()\nplt.plot(posnetek)\n\nsd.play(posnetek, 44100)\nsd.play(posnetek, 44100 / 2)\nsd.play(posnetek, 2 * 44100)\n\n# -----------------------------------------------------------------------------------------\n# ukazi.m:73 -- NOTE: Primerljiv rezultat v Matlab\n# ali zaznate fazne spremembe? 
Spreminjajte faza1 med 0 in 2.0 in poženite ta demo...\nFvz = 44100 # vzorčevalna frekvenca\nT = 3 # čas v sekundah\ni = np.arange(0.0, T * Fvz, 1) / Fvz # vektor časovnih indeksov\nf1 = 500 # frekvenca sinusoide\nA1 = 0.3 # amplituda sinusoide\nfaza1 = 1.0 # faza sinusoide\n\ns = np.dot(A1, np.sin(np.dot(2 * np.pi * f1, i) + faza1 * np.pi)) # tvorjenje sinusoide\ns2 = np.dot(A1, np.sin(np.dot(2 * np.pi * f1, i) + 0 * np.pi)) # tvorjenje sinusoide\n\nsd.play(np.concatenate((s, s2)), Fvz) # pozor, dvojni oklepaji pri concatenate, ker sta s1 in s2 en parameter!\n# \"navadna\" polja združiš z s+s2, v numpy.array pa to sešteje istoležne elemente\n\n\n# -----------------------------------------------------------------------------------------\n# ukazi.m:88 -- NOTE: Primerljiv rezultat v Matlab\n# trije poskusi: 1. f1 = 50;\n# 2. f1 = 450;\n# 3. f1 = 1450;\n# 4. f1 = 2450;\n\nFvz = 44100 # vzorčevalna frekvenca\nT = 3 # čas v sekundah\ni = np.arange(T * Fvz) / Fvz # vektor časovnih indeksov\nf1 = 50 # frekvenca sinusoide\nA1 = 5.5 # amplituda sinusoide\nfaza1 = 0.0 # faza sinusoide\nf2 = f1 + 1 # frekvenca druge sinusoide\n\ns1 = np.dot(A1, np.sin(np.dot(2 * np.pi * f1, i) + faza1 * np.pi)) # tvorjenje prve sinusoide\ns2 = np.dot(A1, np.sin(np.dot(2 * np.pi * f2, i) + faza1 * np.pi)) # tvorjenje druge sinusoide\nsd.play(np.concatenate((s1, s2)), Fvz)\n\n# -----------------------------------------------------------------------------------------\n# ukazi.m:106 -- NOTE: Primerljiv rezultat v Matlab\n# ali zaznate zvok netopirja pri 90000 Hz? 
Nyquist?\nFvz = 44100 # vzorčevalna frekvenca\nT = 3\ni = np.arange(T * Fvz) / Fvz # vektor časovnih indeksov\nfnetopir = 140000 # frekvenca sinusoide\nA1 = 5.5 # amplituda sinusoide\nfaza1 = 1.0 # faza sinusoide\n\ns = np.dot(A1, np.sin(np.dot(2 * np.pi * fnetopir, i) + faza1 * np.pi)) # tvorjenje sinusoide\nsd.play(s, Fvz)\n\n# ukazi.m:118 -- NOTE: Preverjeno z Matlab\n# izris sinuside pri razlicnih fazah (verzija 2)\nFvz = 100\nT = 1\ni = np.arange(T * Fvz) / Fvz\nf1 = 5\nA1 = 5\nfaza1 = 0.0\n\n# spremninjanje frekvence...\nplt.close('all')\nfig, ax = plt.subplots(2) # create a figure with 2 subplots\nfig.tight_layout(rect=[0, 0.03, 1, 0.95])\n\nfor f1 in np.arange(0, Fvz+1):\n ax[0].clear()\n ax[1].clear()\n\n s = np.sin(np.dot(2 * np.pi * f1, i) + faza1 * np.pi)\n ax[0].plot(s)\n ax[0].set_ylim(-1, 1)\n ax[0].set_title('Časovna domena: Fvz = {0} Hz, Frekvenca = {1} Hz, faza = {2} $\\pi$'.format(Fvz, f1, faza1))\n\n ax[1].plot(abs(np.fft.fft(s)), 'r')\n ax[1].set_ylim(-1, 1)\n ax[1].set_title('Frekvenčna domena (abs): Fvz = {0} Hz, Frekvenca = {1} Hz, faza = {2} $\\pi$'.format(Fvz, f1, faza1))\n\n plt.waitforbuttonpress()\n\n\n# in faze... (več o tem na naslednjih vajah)\nf1 = 5\nA1 = 5\nfaza1 = 0.0\nplt.close('all')\nfig, ax = plt.subplots(2)\nfig.tight_layout(rect=[0, 0.03, 1, 0.95])\n\nfor faza1 in np.arange(0, 2.1, 0.1):\n ax[0].clear()\n ax[1].clear()\n\n s = np.sin(np.dot(2 * np.pi * f1, i) + faza1 * np.pi)\n ax[0].plot(s)\n ax[0].set_ylim(-1, 1)\n ax[0].set_title('Časovna domena: Fvz = {0} Hz, Frekvenca = {1} Hz, faza = {2} $\\pi$'.format(Fvz, f1, round(faza1, 1)))\n\n ax[1].plot(abs(np.fft.fft(s)), 'r')\n ax[1].set_ylim(-1, 1)\n ax[1].set_title('Frekvenčna domena (abs): Fvz = {0} Hz, Frekvenca = {1} Hz, faza = {2} $\\pi$'.format(Fvz, f1, round(faza1, 1)))\n\n plt.waitforbuttonpress()\n\n# S L I K E\n# -----------------------------------------------------------------------------------------\n# ukazi.m:181 -- NOTE: Preverjetno v Matlabu. 
Del ne deluje pravilno (označeno)\n# vzorčenje slik in Moire\n# Če datoteke ne najde, preverite pod \"Settings -> Project: Name -> Project Structure\" kje je root.\nA = pylab.array(Image.open(Path('./1-Vzorcenje/Moire.jpg')))\nplt.figure(figsize=(10, 10))\nplt.axis('off')\nplt.imshow(A)\nplt.title('originalna slika')\n\npvz = 3\nplt.figure()\nplt.axis('off')\nplt.imshow(A[::pvz, ::pvz])\nplt.title('podvzorčena slika: faktor podvzorčenja {0}'.format(pvz))\n\n# Celotna slika bistveno svetlejša kot v matlabu. Pri bitni ločljivosti 2, sta namesto sivin rumena in modra barva.\n# Podatki?\nst_bit = 2\nkvant = 2 ** (9 - st_bit)\nplt.figure(figsize=(10, 10))\nplt.axis('off')\nplt.imshow(np.dot(np.round(A[:, :, :] / kvant), kvant))\nplt.title('slika pri bitni ločljivosti {0}'.format(st_bit))\n\nfig, ax = plt.subplots(2, 2)\nfig.tight_layout()\n\nax[0, 0].imshow(np.dot(np.round(A[:, :, :] / kvant), kvant))\nax[0, 0].set_title('slika pri bitni ločljivosti {0}'.format(st_bit))\nax[0, 0].set_xticklabels([])\nax[0, 0].set_yticklabels([])\n\nax[0, 1].imshow(np.dot(np.round(A[:, :, 0] / kvant), kvant))\nax[0, 1].set_title('ravnina R pri bitni ločljivosti {0}'.format(st_bit))\nax[0, 1].set_xticklabels([])\nax[0, 1].set_yticklabels([])\n\nax[1, 0].imshow(np.dot(np.round(A[:, :, 1] / kvant), kvant))\nax[1, 0].set_title('ravnina G pri bitni ločljivosti {0}'.format(st_bit))\nax[1, 0].set_xticklabels([])\nax[1, 0].set_yticklabels([])\n\nax[1, 1].imshow(np.dot(np.round(A[:, :, 2] / kvant), kvant))\nax[1, 1].set_title('ravnina B pri bitni ločljivosti {0}'.format(st_bit))\nax[1, 1].set_xticklabels([])\nax[1, 1].set_yticklabels([])\n\n# -----------------------------------------------------------------------------------------\n# ukazi.m:215 -- Note: Plotting not working properly yet. 
Data shown in 2D.\n# spekter slik, Moire in Diskretna Fourierova transformacija (fft2)\nA = pylab.array(Image.open(Path('./1-Vzorcenje/Moire.jpg')))\nplt.figure().set_size_inches(10, 10)\nplt.imshow(A)\nplt.title('originalna slika')\nplt.axis('off')\n\nplt.close('all')\n\n# ukazi.m:224\nB = np.double(A[:, :, 0])\n\nfig = plt.figure()\nfig.set_size_inches(7, 7)\nax = fig.add_subplot(111, projection='3d')\nX = Y = np.array([np.arange(100), ]*100) # Creates a 100 * 100 array\nZ = abs(np.fft.fft2(B - np.mean(np.ravel(B)), s=(100, 100)))\nwire = ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10)\n\n# Retrive data from internal storage of plot_wireframe, then delete it\nnx, ny, _ = np.shape(wire._segments3d)\nwire_x = np.array(wire._segments3d)[:, :, 0].ravel()\nwire_y = np.array(wire._segments3d)[:, :, 1].ravel()\nwire_z = np.array(wire._segments3d)[:, :, 2].ravel()\nwire.remove()\n\n# create data for a LineCollection\nwire_x1 = np.vstack([wire_x, np.roll(wire_x, 1)])\nwire_y1 = np.vstack([wire_y, np.roll(wire_y, 1)])\nwire_z1 = np.vstack([wire_z, np.roll(wire_z, 1)])\nto_delete = np.arange(0, nx*ny, ny)\nwire_x1 = np.delete(wire_x1, to_delete, axis=1)\nwire_y1 = np.delete(wire_y1, to_delete, axis=1)\nwire_z1 = np.delete(wire_z1, to_delete, axis=1)\nscalars = np.delete(wire_z, to_delete)\n\nsegs = [list(zip(xl, yl, zl)) for xl, yl, zl in \\\n zip(wire_x1.T, wire_y1.T, wire_z1.T)]\n\n# Plots the wireframe by a a line3DCollection\nmy_wire = art3d.Line3DCollection(segs, cmap=\"hsv\")\nmy_wire.set_array(scalars)\nax.add_collection(my_wire)\n\nplt.colorbar(my_wire)\nplt.show()\n\n\n# ukazi.m:226 -- Note: Not implemented. 
Same chart as previous section.\nB = np.double(A[:, :, 1])\nX, Y = np.meshgrid(np.arange(100), np.arange(100))\nZ = abs(np.fft.fft2(B - np.mean(np.ravel(B)))[0:100, 0:100])\ncolors = cm.Blues(Z)\nrcount, ccount, _ = colors.shape\n\n\nfig = plt.figure()\nfig.set_size_inches(7, 7)\nax = fig.gca(projection='3d')\nsurf = ax.plot_surface(X, Y, Z, rcount=rcount, ccount=ccount, facecolors=colors, shade=False)\nsurf.set_facecolor((0, 0, 0, 0))\nplt.title('G ravnina')\nplt.show()\n\n# ukazi.m:228 -- Note: Not implemented. Same chart as previous section.\nB = np.double(A[:, :, 2])\nX, Y = np.meshgrid(np.arange(100), np.arange(100))\nZ = abs(np.fft.fft2(B - np.mean(np.ravel(B)))[0:100, 0:100])\ncolors = cm.Blues(Z)\nrcount, ccount, _ = colors.shape\n\nfig = plt.figure()\nfig.set_size_inches(7, 7)\nax = fig.gca(projection='3d')\nsurf = ax.plot_surface(X, Y, Z, rcount=rcount, ccount=ccount, facecolors=colors, shade=False)\nsurf.set_facecolor((0, 0, 0, 0))\nplt.title('B ravnina')\nplt.show()\n\n# ukazi.m:231 -- Note: Not implemented. 
Same chart as previous section.\n# prevzorčena slika..................................\npvz = 4\nB = np.double(A[0::pvz, 0::pvz, 1])\nX, Y = np.meshgrid(np.arange(100), np.arange(100))\nZ = abs(np.fft.fft2(B - np.mean(np.ravel(B)))[0:100, 0:100])\ncolors = cm.Blues(Z)\nrcount, ccount, _ = colors.shape\n\nfig = plt.figure()\nfig.set_size_inches(7, 7)\nax = fig.gca(projection='3d')\nsurf = ax.plot_surface(X, Y, Z, rcount=rcount, ccount=ccount, facecolors=colors, shade=False)\nsurf.set_facecolor((0, 0, 0, 0))\nplt.title('R ravnina, po podvzorenju s faktorjem {0}'.format(pvz))\nplt.show()\n\n# ukazi.m:235\nB = np.double(A[0::pvz, 0::pvz, 2])\nX, Y = np.meshgrid(np.arange(100), np.arange(100))\nZ = abs(np.fft.fft2(B - np.mean(np.ravel(B)))[0:100, 0:100])\ncolors = cm.Blues(Z)\nrcount, ccount, _ = colors.shape\n\nfig = plt.figure()\nfig.set_size_inches(7, 7)\nax = fig.gca(projection='3d')\nsurf = ax.plot_surface(X, Y, Z, rcount=rcount, ccount=ccount, facecolors=colors, shade=False)\nsurf.set_facecolor((0, 0, 0, 0))\nplt.title('G ravnina, po podvzorenju s faktorjem {0}'.format(pvz))\nplt.show()\n\n# ukazi.m:237 -- Note: Not implemented. 
Same chart as previous section.\nB = np.double(A[0::pvz, 0::pvz, 3])\nX, Y = np.meshgrid(np.arange(100), np.arange(100))\nZ = abs(np.fft.fft2(B - np.mean(np.ravel(B)))[0:100, 0:100])\ncolors = cm.Blues(Z)\nrcount, ccount, _ = colors.shape\n\nfig = plt.figure()\nfig.set_size_inches(7, 7)\nax = fig.gca(projection='3d')\nsurf = ax.plot_surface(X, Y, Z, rcount=rcount, ccount=ccount, facecolors=colors, shade=False)\nsurf.set_facecolor((0, 0, 0, 0))\nplt.title('B ravnina, po podvzorenju s faktorjem {0}'.format(pvz))\nplt.show()\n\n# -----------------------------------------------------------------------------------------\n# ukazi.m:241\n# podvzorčenje slik in operator povprečenja\n\nA = pylab.array(Image.open(Path('./1-Vzorcenje/Moire.jpg')))\nplt.figure().set_size_inches(10, 10)\nplt.imshow(A)\nplt.title('originalna slika')\nplt.axis('off')\n\npvz = 3 # faktor podvzorčenja\nplt.figure()\nplt.imshow(A[0::pvz, 0::pvz, :])\nplt.title('podvzorčena slika: faktor podvzorčenja {0}'.format(pvz))\n\n# ukazi.m:253 -- Note: Mislim, da je ok, čeprav rezultat različen kot v Matlab (poglej spodaj).\n# operator povprečenja (verzija 1)\n# Zglajena slika v Matlabu ima pri meni moder odtenek. 
Nisem prepričan, če tako mora biti (tukaj odtenka ni).\nD = 3 # premer lokalne okolice piksla, na kateri se izračuna povprečna vrednost\nB = np.ndarray((A.shape[0] - D + 1, A.shape[1] - D + 1, D))\nfor r in np.arange(0, A.shape[0] - D).reshape(-1):\n for c in np.arange(0, A.shape[1] - D).reshape(-1):\n C = A[r + np.arange(0, D - 1), c + np.arange(0, D - 1), 0]\n B[r, c, 0] = np.mean(np.ravel(C))\n C = A[r + np.arange(0, D - 1), c + np.arange(0, D - 1), 1]\n B[r, c, 1] = np.mean(np.ravel(C))\n C = A[r + np.arange(0, D - 1), c + np.arange(0, D - 1), 2]\n B[r, c, 2] = np.mean(np.ravel(C))\n\nplt.figure()\nplt.imshow(np.uint8(B))\nplt.title('zglajena slika')\n\n# ukazi.m:270 -- Note: Naslovi se prekrivajo.\n# operator povpreenja (verzija 2)\n# isti operator povprečenja kot zgoraj, implementiran nekoliko drugače (veliko hitreja izvedba)\nD = 3\nB = np.ndarray((A.shape[0] + D - 1, A.shape[1] + D - 1, D))\nB[:, :, 0] = scipy.signal.convolve2d(np.double(A[:, :, 0]), np.ones((D, D), np.float) / D ** 2)\nB[:, :, 1] = scipy.signal.convolve2d(np.double(A[:, :, 1]), np.ones((D, D), np.float) / D ** 2)\nB[:, :, 2] = scipy.signal.convolve2d(np.double(A[:, :, 2]), np.ones((D, D), np.float) / D ** 2)\nB = np.uint8(B)\nplt.figure()\nplt.imshow(B)\nplt.title('zglajena slika')\n\n# prikaz\npvz = 3 # faktor podvzorčenja\nplt.figure()\nplt.subplot(1, 2, 1)\nplt.imshow(A[0::pvz, 0::pvz, :])\nplt.title('podvzorčena slika: faktor podvzorčenja {0}'.format(pvz))\nplt.subplot(1, 2, 2)\nplt.imshow(B[1::pvz, 1::pvz, :])\nplt.title('zglajena podvzorena slika: faktor podvzorenja {0}'.format(pvz))\n" ]
[ [ "numpy.dot", "matplotlib.pyplot.imshow", "numpy.ndarray", "matplotlib.pyplot.plot", "numpy.concatenate", "numpy.round", "numpy.double", "numpy.roll", "matplotlib.pyplot.tight_layout", "numpy.arange", "numpy.uint8", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "matplotlib.pyplot.waitforbuttonpress", "numpy.ravel", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "numpy.delete", "matplotlib.cm.Blues", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.ylabel", "numpy.fft.fft", "matplotlib.pyplot.cla", "matplotlib.pyplot.subplots", "numpy.ones", "matplotlib.pyplot.colorbar", "numpy.shape", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.pause" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
brianWeng0223/toad
[ "9aa755273c8fc055c8a244f244e5437c189aa884" ]
[ "toad/nn/zoo/autoencoder_test.py" ]
[ "import sys\nimport torch\nimport pytest\nimport numpy as np\nfrom torch.utils.data import TensorDataset, DataLoader\n\nfrom .autoencoder import BaseAutoEncoder, VAE\n\n# skip testing with python 3.9 on linux\nif sys.version_info >= (3, 9) and sys.platform.startswith('linux'):\n pytest.skip(\"failed with python 3.9 on linux, need fix!\", allow_module_level = True)\n\n\nX = torch.Tensor(np.random.rand(20000, 784))\n\nloader = DataLoader(\n X,\n batch_size = 128,\n shuffle = True,\n)\n\ndef test_ae():\n ae = BaseAutoEncoder(784, 200, 10)\n ae.fit(loader, epoch = 1)\n\ndef test_vae():\n vae = VAE(784, 200, 10)\n vae.fit(loader, epoch = 1)\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fujikosu/tf-object-dection-DSVM
[ "a5fd231944e410ce16ad98d20b74fb02166051cd" ]
[ "object_detection/workaround/tfexample_decoder.py" ]
[ "import tensorflow as tf\n\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import sparse_ops\n\nItemHandler = tf.contrib.slim.tfexample_decoder.ItemHandler\n\nclass BackupHandler(ItemHandler):\n \"\"\"An ItemHandler that tries two ItemHandlers in order.\"\"\"\n\n def __init__(self, handler, backup):\n \"\"\"Initializes the BackupHandler handler.\n If the first Handler's tensors_to_item returns a Tensor with no elements,\n the second Handler is used.\n Args:\n handler: The primary ItemHandler.\n backup: The backup ItemHandler.\n Raises:\n ValueError: if either is not an ItemHandler.\n \"\"\"\n if not isinstance(handler, ItemHandler):\n raise ValueError('Primary handler is of type %s instead of ItemHandler'\n % type(handler))\n if not isinstance(backup, ItemHandler):\n raise ValueError('Backup handler is of type %s instead of ItemHandler'\n % type(backup))\n self._handler = handler\n self._backup = backup\n super(BackupHandler, self).__init__(handler.keys + backup.keys)\n\n def tensors_to_item(self, keys_to_tensors):\n item = self._handler.tensors_to_item(keys_to_tensors)\n return control_flow_ops.cond(\n pred=math_ops.equal(math_ops.reduce_prod(array_ops.shape(item)), 0),\n true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors),\n false_fn=lambda: item)\n\nclass Tensor(ItemHandler):\n \"\"\"An ItemHandler that returns a parsed Tensor.\"\"\"\n\n def __init__(self, tensor_key, shape_keys=None, shape=None, default_value=0):\n \"\"\"Initializes the Tensor handler.\n Tensors are, by default, returned without any reshaping. However, there are\n two mechanisms which allow reshaping to occur at load time. If `shape_keys`\n is provided, both the `Tensor` corresponding to `tensor_key` and\n `shape_keys` is loaded and the former `Tensor` is reshaped with the values\n of the latter. 
Alternatively, if a fixed `shape` is provided, the `Tensor`\n corresponding to `tensor_key` is loaded and reshape appropriately.\n If neither `shape_keys` nor `shape` are provided, the `Tensor` will be\n returned without any reshaping.\n Args:\n tensor_key: the name of the `TFExample` feature to read the tensor from.\n shape_keys: Optional name or list of names of the TF-Example feature in\n which the tensor shape is stored. If a list, then each corresponds to\n one dimension of the shape.\n shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is\n reshaped accordingly.\n default_value: The value used when the `tensor_key` is not found in a\n particular `TFExample`.\n Raises:\n ValueError: if both `shape_keys` and `shape` are specified.\n \"\"\"\n if shape_keys and shape is not None:\n raise ValueError('Cannot specify both shape_keys and shape parameters.')\n if shape_keys and not isinstance(shape_keys, list):\n shape_keys = [shape_keys]\n self._tensor_key = tensor_key\n self._shape_keys = shape_keys\n self._shape = shape\n self._default_value = default_value\n keys = [tensor_key]\n if shape_keys:\n keys.extend(shape_keys)\n super(Tensor, self).__init__(keys)\n\n def tensors_to_item(self, keys_to_tensors):\n tensor = keys_to_tensors[self._tensor_key]\n shape = self._shape\n if self._shape_keys:\n shape_dims = []\n for k in self._shape_keys:\n shape_dim = keys_to_tensors[k]\n if isinstance(shape_dim, sparse_tensor.SparseTensor):\n shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim)\n shape_dims.append(shape_dim)\n shape = array_ops.reshape(array_ops.stack(shape_dims), [-1])\n if isinstance(tensor, sparse_tensor.SparseTensor):\n if shape is not None:\n tensor = sparse_ops.sparse_reshape(tensor, shape)\n tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)\n else:\n if shape is not None:\n tensor = array_ops.reshape(tensor, shape)\n return tensor\n\nclass LookupTensor(Tensor):\n \"\"\"An ItemHandler that returns a parsed 
Tensor, the result of a lookup.\"\"\"\n\n def __init__(self,\n tensor_key,\n table,\n shape_keys=None,\n shape=None,\n default_value=''):\n \"\"\"Initializes the LookupTensor handler.\n See Tensor. Simply calls a vocabulary (most often, a label mapping) lookup.\n Args:\n tensor_key: the name of the `TFExample` feature to read the tensor from.\n table: A tf.lookup table.\n shape_keys: Optional name or list of names of the TF-Example feature in\n which the tensor shape is stored. If a list, then each corresponds to\n one dimension of the shape.\n shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is\n reshaped accordingly.\n default_value: The value used when the `tensor_key` is not found in a\n particular `TFExample`.\n Raises:\n ValueError: if both `shape_keys` and `shape` are specified.\n \"\"\"\n self._table = table\n super(LookupTensor, self).__init__(tensor_key, shape_keys, shape,\n default_value)\n\n def tensors_to_item(self, keys_to_tensors):\n unmapped_tensor = super(LookupTensor, self).tensors_to_item(keys_to_tensors)\n return self._table.lookup(unmapped_tensor)" ]
[ [ "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.sparse_ops.sparse_tensor_to_dense", "tensorflow.python.ops.sparse_ops.sparse_reshape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] } ]
iz4vve-talks/misc-training
[ "4f676080e54539cbaf283e611278fdd5d7ef93c4" ]
[ "python-structures-presentation/code/bad_code.py" ]
[ "#!/usr/bin/env python\nimport time\nstart = time.time()\n# START OMIT\nimport numpy as np\n\ncounter = [['yes', 0], ['no', 0]]\nbig_list = np.random.randint(0, 10000, 10000000)\ncheck_list = np.random.randint(0, 99999, 1000)\n\nfor number in check_list:\n if number in big_list:\n counter[0][1] = counter[0][1] + 1\n else:\n counter[1][1] = counter[1][1] + 1\n# END OMIT\nprint(counter)\n\nprint(f\"Script executed in {time.time() - start:.2f} seconds\")" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
karthikchary/ga-learner-dst-repo
[ "99b52f4b00cdc9012850b18a096c6a9217c56eb0" ]
[ "Make-sense-of-census/code.py" ]
[ "# --------------\n# Importing header files\r\nimport numpy as np\r\nimport warnings\r\n\r\nwarnings.filterwarnings('ignore')\r\n#path='subset_1000.csv'\r\n#New record\r\nnew_record=[[50, 9, 4, 1, 0, 0, 40, 0]]\r\n\r\n#Reading file\r\ndata = np.genfromtxt(path, delimiter=\",\", skip_header=1)\r\n\r\n#Code starts here\r\nprint(\"\\nData: \\n\\n\", data)\r\n\r\nprint(\"\\nType of data: \\n\\n\", type(data))\r\n\r\ncensus = np.concatenate((data,new_record), axis=0)\r\ncensus.shape\r\nage = census[:,0]\r\nmax_age =age.max()\r\nmin_age =age.min()\r\nage_mean = round(np.mean(age),2)\r\nage_std = round(np.std(age),2)\r\nrace_0 = census[census[:,2]==0]\r\nrace_1 = census[census[:,2]==1]\r\nrace_2 = census[census[:,2]==2]\r\nrace_3 = census[census[:,2]==3]\r\nrace_4 = census[census[:,2]==4]\r\nlen_0=len(race_0)\r\nlen_1=len(race_1)\r\nlen_2=len(race_2)\r\nlen_3=len(race_3)\r\nlen_4=len(race_4)\r\nrace_list=[len_0, len_1,len_2, len_3, len_4]\r\n\r\nminority_race=race_list.index(min(race_list))\r\n\r\nsenior_citizens=census[census[:,0]>60]\r\nworking_hours_sum = senior_citizens.sum(axis=0)[6]\r\nsenior_citizens_len=len(senior_citizens)\r\navg_working = round(working_hours_sum/senior_citizens_len,2)\r\nprint(avg_working)\r\nhigh = census[census[:,1] > 10]\r\navg_pay_high=high[:,7].mean()\r\nlow = census[census[:,1] <= 10]\r\nprint(avg_pay_high)\r\navg_pay_low=low[:,7].mean()\r\nprint(avg_pay_low)\n\n\n" ]
[ [ "numpy.concatenate", "numpy.std", "numpy.mean", "numpy.genfromtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pchandrasekaran1595/Anime-Cartoon-Image-Classification
[ "541effbe8eb64e454262ffd7e33862bf38d81e39" ]
[ "CLI/utils.py" ]
[ "import os\nimport re\nimport cv2\nimport json\nimport torch\nimport imgaug\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom time import time\nfrom imgaug import augmenters\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader as DL\nfrom sklearn.model_selection import KFold\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nTRANSFORM_FINAL = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\nTRANSFORM = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.68999, 0.67253, 0.65927], [0.34386, 0.34976, 0.35475])])\n\nSAVE_PATH = \"saves\"\nif not os.path.exists(SAVE_PATH):\n os.makedirs(SAVE_PATH)\n\n\nclass DS(Dataset):\n def __init__(self, images: np.ndarray, labels: np.ndarray = None, transform=None, mode: str = \"train\"):\n\n assert re.match(r\"^train$\", mode, re.IGNORECASE) or re.match(r\"^valid$\", mode, re.IGNORECASE) or re.match(r\"^test$\", mode, re.IGNORECASE), \"Invalid Mode\"\n \n self.mode = mode\n self.transform = transform\n self.images = images\n\n if re.match(r\"^train$\", mode, re.IGNORECASE) or re.match(r\"^valid$\", mode, re.IGNORECASE):\n self.labels = labels\n\n def __len__(self):\n return self.images.shape[0]\n\n def __getitem__(self, idx):\n if re.match(r\"^train$\", self.mode, re.IGNORECASE) or re.match(r\"^valid$\", self.mode, re.IGNORECASE):\n return self.transform(self.images[idx]), torch.FloatTensor(self.labels[idx])\n else:\n return self.transform(self.images[idx])\n\n\ndef breaker(num: int = 50, char: str = \"*\") -> None:\n print(\"\\n\" + num*char + \"\\n\")\n\n\ndef load_data(path: str) -> tuple:\n assert \"images.npy\" in os.listdir(path) and \"labels.npy\" in os.listdir(path), \"Please run python np_make.py\"\n\n images = np.load(os.path.join(path, \"images.npy\"))\n labels = np.load(os.path.join(path, \"labels.npy\"))\n\n return images, labels\n\n\ndef 
get_augment(seed: int):\n imgaug.seed(seed)\n augment = augmenters.Sequential([\n augmenters.HorizontalFLip(p=0.15),\n augmenters.VerticalFLip(p=0.15),\n augmenters.Affine(scale=(0.5, 1.5), translate_percent=(-0.1, 0.1), rotate=(-45, 45)),\n ])\n return augment\n\n\ndef prepare_train_and_valid_dataloaders(path: str, mode: str, batch_size: int, seed: int, augment: bool=False):\n\n images, labels = load_data(path)\n\n for tr_idx, va_idx in KFold(n_splits=5, shuffle=True, random_state=seed).split(images, labels):\n tr_images, va_images, tr_labels, va_labels = images[tr_idx], images[va_idx], labels[tr_idx], labels[va_idx]\n break\n\n if augment:\n augmenter = get_augment(seed)\n tr_images = augmenter(images=tr_images)\n \n if re.match(r\"^full$\", mode, re.IGNORECASE) or re.match(r\"^semi$\", mode, re.IGNORECASE):\n tr_data_setup = DS(tr_images, tr_labels, TRANSFORM, \"train\")\n va_data_setup = DS(va_images, va_labels, TRANSFORM, \"valid\")\n else:\n tr_data_setup = DS(tr_images, tr_labels, TRANSFORM_FINAL, \"train\")\n va_data_setup = DS(va_images, va_labels, TRANSFORM_FINAL, \"valid\")\n\n dataloaders = {\n \"train\" : DL(tr_data_setup, batch_size=batch_size, shuffle=True, generator=torch.manual_seed(seed)),\n \"valid\" : DL(va_data_setup, batch_size=batch_size, shuffle=False)\n }\n\n return dataloaders\n\n\ndef save_graphs(L: list, A: list) -> None:\n TL, VL, TA, VA = [], [], [], []\n for i in range(len(L)):\n TL.append(L[i][\"train\"])\n VL.append(L[i][\"valid\"])\n TA.append(A[i][\"train\"])\n VA.append(A[i][\"valid\"])\n x_Axis = np.arange(1, len(TL) + 1)\n plt.figure(\"Plots\")\n plt.subplot(1, 2, 1)\n plt.plot(x_Axis, TL, \"r\", label=\"Train\")\n plt.plot(x_Axis, VL, \"b\", label=\"Valid\")\n plt.legend()\n plt.grid()\n plt.title(\"Loss Graph\")\n plt.subplot(1, 2, 2)\n plt.plot(x_Axis, TA, \"r\", label=\"Train\")\n plt.plot(x_Axis, VA, \"b\", label=\"Valid\")\n plt.legend()\n plt.grid()\n plt.title(\"Accuracy Graph\")\n plt.savefig(os.path.join(SAVE_PATH, 
\"Graphs.jpg\"))\n plt.close(\"Plots\")\n\n\ndef show_graphs(L: list, A: list) -> None:\n TL, VL, TA, VA = [], [], [], []\n for i in range(len(L)):\n TL.append(L[i][\"train\"])\n VL.append(L[i][\"valid\"])\n TA.append(A[i][\"train\"])\n VA.append(A[i][\"valid\"])\n x_Axis = np.arange(1, len(TL) + 1)\n plt.figure(figsize=(12, 8))\n plt.subplot(1, 2, 1)\n plt.plot(x_Axis, TL, \"r\", label=\"Train\")\n plt.plot(x_Axis, VL, \"b\", label=\"Valid\")\n plt.legend()\n plt.grid()\n plt.title(\"Loss Graph\")\n plt.subplot(1, 2, 2)\n plt.plot(x_Axis, TA, \"r\", label=\"Train\")\n plt.plot(x_Axis, VA, \"b\", label=\"Valid\")\n plt.legend()\n plt.grid()\n plt.title(\"Accuracy Graph\")\n plt.show()\n\n\ndef fit(model=None, optimizer=None, scheduler=None, epochs=None, early_stopping_patience=None, \n dataloaders=None, verbose=False) -> tuple:\n \n def get_accuracy(y_pred, y_true):\n y_pred = torch.sigmoid(y_pred)\n\n y_pred[y_pred > 0.5] = 1\n y_pred[y_pred <= 0.5] = 0\n\n return torch.count_nonzero(y_pred == y_true).item() / len(y_pred)\n \n if verbose:\n breaker()\n print(\"Training ...\")\n breaker()\n\n bestLoss, bestAccs = {\"train\" : np.inf, \"valid\" : np.inf}, {\"train\" : 0.0, \"valid\" : 0.0}\n Losses, Accuracies = [], []\n name = \"state.pt\"\n\n start_time = time()\n for e in range(epochs):\n e_st = time()\n epochLoss, epochAccs = {\"train\" : 0.0, \"valid\" : 0.0}, {\"train\" : 0.0, \"valid\" : 0.0}\n\n for phase in [\"train\", \"valid\"]:\n if phase == \"train\":\n model.train()\n else:\n model.eval()\n \n lossPerPass, accsPerPass = [], []\n\n for X,y in dataloaders[phase]:\n X, y = X.to(DEVICE), y.to(DEVICE)\n\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == \"train\"):\n output = model(X)\n loss = torch.nn.BCEWithLogitsLoss()(output, y)\n if phase == \"train\":\n loss.backward()\n optimizer.step()\n lossPerPass.append(loss.item())\n accsPerPass.append(get_accuracy(output, y))\n epochLoss[phase] = np.mean(np.array(lossPerPass))\n epochAccs[phase] = 
np.mean(np.array(accsPerPass))\n Losses.append(epochLoss)\n Accuracies.append(epochAccs)\n\n if early_stopping_patience:\n if epochLoss[\"valid\"] < bestLoss[\"valid\"]:\n bestLoss = epochLoss\n BLE = e + 1\n torch.save({\"model_state_dict\": model.state_dict(),\n \"optim_state_dict\": optimizer.state_dict()},\n os.path.join(SAVE_PATH, name))\n early_stopping_step = 0\n else:\n early_stopping_step += 1\n if early_stopping_step > early_stopping_patience:\n print(\"\\nEarly Stopping at Epoch {}\".format(e + 1))\n break\n \n if epochLoss[\"valid\"] < bestLoss[\"valid\"]:\n bestLoss = epochLoss\n BLE = e + 1\n torch.save({\"model_state_dict\" : model.state_dict(),\n \"optim_state_dict\" : optimizer.state_dict()},\n os.path.join(SAVE_PATH, name))\n \n if epochAccs[\"valid\"] > bestAccs[\"valid\"]:\n bestAccs = epochAccs\n BAE = e + 1\n \n if scheduler:\n scheduler.step(epochLoss[\"valid\"])\n \n if verbose:\n print(\"Epoch: {} | Train Loss: {:.5f} | Valid Loss: {:.5f} |\\\nTrain Accs: {:.5f} | Valid Accs: {:.5f} | Time: {:.2f} seconds\".format(e+1, \n epochLoss[\"train\"], epochLoss[\"valid\"], \n epochAccs[\"train\"], epochAccs[\"valid\"], \n time()-e_st))\n\n if verbose: \n breaker()\n print(f\"Best Validation Loss at Epoch {BLE}\")\n breaker()\n print(f\"Best Validation Accs at Epoch {BAE}\")\n breaker()\n print(\"Time Taken [{} Epochs] : {:.2f} minutes\".format(len(Losses), (time()-start_time)/60))\n breaker()\n print(\"Training Completed\")\n breaker()\n\n return Losses, Accuracies, BLE, BAE, name\n\n\ndef predict(model=None, mode: str = None, image_path: str = None, size: int = 320) -> str:\n model.load_state_dict(torch.load(\"saves/state.pt\", map_location=DEVICE)[\"model_state_dict\"])\n model.eval()\n model.to(DEVICE)\n\n image = cv2.resize(src=cv2.cvtColor(src=cv2.imread(image_path, cv2.IMREAD_COLOR), code=cv2.COLOR_BGR2RGB), dsize=(size, size), interpolation=cv2.INTER_AREA)\n labels = json.load(open(\"labels.json\", \"r\"))\n\n with torch.no_grad():\n if 
re.match(r\"^full$\", mode, re.IGNORECASE) or re.match(r\"^semi$\", mode, re.IGNORECASE):\n output = torch.sigmoid(model(TRANSFORM(image).to(DEVICE).unsqueeze(dim=0)))\n else:\n output = torch.sigmoid(model(TRANSFORM_FINAL(image).to(DEVICE).unsqueeze(dim=0)), dim=1)\n \n if output.item() > 0.5:\n output = 1\n else:\n output = 0\n \n return labels[str(output)].title()\n" ]
[ [ "matplotlib.pyplot.legend", "torch.load", "torch.utils.data.DataLoader", "sklearn.model_selection.KFold", "matplotlib.pyplot.plot", "torch.nn.BCEWithLogitsLoss", "torch.set_grad_enabled", "torch.no_grad", "torch.FloatTensor", "torch.cuda.is_available", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "torch.sigmoid", "matplotlib.pyplot.title", "numpy.array", "matplotlib.pyplot.show", "torch.count_nonzero", "torch.manual_seed", "matplotlib.pyplot.grid" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ispmor/trabajo-fin-de-grado
[ "65f761b343b02ec3e4b4f3f5eee7494958b0b396" ]
[ "nbeats_additional_functions.py" ]
[ "import os\nimport wfdb\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\nimport torch\nfrom torch.nn import functional as F\n\n\ndef plot_scatter(*args, **kwargs):\n plt.plot(*args, **kwargs)\n plt.scatter(*args, **kwargs)\n\n\ndef data_generator(x, y, batch_size):\n while True:\n for xy_pair in split((x, y), batch_size):\n yield xy_pair\n\n\ndef split(arr, size):\n arrays = []\n while len(arr) > size:\n slice_ = arr[:size]\n arrays.append(slice_)\n arr = arr[size:]\n arrays.append(arr)\n return arrays\n\n\ndef batcher(dataset, batch_size, infinite=False):\n while True:\n x, y = dataset\n for x_, y_ in zip(split(x, batch_size), split(y, batch_size)):\n yield x_, y_\n if not infinite:\n break\n\n\ndef load(checkpoint_name, model, optimiser):\n if os.path.exists(checkpoint_name):\n checkpoint = torch.load(checkpoint_name, map_location=torch.device('cpu'))\n model.load_state_dict(checkpoint['model_state_dict'])\n optimiser.load_state_dict(checkpoint['optimiser_state_dict'])\n grad_step = checkpoint['grad_step']\n #print(f'Restored checkpoint from {checkpoint_name}.')\n return grad_step\n return 0\n\n\ndef save(checkpoint_name, model, optimiser, grad_step):\n torch.save({\n 'grad_step': grad_step,\n 'model_state_dict': model.state_dict(),\n 'optimiser_state_dict': optimiser.state_dict()\n }, checkpoint_name)\n\n\ndef train_100_grad_steps(checkpoint_name, data, device, net, optimiser, test_losses):\n global_step = load(checkpoint_name, net, optimiser)\n for x_train_batch, y_train_batch in data:\n global_step += 1\n optimiser.zero_grad()\n net.train()\n _, forecast = net(torch.tensor(x_train_batch, dtype=torch.float).to(device))\n loss = F.mse_loss(forecast, torch.tensor(y_train_batch, dtype=torch.float).to(device))\n loss.backward()\n optimiser.step()\n #Juan\n #if global_step % 30 == 0:\n #print(f'grad_step = {str(global_step).zfill(6)}, tr_loss = {loss.item():.6f}, te_loss = {test_losses[-1]:.6f}')\n if 
global_step > 0 and global_step % 100 == 0:\n with torch.no_grad():\n save(checkpoint_name, net, optimiser, global_step)\n break\n\n\ndef fit(checkpoint_name, net, optimiser, data_generator, on_save_callback, device, max_grad_steps=10000):\n #print('--- Training ---')\n initial_grad_step = load(checkpoint_name, net, optimiser)\n for grad_step, (x, target) in enumerate(data_generator):\n grad_step += initial_grad_step\n optimiser.zero_grad()\n net.train()\n backcast, forecast = net(torch.tensor(x, dtype=torch.float).to(device))\n loss = F.mse_loss(forecast, torch.tensor(target, dtype=torch.float).to(device))\n loss.backward()\n optimiser.step()\n #print(f'grad_step = {str(grad_step).zfill(6)}, loss = {loss.item():.6f}')\n if grad_step % 1000 == 0 or (grad_step < 1000 and grad_step % 100 == 0):\n with torch.no_grad():\n save(checkpoint_name, net, optimiser, grad_step)\n if on_save_callback is not None:\n on_save_callback(x, target, grad_step)\n if grad_step > max_grad_steps:\n print('Finished.')\n break\n\n\ndef eval_test(backcast_length, forecast_length, net, norm_constant, test_losses, x_test, y_test):\n net.eval()\n _, forecast = net(torch.tensor(x_test, dtype=torch.float))\n singular_loss = F.mse_loss(forecast, torch.tensor(y_test, dtype=torch.float)).item()\n test_losses.append(singular_loss)\n #Juan\n #p = forecast.detach().numpy()\n \n p = forecast.detach().cpu().numpy()\n '''\n subplots = [221, 222, 223, 224]\n plt.figure(1)\n for plot_id, i in enumerate(np.random.choice(range(len(x_test)), size=4, replace=False)):\n ff, xx, yy = p[i] * norm_constant, x_test[i] * norm_constant, y_test[i] * norm_constant\n plt.subplot(subplots[plot_id])\n plt.grid()\n plot_scatter(range(0, backcast_length), xx, color='b')\n plot_scatter(range(backcast_length, backcast_length + forecast_length), yy, color='g')\n plot_scatter(range(backcast_length, backcast_length + forecast_length), ff, color='r')\n plt.show()\n \n '''\n\n\ndef get_avg_score(net, x_test, y_test):\n net.eval()\n 
_, forecast = net(torch.tensor(x_test, dtype=torch.float))\n singular_loss = F.mse_loss(forecast, torch.tensor(y_test, dtype=torch.float)).item()\n return singular_loss\n\n\ndef one_file_training_data(data_dir, file, forecast_length, backcast_length, batch_size):\n normal_signal_data = []\n normal_signal_x = []\n\n x = wfdb.io.rdsamp(data_dir + file[:-4])\n normal_signal_data.append(x[0][:, 3])\n normal_signal_x.append(range(0, int(x[1]['sig_len'])))\n\n normal_signal_data = [y for sublist in normal_signal_data for y in sublist]\n normal_signal_x = [y for sublist in normal_signal_x for y in sublist]\n normal_signal_data = np.array(normal_signal_data)\n normal_signal_x = np.array(normal_signal_x)\n normal_signal_data.flatten()\n normal_signal_x.flatten()\n\n norm_constant = np.max(normal_signal_data)\n #print(norm_constant)\n normal_signal_data = normal_signal_data / norm_constant # leak to the test set here.\n\n x_train_batch, y = [], []\n for i in range(backcast_length, len(normal_signal_data) - forecast_length):\n x_train_batch.append(normal_signal_data[i - backcast_length:i])\n y.append(normal_signal_data[i:i + forecast_length])\n\n x_train_batch = np.array(x_train_batch) # [..., 0]\n y = np.array(y) # [..., 0]\n\n if len(x_train_batch) > 30000:\n x_train_batch = x_train_batch[0:int(len(x_train_batch) / 4)]\n y = y[0:int(len(y) / 4)]\n \n \n\n c = int(len(x_train_batch) * 0.8)\n x_train, x_test, y_train, y_test = train_test_split(x_train_batch, y, test_size=0.005, random_state=17)\n #x_train, y_train = x_train_batch[:c], y[:c]\n #x_test, y_test = x_train_batch[c:], y[c:]\n #print(x_train.shape, x_test.shape)\n #print(y_train.shape, y_test.shape)\n data = data_generator(x_train, y_train, batch_size)\n\n return data, x_test, y_test, norm_constant\n\n\ndef organise_data(data, data_header, forecast_length, backcast_length, batch_size):\n normal_signal_data = []\n normal_signal_x = []\n\n normal_signal_data.append(data[3])\n normal_signal_x.append(range(0, 
int(data_header[0].split(' ')[3])))\n\n normal_signal_data = [y for sublist in normal_signal_data for y in sublist]\n normal_signal_x = [y for sublist in normal_signal_x for y in sublist]\n normal_signal_data = np.array(normal_signal_data)\n normal_signal_x = np.array(normal_signal_x)\n normal_signal_data.flatten()\n normal_signal_x.flatten()\n\n norm_constant = np.max(normal_signal_data)\n #print(norm_constant)\n normal_signal_data = normal_signal_data / norm_constant # leak to the test set here.\n\n x, y = [], []\n for i in range(backcast_length, len(normal_signal_data) - forecast_length):\n x.append(normal_signal_data[i - backcast_length:i])\n y.append(normal_signal_data[i:i + forecast_length])\n\n x = np.array(x) # [..., 0]\n y = np.array(y) # [..., 0]\n\n if len(x) > 5000:\n x = x[0:5000]\n y = y[0:5000]\n\n return x, y\n" ]
[ [ "matplotlib.pyplot.scatter", "sklearn.model_selection.train_test_split", "torch.tensor", "matplotlib.pyplot.plot", "numpy.max", "torch.no_grad", "torch.device", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MehnaazAsad/RESOLVE_Statistics
[ "a7bdcc896ca2c51ab3417c46f07efe8c16825597" ]
[ "src/data/smf_bmf_data.py" ]
[ "\"\"\"\n{This script plots SMF and BMF from all 3 surveys}\n\"\"\"\n\n# Libs\nfrom cosmo_utils.utils.stats_funcs import Stats_one_arr\nfrom cosmo_utils.utils import work_paths as cwpaths\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport math\n\n__author__ = '{Mehnaaz Asad}'\n\nrc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=10)\nrc('text', usetex=True)\n\ndef read_catl(path_to_file, survey):\n \"\"\"\n Reads survey catalog from file\n\n Parameters\n ----------\n path_to_file: string\n Path to survey catalog file\n\n survey: string\n Name of survey\n\n Returns\n ---------\n catl: pandas dataframe\n Survey catalog with grpcz, abs rmag and stellar mass limits\n \n volume: float\n Volume of survey\n\n cvar: float\n Cosmic variance of survey\n \"\"\"\n if survey == 'eco':\n columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag', \n 'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s', \n 'fc', 'grpmb', 'grpms']\n\n # 13878 galaxies\n eco_buff = pd.read_csv(path_to_file,delimiter=\",\", header=0, \\\n usecols=columns)\n\n if h == 1.0:\n volume = 151829.26 # Survey volume without buffer [Mpc/h]^3 in h=1.0\n cz_measurement = eco_buff.grpcz.values\n elif h == 0.7:\n #Survey volume without buffer [Mpc/h]^3\n volume = 151829.26 * 2.915 # convert from h = 1.0 to 0.7\n cz_measurement = eco_buff.cz.values\n cvar = 0.125\n\n if mass == 'smf':\n # 6456 galaxies \n catl = eco_buff.loc[(cz_measurement >= 3000) & \\\n (cz_measurement <= 7000) & (eco_buff.absrmag.values <= -17.33) & \\\n (eco_buff.logmstar.values >= 8.9)]\n elif mass == 'bmf':\n # Removing stellar mass cut\n catl = eco_buff.loc[(cz_measurement >= 3000) & \\\n (cz_measurement <= 7000) & (eco_buff.absrmag.values <= -17.33)]\n \n elif survey == 'resolvea' or survey == 'resolveb':\n columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag', \n 'logmstar', 'logmgas', 
'grp', 'grpn', 'grpnassoc', 'logmh', \n 'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b']\n # 2286 galaxies\n resolve_live18 = pd.read_csv(path_to_file, delimiter=\",\", header=0, \\\n usecols=columns)\n\n if h == 1.0:\n volume = 13172.384 # Survey volume without buffer [Mpc/h]^3\n cz_measurement = resolve_live18.grpcz.values\n elif h == 0.7:\n #Survey volume without buffer [Mpc/h]^3\n volume = 13172.384 * 2.915 # convert from h = 1.0 to 0.7\n cz_measurement = resolve_live18.cz.values\n cvar = 0.30\n\n if survey == 'resolvea':\n if mass == 'smf':\n catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) & \\\n (cz_measurement > 4500) & (cz_measurement < 7000) & \\\n (resolve_live18.absrmag.values < -17.33) & \\\n (resolve_live18.logmstar.values >= 8.9)]\n elif mass == 'bmf':\n catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) & \\\n (cz_measurement > 4500) & (cz_measurement < 7000) & \\\n (resolve_live18.absrmag.values < -17.33)] \n \n elif survey == 'resolveb':\n if mass == 'smf':\n # 487 - cz, 369 - grpcz\n catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) & \\\n (cz_measurement > 4500) & (cz_measurement < 7000) & \\\n (resolve_live18.absrmag.values < -17) & \\\n (resolve_live18.logmstar.values >= 8.7)]\n elif mass == 'bmf':\n catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) & \\\n (cz_measurement > 4500) & (cz_measurement < 7000) & \\\n (resolve_live18.absrmag.values < -17)]\n\n if h == 1.0:\n volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3\n elif h == 0.7:\n #Survey volume without buffer [Mpc/h]^3\n volume = 4709.8373 * 2.915 # convert from h = 1.0 to 0.7\n cvar = 0.58\n\n return catl, volume, cvar\n\ndef diff_smf(mstar_arr, volume, cvar_err):\n \"\"\"\n Calculates differential stellar mass function\n\n Parameters\n ----------\n mstar_arr: numpy array\n Array of stellar masses\n\n volume: float\n Volume of survey or simulation\n\n cvar_err: float\n Cosmic variance of survey\n\n h1_bool: boolean\n True if units of masses 
are h=1, False if units of masses are not h=1\n\n Returns\n ---------\n maxis: array\n Array of x-axis mass values\n\n phi: array\n Array of y-axis values\n\n err_tot: array\n Array of error values per bin\n \n bins: array\n Array of bin edge values\n \"\"\"\n if h == 1.0:\n logmstar_arr = np.log10((10**mstar_arr) / 2.041)\n bin_num = 12\n elif h == 0.7: \n logmstar_arr = mstar_arr\n bin_num = 16\n if survey == 'eco' or survey == 'resolvea':\n bins = np.linspace(8.9, 11.8, bin_num)\n print(\"{0} : {1}\".format(survey,len(logmstar_arr[logmstar_arr>=8.9])))\n elif survey == 'resolveb':\n bins = np.linspace(8.7, 11.8, bin_num)\n print(\"{0} : {1}\".format(survey,len(logmstar_arr[logmstar_arr>=8.7])))\n # Unnormalized histogram and bin edges\n phi, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins\n dm = edg[1] - edg[0] # Bin width\n maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers\n # Normalized to volume and bin width\n err_poiss = np.sqrt(phi) / (volume * dm)\n err_cvar = cvar_err #/ (volume * dm)\n print('Poisson error: {0}'.format(err_poiss))\n \n err_tot = np.sqrt(err_cvar**2 + err_poiss**2)\n phi = phi / (volume * dm) # not a log quantity\n err_cvar = err_cvar*phi\n print('Cosmic variance error: {0}'.format(err_cvar))\n return maxis, phi, err_tot, bins\n\ndef calc_bary(mstar_arr, mgas_arr):\n \"\"\"\n Calculates baryonic mass from stellar and gas mass\n\n Parameters\n ----------\n mstar_arr: numpy array\n Array of stellar masses\n\n mgass_arr: numpy array\n Array of gas masses\n\n Returns\n ---------\n logmbary: numpy array\n Array of baryonic masses\n\n bin_num: int\n Number of bins to use\n \"\"\"\n if h == 1.0:\n logmbary = np.log10(((10**mstar_arr) + (10**mgas_arr)) / 2.041)\n bin_num = 12\n elif h == 0.7:\n logmbary = np.log10((10**mstar_arr) + (10**mgas_arr))\n bin_num = 16\n return logmbary, bin_num\n\ndef diff_bmf(logmbary_arr, volume, cvar_err, bin_num):\n \"\"\"\n Calculates differential baryonic mass function\n\n 
Parameters\n ----------\n mass_arr: numpy array\n Array of baryonic masses\n\n volume: float\n Volume of survey\n\n cvar_err: float\n Cosmic variance of survey\n \n bin_num: int\n Number of bins to use\n\n Returns\n ---------\n maxis: array\n Array of x-axis mass values\n\n phi: array\n Array of y-axis values\n\n err_tot: array\n Array of error values per bin\n \n bins: array\n Array of bin edge values\n \"\"\"\n # Unnormalized histogram and bin edges\n \n if survey == 'eco' or survey == 'resolvea':\n bins = np.linspace(9.4,12.0,bin_num)\n print(\"{0} : {1}\".format(survey,len(logmbary_arr[logmbary_arr>=9.4])))\n if survey == 'resolveb':\n bins = np.linspace(9.1,12.0,bin_num)\n print(\"{0} : {1}\".format(survey,len(logmbary_arr[logmbary_arr>=9.1])))\n phi, edg = np.histogram(logmbary_arr, bins=bins) # paper used 17 bins\n dm = edg[1] - edg[0] # Bin width\n maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers\n # Normalized to volume and bin width\n err_poiss = np.sqrt(phi) / (volume * dm)\n err_cvar = cvar_err #/ (volume * dm)\n print('Poisson error: {0}'.format(err_poiss))\n \n err_tot = np.sqrt(err_cvar**2 + err_poiss**2)\n phi = phi / (volume * dm) # not a log quantity\n err_cvar = phi * err_cvar\n print('Cosmic variance error: {0}'.format(err_cvar))\n err_tot = err_cvar * phi\n return maxis, phi, err_tot, bins\n\ndef plot_massfunc(maxis_70, phi_70, err_70, maxis_100, phi_100, err_100):\n \"\"\"\n Plot SMF from data, best fit param values and param values corresponding to \n 68th percentile 1000 lowest chi^2 values\n\n Parameters\n ----------\n maxis_70: array\n Array of x-axis mass values for data SMF assuming h=0.7\n\n phi_70: array\n Array of y-axis values for data SMF assuming h=0.7\n\n err_70: array\n Array of error values per bin of data SMF assuming h=0.7\n\n maxis_100: array\n Array of x-axis mass values for data SMF assuming h=1.0\n\n phi_100: array\n Array of y-axis values for data SMF assuming h=1.0\n\n err_100: array\n Array of error 
values per bin of data SMF assuming h=1.0\n\n Returns\n ---------\n Nothing; SMF plot is saved in figures repository\n \"\"\"\n if survey == 'resolvea':\n line_label = 'RESOLVE-A'\n elif survey == 'resolveb':\n line_label = 'RESOLVE-B'\n elif survey == 'eco':\n line_label = 'ECO'\n\n fig1 = plt.figure(figsize=(10,10))\n plt.plot(maxis_70,phi_70,'k-')\n plt.fill_between(maxis_70,phi_70-err_70,phi_70+err_70,color='g',alpha=0.3)\n plt.errorbar(maxis_70,phi_70,yerr=err_70,color='k',fmt='-s',ecolor='k',\\\n markersize=4,capsize=5,capthick=0.5,label='{0} h=0.7'.format(line_label),\\\n zorder=10)\n plt.plot(maxis_100,phi_100,'k--')\n plt.fill_between(maxis_100,phi_100-err_100,phi_100+err_100,color='b',alpha=0.3)\n plt.errorbar(maxis_100,phi_100,yerr=err_100,color='k',fmt='--s',ecolor='k',\\\n markersize=4,capsize=5,capthick=0.5,label='{0} h=1.0'.format(line_label),\\\n zorder=10)\n plt.yscale('log')\n plt.ylim(10**-5,10**-1)\n if mass == 'smf':\n plt.xlabel(r'\\boldmath$\\log_{10}\\ M_\\star \\left[\\mathrm{M_\\odot}\\, \\mathrm{h^{-2}} \\right]$', fontsize=15)\n # if h == 0.7:\n # plt.xlabel(r'\\boldmath$\\log_{10}\\ M_\\star \\left[\\mathrm{M_\\odot}\\, \\mathrm{h_{70}}^{-2} \\right]$', fontsize=15)\n # elif h == 1.0:\n # plt.xlabel(r'\\boldmath$\\log_{10}\\ M_\\star \\left[\\mathrm{M_\\odot}\\, \\mathrm{h_{100}}^{-2} \\right]$', fontsize=15)\n elif mass == 'bmf':\n plt.xlabel(r'\\boldmath$\\log_{10}\\ M_{bary} \\left[\\mathrm{M_\\odot}\\, \\mathrm{h^{-2}} \\right]$', fontsize=15)\n # if h == 0.7:\n # plt.xlabel(r'\\boldmath$\\log_{10}\\ M_{bary} \\left[\\mathrm{M_\\odot}\\, \\mathrm{h_{70}}^{-2} \\right]$', fontsize=15)\n # elif h == 1.0:\n # plt.xlabel(r'\\boldmath$\\log_{10}\\ M_{bary} \\left[\\mathrm{M_\\odot}\\, \\mathrm{h_{100}}^{-2} \\right]$', fontsize=15)\n plt.ylabel(r'\\boldmath$\\Phi \\left[\\mathrm{dex}^{-1}\\,\\mathrm{Mpc}^{-3}\\,\\mathrm{h^{3}} \\right]$', fontsize=15)\n handles, labels = plt.gca().get_legend_handles_labels()\n by_label = 
OrderedDict(zip(labels, handles))\n plt.legend(by_label.values(), by_label.keys(), loc='best',prop={'size': 10})\n plt.show()\n # plt.savefig(path_to_figures + '{0}_{1}.png'.format(mass,survey))\n\ndef plot_smf_bmf(maxis_smf, maxis_bmf, phi_smf, phi_bmf, err_smf, err_bmf):\n \"\"\"\n Plot SMF and BMF from data\n\n Parameters\n ----------\n maxis_smf: array\n Array of x-axis mass values for data SMF assuming h=1.0\n\n phi_smf: array\n Array of y-axis values for data SMF assuming h=1.0\n\n err_smf: array\n Array of error values per bin of data SMF assuming h=1.0\n\n maxis_bmf: array\n Array of x-axis mass values for data BMF assuming h=1.0\n\n phi_bmf: array\n Array of y-axis values for data BMF assuming h=1.0\n\n err_bmf: array\n Array of error values per bin of data BMF assuming h=1.0\n\n Returns\n ---------\n Nothing; SMF plot is saved in figures repository\n \"\"\"\n if survey == 'resolvea':\n line_label = 'RESOLVE-A'\n elif survey == 'resolveb':\n line_label = 'RESOLVE-B'\n elif survey == 'eco':\n line_label = 'ECO'\n\n fig2 = plt.figure(figsize=(10,10))\n plt.plot(maxis_smf,phi_smf,'k-')\n plt.fill_between(maxis_smf,phi_smf-err_smf,phi_smf+err_smf,color='g',alpha=0.3)\n plt.errorbar(maxis_smf,phi_smf,yerr=err_smf,color='k',fmt='-s',ecolor='k',\\\n markersize=4,capsize=5,capthick=0.5,label='{0} smf'.format(line_label),\\\n zorder=10)\n plt.plot(maxis_bmf,phi_bmf,'k--')\n plt.fill_between(maxis_bmf,phi_bmf-err_bmf,phi_bmf+err_bmf,color='b',alpha=0.3)\n plt.errorbar(maxis_bmf,phi_bmf,yerr=err_bmf,color='k',fmt='--s',ecolor='k',\\\n markersize=4,capsize=5,capthick=0.5,label='{0} bmf'.format(line_label),\\\n zorder=10)\n plt.yscale('log')\n plt.ylim(10**-5,10**-1)\n plt.xlabel(r'\\boldmath$\\log_{10}\\ M \\left[\\mathrm{M_\\odot}\\, \\mathrm{h^{-2}} \\right]$', fontsize=15)\n plt.ylabel(r'\\boldmath$\\Phi \\left[\\mathrm{dex}^{-1}\\,\\mathrm{Mpc}^{-3}\\,\\mathrm{h^{3}} \\right]$', fontsize=15)\n handles, labels = plt.gca().get_legend_handles_labels()\n by_label = 
OrderedDict(zip(labels, handles))\n plt.legend(by_label.values(), by_label.keys(), loc='best',prop={'size': 10})\n plt.show()\n \ndef args_parser():\n \"\"\"\n Parsing arguments passed to script\n\n Returns\n -------\n args: \n Input arguments to the script\n \"\"\"\n print('Parsing in progress')\n parser = argparse.ArgumentParser()\n parser.add_argument('survey', type=str, \\\n help='Options: eco/resolvea/resolveb')\n parser.add_argument('type', type=str, \\\n help='Options: type of mass function (smf/bmf)')\n args = parser.parse_args()\n return args\n\ndef main(args):\n \"\"\"\n Main function that calls all other functions\n \n Parameters\n ----------\n args: \n Input arguments to the script\n\n \"\"\"\n global survey\n global mass\n global h\n global path_to_figures\n\n survey = args.survey\n mass = args.type\n\n dict_of_paths = cwpaths.cookiecutter_paths()\n path_to_raw = dict_of_paths['raw_dir']\n path_to_figures = dict_of_paths['plot_dir']\n\n if survey == 'eco':\n catl_file = path_to_raw + \"eco_all.csv\"\n elif survey == 'resolvea' or survey == 'resolveb':\n catl_file = path_to_raw + \"RESOLVE_liveJune2018.csv\"\n\n h = 0.7\n print('Reading catalog')\n catl, volume, cvar = read_catl(catl_file, survey)\n print(\"{0} : {1} total\".format(survey, len(catl)))\n\n print('Retrieving masses from catalog')\n if mass == 'smf':\n mstellar_arr = catl.logmstar.values\n maxis_70, phi_70, err_70, bins_70 = \\\n diff_smf(mstellar_arr, volume, cvar)\n elif mass == 'bmf':\n mstellar_arr = catl.logmstar.values\n \n mgas_arr = catl.logmgas.values\n mbary_arr, bin_num = calc_bary(mstellar_arr, mgas_arr)\n maxis_70, phi_70, err_70, bins_70 = \\\n diff_bmf(mbary_arr, volume, cvar, bin_num)\n\n h = 1.0\n print('Reading catalog')\n catl, volume, cvar = read_catl(catl_file, survey)\n print(\"{0} : {1} total\".format(survey, len(catl)))\n \n print('Retrieving masses from catalog')\n if mass == 'smf':\n mstellar_arr = catl.logmstar.values\n maxis_100, phi_100, err_100, bins_100 = 
\\\n diff_smf(mstellar_arr, volume, cvar)\n elif mass == 'bmf':\n mstellar_arr = catl.logmstar.values\n mgas_arr = catl.logmgas.values\n mbary_arr, bin_num = calc_bary(mstellar_arr, mgas_arr)\n maxis_100, phi_100, err_100, bins_100 = \\\n diff_bmf(mbary_arr, volume, cvar, bin_num)\n \"\"\"\n print('Plotting')\n plot_massfunc(maxis_70, phi_70, err_70, maxis_100, phi_100, err_100) \n\n mstellar_arr = catl.logmstar.values\n maxis_smf, phi_smf, err_smf, bins_smf = \\\n diff_smf(mstellar_arr, volume, cvar)\n\n mstellar_arr = catl.logmstar.values\n mgas_arr = catl.logmgas.values\n mbary_arr, bin_num = calc_bary(mstellar_arr, mgas_arr)\n maxis_bmf, phi_bmf, err_bmf, bins_bmf = \\\n diff_bmf(mbary_arr, volume, cvar, bin_num)\n \n #Assuming h=1.0 which is what is used in MCMC\n plot_smf_bmf(maxis_smf, maxis_bmf, phi_smf, phi_bmf, err_smf, err_bmf)\n\"\"\"\n# Main function\nif __name__ == '__main__':\n args = args_parser()\n main(args) \n" ]
[ [ "matplotlib.pyplot.gca", "pandas.read_csv", "numpy.sqrt", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.yscale", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "numpy.log10", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.histogram", "matplotlib.rc", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
kiranrraj/100Days_Of_Coding
[ "ab75d83be9be87fb7bc83a3f3b72a4638dab22a1" ]
[ "Day_63/pie_chart.py" ]
[ "# Title : Pie chart\n# Author : Kiran Raj R.\n# Date : 19/11/2020\n\nimport matplotlib.pyplot as pp\n\nweek = [1, 2, 3, 4, 5, 6]\nexpence = [9, 8, 6, 4, 10, 7]\ncols = ['r','y','g','b','m','c']\n\nsection = [\"Food \", \"Recharge\", \"Electricity\", \"Water\", \"Loan\", \"Petrol\"]\n\npp.pie(week, \n labels=section, \n colors = cols, \n startangle=105, \n autopct = '%1.2f%%',\n explode=(0,0,0,0,0,0.1)\n )\npp.legend(loc=3)\npp.show()\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.pie", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hotmailbox/Structural-Engineering
[ "f34dcaec728fbb3e3a05c6f29ed5dabc621550cb" ]
[ "Code/snow_drift_by_polygons.py" ]
[ "'''\r\nBSD 3-Clause License\r\nCopyright (c) 2019, Donald N. Bockoven III\r\nAll rights reserved.\r\nRedistribution and use in source and binary forms, with or without\r\nmodification, are permitted provided that the following conditions are met:\r\n* Redistributions of source code must retain the above copyright notice, this\r\n list of conditions and the following disclaimer.\r\n* Redistributions in binary form must reproduce the above copyright notice,\r\n this list of conditions and the following disclaimer in the documentation\r\n and/or other materials provided with the distribution.\r\n* Neither the name of the copyright holder nor the names of its\r\n contributors may be used to endorse or promote products derived from\r\n this software without specific prior written permission.\r\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\r\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\r\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\r\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\r\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\r\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\r\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\r\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n'''\r\n\r\nfrom __future__ import division\r\nimport math\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef length_by_two_points(ax,ay,bx,by):\r\n dx = abs(bx - ax)\r\n dy = abs(by - ay)\r\n\r\n length = (dx*dx + dy*dy)**0.5\r\n\r\n return length\r\n\r\ndef angle_by_two_points(ax,ay,bx,by):\r\n dx = bx-ax\r\n dy = by-ay\r\n\r\n if dx == 0 and dy > 0:\r\n angle = 90\r\n\r\n elif dx == 0 and dy <0:\r\n angle = 270\r\n\r\n else:\r\n angle = math.atan2(dy, dx)\r\n angle = angle * (180/math.pi)\r\n\r\n if angle < 0:\r\n angle = angle + 360\r\n\r\n return angle\r\n\r\ndef point_at_angle_distance(a=[0,0],angle_degrees=0,distance=1):\r\n #determine point at distance and angle from point 0\r\n\r\n dx = math.cos(math.radians(angle_degrees))*distance\r\n dy = math.sin(math.radians(angle_degrees))*distance\r\n\r\n point = [a[0] + dx, a[1] + dy]\r\n\r\n return point\r\n\r\ndef line_line_intersection_points(a0x,a0y,a1x,a1y,b0x,b0y,b1x,b1y):\r\n\r\n try:\r\n A = np.array([[a0x, a0y], [a1x, a1y]])\r\n B = np.array([[b0x, b0y], [b1x, b1y]])\r\n t, s = np.linalg.solve(np.array([A[1]-A[0], B[0]-B[1]]).T, B[0]-A[0])\r\n res = []\r\n res.append(((1-t)*A[0] + t*A[1]))\r\n res.append(((1-s)*B[0] + s*B[1]))\r\n except:\r\n res = 'no int'\r\n\r\n return res\r\n\r\ndef vector_direction(x1,y1,x2,y2):\r\n dx = x2-x1\r\n dy = y2-y1\r\n angle = math.atan2(dy,dx) % (2*math.pi)\r\n \r\n return angle\r\n \r\nclass Line:\r\n def __init__(self, 
start=[0,0], end=[1,1], hc_ft = 1.0, label='1', location='e'):\r\n self.error_string = ''\r\n\r\n if start == end:\r\n self.error_string = 'Not Valid - Start Point = End Point'\r\n\r\n else:\r\n self.start = start\r\n self.end = end\r\n self.startx = start[0]\r\n self.starty = start[1]\r\n self.endx = end[0]\r\n self.endy = end[1]\r\n\r\n self.location = location\r\n self.length_calc()\r\n self.angle_degrees_calc()\r\n self.hc_ft = hc_ft\r\n self.label = label\r\n\r\n self.drift_line_x = []\r\n self.drift_line_y = []\r\n self.drift_lu = []\r\n self.drift_hd = []\r\n self.drift_pd = []\r\n self.drift_w = []\r\n self.drift_plot_labels = []\r\n\r\n\r\n def reset_drift_lines(self):\r\n self.drift_line_x = []\r\n self.drift_line_y = []\r\n self.drift_lu = []\r\n self.drift_hd = []\r\n self.drift_pd = []\r\n self.drift_plot_labels = []\r\n\r\n def length_calc(self):\r\n dx = abs(self.endx - self.startx)\r\n dy = abs(self.endy - self.starty)\r\n\r\n self.length = (dx**2 + dy**2)**0.5\r\n\r\n return self.length\r\n\r\n def angle_degrees_calc(self):\r\n dx = self.endx - self.startx\r\n dy = self.endy - self.starty\r\n\r\n if dx == 0 and dy > 0:\r\n angle = 90\r\n\r\n elif dx == 0 and dy <0:\r\n angle = 270\r\n\r\n else:\r\n angle = math.atan2(dy, dx)\r\n angle = angle * (180/math.pi)\r\n\r\n if angle < 0:\r\n angle = angle + 360\r\n\r\n self.perp_angle = angle + 90\r\n\r\n if self.location == 'i':\r\n self.perp_angle = self.perp_angle + 180\r\n else:\r\n pass\r\n\r\n self.angle = angle\r\n\r\n return angle\r\n\r\n def interior_points_calc(self,num_points):\r\n l = self.length\r\n step = l/(num_points+1)\r\n \r\n start = point_at_angle_distance(self.start,self.angle,0.125)\r\n end = point_at_angle_distance(self.end,self.angle,-0.125)\r\n points = [start]\r\n\r\n for i in range(1,num_points+1):\r\n t = (i*step)/self.length\r\n\r\n x = ((1-t)*self.startx) + (t*self.endx)\r\n y = ((1-t)*self.starty) + (t*self.endy)\r\n\r\n point = [x,y]\r\n\r\n points.append(point)\r\n \r\n 
points.append(end)\r\n self.internal_points = points\r\n self.internal_points_x = [coordx[0] for coordx in points]\r\n self.internal_points_y = [coordy[1] for coordy in points]\r\n\r\n return points\r\n \r\n def drift_at_point(self, point_on_self, lines, snow_density_pcf, pg_psf, logging=1):\r\n dist=10\r\n perp_line_start = point_on_self\r\n angle = self.perp_angle\r\n \r\n calc_log = ''\r\n \r\n perp_lines = []\r\n intersect_points = []\r\n points_x = []\r\n points_y = []\r\n \r\n if logging == 1:\r\n calc_log = calc_log + indent + 'Internal Point ({0:.3f},{1:.3f}):\\n'.format(point_on_self[0],point_on_self[1])\r\n calc_log = calc_log + 2*indent + 'Perpendicular Angle:{0:.4f} degrees\\n'.format(angle)\r\n else:\r\n pass\r\n\r\n perp_line_end = point_at_angle_distance(perp_line_start, angle, dist)\r\n\r\n b0x = perp_line_start[0]\r\n b0y = perp_line_start[1]\r\n b1x = perp_line_end[0]\r\n b1y = perp_line_end[1]\r\n \r\n perp_line_vector = vector_direction(b0x,b0y,b1x,b1y)\r\n \r\n if logging == 1:\r\n calc_log = calc_log + 2*indent + 'Perpendicular Line:\\n'+ 2*indent +'start:({0:.4f},{1:.4f})\\n{4}end:({2:.4f},{3:.4f})\\n'.format(b0x,b0y,b1x,b1y,2*indent)\r\n else:\r\n pass\r\n\r\n valid_point = 0\r\n lu_valid = []\r\n for check_line in lines:\r\n if check_line == self:\r\n pass\r\n else:\r\n if logging == 1:\r\n calc_log = calc_log + '\\n' + 3*indent + 'Intersection with {0}:\\n'.format(check_line.label)\r\n else:\r\n pass\r\n a0x = check_line.startx\r\n a0y = check_line.starty\r\n a1x = check_line.endx\r\n a1y = check_line.endy\r\n\r\n intersect_point = line_line_intersection_points(a0x,a0y,a1x,a1y,b0x,b0y,b1x,b1y)\r\n\r\n if intersect_point == 'no int':\r\n if logging == 1:\r\n calc_log = calc_log + 3*indent + 'No Intersection\\n'\r\n else:\r\n pass\r\n else:\r\n point_x = intersect_point[0][0]\r\n point_y = intersect_point[0][1]\r\n\r\n #check point within start, end vertices of line being checked against\r\n x_ok = min(a0x,a1x)-tolerance <= point_x <= 
max(a0x,a1x)+tolerance\r\n y_ok = min(a0y,a1y)-tolerance <= point_y <= max(a0y,a1y)+tolerance\r\n\r\n #check point vector same as perp line\r\n point_vector = vector_direction(b0x,b0y,point_x,point_y)\r\n\r\n if logging == 1:\r\n calc_log = calc_log + 3*indent + 'x = {0}\\n'.format(point_x)\r\n calc_log = calc_log + 3*indent + 'range: {0} - {1}\\n'.format(a0x,a1x)\r\n calc_log = calc_log + 3*indent + 'check: {0}\\n'.format(x_ok)\r\n calc_log = calc_log + 3*indent + 'y = {0}\\n'.format(point_y)\r\n calc_log = calc_log + 3*indent + 'range: {0} - {1}\\n'.format(a0y,a1y)\r\n calc_log = calc_log + 3*indent + 'check: {0}\\n'.format(y_ok)\r\n calc_log = calc_log + 3*indent + 'Vector: {0} = Perp-Vector: {1}\\n'.format(point_vector,perp_line_vector)\r\n else:\r\n pass\r\n\r\n if x_ok == True and y_ok == True and (perp_line_vector-tolerance) <= point_vector <= (perp_line_vector+tolerance):\r\n valid_point +=1\r\n\r\n intersect_points.append(intersect_point)\r\n points_x.append(point_x)\r\n points_y.append(point_y)\r\n\r\n dx = abs(point_x - b0x)\r\n dy = abs(point_y - b0y)\r\n\r\n lu = (dx**2 + dy**2)**0.5\r\n lu_calc = lu\r\n lu = max(lu,25)\r\n\r\n lu_valid.append(lu)\r\n\r\n if logging == 1:\r\n calc_log = calc_log + 3*indent + 'Valid Intersection\\n'\r\n calc_log = calc_log + 4*indent + 'Lu = {0:.3f}\\n'.format(lu_calc)\r\n calc_log = calc_log + 4*indent + 'Lu = {0:.3f} - min lu of 25\\n'.format(lu)\r\n else:\r\n pass\r\n\r\n else:\r\n pass\r\n try:\r\n lu = min(lu_valid)\r\n except:\r\n lu = 0\r\n \r\n hd_ft = (0.43 * (lu**(1.0/3.0))*((pg_psf+10)**(1.0/4.0))) - 1.5\r\n hd_calc = hd_ft\r\n hd_ft = 0.75 * hd_ft\r\n\r\n if logging == 1:\r\n calc_log = calc_log + '\\n' + 2*indent + 'Valid Intersections:{0}\\n\\n'.format(valid_point)\r\n calc_log = calc_log + 2*indent + '**Drift Calculation**\\n'.format(lu)\r\n calc_log = calc_log + 2*indent + 'Lu = {0:.3f}\\n'.format(lu)\r\n calc_log = calc_log + 2*indent + 'Lu = {0:.3f} - min lu of 25\\n'.format(lu)\r\n calc_log = calc_log 
+ 2*indent + 'hd = {0:.3f}\\n'.format(hd_calc)\r\n calc_log = calc_log + 2*indent + '0.75*hd = {0:.3f}\\n'.format(hd_ft)\r\n calc_log = calc_log + 2*indent + 'Edge Height = {0:.3f}\\n'.format(self.hc_ft)\r\n else:\r\n pass\r\n\r\n if hd_ft <= self.hc_ft:\r\n w_ft = 4*hd_ft\r\n hd_ft = hd_ft\r\n if logging == 1:\r\n calc_log = calc_log + 2*indent + 'w = 4*hd = {0:.3f}\\n'.format(w_ft)\r\n calc_log = calc_log + 2*indent + 'hd = hd = {0:.3f}\\n'.format(hd_ft)\r\n else:\r\n pass\r\n else:\r\n w_ft = min((4*hd_ft**2)/self.hc_ft, 8*self.hc_ft)\r\n hd_ft = self.hc_ft\r\n if logging == 1:\r\n calc_log = calc_log + 2*indent + 'w = min of 4*hd^2 / hc and 8*hc = {0:.3f}\\n'.format(w_ft)\r\n calc_log = calc_log + 2*indent + 'hd = hc = {0:.3f}\\n'.format(self.hc_ft)\r\n else:\r\n pass\r\n\r\n drift_point = point_at_angle_distance(perp_line_start, angle, w_ft)\r\n\r\n pd_psf = snow_density_pcf*hd_ft\r\n\r\n if logging == 1:\r\n calc_log = calc_log + 2*indent + 'Angle to Intersection: {0:.3f}\\n'.format(angle)\r\n calc_log = calc_log + 2*indent + 'pd = {0:.3f}\\n'.format(pd_psf)\r\n calc_log = calc_log + 2*indent + 'Interior Drift Coord = ({0:.3f},{1:.3f})\\n\\n'.format(drift_point[0],drift_point[1])\r\n else:\r\n pass\r\n\r\n drift_string = 'lu = {0:.2f} ft\\nhd = {1:.2f} ft\\nw = {2:.2f} ft\\npd = {3:.2f} psf'.format(lu,hd_ft,w_ft,pd_psf)\r\n \r\n return calc_log, perp_lines, intersect_points, points_x, points_y, drift_string\r\n\r\ndef drift_all(lines, snow_density_pcf, pg_psf,number_of_points=10, logging=1, tolerance=0.0001):\r\n \r\n calc_log = ''\r\n \r\n perp_lines = []\r\n intersect_points = []\r\n points_x = []\r\n points_y = []\r\n \r\n dist = 3000\r\n \r\n for line in lines:\r\n if logging == 1:\r\n int_points = number_of_points\r\n calc_log = calc_log + 'Number of Interior Points: {0}:\\n'.format(int_points)\r\n else:\r\n int_points = number_of_points\r\n \r\n line.interior_points_calc(int_points)\r\n line.reset_drift_lines()\r\n \r\n if logging == 1:\r\n calc_log = 
calc_log + '\\n--Intersection points for {0}--:\\n'.format(line.label)\r\n else:\r\n pass\r\n \r\n count = 0\r\n for interior_point in line.internal_points:\r\n perp_line_start = interior_point\r\n angle = line.perp_angle\r\n \r\n if logging == 1:\r\n calc_log = calc_log + indent + 'Internal Point {0}:\\n'.format(count+1)\r\n calc_log = calc_log + 2*indent + 'Perpendicular Angle:{0:.4f} degrees\\n'.format(angle)\r\n else:\r\n pass\r\n \r\n perp_line_end = point_at_angle_distance(perp_line_start, angle, dist)\r\n \r\n perp_lines.append([perp_line_start, perp_line_end])\r\n \r\n b0x = perp_line_start[0]\r\n b0y = perp_line_start[1]\r\n b1x = perp_line_end[0]\r\n b1y = perp_line_end[1]\r\n \r\n perp_line_vector = vector_direction(b0x,b0y,b1x,b1y)\r\n \r\n if logging == 1:\r\n calc_log = calc_log + 2*indent + 'Perpendicular Line:\\n'+ 2*indent +'start:({0:.4f},{1:.4f})\\n{4}end:({2:.4f},{3:.4f})\\n'.format(b0x,b0y,b1x,b1y,2*indent)\r\n else:\r\n pass\r\n \r\n valid_point = 0\r\n lu_valid = []\r\n for check_line in lines:\r\n if check_line == line:\r\n pass\r\n else:\r\n if logging == 1:\r\n calc_log = calc_log + '\\n' + 3*indent + 'Intersection with {0}:\\n'.format(check_line.label)\r\n else:\r\n pass\r\n a0x = check_line.startx\r\n a0y = check_line.starty\r\n a1x = check_line.endx\r\n a1y = check_line.endy\r\n \r\n intersect_point = line_line_intersection_points(a0x,a0y,a1x,a1y,b0x,b0y,b1x,b1y)\r\n \r\n if intersect_point == 'no int':\r\n if logging == 1:\r\n calc_log = calc_log + 3*indent + 'No Intersection\\n'\r\n else:\r\n pass\r\n else:\r\n point_x = intersect_point[0][0]\r\n point_y = intersect_point[0][1]\r\n \r\n #check point within start, end vertices of line being checked against\r\n x_ok = min(a0x,a1x)-tolerance <= point_x <= max(a0x,a1x)+tolerance\r\n y_ok = min(a0y,a1y)-tolerance <= point_y <= max(a0y,a1y)+tolerance\r\n \r\n #check point vector same as perp line\r\n #point_vector = vector_direction(b0x,b0y,point_x,point_y)\r\n \r\n #check point within perp 
line start, end vertices\r\n x_perp_ok = min(b0x,b1x)-tolerance <= point_x <= max(b0x,b1x)+tolerance\r\n y_perp_ok = min(b0y,b1y)-tolerance <= point_y <= max(b0y,b1y)+tolerance\r\n \r\n if logging == 1:\r\n calc_log = calc_log + 3*indent + 'x = {0}\\n'.format(point_x)\r\n calc_log = calc_log + 3*indent + 'range: {0} - {1}\\n'.format(a0x,a1x)\r\n calc_log = calc_log + 3*indent + 'check: {0}\\n'.format(x_ok)\r\n calc_log = calc_log + 3*indent + 'y = {0}\\n'.format(point_y)\r\n calc_log = calc_log + 3*indent + 'range: {0} - {1}\\n'.format(a0y,a1y)\r\n calc_log = calc_log + 3*indent + 'check: {0}\\n'.format(y_ok)\r\n calc_log = calc_log + 3*indent + 'Vector: {0} = Perp-Vector: {1}\\n'.format(point_vector,perp_line_vector)\r\n else:\r\n pass\r\n \r\n if x_ok == True and y_ok == True and x_perp_ok == True and y_perp_ok == True:\r\n valid_point +=1\r\n \r\n intersect_points.append(intersect_point)\r\n points_x.append(point_x)\r\n points_y.append(point_y)\r\n \r\n dx = abs(point_x - b0x)\r\n dy = abs(point_y - b0y)\r\n \r\n lu = (dx**2 + dy**2)**0.5\r\n lu_calc = lu\r\n lu = max(lu,25)\r\n \r\n lu_valid.append(lu)\r\n \r\n if logging == 1:\r\n calc_log = calc_log + 3*indent + 'Valid Intersection\\n'\r\n calc_log = calc_log + 4*indent + 'Lu = {0:.3f}\\n'.format(lu_calc)\r\n calc_log = calc_log + 4*indent + 'Lu = {0:.3f} - min lu of 25\\n'.format(lu)\r\n else:\r\n pass\r\n \r\n else:\r\n pass\r\n try:\r\n lu = min(lu_valid)\r\n except:\r\n lu = 0\r\n \r\n hd_ft = (0.43 * (lu**(1.0/3.0))*((pg_psf+10)**(1.0/4.0))) - 1.5\r\n hd_calc = hd_ft\r\n hd_ft = 0.75 * hd_ft\r\n \r\n if logging == 1:\r\n calc_log = calc_log + '\\n' + 2*indent + 'Valid Intersections:{0}\\n\\n'.format(valid_point)\r\n calc_log = calc_log + 2*indent + '**Drift Calculation**\\n'.format(lu)\r\n calc_log = calc_log + 2*indent + 'Lu = {0:.3f}\\n'.format(lu)\r\n calc_log = calc_log + 2*indent + 'Lu = {0:.3f} - min lu of 25\\n'.format(lu)\r\n calc_log = calc_log + 2*indent + 'hd = {0:.3f}\\n'.format(hd_calc)\r\n 
calc_log = calc_log + 2*indent + '0.75*hd = {0:.3f}\\n'.format(hd_ft)\r\n calc_log = calc_log + 2*indent + 'Edge Height = {0:.3f}\\n'.format(line.hc_ft)\r\n else:\r\n pass\r\n \r\n if hd_ft <= line.hc_ft:\r\n w_ft = 4*hd_ft\r\n hd_ft = hd_ft\r\n if logging == 1:\r\n calc_log = calc_log + 2*indent + 'w = 4*hd = {0:.3f}\\n'.format(w_ft)\r\n calc_log = calc_log + 2*indent + 'hd = hd = {0:.3f}\\n'.format(hd_ft)\r\n else:\r\n pass\r\n else:\r\n w_ft = min((4*hd_ft**2)/line.hc_ft, 8*line.hc_ft)\r\n hd_ft = line.hc_ft\r\n if logging == 1:\r\n calc_log = calc_log + 2*indent + 'w = min of 4*hd^2 / hc and 8*hc = {0:.3f}\\n'.format(w_ft)\r\n calc_log = calc_log + 2*indent + 'hd = hc = {0:.3f}\\n'.format(line.hc_ft)\r\n else:\r\n pass\r\n \r\n drift_point = point_at_angle_distance(perp_line_start, angle, w_ft)\r\n \r\n pd_psf = snow_density_pcf*hd_ft\r\n \r\n if logging == 1:\r\n calc_log = calc_log + 2*indent + 'Angle to Intersection: {0:.3f}\\n'.format(angle)\r\n calc_log = calc_log + 2*indent + 'pd = {0:.3f}\\n'.format(pd_psf)\r\n calc_log = calc_log + 2*indent + 'Interior Drift Coord = ({0:.3f},{1:.3f})\\n\\n'.format(drift_point[0],drift_point[1])\r\n else:\r\n pass\r\n \r\n line.drift_line_x.append(drift_point[0])\r\n line.drift_line_y.append(drift_point[1])\r\n line.drift_lu.append(lu)\r\n line.drift_hd.append(hd_ft)\r\n line.drift_pd.append(pd_psf)\r\n line.drift_w.append(w_ft)\r\n drift_string = 'lu = {0:.2f} ft, hd = {1:.2f} ft, w = {2:.2f} ft\\npd = {3:.2f} psf'.format(lu,hd_ft,w_ft,pd_psf)\r\n line.drift_plot_labels.append(drift_string)\r\n \r\n count+=1\r\n \r\n return calc_log, perp_lines, intersect_points, points_x, points_y\r\n\r\ndef lines_transformation_to_origin(lines):\r\n x = []\r\n y = []\r\n\r\n for line in lines:\r\n x.append(line.startx)\r\n y.append(line.starty)\r\n x.append(line.endx)\r\n y.append(line.endy)\r\n\r\n shift_x = min(x)\r\n shift_y = min(y)\r\n\r\n max_x = max(x) - shift_x\r\n max_y = max(y) - shift_y\r\n\r\n return shift_x, shift_y, 
max_x, max_y\r\n\r\ndef line_closest_to_point_and_point_on_line(point, lines):\r\n point_x = point[0]\r\n point_y = point[1]\r\n\r\n distance_to_each_line = []\r\n point_on_each_line = []\r\n\r\n tolerance = 0.000001 \r\n\r\n for line in lines:\r\n a0x = line.startx\r\n a0y = line.starty\r\n a1x = line.endx\r\n a1y = line.endy\r\n #check point within start, end vertices of line being checked against\r\n x_ok = min(a0x,a1x)-tolerance <= point_x <= max(a0x,a1x)+tolerance\r\n y_ok = min(a0y,a1y)-tolerance <= point_y <= max(a0y,a1y)+tolerance\r\n\r\n #secondary point at unit distance at 180+line perp angle\r\n point_2 = point_at_angle_distance(point,line.perp_angle+180,1)\r\n int_point = line_line_intersection_points(a0x,a0y,a1x,a1y,point[0],point[1],point_2[0],point_2[1])\r\n \r\n if int_point == 'no int':\r\n distance_to_each_line.append(1000000)\r\n point_on_each_line.append([line.startx,line.starty])\r\n else:\r\n #check point within start, end vertices of line being checked against\r\n x1_ok = min(a0x,a1x)-tolerance <= int_point[0][0] <= max(a0x,a1x)+tolerance\r\n y1_ok = min(a0y,a1y)-tolerance <= int_point[0][1] <= max(a0y,a1y)+tolerance\r\n\r\n if x1_ok==False or y1_ok==False:\r\n distance_to_each_line.append(1000000)\r\n point_on_each_line.append([line.startx,line.starty])\r\n else:\r\n distance = length_by_two_points(point[0],point[1],int_point[0][0],int_point[0][1])\r\n distance_to_each_line.append(distance)\r\n point_on_each_line.append([int_point[0][0],int_point[0][1]])\r\n \r\n segment = distance_to_each_line.index(min(distance_to_each_line))\r\n segment_point = point_on_each_line[segment]\r\n segment += 1 \r\n \r\n return distance_to_each_line, point_on_each_line, segment, segment_point\r\n\r\ndef export_dxf(path, lines):\r\n file = open(path,'w')\r\n file.write(' 0\\nSECTION\\n 2\\nENTITIES\\n')\r\n\r\n for line in lines:\r\n x0 = line.startx\r\n y0 = line.starty\r\n x1 = line.endx\r\n y1 = line.endy\r\n if line.location == 'e':\r\n layer = 
'Exterior'\r\n else:\r\n layer = 'Interior'\r\n\r\n file.write(' 0\\nPOLYLINE\\n 8\\n{0}\\n 66\\n1\\n 10\\n0.0\\n 20\\n0.0\\n 30\\n0.0\\n 70\\n8\\n'.format(layer))\r\n file.write(' 0\\nVERTEX\\n 8\\n{0}\\n 10\\n{1:.3f}\\n 20\\n{2:.3f}\\n 30\\n0.0\\n'.format(layer,x0,y0))\r\n file.write(' 0\\nVERTEX\\n 8\\n{0}\\n 10\\n{1:.3f}\\n 20\\n{2:.3f}\\n 30\\n0.0\\n'.format(layer,x1,y1))\r\n file.write(' 0\\nSEQEND\\n')\r\n\r\n file.write(' 0\\nPOLYLINE\\n 62\\n4\\n 8\\nDrift\\n 66\\n1\\n 10\\n0.0\\n 20\\n0.0\\n 30\\n0.0\\n 70\\n8\\n')\r\n\r\n for i in range(0, len(line.drift_line_x)):\r\n x_drift = line.drift_line_x[i]\r\n y_drift = line.drift_line_y[i]\r\n file.write(' 0\\nVERTEX\\n 8\\nDrift\\n 10\\n{0:.3f}\\n 20\\n{1:.3f}\\n 30\\n0.0\\n'.format(x_drift,y_drift))\r\n file.write(' 0\\nSEQEND\\n')\r\n\r\n k=0\r\n prev_label = ''\r\n for point in line.internal_points_x:\r\n label = '{0:.2f} psf - w = {1:.2f} ft'.format(line.drift_pd[k], line.drift_w[k])\r\n if k+1 > len(line.drift_w)-1 or k+1 > len(line.drift_pd)-1:\r\n next_label = ''\r\n else:\r\n next_label = '{0:.2f} psf - w = {1:.2f} ft'.format(line.drift_pd[k+1], line.drift_w[k+1])\r\n if label != prev_label or label != next_label:\r\n file.write(' 0\\nPOLYLINE\\n 8\\nDrift-Label\\n 66\\n1\\n 10\\n0.0\\n 20\\n0.0\\n 30\\n0.0\\n 70\\n8\\n')\r\n x2 = line.internal_points_x[k]\r\n y2 = line.internal_points_y[k]\r\n x3 = line.drift_line_x[k]\r\n y3 = line.drift_line_y[k]\r\n file.write(' 0\\nVERTEX\\n 8\\nDrift-Label\\n 10\\n{0:.3f}\\n 20\\n{1:.3f}\\n 30\\n0.0\\n'.format(x2,y2))\r\n file.write(' 0\\nVERTEX\\n 8\\nDrift-Label\\n 10\\n{0:.3f}\\n 20\\n{1:.3f}\\n 30\\n0.0\\n'.format(x3,y3))\r\n file.write(' 0\\nSEQEND\\n')\r\n\r\n real_label = 'pd = {0:.2f} psf, w = {1:.2f} ft'.format(line.drift_pd[k], line.drift_w[k])\r\n\r\n angle = angle_by_two_points(x2,y2,x3,y3)\r\n length = length_by_two_points(x2,y2,x3,y3)\r\n anno_pt = point_at_angle_distance([x2,y2],angle,0)\r\n\r\n if 90.99 < angle < 269.99:\r\n angle = angle + 
180\r\n else:\r\n pass\r\n\r\n file.write(' 0\\nTEXT\\n 62\\n3\\n 8\\nDrift-Label\\n 10\\n{0:.3f}\\n 20\\n{1:.3f}\\n 30\\n0.0\\n 40\\n0.5\\n 1\\n{2}\\n 72\\n0\\n 50\\n{3:.4f}\\n 11\\n{0:.3f}\\n 21\\n{1:.3f}\\n 31\\n0.0\\n 73\\n1\\n'.format(anno_pt[0],anno_pt[1],real_label,angle))\r\n else:\r\n pass\r\n k+=1\r\n prev_label = label\r\n\r\n file.write(' 0\\nENDSEC\\n 0\\nEOF')\r\n file.close()\r\n\r\n''' \r\n##testing area\r\nlogging = 0\r\nwrite_dxf = 0\r\ncreate_plot = 0\r\n\r\ntolerance = 0.000001\r\n\r\npg_psf = 25\r\nsnow_density_pcf = min((0.13*pg_psf) + 14, 30)\r\nCe = 1.0\r\nCt = 1.0\r\nCs = 1.0\r\nI = 1.0\r\npf_psf = 0.7*Ce*Ct*I*pg_psf\r\nps_psf = Cs*pf_psf\r\nhb_ft = ps_psf/snow_density_pcf\r\n\r\nhc_ft = [3,3,3,3,3,3,3,3]\r\n\r\nx = [1,51,51,51,51,26,26,26,26,11,11,11,11,1,1,1]\r\ny = [1,1,1,51,51,51,51,26,26,26,26,61,61,61,61,1]\r\n\r\nloc = ['e','e','e','e','e','e','e','e']\r\n\r\nif logging == 1:\r\n calc_log = '---Calculation Log---\\n\\n--Create Lines--\\n'\r\nelse:\r\n pass\r\n\r\nindent = ' '\r\nlines = []\r\n\r\nhc=0\r\nfor i in range(0, int(len(x)/2)):\r\n if logging == 1:\r\n calc_log = calc_log + 'Line {0}:\\n'.format(i+1)\r\n label = 'Line {0}'.format(i+1)\r\n if i == 0:\r\n i = i\r\n else:\r\n i *=2\r\n\r\n xs = x[i]\r\n ys = y[i]\r\n start = [xs,ys]\r\n xe = x[i+1]\r\n ye = y[i+1]\r\n end = [xe,ye]\r\n lines.append(Line(start,end, hc_ft[hc],label,loc[hc]))\r\n if logging == 1:\r\n calc_log = calc_log + 'start:({0:.4f},{1:.4f})\\nend:({2:.4f},{3:.4f})\\n\\n'.format(xs,ys,xe,ye)\r\n else:\r\n pass\r\n hc+=1\r\n\r\naab = ((1-0.5)*lines[0].startx) + (0.5*lines[0].endx)\r\nbaa = ((1-0.5)*lines[0].starty) + (0.5*lines[0].endy)\r\n\r\npoint_on_self = [aab,baa]\r\na_calc_log_line, a_perp_lines_line, a_intersect_points_line, a_points_x_line, a_points_y_line, a_drift_string_line = lines[0].drift_at_point(point_on_self, lines, snow_density_pcf, pg_psf, 1)\r\n\r\ncalc_log, perp_lines, intersect_points, points_x, points_y = 
drift_all(lines,snow_density_pcf,pg_psf,25,1)\r\n\r\ns_x, s_y, mx, my = lines_transformation_to_origin(lines)\r\nprint s_x\r\nprint s_y\r\nprint mx\r\nprint my\r\n\r\ntest1, test2, segment, segment_point = line_closest_to_point_and_point_on_line([35.5,9.4],lines)\r\nprint test1\r\nprint test2\r\nprint segment\r\nprint segment_point\r\n\r\ncolors = ['r','b','g','c','m','y','k','r','b','g','c','m','y','k','r','b','g','c','m','y','k','r','b','g','c','m','y','k','r','b','g','c','m','y','k','r','b','g','c','m','y','k','r','b','g','c','m','y','k']\r\ni=0\r\n\r\nif create_plot == 1:\r\n for line in lines:\r\n plt.plot([line.startx,line.endx], [line.starty,line.endy], color=colors[i])\r\n plt.plot(line.drift_line_x, line.drift_line_y, color=colors[i], marker = '+')\r\n plt.plot(line.internal_points_x, line.internal_points_y, color=colors[i], marker = '+')\r\n #plt.plot(line.drift_line_x, line.drift_line_y, color=colors[i])\r\n #plt.plot(line.internal_points_x, line.internal_points_y, color=colors[i])\r\n k=0\r\n prev_label = ''\r\n for point in line.internal_points_x:\r\n label = '{0:.2f} psf\\nw = {1:.2f} ft'.format(line.drift_pd[k], line.drift_w[k])\r\n if k+1 > len(line.drift_w)-1:\r\n next_label = ''\r\n else:\r\n next_label = '{0:.2f} psf\\nw = {1:.2f} ft'.format(line.drift_pd[k+1], line.drift_w[k+1])\r\n if label != prev_label or label != next_label:\r\n angle = 45\r\n plt.annotate(label,xy=(line.internal_points_x[k], line.internal_points_y[k]), xycoords='data', rotation=angle, horizontalalignment='left', verticalalignment='bottom' )\r\n plt.plot([line.internal_points_x[k],line.drift_line_x[k]], [line.internal_points_y[k],line.drift_line_y[k]], color='k')\r\n else:\r\n pass\r\n k+=1\r\n prev_label = label\r\n i+=1\r\n\r\n plt.ylim(ymax=max(y)+5, ymin=min(y)-5)\r\n plt.xlim(xmax=max(x)+5, xmin=min(x)-5)\r\n\r\n plt.show()\r\nelse:\r\n pass\r\n\r\nif logging == 1:\r\n file = open('Drift_by_lines_log.txt','w')\r\n file.write(calc_log)\r\n file.close()\r\nelse:\r\n 
pass\r\n\r\n#DXF file\r\nif write_dxf == 1:\r\n export_dxf('Drift_by_lines.dxf',lines)\r\nelse:\r\n pass\r\n'''\r\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Dodo33/alchemist-lib
[ "40c2d3b48d5f46315eb09e7f572d578b7e5324b4" ]
[ "alchemist_lib/datafeed/__init__.py" ]
[ "import datetime as dt\n\nimport pandas as pd\n\nfrom .poloniexdatafeed import PoloniexDataFeed\nfrom .bittrexdatafeed import BittrexDataFeed\n\nfrom ..database.ohlcv import Ohlcv\n\nfrom .. import utils\n\n\n\ndef get_data_sources_dict(session):\n\n \"\"\"\n Remember to change this method every time you add a module.\n\n Args:\n session (sqlalchemy.orm.session.Session): Connection to the database.\n\n Return:\n dsd (dict): Return a dictionary. The key must be the name of the data source in the database and the value must be an instance of the module charged to collect data.\n \"\"\"\n \n dsd = {\"poloniex\" : PoloniexDataFeed(session = session),\n \"bittrex\" : BittrexDataFeed(session = session)\n }\n \n return dsd\n\n\ndef get_last_price(assets):\n\n \"\"\"\n Returns the last trade price for every asset.\n The last price is retrived based on ``alchemist_lib.datafeed.get_data_sources_dict()``.\n\n Args:\n assets (alchemist_lib.database.asset.Asset, list[Asset]): List of assets which we want last trade price of.\n\n Return:\n df (pandas.DataFrame): A dataframe with the following columns:\n * asset (alchemist_lib.database.asset.Asset): Must be the index.\n * last_price (decimal.Decimal): The last price of the associated asset. 
\n \"\"\"\n\n assets = utils.to_list(assets)\n data_feed_source = None\n ds = get_data_sources_dict(session = None)\n \n df = pd.DataFrame(columns = [\"asset\", \"last_price\"]).set_index(\"asset\")\n for asset in assets:\n data_source_names = utils.get_data_source_names_from_asset(asset = asset)\n\n for ds_name, ds_inst in ds.items():\n if ds_name in data_source_names:\n data_feed_source = ds_inst\n break\n\n try:\n lp = data_feed_source.get_last_price(assets = asset)\n except AttributeError:#If data_feed_source remains None\n lp = pd.DataFrame()\n \n df = pd.concat([df, lp])\n\n return df\n\n\ndef save_ohlcv(session, assets, start_date, timeframe):\n\n \"\"\"\n This method collects and saves OHLCV data ( from start_date to utcnow() ).\n\n Args:\n session (sqlalchemy.orm.session.Session): Database connection.\n assets (alchemist_lib.database.asset.Asset, list[Asset]): List of assets which we want informations of.\n start_date (datetime.datetime): Datetime to start collecting data from.\n timeframe (str): Timeframe identifier.\n \"\"\"\n\n assets = utils.to_list(assets)\n ds = get_data_sources_dict(session = session)\n exch_assets = {}\n \n for asset in assets:\n data_source_names = utils.get_data_source_names_from_asset(asset = asset)\n\n for ds_name, ds_inst in ds.items():\n if ds_name in data_source_names:\n exch_assets.setdefault(ds_name, []).append(asset)\n\n\n for ds_name, ds_inst in ds.items():\n try:\n ds_inst.save_ohlcv(assets = exch_assets[ds_name], start_date = start_date, timeframe = timeframe)\n except Exception:\n pass\n\n \ndef save_last_ohlcv(session, assets, timeframe):\n\n \"\"\"\n This method collects and saves the last OHLCV candle.\n\n Args:\n assets (alchemist_lib.database.asset.Asset, list[Asset]): List of assets which we want informations of.\n timeframe (str): Timeframe identifier.\n \"\"\"\n\n assets = utils.to_list(assets)\n ds = get_data_sources_dict(session = session)\n exch_assets = {}\n \n for asset in assets:\n data_source_names = 
utils.get_data_source_names_from_asset(asset = asset)\n\n for ds_name, ds_inst in ds.items():\n if ds_name in data_source_names:\n exch_assets.setdefault(ds_name, []).append(asset)\n\n for ds_name, ds_inst in ds.items():\n try:\n ds_inst.save_last_ohlcv(assets = exch_assets[ds_name], timeframe = timeframe)\n except Exception:\n pass\n\n\ndef check_ohlcv_data(session, assets, timeframe, window_length):\n\n \"\"\"\n Check if all OHLCV candles needed are already saved in the db.\n It's useful in order to not requests OHLCV data more times (in different functions).\n\n Args:\n session (sqlalchemy.orm.session.Session): Database connection.\n assets (alchemist_lib.database.asset.Asset, list[Asset]): List of assets which we want informations of.\n timeframe (str): Timeframe identifier.\n window_length (int): The number of steps to do in the past.\n\n Return:\n assets_toret (list[Asset]): List of not-updated assets.\n \"\"\"\n \n assets_toret = []\n tf, tf_unit = utils.get_timeframe_data(timeframe = timeframe)\n start_date = utils.get_last_date_checkpoint(timeframe = timeframe)\n\n for asset in assets:\n for i in range(window_length):\n step = start_date - dt.timedelta(seconds = utils.timeframe_to_seconds(timeframe = timeframe) * i)\n \n res = session.query(Ohlcv).filter(Ohlcv.ohlcv_datetime == step,\n Ohlcv.asset == asset,\n Ohlcv.timeframe_id == timeframe).all()\n \n if len(res) <= 0:\n assets_toret.append(asset)\n break\n\n return assets_toret\n \n\n\n\n\n\n\n\n\n\n\n\n\n \n\n\n\n \n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
FredeJ/CSIKit
[ "5ce1b114be3d8b24a05576e5345ef43c9e5cd136" ]
[ "CSIKit/legacy/realtime_graph.py" ]
[ "import matplotlib.pyplot as plt\nimport numpy as np\nfrom filters import hampel, running_mean, running_stdev, bandpass\nfrom matlab import db, variance\nfrom read_bfee import BeamformReader\nfrom scipy import fftpack, signal\n\n\nclass RealtimeGraph:\n\n def __init__(self, graphType=\"default\"):\n plt.ion()\n self.fig, self.ax = plt.subplots()\n self.all_data = []\n self.graphType = graphType\n\n if graphType == \"default\":\n self.plotHampel, = plt.plot([], [], label=\"Hampel\")\n #self.plotStd, = plt.plot([], [], label=\"Standard Deviation\")\n self.plotAll, = plt.plot([], [], \"r\",label=\"Hampel + Running Mean\")\n plt.legend(loc=\"upper right\")\n\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Amplitude (dBm)\")\n elif graphType == \"livebutt\":\n self.plotButt, = plt.plot([], [])\n \n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Amplitude (dBm)\") \n elif graphType == \"butter\":\n self.plotButt, = plt.plot([], [])\n self.ax.set_xlim([30, 200])\n\n plt.xlabel(\"Beats Per Minute (BPM)\")\n plt.ylabel(\"Amplitude (dBm/Hz)\")\n elif graphType == \"breath\":\n self.plotBreath, = plt.plot([], [])\n self.ax.set_xlim([0, 40])\n\n plt.xlabel(\"Beats Per Minute (BPM)\")\n plt.ylabel(\"Amplitude (dBm/Hz)\")\n elif graphType == \"heat\":\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Subcarrier Index\")\n elif graphType == \"variance\":\n self.plot, = plt.plot([], [], label=\"Variance\")\n\n plt.xlabel(\"Subcarrier Index\")\n plt.ylabel(\"Variance\")\n\n def update(self, data):\n if self.graphType == \"default\":\n self.updateContents(data)\n elif self.graphType == \"butter\":\n self.updateButterworth(data)\n elif self.graphType == \"breath\":\n self.updateBreath(data)\n elif self.graphType == \"livebutt\":\n self.updateButterLive(data)\n elif self.graphType == \"heat\":\n self.updateHeat2(data)\n elif self.graphType == \"variance\":\n self.updateVariance(data)\n elif self.graphType == \"justbeats\":\n self.beatsfilter(data)\n\n def updateTimestamps(self):\n csi_trace = 
self.all_data\n time = [x[\"timestamp_low\"] for x in csi_trace]\n\n timediff = (np.diff(time))*10e-7\n time_stamp = np.cumsum(timediff)\n\n csi_trace[0][\"timestamp\"] = 0\n for x in csi_trace[1:]:\n x[\"timestamp\"] = time_stamp[csi_trace.index(x)-1]\n \n return True\n\n def getCSI(self, scaled_csi, metric=\"phasediff\"):\n no_frames = len(scaled_csi)\n no_subcarriers = scaled_csi[0][\"csi\"].shape[0]\n\n finalEntries = np.zeros((no_subcarriers, no_frames))\n\n for x in range(no_frames):\n scaled_entry = scaled_csi[x][\"csi\"]\n for y in range(no_subcarriers):\n if metric == \"phasediff\":\n if scaled_entry.shape[1] >= 2:\n #Not 100% sure this generates correct Phase Difference.\n finalEntries[y][x] = np.angle(scaled_entry[y][1][0])-np.angle(scaled_entry[y][0][0])\n else:\n #In cases where only one antenna is available,\n #reuse the previous value.\n finalEntries[y][x] = finalEntries[y][x-1]\n elif metric == \"amplitude\":\n finalEntries[y][x] = db(abs(scaled_entry[y][1][0]))\n\n return finalEntries\n\n def updateButterLive(self, data):\n self.all_data.append(data)\n self.updateTimestamps()\n\n if not self.updateTimestamps():\n self.all_data = self.all_data[:-1]\n return None\n scaled_csi = self.all_data\n\n no_frames = len(scaled_csi)\n no_subcarriers = scaled_csi[0][\"csi\"].shape[0]\n\n if no_frames < 50:\n return None\n\n #Replace complex CSI with amplitude.\n finalEntry = [db(abs(scaled_csi[x][\"csi\"][28][0][0])) for x in range(no_frames)]\n\n hampelData = hampel(finalEntry, 10)\n smoothedData = running_mean(hampelData, 30)\n y = smoothedData\n\n x = list([x[\"timestamp\"] for x in scaled_csi])\n tdelta = (x[-1] - x[0]) / len(x)\n\n Fs = 1/tdelta\n y = bandpass(5, 1.0, 1.3, Fs, y)\n\n self.plotButt.set_xdata(x)\n self.plotButt.set_ydata(np.abs(y))\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\n def updateButterworth(self, data):\n self.all_data.append(data)\n self.updateTimestamps()\n\n if not 
self.updateTimestamps():\n self.all_data = self.all_data[:-1]\n return None\n scaled_csi = self.all_data\n\n no_frames = len(scaled_csi)\n no_subcarriers = scaled_csi[0][\"csi\"].shape[0]\n\n if no_frames < 50:\n return None\n\n #Replace complex CSI with amplitude.\n finalEntry = [db(abs(scaled_csi[x][\"csi\"][15][0][0])) for x in range(no_frames)]\n\n hampelData = hampel(finalEntry, 10)\n smoothedData = running_mean(hampelData, 30)\n y = smoothedData\n\n x = list([x[\"timestamp\"] for x in scaled_csi])\n tdelta = (x[-1] - x[0]) / len(x)\n\n Fs = 1/tdelta\n n = no_frames\n\n y = bandpass(5, 1.0, 1.3, Fs, y)\n\n ffty = np.fft.rfft(y, len(y))\n freq = np.fft.rfftfreq(len(y), tdelta)\n freqX = [((i*Fs)/n)*60 for i in range(len(freq))]\n\n self.plotButt.set_xdata(freqX)\n self.plotButt.set_ydata(np.abs(ffty))\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\n def updateBreath(self, data):\n self.all_data.append(data)\n\n if not self.updateTimestamps():\n self.all_data = self.all_data[:-1]\n return None\n\n scaled_csi = self.all_data\n\n no_frames = len(scaled_csi)\n no_subcarriers = scaled_csi[0][\"csi\"].shape[0]\n\n if no_frames < 50:\n return None\n\n #Replace complex CSI with amplitude.\n finalEntry = [db(abs(scaled_csi[x][\"csi\"][15][1][0])) for x in range(no_frames)]\n\n hampelData = hampel(finalEntry, 10)\n smoothedData = running_mean(hampelData, 30)\n y = smoothedData\n\n y -= np.mean(y)\n x = list([x[\"timestamp\"] for x in scaled_csi])\n tdelta = (x[-1] - x[0]) / len(x)\n\n Fs = 1/tdelta\n n = no_frames\n\n ffty = np.fft.rfft(y, len(y))\n freq = np.fft.rfftfreq(len(y), tdelta)\n freqX = [((i*Fs)/n)*60 for i in range(len(freq))]\n\n self.plotBreath.set_xdata(freqX)\n self.plotBreath.set_ydata(np.abs(ffty))\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\n def updateContents(self, data):\n self.all_data.append(data)\n if not 
self.updateTimestamps():\n self.all_data = self.all_data[:-1]\n return None\n\n scaled_csi = self.all_data\n\n no_frames = len(scaled_csi)\n no_subcarriers = 30\n\n finalEntry = [db(abs(scaled_csi[x][\"csi\"][14][1][0])) for x in range(no_frames)] \n\n x = list([x[\"timestamp\"] for x in scaled_csi])\n \n #self.plotStand.set_xdata(x)\n #self.plotStand.set_ydata(finalEntry)\n\n hampelData = hampel(finalEntry, 20)\n #smoothedData = running_mean(hampelData.copy(), 15)\n\n self.plotHampel.set_xdata(x)\n self.plotHampel.set_ydata(hampelData)\n\n #self.plotAll.set_xdata(x)\n #self.plotAll.set_ydata(smoothedData)\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\n def updateVariance(self, data):\n\n self.all_data.append(data)\n if not self.updateTimestamps():\n self.all_data = self.all_data[:-1]\n return None\n scaled_csi = self.all_data\n\n no_frames = len(scaled_csi)\n no_subcarriers = scaled_csi[0][\"csi\"].shape[0]\n\n y = []\n\n finalEntry = np.zeros((no_subcarriers, no_frames))\n hampelData = np.zeros((no_subcarriers, no_frames))\n smoothedData = np.zeros((no_subcarriers, no_frames))\n\n for x in range(no_subcarriers):\n finalEntry[x] = [db(abs(scaled_csi[y][\"csi\"][x][0][0])) for y in range(no_frames)]\n #hampelData = hampel(finalEntry[x].flatten(), 10)\n #smoothedData = running_mean(hampelData, 25)\n y.append(variance(finalEntry[x]))\n\n x = list(range(1, 31))\n\n self.plot.set_xdata(x)\n self.plot.set_ydata(y)\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\n # def updateHeat2(self, data):\n # self.all_data.append(data)\n # if not self.updateTimestamps():\n # self.all_data = self.all_data[:-1]\n # return None\n # scaled_csi = self.all_data\n\n # no_frames = len(scaled_csi)\n # no_subcarriers = scaled_csi[0][\"csi\"].shape[0]\n # ylimit = scaled_csi[no_frames-1][\"timestamp\"]\n\n # if no_frames < 100:\n # return None\n\n # limits = [1, 
no_subcarriers, 0, ylimit]\n\n # finalEntry = np.zeros((no_frames, no_subcarriers))\n\n # #Replace complex CSI with amplitude.\n # for y in range(no_subcarriers):\n # for x in range(no_frames):\n # scaled_entry = scaled_csi[x][\"csi\"]\n # finalEntry[y][x] = db(abs(scaled_entry[y][0][0]))\n\n\n # for j in range(no_subcarriers):\n \n # sig = finalEntry[j] \n # #hampelData = hampel(sig, 10)\n # #smoothedData = running_mean(sig, 30)\n \n # y = sig.flatten()\n # x = list([x[\"timestamp\"] for x in scaled_csi])\n # tdelta = (x[-1] - x[0]) / len(x)\n\n # Fs = 1/tdelta\n # n = no_frames\n # y = bandpass(5, 1.0, 1.3, Fs, y)\n\n # for x in range(70):\n # y[x] = 0\n\n # finalEntry[j] = y\n\n # #x = subcarrier index\n # #y = time (s)\n # #z = amplitude (cBm)\n\n # im = self.ax.imshow(finalEntry, cmap=plt.cm.gist_rainbow_r, extent=limits, aspect=\"auto\")\n\n # cbar = self.ax.figure.colorbar(im, ax=self.ax)\n # cbar.ax.set_ylabel(\"Amplitude (dBm)\", rotation=-90, va=\"bottom\")\n\n # self.ax.relim()\n # self.ax.autoscale_view()\n # self.fig.canvas.draw()\n # self.fig.canvas.flush_events()\n\n def beatsfilter(self, data):\n self.all_data.append(data)\n scaled_csi = self.all_data\n no_frames = len(scaled_csi)\n\n if no_frames < 256:\n return None\n\n Fs = 10 \n\n no_subcarriers = scaled_csi[0][\"csi\"].shape[0]\n finalEntries = self.getCSI(scaled_csi)\n\n sigs = []\n\n for x in range(no_subcarriers):\n finalEntry = finalEntries[x].flatten()\n filtData = bandpass(7, 1, 1.5, Fs, finalEntry)\n \n for i in range(0, 70):\n filtData[i] = 0\n\n sigs.append(filtData)\n\n pxxs = []\n\n for data in sigs:\n f, Pxx_den = signal.welch(data, Fs)\n pxxs.append(Pxx_den)\n\n meanPsd = np.mean(pxxs, axis=0)\n print(\"Beats: %.2f\" % float(f[np.argmax(meanPsd)]*60))\n self.all_data = []\n\n def updateHeat2(self, data):\n self.all_data.append(data)\n if not self.updateTimestamps():\n self.all_data = self.all_data[:-1]\n return None\n scaled_csi = self.all_data\n\n no_frames = len(scaled_csi)\n 
no_subcarriers = scaled_csi[0][\"csi\"].shape[0]\n ylimit = scaled_csi[no_frames-1][\"timestamp\"]\n\n if no_frames < 80:\n return None\n\n # limits = [1, no_subcarriers, 0, ylimit]\n limits = [0, ylimit, 1, no_subcarriers]\n\n finalEntry = np.zeros((no_subcarriers, no_frames))\n\n #Replace complex CSI with amplitude.\n for y in range(no_subcarriers):\n for x in range(no_frames):\n scaled_entry = scaled_csi[x][\"csi\"]\n finalEntry[y][x] = db(abs(scaled_entry[y][0][0]))\n\n for j in range(no_subcarriers):\n sig = finalEntry[j] \n #hampelData = hampel(sig, 10)\n #smoothedData = running_mean(sig, 30)\n \n y = sig.flatten()\n y = bandpass(5, 1.0, 1.3, 20, y)\n\n for x in range(70):\n y[x] = 0\n\n finalEntry[j] = y\n\n #x = subcarrier index\n #y = time (s)\n #z = amplitude (cBm)\n\n if not hasattr(self, \"im\"):\n self.im = self.ax.imshow(finalEntry, cmap=\"jet\", extent=limits, aspect=\"auto\")\n cbar = self.ax.figure.colorbar(self.im, ax=self.ax)\n cbar.ax.set_ylabel(\"Amplitude (dBm)\", rotation=-91, va=\"bottom\") \n else:\n self.im.set_array(finalEntry)\n self.im.set_extent(limits)\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n\n def updateHeat(self, data):\n self.all_data.append(data)\n if not self.updateTimestamps():\n self.all_data = self.all_data[:-1]\n return None\n scaled_csi = self.all_data\n\n no_frames = len(scaled_csi)\n no_subcarriers = scaled_csi[0][\"csi\"].shape[0]\n ylimit = scaled_csi[no_frames-1][\"timestamp\"]\n\n if no_frames < 10:\n return None\n\n limits = [1, no_subcarriers, 0, ylimit]\n\n finalEntry = np.zeros((no_frames, no_subcarriers))\n\n #Replace complex CSI with amplitude.\n for y in range(no_subcarriers):\n for x in range(no_frames):\n scaled_entry = scaled_csi[x][\"csi\"]\n finalEntry[x][y] = db(abs(scaled_entry[y][0][0]))\n\n #x = subcarrier index\n #y = time (s)\n #z = amplitude (cBm)\n\n finalEntry = finalEntry[::-1] \n\n if not hasattr(self, \"im\"):\n self.im = 
self.ax.imshow(finalEntry, cmap=plt.cm.gist_rainbow_r, extent=limits, aspect=\"auto\")\n cbar = self.ax.figure.colorbar(self.im, ax=self.ax)\n cbar.ax.set_ylabel(\"Amplitude (dBm)\", rotation=-91, va=\"bottom\") \n else:\n self.im.set_array(finalEntry)\n self.im.set_extent(limits)\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.fig.canvas.draw()\n self.fig.canvas.flush_events()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.abs", "numpy.cumsum", "matplotlib.pyplot.subplots", "matplotlib.pyplot.plot", "numpy.argmax", "numpy.mean", "numpy.diff", "matplotlib.pyplot.xlabel", "numpy.angle", "matplotlib.pyplot.ion", "numpy.zeros", "scipy.signal.welch", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
dianuchitop/DianaMartinez_Ejercicio30
[ "68dcfcab1b24ba371e3e1bbd8f3918bf5583dc7e" ]
[ "graficash.py" ]
[ "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.animation import PillowWriter\n\nm=100\nbeta=0.05\nc=1\ndx=1./m\ndt=beta*dx/c\nT_final=0.1\nn=int(T_final/dt)+1\n\ndata = np.loadtxt(\"Data.dat\")\ngrid = np.reshape(data, (n, m))\n\nfig = plt.figure()\nax = plt.axes(xlim=(0, 1), ylim=(-0.05, 0.05))\nplt.xlabel('Posicion[metros]')\nplt.ylabel('U')\nline, = ax.plot([], [], lw=3)\n\ndef init():\n line.set_data([], [])\n return line,\n\ndef animate(i):\n x = np.linspace(0, 1, m)\n y = 0.05*grid[i]\n t = (2/0.1)*i*dt\n fig.suptitle('Tiempo: %1.2f segundos' %t)\n line.set_data(x, y)\n return line,\n\nanim = FuncAnimation(fig, animate, init_func=init,\n frames=200, interval=20, blit=True,repeat=False)\n\nwriter = PillowWriter(fps=60)\nanim.save('Burger.gif', writer=writer)\n" ]
[ [ "matplotlib.animation.PillowWriter", "numpy.linspace", "numpy.reshape", "matplotlib.pyplot.axes", "matplotlib.pyplot.ylabel", "matplotlib.animation.FuncAnimation", "matplotlib.pyplot.xlabel", "numpy.loadtxt", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jejjohnson/kernellib
[ "eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050" ]
[ "kernellib/kernels/derivative_functions/cythonize_rbf_derivative.py" ]
[ "from distutils.core import setup\nfrom Cython.Build import cythonize\nimport numpy\n\nsetup(\n name = 'RBF Derivative Cython',\n ext_modules = cythonize(\"rbf_derivative_cy.pyx\"),\n include_dirs = [numpy.get_include()]\n \n)" ]
[ [ "numpy.get_include" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Python3pkg/500lines
[ "e9c05e45d6eedf36ceef67ec5f817a39a07980fb" ]
[ "sampler/code/rpg.py" ]
[ "import numpy as np\nfrom multinomial import MultinomialDistribution\n\n\nclass MagicItemDistribution(object):\n\n # these are the names (and order) of the stats that all magical\n # items will have\n stats_names = (\"dexterity\", \"constitution\", \"strength\",\n \"intelligence\", \"wisdom\", \"charisma\")\n\n def __init__(self, bonus_probs, stats_probs, rso=np.random):\n \"\"\"Initialize a magic item distribution parameterized by `bonus_probs`\n and `stats_probs`.\n\n Parameters\n ----------\n bonus_probs: numpy array of length m\n The probabilities of the overall bonuses. Each index in\n the array corresponds to the bonus of that amount (e.g.\n index 0 is +0, index 1 is +1, etc.)\n\n stats_probs: numpy array of length 6\n The probabilities of how the overall bonus is distributed\n among the different stats. `stats_probs[i]` corresponds to\n the probability of giving a bonus point to the ith stat,\n i.e. the value at `MagicItemDistribution.stats_names[i]`.\n\n rso: numpy RandomState object (default: np.random)\n The random number generator\n\n \"\"\"\n # Create the multinomial distributions we'll be using\n self.bonus_dist = MultinomialDistribution(bonus_probs, rso=rso)\n self.stats_dist = MultinomialDistribution(stats_probs, rso=rso)\n\n def sample(self):\n \"\"\"Sample a random magical item.\n\n Returns\n -------\n dictionary\n The keys are the names of the stats, and the values are\n the bonus conferred to the corresponding stat.\n\n \"\"\"\n stats = self._sample_stats()\n item_stats = dict(list(zip(self.stats_names, stats)))\n return item_stats\n\n def log_pmf(self, item):\n \"\"\"Compute the log probability the given magical item.\n\n Parameters\n ----------\n item: dictionary\n The keys are the names of the stats, and the values are\n the bonus conferred to the corresponding stat.\n\n Returns\n -------\n float\n The value corresponding to log(p(item))\n\n \"\"\"\n # First pull out the bonus points for each stat, in the\n # correct order, then pass that to 
_stats_log_pmf.\n stats = np.array([item[stat] for stat in self.stats_names])\n log_pmf = self._stats_log_pmf(stats)\n return log_pmf\n\n def pmf(self, item):\n \"\"\"Compute the probability the given magical item.\n\n Parameters\n ----------\n item: dictionary\n The keys are the names of the stats, and the values are\n the bonus conferred to the corresponding stat.\n\n Returns\n -------\n float\n The value corresponding to p(item)\n\n \"\"\"\n return np.exp(self.log_pmf(item))\n\n def _sample_bonus(self):\n \"\"\"Sample a value of the overall bonus.\n\n Returns\n -------\n integer\n The overall bonus\n\n \"\"\"\n # The bonus is essentially just a sample from a multinomial\n # distribution with n=1; i.e., only one event occurs.\n sample = self.bonus_dist.sample(1)\n\n # `sample` is an array of zeros and a single one at the\n # location corresponding to the bonus. We want to convert this\n # one into the actual value of the bonus.\n bonus = np.argmax(sample)\n return bonus\n\n def _sample_stats(self):\n \"\"\"Sample the overall bonus and how it is distributed across the\n different stats.\n\n Returns\n -------\n numpy array of length 6\n The number of bonus points for each stat\n\n \"\"\"\n # First we need to sample the overall bonus\n bonus = self._sample_bonus()\n\n # Then, we use a different multinomial distribution to sample\n # how that bonus is distributed. 
The bonus corresponds to the\n # number of events.\n stats = self.stats_dist.sample(bonus)\n return stats\n\n def _bonus_log_pmf(self, bonus):\n \"\"\"Evaluate the log-PMF for the given bonus.\n\n Parameters\n ----------\n bonus: integer\n The total bonus.\n\n Returns\n -------\n float\n The value corresponding to log(p(bonus))\n\n \"\"\"\n # Make sure the value that is passed in is within the\n # appropriate bounds\n if bonus < 0 or bonus >= len(self.bonus_dist.p):\n return -np.inf\n\n # Convert the scalar bonus value into a vector of event\n # occurrences\n x = np.zeros(len(self.bonus_dist.p))\n x[bonus] = 1\n\n return self.bonus_dist.log_pmf(x)\n\n def _stats_log_pmf(self, stats):\n \"\"\"Evaluate the log-PMF for the given distribution of bonus points\n across the different stats.\n\n Parameters\n ----------\n stats: numpy array of length 6\n The distribution of bonus points across the stats\n\n Returns\n -------\n float\n The value corresponding to log(p(stats))\n\n \"\"\"\n # There are never any leftover bonus points, so the sum of the\n # stats gives us the total bonus.\n total_bonus = np.sum(stats)\n\n # First calculate the probability of the total bonus\n logp_bonus = self._bonus_log_pmf(total_bonus)\n\n # Then calculate the probability of the stats\n logp_stats = self.stats_dist.log_pmf(stats)\n\n # Then multiply them together (using addition, because we are\n # working with logs)\n log_pmf = logp_bonus + logp_stats\n return log_pmf\n\n\nclass DamageDistribution(object):\n\n def __init__(self, num_items, item_dist,\n num_dice_sides=12, num_hits=1, rso=np.random):\n \"\"\"Initialize a distribution over attack damage. 
This object can\n sample possible values for the attack damage dealt over\n `num_hits` hits when the player has `num_items` items, and\n where attack damage is computed by rolling dice with\n `num_dice_sides` sides.\n\n Parameters\n ----------\n num_items: int\n The number of items the player has.\n item_dist: MagicItemDistribution object\n The distribution over magic items.\n num_dice_sides: int (default: 12)\n The number of sides on each die.\n num_hits: int (default: 1)\n The number of hits across which we want to calculate damage.\n rso: numpy RandomState object (default: np.random)\n The random number generator\n\n \"\"\"\n # This is an array of integers corresponding to the sides of a\n # single die.\n self.dice_sides = np.arange(1, num_dice_sides + 1)\n # Create a multinomial distribution corresponding to one of\n # these dice. Each side has equal probabilities.\n self.dice_dist = MultinomialDistribution(\n np.ones(num_dice_sides) / float(num_dice_sides), rso=rso)\n\n self.num_hits = num_hits\n self.num_items = num_items\n self.item_dist = item_dist\n\n def sample(self):\n \"\"\"Sample the attack damage.\n\n Returns\n -------\n int\n The sampled damage\n\n \"\"\"\n # First, we need to randomly generate items (the number of\n # which was passed into the constructor).\n items = [self.item_dist.sample() for i in range(self.num_items)]\n\n # Based on the item stats (in particular, strength), compute\n # the number of dice we get to roll.\n num_dice = 1 + np.sum([item['strength'] for item in items])\n\n # Roll the dice and compute the resulting damage.\n dice_rolls = self.dice_dist.sample(self.num_hits * num_dice)\n damage = np.sum(self.dice_sides * dice_rolls)\n return damage\n" ]
[ [ "numpy.arange", "numpy.ones", "numpy.argmax", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ddervs/GreenGraph
[ "bb65e5d9f2a34686add644e4fa1851aabf82c3c1" ]
[ "greengraph/classes/Map.py" ]
[ "import numpy as np\nimport requests\nfrom StringIO import StringIO\nfrom matplotlib import image as img\n\nclass Map(object):\n\n def __init__(self, latitude, longitude, satellite=True,\n zoom=10, size=(400, 400), sensor=False):\n\n base = \"http://maps.googleapis.com/maps/api/staticmap?\"\n\n params = dict(\n sensor=str(sensor).lower(),\n zoom=zoom,\n size=\"x\".join(map(str, size)),\n center=\",\".join(map(str, (latitude, longitude))),\n style=\"feature:all|element:labels|visibility:off\"\n )\n\n if satellite:\n params[\"maptype\"] = \"satellite\"\n self.image = requests.get(base, params=params).content\n # Fetch our PNG image data\n self.pixels = img.imread(StringIO(self.image))\n\n # Parse our PNG image as a numpy array\n def green(self, threshold):\n\n # Use NumPy to build an element-by-element logical array\n greener_than_red = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 0]\n greener_than_blue = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 2]\n green = np.logical_and(greener_than_red, greener_than_blue)\n return green\n\n def count_green(self, threshold=1.1):\n\n return np.sum(self.green(threshold))\n\n def show_green(self, threshold=1.1):\n\n green = self.green(threshold)\n out = green[:, :, np.newaxis] * np.array([0, 1, 0])[np.newaxis, np.newaxis, :]\n my_buffer = StringIO()\n img.imsave(my_buffer, out, format='png')\n return my_buffer.getvalue()\n" ]
[ [ "numpy.array", "numpy.logical_and", "matplotlib.image.imsave" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
luizhss/Gesture_Hand_Controller
[ "705c401f2b922b5b6036cb3eebd8155594878208" ]
[ "delay.py" ]
[ "import numpy as np\nfrom collections import Counter\n\n\nclass Delay():\n \"\"\"\n Delay Frames Class.\n\n This class is responsible to provoke delays on the execution frames.\n Required Arguments:\n classes {list[string], required}: List of class names from the trained classifier.\n\n Keyword Arguments:\n moving_average {float, optional}: minimum percentage of pose prediction\n in the last (frames_in_action or frames_out) frames to\n determine the prediction.\n (Default: {0.8})\n frames_in_action {int, optional}: number of frames to be considered to\n determine a pose when already in an action state.\n (Default: {20})\n frames_out {int, optional}: number of frames to be considered to\n determine a pose when in idle state.\n (Default: {45})\n\n \"\"\"\n\n def __init__(self, classes, moving_average=.8, frames_in_action=20, frames_out=45):\n self.in_action = False\n self.counter_class = np.empty((0))\n self.classes = list(classes)\n self.counter_confidences = np.empty(\n (0, len(classes)), dtype=np.float64)\n self.moving_average = moving_average\n self.frames_in_action = frames_in_action\n self.frames_out = frames_out\n self.ignore_frames = 0\n\n def reset_counter(self, ignore_next_frames=0):\n \"\"\"\n Clear counters arrays and can ignore the next frames\n \"\"\"\n\n self.in_action = False\n self.counter_class = np.empty((0))\n self.counter_confidences = np.empty(\n (0, len(self.classes)), dtype=np.float64)\n\n if ignore_next_frames > 0:\n self.ignore_frames = ignore_next_frames\n\n def get_prediction(self):\n \"\"\"\n Based the last frames, check if the most common prediction respect the\n moving average rule\n \"\"\"\n\n most_common_class, most_common_rep = Counter(\n self.counter_class).most_common(1)[0]\n\n if (self.in_action and most_common_rep >= self.moving_average * self.frames_in_action) or\\\n (not self.in_action and most_common_rep >= self.moving_average * self.frames_out):\n\n if most_common_class == 'Unknown':\n return ('Unknown', 1.0)\n\n idx_cls = 
self.classes.index(most_common_class)\n avg_confidence = self.counter_confidences.mean(axis=0)[idx_cls]\n\n return (most_common_class, avg_confidence)\n\n return ('Unknown', 1.0)\n\n def set_in_action(self, value):\n \"\"\"\n Change the in_action state\n \"\"\"\n\n if self.in_action == value:\n return\n self.in_action = value\n if value:\n self.counter_class = self.counter_class[-self.frames_in_action:]\n self.counter_confidences = self.counter_confidences[-self.frames_in_action:, :]\n\n def update(self, cls, conf=None):\n \"\"\"\n Based on the last frames, compute the most possible prediction and\n its confidence\n \"\"\"\n if conf is None:\n conf = np.zeros((1, len(self.classes)), dtype=np.float64)\n\n if self.ignore_frames > 0:\n self.ignore_frames -= 1\n return (None, None)\n\n self.counter_class = np.append(self.counter_class, cls)\n self.counter_confidences = np.vstack((self.counter_confidences, conf))\n\n if (self.in_action and len(self.counter_class) < self.frames_in_action) or\\\n (not self.in_action and len(self.counter_class) < self.frames_out):\n return (None, None)\n\n self.counter_class = np.delete(self.counter_class, 0)\n self.counter_confidences = np.delete(\n self.counter_confidences, 0, axis=0)\n\n return self.get_prediction()\n" ]
[ [ "numpy.append", "numpy.vstack", "numpy.delete", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sugi-san/BlendGAN
[ "7aff2aad8ee06c13d2ef421f02fa9a56d747ba58" ]
[ "style_transfer_folder.py" ]
[ "import argparse\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\n\nfrom model import Generator\nfrom psp_encoder.psp_encoders import PSPEncoder\nfrom utils import ten2cv, cv2ten\nimport glob\nimport random\n\nseed = 0\n\nrandom.seed(seed)\nnp.random.seed(seed)\ntorch.manual_seed(seed)\ntorch.cuda.manual_seed_all(seed)\n\n\nif __name__ == '__main__':\n device = 'cuda'\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--size', type=int, default=1024)\n\n parser.add_argument('--ckpt', type=str, default='', help='path to BlendGAN checkpoint')\n parser.add_argument('--psp_encoder_ckpt', type=str, default='', help='path to psp_encoder checkpoint')\n\n parser.add_argument('--style_img_path', type=str, default=None, help='path to style image')\n parser.add_argument('--input_img_path', type=str, default=None, help='path to input image')\n parser.add_argument('--add_weight_index', type=int, default=6)\n\n parser.add_argument('--channel_multiplier', type=int, default=2)\n parser.add_argument('--outdir', type=str, default=\"\")\n\n args = parser.parse_args()\n\n outdir = args.outdir\n if not os.path.exists(outdir):\n os.makedirs(outdir, exist_ok=True)\n\n args.latent = 512\n args.n_mlp = 8\n\n checkpoint = torch.load(args.ckpt)\n model_dict = checkpoint['g_ema']\n print('ckpt: ', args.ckpt)\n\n g_ema = Generator(\n args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier\n ).to(device)\n g_ema.load_state_dict(model_dict)\n g_ema.eval()\n\n psp_encoder = PSPEncoder(args.psp_encoder_ckpt, output_size=args.size).to(device)\n psp_encoder.eval()\n\n input_img_paths = sorted(glob.glob(os.path.join(args.input_img_path, '*.*')))\n style_img_paths = sorted(glob.glob(os.path.join(args.style_img_path, '*.*')))[:]\n\n num = 0\n\n for input_img_path in input_img_paths:\n print(num)\n num += 1\n\n name_in = os.path.splitext(os.path.basename(input_img_path))[0]\n img_in = cv2.imread(input_img_path, 1)\n img_in_ten = cv2ten(img_in, device)\n 
img_in = cv2.resize(img_in, (args.size, args.size))\n\n for style_img_path in style_img_paths:\n name_style = os.path.splitext(os.path.basename(style_img_path))[0]\n img_style = cv2.imread(style_img_path, 1)\n img_style_ten = cv2ten(img_style, device)\n img_style = cv2.resize(img_style, (args.size, args.size))\n\n with torch.no_grad():\n sample_style = g_ema.get_z_embed(img_style_ten)\n sample_in = psp_encoder(img_in_ten)\n img_out_ten, _ = g_ema([sample_in], z_embed=sample_style, add_weight_index=args.add_weight_index,\n input_is_latent=True, return_latents=False, randomize_noise=False)\n img_out = ten2cv(img_out_ten)\n out = np.concatenate([img_in, img_style, img_out], axis=1)\n # out = img_out\n cv2.imwrite(f'{args.outdir}/{name_in}_v_{name_style}.jpg', out)\n\n print('Done!')\n\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.manual_seed", "numpy.concatenate", "torch.no_grad", "torch.cuda.manual_seed_all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sjdv1982/seamless
[ "1b814341e74a56333c163f10e6f6ceab508b7df9" ]
[ "examples/BC/load_pdb.py" ]
[ "import Bio.PDB\nfrom io import StringIO\nimport numpy as np\npdb_data = StringIO(pdb)\np = Bio.PDB.PDBParser()\nstruc = p.get_structure(\"pdb\", pdb_data)\ncoors = []\nfor residue in struc.get_residues():\n for atom in residue.get_atoms():\n if atom.name == \"CA\":\n coors.append(atom.coord)\ncoors = np.stack(coors)\nresult = coors.astype(float)\n" ]
[ [ "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YerbaPage/scikit-fem
[ "e5140e0648e4a0f1ea0b60de90851ab49d369453" ]
[ "docs/examples/ex35.py" ]
[ "\"\"\"Computation of the characteristic impedance and velocity factor of RG316\ncoaxial cable.\n\nThis example solves the series inductance (per meter) and parallel capacitance\n(per meter)\nof RG316 coaxial cable. These values are then used to compute the\ncharacteristic impedance and velocity factor of the cable.\n\nFrom transmission line theory it is known, that the characteristic impedance\nof a lossless transmission line is\n\n.. math::\n Z = \\\\sqrt{\\\\frac{L}{C}},\n\nwhile the phase velocity is\n\n.. math::\n v = \\\\frac{1}{\\\\sqrt{L C}},\n\nwhere :math:`L` is the series inductance per unit length of the transmission\nline and :math:`C` is the parallel capacitance per unit length of the\ntransmission line.\n\nFurther, the phase velocity relative to the speed of light is called the\nvelocity factor of the transmission line.\n\nRG316\n-----\n\nA piece of coaxial cable is composed of an inner conductor, which is\nsurrounded by a dielectric insulator.\nThe dielectric insulator in turn is surrounded by an outer conductor. Finally,\nthe outer conductor is surrounded by an outer insulator.\n\nFor an RG316 cable, the dimensions and materials of the components are\n\n- Inner conductor: OD 0.5mm, silver plated copper\n- Inner insulator: OD 1.52mm, PTFE\n- Outer conductor: OD 1.98mm, silver plated copper\n- Outer insulator: OD 2.48mm, PEF\n\nRG316 has a nominal characteristic impedance of 50 ohms and a velocity factor\nof 0.69.\n\nInductance\n----------\n\nInductance of the cable is computed using the magnetostatic equations\n\n.. math::\n \\\\nabla \\\\cdot \\\\boldsymbol{B} = 0\n\n \\\\nabla \\\\times \\\\boldsymbol{H} = \\\\boldsymbol{J}\n\nand the constitutive relation\n\n.. math::\n \\\\boldsymbol{B} = \\\\mu \\\\boldsymbol{H},\n\nwhere :math:`\\\\mu` is the permability of the medium.\n\nSince :math:`\\\\boldsymbol{B}` is divergence free, it can be written in terms\nof a vector potential :math:`\\\\boldsymbol{A}` as\n\n.. 
math::\n \\\\boldsymbol{B} = \\\\nabla \\\\times \\\\boldsymbol{A}.\n\nThus we have the strong form for the vector potential :math:`\\\\boldsymbol{A}`\nas\n\n.. math::\n \\\\nabla \\\\times (\\\\mu^{-1} \\\\nabla \\\\times \\\\boldsymbol{A}) =\n \\\\boldsymbol{J}.\n\nThe corresponding weak form is: find :math:`\\\\boldsymbol{A} \\\\in V` such that\n\n.. math::\n \\\\int_{\\\\Omega}\n (\\\\mu^{-1} \\\\nabla \\\\times \\\\boldsymbol{A}) \\\\cdot\n (\\\\nabla \\\\times \\\\boldsymbol{v})\\\\ \\\\mathrm{d}x -\n \\\\int_{\\\\partial \\\\Omega}\n (\\\\mu^{-1} \\\\nabla \\\\times \\\\boldsymbol{A}) \\\\times\n \\\\boldsymbol{n} \\\\cdot \\\\boldsymbol{v}\\\\ \\\\mathrm{d}x =\n \\\\int_{\\\\Omega} \\\\boldsymbol{J} \\\\cdot \\\\boldsymbol{v}\\\\ \\\\mathrm{d}x\n \\\\quad \\\\forall \\\\boldsymbol{v} \\\\in V.\n\nWe take the boundary condition\n:math:`\\\\boldsymbol{B} \\\\cdot \\\\boldsymbol{n} = 0` on\n:math:`\\\\partial \\\\Omega`, which is equivalent to\n:math:`\\\\boldsymbol{A} = 0` on\n:math:`\\\\partial \\\\Omega`. This is an essential boundary condition, which is\nenforced by the choice of :math:`V`.\nThus we have the final weak form: find :math:`\\\\boldsymbol{A} \\\\in V` such that\n\n.. math::\n \\\\int_{\\\\Omega}\n (\\\\mu^{-1} \\\\nabla \\\\times \\\\boldsymbol{A}) \\\\cdot\n (\\\\nabla \\\\times \\\\boldsymbol{v})\\\\ \\\\mathrm{d}x =\n \\\\int_{\\\\Omega} \\\\boldsymbol{J} \\\\cdot \\\\boldsymbol{v}\\\\ \\\\mathrm{d}x\n \\\\quad \\\\forall \\\\boldsymbol{v} \\\\in V.\n\n\nFor this application :math:`\\\\Omega` is taken to be the cross section of the\ncoaxial cable, and it is assumed that the cable has infinite length. It is\nassumed that the functions :math:`\\\\boldsymbol{J}`, :math:`\\\\boldsymbol{A}` as\nwell as and any :math:`\\\\boldsymbol{v} \\\\in V` depend only on coordinates\n:math:`x_1` and :math:`x_2` in the cross-section plane, and have a nonzero\ncomponent only in the perpendicular direction to the cross-section plane. 
In\nother words, they are assumed to have the following form\n\n.. math::\n \\\\boldsymbol{J} &= J(x_1, x_2) \\\\boldsymbol{e_3}\n\n \\\\boldsymbol{A} &= A(x_1, x_2) \\\\boldsymbol{e_3}\n\n \\\\boldsymbol{v} &= v(x_1, x_2) \\\\boldsymbol{e_3}\n\nThis reduces the problem to two dimensions. Taking the curl of a function of\nthe assumed form and substituting the result in the weak form gives a more\nfamiliar weak form in the cross-section plane: find :math:`A \\\\in V` such that\n\n.. math::\n \\\\int_{\\\\Omega}\n \\\\mu^{-1} (\\\\nabla A \\\\cdot \\\\nabla v)\\\\ \\\\mathrm{d}x =\n \\\\int_{\\\\Omega} J v\\\\ \\\\mathrm{d}x \\\\quad \\\\forall v \\\\in V.\n\n\nIn order to actually compute the inductance (per unit length) of the cable, a\ncurrent is passed through the inner conductor while an equal but opposite\ncurrent is passed through the outer conductor. The energy (per unit length)\nstored in the produced magnetic field is computed as\n\n.. math::\n E = \\\\frac{1}{2} \\\\int_{\\\\Omega} \\\\mu^{-1} |\\\\nabla A|^2\\\\ \\\\mathrm{d}x\n\nHowever, the energy (per unit length) stored in the magnetic field of the\ninductor can also be stated in terms of its inductance (per unit length) as\n\n.. math::\n E = \\\\frac{1}{2} L I^2,\n\nwhere :math:`L` is the inductance (per unit length) and :math:`I` is the\ncurrent passed through the inductor. Thus\n\n.. math::\n L = \\\\frac{2 E}{I^2}\n\n\nCapacitance\n-----------\n\nCapacitance of the cable is computed using the electrostatic equations\n\n.. math::\n \\\\nabla \\\\times \\\\boldsymbol{E} = \\\\boldsymbol{0}\n\n \\\\nabla \\\\cdot \\\\boldsymbol{D} = 0\n\nand the constitutive relation\n\n.. math::\n \\\\boldsymbol{D} = \\\\epsilon \\\\boldsymbol{E},\n\nwhere :math:`\\\\epsilon` is the permittivity of the medium.\n\nSince :math:`\\\\boldsymbol{E}` is curl-free, it can be written in terms of a\nscalar potential :math:`U` as\n\n.. 
math::\n \\\\boldsymbol{E} = -\\\\nabla U\n\nThus we have the strong form for the scalar potential :math:`U` as\n\n.. math::\n -\\\\nabla \\\\cdot (\\\\epsilon \\\\nabla U) = 0.\n\nHowever, this equation is only meaningful in a dielectric medium. In a\nconductor, the electric field is zero, and thus the potential is constant (and\nconceptually :math:`\\\\epsilon \\\\rightarrow \\\\infty`). The conductors need to\nbe excluded from the computation domain.\n\nIn any case, the equation has the familiar weak form: find :math:`U` such that\n\n.. math::\n \\\\int_{\\\\Omega} \\\\epsilon \\\\nabla U \\\\cdot \\\\nabla v\\\\ \\\\mathrm{d}x = 0\n \\\\quad \\\\forall v \\\\in V.\n\nConsider again the cross-section plane of the coaxial cable, with the cable\nitself extending out-of-plane into infinity. Take :math:`U` to depend only on\nthe coordinates :math:`x_1` and :math:`x_2` in the cross-section plane. This\nagain reduces the problem to two dimensions.\n\nDue to conducting media in the cross-section, the problem needs to split into\ntwo domains: the first domain consisting of the inner insulator and the second\ndomain consisting of the space outside the outer conductor. In both domains,\nwe have a non-homogeneous Dirichlet boundary condition for :math:`U` on the\nconductor surfaces, while in the second domain the potential has a homogeneous\nNeumann condition on the free space boundary.\n\nIn order to actually compute the capacitance (per unit length) of the cable, a\npotential is set on the inner conductor while an equal but opposite potential\nis set on the outer conductor. The energy (per unit length) of the produced\nelectic field is computed as\n\n.. math::\n E = \\\\frac{1}{2} \\\\int_{\\\\Omega} \\\\epsilon | \\\\nabla U |^2\\\\ \\\\mathrm{d}x\n\nHowever, the energy (per unit length) stored in the electic field of a\ncapacitor can also be stated in terms of its capacitance (per unit length) as\n\n.. 
math::\n E = \\\\frac{1}{2} C V^2,\n\nwhere :math:`C` is the capacitance (per unit length) and :math:`V` is the\npotential difference across the capacitor. Thus\n\n.. math::\n C = \\\\frac{2 E}{V^2}.\n\n\"\"\"\nfrom packaging import version\nfrom pathlib import Path\n\nfrom skfem.mesh import MeshTri\nfrom skfem.assembly import InteriorBasis, FacetBasis\nfrom skfem.utils import solve, asm, condense, project\nfrom skfem.element import ElementTriP1\nfrom skfem.models.poisson import laplace, unit_load, mass\nfrom skfem.io.json import from_file\n\nimport numpy as np\n\n\nmesh = from_file(Path(__file__).parent / 'meshes' / 'ex35.json')\n\nelement = ElementTriP1()\n\n# permeability of vacuum\nmu0 = 1.25663706212e-6\n# permittivity of vacuum\neps0 = 8.8541878128e-12\n\n# relative permittivity of polytetrafluoroethylene\neps_ptfe = 2.1\n# relative permittivity of fluorinated ethylene propylene\neps_fep = 2.1\n\n\nglobal_basis = InteriorBasis(mesh, element)\ninner_conductor_basis = InteriorBasis(\n mesh, element, elements=mesh.subdomains['inner_conductor'])\nouter_conductor_basis = InteriorBasis(\n mesh, element, elements=mesh.subdomains['outer_conductor'])\ninner_insulator_basis = InteriorBasis(\n mesh, element, elements=mesh.subdomains['inner_insulator'])\nouter_insulator_basis = InteriorBasis(\n mesh, element, elements=mesh.subdomains['outer_insulator'])\n\ninner_conductor_outer_surface_basis = FacetBasis(\n mesh, element, facets=mesh.boundaries['inner_conductor_outer_surface'])\nouter_conductor_inner_surface_basis = FacetBasis(\n mesh, element, facets=mesh.boundaries['outer_conductor_inner_surface'])\n\ndofs = {\n 'boundary':\n global_basis.get_dofs(mesh.boundaries['boundary']),\n 'inner_conductor_outer_surface':\n global_basis.get_dofs(mesh.boundaries['inner_conductor_outer_surface']),\n 'outer_conductor_inner_surface':\n global_basis.get_dofs(mesh.boundaries['outer_conductor_inner_surface'])\n}\n\n# functional to compute the integral of a load vector over the 
domain\nload_integral = solve(asm(mass, global_basis), asm(unit_load, global_basis))\n\n# all materials have a relative permeability of effectively 1\nK_mag = asm(laplace, global_basis) * (1/mu0)\n\n# pass 1A through the conductors\ncurrent = 1\n\nJ_inner_conductor = asm(unit_load, inner_conductor_basis)\n# scale inner conductor current density to have an integral\n# equal to current over domain\nJ_inner_conductor *= current / np.dot(J_inner_conductor, load_integral)\n\nJ_outer_conductor = asm(unit_load, outer_conductor_basis)\n# scale outer conductor current density to have an integral\n# equal to -current over domain\nJ_outer_conductor *= -current / np.dot(J_outer_conductor, load_integral)\n\nA = solve(*condense(\n K_mag, J_inner_conductor + J_outer_conductor, D=dofs['boundary']))\n\n# magnetic field energy from FEM\nE_mag = 0.5*np.dot(A, K_mag*A)\n# energy stored in inductor: E = 0.5*L*I^2\n# thus, L = 2*E/I^2\nL = 2*E_mag / (current**2)\n\nprint(f'L={L} H/m')\n\n# assemble the parts of the stiffness matrix for each material separately\nK_elec_inner_insulator = asm(laplace, inner_insulator_basis) * eps0 * eps_ptfe\nK_elec_outer_insulator = asm(laplace, outer_insulator_basis) * eps0 * eps_fep\n# use dummy value for permittivity, uniform U in conductor\nK_elec_inner_conductor = asm(laplace, inner_conductor_basis) * eps0\n# use dummy value for permittivity, uniform U in conductor\nK_elec_outer_conductor = asm(laplace, outer_conductor_basis) * eps0\n\n# global stiffness matrix is the sum of the subdomain contributions\nK_elec = (\n K_elec_inner_insulator + K_elec_outer_insulator +\n K_elec_inner_conductor + K_elec_outer_conductor)\n\n# set a 1V potential difference between the conductors\nvoltage = 1\n\n# initialize the non-homogeneous Dirichlet conditions on the conductor surfaces\nU = np.zeros(K_elec.shape[0])\nU[dofs['inner_conductor_outer_surface'].all()] = project(\n lambda x: voltage/2, basis_to=inner_conductor_outer_surface_basis,\n 
I=dofs['inner_conductor_outer_surface'])\nU[dofs['outer_conductor_inner_surface'].all()] = project(\n lambda x: -voltage/2, basis_to=outer_conductor_inner_surface_basis,\n I=dofs['outer_conductor_inner_surface'])\n\nU = solve(*condense(\n K_elec, np.zeros(K_elec.shape[1]), U,\n D=dofs['inner_conductor_outer_surface'] |\n dofs['outer_conductor_inner_surface']))\n\n# electric field energy\nE_elec = 0.5*np.dot(U, K_elec*U)\n# energy stored in a capacitor: E = 0.5*C*U^2\n# thus, C = 2*E/(U^2)\nC = 2*E_elec/(voltage**2)\n\nprint(f'C={C} F/m')\n\n# characteristic impedance of coax\nZ = np.sqrt(L/C)\nprint(f'Z={Z} ohm')\n\n# phase velocity factor (fraction of speed of light)\nv = (1/np.sqrt(L*C)) / 299792458\n\nprint(f'v={v} c')\n\nif __name__ == '__main__':\n from os.path import splitext\n from sys import argv\n from skfem.visuals.matplotlib import plot, savefig\n import matplotlib.pyplot as plt\n from skfem.utils import derivative\n\n B_x = derivative(A, global_basis, global_basis, 1)\n B_y = -derivative(A, global_basis, global_basis, 0)\n\n E_x = -derivative(U, global_basis, global_basis, 0)\n E_y = -derivative(U, global_basis, global_basis, 1)\n\n fig = plt.figure(figsize=(11.52, 5.12))\n\n ax1 = plt.subplot(1, 2, 1)\n plot(global_basis, np.sqrt(B_x**2 + B_y**2), ax=ax1, colorbar=True)\n ax1.set_title('Magnetic flux density (Tesla)')\n ax1.set_aspect('equal')\n ax1.set_yticks([])\n\n ax2 = plt.subplot(1, 2, 2)\n plot(global_basis, np.sqrt(E_x**2 + E_y**2), ax=ax2, colorbar=True)\n ax2.set_title('Electric field strength (V/m)')\n ax2.set_aspect('equal')\n ax2.set_yticks([])\n\n savefig(splitext(argv[0])[0] + '_solution.png')\n" ]
[ [ "numpy.dot", "numpy.sqrt", "matplotlib.pyplot.subplot", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
salosyatov/football_stat
[ "a8cb68cd648f65f8dbb383b8795cbd8b0dbac5d3" ]
[ "src/models/train_net_model.py" ]
[ "import logging\nimport sys\nfrom typing import Tuple\n\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nfrom torch.optim import Optimizer\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom .net import Network, TripletLoss, CachedDataset\nfrom ..params import TrainingParams, TestParams\n\nlogger = logging.getLogger(__name__)\nhandler = logging.StreamHandler(sys.stdout)\nlogger.setLevel(logging.INFO)\nlogger.addHandler(handler)\n\n\ndef train_net_model(train_data: Tuple[np.ndarray, np.ndarray, np.ndarray], val_data: Tuple[np.ndarray, np.ndarray, np.ndarray], train_params: TrainingParams) -> Tuple[Network, Optimizer]:\n if train_params.model_net_type == \"TripletLoss\":\n model = Network(emb_dim=train_params.embedding_size)\n else:\n raise NotImplementedError(f\"Net type {train_params.model_net_type} is not supported yet.\")\n\n logger.info(\"Training the net is started.\")\n torch.manual_seed(train_params.seed)\n np.random.seed(train_params.seed)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n train_images, train_labels, train_paths = train_data\n train_ds = CachedDataset(train_images, train_labels, train_paths)\n train_loader = DataLoader(train_ds, batch_size=train_params.batch_size, shuffle=True)\n\n val_images, val_labels, val_paths = val_data\n test_ds = CachedDataset(val_images, val_labels, val_paths)\n test_loader = DataLoader(test_ds, batch_size=train_params.batch_size, shuffle=False, num_workers=4)\n\n def init_weights(m):\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n\n model.apply(init_weights)\n model = torch.jit.script(model).to(device)\n criterion = torch.jit.script(TripletLoss())\n optimizer = optim.Adam(model.parameters(), lr=train_params.lr)\n\n for epoch in tqdm(range(train_params.epochs), desc=\"Epochs\"):\n running_loss = []\n model.train()\n for step, (anchor_img, positive_img, negative_img, anchor_label, path) in enumerate(\n tqdm(train_loader, 
desc=\"Training\", leave=False)):\n anchor_img = anchor_img.to(device)\n positive_img = positive_img.to(device)\n negative_img = negative_img.to(device)\n\n optimizer.zero_grad()\n anchor_out = model(anchor_img)\n positive_out = model(positive_img)\n negative_out = model(negative_img)\n\n loss = criterion(anchor_out, positive_out, negative_out)\n loss.backward()\n optimizer.step()\n\n running_loss.append(loss.cpu().detach().numpy() * anchor_img.shape[0] / len(train_ds))\n if (epoch + 1) % 10 == 0:\n val_running_loss = []\n model.eval()\n with torch.no_grad():\n for step, (anchor_img, positive_img, negative_img, anchor_label, path) in enumerate(\n tqdm(test_loader, desc=\"Evaluating\", leave=False)):\n anchor_img = anchor_img.to(device)\n positive_img = positive_img.to(device)\n negative_img = negative_img.to(device)\n\n anchor_out = model(anchor_img)\n positive_out = model(positive_img)\n negative_out = model(negative_img)\n\n loss = criterion(anchor_out, positive_out, negative_out)\n\n val_running_loss.append(loss.cpu().detach().numpy() * anchor_img.shape[0] / len(test_ds))\n\n print(f\"\\rEpoch: {epoch + 1:3d}/{train_params.epochs:3d} - Train Loss: {np.sum(running_loss):.4f}, Val Loss: {np.sum(val_running_loss):.4f}\", end='')\n logger.info(\"Training the net is finished.\")\n return model, optimizer\n\n\ndef predict_net_model(model: Network, test_images: np.ndarray, test_params: TestParams) -> np.ndarray:\n ds = CachedDataset(test_images, None, None, return_paths=False, mode=\"test\")\n loader = DataLoader(ds, batch_size=test_params.batch_size, shuffle=False)\n\n results = []\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model.eval()\n with torch.no_grad():\n for img, _, _, _, _ in tqdm(loader, desc=\"Predicting...\"):\n results.append(model(img.to(device)).cpu().numpy())\n\n embeddings = np.concatenate(results)\n\n return embeddings\n\n\ndef serialize_net_model(model: Network, optimizer: Optimizer, output: str) -> str:\n 
torch.save({\"model_state_dict\": model.state_dict(), \"optimizer_state_dict\": optimizer.state_dict()}, output)\n return output\n\n" ]
[ [ "torch.jit.script", "numpy.random.seed", "torch.manual_seed", "torch.utils.data.DataLoader", "numpy.concatenate", "torch.no_grad", "torch.cuda.is_available", "numpy.sum", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kyle-rgb/NBA_Sportsbook_Project
[ "0287e2f8482c33eccecfe8e418314ccaad584594" ]
[ "src/data/shark.py" ]
[ "import time, sys, requests, re, bs4, os, json, time\nimport pandas as pd, numpy as np, sqlalchemy as sql, datetime as dt\nfrom concurrent.futures import ThreadPoolExecutor\nfrom selenium import webdriver\nfrom db_info import connection_str\n\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import Column, Integer, Float, String, DateTime\n\nstart = time.time()\nid_regex = re.compile(\"\\d{6,7}\") # -12-2017-{6,7}\ngame_links = []\n\n# Read in List of URL\nwith open(\"../../data/interim/shark_extras.txt\", \"r\") as f:\n game_links = f.read().splitlines()\n# INSERT dfs into MySQL db\n\n# Create Connection\nBase = declarative_base()\nengine = sql.create_engine(connection_str)\nconn = engine.connect()\n# Create db Classes\nclass Time(Base):\n __tablename__ = \"time\"\n id = Column(Integer, primary_key=True)\n timestamp = Column(String(100))\n book = Column(String(50))\n spread = Column(String(100)) # currently in one column: Line (Home)\n total = Column(String(100)) # currently in one column: Over / Under\n game_id = Column(Integer)\n \n\nclass Odds(Base):\n __tablename__ = \"odds\"\n id = Column(Integer, primary_key=True)\n team_abbv = Column(String(3))\n book = Column(String(50))\n moneyline = Column(Integer)\n spread = Column(Float)\n spread_odds = Column(Integer)\n total = Column(Float)\n over_odds = Column(Integer)\n under_odds = Column(Integer)\n game_id = Column(Integer)\n\nclass GameCodes(Base):\n __tablename__ = \"gamecodes\"\n id = Column(Integer, primary_key=True)\n home_abbv = Column(String(3))\n away_abbv = Column(String(3))\n date = Column(DateTime)\n game_id = Column(Integer)\n\nBase.metadata.create_all(engine)\n# Write Function for grabData Async\n # Use session objects and commits for integrity \n\ndef columnHelper(market, i):\n try:\n return market.split(\" \")[i]\n except IndexError:\n print(IndexError)\n return -999\n\ndef mknum(number):\n return -999 if number.lower() in ['', \"pk\", 
\"ev\"] else number\n\ndef mkstr(string):\n return \"X\" if string == \"\" or string == \" \" else string\n\ndef createLinkList(days=7):\n # Use Selenium To Game Specific URLs\n match_index = 0\n matchup_links = []\n driver = webdriver.Chrome(\"../../../../../Python/scraping/chromedriver.exe\")\n driver.get(\"https://www.oddsshark.com/nba/scores\")\n time.sleep(5) # Wait for Data to Appear \n for _ in range(days):\n button = driver.find_element_by_css_selector('button.button--arrow-left')\n wanted_links = driver.find_elements_by_link_text('Matchup')\n for link in wanted_links:\n matchup_links.append(link.get_attribute(\"href\"))\n button.click()\n time.sleep(2)\n\n with open(\"wanted_links.txt\", \"a\") as f:\n for url in matchup_links:\n f.write(url +\"\\n\")\n f.close()\n \n driver.quit()\n\n# These two tasks should be handled functionally.\ndef grabLines(url):\n # Press See More Button\n session = Session(bind=engine)\n response = requests.get(url)\n soup = bs4.BeautifulSoup(response.text, features=\"lxml\")\n tables = soup.select(\"script\")\n table = json.loads(tables[2].text)\n i = 0\n # odds_shark_df = pd.DataFrame(columns=[\"home_abbv\", \"away_abbv\", \"date\", \"game_id\"])\n book_df = pd.DataFrame(columns=[\"team_abbv\", \"book\", \"moneyline\", \"spread\", \"spread_odds\", \"total\", \"over_odds\", \"under_odds\", \"game_id\"])\n table = table[\"oddsshark_gamecenter\"]\n bookmaker_list = table[\"odds\"][\"data\"]\n # Gather Nominal Data\n home_abbv = table[\"matchup\"][\"home_abbreviation\"]\n away_abbv = table[\"matchup\"][\"away_abbreviation\"]\n game_id = table[\"matchup\"][\"event_id\"]\n date = table[\"matchup\"][\"event_date\"]\n\n session.add(GameCodes(home_abbv=home_abbv, away_abbv=away_abbv, date=date, game_id=game_id))\n # Gather Odds Data\n for book in bookmaker_list:\n spread = book[\"money_line_spread\"]\n # keys = [\"home\", \"away\"]\n for k in spread.keys():\n if k == \"home\":\n team_abbv = home_abbv\n else:\n team_abbv= away_abbv\n 
session.add(Odds(team_abbv=team_abbv, book=book[\"book\"][\"book_name\"], over_odds=mknum(book[\"over_under\"][\"over\"]), under_odds=mknum(book[\"over_under\"][\"under\"]),total=mknum(book[\"over_under\"][\"total\"]),\n moneyline=mknum(spread[k][\"money_line\"]), spread=mknum(spread[k][\"spread\"]), spread_odds=mknum(spread[k][\"spread_price\"]), game_id=game_id))\n i+=1 \n session.commit()\n session.close()\n return None\n\ndef grabTimedMarkets(id):\n# Grab Time Based Line Information From Embedded Link in Game Page\n session = Session(bind=engine)\n market_df = pd.DataFrame()\n columns_list = [\"Line (Home)\", \"Over / Under\"]\n response = requests.get(f\"https://www.oddsshark.com/nba/odds/line-history/{str(id)}\")\n soup = bs4.BeautifulSoup(response.text, features=\"lxml\")\n tables = soup.select(\"table\")\n df_list = pd.read_html(str(tables))\n \n for df in df_list:\n for col in df.columns:\n if col not in columns_list:\n df[\"Book\"] = col\n df = df.rename(columns={col: \"timestamp\"})\n market_df = pd.concat([market_df, df])\n \n \n \n market_df = market_df.rename(columns={\"Line (Home)\": \"spread\", \"Over / Under\": \"total\"})\n market_df = market_df.fillna(value=\"X\")\n market_dict_list = market_df.to_dict(orient=\"records\")\n for dict in market_dict_list:\n session.add(Time(timestamp=mkstr(dict[\"timestamp\"]), spread=mkstr(dict[\"spread\"]), total=mkstr(dict[\"total\"]), book=mkstr(dict[\"Book\"]), game_id=id))\n session.commit()\n\n \n session.commit()\n session.close()\n return None\n\n\ndef grabData(url):\n grabLines(url)\n grabTimedMarkets(id_regex.search(url)[0])\n\n#createLinkList(1500)\n# Test For Grabbing All Data from Sample Game\n# grabData(\"https://www.oddsshark.com/nba/denver-utah-odds-october-26-2021-1459586\")\n\nwith ThreadPoolExecutor() as executor:\n executor.map(grabData, game_links) \n\nprint(f\"completed in: {time.time()-start} seconds.\")\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
dngoldberg/oggm
[ "b6e8106abab15940c7968c6b634aa9bc2f9ca419" ]
[ "oggm/core/climate.py" ]
[ "\"\"\"Climate data and mass-balance computations\"\"\"\n# Built ins\nimport logging\nimport os\nimport datetime\nimport warnings\nimport sys\n\n# External libs\nimport numpy as np\nimport xarray as xr\nimport netCDF4\nimport pandas as pd\nfrom scipy import stats\nfrom scipy import optimize as optimization\n\n# Optional libs\ntry:\n import salem\nexcept ImportError:\n pass\n\n# Locals\nfrom oggm import cfg\nfrom oggm import utils\nfrom oggm.core import centerlines\nfrom oggm import entity_task, global_task\nfrom oggm.exceptions import MassBalanceCalibrationError, InvalidParamsError\n\n# Module logger\nlog = logging.getLogger(__name__)\n\n\n@entity_task(log, writes=['climate_historical'])\ndef process_custom_climate_data(gdir, y0=None, y1=None,\n output_filesuffix=None):\n \"\"\"Processes and writes the climate data from a user-defined climate file.\n\n The input file must have a specific format (see\n https://github.com/OGGM/oggm-sample-data ->test-files/histalp_merged_hef.nc\n for an example).\n\n This is the way OGGM used to do it for HISTALP before it got automatised.\n\n Parameters\n ----------\n gdir : :py:class:`oggm.GlacierDirectory`\n the glacier directory to process\n y0 : int\n the starting year of the timeseries to write. The default is to take\n the entire time period available in the file, but with this kwarg\n you can shorten it (to save space or to crop bad data)\n y1 : int\n the starting year of the timeseries to write. 
The default is to take\n the entire time period available in the file, but with this kwarg\n you can shorten it (to save space or to crop bad data)\n output_filesuffix : str\n this add a suffix to the output file (useful to avoid overwriting\n previous experiments)\n \"\"\"\n\n if not (('climate_file' in cfg.PATHS) and\n os.path.exists(cfg.PATHS['climate_file'])):\n raise InvalidParamsError('Custom climate file not found')\n\n if cfg.PARAMS['baseline_climate'] not in ['', 'CUSTOM']:\n raise InvalidParamsError(\"When using custom climate data please set \"\n \"PARAMS['baseline_climate'] to an empty \"\n \"string or `CUSTOM`. Note also that you can \"\n \"now use the `process_histalp_data` task for \"\n \"automated HISTALP data processing.\")\n\n # read the file\n fpath = cfg.PATHS['climate_file']\n nc_ts = salem.GeoNetcdf(fpath)\n\n # set temporal subset for the ts data (hydro years)\n sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]\n em = sm - 1 if (sm > 1) else 12\n yrs = nc_ts.time.year\n y0 = yrs[0] if y0 is None else y0\n y1 = yrs[-1] if y1 is None else y1\n\n nc_ts.set_period(t0='{}-{:02d}-01'.format(y0, sm),\n t1='{}-{:02d}-01'.format(y1, em))\n time = nc_ts.time\n ny, r = divmod(len(time), 12)\n if r != 0:\n raise InvalidParamsError('Climate data should be full years')\n\n # Units\n assert nc_ts._nc.variables['hgt'].units.lower() in ['m', 'meters', 'meter',\n 'metres', 'metre']\n assert nc_ts._nc.variables['temp'].units.lower() in ['degc', 'degrees',\n 'degree', 'c']\n assert nc_ts._nc.variables['prcp'].units.lower() in ['kg m-2', 'l m-2',\n 'mm', 'millimeters',\n 'millimeter']\n\n # geoloc\n lon = nc_ts._nc.variables['lon'][:]\n lat = nc_ts._nc.variables['lat'][:]\n\n ilon = np.argmin(np.abs(lon - gdir.cenlon))\n ilat = np.argmin(np.abs(lat - gdir.cenlat))\n ref_pix_lon = lon[ilon]\n ref_pix_lat = lat[ilat]\n\n # read the data\n temp = nc_ts.get_vardata('temp')\n prcp = nc_ts.get_vardata('prcp')\n hgt = nc_ts.get_vardata('hgt')\n ttemp = temp[:, 
ilat-1:ilat+2, ilon-1:ilon+2]\n itemp = ttemp[:, 1, 1]\n thgt = hgt[ilat-1:ilat+2, ilon-1:ilon+2]\n ihgt = thgt[1, 1]\n thgt = thgt.flatten()\n iprcp = prcp[:, ilat, ilon]\n nc_ts.close()\n\n # Should we compute the gradient?\n use_grad = cfg.PARAMS['temp_use_local_gradient']\n igrad = None\n if use_grad:\n igrad = np.zeros(len(time)) * np.NaN\n for t, loct in enumerate(ttemp):\n slope, _, _, p_val, _ = stats.linregress(thgt,\n loct.flatten())\n igrad[t] = slope if (p_val < 0.01) else np.NaN\n\n gdir.write_monthly_climate_file(time, iprcp, itemp, ihgt,\n ref_pix_lon, ref_pix_lat,\n filesuffix=output_filesuffix,\n gradient=igrad,\n source=fpath)\n\n\n@entity_task(log)\ndef process_climate_data(gdir, y0=None, y1=None, output_filesuffix=None,\n **kwargs):\n \"\"\"Adds the selected climate data to this glacier directory.\n\n Short wrapper deciding on which task to run based on\n `cfg.PARAMS['baseline_climate']`.\n\n If you want to make it explicit, simply call the relevant task\n (e.g. oggm.shop.cru.process_cru_data).\n\n Parameters\n ----------\n gdir : :py:class:`oggm.GlacierDirectory`\n the glacier directory to process\n y0 : int\n the starting year of the timeseries to write. The default is to take\n the entire time period available in the file, but with this kwarg\n you can shorten it (to save space or to crop bad data)\n y1 : int\n the starting year of the timeseries to write. 
The default is to take\n the entire time period available in the file, but with this kwarg\n you can shorten it (to save space or to crop bad data)\n output_filesuffix : str\n this add a suffix to the output file (useful to avoid overwriting\n previous experiments)\n **kwargs :\n any other argument relevant to the task that will be called.\n \"\"\"\n\n print(gdir.rgi_id,file=sys.stderr)\n\n # Which climate should we use?\n baseline = cfg.PARAMS['baseline_climate']\n if baseline == 'CRU':\n from oggm.shop.cru import process_cru_data\n process_cru_data(gdir, output_filesuffix=output_filesuffix,\n y0=y0, y1=y1, **kwargs)\n elif baseline == 'HISTALP':\n from oggm.shop.histalp import process_histalp_data\n process_histalp_data(gdir, output_filesuffix=output_filesuffix,\n y0=y0, y1=y1, **kwargs)\n elif baseline in ['ERA5', 'ERA5L', 'CERA', 'ERA5dr']:\n from oggm.shop.ecmwf import process_ecmwf_data\n process_ecmwf_data(gdir, output_filesuffix=output_filesuffix,\n dataset=baseline, y0=y0, y1=y1, **kwargs)\n elif '+' in baseline:\n # This bit below assumes ECMWF only datasets, but it should be\n # quite easy to extend for HISTALP+ERA5L for example\n from oggm.shop.ecmwf import process_ecmwf_data\n his, ref = baseline.split('+')\n s = 'tmp_'\n process_ecmwf_data(gdir, output_filesuffix=s+his, dataset=his,\n y0=y0, y1=y1, **kwargs)\n process_ecmwf_data(gdir, output_filesuffix=s+ref, dataset=ref,\n y0=y0, y1=y1, **kwargs)\n historical_delta_method(gdir,\n ref_filesuffix=s+ref,\n hist_filesuffix=s+his,\n output_filesuffix=output_filesuffix)\n elif '|' in baseline:\n from oggm.shop.ecmwf import process_ecmwf_data\n his, ref = baseline.split('|')\n s = 'tmp_'\n process_ecmwf_data(gdir, output_filesuffix=s+his, dataset=his,\n y0=y0, y1=y1, **kwargs)\n process_ecmwf_data(gdir, output_filesuffix=s+ref, dataset=ref,\n y0=y0, y1=y1, **kwargs)\n historical_delta_method(gdir,\n ref_filesuffix=s+ref,\n hist_filesuffix=s+his,\n output_filesuffix=output_filesuffix,\n 
                                    replace_with_ref_data=False)
    elif baseline == 'CUSTOM':
        process_custom_climate_data(gdir, y0=y0, y1=y1,
                                    output_filesuffix=output_filesuffix,
                                    **kwargs)
    else:
        raise ValueError("cfg.PARAMS['baseline_climate'] not understood")


@entity_task(log, writes=['climate_historical'])
def historical_delta_method(gdir, ref_filesuffix='', hist_filesuffix='',
                            output_filesuffix='', ref_year_range=None,
                            delete_input_files=True, scale_stddev=True,
                            replace_with_ref_data=True):
    """Applies the anomaly method to historical climate data

    This function can be used to prolongate historical time series,
    for example by bias-correcting CERA-20C to ERA5 or ERA5-Land.

    The timeseries must be already available in the glacier directory

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    ref_filesuffix : str
        the filesuffix of the historical climate data to take as reference
    hist_filesuffix : str
        the filesuffix of the historical climate data to apply to the
        reference
    output_filesuffix : str
        the filesuffix of the output file (usually left empty - i.e. this
        file will become the default)
    ref_year_range : tuple of str
        the year range for which you want to compute the anomalies. The
        default is to take the entire reference data period, but you could
        also choose `('1961', '1990')` for example (currently not
        implemented - see below)
    delete_input_files : bool
        delete the input files after use - useful for operational runs
        where you don't want to carry too many files
    scale_stddev : bool
        whether or not to scale the temperature standard deviation as well
        (you probably want to do that)
    replace_with_ref_data : bool
        the default is to paste the bias-corrected data where no reference
        data is available, i.e. creating timeseries which are not consistent
        in time but "better" for recent times (e.g. CERA-20C until 1980,
        then ERA5). Set this to False to prevent this and make a consistent
        time series of CERA-20C (but bias corrected to the reference data,
        so "better" than CERA-20C out of the box).
    """

    # Only the full reference period is supported so far
    if ref_year_range is not None:
        raise NotImplementedError()

    # Read input
    f_ref = gdir.get_filepath('climate_historical', filesuffix=ref_filesuffix)
    with xr.open_dataset(f_ref) as ds:
        ref_temp = ds['temp']
        ref_prcp = ds['prcp']
        ref_hgt = float(ds.ref_hgt)
        ref_lon = float(ds.ref_pix_lon)
        ref_lat = float(ds.ref_pix_lat)
        source = ds.attrs.get('climate_source')

    f_his = gdir.get_filepath('climate_historical', filesuffix=hist_filesuffix)
    with xr.open_dataset(f_his) as ds:
        hist_temp = ds['temp']
        hist_prcp = ds['prcp']
        # To differentiate both cases in the output metadata:
        # '+' marks a pasted timeseries, '|' a bias-corrected-only one
        if replace_with_ref_data:
            source = ds.attrs.get('climate_source') + '+' + source
        else:
            source = ds.attrs.get('climate_source') + '|' + source

    # Common time period (the sum aligns both series on their overlap)
    cmn_time = (ref_temp + hist_temp)['time']
    assert len(cmn_time) // 12 == len(cmn_time) / 12
    # We need an even number of years for this to work
    if ((len(cmn_time) // 12) % 2) == 1:
        cmn_time = cmn_time.isel(time=slice(12, len(cmn_time)))
    assert len(cmn_time) // 12 == len(cmn_time) / 12
    assert ((len(cmn_time) // 12) % 2) == 0
    cmn_time_range = cmn_time.values[[0, -1]]

    # Select ref
    sref_temp = ref_temp.sel(time=slice(*cmn_time_range))
    sref_prcp = ref_prcp.sel(time=slice(*cmn_time_range))

    # See if we need to scale the variability
    if scale_stddev:
        # This is a bit more arithmetic
        sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
        tmp_sel = hist_temp.sel(time=slice(*cmn_time_range))
        tmp_std = tmp_sel.groupby('time.month').std(dim='time')
        # Per-month scaling factor: ref std / hist std, rolled to align
        # with the hydrological year
        std_fac = sref_temp.groupby('time.month').std(dim='time') / tmp_std
        std_fac = std_fac.roll(month=13-sm, roll_coords=True)
        std_fac = np.tile(std_fac.data, len(hist_temp) // 12)
        win_size = len(cmn_time) + 1

        def roll_func(x, axis=None):
            # x is (time, window); keep one sample per year in the window
            assert axis == 1
            x = x[:, ::12]
            n = len(x[0, :]) // 2
            xm = np.nanmean(x, axis=axis)
            # Scale the deviation from the windowed mean by std_fac
            return xm + (x[:, n] - xm) * std_fac

        hist_temp = hist_temp.rolling(time=win_size, center=True,
                                      min_periods=1).reduce(roll_func)

    # compute monthly anomalies
    # of temp
    ts_tmp_sel = hist_temp.sel(time=slice(*cmn_time_range))
    ts_tmp_avg = ts_tmp_sel.groupby('time.month').mean(dim='time')
    ts_tmp = hist_temp.groupby('time.month') - ts_tmp_avg
    # of precip -- scaled anomalies
    ts_pre_avg = hist_prcp.sel(time=slice(*cmn_time_range))
    ts_pre_avg = ts_pre_avg.groupby('time.month').mean(dim='time')
    ts_pre_ano = hist_prcp.groupby('time.month') - ts_pre_avg
    # scaled anomalies is the default. Standard anomalies above
    # are used later for where ts_pre_avg == 0
    ts_pre = hist_prcp.groupby('time.month') / ts_pre_avg

    # reference averages
    # for temp
    loc_tmp = sref_temp.groupby('time.month').mean()
    ts_tmp = ts_tmp.groupby('time.month') + loc_tmp

    # for prcp
    loc_pre = sref_prcp.groupby('time.month').mean()
    # scaled anomalies
    ts_pre = ts_pre.groupby('time.month') * loc_pre
    # standard anomalies
    ts_pre_ano = ts_pre_ano.groupby('time.month') + loc_pre
    # Correct infinite values with standard anomalies (occurs where the
    # historical monthly precipitation average is zero)
    ts_pre.values = np.where(np.isfinite(ts_pre.values),
                             ts_pre.values,
                             ts_pre_ano.values)
    # The previous step might create negative values (unlikely). Clip them
    ts_pre.values = utils.clip_min(ts_pre.values, 0)

    assert np.all(np.isfinite(ts_pre.values))
    assert np.all(np.isfinite(ts_tmp.values))

    if not replace_with_ref_data:
        # Just write what we have
        gdir.write_monthly_climate_file(ts_tmp.time.values,
                                        ts_pre.values, ts_tmp.values,
                                        ref_hgt, ref_lon, ref_lat,
                                        filesuffix=output_filesuffix,
                                        source=source)
    else:
        # Select all hist data before the ref
        ts_tmp = ts_tmp.sel(time=slice(ts_tmp.time[0], ref_temp.time[0]))
        ts_tmp = ts_tmp.isel(time=slice(0, -1))
        ts_pre = ts_pre.sel(time=slice(ts_tmp.time[0], ref_temp.time[0]))
        ts_pre = ts_pre.isel(time=slice(0, -1))
        # Concatenate and write
        gdir.write_monthly_climate_file(np.append(ts_pre.time, ref_prcp.time),
                                        np.append(ts_pre, ref_prcp),
                                        np.append(ts_tmp, ref_temp),
                                        ref_hgt, ref_lon, ref_lat,
                                        filesuffix=output_filesuffix,
                                        source=source)

    if delete_input_files:
        # Delete all files without suffix
        if ref_filesuffix:
            os.remove(f_ref)
        if hist_filesuffix:
            os.remove(f_his)


@entity_task(log, writes=['climate_historical'])
def historical_climate_qc(gdir):
    """Check the "quality" of climate data and correct it if needed.

    This forces the climate data to have at least one month of melt per year
    at the terminus of the glacier (i.e. simply shifting temperatures up
    when necessary), and at least one month where accumulation is possible
    at the glacier top (i.e.
    shifting the temperatures down).
    """

    # Parameters
    temp_s = (cfg.PARAMS['temp_all_liq'] + cfg.PARAMS['temp_all_solid']) / 2
    temp_m = cfg.PARAMS['temp_melt']
    default_grad = cfg.PARAMS['temp_default_gradient']
    g_minmax = cfg.PARAMS['temp_local_gradient_bounds']
    # Minimum number of qualifying months per year for each check;
    # 0 disables the QC entirely
    qc_months = cfg.PARAMS['climate_qc_months']
    if qc_months == 0:
        return

    # Read file
    fpath = gdir.get_filepath('climate_historical')
    igrad = None
    with utils.ncDataset(fpath) as nc:
        # time
        # Read timeseries
        itemp = nc.variables['temp'][:]
        if 'gradient' in nc.variables:
            igrad = nc.variables['gradient'][:]
            # Security for stuff that can happen with local gradients
            igrad = np.where(~np.isfinite(igrad), default_grad, igrad)
            igrad = utils.clip_array(igrad, g_minmax[0], g_minmax[1])
        ref_hgt = nc.ref_hgt

    # Default gradient?
    if igrad is None:
        igrad = itemp * 0 + default_grad

    ny = len(igrad) // 12
    assert ny == len(igrad) / 12

    # Geometry data
    fls = gdir.read_pickle('inversion_flowlines')
    heights = np.array([])
    for fl in fls:
        heights = np.append(heights, fl.surface_h)
    top_h = np.max(heights)
    bot_h = np.min(heights)

    # First check - there should be at least one month of melt every year
    prev_ref_hgt = ref_hgt
    while True:
        ts_bot = itemp + default_grad * (bot_h - ref_hgt)
        # Count melting months (T > melt threshold) per year at the terminus
        ts_bot = (ts_bot.reshape((ny, 12)) > temp_m).sum(axis=1)
        if np.all(ts_bot >= qc_months):
            # Ok all good
            break
        # put ref hgt a bit higher so that we warm things a bit
        ref_hgt += 10

    # If we changed this it makes no sense to lower it down again,
    # so resume here:
    if ref_hgt != prev_ref_hgt:
        with utils.ncDataset(fpath, 'a') as nc:
            nc.ref_hgt = ref_hgt
            nc.uncorrected_ref_hgt = prev_ref_hgt
        gdir.add_to_diagnostics('ref_hgt_qc_diff', int(ref_hgt - prev_ref_hgt))
        return

    # Second check - there should be at least one month of acc every year
    while True:
        ts_top = itemp + default_grad * (top_h - ref_hgt)
        # Count months cold enough for accumulation at the glacier top
        ts_top = (ts_top.reshape((ny, 12)) < temp_s).sum(axis=1)
        if np.all(ts_top >= qc_months):
            # Ok all good
            break
        # put ref hgt a bit lower so that we cool things a bit
        ref_hgt -= 10

    if ref_hgt != prev_ref_hgt:
        with utils.ncDataset(fpath, 'a') as nc:
            nc.ref_hgt = ref_hgt
            nc.uncorrected_ref_hgt = prev_ref_hgt
        gdir.add_to_diagnostics('ref_hgt_qc_diff', int(ref_hgt - prev_ref_hgt))


def mb_climate_on_height(gdir, heights, *, time_range=None, year_range=None):
    """Mass-balance climate of the glacier at a specific height

    Reads the glacier's monthly climate data file and computes the
    temperature "energies" (temp above 0) and solid precipitation at the
    required height.

    All MB parameters are considered here! (i.e. melt temp, precip scaling
    factor, etc.)

    Parameters
    ----------
    gdir : GlacierDirectory
        the glacier directory
    heights: ndarray
        a 1D array of the heights (in meter) where you want the data
    time_range : [datetime, datetime], optional
        default is to read all data but with this you
        can provide a [t0, t1] bounds (inclusive).
    year_range : [int, int], optional
        Provide a [y0, y1] year range to get the data for specific
        (hydrological) years only.
Easier to use than the time bounds above.\n\n Returns\n -------\n (time, tempformelt, prcpsol)::\n - time: array of shape (nt,)\n - tempformelt: array of shape (len(heights), nt)\n - prcpsol: array of shape (len(heights), nt)\n \"\"\"\n\n if year_range is not None:\n sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]\n em = sm - 1 if (sm > 1) else 12\n t0 = datetime.datetime(year_range[0]-1, sm, 1)\n t1 = datetime.datetime(year_range[1], em, 1)\n return mb_climate_on_height(gdir, heights, time_range=[t0, t1])\n\n # Parameters\n temp_all_solid = cfg.PARAMS['temp_all_solid']\n temp_all_liq = cfg.PARAMS['temp_all_liq']\n temp_melt = cfg.PARAMS['temp_melt']\n prcp_fac = cfg.PARAMS['prcp_scaling_factor']\n default_grad = cfg.PARAMS['temp_default_gradient']\n g_minmax = cfg.PARAMS['temp_local_gradient_bounds']\n\n # Read file\n igrad = None\n with utils.ncDataset(gdir.get_filepath('climate_historical')) as nc:\n # time\n time = nc.variables['time']\n time = netCDF4.num2date(time[:], time.units)\n if time_range is not None:\n p0 = np.where(time == time_range[0])[0]\n try:\n p0 = p0[0]\n except IndexError:\n raise MassBalanceCalibrationError('time_range[0] not found in '\n 'file')\n p1 = np.where(time == time_range[1])[0]\n try:\n p1 = p1[0]\n except IndexError:\n raise MassBalanceCalibrationError('time_range[1] not found in '\n 'file')\n else:\n p0 = 0\n p1 = len(time)-1\n\n time = time[p0:p1+1]\n\n # Read timeseries\n itemp = nc.variables['temp'][p0:p1+1]\n iprcp = nc.variables['prcp'][p0:p1+1]\n if 'gradient' in nc.variables:\n igrad = nc.variables['gradient'][p0:p1+1]\n # Security for stuff that can happen with local gradients\n igrad = np.where(~np.isfinite(igrad), default_grad, igrad)\n igrad = utils.clip_array(igrad, g_minmax[0], g_minmax[1])\n ref_hgt = nc.ref_hgt\n\n # Default gradient?\n if igrad is None:\n igrad = itemp * 0 + default_grad\n\n # Correct precipitation\n iprcp *= prcp_fac\n\n # For each height pixel:\n # Compute temp and tempformelt (temperature 
    # above melting threshold)
    npix = len(heights)
    # Broadcast gradient to (npix, nt) and apply the height offset
    grad_temp = np.atleast_2d(igrad).repeat(npix, 0)
    grad_temp *= (heights.repeat(len(time)).reshape(grad_temp.shape) - ref_hgt)
    temp2d = np.atleast_2d(itemp).repeat(npix, 0) + grad_temp
    temp2dformelt = temp2d - temp_melt
    temp2dformelt = utils.clip_min(temp2dformelt, 0)
    # Compute solid precipitation from total precipitation
    prcpsol = np.atleast_2d(iprcp).repeat(npix, 0)
    # Linear solid fraction between all-solid and all-liquid temperatures
    fac = 1 - (temp2d - temp_all_solid) / (temp_all_liq - temp_all_solid)
    fac = utils.clip_array(fac, 0, 1)
    prcpsol = prcpsol * fac

    return time, temp2dformelt, prcpsol


def mb_yearly_climate_on_height(gdir, heights, *,
                                year_range=None, flatten=False):
    """Yearly mass-balance climate of the glacier at a specific height

    See also: mb_climate_on_height

    Parameters
    ----------
    gdir : GlacierDirectory
        the glacier directory
    heights: ndarray
        a 1D array of the heights (in meter) where you want the data
    year_range : [int, int], optional
        Provide a [y0, y1] year range to get the data for specific
        (hydrological) years only.
    flatten : bool
        for some applications (glacier average MB) it's ok to flatten the
        data (average over height) prior to annual summing.

    Returns
    -------
    (years, tempformelt, prcpsol)::
        - years: array of shape (ny,)
        - tempformelt: array of shape (len(heights), ny) (or ny if flatten
        is set)
        - prcpsol: array of shape (len(heights), ny) (or ny if flatten
        is set)
    """

    time, temp, prcp = mb_climate_on_height(gdir, heights,
                                            year_range=year_range)

    ny, r = divmod(len(time), 12)
    if r != 0:
        raise InvalidParamsError('Climate data should be N full years '
                                 'exclusively')
    # Last year gives the tone of the hydro year
    years = np.arange(time[-1].year-ny+1, time[-1].year+1, 1)

    if flatten:
        # Spatial average
        temp_yr = np.zeros(len(years))
        prcp_yr = np.zeros(len(years))
        temp = np.mean(temp, axis=0)
        prcp = np.mean(prcp, axis=0)
        for i, y in enumerate(years):
            # Sum the 12 months belonging to (hydrological) year y
            temp_yr[i] = np.sum(temp[i*12:(i+1)*12])
            prcp_yr[i] = np.sum(prcp[i*12:(i+1)*12])
    else:
        # Annual prcp and temp for each point (no spatial average)
        temp_yr = np.zeros((len(heights), len(years)))
        prcp_yr = np.zeros((len(heights), len(years)))
        for i, y in enumerate(years):
            temp_yr[:, i] = np.sum(temp[:, i*12:(i+1)*12], axis=1)
            prcp_yr[:, i] = np.sum(prcp[:, i*12:(i+1)*12], axis=1)

    return years, temp_yr, prcp_yr


def mb_yearly_climate_on_glacier(gdir, *, year_range=None):
    """Yearly mass-balance climate at all glacier heights,
    multiplied with the flowlines widths. (all in pix coords.)

    See also: mb_climate_on_height

    Parameters
    ----------
    gdir : GlacierDirectory
        the glacier directory
    year_range : [int, int], optional
        Provide a [y0, y1] year range to get the data for specific
        (hydrological) years only.

    Returns
    -------
    (years, tempformelt, prcpsol)::
        - years: array of shape (ny)
        - tempformelt: array of shape (ny)
        - prcpsol: array of shape (ny)
    """

    flowlines = gdir.read_pickle('inversion_flowlines')

    heights = np.array([])
    widths = np.array([])
    for fl in flowlines:
        heights = np.append(heights, fl.surface_h)
        widths = np.append(widths, fl.widths)

    years, temp, prcp = mb_yearly_climate_on_height(gdir, heights,
                                                    year_range=year_range,
                                                    flatten=False)

    # Width-weighted average over all flowline heights
    temp = np.average(temp, axis=0, weights=widths)
    prcp = np.average(prcp, axis=0, weights=widths)

    return years, temp, prcp


@entity_task(log)
def glacier_mu_candidates(gdir):
    """Computes the mu candidates, glacier wide.

    For each 31 year-period centered on the year of interest, mu is the
    temperature sensitivity necessary for the glacier with its current shape
    to be in equilibrium with its climate.

    This task is just for documentation and testing!
It is not used in\n production anymore.\n\n Parameters\n ----------\n gdir : :py:class:`oggm.GlacierDirectory`\n the glacier directory to process\n \"\"\"\n\n warnings.warn('The task `glacier_mu_candidates` is deprecated. It should '\n 'only be used for testing.', DeprecationWarning)\n\n mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])\n\n # Only get the years were we consider looking for tstar\n y0, y1 = cfg.PARAMS['tstar_search_window']\n ci = gdir.get_climate_info()\n y0 = y0 or ci['baseline_hydro_yr_0']\n y1 = y1 or ci['baseline_hydro_yr_1']\n\n years, temp_yr, prcp_yr = mb_yearly_climate_on_glacier(gdir,\n year_range=[y0, y1])\n\n # Compute mu for each 31-yr climatological period\n ny = len(years)\n mu_yr_clim = np.zeros(ny) * np.NaN\n for i, y in enumerate(years):\n # Ignore begin and end\n if ((i-mu_hp) < 0) or ((i+mu_hp) >= ny):\n continue\n t_avg = np.mean(temp_yr[i-mu_hp:i+mu_hp+1])\n if t_avg > 1e-3: # if too cold no melt possible\n prcp_ts = prcp_yr[i-mu_hp:i+mu_hp+1]\n mu_yr_clim[i] = np.mean(prcp_ts) / t_avg\n\n # Check that we found a least one mustar\n if np.sum(np.isfinite(mu_yr_clim)) < 1:\n raise MassBalanceCalibrationError('({}) no mustar candidates found.'\n .format(gdir.rgi_id))\n\n # Write\n return pd.Series(data=mu_yr_clim, index=years)\n\n\n@entity_task(log)\ndef t_star_from_refmb(gdir, mbdf=None, glacierwide=None):\n \"\"\"Computes the ref t* for the glacier, given a series of MB measurements.\n\n Parameters\n ----------\n gdir : oggm.GlacierDirectory\n mbdf: a pd.Series containing the observed MB data indexed by year\n if None, read automatically from the reference data\n\n Returns\n -------\n A dict: {t_star:[], bias:[], 'avg_mb_per_mu': [], 'avg_ref_mb': []}\n \"\"\"\n\n from oggm.core.massbalance import MultipleFlowlineMassBalance\n\n if glacierwide is None:\n glacierwide = cfg.PARAMS['tstar_search_glacierwide']\n\n # Be sure we have no marine terminating glacier\n assert not gdir.is_tidewater\n\n # Reference time series\n if mbdf is 
None:\n mbdf = gdir.get_ref_mb_data()['ANNUAL_BALANCE']\n\n # which years to look at\n ref_years = mbdf.index.values\n\n # Average oberved mass-balance\n ref_mb = np.mean(mbdf)\n\n # Compute one mu candidate per year and the associated statistics\n # Only get the years were we consider looking for tstar\n y0, y1 = cfg.PARAMS['tstar_search_window']\n ci = gdir.get_climate_info()\n y0 = y0 or ci['baseline_hydro_yr_0']\n y1 = y1 or ci['baseline_hydro_yr_1']\n years = np.arange(y0, y1+1)\n\n ny = len(years)\n mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])\n mb_per_mu = pd.Series(index=years)\n\n if glacierwide:\n # The old (but fast) method to find t*\n _, temp, prcp = mb_yearly_climate_on_glacier(gdir, year_range=[y0, y1])\n\n # which years to look at\n selind = np.searchsorted(years, mbdf.index)\n sel_temp = temp[selind]\n sel_prcp = prcp[selind]\n sel_temp = np.mean(sel_temp)\n sel_prcp = np.mean(sel_prcp)\n\n for i, y in enumerate(years):\n\n # Ignore begin and end\n if ((i - mu_hp) < 0) or ((i + mu_hp) >= ny):\n continue\n\n # Compute the mu candidate\n t_avg = np.mean(temp[i - mu_hp:i + mu_hp + 1])\n if t_avg < 1e-3: # if too cold no melt possible\n continue\n mu = np.mean(prcp[i - mu_hp:i + mu_hp + 1]) / t_avg\n\n # Apply it\n mb_per_mu[y] = np.mean(sel_prcp - mu * sel_temp)\n\n else:\n # The new (but slow) method to find t*\n # Compute mu for each 31-yr climatological period\n fls = gdir.read_pickle('inversion_flowlines')\n for i, y in enumerate(years):\n # Ignore begin and end\n if ((i-mu_hp) < 0) or ((i+mu_hp) >= ny):\n continue\n # Calibrate the mu for this year\n for fl in fls:\n fl.mu_star_is_valid = False\n try:\n # TODO: this is slow and can be highly optimised\n # it reads the same data over and over again\n _recursive_mu_star_calibration(gdir, fls, y, first_call=True)\n # Compute the MB with it\n mb_mod = MultipleFlowlineMassBalance(gdir, fls, bias=0,\n check_calib_params=False)\n mb_ts = mb_mod.get_specific_mb(fls=fls, year=ref_years)\n mb_per_mu[y] = 
np.mean(mb_ts)\n except MassBalanceCalibrationError:\n pass\n\n # Diff to reference\n diff = (mb_per_mu - ref_mb).dropna()\n\n if len(diff) == 0:\n raise MassBalanceCalibrationError('No single valid mu candidate for '\n 'this glacier!')\n\n # Here we used to keep all possible mu* in order to later select\n # them based on some distance search algorithms.\n # (revision 81bc0923eab6301306184d26462f932b72b84117)\n #\n # As of Jul 2018, we will now stop this non-sense:\n # out of all mu*, let's just pick the one with the smallest bias.\n # It doesn't make much sense, but the same is true for other methods\n # as well -> this is how Ben used to do it, and he is clever\n # Another way would be to pick the closest to today or something\n amin = np.abs(diff).idxmin()\n\n # Write\n d = gdir.get_climate_info()\n d['t_star'] = amin\n d['bias'] = diff[amin]\n gdir.write_json(d, 'climate_info')\n\n return {'t_star': amin, 'bias': diff[amin],\n 'avg_mb_per_mu': mb_per_mu, 'avg_ref_mb': ref_mb}\n\n\ndef calving_mb(gdir):\n \"\"\"Calving mass-loss in specific MB equivalent.\n\n This is necessary to compute mu star.\n \"\"\"\n\n if not gdir.is_tidewater:\n return 0.\n\n # Ok. 
Just take the calving rate from cfg and change its units\n # Original units: km3 a-1, to change to mm a-1 (units of specific MB)\n rho = cfg.PARAMS['ice_density']\n return gdir.inversion_calving_rate * 1e9 * rho / gdir.rgi_area_m2\n\n\ndef _fallback_local_t_star(gdir):\n \"\"\"A Fallback function if climate.local_t_star raises an Error.\n\n This function will still write a `local_mustar.json`, filled with NANs,\n if climate.local_t_star fails and cfg.PARAMS['continue_on_error'] = True.\n\n Parameters\n ----------\n gdir : :py:class:`oggm.GlacierDirectory`\n the glacier directory to process\n\n \"\"\"\n # Scalars in a small dict for later\n df = dict()\n df['rgi_id'] = gdir.rgi_id\n df['t_star'] = np.nan\n df['bias'] = np.nan\n df['mu_star_glacierwide'] = np.nan\n gdir.write_json(df, 'local_mustar')\n\n\n@entity_task(log, writes=['local_mustar', 'climate_info'],\n fallback=_fallback_local_t_star)\ndef local_t_star(gdir, *, ref_df=None, tstar=None, bias=None):\n \"\"\"Compute the local t* and associated glacier-wide mu*.\n\n If ``tstar`` and ``bias`` are not provided, they will be interpolated from\n the reference t* list.\n\n Note: the glacier wide mu* is here just for indication. 
It might be\n different from the flowlines' mu* in some cases.\n\n Parameters\n ----------\n gdir : :py:class:`oggm.GlacierDirectory`\n the glacier directory to process\n ref_df : :py:class:`pandas.DataFrame`, optional\n replace the default calibration list with your own.\n tstar: int, optional\n the year where the glacier should be equilibrium\n bias: float, optional\n the associated reference bias\n \"\"\"\n\n # Relevant mb params\n params = ['temp_default_gradient', 'temp_all_solid', 'temp_all_liq',\n 'temp_melt', 'prcp_scaling_factor', 'climate_qc_months']\n\n if tstar is None or bias is None:\n # Do our own interpolation\n if ref_df is None:\n if not cfg.PARAMS['run_mb_calibration']:\n # Make some checks and use the default one\n climate_info = gdir.get_climate_info()\n source = climate_info['baseline_climate_source']\n ok_source = ['CRU TS4.01', 'CRU TS3.23', 'HISTALP']\n if not np.any(s in source.upper() for s in ok_source):\n msg = ('If you are using a custom climate file you should '\n 'run your own MB calibration.')\n raise MassBalanceCalibrationError(msg)\n v = gdir.rgi_version[0] # major version relevant\n\n # Check that the params are fine\n s = 'cru4' if 'CRU' in source else 'histalp'\n vn = 'oggm_ref_tstars_rgi{}_{}_calib_params'.format(v, s)\n # This is for as long as the old files are around\n if 'climate_qc_months' not in cfg.PARAMS[vn]:\n params.remove('climate_qc_months')\n for k in params:\n if cfg.PARAMS[k] != cfg.PARAMS[vn][k]:\n msg = ('The reference t* you are trying to use was '\n 'calibrated with different MB parameters. 
You '\n 'might have to run the calibration manually.')\n raise MassBalanceCalibrationError(msg)\n ref_df = cfg.PARAMS['oggm_ref_tstars_rgi{}_{}'.format(v, s)]\n else:\n # Use the the local calibration\n fp = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars.csv')\n ref_df = pd.read_csv(fp)\n\n # Compute the distance to each glacier\n distances = utils.haversine(gdir.cenlon, gdir.cenlat,\n ref_df.lon, ref_df.lat)\n\n # Take the 10 closest\n aso = np.argsort(distances)[0:9]\n amin = ref_df.iloc[aso]\n distances = distances[aso]**2\n\n # If really close no need to divide, else weighted average\n if distances.iloc[0] <= 0.1:\n tstar = amin.tstar.iloc[0]\n bias = amin.bias.iloc[0]\n else:\n tstar = int(np.average(amin.tstar, weights=1./distances).round())\n bias = np.average(amin.bias, weights=1./distances)\n\n # Add the climate related params to the GlacierDir to make sure\n # other tools cannot fool around without re-calibration\n out = gdir.get_climate_info()\n out['mb_calib_params'] = {k: cfg.PARAMS[k] for k in params}\n gdir.write_json(out, 'climate_info')\n\n # We compute the overall mu* here but this is mostly for testing\n # Climate period\n mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])\n yr = [tstar - mu_hp, tstar + mu_hp]\n\n # Do we have a calving glacier?\n cmb = calving_mb(gdir)\n\n log.info('(%s) local mu* computation for t*=%d', gdir.rgi_id, tstar)\n\n # Get the corresponding mu\n years, temp_yr, prcp_yr = mb_yearly_climate_on_glacier(gdir, year_range=yr)\n assert len(years) == (2 * mu_hp + 1)\n\n # mustar is taking calving into account (units of specific MB)\n mustar = (np.mean(prcp_yr) - cmb) / np.mean(temp_yr)\n if cfg.PARAMS['swarm_mu_method']:\n if not np.isfinite(mustar):\n mustar = (cfg.PARAMS['swarm_mu'])\n\n print('{} defaulting to a generic mu ({}) since it is infinite'.format(gdir.rgi_id, mustar))\n if not (cfg.PARAMS['min_mu_star'] <= mustar <= cfg.PARAMS['max_mu_star']):\n mustar = (cfg.PARAMS['swarm_mu'])\n\n print('{} defaulting to a 
generic mu ({}) since outside the range'.format(gdir.rgi_id, mustar))\n\n else:\n if not np.isfinite(mustar):\n raise MassBalanceCalibrationError('{} has a non finite '\n 'mu'.format(gdir.rgi_id))\n\n # Clip it?\n if cfg.PARAMS['clip_mu_star']:\n mustar = utils.clip_min(mustar, 0)\n\n # If mu out of bounds, raise\n if not (cfg.PARAMS['min_mu_star'] <= mustar <= cfg.PARAMS['max_mu_star']):\n raise MassBalanceCalibrationError('{}: mu* out of specified bounds: '\n '{:.2f}'.format(gdir.rgi_id, mustar))\n\n \n\n # Scalars in a small dict for later\n df = dict()\n df['rgi_id'] = gdir.rgi_id\n df['t_star'] = int(tstar)\n df['bias'] = bias\n df['mu_star_glacierwide'] = (mustar)\n gdir.write_json(df, 'local_mustar')\n\n\ndef _mu_star_per_minimization(x, fls, cmb, temp, prcp, widths):\n\n # Get the corresponding mu\n mus = np.array([])\n for fl in fls:\n mu = fl.mu_star if fl.mu_star_is_valid else x\n mus = np.append(mus, np.ones(fl.nx) * mu)\n\n # TODO: possible optimisation here\n out = np.average(prcp - mus[:, np.newaxis] * temp, axis=0, weights=widths)\n return np.mean(out - cmb)\n\n\ndef _recursive_mu_star_calibration(gdir, fls, t_star, first_call=True,\n force_mu=None):\n\n # Do we have a calving glacier? This is only for the first call!\n # The calving mass-balance is distributed over the valid tributaries of the\n # main line, i.e. 
    # bad tributaries are not considered for calving
    cmb = calving_mb(gdir) if first_call else 0.

    # Climate period
    mu_hp = int(cfg.PARAMS['mu_star_halfperiod'])
    yr_range = [t_star - mu_hp, t_star + mu_hp]

    # Get the corresponding mu
    heights = np.array([])
    widths = np.array([])
    for fl in fls:
        heights = np.append(heights, fl.surface_h)
        widths = np.append(widths, fl.widths)

    _, temp, prcp = mb_yearly_climate_on_height(gdir, heights,
                                                year_range=yr_range,
                                                flatten=False)
    # NOTE(review): local modification - the mu* optimization below can be
    # bypassed by passing force_mu (see the commented-out swarm condition).
    # if force_mu is None and cfg.PARAMS['swarm_mu_method']:
    if force_mu is None:
        try:
            # Find the mu* for which the glacier-wide residual is zero
            mu_star = optimization.brentq(_mu_star_per_minimization,
                                          cfg.PARAMS['min_mu_star'],
                                          cfg.PARAMS['max_mu_star'],
                                          args=(fls, cmb, temp, prcp, widths),
                                          xtol=1e-5)
        except ValueError:
            # This happens in very rare cases
            _mu_lim = _mu_star_per_minimization(cfg.PARAMS['min_mu_star'],
                                                fls, cmb, temp, prcp, widths)
            if _mu_lim < 0 and np.allclose(_mu_lim, 0):
                mu_star = 0.
            else:
                raise MassBalanceCalibrationError('{} mu* out of specified '
                                                  'bounds.'.format(gdir.rgi_id)
                                                  )

        if not np.isfinite(mu_star):
            raise MassBalanceCalibrationError('{} '.format(gdir.rgi_id) +
                                              'has a non finite mu.')
    else:
        mu_star = force_mu

    # Reset flux
    for fl in fls:
        fl.flux = np.zeros(len(fl.surface_h))

    # Flowlines in order to be sure - start with first guess mu*
    for fl in fls:
        y, t, p = mb_yearly_climate_on_height(gdir, fl.surface_h,
                                              year_range=yr_range,
                                              flatten=False)
        mu = fl.mu_star if fl.mu_star_is_valid else mu_star
        fl.set_apparent_mb(np.mean(p, axis=1) - mu*np.mean(t, axis=1),
                           mu_star=mu)

    # Sometimes, low lying tributaries have a non-physically consistent
    # Mass-balance. These tributaries wouldn't exist with a single
    # glacier-wide mu*, and therefore need a specific calibration.
    # All other mus may be affected
    if cfg.PARAMS['correct_for_neg_flux'] and (len(fls) > 1):
        if np.any([fl.flux_needs_correction for fl in fls]):

            # We start with the highest Strahler number that needs correction
            not_ok = np.array([fl.flux_needs_correction for fl in fls])
            fl = np.array(fls)[not_ok][-1]

            # And we take all its tributaries
            inflows = centerlines.line_inflows(fl)

            # We find a new mu for these in a recursive call
            # TODO: this is where a flux kwarg can passed to tributaries
            _recursive_mu_star_calibration(gdir, inflows, t_star,
                                           first_call=False)

            # At this stage we should be ok
            assert np.all([~ fl.flux_needs_correction for fl in inflows])
            for fl in inflows:
                fl.mu_star_is_valid = True

            # After the above are OK we have to recalibrate all below
            _recursive_mu_star_calibration(gdir, fls, t_star,
                                           first_call=first_call)

    # At this stage we are good
    for fl in fls:
        fl.mu_star_is_valid = True


def _fallback_mu_star_calibration(gdir):
    """A Fallback function if climate.mu_star_calibration raises an Error.

    This function will still read, expand and write a `local_mustar.json`,
    filled with NANs, if climate.mu_star_calibration fails
    and if cfg.PARAMS['continue_on_error'] = True.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process

    """
    # read json
    df = gdir.read_json('local_mustar')
    # add these keys which mu_star_calibration would add
    df['mu_star_per_flowline'] = [np.nan]
    df['mu_star_flowline_avg'] = np.nan
    df['mu_star_allsame'] = np.nan
    # write
    gdir.write_json(df, 'local_mustar')


@entity_task(log, writes=['inversion_flowlines'],
             fallback=_fallback_mu_star_calibration)
def mu_star_calibration(gdir):
    """Compute the flowlines' mu* and the associated apparent mass-balance.

    If low lying tributaries have a
    non-physically consistent Mass-balance
    this function will either filter them out or calibrate each flowline with a
    specific mu*. The latter is default and recommended.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    """

    # Interpolated data
    df = gdir.read_json('local_mustar')
    t_star = df['t_star']
    bias = df['bias']

    # For each flowline compute the apparent MB
    fls = gdir.read_pickle('inversion_flowlines')
    # If someone calls the task a second time we need to reset this
    for fl in fls:
        fl.mu_star_is_valid = False

    force_mu = 0 if df['mu_star_glacierwide'] == 0 else None
    # Added in for swarm override (local modification): also force mu when
    # the glacier-wide mu* was set to the generic swarm value by local_t_star
    if df['mu_star_glacierwide'] == cfg.PARAMS['swarm_mu']:
        force_mu = df['mu_star_glacierwide']
        # print('Forcing mu during calibration')

    # Let's go
    _recursive_mu_star_calibration(gdir, fls, t_star, force_mu=force_mu)

    # If the user wants to filter the bad ones we remove them and start all
    # over again until all tributaries are physically consistent with one mu
    # This should only work if cfg.PARAMS['correct_for_neg_flux'] == False
    do_filter = [fl.flux_needs_correction for fl in fls]
    if cfg.PARAMS['filter_for_neg_flux'] and np.any(do_filter):
        assert not do_filter[-1]  # This should not happen
        # Keep only the good lines
        # TODO: this should use centerline.line_inflows for more efficiency!
        heads = [fl.orig_head for fl in fls if not fl.flux_needs_correction]
        centerlines.compute_centerlines(gdir, heads=heads, reset=True)
        centerlines.initialize_flowlines(gdir, reset=True)
        if gdir.has_file('downstream_line'):
            centerlines.compute_downstream_line(gdir, reset=True)
            centerlines.compute_downstream_bedshape(gdir, reset=True)
        centerlines.catchment_area(gdir, reset=True)
        centerlines.catchment_intersections(gdir, reset=True)
        centerlines.catchment_width_geom(gdir, reset=True)
        centerlines.catchment_width_correction(gdir, reset=True)
        local_t_star(gdir, tstar=t_star, bias=bias, reset=True)
        # Ok, re-call ourselves
        return mu_star_calibration(gdir, reset=True)

    # Check and write
    rho = cfg.PARAMS['ice_density']
    # Residual flux at the terminus; the consistency checks using it are
    # currently disabled (see commented block below)
    aflux = fls[-1].flux[-1] * 1e-9 / rho * gdir.grid.dx**2
    # If not marine and a bit far from zero, warning
    # cmb = calving_mb(gdir)
    # if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=0.01):
    #     log.info('(%s) flux should be zero, but is: '
    #              '%.4f km3 ice yr-1', gdir.rgi_id, aflux)
    # If not marine and quite far from zero, error
    # if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=1):
    #     msg = ('({}) flux should be zero, but is: {:.4f} km3 ice yr-1'
    #            .format(gdir.rgi_id, aflux))
    #     raise MassBalanceCalibrationError(msg)
    gdir.write_pickle(fls, 'inversion_flowlines')

    # Store diagnostics
    mus = []
    weights = []
    for fl in fls:
        mus.append(fl.mu_star)
        weights.append(np.sum(fl.widths))
    df['mu_star_per_flowline'] = mus
    df['mu_star_flowline_avg'] = np.average(mus, weights=weights)
    all_same = np.allclose(mus, mus[0], atol=1e-3)
    df['mu_star_allsame'] = all_same
    if all_same:
        if not np.allclose(df['mu_star_flowline_avg'],
                           df['mu_star_glacierwide'],
                           atol=1e-3):
            raise MassBalanceCalibrationError('Unexpected difference between '
                                              'glacier wide mu* and the '
                                              'flowlines mu*.')
    # Write
    gdir.write_json(df, 'local_mustar')


@entity_task(log, writes=['inversion_flowlines', 'linear_mb_params'])
def apparent_mb_from_linear_mb(gdir, mb_gradient=3., ela_h=None):
    """Compute apparent mb from a linear mass-balance assumption (for testing).

    This is for testing currently, but could be used as alternative method
    for the inversion quite easily.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        the glacier directory to process
    mb_gradient : float
        the gradient of the linear mass-balance model
    ela_h : float, optional
        the ELA to use. The default is to search for it (see below)
    """

    # Do we have a calving glacier?
    cmb = calving_mb(gdir)

    # Get the height and widths along the fls
    h, w = gdir.get_inversion_flowline_hw()

    # Now find the ELA till the integrated
mb is zero\n from oggm.core.massbalance import LinearMassBalance\n\n def to_minimize(ela_h):\n mbmod = LinearMassBalance(ela_h[0], grad=mb_gradient)\n smb = mbmod.get_specific_mb(heights=h, widths=w)\n return (smb - cmb)**2\n\n if ela_h is None:\n ela_h = optimization.minimize(to_minimize, [0.], bounds=((0, 10000), ))\n ela_h = ela_h['x'][0]\n\n mbmod = LinearMassBalance(ela_h, grad=mb_gradient)\n\n # For each flowline compute the apparent MB\n fls = gdir.read_pickle('inversion_flowlines')\n\n # Reset flux\n for fl in fls:\n fl.flux = np.zeros(len(fl.surface_h))\n\n # Flowlines in order to be sure\n rho = cfg.PARAMS['ice_density']\n for fl in fls:\n mbz = mbmod.get_annual_mb(fl.surface_h) * cfg.SEC_IN_YEAR * rho\n fl.set_apparent_mb(mbz)\n\n # Check and write\n aflux = fls[-1].flux[-1] * 1e-9 / rho * gdir.grid.dx**2\n # If not marine and a bit far from zero, warning\n if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=0.01):\n log.info('(%s) flux should be zero, but is: '\n '%.4f km3 ice yr-1', gdir.rgi_id, aflux)\n # If not marine and quite far from zero, error\n if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=1):\n msg = ('({}) flux should be zero, but is: {:.4f} km3 ice yr-1'\n .format(gdir.rgi_id, aflux))\n raise MassBalanceCalibrationError(msg)\n gdir.write_pickle(fls, 'inversion_flowlines')\n gdir.write_pickle({'ela_h': ela_h, 'grad': mb_gradient},\n 'linear_mb_params')\n\n\n@entity_task(log, writes=['inversion_flowlines'])\ndef apparent_mb_from_any_mb(gdir, mb_model=None, mb_years=None):\n \"\"\"Compute apparent mb from an arbitrary mass-balance profile.\n\n This searches for a mass-balance residual to add to the mass-balance\n profile so that the average specific MB is zero.\n\n Parameters\n ----------\n gdir : :py:class:`oggm.GlacierDirectory`\n the glacier directory to process\n mb_model : :py:class:`oggm.core.massbalance.MassBalanceModel`\n the mass-balance model to use (superseedes mb_list)\n mb_years : array\n the array of years from 
which you want to average the MB for (for\n mb_model only).\n \"\"\"\n\n # Do we have a calving glacier?\n cmb = calving_mb(gdir)\n\n # For each flowline compute the apparent MB\n fls = gdir.read_pickle('inversion_flowlines')\n\n def to_minimize(residual):\n smb = mb_model.get_specific_mb(fls=fls, year=mb_years)\n smb = np.mean(smb) + residual[0]\n return (smb - cmb)**2\n\n residual = optimization.minimize(to_minimize, [0.], bounds=((-1e5, 1e5), ))\n residual = residual['x'][0]\n\n # Reset flux\n for fl in fls:\n fl.flux = np.zeros(len(fl.surface_h))\n\n # Flowlines in order to be sure\n rho = cfg.PARAMS['ice_density']\n for fl_id, fl in enumerate(fls):\n mbz = 0\n for yr in mb_years:\n mbz += mb_model.get_annual_mb(fl.surface_h, year=yr,\n fls=fls, fl_id=fl_id)\n mbz = mbz / len(mb_years)\n fl.set_apparent_mb(mbz * cfg.SEC_IN_YEAR * rho + residual)\n\n # Check and write\n aflux = fls[-1].flux[-1] * 1e-9 / rho * gdir.grid.dx**2\n # If not marine and a bit far from zero, warning\n if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=0.01):\n log.info('(%s) flux should be zero, but is: '\n '%.4f km3 ice yr-1', gdir.rgi_id, aflux)\n # If not marine and quite far from zero, error\n if cmb == 0 and not np.allclose(fls[-1].flux[-1], 0., atol=1):\n msg = ('({}) flux should be zero, but is: {:.4f} km3 ice yr-1'\n .format(gdir.rgi_id, aflux))\n raise MassBalanceCalibrationError(msg)\n gdir.write_pickle(fls, 'inversion_flowlines')\n\n\n@global_task\ndef compute_ref_t_stars(gdirs):\n \"\"\" Detects the best t* for the reference glaciers and writes them to disk\n\n This task will be needed for mass balance calibration of custom climate\n data. 
For CRU and HISTALP baseline climate a precalibrated list is\n available and should be used instead.\n\n Parameters\n ----------\n gdirs : list of :py:class:`oggm.GlacierDirectory` objects\n will be filtered for reference glaciers\n \"\"\"\n\n if not cfg.PARAMS['run_mb_calibration']:\n raise InvalidParamsError('Are you sure you want to calibrate the '\n 'reference t*? There is a pre-calibrated '\n 'version available. If you know what you are '\n 'doing and still want to calibrate, set the '\n '`run_mb_calibration` parameter to `True`.')\n\n log.info('Compute the reference t* and mu* for WGMS glaciers')\n\n # Should be iterable\n gdirs = utils.tolist(gdirs)\n\n # Reference glaciers only if in the list and period is good\n ref_gdirs = utils.get_ref_mb_glaciers(gdirs)\n\n # Run\n from oggm.workflow import execute_entity_task\n out = execute_entity_task(t_star_from_refmb, ref_gdirs)\n\n # Loop write\n df = pd.DataFrame()\n for gdir, res in zip(ref_gdirs, out):\n if res is None:\n # For certain parameters there is no valid mu candidate on certain\n # glaciers. E.g. if temp is to low for melt. This will raise an\n # error in t_star_from_refmb and should only get here if\n # continue_on_error = True\n # Do not add this glacier to the ref_tstar.csv\n # Think of better solution later\n continue\n\n # list of mus compatibles with refmb\n rid = gdir.rgi_id\n df.loc[rid, 'lon'] = gdir.cenlon\n df.loc[rid, 'lat'] = gdir.cenlat\n df.loc[rid, 'n_mb_years'] = len(gdir.get_ref_mb_data())\n df.loc[rid, 'tstar'] = res['t_star']\n df.loc[rid, 'bias'] = res['bias']\n\n # Write out\n df['tstar'] = df['tstar'].astype(int)\n df['n_mb_years'] = df['n_mb_years'].astype(int)\n file = os.path.join(cfg.PATHS['working_dir'], 'ref_tstars.csv')\n df.sort_index().to_csv(file)\n" ]
[ [ "pandas.Series", "pandas.DataFrame", "numpy.all", "numpy.max", "numpy.mean", "numpy.any", "numpy.searchsorted", "numpy.nanmean", "numpy.where", "pandas.read_csv", "numpy.allclose", "numpy.arange", "scipy.optimize.brentq", "numpy.zeros", "numpy.min", "numpy.atleast_2d", "numpy.append", "scipy.optimize.minimize", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.abs", "numpy.isfinite", "numpy.ones", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
sambit-giri/emulator
[ "daa8bc9df5ec804052423b11d0a1c7252a75f850" ]
[ "src/gaussian_process.py" ]
[ "import numpy as np\nfrom sklearn.metrics import r2_score\nimport pickle\nfrom . import helper_functions as hf\nfrom time import time\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\ntry: import GPy\nexcept: print('Install GPy to use GPR_GPy and SparseGPR_GPy.')\n\ntry: import torch\nexcept: print('Install PyTorch.')\n\ntry:\n\timport pyro\n\timport pyro.contrib.gp as gp\n\timport pyro.distributions as dist\nexcept:\n\tprint('Install Pyro to use GPR_pyro.')\n\ntry:\n\timport gpytorch\nexcept:\n\tprint('Install gpytorch to use GPR_GPyTorch.')\n\n\nclass GPR_GPy:\n def __init__(self, max_iter=1000, max_f_eval=1000, kernel=None, verbose=True, n_restarts_optimizer=5, n_jobs=0):\n # define kernel\n self.kernel = kernel\n self.max_iter = max_iter\n self.max_f_eval = max_f_eval\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.n_restarts_optimizer = n_restarts_optimizer\n \n def fit(self, X_train, y_train):\n # check kernel\n if self.kernel is None:\n print('Setting kernel to Matern32.')\n input_dim = X_train.shape[1]\n # self.kernel = GPy.kern.Matern52(input_dim,ARD=True)\n self.kernel = GPy.kern.Matern32(input_dim,ARD=True)\n # create simple GP model\n self.m = GPy.models.GPRegression(X_train,y_train,self.kernel)\n # optimize\n if self.n_restarts_optimizer:\n self.m.optimize_restarts(\n num_restarts=self.n_restarts_optimizer,\n robust=False,\n #verbose=self.verbose,\n messages=self.verbose,\n parallel=True if self.n_jobs else False,\n num_processes=self.n_jobs if self.n_jobs else None,\n max_f_eval=self.max_f_eval,\n max_iters=self.max_iter,\n )\n else:\n self.m.optimize(messages=self.verbose, max_f_eval=self.max_f_eval)\n \n def predict(self, X_test, return_std=False):\n y_pred, y_var = self.m.predict(X_test)\n if return_std: return y_pred, np.sqrt(y_var)\n return y_pred\n \n def score(self, X_test, y_test):\n y_pred, y_std = self.m.predict(X_test)\n scr = r2_score(y_test, y_pred)\n return scr\n\n def 
save_model(self, filename, save_trainset=True):\n # np.save(filename, self.m.param_array)\n save_dict = {'kernel': self.m.kern.to_dict(), 'param_array': self.m.param_array}\n if save_trainset:\n save_dict['X'] = np.array(self.m.X)\n save_dict['Y'] = np.array(self.m.Y)\n pickle.dump(save_dict, open(filename, 'wb'))\n print('Model parameters are saved.')\n\n def load_model(self, filename, X=None, Y=None):\n load_dict = pickle.load(open(filename, 'rb'))\n self.kernel = GPy.kern.Kern.from_dict(load_dict['kernel'])\n # self.num_inducing = load_dict['num_inducing']\n if 'X' in load_dict.keys() and 'Y' in load_dict.keys():\n X = load_dict['X']\n Y = load_dict['Y']\n else:\n print('The file does not contain the training data.')\n print('Please provide it to the load_model through X and Y parameters.')\n return None\n \n m_load = GPy.models.GPRegression(X, Y, initialize=False, kernel=self.kernel)\n m_load.update_model(False)\n m_load.initialize_parameter()\n m_load[:] = load_dict['param_array']\n m_load.update_model(True)\n self.m = m_load\n return m_load\n\n\nclass SparseGPR_GPy:\n def __init__(self, max_iter=1000, max_f_eval=1000, kernel=None, verbose=True, n_restarts_optimizer=5, n_jobs=0, num_inducing=10):\n # define kernel\n self.kernel = kernel\n self.max_iter = max_iter\n self.max_f_eval = max_f_eval\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.n_restarts_optimizer = n_restarts_optimizer\n self.num_inducing = num_inducing\n\n def setup_model(self, X_train, y_train):\n input_dim = X_train.shape[1]\n # check kernel\n if self.kernel is None:\n print('Setting kernel to Matern32.')\n # self.kernel = GPy.kern.Matern52(input_dim,ARD=True)\n self.kernel = GPy.kern.Matern32(input_dim,ARD=True)\n\n # define inducing points\n # self.Z = np.random.rand(self.num_inducing,input_dim)*(X_train.max(axis=0)-X_train.min(axis=0))+X_train.min(axis=0)\n\n # create simple GP model\n # self.m = GPy.models.SparseGPRegression(X_train,y_train,Z=self.Z,kernel=self.kernel)\n self.m = 
GPy.models.SparseGPRegression(X_train,y_train,num_inducing=self.num_inducing,kernel=self.kernel)\n\n def fit(self, X_train, y_train):\n self.setup_model(X_train, y_train)\n \n # optimize\n if self.n_restarts_optimizer:\n self.m.optimize_restarts(\n num_restarts=self.n_restarts_optimizer,\n robust=False,\n #verbose=self.verbose,\n messages=self.verbose,\n parallel=True if self.n_jobs else False,\n num_processes=self.n_jobs if self.n_jobs else None,\n max_f_eval=self.max_f_eval,\n max_iters=self.max_iter,\n )\n else:\n self.m.optimize(messages=self.verbose, max_f_eval=self.max_f_eval)\n # if self.verbose:\n # print(self.m)\n return self.m\n \n def predict(self, X_test, return_std=False):\n y_pred, y_std = self.m.predict(X_test)\n if return_std: return y_pred, y_std\n return y_pred\n \n def score(self, X_test, y_test):\n y_pred, y_std = self.m.predict(X_test)\n scr = r2_score(y_test, y_pred)\n return scr\n\n def save_model(self, filename, save_trainset=True):\n \t# np.save(filename, self.m.param_array)\n \tsave_dict = {'kernel': self.m.kern.to_dict(), 'param_array': self.m.param_array, 'num_inducing': self.num_inducing}\n \tif save_trainset:\n \t\tsave_dict['X'] = np.array(self.m.X)\n \t\tsave_dict['Y'] = np.array(self.m.Y)\n \tpickle.dump(save_dict, open(filename, 'wb'))\n \tprint('Model parameters are saved.')\n\n def load_model(self, filename, X=None, Y=None):\n \tload_dict = pickle.load(open(filename, 'rb'))\n \tself.kernel = GPy.kern.Kern.from_dict(load_dict['kernel'])\n \tself.num_inducing = load_dict['num_inducing']\n \tif 'X' in load_dict.keys() and 'Y' in load_dict.keys():\n \t\tX = load_dict['X']\n \t\tY = load_dict['Y']\n \telse:\n \t\tprint('The file does not contain the training data.')\n \t\tprint('Please provide it to the load_model through X and Y parameters.')\n \t\treturn None\n \t\n \tm_load = GPy.models.SparseGPRegression(X, Y, initialize=False, num_inducing=self.num_inducing, kernel=self.kernel)\n \tm_load.update_model(False)\n 
\tm_load.initialize_parameter()\n \tm_load[:] = load_dict['param_array']\n \tm_load.update_model(True)\n \tself.m = m_load\n \treturn m_load\n \t\n\nclass SVGPR_GPy:\n def __init__(self, max_iter=1000, max_f_eval=1000, kernel=None, verbose=True, n_restarts_optimizer=5, n_jobs=0, num_inducing=10):\n # define kernel\n self.kernel = kernel\n self.max_iter = max_iter\n self.max_f_eval = max_f_eval\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.n_restarts_optimizer = n_restarts_optimizer\n self.num_inducing = num_inducing\n \n def fit(self, X_train, y_train):\n input_dim = X_train.shape[1]\n # check kernel\n if self.kernel is None:\n print('Setting kernel to Matern32.')\n # self.kernel = GPy.kern.Matern52(input_dim,ARD=True)\n self.kernel = GPy.kern.Matern32(input_dim,ARD=True)\n\n # define inducing points\n #self.Z = np.random.rand(self.num_inducing,input_dim)*(X_train.max(axis=0)-X_train.min(axis=0))+X_train.min(axis=0)\n\n # create simple GP model\n self.m = GPy.models.SparseGPRegression(X,y,num_inducing=self.num_inducing,kernel=self.kernel)\n\n # optimize\n if self.n_restarts_optimizer:\n self.m.optimize_restarts(\n num_restarts=self.n_restarts_optimizer,\n robust=False,\n #verbose=self.verbose,\n messages=self.verbose,\n parallel=True if self.n_jobs else False,\n num_processes=self.n_jobs if self.n_jobs else None,\n max_f_eval=self.max_f_eval,\n max_iters=self.max_iter,\n )\n else:\n self.m.optimize(messages=self.verbose, max_f_eval=self.max_f_eval)\n \n def predict(self, X_test, return_std=False):\n y_pred, y_std = self.m.predict(X_test)\n if return_std: return y_pred, y_std\n return y_pred\n \n def score(self, X_test, y_test):\n y_pred, y_std = self.m.predict(X_test)\n scr = r2_score(y_test, y_pred)\n return scr\n\nclass GPR_pyro:\n\tdef __init__(self, max_iter=1000, tol=0.01, kernel=None, loss_fn=None, verbose=True, n_restarts_optimizer=5, n_jobs=0, estimate_method='MLE', learning_rate=1e-3):\n\t\t# define kernel\n\t\tself.kernel = 
kernel\n\t\tself.max_iter = max_iter\n\t\tself.verbose = verbose\n\t\tself.n_jobs = n_jobs\n\t\tself.n_restarts_optimizer = n_restarts_optimizer\n\t\tself.estimate_method = estimate_method\n\t\tself.learning_rate = learning_rate\n\t\tself.loss_fn = loss_fn\n\t\tself.tol = tol\n\n\tdef fit(self, train_x, train_y):\n\t\tif type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x)\n\t\tif type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y)\n\t\t# check kernel\n\t\tif self.kernel is None:\n\t\t\tprint('Setting kernel to Matern32.')\n\t\t\tinput_dim = train_x.shape[1]\n\t\t\tself.kernel = gp.kernels.Matern32(input_dim, variance=None, lengthscale=None, active_dims=None)\n\n\t\t# create simple GP model\n\t\tself.model = gp.models.GPRegression(train_x, train_y, self.kernel)\n\n\t\t# optimize\n\t\tself.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)\n\t\tif self.loss_fn is None: self.loss_fn = pyro.infer.Trace_ELBO().differentiable_loss\n\t\tself.losses = np.array([])\n\t\tn_wait, max_wait = 0, 5\n\n\t\tfor i in range(self.max_iter):\n\t\t\tself.optimizer.zero_grad()\n\t\t\tloss = self.loss_fn(self.model.model, self.model.guide)\n\t\t\tloss.backward()\n\t\t\tself.optimizer.step()\n\t\t\tself.losses = np.append(self.losses,loss.item()) \n\t\t\tprint(i+1, loss.item())\n\t\t\tdloss = self.losses[-1]-self.losses[-2] \t\t\t\n\t\t\tif 0<=dloss and dloss<self.tol: n_wait += 1\n\t\t\telse: n_wait = 0\n\t\t\tif self.n_wait>=self.max_wait: break\n\n\tdef predict(self, X_test, return_std=True, return_cov=False):\n\t\ty_mean, y_cov = self.model(X_test, full_cov=True, noiseless=False)\n\n\t\tif return_std: \n\t\t\ty_std = cov.diag().sqrt()\n\t\t\treturn y_pred, y_std\n\t\tif return_cov: return y_pred, y_cov\n\t\treturn y_pred\n\n\tdef score(self, X_test, y_test):\n\t\ty_pred = self.predict(X_test, return_std=False, return_cov=False)\n\t\tscr = r2_score(y_test, y_pred)\n\t\treturn scr\n\nclass SparseGPR_pyro:\n def __init__(self, 
max_iter=1000, tol=0.001, kernel=None, error_fn=None, loss_fn=None, verbose=True, n_Xu=10, n_jobs=0, estimate_method='MLE', learning_rate=1e-3, method='VFE', n_restarts_optimizer=5, validation=0.1):\n # define kernel\n self.kernel = kernel\n self.max_iter = max_iter\n self.verbose = verbose\n self.n_jobs = n_jobs\n self.n_restarts_optimizer = n_restarts_optimizer\n self.estimate_method = estimate_method\n self.learning_rate = learning_rate\n self.loss_fn = loss_fn\n self.tol = tol\n self.n_Xu = n_Xu\n self.method = method\n self.error_fn = mean_squared_error if error_fn is None else error_fn\n self.validation = validation\n\n # # Initialise output\n self.model = None\n self.losses = None\n self.optimizer = None\n self.continue_run = False\n self.train_err = None\n self.valid_err = None\n\n def fit_1out(self, train_x, train_y, n_Xu=None, past_info=None):\n if n_Xu is not None: self.n_Xu = n_Xu\n\n if self.validation is not None:\n if type(train_x)!=np.ndarray: train_x = train_x.detach().numpy()\n if type(train_y)!=np.ndarray: train_y = train_y.detach().numpy()\n train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=self.validation, random_state=42)\n valid_x = torch.from_numpy(valid_x)\n\n if type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x)\n if type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y)\n # check kernel\n if self.kernel is None:\n print('Setting kernel to Matern32.')\n input_dim = train_x.shape[1]\n self.kernel = gp.kernels.Matern32(input_dim, variance=None, lengthscale=None, active_dims=None)\n\n self.Xu = np.linspace(train_x.min(axis=0)[0].data.numpy(), train_x.max(axis=0)[0].data.numpy(), self.n_Xu)\n self.Xu = torch.from_numpy(self.Xu)\n\n # create simple GP model\n model = gp.models.SparseGPRegression(train_x, train_y, self.kernel, Xu=self.Xu, jitter=1.0e-5, approx=self.method) if past_info is None else past_info['model']\n\n # optimize\n optimizer = torch.optim.Adam(model.parameters(), 
lr=self.learning_rate) if past_info is None else past_info['optimizer']\n if self.loss_fn is None: self.loss_fn = pyro.infer.Trace_ELBO().differentiable_loss\n losses = np.array([]) if past_info is None else past_info['losses']\n\n tr_err, vl_err = 10000, 10000\n if self.validation is not None:\n train_err = np.array([]) if past_info is None else past_info['train_err']\n valid_err = np.array([]) if past_info is None else past_info['valid_err']\n\n n_wait, max_wait = 0, 5\n\n for i in range(losses.size,self.max_iter):\n optimizer.zero_grad()\n loss = self.loss_fn(model.model, model.guide)\n loss.backward()\n optimizer.step()\n losses = np.append(losses,loss.item()) \n if self.validation is not None:\n # print(type(train_y))\n tr_err = self.error_fn(train_y.detach().numpy(), model(train_x, full_cov=False)[0].detach().numpy())\n vl_err = self.error_fn(valid_y, model(valid_x, full_cov=False)[0].detach().numpy())\n train_err = np.append(train_err, tr_err)\n valid_err = np.append(valid_err, vl_err)\n if self.verbose: \n hf.loading_verbose(' ')\n hf.loading_verbose('{0} | loss={1:.2f} | train_error={2:.3f} | validation_error={2:.3f}'.format(i+1, loss.item(), tr_err, vl_err))\n dloss = losses[-1]-losses[-2] if len(losses)>2 else self.tol*1000\t\t\t\n if 0<=dloss and dloss<self.tol: n_wait += 1\n else: n_wait = 0\n if n_wait>=max_wait: break\n\n if self.validation is not None: return model, optimizer, losses, train_err, valid_err\n return model, optimizer, losses\n\n def fit(self, train_x, train_y, n_Xu=None):\n if n_Xu is not None: self.n_Xu = n_Xu\n\n if type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x)\n if type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y)\n # check kernel\n if self.kernel is None:\n print('Setting kernel to Matern32.')\n input_dim = train_x.shape[1]\n self.kernel = gp.kernels.Matern32(input_dim, variance=None, lengthscale=None, active_dims=None)\n\n if self.model is not None: self.continue_run = True\n\n tstart = time()\n if 
train_y.ndim==1:\n if self.validation is not None:\n past_info = {'model':self.model, 'losses':self.losses, 'optimizer':self.optimizer, 'train_err': self.train_err, 'valid_err':self.valid_err} if self.continue_run else None\n model, optimizer, losses, train_err, valid_err = self.fit_1out(train_x, train_y, past_info=past_info)\n self.model, self.optimizer, self.losses, self.train_err, self.valid_err = model, optimizer, losses, train_err, valid_err\n tend = time()\n else:\n past_info = {'model':self.model, 'losses':self.losses, 'optimizer':self.optimizer} if self.continue_run else None\n model, optimizer, losses = self.fit_1out(train_x, train_y, past_info=past_info)\n self.model, self.optimizer, self.losses = model, optimizer, losses\n tend = time()\n print('\\n...done | Time elapsed: {:.2f} s'.format(tend-tstart))\n else:\n if self.validation is not None:\n if self.model is None:\n self.model, self.optimizer, self.losses, self.train_err, self.valid_err = {}, {}, {}, {}, {}\n for i in range(train_y.shape[1]):\n print('Regressing output variable {}'.format(i+1))\n past_info = {'model':self.model[i], 'losses':self.losses[i], 'optimizer':self.optimizer[i], 'train_err': self.train_err[i], 'valid_err':self.valid_err[i]} if self.continue_run else None\n model, optimizer, losses, train_err, valid_err = self.fit_1out(train_x, train_y[:,i], past_info=past_info)\n self.model[i], self.optimizer[i], self.losses[i], self.train_err[i], self.valid_err[i] = model, optimizer, losses, train_err, valid_err\n tend = time()\n print('\\n...done | Time elapsed: {:.2f} s'.format(tend-tstart))\n else:\n if self.model is None:\n self.model, self.optimizer, self.losses = {}, {}, {}\n for i in range(train_y.shape[1]):\n print('Regressing output variable {}'.format(i+1))\n past_info = {'model':self.model[i], 'losses':self.losses[i], 'optimizer':self.optimizer[i]} if self.continue_run else None\n model, optimizer, losses = self.fit_1out(train_x, train_y[:,i], past_info=past_info)\n self.model[i], 
self.optimizer[i], self.losses[i] = model, optimizer, losses\n tend = time()\n print('\\n...done | Time elapsed: {:.2f} s'.format(tend-tstart))\n\n\n def predict_1out(self, X_test, return_std=True, return_cov=False):\n if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test)\n \n y_mean, y_cov = self.model(X_test, full_cov=True, noiseless=False)\n\n if return_std: \n y_std = y_cov.diag().sqrt()\n return y_mean.detach().numpy(), y_std.detach().numpy()\n if return_cov: return y_mean.detach().numpy(), y_cov.detach().numpy()\n return y_mean.detach().numpy()\n\n def predict(self, X_test, return_std=True, return_cov=False):\n if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test)\n if type(self.model) is dict:\n y_mean, y_cov = [], []\n for i in range(len(self.model)):\n y_mean0, y_cov0 = self.model[i](X_test, full_cov=True, noiseless=False)\n y_mean.append(y_mean0.detach().numpy())\n y_cov.append(y_cov0.detach().numpy())\n if return_std:\n y_std = [np.sqrt(np.diag(y_cov1)) for y_cov1 in y_cov]\n return np.array(y_mean).T, np.array(y_std).T\n if return_cov: return np.array(y_mean).T, np.array(y_cov).T\n return np.array(y_mean).T\n\n\n def score(self, X_test, y_test):\n if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test)\n if type(y_test)==torch.Tensor: y_test = y_test.detach().numpy()\n\n y_pred = self.predict(X_test, return_std=False, return_cov=False)\n scr = r2_score(y_test, y_pred)\n return scr\n\n\nclass GPR_GPyTorch:\n def __init__(self, max_iter=1000, tol=0.01, kernel=None, loss_fn=None, verbose=True, learning_rate=1e-3, optimizer=None, validation=0.1):\n # define kernel\n self.kernel = kernel\n self.max_iter = max_iter\n self.verbose = verbose\n self.learning_rate = learning_rate\n self.loss_fn = loss_fn\n self.tol = tol\n self.optimizer = optimizer\n # self.validation = validation\n\n self.train_loss = []\n self.valid_loss = []\n\n def prepare_model(self, train_x, train_y, kernel=None):\n multi_task = False\n if train_y.ndim>1:\n if 
train_y.shape[1]>1:\n multi_task = True\n\n if multi_task:\n print('Model for Multivariate output.')\n # We will use the GP model for multivariate output, exact inference\n class MultitaskGPModel(gpytorch.models.ExactGP):\n def __init__(self, train_x, train_y, likelihood):\n super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)\n self.mean_module = gpytorch.means.MultitaskMean(\n gpytorch.means.ConstantMean(), num_tasks=train_y.shape[1]\n )\n self.covar_module = gpytorch.kernels.MultitaskKernel(\n kernel, num_tasks=train_y.shape[1], rank=1\n )\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)\n\n # initialize likelihood and model\n self.likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=train_y.shape[1])\n self.model = MultitaskGPModel(train_x, train_y, self.likelihood)\n else:\n # We will use the simplest form of GP model, exact inference\n class ExactGPModel(gpytorch.models.ExactGP):\n def __init__(self, train_x, train_y, likelihood):\n super(ExactGPModel, self).__init__(train_x, train_y, likelihood)\n self.mean_module = gpytorch.means.ConstantMean()\n self.covar_module = gpytorch.kernels.ScaleKernel(kernel)\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n\n # initialize likelihood and model\n self.likelihood = gpytorch.likelihoods.GaussianLikelihood()\n self.model = ExactGPModel(train_x, train_y, self.likelihood)\n\n def fit(self, train_x, train_y):\n # if self.validation is not None:\n # if type(train_x)!=np.ndarray: train_x = train_x.detach().numpy()\n # if type(train_y)!=np.ndarray: train_y = train_y.detach().numpy()\n # train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=self.validation, random_state=42)\n # valid_x = torch.from_numpy(valid_x)\n\n if 
type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x.astype(np.float32))\n if type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y.astype(np.float32))\n print(train_x.shape, train_y.shape)\n\n # Check kernel\n if self.kernel is None:\n print('Setting kernel to Matern32.')\n self.kernel = gpytorch.kernels.MaternKernel(nu=1.5)\n\n # create simple GP model\n if len(self.train_loss)==0:\n self.prepare_model(train_x, train_y, kernel=self.kernel)\n\n # Find optimal model hyperparameters\n self.model.train()\n self.likelihood.train()\n\n\n if self.optimizer is None: \n print('Using the adam optimizer.')\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)\n\n # \"Loss\" for GPs - the marginal log likelihood\n if self.loss_fn in [None, 'marginal_log_likelihood', 'mll']:\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self.model)\n else: \n mll = self.loss_fn\n\n # optimize\n for i in range(len(self.train_loss),self.max_iter):\n # Zero gradients from previous iteration\n self.optimizer.zero_grad()\n # Output from model\n output = self.model(train_x)\n #print(type(output))\n #print(output)\n # Calc loss and backprop gradients\n #print(output, train_y.shape)\n loss = -mll(output, train_y)\n loss.backward()\n self.train_loss.append(loss.item())\n\n # if self.validation:\n # self.model.eval()\n # self.likelihood.eval()\n\n # valid_out = self.likelihood(self.model(valid_x))\n # valid_ls = -mll(valid_out, valid_y)\n # self.valid_loss.append(valid_ls.item())\n # print('Iter %d/%d - Train Loss: %.3f Valid Loss: %.3f ' % (\n # i + 1, self.max_iter, self.train_loss[-1], self.valid_loss[-1]\n # ))\n # self.model.train()\n # self.likelihood.train()\n # else:\n print('Iter %d/%d - Loss: %.3f ' % (\n i + 1, self.max_iter, self.train_loss[-1]\n ))\n self.optimizer.step()\n\n def predict(self, X_test, return_ci=True):\n if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test.astype(np.float32))\n\n # Get into 
evaluation (predictive posterior) mode\n model, likelihood = self.model, self.likelihood\n model.eval()\n likelihood.eval()\n\n\n # Test points are regularly spaced along [0,1]\n # Make predictions by feeding model through likelihood\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n observed_pred = likelihood(model(X_test))\n\n if return_ci:\n lower, upper = observed_pred.confidence_region()\n return observed_pred.mean.numpy(), lower.detach().numpy(), upper.detach().numpy()\n\n return observed_pred.detach().numpy()\n\n\n def score(self, X_test, y_test):\n if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test)\n if type(y_test)==torch.Tensor: y_test = y_test.detach().numpy()\n\n y_pred = self.predict(X_test, return_ci=False)\n scr = r2_score(y_test, y_pred)\n return scr\n\n\nclass SVGP_GPyTorch:\n def __init__(self, max_iter=1000, tol=0.01, kernel=None, loss_fn=None, verbose=True, learning_rate=1e-3, optimizer=None, validation=0.1):\n # define kernel\n self.kernel = kernel\n self.max_iter = max_iter\n self.verbose = verbose\n self.learning_rate = learning_rate\n self.loss_fn = loss_fn\n self.tol = tol\n self.optimizer = optimizer\n # self.validation = validation\n\n self.train_loss = []\n self.valid_loss = []\n\n def prepare_model(self, train_x, train_y, kernel=None):\n multi_task = False\n if train_y.ndim>1:\n if train_y.shape[1]>1:\n multi_task = True\n\n if multi_task:\n print('Model for Multivariate output.')\n # We will use the GP model for multivariate output, exact inference\n class MultitaskGPModel(gpytorch.models.ExactGP):\n def __init__(self, train_x, train_y, likelihood):\n super(MultitaskGPModel, self).__init__(train_x, train_y, likelihood)\n self.mean_module = gpytorch.means.MultitaskMean(\n gpytorch.means.ConstantMean(), num_tasks=train_y.shape[1]\n )\n self.covar_module = gpytorch.kernels.MultitaskKernel(\n kernel, num_tasks=train_y.shape[1], rank=1\n )\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = 
self.covar_module(x)\n return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)\n\n # initialize likelihood and model\n self.likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=train_y.shape[1])\n self.model = MultitaskGPModel(train_x, train_y, self.likelihood)\n else:\n # We will use the simplest form of GP model, exact inference\n class ExactGPModel(gpytorch.models.ExactGP):\n def __init__(self, train_x, train_y, likelihood):\n super(ExactGPModel, self).__init__(train_x, train_y, likelihood)\n self.mean_module = gpytorch.means.ConstantMean()\n self.covar_module = gpytorch.kernels.ScaleKernel(kernel)\n\n def forward(self, x):\n mean_x = self.mean_module(x)\n covar_x = self.covar_module(x)\n return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n\n # initialize likelihood and model\n self.likelihood = gpytorch.likelihoods.GaussianLikelihood()\n self.model = ExactGPModel(train_x, train_y, self.likelihood)\n\n def fit(self, train_x, train_y):\n # if self.validation is not None:\n # if type(train_x)!=np.ndarray: train_x = train_x.detach().numpy()\n # if type(train_y)!=np.ndarray: train_y = train_y.detach().numpy()\n # train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y, test_size=self.validation, random_state=42)\n # valid_x = torch.from_numpy(valid_x)\n\n if type(train_x)==np.ndarray: train_x = torch.from_numpy(train_x.astype(np.float32))\n if type(train_y)==np.ndarray: train_y = torch.from_numpy(train_y.astype(np.float32))\n print(train_x.shape, train_y.shape)\n\n # Check kernel\n if self.kernel is None:\n print('Setting kernel to Matern32.')\n self.kernel = gpytorch.kernels.MaternKernel(nu=1.5)\n\n # create simple GP model\n if len(self.train_loss)==0:\n self.prepare_model(train_x, train_y, kernel=self.kernel)\n\n # Find optimal model hyperparameters\n self.model.train()\n self.likelihood.train()\n\n\n if self.optimizer is None: \n print('Using the adam optimizer.')\n self.optimizer = 
torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)\n\n # \"Loss\" for GPs - the marginal log likelihood\n if self.loss_fn in [None, 'marginal_log_likelihood', 'mll']:\n mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self.model)\n else: \n mll = self.loss_fn\n\n # optimize\n for i in range(len(self.train_loss),self.max_iter):\n # Zero gradients from previous iteration\n self.optimizer.zero_grad()\n # Output from model\n output = self.model(train_x)\n #print(type(output))\n #print(output)\n # Calc loss and backprop gradients\n #print(output, train_y.shape)\n loss = -mll(output, train_y)\n loss.backward()\n self.train_loss.append(loss.item())\n\n # if self.validation:\n # self.model.eval()\n # self.likelihood.eval()\n\n # valid_out = self.likelihood(self.model(valid_x))\n # valid_ls = -mll(valid_out, valid_y)\n # self.valid_loss.append(valid_ls.item())\n # print('Iter %d/%d - Train Loss: %.3f Valid Loss: %.3f ' % (\n # i + 1, self.max_iter, self.train_loss[-1], self.valid_loss[-1]\n # ))\n # self.model.train()\n # self.likelihood.train()\n # else:\n print('Iter %d/%d - Loss: %.3f ' % (\n i + 1, self.max_iter, self.train_loss[-1]\n ))\n self.optimizer.step()\n\n def predict(self, X_test, return_ci=True):\n if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test.astype(np.float32))\n\n # Get into evaluation (predictive posterior) mode\n model, likelihood = self.model, self.likelihood\n model.eval()\n likelihood.eval()\n\n\n # Test points are regularly spaced along [0,1]\n # Make predictions by feeding model through likelihood\n with torch.no_grad(), gpytorch.settings.fast_pred_var():\n observed_pred = likelihood(model(X_test))\n\n if return_ci:\n lower, upper = observed_pred.confidence_region()\n return observed_pred.detach().numpy(), lower.detach().numpy(), upper.detach().numpy()\n\n return observed_pred.detach().numpy()\n\n\n def score(self, X_test, y_test):\n if type(X_test)==np.ndarray: X_test = torch.from_numpy(X_test)\n if 
type(y_test)==torch.Tensor: y_test = y_test.detach().numpy()\n\n y_pred = self.predict(X_test, return_ci=False)\n scr = r2_score(y_test, y_pred)\n return scr\n" ]
[ [ "numpy.diag", "sklearn.metrics.r2_score", "numpy.sqrt", "sklearn.model_selection.train_test_split", "torch.from_numpy", "numpy.append", "torch.no_grad", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Thrada/depthai-experiments
[ "f06312596b47427f9699a300ae8bc143dbbac000" ]
[ "gen2-triangulation/main.py" ]
[ "# import math\r\nfrom pathlib import Path\r\n\r\nimport blobconverter\r\nimport numpy as np\r\nimport math\r\nfrom visualizer import initialize_OpenGL, get_vector_direction, get_vector_intersection, start_OpenGL\r\nimport cv2\r\nimport depthai as dai\r\n\r\np = dai.Pipeline()\r\n\r\nleft_camera_position = (0.107, -0.038, 0.008)\r\nright_camera_position = (0.109, 0.039, 0.008)\r\ncameras = (left_camera_position, right_camera_position)\r\n\r\ndef populatePipeline(p, name):\r\n cam = p.create(dai.node.MonoCamera)\r\n socket = dai.CameraBoardSocket.LEFT if name == \"left\" else dai.CameraBoardSocket.RIGHT\r\n cam.setBoardSocket(socket)\r\n cam.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)\r\n\r\n # ImageManip for cropping (face detection NN requires input image of 300x300) and to change frame type\r\n face_manip = p.create(dai.node.ImageManip)\r\n face_manip.initialConfig.setResize(300, 300)\r\n # The NN model expects BGR input. By default ImageManip output type would be same as input (gray in this case)\r\n face_manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)\r\n cam.out.link(face_manip.inputImage)\r\n\r\n # NN that detects faces in the image\r\n face_nn = p.create(dai.node.NeuralNetwork)\r\n face_nn.setBlobPath(str(blobconverter.from_zoo(\"face-detection-retail-0004\", shaves=6)))\r\n face_manip.out.link(face_nn.input)\r\n\r\n # Send mono frames to the host via XLink\r\n cam_xout = p.create(dai.node.XLinkOut)\r\n cam_xout.setStreamName(\"mono_\" + name)\r\n face_nn.passthrough.link(cam_xout.input)\r\n\r\n # Script node will take the output from the NN as an input, get the first bounding box\r\n # and if the confidence is greater than 0.2, script will send ImageManipConfig to the manip_crop\r\n image_manip_script = p.create(dai.node.Script)\r\n image_manip_script.inputs['nn_in'].setBlocking(False)\r\n image_manip_script.inputs['nn_in'].setQueueSize(1)\r\n face_nn.out.link(image_manip_script.inputs['nn_in'])\r\n 
image_manip_script.setScriptData(\"\"\"\r\nwhile True:\r\n nn_in = node.io['nn_in'].get()\r\n nn_data = nn_in.getFirstLayerFp16()\r\n\r\n conf=nn_data[2]\r\n if 0.2<conf:\r\n x_min=nn_data[3]\r\n y_min=nn_data[4]\r\n x_max=nn_data[5]\r\n y_max=nn_data[6]\r\n cfg = ImageManipConfig()\r\n cfg.setCropRect(x_min, y_min, x_max, y_max)\r\n cfg.setResize(48, 48)\r\n cfg.setKeepAspectRatio(False)\r\n node.io['to_manip'].send(cfg)\r\n #node.warn(f\"1 from nn_in: {x_min}, {y_min}, {x_max}, {y_max}\")\r\n\"\"\")\r\n\r\n # This ImageManip will crop the mono frame based on the NN detections. Resulting image will be the cropped\r\n # face that was detected by the face-detection NN.\r\n manip_crop = p.create(dai.node.ImageManip)\r\n face_nn.passthrough.link(manip_crop.inputImage)\r\n image_manip_script.outputs['to_manip'].link(manip_crop.inputConfig)\r\n manip_crop.initialConfig.setResize(48, 48)\r\n manip_crop.setWaitForConfigInput(False)\r\n\r\n # Send ImageManipConfig to host so it can visualize the landmarks\r\n config_xout = p.create(dai.node.XLinkOut)\r\n config_xout.setStreamName(\"config_\" + name)\r\n image_manip_script.outputs['to_manip'].link(config_xout.input)\r\n\r\n crop_xout = p.createXLinkOut()\r\n crop_xout.setStreamName(\"crop_\" + name)\r\n manip_crop.out.link(crop_xout.input)\r\n\r\n # Second NN that detcts landmarks from the cropped 48x48 face\r\n landmarks_nn = p.createNeuralNetwork()\r\n landmarks_nn.setBlobPath(str(blobconverter.from_zoo(\"landmarks-regression-retail-0009\", shaves=6)))\r\n manip_crop.out.link(landmarks_nn.input)\r\n\r\n landmarks_nn_xout = p.createXLinkOut()\r\n landmarks_nn_xout.setStreamName(\"landmarks_\" + name)\r\n landmarks_nn.out.link(landmarks_nn_xout.input)\r\n\r\n\r\npopulatePipeline(p, \"right\")\r\npopulatePipeline(p, \"left\")\r\n\r\ndef get_landmark_3d(landmark):\r\n focal_length = 842\r\n landmark_norm = 0.5 - np.array(landmark)\r\n\r\n # image size\r\n landmark_image_coord = landmark_norm * 640\r\n\r\n 
landmark_spherical_coord = [math.atan2(landmark_image_coord[0], focal_length),\r\n -math.atan2(landmark_image_coord[1], focal_length) + math.pi / 2]\r\n\r\n landmarks_3D = [\r\n math.sin(landmark_spherical_coord[1]) * math.cos(landmark_spherical_coord[0]),\r\n math.sin(landmark_spherical_coord[1]) * math.sin(landmark_spherical_coord[0]),\r\n math.cos(landmark_spherical_coord[1])\r\n ]\r\n\r\n return landmarks_3D\r\n\r\ninitialize_OpenGL()\r\n\r\n# Pipeline is defined, now we can connect to the device\r\nwith dai.Device(p) as device:\r\n # Set device log level - to see logs from the Script node\r\n device.setLogLevel(dai.LogLevel.WARN)\r\n device.setLogOutputLevel(dai.LogLevel.WARN)\r\n\r\n # Start pipeline\r\n device.startPipeline()\r\n queues = []\r\n for name in [\"left\", \"right\"]:\r\n queues.append(device.getOutputQueue(name=\"mono_\"+name, maxSize=4, blocking=False))\r\n queues.append(device.getOutputQueue(name=\"crop_\"+name, maxSize=4, blocking=False))\r\n queues.append(device.getOutputQueue(name=\"landmarks_\"+name, maxSize=4, blocking=False))\r\n queues.append(device.getOutputQueue(name=\"config_\"+name, maxSize=4, blocking=False))\r\n while True:\r\n lr_landmarks = []\r\n for i in range(2):\r\n name = \"left\" if i == 1 else \"right\"\r\n # 300x300 Mono image frame\r\n inMono = queues[i*4].get()\r\n frame = inMono.getCvFrame()\r\n\r\n # Cropped+streched (48x48) mono image frame\r\n inCrop = queues[i*4 + 1].get()\r\n cropped_frame = inCrop.getCvFrame()\r\n\r\n inConfig = queues[i*4 + 3].tryGet()\r\n if inConfig is not None:\r\n xmin = int(300 * inConfig.getCropXMin())\r\n ymin = int(300 * inConfig.getCropYMin())\r\n xmax = int(300 * inConfig.getCropXMax())\r\n ymax = int(300 * inConfig.getCropYMax())\r\n cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)\r\n\r\n width = inConfig.getCropXMax()-inConfig.getCropXMin()\r\n height = inConfig.getCropYMax()-inConfig.getCropYMin()\r\n\r\n # Facial landmarks from the second NN\r\n inLandmarks = 
queues[i*4 + 2].get()\r\n landmarks_layer = inLandmarks.getFirstLayerFp16()\r\n landmarks = np.array(landmarks_layer).reshape(5, 2)\r\n\r\n lr_landmarks.append(list(map(get_landmark_3d, landmarks)))\r\n for landmark in landmarks:\r\n cv2.circle(cropped_frame, (int(48*landmark[0]), int(48*landmark[1])), 3, (0, 255, 0))\r\n w = landmark[0] * width + inConfig.getCropXMin()\r\n h = landmark[1] * height + inConfig.getCropYMin()\r\n cv2.circle(frame, (int(w * 300), int(h * 300)), 3, (0,255,0))\r\n\r\n # Display both mono/cropped frames\r\n cv2.imshow(\"mono_\"+name, frame)\r\n cv2.imshow(\"crop_\"+name, cropped_frame)\r\n\r\n # 3D visualization\r\n if len(lr_landmarks) == 2 and len(lr_landmarks[0]) > 0 and len(lr_landmarks[1]) > 0:\r\n mid_intersects = []\r\n for i in range(5):\r\n left_vector = get_vector_direction(left_camera_position, lr_landmarks[0][i])\r\n right_vector = get_vector_direction(right_camera_position, lr_landmarks[1][i])\r\n intersection_landmark = get_vector_intersection(left_vector, left_camera_position, right_vector,\r\n right_camera_position)\r\n mid_intersects.append(intersection_landmark)\r\n\r\n start_OpenGL(mid_intersects, cameras, lr_landmarks[0], lr_landmarks[1])\r\n\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sabbbin/age-rep
[ "9f42db8c662b8c4a78e8aeab39c1a6989e09f46a" ]
[ "model-code/morph-ce.py" ]
[ "# coding: utf-8\n\n#############################################\n# Cross Entropy with ResNet-34\n#############################################\n\n# Imports\n\nimport os\nimport time\nimport pandas as pd\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport argparse\nimport sys\n\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nfrom torchvision import transforms\nfrom PIL import Image\n\ntorch.backends.cudnn.deterministic = True\n\nTRAIN_CSV_PATH = './morph2_train.csv'\nVALID_CSV_PATH = './morph2_valid.csv'\nTEST_CSV_PATH = './morph2_test.csv'\nIMAGE_PATH = '/shared_datasets/morph2/morph2-aligned-nose/jpg'\n\n\n# Argparse helper\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--cuda',\n type=int,\n default=-1)\n\nparser.add_argument('--seed',\n type=int,\n default=-1)\n\nparser.add_argument('--numworkers',\n type=int,\n default=3)\n\nparser.add_argument('--outpath',\n type=str,\n required=True)\n\nargs = parser.parse_args()\n\nNUM_WORKERS = args.numworkers\n\nif args.cuda >= 0:\n DEVICE = torch.device(\"cuda:%d\" % args.cuda)\nelse:\n DEVICE = torch.device(\"cpu\")\n\nif args.seed == -1:\n RANDOM_SEED = None\nelse:\n RANDOM_SEED = args.seed\n\nPATH = args.outpath\nif not os.path.exists(PATH):\n os.mkdir(PATH)\nLOGFILE = os.path.join(PATH, 'training.log')\nTEST_PREDICTIONS = os.path.join(PATH, 'test_predictions.log')\n\n# Logging\n\nheader = []\n\nheader.append('PyTorch Version: %s' % torch.__version__)\nheader.append('CUDA device available: %s' % torch.cuda.is_available())\nheader.append('Using CUDA device: %s' % DEVICE)\nheader.append('Random Seed: %s' % RANDOM_SEED)\nheader.append('Output Path: %s' % PATH)\nheader.append('Script: %s' % sys.argv[0])\n\nwith open(LOGFILE, 'w') as f:\n for entry in header:\n print(entry)\n f.write('%s\\n' % entry)\n f.flush()\n\n\n##########################\n# SETTINGS\n##########################\n\n# Hyperparameters\nlearning_rate = 0.0005\nnum_epochs = 
200\n\n# Architecture\nNUM_CLASSES = 55\nBATCH_SIZE = 256\nGRAYSCALE = False\n\n###################\n# Dataset\n###################\n\n\nclass Morph2Dataset(Dataset):\n \"\"\"Custom Dataset for loading MORPH face images\"\"\"\n\n def __init__(self,\n csv_path, img_dir, transform=None):\n\n df = pd.read_csv(csv_path, index_col=0)\n self.img_dir = img_dir\n self.csv_path = csv_path\n self.img_names = df.index.values\n self.y = df['age'].values\n self.transform = transform\n\n def __getitem__(self, index):\n img = Image.open(os.path.join(self.img_dir,\n self.img_names[index]))\n\n if self.transform is not None:\n img = self.transform(img)\n\n label = self.y[index]\n\n return img, label\n\n def __len__(self):\n return self.y.shape[0]\n\n\ncustom_transform = transforms.Compose([transforms.CenterCrop((140, 140)),\n transforms.Resize((128, 128)),\n transforms.RandomCrop((120, 120)),\n transforms.ToTensor()])\n\ntrain_dataset = Morph2Dataset(csv_path=TRAIN_CSV_PATH,\n img_dir=IMAGE_PATH,\n transform=custom_transform)\n\n\ncustom_transform2 = transforms.Compose([transforms.CenterCrop((140, 140)),\n transforms.Resize((128, 128)),\n transforms.CenterCrop((120, 120)),\n transforms.ToTensor()])\n\ntest_dataset = Morph2Dataset(csv_path=TEST_CSV_PATH,\n img_dir=IMAGE_PATH,\n transform=custom_transform2)\n\nvalid_dataset = Morph2Dataset(csv_path=VALID_CSV_PATH,\n img_dir=IMAGE_PATH,\n transform=custom_transform2)\n\ntrain_loader = DataLoader(dataset=train_dataset,\n batch_size=BATCH_SIZE,\n shuffle=True,\n num_workers=NUM_WORKERS)\n\nvalid_loader = DataLoader(dataset=valid_dataset,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=NUM_WORKERS)\n\ntest_loader = DataLoader(dataset=test_dataset,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=NUM_WORKERS)\n\n\n##########################\n# MODEL\n##########################\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, 
kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes, grayscale):\n self.inplanes = 64\n if grayscale:\n in_dim = 1\n else:\n in_dim = 3\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(in_dim, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AvgPool2d(4)\n self.fc = nn.Linear(512, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, (2. 
/ n)**.5)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n logits = self.fc(x)\n probas = F.softmax(logits, dim=1)\n return logits, probas\n\n\ndef resnet34(num_classes, grayscale):\n \"\"\"Constructs a ResNet-34 model.\"\"\"\n model = ResNet(block=BasicBlock, \n layers=[3, 4, 6, 3],\n num_classes=num_classes,\n grayscale=grayscale)\n return model\n\n\n###########################################\n# Initialize Cost, Model, and Optimizer\n###########################################\n\ntorch.manual_seed(RANDOM_SEED)\ntorch.cuda.manual_seed(RANDOM_SEED)\nmodel = resnet34(NUM_CLASSES, GRAYSCALE)\n\nmodel.to(DEVICE)\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) \n\n\ndef compute_mae_and_mse(model, data_loader, device):\n mae, mse, num_examples = 0., 0., 0\n for i, (features, targets) in enumerate(data_loader):\n\n features = features.to(device)\n targets = targets.to(device)\n\n logits, probas = model(features)\n _, predicted_labels = torch.max(probas, 1)\n num_examples += targets.size(0)\n mae += torch.sum(torch.abs(predicted_labels - targets))\n mse += torch.sum((predicted_labels - targets)**2)\n mae = mae.float() / 
num_examples\n mse = mse.float() / num_examples\n return mae, mse\n\n\nstart_time = time.time()\n\nbest_mae, best_rmse, best_epoch = 999, 999, -1\nfor epoch in range(num_epochs):\n\n model.train()\n for batch_idx, (features, targets) in enumerate(train_loader):\n\n features = features.to(DEVICE)\n targets = targets.to(DEVICE)\n\n # FORWARD AND BACK PROP\n logits, probas = model(features)\n cost = F.cross_entropy(logits, targets)\n optimizer.zero_grad()\n\n cost.backward()\n\n # UPDATE MODEL PARAMETERS\n optimizer.step()\n\n # LOGGING\n if not batch_idx % 50:\n s = ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'\n % (epoch+1, num_epochs, batch_idx,\n len(train_dataset)//BATCH_SIZE, cost))\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\n model.eval()\n with torch.set_grad_enabled(False):\n valid_mae, valid_mse = compute_mae_and_mse(model, valid_loader,\n device=DEVICE)\n\n if valid_mae < best_mae:\n best_mae, best_rmse, best_epoch = valid_mae, torch.sqrt(valid_mse), epoch\n ########## SAVE MODEL #############\n torch.save(model.state_dict(), os.path.join(PATH, 'best_model.pt'))\n\n\n s = 'MAE/RMSE: | Current Valid: %.2f/%.2f Ep. %d | Best Valid : %.2f/%.2f Ep. 
%d' % (\n valid_mae, torch.sqrt(valid_mse), epoch, best_mae, best_rmse, best_epoch)\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\n s = 'Time elapsed: %.2f min' % ((time.time() - start_time)/60)\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\nmodel.eval()\nwith torch.set_grad_enabled(False): # save memory during inference\n\n train_mae, train_mse = compute_mae_and_mse(model, train_loader,\n device=DEVICE)\n valid_mae, valid_mse = compute_mae_and_mse(model, valid_loader,\n device=DEVICE)\n test_mae, test_mse = compute_mae_and_mse(model, test_loader,\n device=DEVICE)\n\n s = 'MAE/RMSE: | Train: %.2f/%.2f | Valid: %.2f/%.2f | Test: %.2f/%.2f' % (\n train_mae, torch.sqrt(train_mse),\n valid_mae, torch.sqrt(valid_mse),\n test_mae, torch.sqrt(test_mse))\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\ns = 'Total Training Time: %.2f min' % ((time.time() - start_time)/60)\nprint(s)\nwith open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\n\n########## EVALUATE BEST MODEL ######\nmodel.load_state_dict(torch.load(os.path.join(PATH, 'best_model.pt')))\nmodel.eval()\n\nwith torch.set_grad_enabled(False):\n train_mae, train_mse = compute_mae_and_mse(model, train_loader,\n device=DEVICE)\n valid_mae, valid_mse = compute_mae_and_mse(model, valid_loader,\n device=DEVICE)\n test_mae, test_mse = compute_mae_and_mse(model, test_loader,\n device=DEVICE)\n\n s = 'MAE/RMSE: | Best Train: %.2f/%.2f | Best Valid: %.2f/%.2f | Best Test: %.2f/%.2f' % (\n train_mae, torch.sqrt(train_mse),\n valid_mae, torch.sqrt(valid_mse),\n test_mae, torch.sqrt(test_mse))\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\n\n########## SAVE PREDICTIONS ######\nall_pred = []\nwith torch.set_grad_enabled(False):\n for batch_idx, (features, targets) in enumerate(test_loader):\n\n features = features.to(DEVICE)\n logits, probas = model(features)\n predict_levels = probas > 0.5\n predicted_labels = torch.sum(predict_levels, dim=1)\n 
lst = [str(int(i)) for i in predicted_labels]\n all_pred.extend(lst)\n\nwith open(TEST_PREDICTIONS, 'w') as f:\n all_pred = ','.join(all_pred)\n f.write(all_pred)\n" ]
[ [ "torch.abs", "torch.nn.functional.softmax", "torch.max", "torch.utils.data.DataLoader", "torch.sum", "torch.set_grad_enabled", "torch.cuda.is_available", "torch.device", "pandas.read_csv", "torch.sqrt", "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.cuda.manual_seed", "torch.manual_seed", "torch.nn.functional.cross_entropy", "torch.nn.MaxPool2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Jee-King/ICCV2021_Event_Frame_Tracking
[ "ea86cdd331748864ffaba35f5efbb3f2a02cdb03" ]
[ "pytracking/utils/plotting.py" ]
[ "import matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport torch\r\nimport cv2\r\n\r\n\r\ndef draw_figure(fig):\r\n fig.canvas.draw()\r\n fig.canvas.flush_events()\r\n plt.pause(0.001)\r\n\r\n\r\ndef show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None):\r\n \"\"\"Display a 2D tensor.\r\n args:\r\n fig_num: Figure number.\r\n title: Title of figure.\r\n \"\"\"\r\n a_np = a.squeeze().cpu().clone().detach().numpy()\r\n if a_np.ndim == 3:\r\n a_np = np.transpose(a_np, (1, 2, 0))\r\n\r\n if ax is None:\r\n fig = plt.figure(fig_num)\r\n plt.tight_layout()\r\n plt.cla()\r\n plt.imshow(a_np, vmin=range[0], vmax=range[1])\r\n plt.axis('off')\r\n plt.axis('equal')\r\n if title is not None:\r\n plt.title(title)\r\n draw_figure(fig)\r\n else:\r\n ax.cla()\r\n ax.imshow(a_np, vmin=range[0], vmax=range[1])\r\n ax.set_axis_off()\r\n ax.axis('equal')\r\n if title is not None:\r\n ax.set_title(title)\r\n draw_figure(plt.gcf())\r\n\r\n\r\ndef plot_graph(a: torch.Tensor, fig_num = None, title = None):\r\n \"\"\"Plot graph. 
Data is a 1D tensor.\r\n args:\r\n fig_num: Figure number.\r\n title: Title of figure.\r\n \"\"\"\r\n a_np = a.squeeze().cpu().clone().detach().numpy()\r\n if a_np.ndim > 1:\r\n raise ValueError\r\n fig = plt.figure(fig_num)\r\n # plt.tight_layout()\r\n plt.cla()\r\n plt.plot(a_np)\r\n if title is not None:\r\n plt.title(title)\r\n draw_figure(fig)\r\n\r\n\r\ndef show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None):\r\n im_np = im.clone().cpu().squeeze().numpy()\r\n im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8))\r\n\r\n boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int)\r\n\r\n # Draw proposals\r\n for i_ in range(boxes.shape[0]):\r\n if disp_ids is None or disp_ids[i_]:\r\n bb = boxes[i_, :]\r\n disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256)\r\n cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]),\r\n disp_color, 1)\r\n\r\n if iou_pred is not None:\r\n text_pos = (bb[0], bb[1] - 5)\r\n cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos,\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False)\r\n\r\n im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float()\r\n\r\n return im_tensor\r\n\r\n\r\n\r\ndef _pascal_color_map(N=256, normalized=False):\r\n \"\"\"\r\n Python implementation of the color map function for the PASCAL VOC data set.\r\n Official Matlab version can be found in the PASCAL VOC devkit\r\n http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit\r\n \"\"\"\r\n\r\n def bitget(byteval, idx):\r\n return (byteval & (1 << idx)) != 0\r\n\r\n dtype = 'float32' if normalized else 'uint8'\r\n cmap = np.zeros((N, 3), dtype=dtype)\r\n for i in range(N):\r\n r = g = b = 0\r\n c = i\r\n for j in range(8):\r\n r = r | (bitget(c, 0) << 7 - j)\r\n g = g | (bitget(c, 1) << 7 - j)\r\n b = b | (bitget(c, 2) << 7 - j)\r\n c = c >> 3\r\n\r\n cmap[i] = np.array([r, g, b])\r\n\r\n cmap = cmap / 255 if normalized else cmap\r\n return 
cmap\r\n\r\n\r\ndef overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):\r\n \"\"\" Overlay mask over image.\r\n Source: https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py\r\n This function allows you to overlay a mask over an image with some\r\n transparency.\r\n # Arguments\r\n im: Numpy Array. Array with the image. The shape must be (H, W, 3) and\r\n the pixels must be represented as `np.uint8` data type.\r\n ann: Numpy Array. Array with the mask. The shape must be (H, W) and the\r\n values must be intergers\r\n alpha: Float. Proportion of alpha to apply at the overlaid mask.\r\n colors: Numpy Array. Optional custom colormap. It must have shape (N, 3)\r\n being N the maximum number of colors to represent.\r\n contour_thickness: Integer. Thickness of each object index contour draw\r\n over the overlay. This function requires to have installed the\r\n package `opencv-python`.\r\n # Returns\r\n Numpy Array: Image of the overlay with shape (H, W, 3) and data type\r\n `np.uint8`.\r\n \"\"\"\r\n im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=np.int)\r\n if im.shape[:-1] != ann.shape:\r\n raise ValueError('First two dimensions of `im` and `ann` must match')\r\n if im.shape[-1] != 3:\r\n raise ValueError('im must have three channels at the 3 dimension')\r\n\r\n colors = colors or _pascal_color_map()\r\n colors = np.asarray(colors, dtype=np.uint8)\r\n\r\n mask = colors[ann]\r\n fg = im * alpha + (1 - alpha) * mask\r\n\r\n img = im.copy()\r\n img[ann > 0] = fg[ann > 0]\r\n\r\n if contour_thickness: # pragma: no cover\r\n import cv2\r\n for obj_id in np.unique(ann[ann > 0]):\r\n contours = cv2.findContours((ann == obj_id).astype(\r\n np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\r\n cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),\r\n contour_thickness)\r\n return img\r\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "numpy.unique", "numpy.asarray", "matplotlib.pyplot.cla", "matplotlib.pyplot.gcf", "matplotlib.pyplot.plot", "matplotlib.pyplot.axis", "numpy.transpose", "numpy.array", "numpy.zeros", "matplotlib.pyplot.pause", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlbertiPot/CTNAS
[ "ecb22ea66b7ba075c48ca4c4db28f68b777f45db" ]
[ "ctnas/core/controller/common_nas.py" ]
[ "import functools\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributions as distributions\nfrom torch.distributions.utils import probs_to_logits\n\nfrom core.genotypes import Genotype\nfrom core.utils import logger\n\n\nclass Controller(nn.Module):\n '''\n Controller for DARTS search space.\n '''\n\n def __init__(self, n_nodes, n_ops, device=\"cpu\", hidden_size=None,\n temperature=None, tanh_constant=None, op_tanh_reduce=None):\n super(Controller, self).__init__()\n self.n_nodes = n_nodes\n self.n_ops = n_ops\n self.n_nodes = n_nodes\n self.device = device\n\n self.hidden_size = hidden_size\n self.attention_hidden_size = hidden_size\n self.temperature = temperature\n self.tanh_constant = tanh_constant\n self.op_tanh_reduce = op_tanh_reduce\n\n # Embedding of (n_nodes+1) nodes\n # Note that the (n_nodes+2)-th node will not be used\n self.node_op_embedding = nn.Embedding(n_nodes + 1 + self.n_ops, self.hidden_size)\n\n self.lstm = nn.LSTMCell(self.hidden_size, self.hidden_size)\n\n self.emb_attn = nn.Linear(self.hidden_size, self.attention_hidden_size, bias=False)\n self.hid_attn = nn.Linear(self.hidden_size, self.attention_hidden_size, bias=False)\n self.v_attn = nn.Linear(self.hidden_size, 1, bias=False)\n\n self.w_soft = nn.Linear(self.hidden_size, self.n_ops)\n\n self.batch_size = 1\n self.n_prev_nodes = 2\n\n self.reset_parameters()\n\n @functools.lru_cache(maxsize=128)\n def _zeros(self, batch_size):\n return torch.zeros((batch_size, self.hidden_size), device=self.device, requires_grad=False)\n\n def reset_parameters(self, init_range=0.1):\n for param in self.parameters():\n param.data.uniform_(-init_range, init_range)\n\n def _scale_attention(self, logits, temperature, tanh_constant, constant_reduce=None):\n if temperature is not None:\n logits /= temperature\n if tanh_constant is not None:\n if constant_reduce is not None:\n tanh_constant /= constant_reduce\n logits = tanh_constant * torch.tanh(logits)\n return 
logits\n\n def _impl(self, probs):\n m = torch.distributions.Categorical(probs=probs)\n action = m.sample().view(-1)\n select_log_p = m.log_prob(action)\n entropy = m.entropy()\n return action, select_log_p, entropy\n\n def forward(self, force_uniform):\n node_log_ps = []\n op_log_ps = []\n node_entropys = []\n op_entropys = []\n nodes = []\n ops = []\n\n inputs = self._zeros(self.batch_size)\n hidden = self._zeros(self.batch_size), self._zeros(self.batch_size)\n embed = None\n for node_idx in range(self.n_nodes):\n for select in ((\"node\", \"op\")):\n for i in range(self.n_prev_nodes):\n if embed is None:\n embed = inputs\n else:\n embed = self.node_op_embedding(inputs)\n # import ipdb; ipdb.set_trace()\n if force_uniform:\n z = torch.zeros(node_idx+2 if select == \"node\" else self.n_ops, device=self.device)\n probs = F.softmax(z, dim=-1)\n else:\n hx, cx = self.lstm(embed, hidden)\n hidden = (hx, cx)\n if select == \"node\":\n # (node_idx+2, hidden_size)\n query = self.node_op_embedding.weight[:node_idx+2, :]\n # (node_idx+2, attention_hidden_size)\n query = torch.tanh(self.emb_attn(query) + self.hid_attn(hx))\n logits = self.v_attn(query).view(-1) # (node_idx+2,)\n logits = self._scale_attention(logits, self.temperature, self.tanh_constant)\n else:\n logits = self.w_soft(hx).view(-1)\n logits = self._scale_attention(logits, self.temperature, self.tanh_constant,\n self.op_tanh_reduce)\n probs = F.softmax(logits, dim=-1)\n action, select_log_p, entropy = self._impl(probs)\n if select == \"node\":\n node_log_ps.append(select_log_p)\n node_entropys.append(entropy)\n else:\n op_log_ps.append(select_log_p)\n op_entropys.append(entropy)\n\n if select == \"node\":\n inputs = action\n nodes.append(action)\n else:\n inputs = action + (self.n_nodes + 1)\n ops.append(action)\n\n ordinal_arch = Genotype.lstm_output_to_ordinal(self.n_nodes, torch.cat(nodes).tolist(), torch.cat(ops).tolist())\n return ordinal_arch, sum(node_log_ps)+sum(op_log_ps), 
sum(node_entropys)+sum(op_entropys)\n\n\nclass LargeSpaceController(nn.Module):\n def __init__(self, n_nodes, n_ops, device=\"cpu\", hidden_size=None,\n temperature=None, tanh_constant=None, op_tanh_reduce=None):\n super(LargeSpaceController, self).__init__()\n self.normal_arch_master = Controller(n_nodes, n_ops, device, hidden_size,\n temperature, tanh_constant, op_tanh_reduce)\n self.reduced_arch_master = Controller(n_nodes, n_ops, device, hidden_size,\n temperature, tanh_constant, op_tanh_reduce)\n\n def forward(self, force_uniform=False):\n normal_arch, normal_logp, normal_entropy = self.normal_arch_master(force_uniform)\n reduced_arch, reduced_logp, reduced_entropy = self.reduced_arch_master(force_uniform)\n return normal_arch, reduced_arch, normal_logp+reduced_logp, normal_entropy+reduced_entropy\n" ]
[ [ "torch.nn.functional.softmax", "torch.zeros", "torch.cat", "torch.nn.Embedding", "torch.nn.LSTMCell", "torch.nn.Linear", "torch.tanh", "torch.distributions.Categorical" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kabongosalomon/axcell
[ "b41c1623377d89c3c45a61907f0a47ea029269de" ]
[ "axcell/helpers/cache.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport pandas as pd\nimport json\nfrom collections import defaultdict\nfrom pathlib import Path\n\n\n# these functions are used to cache various results\n# of corresponding pipeline steps, to make it faster to\n# rerun the pipeline or run in on batch of papers with various\n# steps on different machines. The exchange formats are ad hoc and\n# can be changed.\n\n\ndef _load_json(path):\n with Path(path).open('rt') as f:\n return json.load(f)\n\n\ndef _save_json(obj, path):\n with Path(path).open('wt') as f:\n json.dump(obj, f)\n\n\ndef load_references(path):\n return _load_json(path)\n\n\ndef save_references(references, path):\n _save_json(references, path)\n\n\ndef load_tags(path):\n return _load_json(path)\n\n\ndef save_tags(tags, path):\n _save_json(tags, path)\n\n\ndef load_structure(path):\n return _load_json(path)\n\n\ndef save_structure(structure, path):\n _save_json(structure, path)\n\n\ndef load_proposals(path):\n dtypes = defaultdict(lambda: str)\n dtypes['confidence'] = float\n dtypes['parsed'] = float\n\n na_values = {'confidence': '', 'parsed': ''}\n proposals = pd.read_csv(path, index_col=0, dtype=dtypes, na_values=na_values, keep_default_na=False)\n return proposals\n\n\ndef save_proposals(proposals, path):\n proposals.to_csv(path)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Jwy-Leo/pytorch_yolov3
[ "7deb87529e80cf715ec4fd90e1381293401998ea" ]
[ "darknet.py" ]
[ "import os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom cfg import *\nimport numpy as np\nfrom region_layer import RegionLayer\nfrom yolo_layer import YoloLayer\n#from layers.batchnorm.bn import BN2d\n\nclass MaxPoolStride1(nn.Module):\n def __init__(self):\n super(MaxPoolStride1, self).__init__()\n\n def forward(self, x):\n x = F.max_pool2d(F.pad(x, (0,1,0,1), mode='replicate'), 2, stride=1)\n return x\n\nclass Upsample(nn.Module):\n def __init__(self, stride=2):\n super(Upsample, self).__init__()\n self.stride = stride\n def forward(self, x):\n stride = self.stride\n assert(x.data.dim() == 4)\n B = x.data.size(0)\n C = x.data.size(1)\n H = x.data.size(2)\n W = x.data.size(3)\n ws = stride\n hs = stride\n x = x.view(B, C, H, 1, W, 1).expand(B, C, H, hs, W, ws).contiguous().view(B, C, H*hs, W*ws)\n return x\n\nclass Reorg(nn.Module):\n def __init__(self, stride=2):\n super(Reorg, self).__init__()\n self.stride = stride\n def forward(self, x):\n stride = self.stride\n assert(x.data.dim() == 4)\n B = x.data.size(0)\n C = x.data.size(1)\n H = x.data.size(2)\n W = x.data.size(3)\n assert(H % stride == 0)\n assert(W % stride == 0)\n ws = stride\n hs = stride\n x = x.view(B, C, H//hs, hs, W//ws, ws).transpose(3,4).contiguous()\n x = x.view(B, C, (H//hs)*(W//ws), hs*ws).transpose(2,3).contiguous()\n x = x.view(B, C, hs*ws, H//hs, W//ws).transpose(1,2).contiguous()\n x = x.view(B, hs*ws*C, H//hs, W//ws)\n return x\n\nclass GlobalAvgPool2d(nn.Module):\n def __init__(self):\n super(GlobalAvgPool2d, self).__init__()\n\n def forward(self, x):\n N = x.data.size(0)\n C = x.data.size(1)\n H = x.data.size(2)\n W = x.data.size(3)\n x = F.avg_pool2d(x, (H, W))\n x = x.view(N, C)\n return x\n\n# for route and shortcut\nclass EmptyModule(nn.Module):\n def __init__(self):\n super(EmptyModule, self).__init__()\n\n def forward(self, x):\n return x\n\n# support route shortcut and reorg\n\nclass Darknet(nn.Module):\n def net_name(self):\n names_list = 
('region', 'yolo')\n name = names_list[0]\n for m in self.models:\n if isinstance(m, YoloLayer):\n name = names_list[1]\n return name\n\n def getLossLayers(self):\n loss_layers = []\n for m in self.models:\n if isinstance(m, RegionLayer) or isinstance(m, YoloLayer):\n loss_layers.append(m)\n return loss_layers\n\n def __init__(self, cfgfile, use_cuda=True):\n super(Darknet, self).__init__()\n self.use_cuda = use_cuda\n self.blocks = parse_cfg(cfgfile)\n self.models = self.create_network(self.blocks) # merge conv, bn,leaky\n self.loss_layers = self.getLossLayers()\n\n #self.width = int(self.blocks[0]['width'])\n #self.height = int(self.blocks[0]['height'])\n\n if len(self.loss_layers) > 0:\n last = len(self.loss_layers)-1\n self.anchors = self.loss_layers[last].anchors\n self.num_anchors = self.loss_layers[last].num_anchors\n self.anchor_step = self.loss_layers[last].anchor_step\n self.num_classes = self.loss_layers[last].num_classes\n\n # default format : major=0, minor=1\n self.header = torch.IntTensor([0,1,0,0])\n self.seen = 0\n self.print_network()\n\n def forward(self, x):\n ind = -2\n #self.loss_layers = None\n outputs = dict()\n out_boxes = dict()\n outno = 0\n for block in self.blocks:\n ind = ind + 1\n\n if block['type'] == 'net':\n continue\n elif block['type'] in ['convolutional', 'maxpool', 'reorg', 'upsample', 'avgpool', 'softmax', 'connected']:\n x = self.models[ind](x)\n outputs[ind] = x\n elif block['type'] == 'route':\n layers = block['layers'].split(',')\n layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]\n if len(layers) == 1:\n x = outputs[layers[0]]\n elif len(layers) == 2:\n x1 = outputs[layers[0]]\n x2 = outputs[layers[1]]\n x = torch.cat((x1,x2),1)\n outputs[ind] = x\n elif block['type'] == 'shortcut':\n from_layer = int(block['from'])\n activation = block['activation']\n from_layer = from_layer if from_layer > 0 else from_layer + ind\n x1 = outputs[from_layer]\n x2 = outputs[ind-1]\n x = x1 + x2\n if activation == 'leaky':\n x 
= F.leaky_relu(x, 0.1, inplace=True)\n elif activation == 'relu':\n x = F.relu(x, inplace=True)\n outputs[ind] = x\n elif block['type'] in [ 'region', 'yolo']:\n boxes = self.models[ind].get_mask_boxes(x)\n out_boxes[outno]= boxes\n outno += 1\n outputs[ind] = None\n elif block['type'] == 'cost':\n continue\n else:\n print('unknown type %s' % (block['type']))\n return x if outno == 0 else out_boxes\n\n def print_network(self):\n print_cfg(self.blocks)\n\n def create_network(self, blocks):\n models = nn.ModuleList()\n \n prev_filters = 3\n out_filters =[]\n prev_stride = 1\n out_strides = []\n conv_id = 0\n ind = -2\n for block in blocks:\n ind += 1\n if block['type'] == 'net':\n prev_filters = int(block['channels'])\n self.width = int(block['width'])\n self.height = int(block['height'])\n continue\n elif block['type'] == 'convolutional':\n conv_id = conv_id + 1\n batch_normalize = int(block['batch_normalize'])\n filters = int(block['filters'])\n kernel_size = int(block['size'])\n stride = int(block['stride'])\n is_pad = int(block['pad'])\n pad = (kernel_size-1)//2 if is_pad else 0\n activation = block['activation']\n model = nn.Sequential()\n if batch_normalize:\n model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))\n model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))\n #model.add_module('bn{0}'.format(conv_id), BN2d(filters))\n else:\n model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))\n if activation == 'leaky':\n model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))\n elif activation == 'relu':\n model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True))\n prev_filters = filters\n out_filters.append(prev_filters)\n prev_stride = stride * prev_stride\n out_strides.append(prev_stride) \n models.append(model)\n elif block['type'] == 'maxpool':\n pool_size = int(block['size'])\n stride = 
int(block['stride'])\n if stride > 1:\n model = nn.MaxPool2d(pool_size, stride)\n else:\n model = MaxPoolStride1()\n out_filters.append(prev_filters)\n prev_stride = stride * prev_stride\n out_strides.append(prev_stride) \n models.append(model)\n elif block['type'] == 'avgpool':\n model = GlobalAvgPool2d()\n out_filters.append(prev_filters)\n models.append(model)\n elif block['type'] == 'softmax':\n model = nn.Softmax()\n out_strides.append(prev_stride)\n out_filters.append(prev_filters)\n models.append(model)\n elif block['type'] == 'cost':\n if block['_type'] == 'sse':\n model = nn.MSELoss(size_average=True)\n elif block['_type'] == 'L1':\n model = nn.L1Loss(size_average=True)\n elif block['_type'] == 'smooth':\n model = nn.SmoothL1Loss(size_average=True)\n out_filters.append(1)\n out_strides.append(prev_stride)\n models.append(model)\n elif block['type'] == 'reorg':\n stride = int(block['stride'])\n prev_filters = stride * stride * prev_filters\n out_filters.append(prev_filters)\n prev_stride = prev_stride * stride\n out_strides.append(prev_stride) \n models.append(Reorg(stride))\n elif block['type'] == 'upsample':\n stride = int(block['stride'])\n out_filters.append(prev_filters)\n prev_stride = prev_stride / stride\n out_strides.append(prev_stride) \n #models.append(nn.Upsample(scale_factor=stride, mode='nearest'))\n models.append(Upsample(stride))\n elif block['type'] == 'route':\n layers = block['layers'].split(',')\n ind = len(models)\n layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]\n if len(layers) == 1:\n prev_filters = out_filters[layers[0]]\n prev_stride = out_strides[layers[0]]\n elif len(layers) == 2:\n assert(layers[0] == ind - 1)\n prev_filters = out_filters[layers[0]] + out_filters[layers[1]]\n prev_stride = out_strides[layers[0]]\n out_filters.append(prev_filters)\n out_strides.append(prev_stride)\n models.append(EmptyModule())\n elif block['type'] == 'shortcut':\n ind = len(models)\n prev_filters = out_filters[ind-1]\n 
out_filters.append(prev_filters)\n prev_stride = out_strides[ind-1]\n out_strides.append(prev_stride)\n models.append(EmptyModule())\n elif block['type'] == 'connected':\n filters = int(block['output'])\n if block['activation'] == 'linear':\n model = nn.Linear(prev_filters, filters)\n elif block['activation'] == 'leaky':\n model = nn.Sequential(\n nn.Linear(prev_filters, filters),\n nn.LeakyReLU(0.1, inplace=True))\n elif block['activation'] == 'relu':\n model = nn.Sequential(\n nn.Linear(prev_filters, filters),\n nn.ReLU(inplace=True))\n prev_filters = filters\n out_filters.append(prev_filters)\n out_strides.append(prev_stride)\n models.append(model)\n elif block['type'] == 'region':\n region_layer = RegionLayer(use_cuda=self.use_cuda)\n anchors = block['anchors'].split(',')\n region_layer.anchors = [float(i) for i in anchors]\n region_layer.num_classes = int(block['classes'])\n region_layer.num_anchors = int(block['num'])\n region_layer.anchor_step = len(region_layer.anchors)//region_layer.num_anchors\n region_layer.rescore = int(block['rescore'])\n region_layer.object_scale = float(block['object_scale'])\n region_layer.noobject_scale = float(block['noobject_scale'])\n region_layer.class_scale = float(block['class_scale'])\n region_layer.coord_scale = float(block['coord_scale'])\n region_layer.thresh = float(block['thresh'])\n out_filters.append(prev_filters)\n out_strides.append(prev_stride)\n models.append(region_layer)\n elif block['type'] == 'yolo':\n yolo_layer = YoloLayer(use_cuda=self.use_cuda)\n anchors = block['anchors'].split(',')\n anchor_mask = block['mask'].split(',')\n yolo_layer.anchor_mask = [int(i) for i in anchor_mask]\n yolo_layer.anchors = [float(i) for i in anchors]\n yolo_layer.num_classes = int(block['classes'])\n yolo_layer.num_anchors = int(block['num'])\n yolo_layer.anchor_step = len(yolo_layer.anchors)//yolo_layer.num_anchors\n try:\n yolo_layer.rescore = int(block['rescore'])\n except:\n pass\n yolo_layer.ignore_thresh = 
float(block['ignore_thresh'])\n yolo_layer.truth_thresh = float(block['truth_thresh'])\n yolo_layer.stride = prev_stride\n yolo_layer.nth_layer = ind\n yolo_layer.net_width = self.width\n yolo_layer.net_height = self.height\n out_filters.append(prev_filters)\n out_strides.append(prev_stride)\n models.append(yolo_layer) \n else:\n print('unknown type %s' % (block['type']))\n \n return models\n\n def load_binfile(self, weightfile):\n fp = open(weightfile, 'rb')\n \n version = np.fromfile(fp, count=3, dtype=np.int32)\n version = [int(i) for i in version]\n if version[0]*10+version[1] >=2 and version[0] < 1000 and version[1] < 1000:\n seen = np.fromfile(fp, count=1, dtype=np.int64)\n else:\n seen = np.fromfile(fp, count=1, dtype=np.int32)\n self.header = torch.from_numpy(np.concatenate((version, seen), axis=0))\n self.seen = int(seen)\n body = np.fromfile(fp, dtype=np.float32)\n fp.close()\n return body\n\n def load_weights(self, weightfile):\n buf = self.load_binfile(weightfile)\n\n start = 0\n ind = -2\n for block in self.blocks:\n if start >= buf.size:\n break\n ind = ind + 1\n if block['type'] == 'net':\n continue\n elif block['type'] == 'convolutional':\n model = self.models[ind]\n batch_normalize = int(block['batch_normalize'])\n if batch_normalize:\n start = load_conv_bn(buf, start, model[0], model[1])\n else:\n start = load_conv(buf, start, model[0])\n elif block['type'] == 'connected':\n model = self.models[ind]\n if block['activation'] != 'linear':\n start = load_fc(buf, start, model[0])\n else:\n start = load_fc(buf, start, model)\n elif block['type'] == 'maxpool':\n pass\n elif block['type'] == 'reorg':\n pass\n elif block['type'] == 'upsample':\n pass\n elif block['type'] == 'route':\n pass\n elif block['type'] == 'shortcut':\n pass\n elif block['type'] == 'region':\n pass\n elif block['type'] == 'yolo':\n pass \n elif block['type'] == 'avgpool':\n pass\n elif block['type'] == 'softmax':\n pass\n elif block['type'] == 'cost':\n pass\n else:\n 
print('unknown type %s' % (block['type']))\n\n def save_weights(self, outfile, cutoff=0):\n if cutoff <= 0:\n cutoff = len(self.blocks)-1\n\n dirname = os.path.dirname(outfile)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n fp = open(outfile, 'wb')\n self.header[3] = self.seen\n header = np.array(self.header[0:3].numpy(), np.int32)\n header.tofile(fp)\n if (self.header[0]*10+self.header[1]) >= 2:\n seen = np.array(self.seen, np.int64)\n else:\n seen = np.array(self.seen, np.int32)\n seen.tofile(fp)\n\n ind = -1\n for blockId in range(1, cutoff+1):\n ind = ind + 1\n block = self.blocks[blockId]\n if block['type'] == 'convolutional':\n model = self.models[ind]\n batch_normalize = int(block['batch_normalize'])\n if batch_normalize:\n save_conv_bn(fp, model[0], model[1])\n else:\n save_conv(fp, model[0])\n elif block['type'] == 'connected':\n model = self.models[ind]\n if block['activation'] != 'linear':\n save_fc(fc, model)\n else:\n save_fc(fc, model[0])\n elif block['type'] == 'maxpool':\n pass\n elif block['type'] == 'reorg':\n pass\n elif block['type'] == 'upsample':\n pass \n elif block['type'] == 'route':\n pass\n elif block['type'] == 'shortcut':\n pass\n elif block['type'] == 'region':\n pass\n elif block['type'] == 'yolo':\n pass\n elif block['type'] == 'avgpool':\n pass\n elif block['type'] == 'softmax':\n pass\n elif block['type'] == 'cost':\n pass\n else:\n print('unknown type %s' % (block['type']))\n fp.close()\n" ]
[ [ "torch.nn.Softmax", "torch.cat", "numpy.concatenate", "torch.nn.L1Loss", "torch.nn.functional.relu", "torch.nn.functional.pad", "torch.nn.SmoothL1Loss", "torch.nn.Sequential", "torch.nn.functional.avg_pool2d", "torch.nn.ModuleList", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.LeakyReLU", "torch.nn.functional.leaky_relu", "torch.nn.BatchNorm2d", "numpy.array", "numpy.fromfile", "torch.nn.MaxPool2d", "torch.IntTensor", "torch.nn.ReLU", "torch.nn.MSELoss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bamos/hypertorch
[ "24826349f73d84f0820da429023f6f6aeada7c74" ]
[ "hypergrad/diff_optimizers.py" ]
[ "import torch\nfrom itertools import repeat\n\n\nclass DifferentiableOptimizer:\n def __init__(self, loss_f, dim_mult, data_or_iter=None):\n \"\"\"\n Args:\n loss_f: callable with signature (params, hparams, [data optional]) -> loss tensor\n data_or_iter: (x, y) or iterator over the data needed for loss_f\n \"\"\"\n self.data_iterator = None\n if data_or_iter:\n self.data_iterator = data_or_iter if hasattr(data_or_iter, '__next__') else repeat(data_or_iter)\n\n self.loss_f = loss_f\n self.dim_mult = dim_mult\n self.curr_loss = None\n\n def get_opt_params(self, params):\n opt_params = [p for p in params]\n opt_params.extend([torch.zeros_like(p) for p in params for _ in range(self.dim_mult-1) ])\n return opt_params\n\n def step(self, params, hparams, create_graph):\n raise NotImplementedError\n\n def __call__(self, params, hparams, create_graph=True):\n with torch.enable_grad():\n return self.step(params, hparams, create_graph)\n\n def get_loss(self, params, hparams):\n if self.data_iterator:\n data = next(self.data_iterator)\n self.curr_loss = self.loss_f(params, hparams, data)\n else:\n self.curr_loss = self.loss_f(params, hparams)\n return self.curr_loss\n\n\nclass HeavyBall(DifferentiableOptimizer):\n def __init__(self, loss_f, step_size, momentum, data_or_iter=None):\n super(HeavyBall, self).__init__(loss_f, dim_mult=2, data_or_iter=data_or_iter)\n self.loss_f = loss_f\n self.step_size_f = step_size if callable(step_size) else lambda x: step_size\n self.momentum_f = momentum if callable(momentum) else lambda x: momentum\n\n def step(self, params, hparams, create_graph):\n n = len(params) // 2\n p, p_aux = params[:n], params[n:]\n loss = self.get_loss(p, hparams)\n sz, mu = self.step_size_f(hparams), self.momentum_f(hparams)\n p_new, p_new_aux = heavy_ball_step(p, p_aux, loss, sz, mu, create_graph=create_graph)\n return [*p_new, *p_new_aux]\n\n\nclass Momentum(DifferentiableOptimizer):\n \"\"\"\n GD with momentum step as implemented in torch.optim.SGD\n .. 
math::\n v_{t+1} = \\mu * v_{t} + g_{t+1} \\\\\n p_{t+1} = p_{t} - lr * v_{t+1}\n \"\"\"\n def __init__(self, loss_f, step_size, momentum, data_or_iter=None):\n super(Momentum, self).__init__(loss_f, dim_mult=2, data_or_iter=data_or_iter)\n self.loss_f = loss_f\n self.step_size_f = step_size if callable(step_size) else lambda x: step_size\n self.momentum_f = momentum if callable(momentum) else lambda x: momentum\n\n def step(self, params, hparams, create_graph):\n n = len(params) // 2\n p, p_aux = params[:n], params[n:]\n loss = self.get_loss(p, hparams)\n sz, mu = self.step_size_f(hparams), self.momentum_f(hparams)\n p_new, p_new_aux = torch_momentum_step(p, p_aux, loss, sz, mu, create_graph=create_graph)\n return [*p_new, *p_new_aux]\n\n\nclass GradientDescent(DifferentiableOptimizer):\n def __init__(self, loss_f, step_size, data_or_iter=None):\n super(GradientDescent, self).__init__(loss_f, dim_mult=1, data_or_iter=data_or_iter)\n self.step_size_f = step_size if callable(step_size) else lambda x: step_size\n\n def step(self, params, hparams, create_graph):\n loss = self.get_loss(params, hparams)\n sz = self.step_size_f(hparams)\n return gd_step(params, loss, sz, create_graph=create_graph)\n\n\ndef gd_step(params, loss, step_size, create_graph=True):\n grads = torch.autograd.grad(loss, params, create_graph=create_graph)\n return [w - step_size * g for w, g in zip(params, grads)]\n\n\ndef heavy_ball_step(params, aux_params, loss, step_size, momentum, create_graph=True):\n grads = torch.autograd.grad(loss, params, create_graph=create_graph)\n return [w - step_size * g + momentum * (w - v) for g, w, v in zip(grads, params, aux_params)], params\n\n\ndef torch_momentum_step(params, aux_params, loss, step_size, momentum, create_graph=True):\n \"\"\"\n GD with momentum step as implemented in torch.optim.SGD\n .. 
math::\n v_{t+1} = \\mu * v_{t} + g_{t+1} \\\\\n p_{t+1} = p_{t} - lr * v_{t+1}\n \"\"\"\n grads = torch.autograd.grad(loss, params, create_graph=create_graph)\n new_aux_params = [momentum*v + g for v, g in zip(aux_params, grads)]\n return [w - step_size * nv for w, nv in zip(params, new_aux_params)], new_aux_params\n\n\n" ]
[ [ "torch.zeros_like", "torch.autograd.grad", "torch.enable_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ericdaat/self-label
[ "7c12f834c7b6bd5bee2f7f165aab33d4c4e50b51" ]
[ "models/resnetv1.py" ]
[ "import torch.nn as nn\nimport math\n\n__all__ = ['resnetv1','resnetv1_18']\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass Normalize(nn.Module):\n\n def __init__(self, power=2):\n super(Normalize, self).__init__()\n self.power = power\n\n def forward(self, x):\n norm = x.pow(self.power).sum(1, keepdim=True).pow(1. / self.power)\n out = x.div(norm)\n return out\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = 
self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, in_channel=3, width=1, num_classes=[1000]):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.headcount = len(num_classes)\n self.base = int(64 * width)\n self.features = nn.Sequential(*[\n nn.Conv2d(in_channel, 64, kernel_size=7, stride=2, padding=3, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n self._make_layer(block, self.base, layers[0]),\n self._make_layer(block, self.base * 2, layers[1], stride=2),\n self._make_layer(block, self.base * 4, layers[2], stride=2),\n self._make_layer(block, self.base * 8, layers[3], stride=2),\n nn.AvgPool2d(7, stride=1),\n ])\n if len(num_classes) == 1:\n self.top_layer = nn.Sequential(nn.Linear(512*4, num_classes[0]))\n else:\n for a, i in enumerate(num_classes):\n setattr(self, \"top_layer%d\" % a, nn.Linear(512*4, i))\n self.top_layer = None\n\n for m in self.features.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.features(x)\n out = out.view(out.size(0), -1)\n if self.headcount == 1:\n if self.top_layer:\n out = self.top_layer(out)\n return out\n else:\n outp = []\n for i in range(self.headcount):\n outp.append(getattr(self, \"top_layer%d\" % i)(out))\n return outp\n\n\ndef resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model\n\n\ndef resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model\n\ndef resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model\n\ndef resnetv1(num_classes=[1000]):\n \"\"\"Encoder for instance discrimination and MoCo\"\"\"\n 
return resnet50(num_classes=num_classes)\n\ndef resnetv1_18(num_classes=[1000]):\n \"\"\"Encoder for instance discrimination and MoCo\"\"\"\n return resnet18(num_classes=num_classes)\n\nif __name__ == '__main__':\n import torch\n model = resnetv1(num_classes=[500]*3)\n print([ k.shape for k in model(torch.randn(64,3,224,224))])" ]
[ [ "torch.nn.Sequential", "torch.randn", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cb-stimmer/estim2bapi
[ "adeb6d4ef681c4e4be65d7ad1758edb9bf11ac58" ]
[ "examples/udp_motion_example/motion.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom collections import deque\n\nclass EMA:\n def __init__(self, alpha):\n self.alpha = alpha\n self.ema = None\n self.emv = 0.0\n\n def __call__(self, value):\n delta = value\n if self.ema is not None: delta -= self.ema\n\n self.emv = (1.-self.alpha) * (self.emv + self.alpha*delta**2)\n\n if self.ema is None:\n self.ema = float(value)\n else:\n #self.ema = self.alpha * self.ema + (1.-self.alpha) * value\n self.ema = self.ema + self.alpha * delta\n\n print(self.ema, self.emv)\n return self.ema, self.emv\n\n def get_ema(self):\n if self.ema is None: return 0\n return self.ema\n\n\nclass History:\n def __init__(self, max_length=1000):\n self.counter = 0\n self.max_length = max_length\n self.hist = deque()\n self.vhist = deque()\n self.shist = deque()\n self.ahist = deque()\n\n self.speed_ema = EMA(alpha=0.9)\n\n def record(self, t, x, y, z):\n self.counter += 1\n self.hist.append( np.array([t, x, y, z]) )\n\n v = self.calc_velocity()\n self.vhist.append( np.array([t, v[0], v[1], v[2]]) )\n \n s = self.calc_speed()\n #s, sv = self.speed_ema(s)\n #self.speed_means = s\n #self.speed_stds = np.sqrt(sv)\n self.shist.append( np.array([t, s]) )\n\n pitch, roll = self.calc_angles(-1)\n self.ahist.append( np.array([t, pitch[0], roll[0]]) )\n\n if len(self.hist) > self.max_length:\n self.hist.popleft()\n if len(self.vhist) > self.max_length:\n self.vhist.popleft()\n if len(self.shist) > self.max_length:\n self.shist.popleft()\n if len(self.ahist) > self.max_length:\n self.ahist.popleft()\n\n def __len__(self):\n return len(self.hist)\n\n def get(self, pos):\n return self.hist[pos]\n\n def get_stats(self, low=0, high=None):\n means = np.mean(self.hist, axis=0) \n stds = np.std(self.hist, axis=0)\n return means[low:high], stds[low:high]\n\n def calc_velocities(self):\n d = np.diff(self.hist, axis=0)\n dt = d[:, 0]\n dt = np.reshape(dt, (-1, 1))\n dxyz = d[:, 1:4]\n return dxyz/dt\n\n def calc_speeds(self):\n vels = 
self.calc_velocities()\n return np.sqrt(np.sum(vels**2, axis=-1))\n\n def calc_angles(self, pos=None):\n if pos is None:\n xyz = np.array(self.hist)[:, 1:4]\n else:\n xyz = np.array(self.hist)[pos, 1:4]\n xyz = np.reshape(xyz, (-1, 3)) # single entry\n pitch = np.arctan(xyz[:, 0] / np.sqrt(xyz[:, 1]**2 + xyz[:, 2]**2)) * 180. / np.pi\n roll = np.arctan(xyz[:, 1] / np.sqrt(xyz[:, 0]**2 + xyz[:, 2]**2)) * 180. / np.pi\n return pitch, roll\n\n def calc_velocity(self):\n try:\n txyz_this = self.get(-1)\n txyz_last = self.get(-2)\n except IndexError:\n return np.zeros(3, dtype=float)\n d = txyz_this - txyz_last\n dt = d[0]\n dxyz = d[1:4]\n return dxyz/dt\n\n def calc_speed(self):\n vels = self.calc_velocity()\n return np.sqrt(np.sum(vels**2, axis=-1))\n\n def calibrate_velocities(self, motionstd=None):\n vels = self.calc_velocities()\n self.vel_means, self.vel_stds = np.mean(vels, axis=0), np.std(vels, axis=0)\n if motionstd is not None:\n self.vel_stds = motionstd\n\n return self.vel_means, self.vel_stds\n\n def calibrate_speeds(self, motionstd=None):\n speeds = self.calc_speeds()\n ##df = pd.DataFrame(speeds, columns=['vel'])\n ##ema = pd.ewma(df, alpha=0.5)\n ##self.speed_means = ema.mean().values[-1]\n ##self.speed_stds = ema.std().values[-1]\n self.speed_means, self.speed_stds = np.mean(speeds, axis=0), np.std(speeds, axis=0)\n if self.speed_stds < 1.5: self.speed_stds = 10.0\n \n if motionstd is not None:\n self.speed_stds = motionstd\n\n return self.speed_means, self.speed_stds\n\n def calibrate_angles(self, angstd=None):\n angles = self.calc_angles()\n self.angle_means, self.angle_stds = np.mean(angles, axis=1), np.std(angles, axis=1)\n if self.angle_stds[0] < 2.0: self.angle_stds[0] = 0.75\n if self.angle_stds[1] < 2.0: self.angle_stds[1] = 0.75\n if angstd is not None:\n self.angle_stds = np.array([angstd, angstd])\n\n return self.angle_means, self.angle_stds\n\n def test_velocity_trigger(self, motion_tol):\n vel = self.calc_velocity()\n trigger, = np.where( 
np.abs(vel) - motion_tol*self.vel_stds > 0 )\n if len(trigger) > 0:\n return True\n return False\n\n def test_speed_trigger(self, motion_tol):\n speed = self.calc_speed()\n trigger, = np.where( np.abs(speed) - motion_tol*self.speed_stds > 0 )\n if len(trigger) > 0:\n return True\n return False\n\n def test_angle_trigger(self, angle_tol):\n dangles = np.array(self.calc_angles(-1)).flatten() - self.angle_means\n trigger, = np.where( np.abs(dangles) > angle_tol*self.angle_stds )\n if len(trigger) > 0:\n return True\n return False\n\n\n" ]
[ [ "numpy.abs", "numpy.sqrt", "numpy.reshape", "numpy.std", "numpy.mean", "numpy.diff", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
obi-ml-public/echoAI-PET-measurements
[ "54d81edfdc465b8db0b1bb71ea01855e42a623bc" ]
[ "src/echoai_pet_measurements/processing.py" ]
[ "\"\"\"\nThis file is part of the echoAI-PET-measurements project.\n\"\"\"\nimport os\nimport numpy as np\nimport cv2\nimport lz4.frame\n\nclass Videoconverter:\n \"\"\" Preprocessing functions for echo videos\n min_rate: minimum frame rate\n min_frames: minimum required number of frames\n meta_df: data frame from collect_metadata script\n \"\"\"\n def __init__(self, max_frame_time_ms, min_frames, meta_df):\n self.max_frame_time = max_frame_time_ms\n self.min_rate = 1/max_frame_time_ms*1e3 if max_frame_time_ms is not None else None\n self.min_frames = min_frames\n self.meta_df = meta_df\n self.min_video_len = min_frames*max_frame_time_ms*1e-3 if max_frame_time_ms is not None else None\n\n def im_scale(self, im, dx, dy):\n \"\"\" convert single images to uint8 and resize by scale factors \"\"\"\n # We can do other things here: e.g. background subtraction or contrast enhancement\n im_scaled = np.uint8((im - np.amin(im)) / (np.amax(im) - np.amin(im)) * 256)\n # im_scaled_eq = cv2.equalizeHist(im_scaled) # histogram equalization (not needed)\n if (dx is not None) & (dy is not None):\n width = int(np.round(im_scaled.shape[1] * 7.5 * dx))\n height = int(np.round(im_scaled.shape[0] * 7.5 * dy))\n im_resized = cv2.resize(im_scaled, (width, height), interpolation=cv2.INTER_LINEAR)\n else:\n im_resized = im_scaled\n return im_resized\n\n def data2imarray(self, im_data, dx=None, dy=None):\n \"\"\"\n apply imscale function to np.array\n arg: im_array (frame, height, width)\n returns: im_array (height, width, frame)\n \"\"\"\n im_data = np.squeeze(im_data)\n im_list = [self.im_scale(im_data[im], dx, dy) for im in range(im_data.shape[0])]\n im_array = np.array(im_list, dtype=np.uint16)\n im_array = np.moveaxis(im_array, 0, -1)\n return im_array\n\n def subsample_time_index_list(self, frame_time, default_rate, n_frames):\n \"\"\"\n frame_time: time interval between frames [s]\n default_rate: matching frame rate [fps],\n n_frames: number of frames in the output\n \"\"\"\n 
default_times = np.arange(0, n_frames, 1) / default_rate\n times = np.arange(0, default_times[-1] + frame_time, frame_time)\n time_index_list = [np.argmin(np.abs(times - t)) for t in default_times]\n\n return time_index_list\n\n def subsample_video(self, image_array, frame_time):\n \"\"\"\n Select frames that are closest to a constant frame rate\n arg: image_array: np.array() [rows, columns, frame]\n \"\"\"\n rate = 1 / frame_time\n # Check if the video is long enough\n video_len = image_array.shape[-1] / rate\n subsampled_image_array = np.zeros(1)\n\n if (self.min_video_len < video_len) & (self.min_rate < rate):\n # print('Video is long enough and the rate is good.')\n # Get the frame index list\n time_index_list = self.subsample_time_index_list(frame_time=frame_time,\n default_rate=self.min_rate,\n n_frames=self.min_frames)\n # Subsample video: skip frames by time index\n subsampled_image_array = image_array[:, :, time_index_list]\n\n return subsampled_image_array\n\n def load_video(self, file):\n \"\"\" Just load a video file \"\"\"\n try:\n with lz4.frame.open(file, 'rb') as fp:\n data = np.load(fp)\n except IOError as err:\n print('Cannot open npy file.')\n print(err)\n data = None\n return data\n\n def process_data(self, data, deltaX, deltaY, frame_time):\n output_array = np.zeros(1)\n error = None\n if (0 < deltaX) & (deltaX < 1) & (0 < deltaY) & (deltaY < 1):\n frame_time *= 1e-3\n rate = 1 / frame_time\n video_len = data.shape[0] / rate\n # If the rate is higher, we need more frames\n if (self.min_video_len < video_len) & (self.min_rate < rate):\n image_array = self.data2imarray(im_data=data, dx=deltaX, dy=deltaY)\n output_array = self.subsample_video(image_array=image_array,\n frame_time=frame_time)\n else:\n if self.min_rate >= rate:\n print(f'Frame rate is too low: {rate:.2f}s^-1. Skipping.')\n error = 'frame_rate'\n if self.min_video_len >= video_len:\n print(f'Video is too short: {video_len:.2f}s. 
Skipping.')\n error = 'video_len'\n else:\n print('Meta data invalid for {}. Skipping file.')\n error = 'deltaXY'\n\n return error, output_array\n\n def process_video(self, filename):\n meta = self.meta_df[self.meta_df.filename == filename]\n output_array = np.zeros(1)\n error=None\n if meta.shape[0] > 0:\n deltaX = np.abs(meta.deltaX.values[0])\n deltaY = np.abs(meta.deltaY.values[0])\n if (0 < deltaX) & (deltaX < 1) & (0 < deltaY) & (deltaY < 1):\n frame_time = meta.frame_time.values[0] * 1e-3\n rate = 1 / frame_time\n file = os.path.join(meta.dir.values[0], filename)\n try:\n with lz4.frame.open(file, 'rb') as fp:\n data = np.load(fp)\n except IOError as err:\n print('Cannot open npy file.')\n print(err)\n error='load'\n else:\n video_len = data.shape[0] / rate\n # If the rate is higher, we need more frames\n if (self.min_video_len < video_len) & (self.min_rate < rate):\n image_array = self.data2imarray(im_data=data, dx=deltaX, dy=deltaY)\n output_array = self.subsample_video(image_array=image_array,\n frame_time=frame_time)\n else:\n if self.min_rate >= rate:\n print(f'Frame rate is too low: {rate:.2f}s^-1. Skipping.')\n error='frame_rate'\n if self.min_video_len >= video_len:\n print(f'Video is too short: {video_len:.2f}s. Skipping.')\n error='video_len'\n else:\n print('Meta data invalid for {}. Skipping'.format(filename))\n error='deltaXY'\n else:\n print('No meta data for {}. Skipping'.format(filename))\n return error, output_array\n" ]
[ [ "numpy.amax", "numpy.abs", "numpy.amin", "numpy.arange", "numpy.squeeze", "numpy.round", "numpy.moveaxis", "numpy.load", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Alicegaz/pytorch-lightning
[ "96b45f15aa04e0ad7e0ed20b8717ab1599148966" ]
[ "tests/trainer/test_dataloaders.py" ]
[ "import os\nimport platform\nfrom unittest.mock import patch\n\nimport pytest\nimport torch\nfrom packaging.version import parse\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.dataset import IterableDataset, Subset\nfrom torch.utils.data.distributed import DistributedSampler\n\nimport tests.base.develop_pipelines as tpipes\nfrom pytorch_lightning import Trainer, Callback\nfrom pytorch_lightning.trainer.data_loading import _has_iterable_dataset, _has_len\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom tests.base import EvalModelTemplate\n\n\ndef test_fit_train_loader_only(tmpdir):\n\n model = EvalModelTemplate()\n train_dataloader = model.train_dataloader()\n\n model.train_dataloader = None\n model.val_dataloader = None\n model.test_dataloader = None\n\n model.validation_step = None\n model.validation_epoch_end = None\n\n model.test_step = None\n model.test_epoch_end = None\n\n trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)\n trainer.fit(model, train_dataloader=train_dataloader)\n\n\ndef test_fit_val_loader_only(tmpdir):\n\n model = EvalModelTemplate()\n train_dataloader = model.train_dataloader()\n val_dataloader = model.val_dataloader()\n\n model.train_dataloader = None\n model.val_dataloader = None\n model.test_dataloader = None\n\n model.test_step = None\n model.test_epoch_end = None\n\n trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)\n trainer.fit(model, train_dataloader=train_dataloader, val_dataloaders=val_dataloader)\n\n\[email protected](\"dataloader_options\", [\n dict(val_check_interval=1.1),\n dict(val_check_interval=10000),\n])\ndef test_dataloader_config_errors_runtime(tmpdir, dataloader_options):\n\n model = EvalModelTemplate()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n **dataloader_options,\n )\n\n with pytest.raises(ValueError):\n # fit model\n trainer.fit(model)\n\n\[email protected](\"dataloader_options\", [\n 
dict(limit_train_batches=-0.1),\n dict(limit_train_batches=1.2),\n dict(limit_val_batches=-0.1),\n dict(limit_val_batches=1.2),\n dict(limit_test_batches=-0.1),\n dict(limit_test_batches=1.2),\n])\ndef test_dataloader_config_errors_init(tmpdir, dataloader_options):\n with pytest.raises(MisconfigurationException):\n Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n **dataloader_options,\n )\n\n\ndef test_multiple_val_dataloader(tmpdir):\n \"\"\"Verify multiple val_dataloader.\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=1.0,\n )\n result = trainer.fit(model)\n\n # verify training completed\n assert result == 1\n\n # verify there are 2 val loaders\n assert len(trainer.val_dataloaders) == 2, \\\n 'Multiple val_dataloaders not initiated properly'\n\n # make sure predictions are good for each val set\n for dataloader in trainer.val_dataloaders:\n tpipes.run_prediction(dataloader, trainer.model)\n\n\[email protected]('ckpt_path', [None, 'best', 'specific'])\ndef test_multiple_test_dataloader(tmpdir, ckpt_path):\n \"\"\"Verify multiple test_dataloader.\"\"\"\n\n model_template = EvalModelTemplate()\n\n class MultipleTestDataloaderModel(EvalModelTemplate):\n def test_dataloader(self):\n return model_template.test_dataloader__multiple()\n\n def test_step(self, batch, batch_idx, *args, **kwargs):\n return model_template.test_step__multiple_dataloaders(batch, batch_idx, *args, **kwargs)\n\n model = MultipleTestDataloaderModel()\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n trainer.fit(model)\n if ckpt_path == 'specific':\n ckpt_path = 
trainer.checkpoint_callback.best_model_path\n trainer.test(ckpt_path=ckpt_path)\n\n # verify there are 2 test loaders\n assert len(trainer.test_dataloaders) == 2, \\\n 'Multiple test_dataloaders not initiated properly'\n\n # make sure predictions are good for each test set\n for dataloader in trainer.test_dataloaders:\n tpipes.run_prediction(dataloader, trainer.model)\n\n # run the test method\n trainer.test(ckpt_path=ckpt_path)\n\n\ndef test_train_dataloader_passed_to_fit(tmpdir):\n \"\"\"Verify that train dataloader can be passed to fit \"\"\"\n\n # only train passed to fit\n model = EvalModelTemplate()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True))\n result = trainer.fit(model, **fit_options)\n\n assert result == 1\n\n\ndef test_train_val_dataloaders_passed_to_fit(tmpdir):\n \"\"\" Verify that train & val dataloader can be passed to fit \"\"\"\n\n # train, val passed to fit\n model = EvalModelTemplate()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n val_dataloaders=model.dataloader(train=False))\n\n result = trainer.fit(model, **fit_options)\n assert result == 1\n assert len(trainer.val_dataloaders) == 1, \\\n f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n\n\[email protected]('ckpt_path', [None, 'best', 'specific'])\ndef test_all_dataloaders_passed_to_fit(tmpdir, ckpt_path):\n \"\"\"Verify train, val & test dataloader(s) can be passed to fit and test method\"\"\"\n\n model = EvalModelTemplate()\n\n # train, val and test passed to fit\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n 
val_dataloaders=model.dataloader(train=False))\n result = trainer.fit(model, **fit_options)\n\n if ckpt_path == 'specific':\n ckpt_path = trainer.checkpoint_callback.best_model_path\n test_options = dict(test_dataloaders=model.dataloader(train=False),\n ckpt_path=ckpt_path)\n trainer.test(**test_options)\n\n assert result == 1\n assert len(trainer.val_dataloaders) == 1, \\\n f'val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n assert len(trainer.test_dataloaders) == 1, \\\n f'test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'\n\n\[email protected]('ckpt_path', [None, 'best', 'specific'])\ndef test_multiple_dataloaders_passed_to_fit(tmpdir, ckpt_path):\n \"\"\"Verify that multiple val & test dataloaders can be passed to fit.\"\"\"\n\n model = EvalModelTemplate()\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n model.test_step = model.test_step__multiple_dataloaders\n\n # train, multiple val and multiple test passed to fit\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True),\n val_dataloaders=[model.dataloader(train=False),\n model.dataloader(train=False)])\n trainer.fit(model, **fit_options)\n if ckpt_path == 'specific':\n ckpt_path = trainer.checkpoint_callback.best_model_path\n test_options = dict(test_dataloaders=[model.dataloader(train=False),\n model.dataloader(train=False)],\n ckpt_path=ckpt_path)\n trainer.test(**test_options)\n\n assert len(trainer.val_dataloaders) == 2, \\\n f'Multiple `val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n assert len(trainer.test_dataloaders) == 2, \\\n f'Multiple `test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'\n\n\[email protected](\n ['limit_train_batches', 'limit_val_batches', 
'limit_test_batches'],\n [\n pytest.param(0.0, 0.0, 0.0),\n pytest.param(0, 0, 0.5),\n pytest.param(1.0, 1.0, 1.0),\n pytest.param(0.2, 0.4, 0.4),\n ]\n)\ndef test_dataloaders_with_limit_percent_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):\n \"\"\"Verify num_batches for val & test dataloaders passed with batch limit in percent\"\"\"\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple_mixed_length\n model.test_dataloader = model.test_dataloader__multiple_mixed_length\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n model.test_step = model.test_step__multiple_dataloaders\n model.test_epoch_end = model.test_epoch_end__multiple_dataloaders\n\n # train, multiple val and multiple test passed with percent_check\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=limit_train_batches,\n limit_val_batches=limit_val_batches,\n limit_test_batches=limit_test_batches,\n )\n trainer.fit(model)\n expected_train_batches = int(len(trainer.train_dataloader) * limit_train_batches)\n expected_val_batches = [\n int(len(dataloader) * limit_val_batches) for dataloader in trainer.val_dataloaders\n ]\n assert trainer.num_training_batches == expected_train_batches\n assert trainer.num_val_batches == expected_val_batches\n\n trainer.test(ckpt_path=None)\n expected_test_batches = [\n int(len(dataloader) * limit_test_batches) for dataloader in trainer.test_dataloaders\n ]\n assert trainer.num_test_batches == expected_test_batches\n\n\[email protected](\n ['limit_train_batches', 'limit_val_batches', 'limit_test_batches'],\n [\n pytest.param(0, 0, 0),\n pytest.param(1, 2, 3),\n pytest.param(1, 2, 1e50),\n ]\n)\ndef test_dataloaders_with_limit_num_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):\n \"\"\"Verify num_batches for val & test dataloaders passed with batch 
limit as number\"\"\"\n os.environ['PL_DEV_DEBUG'] = '1'\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple_mixed_length\n model.test_dataloader = model.test_dataloader__multiple_mixed_length\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n model.test_step = model.test_step__multiple_dataloaders\n model.test_epoch_end = model.test_epoch_end__multiple_dataloaders\n\n # train, multiple val and multiple test passed with percent_check\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=limit_train_batches,\n limit_val_batches=limit_val_batches,\n limit_test_batches=limit_test_batches,\n )\n trainer.fit(model)\n\n # -------------------------------------------\n # MAKE SURE THE TRAINER SET THE CORRECT VALUES\n # -------------------------------------------\n assert trainer.num_training_batches == limit_train_batches\n assert trainer.num_val_batches == [limit_val_batches] * len(trainer.val_dataloaders)\n trainer.test(ckpt_path=None)\n\n # when the limit is greater than the number of test batches it should be the num in loaders\n test_dataloader_lengths = [len(x) for x in model.test_dataloader()]\n if limit_test_batches > 1e10:\n assert trainer.num_test_batches == test_dataloader_lengths\n else:\n assert trainer.num_test_batches == [limit_test_batches] * len(trainer.test_dataloaders)\n\n # -------------------------------------------\n # make sure we actually saw the expected num of batches\n # -------------------------------------------\n num_val_dataloaders = len(model.val_dataloader())\n num_test_dataloaders = len(model.test_dataloader())\n if limit_train_batches > 0:\n\n # make sure val batches are as expected\n assert len(trainer.dev_debugger.num_seen_val_check_batches) == num_val_dataloaders\n for dataloader_idx, num_batches in trainer.dev_debugger.num_seen_val_check_batches.items():\n assert 
num_batches == limit_val_batches\n\n # make sure test batches are as expected\n assert len(trainer.dev_debugger.num_seen_test_check_batches) == num_test_dataloaders\n for dataloader_idx, num_batches in trainer.dev_debugger.num_seen_test_check_batches.items():\n if limit_test_batches > 1e10:\n assert num_batches == test_dataloader_lengths[dataloader_idx]\n else:\n assert num_batches == limit_test_batches\n\n\ndef test_dataloaders_with_fast_dev_run(tmpdir):\n \"\"\"Verify num_batches for train, val & test dataloaders passed with fast_dev_run = True\"\"\"\n os.environ['PL_DEV_DEBUG'] = '1'\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__multiple_mixed_length\n model.test_dataloader = model.test_dataloader__multiple_mixed_length\n model.validation_step = model.validation_step__multiple_dataloaders\n model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders\n model.test_step = model.test_step__multiple_dataloaders\n model.test_epoch_end = model.test_epoch_end__multiple_dataloaders\n\n # train, multiple val and multiple test dataloaders passed with fast_dev_run = True\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=2,\n fast_dev_run=True,\n )\n assert trainer.max_epochs == 1\n assert trainer.num_sanity_val_steps == 0\n\n trainer.fit(model)\n assert not trainer.disable_validation\n assert trainer.num_training_batches == 1\n assert trainer.num_val_batches == [1] * len(trainer.val_dataloaders)\n\n trainer.test(ckpt_path=None)\n assert trainer.num_test_batches == [1] * len(trainer.test_dataloaders)\n\n # verify sanity check batches match as expected\n num_val_dataloaders = len(model.val_dataloader())\n assert trainer.dev_debugger.num_seen_sanity_check_batches == trainer.num_sanity_val_steps * num_val_dataloaders\n\n\[email protected]('ckpt_path', [None, 'best', 'specific'])\ndef test_mixing_of_dataloader_options(tmpdir, ckpt_path):\n \"\"\"Verify that dataloaders can be passed to fit\"\"\"\n\n model = 
EvalModelTemplate()\n\n trainer_options = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))\n assert results\n\n # fit model\n trainer = Trainer(**trainer_options)\n results = trainer.fit(model, val_dataloaders=model.dataloader(train=False))\n assert results\n if ckpt_path == 'specific':\n ckpt_path = trainer.checkpoint_callback.best_model_path\n trainer.test(test_dataloaders=model.dataloader(train=False), ckpt_path=ckpt_path)\n\n assert len(trainer.val_dataloaders) == 1, \\\n f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'\n assert len(trainer.test_dataloaders) == 1, \\\n f'`test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'\n\n\ndef test_train_inf_dataloader_error(tmpdir):\n \"\"\"Test inf train data loader (e.g. IterableDataset)\"\"\"\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__infinite\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=0.5)\n\n with pytest.raises(MisconfigurationException, match='infinite DataLoader'):\n trainer.fit(model)\n\n\ndef test_val_inf_dataloader_error(tmpdir):\n \"\"\"Test inf train data loader (e.g. IterableDataset)\"\"\"\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__infinite\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.5)\n\n with pytest.raises(MisconfigurationException, match='infinite DataLoader'):\n trainer.fit(model)\n\n\ndef test_test_inf_dataloader_error(tmpdir):\n \"\"\"Test inf train data loader (e.g. 
IterableDataset)\"\"\"\n model = EvalModelTemplate()\n model.test_dataloader = model.test_dataloader__infinite\n\n trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_test_batches=0.5)\n\n with pytest.raises(MisconfigurationException, match='infinite DataLoader'):\n trainer.test(model)\n\n\[email protected]('check_interval', [50, 1.0])\ndef test_inf_train_dataloader(tmpdir, check_interval):\n \"\"\"Test inf train data loader (e.g. IterableDataset)\"\"\"\n\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__infinite\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_check_interval=check_interval,\n )\n result = trainer.fit(model)\n # verify training completed\n assert result == 1\n\n\[email protected]('check_interval', [1.0])\ndef test_inf_val_dataloader(tmpdir, check_interval):\n \"\"\"Test inf val data loader (e.g. IterableDataset)\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__infinite\n\n # logger file to get meta\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n val_check_interval=check_interval,\n )\n result = trainer.fit(model)\n\n # verify training completed\n assert result == 1\n\n\ndef test_error_on_zero_len_dataloader(tmpdir):\n \"\"\" Test that error is raised if a zero-length dataloader is defined \"\"\"\n\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__zero_length\n\n # fit model\n with pytest.raises(ValueError):\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=0.1,\n limit_val_batches=0.1,\n limit_test_batches=0.1,\n )\n trainer.fit(model)\n\n\[email protected](platform.system() == 'Windows', reason='Does not apply to Windows platform.')\[email protected]('ckpt_path', [None, 'best', 'specific'])\n@patch('pytorch_lightning.trainer.data_loading.multiprocessing.cpu_count', return_value=4)\ndef test_warning_with_few_workers(mock, tmpdir, ckpt_path):\n \"\"\" Test that error 
is raised if dataloader with only a few workers is used \"\"\"\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n train_dl = model.dataloader(train=True)\n train_dl.num_workers = 0\n\n val_dl = model.dataloader(train=False)\n val_dl.num_workers = 0\n\n train_dl = model.dataloader(train=False)\n train_dl.num_workers = 0\n\n fit_options = dict(train_dataloader=train_dl,\n val_dataloaders=val_dl)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n )\n\n # fit model\n with pytest.warns(\n UserWarning, match='The dataloader, train dataloader, does not have many workers which may be a bottleneck.'\n ):\n trainer.fit(model, **fit_options)\n\n with pytest.warns(\n UserWarning, match='The dataloader, val dataloader 0, does not have many workers which may be a bottleneck.'\n ):\n trainer.fit(model, **fit_options)\n\n if ckpt_path == 'specific':\n ckpt_path = trainer.checkpoint_callback.best_model_path\n test_options = dict(test_dataloaders=train_dl, ckpt_path=ckpt_path)\n with pytest.warns(\n UserWarning, match='The dataloader, test dataloader 0, does not have many workers which may be a bottleneck.'\n ):\n trainer.test(**test_options)\n\n\[email protected](\n parse(torch.__version__) < parse(\"1.4.0\"),\n reason=\"IterableDataset with __len__ before 1.4 raises\",\n)\ndef test_warning_with_iterable_dataset_and_len(tmpdir):\n \"\"\" Tests that a warning messages is shown when an IterableDataset defines `__len__`. 
\"\"\"\n model = EvalModelTemplate()\n original_dataset = model.train_dataloader().dataset\n\n class IterableWithLen(IterableDataset):\n\n def __iter__(self):\n return iter(original_dataset)\n\n def __len__(self):\n return len(original_dataset)\n\n dataloader = DataLoader(IterableWithLen(), batch_size=16)\n assert _has_len(dataloader)\n assert _has_iterable_dataset(dataloader)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_steps=3,\n )\n with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):\n trainer.fit(model, train_dataloader=dataloader, val_dataloaders=[dataloader])\n with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):\n trainer.test(model, test_dataloaders=[dataloader])\n\n\[email protected](torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')\ndef test_dataloader_reinit_for_subclass(tmpdir):\n\n class CustomDataLoader(torch.utils.data.DataLoader):\n def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,\n batch_sampler=None, num_workers=0, collate_fn=None,\n pin_memory=False, drop_last=False, timeout=0,\n worker_init_fn=None, dummy_kwarg=None, **kwargs):\n super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n num_workers, collate_fn, pin_memory, drop_last, timeout,\n worker_init_fn)\n\n self.dummy_kwarg = dummy_kwarg\n\n trainer = Trainer(\n gpus=[0, 1],\n num_nodes=1,\n distributed_backend='ddp_spawn',\n default_root_dir=tmpdir,\n )\n\n class CustomDummyObj:\n sampler = None\n\n result = trainer.auto_add_sampler(CustomDummyObj(), train=True)\n assert isinstance(result, CustomDummyObj), \"Wrongly reinstantiated data loader\"\n\n dataset = list(range(1000))\n result = trainer.auto_add_sampler(CustomDataLoader(dataset), train=True)\n assert isinstance(result, torch.utils.data.DataLoader)\n assert isinstance(result, CustomDataLoader)\n assert hasattr(result, 'dummy_kwarg')\n\n # Shuffled DataLoader should also work\n result = 
trainer.auto_add_sampler(CustomDataLoader(list(range(1000)), shuffle=True), train=True)\n assert isinstance(result, torch.utils.data.DataLoader)\n assert isinstance(result, CustomDataLoader)\n assert hasattr(result, 'dummy_kwarg')\n\n class CustomSampler(torch.utils.data.Sampler):\n pass\n\n # Should raise an error if existing sampler is being replaced\n with pytest.raises(MisconfigurationException, match='DistributedSampler'):\n trainer.auto_add_sampler(\n CustomDataLoader(list(range(1000)), sampler=CustomSampler(list(range(1000)))), train=True)\n\n\nclass DistribSamplerCallback(Callback):\n\n def on_train_start(self, trainer, pl_module):\n train_sampler = trainer.train_dataloader.sampler\n assert isinstance(train_sampler, DistributedSampler)\n assert train_sampler.shuffle\n\n def on_validation_start(self, trainer, pl_module):\n val_sampler = trainer.val_dataloaders[0].sampler\n assert isinstance(val_sampler, DistributedSampler)\n assert not val_sampler.shuffle\n\n def on_test_start(self, trainer, pl_module):\n test_sampler = trainer.test_dataloaders[0].sampler\n assert isinstance(test_sampler, DistributedSampler)\n assert not test_sampler.shuffle\n\n\[email protected](platform.system() == 'Windows', reason='Does not apply to Windows platform.')\[email protected](torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')\ndef test_dataloader_distributed_sampler(tmpdir):\n \"\"\" Test DistributedSampler and it's arguments for DDP backend \"\"\"\n\n model = EvalModelTemplate()\n trainer = Trainer(\n gpus=[0, 1],\n num_nodes=1,\n distributed_backend='ddp_spawn',\n default_root_dir=tmpdir,\n max_steps=1,\n callbacks=[DistribSamplerCallback()]\n )\n trainer.fit(model)\n trainer.test(ckpt_path=None)\n\n\[email protected](torch.cuda.device_count() < 3, reason='Test requires multiple GPUs')\ndef test_batch_size_smaller_than_num_gpus(tmpdir):\n # we need at least 3 gpus for this test\n num_gpus = 3\n batch_size = 3\n\n class 
CurrentTestModel(EvalModelTemplate):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # batch norm doesn't work with batch size 1, we replace it\n self.c_d1_bn = torch.nn.ReLU()\n\n def training_step(self, *args, **kwargs):\n output = super().training_step(*args, **kwargs)\n loss = output['loss']\n # we make sure to add some metrics to the output dict,\n # this is essential for this test\n output['progress_bar'] = {'train_loss': loss}\n return output\n\n def train_dataloader(self):\n dataloader = super().train_dataloader()\n # construct a dataset with a size that is not divisible by num_gpus\n # therefore the last batch will have a size < num_gpus\n size = num_gpus * batch_size + (num_gpus - 1)\n dataset = Subset(dataloader.dataset, range(size))\n dataloader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n drop_last=False,\n )\n return dataloader\n\n hparams = EvalModelTemplate.get_default_hparams()\n hparams['batch_size'] = batch_size\n model = CurrentTestModel(**hparams)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=0.1,\n limit_val_batches=0,\n gpus=num_gpus,\n )\n\n # we expect the reduction for the metrics also to happen on the last batch\n # where we will get fewer metrics than gpus\n result = trainer.fit(model)\n assert 1 == result\n\n\[email protected]('check_interval', [1.0])\ndef test_val_dataloader_not_implemented_error(tmpdir, check_interval):\n \"\"\"Test not_implemented_error data loader (e.g. 
IterableDataset)\"\"\"\n\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__not_implemented_error\n\n # logger file to get meta\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_steps=5,\n max_epochs=1,\n val_check_interval=check_interval,\n )\n result = trainer.fit(model)\n # verify training completed\n assert result == 1\n\n\[email protected]('check_interval', [50, 1.0])\ndef test_train_dataloader_not_implemented_error(tmpdir, check_interval):\n \"\"\"Test not_implemented_error train data loader (e.g. IterableDataset)\"\"\"\n\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__not_implemented_error\n model.val_dataloader = model.val_dataloader__not_implemented_error\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_steps=5,\n max_epochs=1,\n val_check_interval=check_interval\n )\n result = trainer.fit(model)\n # verify training completed\n assert result == 1\n\n\ndef test_train_dataloader_not_implemented_error_failed(tmpdir):\n \"\"\"Test not_implemented_error train data loader (e.g. IterableDataset)\"\"\"\n model = EvalModelTemplate()\n model.train_dataloader = model.train_dataloader__not_implemented_error\n\n trainer = Trainer(default_root_dir=tmpdir, max_steps=5, max_epochs=1, val_check_interval=0.5)\n\n with pytest.raises(MisconfigurationException, match='infinite DataLoader'):\n trainer.fit(model)\n\n\ndef test_val_dataloader_not_implemented_error_failed(tmpdir):\n \"\"\"Test not_implemented_error train data loader (e.g. IterableDataset)\"\"\"\n model = EvalModelTemplate()\n model.val_dataloader = model.val_dataloader__not_implemented_error\n\n trainer = Trainer(default_root_dir=tmpdir, max_steps=5, max_epochs=1, limit_val_batches=0.5)\n\n with pytest.raises(MisconfigurationException, match='infinite DataLoader'):\n trainer.fit(model)\n\n\ndef test_test_dataloader_not_implemented_error_failed(tmpdir):\n \"\"\"Test not_implemented_error train data loader (e.g. 
IterableDataset)\"\"\"\n model = EvalModelTemplate()\n model.test_dataloader = model.test_dataloader__not_implemented_error\n\n trainer = Trainer(default_root_dir=tmpdir, max_steps=5, max_epochs=1, limit_test_batches=0.5)\n\n with pytest.raises(MisconfigurationException, match='infinite DataLoader'):\n trainer.test(model)\n" ]
[ [ "torch.cuda.device_count", "torch.utils.data.dataloader.DataLoader", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gilbertslade/hmmlearn
[ "e525a8819acc05f02ea486dfdb944b8cd525835c" ]
[ "hmmlearn/hmm.py" ]
[ "# Hidden Markov Models\n#\n# Author: Ron Weiss <[email protected]>\n# Shiqiao Du <[email protected]>\n# API changes: Jaques Grobler <[email protected]>\n# Modifications to create of the HMMLearn module: Gael Varoquaux\n# More API changes: Sergei Lebedev <[email protected]>\n\n\"\"\"\nThe :mod:`hmmlearn.hmm` module implements hidden Markov models.\n\"\"\"\n\nimport numpy as np\nfrom scipy.misc import logsumexp\nfrom sklearn import cluster\nfrom sklearn.mixture import _validate_covars\nfrom sklearn.utils import check_random_state\n\nfrom .stats import log_multivariate_normal_density\nfrom .base import _BaseHMM\nfrom .utils import iter_from_X_lengths, normalize, fill_covars, distribute_covar_matrix_to_match_covariance_type\n\n__all__ = [\"GMMHMM\", \"GaussianHMM\", \"MultinomialHMM\"]\n\nCOVARIANCE_TYPES = frozenset((\"spherical\", \"diag\", \"full\", \"tied\"))\n\n\nclass GaussianHMM(_BaseHMM):\n \"\"\"Hidden Markov Model with Gaussian emissions.\n\n Parameters\n ----------\n n_components : int\n Number of states.\n\n covariance_type : string, optional\n String describing the type of covariance parameters to\n use. Must be one of\n\n * \"spherical\" --- each state uses a single variance value that\n applies to all features.\n * \"diag\" --- each state uses a diagonal covariance matrix.\n * \"full\" --- each state uses a full (i.e. unrestricted)\n covariance matrix.\n * \"tied\" --- all states use **the same** full covariance matrix.\n\n Defaults to \"diag\".\n\n min_covar : float, optional\n Floor on the diagonal of the covariance matrix to prevent\n overfitting. 
Defaults to 1e-3.\n\n startprob_prior : array, shape (n_components, ), optional\n Parameters of the Dirichlet prior distribution for\n :attr:`startprob_`.\n\n transmat_prior : array, shape (n_components, n_components), optional\n Parameters of the Dirichlet prior distribution for each row\n of the transition probabilities :attr:`transmat_`.\n\n means_prior, means_weight : array, shape (n_components, ), optional\n Mean and precision of the Normal prior distribtion for\n :attr:`means_`.\n\n covars_prior, covars_weight : array, shape (n_components, ), optional\n Parameters of the prior distribution for the covariance matrix\n :attr:`covars_`.\n\n If :attr:`covariance_type` is \"spherical\" or \"diag\" the prior is\n the inverse gamma distribution, otherwise --- the inverse Wishart\n distribution.\n\n algorithm : string, optional\n Decoder algorithm. Must be one of \"viterbi\" or`\"map\".\n Defaults to \"viterbi\".\n\n random_state: RandomState or an int seed, optional\n A random number generator instance.\n\n n_iter : int, optional\n Maximum number of iterations to perform.\n\n tol : float, optional\n Convergence threshold. EM will stop if the gain in log-likelihood\n is below this value.\n\n verbose : bool, optional\n When ``True`` per-iteration convergence reports are printed\n to :data:`sys.stderr`. You can diagnose convergence via the\n :attr:`monitor_` attribute.\n\n params : string, optional\n Controls which parameters are updated in the training\n process. Can contain any combination of 's' for startprob,\n 't' for transmat, 'm' for means and 'c' for covars. Defaults\n to all parameters.\n\n init_params : string, optional\n Controls which parameters are initialized prior to\n training. 
Can contain any combination of 's' for\n startprob, 't' for transmat, 'm' for means and 'c' for covars.\n Defaults to all parameters.\n\n Attributes\n ----------\n n_features : int\n Dimensionality of the Gaussian emissions.\n\n monitor\\_ : ConvergenceMonitor\n Monitor object used to check the convergence of EM.\n\n transmat\\_ : array, shape (n_components, n_components)\n Matrix of transition probabilities between states.\n\n startprob\\_ : array, shape (n_components, )\n Initial state occupation distribution.\n\n means\\_ : array, shape (n_components, n_features)\n Mean parameters for each state.\n\n covars\\_ : array\n Covariance parameters for each state.\n\n The shape depends on :attr:`covariance_type`::\n\n (n_components, ) if \"spherical\",\n (n_features, n_features) if \"tied\",\n (n_components, n_features) if \"diag\",\n (n_components, n_features, n_features) if \"full\"\n\n Examples\n --------\n >>> from hmmlearn.hmm import GaussianHMM\n >>> GaussianHMM(n_components=2)\n ... #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n GaussianHMM(algorithm='viterbi',...\n \"\"\"\n def __init__(self, n_components=1, covariance_type='diag',\n min_covar=1e-3,\n startprob_prior=1.0, transmat_prior=1.0,\n means_prior=0, means_weight=0,\n covars_prior=1e-2, covars_weight=1,\n algorithm=\"viterbi\", random_state=None,\n n_iter=10, tol=1e-2, verbose=False,\n params=\"stmc\", init_params=\"stmc\"):\n _BaseHMM.__init__(self, n_components,\n startprob_prior=startprob_prior,\n transmat_prior=transmat_prior, algorithm=algorithm,\n random_state=random_state, n_iter=n_iter,\n tol=tol, params=params, verbose=verbose,\n init_params=init_params)\n\n self.covariance_type = covariance_type\n self.min_covar = min_covar\n self.means_prior = means_prior\n self.means_weight = means_weight\n self.covars_prior = covars_prior\n self.covars_weight = covars_weight\n\n @property\n def covars_(self):\n \"\"\"Return covars as a full matrix.\"\"\"\n return fill_covars(self._covars_, 
self.covariance_type,\n self.n_components, self.n_features)\n\n @covars_.setter\n def covars_(self, covars):\n self._covars_ = np.asarray(covars).copy()\n\n def _check(self):\n super(GaussianHMM, self)._check()\n\n self.means_ = np.asarray(self.means_)\n self.n_features = self.means_.shape[1]\n\n if self.covariance_type not in COVARIANCE_TYPES:\n raise ValueError('covariance_type must be one of {0}'\n .format(COVARIANCE_TYPES))\n\n _validate_covars(self._covars_, self.covariance_type,\n self.n_components)\n\n def _init(self, X, lengths=None):\n super(GaussianHMM, self)._init(X, lengths=lengths)\n\n _, n_features = X.shape\n if hasattr(self, 'n_features') and self.n_features != n_features:\n raise ValueError('Unexpected number of dimensions, got %s but '\n 'expected %s' % (n_features, self.n_features))\n\n self.n_features = n_features\n if 'm' in self.init_params or not hasattr(self, \"means_\"):\n kmeans = cluster.KMeans(n_clusters=self.n_components,\n random_state=self.random_state)\n kmeans.fit(X)\n self.means_ = kmeans.cluster_centers_\n if 'c' in self.init_params or not hasattr(self, \"covars_\"):\n cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])\n if not cv.shape:\n cv.shape = (1, 1)\n self._covars_ = distribute_covar_matrix_to_match_covariance_type(\n cv, self.covariance_type, self.n_components).copy()\n\n def _compute_log_likelihood(self, X):\n return log_multivariate_normal_density(\n X, self.means_, self._covars_, self.covariance_type)\n\n def _generate_sample_from_state(self, state, random_state=None):\n return random_state.multivariate_normal(\n self.means_[state], self.covars_[state]\n )\n\n def _initialize_sufficient_statistics(self):\n stats = super(GaussianHMM, self)._initialize_sufficient_statistics()\n stats['post'] = np.zeros(self.n_components)\n stats['obs'] = np.zeros((self.n_components, self.n_features))\n stats['obs**2'] = np.zeros((self.n_components, self.n_features))\n if self.covariance_type in ('tied', 'full'):\n stats['obs*obs.T'] 
= np.zeros((self.n_components, self.n_features,\n self.n_features))\n return stats\n\n def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,\n posteriors, fwdlattice, bwdlattice):\n super(GaussianHMM, self)._accumulate_sufficient_statistics(\n stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice)\n\n if 'm' in self.params or 'c' in self.params:\n stats['post'] += posteriors.sum(axis=0)\n stats['obs'] += np.dot(posteriors.T, obs)\n\n if 'c' in self.params:\n if self.covariance_type in ('spherical', 'diag'):\n stats['obs**2'] += np.dot(posteriors.T, obs ** 2)\n elif self.covariance_type in ('tied', 'full'):\n # posteriors: (nt, nc); obs: (nt, nf); obs: (nt, nf)\n # -> (nc, nf, nf)\n stats['obs*obs.T'] += np.einsum(\n 'ij,ik,il->jkl', posteriors, obs, obs)\n\n def _do_mstep(self, stats):\n super(GaussianHMM, self)._do_mstep(stats)\n\n means_prior = self.means_prior\n means_weight = self.means_weight\n\n # TODO: find a proper reference for estimates for different\n # covariance models.\n # Based on Huang, Acero, Hon, \"Spoken Language Processing\",\n # p. 
443 - 445\n denom = stats['post'][:, np.newaxis]\n if 'm' in self.params:\n self.means_ = ((means_weight * means_prior + stats['obs'])\n / (means_weight + denom))\n\n if 'c' in self.params:\n covars_prior = self.covars_prior\n covars_weight = self.covars_weight\n meandiff = self.means_ - means_prior\n\n if self.covariance_type in ('spherical', 'diag'):\n cv_num = (means_weight * meandiff**2\n + stats['obs**2']\n - 2 * self.means_ * stats['obs']\n + self.means_**2 * denom)\n cv_den = max(covars_weight - 1, 0) + denom\n self._covars_ = \\\n (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)\n if self.covariance_type == 'spherical':\n self._covars_ = np.tile(\n self._covars_.mean(1)[:, np.newaxis],\n (1, self._covars_.shape[1]))\n elif self.covariance_type in ('tied', 'full'):\n cv_num = np.empty((self.n_components, self.n_features,\n self.n_features))\n for c in range(self.n_components):\n obsmean = np.outer(stats['obs'][c], self.means_[c])\n\n cv_num[c] = (means_weight * np.outer(meandiff[c],\n meandiff[c])\n + stats['obs*obs.T'][c]\n - obsmean - obsmean.T\n + np.outer(self.means_[c], self.means_[c])\n * stats['post'][c])\n cvweight = max(covars_weight - self.n_features, 0)\n if self.covariance_type == 'tied':\n self._covars_ = ((covars_prior + cv_num.sum(axis=0)) /\n (cvweight + stats['post'].sum()))\n elif self.covariance_type == 'full':\n self._covars_ = ((covars_prior + cv_num) /\n (cvweight + stats['post'][:, None, None]))\n\n\nclass MultinomialHMM(_BaseHMM):\n \"\"\"Hidden Markov Model with multinomial (discrete) emissions\n\n Parameters\n ----------\n\n n_components : int\n Number of states.\n\n startprob_prior : array, shape (n_components, ), optional\n Parameters of the Dirichlet prior distribution for\n :attr:`startprob_`.\n\n transmat_prior : array, shape (n_components, n_components), optional\n Parameters of the Dirichlet prior distribution for each row\n of the transition probabilities :attr:`transmat_`.\n\n algorithm : string, optional\n Decoder 
algorithm. Must be one of \"viterbi\" or \"map\".\n Defaults to \"viterbi\".\n\n random_state: RandomState or an int seed, optional\n A random number generator instance.\n\n n_iter : int, optional\n Maximum number of iterations to perform.\n\n tol : float, optional\n Convergence threshold. EM will stop if the gain in log-likelihood\n is below this value.\n\n verbose : bool, optional\n When ``True`` per-iteration convergence reports are printed\n to :data:`sys.stderr`. You can diagnose convergence via the\n :attr:`monitor_` attribute.\n\n params : string, optional\n Controls which parameters are updated in the training\n process. Can contain any combination of 's' for startprob,\n 't' for transmat, 'e' for emissionprob.\n Defaults to all parameters.\n\n init_params : string, optional\n Controls which parameters are initialized prior to\n training. Can contain any combination of 's' for\n startprob, 't' for transmat, 'e' for emissionprob.\n Defaults to all parameters.\n\n Attributes\n ----------\n n_features : int\n Number of possible symbols emitted by the model (in the samples).\n\n monitor\\_ : ConvergenceMonitor\n Monitor object used to check the convergence of EM.\n\n transmat\\_ : array, shape (n_components, n_components)\n Matrix of transition probabilities between states.\n\n startprob\\_ : array, shape (n_components, )\n Initial state occupation distribution.\n\n emissionprob\\_ : array, shape (n_components, n_features)\n Probability of emitting a given symbol when in each state.\n\n Examples\n --------\n >>> from hmmlearn.hmm import MultinomialHMM\n >>> MultinomialHMM(n_components=2)\n ... 
#doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n MultinomialHMM(algorithm='viterbi',...\n \"\"\"\n # TODO: accept the prior on emissionprob_ for consistency.\n def __init__(self, n_components=1,\n startprob_prior=1.0, transmat_prior=1.0,\n algorithm=\"viterbi\", random_state=None,\n n_iter=10, tol=1e-2, verbose=False,\n params=\"ste\", init_params=\"ste\"):\n _BaseHMM.__init__(self, n_components,\n startprob_prior=startprob_prior,\n transmat_prior=transmat_prior,\n algorithm=algorithm,\n random_state=random_state,\n n_iter=n_iter, tol=tol, verbose=verbose,\n params=params, init_params=init_params)\n\n def _init(self, X, lengths=None):\n if not self._check_input_symbols(X):\n raise ValueError(\"expected a sample from \"\n \"a Multinomial distribution.\")\n\n super(MultinomialHMM, self)._init(X, lengths=lengths)\n self.random_state = check_random_state(self.random_state)\n\n if 'e' in self.init_params:\n if not hasattr(self, \"n_features\"):\n symbols = set()\n for i, j in iter_from_X_lengths(X, lengths):\n symbols |= set(X[i:j].flatten())\n self.n_features = len(symbols)\n self.emissionprob_ = self.random_state \\\n .rand(self.n_components, self.n_features)\n normalize(self.emissionprob_, axis=1)\n\n def _check(self):\n super(MultinomialHMM, self)._check()\n\n self.emissionprob_ = np.atleast_2d(self.emissionprob_)\n n_features = getattr(self, \"n_features\", self.emissionprob_.shape[1])\n if self.emissionprob_.shape != (self.n_components, n_features):\n raise ValueError(\n \"emissionprob_ must have shape (n_components, n_features)\")\n else:\n self.n_features = n_features\n\n def _compute_log_likelihood(self, X):\n return np.log(self.emissionprob_)[:, np.concatenate(X)].T\n\n def _generate_sample_from_state(self, state, random_state=None):\n cdf = np.cumsum(self.emissionprob_[state, :])\n random_state = check_random_state(random_state)\n return [(cdf > random_state.rand()).argmax()]\n\n def _initialize_sufficient_statistics(self):\n stats = super(MultinomialHMM, 
self)._initialize_sufficient_statistics()\n stats['obs'] = np.zeros((self.n_components, self.n_features))\n return stats\n\n def _accumulate_sufficient_statistics(self, stats, X, framelogprob,\n posteriors, fwdlattice, bwdlattice):\n super(MultinomialHMM, self)._accumulate_sufficient_statistics(\n stats, X, framelogprob, posteriors, fwdlattice, bwdlattice)\n if 'e' in self.params:\n for t, symbol in enumerate(np.concatenate(X)):\n stats['obs'][:, symbol] += posteriors[t]\n\n def _do_mstep(self, stats):\n super(MultinomialHMM, self)._do_mstep(stats)\n if 'e' in self.params:\n self.emissionprob_ = (stats['obs']\n / stats['obs'].sum(axis=1)[:, np.newaxis])\n\n def _check_input_symbols(self, X):\n \"\"\"Check if ``X`` is a sample from a Multinomial distribution.\n\n That is ``X`` should be an array of non-negative integers from\n range ``[min(X), max(X)]``, such that each integer from the range\n occurs in ``X`` at least once.\n\n For example ``[0, 0, 2, 1, 3, 1, 1]`` is a valid sample from a\n Multinomial distribution, while ``[0, 0, 3, 5, 10]`` is not.\n \"\"\"\n symbols = np.concatenate(X)\n if (len(symbols) == 1 or # not enough data\n symbols.dtype.kind != 'i' or # not an integer\n (symbols < 0).any()): # contains negative integers\n return False\n\n symbols.sort()\n return np.all(np.diff(symbols) <= 1)\n\n\nclass GMMHMM(_BaseHMM):\n \"\"\"Hidden Markov Model with Gaussian mixture emissions.\n\n Parameters\n ----------\n n_components : int\n Number of states in the model.\n\n n_mix : int\n Number of states in the GMM.\n\n covariance_type : string, optional\n String describing the type of covariance parameters to\n use. Must be one of\n\n * \"spherical\" --- each state uses a single variance value that\n applies to all features.\n * \"diag\" --- each state uses a diagonal covariance matrix.\n * \"full\" --- each state uses a full (i.e. 
unrestricted)\n covariance matrix.\n * \"tied\" --- all states use **the same** full covariance matrix.\n\n Defaults to \"diag\".\n\n min_covar : float, optional\n Floor on the diagonal of the covariance matrix to prevent\n overfitting. Defaults to 1e-3.\n\n startprob_prior : array, shape (n_components, ), optional\n Parameters of the Dirichlet prior distribution for\n :attr:`startprob_`.\n\n transmat_prior : array, shape (n_components, n_components), optional\n Parameters of the Dirichlet prior distribution for each row\n of the transition probabilities :attr:`transmat_`.\n\n weights_prior : array, shape (n_mix, ), optional\n Parameters of the Dirichlet prior distribution for\n :attr:`weights_`.\n\n means_prior, means_weight : array, shape (n_mix, ), optional\n Mean and precision of the Normal prior distribtion for\n :attr:`means_`.\n\n covars_prior, covars_weight : array, shape (n_mix, ), optional\n Parameters of the prior distribution for the covariance matrix\n :attr:`covars_`.\n\n If :attr:`covariance_type` is \"spherical\" or \"diag\" the prior is\n the inverse gamma distribution, otherwise --- the inverse Wishart\n distribution.\n\n algorithm : string, optional\n Decoder algorithm. Must be one of \"viterbi\" or \"map\".\n Defaults to \"viterbi\".\n\n random_state: RandomState or an int seed, optional\n A random number generator instance.\n\n n_iter : int, optional\n Maximum number of iterations to perform.\n\n tol : float, optional\n Convergence threshold. EM will stop if the gain in log-likelihood\n is below this value.\n\n verbose : bool, optional\n When ``True`` per-iteration convergence reports are printed\n to :data:`sys.stderr`. You can diagnose convergence via the\n :attr:`monitor_` attribute.\n\n init_params : string, optional\n Controls which parameters are initialized prior to training. 
Can\n contain any combination of 's' for startprob, 't' for transmat, 'm'\n for means, 'c' for covars, and 'w' for GMM mixing weights.\n Defaults to all parameters.\n\n params : string, optional\n Controls which parameters are updated in the training process. Can\n contain any combination of 's' for startprob, 't' for transmat, 'm' for\n means, and 'c' for covars, and 'w' for GMM mixing weights.\n Defaults to all parameters.\n\n Attributes\n ----------\n monitor\\_ : ConvergenceMonitor\n Monitor object used to check the convergence of EM.\n\n startprob\\_ : array, shape (n_components, )\n Initial state occupation distribution.\n\n transmat\\_ : array, shape (n_components, n_components)\n Matrix of transition probabilities between states.\n\n weights\\_ : array, shape (n_components, n_mix)\n Mixture weights for each state.\n\n means\\_ : array, shape (n_components, n_mix)\n Mean parameters for each mixture component in each state.\n\n covars\\_ : array\n Covariance parameters for each mixture components in each state.\n\n The shape depends on :attr:`covariance_type`::\n\n (n_components, n_mix) if \"spherical\",\n (n_components, n_features, n_features) if \"tied\",\n (n_components, n_mix, n_features) if \"diag\",\n (n_components, n_mix, n_features, n_features) if \"full\"\n \"\"\"\n\n def __init__(self, n_components=1, n_mix=1,\n min_covar=1e-3, startprob_prior=1.0, transmat_prior=1.0,\n weights_prior=1.0, means_prior=0.0, means_weight=0.0,\n covars_prior=None, covars_weight=None,\n algorithm=\"viterbi\", covariance_type=\"diag\",\n random_state=None, n_iter=10, tol=1e-2,\n verbose=False, params=\"stmcw\",\n init_params=\"stmcw\"):\n _BaseHMM.__init__(self, n_components,\n startprob_prior=startprob_prior,\n transmat_prior=transmat_prior,\n algorithm=algorithm, random_state=random_state,\n n_iter=n_iter, tol=tol, verbose=verbose,\n params=params, init_params=init_params)\n self.covariance_type = covariance_type\n self.min_covar = min_covar\n self.n_mix = n_mix\n 
self.weights_prior = weights_prior\n self.means_prior = means_prior\n self.means_weight = means_weight\n self.covars_prior = covars_prior\n self.covars_weight = covars_weight\n\n def _init(self, X, lengths=None):\n super(GMMHMM, self)._init(X, lengths=lengths)\n\n _n_samples, self.n_features = X.shape\n\n # Default values for covariance prior parameters\n self._init_covar_priors()\n self._fix_priors_shape()\n\n main_kmeans = cluster.KMeans(n_clusters=self.n_components,\n random_state=self.random_state)\n labels = main_kmeans.fit_predict(X)\n kmeanses = []\n for label in range(self.n_components):\n kmeans = cluster.KMeans(n_clusters=self.n_mix,\n random_state=self.random_state)\n kmeans.fit(X[np.where(labels == label)])\n kmeanses.append(kmeans)\n\n if 'w' in self.init_params or not hasattr(self, \"weights_\"):\n self.weights_ = (np.ones((self.n_components, self.n_mix)) /\n (np.ones((self.n_components, 1)) * self.n_mix))\n\n if 'm' in self.init_params or not hasattr(self, \"means_\"):\n self.means_ = np.zeros((self.n_components, self.n_mix,\n self.n_features))\n for i, kmeans in enumerate(kmeanses):\n self.means_[i] = kmeans.cluster_centers_\n\n if 'c' in self.init_params or not hasattr(self, \"covars_\"):\n cv = np.cov(X.T) + self.min_covar * np.eye(self.n_features)\n if not cv.shape:\n cv.shape = (1, 1)\n\n if self.covariance_type == 'tied':\n self.covars_ = np.zeros((self.n_components,\n self.n_features, self.n_features))\n self.covars_[:] = cv\n elif self.covariance_type == 'full':\n self.covars_ = np.zeros((self.n_components, self.n_mix,\n self.n_features, self.n_features))\n self.covars_[:] = cv\n elif self.covariance_type == 'diag':\n self.covars_ = np.zeros((self.n_components, self.n_mix,\n self.n_features))\n self.covars_[:] = np.diag(cv)\n elif self.covariance_type == 'spherical':\n self.covars_ = np.zeros((self.n_components, self.n_mix))\n self.covars_[:] = cv.mean()\n\n def _init_covar_priors(self):\n if self.covariance_type == \"full\":\n if 
self.covars_prior is None:\n self.covars_prior = 0.0\n if self.covars_weight is None:\n self.covars_weight = -(1.0 + self.n_features + 1.0)\n elif self.covariance_type == \"tied\":\n if self.covars_prior is None:\n self.covars_prior = 0.0\n if self.covars_weight is None:\n self.covars_weight = -(self.n_mix + self.n_features + 1.0)\n elif self.covariance_type == \"diag\":\n if self.covars_prior is None:\n self.covars_prior = -1.5\n if self.covars_weight is None:\n self.covars_weight = 0.0\n elif self.covariance_type == \"spherical\":\n if self.covars_prior is None:\n self.covars_prior = -(self.n_mix + 2.0) / 2.0\n if self.covars_weight is None:\n self.covars_weight = 0.0\n\n def _fix_priors_shape(self):\n # If priors are numbers, this function will make them into a\n # matrix of proper shape\n self.weights_prior = np.broadcast_to(\n self.weights_prior, (self.n_components, self.n_mix)).copy()\n self.means_prior = np.broadcast_to(\n self.means_prior,\n (self.n_components, self.n_mix, self.n_features)).copy()\n self.means_weight = np.broadcast_to(\n self.means_weight,\n (self.n_components, self.n_mix)).copy()\n\n if self.covariance_type == \"full\":\n self.covars_prior = np.broadcast_to(\n self.covars_prior,\n (self.n_components, self.n_mix,\n self.n_features, self.n_features)).copy()\n self.covars_weight = np.broadcast_to(\n self.covars_weight, (self.n_components, self.n_mix)).copy()\n elif self.covariance_type == \"tied\":\n self.covars_prior = np.broadcast_to(\n self.covars_prior,\n (self.n_components, self.n_features, self.n_features)).copy()\n self.covars_weight = np.broadcast_to(\n self.covars_weight, self.n_components).copy()\n elif self.covariance_type == \"diag\":\n self.covars_prior = np.broadcast_to(\n self.covars_prior,\n (self.n_components, self.n_mix, self.n_features)).copy()\n self.covars_weight = np.broadcast_to(\n self.covars_weight,\n (self.n_components, self.n_mix, self.n_features)).copy()\n elif self.covariance_type == \"spherical\":\n 
self.covars_prior = np.broadcast_to(\n self.covars_prior, (self.n_components, self.n_mix)).copy()\n self.covars_weight = np.broadcast_to(\n self.covars_weight, (self.n_components, self.n_mix)).copy()\n\n def _check(self):\n super(GMMHMM, self)._check()\n\n if not hasattr(self, \"n_features\"):\n self.n_features = self.means_.shape[2]\n\n self._init_covar_priors()\n self._fix_priors_shape()\n\n # Checking covariance type\n if self.covariance_type not in COVARIANCE_TYPES:\n raise ValueError(\"covariance_type must be one of {0}\"\n .format(COVARIANCE_TYPES))\n\n self.weights_ = np.array(self.weights_)\n # Checking mixture weights' shape\n if self.weights_.shape != (self.n_components, self.n_mix):\n raise ValueError(\"mixture weights must have shape \"\n \"(n_components, n_mix), \"\n \"actual shape: {0}\".format(self.weights_.shape))\n\n # Checking mixture weights' mathematical correctness\n if not np.allclose(np.sum(self.weights_, axis=1),\n np.ones(self.n_components)):\n raise ValueError(\"mixture weights must sum up to 1\")\n\n # Checking means' shape\n self.means_ = np.array(self.means_)\n if self.means_.shape != (self.n_components, self.n_mix,\n self.n_features):\n raise ValueError(\"mixture means must have shape \"\n \"(n_components, n_mix, n_features), \"\n \"actual shape: {0}\".format(self.means_.shape))\n\n # Checking covariances' shape\n self.covars_ = np.array(self.covars_)\n covars_shape = self.covars_.shape\n needed_shapes = {\n \"spherical\": (self.n_components, self.n_mix),\n \"tied\": (self.n_components, self.n_features, self.n_features),\n \"diag\": (self.n_components, self.n_mix, self.n_features),\n \"full\": (self.n_components, self.n_mix,\n self.n_features, self.n_features)\n }\n needed_shape = needed_shapes[self.covariance_type]\n if covars_shape != needed_shape:\n raise ValueError(\"{!r} mixture covars must have shape {0}, \"\n \"actual shape: {1}\"\n .format(self.covariance_type,\n needed_shape, covars_shape))\n\n # Checking covariances' 
mathematical correctness\n from scipy import linalg\n\n if (self.covariance_type == \"spherical\" or\n self.covariance_type == \"diag\"):\n if np.any(self.covars_ <= 0):\n raise ValueError(\"{!r} mixture covars must be non-negative\"\n .format(self.covariance_type))\n elif self.covariance_type == \"tied\":\n for i, covar in enumerate(self.covars_):\n if (not np.allclose(covar, covar.T) or\n np.any(linalg.eigvalsh(covar) <= 0)):\n raise ValueError(\"'tied' mixture covars must be \"\n \"symmetric, positive-definite\")\n elif self.covariance_type == \"full\":\n for i, mix_covars in enumerate(self.covars_):\n for j, covar in enumerate(mix_covars):\n if (not np.allclose(covar, covar.T) or\n np.any(linalg.eigvalsh(covar) <= 0)):\n raise ValueError(\"'full' covariance matrix of \"\n \"mixture {0} of component {1} must be \"\n \"symmetric, positive-definite\"\n .format(j, i))\n\n def _generate_sample_from_state(self, state, random_state=None):\n if random_state is None:\n random_state = self.random_state\n random_state = check_random_state(random_state)\n\n cur_weights = self.weights_[state]\n i_gauss = random_state.choice(self.n_mix, p=cur_weights)\n if self.covariance_type == 'tied':\n # self.covars_.shape == (n_components, n_features, n_features)\n # shouldn't that be (n_mix, ...)?\n covs = self.covars_\n else:\n covs = self.covars_[:, i_gauss]\n covs = fill_covars(covs, self.covariance_type,\n self.n_components, self.n_features)\n return random_state.multivariate_normal(\n self.means_[state, i_gauss], covs[state]\n )\n\n def _compute_log_weighted_gaussian_densities(self, X, i_comp):\n cur_means = self.means_[i_comp]\n cur_covs = self.covars_[i_comp]\n if self.covariance_type == 'spherical':\n cur_covs = cur_covs[:, np.newaxis]\n log_cur_weights = np.log(self.weights_[i_comp])\n\n return log_multivariate_normal_density(\n X, cur_means, cur_covs, self.covariance_type\n ) + log_cur_weights\n\n def _compute_log_likelihood(self, X):\n n_samples, _ = X.shape\n res = 
np.zeros((n_samples, self.n_components))\n\n for i in range(self.n_components):\n log_denses = self._compute_log_weighted_gaussian_densities(X, i)\n res[:, i] = logsumexp(log_denses, axis=1)\n\n return res\n\n def _initialize_sufficient_statistics(self):\n stats = super(GMMHMM, self)._initialize_sufficient_statistics()\n stats['n_samples'] = 0\n stats['post_comp_mix'] = None\n stats['post_mix_sum'] = np.zeros((self.n_components, self.n_mix))\n stats['post_sum'] = np.zeros(self.n_components)\n stats['samples'] = None\n stats['centered'] = None\n return stats\n\n def _accumulate_sufficient_statistics(self, stats, X, framelogprob,\n post_comp, fwdlattice, bwdlattice):\n\n # TODO: support multiple frames\n\n super(GMMHMM, self)._accumulate_sufficient_statistics(\n stats, X, framelogprob, post_comp, fwdlattice, bwdlattice\n )\n\n n_samples, _ = X.shape\n\n stats['n_samples'] = n_samples\n stats['samples'] = X\n\n prob_mix = np.zeros((n_samples, self.n_components, self.n_mix))\n for p in range(self.n_components):\n log_denses = self._compute_log_weighted_gaussian_densities(X, p)\n prob_mix[:, p, :] = np.exp(log_denses) + np.finfo(np.float).eps\n\n prob_mix_sum = np.sum(prob_mix, axis=2)\n post_mix = prob_mix / prob_mix_sum[:, :, np.newaxis]\n post_comp_mix = post_comp[:, :, np.newaxis] * post_mix\n stats['post_comp_mix'] = post_comp_mix\n\n stats['post_mix_sum'] = np.sum(post_comp_mix, axis=0)\n stats['post_sum'] = np.sum(post_comp, axis=0)\n\n stats['centered'] = X[:, np.newaxis, np.newaxis, :] - self.means_\n\n def _do_mstep(self, stats):\n super(GMMHMM, self)._do_mstep(stats)\n\n n_samples = stats['n_samples']\n n_features = self.n_features\n\n # Maximizing weights\n alphas_minus_one = self.weights_prior - 1\n new_weights_numer = stats['post_mix_sum'] + alphas_minus_one\n new_weights_denom = (\n stats['post_sum'] + np.sum(alphas_minus_one, axis=1)\n )[:, np.newaxis]\n new_weights = new_weights_numer / new_weights_denom\n\n # Maximizing means\n lambdas, mus = 
self.means_weight, self.means_prior\n new_means_numer = np.einsum(\n 'ijk,il->jkl',\n stats['post_comp_mix'], stats['samples']\n ) + lambdas[:, :, np.newaxis] * mus\n new_means_denom = (stats['post_mix_sum'] + lambdas)[:, :, np.newaxis]\n new_means = new_means_numer / new_means_denom\n\n # Maximizing covariances\n centered_means = self.means_ - mus\n\n if self.covariance_type == 'full':\n centered = stats['centered'].reshape((\n n_samples, self.n_components, self.n_mix, self.n_features, 1\n ))\n centered_t = stats['centered'].reshape((\n n_samples, self.n_components, self.n_mix, 1, self.n_features\n ))\n centered_dots = centered * centered_t\n\n psis_t = np.transpose(self.covars_prior, axes=(0, 1, 3, 2))\n nus = self.covars_weight\n\n centr_means_resh = centered_means.reshape((\n self.n_components, self.n_mix, self.n_features, 1\n ))\n centr_means_resh_t = centered_means.reshape((\n self.n_components, self.n_mix, 1, self.n_features\n ))\n centered_means_dots = centr_means_resh * centr_means_resh_t\n\n new_cov_numer = np.einsum(\n 'ijk,ijklm->jklm',\n stats['post_comp_mix'], centered_dots\n ) + psis_t + (lambdas[:, :, np.newaxis, np.newaxis] *\n centered_means_dots)\n new_cov_denom = (\n stats['post_mix_sum'] + 1 + nus + self.n_features + 1\n )[:, :, np.newaxis, np.newaxis]\n\n new_cov = new_cov_numer / new_cov_denom\n elif self.covariance_type == 'diag':\n centered2 = stats['centered'] ** 2\n centered_means2 = centered_means ** 2\n\n alphas = self.covars_prior\n betas = self.covars_weight\n\n new_cov_numer = np.einsum(\n 'ijk,ijkl->jkl',\n stats['post_comp_mix'], centered2\n ) + lambdas[:, :, np.newaxis] * centered_means2 + 2 * betas\n new_cov_denom = (\n stats['post_mix_sum'][:, :, np.newaxis] + 1 + 2 * (alphas + 1)\n )\n\n new_cov = new_cov_numer / new_cov_denom\n elif self.covariance_type == 'spherical':\n centered_norm2 = np.sum(stats['centered'] ** 2, axis=-1)\n\n alphas = self.covars_prior\n betas = self.covars_weight\n\n centered_means_norm2 = 
np.sum(centered_means ** 2, axis=-1)\n\n new_cov_numer = np.einsum(\n 'ijk,ijk->jk',\n stats['post_comp_mix'], centered_norm2\n ) + lambdas * centered_means_norm2 + 2 * betas\n new_cov_denom = (\n n_features * stats['post_mix_sum'] + n_features +\n 2 * (alphas + 1)\n )\n\n new_cov = new_cov_numer / new_cov_denom\n elif self.covariance_type == 'tied':\n centered = stats['centered'].reshape((\n n_samples, self.n_components, self.n_mix, self.n_features, 1\n ))\n centered_t = stats['centered'].reshape((\n n_samples, self.n_components, self.n_mix, 1, self.n_features\n ))\n centered_dots = centered * centered_t\n\n psis_t = np.transpose(self.covars_prior, axes=(0, 2, 1))\n nus = self.covars_weight\n\n centr_means_resh = centered_means.reshape((\n self.n_components, self.n_mix, self.n_features, 1\n ))\n centr_means_resh_t = centered_means.reshape((\n self.n_components, self.n_mix, 1, self.n_features\n ))\n centered_means_dots = centr_means_resh * centr_means_resh_t\n\n lambdas_cmdots_prod_sum = np.einsum(\n 'ij,ijkl->ikl',\n lambdas, centered_means_dots\n )\n\n new_cov_numer = np.einsum(\n 'ijk,ijklm->jlm',\n stats['post_comp_mix'], centered_dots\n ) + lambdas_cmdots_prod_sum + psis_t\n new_cov_denom = (\n stats['post_sum'] + self.n_mix + nus + self.n_features + 1\n )[:, np.newaxis, np.newaxis]\n\n new_cov = new_cov_numer / new_cov_denom\n\n # Assigning new values to class members\n self.weights_ = new_weights\n self.means_ = new_means\n self.covars_ = new_cov\n" ]
[ [ "numpy.diag", "numpy.dot", "sklearn.cluster.KMeans", "numpy.einsum", "numpy.asarray", "numpy.cumsum", "numpy.concatenate", "numpy.any", "numpy.exp", "numpy.where", "numpy.allclose", "numpy.eye", "numpy.finfo", "numpy.diff", "numpy.outer", "numpy.zeros", "numpy.log", "numpy.atleast_2d", "numpy.cov", "scipy.linalg.eigvalsh", "numpy.transpose", "numpy.array", "numpy.sum", "numpy.maximum", "sklearn.mixture._validate_covars", "numpy.ones", "numpy.broadcast_to", "sklearn.utils.check_random_state", "numpy.empty", "scipy.misc.logsumexp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] } ]
danmlr/graph_transformer
[ "4f173315c16bac213a7a8ec8171bbdbc236e42d2" ]
[ "train/train_SBMs_node_classification.py" ]
[ "\"\"\"\r\n Utility functions for training one epoch \r\n and evaluating one epoch\r\n\"\"\"\r\nimport torch\r\nimport torch.nn as nn\r\nimport math\r\nimport dgl\r\n\r\nfrom train.metrics import accuracy_SBM as accuracy\r\n\r\ndef train_epoch(model, optimizer, device, data_loader, epoch):\r\n\r\n model.train()\r\n epoch_loss = 0\r\n epoch_train_acc = 0\r\n nb_data = 0\r\n gpu_mem = 0\r\n for iter, (batch_graphs, batch_labels) in enumerate(data_loader):\r\n batch_graphs = batch_graphs.to(device)\r\n batch_x = batch_graphs.ndata['feat'].to(device) # num x feat\r\n batch_e = batch_graphs.edata['feat'].to(device)\r\n batch_labels = batch_labels.to(device)\r\n optimizer.zero_grad()\r\n try:\r\n batch_lap_pos_enc = batch_graphs.ndata['lap_pos_enc'].to(device)\r\n sign_flip = torch.rand(batch_lap_pos_enc.size(1)).to(device)\r\n sign_flip[sign_flip>=0.5] = 1.0; sign_flip[sign_flip<0.5] = -1.0\r\n batch_lap_pos_enc = batch_lap_pos_enc * sign_flip.unsqueeze(0)\r\n ###############\"\"Ajout ci-dessous \r\n #batch_lap_pos_enc = torch.abs(batch_lap_pos_enc)\r\n\r\n except:\r\n batch_lap_pos_enc = None\r\n \r\n try:\r\n batch_wl_pos_enc = batch_graphs.ndata['wl_pos_enc'].to(device)\r\n except:\r\n batch_wl_pos_enc = None\r\n\r\n batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_lap_pos_enc, batch_wl_pos_enc)\r\n \r\n loss = model.loss(batch_scores, batch_labels)\r\n loss.backward()\r\n optimizer.step()\r\n epoch_loss += loss.detach().item()\r\n epoch_train_acc += accuracy(batch_scores, batch_labels)\r\n epoch_loss /= (iter + 1)\r\n epoch_train_acc /= (iter + 1)\r\n \r\n return epoch_loss, epoch_train_acc, optimizer\r\n\r\n\r\ndef evaluate_network(model, device, data_loader, epoch):\r\n \r\n model.eval()\r\n epoch_test_loss = 0\r\n epoch_test_acc = 0\r\n nb_data = 0\r\n with torch.no_grad():\r\n for iter, (batch_graphs, batch_labels) in enumerate(data_loader):\r\n batch_graphs = batch_graphs.to(device)\r\n batch_x = batch_graphs.ndata['feat'].to(device)\r\n 
batch_e = batch_graphs.edata['feat'].to(device)\r\n batch_labels = batch_labels.to(device)\r\n try:\r\n batch_lap_pos_enc = batch_graphs.ndata['lap_pos_enc'].to(device)\r\n except:\r\n batch_lap_pos_enc = None\r\n \r\n try:\r\n batch_wl_pos_enc = batch_graphs.ndata['wl_pos_enc'].to(device)\r\n except:\r\n batch_wl_pos_enc = None\r\n \r\n batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_lap_pos_enc, batch_wl_pos_enc)\r\n loss = model.loss(batch_scores, batch_labels) \r\n epoch_test_loss += loss.detach().item()\r\n epoch_test_acc += accuracy(batch_scores, batch_labels)\r\n epoch_test_loss /= (iter + 1)\r\n epoch_test_acc /= (iter + 1)\r\n \r\n return epoch_test_loss, epoch_test_acc\r\n\r\n\r\n" ]
[ [ "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sngweicong/DeepCTR-Torch
[ "67d4e9d0c8a13aa4d614b2d04397a7f6e7a0e9af" ]
[ "avazu.py" ]
[ "import pandas as pd\nimport torch\nfrom sklearn.metrics import log_loss, roc_auc_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom deepctr_torch.inputs import SparseFeat, get_feature_names\nfrom deepctr_torch.models import FiBiNET\n\nif __name__ == \"__main__\":\n\n data = pd.read_csv(\"./sub_train.txt\")\n sparse_features = [\"hour\",\"C1\",\"banner_pos\",\"site_id\",\"site_domain\",\"site_category\",\"app_id\",\"app_domain\",\"app_category\",\"device_id\",\"device_ip\",\"device_model\",\"device_type\",\"device_conn_type\",\"C14\",\"C15\",\"C16\",\"C17\",\"C18\",\"C19\",\"C20\",\"C21\"]\n target = [\"click\"]\n\n # 1.Label Encoding for sparse features,and do simple Transformation for dense features\n for feat in sparse_features:\n lbe = LabelEncoder()\n data[feat] = lbe.fit_transform(data[feat])\n # 2.count #unique features for each sparse field\n fixlen_feature_columns = [SparseFeat(feat, data[feat].nunique())\n for feat in sparse_features]\n linear_feature_columns = fixlen_feature_columns\n dnn_feature_columns = fixlen_feature_columns\n feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)\n\n # 3.generate input data for model\n train, test = train_test_split(data, test_size=0.2)\n train_model_input = {name: train[name] for name in feature_names}\n test_model_input = {name: test[name] for name in feature_names}\n # 4.Define Model,train,predict and evaluate\n\n device = 'cpu'\n use_cuda = True\n if use_cuda and torch.cuda.is_available():\n print('cuda ready...')\n device = 'cuda:0'\n\n model = FiBiNET(linear_feature_columns, dnn_feature_columns, task='binary', device=device)\n model.compile(\"adam\", \"binary_crossentropy\", metrics=[\"binary_crossentropy\", \"auc\"], )\n history = model.fit(train_model_input,train[target].values,batch_size=256,epochs=10,verbose=2,validation_split=0.2)\n pred_ans = model.predict(test_model_input, batch_size=256)\n #print(pred_ans)\n 
#print(\" \")\n #print(test[target].values)\n print(\"\")\n print(\"test LogLoss\", round(log_loss(test[target].values, pred_ans), 4))\n print(\"test AUC\", round(roc_auc_score(test[target].values, pred_ans), 4))\n" ]
[ [ "sklearn.metrics.roc_auc_score", "pandas.read_csv", "sklearn.model_selection.train_test_split", "sklearn.metrics.log_loss", "torch.cuda.is_available", "sklearn.preprocessing.LabelEncoder" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jmmshn/emmet
[ "b67fb4160d2350c611c8b2f534717a04eb6ce035" ]
[ "emmet-builders/emmet/builders/feff/xas.py" ]
[ "from typing import List, Dict\nfrom itertools import groupby, chain\nfrom datetime import datetime\nimport traceback\n\nimport numpy as np\nfrom monty.json import jsanitize\n\nfrom maggma.core import Store\nfrom maggma.builders import GroupBuilder\n\nfrom pymatgen.core import Structure\nfrom pymatgen.analysis.xas.spectrum import XAS, site_weighted_spectrum\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom emmet.core.xas import XASDoc\nfrom emmet.builders.utils import maximal_spanning_non_intersecting_subsets\n\n\nclass XASBuilder(GroupBuilder):\n \"\"\"\n Generates XAS Docs from FEFF tasks\n\n # TODO: Generate MPID from materials collection rather than from task metadata\n \"\"\"\n\n def __init__(self, tasks: Store, xas: Store, num_samples: int = 200.0, **kwargs):\n self.tasks = tasks\n self.xas = xas\n self.num_samples = 200\n self.kwargs = kwargs\n\n super().__init__(source=tasks, target=xas, grouping_keys=[\"mp_id\"])\n self._target_keys_field = \"xas_ids\"\n\n def process_item(self, spectra: List[Dict]) -> Dict:\n\n mpid = spectra[0][\"mp_id\"]\n sandboxes = [doc.get(\"sandboxes\", []) for doc in spectra]\n sbxn_sets = maximal_spanning_non_intersecting_subsets(sandboxes)\n\n self.logger.debug(f\"Processing: {mpid}\")\n all_processed = []\n\n for sbxns in sbxn_sets:\n sbxn_spectra = [\n doc\n for doc in spectra\n if doc.get(\"sandboxes\", []) == list(sbxns)\n or doc.get(\"sandboxes\", []) == []\n ]\n\n try:\n\n processed = self.process_spectra(sbxn_spectra)\n for d in processed:\n d.update({\"state\": \"successful\"})\n\n all_processed.extend(processed)\n except Exception as e:\n self.logger.error(traceback.format_exc())\n all_processed.append(\n {\n \"error\": str(e),\n \"state\": \"failed\",\n \"task_ids\": list(d[self.xas.key] for d in sbxn_spectra),\n }\n )\n\n update_doc = {\n \"_bt\": datetime.utcnow(),\n }\n all_processed.update(\n {k: v for k, v in update_doc.items() if k not in processed}\n )\n\n return all_processed\n\n def 
process_spectra(self, items: List[Dict]) -> Dict:\n\n all_spectra = [feff_task_to_spectrum(task) for task in items]\n\n # Dictionary of all site to spectra mapping\n sites_to_spectra = {\n index: list(group)\n for index, group in groupby(\n sorted(all_spectra, key=lambda x: x.absorbing_index),\n key=lambda x: x.absorbing_index,\n )\n }\n\n # perform spectra merging\n for site, spectra in sites_to_spectra.items():\n type_to_spectra = {\n index: list(group)\n for index, group in groupby(\n sorted(\n spectra, key=lambda x: (x.edge, x.spectrum_type, x.last_updated)\n ),\n key=lambda x: (x.edge, x.spectrum_type),\n )\n }\n # Make K-Total\n if (\"K\", \"XANES\") in type_to_spectra and (\"K\", \"EXAFS\") in type_to_spectra:\n xanes = type_to_spectra[(\"K\", \"XANES\")][-1]\n exafs = type_to_spectra[(\"K\", \"EXAFS\")][-1]\n try:\n total_spectrum = xanes.stitch(exafs, mode=\"XAFS\")\n total_spectrum.absorbing_index = site\n total_spectrum.task_ids = xanes.task_ids + exafs.task_ids\n all_spectra.append(total_spectrum)\n except ValueError as e:\n self.logger.warning(e)\n\n # Make L23\n if (\"L2\", \"XANES\") in type_to_spectra and (\n \"L3\",\n \"XANES\",\n ) in type_to_spectra:\n l2 = type_to_spectra[(\"L2\", \"XANES\")][-1]\n l3 = type_to_spectra[(\"L3\", \"XANES\")][-1]\n try:\n total_spectrum = l2.stitch(l3, mode=\"L23\")\n total_spectrum.absorbing_index = site\n total_spectrum.task_ids = l2.task_ids + l3.task_ids\n all_spectra.append(total_spectrum)\n except ValueError as e:\n self.logger.warning(e)\n\n self.logger.debug(f\"Found {len(all_spectra)} spectra\")\n\n # Site-weighted averaging\n spectra_to_average = [\n list(group)\n for _, group in groupby(\n sorted(\n all_spectra,\n key=lambda x: (x.absorbing_element, x.edge, x.spectrum_type),\n ),\n key=lambda x: lambda x: (x.absorbing_element, x.edge, x.spectrum_type),\n )\n ]\n averaged_spectra = []\n\n for relevant_spectra in spectra_to_average:\n\n if len(relevant_spectra) > 0 and not 
is_missing_sites(relevant_spectra):\n if len(relevant_spectra) > 1:\n try:\n avg_spectrum = site_weighted_spectrum(\n relevant_spectra, num_samples=self.num_samples\n )\n avg_spectrum.task_ids = [\n id\n for spectrum in relevant_spectra\n for id in spectrum.task_ids\n ]\n averaged_spectra.append(avg_spectrum)\n except ValueError as e:\n self.logger.error(e)\n else:\n averaged_spectra.append(relevant_spectra[0])\n\n spectra_docs = [\n XASDoc.from_spectrum(spectrum).dict() for spectrum in averaged_spectra\n ]\n\n return spectra_docs\n\n def update_targets(self, items):\n \"\"\"\n Group buidler isn't designed for many-to-many so we unwrap that here\n \"\"\"\n\n items = list(filter(None.__ne__, chain.from_iterable(items)))\n super().update_targets(items)\n\n\ndef is_missing_sites(spectra):\n \"\"\"\n Determines if the collection of spectra are missing any indicies for the given element\n \"\"\"\n structure = spectra[0].structure\n element = spectra[0].absorbing_element\n\n # Find missing symmeterically inequivalent sites\n symm_sites = SymmSites(structure)\n absorption_indicies = {spectrum.absorbing_index for spectrum in spectra}\n\n missing_site_spectra_indicies = (\n set(structure.indices_from_symbol(element)) - absorption_indicies\n )\n for site_index in absorption_indicies:\n missing_site_spectra_indicies -= set(\n symm_sites.get_equivalent_site_indices(site_index)\n )\n\n return len(missing_site_spectra_indicies) != 0\n\n\nclass SymmSites:\n \"\"\"\n Wrapper to get equivalent site indicies from SpacegroupAnalyzer\n \"\"\"\n\n def __init__(self, structure):\n self.structure = structure\n sa = SpacegroupAnalyzer(self.structure)\n symm_data = sa.get_symmetry_dataset()\n # equivalency mapping for the structure\n # i'th site in the input structure equivalent to eq_atoms[i]'th site\n self.eq_atoms = symm_data[\"equivalent_atoms\"]\n\n def get_equivalent_site_indices(self, i):\n \"\"\"\n Site indices in the structure that are equivalent to the given site i.\n \"\"\"\n rv 
= np.argwhere(self.eq_atoms == self.eq_atoms[i]).squeeze().tolist()\n if isinstance(rv, int):\n rv = [rv]\n return rv\n\n\ndef feff_task_to_spectrum(doc):\n energy = doc[\"spectrum\"][0] # (eV)\n intensity = doc[\"spectrum\"][3] # (mu)\n structure: Structure = Structure.from_dict(doc[\"structure\"])\n # Clean site properties\n for site_prop in structure.site_properties.keys():\n structure.remove_site_property(site_prop)\n\n absorbing_index = doc[\"absorbing_atom\"]\n absorbing_element = structure[absorbing_index].specie\n edge = doc[\"edge\"]\n spectrum_type = doc[\"spectrum_type\"]\n\n spectrum = XAS(\n x=energy,\n y=intensity,\n structure=structure,\n absorbing_element=absorbing_element,\n absorbing_index=absorbing_index,\n edge=edge,\n spectrum_type=spectrum_type,\n )\n # Adding a attr is not a robust process\n # Figure out better solution later\n spectrum.last_updated = doc[\"last_updated\"]\n spectrum.task_ids = [doc[\"xas_id\"]]\n return spectrum\n" ]
[ [ "numpy.argwhere" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FengJunJian/Teacher-Assistant-Knowledge-Distillation
[ "cdb61d853df260f638a6490998aaee6c4d787fed" ]
[ "resnet_cifar.py" ]
[ "\"\"\"\nresnet for cifar in pytorch\n\nReference:\n[1] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In CVPR, 2016.\n[2] K. He, X. Zhang, S. Ren, and J. Sun. Identity mappings in deep residual networks. In ECCV, 2016.\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport math\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion=1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion=4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes*4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes*4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not 
None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass PreActBasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(PreActBasicBlock, self).__init__()\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.bn1(x)\n out = self.relu(out)\n\n if self.downsample is not None:\n residual = self.downsample(out)\n\n out = self.conv1(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n out += residual\n\n return out\n\n\nclass PreActBottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(PreActBottleneck, self).__init__()\n self.bn1 = nn.BatchNorm2d(inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes*4, kernel_size=1, bias=False)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.bn1(x)\n out = self.relu(out)\n\n if self.downsample is not None:\n residual = self.downsample(out)\n\n out = self.conv1(out)\n\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n out = self.bn3(out)\n out = self.relu(out)\n out = self.conv3(out)\n\n out += residual\n\n return out\n\n\nclass ResNet_Cifar(nn.Module):\n\n def __init__(self, block, layers, num_classes=10):\n super(ResNet_Cifar, self).__init__()\n self.inplanes = 16\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n 
self.bn1 = nn.BatchNorm2d(16)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 16, layers[0])\n self.layer2 = self._make_layer(block, 32, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 64, layers[2], stride=2)\n self.avgpool = nn.AvgPool2d(8, stride=1)\n self.fc = nn.Linear(64 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion)\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):#[3, 3, 32, 32]\n x = self.conv1(x)#[3, 16, 32, 32]\n x = self.bn1(x)#\n x = self.relu(x)#[3, 16, 32, 32]\n\n x = self.layer1(x)#[3, 16, 32, 32]\n x = self.layer2(x)#[3, 32, 16, 16]\n x = self.layer3(x)#[3, 64, 8, 8]\n\n x = self.avgpool(x)#[3, 64, 1, 1]\n x = x.view(x.size(0), -1)#[3, 64]\n x = self.fc(x)#[3, num_classes]\n\n return x\n\n\nclass PreAct_ResNet_Cifar(nn.Module):\n\n def __init__(self, block, layers, num_classes=10):\n super(PreAct_ResNet_Cifar, self).__init__()\n self.inplanes = 16\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.layer1 = self._make_layer(block, 16, layers[0])\n self.layer2 = self._make_layer(block, 32, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 64, layers[2], stride=2)\n self.bn = nn.BatchNorm2d(64*block.expansion)\n 
self.relu = nn.ReLU(inplace=True)\n self.avgpool = nn.AvgPool2d(8, stride=1)\n self.fc = nn.Linear(64*block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes*block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes*block.expansion, kernel_size=1, stride=stride, bias=False)\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes*block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.bn(x)\n x = self.relu(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\n\ndef resnet14_cifar(**kwargs):\n model = ResNet_Cifar(BasicBlock, [2, 2, 2], **kwargs)\n return model\n\ndef resnet8_cifar(**kwargs):\n model = ResNet_Cifar(BasicBlock, [1, 1, 1], **kwargs)\n return model\n\n\ndef resnet20_cifar(**kwargs):\n model = ResNet_Cifar(BasicBlock, [3, 3, 3], **kwargs)\n return model\n\ndef resnet26_cifar(**kwargs):\n model = ResNet_Cifar(BasicBlock, [4, 4, 4], **kwargs)\n return model\n\ndef resnet32_cifar(**kwargs):\n model = ResNet_Cifar(BasicBlock, [5, 5, 5], **kwargs)\n return model\n\n\ndef resnet44_cifar(**kwargs):\n model = ResNet_Cifar(BasicBlock, [7, 7, 7], **kwargs)\n return model\n\n\ndef resnet56_cifar(**kwargs):\n model = ResNet_Cifar(BasicBlock, [9, 9, 9], **kwargs)\n return model\n\n\ndef resnet110_cifar(**kwargs):\n model = ResNet_Cifar(BasicBlock, [18, 18, 18], **kwargs)\n return model\n\n\ndef resnet1202_cifar(**kwargs):\n 
model = ResNet_Cifar(BasicBlock, [200, 200, 200], **kwargs)\n return model\n\n\ndef resnet164_cifar(**kwargs):\n model = ResNet_Cifar(Bottleneck, [18, 18, 18], **kwargs)\n return model\n\n\ndef resnet1001_cifar(**kwargs):\n model = ResNet_Cifar(Bottleneck, [111, 111, 111], **kwargs)\n return model\n\n\ndef preact_resnet110_cifar(**kwargs):\n model = PreAct_ResNet_Cifar(PreActBasicBlock, [18, 18, 18], **kwargs)\n return model\n\n\ndef preact_resnet164_cifar(**kwargs):\n model = PreAct_ResNet_Cifar(PreActBottleneck, [18, 18, 18], **kwargs)\n return model\n\n\ndef preact_resnet1001_cifar(**kwargs):\n model = PreAct_ResNet_Cifar(PreActBottleneck, [111, 111, 111], **kwargs)\n return model\n\nresnet_book = {\n\t'8': resnet8_cifar,\n\t'14': resnet14_cifar,\n\t'20': resnet20_cifar,\n\t'26': resnet26_cifar,\n\t'32': resnet32_cifar,\n\t'44': resnet44_cifar,\n\t'56': resnet56_cifar,\n\t'110': resnet110_cifar,\n}\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
simonemastella/SureBetFinder
[ "8dd1fb9ea2f8c42ee54c48af1ccdff878edfc7c0" ]
[ "scrapper/marathonbet.py" ]
[ "from bs4 import BeautifulSoup as bs4 #per scrap\nimport pandas as pd #per analizzare e creare dataframe\nfrom requests_html import HTMLSession\nimport webbrowser\nfrom datetime import datetime, timedelta\n\ndef scrap(link):\n if True: \n session = HTMLSession()\n with session.get(link) as res:\n res.html.render() \n soup = bs4(res.html.html, 'html5lib')\n risultato=pd.DataFrame(columns=['tipo','casoF','casoV'])\n tags=soup.findAll(\"div\",{\"class\":\"market-inline-block-table-wrapper\"})\n for tag in tags:\n titolo= tag.find(\"div\",{\"class\":\"name-field\"})\n if titolo!= None:\n titolo=titolo.getText().strip()\n if \"Risultato\" == titolo:\n esdop=tag.findAll(\"span\",{\"class\":\"selection-link active-selection\"})\n new_row = {'tipo':\"1X-2\", 'casoF':esdop[3].getText().strip(), 'casoV':esdop[2].getText().strip()}\n risultato = risultato.append(new_row, ignore_index=True)\n new_row = {'tipo':\"12-X\", 'casoF':esdop[4].getText().strip(), 'casoV':esdop[1].getText().strip()}\n risultato = risultato.append(new_row, ignore_index=True)\n new_row = {'tipo':\"2X-1\", 'casoF':esdop[5].getText().strip(), 'casoV':esdop[0].getText().strip()}\n risultato = risultato.append(new_row, ignore_index=True)\n \n elif \"Gol\" == titolo:\n righe= tag.findAll(\"tr\",{\"data-header-highlighted-bounded\":\"true\"})\n for riga in righe:\n tit=riga.find(\"td\",{\"nowrap\":\"nowrap\"})\n if tit!=None:\n if \"Entrambe le squadre segnano\" == tit.getText().strip():\n gng=riga.findAll(\"td\",{\"class\":\"price height-column-with-price\"})\n new_row = {'tipo':\"GOL/NOGOL\", 'casoF':gng[1].getText().strip(), 'casoV':gng[0].getText().strip()}\n risultato = risultato.append(new_row, ignore_index=True)\n\n\n elif \"Totale gol\" == titolo:\n uo=tag.findAll(\"div\",{\"class\":\"coeff-price\"})\n if len(uo)>4:\n new_row = {'tipo':\"UNDER/OVER 0.5\", 'casoF':uo[0].getText().strip(), 'casoV':uo[1].getText().strip()}\n risultato = risultato.append(new_row, ignore_index=True)\n new_row = 
{'tipo':\"UNDER/OVER 1.5\", 'casoF':uo[2].getText().strip(), 'casoV':uo[3].getText().strip()}\n risultato = risultato.append(new_row, ignore_index=True)\n new_row = {'tipo':\"UNDER/OVER 2.5\", 'casoF':uo[4].getText().strip(), 'casoV':uo[5].getText().strip()}\n risultato = risultato.append(new_row, ignore_index=True)\n new_row = {'tipo':\"UNDER/OVER 3.5\", 'casoF':uo[6].getText().strip(), 'casoV':uo[7].getText().strip()}\n risultato = risultato.append(new_row, ignore_index=True)\n try:\n new_row = {'tipo':\"UNDER/OVER 4.5\", 'casoF':uo[8].getText().strip(), 'casoV':uo[9].getText().strip()}\n risultato = risultato.append(new_row, ignore_index=True)\n except:\n pass\n \n return risultato\n \n \n\n \ndef scrapCampionato(num):\n campionato=[\"https://www.marathonbet.it/it/popular/Football/Italy/Serie+A+-+22434\",\n \"https://www.marathonbet.it/it/popular/Football/Italy/Serie+B+-+46723\",\n \"https://www.marathonbet.it/it/popular/Football/Clubs.+International/UEFA+Champions+League+-+21255\",\n \"https://www.marathonbet.it/it/popular/Football/Clubs.+International/UEFA+Europa+League+-+21366\",\n \"https://www.marathonbet.it/it/popular/Football/England/Premier+League+-+21520\",\n \"https://www.marathonbet.it/it/popular/Football/England/Championship+-+22807\",\n \"https://www.marathonbet.it/it/popular/Football/Spain/Primera+Division+-+8736\",\n \"https://www.marathonbet.it/it/popular/Football/Spain/Segunda+Division+-+48300\",\n \"https://www.marathonbet.it/it/popular/Football/Germany/Bundesliga+-+22436\",\n \"https://www.marathonbet.it/it/popular/Football/Germany/Bundesliga+2+-+42528\",\n \"https://www.marathonbet.it/it/popular/Football/France/Ligue+1+-+21533\",\n \"\",\n \"https://www.marathonbet.it/it/popular/Football/Netherlands/Eredivisie+-+38090\",\n \"https://www.marathonbet.it/it/popular/Football/Netherlands/Eerste+Divisie+-+345004\",\n \"https://www.marathonbet.it/it/popular/Football/Portugal/Primeira+Liga+-+43058\",\n 
\"https://www.marathonbet.it/it/popular/Football/Portugal/National+Championship+-+1916077\"]\n #italia, champions e europa, inghilterra, spagna, germania, francia, olanda, portogallo\n if campionato[num] == \"\":\n return 0\n risultato=pd.DataFrame(columns=['giorno','ora','match','link'])\n session = HTMLSession()\n with session.get(campionato[num]) as res:\n res.html.render() \n soup = bs4(res.html.html, 'html5lib')\n partite=(soup.findAll(\"table\",{\"class\":\"member-area-content-table\"}))\n #print(\"vuoto?\",len(partite))\n #print(len(giornate))\n for partita in partite:\n match=partita.findAll(\"span\",{\"data-member-link\":\"true\"})\n match=match[0].getText().strip()+\" - \"+match[1].getText().strip()\n link =\"https://www.marathonbet.it\"+partita.find(\"a\",{\"class\":\"member-link\"}).get(\"href\")\n dataora=partita.find(\"td\",{\"class\":\"date\"}).getText().strip().split(\" \") \n if len(dataora)==1:\n d = datetime.now()\n data=d.strftime(\"%d\")\n ora=dataora[0]\n else:\n data=dataora[0]\n ora=dataora[2]\n new_row = {'giorno':data, 'ora':ora, 'match':match,'link':link}\n risultato = risultato.append(new_row, ignore_index=True) \n if len(risultato)!=0:\n return risultato\n else:\n return scrapCampionato(num)\n \n\nif __name__ == '__main__':\n \n print(scrap(\"https://www.marathonbet.it/it/betting/Football/Italy/Serie+A/Lazio+vs+Juventus+-+10661623\"))" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
INTERA-Inc/mf6cts
[ "13967af777e88b112b1a9026b35841c322d34bf4", "13967af777e88b112b1a9026b35841c322d34bf4" ]
[ "verification/flopy/mf6/data/mfdataarray.py", "verification/flopy/utils/sfroutputfile.py" ]
[ "import sys, inspect, copy, os\nimport numpy as np\nfrom ..data.mfstructure import DatumType\nfrom .mfdatastorage import DataStorage, DataStructureType, DataStorageType\nfrom ...utils.datautil import MultiList, DatumUtil\nfrom ..mfbase import ExtFileAction, MFDataException, VerbosityLevel\nfrom ..utils.mfenums import DiscretizationType\nfrom ...datbase import DataType\nfrom .mffileaccess import MFFileAccessArray\nfrom .mfdata import MFMultiDimVar, MFTransient\nfrom ...mbase import ModelInterface\n\n\nclass MFArray(MFMultiDimVar):\n \"\"\"\n Provides an interface for the user to access and update MODFLOW array data.\n MFArray objects are not designed to be directly constructed by the end\n user. When a FloPy for MODFLOW 6 package object is constructed, the\n appropriate MFArray objects are automatically built.\n\n Parameters\n ----------\n sim_data : MFSimulationData\n data contained in the simulation\n structure : MFDataStructure\n describes the structure of the data\n data : list or ndarray\n actual data\n enable : bool\n enable/disable the array\n path : tuple\n path in the data dictionary to this MFArray\n dimensions : MFDataDimensions\n dimension information related to the model, package, and array\n\n \"\"\"\n\n def __init__(\n self,\n sim_data,\n model_or_sim,\n structure,\n data=None,\n enable=True,\n path=None,\n dimensions=None,\n ):\n super().__init__(\n sim_data, model_or_sim, structure, enable, path, dimensions\n )\n if self.structure.layered:\n try:\n self._layer_shape = self.layer_shape()\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"resolving layer dimensions\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n else:\n self._layer_shape = (1,)\n if self._layer_shape[0] is None:\n self._layer_shape = (1,)\n self._data_type = 
structure.data_item_structures[0].type\n try:\n shp_ml = MultiList(shape=self._layer_shape)\n self._data_storage = self._new_storage(\n shp_ml.get_total_size() != 1\n )\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n structure.get_model(),\n structure.get_package(),\n path,\n \"creating storage\",\n structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n sim_data.debug,\n ex,\n )\n self._last_line_info = []\n if self.structure.type == DatumType.integer:\n multiplier = [1]\n else:\n multiplier = [1.0]\n if data is not None:\n try:\n self._get_storage_obj().set_data(\n data, key=self._current_key, multiplier=multiplier\n )\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"setting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n\n def __setattr__(self, name, value):\n if name == \"__setstate__\":\n raise AttributeError(name)\n elif name == \"fname\":\n self._get_storage_obj().layer_storage.first_item().fname = value\n elif name == \"factor\":\n self._get_storage_obj().layer_storage.first_item().factor = value\n elif name == \"iprn\":\n self._get_storage_obj().layer_storage.first_item().iprn = value\n elif name == \"binary\":\n self._get_storage_obj().layer_storage.first_item().binary = value\n else:\n super().__setattr__(name, value)\n\n def __getitem__(self, k):\n if isinstance(k, int):\n k = (k,)\n storage = self._get_storage_obj()\n if storage.layered and (isinstance(k, tuple) or isinstance(k, list)):\n if not storage.layer_storage.in_shape(k):\n comment = (\n 'Could not retrieve layer {} of \"{}\". 
There'\n \"are only {} layers available\"\n \".\".format(\n k, self.structure.name, len(storage.layer_storage)\n )\n )\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n comment,\n self._simulation_data.debug,\n )\n # for layered data treat k as layer number(s)\n return storage.layer_storage[k]\n else:\n # for non-layered data treat k as an array/list index of the data\n if isinstance(k, int):\n try:\n if len(self._get_data(apply_mult=True).shape) == 1:\n return self._get_data(apply_mult=True)[k]\n elif self._get_data(apply_mult=True).shape[0] == 1:\n return self._get_data(apply_mult=True)[0, k]\n elif self._get_data(apply_mult=True).shape[1] == 1:\n return self._get_data(apply_mult=True)[k, 0]\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"setting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n\n comment = (\n f'Unable to resolve index \"{k}\" for multidimensional data.'\n )\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n comment,\n self._simulation_data.debug,\n )\n else:\n try:\n if isinstance(k, tuple):\n if len(k) == 3:\n return self._get_data(apply_mult=True)[\n k[0], k[1], k[2]\n ]\n elif len(k) == 2:\n return self._get_data(apply_mult=True)[k[0], k[1]]\n if len(k) == 1:\n return self._get_data(apply_mult=True)[k]\n else:\n return self._get_data(apply_mult=True)[(k,)]\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise 
MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"setting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n\n def __setitem__(self, k, value):\n storage = self._get_storage_obj()\n self._resync()\n if storage.layered:\n if isinstance(k, int):\n k = (k,)\n # for layered data treat k as a layer number\n try:\n storage.layer_storage[k]._set_data(value)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"setting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n\n else:\n try:\n # for non-layered data treat k as an array/list index of the data\n a = self._get_data()\n a[k] = value\n a = a.astype(self._get_data().dtype)\n layer_storage = storage.layer_storage.first_item()\n self._get_storage_obj()._set_data(\n a, key=self._current_key, multiplier=layer_storage.factor\n )\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"setting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n\n @property\n def data_type(self):\n \"\"\"Type of data (DataType) stored in the array\"\"\"\n if self.structure.layered:\n return DataType.array3d\n else:\n return DataType.array2d\n\n @property\n def dtype(self):\n \"\"\"Type of data (numpy.dtype) stored in the array\"\"\"\n return self._get_data().dtype.type\n\n @property\n def plottable(self):\n \"\"\"If the array is plottable\"\"\"\n if self.model is None:\n return False\n else:\n return True\n\n @property\n def data(self):\n \"\"\"Returns array data. 
Calls get_data with default parameters.\"\"\"\n return self._get_data()\n\n def new_simulation(self, sim_data):\n \"\"\"Initialize MFArray object for a new simulation\n\n Parameters\n ----------\n sim_data : MFSimulationData\n Data dictionary containing simulation data.\n\n\n \"\"\"\n super().new_simulation(sim_data)\n self._data_storage = self._new_storage(False)\n self._layer_shape = (1,)\n\n def supports_layered(self):\n \"\"\"Returns whether this MFArray supports layered data\n\n Returns\n -------\n layered data supported: bool\n Whether or not this data object supports layered data\n\n \"\"\"\n\n try:\n model_grid = self._data_dimensions.get_model_grid()\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting model grid\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n return (\n self.structure.layered\n and model_grid.grid_type() != DiscretizationType.DISU\n )\n\n def set_layered_data(self, layered_data):\n \"\"\"Sets whether this MFArray supports layered data\n\n Parameters\n ----------\n layered_data : bool\n Whether data is layered or not.\n\n \"\"\"\n if layered_data is True and self.structure.layered is False:\n if (\n self._data_dimensions.get_model_grid().grid_type()\n == DiscretizationType.DISU\n ):\n comment = f\"Layered option not available for unstructured grid. {self._path}\"\n else:\n comment = (\n 'Data \"{}\" does not support layered option. 
'\n \"{}\".format(self._data_name, self._path)\n )\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"setting layered data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n comment,\n self._simulation_data.debug,\n )\n self._get_storage_obj().layered = layered_data\n\n def make_layered(self):\n \"\"\"Changes the data to be stored by layer instead of as a single array.\"\"\"\n\n if self.supports_layered():\n try:\n self._get_storage_obj().make_layered()\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"making data layered\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n else:\n if (\n self._data_dimensions.get_model_grid().grid_type()\n == DiscretizationType.DISU\n ):\n comment = f\"Layered option not available for unstructured grid. {self._path}\"\n else:\n comment = (\n 'Data \"{}\" does not support layered option. '\n \"{}\".format(self._data_name, self._path)\n )\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"converting data to layered\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n comment,\n self._simulation_data.debug,\n )\n\n def store_as_external_file(\n self,\n external_file_path,\n layer=None,\n binary=False,\n replace_existing_external=True,\n check_data=True,\n ):\n \"\"\"Stores data from layer `layer` to an external file at\n `external_file_path`. For unlayered data do not pass in `layer`.\n If layer is not specified all layers will be stored with each layer\n as a separate file. 
If replace_existing_external is set to False,\n this method will not do anything if the data is already in an\n external file.\n\n Parameters\n ----------\n external_file_path : str\n Path to external file\n layer : int\n Which layer to store in external file, `None` value stores all\n layers.\n binary : bool\n Store data in a binary file\n replace_existing_external : bool\n Whether to replace an existing external file.\n check_data : bool\n Verify data prior to storing\n \"\"\"\n storage = self._get_storage_obj()\n if storage is None:\n self._set_storage_obj(self._new_storage(False, True))\n storage = self._get_storage_obj()\n # build list of layers\n if layer is None:\n layer_list = []\n for index in range(0, storage.layer_storage.get_total_size()):\n if (\n replace_existing_external\n or storage.layer_storage[index].data_storage_type\n == DataStorageType.internal_array\n or storage.layer_storage[index].data_storage_type\n == DataStorageType.internal_constant\n ):\n layer_list.append(index)\n else:\n if (\n replace_existing_external\n or storage.layer_storage[layer].data_storage_type\n == DataStorageType.internal_array\n or storage.layer_storage[layer].data_storage_type\n == DataStorageType.internal_constant\n ):\n layer_list = [layer]\n else:\n layer_list = []\n\n # store data from each layer in a separate file\n for current_layer in layer_list:\n # determine external file name for layer\n if len(layer_list) > 0:\n fname, ext = os.path.splitext(external_file_path)\n if len(layer_list) == 1:\n file_path = f\"{fname}{ext}\"\n else:\n file_path = f\"{fname}_layer{current_layer + 1}{ext}\"\n else:\n file_path = external_file_path\n if isinstance(current_layer, int):\n current_layer = (current_layer,)\n # get the layer's data\n data = self._get_data(current_layer, True)\n\n if data is None:\n # do not write empty data to an external file\n continue\n if isinstance(data, str) and self._tas_info(data)[0] is not None:\n # data must not be time array series information\n 
continue\n if storage.get_data_dimensions(current_layer)[0] == -9999:\n # data must have well defined dimensions to make external\n continue\n try:\n # store layer's data in external file\n if (\n self._simulation_data.verbosity_level.value\n >= VerbosityLevel.verbose.value\n ):\n print(\n \"Storing {} layer {} to external file {}..\"\n \".\".format(\n self.structure.name,\n current_layer[0] + 1,\n file_path,\n )\n )\n factor = storage.layer_storage[current_layer].factor\n external_data = {\n \"filename\": file_path,\n \"data\": self._get_data(current_layer, True),\n \"factor\": factor,\n \"binary\": binary,\n }\n self._set_data(\n external_data, layer=current_layer, check_data=False\n )\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n f\"storing data in external file {external_file_path}\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n\n def store_internal(\n self,\n layer=None,\n check_data=True,\n ):\n \"\"\"Stores data from layer `layer` internally. For unlayered data do\n not pass in `layer`. 
If layer is not specified all layers will be\n stored internally\n\n Parameters\n ----------\n layer : int\n Which layer to store in external file, `None` value stores all\n layers.\n check_data : bool\n Verify data prior to storing\n \"\"\"\n storage = self._get_storage_obj()\n if storage is None:\n self._set_storage_obj(self._new_storage(False, True))\n storage = self._get_storage_obj()\n # build list of layers\n if layer is None:\n layer_list = []\n for index in range(0, storage.layer_storage.get_total_size()):\n if (\n storage.layer_storage[index].data_storage_type\n == DataStorageType.external_file\n ):\n layer_list.append(index)\n else:\n if (\n storage.layer_storage[layer].data_storage_type\n == DataStorageType.external_file\n ):\n layer_list = [layer]\n else:\n layer_list = []\n\n # store data from each layer\n for current_layer in layer_list:\n if isinstance(current_layer, int):\n current_layer = (current_layer,)\n # get the layer's data\n data = self._get_data(current_layer, True)\n\n if data is None:\n # do not write empty data to an internal file\n continue\n try:\n # store layer's data internally\n if (\n self._simulation_data.verbosity_level.value\n >= VerbosityLevel.verbose.value\n ):\n print(\n \"Storing {} layer {} internally..\"\n \".\".format(\n self.structure.name,\n current_layer[0] + 1,\n )\n )\n factor = storage.layer_storage[current_layer].factor\n internal_data = {\n \"data\": self._get_data(current_layer, True),\n \"factor\": factor,\n }\n self._set_data(\n internal_data, layer=current_layer, check_data=False\n )\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n f\"storing data {self.structure.name} internally\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n\n def has_data(self, layer=None):\n \"\"\"Returns whether layer 
\"layer_num\" has any data associated with it.\n\n Parameters\n ----------\n layer_num : int\n Layer number to check for data. For unlayered data do not\n pass anything in\n\n Returns\n -------\n has data: bool\n Returns if there is data.\n\n \"\"\"\n storage = self._get_storage_obj()\n if storage is None:\n return False\n if isinstance(layer, int):\n layer = (layer,)\n try:\n return storage.has_data(layer)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"checking for data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n\n def get_data(self, layer=None, apply_mult=False, **kwargs):\n \"\"\"Returns the data associated with layer \"layer_num\". If \"layer_num\"\n is None, returns all data.\n\n Parameters\n ----------\n layer_num : int\n\n Returns\n -------\n data : ndarray\n Array data in an ndarray\n\n \"\"\"\n return self._get_data(layer, apply_mult, **kwargs)\n\n def _get_data(self, layer=None, apply_mult=False, **kwargs):\n if self._get_storage_obj() is None:\n self._data_storage = self._new_storage(False)\n if isinstance(layer, int):\n layer = (layer,)\n storage = self._get_storage_obj()\n if storage is not None:\n try:\n data = storage.get_data(layer, apply_mult)\n if (\n \"array\" in kwargs\n and kwargs[\"array\"]\n and isinstance(self, MFTransientArray)\n ):\n data = np.expand_dims(data, 0)\n return data\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n return None\n\n def set_data(self, data, multiplier=None, layer=None):\n \"\"\"Sets the contents of the data at layer `layer` to 
`data` with\n multiplier `multiplier`. For unlayered data do not pass in\n `layer`. Data can have the following formats:\n 1) ndarray - numpy ndarray containing all of the data\n 2) [data] - python list containing all of the data\n 3) val - a single constant value to be used for all of the data\n 4) {'filename':filename, 'factor':fct, 'iprn':print, 'data':data} -\n dictionary defining external file information\n 5) {'data':data, 'factor':fct, 'iprn':print) - dictionary defining\n internal information. Data that is layered can also be set by defining\n a list with a length equal to the number of layers in the model.\n Each layer in the list contains the data as defined in the\n formats above:\n [layer_1_val, [layer_2_array_vals],\n {'filename':file_with_layer_3_data, 'factor':fct, 'iprn':print}]\n\n Parameters\n ----------\n data : ndarray/list\n An ndarray or nested lists containing the data to set.\n multiplier : float\n Multiplier to apply to data\n layer : int\n Data layer that is being set\n\n \"\"\"\n self._set_data(data, multiplier, layer)\n\n def _set_data(self, data, multiplier=None, layer=None, check_data=True):\n self._resync()\n if self._get_storage_obj() is None:\n self._data_storage = self._new_storage(False)\n if multiplier is None:\n multiplier = [self._get_storage_obj().get_default_mult()]\n if isinstance(layer, int):\n layer = (layer,)\n if isinstance(data, str):\n # check to see if this is a time series array\n tas_name, tas_label = self._tas_info(data)\n if tas_name is not None:\n # verify and save as time series array\n self._get_storage_obj().set_tas(\n tas_name, tas_label, self._current_key, check_data\n )\n return\n\n storage = self._get_storage_obj()\n if self.structure.name == \"aux\" and layer is None:\n if isinstance(data, dict):\n aux_data = copy.deepcopy(data[\"data\"])\n else:\n aux_data = data\n # make a list out of a single item\n if (\n isinstance(aux_data, int)\n or isinstance(aux_data, float)\n or isinstance(aux_data, str)\n ):\n 
aux_data = [[aux_data]]\n # handle special case of aux variables in an array\n self.layered = True\n aux_var_names = (\n self._data_dimensions.package_dim.get_aux_variables()\n )\n if len(aux_data) == len(aux_var_names[0]) - 1:\n for layer, aux_var_data in enumerate(aux_data):\n if (\n layer > 0\n and layer >= storage.layer_storage.get_total_size()\n ):\n storage.add_layer()\n if isinstance(data, dict):\n # put layer data back in dictionary\n layer_data = data\n layer_data[\"data\"] = aux_var_data\n else:\n layer_data = aux_var_data\n try:\n storage.set_data(\n layer_data, [layer], multiplier, self._current_key\n )\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"setting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n else:\n message = (\n \"Unable to set data for aux variable. 
\"\n \"Expected {} aux variables but got \"\n \"{}.\".format(len(aux_var_names[0]), len(data))\n )\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self._data_dimensions.structure.get_model(),\n self._data_dimensions.structure.get_package(),\n self._data_dimensions.structure.path,\n \"setting aux variables\",\n self._data_dimensions.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n message,\n self._simulation_data.debug,\n )\n else:\n try:\n storage.set_data(\n data, layer, multiplier, key=self._current_key\n )\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"setting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n self._layer_shape = storage.layer_storage.list_shape\n\n def load(\n self,\n first_line,\n file_handle,\n block_header,\n pre_data_comments=None,\n external_file_info=None,\n ):\n \"\"\"Loads data from first_line (the first line of data) and open file\n file_handle which is pointing to the second line of data. Returns a\n tuple with the first item indicating whether all data was read and\n the second item being the last line of text read from the file. 
This\n method is for internal flopy use and is not intended for the end user.\n\n Parameters\n ----------\n first_line : str\n A string containing the first line of data in this array.\n file_handle : file descriptor\n A file handle for the data file which points to the second\n line of data for this array\n block_header : MFBlockHeader\n Block header object that contains block header information\n for the block containing this data\n pre_data_comments : MFComment\n Comments immediately prior to the data\n external_file_info : list\n Contains information about storing files externally\n\n Returns\n -------\n more data : bool,\n next data line : str\n\n \"\"\"\n super().load(\n first_line,\n file_handle,\n block_header,\n pre_data_comments=None,\n external_file_info=None,\n )\n self._resync()\n if self.structure.layered:\n try:\n model_grid = self._data_dimensions.get_model_grid()\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting model grid\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n if self._layer_shape[-1] != model_grid.num_layers():\n if model_grid.grid_type() == DiscretizationType.DISU:\n self._layer_shape = (1,)\n else:\n self._layer_shape = (model_grid.num_layers(),)\n if self._layer_shape[-1] is None:\n self._layer_shape = (1,)\n shape_ml = MultiList(shape=self._layer_shape)\n self._set_storage_obj(\n self._new_storage(shape_ml.get_total_size() != 1, True)\n )\n file_access = MFFileAccessArray(\n self.structure,\n self._data_dimensions,\n self._simulation_data,\n self._path,\n self._current_key,\n )\n storage = self._get_storage_obj()\n self._layer_shape, return_val = file_access.load_from_package(\n first_line,\n file_handle,\n self._layer_shape,\n storage,\n self._keyword,\n pre_data_comments=None,\n )\n if external_file_info is not 
None:\n storage.point_to_existing_external_file(\n external_file_info, storage.layer_storage.get_total_size() - 1\n )\n\n return return_val\n\n def _is_layered_aux(self):\n # determine if this is the special aux variable case\n if (\n self.structure.name.lower() == \"aux\"\n and self._get_storage_obj().layered\n ):\n return True\n else:\n return False\n\n def get_file_entry(\n self, layer=None, ext_file_action=ExtFileAction.copy_relative_paths\n ):\n \"\"\"Returns a string containing the data in layer \"layer\" formatted for\n a MODFLOW 6 file. For unlayered data do not pass in \"layer\".\n\n Parameters\n ----------\n layer : int\n The layer to return file entry for.\n ext_file_action : ExtFileAction\n How to handle external paths.\n\n Returns\n -------\n file entry : str\n\n \"\"\"\n return self._get_file_entry(layer, ext_file_action)\n\n def _get_file_entry(\n self, layer=None, ext_file_action=ExtFileAction.copy_relative_paths\n ):\n if isinstance(layer, int):\n layer = (layer,)\n data_storage = self._get_storage_obj()\n if (\n data_storage is None\n or data_storage.layer_storage.get_total_size() == 0\n or not data_storage.has_data()\n ):\n return \"\"\n\n layered_aux = self._is_layered_aux()\n\n # prepare indent\n indent = self._simulation_data.indent_string\n shape_ml = MultiList(shape=self._layer_shape)\n if shape_ml.get_total_size() == 1:\n data_indent = indent\n else:\n data_indent = f\"{indent}{self._simulation_data.indent_string}\"\n\n file_entry_array = []\n if data_storage.data_structure_type == DataStructureType.scalar:\n # scalar data, like in the case of a time array series gets written\n # on a single line\n try:\n data = data_storage.get_data()\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting data\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n 
self._simulation_data.debug,\n ex,\n )\n if (\n self.structure.data_item_structures[0].numeric_index\n or self.structure.data_item_structures[0].is_cellid\n ):\n # for cellid and numeric indices convert from 0 base to 1 based\n data = abs(data) + 1\n file_entry_array.append(\n f\"{indent}{self.structure.name}{indent}{data}\\n\"\n )\n elif data_storage.layered:\n if not layered_aux:\n if not self.structure.data_item_structures[0].just_data:\n name = self.structure.name\n file_entry_array.append(f\"{indent}{name}{indent}LAYERED\\n\")\n else:\n file_entry_array.append(f\"{indent}LAYERED\\n\")\n\n if layer is None:\n layer_min = shape_ml.first_index()\n layer_max = copy.deepcopy(self._layer_shape)\n else:\n # set layer range\n if not shape_ml.in_shape(layer):\n comment = (\n 'Layer {} for variable \"{}\" does not exist'\n \".\".format(layer, self._data_name)\n )\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting file entry\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n comment,\n self._simulation_data.debug,\n )\n\n layer_min = layer\n layer_max = shape_ml.inc_shape_idx(layer)\n for layer in shape_ml.indexes(layer_min, layer_max):\n file_entry_array.append(\n self._get_file_entry_layer(\n layer,\n data_indent,\n data_storage.layer_storage[layer].data_storage_type,\n ext_file_action,\n layered_aux,\n )\n )\n else:\n # data is not layered\n if not self.structure.data_item_structures[0].just_data:\n if self._data_name == \"aux\":\n file_entry_array.append(\n f\"{indent}{self._get_aux_var_name([0])}\\n\"\n )\n else:\n file_entry_array.append(f\"{indent}{self.structure.name}\\n\")\n\n data_storage_type = data_storage.layer_storage[0].data_storage_type\n file_entry_array.append(\n self._get_file_entry_layer(\n None, data_indent, data_storage_type, ext_file_action\n )\n )\n\n return \"\".join(file_entry_array)\n\n def 
_new_storage(\n self, set_layers=True, base_storage=False, stress_period=0\n ):\n if set_layers:\n return DataStorage(\n self._simulation_data,\n self._model_or_sim,\n self._data_dimensions,\n self._get_file_entry,\n DataStorageType.internal_array,\n DataStructureType.ndarray,\n self._layer_shape,\n stress_period=stress_period,\n data_path=self._path,\n )\n else:\n return DataStorage(\n self._simulation_data,\n self._model_or_sim,\n self._data_dimensions,\n self._get_file_entry,\n DataStorageType.internal_array,\n DataStructureType.ndarray,\n stress_period=stress_period,\n data_path=self._path,\n )\n\n def _get_storage_obj(self):\n return self._data_storage\n\n def _set_storage_obj(self, storage):\n self._data_storage = storage\n\n def _get_file_entry_layer(\n self,\n layer,\n data_indent,\n storage_type,\n ext_file_action,\n layered_aux=False,\n ):\n if (\n not self.structure.data_item_structures[0].just_data\n and not layered_aux\n ):\n indent_string = \"{}{}\".format(\n self._simulation_data.indent_string,\n self._simulation_data.indent_string,\n )\n else:\n indent_string = self._simulation_data.indent_string\n\n file_entry = \"\"\n if layered_aux:\n try:\n # display aux name\n file_entry = (\n f\"{indent_string}{self._get_aux_var_name(layer)}\\n\"\n )\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting aux variables\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n indent_string = (\n f\"{indent_string}{self._simulation_data.indent_string}\"\n )\n\n data_storage = self._get_storage_obj()\n if storage_type == DataStorageType.internal_array:\n # internal data header + data\n format_str = self._get_internal_formatting_string(layer).upper()\n lay_str = self._get_data_layer_string(layer, data_indent).upper()\n file_entry = 
f\"{file_entry}{indent_string}{format_str}\\n{lay_str}\"\n elif storage_type == DataStorageType.internal_constant:\n # constant data\n try:\n const_val = data_storage.get_const_val(layer)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting constant value\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n None,\n self._simulation_data.debug,\n ex,\n )\n const_str = self._get_constant_formatting_string(\n const_val, layer, self._data_type\n ).upper()\n file_entry = f\"{file_entry}{indent_string}{const_str}\"\n else:\n # external data\n ext_str = self._get_external_formatting_string(\n layer, ext_file_action\n )\n file_entry = f\"{file_entry}{indent_string}{ext_str}\"\n # add to active list of external files\n try:\n file_path = data_storage.get_external_file_path(layer)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n comment = (\n f'Could not get external file path for layer \"{layer}\"',\n )\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting external file path\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n comment,\n self._simulation_data.debug,\n ex,\n )\n package_dim = self._data_dimensions.package_dim\n model_name = package_dim.model_dim[0].model_name\n self._simulation_data.mfpath.add_ext_file(file_path, model_name)\n return file_entry\n\n def _get_data_layer_string(self, layer, data_indent):\n # iterate through data layer\n try:\n data = self._get_storage_obj().get_data(layer, False)\n except Exception as ex:\n type_, value_, traceback_ = sys.exc_info()\n comment = f'Could not get data for layer \"{layer}\"'\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"getting data\",\n self.structure.name,\n 
inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n comment,\n self._simulation_data.debug,\n ex,\n )\n file_access = MFFileAccessArray(\n self.structure,\n self._data_dimensions,\n self._simulation_data,\n self._path,\n self._current_key,\n )\n return file_access.get_data_string(data, self._data_type, data_indent)\n\n def _resolve_layer_index(self, layer, allow_multiple_layers=False):\n # handle layered vs non-layered data\n storage = self._get_storage_obj()\n if storage.layered:\n if layer is None:\n if storage.layer_storage.get_total_size() == 1:\n layer_index = [0]\n elif allow_multiple_layers:\n layer_index = storage.get_active_layer_indices()\n else:\n comment = (\n 'Data \"{}\" is layered but no '\n \"layer_num was specified\"\n \".\".format(self._data_name)\n )\n type_, value_, traceback_ = sys.exc_info()\n raise MFDataException(\n self.structure.get_model(),\n self.structure.get_package(),\n self._path,\n \"resolving layer index\",\n self.structure.name,\n inspect.stack()[0][3],\n type_,\n value_,\n traceback_,\n comment,\n self._simulation_data.debug,\n )\n\n else:\n layer_index = [layer]\n else:\n layer_index = [[0]]\n return layer_index\n\n def _verify_data(self, data_iter, layer_num):\n # TODO: Implement\n return True\n\n def plot(\n self,\n filename_base=None,\n file_extension=None,\n mflay=None,\n fignum=None,\n title=None,\n **kwargs,\n ):\n \"\"\"\n Plot 3-D model input data\n\n Parameters\n ----------\n filename_base : str\n Base file name that will be used to automatically generate file\n names for output image files. Plots will be exported as image\n files if file_name_base is not None. (default is None)\n file_extension : str\n Valid matplotlib.pyplot file extension for savefig(). Only used\n if filename_base is not None. (default is 'png')\n mflay : int\n MODFLOW zero-based layer number to return. If None, then all\n all layers will be included. 
(default is None)\n **kwargs : dict\n axes : list of matplotlib.pyplot.axis\n List of matplotlib.pyplot.axis that will be used to plot\n data for each layer. If axes=None axes will be generated.\n (default is None)\n pcolor : bool\n Boolean used to determine if matplotlib.pyplot.pcolormesh\n plot will be plotted. (default is True)\n colorbar : bool\n Boolean used to determine if a color bar will be added to\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\n (default is False)\n inactive : bool\n Boolean used to determine if a black overlay in inactive\n cells in a layer will be displayed. (default is True)\n contour : bool\n Boolean used to determine if matplotlib.pyplot.contour\n plot will be plotted. (default is False)\n clabel : bool\n Boolean used to determine if matplotlib.pyplot.clabel\n will be plotted. Only used if contour=True. (default is False)\n grid : bool\n Boolean used to determine if the model grid will be plotted\n on the figure. (default is False)\n masked_values : list\n List of unique values to be excluded from the plot.\n\n Returns\n ----------\n out : list\n Empty list is returned if filename_base is not None. 
Otherwise\n a list of matplotlib.pyplot.axis is returned.\n \"\"\"\n from flopy.plot import PlotUtilities\n\n if not self.plottable:\n raise TypeError(\n \"This MFArray is not plottable likely because modelgrid is \"\n \"not available.\"\n )\n\n modelgrid = self._get_model_grid()\n a = self.array\n num_plottable_layers = modelgrid.get_number_plottable_layers(a)\n\n if num_plottable_layers == 1:\n axes = PlotUtilities._plot_util2d_helper(\n self,\n title=title,\n filename_base=filename_base,\n file_extension=file_extension,\n fignum=fignum,\n **kwargs,\n )\n elif num_plottable_layers > 1:\n axes = PlotUtilities._plot_util3d_helper(\n self,\n filename_base=filename_base,\n file_extension=file_extension,\n mflay=mflay,\n fignum=fignum,\n **kwargs,\n )\n else:\n axes = None\n\n return axes\n\n\nclass MFTransientArray(MFArray, MFTransient):\n \"\"\"\n Provides an interface for the user to access and update MODFLOW transient\n array data. MFTransientArray objects are not designed to be directly\n constructed by the end user. 
When a FloPy for MODFLOW 6 package object is\n constructed, the appropriate MFArray objects are automatically built.\n\n Parameters\n ----------\n sim_data : MFSimulationData\n data contained in the simulation\n structure : MFDataStructure\n describes the structure of the data\n data : list or ndarray\n actual data\n enable : bool\n enable/disable the array\n path : tuple\n path in the data dictionary to this MFArray\n dimensions : MFDataDimensions\n dimension information related to the model, package, and array\n\n Examples\n --------\n\n \"\"\"\n\n def __init__(\n self,\n sim_data,\n model_or_sim,\n structure,\n enable=True,\n path=None,\n dimensions=None,\n ):\n super().__init__(\n sim_data=sim_data,\n model_or_sim=model_or_sim,\n structure=structure,\n data=None,\n enable=enable,\n path=path,\n dimensions=dimensions,\n )\n self._transient_setup(self._data_storage)\n self.repeating = True\n\n @property\n def data_type(self):\n \"\"\"Type of data (DataType) stored in the array\"\"\"\n return DataType.transient2d\n\n def remove_transient_key(self, transient_key):\n \"\"\"Removes a new transient time `transient_key` and any data stored\n at that time. This method is intended for internal library usage only.\n\n Parameters\n ----------\n transient_key : int\n Zero-based stress period\n\n \"\"\"\n if transient_key in self._data_storage:\n del self._data_storage[transient_key]\n\n def add_transient_key(self, transient_key):\n \"\"\"Adds a new transient time allowing data for that time to be stored\n and retrieved using the key `transient_key`. 
This method is intended\n for internal library usage only.\n\n Parameters\n ----------\n transient_key : int\n Zero-based stress period\n\n \"\"\"\n super().add_transient_key(transient_key)\n self._data_storage[transient_key] = super()._new_storage(\n stress_period=transient_key\n )\n\n def store_as_external_file(\n self,\n external_file_path,\n layer=None,\n binary=False,\n replace_existing_external=True,\n check_data=True,\n ):\n \"\"\"Stores data from layer `layer` to an external file at\n `external_file_path`. For unlayered data do not pass in `layer`.\n If layer is not specified all layers will be stored with each layer\n as a separate file. If replace_existing_external is set to False,\n this method will not do anything if the data is already in an\n external file.\n\n Parameters\n ----------\n external_file_path : str\n Path to external file\n layer : int\n Which layer to store in external file, `None` value stores all\n layers.\n binary : bool\n Store data in a binary file\n replace_existing_external : bool\n Whether to replace an existing external file.\n check_data : bool\n Verify data prior to storing\n \"\"\"\n # store each stress period in separate file(s)\n for sp in self._data_storage.keys():\n self._current_key = sp\n layer_storage = self._get_storage_obj().layer_storage\n if (\n layer_storage.get_total_size() > 0\n and self._get_storage_obj().layer_storage[0].data_storage_type\n != DataStorageType.external_file\n ):\n fname, ext = os.path.splitext(external_file_path)\n if DatumUtil.is_int(sp):\n full_name = f\"{fname}_{sp + 1}{ext}\"\n else:\n full_name = f\"{fname}_{sp}{ext}\"\n super().store_as_external_file(\n full_name,\n layer,\n binary,\n replace_existing_external,\n check_data,\n )\n\n def store_internal(\n self,\n layer=None,\n check_data=True,\n ):\n \"\"\"Stores data from layer `layer` internally. For unlayered data do\n not pass in `layer`. 
If layer is not specified all layers will be\n stored internally.\n\n Parameters\n ----------\n layer : int\n Which layer to store internally file, `None` value stores all\n layers.\n check_data : bool\n Verify data prior to storing\n \"\"\"\n for sp in self._data_storage.keys():\n self._current_key = sp\n layer_storage = self._get_storage_obj().layer_storage\n if (\n layer_storage.get_total_size() > 0\n and self._get_storage_obj().layer_storage[0].data_storage_type\n == DataStorageType.external_file\n ):\n super().store_internal(\n layer,\n check_data,\n )\n\n def get_data(self, layer=None, apply_mult=True, **kwargs):\n \"\"\"Returns the data associated with stress period key `layer`.\n If `layer` is None, returns all data for time `layer`.\n\n Parameters\n ----------\n layer : int\n Zero-based stress period of data to return\n apply_mult : bool\n Whether to apply multiplier to data prior to returning it\n\n \"\"\"\n if self._data_storage is not None and len(self._data_storage) > 0:\n if layer is None:\n output = None\n sim_time = self._data_dimensions.package_dim.model_dim[\n 0\n ].simulation_time\n num_sp = sim_time.get_num_stress_periods()\n if \"array\" in kwargs:\n data = None\n for sp in range(0, num_sp):\n if sp in self._data_storage:\n self.get_data_prep(sp)\n data = super().get_data(\n apply_mult=apply_mult, **kwargs\n )\n data = np.expand_dims(data, 0)\n else:\n if data is None:\n # get any data\n self.get_data_prep(self._data_storage.key()[0])\n data = super().get_data(\n apply_mult=apply_mult, **kwargs\n )\n data = np.expand_dims(data, 0)\n if self.structure.type == DatumType.integer:\n data = np.full_like(data, 0)\n else:\n data = np.full_like(data, 0.0)\n if output is None:\n output = data\n else:\n output = np.concatenate((output, data))\n return output\n else:\n for sp in range(0, num_sp):\n data = None\n if sp in self._data_storage:\n self.get_data_prep(sp)\n data = super().get_data(\n apply_mult=apply_mult, **kwargs\n )\n if output is None:\n if 
\"array\" in kwargs:\n output = [data]\n else:\n output = {sp: data}\n else:\n if \"array\" in kwargs:\n output.append(data)\n else:\n output[sp] = data\n return output\n else:\n self.get_data_prep(layer)\n return super().get_data(apply_mult=apply_mult)\n else:\n return None\n\n def set_data(self, data, multiplier=None, layer=None, key=None):\n \"\"\"Sets the contents of the data at layer `layer` and time `key` to\n `data` with multiplier `multiplier`. For unlayered data do not pass\n in `layer`.\n\n Parameters\n ----------\n data : dict, ndarray, list\n Data being set. Data can be a dictionary with keys as\n zero-based stress periods and values as the data. If data is\n an ndarray or list of lists, it will be assigned to the the\n stress period specified in `key`. If any is set to None, that\n stress period of data will be removed.\n multiplier : int\n multiplier to apply to data\n layer : int\n Layer of data being set. Keep default of None of data is not\n layered.\n key : int\n Zero based stress period to assign data too. 
Does not apply\n if `data` is a dictionary.\n \"\"\"\n\n if isinstance(data, dict):\n # each item in the dictionary is a list for one stress period\n # the dictionary key is the stress period the list is for\n del_keys = []\n for key, list_item in data.items():\n if list_item is None:\n self.remove_transient_key(key)\n del_keys.append(key)\n else:\n self._set_data_prep(list_item, key)\n super().set_data(list_item, multiplier, layer)\n for key in del_keys:\n del data[key]\n else:\n if key is None:\n # search for a key\n new_key_index = self.structure.first_non_keyword_index()\n if (\n new_key_index is not None\n and hasattr(data, \"__len__\")\n and len(data) > new_key_index\n ):\n key = data[new_key_index]\n else:\n key = 0\n if data is None:\n self.remove_transient_key(key)\n else:\n self._set_data_prep(data, key)\n super().set_data(data, multiplier, layer)\n\n def get_file_entry(\n self, key=0, ext_file_action=ExtFileAction.copy_relative_paths\n ):\n \"\"\"Returns a string containing the data in stress period \"key\".\n\n Parameters\n ----------\n key : int\n The stress period to return file entry for.\n ext_file_action : ExtFileAction\n How to handle external paths.\n\n Returns\n -------\n file entry : str\n\n \"\"\"\n\n self._get_file_entry_prep(key)\n return super().get_file_entry(ext_file_action=ext_file_action)\n\n def load(\n self,\n first_line,\n file_handle,\n block_header,\n pre_data_comments=None,\n external_file_info=None,\n ):\n \"\"\"Loads data from first_line (the first line of data) and open file\n handle which is pointing to the second line of data. 
Returns a\n tuple with the first item indicating whether all data was read\n and the second item being the last line of text read from the file.\n This method is for internal flopy use and is not intended to be called\n by the end user.\n\n Parameters\n ----------\n first_line : str\n A string containing the first line of data in this array.\n file_handle : file descriptor\n A file handle for the data file which points to the second\n line of data for this array\n block_header : MFBlockHeader\n Block header object that contains block header information\n for the block containing this data\n pre_data_comments : MFComment\n Comments immediately prior to the data\n external_file_info : list\n Contains information about storing files externally\n\n Returns\n -------\n more data : bool,\n next data line : str\n\n \"\"\"\n self._load_prep(block_header)\n return super().load(\n first_line, file_handle, pre_data_comments, external_file_info\n )\n\n def _new_storage(\n self, set_layers=True, base_storage=False, stress_period=0\n ):\n if base_storage:\n if not isinstance(stress_period, int):\n stress_period = 1\n return super()._new_storage(\n set_layers, base_storage, stress_period\n )\n else:\n return {}\n\n def _set_storage_obj(self, storage):\n self._data_storage[self._current_key] = storage\n\n def _get_storage_obj(self):\n if (\n self._current_key is None\n or self._current_key not in self._data_storage\n ):\n return None\n return self._data_storage[self._current_key]\n\n def plot(\n self,\n kper=None,\n filename_base=None,\n file_extension=None,\n mflay=None,\n fignum=None,\n **kwargs,\n ):\n \"\"\"\n Plot transient array model input data\n\n Parameters\n ----------\n transient2d : flopy.utils.util_array.Transient2D object\n filename_base : str\n Base file name that will be used to automatically generate file\n names for output image files. Plots will be exported as image\n files if file_name_base is not None. 
(default is None)\n file_extension : str\n Valid matplotlib.pyplot file extension for savefig(). Only used\n if filename_base is not None. (default is 'png')\n **kwargs : dict\n axes : list of matplotlib.pyplot.axis\n List of matplotlib.pyplot.axis that will be used to plot\n data for each layer. If axes=None axes will be generated.\n (default is None)\n pcolor : bool\n Boolean used to determine if matplotlib.pyplot.pcolormesh\n plot will be plotted. (default is True)\n colorbar : bool\n Boolean used to determine if a color bar will be added to\n the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.\n (default is False)\n inactive : bool\n Boolean used to determine if a black overlay in inactive\n cells in a layer will be displayed. (default is True)\n contour : bool\n Boolean used to determine if matplotlib.pyplot.contour\n plot will be plotted. (default is False)\n clabel : bool\n Boolean used to determine if matplotlib.pyplot.clabel\n will be plotted. Only used if contour=True. (default is False)\n grid : bool\n Boolean used to determine if the model grid will be plotted\n on the figure. (default is False)\n masked_values : list\n List of unique values to be excluded from the plot.\n kper : str\n MODFLOW zero-based stress period number to return. If\n kper='all' then data for all stress period will be\n extracted. (default is zero).\n\n Returns\n ----------\n axes : list\n Empty list is returned if filename_base is not None. 
Otherwise\n a list of matplotlib.pyplot.axis is returned.\n \"\"\"\n from flopy.plot.plotutil import PlotUtilities\n\n if not self.plottable:\n raise TypeError(\"Simulation level packages are not plottable\")\n\n axes = PlotUtilities._plot_transient2d_helper(\n self,\n filename_base=filename_base,\n file_extension=file_extension,\n kper=kper,\n fignum=fignum,\n **kwargs,\n )\n return axes\n", "import numpy as np\n\n\nclass SfrFile:\n \"\"\"\n Read SFR package results from text file (ISTCB2 > 0)\n\n Parameters\n ----------\n filename : str\n Name of the sfr output file\n geometries : any\n Ignored\n verbose : any\n Ignored\n\n Attributes\n ----------\n\n Methods\n -------\n\n See Also\n --------\n\n Notes\n -----\n Indexing starts at one for: layer, row, column, segment, reach.\n Indexing starts at zero for: i, j, k, and kstpkper.\n\n Examples\n --------\n\n >>> import flopy\n >>> sfq = flopy.utils.SfrFile('mymodel.sfq')\n\n \"\"\"\n\n # non-float dtypes (default is float)\n dtypes = {\n \"layer\": int,\n \"row\": int,\n \"column\": int,\n \"segment\": int,\n \"reach\": int,\n }\n\n def __init__(self, filename, geometries=None, verbose=False):\n \"\"\"\n Class constructor.\n \"\"\"\n try:\n import pandas as pd\n\n self.pd = pd\n except ImportError:\n print(\"This method requires pandas\")\n self.pd = None\n return\n\n # get the number of rows to skip at top, and the number of data columns\n self.filename = filename\n evaluated_format = False\n has_gradient = False\n has_delUzstor = False\n has_elevation = False\n with open(self.filename) as f:\n for i, line in enumerate(f):\n if \"GRADIENT\" in line:\n has_gradient = True\n if \"CHNG. 
UNSAT.\" in line:\n has_delUzstor = True\n if \"ELEVATION\" in line:\n has_elevation = True\n items = line.strip().split()\n if len(items) > 0 and items[0].isdigit():\n evaluated_format = True\n self.sr = i\n self.ncol = len(items)\n break\n if not evaluated_format:\n raise ValueError(\n f\"could not evaluate format of {self.filename!r} for SfrFile\"\n )\n # all outputs start with the same 15 columns\n self.names = [\n \"layer\",\n \"row\",\n \"column\",\n \"segment\",\n \"reach\",\n \"Qin\",\n \"Qaquifer\",\n \"Qout\",\n \"Qovr\",\n \"Qprecip\",\n \"Qet\",\n \"stage\",\n \"depth\",\n \"width\",\n \"Cond\",\n ]\n if has_gradient and has_delUzstor:\n raise ValueError(\n \"column 16 should be either 'gradient' or 'Qwt', not both\"\n )\n elif has_gradient:\n self.names.append(\"gradient\")\n elif has_delUzstor:\n self.names += [\"Qwt\", \"delUzstor\"]\n if self.ncol == 18:\n self.names.append(\"gw_head\")\n if has_elevation:\n self.names.append(\"strtop\")\n self.times = self.get_times()\n self.geoms = None # not implemented yet\n self._df = None\n\n def get_times(self):\n \"\"\"\n Parse the stress period/timestep headers.\n\n Returns\n -------\n kstpkper : tuple\n list of kstp, kper tuples\n\n \"\"\"\n kstpkper = []\n with open(self.filename) as input:\n for line in input:\n if \"STEP\" in line:\n line = line.strip().split()\n kper, kstp = int(line[3]) - 1, int(line[5]) - 1\n kstpkper.append((kstp, kper))\n return kstpkper\n\n @property\n def df(self):\n if self._df is None:\n self._df = self.get_dataframe()\n return self._df\n\n @staticmethod\n def get_nstrm(df):\n \"\"\"\n Get the number of SFR cells from the results dataframe.\n\n Returns\n -------\n nrch : int\n Number of SFR cells\n\n \"\"\"\n wherereach1 = np.where((df.segment == 1) & (df.reach == 1))[0]\n if len(wherereach1) == 1:\n return len(df)\n elif len(wherereach1) > 1:\n return wherereach1[1]\n\n def get_dataframe(self):\n \"\"\"\n Read the whole text file into a pandas dataframe.\n\n Returns\n 
-------\n df : pandas dataframe\n SFR output as a pandas dataframe\n\n \"\"\"\n kwargs = {\n \"filepath_or_buffer\": self.filename,\n \"delim_whitespace\": True,\n \"header\": None,\n \"names\": self.names,\n \"skiprows\": self.sr,\n \"low_memory\": False,\n }\n try: # since pandas 1.3.0\n df = self.pd.read_csv(**kwargs, on_bad_lines=\"skip\")\n except TypeError: # before pandas 1.3.0\n df = self.pd.read_csv(**kwargs, error_bad_lines=False)\n\n # drop text between stress periods; convert to numeric\n df[\"layer\"] = self.pd.to_numeric(df.layer, errors=\"coerce\")\n df.dropna(axis=0, inplace=True)\n\n # convert to proper dtypes\n for c in df.columns:\n df[c] = df[c].astype(self.dtypes.get(c, float))\n\n # add time, reachID, and reach geometry (if it exists)\n self.nstrm = self.get_nstrm(df)\n dftimes = []\n times = self.get_times()\n newper = df.segment.diff().fillna(0).values < 0\n kstpkper = times.pop(0)\n for per in newper:\n if per:\n kstpkper = times.pop(0)\n dftimes.append(kstpkper)\n df[\"kstpkper\"] = dftimes\n df[\"k\"] = df[\"layer\"] - 1\n df[\"i\"] = df[\"row\"] - 1\n df[\"j\"] = df[\"column\"] - 1\n\n if self.geoms is not None:\n geoms = self.geoms * self.nstrm\n df[\"geometry\"] = geoms\n self._df = df\n return df\n\n def _get_result(self, segment, reach):\n \"\"\"\n\n Parameters\n ----------\n segment : int or sequence of ints\n Segment number for each location.\n reach : int or sequence of ints\n Reach number for each location\n\n Returns\n -------\n\n \"\"\"\n return self.df.loc[\n (self.df.segment == segment) & (self.df.reach == reach)\n ].copy()\n\n def get_results(self, segment, reach):\n \"\"\"\n Get results for a single reach or sequence of segments and reaches.\n\n Parameters\n ----------\n segment : int or sequence of ints\n Segment number for each location.\n reach : int or sequence of ints\n Reach number for each location\n\n Returns\n -------\n results : dataframe\n Dataframe of same format as SfrFile.df, but subset to input locations.\n\n 
\"\"\"\n try:\n segment = int(segment)\n reach = int(reach)\n results = self._get_result(segment, reach)\n except:\n locsr = list(zip(segment, reach))\n results = self.pd.DataFrame()\n for s, r in locsr:\n srresults = self._get_result(s, r)\n if len(srresults) > 0:\n results = results.append(srresults)\n else:\n print(f\"No results for segment {s}, reach {r}!\")\n return results\n" ]
[ [ "numpy.concatenate", "numpy.expand_dims", "numpy.full_like" ], [ "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
orfranco/IML.HUJI
[ "29a8a98ed20f9790eceebe620b3f2b738a808ad5" ]
[ "IMLearn/learners/regressors/polynomial_fitting.py" ]
[ "from __future__ import annotations\nfrom typing import NoReturn\nfrom . import LinearRegression\nfrom ...base import BaseEstimator\nimport numpy as np\n\n\nclass PolynomialFitting(BaseEstimator):\n \"\"\"\n Polynomial Fitting using Least Squares estimation\n \"\"\"\n\n def __init__(self, k: int) -> PolynomialFitting:\n \"\"\"\n Instantiate a polynomial fitting estimator\n\n Parameters\n ----------\n k : int\n Degree of polynomial to fit\n \"\"\"\n super().__init__()\n self._linear_regressor = LinearRegression(include_intercept=False)\n self._k = k\n\n def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:\n \"\"\"\n Fit Least Squares model to polynomial transformed samples\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to fit an estimator for\n\n y : ndarray of shape (n_samples, )\n Responses of input data to fit to\n \"\"\"\n self._linear_regressor.fit(self.__transform(X), y)\n\n def _predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict responses for given samples using fitted estimator\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Input data to predict responses for\n\n Returns\n -------\n responses : ndarray of shape (n_samples, )\n Predicted responses of given samples\n \"\"\"\n return self._linear_regressor.predict(self.__transform(X))\n\n def _loss(self, X: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Evaluate performance under MSE loss function\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features)\n Test samples\n\n y : ndarray of shape (n_samples, )\n True labels of test samples\n\n Returns\n -------\n loss : float\n Performance under MSE loss function\n \"\"\"\n return self._linear_regressor.loss(self.__transform(X), y)\n\n def __transform(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Transform given input according to the univariate polynomial transformation\n\n Parameters\n ----------\n X: ndarray of shape (n_samples,)\n\n Returns\n -------\n 
transformed: ndarray of shape (n_samples, k+1)\n Vandermonde matrix of given samples up to degree k\n \"\"\"\n return np.vander(X, N=self._k+1)\n" ]
[ [ "numpy.vander" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NIRVANALAN/Centroid_GCN
[ "e93ec415d769cc3b1bbf737056097e8cbe65ded5" ]
[ "train.py" ]
[ "from cluster import * # import cluster\nimport argparse\nimport pathlib\nfrom pathlib import Path\nimport time\nimport pdb\nimport numpy as np\nimport networkx as nx\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dgl import DGLGraph\nfrom dgl.data import register_data_args, load_data\n\nfrom models import create_model\nfrom models.utils import EarlyStopping\n\n\ndef accuracy(logits, labels):\n _, indices = torch.max(logits, dim=1)\n correct = torch.sum(indices == labels)\n return correct.item() * 1.0 / len(labels)\n\n\ndef evaluate(model, features, labels, mask):\n model.eval()\n with torch.no_grad():\n logits, _ = model(features)\n # logits = model(features)\n logits = logits[mask]\n labels = labels[mask]\n _, indices = torch.max(logits, dim=1)\n correct = torch.sum(indices == labels)\n return correct.item() * 1.0 / len(labels)\n\n\ndef main(args):\n # load and preprocess dataset\n data = load_data(args)\n features = torch.FloatTensor(data.features)\n labels = torch.LongTensor(data.labels)\n np.save(f'{args.dataset}_labels', labels)\n if hasattr(torch, 'BoolTensor'):\n train_mask = torch.BoolTensor(data.train_mask)\n val_mask = torch.BoolTensor(data.val_mask)\n test_mask = torch.BoolTensor(data.test_mask)\n else:\n train_mask = torch.ByteTensor(data.train_mask)\n val_mask = torch.ByteTensor(data.val_mask)\n test_mask = torch.ByteTensor(data.test_mask)\n in_feats = features.shape[1]\n n_classes = data.num_labels\n n_edges = data.graph.number_of_edges()\n cluster_interval = args.cluster_interval\n print(\"\"\"----Data statistics------'\n #Edges %d\n #Classes %d\n #Train samples %d\n #Val samples %d\n #Test samples %d\"\"\" %\n (n_edges, n_classes,\n train_mask.int().sum().item(),\n val_mask.int().sum().item(),\n test_mask.int().sum().item()))\n\n if args.early_stop:\n stopper = EarlyStopping(patience=100)\n if args.gpu < 0:\n cuda = False\n else:\n cuda = True\n torch.cuda.set_device(args.gpu)\n features = features.cuda()\n device = 
features.device\n labels = labels.cuda()\n train_mask = train_mask.cuda()\n val_mask = val_mask.cuda()\n test_mask = test_mask.cuda()\n\n # graph preprocess and calculate normalization factor\n g = data.graph\n # add self loop\n if not args.no_self_loop:\n print('add self-loop')\n g.remove_edges_from(nx.selfloop_edges(g))\n g.add_edges_from(zip(g.nodes(), g.nodes()))\n g = DGLGraph(g)\n n_edges = g.number_of_edges()\n # normalization\n degs = g.in_degrees().float()\n norm = torch.pow(degs, -0.5)\n norm[torch.isinf(norm)] = 0\n if cuda:\n norm = norm.cuda()\n g.ndata['norm'] = norm.unsqueeze(1)\n\n # # create GCN model\n heads = ([args.num_heads] * args.num_layers) + [args.num_out_heads]\n model = create_model(args.arch, g,\n num_layers=args.num_layers,\n in_dim=in_feats,\n num_hidden=args.num_hidden,\n num_classes=n_classes,\n heads=heads,\n activation=F.elu,\n feat_drop=args.in_drop,\n attn_drop=args.attn_drop,\n negative_slope=args.negative_slope,\n residual=args.residual)\n\n print(model)\n if cuda:\n model.cuda()\n loss_fcn = torch.nn.CrossEntropyLoss()\n\n # use optimizer\n optimizer = torch.optim.Adam(model.parameters(),\n lr=args.lr,\n weight_decay=args.weight_decay)\n\n # Step 1. 
initilization with GCN\n # init graph feat\n dur = []\n centroid_emb, hidden_emb, cluster_ids = [], [], []\n att = []\n for epoch in range(args.epochs):\n model.train()\n if epoch >= 3:\n t0 = time.time()\n # cluster\n # forward\n if epoch < args.init_feat_epoch:\n # logits = model(features)\n logits, hidden_h = model(features)\n else:\n if epoch == args.init_feat_epoch or epoch % cluster_interval == 0:\n cluster_ids_x, cluster_centers = cluster(\n X=hidden_h.detach(), num_clusters=args.cluster_number, distance='cosine', method=args.cluster_method) # TODO: fix zero norm embedding\n centroid_emb.append(cluster_centers.detach().cpu().numpy())\n hidden_emb.append(hidden_h.detach().cpu().numpy())\n cluster_ids.append(cluster_ids_x.detach().cpu().numpy())\n pass\n logits, hidden_h = model(\n features, cluster_ids_x, cluster_centers, att)\n # logits, hidden_h = model(features)\n loss = loss_fcn(logits[train_mask], labels[train_mask])\n\n optimizer.zero_grad()\n # loss.backward(retain_graph=True)\n loss.backward(retain_graph=False)\n optimizer.step()\n\n if epoch >= 3:\n dur.append(time.time() - t0)\n if args.fastmode:\n val_acc = accuracy(logits[val_mask], labels[val_mask])\n else:\n val_acc = evaluate(model, features, labels, val_mask)\n if args.early_stop:\n if stopper.step(val_acc, model):\n break\n # acc = evaluate(model, features, labels, val_mask)\n print(\"Epoch {:05d} | Time(s) {:.4f} | Loss {:.4f} | Accuracy {:.4f} | \"\n \"ETputs(KTEPS) {:.2f}\". 
format(epoch, np.mean(dur), loss.item(),\n val_acc, n_edges / np.mean(dur) / 1000))\n\n print()\n acc = evaluate(model, features, labels, test_mask)\n print(\"Test accuracy {:.2%}\".format(acc))\n prefix = 'embedding'\n np.save(Path(prefix, f'{args.dataset}_centroid_emb'),\n np.array(centroid_emb))\n np.save(Path(prefix, f'{args.dataset}_hidden_emb'), np.array(hidden_emb))\n np.save(Path(prefix, f'{args.dataset}_att'), np.array(att))\n np.save(Path(prefix, f'{args.dataset}_cluster_ids'), np.array(cluster_ids))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='GCN')\n register_data_args(parser)\n # parser.add_argument(\"--dropout\", type=float, default=0.5,\n # help=\"dropout probability\")\n parser.add_argument(\"--gpu\", type=int, default=0,\n help=\"gpu\")\n parser.add_argument(\"--no-self-loop\", action='store_true', # !MUST IN GAT\n help=\"graph self-loop (default=False)\")\n # cluster\n parser.add_argument(\"--cluster_method\", type=str, default='kmeans',\n help=\"Cluster method, default=kmeans\")\n parser.add_argument(\"--cluster-interval\", type=int, default=25,\n help=\"interval of calculating cluster centroid\")\n parser.add_argument(\"--cluster-number\", type=int, default=6,\n help=\"interval of calculating cluster centroid\")\n\n # attention\n parser.add_argument(\"--epochs\", type=int, default=200,\n help=\"number of training epochs\")\n parser.add_argument(\"--num-heads\", type=int, default=8,\n help=\"number of hidden attention heads\")\n parser.add_argument(\"--num-out-heads\", type=int, default=1,\n help=\"number of output attention heads\")\n parser.add_argument(\"--num-layers\", type=int, default=1,\n help=\"number of hidden layers\")\n parser.add_argument(\"--num-hidden\", type=int, default=8,\n help=\"number of hidden units\")\n parser.add_argument(\"--residual\", action=\"store_true\", default=False,\n help=\"use residual connection\")\n parser.add_argument(\"--in-drop\", type=float, default=.6,\n help=\"input 
feature dropout\")\n parser.add_argument(\"--attn-drop\", type=float, default=.6,\n help=\"attention dropout\")\n parser.add_argument(\"--lr\", type=float, default=0.005,\n help=\"learning rate\")\n parser.add_argument('--weight-decay', type=float, default=5e-4,\n help=\"weight decay\")\n parser.add_argument('--negative-slope', type=float, default=0.2,\n help=\"the negative slope of leaky relu\")\n parser.add_argument('--early-stop', action='store_true', default=False,\n help=\"indicates whether to use early stop or not\")\n parser.add_argument('--fastmode', action=\"store_true\", default=False,\n help=\"skip re-evaluate the validation set\")\n\n parser.add_argument(\"--init-feat-epoch\", type=int, default=25,\n help=\"stage 1 training epoch number\")\n # MODEL\n parser.add_argument(\"--arch\", type=str, default='gcn',\n help='arch of gcn model, default: gcn')\n # parser.add_argument(\"--num-classes\", type=int, default=1500,\n # help=\"Number of clusters, for Reddit 1500 by default\")\n # parser.add_argument(\"--batch_size\", type=int, default=5000,\n # help=\"Batch size\")\n args = parser.parse_args()\n print(args)\n\n main(args)\n" ]
[ [ "torch.BoolTensor", "torch.ByteTensor", "torch.LongTensor", "torch.nn.CrossEntropyLoss", "torch.isinf", "torch.max", "torch.cuda.set_device", "torch.sum", "numpy.save", "torch.FloatTensor", "torch.no_grad", "numpy.mean", "numpy.array", "torch.pow" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SimuJenni/SemDefNet
[ "de64e661f7103df6550b47c1429896dab61d001c" ]
[ "models/SDNet_avgDisc_concat_new_2.py" ]
[ "import tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom AlexNet_avgDisc_concat_noBN_2 import AlexNet\nfrom layers_new import up_conv2d, pixel_dropout, res_block_bottleneck\nfrom utils import montage_tf\n\n\nDEFAULT_FILTER_DIMS = [64, 128, 256, 512, 1024]\n\n\ndef toon_net_argscope(activation=tf.nn.elu, kernel_size=(3, 3), padding='SAME', training=True, center=True,\n w_reg=0.0001, fix_bn=False):\n \"\"\"Defines default parameter values for all the layers used in ToonNet.\n\n Args:\n activation: The default activation function\n kernel_size: The default kernel size for convolution layers\n padding: The default border mode\n training: Whether in train or eval mode\n center: Whether to use centering in batchnorm\n w_reg: Parameter for weight-decay\n\n Returns:\n An argscope\n \"\"\"\n train_bn = training and not fix_bn\n batch_norm_params = {\n 'is_training': train_bn,\n 'decay': 0.99,\n 'epsilon': 0.001,\n 'center': center,\n 'fused': True\n }\n he = tf.contrib.layers.variance_scaling_initializer(mode='FAN_AVG')\n with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.convolution2d_transpose],\n activation_fn=activation,\n normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params,\n weights_regularizer=slim.l2_regularizer(w_reg),\n biases_regularizer=slim.l2_regularizer(w_reg),\n weights_initializer=he):\n with slim.arg_scope([slim.conv2d, slim.convolution2d_transpose],\n kernel_size=kernel_size,\n padding=padding):\n with slim.arg_scope([slim.batch_norm], **batch_norm_params):\n with slim.arg_scope([slim.dropout], is_training=training) as arg_sc:\n return arg_sc\n\n\nclass SDNet:\n def __init__(self, num_layers, batch_size, target_shape, num_res=1, pool5=True, tag='default', fix_bn=False,\n disc_pad='VALID'):\n \"\"\"Initialises a ToonNet using the provided parameters.\n\n Args:\n num_layers: The number of convolutional down/upsampling layers to be used.\n batch_size: The batch-size used during training (used to generate training 
labels)\n \"\"\"\n self.name = 'SDNet_avgDisc_concat_new2_res{}_{}'.format(num_res, tag)\n self.num_layers = num_layers\n self.batch_size = batch_size\n self.im_shape = target_shape\n self.num_res_layers = num_res\n self.discriminator = AlexNet(fix_bn=fix_bn, pool5=pool5, pad=disc_pad)\n self.dec_im = self.dec_drop = self.disc_out = self.drop_pred = self.drop_label = self.rec_weights = None\n\n def net(self, imgs, reuse=None, train=True):\n \"\"\"Builds the full ToonNet architecture with the given inputs.\n\n Args:\n imgs: Placeholder for input images\n reuse: Whether to reuse already defined variables.\n train: Whether in train or eval mode\n\n Returns:\n dec_im: The autoencoded image\n dec_gen: The reconstructed image from cartoon and edge inputs\n disc_out: The discriminator output\n enc_im: Encoding of the image\n gen_enc: Output of the generator\n \"\"\"\n # Concatenate cartoon and edge for input to generator\n enc_im = self.encoder(imgs, reuse=reuse, training=train)\n pixel_drop, drop_mask = pixel_dropout(enc_im, 0.5)\n self.rec_weights = tf.image.resize_nearest_neighbor(drop_mask, self.im_shape[:2])\n drop_label_fake = slim.flatten(drop_mask)\n drop_label_real = tf.ones_like(drop_label_fake)\n self.drop_label = tf.concat([drop_label_real, drop_label_fake], 0)\n tf.summary.image('imgs/drop_mask', montage_tf(drop_mask, 2, 8), max_outputs=1)\n\n # Decode both encoded images and generator output using the same decoder\n self.dec_im = self.decoder(enc_im, reuse=reuse, training=train)\n self.dec_drop = self.generator(pixel_drop, drop_mask, reuse=reuse, training=train)\n\n # Build input for discriminator (discriminator tries to guess order of real/fake)\n disc_in = tf.concat([self.dec_im, self.dec_drop], 0)\n self.disc_out, self.drop_pred, __ = self.discriminator.discriminate(disc_in, reuse=reuse, training=train)\n\n return self.dec_im, self.dec_drop\n\n def labels_real(self):\n labels = tf.concat([tf.ones((self.batch_size,), dtype=tf.int64), 
tf.zeros((self.batch_size,), dtype=tf.int64)], 0)\n return tf.one_hot(labels, 2)\n\n def labels_fake(self):\n labels = tf.concat([tf.zeros((self.batch_size,), dtype=tf.int64), tf.ones((self.batch_size,), dtype=tf.int64)], 0)\n return tf.one_hot(labels, 2)\n\n def classifier(self, img, num_classes, reuse=None, training=True):\n \"\"\"Builds a classifier on top either the encoder, generator or discriminator trained in the AEGAN.\n\n Args:\n img: Input image\n num_classes: Number of output classes\n reuse: Whether to reuse already defined variables.\n training: Whether in train or eval mode\n\n Returns:\n Output logits from the classifier\n \"\"\"\n _, _, model = self.discriminator.discriminate(img, reuse=reuse, training=training, with_fc=False)\n model = self.discriminator.classify(model, num_classes, reuse=reuse, training=training)\n return model\n\n def generator(self, net, drop_mask, reuse=None, training=True):\n \"\"\"Builds a generator with the given inputs. Noise is induced in all convolutional layers.\n\n Args:\n net: Input to the generator (i.e. 
cartooned image and/or edge-map)\n reuse: Whether to reuse already defined variables\n training: Whether in train or eval mode.\n\n Returns:\n Encoding of the input.\n \"\"\"\n f_dims = DEFAULT_FILTER_DIMS\n res_dim = DEFAULT_FILTER_DIMS[self.num_layers - 1]\n with tf.variable_scope('generator', reuse=reuse):\n with slim.arg_scope(toon_net_argscope(padding='SAME', training=training)):\n net_in = net\n for i in range(self.num_res_layers):\n net = res_block_bottleneck(net, res_dim, res_dim / 4, noise_channels=32, scope='res_{}'.format(i))\n net = net_in + (1.0-drop_mask)*net\n for l in range(0, self.num_layers - 1):\n net = up_conv2d(net, num_outputs=f_dims[self.num_layers - l - 2], scope='deconv_{}'.format(l))\n net = up_conv2d(net, num_outputs=32, scope='deconv_{}'.format(self.num_layers))\n\n net = slim.conv2d(net, num_outputs=3, scope='deconv_{}'.format(self.num_layers + 1), stride=1,\n activation_fn=tf.nn.tanh, normalizer_fn=None)\n return net\n\n def encoder(self, net, reuse=None, training=True):\n \"\"\"Builds an encoder of the given inputs.\n\n Args:\n net: Input to the encoder (image)\n reuse: Whether to reuse already defined variables\n training: Whether in train or eval mode.\n\n Returns:\n Encoding of the input image.\n \"\"\"\n f_dims = DEFAULT_FILTER_DIMS\n with tf.variable_scope('encoder', reuse=reuse):\n with slim.arg_scope(toon_net_argscope(padding='SAME', training=training)):\n net = slim.conv2d(net, num_outputs=32, stride=1, scope='conv_0')\n for l in range(0, self.num_layers):\n net = slim.conv2d(net, num_outputs=f_dims[l], stride=2, scope='conv_{}'.format(l + 1))\n\n return net\n\n def decoder(self, net, reuse=None, training=True):\n \"\"\"Builds a decoder on top of net.\n\n Args:\n net: Input to the decoder (output of encoder)\n reuse: Whether to reuse already defined variables\n training: Whether in train or eval mode.\n\n Returns:\n Decoded image with 3 channels.\n \"\"\"\n f_dims = DEFAULT_FILTER_DIMS\n with tf.variable_scope('decoder', 
reuse=reuse):\n with slim.arg_scope(toon_net_argscope(padding='SAME', training=training)):\n for l in range(0, self.num_layers - 1):\n net = up_conv2d(net, num_outputs=f_dims[self.num_layers - l - 2], scope='deconv_{}'.format(l))\n net = up_conv2d(net, num_outputs=32, scope='deconv_{}'.format(self.num_layers))\n\n net = slim.conv2d(net, num_outputs=3, scope='deconv_{}'.format(self.num_layers + 1), stride=1,\n activation_fn=tf.nn.tanh, normalizer_fn=None)\n return net\n\n def ae_loss(self, imgs_train):\n # Define the losses for AE training\n ae_loss_scope = 'ae_loss'\n ae_loss = tf.losses.mean_squared_error(imgs_train, self.dec_im, scope=ae_loss_scope, weights=30.0)\n tf.summary.scalar('losses/autoencoder_mse', ae_loss)\n losses_ae = tf.losses.get_losses(ae_loss_scope)\n losses_ae += tf.losses.get_regularization_losses(ae_loss_scope)\n ae_total_loss = tf.add_n(losses_ae, name='ae_total_loss')\n return ae_total_loss\n\n def generator_loss(self, imgs_train):\n # Define the losses for generator training\n gen_loss_scope = 'gen_loss'\n fake_loss = tf.losses.softmax_cross_entropy(self.labels_fake(), self.disc_out, scope=gen_loss_scope,\n weights=1.0)\n tf.summary.scalar('losses/generator', fake_loss)\n ae_loss = tf.losses.mean_squared_error(imgs_train, self.dec_drop, scope=gen_loss_scope,\n weights=30.0*self.rec_weights)\n tf.summary.scalar('losses/generator_mse', ae_loss)\n drop_pred_loss = tf.losses.sigmoid_cross_entropy(tf.ones_like(self.drop_label), self.drop_pred,\n scope=gen_loss_scope, weights=0.1)\n tf.summary.scalar('losses/generator_drop', drop_pred_loss)\n losses_gen = tf.losses.get_losses(gen_loss_scope)\n losses_gen += tf.losses.get_regularization_losses(gen_loss_scope)\n gen_loss = tf.add_n(losses_gen, name='gen_total_loss')\n return gen_loss\n\n def discriminator_loss(self):\n # Define loss for discriminator training\n disc_loss_scope = 'disc_loss'\n real_loss = tf.losses.softmax_cross_entropy(self.labels_real(), self.disc_out, scope=disc_loss_scope,\n 
weights=1.0)\n tf.summary.scalar('losses/discriminator', real_loss)\n drop_pred_loss = tf.losses.sigmoid_cross_entropy(self.drop_label, self.drop_pred, scope=disc_loss_scope,\n weights=0.1)\n tf.summary.scalar('losses/discriminator_drop', drop_pred_loss)\n losses_disc = tf.losses.get_losses(disc_loss_scope)\n losses_disc += tf.losses.get_regularization_losses(disc_loss_scope)\n disc_total_loss = tf.add_n(losses_disc, name='disc_total_loss')\n\n real_pred = tf.arg_max(self.disc_out, 1)\n real_true = tf.arg_max(self.labels_real(), 1)\n tf.summary.scalar('accuracy/discriminator', slim.metrics.accuracy(real_pred, real_true))\n\n return disc_total_loss\n" ]
[ [ "tensorflow.concat", "tensorflow.contrib.slim.metrics.accuracy", "tensorflow.zeros", "tensorflow.contrib.slim.l2_regularizer", "tensorflow.contrib.slim.flatten", "tensorflow.add_n", "tensorflow.summary.scalar", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.losses.get_losses", "tensorflow.image.resize_nearest_neighbor", "tensorflow.contrib.slim.arg_scope", "tensorflow.one_hot", "tensorflow.losses.get_regularization_losses", "tensorflow.losses.mean_squared_error", "tensorflow.arg_max", "tensorflow.losses.sigmoid_cross_entropy", "tensorflow.ones_like", "tensorflow.ones", "tensorflow.contrib.slim.conv2d", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
ndb796/vision
[ "8a820172393b1a1c06411f359dba7a305fd9a9b7" ]
[ "test/test_datasets.py" ]
[ "import contextlib\nimport sys\nimport os\nimport unittest\nfrom unittest import mock\nimport numpy as np\nimport PIL\nfrom PIL import Image\nfrom torch._utils_internal import get_file_path_2\nimport torchvision\nfrom torchvision.datasets import utils\nfrom common_utils import get_tmp_dir\nfrom fakedata_generation import mnist_root, imagenet_root, \\\n cityscapes_root, svhn_root, places365_root, widerface_root, stl10_root\nimport xml.etree.ElementTree as ET\nfrom urllib.request import Request, urlopen\nimport itertools\nimport datasets_utils\nimport pathlib\nimport pickle\nfrom torchvision import datasets\nimport torch\nimport shutil\nimport json\nimport random\nimport bz2\nimport torch.nn.functional as F\nimport string\nimport io\nimport zipfile\n\n\ntry:\n import scipy\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\ntry:\n import av\n HAS_PYAV = True\nexcept ImportError:\n HAS_PYAV = False\n\n\nclass DatasetTestcase(unittest.TestCase):\n def generic_classification_dataset_test(self, dataset, num_images=1):\n self.assertEqual(len(dataset), num_images)\n img, target = dataset[0]\n self.assertTrue(isinstance(img, PIL.Image.Image))\n self.assertTrue(isinstance(target, int))\n\n def generic_segmentation_dataset_test(self, dataset, num_images=1):\n self.assertEqual(len(dataset), num_images)\n img, target = dataset[0]\n self.assertTrue(isinstance(img, PIL.Image.Image))\n self.assertTrue(isinstance(target, PIL.Image.Image))\n\n\nclass Tester(DatasetTestcase):\n def test_imagefolder(self):\n # TODO: create the fake data on-the-fly\n FAKEDATA_DIR = get_file_path_2(\n os.path.dirname(os.path.abspath(__file__)), 'assets', 'fakedata')\n\n with get_tmp_dir(src=os.path.join(FAKEDATA_DIR, 'imagefolder')) as root:\n classes = sorted(['a', 'b'])\n class_a_image_files = [\n os.path.join(root, 'a', file) for file in ('a1.png', 'a2.png', 'a3.png')\n ]\n class_b_image_files = [\n os.path.join(root, 'b', file) for file in ('b1.png', 'b2.png', 'b3.png', 'b4.png')\n ]\n 
dataset = torchvision.datasets.ImageFolder(root, loader=lambda x: x)\n\n # test if all classes are present\n self.assertEqual(classes, sorted(dataset.classes))\n\n # test if combination of classes and class_to_index functions correctly\n for cls in classes:\n self.assertEqual(cls, dataset.classes[dataset.class_to_idx[cls]])\n\n # test if all images were detected correctly\n class_a_idx = dataset.class_to_idx['a']\n class_b_idx = dataset.class_to_idx['b']\n imgs_a = [(img_file, class_a_idx) for img_file in class_a_image_files]\n imgs_b = [(img_file, class_b_idx) for img_file in class_b_image_files]\n imgs = sorted(imgs_a + imgs_b)\n self.assertEqual(imgs, dataset.imgs)\n\n # test if the datasets outputs all images correctly\n outputs = sorted([dataset[i] for i in range(len(dataset))])\n self.assertEqual(imgs, outputs)\n\n # redo all tests with specified valid image files\n dataset = torchvision.datasets.ImageFolder(\n root, loader=lambda x: x, is_valid_file=lambda x: '3' in x)\n self.assertEqual(classes, sorted(dataset.classes))\n\n class_a_idx = dataset.class_to_idx['a']\n class_b_idx = dataset.class_to_idx['b']\n imgs_a = [(img_file, class_a_idx) for img_file in class_a_image_files\n if '3' in img_file]\n imgs_b = [(img_file, class_b_idx) for img_file in class_b_image_files\n if '3' in img_file]\n imgs = sorted(imgs_a + imgs_b)\n self.assertEqual(imgs, dataset.imgs)\n\n outputs = sorted([dataset[i] for i in range(len(dataset))])\n self.assertEqual(imgs, outputs)\n\n def test_imagefolder_empty(self):\n with get_tmp_dir() as root:\n with self.assertRaises(RuntimeError):\n torchvision.datasets.ImageFolder(root, loader=lambda x: x)\n\n with self.assertRaises(RuntimeError):\n torchvision.datasets.ImageFolder(\n root, loader=lambda x: x, is_valid_file=lambda x: False\n )\n\n @mock.patch('torchvision.datasets.mnist.download_and_extract_archive')\n def test_mnist(self, mock_download_extract):\n num_examples = 30\n with mnist_root(num_examples, \"MNIST\") as root:\n 
dataset = torchvision.datasets.MNIST(root, download=True)\n self.generic_classification_dataset_test(dataset, num_images=num_examples)\n img, target = dataset[0]\n self.assertEqual(dataset.class_to_idx[dataset.classes[0]], target)\n\n @mock.patch('torchvision.datasets.mnist.download_and_extract_archive')\n def test_kmnist(self, mock_download_extract):\n num_examples = 30\n with mnist_root(num_examples, \"KMNIST\") as root:\n dataset = torchvision.datasets.KMNIST(root, download=True)\n self.generic_classification_dataset_test(dataset, num_images=num_examples)\n img, target = dataset[0]\n self.assertEqual(dataset.class_to_idx[dataset.classes[0]], target)\n\n @mock.patch('torchvision.datasets.mnist.download_and_extract_archive')\n def test_fashionmnist(self, mock_download_extract):\n num_examples = 30\n with mnist_root(num_examples, \"FashionMNIST\") as root:\n dataset = torchvision.datasets.FashionMNIST(root, download=True)\n self.generic_classification_dataset_test(dataset, num_images=num_examples)\n img, target = dataset[0]\n self.assertEqual(dataset.class_to_idx[dataset.classes[0]], target)\n\n @mock.patch('torchvision.datasets.imagenet._verify_archive')\n @unittest.skipIf(not HAS_SCIPY, \"scipy unavailable\")\n def test_imagenet(self, mock_verify):\n with imagenet_root() as root:\n dataset = torchvision.datasets.ImageNet(root, split='train')\n self.generic_classification_dataset_test(dataset)\n\n dataset = torchvision.datasets.ImageNet(root, split='val')\n self.generic_classification_dataset_test(dataset)\n\n @mock.patch('torchvision.datasets.WIDERFace._check_integrity')\n @unittest.skipIf('win' in sys.platform, 'temporarily disabled on Windows')\n def test_widerface(self, mock_check_integrity):\n mock_check_integrity.return_value = True\n with widerface_root() as root:\n dataset = torchvision.datasets.WIDERFace(root, split='train')\n self.assertEqual(len(dataset), 1)\n img, target = dataset[0]\n self.assertTrue(isinstance(img, PIL.Image.Image))\n\n dataset = 
torchvision.datasets.WIDERFace(root, split='val')\n self.assertEqual(len(dataset), 1)\n img, target = dataset[0]\n self.assertTrue(isinstance(img, PIL.Image.Image))\n\n dataset = torchvision.datasets.WIDERFace(root, split='test')\n self.assertEqual(len(dataset), 1)\n img, target = dataset[0]\n self.assertTrue(isinstance(img, PIL.Image.Image))\n\n @unittest.skipIf('win' in sys.platform, 'temporarily disabled on Windows')\n def test_cityscapes(self):\n with cityscapes_root() as root:\n\n for mode in ['coarse', 'fine']:\n\n if mode == 'coarse':\n splits = ['train', 'train_extra', 'val']\n else:\n splits = ['train', 'val', 'test']\n\n for split in splits:\n for target_type in ['semantic', 'instance']:\n dataset = torchvision.datasets.Cityscapes(\n root, split=split, target_type=target_type, mode=mode)\n self.generic_segmentation_dataset_test(dataset, num_images=2)\n\n color_dataset = torchvision.datasets.Cityscapes(\n root, split=split, target_type='color', mode=mode)\n color_img, color_target = color_dataset[0]\n self.assertTrue(isinstance(color_img, PIL.Image.Image))\n self.assertTrue(np.array(color_target).shape[2] == 4)\n\n polygon_dataset = torchvision.datasets.Cityscapes(\n root, split=split, target_type='polygon', mode=mode)\n polygon_img, polygon_target = polygon_dataset[0]\n self.assertTrue(isinstance(polygon_img, PIL.Image.Image))\n self.assertTrue(isinstance(polygon_target, dict))\n self.assertTrue(isinstance(polygon_target['imgHeight'], int))\n self.assertTrue(isinstance(polygon_target['objects'], list))\n\n # Test multiple target types\n targets_combo = ['semantic', 'polygon', 'color']\n multiple_types_dataset = torchvision.datasets.Cityscapes(\n root, split=split, target_type=targets_combo, mode=mode)\n output = multiple_types_dataset[0]\n self.assertTrue(isinstance(output, tuple))\n self.assertTrue(len(output) == 2)\n self.assertTrue(isinstance(output[0], PIL.Image.Image))\n self.assertTrue(isinstance(output[1], tuple))\n self.assertTrue(len(output[1]) 
== 3)\n self.assertTrue(isinstance(output[1][0], PIL.Image.Image)) # semantic\n self.assertTrue(isinstance(output[1][1], dict)) # polygon\n self.assertTrue(isinstance(output[1][2], PIL.Image.Image)) # color\n\n @mock.patch('torchvision.datasets.SVHN._check_integrity')\n @unittest.skipIf(not HAS_SCIPY, \"scipy unavailable\")\n def test_svhn(self, mock_check):\n mock_check.return_value = True\n with svhn_root() as root:\n dataset = torchvision.datasets.SVHN(root, split=\"train\")\n self.generic_classification_dataset_test(dataset, num_images=2)\n\n dataset = torchvision.datasets.SVHN(root, split=\"test\")\n self.generic_classification_dataset_test(dataset, num_images=2)\n\n dataset = torchvision.datasets.SVHN(root, split=\"extra\")\n self.generic_classification_dataset_test(dataset, num_images=2)\n\n def test_places365(self):\n for split, small in itertools.product((\"train-standard\", \"train-challenge\", \"val\"), (False, True)):\n with places365_root(split=split, small=small) as places365:\n root, data = places365\n\n dataset = torchvision.datasets.Places365(root, split=split, small=small, download=True)\n self.generic_classification_dataset_test(dataset, num_images=len(data[\"imgs\"]))\n\n def test_places365_transforms(self):\n expected_image = \"image\"\n expected_target = \"target\"\n\n def transform(image):\n return expected_image\n\n def target_transform(target):\n return expected_target\n\n with places365_root() as places365:\n root, data = places365\n\n dataset = torchvision.datasets.Places365(\n root, transform=transform, target_transform=target_transform, download=True\n )\n actual_image, actual_target = dataset[0]\n\n self.assertEqual(actual_image, expected_image)\n self.assertEqual(actual_target, expected_target)\n\n def test_places365_devkit_download(self):\n for split in (\"train-standard\", \"train-challenge\", \"val\"):\n with self.subTest(split=split):\n with places365_root(split=split) as places365:\n root, data = places365\n\n dataset = 
torchvision.datasets.Places365(root, split=split, download=True)\n\n with self.subTest(\"classes\"):\n self.assertSequenceEqual(dataset.classes, data[\"classes\"])\n\n with self.subTest(\"class_to_idx\"):\n self.assertDictEqual(dataset.class_to_idx, data[\"class_to_idx\"])\n\n with self.subTest(\"imgs\"):\n self.assertSequenceEqual(dataset.imgs, data[\"imgs\"])\n\n def test_places365_devkit_no_download(self):\n for split in (\"train-standard\", \"train-challenge\", \"val\"):\n with self.subTest(split=split):\n with places365_root(split=split) as places365:\n root, data = places365\n\n with self.assertRaises(RuntimeError):\n torchvision.datasets.Places365(root, split=split, download=False)\n\n def test_places365_images_download(self):\n for split, small in itertools.product((\"train-standard\", \"train-challenge\", \"val\"), (False, True)):\n with self.subTest(split=split, small=small):\n with places365_root(split=split, small=small) as places365:\n root, data = places365\n\n dataset = torchvision.datasets.Places365(root, split=split, small=small, download=True)\n\n assert all(os.path.exists(item[0]) for item in dataset.imgs)\n\n def test_places365_images_download_preexisting(self):\n split = \"train-standard\"\n small = False\n images_dir = \"data_large_standard\"\n\n with places365_root(split=split, small=small) as places365:\n root, data = places365\n os.mkdir(os.path.join(root, images_dir))\n\n with self.assertRaises(RuntimeError):\n torchvision.datasets.Places365(root, split=split, small=small, download=True)\n\n def test_places365_repr_smoke(self):\n with places365_root() as places365:\n root, data = places365\n\n dataset = torchvision.datasets.Places365(root, download=True)\n self.assertIsInstance(repr(dataset), str)\n\n\nclass STL10Tester(DatasetTestcase):\n @contextlib.contextmanager\n def mocked_root(self):\n with stl10_root() as (root, data):\n yield root, data\n\n @contextlib.contextmanager\n def mocked_dataset(self, pre_extract=False, download=True, 
**kwargs):\n with self.mocked_root() as (root, data):\n if pre_extract:\n utils.extract_archive(os.path.join(root, data[\"archive\"]))\n dataset = torchvision.datasets.STL10(root, download=download, **kwargs)\n yield dataset, data\n\n def test_not_found(self):\n with self.assertRaises(RuntimeError):\n with self.mocked_dataset(download=False):\n pass\n\n def test_splits(self):\n for split in ('train', 'train+unlabeled', 'unlabeled', 'test'):\n with self.mocked_dataset(split=split) as (dataset, data):\n num_images = sum([data[\"num_images_in_split\"][part] for part in split.split(\"+\")])\n self.generic_classification_dataset_test(dataset, num_images=num_images)\n\n def test_folds(self):\n for fold in range(10):\n with self.mocked_dataset(split=\"train\", folds=fold) as (dataset, data):\n num_images = data[\"num_images_in_folds\"][fold]\n self.assertEqual(len(dataset), num_images)\n\n def test_invalid_folds1(self):\n with self.assertRaises(ValueError):\n with self.mocked_dataset(folds=10):\n pass\n\n def test_invalid_folds2(self):\n with self.assertRaises(ValueError):\n with self.mocked_dataset(folds=\"0\"):\n pass\n\n def test_transforms(self):\n expected_image = \"image\"\n expected_target = \"target\"\n\n def transform(image):\n return expected_image\n\n def target_transform(target):\n return expected_target\n\n with self.mocked_dataset(transform=transform, target_transform=target_transform) as (dataset, _):\n actual_image, actual_target = dataset[0]\n\n self.assertEqual(actual_image, expected_image)\n self.assertEqual(actual_target, expected_target)\n\n def test_unlabeled(self):\n with self.mocked_dataset(split=\"unlabeled\") as (dataset, _):\n labels = [dataset[idx][1] for idx in range(len(dataset))]\n self.assertTrue(all([label == -1 for label in labels]))\n\n @unittest.mock.patch(\"torchvision.datasets.stl10.download_and_extract_archive\")\n def test_download_preexisting(self, mock):\n with self.mocked_dataset(pre_extract=True) as (dataset, data):\n 
mock.assert_not_called()\n\n def test_repr_smoke(self):\n with self.mocked_dataset() as (dataset, _):\n self.assertIsInstance(repr(dataset), str)\n\n\nclass Caltech101TestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.Caltech101\n FEATURE_TYPES = (PIL.Image.Image, (int, np.ndarray, tuple))\n\n CONFIGS = datasets_utils.combinations_grid(target_type=(\"category\", \"annotation\", [\"category\", \"annotation\"]))\n REQUIRED_PACKAGES = (\"scipy\",)\n\n def inject_fake_data(self, tmpdir, config):\n root = pathlib.Path(tmpdir) / \"caltech101\"\n images = root / \"101_ObjectCategories\"\n annotations = root / \"Annotations\"\n\n categories = ((\"Faces\", \"Faces_2\"), (\"helicopter\", \"helicopter\"), (\"ying_yang\", \"ying_yang\"))\n num_images_per_category = 2\n\n for image_category, annotation_category in categories:\n datasets_utils.create_image_folder(\n root=images,\n name=image_category,\n file_name_fn=lambda idx: f\"image_{idx + 1:04d}.jpg\",\n num_examples=num_images_per_category,\n )\n self._create_annotation_folder(\n root=annotations,\n name=annotation_category,\n file_name_fn=lambda idx: f\"annotation_{idx + 1:04d}.mat\",\n num_examples=num_images_per_category,\n )\n\n # This is included in the original archive, but is removed by the dataset. 
Thus, an empty directory suffices.\n os.makedirs(images / \"BACKGROUND_Google\")\n\n return num_images_per_category * len(categories)\n\n def _create_annotation_folder(self, root, name, file_name_fn, num_examples):\n root = pathlib.Path(root) / name\n os.makedirs(root)\n\n for idx in range(num_examples):\n self._create_annotation_file(root, file_name_fn(idx))\n\n def _create_annotation_file(self, root, name):\n mdict = dict(obj_contour=torch.rand((2, torch.randint(3, 6, size=())), dtype=torch.float64).numpy())\n datasets_utils.lazy_importer.scipy.io.savemat(str(pathlib.Path(root) / name), mdict)\n\n def test_combined_targets(self):\n target_types = [\"category\", \"annotation\"]\n\n individual_targets = []\n for target_type in target_types:\n with self.create_dataset(target_type=target_type) as (dataset, _):\n _, target = dataset[0]\n individual_targets.append(target)\n\n with self.create_dataset(target_type=target_types) as (dataset, _):\n _, combined_targets = dataset[0]\n\n actual = len(individual_targets)\n expected = len(combined_targets)\n self.assertEqual(\n actual,\n expected,\n f\"The number of the returned combined targets does not match the the number targets if requested \"\n f\"individually: {actual} != {expected}\",\n )\n\n for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets):\n with self.subTest(target_type=target_type):\n actual = type(combined_target)\n expected = type(individual_target)\n self.assertIs(\n actual,\n expected,\n f\"Type of the combined target does not match the type of the corresponding individual target: \"\n f\"{actual} is not {expected}\",\n )\n\n\nclass Caltech256TestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.Caltech256\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir) / \"caltech256\" / \"256_ObjectCategories\"\n\n categories = ((1, \"ak47\"), (127, \"laptop-101\"), (257, \"clutter\"))\n num_images_per_category = 
2\n\n for idx, category in categories:\n datasets_utils.create_image_folder(\n tmpdir,\n name=f\"{idx:03d}.{category}\",\n file_name_fn=lambda image_idx: f\"{idx:03d}_{image_idx + 1:04d}.jpg\",\n num_examples=num_images_per_category,\n )\n\n return num_images_per_category * len(categories)\n\n\nclass CIFAR10TestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.CIFAR10\n CONFIGS = datasets_utils.combinations_grid(train=(True, False))\n\n _VERSION_CONFIG = dict(\n base_folder=\"cifar-10-batches-py\",\n train_files=tuple(f\"data_batch_{idx}\" for idx in range(1, 6)),\n test_files=(\"test_batch\",),\n labels_key=\"labels\",\n meta_file=\"batches.meta\",\n num_categories=10,\n categories_key=\"label_names\",\n )\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir) / self._VERSION_CONFIG[\"base_folder\"]\n os.makedirs(tmpdir)\n\n num_images_per_file = 1\n for name in itertools.chain(self._VERSION_CONFIG[\"train_files\"], self._VERSION_CONFIG[\"test_files\"]):\n self._create_batch_file(tmpdir, name, num_images_per_file)\n\n categories = self._create_meta_file(tmpdir)\n\n return dict(\n num_examples=num_images_per_file\n * len(self._VERSION_CONFIG[\"train_files\"] if config[\"train\"] else self._VERSION_CONFIG[\"test_files\"]),\n categories=categories,\n )\n\n def _create_batch_file(self, root, name, num_images):\n data = datasets_utils.create_image_or_video_tensor((num_images, 32 * 32 * 3))\n labels = np.random.randint(0, self._VERSION_CONFIG[\"num_categories\"], size=num_images).tolist()\n self._create_binary_file(root, name, {\"data\": data, self._VERSION_CONFIG[\"labels_key\"]: labels})\n\n def _create_meta_file(self, root):\n categories = [\n f\"{idx:0{len(str(self._VERSION_CONFIG['num_categories'] - 1))}d}\"\n for idx in range(self._VERSION_CONFIG[\"num_categories\"])\n ]\n self._create_binary_file(\n root, self._VERSION_CONFIG[\"meta_file\"], {self._VERSION_CONFIG[\"categories_key\"]: categories}\n )\n return 
categories\n\n def _create_binary_file(self, root, name, content):\n with open(pathlib.Path(root) / name, \"wb\") as fh:\n pickle.dump(content, fh)\n\n def test_class_to_idx(self):\n with self.create_dataset() as (dataset, info):\n expected = {category: label for label, category in enumerate(info[\"categories\"])}\n actual = dataset.class_to_idx\n self.assertEqual(actual, expected)\n\n\nclass CIFAR100(CIFAR10TestCase):\n DATASET_CLASS = datasets.CIFAR100\n\n _VERSION_CONFIG = dict(\n base_folder=\"cifar-100-python\",\n train_files=(\"train\",),\n test_files=(\"test\",),\n labels_key=\"fine_labels\",\n meta_file=\"meta\",\n num_categories=100,\n categories_key=\"fine_label_names\",\n )\n\n\nclass CelebATestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.CelebA\n FEATURE_TYPES = (PIL.Image.Image, (torch.Tensor, int, tuple, type(None)))\n\n CONFIGS = datasets_utils.combinations_grid(\n split=(\"train\", \"valid\", \"test\", \"all\"),\n target_type=(\"attr\", \"identity\", \"bbox\", \"landmarks\", [\"attr\", \"identity\"]),\n )\n REQUIRED_PACKAGES = (\"pandas\",)\n\n _SPLIT_TO_IDX = dict(train=0, valid=1, test=2)\n\n def inject_fake_data(self, tmpdir, config):\n base_folder = pathlib.Path(tmpdir) / \"celeba\"\n os.makedirs(base_folder)\n\n num_images, num_images_per_split = self._create_split_txt(base_folder)\n\n datasets_utils.create_image_folder(\n base_folder, \"img_align_celeba\", lambda idx: f\"{idx + 1:06d}.jpg\", num_images\n )\n attr_names = self._create_attr_txt(base_folder, num_images)\n self._create_identity_txt(base_folder, num_images)\n self._create_bbox_txt(base_folder, num_images)\n self._create_landmarks_txt(base_folder, num_images)\n\n return dict(num_examples=num_images_per_split[config[\"split\"]], attr_names=attr_names)\n\n def _create_split_txt(self, root):\n num_images_per_split = dict(train=3, valid=2, test=1)\n\n data = [\n [self._SPLIT_TO_IDX[split]] for split, num_images in num_images_per_split.items() for _ in 
range(num_images)\n ]\n self._create_txt(root, \"list_eval_partition.txt\", data)\n\n num_images_per_split[\"all\"] = num_images = sum(num_images_per_split.values())\n return num_images, num_images_per_split\n\n def _create_attr_txt(self, root, num_images):\n header = (\"5_o_Clock_Shadow\", \"Young\")\n data = torch.rand((num_images, len(header))).ge(0.5).int().mul(2).sub(1).tolist()\n self._create_txt(root, \"list_attr_celeba.txt\", data, header=header, add_num_examples=True)\n return header\n\n def _create_identity_txt(self, root, num_images):\n data = torch.randint(1, 4, size=(num_images, 1)).tolist()\n self._create_txt(root, \"identity_CelebA.txt\", data)\n\n def _create_bbox_txt(self, root, num_images):\n header = (\"x_1\", \"y_1\", \"width\", \"height\")\n data = torch.randint(10, size=(num_images, len(header))).tolist()\n self._create_txt(\n root, \"list_bbox_celeba.txt\", data, header=header, add_num_examples=True, add_image_id_to_header=True\n )\n\n def _create_landmarks_txt(self, root, num_images):\n header = (\"lefteye_x\", \"rightmouth_y\")\n data = torch.randint(10, size=(num_images, len(header))).tolist()\n self._create_txt(root, \"list_landmarks_align_celeba.txt\", data, header=header, add_num_examples=True)\n\n def _create_txt(self, root, name, data, header=None, add_num_examples=False, add_image_id_to_header=False):\n with open(pathlib.Path(root) / name, \"w\") as fh:\n if add_num_examples:\n fh.write(f\"{len(data)}\\n\")\n\n if header:\n if add_image_id_to_header:\n header = (\"image_id\", *header)\n fh.write(f\"{' '.join(header)}\\n\")\n\n for idx, line in enumerate(data, 1):\n fh.write(f\"{' '.join((f'{idx:06d}.jpg', *[str(value) for value in line]))}\\n\")\n\n def test_combined_targets(self):\n target_types = [\"attr\", \"identity\", \"bbox\", \"landmarks\"]\n\n individual_targets = []\n for target_type in target_types:\n with self.create_dataset(target_type=target_type) as (dataset, _):\n _, target = dataset[0]\n 
individual_targets.append(target)\n\n with self.create_dataset(target_type=target_types) as (dataset, _):\n _, combined_targets = dataset[0]\n\n actual = len(individual_targets)\n expected = len(combined_targets)\n self.assertEqual(\n actual,\n expected,\n f\"The number of the returned combined targets does not match the the number targets if requested \"\n f\"individually: {actual} != {expected}\",\n )\n\n for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets):\n with self.subTest(target_type=target_type):\n actual = type(combined_target)\n expected = type(individual_target)\n self.assertIs(\n actual,\n expected,\n f\"Type of the combined target does not match the type of the corresponding individual target: \"\n f\"{actual} is not {expected}\",\n )\n\n def test_no_target(self):\n with self.create_dataset(target_type=[]) as (dataset, _):\n _, target = dataset[0]\n\n self.assertIsNone(target)\n\n def test_attr_names(self):\n with self.create_dataset() as (dataset, info):\n self.assertEqual(tuple(dataset.attr_names), info[\"attr_names\"])\n\n\nclass VOCSegmentationTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.VOCSegmentation\n FEATURE_TYPES = (PIL.Image.Image, PIL.Image.Image)\n\n CONFIGS = (\n *datasets_utils.combinations_grid(\n year=[f\"20{year:02d}\" for year in range(7, 13)], image_set=(\"train\", \"val\", \"trainval\")\n ),\n dict(year=\"2007\", image_set=\"test\"),\n dict(year=\"2007-test\", image_set=\"test\"),\n )\n\n def inject_fake_data(self, tmpdir, config):\n year, is_test_set = (\n (\"2007\", True)\n if config[\"year\"] == \"2007-test\" or config[\"image_set\"] == \"test\"\n else (config[\"year\"], False)\n )\n image_set = config[\"image_set\"]\n\n base_dir = pathlib.Path(tmpdir)\n if year == \"2011\":\n base_dir /= \"TrainVal\"\n base_dir = base_dir / \"VOCdevkit\" / f\"VOC{year}\"\n os.makedirs(base_dir)\n\n num_images, num_images_per_image_set = 
self._create_image_set_files(base_dir, \"ImageSets\", is_test_set)\n datasets_utils.create_image_folder(base_dir, \"JPEGImages\", lambda idx: f\"{idx:06d}.jpg\", num_images)\n\n datasets_utils.create_image_folder(base_dir, \"SegmentationClass\", lambda idx: f\"{idx:06d}.png\", num_images)\n annotation = self._create_annotation_files(base_dir, \"Annotations\", num_images)\n\n return dict(num_examples=num_images_per_image_set[image_set], annotation=annotation)\n\n def _create_image_set_files(self, root, name, is_test_set):\n root = pathlib.Path(root) / name\n src = pathlib.Path(root) / \"Main\"\n os.makedirs(src, exist_ok=True)\n\n idcs = dict(train=(0, 1, 2), val=(3, 4), test=(5,))\n idcs[\"trainval\"] = (*idcs[\"train\"], *idcs[\"val\"])\n\n for image_set in (\"test\",) if is_test_set else (\"train\", \"val\", \"trainval\"):\n self._create_image_set_file(src, image_set, idcs[image_set])\n\n shutil.copytree(src, root / \"Segmentation\")\n\n num_images = max(itertools.chain(*idcs.values())) + 1\n num_images_per_image_set = dict([(image_set, len(idcs_)) for image_set, idcs_ in idcs.items()])\n return num_images, num_images_per_image_set\n\n def _create_image_set_file(self, root, image_set, idcs):\n with open(pathlib.Path(root) / f\"{image_set}.txt\", \"w\") as fh:\n fh.writelines([f\"{idx:06d}\\n\" for idx in idcs])\n\n def _create_annotation_files(self, root, name, num_images):\n root = pathlib.Path(root) / name\n os.makedirs(root)\n\n for idx in range(num_images):\n annotation = self._create_annotation_file(root, f\"{idx:06d}.xml\")\n\n return annotation\n\n def _create_annotation_file(self, root, name):\n def add_child(parent, name, text=None):\n child = ET.SubElement(parent, name)\n child.text = text\n return child\n\n def add_name(obj, name=\"dog\"):\n add_child(obj, \"name\", name)\n return name\n\n def add_bndbox(obj, bndbox=None):\n if bndbox is None:\n bndbox = {\"xmin\": \"1\", \"xmax\": \"2\", \"ymin\": \"3\", \"ymax\": \"4\"}\n\n obj = add_child(obj, 
\"bndbox\")\n for name, text in bndbox.items():\n add_child(obj, name, text)\n\n return bndbox\n\n annotation = ET.Element(\"annotation\")\n obj = add_child(annotation, \"object\")\n data = dict(name=add_name(obj), bndbox=add_bndbox(obj))\n\n with open(pathlib.Path(root) / name, \"wb\") as fh:\n fh.write(ET.tostring(annotation))\n\n return data\n\n\nclass VOCDetectionTestCase(VOCSegmentationTestCase):\n DATASET_CLASS = datasets.VOCDetection\n FEATURE_TYPES = (PIL.Image.Image, dict)\n\n def test_annotations(self):\n with self.create_dataset() as (dataset, info):\n _, target = dataset[0]\n\n self.assertIn(\"annotation\", target)\n annotation = target[\"annotation\"]\n\n self.assertIn(\"object\", annotation)\n objects = annotation[\"object\"]\n\n self.assertEqual(len(objects), 1)\n object = objects[0]\n\n self.assertEqual(object, info[\"annotation\"])\n\n\nclass CocoDetectionTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.CocoDetection\n FEATURE_TYPES = (PIL.Image.Image, list)\n\n REQUIRED_PACKAGES = (\"pycocotools\",)\n\n _IMAGE_FOLDER = \"images\"\n _ANNOTATIONS_FOLDER = \"annotations\"\n _ANNOTATIONS_FILE = \"annotations.json\"\n\n def dataset_args(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n root = tmpdir / self._IMAGE_FOLDER\n annotation_file = tmpdir / self._ANNOTATIONS_FOLDER / self._ANNOTATIONS_FILE\n return root, annotation_file\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n\n num_images = 3\n num_annotations_per_image = 2\n\n files = datasets_utils.create_image_folder(\n tmpdir, name=self._IMAGE_FOLDER, file_name_fn=lambda idx: f\"{idx:012d}.jpg\", num_examples=num_images\n )\n file_names = [file.relative_to(tmpdir / self._IMAGE_FOLDER) for file in files]\n\n annotation_folder = tmpdir / self._ANNOTATIONS_FOLDER\n os.makedirs(annotation_folder)\n info = self._create_annotation_file(\n annotation_folder, self._ANNOTATIONS_FILE, file_names, num_annotations_per_image\n )\n\n 
info[\"num_examples\"] = num_images\n return info\n\n def _create_annotation_file(self, root, name, file_names, num_annotations_per_image):\n image_ids = [int(file_name.stem) for file_name in file_names]\n images = [dict(file_name=str(file_name), id=id) for file_name, id in zip(file_names, image_ids)]\n\n annotations, info = self._create_annotations(image_ids, num_annotations_per_image)\n self._create_json(root, name, dict(images=images, annotations=annotations))\n\n return info\n\n def _create_annotations(self, image_ids, num_annotations_per_image):\n annotations = datasets_utils.combinations_grid(\n image_id=image_ids, bbox=([1.0, 2.0, 3.0, 4.0],) * num_annotations_per_image\n )\n for id, annotation in enumerate(annotations):\n annotation[\"id\"] = id\n return annotations, dict()\n\n def _create_json(self, root, name, content):\n file = pathlib.Path(root) / name\n with open(file, \"w\") as fh:\n json.dump(content, fh)\n return file\n\n\nclass CocoCaptionsTestCase(CocoDetectionTestCase):\n DATASET_CLASS = datasets.CocoCaptions\n\n def _create_annotations(self, image_ids, num_annotations_per_image):\n captions = [str(idx) for idx in range(num_annotations_per_image)]\n annotations = datasets_utils.combinations_grid(image_id=image_ids, caption=captions)\n for id, annotation in enumerate(annotations):\n annotation[\"id\"] = id\n return annotations, dict(captions=captions)\n\n def test_captions(self):\n with self.create_dataset() as (dataset, info):\n _, captions = dataset[0]\n self.assertEqual(tuple(captions), tuple(info[\"captions\"]))\n\n\nclass UCF101TestCase(datasets_utils.VideoDatasetTestCase):\n DATASET_CLASS = datasets.UCF101\n\n CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False))\n\n _VIDEO_FOLDER = \"videos\"\n _ANNOTATIONS_FOLDER = \"annotations\"\n\n def dataset_args(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n root = tmpdir / self._VIDEO_FOLDER\n annotation_path = tmpdir / self._ANNOTATIONS_FOLDER\n return root, 
annotation_path\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n\n video_folder = tmpdir / self._VIDEO_FOLDER\n os.makedirs(video_folder)\n video_files = self._create_videos(video_folder)\n\n annotations_folder = tmpdir / self._ANNOTATIONS_FOLDER\n os.makedirs(annotations_folder)\n num_examples = self._create_annotation_files(annotations_folder, video_files, config[\"fold\"], config[\"train\"])\n\n return num_examples\n\n def _create_videos(self, root, num_examples_per_class=3):\n def file_name_fn(cls, idx, clips_per_group=2):\n return f\"v_{cls}_g{(idx // clips_per_group) + 1:02d}_c{(idx % clips_per_group) + 1:02d}.avi\"\n\n video_files = [\n datasets_utils.create_video_folder(root, cls, lambda idx: file_name_fn(cls, idx), num_examples_per_class)\n for cls in (\"ApplyEyeMakeup\", \"YoYo\")\n ]\n return [path.relative_to(root) for path in itertools.chain(*video_files)]\n\n def _create_annotation_files(self, root, video_files, fold, train):\n current_videos = random.sample(video_files, random.randrange(1, len(video_files) - 1))\n current_annotation = self._annotation_file_name(fold, train)\n self._create_annotation_file(root, current_annotation, current_videos)\n\n other_videos = set(video_files) - set(current_videos)\n other_annotations = [\n self._annotation_file_name(fold, train) for fold, train in itertools.product((1, 2, 3), (True, False))\n ]\n other_annotations.remove(current_annotation)\n for name in other_annotations:\n self._create_annotation_file(root, name, other_videos)\n\n return len(current_videos)\n\n def _annotation_file_name(self, fold, train):\n return f\"{'train' if train else 'test'}list{fold:02d}.txt\"\n\n def _create_annotation_file(self, root, name, video_files):\n with open(pathlib.Path(root) / name, \"w\") as fh:\n fh.writelines(f\"{file}\\n\" for file in sorted(video_files))\n\n\nclass LSUNTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.LSUN\n\n REQUIRED_PACKAGES = (\"lmdb\",)\n 
CONFIGS = datasets_utils.combinations_grid(\n classes=(\"train\", \"test\", \"val\", [\"bedroom_train\", \"church_outdoor_train\"])\n )\n\n _CATEGORIES = (\n \"bedroom\",\n \"bridge\",\n \"church_outdoor\",\n \"classroom\",\n \"conference_room\",\n \"dining_room\",\n \"kitchen\",\n \"living_room\",\n \"restaurant\",\n \"tower\",\n )\n\n def inject_fake_data(self, tmpdir, config):\n root = pathlib.Path(tmpdir)\n\n num_images = 0\n for cls in self._parse_classes(config[\"classes\"]):\n num_images += self._create_lmdb(root, cls)\n\n return num_images\n\n @contextlib.contextmanager\n def create_dataset(\n self,\n *args, **kwargs\n ):\n with super().create_dataset(*args, **kwargs) as output:\n yield output\n # Currently datasets.LSUN caches the keys in the current directory rather than in the root directory. Thus,\n # this creates a number of unique _cache_* files in the current directory that will not be removed together\n # with the temporary directory\n for file in os.listdir(os.getcwd()):\n if file.startswith(\"_cache_\"):\n os.remove(file)\n\n def _parse_classes(self, classes):\n if not isinstance(classes, str):\n return classes\n\n split = classes\n if split == \"test\":\n return [split]\n\n return [f\"{category}_{split}\" for category in self._CATEGORIES]\n\n def _create_lmdb(self, root, cls):\n lmdb = datasets_utils.lazy_importer.lmdb\n hexdigits_lowercase = string.digits + string.ascii_lowercase[:6]\n\n folder = f\"{cls}_lmdb\"\n\n num_images = torch.randint(1, 4, size=()).item()\n format = \"webp\"\n files = datasets_utils.create_image_folder(root, folder, lambda idx: f\"{idx}.{format}\", num_images)\n\n with lmdb.open(str(root / folder)) as env, env.begin(write=True) as txn:\n for file in files:\n key = \"\".join(random.choice(hexdigits_lowercase) for _ in range(40)).encode()\n\n buffer = io.BytesIO()\n Image.open(file).save(buffer, format)\n buffer.seek(0)\n value = buffer.read()\n\n txn.put(key, value)\n\n os.remove(file)\n\n return num_images\n\n def 
test_not_found_or_corrupted(self):\n # LSUN does not raise built-in exception, but a custom one. It is expressive enough to not 'cast' it to\n # RuntimeError or FileNotFoundError that are normally checked by this test.\n with self.assertRaises(datasets_utils.lazy_importer.lmdb.Error):\n super().test_not_found_or_corrupted()\n\n\nclass Kinetics400TestCase(datasets_utils.VideoDatasetTestCase):\n DATASET_CLASS = datasets.Kinetics400\n\n def inject_fake_data(self, tmpdir, config):\n classes = (\"Abseiling\", \"Zumba\")\n num_videos_per_class = 2\n\n digits = string.ascii_letters + string.digits + \"-_\"\n for cls in classes:\n datasets_utils.create_video_folder(\n tmpdir,\n cls,\n lambda _: f\"{datasets_utils.create_random_string(11, digits)}.avi\",\n num_videos_per_class,\n )\n\n return num_videos_per_class * len(classes)\n\n def test_not_found_or_corrupted(self):\n self.skipTest(\"Dataset currently does not handle the case of no found videos.\")\n\n\nclass HMDB51TestCase(datasets_utils.VideoDatasetTestCase):\n DATASET_CLASS = datasets.HMDB51\n\n CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False))\n\n _VIDEO_FOLDER = \"videos\"\n _SPLITS_FOLDER = \"splits\"\n _CLASSES = (\"brush_hair\", \"wave\")\n\n def dataset_args(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n root = tmpdir / self._VIDEO_FOLDER\n annotation_path = tmpdir / self._SPLITS_FOLDER\n return root, annotation_path\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n\n video_folder = tmpdir / self._VIDEO_FOLDER\n os.makedirs(video_folder)\n video_files = self._create_videos(video_folder)\n\n splits_folder = tmpdir / self._SPLITS_FOLDER\n os.makedirs(splits_folder)\n num_examples = self._create_split_files(splits_folder, video_files, config[\"fold\"], config[\"train\"])\n\n return num_examples\n\n def _create_videos(self, root, num_examples_per_class=3):\n def file_name_fn(cls, idx, clips_per_group=2):\n return f\"{cls}_{(idx // 
clips_per_group) + 1:d}_{(idx % clips_per_group) + 1:d}.avi\"\n\n return [\n (\n cls,\n datasets_utils.create_video_folder(\n root,\n cls,\n lambda idx: file_name_fn(cls, idx),\n num_examples_per_class,\n ),\n )\n for cls in self._CLASSES\n ]\n\n def _create_split_files(self, root, video_files, fold, train):\n num_videos = num_train_videos = 0\n\n for cls, videos in video_files:\n num_videos += len(videos)\n\n train_videos = set(random.sample(videos, random.randrange(1, len(videos) - 1)))\n num_train_videos += len(train_videos)\n\n with open(pathlib.Path(root) / f\"{cls}_test_split{fold}.txt\", \"w\") as fh:\n fh.writelines(f\"{file.name} {1 if file in train_videos else 2}\\n\" for file in videos)\n\n return num_train_videos if train else (num_videos - num_train_videos)\n\n\nclass OmniglotTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.Omniglot\n\n CONFIGS = datasets_utils.combinations_grid(background=(True, False))\n\n def inject_fake_data(self, tmpdir, config):\n target_folder = (\n pathlib.Path(tmpdir) / \"omniglot-py\" / f\"images_{'background' if config['background'] else 'evaluation'}\"\n )\n os.makedirs(target_folder)\n\n num_images = 0\n for name in (\"Alphabet_of_the_Magi\", \"Tifinagh\"):\n num_images += self._create_alphabet_folder(target_folder, name)\n\n return num_images\n\n def _create_alphabet_folder(self, root, name):\n num_images_total = 0\n for idx in range(torch.randint(1, 4, size=()).item()):\n num_images = torch.randint(1, 4, size=()).item()\n num_images_total += num_images\n\n datasets_utils.create_image_folder(\n root / name, f\"character{idx:02d}\", lambda image_idx: f\"{image_idx:02d}.png\", num_images\n )\n\n return num_images_total\n\n\nclass SBUTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.SBU\n FEATURE_TYPES = (PIL.Image.Image, str)\n\n def inject_fake_data(self, tmpdir, config):\n num_images = 3\n\n dataset_folder = pathlib.Path(tmpdir) / \"dataset\"\n images = 
datasets_utils.create_image_folder(tmpdir, \"dataset\", self._create_file_name, num_images)\n\n self._create_urls_txt(dataset_folder, images)\n self._create_captions_txt(dataset_folder, num_images)\n\n return num_images\n\n def _create_file_name(self, idx):\n part1 = datasets_utils.create_random_string(10, string.digits)\n part2 = datasets_utils.create_random_string(10, string.ascii_lowercase, string.digits[:6])\n return f\"{part1}_{part2}.jpg\"\n\n def _create_urls_txt(self, root, images):\n with open(root / \"SBU_captioned_photo_dataset_urls.txt\", \"w\") as fh:\n for image in images:\n fh.write(\n f\"http://static.flickr.com/{datasets_utils.create_random_string(4, string.digits)}/{image.name}\\n\"\n )\n\n def _create_captions_txt(self, root, num_images):\n with open(root / \"SBU_captioned_photo_dataset_captions.txt\", \"w\") as fh:\n for _ in range(num_images):\n fh.write(f\"{datasets_utils.create_random_string(10)}\\n\")\n\n\nclass SEMEIONTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.SEMEION\n\n def inject_fake_data(self, tmpdir, config):\n num_images = 3\n\n images = torch.rand(num_images, 256)\n labels = F.one_hot(torch.randint(10, size=(num_images,)))\n with open(pathlib.Path(tmpdir) / \"semeion.data\", \"w\") as fh:\n for image, one_hot_labels in zip(images, labels):\n image_columns = \" \".join([f\"{pixel.item():.4f}\" for pixel in image])\n labels_columns = \" \".join([str(label.item()) for label in one_hot_labels])\n fh.write(f\"{image_columns} {labels_columns}\\n\")\n\n return num_images\n\n\nclass USPSTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.USPS\n\n CONFIGS = datasets_utils.combinations_grid(train=(True, False))\n\n def inject_fake_data(self, tmpdir, config):\n num_images = 2 if config[\"train\"] else 1\n\n images = torch.rand(num_images, 256) * 2 - 1\n labels = torch.randint(1, 11, size=(num_images,))\n\n with bz2.open(pathlib.Path(tmpdir) / f\"usps{'.t' if not config['train'] else 
''}.bz2\", \"w\") as fh:\n for image, label in zip(images, labels):\n line = \" \".join((str(label.item()), *[f\"{idx}:{pixel:.6f}\" for idx, pixel in enumerate(image, 1)]))\n fh.write(f\"{line}\\n\".encode())\n\n return num_images\n\n\nclass SBDatasetTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.SBDataset\n FEATURE_TYPES = (PIL.Image.Image, (np.ndarray, PIL.Image.Image))\n\n REQUIRED_PACKAGES = (\"scipy.io\", \"scipy.sparse\")\n\n CONFIGS = datasets_utils.combinations_grid(\n image_set=(\"train\", \"val\", \"train_noval\"), mode=(\"boundaries\", \"segmentation\")\n )\n\n _NUM_CLASSES = 20\n\n def inject_fake_data(self, tmpdir, config):\n num_images, num_images_per_image_set = self._create_split_files(tmpdir)\n\n sizes = self._create_target_folder(tmpdir, \"cls\", num_images)\n\n datasets_utils.create_image_folder(\n tmpdir, \"img\", lambda idx: f\"{self._file_stem(idx)}.jpg\", num_images, size=lambda idx: sizes[idx]\n )\n\n return num_images_per_image_set[config[\"image_set\"]]\n\n def _create_split_files(self, root):\n root = pathlib.Path(root)\n\n splits = dict(train=(0, 1, 2), train_noval=(0, 2), val=(3,))\n\n for split, idcs in splits.items():\n self._create_split_file(root, split, idcs)\n\n num_images = max(itertools.chain(*splits.values())) + 1\n num_images_per_split = dict([(split, len(idcs)) for split, idcs in splits.items()])\n return num_images, num_images_per_split\n\n def _create_split_file(self, root, name, idcs):\n with open(root / f\"{name}.txt\", \"w\") as fh:\n fh.writelines(f\"{self._file_stem(idx)}\\n\" for idx in idcs)\n\n def _create_target_folder(self, root, name, num_images):\n io = datasets_utils.lazy_importer.scipy.io\n\n target_folder = pathlib.Path(root) / name\n os.makedirs(target_folder)\n\n sizes = [torch.randint(1, 4, size=(2,)).tolist() for _ in range(num_images)]\n for idx, size in enumerate(sizes):\n content = dict(\n GTcls=dict(Boundaries=self._create_boundaries(size), 
Segmentation=self._create_segmentation(size))\n )\n io.savemat(target_folder / f\"{self._file_stem(idx)}.mat\", content)\n\n return sizes\n\n def _create_boundaries(self, size):\n sparse = datasets_utils.lazy_importer.scipy.sparse\n return [\n [sparse.csc_matrix(torch.randint(0, 2, size=size, dtype=torch.uint8).numpy())]\n for _ in range(self._NUM_CLASSES)\n ]\n\n def _create_segmentation(self, size):\n return torch.randint(0, self._NUM_CLASSES + 1, size=size, dtype=torch.uint8).numpy()\n\n def _file_stem(self, idx):\n return f\"2008_{idx:06d}\"\n\n\nclass FakeDataTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.FakeData\n FEATURE_TYPES = (PIL.Image.Image, torch.Tensor)\n\n def dataset_args(self, tmpdir, config):\n return ()\n\n def inject_fake_data(self, tmpdir, config):\n return config[\"size\"]\n\n def test_not_found_or_corrupted(self):\n self.skipTest(\"The data is generated at creation and thus cannot be non-existent or corrupted.\")\n\n\nclass PhotoTourTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.PhotoTour\n\n # The PhotoTour dataset returns examples with different features with respect to the 'train' parameter. Thus,\n # we overwrite 'FEATURE_TYPES' with a dummy value to satisfy the initial checks of the base class. Furthermore, we\n # overwrite the 'test_feature_types()' method to select the correct feature types before the test is run.\n FEATURE_TYPES = ()\n _TRAIN_FEATURE_TYPES = (torch.Tensor,)\n _TEST_FEATURE_TYPES = (torch.Tensor, torch.Tensor, torch.Tensor)\n\n CONFIGS = datasets_utils.combinations_grid(train=(True, False))\n\n _NAME = \"liberty\"\n\n def dataset_args(self, tmpdir, config):\n return tmpdir, self._NAME\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n\n # In contrast to the original data, the fake images injected here comprise only a single patch. 
Thus,\n # num_images == num_patches.\n num_patches = 5\n\n image_files = self._create_images(tmpdir, self._NAME, num_patches)\n point_ids, info_file = self._create_info_file(tmpdir / self._NAME, num_patches)\n num_matches, matches_file = self._create_matches_file(tmpdir / self._NAME, num_patches, point_ids)\n\n self._create_archive(tmpdir, self._NAME, *image_files, info_file, matches_file)\n\n return num_patches if config[\"train\"] else num_matches\n\n def _create_images(self, root, name, num_images):\n # The images in the PhotoTour dataset comprises of multiple grayscale patches of 64 x 64 pixels. Thus, the\n # smallest fake image is 64 x 64 pixels and comprises a single patch.\n return datasets_utils.create_image_folder(\n root, name, lambda idx: f\"patches{idx:04d}.bmp\", num_images, size=(1, 64, 64)\n )\n\n def _create_info_file(self, root, num_images):\n point_ids = torch.randint(num_images, size=(num_images,)).tolist()\n\n file = root / \"info.txt\"\n with open(file, \"w\") as fh:\n fh.writelines([f\"{point_id} 0\\n\" for point_id in point_ids])\n\n return point_ids, file\n\n def _create_matches_file(self, root, num_patches, point_ids):\n lines = [\n f\"{patch_id1} {point_ids[patch_id1]} 0 {patch_id2} {point_ids[patch_id2]} 0\\n\"\n for patch_id1, patch_id2 in itertools.combinations(range(num_patches), 2)\n ]\n\n file = root / \"m50_100000_100000_0.txt\"\n with open(file, \"w\") as fh:\n fh.writelines(lines)\n\n return len(lines), file\n\n def _create_archive(self, root, name, *files):\n archive = root / f\"{name}.zip\"\n with zipfile.ZipFile(archive, \"w\") as zip:\n for file in files:\n zip.write(file, arcname=file.relative_to(root))\n\n return archive\n\n @datasets_utils.test_all_configs\n def test_feature_types(self, config):\n feature_types = self.FEATURE_TYPES\n self.FEATURE_TYPES = self._TRAIN_FEATURE_TYPES if config[\"train\"] else self._TEST_FEATURE_TYPES\n try:\n super().test_feature_types.__wrapped__(self, config)\n finally:\n self.FEATURE_TYPES = 
feature_types\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "torch.randint", "torch.rand", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]