repo_name | hexsha | file_path | code | apis | possible_versions |
---|---|---|---|---|---|
blazma/ca3net | [
"bfeb4fa6c8d07f75778df671cc9500cf0316df51"
] | [
"scripts/bayesian_decoding.py"
] | [
"# -*- coding: utf8 -*-\n\"\"\"\nFunctions used for estimating position from spike trains and fitting trajectory of the animal (used for sequence replay detection)\nbased on: Davison et al. 2009 (the difference is that the tau_i(x) tuning curves are known here, since we generated them... see: `poisson_proc.py`)\nauthor: András Ecker last update: 01.2020\n\"\"\"\n\nimport os\nimport copy\nimport pickle\nimport numpy as np\nimport random as pyrandom\nfrom scipy.signal import convolve2d\nfrom scipy.special import factorial\nimport multiprocessing as mp\nfrom helper import load_tuning_curves\n\n\ninfield_rate = 20.0 # avg. in-field firing rate [Hz]\n\n\ndef extract_binspikecount(lb, ub, delta_t, t_incr, spike_times, spiking_neurons, tuning_curves):\n \"\"\"\n Builds container of spike counts in a given interval (bin)\n In order to save time in likelihood calculation only neurons which spike are taken into account\n :param lb, ub: lower and upper bounds for time binning\n :param delta_t: window size (in ms)\n :param t_incr: increment or step size (if less than delta_t than it's an overlapping sliding window)\n :param spike_times: np.array of ordered spike times (saved and loaded in ms)\n :param spiking_neurons: np.array (same shape as spike_times) with corresponding neuron IDx\n :param tuning_curves: dictionary of tuning curves {neuronID: tuning curve} (see `load_tuning_curves()`)\n :return: list (1 entry for every time bin) of dictionaries {i: n_i}\n \"\"\"\n\n assert delta_t >= t_incr\n bin_spike_counts = []\n t_start = lb; t_end = lb + delta_t\n while t_end < ub + t_incr:\n n_spikes = {}\n neuron_idx, counts = np.unique(spiking_neurons[np.where((t_start <= spike_times) & (spike_times < t_end))],\n return_counts=True)\n for i, count in zip(neuron_idx, counts):\n if i in tuning_curves:\n n_spikes[i] = count\n bin_spike_counts.append(n_spikes)\n t_start += t_incr; t_end += t_incr\n return bin_spike_counts\n\n\ndef calc_posterior(bin_spike_counts, tuning_curves, delta_t):\n \"\"\"\n Calculates posterior distribution of decoded place Pr(x|spikes) based on Davison et al. 
2009\n Pr(spikes|x) = \prod_{i=1}^N \frac{(\Delta t*tau_i(x))^n_i}{n_i!} e^{-\Delta t*tau_i(x)} (* uniform prior...)\n (It actually implements it via log(likelihoods) for numerical stability)\n Assumptions: independent neurons; firing rates modeled with Poisson processes\n Vectorized implementation using only the spiking neurons in each bin\n (plus taking only the highest fraction before summing...)\n :param bin_spike_counts: list (1 entry for every time bin) of spike dictionaries {i: n_i} (see `extract_binspikecount()`)\n :param tuning_curves: dictionary of tuning curves {neuronID: tuning curve} (see `helper.py/load_tuning_curves()`)\n :param delta_t: delta t used for binning spikes (in ms)\n :return: X_posterior: spatial_resolution*temporal_resolution array with calculated posterior probability Pr(x|spikes)\n \"\"\"\n\n delta_t *= 1e-3 # convert back to second\n n_spatial_points = pyrandom.sample(list(tuning_curves.values()), 1)[0].shape[0]\n X_posterior = np.zeros((n_spatial_points, len(bin_spike_counts))) # dim:x*t\n\n # could be a series of 3d array operations instead of this for loop...\n # ...but since only a portion of the 8000 neurons are spiking in every bin this one might be even faster\n for t, spikes in enumerate(bin_spike_counts):\n # prepare broadcasted variables\n n_spiking_neurons = len(spikes)\n expected_spikes = np.zeros((n_spatial_points, n_spiking_neurons)) # dim:x*i_spiking\n n_spikes = np.zeros_like(expected_spikes) # dim:x*i_spiking\n n_factorials = np.ones_like(expected_spikes) # dim:x*i_spiking\n for j, (neuron_id, n_spike) in enumerate(spikes.items()):\n tuning_curve = tuning_curves[neuron_id] * infield_rate\n tuning_curve[np.where(tuning_curve <= 0.1)] = 0.1\n expected_spikes[:, j] = tuning_curve * delta_t\n n_spikes[:, j] = n_spike\n n_factorials[:, j] = factorial(n_spike).item()\n # calculate log(likelihood)\n likelihoods = np.multiply(expected_spikes, 1.0/n_factorials)\n likelihoods = np.multiply(n_spikes, np.log(likelihoods))\n likelihoods = likelihoods - delta_t * expected_spikes\n likelihoods.sort(axis=1, kind=\"mergesort\")\n if likelihoods.shape[1] > 100:\n likelihoods = likelihoods[:, -100:] # take only the 100 highest values for numerical stability\n likelihoods = np.sum(likelihoods, axis=1)\n likelihoods -= np.max(likelihoods) # normalize before exp()\n likelihoods = np.exp(likelihoods)\n # calculate posterior\n X_posterior[:, t] = likelihoods / np.sum(likelihoods)\n return X_posterior\n\n\ndef _line(x, a, b):\n \"\"\"\n Dummy function used for line fitting\n :param x: independent variable\n :param a, b: slope and intercept\n \"\"\"\n return a*x + b\n\n\ndef _evaluate_fit(X_posterior, y, band_size=3):\n \"\"\"\n Calculates the goodness of fit based on Davison et al. 2009 (line fitting in a probability matrix)\n R(v, rho) = \frac{1}{n} \sum_{k=0}^{n-1} Pr(|pos - (rho + v*k*\Delta t)| < d)\n Masking matrix is based on Olafsdottir et al. 
2016's MATLAB implementation\n :param X_posterior: posterior matrix (see `calc_posterior()`)\n :param y: candidate fitted line\n :param band_size: distance (up and down) from fitted line to consider\n :return: R: goodness of fit (in [0, 1])\n \"\"\"\n\n n_spatial_points = X_posterior.shape[0]\n t = np.arange(0, X_posterior.shape[1])\n line_idx = np.clip(np.round(y)+n_spatial_points, 0, n_spatial_points*3-1).astype(int) # convert line to matrix idx\n # check if line is \"long enough\"\n if len(np.where((n_spatial_points <= line_idx) & (line_idx < n_spatial_points*2))[0]) < n_spatial_points / 3.0:\n return 0.0\n mask = np.zeros((n_spatial_points*3, X_posterior.shape[1])) # extend on top and bottom\n mask[line_idx, t] = 1\n # convolve with kernel to get the desired band width\n mask = convolve2d(mask, np.ones((2*band_size+1, 1)), mode=\"same\")\n mask = mask[int(n_spatial_points):int(n_spatial_points*2), :] # remove extra padding to get X_posterior's shape\n R = np.sum(np.multiply(X_posterior, mask)) / np.sum(X_posterior)\n return R\n\n\ndef fit_trajectory(X_posterior, slope_lims=(0.5, 3), grid_res=100):\n \"\"\"\n Brute force trajectory fit in the posterior matrix (based on Davison et al. 2009, see: `_evaluate_fit()`)\n :param X_posterior: posterior matrix (see `calc_posterior()`)\n :param slope_lims: lower and upper bounds of slopes to test\n :param grid_res: number of points to try along one dimension\n :return: highest_R: best goodness of fit (see `_evaluate_fit()`)\n fit: fitted line\n best_params: slope and offset parameter corresponding to the highest R\n \"\"\"\n\n slopes = np.concatenate((np.linspace(-slope_lims[1], -slope_lims[0], int(grid_res/2.)),\n np.linspace(slope_lims[0], slope_lims[1], int(grid_res/2.))))\n offsets = np.linspace(-0.5*X_posterior.shape[0], X_posterior.shape[0]*1.5, grid_res)\n t = np.arange(0, X_posterior.shape[1])\n best_params = (slopes[0], offsets[0]); highest_R = 0.0\n for a in slopes:\n for b in offsets:\n y = _line(t, a, b)\n R = _evaluate_fit(X_posterior, y)\n if R > highest_R:\n highest_R = R\n best_params = (a, b)\n fit = _line(t, *best_params)\n return highest_R, fit, best_params\n\n\ndef _shuffle_tuning_curves(tuning_curves, seed):\n \"\"\"\n Shuffles neuron IDx and corresponding tuning curves (used for significance test)\n :param tuning_curves: {neuronID: tuning curve}\n :param seed: random seed for shuffling\n \"\"\"\n keys = list(tuning_curves.keys())\n vals = list(tuning_curves.values())\n np.random.seed(seed)\n np.random.shuffle(keys)\n return {key: vals[i] for i, key in enumerate(keys)}\n\n\ndef _test_significance_subprocess(inputs):\n \"\"\"\n Subprocess used by multiprocessing pool for significance test: log(likelihood) calculation and line fit\n :param inputs: see `calc_posterior()`\n :return: R: see `fit_trajectory()`\n \"\"\"\n X_posterior = calc_posterior(*inputs)\n R, _, _ = fit_trajectory(X_posterior)\n return R\n\n\ndef test_significance(bin_spike_counts, tuning_curves, delta_t, R, N):\n \"\"\"\n Test significance of fitted trajectory (and detected sequence replay) by shuffling the data and re-fitting many times\n :param delta_t, bin_spike_counts, tuning_curves: see `calc_posterior()`\n :param R: reference goodness of fit (from unshuffled data)\n :param N: number of shuffled versions tested\n :return: significance, Rs: significance of the replay (1 or NaN) and sorted list of goodness of fits from the shuffled events\n \"\"\"\n\n orig_tuning_curves = copy.deepcopy(tuning_curves) # just to make sure...\n shuffled_tuning_curves = [_shuffle_tuning_curves(orig_tuning_curves, seed=12345+i) 
for i in range(N)]\n n = N if mp.cpu_count()-1 > N else mp.cpu_count()-1\n pool = mp.Pool(processes=n)\n Rs = pool.map(_test_significance_subprocess,\n zip([bin_spike_counts for _ in range(N)], shuffled_tuning_curves, [delta_t for _ in range(N)]))\n pool.terminate()\n significance = 1 if R > np.percentile(Rs, 95) else np.nan\n return significance, sorted(Rs)\n"
] | [
[
"numpy.log",
"numpy.ones_like",
"numpy.random.seed",
"numpy.linspace",
"numpy.multiply",
"numpy.arange",
"numpy.random.shuffle",
"numpy.ones",
"numpy.percentile",
"numpy.max",
"numpy.round",
"numpy.zeros_like",
"numpy.where",
"scipy.special.factorial",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
danielsnider/mlcube_examples | [
"0d273ffcf1ee7d791f1c4b01dbaf9d00c6876dd9",
"0d273ffcf1ee7d791f1c4b01dbaf9d00c6876dd9"
] | [
"matmul/build/matmul.py",
"emdenoise/build/main.py"
] | [
"import argparse\nimport numpy as np\nimport tensorflow as tf\nfrom typing import List\nfrom datetime import datetime\nimport yaml\ntry:\n from yaml import CLoader as Loader, CDumper as Dumper\nexcept ImportError:\n from yaml import Loader, Dumper\n\n\ndef matmul(shape_a: List[int], shape_b: List[int], output_file: str) -> None:\n\n a = tf.random.normal(shape=shape_a)\n b = tf.random.normal(shape=shape_b)\n print(f\"shape_a={shape_a}, shape_b={shape_b}\")\n\n start_time = datetime.now()\n\n x = tf.matmul(a, b)\n\n print(\"\\n\" * 5)\n print(\"Time taken:\", datetime.now() - start_time)\n print(\"\\n\" * 5)\n\n np.savetxt(output_file, x)\n\n\nif __name__ == '__main__':\n \"\"\"\n MLCube declares the following contract:\n 1. First command line argument is always a task name\n 2. Second, third and so on are the task specific parameters. \n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('mlcube_task', type=str, help=\"Task for this MLCube.\")\n parser.add_argument('--parameters_file', '--parameters-file', type=str, required=True,\n help=\"YAML file with matrix shapes.\")\n parser.add_argument('--output_file', '--output-file', type=str, required=True,\n help=\"File with matrix multiplication results.\")\n args = parser.parse_args()\n\n if args.mlcube_task != 'matmul':\n raise ValueError(f\"Invalid task: {args.mlcube_task}\")\n\n with open(args.parameters_file) as stream:\n config = yaml.load(stream.read(), Loader=Loader)\n\n matmul(config['matrix_a'], config['matrix_b'], args.output_file)\n",
"from __future__ import (absolute_import, division, print_function, unicode_literals)\nfrom abc import (abstractmethod, ABC)\nimport time\nimport h5py\nimport tensorflow as tf\nimport numpy as np\nfrom pathlib import Path\nimport horovod.tensorflow.keras as hvd\nfrom tinydb import (TinyDB, Query)\nimport yaml\nimport os\nimport logging\nimport logging.config\nimport argparse\nfrom enum import Enum\nfrom typing import List\nimport wget\nimport zipfile\n\n# Height and Width of a single EM Graphene Image\nIMG_SIZE = 256\n\nlogger = logging.getLogger(__name__)\n\n\nclass Task(str, Enum):\n DownloadData = 'download'\n PreProcess = 'preprocess'\n Train = 'train'\n Test = 'test'\n\n\nclass DataLoader(ABC):\n \"\"\"Base class for data loaders\n\n This defines the interface that new data loaders must adhere to\n \"\"\"\n @property\n @abstractmethod\n def input_shape(self):\n pass\n\n @property\n @abstractmethod\n def output_shape(self):\n pass\n\n @abstractmethod\n def to_dataset(self):\n pass\n\n\ndef autoencoder(input_shape):\n def _conv_block(x_, num_filters_: int):\n x_ = tf.keras.layers.Conv2D(filters=num_filters_, kernel_size=3, activation='relu', padding='same')(x_)\n x_ = tf.keras.layers.BatchNormalization()(x_)\n x_ = tf.keras.layers.Conv2D(filters=num_filters_, kernel_size=3, activation='relu', padding='same')(x_)\n x_ = tf.keras.layers.BatchNormalization()(x_)\n return x_\n\n skip_layers = []\n\n input_layer = tf.keras.layers.Input(input_shape)\n x = input_layer\n\n for num_filters in (8, 16, 32):\n x = _conv_block(x, num_filters_=num_filters)\n skip_layers.append(x)\n x = tf.keras.layers.MaxPooling2D()(x)\n\n x = _conv_block(x, num_filters_=64)\n\n for num_filters in (32, 16, 8):\n x = tf.keras.layers.UpSampling2D()(x)\n x = tf.keras.layers.Concatenate()([x, skip_layers.pop(-1)])\n x = _conv_block(x, num_filters_=num_filters)\n\n x = tf.keras.layers.Conv2D(filters=1, kernel_size=3, activation='linear', padding='same')(x)\n\n model = tf.keras.models.Model(input_layer, x)\n return model\n\n\nclass EMGrapheneDataset(DataLoader):\n\n def __init__(self, data_dir, seed=None, batch_size=10):\n self._seed = seed\n self._data_dir = Path(data_dir)\n self._batch_size = batch_size\n\n @staticmethod\n def _load_data(path):\n path = path.decode()\n with h5py.File(path, \"r\") as hdf5_file:\n for i in range(len(hdf5_file['images'])):\n images = np.array(hdf5_file[\"images\"][i])\n yield images\n\n @property\n def input_shape(self):\n return IMG_SIZE, IMG_SIZE, 1\n\n @property\n def output_shape(self):\n return IMG_SIZE, IMG_SIZE, 1\n\n def to_dataset(self):\n types = tf.float32\n shapes = tf.TensorShape([IMG_SIZE, IMG_SIZE, 1])\n\n path = str(self._data_dir / 'graphene_img_noise.h5')\n noise_dataset = tf.data.Dataset.from_generator(EMGrapheneDataset._load_data,\n output_types=types,\n output_shapes=shapes,\n args=(path,))\n\n path = str(self._data_dir / 'graphene_img_clean.h5')\n clean_dataset = tf.data.Dataset.from_generator(EMGrapheneDataset._load_data,\n output_types=types,\n output_shapes=shapes,\n args=(path,))\n\n dataset = tf.data.Dataset.zip((noise_dataset, clean_dataset))\n dataset = dataset.shard(hvd.size(), hvd.rank())\n dataset = dataset.shuffle(1000)\n dataset = dataset.batch(self._batch_size)\n return dataset\n\n\nclass AverageMeter(object):\n\n def __init__(self):\n self.count = 0\n self.value = 0\n self.last = 0\n\n def record(self, value, n=1):\n self.last = value\n self.count += n\n self.value += value * n\n\n def get_value(self):\n if self.count == 0:\n return 0\n return self.value 
/ self.count\n\n def get_last(self):\n return self.last\n\n\ndef sanitize_dict(d):\n d = d.copy()\n for k, v in d.items():\n if type(v) is dict:\n v = sanitize_dict(v)\n elif isinstance(v, np.floating) or isinstance(v, float):\n v = float(v)\n elif isinstance(v, set):\n v = list(v)\n elif hasattr(v, '__name__'):\n v = v.__name__\n else:\n v = str(v)\n d[k] = v\n return d\n\n\nclass TrackingClient:\n\n def __init__(self, path):\n path = Path(path)\n path.parent.mkdir(parents=True, exist_ok=True)\n self._db = TinyDB(str(path))\n\n def log_metric(self, key, value, step=0):\n value = sanitize_dict(value)\n metric = {'name': key, 'data': value, 'step': step,\n 'timestamp': time.time(), 'type': 'metric'}\n\n self._db.insert(metric)\n\n def log_tag(self, key, value):\n value = sanitize_dict(value)\n tag = {'name': key, 'data': value, 'type': 'tag'}\n self._db.insert(tag)\n\n def log_param(self, key, value):\n value = sanitize_dict(value)\n param = {'name': key, 'data': value, 'type': 'param'}\n self._db.insert(param)\n\n def get_metric(self, name):\n query = Query()\n return self._db.search((query.name == name) & (query.type == 'metric'))\n\n def get_metrics(self):\n query = Query()\n return self._db.search(query.type == 'metric')\n\n def get_param(self, name):\n query = Query()\n return self._db.search((query.name == name) & (query.type == 'param'))\n\n def get_params(self):\n query = Query()\n return self._db.search(query.type == 'param')\n\n def get_tag(self, name):\n query = Query()\n return self._db.search((query.name == name) & (query.type == 'tag'))\n\n def get_tags(self):\n query = Query()\n return self._db.search(query.type == 'tag')\n\n\nclass TrackingCallback(tf.keras.callbacks.Callback):\n\n def __init__(self, output_dir, batch_size, warmup_steps=1, log_batch=False):\n super().__init__()\n self._db = TrackingClient(Path(output_dir) / 'logs.json')\n self._current_step = 0\n self._warmup_steps = warmup_steps\n self._batch_size = batch_size\n\n self._train_meter = AverageMeter()\n self._predict_meter = AverageMeter()\n self._test_meter = AverageMeter()\n self._log_batch = log_batch\n\n self._t0 = None\n self._epoch_begin_time = None\n self._train_begin_time = None\n self._test_begin_time = None\n self._predict_begin_time = None\n\n def on_train_batch_begin(self, batch, logs=None):\n self._t0 = time.time()\n\n def on_train_batch_end(self, batch, logs=None):\n if self._current_step < self._warmup_steps:\n return\n\n t1 = time.time()\n batch_time = self._batch_size / (t1 - self._t0)\n\n self._train_meter.record(batch_time)\n\n if self._log_batch:\n self._db.log_metric('train_batch_log', logs, step=batch)\n\n def on_predict_batch_begin(self, batch, logs=None):\n self._t0 = time.time()\n\n def on_predict_batch_end(self, batch, logs=None):\n t1 = time.time()\n batch_time = self._batch_size / (t1 - self._t0)\n\n self._predict_meter.record(batch_time)\n\n if self._log_batch:\n self._db.log_metric('predict_batch_log', logs, step=batch)\n\n def on_test_batch_begin(self, batch, logs=None):\n self._t0 = time.time()\n\n def on_test_batch_end(self, batch, logs=None):\n t1 = time.time()\n batch_time = self._batch_size / (t1 - self._t0)\n\n self._test_meter.record(batch_time)\n\n if self._log_batch:\n self._db.log_metric('test_batch_log', logs, step=batch)\n\n def on_epoch_begin(self, epoch, logs=None):\n self._epoch_begin_time = time.time()\n\n def on_epoch_end(self, epoch, logs=None):\n self._current_step = epoch\n if epoch < self._warmup_steps:\n return\n\n metrics = {\n 'duration': time.time() - 
self._epoch_begin_time,\n 'samples_per_sec': self._train_meter.get_value()\n }\n if logs is not None:\n metrics.update(logs)\n self._db.log_metric('epoch_log', metrics, step=epoch)\n\n def on_train_begin(self, logs=None):\n self._train_begin_time = time.time()\n\n def on_train_end(self, logs=None):\n metrics = {\n 'duration': time.time() - self._train_begin_time,\n 'samples_per_sec': self._train_meter.get_value()\n }\n if logs is not None:\n metrics.update(logs)\n self._db.log_metric('train_log', metrics)\n\n def on_test_begin(self, logs=None):\n self._test_begin_time = time.time()\n\n def on_test_end(self, logs=None):\n metrics = {\n 'duration': time.time() - self._test_begin_time,\n 'samples_per_sec': self._test_meter.get_value()\n }\n if logs is not None:\n metrics.update(logs)\n self._db.log_metric('test_log', metrics)\n\n def on_predict_begin(self, logs=None):\n self._predict_begin_time = time.time()\n\n def on_predict_end(self, logs=None):\n metrics = {\n 'duration': time.time() - self._predict_begin_time,\n 'samples_per_sec': self._predict_meter.get_value()\n }\n if logs is not None:\n metrics.update(logs)\n self._db.log_metric('predict_log', metrics)\n\n\nhvd.init()\n\n\ndef train(data_dir=None, output_dir=None, model_dir=None, epochs=1, learning_rate=0.01, beta_1=0.9,\n beta_2=0.99, epsilon=1e-07, optimizer='Adam'):\n dataset = EMGrapheneDataset(data_dir=data_dir)\n\n opt = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=beta_1, beta_2=beta_2,\n epsilon=epsilon, amsgrad=False, name=optimizer)\n opt = hvd.DistributedOptimizer(opt)\n\n loss = tf.keras.losses.MeanSquaredError()\n\n model = autoencoder(dataset.input_shape)\n model.compile(loss=loss,\n optimizer=opt,\n experimental_run_tf_function=False)\n\n hooks = [\n hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n hvd.callbacks.MetricAverageCallback(),\n ]\n if hvd.rank() == 0:\n # These hooks only need to be called by one instance.\n # Therefore we need to only add them on rank == 0\n tracker_hook = TrackingCallback(output_dir, 256, False)\n hooks.append(tracker_hook)\n\n model.fit(dataset.to_dataset(), epochs=epochs, callbacks=hooks)\n\n if hvd.rank() == 0:\n model_dir = Path(model_dir)\n weight_path = str(model_dir / 'weights')\n os.mkdir(weight_path)\n weights_file = str(model_dir / 'weights/final_weights.h5')\n model.save_weights(weights_file)\n os.mkdir(model_dir / 'models')\n model_path = str(model_dir / \"models\")\n model.save(model_path)\n print(\"weight path: \", os.listdir(weight_path))\n print(\"models path: \", os.listdir(model_path))\n\n\ndef test(data_dir=None, output_dir=None, model_dir=None, global_batch_size=256, log_batch=False):\n hooks = [\n hvd.callbacks.BroadcastGlobalVariablesCallback(0),\n hvd.callbacks.MetricAverageCallback(),\n ]\n model_path = Path(model_dir)\n model_path = str(model_path / \"models\")\n model = tf.keras.models.load_model(model_path)\n\n if hvd.rank() == 0:\n # These hooks only need to be called by one instance.\n # Therefore we need to only add them on rank == 0\n tracker_hook = TrackingCallback(output_dir, global_batch_size, log_batch)\n hooks.append(tracker_hook)\n\n print('Begin Predict...')\n\n weight_dir = Path(model_dir)\n weight_dir = weight_dir / 'weights'\n weights_file = weight_dir / 'final_weights.h5'\n\n # Edge case: user is trying to run inference but not training\n # See if we can find a pre-trained model from another run\n # If not then throw and error as we're in an inconsistent state.\n if not weights_file.exists():\n print('Searching for pre-trained 
models')\n\n weight_files = weight_dir.parent.glob('**/*final_weights.h5')\n weight_files = list(sorted(weight_files))\n if len(weight_files) == 0:\n raise RuntimeError(\n \"No pre-trained model exists! Please train a model before running inference!\")\n weights_file = weight_files[-1]\n\n print(f'Using weights file: {str(weights_file)}')\n model.load_weights(str(weights_file))\n\n dataset = EMGrapheneDataset(data_dir=data_dir).to_dataset()\n\n model.evaluate(dataset, callbacks=hooks)\n\n\ndef download_task(task_args: List[str]) -> None:\n \"\"\" Task: download.\n Input parameters:\n --data_dir\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', '--data-dir', type=str, default=None, help=\"Dataset path.\")\n args = parser.parse_args(args=task_args)\n\n os.makedirs(args.data_dir, exist_ok=True)\n data_url = \"https://github.com/vibhatha/data_repo/raw/main/em_denoise/emdenoise_minibatch_v1.zip\"\n data_file_expected_dir = os.path.join(args.data_dir, 'emdenoise_minibatch_v1.zip')\n if not os.path.exists(data_file_expected_dir):\n filename = wget.download(data_url, out=args.data_dir)\n if not os.path.exists(data_file_expected_dir):\n raise ValueError(f'Em denoise data not downloaded to: {os.listdir(args.data_dir)}')\n print(f\"File downloaded : {args.data_dir}/{filename}\")\n\n\ndef preprocess_task(task_args: List[str]) -> None:\n \"\"\" Task: preprocess.\n Input parameters:\n --data_dir\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', '--data-dir', type=str, default=None, help=\"Dataset path.\")\n args = parser.parse_args(args=task_args)\n\n os.makedirs(args.data_dir, exist_ok=True)\n data_source_dir, data_dest_dir = args.data_dir, args.data_dir\n if not os.path.exists(os.path.join(data_source_dir, 'emdenoise_minibatch_v1.zip')):\n raise ValueError(f'Em denoise data not downloaded to: {os.listdir(data_source_dir)}')\n file = os.listdir(data_source_dir)[0]\n with zipfile.ZipFile(os.path.join(data_source_dir, file), \"r\") as zip_ref:\n zip_ref.extractall(data_dest_dir)\n assert len(os.listdir(data_dest_dir)) > 0\n\n\ndef parse_ml_args(task_args: List[str]) -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', '--data-dir', type=str, default=None, help=\"Dataset path.\")\n parser.add_argument('--model_dir', '--model-dir', type=str, default=None,\n help=\"Model output directory.\")\n parser.add_argument('--output_dir', '--output-dir', type=str, default=None,\n help=\"Output directory.\")\n parser.add_argument('--parameters_file', '--parameters-file', type=str, default=None,\n help=\"Parameters default values.\")\n args = parser.parse_args(args=task_args)\n\n print(\"Data Dir : \", args.data_dir)\n print(\"Model Dir : \", args.model_dir)\n print(\"Output Dir : \", args.output_dir)\n print(\"Data Dir files: \", os.listdir(args.data_dir))\n\n return args\n\n\ndef train_task(task_args: List[str]) -> None:\n \"\"\" Task: train.\n Input parameters:\n --data_dir, --log_dir, --model_dir, --parameters_file\n \"\"\"\n args = parse_ml_args(task_args)\n\n os.makedirs(args.model_dir, exist_ok=True)\n os.makedirs(args.output_dir, exist_ok=True)\n\n train_path = os.path.join(args.data_dir, \"emdenoise_minibatch_v1\", \"train\")\n assert os.path.exists(train_path)\n\n with open(args.parameters_file, 'r') as stream:\n parameters = yaml.load(stream, Loader=yaml.FullLoader)\n\n train(data_dir=train_path, 
output_dir=args.output_dir, model_dir=args.model_dir,\n epochs=int(parameters.get('epochs', 1)), learning_rate=float(parameters.get('learning_rate', 0.01)),\n beta_1=float(parameters.get('beta_1', 0.9)), beta_2=float(parameters.get('beta_2', 0.999)),\n epsilon=float(parameters.get('epsilon', 1e-07)), optimizer=parameters.get('optimizer', 'Adam'))\n\n\ndef test_task(task_args: List[str]) -> None:\n \"\"\" Task: test.\n Input parameters:\n --data_dir, --log_dir, --model_dir, --parameters_file\n \"\"\"\n args = parse_ml_args(task_args)\n\n os.makedirs(args.model_dir, exist_ok=True)\n os.makedirs(args.output_dir, exist_ok=True)\n\n test_path = os.path.join(args.data_dir, \"emdenoise_minibatch_v1\", \"test\")\n assert os.path.exists(test_path)\n\n with open(args.parameters_file, 'r') as stream:\n parameters = yaml.load(stream, Loader=yaml.FullLoader)\n\n test(data_dir=test_path, output_dir=args.output_dir, model_dir=args.model_dir,\n global_batch_size=int(parameters.get('global_batch_size', 256)), log_batch=True)\n\n\ndef main():\n \"\"\"\n main.py task task_specific_parameters...\n \"\"\"\n # noinspection PyBroadException\n try:\n parser = argparse.ArgumentParser()\n parser.add_argument('mlbox_task', type=str, help=\"Task for this MLCube.\")\n parser.add_argument('--log_dir', '--log-dir', type=str, required=True, help=\"Logging directory.\")\n ml_box_args, task_args = parser.parse_known_args()\n\n os.makedirs(ml_box_args.log_dir, exist_ok=True)\n logger_config = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"formatters\": {\n \"standard\": {\n \"format\": \"%(asctime)s - %(name)s - %(threadName)s - %(levelname)s - %(message)s\"},\n },\n \"handlers\": {\n \"file_handler\": {\n \"class\": \"logging.FileHandler\",\n \"level\": \"INFO\",\n \"formatter\": \"standard\",\n \"filename\": os.path.join(ml_box_args.log_dir,\n f\"mlbox_sciml_{ml_box_args.mlbox_task}.log\")\n }\n },\n \"loggers\": {\n \"\": {\"level\": \"INFO\", \"handlers\": [\"file_handler\"]},\n \"__main__\": {\"level\": \"NOTSET\", \"propagate\": \"yes\"},\n \"tensorflow\": {\"level\": \"NOTSET\", \"propagate\": \"yes\"}\n }\n }\n logging.config.dictConfig(logger_config)\n\n if ml_box_args.mlbox_task == Task.DownloadData:\n download_task(task_args)\n elif ml_box_args.mlbox_task == Task.PreProcess:\n preprocess_task(task_args)\n elif ml_box_args.mlbox_task == Task.Train:\n train_task(task_args)\n elif ml_box_args.mlbox_task == Task.Test:\n test_task(task_args)\n else:\n raise ValueError(f\"Unknown task: {ml_box_args.mlbox_task}\")\n except Exception as err:\n logger.exception(err)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.savetxt",
"tensorflow.matmul",
"tensorflow.random.normal"
],
[
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.models.load_model",
"tensorflow.TensorShape",
"tensorflow.keras.models.Model",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.data.Dataset.zip",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.data.Dataset.from_generator",
"numpy.array",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
mofeing/qibo | [
"3eb675ba893bf35f103d41a8a64c86aae9cbf616",
"3eb675ba893bf35f103d41a8a64c86aae9cbf616",
"3eb675ba893bf35f103d41a8a64c86aae9cbf616"
] | [
"examples/reuploading_classifier/qlassifier.py",
"src/qibo/tests/test_models_evolution.py",
"examples/adiabatic/optimize.py"
] | [
"import numpy as np\nfrom qibo.models import Circuit\nfrom qibo import gates, K\nfrom datasets import create_dataset, create_target, fig_template, world_map_template\nfrom matplotlib.cm import get_cmap\nfrom matplotlib.colors import Normalize\nimport os\n\n\nclass single_qubit_classifier:\n def __init__(self, name, layers, grid=11, test_samples=1000, seed=0):\n \"\"\"Class with all computations needed for classification.\n\n Args:\n name (str): Name of the problem to create the dataset, to choose between\n ['circle', '3 circles', 'square', '4 squares', 'crown', 'tricrown', 'wavy lines'].\n layers (int): Number of layers to use in the classifier.\n grid (int): Number of points in one direction defining the grid of points.\n If not specified, the dataset does not follow a regular grid.\n samples (int): Number of points in the set, randomly located.\n This argument is ignored if grid is specified.\n seed (int): Random seed.\n\n Returns:\n Dataset for the given problem (x, y).\n \"\"\"\n np.random.seed(seed)\n self.name = name\n self.layers = layers\n self.training_set = create_dataset(name, grid=grid)\n self.test_set = create_dataset(name, samples=test_samples)\n self.target = create_target(name)\n self.params = np.random.randn(layers * 4)\n self._circuit = self._initialize_circuit()\n try:\n os.makedirs('results/'+self.name+'/%s_layers' % self.layers)\n except:\n pass\n\n def set_parameters(self, new_params):\n \"\"\"Method for updating parameters of the class.\n\n Args:\n new_params (array): New parameters to update\n \"\"\"\n self.params = new_params\n\n def _initialize_circuit(self):\n \"\"\"Creates variational circuit.\"\"\"\n C = Circuit(1)\n for l in range(self.layers):\n C.add(gates.RY(0, theta=0))\n C.add(gates.RZ(0, theta=0))\n return C\n\n def circuit(self, x):\n \"\"\"Method creating the circuit for a point (in the datasets).\n\n Args:\n x (array): Point to create the circuit.\n\n Returns:\n Qibo circuit.\n \"\"\"\n params = []\n for i in range(0, 4 * self.layers, 4):\n params.append(self.params[i] * x[0] + self.params[i + 1])\n params.append(self.params[i + 2] * x[1] + self.params[i + 3])\n self._circuit.set_parameters(params)\n return self._circuit\n\n def cost_function_one_point_fidelity(self, x, y):\n \"\"\"Method for computing the cost function for\n a given sample (in the datasets), using fidelity.\n\n Args:\n x (array): Point to create the circuit.\n y (int): label of x.\n\n Returns:\n float with the cost function.\n \"\"\"\n C = self.circuit(x)\n state = C.execute()\n cf = .5 * (1 - fidelity(state, self.target[y])) ** 2\n return cf\n\n def cost_function_fidelity(self, params=None):\n \"\"\"Method for computing the cost function for the training set, using fidelity.\n\n Args:\n params(array): new parameters to update before computing\n\n Returns:\n float with the cost function.\n \"\"\"\n if params is None:\n params = self.params\n\n self.set_parameters(params)\n cf = 0\n for x, y in zip(self.training_set[0], self.training_set[1]):\n cf += self.cost_function_one_point_fidelity(x, y)\n cf /= len(self.training_set[0])\n return cf\n\n def minimize(self, method='BFGS', options=None, compile=True):\n loss = self.cost_function_fidelity\n\n if method == 'cma':\n # Genetic optimizer\n import cma\n r = cma.fmin2(lambda p: K.to_numpy(loss(p)), self.params, 2)\n result = r[1].result.fbest\n parameters = r[1].result.xbest\n\n elif method == 'sgd':\n circuit = self.circuit(self.training_set[0])\n for gate in circuit.queue:\n if not K.supports_gradients:\n from qibo.config import 
raise_error\n raise_error(RuntimeError,\n 'Use tensorflow backend in order to compute gradients.')\n\n sgd_options = {\"nepochs\": 5001,\n \"nmessage\": 1000,\n \"optimizer\": \"Adamax\",\n \"learning_rate\": 0.5}\n if options is not None:\n sgd_options.update(options)\n\n # proceed with the training\n vparams = K.Variable(self.params)\n optimizer = getattr(K.optimizers, sgd_options[\"optimizer\"])(\n learning_rate=sgd_options[\"learning_rate\"])\n\n def opt_step():\n with K.GradientTape() as tape:\n l = loss(vparams)\n grads = tape.gradient(l, [vparams])\n optimizer.apply_gradients(zip(grads, [vparams]))\n return l, vparams\n\n if compile:\n opt_step = K.function(opt_step)\n\n l_optimal, params_optimal = 10, self.params\n for e in range(sgd_options[\"nepochs\"]):\n l, vparams = opt_step()\n if l < l_optimal:\n l_optimal, params_optimal = l, vparams\n if e % sgd_options[\"nmessage\"] == 0:\n print('ite %d : loss %f' % (e, K.to_numpy(l)))\n\n result = K.to_numpy(self.cost_function_fidelity(params_optimal))\n parameters = K.to_numpy(params_optimal)\n\n else:\n from scipy.optimize import minimize\n m = minimize(lambda p: K.to_numpy(loss(p)), self.params,\n method=method, options=options)\n result = m.fun\n parameters = m.x\n\n return result, parameters\n\n def eval_test_set_fidelity(self):\n \"\"\"Method for evaluating points in the test set, using fidelity.\n\n Returns:\n list of guesses.\n \"\"\"\n labels = [[0]] * len(self.test_set[0])\n for j, x in enumerate(self.test_set[0]):\n C = self.circuit(x)\n state = C.execute()\n fids = np.empty(len(self.target))\n for i, t in enumerate(self.target):\n fids[i] = fidelity(state, t)\n labels[j] = np.argmax(fids)\n\n return labels\n\n def paint_results(self):\n \"\"\"Method for plotting the guessed labels and the right guesses.\n\n Returns:\n plot with results.\n \"\"\"\n fig, axs = fig_template(self.name)\n guess_labels = self.eval_test_set_fidelity()\n colors_classes = get_cmap('tab10')\n norm_class = Normalize(vmin=0, vmax=10)\n x = self.test_set[0]\n x_0, x_1 = x[:, 0], x[:, 1]\n axs[0].scatter(x_0, x_1, c=guess_labels, s=2,\n cmap=colors_classes, norm=norm_class)\n colors_rightwrong = get_cmap('RdYlGn')\n norm_rightwrong = Normalize(vmin=-.1, vmax=1.1)\n\n checks = [int(g == l) for g, l in zip(guess_labels, self.test_set[1])]\n axs[1].scatter(x_0, x_1, c=checks, s=2,\n cmap=colors_rightwrong, norm=norm_rightwrong)\n print('The accuracy for this classification is %.2f' %\n (100 * np.sum(checks) / len(checks)), '%')\n\n fig.savefig('results/'+self.name +\n '/%s_layers/test_set.pdf' % self.layers)\n\n def paint_world_map(self):\n \"\"\"Method for plotting the proper labels on the Bloch sphere.\n\n Returns:\n plot with 2D representation of Bloch sphere.\n \"\"\"\n angles = np.zeros((len(self.test_set[0]), 2))\n from datasets import laea_x, laea_y\n fig, ax = world_map_template()\n colors_classes = get_cmap('tab10')\n norm_class = Normalize(vmin=0, vmax=10)\n for i, x in enumerate(self.test_set[0]):\n C = self.circuit(x)\n state = C.execute()\n angles[i, 0] = np.pi / 2 - \\\n np.arccos(np.abs(state[0]) ** 2 - np.abs(state[1]) ** 2)\n angles[i, 1] = np.angle(state[1] / state[0])\n\n ax.scatter(laea_x(angles[:, 1], angles[:, 0]), laea_y(angles[:, 1], angles[:, 0]), c=self.test_set[1],\n cmap=colors_classes, s=15, norm=norm_class)\n\n if len(self.target) == 2:\n angles_0 = np.zeros(len(self.target))\n angles_1 = np.zeros(len(self.target))\n angles_0[0] = np.pi / 2\n angles_0[1] = -np.pi / 2\n col = list(range(2))\n\n elif 
len(self.target) == 3:\n angles_0 = np.zeros(len(self.target) + 1)\n angles_1 = np.zeros(len(self.target) + 1)\n angles_0[0] = np.pi / 2\n angles_0[1] = -np.pi / 6\n angles_0[2] = -np.pi / 6\n angles_0[3] = -np.pi / 6\n angles_1[2] = np.pi\n angles_1[3] = -np.pi\n col = list(range(3)) + [2]\n\n else:\n angles_0 = np.zeros(len(self.target))\n angles_1 = np.zeros(len(self.target))\n for i, state in enumerate(self.target):\n angles_0[i] = np.pi / 2 - \\\n np.arccos(np.abs(state[0]) ** 2 - np.abs(state[1]) ** 2)\n angles_1[i] = np.angle(state[1] / state[0])\n col = list(range(len(self.target)))\n\n ax.scatter(laea_x(angles_1, angles_0), laea_y(angles_1, angles_0), c=col,\n cmap=colors_classes, s=500, norm=norm_class, marker='P', zorder=11)\n\n ax.axis('off')\n\n fig.savefig('results/'+self.name +\n '/%s_layers/world_map.pdf' % self.layers)\n\n\ndef fidelity(state1, state2):\n return K.abs(K.sum(K.qnp.conj(state2) * state1)) ** 2\n",
"import pytest\nimport numpy as np\nfrom qibo import callbacks, hamiltonians, models, K\nfrom qibo.config import raise_error\nfrom scipy.linalg import expm\n\n\ndef assert_states_equal(state, target_state, atol=0):\n \"\"\"Asserts that two state vectors are equal up to a phase.\"\"\"\n phase = K.to_numpy(state)[0] / K.to_numpy(target_state)[0]\n K.assert_allclose(state, phase * target_state, atol=atol)\n\n\nclass TimeStepChecker(callbacks.BackendCallback):\n \"\"\"Callback that checks each evolution time step.\"\"\"\n\n def __init__(self, target_states, atol=0):\n super(TimeStepChecker, self).__init__()\n self.target_states = iter(target_states)\n self.atol = atol\n\n def _state_vector_call(self, state):\n assert_states_equal(state, next(self.target_states), atol=self.atol)\n\n def _density_matrix_call(self, state): # pragma: no cover\n raise_error(NotImplementedError)\n\n\ndef test_state_evolution_init():\n ham = hamiltonians.Z(2)\n evolution = models.StateEvolution(ham, dt=1)\n assert evolution.nqubits == 2\n # time-dependent Hamiltonian bad type\n with pytest.raises(TypeError):\n evol = models.StateEvolution(lambda t: \"abc\", dt=1e-2)\n # dt < 0\n with pytest.raises(ValueError):\n adev = models.StateEvolution(ham, dt=-1e-2)\n # pass accelerators without trotter Hamiltonian\n with pytest.raises(NotImplementedError):\n adev = models.StateEvolution(ham, dt=1e-2, accelerators={\"/GPU:0\": 2})\n\n\ndef test_state_evolution_get_initial_state():\n ham = hamiltonians.Z(2)\n evolution = models.StateEvolution(ham, dt=1)\n # execute without initial state\n with pytest.raises(ValueError):\n final_state = evolution(final_time=1)\n\n\[email protected]((\"solver\", \"atol\"),\n [(\"exp\", 0), (\"rk4\", 1e-2), (\"rk45\", 1e-1)])\ndef test_state_evolution_constant_hamiltonian(backend, solver, atol):\n nsteps = 200\n t = np.linspace(0, 1, nsteps + 1)\n phase = np.exp(2j * t)[:, np.newaxis]\n ones = np.ones((nsteps + 1, 2))\n target_psi = np.concatenate([phase, ones, phase.conj()], axis=1)\n\n dt = t[1] - t[0]\n checker = TimeStepChecker(target_psi, atol=atol)\n evolution = models.StateEvolution(hamiltonians.Z(2), dt=dt, solver=solver,\n callbacks=[checker])\n final_psi = evolution(final_time=1, initial_state=target_psi[0])\n\n\[email protected](\"nqubits,dt\", [(2, 1e-2)])\ndef test_state_evolution_time_dependent_hamiltonian(backend, nqubits, dt):\n ham = lambda t: np.cos(t) * hamiltonians.Z(nqubits)\n # Analytical solution\n target_psi = [np.ones(2 ** nqubits) / np.sqrt(2 ** nqubits)]\n for n in range(int(1 / dt)):\n prop = expm(-1j * dt * K.to_numpy(ham(n * dt).matrix))\n target_psi.append(prop.dot(target_psi[-1]))\n\n checker = TimeStepChecker(target_psi, atol=1e-8)\n evolution = models.StateEvolution(ham, dt=dt, callbacks=[checker])\n final_psi = evolution(final_time=1, initial_state=np.copy(target_psi[0]))\n\n\[email protected](\"nqubits\", [5])\[email protected](\"solver,dt,atol\", [(\"exp\", 1e-1, 1e-2), (\"rk45\", 1e-2, 1e-1)])\ndef test_state_evolution_trotter_hamiltonian(backend, accelerators, nqubits, solver, dt, atol):\n if accelerators is not None and solver != \"exp\":\n pytest.skip(\"Distributed evolution is supported only with exp solver.\")\n h = 1.0\n\n target_psi = [np.ones(2 ** nqubits) / np.sqrt(2 ** nqubits)]\n ham_matrix = K.to_numpy(hamiltonians.TFIM(nqubits, h=h).matrix)\n prop = expm(-1j * dt * ham_matrix)\n for n in range(int(1 / dt)):\n target_psi.append(prop.dot(target_psi[-1]))\n\n ham = hamiltonians.TFIM(nqubits, h=h, dense=False)\n checker = TimeStepChecker(target_psi, 
atol=atol)\n evolution = models.StateEvolution(ham, dt, solver=solver,\n callbacks=[checker],\n accelerators=accelerators)\n final_psi = evolution(final_time=1, initial_state=np.copy(target_psi[0]))\n\n # Change dt\n if solver == \"exp\":\n evolution = models.StateEvolution(ham, dt / 10, accelerators=accelerators)\n final_psi = evolution(final_time=1, initial_state=np.copy(target_psi[0]))\n assert_states_equal(final_psi.tensor, target_psi[-1], atol=atol)\n\n\ndef test_adiabatic_evolution_init():\n # Hamiltonians of bad type\n h0 = hamiltonians.X(3)\n s = lambda t: t\n with pytest.raises(TypeError):\n adev = models.AdiabaticEvolution(h0, lambda t: h0, s, dt=1e-2)\n h1 = hamiltonians.TFIM(2)\n with pytest.raises(TypeError):\n adev = models.AdiabaticEvolution(lambda t: h1, h1, s, dt=1e-2)\n # Hamiltonians with different number of qubits\n with pytest.raises(ValueError):\n adev = models.AdiabaticEvolution(h0, h1, s, dt=1e-2)\n # Adiabatic Hamiltonian with bad hamiltonian types\n from qibo.core.adiabatic import AdiabaticHamiltonian\n with pytest.raises(TypeError):\n h = AdiabaticHamiltonian(\"a\", \"b\") # pylint: disable=E0110\n # s with three arguments\n h0 = hamiltonians.X(2)\n s = lambda t, a, b: t + a + b\n with pytest.raises(ValueError):\n adev = models.AdiabaticEvolution(h0, h1, s, dt=1e-2)\n\n\ndef test_adiabatic_evolution_schedule():\n h0 = hamiltonians.X(3)\n h1 = hamiltonians.TFIM(3)\n adev = models.AdiabaticEvolution(h0, h1, lambda t: t, dt=1e-2)\n assert adev.schedule(0.2) == 0.2 # pylint: disable=E1102\n assert adev.schedule(0.8) == 0.8 # pylint: disable=E1102\n # s(0) != 0\n with pytest.raises(ValueError):\n adev = models.AdiabaticEvolution(h0, h1, lambda t: t + 1, dt=1e-2)\n # s(T) != 0\n with pytest.raises(ValueError):\n adev = models.AdiabaticEvolution(h0, h1, lambda t: t / 2, dt=1e-2)\n\n\ndef test_set_scheduling_parameters():\n \"\"\"Test ``AdiabaticEvolution.set_parameters``.\"\"\"\n h0 = hamiltonians.X(3)\n h1 = hamiltonians.TFIM(3)\n sp = lambda t, p: (1 - p[0]) * np.sqrt(t) + p[0] * t\n adevp = models.AdiabaticEvolution(h0, h1, sp, 1e-2)\n # access parametrized scheduling before setting parameters\n with pytest.raises(ValueError):\n s = adevp.schedule\n\n adevp.set_parameters([0.5, 1])\n\n target_s = lambda t: 0.5 * np.sqrt(t) + 0.5 * t\n for t in np.random.random(10):\n assert adevp.schedule(t) == target_s(t) # pylint: disable=E1102\n\n\[email protected](\"dense\", [False, True])\ndef test_adiabatic_evolution_hamiltonian(backend, dense):\n \"\"\"Test adiabatic evolution hamiltonian as a function of time.\"\"\"\n h0 = hamiltonians.X(2, dense=dense)\n h1 = hamiltonians.TFIM(2, dense=dense)\n adev = models.AdiabaticEvolution(h0, h1, lambda t: t, dt=1e-2)\n # try accessing hamiltonian before setting it\n with pytest.raises(RuntimeError):\n adev.hamiltonian(0.1)\n\n m1 = np.array([[0, 1, 1, 0], [1, 0, 0, 1],\n [1, 0, 0, 1], [0, 1, 1, 0]])\n m2 = np.diag([2, -2, -2, 2])\n ham = lambda t, T: - (1 - t / T) * m1 - (t / T) * m2\n\n adev.hamiltonian.total_time = 1\n for t in [0, 0.3, 0.7, 1.0]:\n if dense:\n matrix = adev.hamiltonian(t).matrix\n else:\n matrix = adev.hamiltonian(t).dense.matrix\n K.assert_allclose(matrix, ham(t, 1))\n\n #try using a different total time\n adev.hamiltonian.total_time = 2\n for t in [0, 0.3, 0.7, 1.0]:\n if dense:\n matrix = adev.hamiltonian(t).matrix\n else:\n matrix = adev.hamiltonian(t).dense.matrix\n K.assert_allclose(matrix, ham(t, 2))\n\n\[email protected](\"dt\", [1e-1])\ndef test_adiabatic_evolution_execute_exp(backend, dt):\n \"\"\"Test 
adiabatic evolution with exponential solver.\"\"\"\n h0 = hamiltonians.X(2)\n h1 = hamiltonians.TFIM(2)\n adev = models.AdiabaticEvolution(h0, h1, lambda t: t, dt=dt)\n\n m1 = np.array([[0, 1, 1, 0], [1, 0, 0, 1],\n [1, 0, 0, 1], [0, 1, 1, 0]])\n m2 = np.diag([2, -2, -2, 2])\n ham = lambda t: - (1 - t) * m1 - t * m2\n\n target_psi = np.ones(4) / 2\n nsteps = int(1 / dt)\n for n in range(nsteps):\n target_psi = expm(-1j * dt * ham(n * dt)).dot(target_psi)\n final_psi = adev(final_time=1)\n assert_states_equal(final_psi, target_psi)\n\n\[email protected](\"nqubits,dt\", [(4, 1e-1)])\ndef test_trotterized_adiabatic_evolution(backend, accelerators, nqubits, dt):\n \"\"\"Test adiabatic evolution using Trotterization.\"\"\"\n dense_h0 = hamiltonians.X(nqubits)\n dense_h1 = hamiltonians.TFIM(nqubits)\n\n target_psi = [np.ones(2 ** nqubits) / np.sqrt(2 ** nqubits)]\n ham = lambda t: dense_h0 * (1 - t) + dense_h1 * t\n for n in range(int(1 / dt)):\n prop = K.to_numpy(ham(n * dt).exp(dt))\n target_psi.append(prop.dot(target_psi[-1]))\n\n local_h0 = hamiltonians.X(nqubits, dense=False)\n local_h1 = hamiltonians.TFIM(nqubits, dense=False)\n checker = TimeStepChecker(target_psi, atol=dt)\n adev = models.AdiabaticEvolution(local_h0, local_h1, lambda t: t, dt,\n callbacks=[checker],\n accelerators=accelerators)\n final_psi = adev(final_time=1)\n\n\[email protected](\"solver\", [\"rk4\", \"rk45\"])\[email protected](\"dense\", [False, True])\[email protected](\"dt\", [0.1])\ndef test_adiabatic_evolution_execute_rk(backend, solver, dense, dt):\n \"\"\"Test adiabatic evolution with Runge-Kutta solver.\"\"\"\n h0 = hamiltonians.X(3, dense=dense)\n h1 = hamiltonians.TFIM(3, dense=dense)\n\n target_psi = [np.ones(8) / np.sqrt(8)]\n ham = lambda t: h0 * (1 - t) + h1 * t\n for n in range(int(1 / dt)):\n prop = K.to_numpy(ham(n * dt).exp(dt))\n target_psi.append(prop.dot(target_psi[-1]))\n\n checker = TimeStepChecker(target_psi, atol=dt)\n adev = models.AdiabaticEvolution(h0, h1, lambda t: t, dt, solver=\"rk4\",\n callbacks=[checker])\n final_psi = adev(final_time=1, initial_state=np.copy(target_psi[0]))\n\n\ndef test_adiabatic_evolution_execute_errors():\n h0 = hamiltonians.X(3)\n h1 = hamiltonians.TFIM(3)\n # Non-zero ``start_time``\n adev = models.AdiabaticEvolution(h0, h1, lambda t: t, dt=1e-2)\n with pytest.raises(NotImplementedError):\n final_state = adev(final_time=2, start_time=1)\n # execute without specifying variational parameters\n sp = lambda t, p: (1 - p) * np.sqrt(t) + p * t\n adevp = models.AdiabaticEvolution(h0, h1, sp, dt=1e-1)\n with pytest.raises(RuntimeError):\n final_state = adevp(final_time=1)\n\n\[email protected](\"solver,dt,atol\",\n [(\"exp\", 1e-1, 1e-10), (\"rk45\", 1e-2, 1e-2)])\ndef test_energy_callback(solver, dt, atol):\n \"\"\"Test using energy callback in adiabatic evolution.\"\"\"\n h0 = hamiltonians.X(2)\n h1 = hamiltonians.TFIM(2)\n energy = callbacks.Energy(h1)\n adev = models.AdiabaticEvolution(h0, h1, lambda t: t, dt=dt,\n callbacks=[energy], solver=solver)\n final_psi = adev(final_time=1)\n\n target_psi = np.ones(4) / 2\n calc_energy = lambda psi: psi.conj().dot(K.to_numpy(h1.matrix).dot(psi))\n target_energies = [calc_energy(target_psi)]\n ham = lambda t: h0 * (1 - t) + h1 * t\n for n in range(int(1 / dt)):\n prop = K.to_numpy(ham(n * dt).exp(dt))\n target_psi = prop.dot(target_psi)\n target_energies.append(calc_energy(target_psi))\n\n assert_states_equal(final_psi, target_psi, atol=atol)\n target_energies = K.cast(target_energies)\n K.assert_allclose(energy[:], 
target_energies, atol=atol)\n\n\ntest_names = \"method,options,messages,dense,filename\"\ntest_values = [\n (\"BFGS\", {'maxiter': 1}, True, True, \"adiabatic_bfgs.out\"),\n (\"BFGS\", {'maxiter': 1}, True, False, \"trotter_adiabatic_bfgs.out\"),\n (\"sgd\", {\"nepochs\": 5}, False, True, None)\n ]\[email protected](test_names, test_values)\ndef test_scheduling_optimization(method, options, messages, dense, filename):\n \"\"\"Test optimization of s(t).\"\"\"\n from qibo.tests.test_models_variational import assert_regression_fixture\n h0 = hamiltonians.X(3, dense=dense)\n h1 = hamiltonians.TFIM(3, dense=dense)\n sp = lambda t, p: (1 - p) * np.sqrt(t) + p * t\n adevp = models.AdiabaticEvolution(h0, h1, sp, dt=1e-1)\n\n if method == \"sgd\":\n from qibo import K\n if not K.supports_gradients:\n with pytest.raises(RuntimeError):\n best, params, _ = adevp.minimize([0.5, 1], method=method, options=options,\n messages=messages)\n else:\n best, params, _ = adevp.minimize([0.5, 1], method=method, options=options,\n messages=messages)\n\n if filename is not None:\n assert_regression_fixture(params, filename)\n",
"\"\"\"Adiabatic evolution scheduling optimization for the Ising Hamiltonian.\"\"\"\nimport argparse\nimport numpy as np\nfrom qibo import callbacks, hamiltonians, models\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--nqubits\", default=4, type=int)\nparser.add_argument(\"--hfield\", default=1, type=float)\nparser.add_argument(\"--params\", default=\"1\", type=str)\nparser.add_argument(\"--dt\", default=1e-2, type=float)\nparser.add_argument(\"--solver\", default=\"exp\", type=str)\nparser.add_argument(\"--method\", default=\"Powell\", type=str)\nparser.add_argument(\"--maxiter\", default=None, type=int)\nparser.add_argument(\"--save\", default=None, type=str)\n\n\ndef spolynomial(t, params):\n \"\"\"General polynomial scheduling satisfying s(0)=0 and s(1)=1\"\"\"\n f = sum(p * t ** (i + 2) for i, p in enumerate(params))\n f += (1 - np.sum(params)) * t\n return f\n\n\ndef main(nqubits, hfield, params, dt, solver, method, maxiter, save):\n \"\"\"Optimizes the scheduling of the adiabatic evolution.\n\n The ansatz for s(t) is a polynomial whose order is defined by the length of\n ``params`` given.\n\n Args:\n nqubits (int): Number of qubits in the system.\n hfield (float): Transverse field Ising model h-field h value.\n params (str): Initial guess for the free parameters.\n dt (float): Time step used for integration.\n solver (str): Solver used for integration.\n method (str): Which scipy optimizer to use.\n maxiter (int): Maximum iterations for scipy optimizer.\n save (str): Name to use for saving optimization history.\n If ``None`` history will not be saved.\n \"\"\"\n h0 = hamiltonians.X(nqubits)\n h1 = hamiltonians.TFIM(nqubits, h=hfield)\n\n # Calculate target values (H1 ground state)\n target_state = h1.ground_state()\n target_energy = h1.eigenvalues()[0].numpy().real\n\n # Check ground state\n state_energy = callbacks.Energy(h1)(target_state).numpy()\n np.testing.assert_allclose(state_energy.real, target_energy)\n\n evolution = models.AdiabaticEvolution(h0, h1, spolynomial, dt=dt,\n solver=solver)\n options = {\"maxiter\": maxiter, \"disp\": True}\n energy, parameters, _ = evolution.minimize(params, method=method,\n options=options,\n messages=True)\n\n print(\"\\nBest energy found:\", energy)\n print(\"Final parameters:\", parameters)\n\n final_state = evolution(parameters[-1])\n overlap = callbacks.Overlap(target_state)(final_state).numpy()\n print(\"Target energy:\", target_energy)\n print(\"Overlap:\", overlap)\n\n if save:\n evolution.opt_history[\"loss\"].append(target_energy)\n np.save(f\"optparams/{save}_n{nqubits}_loss.npy\",\n evolution.opt_history[\"loss\"])\n np.save(f\"optparams/{save}_n{nqubits}_params.npy\",\n evolution.opt_history[\"params\"])\n\n\nif __name__ == \"__main__\":\n args = vars(parser.parse_args())\n args[\"params\"] = [float(x) for x in args[\"params\"].split(\",\")]\n main(**args)\n"
] | [
[
"numpy.abs",
"numpy.random.seed",
"matplotlib.colors.Normalize",
"numpy.argmax",
"numpy.random.randn",
"matplotlib.cm.get_cmap",
"numpy.angle",
"numpy.sum"
],
[
"numpy.diag",
"numpy.random.random",
"numpy.sqrt",
"numpy.linspace",
"numpy.cos",
"scipy.linalg.expm",
"numpy.ones",
"numpy.copy",
"numpy.exp",
"numpy.array"
],
[
"numpy.sum",
"numpy.save",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZeQing-Liu/keras-bert | [
"2d3590dac33b4f097aefbff3d42b630626fd157d"
] | [
"tests/layers/test_task_embed.py"
] | [
"import unittest\n\nimport numpy as np\nfrom tensorflow import keras\nfrom tensorflow.keras import backend as K\n\nfrom keras_bert.layers import TaskEmbedding\n\n\nclass TestTaskEmbedding(unittest.TestCase):\n\n def test_mask_zero(self):\n embed_input = keras.layers.Input(shape=(5, 4))\n task_input = keras.layers.Input(shape=(1,))\n task_embed = TaskEmbedding(input_dim=2, output_dim=4, mask_zero=True)([embed_input, task_input])\n func = K.function([embed_input, task_input], [task_embed])\n embed, task = np.random.random((2, 5, 4)), np.array([[0], [1]])\n output = func([embed, task])[0]\n self.assertTrue(np.allclose(embed[0], output[0]))\n self.assertFalse(np.allclose(embed[1], output[1]))\n"
] | [
[
"numpy.random.random",
"numpy.allclose",
"tensorflow.keras.backend.function",
"numpy.array",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
iascchen/ai_study_notes | [
"03f46c5e37670c10bd99000d979940db8878f36c",
"03f46c5e37670c10bd99000d979940db8878f36c"
] | [
"src/study_keras/6_hello_gan/hello_gan_cifar10.py",
"src/study_keras/1_hello_mnist/hello_mnist.py"
] | [
"import os\n\nimport numpy as np\nimport tensorflow.keras as keras\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras import layers\n\nlatent_dim = 32\nheight = 32\nwidth = 32\nchannels = 3\n\n# =========================================\n\ngenerator_input = keras.Input(shape=(latent_dim,))\n\n# First, transform the input into a 16x16 128-channels feature map\nx = layers.Dense(128 * 16 * 16)(generator_input)\nx = layers.LeakyReLU()(x)\nx = layers.Reshape((16, 16, 128))(x)\n\n# Then, add a convolution layer\nx = layers.Conv2D(256, 5, padding='same')(x)\nx = layers.LeakyReLU()(x)\n\n# Upsample to 32x32\nx = layers.Conv2DTranspose(256, 4, strides=2, padding='same')(x)\nx = layers.LeakyReLU()(x)\n\n# Few more conv layers\nx = layers.Conv2D(256, 5, padding='same')(x)\nx = layers.LeakyReLU()(x)\nx = layers.Conv2D(256, 5, padding='same')(x)\nx = layers.LeakyReLU()(x)\n\n# Produce a 32x32 1-channel feature map\nx = layers.Conv2D(channels, 7, activation='tanh', padding='same')(x)\ngenerator = keras.models.Model(generator_input, x)\ngenerator.summary()\n\n# =========================================\n\ndiscriminator_input = layers.Input(shape=(height, width, channels))\nx = layers.Conv2D(128, 3)(discriminator_input)\nx = layers.LeakyReLU()(x)\nx = layers.Conv2D(128, 4, strides=2)(x)\nx = layers.LeakyReLU()(x)\nx = layers.Conv2D(128, 4, strides=2)(x)\nx = layers.LeakyReLU()(x)\nx = layers.Conv2D(128, 4, strides=2)(x)\nx = layers.LeakyReLU()(x)\nx = layers.Flatten()(x)\n\n# One dropout layer - important trick!\nx = layers.Dropout(0.4)(x)\n\n# Classification layer\nx = layers.Dense(1, activation='sigmoid')(x)\n\ndiscriminator = keras.models.Model(discriminator_input, x)\ndiscriminator.summary()\n\n# To stabilize training, we use learning rate decay\n# and gradient clipping (by value) in the optimizer.\ndiscriminator_optimizer = keras.optimizers.RMSprop(lr=0.0008, clipvalue=1.0, decay=1e-8)\ndiscriminator.compile(optimizer=discriminator_optimizer, loss='binary_crossentropy')\n\n# =========================================\n\n# Set discriminator weights to non-trainable\n# (will only apply to the `gan` model)\ndiscriminator.trainable = False\n\ngan_input = keras.Input(shape=(latent_dim,))\ngan_output = discriminator(generator(gan_input))\ngan = keras.models.Model(gan_input, gan_output)\n\ngan_optimizer = keras.optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8)\ngan.compile(optimizer=gan_optimizer, loss='binary_crossentropy')\n\ngan.summary()\n\n# =========================================\n\n# Load CIFAR10 data\n(x_train, y_train), (_, _) = keras.datasets.cifar10.load_data()\n\n# Select frog images (class 6)\nx_train = x_train[y_train.flatten() == 6]\n\n# Normalize data\nx_train = x_train.reshape(\n (x_train.shape[0],) + (height, width, channels)).astype('float32') / 255.\n\niterations = 10000\nbatch_size = 20\nsave_dir = './gan_images'\n\n# Start training loop\nstart = 0\nfor step in range(iterations):\n # Sample random points in the latent space\n random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))\n\n # Decode them to fake images\n generated_images = generator.predict(random_latent_vectors)\n\n # Combine them with real images\n stop = start + batch_size\n real_images = x_train[start: stop]\n combined_images = np.concatenate([generated_images, real_images])\n\n # Assemble labels discriminating real from fake images\n labels = np.concatenate([np.ones((batch_size, 1)),\n np.zeros((batch_size, 1))])\n # Add random noise to the labels - important trick!\n labels 
+= 0.05 * np.random.random(labels.shape)\n\n # Train the discriminator\n d_loss = discriminator.train_on_batch(combined_images, labels)\n\n # sample random points in the latent space\n random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))\n\n # Assemble labels that say \"all real images\"\n misleading_targets = np.zeros((batch_size, 1))\n\n # Train the generator (via the gan model,\n # where the discriminator weights are frozen)\n a_loss = gan.train_on_batch(random_latent_vectors, misleading_targets)\n\n start += batch_size\n if start > len(x_train) - batch_size:\n start = 0\n\n # Occasionally save / plot\n if step % 100 == 0:\n # Save model weights\n gan.save_weights('gan.h5')\n\n # Print metrics\n print('discriminator loss at step %s: %s' % (step, d_loss))\n print('adversarial loss at step %s: %s' % (step, a_loss))\n\n # Save one generated image\n img = image.array_to_img(generated_images[0] * 255., scale=False)\n img.save(os.path.join(save_dir, 'generated_frog' + str(step) + '.png'))\n\n # Save one real image, for comparison\n img = image.array_to_img(real_images[0] * 255., scale=False)\n img.save(os.path.join(save_dir, 'real_frog' + str(step) + '.png'))\n",
"from tensorflow.keras import datasets, layers, models\n\nmnist = datasets.mnist\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\nmodel = models.Sequential([\n layers.Flatten(input_shape=(28, 28)),\n layers.Dense(128, activation='relu'),\n layers.Dropout(0.2),\n layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n\nmodel.fit(x_train, y_train, epochs=5)\nmodel.evaluate(x_test, y_test)\n"
] | [
[
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.Input",
"numpy.random.random",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.datasets.cifar10.load_data",
"numpy.ones",
"numpy.concatenate",
"numpy.random.normal",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.preprocessing.image.array_to_img",
"tensorflow.keras.layers.Flatten",
"numpy.zeros",
"tensorflow.keras.layers.Input"
],
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dropout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
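
The GAN cell in the row above relies on Keras capturing the `trainable` flag at compile time: the standalone discriminator (compiled before freezing) keeps learning from `train_on_batch`, while the copy embedded in `gan` (compiled after `discriminator.trainable = False`) stays fixed. A minimal sketch of that mechanism, assuming a TF 2.x Keras install; the toy layer sizes are illustrative, not taken from the original script:

import tensorflow.keras as keras
from tensorflow.keras import layers

# Discriminator compiled while trainable: its own train_on_batch updates weights.
disc = keras.Sequential([layers.Dense(1, activation='sigmoid', input_shape=(8,))])
disc.compile(optimizer='rmsprop', loss='binary_crossentropy')

# Frozen only from the point of view of models compiled afterwards.
disc.trainable = False
gen = keras.Sequential([layers.Dense(8, input_shape=(4,))])
gan = keras.Sequential([gen, disc])
gan.compile(optimizer='rmsprop', loss='binary_crossentropy')

assert len(disc.trainable_weights) == 0  # frozen as seen by `gan`
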
undeadinu/magenta | [
"1a5774e429849a3e79d00fc6d152ddd4a2d1ad31",
"1a5774e429849a3e79d00fc6d152ddd4a2d1ad31",
"1a5774e429849a3e79d00fc6d152ddd4a2d1ad31",
"1a5774e429849a3e79d00fc6d152ddd4a2d1ad31"
] | [
"magenta/pipelines/note_sequence_pipelines_test.py",
"magenta/models/music_vae/lstm_utils.py",
"magenta/models/coconet/lib_evaluation.py",
"magenta/models/drums_rnn/drums_rnn_model.py"
] | [
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for note_sequence_pipelines.\"\"\"\n\nfrom magenta.common import testing_lib as common_testing_lib\nfrom magenta.music import sequences_lib\nfrom magenta.music import testing_lib\nfrom magenta.pipelines import note_sequence_pipelines\nfrom magenta.protobuf import music_pb2\nimport tensorflow as tf\n\n\nclass PipelineUnitsCommonTest(tf.test.TestCase):\n\n def _unit_transform_test(self, unit, input_instance,\n expected_outputs):\n outputs = unit.transform(input_instance)\n self.assertTrue(isinstance(outputs, list))\n common_testing_lib.assert_set_equality(self, expected_outputs, outputs)\n self.assertEqual(unit.input_type, type(input_instance))\n if outputs:\n self.assertEqual(unit.output_type, type(outputs[0]))\n\n def testSplitter(self):\n note_sequence = common_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n time_signatures: {\n numerator: 4\n denominator: 4}\n tempos: {\n qpm: 60}\"\"\")\n testing_lib.add_track_to_sequence(\n note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n expected_sequences = sequences_lib.split_note_sequence(note_sequence, 1.0)\n\n unit = note_sequence_pipelines.Splitter(1.0)\n self._unit_transform_test(unit, note_sequence, expected_sequences)\n\n def testTimeChangeSplitter(self):\n note_sequence = common_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n time_signatures: {\n time: 2.0\n numerator: 3\n denominator: 4}\n tempos: {\n qpm: 60}\"\"\")\n testing_lib.add_track_to_sequence(\n note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n expected_sequences = sequences_lib.split_note_sequence_on_time_changes(\n note_sequence)\n\n unit = note_sequence_pipelines.TimeChangeSplitter()\n self._unit_transform_test(unit, note_sequence, expected_sequences)\n\n def testQuantizer(self):\n steps_per_quarter = 4\n note_sequence = common_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n time_signatures: {\n numerator: 4\n denominator: 4}\n tempos: {\n qpm: 60}\"\"\")\n testing_lib.add_track_to_sequence(\n note_sequence, 0,\n [(12, 100, 0.01, 10.0), (11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50),\n (55, 120, 4.0, 4.01), (52, 99, 4.75, 5.0)])\n expected_quantized_sequence = sequences_lib.quantize_note_sequence(\n note_sequence, steps_per_quarter)\n\n unit = note_sequence_pipelines.Quantizer(steps_per_quarter)\n self._unit_transform_test(unit, note_sequence,\n [expected_quantized_sequence])\n\n def testSustainPipeline(self):\n note_sequence = common_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n time_signatures: {\n numerator: 4\n denominator: 4}\n tempos: {\n qpm: 60}\"\"\")\n testing_lib.add_track_to_sequence(\n note_sequence, 0,\n [(11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50), (55, 120, 4.0, 4.01)])\n 
testing_lib.add_control_changes_to_sequence(\n note_sequence, 0,\n [(0.0, 64, 127), (0.75, 64, 0), (2.0, 64, 127), (3.0, 64, 0),\n (3.75, 64, 127), (4.5, 64, 127), (4.8, 64, 0), (4.9, 64, 127),\n (6.0, 64, 0)])\n expected_sequence = sequences_lib.apply_sustain_control_changes(\n note_sequence)\n\n unit = note_sequence_pipelines.SustainPipeline()\n self._unit_transform_test(unit, note_sequence, [expected_sequence])\n\n def testStretchPipeline(self):\n note_sequence = common_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n time_signatures: {\n time: 1.0\n numerator: 4\n denominator: 4}\n tempos: {\n qpm: 60}\"\"\")\n testing_lib.add_track_to_sequence(\n note_sequence, 0,\n [(11, 55, 0.22, 0.50), (40, 45, 2.50, 3.50), (55, 120, 4.0, 4.01)])\n\n expected_sequences = [\n sequences_lib.stretch_note_sequence(note_sequence, 0.5),\n sequences_lib.stretch_note_sequence(note_sequence, 1.0),\n sequences_lib.stretch_note_sequence(note_sequence, 1.5)]\n\n unit = note_sequence_pipelines.StretchPipeline(\n stretch_factors=[0.5, 1.0, 1.5])\n self._unit_transform_test(unit, note_sequence, expected_sequences)\n\n def testTranspositionPipeline(self):\n note_sequence = common_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n time_signatures: {\n numerator: 4\n denominator: 4}\n tempos: {\n qpm: 60}\"\"\")\n tp = note_sequence_pipelines.TranspositionPipeline(range(0, 2))\n testing_lib.add_track_to_sequence(\n note_sequence, 0,\n [(12, 100, 1.0, 4.0)])\n testing_lib.add_track_to_sequence(\n note_sequence, 1,\n [(36, 100, 2.0, 2.01)],\n is_drum=True)\n transposed = tp.transform(note_sequence)\n self.assertEqual(2, len(transposed))\n self.assertEqual(2, len(transposed[0].notes))\n self.assertEqual(2, len(transposed[1].notes))\n self.assertEqual(12, transposed[0].notes[0].pitch)\n self.assertEqual(13, transposed[1].notes[0].pitch)\n self.assertEqual(36, transposed[0].notes[1].pitch)\n self.assertEqual(36, transposed[1].notes[1].pitch)\n\n def testTranspositionPipelineOutOfRangeNotes(self):\n note_sequence = common_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n time_signatures: {\n numerator: 4\n denominator: 4}\n tempos: {\n qpm: 60}\"\"\")\n tp = note_sequence_pipelines.TranspositionPipeline(\n range(-1, 2), min_pitch=0, max_pitch=12)\n testing_lib.add_track_to_sequence(\n note_sequence, 0,\n [(10, 100, 1.0, 2.0), (12, 100, 2.0, 4.0), (13, 100, 4.0, 5.0)])\n transposed = tp.transform(note_sequence)\n self.assertEqual(1, len(transposed))\n self.assertEqual(3, len(transposed[0].notes))\n self.assertEqual(9, transposed[0].notes[0].pitch)\n self.assertEqual(11, transposed[0].notes[1].pitch)\n self.assertEqual(12, transposed[0].notes[2].pitch)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"MusicVAE LSTM model utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nfrom tensorflow.contrib import seq2seq\nfrom tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn\nfrom tensorflow.python.util import nest\n\n\ndef rnn_cell(rnn_cell_size, dropout_keep_prob, residual, is_training=True):\n \"\"\"Builds an LSTMBlockCell based on the given parameters.\"\"\"\n dropout_keep_prob = dropout_keep_prob if is_training else 1.0\n cells = []\n for i in range(len(rnn_cell_size)):\n cell = rnn.LSTMBlockCell(rnn_cell_size[i])\n if residual:\n cell = rnn.ResidualWrapper(cell)\n if i == 0 or rnn_cell_size[i] != rnn_cell_size[i - 1]:\n cell = rnn.InputProjectionWrapper(cell, rnn_cell_size[i])\n cell = rnn.DropoutWrapper(\n cell,\n input_keep_prob=dropout_keep_prob)\n cells.append(cell)\n return rnn.MultiRNNCell(cells)\n\n\ndef cudnn_lstm_layer(layer_sizes, dropout_keep_prob, is_training=True,\n name_or_scope='rnn'):\n \"\"\"Builds a CudnnLSTM Layer based on the given parameters.\"\"\"\n dropout_keep_prob = dropout_keep_prob if is_training else 1.0\n for ls in layer_sizes:\n if ls != layer_sizes[0]:\n raise ValueError(\n 'CudnnLSTM does not support layers with differing sizes. 
Got: %s' %\n layer_sizes)\n lstm = cudnn_rnn.CudnnLSTM(\n num_layers=len(layer_sizes),\n num_units=layer_sizes[0],\n direction='unidirectional',\n dropout=1.0 - dropout_keep_prob,\n name=name_or_scope)\n\n class BackwardCompatibleCudnnLSTMSaveable(\n tf.contrib.cudnn_rnn.CudnnLSTMSaveable):\n \"\"\"Overrides CudnnLSTMSaveable for backward-compatibility.\"\"\"\n\n def _cudnn_to_tf_biases(self, *cu_biases):\n \"\"\"Overrides to subtract 1.0 from `forget_bias` (see BasicLSTMCell).\"\"\"\n (tf_bias,) = (\n super(BackwardCompatibleCudnnLSTMSaveable, self)._cudnn_to_tf_biases(\n *cu_biases))\n i, c, f, o = tf.split(tf_bias, 4)\n # Non-Cudnn LSTM cells add 1.0 to the forget bias variable.\n return (tf.concat([i, c, f - 1.0, o], axis=0),)\n\n def _tf_to_cudnn_biases(self, *tf_biases):\n \"\"\"Overrides to add 1.0 to `forget_bias` (see BasicLSTMCell).\"\"\"\n (tf_bias,) = tf_biases\n i, c, f, o = tf.split(tf_bias, 4)\n # Non-Cudnn LSTM cells add 1.0 to the forget bias variable.\n return (\n super(BackwardCompatibleCudnnLSTMSaveable, self)._tf_to_cudnn_biases(\n tf.concat([i, c, f + 1.0, o], axis=0)))\n\n def _TFCanonicalNamePrefix(self, layer, is_fwd=True):\n \"\"\"Overrides for backward-compatible variable names.\"\"\"\n if self._direction == 'unidirectional':\n return 'multi_rnn_cell/cell_%d/lstm_cell' % layer\n else:\n return (\n 'cell_%d/bidirectional_rnn/%s/multi_rnn_cell/cell_0/lstm_cell'\n % (layer, 'fw' if is_fwd else 'bw'))\n\n lstm._saveable_cls = BackwardCompatibleCudnnLSTMSaveable # pylint:disable=protected-access\n return lstm\n\n\ndef state_tuples_to_cudnn_lstm_state(lstm_state_tuples):\n \"\"\"Convert tuple of LSTMStateTuples to CudnnLSTM format.\"\"\"\n h = tf.stack([s.h for s in lstm_state_tuples])\n c = tf.stack([s.c for s in lstm_state_tuples])\n return (h, c)\n\n\ndef cudnn_lstm_state_to_state_tuples(cudnn_lstm_state):\n \"\"\"Convert CudnnLSTM format to tuple of LSTMStateTuples.\"\"\"\n h, c = cudnn_lstm_state\n return tuple(\n rnn.LSTMStateTuple(h=h_i, c=c_i)\n for h_i, c_i in zip(tf.unstack(h), tf.unstack(c)))\n\n\ndef _get_final_index(sequence_length, time_major=True):\n indices = [tf.maximum(0, sequence_length - 1),\n tf.range(sequence_length.shape[0])]\n if not time_major:\n indices = indices[-1::-1]\n return tf.stack(indices, axis=1)\n\n\ndef get_final(sequence, sequence_length, time_major=True):\n \"\"\"Get the final item in a batch of sequences.\"\"\"\n final_index = _get_final_index(sequence_length, time_major)\n return tf.gather_nd(sequence, final_index)\n\n\ndef set_final(sequence, sequence_length, values, time_major=False):\n \"\"\"Sets the final values in a batch of sequences, and clears those after.\"\"\"\n sequence_batch_major = (\n sequence if not time_major else tf.transpose(sequence, [1, 0, 2]))\n final_index = _get_final_index(sequence_length, time_major=False)\n mask = tf.sequence_mask(\n tf.maximum(0, sequence_length - 1),\n maxlen=sequence_batch_major.shape[1],\n dtype=tf.float32)\n sequence_batch_major = (\n tf.expand_dims(mask, axis=-1) * sequence_batch_major +\n tf.scatter_nd(final_index, values, tf.shape(sequence_batch_major)))\n if time_major:\n return tf.transpose(sequence_batch_major, [1, 0, 2])\n return sequence_batch_major\n\n\ndef initial_cell_state_from_embedding(cell, z, name=None):\n \"\"\"Computes an initial RNN `cell` state from an embedding, `z`.\"\"\"\n flat_state_sizes = nest.flatten(cell.state_size)\n return nest.pack_sequence_as(\n cell.zero_state(batch_size=z.shape[0], dtype=tf.float32),\n tf.split(\n tf.layers.dense(\n z,\n 
sum(flat_state_sizes),\n activation=tf.tanh,\n kernel_initializer=tf.random_normal_initializer(stddev=0.001),\n name=name),\n flat_state_sizes,\n axis=1))\n\n\ndef get_sampling_probability(hparams, is_training):\n \"\"\"Returns the sampling probability as a tensor based on the hparams.\n\n Supports three sampling schedules (`hparams.sampling_schedule`):\n constant: `hparams.sampling_rate` is the sampling probability. Must be in\n the interval [0, 1].\n exponential: `hparams.sampling_rate` is the base of the decay exponential.\n Must be in the interval (0, 1). Larger values imply a slower increase in\n sampling.\n inverse_sigmoid: `hparams.sampling_rate` is in the interval [1, inf).\n Larger values imply a slower increase in sampling.\n\n A constant value of 0 is returned if `hparams.sampling_schedule` is undefined.\n\n If not training and a non-0 sampling schedule is defined, a constant value of\n 1 is returned since this is assumed to be a test/eval job associated with a\n scheduled sampling trainer.\n\n Args:\n hparams: An HParams object containing model hyperparameters.\n is_training: Whether or not the model is being used for training.\n\n Raises:\n ValueError: On an invalid `sampling_schedule` or `sampling_rate` hparam.\n \"\"\"\n if (not hasattr(hparams, 'sampling_schedule') or\n not hparams.sampling_schedule or\n (hparams.sampling_schedule == 'constant' and hparams.sampling_rate == 0)):\n return tf.constant(0.0)\n\n if not is_training:\n # This is likely an eval/test job associated with a training job using\n # scheduled sampling.\n tf.logging.warning(\n 'Setting non-training sampling schedule from %s:%f to constant:1.0.',\n hparams.sampling_schedule, hparams.sampling_rate)\n hparams.sampling_schedule = 'constant'\n hparams.sampling_rate = 1.0\n\n schedule = hparams.sampling_schedule\n rate = hparams.sampling_rate\n step = tf.to_float(tf.train.get_global_step())\n\n if schedule == 'constant':\n if not 0 <= rate <= 1:\n raise ValueError(\n '`constant` sampling rate must be in the interval [0, 1]. Got %f.'\n % rate)\n sampling_probability = tf.to_float(rate)\n elif schedule == 'inverse_sigmoid':\n if rate < 1:\n raise ValueError(\n '`inverse_sigmoid` sampling rate must be at least 1. Got %f.' % rate)\n k = tf.to_float(rate)\n sampling_probability = 1.0 - k / (k + tf.exp(step / k))\n elif schedule == 'exponential':\n if not 0 < rate < 1:\n raise ValueError(\n '`exponential` sampling rate must be in the interval (0, 1). 
Got %f.'\n % hparams.sampling_rate)\n k = tf.to_float(rate)\n sampling_probability = 1.0 - tf.pow(k, step)\n else:\n raise ValueError('Invalid `sampling_schedule`: %s' % schedule)\n tf.summary.scalar('sampling_probability', sampling_probability)\n return sampling_probability\n\n\nclass LstmDecodeResults(\n collections.namedtuple('LstmDecodeResults',\n ('rnn_input', 'rnn_output', 'samples', 'final_state',\n 'final_sequence_lengths'))):\n pass\n\n\nclass Seq2SeqLstmDecoderOutput(\n collections.namedtuple('BasicDecoderOutput',\n ('rnn_input', 'rnn_output', 'sample_id'))):\n pass\n\n\nclass Seq2SeqLstmDecoder(seq2seq.BasicDecoder):\n \"\"\"Overrides BaseDecoder to include rnn inputs in the output.\"\"\"\n\n def __init__(self, cell, helper, initial_state, input_shape,\n output_layer=None):\n self._input_shape = input_shape\n super(Seq2SeqLstmDecoder, self).__init__(\n cell, helper, initial_state, output_layer)\n\n @property\n def output_size(self):\n return Seq2SeqLstmDecoderOutput(\n rnn_input=self._input_shape,\n rnn_output=self._rnn_output_size(),\n sample_id=self._helper.sample_ids_shape)\n\n @property\n def output_dtype(self):\n dtype = nest.flatten(self._initial_state)[0].dtype\n return Seq2SeqLstmDecoderOutput(\n dtype,\n nest.map_structure(lambda _: dtype, self._rnn_output_size()),\n self._helper.sample_ids_dtype)\n\n def step(self, time, inputs, state, name=None):\n results = super(Seq2SeqLstmDecoder, self).step(time, inputs, state, name)\n outputs = Seq2SeqLstmDecoderOutput(\n rnn_input=inputs,\n rnn_output=results[0].rnn_output,\n sample_id=results[0].sample_id)\n return (outputs,) + results[1:]\n\n\ndef maybe_split_sequence_lengths(sequence_length, num_splits, total_length):\n \"\"\"Validates and splits `sequence_length`, if necessary.\n\n Returned value must be used in graph for all validations to be executed.\n\n Args:\n sequence_length: A batch of sequence lengths, either sized `[batch_size]`\n and equal to either 0 or `total_length`, or sized\n `[batch_size, num_splits]`.\n num_splits: The scalar number of splits of the full sequences.\n total_length: The scalar total sequence length (potentially padded).\n\n Returns:\n sequence_length: If input shape was `[batch_size, num_splits]`, returns the\n same Tensor. 
Otherwise, returns a Tensor of that shape with each input\n length in the batch divided by `num_splits`.\n Raises:\n ValueError: If `sequence_length` is not shaped `[batch_size]` or\n `[batch_size, num_splits]`.\n tf.errors.InvalidArgumentError: If `sequence_length` is shaped\n `[batch_size]` and all values are not either 0 or `total_length`.\n \"\"\"\n if sequence_length.shape.ndims == 1:\n if total_length % num_splits != 0:\n raise ValueError(\n '`total_length` must be evenly divisible by `num_splits`.')\n with tf.control_dependencies(\n [tf.Assert(\n tf.reduce_all(\n tf.logical_or(tf.equal(sequence_length, 0),\n tf.equal(sequence_length, total_length))),\n data=[sequence_length])]):\n sequence_length = (\n tf.tile(tf.expand_dims(sequence_length, axis=1), [1, num_splits]) //\n num_splits)\n elif sequence_length.shape.ndims == 2:\n with tf.control_dependencies([\n tf.assert_less_equal(\n sequence_length,\n tf.constant(total_length // num_splits, tf.int32),\n message='Segment length cannot be more than '\n '`total_length / num_splits`.')]):\n sequence_length = tf.identity(sequence_length)\n sequence_length.set_shape([sequence_length.shape[0], num_splits])\n else:\n raise ValueError(\n 'Sequence lengths must be given as a vector or a 2D Tensor whose '\n 'second dimension size matches its initial hierarchical split. Got '\n 'shape: %s' % sequence_length.shape.as_list())\n return sequence_length\n",
"\"\"\"Helpers for evaluating the log likelihood of pianorolls under a model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\n\nfrom magenta.models.coconet import lib_tfutil\nfrom magenta.models.coconet import lib_util\nimport numpy as np\nfrom scipy.misc import logsumexp\nimport tensorflow as tf\n\n\ndef evaluate(evaluator, pianorolls):\n \"\"\"Evaluate a sequence of pianorolls.\n\n The returned dictionary contains two kinds of evaluation results: the \"unit\"\n losses and the \"example\" losses. The unit loss measures the negative\n log-likelihood of each unit (e.g. note or frame). The example loss is the\n average of the unit loss across the example. Additionally, the dictionary\n contains various aggregates such as the mean and standard error of the mean\n of both losses, as well as min/max and quartile bounds.\n\n Args:\n evaluator: an instance of BaseEvaluator\n pianorolls: sequence of pianorolls to evaluate\n\n Returns:\n A dictionary with evaluation results.\n \"\"\"\n example_losses = []\n unit_losses = []\n\n for pi, pianoroll in enumerate(pianorolls):\n tf.logging.info(\"evaluating piece %d\", pi)\n start_time = time.time()\n\n unit_loss = -evaluator(pianoroll)\n example_loss = np.mean(unit_loss)\n\n example_losses.append(example_loss)\n unit_losses.append(unit_loss)\n\n duration = (time.time() - start_time) / 60.\n _report(unit_loss, prefix=\"%i %5.2fmin \" % (pi, duration))\n\n if np.isinf(example_loss):\n break\n\n _report(example_losses, prefix=\"FINAL example-level \")\n _report(unit_losses, prefix=\"FINAL unit-level \")\n\n rval = dict(example_losses=example_losses, unit_losses=unit_losses)\n rval.update((\"example_%s\" % k, v) for k, v in _stats(example_losses).items())\n rval.update(\n (\"unit_%s\" % k, v) for k, v in _stats(_flatcat(unit_losses)).items())\n return rval\n\n\ndef _report(losses, prefix=\"\"):\n tf.logging.info(\"%s loss %s\", prefix, _statstr(_flatcat(losses)))\n\n\ndef _stats(x):\n return dict(\n mean=np.mean(x),\n sem=np.std(x) / np.sqrt(len(x)),\n min=np.min(x),\n max=np.max(x),\n q1=np.percentile(x, 25),\n q2=np.percentile(x, 50),\n q3=np.percentile(x, 75))\n\n\ndef _statstr(x):\n return (\"mean/sem: {mean:8.5f}+-{sem:8.5f} {min:.5f} < {q1:.5f} < {q2:.5f} < \"\n \"{q3:.5f} < {max:.5g}\").format(**_stats(x))\n\n\ndef _flatcat(xs):\n return np.concatenate([x.flatten() for x in xs])\n\n\nclass BaseEvaluator(lib_util.Factory):\n \"\"\"Evaluator base class.\"\"\"\n\n def __init__(self, wmodel, chronological):\n \"\"\"Initialize BaseEvaluator instance.\n\n Args:\n wmodel: WrappedModel instance\n chronological: whether to evaluate in chronological order or in any order\n \"\"\"\n self.wmodel = wmodel\n self.chronological = chronological\n\n def predictor(pianorolls, masks):\n p = self.wmodel.sess.run(\n self.wmodel.model.predictions,\n feed_dict={\n self.wmodel.model.pianorolls: pianorolls,\n self.wmodel.model.masks: masks\n })\n return p\n\n self.predictor = lib_tfutil.RobustPredictor(predictor)\n\n @property\n def hparams(self):\n return self.wmodel.hparams\n\n @property\n def separate_instruments(self):\n return self.wmodel.hparams.separate_instruments\n\n def __call__(self, pianoroll):\n \"\"\"Evaluate a single pianoroll.\n\n Args:\n pianoroll: a single pianoroll, shaped (tt, pp, ii)\n\n Returns:\n unit losses\n \"\"\"\n raise NotImplementedError()\n\n def _update_lls(self, lls, x, pxhat, t, d):\n \"\"\"Update accumulated log-likelihoods.\n\n Note: the shape of `lls` 
and the range of `d` depends on the \"number of\n variables per time step\" `dd`, which is the number of instruments if\n instruments if instruments are separated or the number of pitches otherwise.\n\n Args:\n lls: (tt, dd)-shaped array of unit log-likelihoods.\n x: the pianoroll being evaluated, shape (B, tt, P, I).\n pxhat: the probabilities output by the model, shape (B, tt, P, I).\n t: the batch of time indices being evaluated, shape (B,).\n d: the batch of variable indices being evaluated, shape (B,).\n \"\"\"\n # The code below assumes x is binary, so instead of x * log(px) which is\n # inconveniently NaN if both x and log(px) are zero, we can use\n # where(x, log(px), 0).\n assert np.array_equal(x, x.astype(bool))\n if self.separate_instruments:\n index = (np.arange(x.shape[0]), t, slice(None), d)\n else:\n index = (np.arange(x.shape[0]), t, d, slice(None))\n lls[t, d] = np.log(np.where(x[index], pxhat[index], 1)).sum(axis=1)\n\n\nclass FrameEvaluator(BaseEvaluator):\n \"\"\"Framewise evaluator.\n\n Evaluates pianorolls one frame at a time. That is, the model is judged for its\n prediction of entire frames at a time, conditioning on its own samples rather\n than the ground truth of other instruments/pitches in the same frame.\n\n The frames are evaluated in random order, and within each frame the\n instruments/pitches are evaluated in random order.\n \"\"\"\n key = \"frame\"\n\n def __call__(self, pianoroll):\n tt, pp, ii = pianoroll.shape\n assert self.separate_instruments or ii == 1\n dd = ii if self.separate_instruments else pp\n\n # Compile a batch with each frame being an example.\n bb = tt\n xs = np.tile(pianoroll[None], [bb, 1, 1, 1])\n\n ts, ds = self.draw_ordering(tt, dd)\n\n # Set up sequence of masks to predict the first (according to ordering)\n # instrument for each frame\n mask = []\n mask_scratch = np.ones([tt, pp, ii], dtype=np.float32)\n for j, (t, d) in enumerate(zip(ts, ds)):\n # When time rolls over, reveal the entire current frame for purposes of\n # predicting the next one.\n if j % dd != 0:\n continue\n mask.append(mask_scratch.copy())\n mask_scratch[t, :, :] = 0\n assert np.allclose(mask_scratch, 0)\n del mask_scratch\n mask = np.array(mask)\n\n lls = np.zeros([tt, dd], dtype=np.float32)\n\n # We can't parallelize within the frame, as we need the predictions of\n # some of the other instruments.\n # Hence we outer loop over the instruments and parallelize across frames.\n xs_scratch = xs.copy()\n for d_idx in range(dd):\n # Call out to the model to get predictions for the first instrument\n # at each time step.\n pxhats = self.predictor(xs_scratch, mask)\n\n t, d = ts[d_idx::dd], ds[d_idx::dd]\n assert len(t) == bb and len(d) == bb\n\n # Write in predictions and update mask.\n if self.separate_instruments:\n xs_scratch[np.arange(bb), t, :, d] = np.eye(pp)[np.argmax(\n pxhats[np.arange(bb), t, :, d], axis=1)]\n mask[np.arange(bb), t, :, d] = 0\n # Every example in the batch sees one frame more than the previous.\n assert np.allclose(\n (1 - mask).sum(axis=(1, 2, 3)),\n [(k * dd + d_idx + 1) * pp for k in range(mask.shape[0])])\n else:\n xs_scratch[np.arange(bb), t, d, :] = (\n pxhats[np.arange(bb), t, d, :] > 0.5)\n mask[np.arange(bb), t, d, :] = 0\n # Every example in the batch sees one frame more than the previous.\n assert np.allclose(\n (1 - mask).sum(axis=(1, 2, 3)),\n [(k * dd + d_idx + 1) * ii for k in range(mask.shape[0])])\n\n self._update_lls(lls, xs, pxhats, t, d)\n\n # conjunction over notes within frames; frame is the unit of prediction\n return 
lls.sum(axis=1)\n\n def draw_ordering(self, tt, dd):\n o = np.arange(tt, dtype=np.int32)\n if not self.chronological:\n np.random.shuffle(o)\n # random variable orderings within each time step\n o = o[:, None] * dd + np.arange(dd, dtype=np.int32)[None, :]\n for t in range(tt):\n np.random.shuffle(o[t])\n o = o.reshape([tt * dd])\n ts, ds = np.unravel_index(o.T, dims=(tt, dd))\n return ts, ds\n\n\nclass NoteEvaluator(BaseEvaluator):\n \"\"\"Evalutes note-based negative likelihood.\"\"\"\n key = \"note\"\n\n def __call__(self, pianoroll):\n tt, pp, ii = pianoroll.shape\n assert self.separate_instruments or ii == 1\n dd = ii if self.separate_instruments else pp\n\n # compile a batch with an example for each variable\n bb = tt * dd\n xs = np.tile(pianoroll[None], [bb, 1, 1, 1])\n\n ts, ds = self.draw_ordering(tt, dd)\n assert len(ts) == bb and len(ds) == bb\n\n # set up sequence of masks, one for each variable\n mask = []\n mask_scratch = np.ones([tt, pp, ii], dtype=np.float32)\n for unused_j, (t, d) in enumerate(zip(ts, ds)):\n mask.append(mask_scratch.copy())\n if self.separate_instruments:\n mask_scratch[t, :, d] = 0\n else:\n mask_scratch[t, d, :] = 0\n assert np.allclose(mask_scratch, 0)\n del mask_scratch\n mask = np.array(mask)\n\n pxhats = self.predictor(xs, mask)\n\n lls = np.zeros([tt, dd], dtype=np.float32)\n self._update_lls(lls, xs, pxhats, ts, ds)\n return lls\n\n def _draw_ordering(self, tt, dd):\n o = np.arange(tt * dd, dtype=np.int32)\n if not self.chronological:\n np.random.shuffle(o)\n ts, ds = np.unravel_index(o.T, dims=(tt, dd))\n return ts, ds\n\n\nclass EnsemblingEvaluator(object):\n \"\"\"Decorating for ensembled evaluation.\n\n Calls the decorated evaluator multiple times so as to evaluate according to\n multiple orderings. The likelihoods from different orderings are averaged\n in probability space, which gives a better result than averaging in log space\n (which would correspond to a geometric mean that is unnormalized and tends\n to waste probability mass).\n \"\"\"\n key = \"_ensembling\"\n\n def __init__(self, evaluator, ensemble_size):\n self.evaluator = evaluator\n self.ensemble_size = ensemble_size\n\n def __call__(self, pianoroll):\n lls = [self.evaluator(pianoroll) for _ in range(self.ensemble_size)]\n return logsumexp(lls, b=1. / len(lls), axis=0)\n",
"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Drums RNN model.\"\"\"\n\nimport magenta\nfrom magenta.models.shared import events_rnn_model\nimport magenta.music as mm\nimport tensorflow as tf\n\n\nclass DrumsRnnModel(events_rnn_model.EventSequenceRnnModel):\n \"\"\"Class for RNN drum track generation models.\"\"\"\n\n def generate_drum_track(self, num_steps, primer_drums, temperature=1.0,\n beam_size=1, branch_factor=1, steps_per_iteration=1):\n \"\"\"Generate a drum track from a primer drum track.\n\n Args:\n num_steps: The integer length in steps of the final drum track, after\n generation. Includes the primer.\n primer_drums: The primer drum track, a DrumTrack object.\n temperature: A float specifying how much to divide the logits by\n before computing the softmax. Greater than 1.0 makes drum tracks more\n random, less than 1.0 makes drum tracks less random.\n beam_size: An integer, beam size to use when generating drum tracks via\n beam search.\n branch_factor: An integer, beam search branch factor to use.\n steps_per_iteration: An integer, number of steps to take per beam search\n iteration.\n\n Returns:\n The generated DrumTrack object (which begins with the provided primer drum\n track).\n \"\"\"\n return self._generate_events(num_steps, primer_drums, temperature,\n beam_size, branch_factor, steps_per_iteration)\n\n def drum_track_log_likelihood(self, drums):\n \"\"\"Evaluate the log likelihood of a drum track under the model.\n\n Args:\n drums: The DrumTrack object for which to evaluate the log likelihood.\n\n Returns:\n The log likelihood of `drums` under this model.\n \"\"\"\n return self._evaluate_log_likelihood([drums])[0]\n\n\n# Default configurations.\ndefault_configs = {\n 'one_drum': events_rnn_model.EventSequenceRnnConfig(\n magenta.protobuf.generator_pb2.GeneratorDetails(\n id='one_drum',\n description='Drums RNN with 2-state encoding.'),\n magenta.music.OneHotEventSequenceEncoderDecoder(\n magenta.music.MultiDrumOneHotEncoding([\n [39] + # use hand clap as default when decoding\n list(range(mm.MIN_MIDI_PITCH, 39)) +\n list(range(39, mm.MAX_MIDI_PITCH + 1))])),\n tf.contrib.training.HParams(\n batch_size=128,\n rnn_layer_sizes=[128, 128],\n dropout_keep_prob=0.5,\n clip_norm=5,\n learning_rate=0.001),\n steps_per_quarter=2),\n\n 'drum_kit': events_rnn_model.EventSequenceRnnConfig(\n magenta.protobuf.generator_pb2.GeneratorDetails(\n id='drum_kit',\n description='Drums RNN with multiple drums and binary counters.'),\n magenta.music.LookbackEventSequenceEncoderDecoder(\n magenta.music.MultiDrumOneHotEncoding(),\n lookback_distances=[],\n binary_counter_bits=6),\n tf.contrib.training.HParams(\n batch_size=128,\n rnn_layer_sizes=[256, 256, 256],\n dropout_keep_prob=0.5,\n attn_length=32,\n clip_norm=3,\n learning_rate=0.001))\n}\n"
] | [
[
"tensorflow.test.main"
],
[
"tensorflow.logging.warning",
"tensorflow.contrib.rnn.InputProjectionWrapper",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.equal",
"tensorflow.contrib.rnn.LSTMBlockCell",
"tensorflow.summary.scalar",
"tensorflow.train.get_global_step",
"tensorflow.contrib.rnn.MultiRNNCell",
"tensorflow.to_float",
"tensorflow.random_normal_initializer",
"tensorflow.gather_nd",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.pow",
"tensorflow.identity",
"tensorflow.exp",
"tensorflow.contrib.rnn.ResidualWrapper",
"tensorflow.contrib.rnn.LSTMStateTuple",
"tensorflow.split",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.contrib.rnn.DropoutWrapper",
"tensorflow.maximum",
"tensorflow.expand_dims",
"tensorflow.python.util.nest.flatten"
],
[
"numpy.unravel_index",
"numpy.allclose",
"numpy.min",
"numpy.arange",
"numpy.eye",
"numpy.tile",
"numpy.percentile",
"numpy.ones",
"numpy.random.shuffle",
"numpy.max",
"numpy.std",
"tensorflow.logging.info",
"numpy.mean",
"numpy.where",
"numpy.array",
"numpy.zeros",
"numpy.isinf"
],
[
"tensorflow.contrib.training.HParams"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"1.5",
"1.7",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
}
] |
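
In the coconet evaluation cell of the magenta row above, `_update_lls` avoids the NaN that `x * log(px)` produces when both factors are zero by substituting probability 1 wherever `x` is off, so those positions contribute log(1) = 0. A minimal NumPy sketch of the same masking idiom; the array values are illustrative:

import numpy as np

x = np.array([1, 0, 1, 0], dtype=bool)  # binary pianoroll slice
p = np.array([0.9, 0.0, 0.5, 0.2])      # model probabilities

# where() runs before log(), so log() never sees the zeros at masked positions
ll = np.log(np.where(x, p, 1.0)).sum()  # == log(0.9) + log(0.5)
print(ll)
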
sergejhorvat/d6tflow | [
"00ac64a3d31bf48062003c18ea3269e466322d46"
] | [
"docs/example.py"
] | [
"\nimport d6tflow\nimport luigi\nimport sklearn, sklearn.datasets, sklearn.svm\nimport pandas as pd\n\n# define workflow\nclass TaskGetData(d6tflow.tasks.TaskPqPandas): # save dataframe as parquet\n\n def run(self):\n iris = sklearn.datasets.load_iris()\n df_train = pd.DataFrame(iris.data,columns=['feature{}'.format(i) for i in range(4)])\n df_train['y'] = iris.target\n self.save(df_train) # quickly save dataframe\n\nclass TaskPreprocess(d6tflow.tasks.TaskPqPandas):\n do_preprocess = luigi.BoolParameter(default=True) # parameter for preprocessing yes/no\n\n def requires(self):\n return TaskGetData() # define dependency\n\n def run(self):\n df_train = self.input().load() # quickly load required data\n if self.do_preprocess:\n df_train.iloc[:,:-1] = sklearn.preprocessing.scale(df_train.iloc[:,:-1])\n self.save(df_train)\n\nclass TaskTrain(d6tflow.tasks.TaskPickle): # save output as pickle\n do_preprocess = luigi.BoolParameter(default=True)\n\n def requires(self):\n return TaskPreprocess(do_preprocess=self.do_preprocess)\n\n def run(self):\n df_train = self.input().load()\n model = sklearn.svm.SVC()\n model.fit(df_train.iloc[:,:-1], df_train['y'])\n self.save(model)\n\n# Check task dependencies and their execution status\nd6tflow.show(TaskTrain())\nd6tflow.show([TaskTrain(do_preprocess=False)])\n\n'''\n└─--[TaskTrain-{'do_preprocess': 'True'} (PENDING)]\n └─--[TaskPreprocess-{'do_preprocess': 'True'} (PENDING)]\n └─--[TaskGetData-{} (PENDING)]\n'''\n\n# Execute the model training task including dependencies\nd6tflow.run(TaskTrain())\n\n'''\n===== Luigi Execution Summary =====\n\nScheduled 3 tasks of which:\n* 3 ran successfully:\n - 1 TaskGetData()\n - 1 TaskPreprocess(do_preprocess=True)\n - 1 TaskTrain(do_preprocess=True)\n'''\n\n# Load task output to pandas dataframe and model object for model evaluation\nmodel = TaskTrain().output().load()\ndf_train = TaskPreprocess().output().load()\nprint(sklearn.metrics.accuracy_score(df_train['y'],model.predict(df_train.iloc[:,:-1])))\n# 0.9733333333333334\n\n# Intelligently rerun workflow after changing a preprocessing parameter\nd6tflow.show([TaskTrain(do_preprocess=False)])\n\n'''\n└─--[TaskTrain-{'do_preprocess': 'False'} (PENDING)]\n └─--[TaskPreprocess-{'do_preprocess': 'False'} (PENDING)]\n └─--[TaskGetData-{} (COMPLETE)] => this doesn't change and doesn't need to rerun\n'''\n\n"
] | [
[
"sklearn.preprocessing.scale",
"sklearn.datasets.load_iris",
"sklearn.svm.SVC"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
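
The d6tflow example above ends by showing that only the parameter-dependent tasks are pending after `do_preprocess` changes. A hypothetical continuation, using only calls already shown in the cell (`d6tflow.run`, `output().load()`) and assuming its task classes are defined, would run the second variant and compare both trained models:

import sklearn.metrics
import d6tflow

d6tflow.run(TaskTrain(do_preprocess=False))  # TaskGetData is already complete and is reused

for flag in (True, False):
    model = TaskTrain(do_preprocess=flag).output().load()
    df_train = TaskPreprocess(do_preprocess=flag).output().load()
    acc = sklearn.metrics.accuracy_score(df_train['y'], model.predict(df_train.iloc[:, :-1]))
    print(flag, acc)
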
tranhoangkhuongvn/cage-challenge-1 | [
"88c4b8fb7da9fd4e87bd60fdf6e4a5caa3f146b1"
] | [
"CybORG/CybORG/Agents/Branching_Agents/test_agent.py"
] | [
"from tqdm import tqdm\nimport torch \nimport torch.nn as nn \nimport torch.nn.functional as F \nimport torch.optim as optim \nfrom torch.distributions import Categorical \n\nimport numpy as np \nimport gym \nimport random\nfrom tqdm import tqdm\n\nfrom collections import namedtuple, deque\n\nfrom network import DuelingNetwork, BranchingQNetwork\nfrom utils import TensorEnv, ExperienceReplayMemory, AgentConfig, BranchingTensorEnv\nimport utils\n\n\nfrom CybORG import CybORG\nimport inspect\n\nfrom CybORG.Agents import TestAgent\nfrom CybORG.Agents.Wrappers.FixedFlatWrapper import FixedFlatWrapper\nfrom CybORG.Agents.Wrappers.IntListToAction import IntListToActionWrapper\nfrom CybORG.Agents.Wrappers.OpenAIGymWrapper import OpenAIGymWrapper\nfrom CybORG.Agents.Wrappers.ReduceActionSpaceWrapper import ReduceActionSpaceWrapper\nfrom CybORG.Agents.Wrappers.TrueTableWrapper import true_obs_to_table\n\n\nBUFFER_SIZE = int(300000)\nBATCH_SIZE = 64\nGAMMA = 0.99\nTAU = 0.001\nLR = 1e-4\nUPDATE_EVERY = 10\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint('Device used: ', device)\nseed_list = [0, 42, 500, 1000]\n\n\nclass ReplayBuffer:\n \"\"\"\n Fixed size buffer to store experience tuples\n \"\"\"\n def __init__(self, action_space, buffer_size, batch_size, seed=42, device=\"cpu\"):\n \"\"\"\n Params:\n action_size (int): dimension of each action in the action space\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n seed (int): random seed\n \"\"\"\n #self.action_size = action_size\n self.memory = deque(maxlen=buffer_size)\n self.action_space = action_space\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\",\"done\"])\n self.seed = random.seed(seed)\n self.device = device\n\n def add(self, state, action, reward, next_state, done):\n \"\"\"Add a new experience to memory\"\"\"\n e = self.experience(state, action, reward, next_state, done)\n self.memory.append(e)\n\n\n def sample(self):\n \"\"\"Randomly sample a batch of experiences from memory\"\"\"\n experiences = random.sample(self.memory, k =self.batch_size)\n\n states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)\n #actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)\n\n action_list = [[] for i in range(len(self.action_space))]\n for idx in range(len(self.action_space)):\n action_list[idx] = torch.from_numpy(np.vstack([e.action[idx] for e in experiences if e is not None])).float().to(self.device)\n\n rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(self.device)\n next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(self.device)\n dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(self.device)\n\n return (states, action_list, rewards, next_states, dones)\n\n def __len__(self):\n \"\"\"Return the current size of internal memory\"\"\"\n return len(self.memory)\n\n\nclass BranchingDQN_Agent(nn.Module): \n\n def __init__(self, observation, action_space, seed=0, device=\"cpu\"): \n\n super().__init__()\n\n self.observation = observation\n self.action_space = action_space\n self.qnetwork_local = BranchingQNetwork(self.observation, self.action_space).to(device)\n self.qnetwork_target = BranchingQNetwork(self.observation, 
self.action_space).to(device)\n\n self.soft_update(self.qnetwork_local, self.qnetwork_target, 1.0)\n self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)\n self.device = device\n # Replay memory\n self.memory = ReplayBuffer(self.action_space, BUFFER_SIZE, BATCH_SIZE, seed, device=device)\n\t\t#initialize time step (for updating every UPDATE_EVERY steps)\n self.t_step = 0\n\n\n def soft_update(self, local_model, target_model, tau):\n \"\"\"\n Soft update model parameters\n Q_target = tau*Q_local + (1-tau)*Q_target\n\n Params:\n local_model (Pytorch model): weights will be copied from target_model (Pytorch model)\n tau (float): interpolation parameter\n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0 - tau)*target_param.data)\n\n \n def step(self, state, action, reward, next_state, done):\n\t\t# Save experience in replay memory\n self.memory.add(state, action, reward, next_state, done)\n\t\t# learn every UPDATE_EVERY time steps\n self.t_step = (self.t_step + 1) % UPDATE_EVERY\n #print(self.t_step, len(self.memory))\n if self.t_step == 0:\n # if enough samples are available in memory, get random subset and learn\n if len(self.memory) > BATCH_SIZE:\n experiences = self.memory.sample()\n # sample a batch of 64 (s,a,r,s',d)\n self.learn(experiences, GAMMA)\n\n \n def learn(self, experiences, gamma):\n \"\"\"\n Update value parameters using given batch of experience tuples\n\n Params:\n experiences: (Tuple[torch.Variable]): tuple of (s,a,r,s',done) tuples\n gamma (float): discount factor\n \"\"\"\n \n states, actions, rewards, next_states, dones = experiences\n # reshape the actions:\n \n #actions = torch.stack(actions).transpose(0,1).long() # (batch, action_size, 1)\n \n Q_expected = self.qnetwork_local(states) # list of q_value for each action\n #Q_expected = torch.stack(Q_expected)\n for i in range(len(self.action_space)):\n Q_expected[i] = Q_expected[i].gather(1, actions[i].long())\n \n Q_expected = torch.stack(Q_expected).transpose(0, 1) # (batch, action_size, 1)\n Q_expected = Q_expected.squeeze(-1) # (batch, action_size)\n\n \n # Get max predicted Q values (for next states) from target model\n Q_targets_next = self.qnetwork_target(next_states)\n for i in range(len(self.action_space)):\n Q_targets_next[i] = Q_targets_next[i].max(-1, keepdim=True)[0]\n \n Q_targets_next = torch.stack(Q_targets_next).transpose(0, 1)\n Q_targets_next = Q_targets_next#.squeeze(-1) # (batch, action_size)\n # compute Q targets for current states\n Q_targets = rewards + (gamma * Q_targets_next.mean(1) * (1 - dones))\n \n #compute loss\n loss = F.mse_loss(Q_expected, Q_targets.repeat(1, len(self.action_space)))\n\n #Minimize the loss\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Update target network\n self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)\n\n\t\n def soft_update(self, local_model, target_model, tau):\n \"\"\"\n Soft update model parameters\n Q_target = tau*Q_local + (1-tau)*Q_target\n\n Params:\n local_model (Pytorch model): weights will be copied from target_model (Pytorch model)\n tau (float): interpolation parameter\n \"\"\"\n for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):\n target_param.data.copy_(tau*local_param.data + (1.0 - tau)*target_param.data)\n\n \n \n def act(self, state, eps=0.):\n # TODO: take into account selection masks\n state = 
torch.from_numpy(state).float().unsqueeze(0).to(self.device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n #print(\"greedy\")\n #return np.argmax(action_values.cpu().data.numpy())\n # TODO: why dont we use argmax?\n actions = [int(x.max(1)[1]) for x in action_values]\n return actions\n else:\n #print(\"random\")\n return [np.random.randint(0, i) for i in self.action_space]\n\n\n def get_action(self, x): \n\n with torch.no_grad(): \n # a = self.q(x).max(1)[1]\n out = self.q(x).squeeze(0) # remove first dim which was 1\n #print(f\"out: {out.shape} - {out}\")\n action = torch.argmax(out, dim = 1)\n return action.numpy()\n\n\n def update_policy(self, adam, memory, params): \n\n b_states, b_actions, b_rewards, b_next_states, b_masks = memory.sample(params.batch_size)\n\n states = torch.tensor(b_states).float() # batch of states [128, 24]\n actions = torch.tensor(b_actions).long().reshape(states.shape[0],-1,1)\n rewards = torch.tensor(b_rewards).float().reshape(-1,1)\n next_states = torch.tensor(b_next_states).float()\n masks = torch.tensor(b_masks).float().reshape(-1,1)\n\n qvals = self.q(states) # [128, 4, 6]\n \n # [128, 4]: q_values for selected actions\n current_q_values = self.q(states).gather(2, actions).squeeze(-1)\n print(f\"current_qvals: {current_q_values.shape}\")\n \n with torch.no_grad():\n argmax = torch.argmax(self.q(next_states), dim = 2)\n print(f\"argmax: {argmax.shape}\")\n max_next_q_vals = self.target(next_states).gather(2, argmax.unsqueeze(2)).squeeze(-1)\n print(f\"max_next_q_vals: {max_next_q_vals.shape}\")\n # calculate the avg across action dimension, keep 1 avg values\n max_next_q_vals = max_next_q_vals.mean(1, keepdim = True)\n print(\"-----------------\")\n print(f\"max_next_q_vals: {max_next_q_vals.shape}\")\n\n expected_q_vals = rewards + max_next_q_vals*0.99*masks\n # print(expected_q_vals[:5])\n\n print(f\"expected_q_vals: {expected_q_vals.shape}\")\n print(f\"current_q_values: {current_q_values.shape}\")\n loss = F.mse_loss(expected_q_vals, current_q_values)\n \n # input(loss)\n\n # print('\\n'*5)\n \n adam.zero_grad()\n loss.backward()\n\n for p in self.q.parameters(): \n p.grad.data.clamp_(-1.,1.)\n adam.step()\n\n self.update_counter += 1\n if self.update_counter % self.target_net_update_freq == 0: \n self.update_counter = 0 \n self.target.load_state_dict(self.q.state_dict())\n\n\ndef is_diff(s1, s2):\n s1 = np.array(s1)\n s2 = np.array(s2)\n\n diff = s1 - s2\n if sum(diff) == 0:\n return False\n \n return True\n\n\ndef argmax_rand(arr):\n\t# np.argmax with random tie breaking\n\treturn np.random.choice(np.flatnonzero(np.isclose(arr, np.max(arr), atol=1e-3)))\n\n\"\"\"\nExample of invalid action:\nmask [[0, 1, 2, 3, 4, 5, 6, 7], [0, 1], [0, 1, 2], [7], [1], [66], [0, 9, 10, 11, 12, 13, 14], [1, 2, 3, 4, 5], [0, 2]]\n\naction space: [8, 3, 4, 9, 2, 139, 15, 8, 4]\naction taken: <class 'list'> - [1, 1, 2, 7, 1, 93, 10, 5, 2]\nreward: -0.1\n\n\"\"\"\ndef train(rl_agent, n_episodes, max_step, eps_start=1.0, eps_end=0.1, eps_decay=0.99):\n stats = utils.Stats(num_episodes=n_episodes, continuous=True)\n scores = []\n steps = []\n scores_window = deque(maxlen=100)\n \n eps = eps_start\n \n for i_episode in range(n_episodes):\n print(\"\\rEpisode: {}\".format(i_episode))\n score = 0\n agent_name = 'Red'\n #cyborg = OpenAIGymWrapper(agent_name=agent_name, env=IntListToActionWrapper(FixedFlatWrapper(CybORG(path, 
'sim'))))\n observation = cyborg.reset(agent=agent_name)\n #action_space = cyborg.get_action_space(agent_name)\n for t in range(max_step):\n actions = rl_agent.act(observation, eps)\n next_observation, reward, done, info = cyborg.step(action=actions)\n\n rl_agent.step(observation, actions, reward, next_observation, done)\n observation = next_observation\n score += reward\n if done:\n print(\"Done:\", t)\n break\n scores_window.append(score)\n eps = max(eps_end, eps_decay * eps)\n print('\\rEpisode {} Eps {}\\tAverage Score: {:.2f}'.format(i_episode, eps, np.mean(scores_window)), end=\"\")\n\n\n\n\n\n\nif __name__ == '__main__':\n # Test branching dqn agent\n from pprint import pprint\n print(torch.cuda.get_device_name())\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(\"device:\", device)\n scenario_name = \"Scenario1\"\n #scenario_name = \"scenario_12_hosts_2flag\"\n \n print(scenario_name)\n path = str(inspect.getfile(CybORG))\n path = path[:-10] + f'/Shared/Scenarios/{scenario_name}.yaml'\n\n agent_name = 'Red'\n cyborg = OpenAIGymWrapper(agent_name=agent_name, env=IntListToActionWrapper(FixedFlatWrapper(CybORG(path, 'sim'))))\n\n #cyborg = OpenAIGymWrapper(agent_name=agent_name, env=IntListToActionWrapper(FixedFlatWrapper(ReduceActionSpaceWrapper(CybORG(path, 'sim')))))\n #cyborg = OpenAIGymWrapper(agent_name=agent_name, env = CybORG(path, 'sim'))\n #cyborg = CybORG(path, \"sim\")\n \n observation = cyborg.reset(agent=agent_name)\n action_space = cyborg.get_action_space(agent_name) # 8, 3, 4, 1, 9, 2, 139, 9, 8, 1, 4\n \n \n print(action_space)\n print(observation.shape)\n\n full_obs = cyborg.env.reset(agent=agent_name)\n\n print(type(full_obs.selection_masks))\n print(full_obs.selection_masks)\n exit(0)\n # for k,v in action_space.items():\n # print(k)\n # print(v)\n\n # for k, v in observation.observation.items():\n # print(k)\n # print(v)\n # #print(observation.observation)\n \n selection_masks = [[i for i in range(a)] for a in action_space]\n print(selection_masks)\n\n test_bdqn_agent = BranchingDQN_Agent(observation, action_space, device=device)\n\n #train(test_bdqn_agent, n_episodes=3000, max_step=1500, eps_start=1.0, eps_end=0.1, eps_decay=0.999)\n\n #print(len(test_bdqn_agent.memory))\n\n accum_reward = 0\n\n true_state = cyborg.get_agent_state('True')\n \n\n import pdb; \n true_table = true_obs_to_table(true_state, cyborg)\n print(true_table)\n \n print(76*'-')\n pdb.set_trace()\n for i in range(5000):\n \n #import pdb; pdb.set_trace()\n actions = test_bdqn_agent.act(observation, eps=0.5)\n print(f\"action: {actions}\")\n #execute_action = []\n \n\n # for indx, action in enumerate(actions):\n # if len(selection_masks[indx]) == 0:\n # execute_action.append(None)\n # else:\n # execute_action.append(action)\n # print(f\"execute action: {execute_action}\")\n \n next_observation, r, done, info = cyborg.step(action=actions)\n\n \n #print(\"is diff:\", is_diff(observation, next_observation))\n print(f\"reward: {r}\")\n accum_reward += r\n print(f\"accum_reward: {accum_reward}\")\n\n if done:\n observation = cyborg.reset(agent=agent_name)\n print(\"Capture the flag at:\", i)\n\n # if r > 0:\n # print(\"Pos reward at:\", i)\n # true_state = cyborg.get_agent_state('True')\n \n\n # #pdb.set_trace()\n # true_table = true_obs_to_table(true_state, cyborg)\n # print(true_table)\n \n # print(76*'-')\n # #break\n\n # if r < 0:\n # print(\"Invalid action\")\n # #break\n action_space = info.get('action_space')\n print(f\"action space: {action_space}\")\n 
print(info[\"selection_masks\"])\n selection_masks = info[\"selection_masks\"]\n observation = next_observation\n \n\n\n"
] | [
[
"torch.from_numpy",
"torch.tensor",
"torch.nn.functional.mse_loss",
"numpy.max",
"torch.no_grad",
"numpy.random.randint",
"torch.cuda.is_available",
"torch.cuda.get_device_name",
"torch.stack",
"numpy.mean",
"numpy.array",
"numpy.vstack",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
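
The `BranchingDQN_Agent` in the row above defines `soft_update` twice with identical bodies; the duplicate is harmless but one copy could be dropped. The update itself is standard Polyak averaging of target-network parameters. A self-contained sketch with tiny placeholder networks:

import torch
import torch.nn as nn

def soft_update(local_model, target_model, tau):
    # Q_target <- tau * Q_local + (1 - tau) * Q_target
    for t_param, l_param in zip(target_model.parameters(), local_model.parameters()):
        t_param.data.copy_(tau * l_param.data + (1.0 - tau) * t_param.data)

local, target = nn.Linear(4, 2), nn.Linear(4, 2)
soft_update(local, target, tau=1.0)  # tau=1.0 degenerates to a hard copy
assert torch.equal(local.weight, target.weight)
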
DoraSzasz/mp-mri-prostate | [
"bd420534b4b5c464e5bbb4a07eabdc8724831f8a"
] | [
"pipeline/feature-classification/exp-3/selection-extraction/pipeline_select_adc.py"
] | [
"\"\"\"This pipeline is intended for selecting the more appropriate feature\"\"\"\nimport os\nimport numpy as np\n\nfrom sklearn.externals import joblib\n\nfrom collections import Counter\n\npath_feat_sel = '/data/prostate/results/mp-mri-prostate/exp-3/selection-extraction/rf/adc/feat_sel.pkl'\n# Load the data and select the 5th percentile\nfeat_sel = joblib.load(path_feat_sel)[2]\n\nlin_feat_sel = np.concatenate(feat_sel, axis=0)\ncount_feat = Counter(lin_feat_sel)\n\n# Get the most common element to get the correct size\nfeat_sel = count_feat.most_common(feat_sel[0].size)\nidx_feat_val = np.array([elt[0] for elt in feat_sel])\n\n# Save the information\npath_store = '/data/prostate/results/mp-mri-prostate/exp-3/selection-extraction/rf/adc'\nif not os.path.exists(path_store):\n os.makedirs(path_store)\njoblib.dump(idx_feat_val, os.path.join(path_store,\n 'idx_sel_feat.pkl'))\n"
] | [
[
"numpy.concatenate",
"numpy.array",
"sklearn.externals.joblib.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
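
The feature-selection pipeline above aggregates per-fold selections by concatenating them and ranking indices by how often they were chosen across folds. A minimal sketch of that `Counter.most_common` idiom; the index values are illustrative:

import numpy as np
from collections import Counter

feat_sel = [np.array([7, 3, 1]), np.array([7, 3, 2]), np.array([7, 1, 5])]
count_feat = Counter(np.concatenate(feat_sel, axis=0))
top = count_feat.most_common(feat_sel[0].size)    # keep one fold's worth of features
idx_feat_val = np.array([idx for idx, _ in top])  # [7 3 1]
print(idx_feat_val)
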
ppcrong/TestPython | [
"6a87d09e31ad662ce1dea707118d1e914dfeaba7"
] | [
"TestPython/ctypes_test.py"
] | [
"import string\nfrom ctypes import *\n\n# visual studio\n# dll_c = CDLL('dll/ctypes_dll.dll')\n# vscode\ndll_c = CDLL('dll/libctypes_dll.dll')\n\n# read data of value\n# method1 byref\nc_value = c_int(0)\nret = dll_c.test_value(byref(c_value))\nprint('ret: {}'.format(ret))\nprint('c_value: {}'.format(c_value))\n# method2 pointer\nc_value = c_int(0)\nc_value_p = pointer(c_value)\nret = dll_c.test_value(c_value_p)\nprint('ret: {}'.format(ret))\nprint('c_value: {}'.format(c_value))\n# method3 POINTER type in argtypes\ndll_c.test_value.argtypes = [POINTER(c_int)]\nc_value = c_int(0)\nret = dll_c.test_value(c_value)\n# ret = dll_c.test_value(byref(c_value)) # the same result as above line => https://blog.csdn.net/Kelvin_Yan/article/details/86546784\nprint('ret: {}'.format(ret))\nprint('c_value: {}'.format(c_value))\n\n# read data of byte array\nimport numpy as np\n\n# c_buf_p assign method 1\nbuf = np.arange(1, 17, dtype=np.byte)\nc_buf_p = (c_byte * len(buf))(*buf)\ndll_c.test_buf(c_buf_p)\n# dll_c.test_buf(byref(c_buf_p)) # the same result as above line\nbuf = list(c_buf_p)\nprint('buf: {}'.format(buf))\nbuf = np.ctypeslib.as_array(c_buf_p, np.int8)\nprint('buf: {}'.format(buf))\n\n# c_buf_p assign method 2\nc_buf_p = (c_byte * 16)()\ndll_c.test_buf(c_buf_p)\nbuf = list(c_buf_p)\nprint('buf: {}'.format(buf))\nbuf = cast(c_buf_p, POINTER(c_byte * 16)).contents\nprint('buf: {}'.format(buf))\nfor b in buf:\n print(b)\n\n\n# read data of bounding_box_s\nclass bounding_box_s(Structure):\n _fields_ = [\n ('x1', c_float),\n ('y1', c_float),\n ('x2', c_float),\n ('y2', c_float),\n ('score', c_float),\n ('class_num', c_int32)\n ]\n\n def __repr__(self):\n ret_str = ''\n for field in self._fields_:\n ret_str = '\\n'.join((ret_str, '\\t{}: {}'.format(field[0], getattr(self, field[0]))))\n return ret_str\n\n\nbox1 = bounding_box_s(x1=0, y1=0, x2=0, y2=0, score=0, class_num=0)\nbox2 = bounding_box_s(x1=0, y1=0, x2=0, y2=0, score=0, class_num=0)\nboxes = [box1, box2]\nprint('\\n===1==={}'.format(boxes))\n\n# c_boxes_p assign method 1\nc_boxes_p = (bounding_box_s * len(boxes))(*boxes)\nc_boxes_pp = pointer(c_boxes_p)\nret = dll_c.test_struct_array(byref(c_boxes_pp), len(boxes))\nprint('ret: {}'.format(ret))\nboxes = list(c_boxes_p)\nprint('\\n===2==={}'.format(boxes))\n\n# c_boxes_p assign method 2\nboxes = [box1, box2]\nprint('\\n===3==={}'.format(boxes))\nc_boxes_p = (bounding_box_s * 2)()\nc_boxes_pp = pointer(c_boxes_p)\nret = dll_c.test_struct_array(byref(c_boxes_pp), len(boxes))\nprint('ret: {}'.format(ret))\nboxes = list(c_boxes_p)\nprint('\\n===4==={}'.format(boxes))\nboxes = cast(c_boxes_p, POINTER(bounding_box_s * 2)).contents\nfor b in boxes:\n print('\\n===5==={}'.format(b))\n\n\n# read data of test_result_s\nclass test_result_s(Structure):\n _fields_ = [\n (\"class_count\", c_uint), # uint32_t\n (\"box_count\", c_uint), # boxes of all classes\n (\"boxes\", POINTER(POINTER(bounding_box_s))) # box array\n ]\n\n def __repr__(self):\n ret_str = ''\n for field in self._fields_:\n ret_str = '\\n'.join((ret_str, '\\t{}: {}'.format(field[0], getattr(self, field[0]))))\n return ret_str\n\n\n# box3 = bounding_box_s(x1=0, y1=0, x2=0, y2=0, score=0, class_num=0)\n# boxes = [box1, box2, box3]\n# c_boxes = (bounding_box_s * 3)()\n# c_boxes_p = pointer(c_boxes)\n# res1 = test_result_s(boxes=byref(c_boxes_p))\n# res2 = test_result_s(boxes=byref(c_boxes_p))\n# res3 = test_result_s(boxes=byref(c_boxes_p))\n# results = [res1, res2, res3]\n# print('\\n///1///{}'.format(results))\n# c_results = (test_result_s * 
len(results))(*results)\n# c_results_p = pointer(c_results)\n# ret = dll_c.test_struct_array2(byref(c_results_p), len(results))\n# print('ret: {}'.format(ret))\n# results = list(c_results)\n# print('\\n///2///{}'.format(results))\n\n\"\"\"\nreference: https://chrisheydrick.com/2016/02/06/passing-a-ctypes-array-of-struct-from-python-to-dll/\n\"\"\"\n\n\nclass structtest(Structure):\n _fields_ = [\n (\"x\", c_char),\n (\"y\", c_int),\n (\"z\", c_long)\n ]\n\n def __repr__(self):\n ret_str = ''\n for field in self._fields_:\n ret_str = '\\n'.join((ret_str, '\\t{}: {}'.format(field[0], getattr(self, field[0]))))\n return ret_str\n\n\nn_struct2 = 5\nstruct1 = structtest()\nstruct2 = (structtest * n_struct2)()\n\nprint(\"\\n///////////////////\\nBefore passing to .dll\")\nprint(struct1)\nfor i in range(n_struct2):\n print(\"struct2[{}] {}\".format(i, struct2[i]))\n\ndll_c.fillonestruct(byref(struct1))\ndll_c.fillmultiplestruct(byref(struct2), c_int(n_struct2))\nprint(\"\\nAfter passing to .dll\")\nprint(struct1)\nfor i in range(n_struct2):\n print(\"struct2[{}] {}\".format(i, struct2[i]))\n\n\"\"\"\nreference: https://stackoverflow.com/questions/8392203/dynamic-arrays-and-structures-in-structures-in-python\n\"\"\"\n\n\nclass Row(Structure):\n _fields_ = [('cols_count', c_int),\n ('cols', POINTER(c_char_p))]\n\n def __init__(self, cols):\n self.cols_count = cols\n # Allocate an array of character pointers\n pc = (c_char_p * cols)()\n self.cols = cast(pc, POINTER(c_char_p))\n\n\nclass Unit(Structure):\n _fields_ = [('rows_count', c_int),\n ('rows', POINTER(Row))]\n\n def __init__(self, rows, cols):\n self.rows_count = rows\n # Allocate an array of Row structures.\n # This does NOT call __init__.\n pr = (Row * rows)()\n # Call init manually with the column size.\n for r in pr:\n r.__init__(cols)\n self.rows = cast(pr, POINTER(Row))\n\n\nunit = Unit(2, 3)\n\n# Stuff some strings ('aaaaa','bbbbb',etc.)\n# >>> string.ascii_lowercase[0] * 5\n# 'aaaaa'\n# >>> string.ascii_lowercase[1] * 5\n# 'bbbbb'\nfor i in range(unit.rows_count):\n for j in range(unit.rows[i].cols_count):\n unit.rows[i].cols[j] = (string.ascii_lowercase[i * 5 + j] * 5).encode('utf-8') # convert string to c_char_p\n\ndll_c.my_func(byref(unit))\n"
] | [
[
"numpy.ctypeslib.as_array",
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Abhi-H/CNN-with-Fashion-MNIST-dataset | [
"67e238a51108aa4245a343eabc786e7d0475281a"
] | [
"FashionMNISTwithCNN.py"
] | [
"from __future__ import print_function\nimport torch.utils.data as data\nfrom PIL import Image\nimport os\nimport os.path\nimport errno\nimport torch\nimport codecs\n\n\nclass fashion(data.Dataset):\n \"\"\"`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.\n Args:\n root (string): Root directory of dataset where ``processed/training.pt``\n and ``processed/test.pt`` exist.\n train (bool, optional): If True, creates dataset from ``training.pt``,\n otherwise from ``test.pt``.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n \"\"\"\n urls = [\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',\n 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',\n ]\n raw_folder = 'raw'\n processed_folder = 'processed'\n training_file = 'training.pt'\n test_file = 'test.pt'\n\n def __init__(self, root, train=True, transform=None, target_transform=None, download=False):\n self.root = os.path.expanduser(root)\n self.transform = transform\n self.target_transform = target_transform\n self.train = train # training set or test set\n\n if download:\n self.download()\n\n if not self._check_exists():\n raise RuntimeError('Dataset not found.' +\n ' You can use download=True to download it')\n\n if self.train:\n self.train_data, self.train_labels = torch.load(\n os.path.join(root, self.processed_folder, self.training_file))\n else:\n self.test_data, self.test_labels = torch.load(os.path.join(root, self.processed_folder, self.test_file))\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n if self.train:\n img, target = self.train_data[index], self.train_labels[index]\n else:\n img, target = self.test_data[index], self.test_labels[index]\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img.numpy(), mode='L')\n\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n if self.train:\n return len(self.train_data)\n else:\n return len(self.test_data)\n\n def _check_exists(self):\n return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \\\n os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))\n\n def download(self):\n \"\"\"Download the MNIST data if it doesn't exist in processed_folder already.\"\"\"\n from six.moves import urllib\n import gzip\n\n if self._check_exists():\n return\n\n # download files\n try:\n os.makedirs(os.path.join(self.root, self.raw_folder))\n os.makedirs(os.path.join(self.root, self.processed_folder))\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n for url in self.urls:\n print('Downloading ' + url)\n data = urllib.request.urlopen(url)\n filename = 
url.rpartition('/')[2]\n file_path = os.path.join(self.root, self.raw_folder, filename)\n with open(file_path, 'wb') as f:\n f.write(data.read())\n with open(file_path.replace('.gz', ''), 'wb') as out_f, \\\n gzip.GzipFile(file_path) as zip_f:\n out_f.write(zip_f.read())\n os.unlink(file_path)\n\n # process and save as torch files\n print('Processing...')\n\n training_set = (\n read_image_file(os.path.join(self.root, self.raw_folder, 'train-images-idx3-ubyte')),\n read_label_file(os.path.join(self.root, self.raw_folder, 'train-labels-idx1-ubyte'))\n )\n test_set = (\n read_image_file(os.path.join(self.root, self.raw_folder, 't10k-images-idx3-ubyte')),\n read_label_file(os.path.join(self.root, self.raw_folder, 't10k-labels-idx1-ubyte'))\n )\n with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n print('Done!')\n\n\ndef get_int(b):\n return int(codecs.encode(b, 'hex'), 16)\n\n\ndef parse_byte(b):\n if isinstance(b, str):\n return ord(b)\n return b\n\n\ndef read_label_file(path):\n with open(path, 'rb') as f:\n data = f.read()\n assert get_int(data[:4]) == 2049\n length = get_int(data[4:8])\n labels = [parse_byte(b) for b in data[8:]]\n assert len(labels) == length\n return torch.LongTensor(labels)\n\n\ndef read_image_file(path):\n with open(path, 'rb') as f:\n data = f.read()\n assert get_int(data[:4]) == 2051\n length = get_int(data[4:8])\n num_rows = get_int(data[8:12])\n num_cols = get_int(data[12:16])\n images = []\n idx = 16\n for l in range(length):\n img = []\n images.append(img)\n for r in range(num_rows):\n row = []\n img.append(row)\n for c in range(num_cols):\n row.append(parse_byte(data[idx]))\n idx += 1\n assert len(images) == length\n return torch.ByteTensor(images).view(-1, 28, 28)\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\ntrain_dataset=fashion(root='./data',train=True,transform=transforms.ToTensor(),download=True)\ntest_dataset=fashion(root='./data',train=False,transform=transforms.ToTensor(),download=True)\nbatch_size=100\nn_iters=18000\nnum_epochs=n_iters/(len(train_dataset)/batch_size)\nnum_epochs=int(num_epochs)\ntrain_loader=torch.utils.data.DataLoader(dataset=train_dataset,batch_size=batch_size,shuffle=True)\ntest_loader=torch.utils.data.DataLoader(dataset=test_dataset,batch_size=batch_size,shuffle=True)\nclass CNNModule(nn.Module):\n def __init__(self):\n super (CNNModule,self).__init__()\n\n self.cnn1 = nn.Conv2d(in_channels=1,out_channels=16,kernel_size=5,stride=1,padding=2)\n self.relu1=nn.ELU()\n nn.init.xavier_uniform(self.cnn1.weight)\n\n\n self.maxpool1=nn.MaxPool2d(kernel_size=2)\n\n self.cnn2=nn.Conv2d(in_channels=16,out_channels=32,kernel_size=5,stride=1,padding=2)\n self.relu2=nn.ELU()\n nn.init.xavier_uniform(self.cnn2.weight)\n\n\n self.maxpool2=nn.MaxPool2d(kernel_size=2)\n\n self.fcl=nn.Linear(32*7*7,10)\n def forward(self,x):\n out=self.cnn1(x)\n out=self.relu1(out)\n #print (\"CNN1\")\n #print (out.size())\n \n out=self.maxpool1(out)\n #print (\"Maxpool1\")\n #print (out.size())\n \n out=self.cnn2(out)\n out=self.relu2(out)\n #print (\"CNN2\")\n #print (out.size())\n out=self.maxpool2(out)\n #print (\"Maxpool2\")\n #print (out.size(0))\n \n out=out.view(out.size(0),-1)\n\n out=self.fcl(out)\n\n return 
out\nmodel=CNNModule()\ncriterion=nn.CrossEntropyLoss()\nlearning_rate=0.015\noptimizer=torch.optim.SGD(model.parameters(),lr=learning_rate)\niter=0\nfor epoch in range(num_epochs):\n for i,(images,labels) in enumerate (train_loader):\n images=Variable(images)\n labels=Variable(labels)\n\n optimizer.zero_grad()\n outputs=model(images)\n loss=criterion(outputs,labels)\n loss.backward()\n optimizer.step()\n iter+=1\n if iter%500==0:\n correct=0\n total=0\n for images,labels in test_loader:\n images=Variable(images)\n\n outputs=model(images)\n \n _,predicted=torch.max(outputs.data,1)\n total+=labels.size(0)\n correct+=(predicted==labels).sum()\n accuracy= (100.0* correct)/(total)\n print(\"Iteration:\"+str(iter)+\" Loss:\"+str(loss)+\" Accuracy:\"+str(accuracy))\n"
] | [
[
"torch.ByteTensor",
"torch.LongTensor",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.nn.ELU",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.save",
"torch.nn.init.xavier_uniform",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pabloqb2000/py-led_audio_visualizer | [
"5a3616a0d93e1a8878d482bf8dcb985a326371b4"
] | [
"utils/to_leds.py"
] | [
"from rpi_ws281x import *\nimport numpy as np\n\n# LED strip configuration:\nLED_COUNT = 100 # Number of LED pixels.\nLED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).\n#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53\nstrip = None\n\n'''\n Given:\n - frame: an iterable with n triplets of RGB (0-255 (int)) colors\n Lights the leds based on this colors\n'''\ndef frame_to_leds(frame):\n if not strip:\n return \n for i, value in enumerate(frame):\n r,g,b = value\n strip.setPixelColor(i, Color(int(r),int(g),int(b)))\n strip.show()\n\n'''\n On start initialize strip\n'''\n# Create NeoPixel object with appropriate configuration.\nstrip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)\n# Intialize the library (must be called once before other functions).\nstrip.begin()\n# Set all pixels to 0 at start\nframe_to_leds(np.zeros((LED_COUNT, 3)))\n \n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ssghost/PySyft | [
"1921efeeda2c7b0bf93f17a33ddf59f8020fa653"
] | [
"syft/core/frameworks/torch/__init__.py"
] | [
"from .hook import TorchHook\nfrom .tensor import _SyftTensor, _LocalTensor, _PointerTensor\nfrom .tensor import _FixedPrecisionTensor, _TorchTensor, _PlusIsMinusTensor, _GeneralizedPointerTensor\nfrom .tensor import _SPDZTensor, _SNNTensor\nfrom enum import Enum, auto\n\n__all__ = ['TorchHook', '_SyftTensor', '_LocalTensor',\n '_PointerTensor', '_FixedPrecisionTensor', '_TorchTensor',\n '_PlusIsMinusTensor', '_GeneralizedPointerTensor', '_SPDZTensor',\n '_SNNTensor']\n\nimport torch\n\ntorch.encode_timer = 0\ntorch.handle_call_timer = 0\ntorch.execute_call_timer = 0\n\n# this is a list of all module functions in the torch module\ntorch.torch_funcs = dir(torch)\n\n# this is a list of all module functions in torch.nn.functional\ntorch.torch_functional_funcs = dir(torch.nn.functional)\n\n# Gathers all the functions from above\ntorch.torch_modules = {\n 'torch': torch.torch_funcs,\n 'torch.nn.functional': torch.torch_functional_funcs\n}\n# 'torch.nn.functional': torch.torch_functional_funcs\n\n# this is the list of torch tensor types that we will override for remote execution\ntorch.tensor_types = [torch.FloatTensor,\n torch.DoubleTensor,\n torch.HalfTensor,\n torch.ByteTensor,\n torch.CharTensor,\n torch.ShortTensor,\n torch.IntTensor,\n torch.LongTensor]\ntorch.tensor_types_tuple = tuple(torch.tensor_types)\n\ntorch.var_types = [torch.autograd.variable.Variable, torch.nn.Parameter]\ntorch.var_types_tuple = tuple(torch.var_types)\n\n# a list of all classes in which we will override their methods for remote execution\ntorch.tensorvar_types = torch.tensor_types + [torch.autograd.variable.Variable]\n\ntorch.tensorvar_types_strs = [x.__name__ for x in torch.tensorvar_types]\n\ntorch.syft_tensor_name = None\ntorch.tensor_type_names = [x.__name__ for x in torch.tensor_types]\ntorch.var_type_names = [x.__name__ for x in torch.var_types] + ['syft.Variable', 'syft.Parameter']\n\ntorch.tensorvar_methods = list(\n set(\n [method\n for tensorvar in torch.tensorvar_types\n for method in dir(tensorvar)]\n )\n)\ntorch.tensorvar_methods.append('get_shape')\ntorch.tensorvar_methods.append(\"share\")\ntorch.tensorvar_methods.append(\"fix_precision\")\ntorch.tensorvar_methods.append(\"decode\")\n\n# Torch functions we don't want to override\ntorch.torch_exclude = ['save', 'load', 'typename', 'is_tensor', 'manual_seed']\n\ntorch.guard = {\n '_PlusIsMinusTensor': _PlusIsMinusTensor,\n '_SPDZTensor': _SPDZTensor,\n '_FixedPrecisionTensor': _FixedPrecisionTensor,\n '_SNNTensor': _SNNTensor,\n 'Variable': torch.autograd.Variable,\n 'FloatTensor': torch.FloatTensor,\n 'DoubleTensor': torch.DoubleTensor,\n 'HalfTensor': torch.HalfTensor,\n 'ByteTensor': torch.ByteTensor,\n 'CharTensor': torch.CharTensor,\n 'ShortTensor': torch.ShortTensor,\n 'IntTensor': torch.IntTensor,\n 'LongTensor': torch.LongTensor,\n 'Parameter': torch.nn.Parameter\n}\nkeys = list(torch.guard.keys())\nfor key in keys:\n torch.guard['syft.' + key] = torch.guard[key]\n\n\ndef get_allowed_command(allowed):\n if isinstance(allowed, dict):\n allowed_names = set()\n for module_name, func_names in allowed.items():\n for func_name in func_names:\n allowed_names.add(module_name + '.' 
+ func_name)\n allowed = allowed_names\n return allowed\n\nallowed_commands = {\n 'tensorvar_methods': get_allowed_command(torch.tensorvar_methods),\n 'torch_modules': get_allowed_command(torch.torch_modules)\n}\n\n\ndef get_native_torch_name(attr):\n elems = attr.split('.')\n elems[-1] = 'native_' + elems[-1]\n native_func_name = '.'.join(elems)\n return native_func_name\n\nnative_commands = {\n 'tensorvar_methods': {\n cmd: 'native_' + cmd for cmd in allowed_commands['tensorvar_methods']\n },\n 'torch_modules': {\n cmd: get_native_torch_name(cmd) for cmd in allowed_commands['torch_modules']\n }\n}\n\n\ndef eval_torch_modules():\n for cmd_name, native_cmd_name in native_commands['torch_modules'].items():\n if cmd_name not in torch.torch_exclude:\n try:\n native_commands['torch_modules'][cmd_name] = eval(native_cmd_name)\n except AttributeError:\n native_commands['torch_modules'][cmd_name] = eval(cmd_name)\n else:\n native_commands['torch_modules'][cmd_name] = eval(cmd_name)\n\ntorch.eval_torch_modules = eval_torch_modules\n\n\n\ndef _command_guard(command, torch_domain, get_native=False):\n if command not in allowed_commands[torch_domain]:\n raise RuntimeError(\n 'Command \"{}\" is not a supported Torch operation.'.format(command))\n if get_native:\n return native_commands[torch_domain][command]\n return command\n\ntorch._command_guard = _command_guard\n\n\ndef _is_command_valid_guard(command, allowed):\n try:\n torch._command_guard(command, allowed)\n except RuntimeError:\n return False\n return True\n\ntorch._is_command_valid_guard = _is_command_valid_guard\n"
] | [
[
"torch.guard.keys",
"torch.tensorvar_methods.append",
"torch._command_guard"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xlnwel/grl | [
"7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf",
"7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf",
"7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf",
"7d42bb2e78bc3e7b7c3ebbcf356a4d1cf12abebf"
] | [
"nn/norm.py",
"algo/mriqn/nn.py",
"utility/adabelief.py",
"nn/func.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import layers, initializers\n\n\nEVONORM_B0 = 'evonorm_b0'\nEVONORM_S0 = 'evonorm_s0'\nLAYER_TYPES = (EVONORM_B0, EVONORM_S0)\n\n\nclass EvoNorm(layers.Layer):\n def __init__(self, \n name='evonorm',\n layer_type=EVONORM_B0,\n nonlinear=True,\n num_groups=32,\n decay=.9,\n epsilon=1e-5):\n super().__init__(name=name)\n assert layer_type in LAYER_TYPES, f'Expected layer type({LAYER_TYPES}), but get {layer_type}'\n self._layer_type = layer_type\n self._num_groups = num_groups\n self._decay = decay\n self._epsilon = epsilon\n\n def build(self, input_shape):\n var_shape = np.ones_like(input_shape, dtype=int)\n var_shape[-1] = input_shape[-1]\n var_shape = tuple(var_shape)\n\n self.beta = self.add_weight(\n 'beta',\n shape=var_shape,\n initializer=initializers.zeros,\n dtype=self._compute_dtype\n )\n self.gamma = self.add_weight(\n 'gamma',\n shape=var_shape, \n initializer=initializers.ones,\n dtype=self._compute_dtype\n )\n self.v = self.add_weight(\n 'v',\n shape=var_shape,\n initializer=initializers.ones,\n dtype=self._compute_dtype\n )\n if self._layer_type == EVONORM_B0:\n self.moving_variance = self.add_weight(\n 'moving_variance',\n shape=var_shape,\n initializer=initializers.ones,\n dtype=tf.float32,\n trainable=False\n )\n\n def call(self, x, training=True):\n if self._layer_type == EVONORM_S0:\n std = self._group_std(x)\n x = x * tf.nn.sigmoid(self.v * x) / std\n elif self._layer_type == EVONORM_B0:\n left = self._batch_std(x, training=training)\n right = self.v * x + self._instance_std(x)\n x = x / tf.maximum(left, right)\n else:\n raise ValueError(f'Unkown EvoNorm layer: {self._layer_type}')\n \n return x * self.gamma + self.beta\n\n def _batch_std(self, x, training=True):\n axes = tuple(range(len(x.shape)-1))\n if training:\n _, variance = tf.nn.moments(x, axes, keepdims=True)\n variance = tf.cast(variance, tf.float32)\n self.moving_variance.assign_sub((self.moving_variance - variance) * (1 - self._decay))\n else:\n variance = self.moving_variance\n std = tf.sqrt(variance+self._epsilon)\n return tf.cast(std, x.dtype)\n\n def _group_std(self, x):\n n = self._num_groups\n while n > 1:\n if x.shape[-1] % n == 0:\n break\n n -= 1\n x_shape = (-1,) + tuple(x.shape[1:])\n h, w, c = x.shape[-3:]\n g = c // n\n grouped_shape = (-1, ) + tuple(x.shape[1:-1]) + (n, g)\n x = tf.reshape(x, grouped_shape)\n _, variance = tf.nn.moments(x, [1, 2, 4], keepdims=True)\n std = tf.sqrt(variance + self._epsilon)\n std = tf.tile(std, [1, h, w, 1, g])\n std = tf.reshape(std, x_shape)\n return std\n\n def _instance_std(self, x):\n _, variance = tf.nn.moments(x, [-3, -2], keepdims=True)\n std = tf.sqrt(variance + self._epsilon)\n return std\n\nif __name__ == '__main__':\n tf.random.set_seed(0)\n x = tf.random.normal((2, 4, 4, 32))\n net = EvoNorm(layer_type=EVONORM_S0, num_groups=4)\n print(net(x))",
"import tensorflow as tf\n\nfrom utility.tf_utils import assert_rank\nfrom core.module import Ensemble\nfrom nn.func import Encoder, rnn\nfrom algo.iqn.nn import Quantile, Value\n\n\nclass RDQN(Ensemble):\n def __init__(self, config, env, **kwargs):\n super().__init__(\n model_fn=create_components, \n config=config,\n env=env,\n **kwargs)\n \n @tf.function\n def action(self, x, state, mask,\n prev_action=None, prev_reward=None,\n evaluation=False, epsilon=0,\n temp=1, return_stats=False,\n return_eval_stats=False):\n assert x.shape.ndims in (2, 4), x.shape\n\n x, state = self._encode(\n x, state, mask, prev_action, prev_reward)\n _, qt_embed = self.quantile(x)\n action = self.q.action(x, qt_embed, \n epsilon=epsilon, temp=temp, return_stats=return_stats)\n\n if evaluation:\n return tf.squeeze(action), state\n else:\n terms = {}\n action = tf.nest.map_structure(lambda x: tf.squeeze(x), action)\n if return_stats:\n action, terms = action\n terms.update({\n 'mu': self.q.compute_prob()\n })\n out = tf.nest.map_structure(lambda x: tf.squeeze(x), (action, terms))\n return out, state\n\n def _encode(self, x, state, mask, prev_action=None, prev_reward=None):\n x = tf.expand_dims(x, 1)\n mask = tf.expand_dims(mask, 1)\n x = self.encoder(x)\n if hasattr(self, 'rnn'):\n additional_rnn_input = self._process_additional_input(\n x, prev_action, prev_reward)\n x, state = self.rnn(x, state, mask, \n additional_input=additional_rnn_input)\n else:\n state = None\n x = tf.squeeze(x, 1)\n return x, state\n\n def _process_additional_input(self, x, prev_action, prev_reward):\n results = []\n if prev_action is not None:\n prev_action = tf.reshape(prev_action, (-1, 1))\n prev_action = tf.one_hot(prev_action, self.actor.action_dim, dtype=x.dtype)\n results.append(prev_action)\n if prev_reward is not None:\n prev_reward = tf.reshape(prev_reward, (-1, 1, 1))\n results.append(prev_reward)\n assert_rank(results, 3)\n return results\n\n def reset_states(self, states=None):\n if hasattr(self, 'rnn'):\n self.rnn.reset_states(states)\n\n def get_initial_state(self, inputs=None, batch_size=None, dtype=None):\n return self.rnn.get_initial_state(\n inputs, batch_size=batch_size, dtype=dtype) \\\n if hasattr(self, 'rnn') else None\n\n @property\n def state_size(self):\n return self.rnn.state_size if hasattr(self, 'rnn') else None\n \n @property\n def state_keys(self):\n return self.rnn.state_keys if hasattr(self, 'rnn') else ()\n\ndef create_components(config, env):\n action_dim = env.action_dim\n encoder_config = config['encoder']\n quantile_config = config['quantile']\n q_config = config['q']\n\n encoder_config['time_distributed'] = True\n model = dict(\n encoder=Encoder(encoder_config, name='encoder'),\n quantile=Quantile(quantile_config, name='phi'),\n q=Value(q_config, action_dim, name='q'),\n target_encoder=Encoder(encoder_config, name='target_encoder'),\n target_quantile=Quantile(quantile_config, name='target_phi'),\n target_q=Value(q_config, action_dim, name='target_q'),\n )\n if config.get('rnn'):\n rnn_config = config['rnn']\n model.update({\n 'rnn': rnn(rnn_config, name='rnn'),\n 'target_rnn': rnn(rnn_config, name='target_rnn')\n })\n\n return model\n\ndef create_model(config, env, **kwargs):\n return RDQN(config, env, **kwargs)\n",
"import tensorflow as tf\n\n\n\"\"\" Modified from Adabelief \"\"\"\nclass Adam(tf.keras.optimizers.Optimizer):\n def __init__(\n self,\n learning_rate=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-7,\n weight_decay=0.0,\n rectify=False,\n amsgrad=False,\n total_steps=0,\n warmup_proportion=0.1,\n min_lr=0.0,\n name=\"Adam\",\n **kwargs):\n r\"\"\"Construct a new Adam optimizer.\n Args:\n learning_rate: A `Tensor` or a floating point value, or a schedule\n that is a `tf.keras.optimizers.schedules.LearningRateSchedule`.\n The learning rate.\n beta_1: A float value or a constant float tensor.\n The exponential decay rate for the 1st moment estimates.\n beta_2: A float value or a constant float tensor.\n The exponential decay rate for the 2nd moment estimates.\n epsilon: A small constant for numerical stability.\n weight_decay: A `Tensor` or a floating point value, or a schedule\n that is a `tf.keras.optimizers.schedules.LearningRateSchedule`.\n Weight decay for each parameter.\n rectify: boolean. Whether to enable rectification as in RectifiedAdam\n amsgrad: boolean. Whether to apply AMSGrad variant of this\n algorithm from the paper \"On the Convergence of Adam and\n beyond\".\n sma_threshold. A float value.\n The threshold for simple mean average.\n total_steps: An integer. Total number of training steps.\n Enable warmup by setting a positive value.\n warmup_proportion: A floating point value.\n The proportion of increasing steps.\n min_lr: A floating point value. Minimum learning rate after warmup.\n name: Optional name for the operations created when applying\n gradients. Defaults to \"AdaBeliefOptimizer\".\n **kwargs: keyword arguments. Allowed to be {`clipnorm`,\n `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients\n by norm; `clipvalue` is clip gradients by value, `decay` is\n included for backward compatibility to allow time inverse\n decay of learning rate. 
`lr` is included for backward\n compatibility, recommended to use `learning_rate` instead.\n \"\"\"\n super().__init__(name, **kwargs)\n\n self._set_hyper(\"learning_rate\", kwargs.get(\"lr\", learning_rate))\n self._set_hyper(\"beta_1\", beta_1)\n self._set_hyper(\"beta_2\", beta_2)\n self._set_hyper(\"decay\", self._initial_decay)\n self._set_hyper(\"weight_decay\", weight_decay)\n self._set_hyper(\"total_steps\", int(total_steps))\n self._set_hyper(\"warmup_proportion\", warmup_proportion)\n self._set_hyper(\"min_lr\", min_lr)\n self.epsilon = epsilon or tf.keras.backend.epsilon()\n self.amsgrad = amsgrad\n self.rectify = rectify\n self._has_weight_decay = weight_decay != 0.0\n self._initial_total_steps = total_steps\n self._var_grad_map = {}\n\n def get_transformed_grads(self, vars):\n return [self._var_grad_map[v] for v in vars]\n\n def _create_slots(self, var_list):\n for var in var_list:\n self.add_slot(var, \"m\")\n for var in var_list:\n self.add_slot(var, \"v\")\n if self.amsgrad:\n for var in var_list:\n self.add_slot(var, \"vhat\")\n\n def set_weights(self, weights):\n params = self.weights\n num_vars = int((len(params) - 1) / 2)\n if len(weights) == 3 * num_vars + 1:\n weights = weights[: len(params)]\n super().set_weights(weights)\n\n def _decayed_wd(self, var_dtype):\n wd_t = self._get_hyper(\"weight_decay\", var_dtype)\n if isinstance(wd_t, tf.keras.optimizers.schedules.LearningRateSchedule):\n wd_t = tf.cast(wd_t(self.iterations), var_dtype)\n return wd_t\n\n def _resource_apply_dense(self, grad, var):\n var_dtype = var.dtype.base_dtype\n lr_t = self._decayed_lr(var_dtype)\n wd_t = self._decayed_wd(var_dtype)\n m = self.get_slot(var, \"m\")\n v = self.get_slot(var, \"v\")\n beta_1_t = self._get_hyper(\"beta_1\", var_dtype)\n beta_2_t = self._get_hyper(\"beta_2\", var_dtype)\n epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)\n local_step = tf.cast(self.iterations + 1, var_dtype)\n beta_1_power = tf.math.pow(beta_1_t, local_step)\n beta_2_power = tf.math.pow(beta_2_t, local_step)\n\n if self._initial_total_steps > 0:\n total_steps = self._get_hyper(\"total_steps\", var_dtype)\n warmup_steps = total_steps * self._get_hyper(\"warmup_proportion\", var_dtype)\n min_lr = self._get_hyper(\"min_lr\", var_dtype)\n decay_steps = tf.maximum(total_steps - warmup_steps, 1)\n decay_rate = (min_lr - lr_t) / decay_steps\n lr_t = tf.where(\n local_step <= warmup_steps,\n lr_t * (local_step / warmup_steps),\n lr_t + decay_rate * tf.minimum(local_step - warmup_steps, decay_steps),\n )\n\n m_t = m.assign(\n beta_1_t * m + (1.0 - beta_1_t) * grad, use_locking=self._use_locking\n )\n m_corr_t = m_t / (1.0 - beta_1_power)\n\n v_t = v.assign(\n beta_2_t * v + (1.0 - beta_2_t) * tf.math.square(grad - m_t) + epsilon_t,\n use_locking=self._use_locking,\n )\n\n if self.amsgrad:\n vhat = self.get_slot(var, \"vhat\")\n vhat_t = vhat.assign(tf.maximum(vhat, v_t), use_locking=self._use_locking)\n v_corr_t = tf.math.sqrt(vhat_t / (1.0 - beta_2_power))\n else:\n vhat_t = None\n v_corr_t = tf.math.sqrt(v_t / (1.0 - beta_2_power))\n\n if self.rectify:\n sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0\n sma_t = sma_inf - 2.0 * local_step * beta_2_power / (1.0 - beta_2_power)\n r_t = tf.math.sqrt(\n (sma_t - 4.0)\n / (sma_inf - 4.0)\n * (sma_t - 2.0)\n / (sma_inf - 2.0)\n * sma_inf\n / sma_t\n )\n sma_threshold = self._get_hyper(\"sma_threshold\", var_dtype)\n var_t = tf.where(\n sma_t >= sma_threshold,\n r_t * m_corr_t / (v_corr_t + epsilon_t),\n m_corr_t,\n )\n else:\n var_t = m_corr_t / (v_corr_t + 
epsilon_t)\n\n if self._has_weight_decay:\n var_t += wd_t * var\n\n self._var_grad_map[var] = -lr_t * var_t\n\n var_update = var.assign_sub(lr_t * var_t, use_locking=self._use_locking)\n\n updates = [var_update, m_t, v_t]\n if self.amsgrad:\n updates.append(vhat_t)\n return tf.group(*updates)\n\n def _resource_apply_sparse(self, grad, var, indices):\n raise NotImplementedError\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\n \"learning_rate\": self._serialize_hyperparameter(\"learning_rate\"),\n \"beta_1\": self._serialize_hyperparameter(\"beta_1\"),\n \"beta_2\": self._serialize_hyperparameter(\"beta_2\"),\n \"decay\": self._serialize_hyperparameter(\"decay\"),\n \"weight_decay\": self._serialize_hyperparameter(\"weight_decay\"),\n \"sma_threshold\": self._serialize_hyperparameter(\"sma_threshold\"),\n \"epsilon\": self.epsilon,\n \"amsgrad\": self.amsgrad,\n \"rectify\": self.rectify,\n \"total_steps\": self._serialize_hyperparameter(\"total_steps\"),\n \"warmup_proportion\": self._serialize_hyperparameter(\n \"warmup_proportion\"\n ),\n \"min_lr\": self._serialize_hyperparameter(\"min_lr\"),\n }\n )\n return config",
"from tensorflow.keras import layers\n\nfrom nn.cnn import cnn\nfrom nn.mlp import *\nfrom nn.rnns.lstm import MLSTM\nfrom nn.rnns.gru import MGRU\nfrom nn.dnc.dnc import DNC\n\n\ndef create_encoder(config, name='encoder'):\n config = config.copy()\n if 'cnn_name' in config:\n return cnn(**config, name=name)\n else:\n assert 'units_list' in config\n return mlp(**config, name=name)\n\nEncoder = create_encoder\n\ndef mlp(units_list=[], out_size=None, **kwargs):\n return MLP(units_list, out_size=out_size, **kwargs)\n\ndef rnn(config, name='rnn'):\n config = config.copy()\n rnn_name = config.pop('rnn_name')\n if rnn_name == 'gru':\n return layers.GRU(**config, name=name)\n elif rnn_name == 'mgru':\n return MGRU(config, name=name)\n elif rnn_name == 'lstm':\n return layers.LSTM(**config, name=name)\n elif rnn_name == 'mlstm':\n return MLSTM(config, name=name)\n else:\n raise ValueError(f'Unkown rnn: {rnn_name}')\n\ndef dnc_rnn(output_size, \n access_config=dict(memory_size=128, word_size=16, num_reads=4, num_writes=1), \n controller_config=dict(hidden_size=128),\n clip_value=20,\n name='dnc',\n rnn_config={}):\n \"\"\"Return an RNN that encapsulates DNC\n \n Args:\n output_size: Output dimension size of dnc\n access_config: A dictionary of access module configuration. \n memory_size: The number of memory slots\n word_size: The size of each memory slot\n num_reads: The number of read heads\n num_writes: The number of write heads\n name: name of the access module, optionally\n controller_config: A dictionary of controller(LSTM) module configuration\n clip_value: Clips controller and core output value to between\n `[-clip_value, clip_value]` if specified\n name: module name\n rnn_config: specifies extra arguments for keras.layers.RNN\n \"\"\"\n dnc_cell = DNC(access_config, \n controller_config, \n output_size, \n clip_value, \n name)\n return layers.RNN(dnc_cell, **rnn_config)\n"
] | [
[
"numpy.ones_like",
"tensorflow.nn.sigmoid",
"tensorflow.maximum",
"tensorflow.nn.moments",
"tensorflow.cast",
"tensorflow.reshape",
"tensorflow.sqrt",
"tensorflow.random.normal",
"tensorflow.tile",
"tensorflow.random.set_seed"
],
[
"tensorflow.reshape",
"tensorflow.squeeze",
"tensorflow.one_hot",
"tensorflow.expand_dims"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.math.square",
"tensorflow.math.sqrt",
"tensorflow.maximum",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.where",
"tensorflow.keras.backend.epsilon",
"tensorflow.math.pow",
"tensorflow.group"
],
[
"tensorflow.keras.layers.GRU",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.RNN"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
PabloGomez96/ISAtmosphere | [
"ba98527f99cd145852beb930738d62c5337feb67"
] | [
"ISA.py"
] | [
"import numpy\nimport scipy.constants as Const\ndef AtmLayers(hG):\n Layers = ([0, 11e3, 25e3, 47e3, 53e3, 79e3, 90e3, 105e3])\n FHght = numpy.digitize(hG,Layers)\n switcher = {\n 1: numpy.array([False, -6.5e-3]),\n 2: numpy.array([True, 216.66]),\n 3: numpy.array([False, 3e-3]),\n 4: numpy.array([True, 282.66]),\n 5: numpy.array([False, -4.5e-3]),\n 6: numpy.array([True, 165.66]),\n 7: numpy.array([False, 4e-3])\n }\n return switcher.get(FHght, 'Ha ocurrido un error en la clasificacion atmosferica de la altitud de vuelo!')\n\ndef Temp(Height):\n\tHeight = float(Height)\n\thG = 6.370994e6/(6.370994e6 + Height) * Height\n\tLayer = AtmLayers(hG)\n\tif Layer[0] == False:\n\t\tTz = 288 + ((Layer[1]) * hG)\n\t\treturn Tz\n\telif Layer[0] == True:\n\t\treturn Layer[1]\n\telse:\n\t\tprint('Ha ocurrido un error operacional!')\n\ndef Press(Height):\n\tHeight = float(Height)\n\thG = 6.370994e6/(6.370994e6 + Height) * Height\n\tPz = 101325 * numpy.power((288 + (-6.5e-3) * hG)/288,Const.g * (-1)/(287 * (-6.5e-3)))\n\treturn Pz\n\ndef Densy(Height):\n\tHeight = float(Height)\n\thG = 6.370994e6/(6.370994e6 + Height) * Height\n\trhoz = 1.225 * numpy.power((288 + (-6.5e-3) * hG)/288,(Const.g * (-1)/(287 * (-6.5e-3))) - 1)\n\treturn rhoz\n\ndef Visc(Height):\n\tHeight = float(Height)\n\thG = 6.370994e6/(6.370994e6 + Height) * Height\n\tT = Temp(hG)\n\tT0 = 273.15\n\tu0 = 1.716e-5\n\tuz = u0 * numpy.power(T/T0,1.5) * ((T0 + 110.4)/(T + 110.4))\n\treturn uz\n\ndef Ss(Height):\n\timport pythermophy as pt\n\tHeight = float(Height)\n\thG = 6.370994e6/(6.370994e6 + Height) * Height\n\tT_0 = Temp(hG)\n\tp_0 = Press(hG)\n\tAir = pt.Fluid.init_from_file('/usr/local/lib/python3.8/dist-packages/pythermophy-0.1-py3.8.egg/fluids/Air.yaml')\n\tig = pt.IG(Air)\n\tSoundS = ig.get_speed_of_sound(T_0, p_0)\n\treturn SoundS\n"
] | [
[
"numpy.digitize",
"numpy.array",
"numpy.power"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ahaselsteiner/viroconcom | [
"186d768a7f39788b827173467febb038044199c7",
"186d768a7f39788b827173467febb038044199c7"
] | [
"examples/3D_contour.py",
"manual_tests/manual_test_HDC.py"
] | [
"\"\"\"\nA comprehensive example that shows the whole workflow of\n1) Loading data\n2) Defining the model structure for a joint distribution\n3) Estimating the parameter values of the model (\"Fitting\")\n4) Computing a 3D environmental contour\n\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom virocon import (\n GlobalHierarchicalModel,\n LogNormalDistribution,\n ExponentiatedWeibullDistribution,\n DependenceFunction,\n WidthOfIntervalSlicer,\n HighestDensityContour,\n plot_marginal_quantiles,\n plot_dependence_functions,\n)\n\n\n# Load sea state measurements.\ndata = pd.read_csv(\"datasets/coastDat2_oneyear.csv\", sep=\";\", skipinitialspace=True)\ndata.index = pd.to_datetime(data.pop(data.columns[0]), format=\"%Y-%m-%d-%H\")\n\n\n# Define the structure of the joint model that we will use to describe\n# the the environmental data. To define a joint model, we define the\n# univariate parametric distributions and the dependence structure.\n# The dependence structure is defined using parametric functions.\n\n# A 3-parameter power function, which will be used as a dependence function.\ndef _power3(x, a, b, c):\n return a + b * x ** c\n\n\n# A 3-parameter exponential function, which will be used as a dependence function.\ndef _exp3(x, a, b, c):\n return a + b * np.exp(c * x)\n\n\n# A 3- parameter alpha function, which will be used as a dependence function.\ndef _alpha3(x, a, b, c, d_of_x):\n return (a + b * x ** c) / 2.0445 ** (1 / d_of_x(x))\n\n\n# A 4- parameter logistic function, which will be used as a dependence function.\ndef _logistics4(x, a=1, b=1, c=-1, d=1):\n return a + b / (1 + np.exp(c * (x - d)))\n\n\n# Lower and upper interval boundaries for the three parameter values.\nbounds = [(0, None), (0, None), (None, None)]\nlogistics_bounds = [(0, None), (0, None), (None, 0), (0, None)]\n\npower3 = DependenceFunction(_power3, bounds, latex=\"$a + b * x^c$\")\nexp3 = DependenceFunction(_exp3, bounds, latex=\"$a + b * \\exp(c * x)$\")\nlogistics4 = DependenceFunction(\n _logistics4,\n logistics_bounds,\n weights=lambda x, y: y,\n latex=\"$a + b / (1 + \\exp[c * (x -d)])$\",\n)\nalpha3 = DependenceFunction(\n _alpha3,\n bounds,\n d_of_x=logistics4,\n weights=lambda x, y: y,\n latex=\"$(a + b * x^c) / 2.0445^{1 / F()}$\",\n)\n\n# Define the structure of the joint distribution.\n\n# Wind speed.\ndist_description_0 = {\n \"distribution\": ExponentiatedWeibullDistribution(),\n \"intervals\": WidthOfIntervalSlicer(2, min_n_points=50),\n}\n# Wave height.\ndist_description_1 = {\n \"distribution\": ExponentiatedWeibullDistribution(f_delta=5),\n \"intervals\": WidthOfIntervalSlicer(0.5),\n \"conditional_on\": 0,\n \"parameters\": {\"alpha\": alpha3, \"beta\": logistics4,},\n}\n# Zero-up-crossing period.\ndist_description_2 = {\n \"distribution\": LogNormalDistribution(),\n \"conditional_on\": 1,\n \"parameters\": {\"mu\": power3, \"sigma\": exp3},\n}\n\n\nmodel = GlobalHierarchicalModel(\n [dist_description_0, dist_description_1, dist_description_2]\n)\n\n# Define a dictionary that describes the model.\nsemantics = {\n \"names\": [\"Wind speed\", \"Significant wave height\", \"Zero-up-crossing period\"],\n \"symbols\": [\"V\", \"H_s\", \"T_z\"],\n \"units\": [\"m/s\", \"m\", \"s\"],\n}\n\n# Fit the model to the data (estimate the model's parameter values).\nmodel.fit(data)\n\n# Print the estimated parameter values.\nprint(model)\n\n# Create plots to inspect the model's goodness-of-fit.\nfig1, axs = plt.subplots(1, 3, figsize=[20, 7], 
dpi=300)\nplot_marginal_quantiles(model, data, semantics, axes=axs)\nfig2, axs = plt.subplots(1, 4, figsize=[20, 7], dpi=300)\nplot_dependence_functions(model, semantics, axes=axs)\n\n# Calculate 3D Contour.\nstate_duration = 1 # hours\nreturn_period = 20 # years\nalpha = state_duration / (return_period * 365.25 * 24)\nHDC = HighestDensityContour(model, alpha, limits=[(0, 50), (0, 25), (0, 25)])\n\n# randomly select only 5% of the contour's points to increase performance\nrng = np.random.default_rng(42)\nn_contour_points = len(HDC.coordinates)\nrandom_points = rng.choice(\n n_contour_points, int(0.05 * n_contour_points), replace=False\n)\n\nXs = HDC.coordinates[random_points, 0]\nYs = HDC.coordinates[random_points, 1]\nZs = HDC.coordinates[random_points, 2]\n\nfig = plt.figure(dpi=300, figsize=[15, 7])\nax = fig.add_subplot(111, projection=\"3d\")\nax.scatter(Xs, Ys, Zs, c=\"#004488\")\nax.set_xlabel(\"Wind speed (m/s)\")\nax.set_ylabel(\"Significant wave height (m)\")\nax.set_zlabel(\"Zero-up-crossing period (s)\")\n",
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom virocon import (\n WeibullDistribution,\n LogNormalDistribution,\n GlobalHierarchicalModel,\n DependenceFunction,\n calculate_alpha,\n HighestDensityContour,\n)\n\n\ndef _power3(x, a=0.1000, b=1.489, c=0.1901):\n return a + b * x ** c\n\n\n# A 3-parameter exponential function (a dependence function).\ndef _exp3(x, a=0.0400, b=0.1748, c=-0.2243):\n return a + b * np.exp(c * x)\n\n\nbounds = [(0, None), (0, None), (None, None)]\npower3 = DependenceFunction(_power3, bounds)\nexp3 = DependenceFunction(_exp3, bounds)\n\ndist_description_0 = {\n \"distribution\": WeibullDistribution(alpha=2.776, beta=1.471, gamma=0.8888),\n}\ndist_description_1 = {\n \"distribution\": LogNormalDistribution(),\n \"conditional_on\": 0,\n \"parameters\": {\"mu\": power3, \"sigma\": exp3},\n}\nghm = GlobalHierarchicalModel([dist_description_0, dist_description_1])\n\nalpha = calculate_alpha(3, 50)\nlimits = [(0, 20), (0, 18)]\ndeltas = [0.1, 0.1]\nmy_contour = HighestDensityContour(ghm, alpha, limits, deltas)\n\n\nmy_coordinates = my_contour.coordinates\n\n# %%\n\n# np.savez_compressed(\"reference_data_HDC.npz\", ref_coordinates=my_coordinates)\n\n# %% viroconcom v1\nimport sys\n\nsys.path.append(\"../viroconcom\")\nfrom viroconcom.distributions import (\n WeibullDistribution,\n LognormalDistribution,\n MultivariateDistribution,\n)\nfrom viroconcom.params import ConstantParam, FunctionParam\nfrom viroconcom.contours import HighestDensityContour # noqa\n\n# Define dependency tuple\ndep1 = (None, None, None)\ndep2 = (0, None, 0)\n# Define parameters\nshape = ConstantParam(1.471)\nloc = ConstantParam(0.8888)\nscale = ConstantParam(2.776)\npar1 = (shape, loc, scale)\nmu = FunctionParam(\"power3\", 0.1000, 1.489, 0.1901)\nsigma = FunctionParam(\"exp3\", 0.0400, 0.1748, -0.2243)\n# Create distributions\ndist1 = WeibullDistribution(*par1)\ndist2 = LognormalDistribution(mu=mu, sigma=sigma)\ndistributions = [dist1, dist2]\ndependencies = [dep1, dep2]\nmul_dist = MultivariateDistribution(distributions, dependencies)\n# Calculate contour\nn_years = 50\nlimits = [(0, 20), (0, 18)]\ndeltas = [0.1, 0.1]\nref_contour = HighestDensityContour(mul_dist, n_years, 3, limits, deltas)\n\nref_coordinates = np.array(ref_contour.coordinates).T\n\n\n# %% plots\nplt.close(\"all\")\nx = np.linspace((0, 0), (20, 18), num=100, endpoint=True)\nmy_f = ghm.pdf(x)\nref_f = mul_dist.pdf(x.T)\nassert np.max(ref_f - my_f) == 0\n\nplt.figure()\nplt.scatter(*ref_contour.coordinates, marker=\".\", label=\"viroconcom v1\")\nplt.scatter(\n my_contour.coordinates[:, 0],\n my_contour.coordinates[:, 1],\n marker=\".\",\n label=\"virocon v2\",\n)\nplt.legend()\n\n# np.testing.assert_almost_equal(my_coordinates, ref_coordinates)\n"
] | [
[
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"numpy.exp",
"numpy.random.default_rng",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"numpy.linspace",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stgl/pymccrgb | [
"dc8ad2e46cbe6ff8081c32fa11bce68f869baafa"
] | [
"pymccrgb/core.py"
] | [
"\"\"\" Multiscale curvature classification of ground points with color updates \"\"\"\n\nimport numpy as np\n\nfrom copy import copy\n\nfrom .classification import make_sgd_pipeline\nfrom .features import calculate_color_features\nfrom .pointutils import equal_sample, intersect_rows\n\nfrom pymcc_lidar import calculate_excess_height\n\n\ndef classify_ground_mcc(data, scale, tol, downsample=False):\n \"\"\" Classifies ground points by a single iteration of the MCC algorithm\n\n Classifies ground and nonground (or \"high\") points by comparing the\n elevation of each data point to an interpolated surface. If downsample is\n True, a down-sampled version of the data coordinates will be used when\n interpolating (currently not implemented).\n\n Based on MCC algorithm implemented in [1]_ and [2]_.\n \n .. [1] Evans, Jeffrey S.; Hudak, Andrew T. 2007. A multiscale curvature\n algorithm for classifying discrete return LiDAR in forested\n environments. Geoscience and Remote Sensing. 45(4): 1029-1038.\n .. [2] https://sourceforge.net/p/mcclidar\n\n Parameters\n ----------\n data: array\n A n x 3 (or more) data matrix with rows [x, y, z, ...]\n\n scale: float\n The interpolation scale. This defines the resolution of the\n interpolated surface, which is calculated by a 3 x 3 windowed\n mean around each intrpolation point.\n\n tol: float\n The height tolerance. Points exceeding the durface by more than\n tol units are classified as nonground\n\n downsample: bool\n If True, use a downsampled dataset for interpolation.\n Not implemented.\n\n Returns\n -------\n An n x 1 array of point class labels. By default, 1 is ground,\n and 0 is nonground.\n \"\"\"\n\n if downsample:\n raise NotImplementedError(\"Downsampling has not been implemented.\")\n\n xyz = data[:, 0:3]\n height = calculate_excess_height(xyz.copy(order=\"C\"), scale)\n y = height < tol # 0 = nonground, 1 = ground\n return y\n\n\ndef mcc(\n data,\n scales=[0.5, 1, 1.5],\n tols=[0.3, 0.3, 0.3],\n threshs=[1, 0.1, 0.01],\n use_las_codes=False,\n verbose=False,\n):\n \"\"\" Classifies ground points using the MCC algorithm\n\n Classifies ground and nonground (or \"high\") points by comparing the\n elevation of each data point to an interpolated surface at user-defined\n scales. The algorithm iterates at each scale until a convergence threshold\n is reached based on the percentage of points classified as ground. Only\n ground points are retained after each iteration.\n\n Based on MCC algorithm implemented in [1]_ and [2]_.\n \n .. [1] Evans, Jeffrey S.; Hudak, Andrew T. 2007. A multiscale curvature\n algorithm for classifying discrete return LiDAR in forested\n environments. Geoscience and Remote Sensing. 45(4): 1029-1038.\n .. [2] https://sourceforge.net/p/mcclidar\n\n Parameters\n ----------\n data: array\n A n x d data matrix with rows [x, y, z, ...]\n\n scales: list\n The interpolation scales. This defines the resolution of the\n interpolated surface, which is calculated by a 3 x 3 windowed\n mean around each intrpolation point. Defaults to [0.5, 1, 1.5]\n meters.\n\n tols: list\n The height tolerances. Points exceeding the durface by more than\n tol units are classified as nonground. Deaults to 0.3 meters.\n\n threshs: list\n The convergence thresholds as percentages. Defaults to\n [1%, 0.1%, 0.01%]\n\n use_las_codes: bool\n If True, return LAS 1.4 classification codes (2 = ground,\n 4 = medium vegetation). 
Default False.\n\n Returns\n -------\n data: array\n An m x d array of ground points\n\n labels: array\n An n x 1 array of labels (1 is ground, 0 is nonground)\n \"\"\"\n original_data = copy(data)\n\n for scale, tol, thresh in zip(scales, tols, threshs):\n converged = False\n niter = 0\n while not converged:\n n_points = data.shape[0]\n y = classify_ground_mcc(data, scale, tol)\n ground = y == 1\n n_removed = np.sum(y == 0)\n converged = 100 * (n_removed / n_points) < thresh\n data = data[ground, :]\n\n if verbose:\n print(\"-\" * 20)\n print(\"MCC iteration\")\n print(\"-\" * 20)\n print(\n \"Scale: {:.2f}, Relative height: {:.1e}, iter: {}\".format(\n scale, tol, niter\n )\n )\n print(\n \"Removed {} nonground points ({:.2f} %)\".format(\n n_removed, 100 * (n_removed / n_points)\n )\n )\n\n niter += 1\n\n labels = intersect_rows(data, original_data)\n\n if verbose:\n n_ground = data.shape[0]\n n_points = original_data.shape[0]\n print(\n \"Retained {} ground points ({:.2f} %)\".format(\n n_ground, 100 * (n_ground / n_points)\n )\n )\n\n if use_las_codes:\n labels[labels == 0] = 4 # Vegetation\n labels[labels == 1] = 2 # Ground\n\n return data, labels\n\n\ndef mcc_rgb(\n data,\n scales=[0.5, 1, 1.5],\n tols=[0.3, 0.3, 0.3],\n threshs=[1, 0.1, 0.01],\n training_scales=None,\n training_tols=None,\n n_train=int(1e3),\n max_iter=20,\n n_jobs=1,\n seed=None,\n use_las_codes=False,\n verbose=False,\n **pipeline_kwargs,\n):\n \"\"\" Classifies ground points using the MCC-RGB algorithm\n\n Classifies ground and nonground (or \"high\") points by comparing the\n elevation of each data point to an interpolated surface at user-defined\n scales. The algorithm proceeds as MCC (see the mcc() documentation), except\n that ground points are reclassified based on their color similarity to\n nonground points.\n\n Parameters\n ----------\n data: array\n A n x d data matrix with rows [x, y, z, r, g, b ...]\n\n scales: list\n The interpolation scales. This defines the resolution of the\n interpolated surface, which is calculated by a 3 x 3 windowed\n mean around each interpolation point. Defaults to [0.5, 1, 1.5]\n meters. Scale domains are processed in order of increasing scale.\n\n tols: list\n The height tolerances. Points exceeding the surface by more than\n tol units are classified as nonground. Deaults to 0.3 meters.\n\n threshs: list\n The convergence thresholds as percentages. Defaults to\n [1%, 0.1%, 0.01%]\n\n training_scales: list\n The training interpolation scales.\n This defaults to the first scale domain (e.g., 0.5). Both\n training_scales and training_tols must be specified;\n otherwise the defaults are used.\n\n training_tols: list\n The training relative heights. Defaults to the first\n height tolerance (e.g., 0.3). Can be specified as a list or\n single value\n\n n_train: int\n The total number of points to use for training the color\n classifier. Defaults to 1E5.\n\n max_iter: int\n Maximum number of iterations in a scale domain.\n Defaults to 20.\n\n seed: int\n Optional seed value for selecting training data.\n\n use_las_codes: bool\n If True, return LAS 1.4 classification codes (2 = ground,\n 4 = medium vegetation). Default False.\n\n Returns\n -------\n data: array\n An m x d array of ground points\n\n labels: array\n An n x 1 array of labels (1 is ground, 0 is nonground)\n\n updated: array\n An n x 1 array of labels indicating whether the point was\n updated in an MCC-RGB step. -1 indicates the point's classification\n was not updated. 
If there are multiple training scales,\n this will be the index of the scale and tolerance range defined\n in training_scales and training_tols.\n \"\"\"\n if training_scales is None:\n training_scales = scales[0:1]\n if training_tols is None:\n training_tols = tols[0:1]\n if not isinstance(training_scales, list):\n scale = float(training_scales)\n training_scales = len(training_tols) * [scale]\n if not isinstance(training_tols, list):\n tol = float(training_tols)\n training_tols = len(training_scales) * [tol]\n\n if len(training_scales) != len(training_tols):\n raise ValueError(\n \"Not enough training scales or tolerances provided. Please give \"\n \"two lists of equal length, or a single value for training_tols.\"\n \"Arguments were training_scales={} and training_tols={}\".format(\n training_scales, training_tols\n )\n )\n\n params = zip(scales, tols)\n for scale, tol in zip(training_scales, training_tols):\n if (scale, tol) not in params:\n scales.append(scale)\n tols.append(tol)\n\n idx = np.argsort(scales)\n scales = np.array(scales)\n tols = np.array(tols)\n scales = scales[idx]\n tols = tols[idx]\n\n original_data = copy(data)\n\n # Mask NaN and infinite index/color values\n X = calculate_color_features(data)\n mask = np.isfinite(X).all(axis=-1)\n data = data[mask, :]\n n_points = data.shape[0]\n # updated = np.full((n_points,), fill_value=-1)\n reached_max_iter = False\n\n for scale, tol, thresh in zip(scales, tols, threshs):\n converged = False\n niter = 0\n while not converged and not reached_max_iter:\n y = classify_ground_mcc(data, scale, tol)\n\n if verbose:\n n_removed_mcc = np.sum(y == 0)\n print(\"-\" * 20)\n print(\"MCC step\")\n print(\"-\" * 20)\n print(\n \"Scale: {:.2f}, Relative height: {:.1e}, iter: {}\".format(\n scale, tol, niter\n )\n )\n print(\n \"Removed {} nonground points ({:.2f} %)\".format(\n n_removed_mcc, 100 * (n_removed_mcc / n_points)\n )\n )\n\n update_step = scale in training_scales and tol in training_tols\n first_iter = niter == 0\n if update_step and first_iter:\n if verbose:\n print(\"-\" * 20)\n print(\"Classification update step\")\n print(\"-\" * 20)\n try:\n X = calculate_color_features(data)\n X_train, y_train = equal_sample(\n X, y, size=int(n_train / 2), seed=seed\n )\n pipeline = make_sgd_pipeline(X_train, y_train, **pipeline_kwargs)\n\n if n_jobs > 1 or n_jobs == -1:\n if verbose:\n print(f\"Predicting in parallel using {n_jobs}\")\n\n from sklearn.externals.joblib import Parallel, delayed\n\n pool = Parallel(n_jobs=n_jobs)\n wrapper = delayed(pipeline.predict)\n result = pool(wrapper(x.reshape(1, -1)) for x in X[y == 1, :])\n y_pred_ground = np.array(result).ravel()\n else:\n y_pred_ground = pipeline.predict(X[y == 1, :])\n y_pred = np.zeros_like(y)\n y_pred[y == 1] = y_pred_ground\n\n # params = list(zip(training_scales, training_tols))\n # update_step_idx = params.index((scale, tol))\n # updated[(y == 1) & (y_pred == 0)] = update_step_idx\n\n if verbose:\n n_removed_clf = np.sum((y == 1) & (y_pred == 0))\n print(\n \"Scale: {:.2f}, Relative height: {:.1e}\".format(scale, tol)\n )\n print(\n \"Reclassified {} ground points as nonground ({:.2f} %)\".format(\n n_removed_clf, 100 * (n_removed_clf / n_points)\n )\n )\n\n y[(y == 1) & (y_pred == 0)] = 0\n except ValueError as e:\n print(\"Skipping classification update. 
\")\n print(\"ValueError: \" + str(e))\n\n ground = y == 1\n data = data[ground, :]\n\n n_removed = np.sum(y == 0)\n converged = 100 * (n_removed / n_points) < thresh\n reached_max_iter = niter >= max_iter\n\n if reached_max_iter and verbose:\n print(\"Reached maximum number of iterations ({})\".format(max_iter))\n\n niter += 1\n\n labels = intersect_rows(data, original_data)\n\n if verbose:\n n_ground = data.shape[0]\n n_points = original_data.shape[0]\n print()\n print(\n \"Retained {} ground points ({:.2f} %)\".format(\n n_ground, 100 * (n_ground / n_points)\n )\n )\n\n if use_las_codes:\n labels[labels == 0] = 4 # Vegetation\n labels[labels == 1] = 2 # Ground\n\n return data, labels # , updated\n"
] | [
[
"numpy.isfinite",
"sklearn.externals.joblib.delayed",
"numpy.zeros_like",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"sklearn.externals.joblib.Parallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Scitator/MONAI | [
"a42b563acf0c7504cee18ee84c8af2eff6e948a7"
] | [
"tests/test_copy_itemsd.py"
] | [
"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport time\nimport sys\nimport torch\nimport numpy as np\nfrom parameterized import parameterized\nfrom monai.transforms import CopyItemsd\nfrom monai.utils import ensure_tuple\n\nTEST_CASE_1 = [\"img\", 1, \"img_1\"]\n\nTEST_CASE_2 = [[\"img\", \"seg\"], 1, [\"img_1\", \"seg_1\"]]\n\nTEST_CASE_3 = [\"img\", 2, [\"img_1\", \"img_2\"]]\n\nTEST_CASE_4 = [[\"img\", \"seg\"], 2, [\"img_1\", \"seg_1\", \"img_2\", \"seg_2\"]]\n\n\nclass TestCopyItemsd(unittest.TestCase):\n @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])\n def test_numpy_values(self, keys, times, names):\n input_data = {\"img\": np.array([[0, 1], [1, 2]]), \"seg\": np.array([[0, 1], [1, 2]])}\n result = CopyItemsd(keys=keys, times=times, names=names)(input_data)\n for name in ensure_tuple(names):\n self.assertTrue(name in result)\n result[name] += 1\n np.testing.assert_allclose(result[name], np.array([[1, 2], [2, 3]]))\n np.testing.assert_allclose(result[\"img\"], np.array([[0, 1], [1, 2]]))\n\n def test_tensor_values(self):\n device = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu:0\")\n input_data = {\n \"img\": torch.tensor([[0, 1], [1, 2]], device=device),\n \"seg\": torch.tensor([[0, 1], [1, 2]], device=device),\n }\n result = CopyItemsd(keys=\"img\", times=1, names=\"img_1\")(input_data)\n self.assertTrue(\"img_1\" in result)\n result[\"img_1\"] += 1\n torch.testing.assert_allclose(result[\"img\"], torch.tensor([[0, 1], [1, 2]], device=device))\n torch.testing.assert_allclose(result[\"img_1\"], torch.tensor([[1, 2], [2, 3]], device=device))\n\n def test_array_values(self):\n input_data = {\"img\": [[0, 1], [1, 2]], \"seg\": [[0, 1], [1, 2]]}\n result = CopyItemsd(keys=\"img\", times=1, names=\"img_1\")(input_data)\n self.assertTrue(\"img_1\" in result)\n result[\"img_1\"][0][0] += 1\n np.testing.assert_allclose(result[\"img\"], [[0, 1], [1, 2]])\n np.testing.assert_allclose(result[\"img_1\"], [[1, 1], [1, 2]])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"torch.tensor",
"torch.cuda.is_available",
"numpy.testing.assert_allclose",
"torch.device",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
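[editorial note] For readers skimming the MONAI row above: the transform under test is dictionary-based, and `CopyItemsd(keys, times, names)` copies the selected entries under new keys, so mutating a copy leaves the source untouched (the `test_numpy_values` case above demonstrates exactly this). A minimal standalone restatement of that pattern, assuming `monai` and `numpy` are installed:

    import numpy as np
    from monai.transforms import CopyItemsd

    data = {"img": np.array([[0, 1], [1, 2]])}
    out = CopyItemsd(keys="img", times=1, names="img_1")(data)

    out["img_1"] += 1  # mutate only the copy
    assert np.array_equal(out["img"], [[0, 1], [1, 2]])    # source intact
    assert np.array_equal(out["img_1"], [[1, 2], [2, 3]])  # copy changed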
vkotaru/Car-Racing | [
"f69cec857b6cd660ba6d1c37683ef8cfa0bee8b6"
] | [
"pycar_racing/utils.py"
] | [
"import numpy as np\n\n\ndef linear_regression(x, u, lamb):\n \"\"\"Estimates linear system dynamics\n x, u: data used in the regression\n lamb: regularization coefficient\n \"\"\"\n # Want to solve W^* = argmin sum_i ||W^T z_i - y_i ||_2^2 + lamb ||W||_F,\n # with z_i = [x_i u_i] and W \\in R^{n + d} x n\n Y = x[2 : x.shape[0], :]\n X = np.hstack((x[1 : (x.shape[0] - 1), :], u[1 : (x.shape[0] - 1), :]))\n\n Q = np.linalg.inv(np.dot(X.T, X) + lamb * np.eye(X.shape[1]))\n b = np.dot(X.T, Y)\n W = np.dot(Q, b)\n\n A = W.T[:, 0:6]\n B = W.T[:, 6:8]\n\n error_matrix = np.dot(X, W) - Y\n error_max = np.max(error_matrix, axis=0)\n error_min = np.min(error_matrix, axis=0)\n error = np.vstack((error_max, error_min))\n\n return A, B, error\n"
] | [
[
"numpy.hstack",
"numpy.dot",
"numpy.min",
"numpy.eye",
"numpy.max",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
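[editorial note] A usage note on the `linear_regression` helper in the row above: the slicing `W.T[:, 0:6]` / `W.T[:, 6:8]` implies a 6-dimensional state and 2-dimensional input. A minimal sketch on synthetic data, assuming the function is in scope (e.g. `from pycar_racing.utils import linear_regression`, per the row's file path); the shapes and the regularization value are illustrative assumptions:

    import numpy as np

    rng = np.random.default_rng(0)
    T = 200                             # number of recorded time steps
    x = rng.standard_normal((T, 6))     # state trajectory, one row per step
    u = rng.standard_normal((T, 2))     # input trajectory

    # A, B solve x_{k+1} ~= A x_k + B u_k in a ridge-regularized
    # least-squares sense; `error` stacks the max/min residual per
    # state dimension.
    A, B, error = linear_regression(x, u, lamb=1e-6)
    print(A.shape, B.shape, error.shape)  # (6, 6) (6, 2) (2, 6)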
muralinba12/MetaLearningForNER | [
"61b5159059e486b8e0b50fcd8089554bc26249f6"
] | [
"datasets/utils.py"
] | [
"import glob\nimport json\nimport os\nimport random\n\nfrom torch.utils import data\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import Subset\n\nfrom datasets.episode import Episode\nfrom datasets.wsd_dataset import WordWSDDataset, MetaWSDDataset\nfrom datasets.ner_dataset import NERSampler, read_examples_from_file, get_labels\nfrom transformers import BertTokenizer\n\nbert_tokenizer = BertTokenizer.from_pretrained('bert-base-cased')\n\ndef write_json(json_dict, file_name):\n with open(file_name, 'w', encoding='utf8') as f:\n json.dump(json_dict, f, indent=4)\n\n\ndef read_json(file_name):\n with open(file_name, 'r', encoding='utf8') as f:\n json_dict = json.load(f)\n return json_dict\n\n\ndef get_max_batch_len(batch):\n return max([len(x[0]) for x in batch])\n\n\ndef prepare_batch(batch):\n max_len = get_max_batch_len(batch)\n x = []\n lengths = []\n y = []\n for inp_seq, target_seq in batch:\n lengths.append(len(inp_seq))\n target_seq = target_seq + [-1] * (max_len - len(target_seq))\n x.append(inp_seq)\n y.append(target_seq)\n \n # print (lengths[0], x[0], y[0])\n \n return x, lengths, y\n\n\ndef prepare_bert_batch(batch):\n x = []\n lengths = []\n y = []\n for sentences, labels in batch:\n tokens = []\n label_ids = []\n length = 0\n for word, label in zip(sentences, labels):\n word_tokens = bert_tokenizer.tokenize(word)\n tokens.extend(word_tokens)\n label_ids.extend([label] + [-1] * (len(word_tokens) - 1))\n length += len(word_tokens)\n \n # check\n# if all([lab == -1 for lab in label_ids]):\n# print (labels)\n assert(all([lab == -1 for lab in label_ids]) == False)\n \n x.append(tokens)\n lengths.append(length)\n y.append(label_ids)\n \n max_len = max(lengths)\n for i in range(len(y)):\n y[i] = y[i] + [-1] * (max_len - len(y[i]))\n \n# print (x[-1])\n# print (batch[-1][1])\n# print (y[-1])\n \n return x, lengths, y\n\n\ndef prepare_task_batch(batch):\n return batch\n\n\ndef generate_semcor_wsd_episodes(wsd_dataset, n_episodes, n_support_examples, n_query_examples, task):\n word_splits = {k: v for (k, v) in wsd_dataset.word_splits.items() if len(v['sentences']) >\n (n_support_examples + n_query_examples)}\n\n if n_episodes > len(word_splits):\n raise Exception('Not enough data available to generate {} episodes'.format(n_episodes))\n\n episodes = []\n for word in word_splits.keys():\n if len(episodes) == n_episodes:\n break\n indices = list(range(len(word_splits[word]['sentences'])))\n random.shuffle(indices)\n start_index = 0\n train_subset = WordWSDDataset(sentences=[word_splits[word]['sentences'][i] for i in indices[start_index: start_index + n_support_examples]],\n labels=[word_splits[word]['labels'][i] for i in indices[start_index: start_index + n_support_examples]],\n n_classes=len(wsd_dataset.sense_inventory[word]))\n support_loader = data.DataLoader(train_subset, batch_size=n_support_examples, collate_fn=prepare_batch)\n start_index += n_support_examples\n test_subset = WordWSDDataset(sentences=[word_splits[word]['sentences'][i] for i in indices[start_index: start_index + n_query_examples]],\n labels=[word_splits[word]['labels'][i] for i in indices[start_index: start_index + n_query_examples]],\n n_classes=len(wsd_dataset.sense_inventory[word]))\n query_loader = data.DataLoader(test_subset, batch_size=n_query_examples, collate_fn=prepare_batch)\n episode = Episode(support_loader=support_loader,\n query_loader=query_loader,\n base_task=task,\n task_id=task + '-' + word,\n n_classes=train_subset.n_classes)\n episodes.append(episode)\n return episodes\n\n\ndef 
generate_wsd_episodes(dir, n_episodes, n_support_examples, n_query_examples, task, meta_train=True):\n episodes = []\n for file_name in glob.glob(os.path.join(dir, '*.json')):\n if len(episodes) == n_episodes:\n break\n word = file_name.split(os.sep)[-1].split('.')[0]\n word_wsd_dataset = MetaWSDDataset(file_name)\n train_subset = Subset(word_wsd_dataset, range(0, n_support_examples))\n support_loader = data.DataLoader(train_subset, batch_size=n_support_examples, collate_fn=prepare_batch)\n if meta_train:\n test_subset = Subset(word_wsd_dataset, range(n_support_examples, n_support_examples + n_query_examples))\n else:\n test_subset = Subset(word_wsd_dataset, range(n_support_examples, len(word_wsd_dataset)))\n query_loader = data.DataLoader(test_subset, batch_size=n_query_examples, collate_fn=prepare_batch)\n episode = Episode(support_loader=support_loader,\n query_loader=query_loader,\n base_task=task,\n task_id=task + '-' + word,\n n_classes=word_wsd_dataset.n_classes)\n episodes.append(episode)\n return episodes\n\n\ndef generate_ner_episodes(dir, labels_file, n_episodes, n_support_examples, n_query_examples, task, \n meta_train=True, vectors='bert'):\n episodes = []\n labels = get_labels(labels_file)\n examples, label_map = read_examples_from_file(dir, labels)\n# print ('label_map', label_map)\n ner_dataset = NERSampler(examples, labels, label_map, 6, n_support_examples, n_query_examples, n_episodes)\n for index, ner_data in enumerate(ner_dataset):\n tags, sup_sents, query_sents = ner_data\n \n# print (len(tags), len(sup_sents.labels), len(query_sents.labels))\n if vectors == 'bert':\n support_loader = data.DataLoader(sup_sents, batch_size=6*n_support_examples, \n collate_fn=lambda pb: prepare_bert_batch(pb))\n query_loader = data.DataLoader(query_sents, batch_size=6*n_query_examples, \n collate_fn=lambda pb: prepare_bert_batch(pb))\n else:\n support_loader = data.DataLoader(sup_sents, batch_size=6*n_support_examples, \n collate_fn=prepare_batch)\n query_loader = data.DataLoader(query_sents, batch_size=6*n_query_examples, \n collate_fn=prepare_batch)\n episode = Episode(support_loader=support_loader,\n query_loader=query_loader,\n base_task=task,\n task_id=task + '-' + str(index),\n n_classes=len(labels))\n \n episodes.append(episode)\n return episodes"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
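[editorial note] One easy-to-miss detail of `prepare_batch` in the row above: it pads only the label sequences with -1 (presumably treated as an ignore index downstream), while the input token lists are returned ragged alongside their lengths. A tiny illustration, assuming the function above is in scope and that inputs and labels have equal length per example, as the padding arithmetic implies:

    batch = [([5, 6, 7], [1, 0, 1]),  # (input token ids, tag ids)
             ([8, 9],    [0, 1])]

    x, lengths, y = prepare_batch(batch)
    # x:       [[5, 6, 7], [8, 9]]      -- inputs left unpadded
    # lengths: [3, 2]
    # y:       [[1, 0, 1], [0, 1, -1]]  -- labels padded with -1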
TheRakeshPurohit/tensorflow | [
"bee6d5a268122df99e1e55a7b92517e84ad25bab",
"bee6d5a268122df99e1e55a7b92517e84ad25bab",
"bee6d5a268122df99e1e55a7b92517e84ad25bab",
"bee6d5a268122df99e1e55a7b92517e84ad25bab"
] | [
"tensorflow/python/trackable/converter.py",
"tensorflow/python/kernel_tests/data_structures/lookup_ops_test.py",
"tensorflow/core/tfrt/saved_model/tests/gen_resource_gather_v1.py",
"tensorflow/python/ops/numpy_ops/integration_test/benchmarks/micro_benchmarks.py"
] | [
"# Copyright 2022 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Util for converting a Python object to a Trackable.\"\"\"\n\n\nfrom tensorflow.python.eager import function_saved_model_utils\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.trackable import base\nfrom tensorflow.python.trackable import data_structures\n\n\ndef convert_to_trackable(obj, parent=None):\n \"\"\"Converts `obj` to `Trackable`.\"\"\"\n if isinstance(obj, base.Trackable):\n return obj\n obj = data_structures.wrap_or_unwrap(obj)\n if (tensor_util.is_tf_type(obj) and\n obj.dtype not in (dtypes.variant, dtypes.resource) and\n not resource_variable_ops.is_resource_variable(obj)):\n return function_saved_model_utils.TrackableConstant(obj, parent)\n if not isinstance(obj, base.Trackable):\n raise ValueError(f\"Cannot convert {obj} to Trackable.\")\n return obj\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lookup ops.\"\"\"\nimport os\nimport tempfile\nimport unittest\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport six\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.checkpoint import util as checkpoint_util\nfrom tensorflow.python.client import session\nfrom tensorflow.python.data.experimental.ops import counter\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.eager import wrap_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import map_fn\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import load as saved_model_load\nfrom tensorflow.python.saved_model import save as saved_model_save\nfrom tensorflow.python.training import saver\nfrom tensorflow.python.training import server_lib\nfrom tensorflow.python.training.tracking import graph_view\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.training.tracking import util as trackable\nfrom tensorflow.python.util import compat\n\n\nclass BaseLookupTableTest(test.TestCase):\n\n def getHashTable(self):\n if tf2.enabled():\n return lookup_ops.StaticHashTable\n else:\n return lookup_ops.StaticHashTableV1\n\n def getVocabularyTable(self):\n if tf2.enabled():\n return lookup_ops.StaticVocabularyTable\n else:\n return lookup_ops.StaticVocabularyTableV1\n\n def initialize_table(self, table):\n if not tf2.enabled():\n self.evaluate(table.initializer)\n\n\nSKIP_ANONYMOUS_IN_TF1_REASON = (\n \"In v1 graph mode, each self.evaluate call will execute the handle \"\n \"creation op (e.g. 
AnonymousHashTable) which will create a new table \"\n \"resource unrelated to other self.evaluate calls, so we can't test \"\n \"anonymous resources with self.evaluate .\"\n)\n\n\[email protected]_parameters(\n (f\"_{is_anonymous}\", is_anonymous) for is_anonymous in [False, True])\nclass StaticHashTableTest(BaseLookupTableTest, parameterized.TestCase):\n\n def testStaticHashTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertEqual(table._is_anonymous, is_anonymous)\n self.initialize_table(table)\n\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n self.assertAllEqual([3], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n exported_keys_tensor, exported_values_tensor = table.export()\n\n self.assertItemsEqual([b\"brain\", b\"salad\", b\"surgery\"],\n self.evaluate(exported_keys_tensor))\n self.assertItemsEqual([0, 1, 2], self.evaluate(exported_values_tensor))\n\n def testStaticHashTableFindHighRank(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([[\"brain\", \"salad\"],\n [\"tank\", \"tarkus\"]])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([[0, 1], [-1, -1]], result)\n\n def testStaticHashTableInitWithPythonArrays(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = [\"brain\", \"salad\", \"surgery\"]\n values = [0, 1, 2]\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(\n keys, values, value_dtype=dtypes.int64),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def testStaticHashTableInitWithNumPyArrays(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = np.array([\"brain\", \"salad\", \"surgery\"], dtype=np.str_)\n values = np.array([0, 1, 2], dtype=np.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def 
testMultipleStaticHashTables(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n\n table1 = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n table2 = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n table3 = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n self.initialize_table(table1)\n self.initialize_table(table2)\n self.initialize_table(table3)\n self.assertAllEqual(3, self.evaluate(table1.size()))\n self.assertAllEqual(3, self.evaluate(table2.size()))\n self.assertAllEqual(3, self.evaluate(table3.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output1 = table1.lookup(input_string)\n output2 = table2.lookup(input_string)\n output3 = table3.lookup(input_string)\n\n out1, out2, out3 = self.evaluate([output1, output2, output3])\n self.assertAllEqual([0, 1, -1], out1)\n self.assertAllEqual([0, 1, -1], out2)\n self.assertAllEqual([0, 1, -1], out3)\n\n def testStaticHashTableWithTensorDefault(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant(-1, dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def testStaticHashTableGetItem(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant(-1, dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table[input_string]\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def testStaticHashTableWithSparseTensorInput(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant(-1, dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n sp_indices = [[0, 0], [0, 1], [1, 0]]\n sp_shape = [2, 2]\n input_tensor = sparse_tensor.SparseTensor(\n constant_op.constant(sp_indices, dtypes.int64),\n constant_op.constant([\"brain\", \"salad\", \"tank\"]),\n constant_op.constant(sp_shape, dtypes.int64))\n output = table.lookup(input_tensor)\n\n out_indices, 
out_values, out_shape = self.evaluate(output)\n\n self.assertAllEqual([0, 1, -1], out_values)\n self.assertAllEqual(sp_indices, out_indices)\n self.assertAllEqual(sp_shape, out_shape)\n\n def testStaticHashTableWithRaggedTensorInput(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant(-1, dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n row_splits = [0, 2, 3]\n input_tensor = ragged_tensor.RaggedTensor.from_row_splits(\n constant_op.constant([\"brain\", \"salad\", \"tank\"]),\n constant_op.constant(row_splits, dtypes.int64))\n output = table.lookup(input_tensor)\n\n out = self.evaluate(output)\n\n self.assertAllEqual([0, 1, -1], out.values)\n self.assertAllEqual(row_splits, out.row_splits)\n\n def testSignatureMismatch(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n # Ref types do not produce a lookup signature mismatch.\n input_string_ref = variables.Variable(\"brain\")\n self.evaluate(input_string_ref.initializer)\n self.assertEqual(0, self.evaluate(table.lookup(input_string_ref)))\n\n input_string = constant_op.constant([1, 2, 3], dtypes.int64)\n with self.assertRaises(TypeError):\n table.lookup(input_string)\n\n with self.assertRaises(TypeError):\n self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n \"UNK\",\n experimental_is_anonymous=is_anonymous)\n\n def testDTypes(self, is_anonymous):\n default_val = -1\n with self.assertRaises(TypeError):\n self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer([\"a\"], [1], [dtypes.string],\n dtypes.int64),\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n @test_util.run_v1_only(\"(Cached) Sessions not available in TF2.0\")\n def testNotInitialized(self, is_anonymous):\n with self.cached_session():\n default_val = -1\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer([\"a\"], [1],\n value_dtype=dtypes.int64),\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n output = table.lookup(input_string)\n\n with self.assertRaisesOpError(\"Table not initialized\"):\n self.evaluate(output)\n\n @test_util.run_v1_only(\"(Cached) Sessions not available in TF2.0\")\n def testInitializeTwice(self, is_anonymous):\n with self.cached_session():\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n # Make sure that initializing twice doesn't throw any errors.\n self.initialize_table(table)\n\n def testInitializationWithInvalidDimensions(self, is_anonymous):\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", 
\"surgery\"])\n values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)\n\n raised_error = ValueError\n if context.executing_eagerly():\n raised_error = errors_impl.InvalidArgumentError\n with self.assertRaises(raised_error):\n self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n @test_util.run_v1_only(\"Sessions not available in TF2.0\")\n def testMultipleSessions(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n # Start a server\n server = server_lib.Server({\"local0\": [\"localhost:0\"]},\n protocol=\"grpc\",\n start=True)\n # Create two sessions sharing the same state\n session1 = session.Session(server.target)\n session2 = session.Session(server.target)\n\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n name=\"t1\",\n experimental_is_anonymous=is_anonymous)\n\n # Init the table in the first session.\n with session1:\n self.initialize_table(table)\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n # Init the table in the second session and verify that we do not get a\n # \"Table already initialized\" error.\n with session2:\n self.evaluate(table.initializer)\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n @test_util.run_v2_only\n def testImportedHashTable(self, is_anonymous):\n g = ops.Graph()\n with g.as_default():\n t = lookup_ops.StaticHashTable(\n lookup_ops.KeyValueTensorInitializer([\"a\"], [1]),\n 2)\n init_op = t._init_op\n op = t.lookup(ops.convert_to_tensor([\"a\"]))\n meta_graph = saver.export_meta_graph()\n\n def f():\n saver.import_meta_graph(meta_graph)\n return ops.get_default_graph().get_tensor_by_name(op.name)\n\n wrapped = wrap_function.wrap_function(f, [])\n pruned_init_fn = wrapped.prune(\n (), [wrapped.graph.get_operation_by_name(init_op.name)])\n self.evaluate(pruned_init_fn())\n self.assertAllEqual([1], wrapped())\n\n def testStaticHashTableInt32String(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = \"n/a\"\n keys = constant_op.constant([0, 1, 2], dtypes.int32)\n values = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n input_tensor = constant_op.constant([0, 1, -1])\n output = table.lookup(input_tensor)\n\n result = self.evaluate(output)\n self.assertAllEqual([b\"brain\", b\"salad\", b\"n/a\"], result)\n\n def testTableUseInFunction(self, is_anonymous):\n if not context.executing_eagerly():\n self.skipTest(\"Only Eager mode test.\")\n keys = constant_op.constant([0, 1, 2], dtypes.int32)\n values = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n \"n/a\",\n experimental_is_anonymous=is_anonymous)\n\n @function.defun()\n def lookup_table_func(k):\n return table.lookup(k)\n\n result = lookup_table_func(constant_op.constant([0, 1, -1]))\n self.assertAllEqual([b\"brain\", b\"salad\", b\"n/a\"], result)\n result = lookup_table_func(constant_op.constant([2, -1, 1]))\n self.assertAllEqual([b\"surgery\", b\"n/a\", b\"salad\"], result)\n\n def 
testTableCreatedInFunction(self, is_anonymous):\n if not context.executing_eagerly():\n self.skipTest(\"Only Eager mode test.\")\n keys = constant_op.constant([0, 1, 2], dtypes.int32)\n values = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n\n @function.defun()\n def lookup_table_func(k):\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n \"n/a\",\n experimental_is_anonymous=is_anonymous)\n return table.lookup(k)\n\n result = lookup_table_func(constant_op.constant([0, 1, -1]))\n self.assertAllEqual([b\"brain\", b\"salad\", b\"n/a\"], result)\n result = lookup_table_func(constant_op.constant([2, -1, 1]))\n self.assertAllEqual([b\"surgery\", b\"n/a\", b\"salad\"], result)\n\n def testTwoTablesInControlFlow(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([1, 2, 3], dtypes.int32)\n values = constant_op.constant([5, 10, 15], dtypes.int32)\n\n def table_func1(x):\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n -1,\n experimental_is_anonymous=is_anonymous)\n return table.lookup(x)\n\n elems = np.array([2, 4, 1], dtype=np.int32)\n result1 = map_fn.map_fn(table_func1, elems, dtype=dtypes.int32)\n\n def table_func2(x):\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n -1,\n experimental_is_anonymous=is_anonymous)\n return table.lookup(x)\n\n elems = np.array([2, 4, 1], dtype=np.int32)\n result2 = map_fn.map_fn(table_func2, elems, dtype=dtypes.int32)\n\n self.evaluate(lookup_ops.tables_initializer())\n\n self.assertAllEqual([10, -1, 5], self.evaluate(result1))\n self.assertAllEqual([10, -1, 5], self.evaluate(result2))\n\n @test_util.enable_control_flow_v2\n def testLookupTableInWhileV2(self, is_anonymous):\n lookup = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(\n constant_op.constant([2, 5], dtype=dtypes.int64),\n constant_op.constant([-10.0, 1], dtype=dtypes.float32)),\n -1,\n experimental_is_anonymous=is_anonymous)\n\n beta = variables.Variable(1.0, trainable=True)\n\n @def_function.function\n def get_loss(unused_beta):\n return map_fn.map_fn(\n lookup.lookup,\n constant_op.constant([2, 3], dtype=dtypes.int64),\n dtype=dtypes.float32)\n\n with backprop.GradientTape() as tape:\n loss = get_loss(beta)\n\n self.assertIsNone(tape.gradient(loss, beta))\n\n @test_util.enable_control_flow_v2\n def testLookupTableInCondV2(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n lookup = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(\n constant_op.constant([2, 5], dtype=dtypes.int64),\n constant_op.constant([-10.0, 1], dtype=dtypes.float32)),\n -1,\n experimental_is_anonymous=is_anonymous)\n\n beta = variables.Variable(1.0, trainable=True)\n\n @def_function.function\n def get_loss(beta):\n\n def true_fn():\n return lookup.lookup(constant_op.constant(2, dtype=dtypes.int64))\n\n def false_fn():\n return constant_op.constant(0, dtype=dtypes.float32)\n\n return beta * control_flow_ops.cond(\n constant_op.constant(True), true_fn=true_fn, false_fn=false_fn)\n\n with backprop.GradientTape() as tape:\n loss = get_loss(beta)\n grad = tape.gradient(loss, beta)\n self.evaluate(variables.global_variables_initializer())\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual(grad, -10.)\n\n def testExportShapeInference(self, is_anonymous):\n table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(\n 
constant_op.constant([2, 5], dtype=dtypes.int64),\n constant_op.constant([-10.0, 1], dtype=dtypes.float32)),\n -1,\n experimental_is_anonymous=is_anonymous)\n actual_shapes = [t.shape for t in table.export()]\n inferred_shapes = []\n\n @def_function.function\n def f():\n for t in table.export():\n inferred_shapes.append(t.shape)\n\n f()\n self.assertLen(actual_shapes, 2)\n self.assertLen(inferred_shapes, 2)\n self.assertTrue(inferred_shapes[0].is_compatible_with(actual_shapes[0]))\n self.assertTrue(inferred_shapes[1].is_compatible_with(actual_shapes[1]))\n\n @test_util.run_v2_only\n def testSavedModelSaveRestore(self, is_anonymous):\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n root = tracking.AutoTrackable()\n\n default_value = -1\n keys = constant_op.constant([11, 12, 13], dtypes.int64)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n root.table = self.getHashTable()(\n lookup_ops.KeyValueTensorInitializer(keys, values),\n default_value,\n experimental_is_anonymous=is_anonymous)\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec((), dtypes.int64)])\n def lookup(key):\n return root.table.lookup(key)\n\n @def_function.function(input_signature=[])\n def size():\n return root.table.size()\n\n @def_function.function(input_signature=[])\n def is_ref_counting():\n return test_ops.is_resource_handle_ref_counting(\n root.table.resource_handle)\n\n root.lookup = lookup\n root.size = size\n root.is_ref_counting = is_ref_counting\n\n self.assertEqual(root.table.size(), 3)\n self.assertEqual(root.lookup(12), 1)\n self.assertEqual(root.lookup(10), -1)\n self.assertLen(root.table.export()[0], 3)\n self.assertEqual(root.is_ref_counting(), is_anonymous)\n\n saved_model_save.save(root, save_path)\n\n del root\n loaded = saved_model_load.load(save_path)\n self.assertEqual(loaded.size(), 3)\n self.assertEqual(loaded.lookup(12), 1)\n self.assertEqual(loaded.lookup(10), -1)\n self.assertEqual(loaded.is_ref_counting(), is_anonymous)\n\n\[email protected]_parameters(\n (f\"_{is_anonymous}\", is_anonymous) for is_anonymous in [False, True])\nclass KeyValueTensorInitializerTest(BaseLookupTableTest):\n\n def test_string(self, is_anonymous):\n init = lookup_ops.KeyValueTensorInitializer(\n (\"brain\", \"salad\", \"surgery\"), (0, 1, 2), dtypes.string, dtypes.int64)\n table = self.getHashTable()(\n init, default_value=-1, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n def test_multiple_tables(self, is_anonymous):\n with ops.name_scope(\"table_scope\"):\n init1 = lookup_ops.KeyValueTensorInitializer(\n (\"brain\", \"salad\", \"surgery\"), (0, 1, 2), dtypes.string, dtypes.int64)\n table1 = self.getHashTable()(\n init1, default_value=-1, experimental_is_anonymous=is_anonymous)\n if not context.executing_eagerly():\n self.assertEqual(\"hash_table\", table1.name)\n self.assertEqual(\"table_scope/hash_table\",\n table1.resource_handle.op.name)\n init2 = lookup_ops.KeyValueTensorInitializer(\n (\"brain\", \"salad\", \"surgery\"), (0, 1, 2), dtypes.string, dtypes.int64)\n table2 = self.getHashTable()(\n init2, default_value=-1, experimental_is_anonymous=is_anonymous)\n if not context.executing_eagerly():\n self.assertEqual(\"hash_table_1\", table2.name)\n self.assertEqual(\"table_scope/hash_table_1\",\n table2.resource_handle.op.name)\n\n def test_int64(self, is_anonymous):\n init = lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),\n dtypes.int64, 
dtypes.int64)\n table = self.getHashTable()(\n init, default_value=-1, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n def test_int32(self, is_anonymous):\n init = lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),\n dtypes.int32, dtypes.int64)\n with self.assertRaises(errors_impl.OpError):\n table = self.getHashTable()(\n init, default_value=-1, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n\[email protected]_parameters(\n (f\"_{is_anonymous}\", is_anonymous) for is_anonymous in [False, True])\nclass InitializeTableFromFileOpTest(BaseLookupTableTest):\n\n def _createVocabFile(self, basename, values=(\"brain\", \"salad\", \"surgery\")):\n vocabulary_file = os.path.join(self.get_temp_dir(), basename)\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join(values) + \"\\n\")\n return vocabulary_file\n\n def testInitializeStringTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocabulary_file = self._createVocabFile(\"one_column_1.txt\")\n default_value = -1\n init = lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)\n self.assertIn(\"one_column_1.txt_-2_-1\", init._shared_name)\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n output = table.lookup(constant_op.constant([\"brain\", \"salad\", \"tank\"]))\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def testInitializeInt64Table(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocabulary_file = self._createVocabFile(\n \"one_column_int64.txt\", values=(\"42\", \"1\", \"-1000\"))\n\n with self.cached_session():\n default_value = -1\n init = lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.int64, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)\n self.assertIn(\"one_column_int64.txt_-2_-1\", init._shared_name)\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n output = table.lookup(\n constant_op.constant((42, 1, 11), dtype=dtypes.int64))\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def testInitializeIndexTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocabulary_file = self._createVocabFile(\"one_column_2.txt\")\n\n with self.cached_session():\n default_value = \"UNK\"\n key_index = lookup_ops.TextFileIndex.LINE_NUMBER\n value_index = lookup_ops.TextFileIndex.WHOLE_LINE\n init = lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.int64, key_index, dtypes.string, value_index)\n self.assertIn(\"one_column_2.txt_-1_-2\", init._shared_name)\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n output = table.lookup(input_values)\n\n result = self.evaluate(output)\n self.assertAllEqual([b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"], result)\n\n def testMultiColumn(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocabulary_file = os.path.join(self.get_temp_dir(), \"three_columns.txt\")\n with 
open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join([\"0\\tbrain\\t1\", \"1\\tsalad\\t5\", \"2\\tsurgery\\t6\"]) + \"\\n\")\n\n with self.cached_session():\n default_value = -1\n key_index = 1\n value_index = 2\n\n init = lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.string, key_index, dtypes.int64, value_index)\n self.assertIn(\"three_columns.txt_1_2\", init._shared_name)\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([1, 5, 6], result)\n\n def testInvalidDataTypeInMultiColumn(self, is_anonymous):\n vocabulary_file = os.path.join(self.get_temp_dir(), \"three_columns.txt\")\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join([\"0\\tbrain\\t1\", \"1\\tsalad\\t5\", \"2\\tsurgery\\t6\"]) + \"\\n\")\n\n with self.cached_session():\n default_value = -1\n key_index = 2\n value_index = 1\n init = lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.string, key_index, dtypes.int64, value_index)\n self.assertIn(\"three_columns.txt_2_1\", init._shared_name)\n with self.assertRaisesOpError(\"is not a valid\"):\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n def testInvalidDataType(self, is_anonymous):\n vocabulary_file = self._createVocabFile(\"one_column_3.txt\")\n\n with self.cached_session():\n default_value = \"UNK\"\n key_index = lookup_ops.TextFileIndex.WHOLE_LINE\n value_index = lookup_ops.TextFileIndex.LINE_NUMBER\n\n with self.assertRaises(ValueError):\n init = lookup_ops.TextFileInitializer(vocabulary_file, dtypes.int64,\n key_index, dtypes.string,\n value_index)\n self.assertIn(\"one_column_3.txt_-2_-1\", init._shared_name)\n self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n\n def testInvalidIndex(self, is_anonymous):\n vocabulary_file = self._createVocabFile(\"one_column_4.txt\")\n with self.cached_session():\n default_value = -1\n key_index = 1 # second column of the line\n value_index = lookup_ops.TextFileIndex.LINE_NUMBER\n init = lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.string, key_index, dtypes.int64, value_index)\n self.assertIn(\"one_column_4.txt_1_-1\", init._shared_name)\n\n with self.assertRaisesOpError(\"Invalid number of columns\"):\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n def testInitializeSameTableWithMultipleNodes(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocabulary_file = self._createVocabFile(\"one_column_5.txt\")\n\n with self.cached_session():\n default_value = -1\n init1 = lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)\n self.assertIn(\"one_column_5.txt_-2_-1\", init1._shared_name)\n table1 = self.getHashTable()(\n init1, default_value, experimental_is_anonymous=is_anonymous)\n init2 = lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)\n self.assertIn(\"one_column_5.txt_-2_-1\", init2._shared_name)\n table2 = self.getHashTable()(\n init2, default_value, 
experimental_is_anonymous=is_anonymous)\n init3 = lookup_ops.TextFileInitializer(\n vocabulary_file, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)\n self.assertIn(\"one_column_5.txt_-2_-1\", init3._shared_name)\n table3 = self.getHashTable()(\n init3, default_value, experimental_is_anonymous=is_anonymous)\n\n self.evaluate(lookup_ops.tables_initializer())\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n\n output1 = table1.lookup(input_string)\n output2 = table2.lookup(input_string)\n output3 = table3.lookup(input_string)\n\n out1, out2, out3 = self.evaluate([output1, output2, output3])\n self.assertAllEqual([0, 1, -1], out1)\n self.assertAllEqual([0, 1, -1], out2)\n self.assertAllEqual([0, 1, -1], out3)\n\n def testInitializeTableWithNoFilename(self, is_anonymous):\n with self.cached_session():\n default_value = -1\n with self.assertRaises(ValueError):\n self.getHashTable()(\n lookup_ops.TextFileInitializer(\n \"\", dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value,\n experimental_is_anonymous=is_anonymous)\n\n def testInitializeWithVocabSize(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n with self.cached_session():\n default_value = -1\n vocab_size = 3\n vocabulary_file1 = self._createVocabFile(\"one_column6.txt\")\n init1 = lookup_ops.TextFileInitializer(\n vocabulary_file1,\n dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER,\n vocab_size=vocab_size)\n self.assertIn(\"one_column6.txt_3_-2_-1\", init1._shared_name)\n table1 = self.getHashTable()(\n init1, default_value, experimental_is_anonymous=is_anonymous)\n\n # Initialize from file.\n self.initialize_table(table1)\n self.assertEqual(vocab_size, self.evaluate(table1.size()))\n\n vocabulary_file2 = self._createVocabFile(\"one_column7.txt\")\n vocab_size = 5\n init2 = lookup_ops.TextFileInitializer(\n vocabulary_file2,\n dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER,\n vocab_size=vocab_size)\n self.assertIn(\"one_column7.txt_5_-2_-1\", init2._shared_name)\n with self.assertRaisesOpError(\"Invalid vocab_size\"):\n table2 = self.getHashTable()(\n init2, default_value, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table2)\n\n vocab_size = 1\n vocabulary_file3 = self._createVocabFile(\"one_column3.txt\")\n init3 = lookup_ops.TextFileInitializer(\n vocabulary_file3,\n dtypes.string,\n lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64,\n lookup_ops.TextFileIndex.LINE_NUMBER,\n vocab_size=vocab_size)\n self.assertIn(\"one_column3.txt_1_-2_-1\", init3._shared_name)\n table3 = self.getHashTable()(\n init3, default_value, experimental_is_anonymous=is_anonymous)\n\n # Smaller vocab size reads only vocab_size records.\n self.initialize_table(table3)\n self.assertEqual(vocab_size, self.evaluate(table3.size()))\n\n @test_util.run_v1_only(\"placeholder usage\")\n def testFeedVocabularyName(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocabulary_file = self._createVocabFile(\"feed_vocabulary.txt\")\n\n with self.cached_session():\n default_value = -1\n init = lookup_ops.TextFileInitializer(\n \"old_file.txt\", dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER)\n 
self.assertIn(\"old_file.txt_-2_-1\", init._shared_name)\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n\n # Initialize with non existing file (old_file.txt) should fail.\n # TODO(yleon): Update message, which might change per FileSystem.\n with self.assertRaisesOpError(\"old_file.txt\"):\n self.evaluate(table.initializer)\n\n # Initialize the model feeding the vocabulary file.\n filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)\n table.initializer.run(feed_dict={filenames[0]: vocabulary_file})\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def testInvalidFilenames(self, is_anonymous):\n vocabulary_file = self._createVocabFile(\"filename_shape.txt\")\n\n with self.cached_session():\n default_value = -1\n\n # Invalid data type\n other_type = constant_op.constant(1)\n with self.assertRaises(Exception) as cm:\n self.getHashTable()(\n lookup_ops.TextFileInitializer(\n other_type, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value,\n experimental_is_anonymous=is_anonymous)\n self.assertIsInstance(cm.exception, (ValueError, TypeError))\n\n # Non-scalar filename\n filenames = constant_op.constant([vocabulary_file, vocabulary_file])\n if not context.executing_eagerly():\n with self.assertRaises(Exception) as cm:\n self.getHashTable()(\n lookup_ops.TextFileInitializer(\n filenames, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value,\n experimental_is_anonymous=is_anonymous)\n self.assertIsInstance(cm.exception, (ValueError, TypeError))\n else:\n with self.assertRaises(errors_impl.InvalidArgumentError):\n self.getHashTable()(\n lookup_ops.TextFileInitializer(\n filenames, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,\n dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),\n default_value,\n experimental_is_anonymous=is_anonymous)\n\n def testIdToStringTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_1.txt\")\n with self.cached_session():\n default_value = \"UNK\"\n vocab_size = 3\n init = lookup_ops.TextFileStringTableInitializer(\n vocab_file, vocab_size=vocab_size)\n self.assertTrue(\"feat_to_id_1.txt_3_-1_-2\", init._shared_name)\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n\n self.initialize_table(table)\n\n input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n\n out = table.lookup(input_values)\n self.assertAllEqual([b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"],\n self.evaluate(out))\n self.assertEqual(vocab_size, self.evaluate(table.size()))\n\n def testStringToIdTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_2.txt\")\n with self.cached_session():\n default_value = -1\n vocab_size = 3\n init = lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size)\n self.assertTrue(\"feat_to_id_2.txt_3_-1_-2\", init._shared_name)\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out = 
table.lookup(input_string)\n self.assertAllEqual([0, 1, 2, -1], self.evaluate(out))\n self.assertEqual(vocab_size, self.evaluate(table.size()))\n\n def testInt64ToIdTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\n \"feat_to_id_3.txt\", values=(\"42\", \"1\", \"-1000\"))\n with self.cached_session():\n default_value = -1\n vocab_size = 3\n init = lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64)\n self.assertTrue(\"feat_to_id_3.txt_3_-1_-2\", init._shared_name)\n table = self.getHashTable()(\n init, default_value, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n out = table.lookup(\n constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))\n self.assertAllEqual((0, 1, 2, -1), self.evaluate(out))\n self.assertEqual(vocab_size, self.evaluate(table.size()))\n\n\[email protected]_parameters(\n (f\"_{is_anonymous}\", is_anonymous) for is_anonymous in [False, True])\nclass StaticVocabularyTableTest(BaseLookupTableTest):\n\n def _createVocabFile(self, basename, values=(\"brain\", \"salad\", \"surgery\")):\n vocabulary_file = os.path.join(self.get_temp_dir(), basename)\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join(values) + \"\\n\")\n return vocabulary_file\n\n def testStringStaticVocabularyTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_1.txt\")\n vocab_size = 3\n oov_buckets = 1\n table = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size),\n oov_buckets,\n experimental_is_anonymous=is_anonymous)\n\n self.initialize_table(table)\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out = table.lookup(input_string)\n self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))\n\n def testStaticVocabularyTableGetItem(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_1.txt\")\n vocab_size = 3\n oov_buckets = 1\n table = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size),\n oov_buckets,\n experimental_is_anonymous=is_anonymous)\n\n self.initialize_table(table)\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out = table[input_string]\n self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))\n\n def testInt32StaticVocabularyTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_2.txt\", (\"42\", \"1\", \"-1000\"))\n vocab_size = 3\n oov_buckets = 1\n table = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),\n oov_buckets,\n lookup_key_dtype=dtypes.int32,\n experimental_is_anonymous=is_anonymous)\n\n self.initialize_table(table)\n\n values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)\n\n out = table.lookup(values)\n self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))\n\n def 
testInt64StaticVocabularyTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_3.txt\", (\"42\", \"1\", \"-1000\"))\n vocab_size = 3\n oov_buckets = 1\n table = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),\n oov_buckets,\n experimental_is_anonymous=is_anonymous)\n\n self.initialize_table(table)\n\n values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)\n\n out = table.lookup(values)\n self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))\n\n def testStringStaticVocabularyTableNoInitializer(self, is_anonymous):\n oov_buckets = 5\n\n # Set a table that only uses hash buckets, for each input value returns\n # an id calculated by fingerprint(\"input\") mod oov_buckets.\n table = self.getVocabularyTable()(\n None, oov_buckets, experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n values = constant_op.constant((\"brain\", \"salad\", \"surgery\"))\n\n out = table.lookup(values)\n self.assertAllEqual(\n [\n 3, # fingerprint(\"brain\") mod 5.\n 1, # fingerprint(\"salad\") mod 5.\n 4 # fingerprint(\"surgery\") mod 5\n ],\n self.evaluate(out))\n self.assertEqual(oov_buckets, self.evaluate(table.size()))\n\n def testStaticVocabularyTableWithMultipleInitializers(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_4.txt\")\n vocab_size = 3\n oov_buckets = 3\n\n init = lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size)\n table1 = self.getVocabularyTable()(\n init,\n oov_buckets,\n name=\"table1\",\n experimental_is_anonymous=is_anonymous)\n\n table2 = self.getVocabularyTable()(\n init,\n oov_buckets,\n name=\"table2\",\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(lookup_ops.tables_initializer())\n\n input_string = constant_op.constant(\n [\"fruit\", \"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out1 = table1.lookup(input_string)\n out2 = table2.lookup(input_string)\n\n out1, out2 = self.evaluate([out1, out2])\n self.assertAllEqual([5, 0, 1, 2, 5], out1)\n self.assertAllEqual([5, 0, 1, 2, 5], out2)\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))\n\n def testStaticVocabularyTableInitializationAcrossSessions(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_5.txt\")\n with self.cached_session():\n vocab_size = 3\n oov_buckets = 1\n table1 = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size),\n oov_buckets,\n experimental_is_anonymous=is_anonymous)\n\n self.initialize_table(table1)\n\n input_string_1 = constant_op.constant(\n [\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out1 = table1.lookup(input_string_1)\n\n self.assertAllEqual([0, 1, 2, 3], self.evaluate(out1))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))\n\n with self.cached_session():\n vocab_size = 3\n oov_buckets = 1\n\n # Underlying lookup table already initialized in previous session.\n # No need to initialize table2\n table2 = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, 
vocab_size=vocab_size),\n oov_buckets,\n experimental_is_anonymous=is_anonymous)\n\n input_string_2 = constant_op.constant([\"fruit\", \"salad\", \"UNK\"])\n\n out2 = table2.lookup(input_string_2)\n\n self.assertAllEqual([3, 1, 3], self.evaluate(out2))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))\n\n def testStaticVocabularyTableAssetTracking(self, is_anonymous):\n vocab_file = self._createVocabFile(\"vocab.txt\")\n vocab_size = 3\n oov_buckets = 1\n table = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size),\n oov_buckets,\n experimental_is_anonymous=is_anonymous)\n objects = checkpoint_util.list_objects(graph_view.ObjectGraphView(table))\n assets = list(filter(lambda obj: isinstance(obj, tracking.Asset), objects))\n self.assertLen(assets, 1)\n self.assertEqual(\n self.evaluate(assets[0].asset_path), compat.as_bytes(vocab_file))\n\n def testSparseTensor(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_7.txt\")\n input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]\n input_shape = [4, 4]\n sp_features = sparse_tensor.SparseTensor(\n constant_op.constant(input_indices, dtypes.int64),\n constant_op.constant([\"brain\", \"salad\", \"brain\", \"surgery\", \"tarkus\"],\n dtypes.string),\n constant_op.constant(input_shape, dtypes.int64))\n\n table = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),\n 1,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n sp_ids = table.lookup(sp_features)\n\n self.assertAllEqual([5], sp_ids.values._shape_as_list())\n\n sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(\n [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])\n\n self.assertAllEqual(input_indices, sp_ids_ind)\n self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)\n self.assertAllEqual(input_shape, sp_ids_shape)\n\n def testRaggedTensor(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n vocab_file = self._createVocabFile(\"feat_to_id_7.txt\")\n input_row_splits = [0, 2, 4, 5]\n ragged_features = ragged_tensor.RaggedTensor.from_row_splits(\n constant_op.constant([\"brain\", \"salad\", \"brain\", \"surgery\", \"tarkus\"],\n dtypes.string),\n constant_op.constant(input_row_splits, dtypes.int64))\n\n table = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),\n 1,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n ragged_ids = table.lookup(ragged_features)\n\n self.assertAllEqual([5], ragged_ids.values._shape_as_list())\n\n ragged_ids_val, ragged_ids_row_splits = self.evaluate(\n [ragged_ids.values, ragged_ids.row_splits])\n\n self.assertAllEqual([0, 1, 0, 2, 3], ragged_ids_val)\n self.assertAllEqual(input_row_splits, ragged_ids_row_splits)\n\n def testInt32SparseTensor(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]\n input_shape = [4, 4]\n sp_features = sparse_tensor.SparseTensor(\n constant_op.constant(input_indices, dtypes.int64),\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),\n constant_op.constant(input_shape, dtypes.int64))\n\n table = self.getVocabularyTable()(\n lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),\n dtypes.int64, dtypes.int64),\n 1,\n 
lookup_key_dtype=dtypes.int32,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n sp_ids = table.lookup(sp_features)\n\n self.assertAllEqual([5], sp_ids.values._shape_as_list())\n\n sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(\n [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])\n\n self.assertAllEqual(input_indices, sp_ids_ind)\n self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)\n self.assertAllEqual(input_shape, sp_ids_shape)\n\n def testInt32RaggedTensor(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n input_row_splits = [0, 2, 4, 5]\n ragged_features = ragged_tensor.RaggedTensor.from_row_splits(\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),\n constant_op.constant(input_row_splits, dtypes.int64))\n\n table = self.getVocabularyTable()(\n lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),\n dtypes.int64, dtypes.int64),\n 1,\n lookup_key_dtype=dtypes.int32,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n ragged_ids = table.lookup(ragged_features)\n\n self.assertAllEqual([5], ragged_ids.values._shape_as_list())\n\n ragged_ids_val, ragged_ids_row_splits = self.evaluate(\n [ragged_ids.values, ragged_ids.row_splits])\n\n self.assertAllEqual([0, 1, 0, 2, 3], ragged_ids_val)\n self.assertAllEqual(input_row_splits, ragged_ids_row_splits)\n\n def testInt64SparseTensor(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]\n input_shape = [4, 4]\n sp_features = sparse_tensor.SparseTensor(\n constant_op.constant(input_indices, dtypes.int64),\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),\n constant_op.constant(input_shape, dtypes.int64))\n\n table = self.getVocabularyTable()(\n lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),\n dtypes.int64, dtypes.int64),\n 1,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n sp_ids = table.lookup(sp_features)\n\n self.assertAllEqual([5], sp_ids.values._shape_as_list())\n\n sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(\n [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])\n\n self.assertAllEqual(input_indices, sp_ids_ind)\n self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)\n self.assertAllEqual(input_shape, sp_ids_shape)\n\n def testInt64RaggedTensor(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n input_row_splits = [0, 2, 4, 5]\n ragged_features = ragged_tensor.RaggedTensor.from_row_splits(\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),\n constant_op.constant(input_row_splits, dtypes.int64))\n\n table = self.getVocabularyTable()(\n lookup_ops.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),\n dtypes.int64, dtypes.int64),\n 1,\n experimental_is_anonymous=is_anonymous)\n self.initialize_table(table)\n\n ragged_ids = table.lookup(ragged_features)\n\n self.assertAllEqual([5], ragged_ids.values._shape_as_list())\n\n ragged_ids_val, ragged_ids_row_splits = self.evaluate(\n [ragged_ids.values, ragged_ids.row_splits])\n\n self.assertAllEqual([0, 1, 0, 2, 3], ragged_ids_val)\n self.assertAllEqual(input_row_splits, ragged_ids_row_splits)\n\n def testStaticVocabularyTableNoInnerTable(self, is_anonymous):\n table = self.getVocabularyTable()(\n None, num_oov_buckets=1, experimental_is_anonymous=is_anonymous)\n self.assertIsNone(table.resource_handle)\n\n 
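# For reference, a minimal usage sketch of the class under test (assuming\n # getVocabularyTable() resolves to lookup_ops.StaticVocabularyTable):\n #   table = lookup_ops.StaticVocabularyTable(\n #       lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),\n #       num_oov_buckets=1)\n # In-vocabulary keys map to ids in [0, vocab_size); out-of-vocabulary keys\n # are hashed into [vocab_size, vocab_size + num_oov_buckets).\n\n 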
@test_util.run_v2_only\n def testSavedModelSaveRestore(self, is_anonymous):\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n root = tracking.AutoTrackable()\n\n vocab_file = self._createVocabFile(\"feat_to_id_3.txt\", (\"11\", \"12\", \"13\"))\n vocab_size = 3\n oov_buckets = 1\n root.table = self.getVocabularyTable()(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),\n oov_buckets,\n experimental_is_anonymous=is_anonymous)\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec((), dtypes.int64)])\n def lookup(key):\n return root.table.lookup(key)\n\n @def_function.function(input_signature=[])\n def size():\n return root.table.size()\n\n @def_function.function(input_signature=[])\n def is_ref_counting():\n return test_ops.is_resource_handle_ref_counting(\n root.table.resource_handle)\n\n root.lookup = lookup\n root.size = size\n root.is_ref_counting = is_ref_counting\n\n self.assertEqual(root.table.size(), 4)\n self.assertEqual(root.lookup(12), 1)\n self.assertEqual(root.lookup(10), 3)\n self.assertEqual(root.is_ref_counting(), is_anonymous)\n\n saved_model_save.save(root, save_path)\n\n del root\n loaded = saved_model_load.load(save_path)\n self.assertEqual(loaded.size(), 4)\n self.assertEqual(loaded.lookup(12), 1)\n self.assertEqual(loaded.lookup(10), 3)\n self.assertEqual(loaded.is_ref_counting(), is_anonymous)\n\n\[email protected]_parameters(\n (f\"_{is_anonymous}\", is_anonymous) for is_anonymous in [False, True])\nclass DenseHashTableOpTest(test.TestCase):\n\n def testBasic(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)\n values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=0,\n deleted_key=-1,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(4, self.evaluate(table.size()))\n\n remove_string = constant_op.constant([12, 15], dtypes.int64)\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([11, 12, 15], dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([3], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([0, -1, -1], result)\n\n def testGetItem(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)\n values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=0,\n deleted_key=-1,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n\n input_string = constant_op.constant([11, 12, 15], dtypes.int64)\n output = table[input_string]\n self.assertAllEqual([3], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def testBasicBool(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)\n values = constant_op.constant([True, True, True, True], 
dtypes.bool)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.bool,\n default_value=False,\n empty_key=0,\n deleted_key=-1,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(4, self.evaluate(table.size()))\n\n remove_string = constant_op.constant([11, 15], dtypes.int64)\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([11, 12, 15], dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([3], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([False, True, False], result)\n\n def testSameEmptyAndDeletedKey(self, is_anonymous):\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"Empty and deleted keys\"):\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=42,\n deleted_key=42,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n @test_util.run_v1_only(\"uses placeholders\")\n def testLookupUnknownShape(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n with self.cached_session():\n keys = constant_op.constant([11, 12, 13], dtypes.int64)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=0,\n deleted_key=-1,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n placeholder_keys = array_ops.placeholder(dtypes.int64)\n output = table.lookup(placeholder_keys)\n self.assertAllEqual(None, output.get_shape())\n result = output.eval({placeholder_keys: [11, 12, 15]})\n self.assertAllEqual([0, 1, -1], result)\n\n def testMapStringToFloat(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([\"a\", \"b\", \"c\", \"d\"], dtypes.string)\n values = constant_op.constant([0.0, 1.1, 2.2, 3.3], dtypes.float32)\n default_value = constant_op.constant(-1.5, dtypes.float32)\n table = lookup_ops.DenseHashTable(\n dtypes.string,\n dtypes.float32,\n default_value=default_value,\n empty_key=\"\",\n deleted_key=\"$\",\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(4, self.evaluate(table.size()))\n\n remove_string = constant_op.constant([\"b\", \"e\"])\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"a\", \"b\", \"d\", \"e\"], dtypes.string)\n output = table.lookup(input_string)\n self.assertAllEqual([4], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllClose([0, -1.5, 3.3, -1.5], result)\n\n def testMapInt64ToFloat(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n for float_dtype in [dtypes.float32, dtypes.float64]:\n keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)\n values = constant_op.constant([0.0, 1.1, 2.2, 3.3], float_dtype)\n default_value = constant_op.constant(-1.5, float_dtype)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n float_dtype,\n default_value=default_value,\n empty_key=0,\n 
deleted_key=-1,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(4, self.evaluate(table.size()))\n\n remove_string = constant_op.constant([12, 15], dtypes.int64)\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([11, 12, 14, 15], dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([4], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllClose([0, -1.5, 3.3, -1.5], result)\n\n def testVectorValues(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([11, 12, 13], dtypes.int64)\n values = constant_op.constant([[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]],\n dtypes.int64)\n default_value = constant_op.constant([-1, -2, -3, -4], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=0,\n deleted_key=-1,\n initial_num_buckets=4,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n self.assertAllEqual(4, len(self.evaluate(table.export()[0])))\n\n self.evaluate(\n table.insert(\n constant_op.constant([14], dtypes.int64),\n constant_op.constant([[2, 3, 4, 5]], dtypes.int64)))\n self.assertAllEqual(4, self.evaluate(table.size()))\n self.assertAllEqual(8, len(self.evaluate(table.export()[0])))\n\n remove_string = constant_op.constant([12, 16], dtypes.int64)\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(3, self.evaluate(table.size()))\n self.assertAllEqual(8, len(self.evaluate(table.export()[0])))\n\n input_string = constant_op.constant([11, 12, 14, 15], dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([4, 4],\n output.shape,\n msg=\"Saw shape: %s\" % output.shape)\n\n result = self.evaluate(output)\n self.assertAllEqual(\n [[0, 1, 2, 3], [-1, -2, -3, -4], [2, 3, 4, 5], [-1, -2, -3, -4]],\n result)\n\n def testVectorKeys(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([[0, 1], [1, 2], [1, 3]], dtypes.int64)\n values = constant_op.constant([10, 11, 12], dtypes.int64)\n empty_key = constant_op.constant([0, 3], dtypes.int64)\n deleted_key = constant_op.constant([-1, -1], dtypes.int64)\n default_value = constant_op.constant(-1, dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n initial_num_buckets=8,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n self.evaluate(\n table.insert(\n constant_op.constant([[0, 0]], dtypes.int64),\n constant_op.constant([13], dtypes.int64)))\n self.assertAllEqual(4, self.evaluate(table.size()))\n self.assertAllEqual(8, len(self.evaluate(table.export()[0])))\n\n remove_string = constant_op.constant([[1, 2], [7, 8]], dtypes.int64)\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(3, self.evaluate(table.size()))\n self.assertAllEqual(8, len(self.evaluate(table.export()[0])))\n\n input_string = constant_op.constant([[0, 1], [1, 2], [1, 
3], [0, 2]],\n dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([4], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([10, -1, 12, -1], result)\n\n def testResize(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([11, 12, 13], dtypes.int64)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=0,\n deleted_key=-1,\n initial_num_buckets=4,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n self.assertAllEqual(4, len(self.evaluate(table.export()[0])))\n\n keys2 = constant_op.constant([12, 99], dtypes.int64)\n self.evaluate(table.remove(keys2))\n self.assertAllEqual(2, self.evaluate(table.size()))\n self.assertAllEqual(4, len(self.evaluate(table.export()[0])))\n\n keys3 = constant_op.constant([13, 14, 15, 16, 17], dtypes.int64)\n values3 = constant_op.constant([3, 4, 5, 6, 7], dtypes.int64)\n\n self.evaluate(table.insert(keys3, values3))\n self.assertAllEqual(6, self.evaluate(table.size()))\n self.assertAllEqual(16, len(self.evaluate(table.export()[0])))\n\n keys4 = constant_op.constant([10, 11, 12, 13, 14, 15, 16, 17, 18],\n dtypes.int64)\n output = table.lookup(keys4)\n self.assertAllEqual([-1, 0, -1, 3, 4, 5, 6, 7, -1], self.evaluate(output))\n\n def testExport(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)\n values = constant_op.constant([1, 2, 3, 4], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=100,\n deleted_key=200,\n initial_num_buckets=8,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(4, self.evaluate(table.size()))\n\n keys2 = constant_op.constant([12, 15], dtypes.int64)\n self.evaluate(table.remove(keys2))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n exported_keys, exported_values = table.export()\n\n np_keys = self.evaluate(exported_keys)\n np_values = self.evaluate(exported_values)\n\n self.assertAllEqual(8, len(np_keys))\n self.assertAllEqual(8, len(np_values))\n\n # pair up keys and values, drop extra added dimension\n pairs = np.dstack((np_keys.flatten(), np_values.flatten()))[0]\n # sort by key\n pairs = pairs[pairs[:, 0].argsort()]\n self.assertAllEqual([[11, 1], [13, 3], [14, 4], [100, 0], [100, 0],\n [100, 0], [100, 0], [200, 2]], pairs)\n\n @test_util.run_v1_only(\"Saver V1 only\")\n def testSaveRestore(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n with self.session(graph=ops.Graph()) as sess:\n default_value = -1\n empty_key = 0\n deleted_key = -1\n keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)\n values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t1\",\n checkpoint=True,\n 
initial_num_buckets=32,\n experimental_is_anonymous=is_anonymous)\n\n save = saver.Saver()\n\n self.assertAllEqual(0, table.size())\n table.insert(keys, values).run()\n self.assertAllEqual(4, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n keys2 = constant_op.constant([12, 15], dtypes.int64)\n table.remove(keys2).run()\n self.assertAllEqual(3, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n val = save.save(sess, save_path)\n self.assertIsInstance(val, six.string_types)\n self.assertEqual(save_path, val)\n\n with self.session(graph=ops.Graph()) as sess:\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t1\",\n checkpoint=True,\n initial_num_buckets=64,\n experimental_is_anonymous=is_anonymous)\n table.insert(\n constant_op.constant([11, 14], dtypes.int64),\n constant_op.constant([12, 24], dtypes.int64)).run()\n self.assertAllEqual(2, table.size())\n self.assertAllEqual(64, len(table.export()[0].eval()))\n\n save = saver.Saver()\n\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path)\n\n self.assertAllEqual(3, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([-1, 0, -1, 2, 3], output)\n\n @test_util.run_v1_only(\"Saver V1 only\")\n def testSaveRestoreOnlyTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n with self.session(graph=ops.Graph()) as sess:\n default_value = -1\n empty_key = 0\n deleted_key = -1\n keys = constant_op.constant([11, 12, 13, 14], dtypes.int64)\n values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t1\",\n checkpoint=True,\n initial_num_buckets=32,\n experimental_is_anonymous=is_anonymous)\n\n save = saver.Saver([table])\n\n self.assertAllEqual(0, table.size())\n table.insert(keys, values).run()\n self.assertAllEqual(4, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n keys2 = constant_op.constant([12, 15], dtypes.int64)\n table.remove(keys2).run()\n self.assertAllEqual(3, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n val = save.save(sess, save_path)\n self.assertIsInstance(val, six.string_types)\n self.assertEqual(save_path, val)\n\n with self.session(graph=ops.Graph()) as sess:\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t1\",\n checkpoint=True,\n initial_num_buckets=64,\n experimental_is_anonymous=is_anonymous)\n table.insert(\n constant_op.constant([11, 14], dtypes.int64),\n constant_op.constant([12, 24], dtypes.int64)).run()\n self.assertAllEqual(2, table.size())\n self.assertAllEqual(64, len(table.export()[0].eval()))\n\n save = saver.Saver([table])\n\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path)\n\n self.assertAllEqual(3, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n input_string = constant_op.constant([10, 11, 12, 13, 14], 
dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([-1, 0, -1, 2, 3], output)\n\n @test_util.run_in_graph_and_eager_modes\n def testObjectSaveRestore(self, is_anonymous):\n if is_anonymous and not context.executing_eagerly():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_prefix = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n default_value = -1\n empty_key = 0\n deleted_key = -1\n keys = constant_op.constant([11, 12, 13], dtypes.int64)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n save_table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t1\",\n checkpoint=True,\n initial_num_buckets=32,\n experimental_is_anonymous=is_anonymous)\n\n save_checkpoint = trackable.Checkpoint(table=save_table)\n\n self.assertAllEqual(0, self.evaluate(save_table.size()))\n self.evaluate(save_table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(save_table.size()))\n self.assertAllEqual(32, len(self.evaluate(save_table.export()[0])))\n\n save_path = save_checkpoint.save(save_prefix)\n del save_table, save_checkpoint\n\n load_table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t1\",\n checkpoint=True,\n initial_num_buckets=64,\n experimental_is_anonymous=is_anonymous)\n self.evaluate(\n load_table.insert(\n constant_op.constant([11, 14], dtypes.int64),\n constant_op.constant([12, 24], dtypes.int64)))\n self.assertAllEqual(2, self.evaluate(load_table.size()))\n self.assertAllEqual(64, len(self.evaluate(load_table.export()[0])))\n\n restore_checkpoint = trackable.Checkpoint(table=load_table)\n\n # Restore the saved values in the parameter nodes.\n restore_checkpoint.restore(save_path).run_restore_ops()\n\n self.assertAllEqual(3, self.evaluate(load_table.size()))\n self.assertAllEqual(32, len(self.evaluate(load_table.export()[0])))\n\n input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)\n output = load_table.lookup(input_string)\n self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))\n\n @test_util.run_v2_only\n def testSavedModelSaveRestore(self, is_anonymous):\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n root = tracking.AutoTrackable()\n\n default_value = -1\n empty_key = 0\n deleted_key = -1\n keys = constant_op.constant([11, 12, 13], dtypes.int64)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n root.table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t1\",\n checkpoint=True,\n initial_num_buckets=32,\n experimental_is_anonymous=is_anonymous)\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec((), dtypes.int64)])\n def lookup(key):\n return root.table.lookup(key)\n\n @def_function.function(input_signature=[])\n def size():\n return root.table.size()\n\n @def_function.function(input_signature=[])\n def is_ref_counting():\n return test_ops.is_resource_handle_ref_counting(\n root.table.resource_handle)\n\n root.lookup = lookup\n root.size = size\n root.is_ref_counting = is_ref_counting\n\n self.assertEqual(root.table.size(), 0)\n root.table.insert(keys, values)\n self.assertEqual(root.table.size(), 3)\n 
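# Key 12 was inserted with value 1; key 10 was never inserted, so the\n # lookup falls back to default_value (-1).\n 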
self.assertEqual(root.table.lookup(12), 1)\n self.assertEqual(root.table.lookup(10), -1)\n self.assertEqual(len(root.table.export()[0]), 32)\n self.assertEqual(root.is_ref_counting(), is_anonymous)\n\n saved_model_save.save(root, save_path)\n\n del root\n loaded = saved_model_load.load(save_path)\n self.assertEqual(loaded.size(), 3)\n self.assertEqual(loaded.lookup(12), 1)\n self.assertEqual(loaded.lookup(10), -1)\n self.assertEqual(loaded.is_ref_counting(), is_anonymous)\n\n @test_util.run_v1_only(\"Saver V1 only\")\n def testVectorSaveRestore(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n save_dir = os.path.join(self.get_temp_dir(), \"vector_save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n with self.session(graph=ops.Graph()) as sess:\n empty_key = constant_op.constant([11, 13], dtypes.int64)\n deleted_key = constant_op.constant([-2, -3], dtypes.int64)\n default_value = constant_op.constant([-1, -2], dtypes.int64)\n keys = constant_op.constant([[11, 12], [11, 14], [12, 13], [13, 14]],\n dtypes.int64)\n values = constant_op.constant([[0, 1], [2, 3], [2, 4], [4, 5]],\n dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t1\",\n checkpoint=True,\n initial_num_buckets=32,\n experimental_is_anonymous=is_anonymous)\n\n save = saver.Saver()\n\n self.assertAllEqual(0, table.size())\n table.insert(keys, values).run()\n self.assertAllEqual(4, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n keys2 = constant_op.constant([[12, 13], [16, 17]], dtypes.int64)\n table.remove(keys2).run()\n self.assertAllEqual(3, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n val = save.save(sess, save_path)\n self.assertIsInstance(val, six.string_types)\n self.assertEqual(save_path, val)\n\n with self.session(graph=ops.Graph()) as sess:\n empty_key = constant_op.constant([11, 13], dtypes.int64)\n deleted_key = constant_op.constant([-2, -3], dtypes.int64)\n default_value = constant_op.constant([-1, -2], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t1\",\n checkpoint=True,\n initial_num_buckets=64,\n experimental_is_anonymous=is_anonymous)\n table.insert(\n constant_op.constant([[11, 12], [13, 15]], dtypes.int64),\n constant_op.constant([[21, 22], [23, 24]], dtypes.int64)).run()\n self.assertAllEqual(2, table.size())\n self.assertAllEqual(64, len(table.export()[0].eval()))\n\n save = saver.Saver()\n\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path)\n\n self.assertAllEqual(3, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n input_string = constant_op.constant(\n [[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([[0, 1], [2, 3], [-1, -2], [4, 5], [-1, -2]],\n self.evaluate(output))\n\n @test_util.run_v1_only(\"Saver V1 only\")\n def testVectorScalarSaveRestore(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n save_dir = os.path.join(self.get_temp_dir(), \"vector_scalar_save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n with self.session(graph=ops.Graph()) as sess:\n empty_key = 
constant_op.constant([11, 13], dtypes.int64)\n deleted_key = constant_op.constant([-1, -1], dtypes.int64)\n default_value = constant_op.constant(-1, dtypes.int64)\n keys = constant_op.constant([[11, 12], [11, 14], [12, 13], [13, 14]],\n dtypes.int64)\n values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t2\",\n checkpoint=True,\n initial_num_buckets=32,\n experimental_is_anonymous=is_anonymous)\n\n save = saver.Saver()\n\n self.assertAllEqual(0, table.size())\n table.insert(keys, values).run()\n self.assertAllEqual(4, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n keys2 = constant_op.constant([[12, 13], [15, 16]], dtypes.int64)\n table.remove(keys2).run()\n self.assertAllEqual(3, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n val = save.save(sess, save_path)\n self.assertIsInstance(val, six.string_types)\n self.assertEqual(save_path, val)\n\n with self.session(graph=ops.Graph()) as sess:\n empty_key = constant_op.constant([11, 13], dtypes.int64)\n deleted_key = constant_op.constant([-1, -1], dtypes.int64)\n default_value = constant_op.constant(-1, dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n name=\"t2\",\n checkpoint=True,\n initial_num_buckets=64,\n experimental_is_anonymous=is_anonymous)\n table.insert(\n constant_op.constant([[11, 12], [13, 15]], dtypes.int64),\n constant_op.constant([3, 4], dtypes.int64)).run()\n self.assertAllEqual(2, table.size())\n self.assertAllEqual(64, len(table.export()[0].eval()))\n\n save = saver.Saver()\n\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path)\n\n self.assertAllEqual(3, table.size())\n self.assertAllEqual(32, len(table.export()[0].eval()))\n\n input_string = constant_op.constant(\n [[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([0, 1, -1, 3, -1], output)\n\n def testReprobe(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n # Insert 6 keys into a table with 8 buckets.\n # The values are chosen to make sure collisions occur when using GCC STL\n keys = constant_op.constant([11, 12, 13, 19, 20, 21], dtypes.int64)\n values = constant_op.constant([51, 52, 53, 54, 55, 56], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=0,\n deleted_key=-1,\n initial_num_buckets=8,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(6, self.evaluate(table.size()))\n\n input_string = constant_op.constant([10, 11, 12, 13, 14, 19, 20, 21, 22],\n dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([9], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([-1, 51, 52, 53, -1, 54, 55, 56, -1], result)\n\n def testCustomEmptyKey(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n keys = constant_op.constant([11, 0, 13], dtypes.int64)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=12,\n 
deleted_key=-1,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([11, 0, 15], dtypes.int64)\n output = table.lookup(input_string)\n self.assertAllEqual([3], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def testErrors(self, is_anonymous):\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=0,\n deleted_key=-1,\n experimental_is_anonymous=is_anonymous)\n\n # Inserting the empty key returns an error\n keys1 = constant_op.constant([11, 0], dtypes.int64)\n values1 = constant_op.constant([0, 1], dtypes.int64)\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"empty_key\"):\n self.evaluate(table.insert(keys1, values1))\n\n # Looking up the empty key returns an error\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"empty_key\"):\n self.evaluate(table.lookup(keys1))\n\n # Inserting the deleted key returns an error\n keys2 = constant_op.constant([11, -1], dtypes.int64)\n values2 = constant_op.constant([0, 1], dtypes.int64)\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"deleted_key\"):\n self.evaluate(table.insert(keys2, values2))\n\n # Looking up the deleted key returns an error\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"deleted_key\"):\n self.evaluate(table.lookup(keys2))\n\n # Arbitrary tensors of keys are not supported\n keys = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)\n values = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"Expected key shape\"):\n self.evaluate(table.lookup(keys))\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"Expected key shape\"):\n self.evaluate(table.insert(keys, values))\n\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"Number of buckets must be\"):\n table2 = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=17,\n deleted_key=-1,\n initial_num_buckets=12,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table2.size()))\n\n with self.assertRaisesRegex(\n errors_impl.InvalidArgumentError,\n \"Empty and deleted keys must have same shape\"):\n table3 = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=42,\n deleted_key=[1, 2],\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table3.size()))\n\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"Empty and deleted keys cannot be equal\"):\n table4 = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=42,\n deleted_key=42,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table4.size()))\n\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"Empty and deleted keys cannot be equal\"):\n table5 = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=-1,\n empty_key=[1, 2, 3],\n deleted_key=[1, 2, 3],\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table5.size()))\n\n @test_util.run_in_graph_and_eager_modes\n def testStringToResource(self, is_anonymous):\n v = variables.Variable(1.)\n v1 = variables.Variable(1.)\n table = 
lookup_ops.DenseHashTable(\n dtypes.string,\n dtypes.resource,\n default_value=v.handle,\n empty_key=\"<empty>\",\n deleted_key=\"<deleted>\",\n experimental_is_anonymous=is_anonymous)\n self.assertEqual([], table.lookup(\"not_found\").shape)\n table.insert(\"v1\", v1.handle)\n self.assertEqual([], table.lookup(\"v1\").shape)\n\n def testExportShapeInference(self, is_anonymous):\n default_value = -1\n empty_key = 0\n deleted_key = -1\n table = lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n empty_key=empty_key,\n deleted_key=deleted_key,\n experimental_is_anonymous=is_anonymous)\n actual_shapes = [t.shape for t in table.export()]\n inferred_shapes = []\n\n @def_function.function\n def f():\n for t in table.export():\n inferred_shapes.append(t.shape)\n\n f()\n self.assertLen(actual_shapes, 2)\n self.assertLen(inferred_shapes, 2)\n self.assertTrue(inferred_shapes[0].is_compatible_with(actual_shapes[0]))\n self.assertTrue(inferred_shapes[1].is_compatible_with(actual_shapes[1]))\n\n\nclass IndexTableFromFile(test.TestCase):\n\n def _createVocabFile(self, basename, values=(\"brain\", \"salad\", \"surgery\")):\n vocabulary_file = os.path.join(self.get_temp_dir(), basename)\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join(values) + \"\\n\")\n return vocabulary_file\n\n def test_string_index_table_from_file(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab1.txt\")\n\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, num_oov_buckets=1)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n\n def test_string_index_table_from_multicolumn_file(self):\n vocabulary_file = self._createVocabFile(\n \"f2i_vocab1.txt\", values=(\"brain\\t300\", \"salad\\t20\", \"surgery\\t1\"))\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n num_oov_buckets=1,\n key_column_index=0,\n value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n\n def test_string_index_table_from_multicolumn_file_custom_delimiter(self):\n vocabulary_file = self._createVocabFile(\n \"f2i_vocab1.txt\", values=(\"brain 300\", \"salad 20\", \"surgery 1\"))\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n num_oov_buckets=1,\n key_column_index=0,\n value_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,\n delimiter=\" \")\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n\n def test_string_index_table_from_file_tensor_filename(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab1.txt\")\n vocabulary_file = constant_op.constant(vocabulary_file)\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, num_oov_buckets=1)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n 
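# In graph mode the lookup cannot run until the table initializer has\n # executed; in eager mode initialization happens at construction.\n 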
if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n if not context.executing_eagerly():\n self.assertEqual(1,\n len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))\n\n @test_util.run_v1_only(\"placeholder usage\")\n def test_string_index_table_from_file_placeholder_filename(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab1.txt\")\n with self.cached_session():\n vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n\n feed_dict = {vocabulary_placeholder.name: vocabulary_file}\n lookup_ops.tables_initializer().run(feed_dict=feed_dict)\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n self.assertEqual(0,\n len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))\n\n def test_int32_index_table_from_file(self):\n vocabulary_file = self._createVocabFile(\n \"f2i_vocab2.txt\", values=(\"42\", \"1\", \"-1000\"))\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n num_oov_buckets=1,\n key_dtype=dtypes.int32)\n ids = table.lookup(constant_op.constant((1, -1000, 11), dtype=dtypes.int32))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n\n def test_int64_index_table_from_file(self):\n vocabulary_file = self._createVocabFile(\n \"f2i_vocab3.txt\", values=(\"42\", \"1\", \"-1000\"))\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n num_oov_buckets=1,\n key_dtype=dtypes.int64)\n ids = table.lookup(constant_op.constant((1, -1000, 11), dtype=dtypes.int64))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n\n def test_index_table_from_file_with_default_value(self):\n default_value = -42\n vocabulary_file = self._createVocabFile(\"f2i_vocab4.txt\")\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, default_value=default_value)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, default_value), self.evaluate(ids))\n\n def test_index_table_from_file_with_oov_buckets(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab5.txt\")\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, num_oov_buckets=1000)\n ids = table.lookup(\n constant_op.constant([\"salad\", \"surgery\", \"tarkus\", \"toccata\"]))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual(\n (\n 1, # From vocabulary file.\n 2, # From vocabulary file.\n 867, # 3 + fingerprint(\"tarkus\") mod 300.\n 860), # 3 + fingerprint(\"toccata\") mod 300.\n self.evaluate(ids))\n\n def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):\n 
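# An empty vocabulary file path is rejected with a ValueError at\n # construction time, before any table op is created.\n 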
self.assertRaises(\n ValueError, lookup_ops.index_table_from_file, vocabulary_file=\"\")\n\n def test_index_table_from_file_fails_with_empty_vocabulary(self):\n self.assertRaises(\n ValueError, lookup_ops.index_table_from_file, vocabulary_file=None)\n\n def test_index_table_from_file_str_fails_with_zero_size_vocabulary(self):\n vocabulary_file = self._createVocabFile(\"zero_vocab_str.txt\")\n self.assertRaisesRegex(\n ValueError, \"`vocab_size` must be greater than 0, got 0 for \"\n \"vocabulary_file: .*zero_vocab_str.txt\",\n lookup_ops.index_table_from_file,\n vocabulary_file=vocabulary_file,\n vocab_size=0)\n\n def test_index_table_from_file_tensor_fails_with_zero_size_vocabulary(self):\n vocabulary_file = constant_op.constant(\n self._createVocabFile(\"zero_vocab_tensor.txt\"))\n self.assertRaisesRegex(\n ValueError, \"`vocab_size` must be greater than 0, got 0 for \"\n \"vocabulary_file: .*zero_vocab_tensor.txt\",\n lookup_ops.index_table_from_file,\n vocabulary_file=vocabulary_file,\n vocab_size=0)\n\n def test_index_table_from_file_with_vocab_size_too_small(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab6.txt\")\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=2)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, -1, -1), self.evaluate(ids))\n self.assertEqual(2, self.evaluate(table.size()))\n\n def test_index_table_from_file_with_vocab_size_too_large(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab7.txt\")\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"Invalid vocab_size\"):\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=4)\n self.evaluate(table.initializer)\n\n def test_index_table_from_file_with_vocab_size(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab8.txt\")\n\n self.assertRaises(\n ValueError,\n lookup_ops.index_table_from_file,\n vocabulary_file=vocabulary_file,\n vocab_size=0)\n\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=3)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, -1), self.evaluate(ids))\n self.assertEqual(3, self.evaluate(table.size()))\n\n def test_index_table_from_file_with_invalid_hashers(self):\n vocabulary_file = self._createVocabFile(\"invalid_hasher.txt\")\n with self.assertRaises(TypeError):\n lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n vocab_size=3,\n num_oov_buckets=1,\n hasher_spec=1)\n\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file,\n vocab_size=3,\n num_oov_buckets=1,\n hasher_spec=lookup_ops.HasherSpec(\"my-awesome-hash\", None))\n\n self.assertRaises(ValueError, table.lookup,\n constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n def test_index_table_from_file_table_ref_with_oov_buckets(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab9.txt\")\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, num_oov_buckets=1)\n self.assertIsNotNone(table.resource_handle)\n\n def test_index_table_from_file_table_ref_without_oov_buckets(self):\n 
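# Even with num_oov_buckets=0 the returned table must expose a valid\n # resource handle.\n 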
vocabulary_file = self._createVocabFile(\"f2i_vocab10.txt\")\n table = lookup_ops.index_table_from_file(\n vocabulary_file=vocabulary_file, num_oov_buckets=0)\n self.assertIsNotNone(table.resource_handle)\n\n\nclass IndexTableFromTensor(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes\n def test_index_table_from_tensor_with_tensor_init(self):\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=(\"brain\", \"salad\", \"surgery\"), num_oov_buckets=1)\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(\n table.lookup(constant_op.constant((\"salad\", \"surgery\", \"tarkus\"))))\n else:\n # Reinitializing a table in eager should work.\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=(\"brain\", \"salad\", \"surgery\"), num_oov_buckets=1)\n self.evaluate(lookup_ops.tables_initializer())\n ids = table.lookup(constant_op.constant((\"salad\", \"surgery\", \"tarkus\")))\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n\n def test_int32_index_table_from_tensor_with_tensor_init(self):\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)\n ids = table.lookup(constant_op.constant((1, -1000, 11), dtype=dtypes.int32))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.FailedPreconditionError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n\n def test_int64_index_table_from_tensor_with_tensor_init(self):\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)\n ids = table.lookup(constant_op.constant((1, -1000, 11), dtype=dtypes.int64))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.FailedPreconditionError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, 3), self.evaluate(ids))\n\n def test_index_table_from_tensor_with_default_value(self):\n default_value = -42\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=[\"brain\", \"salad\", \"surgery\"],\n default_value=default_value)\n ids = table.lookup(constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.FailedPreconditionError):\n self.evaluate(ids)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((1, 2, default_value), self.evaluate(ids))\n\n def test_index_table_from_tensor_missing_vocabulary_list(self):\n with self.assertRaisesRegex(ValueError,\n \"`vocabulary_list` must be specified\"):\n lookup_ops.index_table_from_tensor(\n vocabulary_list=None, num_oov_buckets=1)\n\n def test_index_table_from_tensor_empty_vocabulary_list(self):\n with self.assertRaisesRegex(errors_impl.OpError,\n \"keys and values cannot be empty\"):\n _ = lookup_ops.index_table_from_tensor(\n vocabulary_list=np.array([], dtype=np.str_), num_oov_buckets=1)\n self.evaluate(lookup_ops.tables_initializer())\n\n def test_index_table_from_tensor_with_invalid_hashers(self):\n with self.assertRaises(TypeError):\n lookup_ops.index_table_from_tensor(\n vocabulary_list=[\"brain\", \"salad\", \"surgery\"],\n num_oov_buckets=1,\n hasher_spec=1)\n\n table = lookup_ops.index_table_from_tensor(\n vocabulary_list=[\"brain\", \"salad\", \"surgery\"],\n num_oov_buckets=1,\n hasher_spec=lookup_ops.HasherSpec(\"my-awesome-hash\", None))\n\n self.assertRaises(ValueError, table.lookup,\n 
constant_op.constant([\"salad\", \"surgery\", \"tarkus\"]))\n\n\nclass IndexToStringTableFromFileTest(test.TestCase):\n\n def _createVocabFile(self, basename, values=(\"brain\", \"salad\", \"surgery\")):\n vocabulary_file = os.path.join(self.get_temp_dir(), basename)\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join(values) + \"\\n\")\n return vocabulary_file\n\n def test_index_to_string_table(self):\n vocabulary_path = self._createVocabFile(\"i2f_vocab1.txt\")\n # vocabulary_file supports string and tensor\n type_funcs = [str, constant_op.constant]\n for type_func in type_funcs:\n vocabulary_file = type_func(vocabulary_path)\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file)\n features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(features)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"),\n self.evaluate(features))\n\n def test_index_to_string_table_from_multicolumn_file(self):\n vocabulary_file = self._createVocabFile(\n \"f2i_vocab1.txt\", values=(\"brain\\t300\", \"salad\\t20\", \"surgery\\t1\"))\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file,\n key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,\n value_column_index=0)\n features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(features)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"),\n self.evaluate(features))\n\n def test_index_to_string_table_from_multicolumn_file_custom_delimiter(self):\n vocabulary_file = self._createVocabFile(\n \"f2i_vocab1.txt\", values=(\"brain 300\", \"salad 20\", \"surgery 1\"))\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file,\n key_column_index=lookup_ops.TextFileIndex.LINE_NUMBER,\n value_column_index=0,\n delimiter=\" \")\n features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(features)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"),\n self.evaluate(features))\n\n def test_index_to_string_table_with_default_value(self):\n default_value = b\"NONE\"\n vocabulary_file = self._createVocabFile(\"f2i_vocab2.txt\")\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file, default_value=default_value)\n features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(features)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((b\"salad\", b\"surgery\", default_value),\n self.evaluate(features))\n\n def test_index_to_string_table_with_vocab_size_too_small(self):\n default_value = b\"NONE\"\n vocabulary_file = self._createVocabFile(\"f2i_vocab2.txt\")\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file,\n vocab_size=2,\n default_value=default_value)\n features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n 
self.evaluate(features)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((b\"salad\", default_value, default_value),\n self.evaluate(features))\n\n def test_index_to_string_table_with_vocab_size_too_large(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab6.txt\")\n with self.assertRaisesRegex(errors_impl.InvalidArgumentError,\n \"Invalid vocab_size\"):\n _ = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=4)\n self.evaluate(lookup_ops.tables_initializer())\n\n def test_index_to_string_table_with_vocab_size(self):\n vocabulary_file = self._createVocabFile(\"f2i_vocab7.txt\")\n table = lookup_ops.index_to_string_table_from_file(\n vocabulary_file=vocabulary_file, vocab_size=3)\n features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))\n\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(features)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((b\"salad\", b\"surgery\", b\"UNK\"), self.evaluate(features))\n\n\nclass IndexToStringTableFromTensorTest(test.TestCase):\n\n def test_index_to_string_table_from_tensor(self):\n vocabulary_list = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n table = lookup_ops.index_to_string_table_from_tensor(\n vocabulary_list=vocabulary_list)\n\n indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n features = table.lookup(indices)\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(features)\n self.evaluate(lookup_ops.tables_initializer())\n\n self.assertAllEqual((b\"brain\", b\"salad\", b\"surgery\", b\"UNK\"),\n self.evaluate(features))\n\n def test_duplicate_entries(self):\n vocabulary_list = constant_op.constant([\"hello\", \"hello\"])\n table = lookup_ops.index_to_string_table_from_tensor(\n vocabulary_list=vocabulary_list)\n indices = constant_op.constant([0, 1, 4], dtypes.int64)\n features = table.lookup(indices)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((b\"hello\", b\"hello\", b\"UNK\"), self.evaluate(features))\n\n def test_index_to_string_with_default_value(self):\n default_value = b\"NONE\"\n vocabulary_list = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n table = lookup_ops.index_to_string_table_from_tensor(\n vocabulary_list=vocabulary_list, default_value=default_value)\n indices = constant_op.constant([1, 2, 4], dtypes.int64)\n features = table.lookup(indices)\n if not context.executing_eagerly():\n with self.assertRaises(errors_impl.OpError):\n self.evaluate(features)\n self.evaluate(lookup_ops.tables_initializer())\n self.assertAllEqual((b\"salad\", b\"surgery\", default_value),\n self.evaluate(features))\n\n\nclass IdTableWithHashBucketsTest(test.TestCase):\n\n def _createVocabFile(self, basename, values=(\"brain\", \"salad\", \"surgery\")):\n vocabulary_file = os.path.join(self.get_temp_dir(), basename)\n with open(vocabulary_file, \"w\") as f:\n f.write(\"\\n\".join(values) + \"\\n\")\n return vocabulary_file\n\n def testStringIdTableWithHashBuckets(self):\n vocab_file = self._createVocabFile(\"feat_to_id_1.txt\")\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value),\n oov_buckets)\n\n self.evaluate(table.initializer)\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n 
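# UNK is out of vocabulary, so it hashes into the single OOV bucket and\n # receives id vocab_size + 0 == 3.\n 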
out = table.lookup(input_string)\n self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))\n\n def testInt32IdTableWithHashBuckets(self):\n vocab_file = self._createVocabFile(\"feat_to_id_2.txt\", (\"42\", \"1\", \"-1000\"))\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),\n default_value),\n oov_buckets,\n key_dtype=dtypes.int32)\n\n self.evaluate(table.initializer)\n\n values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)\n\n out = table.lookup(values)\n self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))\n\n def testInt64IdTableWithHashBuckets(self):\n vocab_file = self._createVocabFile(\"feat_to_id_3.txt\", (\"42\", \"1\", \"-1000\"))\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),\n default_value), oov_buckets)\n\n self.evaluate(table.initializer)\n\n values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)\n\n out = table.lookup(values)\n self.assertAllEqual([0, 1, 2, 3], self.evaluate(out))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table.size()))\n\n def testStringIdTableWithOnlyHashBucket(self):\n oov_buckets = 5\n\n # Set a table that only uses hash buckets, for each input value returns\n # an id calculated by fingerprint(\"input\") mod oov_buckets.\n table = lookup_ops.IdTableWithHashBuckets(None, oov_buckets)\n self.evaluate(table.initializer)\n\n values = constant_op.constant((\"brain\", \"salad\", \"surgery\"))\n\n out = table.lookup(values)\n self.assertAllEqual(\n [\n 3, # fingerprint(\"brain\") mod 5.\n 1, # fingerprint(\"salad\") mod 5.\n 4 # fingerprint(\"surgery\") mod 5\n ],\n self.evaluate(out))\n self.assertEqual(oov_buckets, self.evaluate(table.size()))\n\n def testInt32IdTableWithOnlyHashBucket(self):\n oov_buckets = 5\n\n # Set a table that only uses hash buckets, for each input value returns\n # an id calculated by fingerprint(\"input\") mod oov_buckets.\n table = lookup_ops.IdTableWithHashBuckets(\n None, oov_buckets, key_dtype=dtypes.int32)\n self.evaluate(table.initializer)\n\n input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)\n\n out = table.lookup(input_string)\n self.assertAllEqual(\n [\n 1, # fingerprint(\"42\") mod 5.\n 4, # fingerprint(\"1\") mod 5.\n 2 # fingerprint(\"-1000\") mod 5\n ],\n self.evaluate(out))\n self.assertEqual(oov_buckets, self.evaluate(table.size()))\n\n def testFloat64IdTableWithOnlyHashBucket(self):\n with self.assertRaisesRegex(TypeError, \"Invalid `key_dtype`\"):\n lookup_ops.IdTableWithHashBuckets(\n None, num_oov_buckets=5, key_dtype=dtypes.float64)\n\n def testBoolIdTableWithOnlyHashBucket(self):\n with self.assertRaisesRegex(TypeError, \"Invalid `key_dtype`\"):\n lookup_ops.IdTableWithHashBuckets(\n None, num_oov_buckets=5, key_dtype=dtypes.bool)\n\n def testIdTableWithHashBucketsWithMultipleInitializers(self):\n vocab_file = self._createVocabFile(\"feat_to_id_4.txt\")\n default_value = -1\n vocab_size = 3\n oov_buckets = 3\n\n vocab_table = lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), 
default_value)\n table1 = lookup_ops.IdTableWithHashBuckets(\n vocab_table,\n oov_buckets,\n hasher_spec=lookup_ops.FastHashSpec,\n name=\"table1\")\n\n table2 = lookup_ops.IdTableWithHashBuckets(\n vocab_table,\n oov_buckets,\n hasher_spec=lookup_ops.StrongHashSpec((1, 2)),\n name=\"table2\")\n\n self.evaluate(lookup_ops.tables_initializer())\n\n input_string = constant_op.constant(\n [\"fruit\", \"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out1 = table1.lookup(input_string)\n out2 = table2.lookup(input_string)\n\n out1, out2 = self.evaluate([out1, out2])\n self.assertAllEqual([5, 0, 1, 2, 5], out1)\n self.assertAllEqual([5, 0, 1, 2, 3], out2)\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))\n if not context.executing_eagerly():\n test_util.assert_ops_in_graph({\n \"table1_Lookup/hash_bucket\": \"StringToHashBucketFast\",\n \"table2_Lookup/hash_bucket\": \"StringToHashBucketStrong\",\n }, ops.get_default_graph())\n\n def testIdTableWithHashBucketsInitializationAcrossSessions(self):\n vocab_file = self._createVocabFile(\"feat_to_id_5.txt\")\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n table1 = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value), oov_buckets)\n\n self.evaluate(table1.initializer)\n\n input_string_1 = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n out1 = table1.lookup(input_string_1)\n\n self.assertAllEqual([0, 1, 2, 3], self.evaluate(out1))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))\n\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n\n # Underlying lookup table already initialized in previous session.\n # No need to call self.evaluate(table2.initializer)\n table2 = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value), oov_buckets)\n\n input_string_2 = constant_op.constant([\"fruit\", \"salad\", \"UNK\"])\n\n out2 = table2.lookup(input_string_2)\n\n self.assertAllEqual([3, 1, 3], self.evaluate(out2))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))\n\n def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):\n vocab_file = self._createVocabFile(\"feat_to_id_6.txt\")\n default_value1 = -1\n vocab_size = 3\n oov_buckets = 0\n table1 = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value1),\n oov_buckets)\n\n default_value2 = -2\n table2 = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value2),\n oov_buckets)\n\n self.evaluate(lookup_ops.tables_initializer())\n\n input_string_1 = constant_op.constant(\n [\"brain\", \"salad\", \"surgery\", \"UNK\"])\n input_string_2 = constant_op.constant([\"fruit\", \"salad\", \"UNK\"])\n\n out1 = table1.lookup(input_string_1)\n out2 = table2.lookup(input_string_2)\n\n out1, out2 = self.evaluate([out1, out2])\n self.assertAllEqual([0, 1, 2, -1], out1)\n self.assertAllEqual([-2, 1, -2], out2)\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table1.size()))\n self.assertEqual(vocab_size + oov_buckets, self.evaluate(table2.size()))\n\n def testSparseTensor(self):\n vocab_file = 
self._createVocabFile(\"feat_to_id_7.txt\")\n input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]\n input_shape = [4, 4]\n sp_features = sparse_tensor.SparseTensor(\n constant_op.constant(input_indices, dtypes.int64),\n constant_op.constant([\"brain\", \"salad\", \"brain\", \"surgery\", \"tarkus\"],\n dtypes.string),\n constant_op.constant(input_shape, dtypes.int64))\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),\n -1), 1)\n self.evaluate(table.initializer)\n\n sp_ids = table.lookup(sp_features)\n\n self.assertAllEqual([5], sp_ids.values._shape_as_list())\n\n sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(\n [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])\n\n self.assertAllEqual(input_indices, sp_ids_ind)\n self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)\n self.assertAllEqual(input_shape, sp_ids_shape)\n\n def testRaggedTensor(self):\n vocab_file = self._createVocabFile(\"feat_to_id_7.txt\")\n input_row_splits = [0, 2, 4, 5]\n ragged_features = ragged_tensor.RaggedTensor.from_row_splits(\n constant_op.constant([\"brain\", \"salad\", \"brain\", \"surgery\", \"tarkus\"],\n dtypes.string),\n constant_op.constant(input_row_splits, dtypes.int64))\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),\n -1), 1)\n self.evaluate(table.initializer)\n\n ragged_ids = table.lookup(ragged_features)\n self.assertAllEqual([5], ragged_ids.values._shape_as_list())\n\n ragged_ids_val, ragged_ids_row_splits = self.evaluate(\n [ragged_ids.values, ragged_ids.row_splits])\n\n self.assertAllEqual([0, 1, 0, 2, 3], ragged_ids_val)\n self.assertAllEqual(input_row_splits, ragged_ids_row_splits)\n\n def testInt32SparseTensor(self):\n input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]\n input_shape = [4, 4]\n sp_features = sparse_tensor.SparseTensor(\n constant_op.constant(input_indices, dtypes.int64),\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),\n constant_op.constant(input_shape, dtypes.int64))\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.KeyValueTensorInitializer(\n (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),\n 1,\n key_dtype=dtypes.int32)\n self.evaluate(table.initializer)\n\n sp_ids = table.lookup(sp_features)\n\n self.assertAllEqual([5], sp_ids.values._shape_as_list())\n\n sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(\n [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])\n\n self.assertAllEqual(input_indices, sp_ids_ind)\n self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)\n self.assertAllEqual(input_shape, sp_ids_shape)\n\n def testInt32RaggedTensor(self):\n input_row_splits = [0, 2, 4, 5]\n ragged_features = ragged_tensor.RaggedTensor.from_row_splits(\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),\n constant_op.constant(input_row_splits, dtypes.int32))\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.KeyValueTensorInitializer(\n (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),\n 1,\n key_dtype=dtypes.int32)\n self.evaluate(table.initializer)\n\n ragged_ids = table.lookup(ragged_features)\n\n self.assertAllEqual([5], ragged_ids.values._shape_as_list())\n\n ragged_ids_val, ragged_ids_row_splits = self.evaluate(\n [ragged_ids.values, ragged_ids.row_splits])\n\n self.assertAllEqual([0, 1, 0, 2, 3], ragged_ids_val)\n self.assertAllEqual(input_row_splits, 
ragged_ids_row_splits)\n\n def testInt64SparseTensor(self):\n input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]\n input_shape = [4, 4]\n sp_features = sparse_tensor.SparseTensor(\n constant_op.constant(input_indices, dtypes.int64),\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),\n constant_op.constant(input_shape, dtypes.int64))\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.KeyValueTensorInitializer(\n (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),\n 1,\n key_dtype=dtypes.int64)\n self.evaluate(table.initializer)\n\n sp_ids = table.lookup(sp_features)\n\n self.assertAllEqual([5], sp_ids.values._shape_as_list())\n\n sp_ids_ind, sp_ids_val, sp_ids_shape = self.evaluate(\n [sp_ids.indices, sp_ids.values, sp_ids.dense_shape])\n\n self.assertAllEqual(input_indices, sp_ids_ind)\n self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)\n self.assertAllEqual(input_shape, sp_ids_shape)\n\n def testInt64RaggedTensor(self):\n input_row_splits = [0, 2, 4, 5]\n ragged_features = ragged_tensor.RaggedTensor.from_row_splits(\n constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),\n constant_op.constant(input_row_splits, dtypes.int64))\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_ops.StaticHashTable(\n lookup_ops.KeyValueTensorInitializer(\n (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),\n 1,\n key_dtype=dtypes.int64)\n self.evaluate(table.initializer)\n\n ragged_ids = table.lookup(ragged_features)\n\n self.assertAllEqual([5], ragged_ids.values._shape_as_list())\n\n ragged_ids_val, ragged_ids_row_splits = self.evaluate(\n [ragged_ids.values, ragged_ids.row_splits])\n\n self.assertAllEqual([0, 1, 0, 2, 3], ragged_ids_val)\n self.assertAllEqual(input_row_splits, ragged_ids_row_splits)\n\n def testIdTableWithHashBucketsWithInvalidHashers(self):\n vocab_file = self._createVocabFile(\"feat_to_id_4.txt\")\n default_value = -1\n vocab_size = 3\n oov_buckets = 1\n lookup_table = lookup_ops.StaticHashTable(\n lookup_ops.TextFileIdTableInitializer(\n vocab_file, vocab_size=vocab_size), default_value)\n\n with self.assertRaises(TypeError):\n lookup_ops.IdTableWithHashBuckets(\n lookup_table, oov_buckets, hasher_spec=1)\n\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_table,\n oov_buckets,\n hasher_spec=lookup_ops.HasherSpec(\"my-awesome-hash\", None))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"UNK\"])\n\n with self.assertRaises(ValueError):\n table.lookup(input_string)\n\n with self.assertRaises(ValueError):\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_table, oov_buckets, hasher_spec=lookup_ops.StrongHashSpec([]))\n\n with self.assertRaises(ValueError):\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_table,\n oov_buckets,\n hasher_spec=lookup_ops.StrongHashSpec([1, 2, 3]))\n\n with self.assertRaises(TypeError):\n table = lookup_ops.IdTableWithHashBuckets(\n lookup_table,\n oov_buckets,\n hasher_spec=lookup_ops.StrongHashSpec([None, 2]))\n\n def testIdTableWithHashBucketsNoInnerTable(self):\n table = lookup_ops.IdTableWithHashBuckets(None, num_oov_buckets=1)\n self.assertIsNone(table.resource_handle)\n\n\[email protected]_parameters(\n (f\"_{is_anonymous}\", is_anonymous) for is_anonymous in [False, True])\nclass MutableHashTableOpTest(test.TestCase):\n\n def testMutableHashTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", 
\"surgery\", \"tarkus\"])\n values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(4, self.evaluate(table.size()))\n\n remove_string = constant_op.constant([\"tarkus\", \"tank\"])\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n self.assertAllEqual([3], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n exported_keys, exported_values = table.export()\n\n # exported data is in the order of the internal map, i.e. undefined\n sorted_keys = np.sort(self.evaluate(exported_keys))\n sorted_values = np.sort(self.evaluate(exported_values))\n self.assertAllEqual([b\"brain\", b\"salad\", b\"surgery\"], sorted_keys)\n self.assertAllEqual([0, 1, 2], sorted_values)\n\n # TODO(https://github.com/tensorflow/tensorflow/issues/24439): remove exepectedFailure when fixed\n @unittest.expectedFailure\n @test_util.run_v2_only\n def testImportedHashTable(self, is_anonymous):\n g = ops.Graph()\n with g.as_default():\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"tarkus\"])\n values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.evaluate(table.insert(keys, values))\n op = table.lookup(constant_op.constant([\"brain\", \"salad\", \"tank\"]))\n meta_graph = saver.export_meta_graph()\n\n def f():\n saver.import_meta_graph(meta_graph)\n return ops.get_default_graph().get_tensor_by_name(op.name)\n\n wrapped = wrap_function.wrap_function(f, [])\n self.assertAllEqual([0, 1, -1], wrapped())\n\n @test_util.run_v1_only(\"SaverV1\")\n def testSaveRestore(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n with self.session(graph=ops.Graph()) as sess:\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(20.0, name=\"v1\")\n\n default_val = -1\n keys = constant_op.constant([\"b\", \"c\", \"d\"], dtypes.string)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n name=\"t1\",\n checkpoint=True,\n experimental_is_anonymous=is_anonymous)\n\n save = saver.Saver()\n self.evaluate(variables.global_variables_initializer())\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n\n self.assertAllEqual(0, self.evaluate(table.size()))\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n val = save.save(sess, save_path)\n self.assertIsInstance(val, six.string_types)\n self.assertEqual(save_path, val)\n\n with self.session(graph=ops.Graph()) as sess:\n v0 = variables.Variable(-1.0, name=\"v0\")\n v1 = variables.Variable(-1.0, name=\"v1\")\n default_val = -1\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n name=\"t1\",\n 
checkpoint=True,\n experimental_is_anonymous=is_anonymous)\n self.evaluate(\n table.insert(\n constant_op.constant([\"a\", \"c\"], dtypes.string),\n constant_op.constant([12, 24], dtypes.int64)))\n self.assertAllEqual(2, self.evaluate(table.size()))\n\n save = saver.Saver()\n\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path)\n # Check that the parameter nodes have been restored.\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"a\", \"b\", \"c\", \"d\", \"e\"],\n dtypes.string)\n output = table.lookup(input_string)\n self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))\n\n @test_util.run_v1_only(\"SaverV1\")\n def testSaveRestoreOnlyTable(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n with self.session(graph=ops.Graph()) as sess:\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(20.0, name=\"v1\")\n\n default_val = -1\n keys = constant_op.constant([\"b\", \"c\", \"d\"], dtypes.string)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n name=\"t1\",\n checkpoint=True,\n experimental_is_anonymous=is_anonymous)\n\n save = saver.Saver([table])\n self.evaluate(variables.global_variables_initializer())\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n\n self.assertAllEqual(0, self.evaluate(table.size()))\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n val = save.save(sess, save_path)\n self.assertIsInstance(val, six.string_types)\n self.assertEqual(save_path, val)\n\n with self.session(graph=ops.Graph()) as sess:\n default_val = -1\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n name=\"t1\",\n checkpoint=True,\n experimental_is_anonymous=is_anonymous)\n self.evaluate(\n table.insert(\n constant_op.constant([\"a\", \"c\"], dtypes.string),\n constant_op.constant([12, 24], dtypes.int64)))\n self.assertAllEqual(2, self.evaluate(table.size()))\n\n save = saver.Saver([table])\n\n # Restore the saved values in the parameter nodes.\n save.restore(sess, save_path)\n # Check that the parameter nodes have been restored.\n\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"a\", \"b\", \"c\", \"d\", \"e\"],\n dtypes.string)\n output = table.lookup(input_string)\n self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))\n\n @test_util.run_in_graph_and_eager_modes\n def testObjectSaveRestore(self, is_anonymous):\n if is_anonymous and not context.executing_eagerly():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_prefix = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n v0 = variables.Variable(10.0, name=\"v0\")\n v1 = variables.Variable(20.0, name=\"v1\")\n\n default_val = -1\n keys = constant_op.constant([\"b\", \"c\", \"d\"], dtypes.string)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n 
name=\"t1\",\n checkpoint=True,\n experimental_is_anonymous=is_anonymous)\n\n checkpoint = trackable.Checkpoint(table=table, v0=v0, v1=v1)\n self.evaluate([v0.initializer, v1.initializer])\n\n # Check that the parameter nodes have been initialized.\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n\n self.assertAllEqual(0, self.evaluate(table.size()))\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n save_path = checkpoint.save(save_prefix)\n del table, checkpoint, v0, v1\n\n v0 = variables.Variable(-1.0, name=\"v0\")\n v1 = variables.Variable(-1.0, name=\"v1\")\n default_val = -1\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n name=\"t1\",\n checkpoint=True,\n experimental_is_anonymous=is_anonymous)\n self.evaluate(\n table.insert(\n constant_op.constant([\"a\", \"c\"], dtypes.string),\n constant_op.constant([12, 24], dtypes.int64)))\n self.assertAllEqual(2, self.evaluate(table.size()))\n\n checkpoint = trackable.Checkpoint(table=table, v0=v0, v1=v1)\n\n # Restore the saved values in the parameter nodes.\n checkpoint.restore(save_path).run_restore_ops()\n # Check that the parameter nodes have been restored.\n self.assertEqual(10.0, self.evaluate(v0))\n self.assertEqual(20.0, self.evaluate(v1))\n\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"a\", \"b\", \"c\", \"d\", \"e\"],\n dtypes.string)\n output = table.lookup(input_string)\n self.assertAllEqual([-1, 0, 1, 2, -1], self.evaluate(output))\n\n @test_util.run_v2_only\n def testSavedModelSaveRestore(self, is_anonymous):\n save_dir = os.path.join(self.get_temp_dir(), \"save_restore\")\n save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), \"hash\")\n\n root = tracking.AutoTrackable()\n\n default_value = -1\n keys = constant_op.constant([11, 12, 13], dtypes.int64)\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n root.table = lookup_ops.MutableHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value,\n experimental_is_anonymous=is_anonymous)\n\n @def_function.function(\n input_signature=[tensor_spec.TensorSpec((), dtypes.int64)])\n def lookup(key):\n return root.table.lookup(key)\n\n @def_function.function(input_signature=[])\n def size():\n return root.table.size()\n\n @def_function.function(input_signature=[])\n def is_ref_counting():\n return test_ops.is_resource_handle_ref_counting(\n root.table.resource_handle)\n\n root.lookup = lookup\n root.size = size\n root.is_ref_counting = is_ref_counting\n\n self.assertEqual(root.table.size(), 0)\n root.table.insert(keys, values)\n self.assertEqual(root.table.size(), 3)\n self.assertEqual(root.table.lookup(12), 1)\n self.assertEqual(root.table.lookup(10), -1)\n self.assertEqual(len(root.table.export()[0]), 3)\n self.assertEqual(root.is_ref_counting(), is_anonymous)\n\n saved_model_save.save(root, save_path)\n\n del root\n loaded = saved_model_load.load(save_path)\n self.assertEqual(loaded.size(), 3)\n self.assertEqual(loaded.lookup(12), 1)\n self.assertEqual(loaded.lookup(10), -1)\n self.assertEqual(loaded.is_ref_counting(), is_anonymous)\n\n @test_util.run_v1_only(\"Multiple sessions\")\n def testSharing(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n # Start a server to store the table state\n server = server_lib.Server({\"local0\": [\"localhost:0\"]},\n protocol=\"grpc\",\n start=True)\n # Create two sessions sharing the same state\n 
session1 = session.Session(server.target)\n session2 = session.Session(server.target)\n\n table = lookup_ops.MutableHashTable(\n dtypes.int64,\n dtypes.string,\n \"-\",\n name=\"t1\",\n experimental_is_anonymous=is_anonymous)\n\n # Populate the table in the first session\n with session1:\n self.assertAllEqual(0, table.size())\n\n keys = constant_op.constant([11, 12], dtypes.int64)\n values = constant_op.constant([\"a\", \"b\"])\n table.insert(keys, values).run()\n self.assertAllEqual(2, table.size())\n\n output = table.lookup(constant_op.constant([11, 12, 13], dtypes.int64))\n self.assertAllEqual([b\"a\", b\"b\", b\"-\"], output)\n\n # Verify that we can access the shared data from the second session\n with session2:\n self.assertAllEqual(2, table.size())\n\n output = table.lookup(constant_op.constant([10, 11, 12], dtypes.int64))\n self.assertAllEqual([b\"-\", b\"a\", b\"b\"], output)\n\n def testMutableHashTableOfTensors(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant([-1, -1], dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"tarkus\"])\n values = constant_op.constant([[0, 1], [2, 3], [4, 5], [6, 7]],\n dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(4, self.evaluate(table.size()))\n\n remove_string = constant_op.constant([\"tarkus\", \"tank\"])\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n self.assertAllEqual([3, 2], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([[0, 1], [2, 3], [-1, -1]], result)\n\n exported_keys, exported_values = table.export()\n # exported data is in the order of the internal map, i.e. 
undefined\n sorted_keys = np.sort(self.evaluate(exported_keys))\n sorted_values = np.sort(self.evaluate(exported_values), axis=0)\n self.assertAllEqual([b\"brain\", b\"salad\", b\"surgery\"], sorted_keys)\n sorted_expected_values = np.sort([[4, 5], [2, 3], [0, 1]], axis=0)\n self.assertAllEqual(sorted_expected_values, sorted_values)\n\n def testMutableHashTableExportInsert(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant([-1, -1], dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)\n table1 = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table1.size()))\n self.evaluate(table1.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table1.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n expected_output = [[0, 1], [2, 3], [-1, -1]]\n output1 = table1.lookup(input_string)\n self.assertAllEqual(expected_output, self.evaluate(output1))\n\n exported_keys, exported_values = table1.export()\n self.assertAllEqual(3, self.evaluate(exported_keys).size)\n self.assertAllEqual(6, self.evaluate(exported_values).size)\n\n # Populate a second table from the exported data\n table2 = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table2.size()))\n self.evaluate(table2.insert(exported_keys, exported_values))\n self.assertAllEqual(3, self.evaluate(table2.size()))\n\n # Verify lookup result is still the same\n output2 = table2.lookup(input_string)\n self.assertAllEqual(expected_output, self.evaluate(output2))\n\n def testMutableHashTableOfTensorsInvalidShape(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant([-1, -1], dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n # Shape [6] instead of [3, 2]\n values = constant_op.constant([0, 1, 2, 3, 4, 5], dtypes.int64)\n with self.assertRaisesOpError(\"Expected shape\"):\n self.evaluate(table.insert(keys, values))\n\n # Shape [2,3] instead of [3, 2]\n values = constant_op.constant([[0, 1, 2], [3, 4, 5]], dtypes.int64)\n with self.assertRaisesOpError(\"Expected shape\"):\n self.evaluate(table.insert(keys, values))\n\n # Shape [2, 2] instead of [3, 2]\n values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)\n with self.assertRaisesOpError(\"Expected shape\"):\n self.evaluate(table.insert(keys, values))\n\n # Shape [3, 1] instead of [3, 2]\n values = constant_op.constant([[0], [2], [4]], dtypes.int64)\n with self.assertRaisesOpError(\"Expected shape\"):\n self.evaluate(table.insert(keys, values))\n\n # Valid Insert\n values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n def testMutableHashTableInvalidDefaultValue(self, is_anonymous):\n default_val = constant_op.constant([[-1, -1]], dtypes.int64)\n with self.assertRaisesOpError(\"Default value must be a vector\"):\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n 
default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n def testMutableHashTableDuplicateInsert(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\", \"brain\"])\n values = constant_op.constant([0, 1, 2, 3], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([3, 1, -1], result)\n\n def testMutableHashTableFindHighRank(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([[\"brain\", \"salad\"],\n [\"tank\", \"tarkus\"]])\n output = table.lookup(input_string)\n self.assertAllEqual([2, 2], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([[0, 1], [-1, -1]], result)\n\n def testMutableHashTableFindWithInvalidShapeDefaultValue(self, is_anonymous):\n default_val = [-1, -1]\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n input_string = constant_op.constant([[\"brain\", \"salad\"], [\"tank\",\n \"tarkus\"]])\n\n invalid_default_val = constant_op.constant(\n [[-2, -3], [-4, -5], [-6, -7], [-8, -9]], dtypes.int64)\n\n with self.assertRaisesRegex(\n (ValueError, errors_impl.InvalidArgumentError),\n \"Expected shape \\[2\\] or \\[2,2,2\\] for default value, got \\[4,2]\"):\n self.evaluate(table.lookup(input_string, invalid_default_val))\n\n invalid_default_val = constant_op.constant([[[-2, -3], [-4, -5]]],\n dtypes.int64)\n with self.assertRaisesRegex(\n (ValueError, errors_impl.InvalidArgumentError),\n \"Expected shape \\[2\\] or \\[2,2,2\\] for default value, got \\[1,2,2\\]\"):\n self.evaluate(table.lookup(input_string, invalid_default_val))\n\n def testMutableHashTableFindHighRankScalarWithDynamicDefaultValue(\n self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([[\"brain\", \"salad\"], [\"tank\",\n \"tarkus\"]])\n\n dynamic_default_val = constant_op.constant([[-2, -3], [-4, -5]],\n dtypes.int64)\n output = table.lookup(input_string, dynamic_default_val)\n self.assertAllEqual([2, 2], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([[0, 1], 
[-4, -5]], result)\n\n def testMutableHashTableFindHighRankVectorWithDynamicDefaultValue(\n self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = [-1, -1]\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([[\"brain\", \"salad\"], [\"tank\",\n \"tarkus\"]])\n\n dynamic_default_val = constant_op.constant(\n [[[-2, -3], [-4, -5]], [[-6, -7], [-8, -9]]], dtypes.int64)\n output = table.lookup(input_string, dynamic_default_val)\n self.assertAllEqual([2, 2, 2], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual([[[0, 1], [2, 3]], [[-6, -7], [-8, -9]]], result)\n\n def testMutableHashTableInsertHighRank(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([[\"brain\", \"salad\"], [\"surgery\", \"tank\"]])\n values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(4, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\", \"tarkus\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, 3, -1], result)\n\n def testMutableHashTableRemoveHighRank(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([[\"brain\", \"salad\"], [\"surgery\", \"tank\"]])\n values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(4, self.evaluate(table.size()))\n\n remove_string = constant_op.constant([\"salad\", \"tarkus\"])\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\", \"tarkus\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([0, -1, 3, -1], result)\n\n def testMutableHashTableOfTensorsFindHighRank(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant([-1, -1, -1], dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]],\n dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([[\"brain\", \"salad\"],\n [\"tank\", \"tarkus\"]])\n output = table.lookup(input_string)\n self.assertAllEqual([2, 2, 3], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual(\n [[[0, 1, 2], [2, 3, 4]], [[-1, 
-1, -1], [-1, -1, -1]]], result)\n\n def testMutableHashTableOfTensorsRemoveHighRank(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant([-1, -1, -1], dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]],\n dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n remove_string = constant_op.constant([[\"brain\", \"tank\"]])\n self.evaluate(table.remove(remove_string))\n self.assertAllEqual(2, self.evaluate(table.size()))\n\n input_string = constant_op.constant([[\"brain\", \"salad\"],\n [\"surgery\", \"tank\"]])\n output = table.lookup(input_string)\n self.assertAllEqual([2, 2, 3], output.get_shape())\n\n result = self.evaluate(output)\n self.assertAllEqual(\n [[[-1, -1, -1], [2, 3, 4]], [[4, 5, 6], [-1, -1, -1]]], result)\n\n def testMultipleMutableHashTables(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n\n table1 = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n table2 = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n table3 = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.evaluate(table1.insert(keys, values))\n self.evaluate(table2.insert(keys, values))\n self.evaluate(table3.insert(keys, values))\n\n self.assertAllEqual(3, self.evaluate(table1.size()))\n self.assertAllEqual(3, self.evaluate(table2.size()))\n self.assertAllEqual(3, self.evaluate(table3.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output1 = table1.lookup(input_string)\n output2 = table2.lookup(input_string)\n output3 = table3.lookup(input_string)\n\n out1, out2, out3 = self.evaluate([output1, output2, output3])\n self.assertAllEqual([0, 1, -1], out1)\n self.assertAllEqual([0, 1, -1], out2)\n self.assertAllEqual([0, 1, -1], out3)\n\n def testMutableHashTableWithTensorDefault(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = constant_op.constant(-1, dtypes.int64)\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual([0, 1, -1], result)\n\n def testSignatureMismatch(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1, 2], dtypes.int64)\n table = lookup_ops.MutableHashTable(\n 
dtypes.string,\n dtypes.int64,\n default_val,\n experimental_is_anonymous=is_anonymous)\n\n # insert with keys of the wrong type\n with self.assertRaises(ValueError):\n self.evaluate(table.insert(constant_op.constant([4, 5, 6]), values))\n\n # insert with values of the wrong type\n with self.assertRaises(ValueError):\n self.evaluate(table.insert(keys, constant_op.constant([\"a\", \"b\", \"c\"])))\n\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string_ref = variables.Variable(\"brain\")\n input_int64_ref = variables.Variable(-1, dtype=dtypes.int64)\n self.evaluate(variables.global_variables_initializer())\n\n # Ref types do not produce an insert signature mismatch.\n self.evaluate(table.insert(input_string_ref, input_int64_ref))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n # Ref types do not produce a lookup signature mismatch.\n self.assertEqual(-1, self.evaluate(table.lookup(input_string_ref)))\n\n # lookup with keys of the wrong type\n input_string = constant_op.constant([1, 2, 3], dtypes.int64)\n with self.assertRaises(ValueError):\n self.evaluate(table.lookup(input_string))\n\n # default value of the wrong type\n with self.assertRaises(TypeError):\n lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.int64,\n \"UNK\",\n experimental_is_anonymous=is_anonymous)\n\n def testMutableHashTableStringFloat(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1.5\n keys = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n values = constant_op.constant([0, 1.1, 2.2], dtypes.float32)\n table = lookup_ops.MutableHashTable(\n dtypes.string,\n dtypes.float32,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([\"brain\", \"salad\", \"tank\"])\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllClose([0, 1.1, default_val], result)\n\n def testMutableHashTableIntFloat(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = -1.0\n keys = constant_op.constant([3, 7, 0], dtypes.int64)\n values = constant_op.constant([7.5, -1.2, 9.9], dtypes.float32)\n table = lookup_ops.MutableHashTable(\n dtypes.int64,\n dtypes.float32,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n input_string = constant_op.constant([7, 0, 11], dtypes.int64)\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllClose([-1.2, 9.9, default_val], result)\n\n def testMutableHashTableInt64String(self, is_anonymous):\n if is_anonymous and not tf2.enabled():\n self.skipTest(SKIP_ANONYMOUS_IN_TF1_REASON)\n default_val = \"n/a\"\n keys = constant_op.constant([0, 1, 2], dtypes.int64)\n values = constant_op.constant([\"brain\", \"salad\", \"surgery\"])\n table = lookup_ops.MutableHashTable(\n dtypes.int64,\n dtypes.string,\n default_val,\n experimental_is_anonymous=is_anonymous)\n self.assertAllEqual(0, self.evaluate(table.size()))\n\n self.evaluate(table.insert(keys, values))\n self.assertAllEqual(3, self.evaluate(table.size()))\n\n 
input_string = constant_op.constant([0, 1, 3], dtypes.int64)\n output = table.lookup(input_string)\n\n result = self.evaluate(output)\n self.assertAllEqual((b\"brain\", b\"salad\", b\"n/a\"), result)\n\n def testExportShapeInference(self, is_anonymous):\n default_value = -1\n table = lookup_ops.MutableHashTable(\n dtypes.int64,\n dtypes.int64,\n default_value=default_value,\n experimental_is_anonymous=is_anonymous)\n actual_shapes = [t.shape for t in table.export()]\n inferred_shapes = []\n\n @def_function.function\n def f():\n for t in table.export():\n inferred_shapes.append(t.shape)\n\n f()\n self.assertLen(actual_shapes, 2)\n self.assertLen(inferred_shapes, 2)\n self.assertTrue(inferred_shapes[0].is_compatible_with(actual_shapes[0]))\n self.assertTrue(inferred_shapes[1].is_compatible_with(actual_shapes[1]))\n\n\nclass MutableHashTableBenchmark(test.Benchmark):\n\n def _create_table(self):\n return lookup_ops.MutableHashTable(dtypes.int64, dtypes.float32, 0.0)\n\n def benchmark_single_repeated_scalar_insert_scalar(self):\n table = self._create_table()\n value = variables.Variable(1.0)\n insert = table.insert(0, value)\n size = table.size()\n with session.Session() as sess:\n sess.run(value.initializer)\n self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)\n assert sess.run(size) == 1\n\n def benchmark_many_repeated_scalar_insert_scalar(self):\n table = self._create_table()\n c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()\n value = variables.Variable(1.0)\n insert = table.insert(c, value)\n size = table.size()\n with session.Session() as sess:\n sess.run(value.initializer)\n self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)\n assert sess.run(size) >= 10000\n\n def benchmark_single_repeated_batch_32_insert_scalar(self):\n table = self._create_table()\n value = variables.Variable([1.0] * 32)\n insert = table.insert(list(range(32)), value)\n size = table.size()\n with session.Session() as sess:\n sess.run(value.initializer)\n self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)\n assert sess.run(size) == 32\n\n def benchmark_many_repeated_batch_32_insert_scalar(self):\n table = self._create_table()\n c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()\n value = variables.Variable([1.0] * 32)\n insert = table.insert(32 * c + list(range(32)), value)\n size = table.size()\n with session.Session() as sess:\n sess.run(value.initializer)\n self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)\n assert sess.run(size) >= 1000 * 32\n\n\nclass DenseHashTableBenchmark(MutableHashTableBenchmark):\n\n def _create_table(self):\n return lookup_ops.DenseHashTable(\n dtypes.int64,\n dtypes.float32,\n default_value=0.0,\n empty_key=-1,\n deleted_key=-2)\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Generates a toy v1 saved model for testing.\"\"\"\n\nimport shutil\nfrom absl import app\nfrom absl import flags\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.saved_model import builder\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.saved_model import signature_def_utils\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.saved_model import utils\n\nflags.DEFINE_string('saved_model_path', '', 'Path to save the model to.')\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n shutil.rmtree(FLAGS.saved_model_path)\n\n # Create the graph\n x = array_ops.placeholder(dtypes.int32, shape=None, name='input')\n y = variables.Variable(\n name='y', dtype=dtypes.int32, initial_value=[[1, 2], [3, 4]])\n r = y.sparse_read(x, name='result')\n\n sess = session.Session()\n\n sess.run(variables.global_variables_initializer())\n\n sm_builder = builder.SavedModelBuilder(FLAGS.saved_model_path)\n tensor_info_x = utils.build_tensor_info(x)\n tensor_info_r = utils.build_tensor_info(r)\n\n toy_signature = (\n signature_def_utils.build_signature_def(\n inputs={'x': tensor_info_x},\n outputs={'r': tensor_info_r},\n method_name=signature_constants.PREDICT_METHOD_NAME))\n\n sm_builder.add_meta_graph_and_variables(\n sess, [tag_constants.SERVING],\n signature_def_map={\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: toy_signature,\n },\n strip_default_attrs=True)\n sm_builder.save()\n\n\nif __name__ == '__main__':\n app.run(main)\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Micro benchmark.\n\nbazel run -c opt --config=cuda \\\n //third_party/tensorflow/python/ops/numpy_ops/benchmarks:micro_benchmarks -- \\\n --number=100 --repeat=100 \\\n --benchmarks=.\n\"\"\"\nimport gc\nimport time\n\nfrom absl import flags\nfrom absl import logging\n\nimport numpy as np # pylint: disable=unused-import\nimport tensorflow.compat.v2 as tf\n\nfrom tensorflow.python.ops import numpy_ops as tfnp # pylint: disable=g-direct-tensorflow-import\nfrom tensorflow.python.ops.numpy_ops.integration_test.benchmarks import numpy_mlp\nfrom tensorflow.python.ops.numpy_ops.integration_test.benchmarks import tf_numpy_mlp\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('repeat', 100, '#Measurements per benchmark.')\nflags.DEFINE_integer('number', 100, '#Runs per a measure.')\n\n\nclass MicroBenchmarks(tf.test.Benchmark):\n \"\"\"Main micro benchmark class.\"\"\"\n\n def _benchmark_and_report(\n self,\n name,\n fn,\n repeat=None,\n number=None):\n \"\"\"Run fn repeat * number times, report time, and return fastest time.\"\"\"\n # Can't make these default above since the flags may not have been parsed\n # at module import time.\n repeat = repeat or int(FLAGS.repeat)\n number = number or int(FLAGS.number)\n\n # Warmup\n fn()\n\n times = []\n for _ in range(repeat):\n gc.disable()\n start = time.time()\n for _ in range(number):\n fn()\n times.append(time.time() - start)\n gc.enable()\n gc.collect()\n\n # Regular benchmark to report numbers.\n fastest_time_us = min(times) * 1e6 / number\n total_time = sum(times)\n self.report_benchmark(name=name,\n wall_time=total_time,\n extras={'fastest_time_us': fastest_time_us})\n\n return fastest_time_us\n\n def benchmark_tf_np_mlp_inference_batch_1_cpu(self):\n with tf.device('/CPU:0'):\n model = tf_numpy_mlp.MLP()\n x = tfnp.ones(shape=(1, 10)).astype(np.float32)\n self._benchmark_and_report(self._get_name(), lambda: model.inference(x))\n\n def benchmark_tf_np_tf_function_mlp_inference_batch_1_cpu(self):\n with tf.device('/CPU:0'):\n model = tf_numpy_mlp.MLP()\n x = tfnp.ones(shape=(1, 10)).astype(np.float32)\n self._benchmark_and_report(\n self._get_name(), tf.function(lambda: model.inference(x)))\n\n def benchmark_numpy_mlp_inference_batch_1_cpu(self):\n model = numpy_mlp.MLP()\n x = np.random.uniform(size=(1, 10)).astype(np.float32, copy=False)\n self._benchmark_and_report(self._get_name(), lambda: model.inference(x))\n\n def _benchmark_np_and_tf_np(self, name, op, args, repeat=None): # pylint: disable=redefined-builtin\n fn = getattr(np, op)\n assert fn is not None\n\n np_time = self._benchmark_and_report(\n '{}_numpy'.format(name), lambda: fn(*args), repeat=repeat)\n\n fn = getattr(tfnp, op)\n assert fn is not None\n\n with tf.device('CPU:0'):\n tf_time = self._benchmark_and_report(\n '{}_tfnp_cpu'.format(name), lambda: fn(*args), repeat=repeat)\n\n return 
np_time, tf_time\n\n def _print_times(self, op, sizes, times):\n # For easy reporting.\n print('For np.{}:'.format(op))\n print('{:<15} {:>11} {:>11}'.format('Size', 'NP time', 'TF NP Time'))\n for size, (np_time, tf_time) in zip(sizes, times):\n print('{:<15} {:>10.5}us {:>10.5}us'.format(\n str(size), np_time, tf_time))\n print()\n\n def _benchmark_np_and_tf_np_unary(self, op):\n sizes = [(100,), (10000,), (1000000,)]\n repeats = [FLAGS.repeat] * 2 + [10]\n times = []\n for size, repeat in zip(sizes, repeats):\n x = np.random.uniform(size=size).astype(np.float32, copy=False)\n name = '{}_{}'.format(self._get_name(), size)\n times.append(self._benchmark_np_and_tf_np(name, op, (x,), repeat))\n self._print_times(op, sizes, times)\n\n def benchmark_count_nonzero(self):\n self._benchmark_np_and_tf_np_unary('count_nonzero')\n\n def benchmark_log(self):\n self._benchmark_np_and_tf_np_unary('log')\n\n def benchmark_exp(self):\n self._benchmark_np_and_tf_np_unary('exp')\n\n def benchmark_tanh(self):\n self._benchmark_np_and_tf_np_unary('tanh')\n\n def benchmark_matmul(self):\n sizes = [(2, 2), (10, 10), (100, 100), (200, 200), (1000, 1000)]\n # Override repeat flag since this can be very slow.\n repeats = [FLAGS.repeat] * 3 + [50, 10]\n times = []\n for size, repeat in zip(sizes, repeats):\n x = np.random.uniform(size=size).astype(np.float32, copy=False)\n name = '{}_{}'.format(self._get_name(), size)\n times.append(\n self._benchmark_np_and_tf_np(name, 'matmul', (x, x), repeat=repeat))\n\n self._print_times('matmul', sizes, times)\n\n\nif __name__ == '__main__':\n logging.set_verbosity(logging.WARNING)\n tf.enable_v2_behavior()\n tf.test.main()\n"
] | [
[
"tensorflow.python.framework.tensor_util.is_tf_type",
"tensorflow.python.eager.function_saved_model_utils.TrackableConstant",
"tensorflow.python.trackable.data_structures.wrap_or_unwrap",
"tensorflow.python.ops.resource_variable_ops.is_resource_variable"
],
[
"tensorflow.python.ops.lookup_ops.index_to_string_table_from_tensor",
"tensorflow.python.ops.lookup_ops.index_table_from_file",
"tensorflow.python.ops.lookup_ops.IdTableWithHashBuckets",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.lookup_ops.TextFileInitializer",
"tensorflow.python.ops.lookup_ops.TextFileIdTableInitializer",
"tensorflow.python.training.saver.import_meta_graph",
"tensorflow.python.framework.test_util.run_v1_only",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.training.tracking.graph_view.ObjectGraphView",
"tensorflow.python.eager.wrap_function.wrap_function",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.lookup_ops.KeyValueTensorInitializer",
"tensorflow.python.training.server_lib.Server",
"tensorflow.python.ops.lookup_ops.TextFileStringTableInitializer",
"tensorflow.python.ops.lookup_ops.index_table_from_tensor",
"tensorflow.python.saved_model.save.save",
"tensorflow.python.eager.def_function.function",
"tensorflow.python.training.tracking.tracking.AutoTrackable",
"tensorflow.python.eager.function.defun",
"tensorflow.python.ops.lookup_ops.tables_initializer",
"tensorflow.python.ops.lookup_ops.MutableHashTable",
"tensorflow.python.ops.lookup_ops.index_to_string_table_from_file",
"tensorflow.python.framework.test_ops.is_resource_handle_ref_counting",
"tensorflow.python.client.session.Session",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.lookup_ops.HasherSpec",
"numpy.array",
"tensorflow.python.ops.lookup_ops.DenseHashTable",
"tensorflow.python.saved_model.load.load",
"tensorflow.python.data.experimental.ops.counter.Counter",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.map_fn.map_fn",
"numpy.sort",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.ops.lookup_ops.StrongHashSpec",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.tf2.enabled",
"tensorflow.python.training.saver.export_meta_graph",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.training.tracking.util.Checkpoint",
"tensorflow.python.training.saver.Saver",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.saved_model.utils.build_tensor_info",
"tensorflow.python.saved_model.builder.SavedModelBuilder",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.client.session.Session",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.saved_model.signature_def_utils.build_signature_def"
],
[
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.device",
"tensorflow.python.ops.numpy_ops.integration_test.benchmarks.tf_numpy_mlp.MLP",
"tensorflow.python.ops.numpy_ops.ones",
"tensorflow.python.ops.numpy_ops.integration_test.benchmarks.numpy_mlp.MLP",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aowen-uwmad/PySpecTools | [
"3fd0b68352910df1e653370797a8edd46d92fa1c",
"3fd0b68352910df1e653370797a8edd46d92fa1c"
] | [
"pyspectools/astro/radiative.py",
"tests/test_assignment.py"
] | [
"\n\"\"\" radiative.py\n\n Routines for calculating quantities useful for\n radiative transfer, such as Einstein coefficients\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom scipy import constants\n\nfrom pyspectools.units import kbcm, MHz2cm\n\n\ndef parse_str(filepath):\n # Function that will parse a .str file from SPCAT\n # This file can be generated by supplying 0020 as\n # the first flag in the `.int` file.\n # Returns a Pandas dataframe\n names = [\n \"Frequency\",\n \"RTDM\",\n \"Formatting\",\n \"Upper QN\",\n \"Lower QN\",\n \"Dipole Category\"\n ]\n # Use fixed-width reading in pandas\n str_df = pd.read_fwf(\n filepath,\n widths=[15, 15, 5, 12, 12, 11],\n names=names,\n index=False\n )\n return str_df\n\n\ndef I2S(I, Q, frequency, E_lower, T=300.):\n \"\"\"\n Function for converting intensity (in nm^2 MHz) to the more standard intrinsic linestrength, S_ij mu^2.\n\n Parameters\n ----------\n I - float\n The log of the transition intensity, typically given in catalog files\n Q - float\n Partition function at specified temperature T\n frequency - float\n Frequency of the transition in MHz\n E_lower - float\n Energy of the lower state in wavenumbers\n T - float\n Temperature in Kelvin\n\n Returns\n -------\n siju - float\n Value of the intrinsic linestrength\n \"\"\"\n E_upper = calc_E_upper(frequency, E_lower)\n # top part of the equation\n A = 10.**I * Q\n lower_factor = boltzmann_factor(E_lower, T) # Boltzmann factors\n upper_factor = boltzmann_factor(E_upper, T)\n # Calculate the lower part of the equation\n # The prefactor included here is taken from Brian\n # Drouin's notes\n B = (4.16231e-5 * frequency * (lower_factor - upper_factor))\n return A / B\n\n\ndef calc_E_upper(frequency, E_lower):\n \"\"\"\n Calculate the upper state energy, for a given lower state energy and the frequency of the transition.\n\n Parameters\n ----------\n frequency - float\n Frequency of the transition in MHz\n E_lower - float\n Lower state energy in wavenumbers\n\n Returns\n -------\n E_upper - float\n Upper state energy in wavenumbers\n \"\"\"\n transition_freq = MHz2cm(frequency)\n return transition_freq + E_lower\n\n\ndef boltzmann_factor(E, T):\n \"\"\"\n Calculate the Boltzmann weighting for a given state and temperature.\n\n Parameters\n ----------\n E - float\n State energy in wavenumbers\n T - float\n Temperature in Kelvin\n\n Returns\n -------\n boltzmann_factor - float\n Unitless Boltzmann factor for the state\n \"\"\"\n return np.exp(-E / (kbcm * T))\n\n\ndef approx_Q_linear(B, T):\n \"\"\"\n Approximate rotational partition function for a linear molecule.\n\n Parameters\n ----------\n B - float\n Rotational constant in MHz.\n T - float\n Temperature in Kelvin.\n\n Returns\n -------\n Q - float\n Rotational partition function at temperature T.\n \"\"\"\n Q = 2.0837e4 * (T / B)\n return Q\n\n\ndef approx_Q_top(A, B, T, sigma=1, C=None):\n \"\"\"\n Approximate expression for the (a)symmetric top partition function. The expression is adapted from Gordy and Cook,\n p.g. 57 equation 3.68. By default, the prolate top is used if the C constant is not specified, where B = C.\n Oblate case can also be specified if A = C.\n\n Parameters\n ----------\n A - float\n Rotational constant for the A principal axis, in MHz.\n B - float\n Rotational constant for the B principal axis, in MHz.\n T - float\n Temperature in Kelvin\n sigma - int\n Rotational level degeneracy; i.e. spin statistics\n C - float, optional\n Rotational constant for the C principal axis, in MHz. 
Defaults to None, which will reduce to the prolate\n top case.\n\n Returns\n -------\n Q - float\n Partition function for the molecule at temperature T\n \"\"\"\n if C is None:\n # For a symmetric top, B = C\n C = B\n Q = (5.34e6 / sigma) * (T**3. / (A * B * C))**0.5\n return Q\n\n\ndef einsteinA(S, frequency):\n # Prefactor is given in the PGopher Intensity formulae\n # http://pgopher.chm.bris.ac.uk/Help/intensityformulae.htm\n # Units of the prefactor are s^-1 MHz^-3 D^-2\n # Units of Einstein A coefficient should be in s^-1\n prefactor = 1.163965505e-20\n return prefactor * frequency**3. * S\n\n\ndef calc_einstein(str_filepath):\n \"\"\" High-level function for calculating Einstein A\n coefficients for a given .str file output from\n SPCAT.\n \"\"\"\n str_df = parse_str(str_filepath)\n # Calculate the transition moment dipole from the\n # square of the str output\n str_df[\"TDM\"] = str_df[\"RTDM\"]**2.\n # Calculate the Einstein A coefficients in units\n # of per second\n str_df[\"Einstein A\"] = einsteinA(\n str_df[\"TDM\"],\n str_df[\"Frequency\"]\n )\n # Sort the dataframe by ascending frequency\n str_df = str_df.sort_values([\"Frequency\"], ascending=True)\n return str_df\n",
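A quick numerical sanity check of the helpers in this file can be wired together without installing pyspectools itself. The sketch below is illustrative only: KBCM and mhz2cm are assumed stand-ins for pyspectools.units.kbcm and MHz2cm, the CO-like rotational constant and the catalog intensity of -5.0 are made-up inputs, and, as in einsteinA above, no division by the upper-state degeneracy is applied.

    import numpy as np

    KBCM = 0.695039  # Boltzmann constant in cm^-1 K^-1 (stand-in for pyspectools.units.kbcm)

    def mhz2cm(frequency):
        # convert a frequency in MHz to wavenumbers (stand-in for MHz2cm)
        return frequency / 29979.2458

    B = 57635.968                     # hypothetical CO-like rotational constant, MHz
    freq = 2.0 * B                    # rigid-rotor J = 1 <- 0 transition frequency, MHz
    T = 300.0
    Q = 2.0837e4 * (T / B)            # approx_Q_linear: ~108 for these numbers
    # I2S-style conversion for an assumed catalog intensity of -5.0 (log10, nm^2 MHz)
    E_lower = 0.0                     # J = 0 lower-state energy, cm^-1
    E_upper = mhz2cm(freq) + E_lower  # calc_E_upper
    num = 10.0 ** -5.0 * Q
    den = 4.16231e-5 * freq * (np.exp(-E_lower / (KBCM * T)) - np.exp(-E_upper / (KBCM * T)))
    S = num / den                     # intrinsic linestrength, ~0.012 for these inputs
    A = 1.163965505e-20 * freq ** 3.0 * S  # einsteinA, in s^-1
    print(Q, S, A)

Running this gives physically plausible magnitudes for a strongly polar diatomic, which is a useful smoke test when modifying any of the prefactors above.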
"\"\"\"\ntest_assignment.py\n\nPyTests to check the functionality of the `assignment` module\nin `pyspectools.spectra`.\n\nThese tests use a pre-computed spectrum with known parameters,\nand performs a series of assertions to make sure each step is\nmaking sense.\n\"\"\"\n\nimport os\nimport shutil\nfrom tempfile import TemporaryDirectory\nfrom pathlib import Path\n\nimport pytest\nimport pandas as pd\nimport numpy as np\nimport yaml\n\nfrom pyspectools.spectra import assignment\n\n\ndef test_spectrum_load():\n \"\"\"\n This test makes sure that the spectrum parsing is done correctly.\n \n A Pandas DataFrame is read in \"manually\", and the same data is\n read in by `AssignmentSession.from_ascii`. The test then looks\n to make sure the number of elements (length of the dataframes)\n is equal, and then a numerical check to make sure all the rows\n are equal.\n \"\"\"\n test_root = os.getcwd()\n # get dictionary with all of the simulated peak data\n with open(\"spectrum-parameters.yml\", \"r\") as read_file:\n param_dict = yaml.load(read_file)\n with TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n # copy the spectrum into a temp directory, that way we don't\n # have to worry about cleaning up\n shutil.copy2(\"test-spectrum.csv\", tmppath.joinpath(\"test-spectrum.csv\"))\n spec_df = pd.read_csv(\"test-spectrum.csv\")\n os.chdir(tmppath)\n # Create an AssignmentSession\n session = assignment.AssignmentSession.from_ascii(\n \"test-spectrum.csv\",\n 0,\n skiprows=1,\n col_names=[\"Frequency\", \"Intensity\"],\n delimiter=\",\",\n verbose=False\n )\n # Check that the frequency and intensity columns are read correctly\n assert len(spec_df) == len(session.data)\n # this checks the whole dataframe is numerically equivalent\n assert spec_df.equals(session.data)\n os.chdir(test_root)\n\n\ndef test_spectrum_peaks():\n \"\"\"\n This test checks whether or not the peak finding algorithm\n is functioning properly by comparing the number of actual\n peaks with the number found using `AssignmentSession.find_peaks`.\n \n There is a tolerance value used here, which basically gives some\n leeway to the peak finding which is seldom perfect. 
As long as\n the discrepancy is lower than `TOLERANCE`, then the test will pass.\n \"\"\"\n test_root = os.getcwd()\n # The peak finding algorithm never finds every peak perfectly.\n # This TOLERANCE variable sets the minimum number to match\n TOLERANCE = 3\n # This is the minimum average deviation between the true\n # frequencies and the peak finding ones\n FREQ_TOLERANCE = 0.005\n # get dictionary with all of the simulated peak data\n with open(\"spectrum-parameters.yml\", \"r\") as read_file:\n param_dict = yaml.load(read_file)\n with TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n # copy the spectrum into a temp directory, that way we don't\n # have to worry about cleaning up\n shutil.copy2(\"test-spectrum.csv\", tmppath.joinpath(\"test-spectrum.csv\"))\n os.chdir(tmppath)\n # Create an AssignmentSession\n session = assignment.AssignmentSession.from_ascii(\n \"test-spectrum.csv\",\n 0,\n skiprows=1,\n col_names=[\"Frequency\", \"Intensity\"],\n delimiter=\",\",\n )\n # Run the peak finding, with the intensity threshold set close\n # to the floor - I know where the noise should be :)\n peaks_df = session.find_peaks(2.0, als=False)\n # Make sure enough peaks are found; it doesn't have to be perfect\n assert abs(len(peaks_df) - param_dict[\"NPEAKS\"]) <= TOLERANCE\n os.chdir(test_root)\n\n\ndef test_spectrum_linelist():\n \"\"\"\n This test checks to make sure the `LineList` assignment process\n is working.\n \n The first test checks whether or not every line is\n assigned in the spectrum - there should be no more\n U-lines after this process is done, although there\n is a `TOLERANCE` specified that must be greater or\n equal to the number of U-lines remaining.\n \n The second test compares the frequency of every\n assignment against the \"catalog\" or actual frequency;\n the mean unsigned difference between must be equal to\n or below `FREQ_TOLERANCE`.\n \"\"\"\n test_root = os.getcwd()\n # This TOLERANCE variable sets the minimum number of\n # assignments to match\n TOLERANCE = 3\n FREQ_TOLERANCE = 0.002\n # get dictionary with all of the simulated peak data\n with open(\"spectrum-parameters.yml\", \"r\") as read_file:\n param_dict = yaml.load(read_file)\n sub_dict = {key: param_dict[key] for key in [\"AMPS\", \"CENTERS\", \"ASSIGNMENTS\"]}\n assignment_df = pd.DataFrame(sub_dict)\n with TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n # copy the spectrum into a temp directory, that way we don't\n # have to worry about cleaning up\n shutil.copy2(\"test-spectrum.csv\", tmppath.joinpath(\"test-spectrum.csv\"))\n os.chdir(tmppath)\n # Create an AssignmentSession\n session = assignment.AssignmentSession.from_ascii(\n \"test-spectrum.csv\",\n 0,\n skiprows=1,\n col_names=[\"Frequency\", \"Intensity\"],\n delimiter=\",\",\n )\n # Run the peak finding, with the intensity threshold set close\n # to the floor - I know where the noise should be :)\n peaks_df = session.find_peaks(2.0, als=False)\n # This is the number of peaks found; assignment comparisons\n # should be made with this value\n NFOUND = len(peaks_df)\n # Generate LineList objects for each peak group\n for group in assignment_df[\"ASSIGNMENTS\"].unique():\n subgroup = assignment_df.loc[assignment_df[\"ASSIGNMENTS\"] == group]\n linelist = assignment.LineList.from_list(\n f\"Group{group}\", subgroup[\"CENTERS\"].tolist()\n )\n # PRIOR = len(session.line_list[\"Peaks\"].get_ulines())\n session.process_linelist(linelist=linelist)\n assignments = session.line_lists[\"Peaks\"].get_assignments()\n POST = 
len(assignments)\n print(POST)\n # Compare the number of assignments made vs. the number of peaks\n # Every peak should be assigned\n assert abs(POST - NFOUND) <= TOLERANCE\n # Check that the frequency deviation between catalog and matched\n # is below a threshold\n peak_freqs = np.array([getattr(line, \"frequency\") for line in assignments])\n actual_freqs = np.array([getattr(line, \"catalog_frequency\") for line in assignments])\n AVG_DEVIATION = np.mean(np.abs(peak_freqs - actual_freqs))\n assert AVG_DEVIATION <= FREQ_TOLERANCE\n os.chdir(test_root)\n\n"
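All three tests above repeat the same bookkeeping: copy the input files into a TemporaryDirectory, chdir into it, and chdir back at the end. A pytest fixture could factor that out; the sketch below is a suggestion rather than part of the source, and the fixture name spectrum_workdir is made up.

    import os
    import shutil
    from pathlib import Path
    from tempfile import TemporaryDirectory

    import pytest

    @pytest.fixture
    def spectrum_workdir():
        # Copy the test inputs into a throwaway directory, chdir there for
        # the duration of the test, and restore the original cwd afterwards.
        root = os.getcwd()
        with TemporaryDirectory() as tmpdir:
            tmp = Path(tmpdir)
            for fname in ("test-spectrum.csv", "spectrum-parameters.yml"):
                shutil.copy2(fname, tmp / fname)
            os.chdir(tmp)
            try:
                yield tmp
            finally:
                os.chdir(root)

Each test would then take spectrum_workdir as an argument and drop its own TemporaryDirectory/os.chdir handling; the try/finally also guarantees the chdir is undone even when an assertion fails, which the current tests do not.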
] | [
[
"numpy.exp",
"pandas.read_fwf"
],
[
"pandas.read_csv",
"numpy.abs",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
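The dictionaries above record which pandas releases each snippet is believed to run against. If code like this is vendored, a lightweight runtime guard in the same spirit can fail fast on an untested release; this is a minimal sketch, and the SUPPORTED set below is illustrative, taken from the second list.

    import pandas as pd

    def major_minor(version):
        # reduce e.g. "1.5.3" to (1, 5), ignoring the patch component
        return tuple(int(part) for part in version.split(".")[:2])

    SUPPORTED = {(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (2, 0)}  # illustrative
    if major_minor(pd.__version__) not in SUPPORTED:
        raise RuntimeError(
            "pandas %s has not been tested with this code" % pd.__version__
        )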
JimmyRetza/Theano | [
"1985d4c73fabd5f08f54b922e73a9306e09c77a5",
"72d83bce0d547d54ab3513bcba35c166979f7a6f",
"1985d4c73fabd5f08f54b922e73a9306e09c77a5",
"72d83bce0d547d54ab3513bcba35c166979f7a6f",
"72d83bce0d547d54ab3513bcba35c166979f7a6f",
"72d83bce0d547d54ab3513bcba35c166979f7a6f"
] | [
"theano/tensor/subtensor.py",
"theano/gpuarray/tests/test_basic_ops.py",
"theano/gof/tests/test_graph_opt_caching.py",
"theano/tensor/nnet/tests/test_bn.py",
"theano/tensor/nnet/tests/speed_test_conv.py",
"theano/gpuarray/opt.py"
] | [
"from __future__ import absolute_import, print_function, division\nimport sys\nfrom textwrap import dedent\nimport collections\nimport warnings\nimport logging\n\nimport numpy as np\nfrom six import integer_types\nfrom six.moves import xrange\n\nimport theano\nfrom theano.compat import izip\nfrom theano.gradient import DisconnectedType\nfrom theano import gof\nfrom theano.gof import Apply, hashtype, Op, Type, MethodNotDefined, ParamsType\nfrom theano.printing import pprint\nfrom theano import scalar as scal\nfrom theano.tensor.basic import alloc\nfrom theano.tensor.basic import (addbroadcast, clip, get_scalar_constant_value,\n TensorType, NotScalarConstantError)\nfrom theano.tensor.elemwise import DimShuffle\nfrom theano.tensor.type_other import NoneConst, SliceType, NoneTypeT, make_slice\nfrom theano import config\n\nfrom .inc_code import inc_code\n\n_logger = logging.getLogger(\"theano.tensor.subtensor\")\n\n# Do a lazy import of the sparse module\nsparse_module_ref = None\n\n\nclass AdvancedIndexingError(TypeError):\n \"\"\"\n Raised when Subtensor is asked to perform advanced indexing.\n\n \"\"\"\n pass\n\n\nclass AdvancedBooleanIndexingError(TypeError):\n \"\"\"\n Raised when Subtensor is asked to perform advanced indexing with boolean masks.\n\n \"\"\"\n pass\n\n\n##########\n# Helpful functions to deal with Subtensor and IncSubtensor\n##########\n\ndef make_constant(args):\n \"\"\"\n Convert python litterals to theano constants in subtensor arguments.\n\n \"\"\"\n def conv(a):\n if a is None:\n return a\n elif isinstance(a, slice):\n return slice(conv(a.start),\n conv(a.stop),\n conv(a.step))\n elif isinstance(a, (integer_types, np.integer)):\n return scal.ScalarConstant(scal.int64, a)\n else:\n return a\n return tuple(map(conv, args))\n\n\ndef get_idx_list(inputs, idx_list, get_count=False):\n \"\"\"\n Given a list of inputs to the subtensor and its idx_list reorders\n the inputs according to the idx list to get the right values.\n\n If get_counts=True, instead returns the number of inputs consumed\n during this process.\n\n \"\"\"\n\n # The number of indices\n n = len(inputs) - 1\n\n # The subtensor (or idx_list) does not depend on the inputs.\n if n == 0:\n return tuple(idx_list)\n indices = list(reversed(list(inputs[1:])))\n\n # General case\n def convert(entry):\n if isinstance(entry, gof.Type):\n return indices.pop()\n elif isinstance(entry, slice):\n return slice(convert(entry.start),\n convert(entry.stop),\n convert(entry.step))\n else:\n return entry\n cdata = tuple(map(convert, idx_list))\n if get_count:\n return n - len(indices)\n else:\n return cdata\n\n\ndef get_canonical_form_slice(theslice, length):\n \"\"\"\n Given a slice [start:stop:step] transform it into a canonical form\n that respects the conventions imposed by python and numpy.\n\n In a canonical form a slice is represented by a canonical form slice,\n in which 0 <= start <= stop <= length and step > 0, and a flag which says\n if the resulting set of numbers needs to be reversed or not.\n\n \"\"\"\n from theano.tensor import switch, lt, ge, sgn\n if isinstance(theslice, slice):\n\n def analyze(x):\n try:\n x_constant = get_scalar_constant_value(x)\n is_constant = True\n except theano.tensor.NotScalarConstantError:\n x_constant = theano.tensor.extract_constant(x)\n is_constant = False\n return x_constant, is_constant\n\n start, is_start_constant = analyze(theslice.start)\n stop, is_stop_constant = analyze(theslice.stop)\n step, is_step_constant = analyze(theslice.step)\n length, is_length_constant = 
analyze(length)\n\n if step is None:\n step = 1\n is_step_constant = True\n\n # First handle the easier and common case where `step` is 1 and\n # either `start` or `stop` is a range boundary. More specializations\n # could be added later. This makes the resulting graph smaller than\n # in the generic case below.\n if step == 1:\n is_start_0 = (\n start is None or start == 0 or\n (is_start_constant and is_length_constant and\n start < 0 and start + length <= 0))\n is_stop_length = (\n stop is None or stop in [length, sys.maxsize] or\n (is_stop_constant and is_length_constant and\n stop >= length))\n if is_start_0:\n # 0:stop:1\n if is_stop_length:\n # Full slice.\n return slice(0, length, 1), 1\n if is_stop_constant and stop >= 0:\n return (slice(0, switch(lt(stop, length), stop, length),\n 1), 1)\n stop_plus_len = stop + length\n stop = switch(\n lt(stop, 0),\n # stop < 0\n switch(\n lt(stop_plus_len, 0),\n # stop + len < 0\n 0,\n # stop + len >= 0\n stop_plus_len),\n # stop >= 0: use min(stop, length)\n switch(lt(stop, length), stop, length))\n return slice(0, stop, 1), 1\n elif is_stop_length:\n # start:length:1\n if is_start_constant and start >= 0:\n return slice(switch(lt(start, length), start, length),\n length, 1), 1\n start_plus_len = start + length\n start = switch(\n lt(start, 0),\n # start < 0\n switch(\n lt(start_plus_len, 0),\n # start + len < 0\n 0,\n # start + len >= 0\n start_plus_len),\n # start >= 0: use min(start, length)\n switch(lt(start, length), start, length))\n return slice(start, length, 1), 1\n\n # This is the generic case.\n\n if is_step_constant:\n # When we know the sign of `step`, the graph can be made simpler.\n assert step != 0\n if step > 0:\n def switch_neg_step(a, b):\n return b\n abs_step = step\n sgn_step = 1\n else:\n def switch_neg_step(a, b):\n return a\n abs_step = -step\n sgn_step = -1\n else:\n is_step_neg = lt(step, 0)\n\n def switch_neg_step(a, b):\n return switch(is_step_neg, a, b)\n abs_step = abs(step)\n sgn_step = sgn(step)\n\n defstart = switch_neg_step(length - 1, 0)\n defstop = switch_neg_step(-1, length)\n if start is None:\n start = defstart\n else:\n start = switch(lt(start, 0), start + length, start)\n start = switch(lt(start, 0), switch_neg_step(-1, 0), start)\n start = switch(ge(start, length),\n switch_neg_step(length - 1, length),\n start)\n if stop is None or stop == sys.maxsize:\n # The special \"maxsize\" case is probably not needed here,\n # as slices containing maxsize are not generated by\n # __getslice__ anymore.\n stop = defstop\n else:\n stop = switch(lt(stop, 0), stop + length, stop)\n stop = switch(lt(stop, 0), -1, stop)\n stop = switch(ge(stop, length), length, stop)\n\n nw_stop = switch_neg_step(start + 1, stop)\n slice_len = (start - stop - 1) // abs_step + 1\n slice_len = switch(lt(slice_len, 0), 0, slice_len)\n neg_start = nw_stop - (slice_len - 1) * abs_step - 1\n neg_start = switch(lt(neg_start, 0), (nw_stop - 1), neg_start)\n nw_start = switch_neg_step(neg_start, start)\n nw_start = switch(lt(nw_start, 0), 0, nw_start)\n nw_stop = switch(lt(nw_stop, 0), 0, nw_stop)\n # Ensure start <= stop.\n nw_start = switch(lt(nw_start, nw_stop), nw_start, nw_stop)\n\n nw_step = abs_step\n if step != 1:\n reverse = sgn_step\n return slice(nw_start, nw_stop, nw_step), reverse\n else:\n return slice(nw_start, nw_stop, nw_step), 1\n else:\n value = theano.tensor.extract_constant(theslice)\n value = switch(lt(value, 0), (value + length), value)\n\n return value, 1\n\n\nclass Subtensor(Op):\n \"\"\"\n Return a subtensor view.\n\n 
The inputs array is the tensor x, followed by scalar integer types.\n TODO: WRITEME: how are the scalar integer variables formatted?\n\n This class uses a relatively complex internal representation of the inputs\n to remember how the input tensor x should be sliced.\n\n idx_list: instance variable TODO: WRITEME: is this a list or a tuple?\n (old docstring gives two conflicting\n descriptions)\n elements are either integers, theano scalar types, or slices.\n one element per \"explicitly named dimension\"\n TODO: WRITEME: what is an \"explicitly named dimension\" ?\n\n if integer:\n indexes into the inputs array\n if slice:\n start/stop/step members of each slice are integer indices\n into the inputs array or None\n integer indices be actual integers or theano scalar types\n\n Note that the idx_list defines the Op, so two Subtensor instances are\n considered to be different Ops if they have different idx_list fields.\n This means that the entries in it are theano Types, not theano Variables.\n\n @todo: add support for advanced tensor indexing (in Subtensor_dx too).\n\n \"\"\"\n e_subslice = 'nested slicing is not supported'\n e_indextype = \"Invalid index type or slice for Subtensor\"\n debug = 0\n check_input = False\n view_map = {0: [0]}\n _f16_ok = True\n __props__ = (\"idx_list\",)\n\n @staticmethod\n def collapse(idxs, cond):\n \"\"\"\n Parameters\n ----------\n idxs : a list of indices or slices.\n cond : a callable that returns a bool\n\n Returns\n -------\n list\n idxs, with the slices flattened out into a list.\n If cond is true for an entry, does not flatten it.\n\n \"\"\"\n ret = []\n\n def helper(entry):\n if cond(entry):\n ret.append(entry)\n elif isinstance(entry, slice):\n helper(entry.start)\n helper(entry.stop)\n helper(entry.step)\n\n for idx in idxs:\n helper(idx)\n\n return ret\n\n @staticmethod\n def convert(entry, slice_ok=True):\n \"\"\"\n Change references to Variables into references to Types.\n\n The \"idx_list\" field is unique to each Subtensor instance.\n It is not unique to each Apply node, so it should not refer to\n specific Variables.\n TODO: WRITEME: This method also accepts \"entry\" already being a Type;\n when would that happen?\n\n \"\"\"\n invalid_scal_types = [scal.float64, scal.float32, scal.float16]\n scal_types = [scal.int64, scal.int32, scal.int16, scal.int8]\n tensor_types = [theano.tensor.lscalar, theano.tensor.iscalar,\n theano.tensor.wscalar, theano.tensor.bscalar]\n invalid_tensor_types = [theano.tensor.fscalar, theano.tensor.dscalar,\n theano.tensor.cscalar, theano.tensor.zscalar]\n\n if (isinstance(entry, (np.ndarray, theano.tensor.Variable)) and\n hasattr(entry, 'dtype') and entry.dtype == 'bool'):\n raise AdvancedBooleanIndexingError(Subtensor.e_indextype, entry)\n\n if (isinstance(entry, gof.Variable) and\n (entry.type in invalid_scal_types or\n entry.type in invalid_tensor_types)):\n raise TypeError(\"Expected an integer\")\n\n if isinstance(entry, gof.Variable) and entry.type in scal_types:\n return entry.type\n elif isinstance(entry, gof.Type) and entry in scal_types:\n return entry\n\n if (isinstance(entry, gof.Variable) and\n entry.type in tensor_types and\n np.all(entry.type.broadcastable)):\n return scal.get_scalar_type(entry.type.dtype)\n elif (isinstance(entry, gof.Type) and\n entry in tensor_types and\n np.all(entry.broadcastable)):\n return scal.get_scalar_type(entry.dtype)\n elif slice_ok and isinstance(entry, slice):\n a = entry.start\n b = entry.stop\n c = entry.step\n\n if a is not None:\n slice_a = Subtensor.convert(a, 
False)\n else:\n slice_a = None\n\n if b is not None and b != sys.maxsize:\n # The special \"maxsize\" case is probably not needed here,\n # as slices containing maxsize are not generated by\n # __getslice__ anymore.\n slice_b = Subtensor.convert(b, False)\n else:\n slice_b = None\n\n if c is not None:\n slice_c = Subtensor.convert(c, False)\n else:\n slice_c = None\n\n return slice(slice_a, slice_b, slice_c)\n elif isinstance(entry, (integer_types, np.integer)):\n # Disallow the use of python scalars in idx_list\n raise TypeError(\"Python scalar in idx_list.\"\n \"Please report this error to theano-dev.\")\n else:\n raise AdvancedIndexingError(Subtensor.e_indextype, entry)\n\n def get_constant_idx(self, inputs, allow_partial=False,\n only_process_constants=False, elemwise=True):\n \"\"\"\n Return the idx_list with constant inputs replaced by their\n python scalar equivalent.\n May raise `theano.tensor.NotScalarConstantError` if the idx contains\n non-constant entries.\n\n If allow_partial is True, then entries that are not constant will\n stay as their input variable rather than raising an exception.\n\n None entries are always left as-is.\n\n Parameters\n ----------\n only_process_constants\n If True, we only attempt to obtain the value of an index/slice if\n it's directly constant and don't try to dig through dimshuffles,\n fills, allocs, and other to figure out its value.\n\n Examples\n --------\n Example usage where v, a are appropriately typed theano variables :\n >>> b = a[v, 1:3]\n >>> b.owner.op.idx_list\n (Scalar(int64), slice(Scalar(int64), Scalar(int64), None))\n >>> b.owner.op.get_constant_idx(b.owner.inputs, allow_partial=True)\n [v, slice(1, 3, None)]\n >>> b.owner.op.get_constant_idx(b.owner.inputs)\n NotScalarConstantError: v\n\n \"\"\"\n real_idx = get_idx_list(inputs, self.idx_list)\n\n def conv(val):\n if val is None:\n return None\n elif isinstance(val, slice):\n return slice(conv(val.start),\n conv(val.stop),\n conv(val.step))\n else:\n try:\n return get_scalar_constant_value(\n val,\n only_process_constants=only_process_constants,\n elemwise=elemwise)\n except theano.tensor.NotScalarConstantError:\n if allow_partial:\n return val\n else:\n raise\n\n return list(map(conv, real_idx))\n\n def __init__(self, idx_list):\n self.idx_list = tuple(map(self.convert, idx_list))\n\n @staticmethod\n def my_as_scalar(a):\n # Since scal.as_scalar does not know about tensor types (it would\n # create a circular import) , this method converts either a\n # TensorVariable or a ScalarVariable to a scalar.\n if isinstance(a, gof.Variable) and isinstance(a.type, TensorType):\n return theano.tensor.scalar_from_tensor(a)\n else:\n return scal.as_scalar(a)\n\n def make_node(self, x, *inputs):\n \"\"\"\n Parameters\n ----------\n x\n The tensor to take a subtensor of.\n inputs\n A list of theano Scalars.\n\n \"\"\"\n x = theano.tensor.as_tensor_variable(x)\n inputs = tuple(self.my_as_scalar(a) for a in inputs)\n\n idx_list = list(self.idx_list)\n if len(idx_list) > x.type.ndim:\n raise IndexError('too many indices for array')\n\n input_types = Subtensor.collapse(idx_list,\n lambda entry: isinstance(entry,\n gof.Type))\n if len(inputs) != len(input_types):\n raise IndexError(\n \"Not enough inputs to fill in the Subtensor template.\",\n inputs, idx_list)\n for input, expected_type in izip(inputs, input_types):\n if input.type != expected_type:\n raise TypeError(\n \"Wrong type for Subtensor template. 
Expected %s, got %s.\"\n % (input.type, expected_type))\n\n # infer the broadcasting pattern\n padded = (self.get_constant_idx((None,) + inputs, allow_partial=True) +\n [slice(None, None, None)] * (x.type.ndim - len(idx_list)))\n broadcastable = []\n for i, (p, bc) in enumerate(izip(padded, x.type.broadcastable)):\n if isinstance(p, slice):\n if bc:\n start = p.start\n try:\n start = get_scalar_constant_value(start)\n except NotScalarConstantError:\n pass\n if start is None or start == 0:\n start = p.start\n if start is None:\n start = 0\n if (p.stop is None or\n (isinstance(p.stop, (integer_types, np.integer,\n np.ndarray)) and\n p.stop > start)):\n broadcastable.append(True)\n continue\n\n broadcastable.append(False)\n\n return gof.Apply(self,\n (x, ) + inputs,\n [theano.tensor.tensor(dtype=x.type.dtype,\n broadcastable=broadcastable)])\n\n def perform(self, node, inputs, out_):\n out, = out_\n x = inputs[0]\n\n cdata = get_idx_list(inputs, self.idx_list)\n if len(cdata) == 1:\n cdata = cdata[0]\n\n out[0] = np.asarray(x.__getitem__(cdata))\n\n def infer_shape(self, node, shapes):\n xshp = shapes[0]\n assert len(xshp) == node.inputs[0].ndim\n outshp = []\n actual_idx_list = list(get_idx_list(node.inputs, self.idx_list))\n padded = (actual_idx_list +\n [slice(None, None, None)] * (len(xshp) - len(self.idx_list)))\n i = 0\n for idx, xl in izip(padded, xshp):\n if isinstance(idx, slice):\n # If it is the default (None, None, None) slice, or a variant,\n # the shape will be xl\n if ((idx.start in [None, 0]) and\n (idx.stop in [None, sys.maxsize]) and\n (idx.step is None or idx.step == 1)):\n outshp.append(xl)\n else:\n cnf = get_canonical_form_slice(idx, xl)[0]\n if cnf.step == 1:\n length = cnf.stop - cnf.start\n else:\n length = (cnf.stop - cnf.start - 1) // cnf.step + 1\n outshp.append(length)\n i += 1\n else:\n # That dimension is dropped\n pass\n assert i == node.outputs[0].ndim\n assert len(outshp) == node.outputs[0].ndim\n return [outshp]\n\n def grad(self, inputs, grads):\n gz, = grads\n x = inputs[0]\n rest = inputs[1:]\n if x.dtype in theano.tensor.discrete_dtypes:\n first = x.zeros_like().astype(theano.config.floatX)\n else:\n # For best optimization, we let this as an inc.\n # This allow the opt local_IncSubtensor_serialize to apply first.\n # We have an optimization that will convert this to a\n # set subtensor here at:\n # theano/tensor/opt.py:local_incsubtensor_of_zeros_to_setsubtensor()\n first = IncSubtensor(self.idx_list)(x.zeros_like(),\n gz, *rest)\n return ([first] + [DisconnectedType()()] * len(rest))\n\n def connection_pattern(self, node):\n\n rval = [[True]]\n\n for ipt in node.inputs[1:]:\n rval.append([False])\n\n return rval\n\n def __hash__(self):\n # TODO: optimize by cache this hash value\n msg = []\n for entry in self.idx_list:\n if isinstance(entry, slice):\n msg += [(entry.start, entry.stop, entry.step)]\n else:\n msg += [entry]\n\n idx_list = tuple(msg)\n # backport\n # idx_list = tuple((entry.start, entry.stop, entry.step)\n # if isinstance(entry, slice)\n # else entry\n # for entry in self.idx_list)\n return hash(idx_list)\n\n @staticmethod\n def str_from_slice(entry):\n msg = []\n for x in [entry.start, entry.stop, entry.step]:\n if x is None:\n msg.append(\"\")\n else:\n msg.append(str(x))\n return \":\".join(msg)\n\n def __str__(self):\n indices = []\n for entry in self.idx_list:\n if isinstance(entry, slice):\n indices.append(self.str_from_slice(entry))\n else:\n indices.append(str(entry))\n return \"%s{%s}\" % (self.__class__.__name__, \", 
\".join(indices))\n\n @staticmethod\n def default_helper_c_code_args():\n \"\"\"\n Returns a dictionary of default arguments to helper_c_code.\n\n \"\"\"\n\n return {\"c_prefix\": \"PyArray\",\n \"strides_mul\": 1}\n\n @staticmethod\n def helper_c_code(node, name, inputs, outputs, sub, idx_list, view_ndim,\n c_prefix=None,\n strides_mul=None):\n \"\"\"\n The parameters c_prefix are there to allow reusing this\n function on PyArray and GpuArray object.\n\n This fct take as input the x.\n\n \"\"\"\n\n default_args = Subtensor.default_helper_c_code_args()\n\n if strides_mul is None:\n strides_mul = default_args['strides_mul']\n\n if c_prefix is None:\n c_prefix = default_args['c_prefix']\n\n #\n # two arrays are created in C code:\n # is_slice: len == ndim, 0 means int, 1 means slice\n # subtensor_spec: len = n_ints + 3 * n_slices\n #\n fail = sub['fail']\n init_cmds = [] # initialization for subtensor_spec\n is_slice = []\n # TODO: change that, it might lead to unexpected results,\n # see assembla-#767\n NONE_CODE = sys.maxsize - 1\n\n pos = [0, 1] # annoying version of global variable for init_entry\n\n def inc_spec_pos(amt):\n pos[0] += amt\n\n def inc_input_pos(amt):\n pos[1] += amt\n\n def spec_pos():\n return pos[0]\n\n def input_pos():\n return pos[1]\n\n def init_entry(entry, depth=0):\n if isinstance(entry, (np.integer, integer_types)):\n init_cmds.append(\n \"subtensor_spec[%i] = %i;\" % (spec_pos(),\n entry))\n inc_spec_pos(1)\n if depth == 0:\n is_slice.append(0)\n elif isinstance(entry, Type):\n init_cmds.append(\n \"subtensor_spec[%i] = %s;\" % (spec_pos(),\n inputs[input_pos()]))\n inc_spec_pos(1)\n inc_input_pos(1)\n if depth == 0:\n is_slice.append(0)\n elif entry is None:\n init_cmds.append(\n \"subtensor_spec[%i] = %i;\" % (spec_pos(),\n NONE_CODE))\n inc_spec_pos(1)\n if depth == 0:\n is_slice.append(0)\n elif depth == 0 and isinstance(entry, slice):\n init_entry(entry.start, depth + 1)\n init_entry(entry.stop, depth + 1)\n init_entry(entry.step, depth + 1)\n is_slice.append(1)\n else:\n assert 0, entry\n\n for entry in idx_list:\n init_entry(entry)\n # make sure we used all inputs\n assert input_pos() == len(inputs), input_pos()\n assert len(is_slice) <= node.inputs[0].ndim, node.inputs[0].ndim\n\n len_is_slice = len(is_slice)\n\n len_subtensor_spec = spec_pos()\n subensor_spec = \"npy_intp subtensor_spec[%(len_subtensor_spec)s];\" % locals()\n if len_subtensor_spec == 0:\n subensor_spec = \"npy_intp * subtensor_spec = NULL;\"\n\n if is_slice:\n is_slice_init = \"int is_slice[] = {\" + \",\".join([str(s) for s in\n is_slice]) + \"};\"\n else:\n is_slice_init = \"int* is_slice = NULL;\"\n subtensor_init = \"\\n\".join(init_cmds)\n\n x, = inputs[:1]\n z, = outputs\n\n if view_ndim:\n rval = \"\"\"\n // Argument of the view\n npy_intp xview_dims[%(view_ndim)s];\n npy_intp xview_strides[%(view_ndim)s];\n\n \"\"\" % locals()\n else:\n rval = \"\"\"\n // Argument of the view\n npy_intp* xview_dims = NULL;\n npy_intp* xview_strides = NULL;\n\n \"\"\"\n\n rval += \"\"\"\n // One more argument of the view\n npy_intp xview_offset = 0;\n\n // The subtensor is created by iterating over the dimensions\n // and updating stride, shape, and data pointers\n\n %(is_slice_init)s\n %(subensor_spec)s\n %(subtensor_init)s;\n int spec_pos = 0; //position in subtensor_spec\n int inner_ii = 0; // the current dimension of zview\n int outer_ii = 0; // current dimension of z\n\n\n for (; outer_ii < %(len_is_slice)s; ++outer_ii)\n {\n if (is_slice[outer_ii])\n {\n npy_intp length = 
%(c_prefix)s_DIMS(%(x)s)[outer_ii];\n npy_intp slicelength;\n npy_intp start = subtensor_spec[spec_pos+0];\n npy_intp stop = subtensor_spec[spec_pos+1];\n npy_intp step = subtensor_spec[spec_pos+2];\n if (step == %(NONE_CODE)s) step = 1;\n\n npy_intp defstart = step < 0 ? length-1 : 0;\n npy_intp defstop = step < 0 ? -1 : length;\n\n // logic adapted from\n // PySlice_GetIndicesEx in python source\n if (!step)\n {\n PyErr_Format(PyExc_ValueError,\n \"slice step cannot be zero\");\n %(fail)s;\n }\n\n if (start == %(NONE_CODE)s)\n {\n start = defstart;\n }\n else\n {\n if (start < 0) start += length;\n if (start < 0) start = (step < 0) ? -1 : 0;\n if (start >= length)\n start = (step < 0) ? length - 1 : length;\n }\n\n if (stop == %(NONE_CODE)s)\n {\n stop = defstop;\n }\n else\n {\n if (stop < 0) stop += length;\n if (stop < 0) stop = (step < 0) ? -1 : 0;\n if (stop >= length)\n stop = (step < 0) ? length - 1 : length;\n }\n\n if ((step < 0 && stop >= start)\n || (step > 0 && start >= stop)) {\n slicelength = 0;\n }\n else if (step < 0) {\n slicelength = (stop-start+1)/step+1;\n }\n else {\n slicelength = (stop-start-1)/step+1;\n }\n\n if (0){\n fprintf(stdout, \"start %%zi\\\\n\", start);\n fprintf(stdout, \"stop %%zi\\\\n\", stop);\n fprintf(stdout, \"step %%zi\\\\n\", step);\n fprintf(stdout, \"length %%zi\\\\n\", length);\n fprintf(stdout, \"slicelength %%zi\\\\n\", slicelength);\n }\n\n assert (slicelength <= length);\n\n xview_offset += (npy_intp)%(c_prefix)s_STRIDES(%(x)s)[outer_ii]\n * start * %(strides_mul)s;\n xview_dims[inner_ii] = slicelength;\n xview_strides[inner_ii] = (npy_intp)%(c_prefix)s_STRIDES(%(x)s)[outer_ii] * step;\n\n inner_ii += 1;\n spec_pos += 3;\n }\n else // tuple coord `outer_ii` is an int\n {\n int idx = subtensor_spec[spec_pos];\n if (idx < 0) idx += %(c_prefix)s_DIMS(%(x)s)[outer_ii];\n if (idx >= 0)\n {\n if (idx < %(c_prefix)s_DIMS(%(x)s)[outer_ii])\n {\n xview_offset += (npy_intp)%(c_prefix)s_STRIDES(%(x)s)[outer_ii] * idx *\n %(strides_mul)s;\n }\n else\n {\n PyErr_Format(PyExc_IndexError,\"index out of bounds\");\n %(fail)s;\n }\n }\n else\n {\n PyErr_Format(PyExc_IndexError,\"index out of bounds\");\n %(fail)s;\n }\n\n spec_pos += 1;\n }\n }\n assert (inner_ii <= %(view_ndim)s);\n while (inner_ii < %(view_ndim)s)\n {\n assert (outer_ii < %(c_prefix)s_NDIM(%(x)s));\n xview_dims[inner_ii] = %(c_prefix)s_DIMS(%(x)s)[outer_ii];\n xview_strides[inner_ii] = %(c_prefix)s_STRIDES(%(x)s)[outer_ii];\n\n inner_ii += 1;\n outer_ii += 1;\n }\n \"\"\" % locals()\n # print rval\n return rval\n\n @staticmethod\n def helper_c_code_cache_version():\n return (9,)\n\n def c_code(self, node, name, inputs, outputs, sub): # DEBUG\n if not isinstance(node.inputs[0].type, theano.tensor.TensorType):\n raise NotImplementedError()\n\n x = inputs[0]\n z, = outputs\n ndim = node.inputs[0].ndim\n view_ndim = node.outputs[0].ndim\n fail = sub['fail']\n\n decl = \"PyArrayObject * xview = NULL;\"\n\n checkNDim = \"\"\"\n if (PyArray_NDIM(%(x)s) != %(ndim)s){\n PyErr_SetString(PyExc_ValueError,\n \"Expected %(ndim)s dimensions input\"\n );\n %(fail)s\n }\n \"\"\" % locals()\n\n get_xview = self.helper_c_code(node, name, inputs, outputs, sub,\n self.idx_list, view_ndim)\n build_view = \"\"\"\n //TODO: give this Op a second output so that this view can be cached\n //TODO: alternatively, fix the memory leak on failure\n Py_INCREF(PyArray_DESCR(%(x)s));\n xview = (PyArrayObject*)PyArray_NewFromDescr(\n &PyArray_Type,\n PyArray_DESCR(%(x)s),\n %(view_ndim)s,\n xview_dims,\n xview_strides,\n 
PyArray_BYTES(%(x)s) + xview_offset,\n PyArray_FLAGS(%(x)s),\n NULL);\n assert (PyArray_NDIM(xview) == %(view_ndim)s);\n if (!xview)\n {\n %(fail)s;\n }\n \"\"\" % locals()\n\n finish_view = \"\"\"\n Py_XDECREF(%(z)s);\n Py_INCREF(py_%(x)s);\n PyArray_SetBaseObject(xview, py_%(x)s);\n assert(py_%(x)s == (PyObject*)%(x)s);\n %(z)s = xview;\n \"\"\" % locals()\n\n return (decl + checkNDim +\n \"{\" + get_xview + build_view + finish_view + \"}\")\n\n def c_code_cache_version(self):\n hv = self.helper_c_code_cache_version()\n # If `helper_c_code_cache_version` is not versioned we do not want to\n # have a versioned version of this op's C code.\n if len(hv) == 0:\n return ()\n return (4, hv)\n\n def R_op(self, inputs, eval_points):\n # Subtensor is not differentiable wrt to its indices, therefore we\n # do not even need to consider the eval_points provided for those\n # (they should be defaulted to zeros_like by the global R_op)\n if eval_points[0] is None:\n return [None]\n return self(eval_points[0], *inputs[1:], **dict(return_list=True))\n\n\nclass SubtensorPrinter:\n\n def process(self, r, pstate):\n if r.owner is None:\n raise TypeError(\"Can only print Subtensor.\")\n elif isinstance(r.owner.op, Subtensor):\n idxs = r.owner.op.idx_list\n inputs = list(r.owner.inputs)\n input = inputs.pop(0)\n sidxs = []\n old_precedence = getattr(pstate, 'precedence', None)\n try:\n pstate.precedence = -1000\n\n for entry in idxs:\n if isinstance(entry, integer_types):\n sidxs.append(str(entry))\n elif isinstance(entry, scal.Scalar):\n sidxs.append(pstate.pprinter.process(inputs.pop()))\n elif isinstance(entry, slice):\n if entry.start is None or entry.start == 0:\n msg1 = \"\"\n else:\n msg1 = entry.start\n\n if entry.stop is None or entry.stop == sys.maxsize:\n msg2 = \"\"\n else:\n msg2 = entry.stop\n\n if entry.step is None:\n msg3 = \"\"\n else:\n msg3 = \":%s\" % entry.step\n\n sidxs.append(\"%s:%s%s\" % (msg1, msg2, msg3))\n finally:\n pstate.precedence = old_precedence\n\n try:\n pstate.precedence = 1000\n sub = pstate.pprinter.process(input, pstate)\n finally:\n pstate.precedence = old_precedence\n return \"%s[%s]\" % (sub, \", \".join(sidxs))\n else:\n raise TypeError(\"Can only print Subtensor.\")\n\npprint.assign(Subtensor, SubtensorPrinter())\n\n\ndef set_subtensor(x, y, inplace=False,\n tolerate_inplace_aliasing=False):\n \"\"\"\n Return x with the given subtensor overwritten by y.\n\n Parameters\n ----------\n x\n Symbolic variable for the lvalue of = operation.\n y\n Symbolic variable for the rvalue of = operation.\n tolerate_inplace_aliasing\n See inc_subtensor for documentation.\n\n Examples\n --------\n To replicate the numpy expression \"r[10:] = 5\", type\n\n >>> r = ivector()\n >>> new_r = set_subtensor(r[10:], 5)\n\n \"\"\"\n return inc_subtensor(x, y, inplace, set_instead_of_inc=True,\n tolerate_inplace_aliasing=tolerate_inplace_aliasing)\n\n\ndef inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,\n tolerate_inplace_aliasing=False):\n \"\"\"\n Return x with the given subtensor incremented by y.\n\n Parameters\n ----------\n x\n The symbolic result of a Subtensor operation.\n y\n The amount by which to increment the subtensor in question.\n inplace\n Don't use. Theano will do it when possible.\n set_instead_of_inc\n If True, do a set_subtensor instead.\n tolerate_inplace_aliasing:\n Allow x and y to be views of a single underlying array even while\n working inplace. 
For correct results, x and y must not be overlapping\n views; if they overlap, the result of this Op will generally be\n incorrect. This value has no effect if inplace=False.\n\n Examples\n --------\n To replicate the numpy expression \"r[10:] += 5\", type\n\n >>> r = ivector()\n >>> new_r = inc_subtensor(r[10:], 5)\n\n \"\"\"\n # First of all, y cannot have a higher dimension than x,\n # nor have non-broadcastable dimensions where x is broadcastable.\n\n x = theano.tensor.as_tensor_variable(x)\n y = theano.tensor.as_tensor_variable(y)\n\n if y.ndim > x.ndim:\n raise TypeError((\"Trying to increment a %d-dimensional \"\n \"subtensor with a %d-dimensional value.\") % (x.ndim,\n y.ndim))\n\n dim_offset = x.ndim - y.ndim\n for dim in xrange(y.ndim):\n if (x.broadcastable[dim + dim_offset] and not y.broadcastable[dim]):\n # It is acceptable to try to increment a subtensor with a\n # broadcastable dim with a tensor that is not broadcastable\n # on that dimension. However, its length must then be 1.\n # We insert a Rebroadcast Op to make sure it is the case.\n y = addbroadcast(y, dim)\n\n if not x.owner:\n raise TypeError('x must be the result of a subtensor operation')\n\n # retrieve idx_list from x.owner\n if isinstance(x.owner.op, Subtensor):\n if tolerate_inplace_aliasing:\n destroyhandler_tolerate_aliased = [[0, 1]]\n else:\n destroyhandler_tolerate_aliased = []\n the_op = IncSubtensor(\n x.owner.op.idx_list, inplace, set_instead_of_inc,\n destroyhandler_tolerate_aliased=destroyhandler_tolerate_aliased)\n real_x = x.owner.inputs[0]\n real_idxargs = x.owner.inputs[1:]\n return the_op(real_x, y, *real_idxargs)\n elif isinstance(x.owner.op, AdvancedSubtensor1):\n real_x = x.owner.inputs[0]\n ilist = x.owner.inputs[1]\n the_op = AdvancedIncSubtensor1(inplace,\n set_instead_of_inc=set_instead_of_inc)\n return the_op(real_x, y, ilist)\n elif isinstance(x.owner.op, AdvancedSubtensor):\n real_x = x.owner.inputs[0]\n ilist = x.owner.inputs[1:]\n\n the_op = AdvancedIncSubtensor(inplace,\n set_instead_of_inc=set_instead_of_inc)\n return the_op(real_x, y, *ilist)\n elif isinstance(x.owner.op, AdvancedBooleanSubtensor):\n real_x = x.owner.inputs[0]\n ilist = x.owner.inputs[1:]\n\n the_op = AdvancedBooleanIncSubtensor(inplace,\n set_instead_of_inc=set_instead_of_inc)\n return the_op(real_x, y, *ilist)\n elif isinstance(x.owner.op, DimShuffle):\n inner_x = x.owner.inputs[0]\n # In the dimshuffle case, there are in fact two dimshuffles:\n # one to make the indexed dimension the last one,\n # and one to put it back where it was. 
So, in the case where we have\n # inc_subtensor(x[:,i], y), the graph is actually\n # inc_subtensor((x.T)[i].T, y).\n # We could get all the way to x, and then get rid of the dimshuffles\n # completely, but the problem is that advanced_inc_subtensor1 can only\n # work on the first (outer-most, left-most) dimension of x,\n # just like advanced_subtensor1.\n # So we call advanced_inc_subtensor1(x.T, i, y.T) (as we also need to\n # transpose y if it is not a scalar or a vector), but then we need to\n # return something that has the same shape as x, not as x.T (inner_x).\n # So re-apply the outer dimshuffle on the new inc_subtensor,\n # and return advanced_inc_subtensor1(x.T, i, y.T).T.\n\n # Get the dimshuffle pattern to apply to y.\n x_order = x.owner.op.new_order\n y_order = ['x'] * x.ndim\n for i, v in enumerate(x_order):\n if v != 'x' and (v - dim_offset) >= 0:\n y_order[v - dim_offset] = i\n\n # Warn if this code path would have produced wrong results in the past\n if config.warn.inc_set_subtensor1:\n # Dimshuffle pattern for y that would be equivalent to past code\n prev_y_order = ['x'] * (dim_offset) + list(range(y.ndim))\n if y_order != prev_y_order:\n warnings.warn(\n 'Although your current code is fine, please note that '\n 'earlier versions prior to 0.7 (or this development '\n 'version) may have yielded an incorrect result in '\n 'this `inc_subtensor` or `set_subtensor` operation. '\n 'To remove this warning, you can either set the '\n '`warn.inc_set_subtensor1` config option to `False`, '\n 'or `warn.ignore_bug_before` to at least \"0.7\".',\n stacklevel=2)\n\n inner_incsubtensor = inc_subtensor(\n inner_x,\n y.dimshuffle(y_order),\n inplace=inplace,\n set_instead_of_inc=set_instead_of_inc,\n tolerate_inplace_aliasing=tolerate_inplace_aliasing)\n # The broadcastable pattern of inner_x may not be the same as\n # the one of x, so we have to build a new dimshuffle here,\n # instead of reusing x.owner.op().\n return inner_incsubtensor.dimshuffle(x.owner.op.new_order)\n\n elif isinstance(x.owner.op, theano.tensor.Reshape):\n # This case happens when the indices are not arranged as a vector, but\n # as a higher-dimensional array. This is handled by the subtensor\n # by flattening this list, taking the subtensor, then reshaping the\n # result.\n inner_x = x.owner.inputs[0]\n # Try to apply inc_subtensor on inner_x.\n # If it works, there is no need to reshape, as the inc_subtensor\n # will have the same shape as inner_x, which is what we want.\n # We also explicitly duplicate y to its broadcasted shape\n # before we partially flatten it to inner_x dimension. This is\n # not strictly needed in all cases, but it is easier this way.\n if y.ndim > 0:\n # This if is needed to prevent some useless warning about\n # old code bug.\n expanded_y = alloc(y, *[x.shape[i] for i in xrange(x.ndim)])\n flattened_y = expanded_y.reshape(inner_x.shape)\n else:\n flattened_y = y\n\n # Warn if this code path would have produced wrong results in the past\n if config.warn.inc_set_subtensor1:\n if inner_x.ndim > 1 and sum(y.broadcastable) > 0:\n warnings.warn(\n 'Although your current code is fine, please note that '\n 'earlier versions prior to 0.7 (or this development '\n 'version) may have yielded an incorrect result in '\n 'this `inc_subtensor` or `set_subtensor` operation. 
'\n 'To remove this warning, you can either set the '\n '`warn.inc_set_subtensor1` config option to `False`, '\n 'or `warn.ignore_bug_before` to at least \"0.7\".',\n stacklevel=2)\n\n inner_incsubtensor = inc_subtensor(\n inner_x,\n flattened_y,\n inplace=inplace,\n set_instead_of_inc=set_instead_of_inc,\n tolerate_inplace_aliasing=tolerate_inplace_aliasing)\n return inner_incsubtensor\n else:\n raise TypeError('x must be the result of a subtensor operation')\n\n\nclass IncSubtensor(Op):\n \"\"\"\n Increment a subtensor.\n\n This is like numpy's\n\n x[i,j,k] += y\n\n It is used internally to implement the gradient on SubTensor.\n\n Parameters\n ----------\n set_instead_of_inc\n If True set the subtensor to the value instead of incrementing it by\n that value.\n\n \"\"\"\n\n check_input = False\n __props__ = (\"idx_list\", \"inplace\", \"set_instead_of_inc\")\n\n def __init__(self, idx_list, inplace=False, set_instead_of_inc=False,\n destroyhandler_tolerate_aliased=None):\n if destroyhandler_tolerate_aliased is None:\n destroyhandler_tolerate_aliased = []\n self.idx_list = list(map(Subtensor.convert, idx_list))\n self.inplace = inplace\n if inplace:\n self.destroy_map = {0: [0]}\n self.destroyhandler_tolerate_aliased = list(\n destroyhandler_tolerate_aliased)\n self.set_instead_of_inc = set_instead_of_inc\n\n def __hash__(self):\n msg = []\n for entry in self.idx_list:\n if isinstance(entry, slice):\n msg += [(entry.start, entry.stop, entry.step)]\n else:\n msg += [entry]\n\n idx_list = tuple(msg)\n # backport\n # idx_list = tuple((entry.start, entry.stop, entry.step)\n # if isinstance(entry, slice)\n # else entry\n # for entry in self.idx_list)\n return (hashtype(self) ^ hash(idx_list) ^ hash(self.inplace) ^\n hash(self.set_instead_of_inc))\n\n def __str__(self):\n indices = []\n for entry in self.idx_list:\n if isinstance(entry, slice):\n indices.append(Subtensor.str_from_slice(entry))\n else:\n indices.append(str(entry))\n if self.inplace:\n msg = 'Inplace'\n else:\n msg = ''\n if not self.set_instead_of_inc:\n msg += 'Inc'\n else:\n msg += 'Set'\n return \"%s{%s;%s}\" % (\n self.__class__.__name__,\n msg,\n \", \".join(indices))\n\n def make_node(self, x, y, *inputs):\n \"\"\"\n Parameters\n ----------\n x\n The tensor to increment.\n y\n The value to increment by.\n inputs: TODO WRITEME\n\n \"\"\"\n x, y = map(theano.tensor.as_tensor_variable, [x, y])\n if y.ndim > x.ndim:\n raise ValueError((\"Trying to increment a %d-dimensional \"\n \"subtensor with a %d-dimensional value.\") % (\n x.ndim, y.ndim))\n inputs = tuple(map(Subtensor.my_as_scalar, inputs))\n\n idx_list = list(self.idx_list)\n if len(idx_list) > x.type.ndim:\n raise IndexError('too many indices for array')\n\n input_types = Subtensor.collapse(\n idx_list,\n lambda entry: isinstance(entry, gof.Type))\n if len(inputs) != len(input_types):\n raise IndexError(\n \"Not enough inputs to fill in the Subtensor template.\",\n inputs, idx_list)\n for input, expected_type in izip(inputs, input_types):\n if input.type != expected_type:\n raise TypeError(\n \"Wrong type for Subtensor template. 
Expected %s, got %s.\"\n % (input.type, expected_type))\n\n return gof.Apply(self,\n (x, y) + inputs,\n [x.type()])\n\n def decl_view(self):\n return \"PyArrayObject * zview = NULL;\"\n\n def perform(self, node, inputs, out_):\n out, = out_\n x, y = inputs[:2]\n indices = list(reversed(inputs[2:]))\n\n def convert(entry):\n if isinstance(entry, gof.Type):\n rval = indices.pop()\n if sys.version_info < (2, 5):\n # Before Python 2.5, PySlice_GetIndicesEx requires\n # Python int to be passed.\n rval_ = int(rval)\n if rval_ != rval:\n raise IndexError((\n \"Invalid value for indexing: %s. \"\n \"That value may be too big.\") % rval)\n return rval_\n return rval\n elif isinstance(entry, slice):\n return slice(convert(entry.start),\n convert(entry.stop),\n convert(entry.step))\n else:\n return entry\n\n cdata = tuple(map(convert, self.idx_list))\n if len(cdata) == 1:\n cdata = cdata[0]\n if not self.inplace:\n x = x.copy()\n sub_x = x.__getitem__(cdata)\n if sub_x.shape:\n # we've sliced out an N-D tensor with N > 0\n if not self.set_instead_of_inc:\n sub_x += y\n else:\n # sub_x += -sub_x + y\n x.__setitem__(cdata, y)\n else:\n # scalar case\n if not self.set_instead_of_inc:\n x.__setitem__(cdata, sub_x + y)\n else:\n x.__setitem__(cdata, y)\n out[0] = x\n\n def c_code(self, node, name, inputs, outputs, sub):\n\n # This method delegates much of the work to helper\n # methods. This method implements the main logic\n # but subclasses may override the helper methods\n # to change the particulars, e.g. GpuIncSubtensor\n # turns the view/copy operations on numpy arrays\n # into the same operations on gpu arrays.\n\n self.do_type_checking(node)\n\n if self.inplace: # convert bool to int\n inplace = 1\n else:\n inplace = 0\n x = inputs[0]\n y = inputs[1]\n z, = outputs\n if self.set_instead_of_inc: # convert bool to int\n op_is_set = 1\n else:\n op_is_set = 0\n fail = sub['fail']\n view_ndim = (node.inputs[0].ndim -\n np.sum([not isinstance(idx, slice)\n for idx in self.idx_list]))\n\n copy_of_x = self.copy_of_x(x)\n\n copy_input_if_necessary = \"\"\"\n if (%(inplace)s)\n {\n if (%(x)s != %(z)s)\n {\n Py_XDECREF(%(z)s);\n Py_INCREF(%(x)s);\n %(z)s = %(x)s;\n }\n }\n else\n {\n Py_XDECREF(%(z)s);\n %(z)s = %(copy_of_x)s;\n if (!%(z)s) {\n // Exception already set\n %(fail)s\n }\n }\n \"\"\" % locals()\n\n # get info needed to make zview: a view of %(z)s\n helper_args = self.get_helper_c_code_args()\n\n get_zview = Subtensor.helper_c_code(\n node=node,\n name=name,\n inputs=outputs[:1] + inputs[2:],\n outputs=outputs,\n sub=sub,\n idx_list=self.idx_list,\n view_ndim=view_ndim,\n ** helper_args\n )\n\n # Make a view on the output, as we will write into it.\n alloc_zview = self.make_view_array(z, view_ndim)\n\n build_view = \"\"\"\n //TODO: give this Op a second output so that this view can be cached\n //TODO: alternatively, fix the memory leak on failure\n %(alloc_zview)s;\n if (!zview)\n {\n %(fail)s;\n }\n \"\"\" % locals()\n\n copy_into = self.copy_into(\"zview\", y)\n\n add_to_zview = self.add_to_zview(name, y, fail)\n\n make_modification = \"\"\"\n if (%(op_is_set)s)\n {\n if (%(copy_into)s) // does broadcasting\n {\n Py_DECREF(zview);\n %(fail)s;\n }\n }\n else\n {\n %(add_to_zview)s\n }\n \"\"\" % locals()\n return (self.decl_view() +\n copy_input_if_necessary +\n \"{\" +\n get_zview +\n build_view +\n make_modification +\n \"Py_DECREF(zview);\" +\n \"}\"\n )\n\n def do_type_checking(self, node):\n \"\"\"\n Should raise NotImplementedError if c_code does not support\n the types involved in this 
node.\n\n \"\"\"\n\n if not isinstance(node.inputs[0].type, theano.tensor.TensorType):\n raise NotImplementedError()\n\n def c_code_cache_version(self):\n hv = Subtensor.helper_c_code_cache_version()\n if hv:\n return (3, hv)\n else:\n return ()\n\n def copy_of_x(self, x):\n \"\"\"\n Parameters\n ----------\n x\n A string giving the name of a C variable pointing to an array.\n\n Returns\n -------\n object\n C code expression to make a copy of x.\n\n Base class uses PyArrayObject *, subclasses may override for\n different types of arrays.\n\n \"\"\"\n # Parameters of PyArrary_FromAny are:\n # array\n # dtype: we pass NULL to say any dtype is acceptable, so the existing\n # dtype will be copied\n # min_depth: we pass 0 to have this parameter ignored\n # max_depth: we pass 0 to have this parameter ignored\n # requirements: here we pass NPY_ARRAY_ENSURECOPY to force a copy\n # context: this is almost always NULL, I'm not sure what it's used for\n return \"\"\"(PyArrayObject*)PyArray_FromAny(py_%(x)s, NULL, 0, 0,\n NPY_ARRAY_ENSURECOPY, NULL)\"\"\" % locals()\n\n def make_view_array(self, x, view_ndim):\n \"\"\"\n Parameters\n ----------\n x\n A string identifying an array to be viewed.\n view_ndim\n A string specifying the number of dimensions to have in the view.\n\n This doesn't need to actually set up the view with the right indexing;\n we'll do that manually later.\n\n \"\"\"\n\n return \"\"\"Py_INCREF(PyArray_DESCR(%(x)s));\n zview = (PyArrayObject*)PyArray_NewFromDescr(\n &PyArray_Type,\n PyArray_DESCR(%(x)s),\n %(view_ndim)s,\n xview_dims, //PyArray_DIMS(%(x)s),\n xview_strides, //PyArray_STRIDES(%(x)s),\n PyArray_BYTES(%(x)s) + xview_offset, //PyArray_DATA(%(x)s),\n PyArray_FLAGS(%(x)s),\n NULL);\n \"\"\" % locals()\n\n def get_helper_c_code_args(self):\n \"\"\"\n Return a dictionary of arguments to pass to helper_c_code.\n\n \"\"\"\n return Subtensor.default_helper_c_code_args()\n\n def copy_into(self, view, source):\n \"\"\"\n Parameters\n ----------\n view : string\n C code expression for an array.\n source : string\n C code expression for an array.\n\n Returns\n -------\n object\n C code expression to copy source into view, and 0 on success.\n\n \"\"\"\n return \"\"\"PyArray_CopyInto(%(view)s, %(source)s)\"\"\" % locals()\n\n def add_to_zview(self, name, x, fail):\n \"\"\"\n Return C code to add x to zview. 
Should DECREF zview if the\n add fails.\n\n \"\"\"\n\n return \"\"\"\n PyArrayObject * add_rval = (PyArrayObject*)PyNumber_InPlaceAdd(\n (PyObject*)zview, py_%(x)s);\n if (add_rval)\n {\n assert (PyArray_Check((PyObject*)add_rval));\n assert (PyArray_DATA(add_rval) == PyArray_DATA(zview));\n Py_DECREF(add_rval);\n }\n else\n {\n Py_DECREF(zview);\n %(fail)s;\n }\"\"\" % locals()\n\n def infer_shape(self, node, shapes):\n return [shapes[0]]\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None or eval_points[1] is None:\n return [None]\n # Again we ignore eval points for indices because incsubtensor is\n # not differentiable wrt to those\n return self(eval_points[0], eval_points[1], *inputs[2:],\n **dict(return_list=True))\n\n def connection_pattern(self, node):\n\n rval = [[True], [True]]\n\n for ipt in node.inputs[2:]:\n rval.append([False])\n\n return rval\n\n def grad(self, inputs, grads):\n g_output, = grads\n x, y = inputs[:2]\n idx_list = inputs[2:]\n\n if x.dtype in theano.tensor.discrete_dtypes:\n # The output dtype is the same as x\n gx = x.zeros_like(dtype=theano.config.floatX)\n if y.dtype in theano.tensor.discrete_dtypes:\n gy = y.zeros_like(dtype=theano.config.floatX)\n else:\n gy = y.zeros_like()\n elif x.dtype in theano.tensor.complex_dtypes:\n raise NotImplementedError(\"No support for complex grad yet\")\n else:\n if self.set_instead_of_inc:\n gx = set_subtensor(\n Subtensor(idx_list=self.idx_list)(g_output, *idx_list),\n theano.tensor.zeros_like(y))\n else:\n gx = g_output\n gy = Subtensor(idx_list=self.idx_list)(g_output, *idx_list)\n gy = _sum_grad_over_bcasted_dims(y, gy)\n\n return [gx, gy] + [DisconnectedType()()] * len(idx_list)\n\n\ndef _sum_grad_over_bcasted_dims(x, gx):\n \"\"\"\n Sum of gx over dimensions to reproduce x.broadcastable.\n\n This is useful to sum gradients over certain dimensions when\n x has been broadcasted, and we need to sum the gradient contributions\n over all duplications.\n\n \"\"\"\n if gx.broadcastable != x.broadcastable:\n x_dim_added = gx.ndim - x.ndim\n x_broad = (True,) * x_dim_added + x.broadcastable\n assert sum(gx.broadcastable) < sum(x_broad)\n axis_to_sum = []\n for i in xrange(gx.ndim):\n if gx.broadcastable[i] is False and x_broad[i] is True:\n axis_to_sum.append(i)\n elif (gx.broadcastable[i] is True and\n x_broad[i] is False):\n # This means that Theano was able to infer that\n # gx.shape[i] is 1, so x.shape[i] is 1, but we\n # didn't know it. 
It is fine.\n pass\n else:\n assert gx.broadcastable[i] == x_broad[i]\n gx = gx.sum(axis=axis_to_sum, keepdims=True)\n if gx.ndim != x.ndim:\n assert gx.ndim > x.ndim\n for i in xrange(x_dim_added):\n assert gx.broadcastable[i]\n gx = gx.dimshuffle(*list(range(x_dim_added, gx.ndim)))\n assert gx.broadcastable == x.broadcastable\n return gx\n\n\n#########################\n# Advanced indexing\n#########################\n#\n# Should reproduce numpy's behaviour, see url:\n# docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n\n\nclass AdvancedSubtensor1(Op):\n \"\"\"\n Implement x[ilist] where ilist is a vector of integers.\n\n \"\"\"\n # sparse_grad doesn't go in here since it only affects the output\n # of the grad() method.\n __props__ = ()\n _f16_ok = True\n check_input = False\n\n def __init__(self, sparse_grad=False):\n self.sparse_grad = sparse_grad\n\n def make_node(self, x, ilist):\n x_ = theano.tensor.as_tensor_variable(x)\n ilist_ = theano.tensor.as_tensor_variable(ilist)\n if ilist_.type.dtype not in theano.tensor.integer_dtypes:\n raise TypeError('index must be integers')\n if ilist_.type.ndim != 1:\n raise TypeError('index must be vector')\n if x_.type.ndim == 0:\n raise TypeError('cannot index into a scalar')\n bcast = (ilist_.broadcastable[0],) + x_.broadcastable[1:]\n return Apply(self, [x_, ilist_], [TensorType(dtype=x.dtype,\n broadcastable=bcast)()])\n\n def perform(self, node, inp, out_):\n x, i = inp\n out, = out_\n # Copy always implied by numpy advanced indexing semantic.\n if out[0] is not None and out[0].shape == (len(i),) + x.shape[1:]:\n o = out[0]\n else:\n o = None\n\n # If i.dtype is more precise than numpy.intp (int32 on 32-bit machines,\n # int64 on 64-bit machines), numpy may raise the following error:\n # TypeError: array cannot be safely cast to required type.\n # We need to check if values in i can fit in numpy.intp, because\n # if they don't, that should be an error (no array can have that\n # many elements on a 32-bit arch).\n if i.dtype != np.intp:\n i_ = theano._asarray(i, dtype=np.intp)\n if not np.can_cast(i.dtype, np.intp):\n # Check if there was actually an incorrect conversion\n if np.any(i != i_):\n raise IndexError(\n 'index contains values that are bigger '\n 'than the maximum array size on this system.', i)\n i = i_\n\n out[0] = x.take(i, axis=0, out=o)\n\n def connection_pattern(self, node):\n rval = [[True]]\n\n for ipt in node.inputs[1:]:\n rval.append([False])\n\n return rval\n\n def grad(self, inputs, grads):\n global sparse_module_ref\n x, ilist = inputs\n gz, = grads\n assert len(inputs) == 2\n if self.sparse_grad:\n if x.type.ndim != 2:\n raise TypeError(\n \"AdvancedSubtensor1: you can't take the sparse grad\"\n \" from a tensor with ndim != 2. ndim is \" +\n str(x.type.ndim))\n if sparse_module_ref is None:\n import theano.sparse as sparse_module_ref\n\n rval1 = [sparse_module_ref.construct_sparse_from_list(x, gz,\n ilist)]\n else:\n rval1 = [advanced_inc_subtensor1(x.zeros_like(), gz, ilist)]\n return rval1 + [DisconnectedType()()] * (len(inputs) - 1)\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None]\n return self.make_node(eval_points[0], *inputs[1:]).outputs\n\n def infer_shape(self, node, ishapes):\n x, ilist = ishapes\n return [ilist + x[1:]]\n\n def c_support_code(self):\n # In some versions of numpy, NPY_MIN_INTP is defined as MIN_LONG,\n # which is not defined. 
It should be NPY_MIN_LONG instead in that case.\n return dedent(\"\"\"\\\n #ifndef MIN_LONG\n #define MIN_LONG NPY_MIN_LONG\n #endif\"\"\")\n\n def c_code(self, node, name, input_names, output_names, sub):\n if self.__class__ is not AdvancedSubtensor1:\n raise MethodNotDefined(\n \"c_code defined for AdvancedSubtensor1,\"\n \" not for child class\", type(self))\n a_name, i_name = input_names[0], input_names[1]\n output_name = output_names[0]\n fail = sub['fail']\n return \"\"\"\n PyArrayObject *indices;\n int i_type = PyArray_TYPE(%(i_name)s);\n if (i_type != NPY_INTP) {\n // Cast %(i_name)s to NPY_INTP (expected by PyArray_TakeFrom),\n // if all values fit.\n if (!PyArray_CanCastSafely(i_type, NPY_INTP) &&\n PyArray_SIZE(%(i_name)s) > 0) {\n npy_int64 min_val, max_val;\n PyObject* py_min_val = PyArray_Min(%(i_name)s, NPY_MAXDIMS,\n NULL);\n if (py_min_val == NULL) {\n %(fail)s;\n }\n min_val = PyLong_AsLongLong(py_min_val);\n Py_DECREF(py_min_val);\n if (min_val == -1 && PyErr_Occurred()) {\n %(fail)s;\n }\n PyObject* py_max_val = PyArray_Max(%(i_name)s, NPY_MAXDIMS,\n NULL);\n if (py_max_val == NULL) {\n %(fail)s;\n }\n max_val = PyLong_AsLongLong(py_max_val);\n Py_DECREF(py_max_val);\n if (max_val == -1 && PyErr_Occurred()) {\n %(fail)s;\n }\n if (min_val < NPY_MIN_INTP || max_val > NPY_MAX_INTP) {\n PyErr_SetString(PyExc_IndexError,\n \"Index contains values \"\n \"that are bigger than the maximum array \"\n \"size on this system.\");\n %(fail)s;\n }\n }\n indices = (PyArrayObject*) PyArray_Cast(%(i_name)s, NPY_INTP);\n if (indices == NULL) {\n %(fail)s;\n }\n }\n else {\n indices = %(i_name)s;\n Py_INCREF(indices);\n }\n if (%(output_name)s != NULL) {\n npy_intp nd, i, *shape;\n nd = PyArray_NDIM(%(a_name)s) + PyArray_NDIM(indices) - 1;\n if (PyArray_NDIM(%(output_name)s) != nd) {\n Py_CLEAR(%(output_name)s);\n }\n else {\n shape = PyArray_DIMS(%(output_name)s);\n for (i = 0; i < PyArray_NDIM(indices); i++) {\n if (shape[i] != PyArray_DIMS(indices)[i]) {\n Py_CLEAR(%(output_name)s);\n break;\n }\n }\n if (%(output_name)s != NULL) {\n for (; i < nd; i++) {\n if (shape[i] != PyArray_DIMS(%(a_name)s)[\n i-PyArray_NDIM(indices)+1]) {\n Py_CLEAR(%(output_name)s);\n break;\n }\n }\n }\n }\n }\n %(output_name)s = (PyArrayObject*)PyArray_TakeFrom(\n %(a_name)s, (PyObject*)indices, 0, %(output_name)s, NPY_RAISE);\n Py_DECREF(indices);\n if (%(output_name)s == NULL) %(fail)s;\n \"\"\" % locals()\n\n def c_code_cache_version(self):\n return (0, 1, 2)\n\nadvanced_subtensor1 = AdvancedSubtensor1()\n\n\nclass AdvancedIncSubtensor1(Op):\n \"\"\"\n Increments a subtensor using advanced slicing (list of index).\n\n \"\"\"\n\n __props__ = ('inplace', 'set_instead_of_inc')\n check_input = False\n params_type = ParamsType(inplace=scal.bool,\n set_instead_of_inc=scal.bool)\n\n def __init__(self, inplace=False, set_instead_of_inc=False):\n self.inplace = bool(inplace)\n self.set_instead_of_inc = bool(set_instead_of_inc)\n if inplace:\n self.destroy_map = {0: [0]}\n\n def clone_inplace(self):\n return self.__class__(\n inplace=True,\n set_instead_of_inc=self.set_instead_of_inc)\n\n def __str__(self):\n if self.inplace:\n msg = \"inplace\"\n else:\n msg = \"no_inplace\"\n if self.set_instead_of_inc:\n msg += \",set\"\n else:\n msg += \",inc\"\n\n return self.__class__.__name__ + \"{%s}\" % msg\n\n def make_node(self, x, y, ilist):\n x_ = theano.tensor.as_tensor_variable(x)\n y_ = theano.tensor.as_tensor_variable(y)\n ilist_ = theano.tensor.as_tensor_variable(ilist)\n\n if ilist_.type.dtype not in 
theano.tensor.integer_dtypes:\n raise TypeError('index must be integers')\n if ilist_.type.ndim != 1:\n raise TypeError('index must be vector')\n if x_.type.ndim == 0:\n raise TypeError('cannot index into a scalar')\n if y_.type.ndim > x_.type.ndim:\n if self.set_instead_of_inc:\n opname = 'set'\n else:\n opname = 'increment'\n raise TypeError(\n 'cannot %s x subtensor with ndim=%s'\n ' by y with ndim=%s to x subtensor with ndim=%s ' % (\n opname, x_.type.ndim, y_.type.ndim))\n\n return Apply(self, [x_, y_, ilist_], [x_.type()])\n\n def copy_of_x(self, x):\n \"\"\"\n Parameters\n ----------\n x : string\n Gives the name of a C variable pointing to an array.\n\n Returns\n -------\n object\n C code expression to make a copy of x.\n\n Base class uses PyArrayObject *, subclasses may override for\n different types of arrays.\n\n \"\"\"\n # Parameters of PyArrary_FromAny are:\n # array\n # dtype: we pass NULL to say any dtype is acceptable, so the existing\n # dtype will be copied\n # min_depth: we pass 0 to have this parameter ignored\n # max_depth: we pass 0 to have this parameter ignored\n # requirements: here we pass NPY_ARRAY_ENSURECOPY to force a copy\n # context: this is almost always NULL, I'm not sure what it's used for\n return \"\"\"(PyArrayObject*)PyArray_FromAny(py_%(x)s, NULL, 0, 0,\n NPY_ARRAY_ENSURECOPY, NULL)\"\"\" % locals()\n\n def c_support_code(self):\n return inc_code()\n\n def c_code(self, node, name, input_names, output_names, sub):\n numpy_ver = [int(n) for n in np.__version__.split('.')[:2]]\n if bool(numpy_ver < [1, 8]):\n raise NotImplementedError\n x, y, idx = input_names\n out = output_names[0]\n copy_of_x = self.copy_of_x(x)\n\n return \"\"\"\n PyObject* rval = NULL;\n if (%(params)s->inplace)\n {\n if (%(x)s != %(out)s)\n {\n Py_XDECREF(%(out)s);\n Py_INCREF(%(x)s);\n %(out)s = %(x)s;\n }\n }\n else\n {\n Py_XDECREF(%(out)s);\n %(out)s = %(copy_of_x)s;\n if (!%(out)s) {\n // Exception already set\n %(fail)s\n }\n }\n if (inplace_increment(%(out)s, (PyObject *)%(idx)s, %(y)s, (1 - %(params)s->set_instead_of_inc))) {\n %(fail)s;\n }\n Py_XDECREF(rval);\n \"\"\" % dict(x=x, y=y, idx=idx, out=out, copy_of_x=copy_of_x,\n params=sub['params'], fail=sub['fail'])\n\n def c_code_cache_version(self):\n return (8,)\n\n def perform(self, node, inp, out_, params):\n # TODO opt to make this inplace\n x, y, idx = inp\n out, = out_\n if not self.inplace:\n x = x.copy()\n # In Numpy, x[idx] += y doesn't work if the same index is present\n # many times: it does it only once. Is it a bug? 
In any case, for\n # this reason we implement our own 'inc' iteration.\n\n if self.set_instead_of_inc:\n x[idx] = y\n else:\n np.add.at(x, idx, y)\n\n out[0] = x\n\n def infer_shape(self, node, ishapes):\n x, y, ilist = ishapes\n return [x]\n\n def R_op(self, inputs, eval_points):\n if None in eval_points[:2]:\n return [None]\n return self.make_node(eval_points[0], eval_points[1],\n *inputs[2:]).outputs\n\n def connection_pattern(self, node):\n\n rval = [[True], [True], [False]]\n return rval\n\n def grad(self, inputs, grads):\n g_output, = grads\n x, y, idx_list = inputs\n if x.dtype in theano.tensor.discrete_dtypes:\n # The output dtype is the same as x\n gx = x.zeros_like(dtype=theano.config.floatX)\n if y.dtype in theano.tensor.discrete_dtypes:\n gy = y.zeros_like(dtype=theano.config.floatX)\n else:\n gy = y.zeros_like()\n elif x.dtype in theano.tensor.complex_dtypes:\n raise NotImplementedError("No support for complex grad yet")\n else:\n if self.set_instead_of_inc:\n gx = advanced_set_subtensor1(\n g_output,\n y.zeros_like(),\n idx_list)\n else:\n gx = g_output\n gy = advanced_subtensor1(g_output, idx_list)\n gy = _sum_grad_over_bcasted_dims(y, gy)\n\n return [gx, gy] + [DisconnectedType()()]\n\nadvanced_inc_subtensor1 = AdvancedIncSubtensor1()\nadvanced_set_subtensor1 = AdvancedIncSubtensor1(set_instead_of_inc=True)\n\n\ndef as_index_variable(idx):\n if idx is None:\n return NoneConst.clone()\n if isinstance(idx, slice):\n return make_slice(idx)\n if isinstance(idx, gof.Variable) and isinstance(idx.type, SliceType):\n return idx\n if isinstance(idx, gof.Variable) and isinstance(idx.type, NoneTypeT):\n return idx\n idx = theano.tensor.as_tensor_variable(idx)\n if idx.type.dtype not in theano.tensor.discrete_dtypes:\n raise TypeError('index must be integers or a boolean mask')\n return idx\n\n\ndef adv_index_broadcastable_pattern(a, idx):\n \"\"\"\n This function is only used to determine the broadcast pattern for\n AdvancedSubtensor output variable.\n\n For this, we make a fake ndarray and a fake idx, and use them to ask\n numpy for the output. From this, we find the output broadcast pattern.\n\n \"\"\"\n\n def replace_slice(v):\n if isinstance(v, gof.Apply):\n if len(v.outputs) != 1:\n raise ValueError(\n "It is ambiguous which output of a multi-output Op has"\n " to be fetched.", v)\n else:\n v = v.outputs[0]\n\n if NoneConst.equals(v):\n return None\n if isinstance(v.type, SliceType):\n return slice(None, None)\n\n if v.dtype == 'bool':\n return np.ones((2,) * v.ndim, v.dtype)\n else:\n return np.zeros((2,) * v.ndim, int)\n\n newidx = tuple(map(replace_slice, idx))\n\n # 2 - True = 1; 2 - False = 2\n fakeshape = [2 - bc for bc in a.broadcastable]\n retshape = np.empty(fakeshape)[newidx].shape\n return tuple([dim == 1 for dim in retshape])\n\n\ndef check_advanced_indexing_dimensions(input, idx_list):\n \"\"\"\n This function checks if the index list in idx_list is correct.\n If there are any boolean masks, we check if the mask has the\n same shape as the input. This is enforced in NumPy 1.13.0 and\n newer, but not by earlier versions.
If the size is not the same,\n this method raises an IndexError.\n \"\"\"\n dim_seen = 0\n for index in idx_list:\n if index is np.newaxis:\n # skip, does not count as an input dimension\n pass\n elif isinstance(index, np.ndarray) and index.dtype == 'bool':\n for i in xrange(index.ndim):\n if index.shape[i] != input.shape[dim_seen + i]:\n raise IndexError('boolean index did not match indexed array '\n 'along dimension %d; dimension is %d but '\n 'corresponding boolean dimension is %d' %\n (dim_seen + i, input.shape[dim_seen + i],\n index.shape[i]))\n dim_seen += index.ndim\n else:\n dim_seen += 1\n\n\ndef check_and_reject_bool(args_el):\n try:\n if (isinstance(args_el, (np.bool_, bool)) or\n args_el.dtype == 'bool'):\n raise TypeError('AdvancedSubtensor does not support boolean '\n 'masks for indexing. Use AdvancedBooleanSubtensor '\n 'instead. ')\n except AttributeError:\n pass\n\n if (not isinstance(args_el, theano.tensor.Variable) and\n isinstance(args_el, collections.Iterable)):\n for el in args_el:\n check_and_reject_bool(el)\n\n\nclass BaseAdvancedSubtensor(Op):\n \"\"\"\n Abstract base class for AdvancedSubtensor and AdvancedBooleanSubtensor.\n Implements advanced indexing with boolean masks.\n\n \"\"\"\n\n # Should be used by __getitem__ and __getslice__, as follows:\n # AdvancedSubtensor()(self, *args) or\n # AdvancedBooleanSubtensor()(self, *args),\n # if args contains an advanced indexing pattern\n __props__ = ()\n\n def make_node(self, x, *index):\n x = theano.tensor.as_tensor_variable(x)\n\n index = tuple(map(as_index_variable, index))\n bcast = adv_index_broadcastable_pattern(x, index)\n return gof.Apply(self,\n (x,) + index,\n [theano.tensor.tensor(dtype=x.type.dtype,\n broadcastable=bcast)])\n\n def R_op(self, inputs, eval_points):\n if eval_points[0] is None:\n return [None]\n return self.make_node(eval_points[0], *inputs[1:]).outputs\n\n def infer_shape(self, node, ishapes):\n # Default case, we don't know\n raise theano.tensor.basic.ShapeError("case not implemented")\n\n def perform(self, node, inputs, out_):\n out, = out_\n check_advanced_indexing_dimensions(inputs[0], inputs[1:])\n rval = inputs[0].__getitem__(inputs[1:])\n # When there are no arrays, we are not actually doing advanced\n # indexing, so __getitem__ will not return a copy.\n # Since no view_map is set, we need to copy the returned value\n if not any(isinstance(v.type, TensorType) and v.ndim > 0\n for v in node.inputs[1:]):\n rval = rval.copy()\n out[0] = rval\n\n def connection_pattern(self, node):\n rval = [[True]]\n\n for ipt in node.inputs[1:]:\n rval.append([False])\n\n return rval\n\n\nclass AdvancedSubtensor(BaseAdvancedSubtensor):\n \"\"\"\n Return a subtensor copy, using advanced indexing.\n\n \"\"\"\n\n # Should be used by __getitem__ and __getslice__, as follows:\n # AdvancedSubtensor()(self, *args),\n # if args contains an advanced indexing pattern\n\n def make_node(self, x, *index):\n check_and_reject_bool(index)\n return super(AdvancedSubtensor, self).make_node(x, *index)\n\n def infer_shape(self, node, ishapes):\n # Really special case\n if len(ishapes) == 3:\n xshp, ind1shp, ind2shp = ishapes\n if (len(xshp) == 2 and\n ind1shp is not None and len(ind1shp) == 1 and\n ind2shp is not None and len(ind2shp) == 1):\n # if the graph is correct, we can assume ind1shp[0] and\n # ind2shp[0] will have the same value.\n # Try to return the one closest to the graph input.\n if node.inputs[2].owner is None:\n return [ind2shp]\n else:\n return [ind1shp]\n return super(AdvancedSubtensor, 
self).infer_shape(node, ishapes)\n\n def grad(self, inputs, grads):\n gz, = grads\n x = inputs[0]\n rest = inputs[1:]\n return [advanced_inc_subtensor(theano.tensor.zeros_like(x), gz,\n *rest)] + \\\n [DisconnectedType()()] * len(rest)\nadvanced_subtensor = AdvancedSubtensor()\n\n\nclass AdvancedBooleanSubtensor(BaseAdvancedSubtensor):\n \"\"\"\n Return a subtensor copy, using advanced indexing with boolean masks.\n\n \"\"\"\n\n # Should be used by __getitem__ and __getslice__, as follows:\n # AdvancedBooleanSubtensor()(self, *args),\n # if args contains an advanced indexing pattern with boolean masks\n\n def grad(self, inputs, grads):\n gz, = grads\n x = inputs[0]\n rest = inputs[1:]\n return [advanced_boolean_inc_subtensor(theano.tensor.zeros_like(x), gz,\n *rest)] + \\\n [DisconnectedType()()] * len(rest)\nadvanced_boolean_subtensor = AdvancedBooleanSubtensor()\n\n\nclass BaseAdvancedIncSubtensor(Op):\n \"\"\"\n Base class for AdvancedIncSubtensor and AdvancedBooleanIncSubtensor.\n Increments a subtensor using advanced indexing.\n \"\"\"\n\n __props__ = ("inplace", "set_instead_of_inc")\n\n def __init__(self, inplace=False, set_instead_of_inc=False):\n self.inplace = inplace\n self.set_instead_of_inc = set_instead_of_inc\n # The assert is needed as in the past the first argument was\n # something else that was not used.\n assert isinstance(inplace, bool)\n if self.inplace:\n raise NotImplementedError('In place computation is not'\n ' implemented')\n\n def __str__(self):\n return "%s{%s, %s}" % (self.__class__.__name__,\n "inplace=" + str(self.inplace),\n " set_instead_of_inc=" +\n str(self.set_instead_of_inc))\n\n def make_node(self, x, y, *inputs):\n x = theano.tensor.as_tensor_variable(x)\n y = theano.tensor.as_tensor_variable(y)\n\n new_inputs = []\n for inp in inputs:\n if isinstance(inp, (list, tuple)):\n inp = theano.tensor.as_tensor_variable(inp)\n new_inputs.append(inp)\n return gof.Apply(self,\n (x, y) + tuple(new_inputs),\n [theano.tensor.tensor(\n dtype=x.type.dtype,\n broadcastable=x.type.broadcastable)])\n\n def perform(self, node, inputs, out_):\n # TODO: 1. opt to make this in place 2. 
generalize as described in\n # AdvancedSubtensor's perform TODO\n\n check_advanced_indexing_dimensions(inputs[0], inputs[2:])\n\n out, = out_\n if not self.inplace:\n out[0] = inputs[0].copy()\n else:\n out[0] = inputs[0]\n\n if self.set_instead_of_inc:\n out[0][inputs[2:]] = inputs[1]\n else:\n np.add.at(out[0], tuple(inputs[2:]), inputs[1])\n\n def infer_shape(self, node, ishapes):\n return [ishapes[0]]\n\n def connection_pattern(self, node):\n\n rval = [[True], [True]]\n\n for ipt in node.inputs[2:]:\n rval.append([False])\n\n return rval\n\n def R_op(self, inputs, eval_points):\n if None in eval_points[:2]:\n return [None]\n return self.make_node(eval_points[0], eval_points[1],\n *inputs[2:]).outputs\n\n\nclass AdvancedIncSubtensor(BaseAdvancedIncSubtensor):\n \"\"\"\n Increments a subtensor using advanced indexing.\n \"\"\"\n\n def make_node(self, x, y, *inputs):\n check_and_reject_bool(inputs)\n return super(AdvancedIncSubtensor, self).make_node(x, y, *inputs)\n\n def grad(self, inpt, output_gradients):\n x, y = inpt[:2]\n idxs = inpt[2:]\n outgrad, = output_gradients\n if x.dtype in theano.tensor.discrete_dtypes:\n # The output dtype is the same as x\n gx = x.zeros_like(dtype=theano.config.floatX)\n if y.dtype in theano.tensor.discrete_dtypes:\n gy = y.zeros_like(dtype=theano.config.floatX)\n else:\n gy = y.zeros_like()\n elif x.dtype in theano.tensor.complex_dtypes:\n raise NotImplementedError(\"No support for complex grad yet\")\n else:\n if self.set_instead_of_inc:\n gx = advanced_set_subtensor(\n outgrad,\n y.zeros_like(),\n *idxs)\n else:\n gx = outgrad\n gy = advanced_subtensor(outgrad, *idxs)\n # Make sure to sum gy over the dimensions of y that have been\n # added or broadcasted\n gy = _sum_grad_over_bcasted_dims(y, gy)\n return [gx, gy] + \\\n [DisconnectedType()() for _ in idxs]\nadvanced_inc_subtensor = AdvancedIncSubtensor()\nadvanced_set_subtensor = AdvancedIncSubtensor(set_instead_of_inc=True)\n\n\nclass AdvancedBooleanIncSubtensor(BaseAdvancedIncSubtensor):\n \"\"\"\n Increments a subtensor using advanced indexing with boolean masks.\n \"\"\"\n\n def grad(self, inpt, output_gradients):\n x, y = inpt[:2]\n idxs = inpt[2:]\n outgrad, = output_gradients\n if x.dtype in theano.tensor.discrete_dtypes:\n # The output dtype is the same as x\n gx = x.zeros_like(dtype=theano.config.floatX)\n if y.dtype in theano.tensor.discrete_dtypes:\n gy = y.zeros_like(dtype=theano.config.floatX)\n else:\n gy = y.zeros_like()\n elif x.dtype in theano.tensor.complex_dtypes:\n raise NotImplementedError(\"No support for complex grad yet\")\n else:\n if self.set_instead_of_inc:\n gx = advanced_set_subtensor(\n outgrad,\n y.zeros_like(),\n *idxs)\n else:\n gx = outgrad\n gy = advanced_boolean_subtensor(outgrad, *idxs)\n # Make sure to sum gy over the dimensions of y that have been\n # added or broadcasted\n gy = _sum_grad_over_bcasted_dims(y, gy)\n return [gx, gy] + \\\n [DisconnectedType()() for _ in idxs]\nadvanced_boolean_inc_subtensor = AdvancedBooleanIncSubtensor()\nadvanced_boolean_set_subtensor = AdvancedBooleanIncSubtensor(set_instead_of_inc=True)\n\n\ndef take(a, indices, axis=None, mode='raise'):\n a = theano.tensor.as_tensor_variable(a)\n indices = theano.tensor.as_tensor_variable(indices)\n # Reuse advanced_subtensor1 if indices is a vector\n if indices.ndim == 1:\n if mode == 'clip':\n indices = clip(indices, 0, a.shape[axis] - 1)\n elif mode == 'wrap':\n indices = indices % a.shape[axis]\n if axis is None:\n return advanced_subtensor1(a.flatten(), indices)\n elif axis == 0:\n 
return advanced_subtensor1(a, indices)\n else:\n if axis < 0:\n axis += a.ndim\n assert axis >= 0\n shuffle = list(range(a.ndim))\n shuffle[0] = axis\n shuffle[axis] = 0\n return advanced_subtensor1(\n a.dimshuffle(shuffle), indices).dimshuffle(shuffle)\n if axis is None:\n shape = indices.shape\n ndim = indices.ndim\n else:\n # If axis is 0, don't generate a useless concatenation.\n if axis == 0:\n shape = theano.tensor.concatenate(\n [indices.shape, a.shape[axis + 1:]])\n else:\n if axis < 0:\n axis += a.ndim\n shape = theano.tensor.concatenate(\n [a.shape[:axis], indices.shape, a.shape[axis + 1:]])\n ndim = a.ndim + indices.ndim - 1\n return take(a, indices.flatten(), axis, mode).reshape(shape, ndim)\n",
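The perform() method of AdvancedIncSubtensor1 in the file above relies on np.add.at because NumPy's buffered fancy-index assignment applies at most one update per distinct index. A minimal plain-NumPy sketch of the difference (illustrative only, not part of the file above):

import numpy as np

x = np.zeros(3)
idx = np.array([0, 0, 1])
y = np.array([1.0, 2.0, 3.0])

# Buffered fancy indexing: both updates to index 0 are computed from the
# original x, and the second write simply overwrites the first.
x[idx] += y
assert np.allclose(x, [2.0, 3.0, 0.0])

# Unbuffered accumulation, as used by AdvancedIncSubtensor1.perform:
# every occurrence of a repeated index contributes.
x2 = np.zeros(3)
np.add.at(x2, idx, y)
assert np.allclose(x2, [3.0, 3.0, 0.0])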
"from __future__ import absolute_import, print_function, division\nimport unittest\nfrom theano.compat import izip\n\nfrom six import iteritems\n\nimport numpy as np\nimport theano\nimport theano.tensor as T\nfrom theano.tensor import TensorType\nfrom theano.tensor.basic import alloc\n\n# Don't import test classes otherwise they get tested as part of the file\nfrom theano.tensor.tests import test_basic\nfrom theano.tensor.tests.test_basic import rand, safe_make_node\nfrom theano.tests import unittest_tools as utt\n\nfrom ..type import (GpuArrayType, get_context,\n gpuarray_shared_constructor)\nfrom ..basic_ops import (\n host_from_gpu, HostFromGpu, GpuFromHost, GpuReshape, GpuToGpu,\n GpuAlloc, GpuAllocEmpty, GpuContiguous,\n gpu_join, GpuJoin, GpuSplit, GpuEye, gpu_contiguous)\nfrom ..elemwise import GpuDimShuffle, GpuElemwise\nfrom ..subtensor import GpuSubtensor\n\nfrom .config import mode_with_gpu, mode_without_gpu, test_ctx_name\n\nfrom pygpu import gpuarray\n\nutt.seed_rng()\nrng = np.random.RandomState(seed=utt.fetch_seed())\n\n\ndef inplace_func(inputs, outputs, mode=None, allow_input_downcast=False,\n on_unused_input='raise', name=None):\n if mode is None:\n mode = mode_with_gpu\n return theano.function(inputs, outputs, mode=mode,\n allow_input_downcast=allow_input_downcast,\n accept_inplace=True,\n on_unused_input=on_unused_input, name=name)\n\n\ndef fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):\n from theano.tensor.sharedvar import tensor_constructor, scalar_constructor\n for c in (gpuarray_shared_constructor, tensor_constructor,\n scalar_constructor):\n try:\n return c(value, name=name, strict=strict,\n allow_downcast=allow_downcast, **kwargs)\n except TypeError:\n continue\n\n\ndef rand_gpuarray(*shape, **kwargs):\n r = rng.rand(*shape) * 2 - 1\n dtype = kwargs.pop('dtype', theano.config.floatX)\n cls = kwargs.pop('cls', None)\n if len(kwargs) != 0:\n raise TypeError('Unexpected argument %s', list(kwargs.keys())[0])\n return gpuarray.array(r, dtype=dtype, cls=cls,\n context=get_context(test_ctx_name))\n\n\ndef makeTester(name, op, gpu_op, cases, checks=None, mode_gpu=mode_with_gpu,\n mode_nogpu=mode_without_gpu, skip=False, eps=1e-10):\n if checks is None:\n checks = {}\n\n _op = op\n _gpu_op = gpu_op\n _cases = cases\n _skip = skip\n _checks = checks\n\n class Checker(unittest.TestCase, utt.TestOptimizationMixin):\n op = staticmethod(_op)\n gpu_op = staticmethod(_gpu_op)\n cases = _cases\n skip = _skip\n checks = _checks\n\n def setUp(self):\n eval(self.__class__.__module__ + '.' 
+ self.__class__.__name__)\n\n def test_all(self):\n if skip:\n from nose.plugins.skip import SkipTest\n raise SkipTest(skip)\n\n for testname, inputs in iteritems(cases):\n for _ in range(len(inputs)):\n if type(inputs[_]) is float:\n inputs[_] = np.asarray(inputs[_],\n dtype=theano.config.floatX)\n self.run_case(testname, inputs)\n\n def run_case(self, testname, inputs):\n inputs_ref = [theano.shared(inp) for inp in inputs]\n inputs_tst = [theano.shared(inp) for inp in inputs]\n\n try:\n node_ref = safe_make_node(self.op, *inputs_ref)\n node_tst = safe_make_node(self.op, *inputs_tst)\n except Exception as exc:\n err_msg = (\"Test %s::%s: Error occurred while making \"\n \"a node with inputs %s\") % (self.gpu_op, testname,\n inputs)\n exc.args += (err_msg,)\n raise\n\n try:\n f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)\n f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)\n except Exception as exc:\n err_msg = (\"Test %s::%s: Error occurred while trying to \"\n \"make a Function\") % (self.gpu_op, testname)\n exc.args += (err_msg,)\n raise\n\n self.assertFunctionContains1(f_tst, self.gpu_op)\n\n ref_e = None\n try:\n expecteds = f_ref()\n except Exception as exc:\n ref_e = exc\n\n try:\n variables = f_tst()\n except Exception as exc:\n if ref_e is None:\n err_msg = (\"Test %s::%s: exception when calling the \"\n \"Function\") % (self.gpu_op, testname)\n exc.args += (err_msg,)\n raise\n else:\n # if we raised an exception of the same type we're good.\n if isinstance(exc, type(ref_e)):\n return\n else:\n err_msg = (\"Test %s::%s: exception raised during test \"\n \"call was not the same as the reference \"\n \"call (got: %s, expected %s)\" %\n (self.gpu_op, testname, type(exc),\n type(ref_e)))\n exc.args += (err_msg,)\n raise\n\n for i, (variable, expected) in \\\n enumerate(izip(variables, expecteds)):\n if variable.dtype != expected.dtype or \\\n variable.shape != expected.shape or \\\n not TensorType.values_eq_approx(variable,\n expected):\n self.fail((\"Test %s::%s: Output %s gave the wrong \"\n \"value. 
With inputs %s, expected %s \"\n \"(dtype %s), got %s (dtype %s).\" %\n (self.op, testname, i, inputs, expected,\n expected.dtype, variable, variable.dtype)))\n\n for description, check in iteritems(self.checks):\n if not check(inputs, variables):\n self.fail((\"Test %s::%s: Failed check: %s \"\n \"(inputs were %s, ouputs were %s)\") %\n (self.op, testname, description,\n inputs, variables))\n\n Checker.__name__ = name\n if hasattr(Checker, '__qualname__'):\n Checker.__qualname__ = name\n return Checker\n\n\ndef test_transfer_cpu_gpu():\n a = T.fmatrix('a')\n g = GpuArrayType(dtype='float32', broadcastable=(False, False))('g')\n\n av = np.asarray(rng.rand(5, 4), dtype='float32')\n gv = gpuarray.array(av, context=get_context(test_ctx_name))\n\n f = theano.function([a], GpuFromHost(test_ctx_name)(a))\n fv = f(av)\n assert GpuArrayType.values_eq(fv, gv)\n\n f = theano.function([g], host_from_gpu(g))\n fv = f(gv)\n assert np.all(fv == av)\n\n\ndef test_transfer_gpu_gpu():\n g = GpuArrayType(dtype='float32', broadcastable=(False, False),\n context_name=test_ctx_name)()\n\n av = np.asarray(rng.rand(5, 4), dtype='float32')\n gv = gpuarray.array(av, context=get_context(test_ctx_name))\n mode = mode_with_gpu.excluding('cut_gpua_host_transfers', 'local_cut_gpua_host_gpua')\n f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)\n topo = f.maker.fgraph.toposort()\n assert len(topo) == 1\n assert isinstance(topo[0].op, GpuToGpu)\n fv = f(gv)\n assert GpuArrayType.values_eq(fv, gv)\n\n\ndef test_transfer_strided():\n # This is just to ensure that it works in theano\n # libgpuarray has a much more comprehensive suit of tests to\n # ensure correctness\n a = T.fmatrix('a')\n g = GpuArrayType(dtype='float32', broadcastable=(False, False))('g')\n\n av = np.asarray(rng.rand(5, 8), dtype='float32')\n gv = gpuarray.array(av, context=get_context(test_ctx_name))\n\n av = av[:, ::2]\n gv = gv[:, ::2]\n\n f = theano.function([a], GpuFromHost(test_ctx_name)(a))\n fv = f(av)\n assert GpuArrayType.values_eq(fv, gv)\n\n f = theano.function([g], host_from_gpu(g))\n fv = f(gv)\n assert np.all(fv == av)\n\n\ndef gpu_alloc_expected(x, *shp):\n g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))\n g[:] = x\n return g\n\n\nGpuAllocTester = makeTester(\n name=\"GpuAllocTester\",\n # The +1 is there to allow the lift to the GPU.\n op=lambda *args: alloc(*args) + 1,\n gpu_op=GpuAlloc(test_ctx_name),\n cases=dict(\n correct01=(rand(), np.int32(7)),\n # just gives a DeepCopyOp with possibly wrong results on the CPU\n # correct01_bcast=(rand(1), np.int32(7)),\n correct02=(rand(), np.int32(4), np.int32(7)),\n correct12=(rand(7), np.int32(4), np.int32(7)),\n correct13=(rand(7), np.int32(2), np.int32(4),\n np.int32(7)),\n correct23=(rand(4, 7), np.int32(2), np.int32(4),\n np.int32(7)),\n bad_shape12=(rand(7), np.int32(7), np.int32(5)),\n )\n)\n\n\nclass TestAlloc(test_basic.TestAlloc):\n dtype = \"float32\"\n mode = mode_with_gpu\n shared = staticmethod(gpuarray_shared_constructor)\n allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), T.Alloc()]\n\n\ndef test_alloc_empty():\n for dt in ['float32', 'int8']:\n f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))\n assert len(f.maker.fgraph.apply_nodes) == 1\n out = f()\n assert out.shape == (2, 3)\n assert out.dtype == dt\n\n f = theano.function([], [GpuAllocEmpty('uint64', test_ctx_name)(3, 2),\n GpuAllocEmpty('uint64', test_ctx_name)(3, 2)])\n out = f()\n assert out[0].shape == (3, 2)\n assert out[0].dtype == 
'uint64'\n assert out[1].shape == (3, 2)\n assert out[1].dtype == 'uint64'\n assert len([node for node in f.maker.fgraph.apply_nodes\n if isinstance(node.op, GpuAllocEmpty)]) == 1\n\n\ndef test_shape():\n x = GpuArrayType(dtype='float32', broadcastable=[False, False, False])()\n v = gpuarray.zeros((3, 4, 5), dtype='float32', context=get_context(test_ctx_name))\n f = theano.function([x], x.shape)\n topo = f.maker.fgraph.toposort()\n assert np.all(f(v) == (3, 4, 5))\n if theano.config.mode != 'FAST_COMPILE':\n assert len(topo) == 4\n assert isinstance(topo[0].op, T.opt.Shape_i)\n assert isinstance(topo[1].op, T.opt.Shape_i)\n assert isinstance(topo[2].op, T.opt.Shape_i)\n assert isinstance(topo[3].op, T.opt.MakeVector)\n mode = mode_with_gpu.excluding(\"local_shape_to_shape_i\")\n f = theano.function([x], x.shape, mode=mode)\n topo = f.maker.fgraph.toposort()\n assert np.all(f(v) == (3, 4, 5))\n assert len(topo) == 1\n assert isinstance(topo[0].op, T.Shape)\n\n\ndef test_gpu_contiguous():\n a = T.fmatrix('a')\n i = T.iscalar('i')\n a_val = np.asarray(np.random.rand(4, 5), dtype='float32')\n # The reshape is needed otherwise we make the subtensor on the CPU\n # to transfer less data.\n f = theano.function([a, i], gpu_contiguous(a.reshape((5, 4))[::i]),\n mode=mode_with_gpu)\n topo = f.maker.fgraph.toposort()\n assert any([isinstance(node.op, GpuSubtensor) for node in topo])\n assert any([isinstance(node.op, GpuContiguous) for node in topo])\n assert f(a_val, 1).flags.c_contiguous\n assert f(a_val, 2).flags.c_contiguous\n assert f(a_val, 2).flags.c_contiguous\n\n\nclass G_reshape(test_basic.T_reshape):\n def shortDescription(self):\n return None\n\n def __init__(self, name):\n test_basic.T_reshape.__init__(\n self, name,\n shared=gpuarray_shared_constructor,\n op=GpuReshape,\n mode=mode_with_gpu,\n ignore_topo=(HostFromGpu, GpuFromHost,\n theano.compile.DeepCopyOp,\n GpuDimShuffle, GpuElemwise,\n theano.tensor.opt.Shape_i,\n theano.tensor.opt.MakeVector))\n assert self.op == GpuReshape\n\n\nclass G_comparison(test_basic.test_comparison):\n def setUp(self):\n utt.seed_rng()\n self.mode = mode_with_gpu\n self.shared = gpuarray_shared_constructor\n self.dtypes = ['float64', 'float32']\n\n\nclass G_Join_and_Split(test_basic.T_Join_and_Split):\n def setUp(self):\n super(G_Join_and_Split, self).setUp()\n self.mode = mode_with_gpu.excluding('constant_folding')\n self.join_op = GpuJoin()\n self.split_op_class = GpuSplit\n # Use join instead of MakeVector since there is no MakeVector on GPU\n self.make_vector_op = GpuJoin()\n # this is to avoid errors with limited devices\n self.floatX = 'float32'\n self.hide_error = theano.config.mode not in ['DebugMode', 'DEBUG_MODE']\n\n def shared(x, **kwargs):\n return gpuarray_shared_constructor(x, target=test_ctx_name,\n **kwargs)\n self.shared = shared\n\n def test_gpusplit_opt(self):\n # Test that we move the node to the GPU\n # Also test float16 computation at the same time.\n rng = np.random.RandomState(seed=utt.fetch_seed())\n m = self.shared(rng.rand(4, 6).astype('float16'))\n o = T.Split(2)(m, 0, [2, 2])\n assert o[0].dtype == 'float16'\n f = theano.function([], o, mode=self.mode)\n assert any([isinstance(node.op, self.split_op_class)\n for node in f.maker.fgraph.toposort()])\n o1, o2 = f()\n assert np.allclose(o1, m.get_value(borrow=True)[:2])\n assert np.allclose(o2, m.get_value(borrow=True)[2:])\n\n\ndef test_gpujoin_gpualloc():\n a = T.fmatrix('a')\n a_val = np.asarray(np.random.rand(4, 5), dtype='float32')\n b = T.fmatrix('b')\n b_val = 
np.asarray(np.random.rand(3, 5), dtype='float32')\n\n f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,\n mode=mode_without_gpu)\n f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),\n mode=mode_with_gpu)\n f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),\n T.ones_like(b)) + 4,\n mode=mode_with_gpu)\n assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2\n assert sum([node.op == T.join_ for node in f.maker.fgraph.toposort()]) == 1\n assert sum([isinstance(node.op, GpuAlloc)\n for node in f_gpu.maker.fgraph.toposort()]) == 2\n assert sum([node.op == gpu_join\n for node in f_gpu.maker.fgraph.toposort()]) == 1\n assert sum([isinstance(node.op, GpuAlloc)\n for node in f_gpu2.maker.fgraph.toposort()]) == 2\n assert sum([node.op == gpu_join\n for node in f_gpu2.maker.fgraph.toposort()]) == 1\n assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))\n\n\ndef test_gpueye():\n def check(dtype, N, M_=None, k=0):\n # Theano does not accept None as a tensor.\n # So we must use a real value.\n M = M_\n # Currently DebugMode does not support None as inputs even if this is\n # allowed.\n if M is None:\n M = N\n N_symb = T.iscalar()\n M_symb = T.iscalar()\n k_symb = T.iscalar()\n out = T.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype)\n f = theano.function([N_symb, M_symb, k_symb],\n out,\n mode=mode_with_gpu)\n\n result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype)\n assert np.allclose(result, np.eye(N, M_, k, dtype=dtype))\n assert result.dtype == np.dtype(dtype)\n assert any([isinstance(node.op, GpuEye)\n for node in f.maker.fgraph.toposort()])\n\n for dtype in ['float32', 'int32', 'float16']:\n yield check, dtype, 3\n # M != N, k = 0\n yield check, dtype, 3, 5\n yield check, dtype, 5, 3\n # N == M, k != 0\n yield check, dtype, 3, 3, 1\n yield check, dtype, 3, 3, -1\n # N < M, k != 0\n yield check, dtype, 3, 5, 1\n yield check, dtype, 3, 5, -1\n # N > M, k != 0\n yield check, dtype, 5, 3, 1\n yield check, dtype, 5, 3, -1\n # k > M, -k > N, k > M, k > N\n yield check, dtype, 5, 3, 3\n yield check, dtype, 3, 5, 3\n yield check, dtype, 5, 3, -3\n yield check, dtype, 3, 5, -3\n yield check, dtype, 5, 3, 6\n yield check, dtype, 3, 5, -6\n\n\ndef test_hostfromgpu_shape_i():\n # Test that the shape is lifted over hostfromgpu\n\n m = mode_with_gpu.including('local_dot_to_dot22',\n 'local_dot22_to_dot22scalar',\n 'specialize')\n a = T.fmatrix('a')\n ca = theano.gpuarray.type.GpuArrayType('float32', (False, False))()\n av = np.asarray(np.random.rand(5, 4), dtype='float32')\n cv = gpuarray.asarray(np.random.rand(5, 4),\n dtype='float32',\n context=get_context(test_ctx_name))\n\n f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)\n assert any(isinstance(x.op, GpuFromHost)\n for x in f.maker.fgraph.toposort())\n f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)\n topo = f.maker.fgraph.toposort()\n assert isinstance(topo[0].op, T.opt.Shape_i)\n assert isinstance(topo[1].op, T.opt.Shape_i)\n assert isinstance(topo[2].op, T.opt.MakeVector)\n assert tuple(f(av)) == (5, 4)\n\n f = theano.function([ca], host_from_gpu(ca), mode=m)\n assert host_from_gpu in [x.op\n for x in f.maker.fgraph.toposort()]\n f = theano.function([ca], host_from_gpu(ca).shape, mode=m)\n topo = f.maker.fgraph.toposort()\n assert isinstance(topo[0].op, theano.compile.Shape_i)\n assert isinstance(topo[1].op, theano.compile.Shape_i)\n assert isinstance(topo[2].op, theano.tensor.opt.MakeVector)\n assert 
tuple(f(cv)) == (5, 4)\n\n\ndef test_Gpujoin_inplace():\n # Test that GpuJoin works inplace.\n #\n # This function tests the case when several elements are passed to the\n # GpuJoin function but all except one of them are empty. In this case\n # GpuJoin should work inplace and the output should be a view of the\n # non-empty element.\n s = T.lscalar()\n data = np.array([3, 4, 5], dtype=theano.config.floatX)\n x = gpuarray_shared_constructor(data, borrow=True)\n z = T.zeros((s,))\n\n join = GpuJoin(view=0)\n c = join(0, x, z)\n\n f = theano.function([s], theano.Out(c, borrow=True))\n if not isinstance(mode_with_gpu, theano.compile.DebugMode):\n assert x.get_value(borrow=True, return_internal_type=True) is f(0)\n assert np.allclose(f(0), [3, 4, 5])\n",
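test_Gpujoin_inplace above relies on the invariant that joining one non-empty operand with empty operands is value-preserving, which is what makes returning a view of the non-empty input legal. A plain-NumPy sketch of that invariant (illustrative, CPU-side only):

import numpy as np

a = np.array([3.0, 4.0, 5.0])  # the non-empty operand (x above)
z = np.zeros((0,))             # an empty operand (z with s == 0 above)

out = np.concatenate([a, z])   # join along axis 0
# Joining with empty operands reproduces the non-empty operand, so a
# view-returning join (GpuJoin(view=0)) yields the same values as a copy.
assert np.allclose(out, a)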
"from __future__ import absolute_import, print_function, division\nimport os\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nfloatX = 'float32'\n\n\ndef test_graph_opt_caching():\n opt_db_file = os.path.join(theano.config.compiledir, 'optimized_graphs.pkl')\n if os.path.exists(opt_db_file):\n os.remove(opt_db_file)\n\n mode = theano.config.mode\n if mode in [\"DEBUG_MODE\", \"DebugMode\"]:\n mode = \"FAST_RUN\"\n default = theano.config.cache_optimizations\n try:\n theano.config.cache_optimizations = True\n a = T.fmatrix('a')\n b = T.fmatrix('b')\n c = theano.shared(np.ones((10, 10), dtype=floatX))\n d = theano.shared(np.ones((10, 10), dtype=floatX))\n e = T.sum(T.sum(T.sum(a ** 2 + b) + c) + d)\n f1 = theano.function([a, b], e, mode=mode)\n\n m = T.fmatrix('x1')\n n = T.fmatrix('x2')\n p = theano.shared(np.ones((10, 10), dtype=floatX))\n q = theano.shared(np.ones((10, 10), dtype=floatX))\n j = T.sum(T.sum(T.sum(m ** 2 + n) + p) + q)\n f2 = theano.function([m, n], j, mode=mode)\n\n in1 = np.ones((10, 10), dtype=floatX)\n in2 = np.ones((10, 10), dtype=floatX)\n assert f1(in1, in2) == f2(in1, in2)\n finally:\n theano.config.cache_optimizations = default\n\nif __name__ == '__main__':\n test_graph_opt_caching()\n",
"from __future__ import absolute_import, print_function, division\nimport theano\nimport theano.tensor as T\nfrom theano.tests import unittest_tools as utt\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom theano.tensor.nnet import bn\n\n\ndef test_BNComposite():\n try:\n orig = theano.config.compute_test_value\n\n theano.config.compute_test_value = 'raise'\n\n def bn_ref(x, G, B, M, V):\n n = (x - M) / V\n return n * G + B\n\n np.random.seed(1234)\n X = 1 + np.random.random([10, 20]).astype('float32')\n B = 1 + np.random.random([20]).astype('float32')\n G = 1 + np.random.random([20]).astype('float32')\n M = 1 + np.random.random([20]).astype('float32')\n V = 1 + np.random.random([20]).astype('float32')\n\n x = theano.tensor.matrix('x')\n b = theano.tensor.vector('b')\n g = theano.tensor.vector('g')\n m = theano.tensor.vector('m')\n v = theano.tensor.vector('v')\n\n x.tag.test_value = np.random.rand(2, 2).astype(theano.config.floatX)\n b.tag.test_value = np.random.rand(2).astype(theano.config.floatX)\n g.tag.test_value = np.random.rand(2).astype(theano.config.floatX)\n m.tag.test_value = np.random.rand(2).astype(theano.config.floatX)\n v.tag.test_value = np.random.rand(2).astype(theano.config.floatX)\n\n bn_ref_op = bn_ref(x, g, b, m, v)\n f_ref = theano.function([x, b, g, m, v], [bn_ref_op])\n res_ref = f_ref(X, G, B, M, V)\n for mode in ['low_mem', 'high_mem']:\n bn_op = bn.batch_normalization(x, g, b, m, v, mode=mode)\n f = theano.function([x, b, g, m, v], [bn_op])\n res = f(X, G, B, M, V)\n utt.assert_allclose(res_ref, res)\n finally:\n theano.config.compute_test_value = orig\n\n\ndef test_batch_normalization():\n\n def bn_ref(x, G, B, M, V):\n n = (x - M) / V\n return n * G + B\n\n np.random.seed(1234)\n X = 1 + np.random.random([10, 20]).astype('float32')\n B = 1 + np.random.random([20]).astype('float32')\n G = 1 + np.random.random([20]).astype('float32')\n M = 1 + np.random.random([20]).astype('float32')\n V = 1 + np.random.random([20]).astype('float32')\n\n x = theano.tensor.matrix('x')\n b = theano.tensor.vector('b')\n g = theano.tensor.vector('g')\n m = theano.tensor.vector('m')\n v = theano.tensor.vector('v')\n\n bn_ref_op = bn_ref(x, g, b, m, v)\n f_ref = theano.function([x, g, b, m, v], [bn_ref_op])\n res_ref = f_ref(X, G, B, M, V)\n for mode in ['low_mem', 'high_mem']:\n bn_op = bn.batch_normalization(x, g, b, m, v, mode=mode)\n f = theano.function([x, g, b, m, v], [bn_op])\n res = f(X, G, B, M, V)\n utt.assert_allclose(res_ref, res)\n\n def bn_f(inputs, gamma, beta, mean, std):\n return bn.batch_normalization(inputs, gamma, beta, mean, std, mode=mode)\n utt.verify_grad(bn_f, [X, G, B, M, V])\n\n bn_ref_op = bn_ref(x, g, b, x.mean(axis=0, keepdims=True), x.std(axis=0, keepdims=True))\n f_ref = theano.function([x, b, g], [bn_ref_op])\n res_ref = f_ref(X, G, B)\n for mode in ['low_mem', 'high_mem']:\n bn_op = bn.batch_normalization(x, g, b, x.mean(axis=0, keepdims=True), x.std(axis=0, keepdims=True), mode=mode)\n f = theano.function([x, b, g], [bn_op])\n res = f(X, G, B)\n utt.assert_allclose(res_ref, res)\n\n def bn_f(inputs, gamma, beta, mean, std):\n return bn.batch_normalization(inputs, gamma, beta, mean, std, mode=mode)\n utt.verify_grad(bn_f, [X, G, B,\n X.mean(axis=0)[np.newaxis], X.std(axis=0)[np.newaxis]])\n\n\ndef test_bn_feature_maps():\n\n def bn_ref(x, G, B, M, V):\n n = (x - M) / V\n return n * G + B\n\n np.random.seed(1234)\n X = 1 + np.random.random([2, 3, 4, 4]).astype('float32')\n B = 1 + np.random.random([3]).astype('float32')\n G = 1 + 
np.random.random([3]).astype('float32')\n M = 1 + np.random.random([3]).astype('float32')\n V = 1 + np.random.random([3]).astype('float32')\n\n x = theano.tensor.tensor4('x')\n b = theano.tensor.vector('b')\n g = theano.tensor.vector('g')\n m = theano.tensor.vector('m')\n v = theano.tensor.vector('v')\n\n bn_ref_op = bn_ref(x,\n g.dimshuffle('x', 0, 'x', 'x'),\n b.dimshuffle('x', 0, 'x', 'x'),\n m.dimshuffle('x', 0, 'x', 'x'),\n v.dimshuffle('x', 0, 'x', 'x'))\n f_ref = theano.function([x, b, g, m, v], [bn_ref_op])\n res_ref = f_ref(X, G, B, M, V)\n\n for mode in ['low_mem', 'high_mem']:\n bn_op = bn.batch_normalization(x,\n g.dimshuffle('x', 0, 'x', 'x'),\n b.dimshuffle('x', 0, 'x', 'x'),\n m.dimshuffle('x', 0, 'x', 'x'),\n v.dimshuffle('x', 0, 'x', 'x'),\n mode=mode)\n f = theano.function([x, b, g, m, v], [bn_op])\n res = f(X, G, B, M, V)\n utt.assert_allclose(res_ref, res)\n\n def conv_bn(inputs, gamma, beta, mean, std):\n return bn.batch_normalization(inputs,\n gamma.dimshuffle('x', 0, 'x', 'x'),\n beta.dimshuffle('x', 0, 'x', 'x'),\n mean.dimshuffle('x', 0, 'x', 'x'),\n std.dimshuffle('x', 0, 'x', 'x'),\n mode=mode)\n utt.verify_grad(conv_bn, [X, G, B, M, V])\n\n\ndef test_batch_normalization_train():\n utt.seed_rng()\n\n for axes in ('per-activation', 'spatial', (1, 2, 3, 4)):\n for vartype in (T.tensor5, T.tensor3, T.vector):\n x, scale, bias, running_mean, running_var = (vartype(n)\n for n in ('x', 'scale', 'bias',\n 'running_mean',\n 'running_var'))\n ndim = x.ndim\n eps = 5e-3 # some non-standard value to test if it's used\n running_average_factor = 0.3\n\n # remove non-existing axes\n if isinstance(axes, tuple):\n axes = tuple(i for i in axes if i < ndim)\n if len(axes) == 0:\n continue\n\n # forward pass\n out, x_mean, x_invstd, out_running_mean, out_running_var = \\\n bn.batch_normalization_train(\n x, scale, bias, axes, eps,\n running_average_factor, running_mean, running_var)\n # reference forward pass\n if axes == 'per-activation':\n axes2 = (0,)\n elif axes == 'spatial':\n axes2 = (0,) + tuple(range(2, ndim))\n else:\n axes2 = axes\n x_mean2 = x.mean(axis=axes2, keepdims=True)\n x_var2 = x.var(axis=axes2, keepdims=True)\n x_invstd2 = T.inv(T.sqrt(x_var2 + eps))\n scale2 = T.addbroadcast(scale, *axes2)\n bias2 = T.addbroadcast(bias, *axes2)\n out2 = (x - x_mean2) * (scale2 * x_invstd2) + bias2\n m = T.cast(T.prod(x.shape) / T.prod(scale.shape), theano.config.floatX)\n out_running_mean2 = running_mean * (1 - running_average_factor) + \\\n x_mean2 * running_average_factor\n out_running_var2 = running_var * (1 - running_average_factor) + \\\n (m / (m - 1)) * x_var2 * running_average_factor\n # backward pass\n dy = vartype('dy')\n grads = T.grad(None, wrt=[x, scale, bias], known_grads={out: dy})\n # reference backward pass\n grads2 = T.grad(None, wrt=[x, scale, bias], known_grads={out2: dy})\n # second-order backward pass\n dx = vartype('dinputs')\n dscale = vartype('dscale')\n dbias = vartype('dbias')\n grad_grads = T.grad(None, wrt=[x, dy, scale], known_grads=OrderedDict(\n {grads[0]: dx, grads[1]: dscale, grads[2]: dbias}),\n consider_constant=[x, dy, scale, bias, x_mean, x_invstd, running_mean, running_var],\n return_disconnected='zero')\n # reference second-order backward pass\n grad_grads2 = T.grad(None, wrt=[x, dy, scale], known_grads=OrderedDict(\n {grads2[0]: dx, grads2[1]: dscale, grads2[2]: dbias}),\n consider_constant=[x, dy, scale, bias, x_mean2, x_var2, running_mean, running_var],\n return_disconnected='zero')\n # compile\n f = theano.function([x, scale, bias, 
running_mean, running_var, dy, dx, dscale, dbias],\n [out, x_mean, x_invstd, out_running_mean, out_running_var,\n out2, x_mean2, x_invstd2, out_running_mean2, out_running_var2] +\n grads + grads2 + grad_grads + grad_grads2)\n # check if the abstract Ops have been replaced\n assert not any([isinstance(n.op, (bn.AbstractBatchNormTrain,\n bn.AbstractBatchNormInference,\n bn.AbstractBatchNormTrainGrad))\n for n in f.maker.fgraph.toposort()])\n # run\n for data_shape in ((5, 10, 30, 40, 10), (4, 3, 1, 1, 1), (2, 3, 5, 5, 5)):\n data_shape = data_shape[:ndim]\n param_shape = tuple(1 if d in axes2 else s\n for d, s in enumerate(data_shape))\n X = 4 + 3 * np.random.randn(*data_shape).astype(theano.config.floatX)\n Dy = -1 + 2 * np.random.randn(*data_shape).astype(theano.config.floatX)\n Scale = np.random.randn(*param_shape).astype(theano.config.floatX)\n Bias = np.random.randn(*param_shape).astype(theano.config.floatX)\n Running_mean = np.random.randn(*param_shape).astype(theano.config.floatX)\n Running_var = np.random.randn(*param_shape).astype(theano.config.floatX)\n Dx = 4 + 3 * np.random.randn(*data_shape).astype(theano.config.floatX)\n Dscale = -1 + 2 * np.random.randn(*param_shape).astype(theano.config.floatX)\n Dbias = np.random.randn(*param_shape).astype(theano.config.floatX)\n\n outputs = f(X, Scale, Bias, Running_mean, Running_var, Dy, Dx, Dscale, Dbias)\n # compare outputs\n utt.assert_allclose(outputs[0], outputs[0 + 5]) # out\n utt.assert_allclose(outputs[1], outputs[1 + 5]) # mean\n utt.assert_allclose(outputs[2], outputs[2 + 5]) # invstd\n utt.assert_allclose(outputs[3], outputs[3 + 5]) # running_mean\n utt.assert_allclose(np.nan_to_num(outputs[4]),\n np.nan_to_num(outputs[4 + 5])) # running_var\n # compare gradients\n utt.assert_allclose(outputs[10], outputs[10 + 3], atol=1e-4) # dx\n utt.assert_allclose(outputs[11], outputs[11 + 3], rtol=2e-4, atol=1e-4) # dscale\n utt.assert_allclose(outputs[12], outputs[12 + 3]) # dbias\n # compare second-order gradients\n utt.assert_allclose(outputs[16], outputs[16 + 3], atol=1e-4) # ddx\n utt.assert_allclose(outputs[17], outputs[17 + 3]) # ddy\n utt.assert_allclose(outputs[18], outputs[18 + 3], rtol=3e-4, atol=1e-4) # ddscale\n\n\ndef test_batch_normalization_train_grad_grad():\n utt.seed_rng()\n\n for axes in ('per-activation', 'spatial', (1, 2, 3, 4)):\n for vartype in (T.tensor5, T.tensor4, T.tensor3, T.matrix, T.vector):\n # run these experiments with float64 for sufficient numerical stability\n x, dy, scale, x_mean, x_invstd = (vartype(n, dtype='float64')\n for n in ('x', 'dy', 'scale',\n 'x_mean', 'x_invstd'))\n ndim = x.ndim\n\n # reference forward pass\n if axes == 'per-activation':\n axes = (0,)\n elif axes == 'spatial':\n axes = (0,) + tuple(range(2, ndim))\n else:\n # remove non-existing axes\n axes = tuple(i for i in axes if i < ndim)\n if len(axes) == 0:\n continue\n\n def bn_grad_wrt_inputs_f(x, dy, scale, x_mean, x_invstd):\n g_inputs, g_scale, g_bias = bn.AbstractBatchNormTrainGrad(axes)(x, dy, scale, x_mean, x_invstd)\n return g_inputs\n\n def bn_grad_wrt_scale_f(x, dy, scale, x_mean, x_invstd):\n g_inputs, g_scale, g_bias = bn.AbstractBatchNormTrainGrad(axes)(x, dy, scale, x_mean, x_invstd)\n return g_scale\n\n def bn_grad_wrt_bias_f(x, dy, scale, x_mean, x_invstd):\n g_inputs, g_scale, g_bias = bn.AbstractBatchNormTrainGrad(axes)(x, dy, scale, x_mean, x_invstd)\n return g_bias\n\n # run\n for data_shape in ((4, 3, 3, 3, 3), (4, 3, 1, 1, 1), (2, 3, 5, 3, 2)):\n data_shape = data_shape[:ndim]\n param_shape = tuple(1 if d in 
axes else s\n for d, s in enumerate(data_shape))\n # force float64 for sufficient numerical stability\n x_val = 4 + 3 * np.random.randn(*data_shape).astype('float64')\n dy_val = -1 + 2 * np.random.randn(*data_shape).astype('float64')\n scale_val = np.random.randn(*param_shape).astype('float64')\n x_mean_val = np.random.randn(*param_shape).astype('float64')\n x_invstd_val = np.random.randn(*param_shape).astype('float64')\n\n utt.verify_grad(bn_grad_wrt_inputs_f, [x_val, dy_val, scale_val, x_mean_val, x_invstd_val], abs_tol=5e-4, rel_tol=5e-4)\n utt.verify_grad(bn_grad_wrt_scale_f, [x_val, dy_val, scale_val, x_mean_val, x_invstd_val])\n utt.verify_grad(bn_grad_wrt_bias_f, [x_val, dy_val, scale_val, x_mean_val, x_invstd_val])\n\n\ndef test_batch_normalization_train_without_running_averages():\n # compile and run batch_normalization_train without running averages\n utt.seed_rng()\n\n x, scale, bias, dy = T.tensor4('x'), T.tensor4('scale'), T.tensor4('bias'), T.tensor4('dy')\n data_shape = (5, 10, 30, 25)\n param_shape = (1, 10, 30, 25)\n\n # forward pass\n out, x_mean, x_invstd = bn.batch_normalization_train(x, scale, bias, 'per-activation')\n # backward pass\n grads = T.grad(None, wrt=[x, scale, bias], known_grads={out: dy})\n # compile\n f = theano.function([x, scale, bias, dy], [out, x_mean, x_invstd] + grads)\n # check if the abstract Ops have been replaced\n assert not any([isinstance(n.op, (bn.AbstractBatchNormTrain,\n bn.AbstractBatchNormInference,\n bn.AbstractBatchNormTrainGrad))\n for n in f.maker.fgraph.toposort()])\n # run\n X = 4 + 3 * np.random.randn(*data_shape).astype(theano.config.floatX)\n Dy = -1 + 2 * np.random.randn(*data_shape).astype(theano.config.floatX)\n Scale = np.random.randn(*param_shape).astype(theano.config.floatX)\n Bias = np.random.randn(*param_shape).astype(theano.config.floatX)\n f(X, Scale, Bias, Dy)\n\n\ndef test_batch_normalization_train_broadcast():\n for axes in ('per-activation', 'spatial', (1, 2, 3, 4)):\n for vartype in (T.tensor5, T.tensor4, T.tensor3, T.matrix, T.vector):\n x = vartype('x')\n ndim = x.ndim\n eps = 5e-3 # some non-standard value to test if it's used\n running_average_factor = 0.3\n\n # remove non-existing axes\n if isinstance(axes, tuple):\n axes = tuple(i for i in axes if i < ndim)\n if len(axes) == 0:\n continue\n\n # convert axes to explicit list\n if axes == 'per-activation':\n axes2 = (0,)\n elif axes == 'spatial':\n axes2 = (0,) + tuple(range(2, ndim))\n else:\n axes2 = axes\n\n # compute axes for parameter tensors\n non_bc_axes = tuple(i for i in range(ndim) if i not in axes2)\n params_dimshuffle = ['x'] * ndim\n for i, axis in enumerate(non_bc_axes):\n params_dimshuffle[axis] = i\n\n # construct non-broadcasted parameter variables\n param_type = T.TensorType(x.dtype, (False,) * len(non_bc_axes))\n scale, bias, running_mean, running_var = (param_type(n)\n for n in ('scale', 'bias',\n 'running_mean',\n 'running_var'))\n\n # broadcast parameter variables\n scale_bc = scale.dimshuffle(params_dimshuffle)\n bias_bc = bias.dimshuffle(params_dimshuffle)\n running_mean_bc = running_mean.dimshuffle(params_dimshuffle)\n running_var_bc = running_var.dimshuffle(params_dimshuffle)\n\n # batch_normalization_train with original, non-broadcasted variables\n train_non_bc = \\\n bn.batch_normalization_train(\n x, scale, bias, axes, eps,\n running_average_factor, running_mean, running_var)\n # batch_normalization_train with broadcasted variables\n train_bc = \\\n bn.batch_normalization_train(\n x, scale_bc, bias_bc, axes, eps,\n 
running_average_factor, running_mean_bc, running_var_bc)\n train_bc = tuple([train_bc[0]] + # out\n [r.dimshuffle(non_bc_axes) for r in train_bc[1:]])\n\n # batch_normalization_test with original, non-broadcasted variables\n test_non_bc = \\\n bn.batch_normalization_test(\n x, scale, bias, running_mean, running_var, axes, eps)\n # batch_normalization_test with broadcasted variables\n test_bc = \\\n bn.batch_normalization_test(\n x, scale_bc, bias_bc, running_mean_bc, running_var_bc, axes, eps)\n\n # subtract the results of the non-broadcasted and broadcasted calls\n results_non_bc = train_non_bc + (test_non_bc,)\n results_bc = train_bc + (test_bc,)\n results = [abs(r - r_bc) for (r, r_bc) in zip(results_non_bc, results_bc)]\n\n # compile to compute all differences\n f = theano.function([x, scale, bias, running_mean, running_var],\n T.sum(sum(results)))\n\n # the paired ops are exactly the same, so the optimizer should have\n # collapsed the sum of differences to a constant zero\n nodes = f.maker.fgraph.toposort()\n if theano.config.mode != \"FAST_COMPILE\":\n assert len(nodes) == 1\n assert isinstance(nodes[0].op, theano.compile.DeepCopyOp)\n inputs = [np.asarray(np.random.rand(*((4,) * n)), x.dtype)\n for n in [x.ndim, scale.ndim, bias.ndim,\n running_mean.ndim, running_var.ndim]]\n assert 0.0 == f(*inputs)\n\n\ndef test_batch_normalization_test():\n for axes in ('per-activation', 'spatial', (1, 2, 3, 4)):\n for vartype in (T.tensor5, T.tensor3, T.vector):\n x, scale, bias, mean, var = (vartype(n)\n for n in ('x', 'scale', 'bias', 'mean', 'var'))\n ndim = x.ndim\n eps = 5e-3 # some non-standard value to test if it's used\n\n # remove non-existing axes\n if isinstance(axes, tuple):\n axes = tuple(i for i in axes if i < ndim)\n if len(axes) == 0:\n continue\n\n # forward pass\n out = bn.batch_normalization_test(x, scale, bias, mean,\n var, axes, eps)\n # reference forward pass\n if axes == 'per-activation':\n axes2 = (0,)\n elif axes == 'spatial':\n axes2 = (0,) + tuple(range(2, ndim))\n else:\n axes2 = axes\n scale2, bias2, mean2, var2 = (T.addbroadcast(t, *axes2)\n for t in (scale, bias, mean, var))\n out2 = (x - mean2) * (scale2 / T.sqrt(var2 + eps)) + bias2\n # backward pass\n dy = vartype('dy')\n grads = T.grad(None, wrt=[x, scale, bias, mean, var], known_grads={out: dy})\n # reference backward pass\n grads2 = T.grad(None, wrt=[x, scale, bias, mean, var], known_grads={out2: dy})\n # compile\n f = theano.function([x, scale, bias, mean, var, dy],\n [out, out2] + grads + grads2)\n # check if the abstract Ops have been replaced\n assert not any([isinstance(n.op, (bn.AbstractBatchNormTrain,\n bn.AbstractBatchNormInference,\n bn.AbstractBatchNormTrainGrad))\n for n in f.maker.fgraph.toposort()])\n # run\n for data_shape in ((10, 20, 30, 40, 10), (4, 3, 1, 1, 1), (1, 1, 5, 5, 5)):\n data_shape = data_shape[:ndim]\n param_shape = tuple(1 if d in axes2 else s\n for d, s in enumerate(data_shape))\n X = 4 + 3 * np.random.randn(*data_shape).astype(theano.config.floatX)\n Dy = -1 + 2 * np.random.randn(*data_shape).astype(theano.config.floatX)\n Scale = np.random.randn(*param_shape).astype(theano.config.floatX)\n Bias = np.random.randn(*param_shape).astype(theano.config.floatX)\n Mean = np.random.randn(*param_shape).astype(theano.config.floatX)\n Var = np.random.rand(*param_shape).astype(theano.config.floatX)\n outputs = f(X, Scale, Bias, Mean, Var, Dy)\n # compare outputs\n utt.assert_allclose(outputs[0], outputs[1]) # out\n # compare gradients\n utt.assert_allclose(outputs[2], outputs[2 + 5], 
atol=4e-5) # dx\n utt.assert_allclose(outputs[3], outputs[3 + 5], atol=4e-5) # dscale\n utt.assert_allclose(outputs[4], outputs[4 + 5]) # dbias\n utt.assert_allclose(outputs[5], outputs[5 + 5]) # dmean\n utt.assert_allclose(outputs[6], outputs[6 + 5], rtol=2e-3, atol=4e-5) # dvar\n\n\ndef test_batch_normalization_broadcastable():\n # check if the broadcastable pattern is preserved by the optimizations\n x, dy, scale, bias, mean, var = (T.scalar(n).dimshuffle(['x'] * 5)\n for n in ('x', 'dy', 'scale', 'bias', 'mean', 'var'))\n\n # forward pass\n out_train, x_mean, x_invstd = bn.batch_normalization_train(x, scale, bias, 'spatial')\n out_test = bn.batch_normalization_test(x, scale, bias, mean, var, 'spatial')\n # backward pass\n grads_train = T.grad(None, wrt=[x, scale, bias], known_grads={out_train: dy})\n grads_test = T.grad(None, wrt=[x, scale, bias], known_grads={out_test: dy})\n # compile\n f = theano.function([x, scale, bias, mean, var, dy],\n [out_train, x_mean, x_invstd, out_test] + grads_train + grads_test)\n assert not any([isinstance(n.op, (bn.AbstractBatchNormTrain,\n bn.AbstractBatchNormInference,\n bn.AbstractBatchNormTrainGrad))\n for n in f.maker.fgraph.toposort()])\n",
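Every reference path in the batch-normalization tests above reduces to the same closed-form inference expression, (x - mean) * (scale / sqrt(var + eps)) + bias. A self-contained NumPy sketch of that formula (bn_inference_ref is a hypothetical helper name for illustration, not part of theano.tensor.nnet.bn):

import numpy as np

def bn_inference_ref(x, scale, bias, mean, var, eps=5e-3):
    # Inference-mode batch normalization, matching the `out2` reference
    # expression used in test_batch_normalization_test.
    return (x - mean) * (scale / np.sqrt(var + eps)) + bias

np.random.seed(1234)
X = np.random.randn(10, 20).astype('float32')
Scale = 1 + np.random.random([20]).astype('float32')
Bias = 1 + np.random.random([20]).astype('float32')
out = bn_inference_ref(X, Scale, Bias, X.mean(axis=0), X.var(axis=0))
assert out.shape == X.shape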
"from __future__ import absolute_import, print_function, division\nimport time\n\nimport numpy as N\nfrom six.moves import xrange\n\n\nfrom theano import function, Mode\nimport theano.tensor as T\nfrom theano.tensor.nnet.conv import ConvOp\n\n\ndef flip(kern, kshp):\n \"flip the kernel as scipy.convolv2d do it flipped.\"\n flip = N.zeros(kern.shape)\n if len(kern.shape) == 2:\n kern = kern.reshape(-1)\n it = reversed(kern)\n for i in xrange(kshp[0]):\n for j in xrange(kshp[1]):\n flip[i, j] = next(it)\n elif len(kern.shape) == 3:\n kern = kern.reshape(kern.shape[0], -1)\n for k in xrange(kern.shape[0]):\n it = reversed(kern[k, :])\n for i in xrange(kshp[0]):\n for j in xrange(kshp[1]):\n flip[k, i, j] = next(it)\n elif len(kern.shape) == 4:\n kern = kern.reshape(kern.shape[0], kern.shape[1], -1)\n for k in xrange(kern.shape[0]):\n for m in xrange(kern.shape[1]):\n it = reversed(kern[k, m, :])\n for i in xrange(kshp[0]):\n for j in xrange(kshp[1]):\n flip[k, m, i, j] = next(it)\n else:\n raise NotImplementedError()\n return flip\n\nglobal_rng = N.random.RandomState(3423489)\n\ndmatrix4 = T.TensorType('float64', (False, False, False, False))\n\n\ndef exec_multilayer_conv_nnet_old(\n conv_mode, ss, bsize, imshp, kshps, nkerns,\n unroll_batch=0, unroll_kern=0, img=T.dmatrix(), validate=True,\n conv_op_py=False, do_print=True, repeat=1,\n unroll_patch=False, unroll_patch_size=False, verbose=0):\n\n # build actual input images\n imgval = global_rng.rand(bsize, imshp[0], imshp[1], imshp[2])\n\n a = T.dmatrix()\n kerns = [a for i in nkerns]\n inputs4 = dmatrix4()\n kerns4 = dmatrix4()\n\n # for each layer\n ntot = 0\n tctot = 0\n tpytot = 0\n\n for kshp, kern, nkern, n_layer in zip(kshps, kerns, nkerns,\n xrange(len(nkerns))):\n if do_print:\n print('************* layer %i ***************' % n_layer)\n print(conv_mode, ss, n_layer, kshp, nkern)\n\n # actual values\n w = global_rng.random_sample(N.r_[nkern, imshp[0], kshp])\n w_flip = flip(w, kshp).reshape(w.shape)\n\n # manual implementation\n # check first stage\n padimg = imgval\n if conv_mode == 'full':\n padimg_shp = N.array(imshp[1:]) + 2 * (N.array(kshp) - N.array([1, 1]))\n padimg = N.zeros(N.r_[bsize, imshp[0], padimg_shp])\n padimg[\n :, :, kshp[0] - 1:-kshp[0] + 1,\n kshp[1] - 1:-kshp[1] + 1] = imgval\n\n outshp = N.hstack((nkern, ConvOp.getOutputShape(imshp[1:], kshp, ss, conv_mode)))\n\n time1 = time.time()\n outval = N.zeros(N.r_[bsize, outshp])\n if validate:\n # causes an atexit problem\n from scipy.signal.sigtools import _convolve2d\n from scipy.signal.signaltools import _valfrommode, _bvalfromboundary\n val = _valfrommode(conv_mode)\n bval = _bvalfromboundary('fill')\n for b in xrange(bsize): # loop over batches\n for n in xrange(nkern): # loop over filters\n for i in xrange(imshp[0]): # loop over input feature maps\n outval[b, n, ...] 
+= _convolve2d(\n imgval[b, i, ...], w_flip[n, i, ...], 1, val, bval, 0)[0::ss[0], 0::ss[1]]\n ntot += time.time() - time1\n\n # ConvOp\n if unroll_patch and not unroll_patch_size:\n conv_op = ConvOp(dx=ss[0], dy=ss[1], output_mode=conv_mode,\n unroll_patch=unroll_patch, verbose=verbose)(inputs4, kerns4)\n else:\n conv_op = ConvOp(imshp, kshp, nkern, bsize, ss[0], ss[1], conv_mode,\n unroll_batch=unroll_batch, unroll_kern=unroll_kern, unroll_patch=unroll_patch, verbose=verbose)(inputs4, kerns4)\n # l1shp = N.hstack((nkern,\n # ConvOp.getOutputShape(imshp[1:], kshp, ss, conv_mode)))\n propup2 = function([inputs4, kerns4], conv_op)\n propup3 = function([inputs4, kerns4], conv_op, mode=Mode(linker=\"py\"))\n\n time1 = time.time()\n for i in xrange(repeat):\n hidval2_ = propup2(imgval, w_flip)\n hidval2 = hidval2_ # [:,:,0::ss[0],0::ss[1]]\n tctot += time.time() - time1\n\n if conv_op_py:\n time1 = time.time()\n for i in xrange(repeat):\n hidval3_ = propup3(imgval, w_flip)\n hidval3 = hidval3_ # [:,:,0::ss[0],0::ss[1]]\n tpytot += time.time() - time1\n assert (N.abs(hidval2 - hidval3) < 1e-5).all()\n else:\n tpytot += 0\n\n if validate:\n temp = N.abs(outval - hidval2)\n assert (temp < 1e-5).all()\n if validate and conv_op_py:\n temp = N.abs(outval - hidval3)\n assert (temp < 1e-5).all()\n\n imshp = tuple(outshp)\n imgval = outval.reshape(bsize, outshp[0], outshp[1], outshp[2])\n\n return tctot, tpytot, ntot\n\n\ndef exec_multilayer_conv_nnet(\n conv_mode, ss, bsize, imshp, kshps, nkerns,\n unroll_batch=0, unroll_kern=0, img=T.dmatrix(),\n do_print=True, repeat=1, unroll_patch=False,\n unroll_patch_size=False, verbose=0):\n\n # build actual input images\n imgval = global_rng.rand(bsize, imshp[0], imshp[1], imshp[2])\n\n a = T.dmatrix()\n kerns = [a for i in nkerns]\n inputs4 = dmatrix4()\n kerns4 = dmatrix4()\n\n # for each layer\n ntot = 0\n tctot = 0\n tpytot = 0\n\n for kshp, kern, nkern, n_layer in zip(kshps, kerns, nkerns, xrange(len(nkerns))):\n if do_print:\n print('************* layer %i ***************' % n_layer)\n print(conv_mode, ss, n_layer, kshp, nkern)\n\n # actual values\n w = global_rng.random_sample(N.r_[nkern, imshp[0], kshp])\n w_flip = flip(w, kshp).reshape(w.shape)\n\n outshp = N.hstack((nkern, ConvOp.getOutputShape(imshp[1:], kshp, ss, conv_mode)))\n\n time1 = time.time()\n # outval = N.zeros(N.r_[bsize, outshp])\n\n # ConvOp\n if unroll_patch and not unroll_patch_size:\n conv_op = ConvOp(dx=ss[0], dy=ss[1], output_mode=conv_mode,\n unroll_patch=unroll_patch, verbose=verbose)(inputs4, kerns4)\n else:\n conv_op = ConvOp(imshp, kshp, nkern, bsize, ss[0], ss[1], conv_mode,\n unroll_batch=unroll_batch, unroll_kern=unroll_kern, unroll_patch=unroll_patch, verbose=verbose)(inputs4, kerns4)\n # l1shp = N.hstack((nkern,\n # ConvOp.getOutputShape(imshp[1:], kshp, ss, conv_mode)))\n propup2 = function([inputs4, kerns4], conv_op)\n\n time1 = time.time()\n for i in xrange(repeat):\n propup2(imgval, w_flip)\n tctot += time.time() - time1\n\n imshp = tuple(outshp)\n # imgval = outval.reshape(bsize, outshp[0], outshp[1], outshp[2])\n\n return tctot, tpytot, ntot\n\n\ndef speed_multilayer_conv():\n # calculate the speed up of different combination of unroll\n # put the parameter to the same you will try.\n # validate = False # we don't validate the result to have it much faster!\n repeat = 3\n verbose = 1\n unroll_batch = [1, 2, 3, 4, 5, 6, 10] # 15, 30, 60 always much slower\n unroll_kern = [1, 2, 3, 4, 5, 6, 10] # 15, 30, 60 always much slower\n # unroll_batch = [1,4,5]\n # unroll_kern 
= [1,4,5]\n # unroll_batch = [1,4]\n # unroll_kern = [1,4]\n # unroll_patch = [True, False]\n bsize = 60 # batch size\n imshp_start = (1, 48, 48) # un square shape to test more corner case.\n kshps = ([11, 12],) # un square shape to test more corner case.\n nkerns = [60] # per output pixel\n ssizes = [(1, 1), ] # (1,1)]#(2,2) bugged\n convmodes = ['valid', 'full']\n # do_convolve2 = False\n a = T.dmatrix()\n kerns = [a for i in nkerns]\n\n assert len(kshps) == len(nkerns) == len(kerns)\n timing = N.zeros((len(unroll_batch), len(unroll_kern), 3, len(convmodes) * len(ssizes)))\n t_b_k = []\n # calculate the timing with unrolling\n\n print('time unroll batch kern')\n best = []\n worst = []\n t_ = []\n for unroll_b, n_b in zip(unroll_batch, xrange(len(unroll_batch))):\n for unroll_k, n_k in zip(unroll_kern, xrange(len(unroll_kern))):\n t_b_k.append(str(unroll_b) + \"/\" + str(unroll_k))\n if not t_:\n tctot, tpytot, ntot = [], [], []\n for conv_mode, n_mode in zip(convmodes, xrange(len(convmodes))):\n for ss, n_ss in zip(ssizes, xrange(len(ssizes))):\n # tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet_old(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=unroll_b, unroll_kern=unroll_k, validate=validate, verbose=verbose,do_print=False)\n tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=unroll_b, unroll_kern=unroll_k, verbose=verbose, do_print=False, repeat=repeat)\n tctot += [tctot_]\n tpytot += [tpytot_]\n ntot += [ntot_]\n if unroll_b == 4 and unroll_k == 4:\n # print \"unroll 4/4\",tctot\n best = tctot\n if unroll_b == 1 and unroll_k == 1:\n # print \"unroll 1/1\",tctot\n worst = tctot\n timing[n_b, n_k] = [tctot, tpytot, ntot] # [sum(tctot), sum(tpytot), sum(ntot)]\n if not t_:\n t = timing[:, :, 0, :] # We select only the c timing.\n else:\n t = t_\n t = N.asarray(t)\n # calculate the old timing\n print('time old version')\n tctot, tpytot, ntot = [], [], []\n tctot_ = []\n if not tctot_:\n for conv_mode, n_mode in zip(convmodes, xrange(len(convmodes))):\n for ss, n_ss in zip(ssizes, xrange(len(ssizes))):\n # tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet_old(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=0, unroll_kern=0, validate=validate, verbose=verbose,do_print=False)\n tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=0, unroll_kern=0, verbose=verbose, do_print=False, repeat=repeat)\n tctot += [tctot_]\n tpytot += [tpytot_]\n ntot += [ntot_]\n else:\n tctot = N.asarray(tctot_)\n print(\"old code timing %.3fs\" % sum(tctot), tctot)\n best = N.asarray(best)\n worst = N.asarray(worst)\n print(\"timing for unrolled version\")\n print(\"unroll_batch/unroll_kern valid_mode full_mode\")\n for n_b in xrange(len(unroll_batch)):\n for n_k in xrange(len(unroll_kern)):\n print((unroll_batch[n_b], unroll_kern[n_k]) + tuple(t[n_b, n_k]), ',')\n # t_detail = t\n t = t.sum(axis=2)\n print(\"max %.3fs\" % t.max(), \"max param(batch unloop size/kernel unloop size)\", t_b_k[t.argmax()])\n print(\"min %.3fs\" % t.min(), \"min param(batch unloop size/kernel unloop size)\", t_b_k[t.argmin()])\n print(\"speedup vs (1/1)%.3fx, vs old %.3fx\" % (t.max() / t.min(), sum(tctot) / t.min()))\n print(worst / best, tctot / best)\n\n # calculate the timing of unroll_patch\n print('time unroll_patch')\n tctot_patch = []\n tctot_patch_size = []\n for conv_mode, n_mode in zip(convmodes, xrange(len(convmodes))):\n for ss, n_ss in zip(ssizes, 
xrange(len(ssizes))):\n # tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet_old(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=0, unroll_kern=0, validate=validate,unroll_patch=True,verbose=verbose,do_print=False)\n tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=0, unroll_kern=0, unroll_patch=True, verbose=verbose, do_print=False, repeat=repeat)\n tctot_patch += [tctot_]\n # tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet_old(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=0, unroll_kern=0, validate=validate,unroll_patch=True,verbose=verbose,do_print=False,unroll_patch_size=True)\n tctot_, tpytot_, ntot_ = exec_multilayer_conv_nnet(conv_mode, ss, bsize, imshp_start, kshps, nkerns, unroll_batch=0, unroll_kern=0, unroll_patch=True, verbose=verbose, do_print=False, unroll_patch_size=True, repeat=repeat)\n tctot_patch_size += [tctot_]\n\n t_patch = sum(tctot_patch)\n print(\"unroll_patch without shape time\", tctot_patch)\n print(\"speedup vs (1/1)%.3fx, vs old %.3fx\" % (t.max() / t_patch, sum(tctot) / t_patch))\n print(best / tctot_patch, worst / tctot_patch)\n t_patch_size = sum(tctot_patch_size)\n print(\"unroll_patch with shape time\", tctot_patch_size)\n print(\"speedup vs (1/1)%.3fx, vs old %.3fx\" % (t.max() / t_patch_size, sum(tctot) / t_patch_size))\n print(best / tctot_patch_size, worst / tctot_patch_size)\n return\n\nif __name__ == '__main__':\n speed_multilayer_conv()\n",
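A small self-contained check (not part of the benchmark above; the array shape is arbitrary) making the intent of the flip() helper explicit: its element-by-element loop just reverses the kernel along the two spatial axes, which NumPy slicing expresses directly:

import numpy as np

rng = np.random.RandomState(0)
kern = rng.rand(2, 3, 4, 5)  # (nkern, nchannels, kh, kw)
# flip() fills flip[k, m, i, j] = kern[k, m, kh - 1 - i, kw - 1 - j],
# i.e. a reversal along both spatial axes:
flipped = kern[:, :, ::-1, ::-1]
assert flipped.shape == kern.shape
assert flipped[0, 0, 0, 0] == kern[0, 0, -1, -1]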
"from __future__ import absolute_import, print_function, division\nimport copy\nimport numpy as np\nimport logging\nimport pdb\nimport time\nfrom six import iteritems\nfrom six.moves import xrange\nimport sys\n\nimport theano\nfrom theano import tensor, scalar, gof, config\nfrom theano.compile import optdb\nfrom theano.compile.ops import shape_i\nfrom theano.gof import (local_optimizer, EquilibriumDB, TopoOptimizer,\n LocalGroupDB,\n SequenceDB, Optimizer, DB, toolbox, graph)\nfrom theano.gof.opt import (LocalMetaOptimizer, copy_stack_trace,\n inherit_stack_trace)\nfrom theano.ifelse import IfElse\nfrom theano.misc.ordered_set import OrderedSet\n\nfrom theano.scalar.basic import Scalar, Pow, Cast\nfrom theano.scalar.basic import log, neg, true_div\nfrom theano.scalar.basic_scipy import Erfinv, Erfcinv\nfrom theano.scan_module import scan_utils, scan_op, scan_opt\n\nfrom theano.tensor.nnet import bn, conv3d2d\nfrom theano.tensor.nnet.conv import ConvOp\nfrom theano.tensor.nnet.blocksparse import SparseBlockGemv, SparseBlockOuter\nfrom theano.tensor.nnet.abstract_conv import (BaseAbstractConv,\n AbstractConv2d,\n AbstractConv2d_gradWeights,\n AbstractConv2d_gradInputs,\n AbstractConv3d,\n AbstractConv3d_gradWeights,\n AbstractConv3d_gradInputs,\n get_conv_output_shape)\nfrom theano.tensor.nnet.neighbours import Images2Neibs\nfrom theano.tensor.nnet.ctc import ConnectionistTemporalClassification\nimport theano.tensor.nlinalg as nlinalg\nimport theano.tensor.signal.pool as pool\nimport theano.tensor.slinalg as slinalg\nfrom collections import Counter\n\nfrom theano.tests.breakpoint import PdbBreakpoint\n\nfrom .type import (GpuArrayType, GpuArrayConstant, get_context,\n ContextNotDefined, move_to_gpu)\nfrom .basic_ops import (as_gpuarray_variable, infer_context_name,\n host_from_gpu, GpuToGpu,\n HostFromGpu, GpuFromHost,\n GpuSplit, GpuContiguous, gpu_contiguous,\n GpuAlloc, GpuAllocEmpty, GpuReshape,\n GpuEye, gpu_join, GpuJoin)\nfrom .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch,\n gpugemm_no_inplace, gpugemm_inplace,\n gpugemmbatch_no_inplace,\n gpugemv_no_inplace, gpugemv_inplace,\n GpuCorrMM, GpuCorrMM_gradInputs, GpuCorrMM_gradWeights,\n GpuCorr3dMM, GpuCorr3dMM_gradInputs, GpuCorr3dMM_gradWeights)\nfrom .pool import (GpuPool, GpuMaxPoolGrad, GpuAveragePoolGrad, GpuMaxPoolRop,\n GpuDownsampleFactorMaxGradGrad)\nfrom .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter,\n gpu_sparse_block_outer,\n gpu_sparse_block_outer_inplace,\n gpu_sparse_block_gemv, gpu_sparse_block_gemv_inplace)\nfrom .nnet import (gpu_crossentropy_softmax_1hot_with_bias_dx,\n gpu_crossentropy_softmax_argmax_1hot_with_bias,\n gpu_softmax_with_bias, gpu_softmax)\nfrom .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda,\n GpuCAReduceCPY, gpu_erfinv, gpu_erfcinv,\n max_inputs_to_GpuElemwise)\nfrom .subtensor import (GpuIncSubtensor, GpuSubtensor,\n GpuAdvancedSubtensor,\n GpuAdvancedSubtensor1,\n GpuAdvancedBooleanSubtensor,\n GpuAdvancedIncSubtensor,\n GpuAdvancedIncSubtensor1,\n GpuAdvancedIncSubtensor1_dev20,\n GpuAdvancedBooleanIncSubtensor,\n GpuAllocDiag, GpuExtractDiag)\nfrom .opt_util import alpha_merge, output_merge, pad_dims, unpad_dims\nfrom .reduction import GpuMaxAndArgmax\nfrom .linalg import (GpuCusolverSolve, MATRIX_STRUCTURES_SOLVE, GpuCholesky,\n cusolver_available, GpuMagmaMatrixInverse, gpu_svd,\n GpuMagmaCholesky, gpu_qr, GpuMagmaEigh,\n GpuCublasTriangularSolve, cublas_available)\nfrom .neighbours import GpuImages2Neibs\nfrom .ctc import 
GpuConnectionistTemporalClassification\n\n_logger = logging.getLogger(\"theano.gpuarray.opt\")\n\n\ngpu_optimizer = EquilibriumDB()\ngpu_cut_copies = EquilibriumDB()\n\n# Not used for an EquilibriumOptimizer. It has the \"tracks\" that we need for GraphToGPUDB.\ngpu_optimizer2 = EquilibriumDB()\n\n\nclass GraphToGPUDB(DB):\n \"\"\"\n Retrieves the list of local optimizers based on the optimizer flag's value\n from EquilibriumOptimizer by calling the method query.\n\n \"\"\"\n\n def query(self, *tags, **kwtags):\n opt = gpu_optimizer2.query(*tags, **kwtags)\n return GraphToGPU(opt.local_optimizers_all, opt.local_optimizers_map)\n\n\ngpu_seqopt = SequenceDB()\n\ngpu_seqopt.register('gpuarray_graph_optimization', GraphToGPUDB(), -0.5,\n 'fast_compile', 'fast_run', 'gpuarray')\n\ngpu_seqopt.register('gpuarray_local_optimizations', gpu_optimizer, 1,\n 'fast_compile', 'fast_run', 'gpuarray', 'gpuarray_local_optimiziations')\ngpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,\n 'fast_compile', 'fast_run', 'gpuarray')\n\n# do not add 'fast_run' to these two as this would always enable gpuarray mode\noptdb.register('gpuarray_opt', gpu_seqopt,\n optdb.__position__.get('add_destroy_handler', 49.5) - 1,\n 'gpuarray')\n\n\ndef register_opt(*tags, **kwargs):\n def f(local_opt):\n name = (kwargs and kwargs.pop('name')) or local_opt.__name__\n gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)\n return local_opt\n return f\n\n\ndef register_opt2(tracks, *tags, **kwargs):\n '''\n Decorator for the new GraphToGPU optimizer.\n Takes an extra parameter (Op) compared to the register_opt decorator.\n\n Parameters\n ----------\n tracks : List of Op class or Op instance or None\n The Node's Op to which optimization is being applied.\n\n tags : String\n The optimization tag to which the optimizer will be registered.\n\n '''\n def f(local_opt):\n name = (kwargs and kwargs.pop('name')) or local_opt.__name__\n if isinstance(local_opt, theano.gof.DB):\n opt = local_opt\n else:\n opt = theano.gof.local_optimizer(tracks)(local_opt)\n gpu_optimizer2.register(name, opt, 'fast_run', 'gpuarray', *tags)\n return local_opt\n return f\n\n\ndef register_inplace(*tags, **kwargs):\n def f(local_opt):\n name = (kwargs and kwargs.pop('name')) or local_opt.__name__\n optdb.register(\n name, TopoOptimizer(\n local_opt, failure_callback=TopoOptimizer.warn_inplace),\n 60, 'fast_run', 'inplace', 'gpuarray', *tags)\n return local_opt\n return f\n\n\nregister_opt('fast_compile')(theano.tensor.opt.local_track_shape_i)\nregister_opt(final_opt=True, name='gpua_constant_folding')(\n tensor.opt.constant_folding)\ngpu_optimizer.register('local_remove_all_assert',\n theano.tensor.opt.local_remove_all_assert,\n 'unsafe')\n\n\n# Define a few operations to use in optimizations,\n# in order to avoid introducing new CPU Ops, or useless ones.\ndef safe_to_gpu(x, ctx_name):\n if isinstance(x.type, tensor.TensorType):\n return GpuFromHost(ctx_name)(x)\n else:\n return x\n\n\ndef safe_to_cpu(x):\n if isinstance(x.type, GpuArrayType):\n return x.transfer('cpu')\n else:\n return x\n\ngpu_log = GpuElemwise(log)\ngpu_neg = GpuElemwise(neg)\ngpu_true_div = GpuElemwise(true_div)\n\n\ndef op_lifter(OP, cuda_only=False):\n \"\"\"\n OP(..., host_from_gpu(), ...) 
-> host_from_gpu(GpuOP(...))\n\n gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)\n\n \"\"\"\n def f(maker):\n def local_opt(node):\n if type(node.op) in OP:\n # Either one of our inputs is on the gpu or\n # all of our clients are on the gpu\n replace = False\n # TODO: Maybe set context_name with infer_context_name()?\n context_name = None\n # We replace if any input is a host_from_gpu\n for i in node.inputs:\n if (i.owner and i.owner.op == host_from_gpu and\n move_to_gpu(i)):\n context_name = i.owner.inputs[0].type.context_name\n replace = True\n break\n\n if not replace:\n # We replace if *all* clients are on the GPU\n clients = [c for o in node.outputs for c in o.clients]\n replace = len(clients) != 0\n for c, idx in clients:\n if (c == 'output' or\n not isinstance(c.op, GpuFromHost)):\n replace = False\n # TODO: check that the clients want the same context?\n if replace:\n # All clients are GpuFromHost and we have at least one\n context_name = clients[0][0].op.context_name\n\n # Check if we should replace\n if (not replace or\n (cuda_only and\n get_context(context_name).kind != b'cuda') or\n any([\"complex\" in getattr(i, 'dtype', \"\")\n for i in node.inputs])):\n return False\n\n # tag the inputs with the context in case\n # the context was derived from the outputs\n for i in node.inputs:\n i.tag.context_name = context_name\n\n new_op = maker(node.op, context_name, node.inputs, node.outputs)\n\n # This is needed as sometimes new_op inherits from OP.\n if new_op and new_op != node.op:\n if isinstance(new_op, theano.Op):\n new_outputs = new_op(*node.inputs, return_list=True)\n to_cpu_fn = safe_to_cpu\n elif isinstance(new_op, (tuple, list)):\n new_outputs = new_op\n to_cpu_fn = safe_to_cpu\n else: # suppose it is a variable on the GPU\n new_outputs = [new_op]\n\n def to_cpu_fn(x):\n return x.transfer('cpu')\n # copy stack traces onto gpu outputs\n # also copy the stack traces onto HostFromGpu outputs\n on_cpu = []\n for old_output, new_output in zip(node.outputs, new_outputs):\n copy_stack_trace(old_output, new_output)\n cpu = to_cpu_fn(new_output)\n on_cpu.append(cpu)\n copy_stack_trace(old_output, cpu)\n return on_cpu\n return False\n local_opt.__name__ = maker.__name__\n return local_optimizer(OP)(local_opt)\n return f\n\n\nclass InputToGpuOptimizer(Optimizer):\n \"\"\"\n Transfer the input to the gpu to start the rolling wave.\n\n \"\"\"\n def add_requirements(self, fgraph):\n fgraph.attach_feature(toolbox.ReplaceValidate())\n\n def apply(self, fgraph):\n for input in fgraph.inputs:\n if isinstance(input.type, GpuArrayType):\n continue\n\n # If all clients are outputs or transfers don't do anything.\n if (all(cl[0] == 'output' or isinstance(cl[0].op, GpuFromHost)\n for cl in input.clients)):\n continue\n\n target = getattr(input.tag, 'target', None)\n if target == 'cpu':\n continue\n if (isinstance(input.type, tensor.TensorType) and\n not move_to_gpu(input)):\n continue\n\n try:\n new_input = GpuFromHost(target)(input).transfer('cpu')\n fgraph.replace_validate(input, new_input,\n \"InputToGpuOptimizer\")\n except TypeError:\n # This could fail if the inputs are not TensorTypes\n pass\n except ContextNotDefined:\n if hasattr(input.tag, 'target'):\n raise\n # If there is no context tag and no default context\n # then it stays on the CPU\n pass\n\n\ngpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),\n 0, 'fast_run', 'fast_compile', 'merge')\n\n\nclass GraphToGPU(Optimizer):\n \"\"\"\n Transfer the graph as a whole to GPU instead of transferring node by node.\n\n 
Parameters\n ----------\n local_optimizers_all : List or SortedSet\n The local optimizations to apply to a node.\n local_optimizers_map : Dict\n Dictionary object containing the mapping of Op to list of\n LocalOptimizers.\n \"\"\"\n\n def __init__(self, local_optimizers_all, local_optimizers_map):\n self.local_optimizers_all = local_optimizers_all\n self.local_optimizers_map = local_optimizers_map\n\n def add_requirements(self, fgraph):\n fgraph.attach_feature(toolbox.ReplaceValidate())\n\n def apply(self, fgraph):\n mapping = {}\n time_opts = {}\n node_created = {}\n process_count = {}\n t_topo = time.time()\n topo = fgraph.toposort()\n time_topo = time.time()\n toposort_timing = time_topo - t_topo\n\n # Building a new graph\n # Iterating through inputs of graph\n target = infer_context_name(*fgraph.inputs)\n for i in fgraph.inputs:\n if isinstance(i.type, tensor.TensorType) and move_to_gpu(i):\n mapping[i] = i.transfer(getattr(i.tag, 'target', target))\n else:\n mapping[i] = i\n for i in fgraph.variables:\n if isinstance(i, theano.Constant):\n mapping[i] = i\n for node in topo:\n for lopt in (self.local_optimizers_map.get(node.op, []) +\n self.local_optimizers_map.get(type(node.op), []) +\n self.local_optimizers_all):\n process_count.setdefault(lopt, 0)\n time_opts.setdefault(lopt, 0)\n node_created.setdefault(lopt, 0)\n\n for node in topo:\n\n if isinstance(node.op, HostFromGpu):\n mapping[node.outputs[0]] = mapping[node.inputs[0]]\n continue\n\n # Move only if any of the inputs are on the GPU.\n move_to_GPU = False\n\n context_name = None\n for i in [mapping[i] for i in node.inputs]:\n if isinstance(i.type, GpuArrayType):\n context_name = i.type.context_name\n move_to_GPU = True\n break\n if (not move_to_GPU and\n isinstance(node.op, (theano.tensor.Alloc,\n theano.tensor.AllocEmpty,\n theano.tensor.basic.Eye))):\n # If the Alloc[Empty] have a client that will be moved\n # to the GPU, we should move the Alloc* on the GPU.\n\n # We approximate this by supposing that if we have an\n # optimization for one of the clients op, then we will\n # move the client to the GPU.\n for c, _ in node.outputs[0].clients:\n if (c != 'output' and\n (self.local_optimizers_map.get(c.op, []) +\n self.local_optimizers_map.get(type(c.op), []))):\n move_to_GPU = True\n new_ops = None\n if move_to_GPU and any([\"complex\" in getattr(i, 'dtype', \"\")\n for i in node.inputs]):\n move_to_GPU = False\n\n # Apply the lifter\n if move_to_GPU:\n for lopt in (self.local_optimizers_map.get(node.op, []) +\n self.local_optimizers_map.get(type(node.op), []) +\n self.local_optimizers_all):\n t_opt = time.time()\n new_ops = lopt.transform(node.op, context_name,\n [mapping[i] for i in node.inputs],\n node.outputs)\n t_opt2 = time.time()\n time_opts[lopt] += t_opt2 - t_opt\n\n if new_ops:\n process_count[lopt] += 1\n break\n outputs = []\n\n if isinstance(new_ops, theano.Op):\n with inherit_stack_trace(node.outputs):\n outputs = new_ops(*[mapping[i] for i in node.inputs], return_list=True)\n elif not new_ops:\n newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])\n outputs = newnode.outputs\n elif isinstance(new_ops, (tuple, list)):\n outputs = new_ops\n elif isinstance(new_ops, theano.Variable):\n outputs = [new_ops]\n\n for old_output, new_output in zip(node.outputs, outputs):\n copy_stack_trace(old_output, new_output)\n\n if new_ops:\n node_created[lopt] += len(graph.ops([mapping[i] for i in node.inputs], outputs))\n if any([getattr(old_o, 'dtype', None) != getattr(new_o, 'dtype', None)\n for old_o, 
new_o in zip(outputs, node.outputs)]):\n _logger.warning(\n \"The optimization %s returned bad dtype. Skipping it.\"\n \" Write to theano-dev mailing list about this.\" %\n str(lopt))\n newnode = node.clone_with_new_inputs([mapping.get(i) for i in node.inputs])\n outputs = newnode.outputs\n\n for new_o, old_o in zip(outputs, node.outputs):\n assert len(outputs) == len(node.outputs)\n mapping[old_o] = new_o\n\n new_nodes = []\n for o in fgraph.outputs:\n new_o = mapping[o]\n if new_o.type != o.type:\n assert isinstance(o.type, tensor.TensorType)\n assert isinstance(new_o.type, GpuArrayType)\n\n # This condition is needed in the case one input is an\n # output of the graph. Without this, it would\n # introduce cycle as we don't replace correctly that\n # case. It would also add extra transfer to/from the\n # gpu.\n if (new_o.owner and\n isinstance(new_o.owner.op, GpuFromHost) and\n new_o.owner.inputs[0].type == o.type):\n new_o = new_o.owner.inputs[0]\n else:\n new_o = copy_stack_trace(o, safe_to_cpu(new_o))\n new_nodes.append(new_o)\n fgraph.replace_all_validate(zip(fgraph.outputs, new_nodes),\n reason=self.__class__.__name__)\n\n return (self, toposort_timing, time_opts, node_created, process_count)\n\n @staticmethod\n def print_profile(stream, prof, level=0):\n (opt, toposort_timing, time_opts, node_created, process_count) = prof\n blanc = (' ' * level)\n print(blanc, \"GraphToGPUOptimizer\", end=' ', file=stream)\n\n print(blanc, getattr(opt, \"name\",\n getattr(opt, \"__name__\", \"\")), file=stream)\n\n print(blanc, \" time io_toposort %.3fs\" % toposort_timing, file=stream)\n\n s = sum(time_opts.values())\n print(blanc, \"Total time taken by local optimizers %.3fs \" % s, file=stream)\n\n count_opt = []\n not_used = []\n not_used_time = 0\n\n for o, count in iteritems(process_count):\n if count > 0:\n count_opt.append((time_opts[o], count,\n node_created[o], o))\n else:\n not_used.append((time_opts[o], o))\n not_used_time += time_opts[o]\n\n if count_opt:\n print(blanc,\n ' times - times applied - Node created - name:',\n file=stream)\n count_opt.sort()\n for (t, count, n_created, o) in count_opt[::-1]:\n print(blanc, ' %.3fs - %d - %d - %s' % (\n t, count, n_created, o), file=stream)\n print(blanc, ' %.3fs - in %d optimization that were not used (display only those with a runtime > 0)' % (\n not_used_time, len(not_used)), file=stream)\n not_used.sort(key=lambda nu: (nu[0], str(nu[1])))\n for (t, o) in not_used[::-1]:\n if t > 0:\n # Skip opt that have 0 times, they probably wasn't even tried.\n print(blanc + \" \", ' %.3fs - %s' % (t, o), file=stream)\n print(file=stream)\n\n @staticmethod\n def merge_profile(prof1, prof2):\n # (opt, toposort_timing, time_opts, node_created, process_count) = prof1\n local_optimizers = OrderedSet(prof1[0].local_optimizers_all).union(\n prof2[0].local_optimizers_all)\n\n def merge_dict(d1, d2):\n \"\"\"\n merge 2 dicts by adding the values.\n \"\"\"\n d = d1.copy()\n for k, v in iteritems(d2):\n if k in d:\n d[k] += v\n else:\n d[k] = v\n return d\n\n local_optimizers_map = merge_dict(prof1[0].local_optimizers_map,\n prof2[0].local_optimizers_map)\n new_opt = GraphToGPU(local_optimizers, local_optimizers_map)\n\n toposort_timing = prof1[1] + prof2[1]\n time_opts = merge_dict(prof1[2], prof2[2])\n node_created = merge_dict(prof1[3], prof2[3])\n process_count = merge_dict(prof1[4], prof2[4])\n return (new_opt,\n toposort_timing,\n time_opts,\n node_created,\n process_count)\n\n def print_summary(self, stream=sys.stdout, level=0, depth=-1):\n print(\"%s%s 
(%i)\" % (\n (' ' * level), self.__class__.__name__, id(self)), file=stream)\n if depth != 0:\n map_values = []\n for opts in self.local_optimizers_map.values():\n map_values += opts\n for opt in self.local_optimizers_all + map_values:\n opt.print_summary(stream, level=(level + 2), depth=(depth - 1))\n\n\n@local_optimizer([GpuFromHost, GpuToGpu, HostFromGpu])\ndef local_cut_gpu_transfers(node):\n # gpu[ab] -> host -> gpub\n if (isinstance(node.op, GpuFromHost) and\n node.inputs[0].owner and\n isinstance(node.inputs[0].owner.op, HostFromGpu)):\n other = node.inputs[0].owner.inputs[0]\n if node.op.context_name == other.type.context_name:\n return [other]\n else:\n return [GpuToGpu(node.op.context_name)(other)]\n\n # ? -> gpua -> host\n elif (isinstance(node.op, HostFromGpu) and\n node.inputs[0].owner):\n n2 = node.inputs[0].owner\n\n # host ->\n if isinstance(n2.op, GpuFromHost):\n return [n2.inputs[0]]\n\n # gpub ->\n if isinstance(n2.op, GpuToGpu):\n return [n2.inputs[0].transfer('cpu')]\n\n # ? -> gpua -> gpub\n elif isinstance(node.op, GpuToGpu):\n # Transfer within same context\n if node.inputs[0].type.context_name == node.op.context_name:\n return [node.inputs[0]]\n\n if node.inputs[0].owner:\n n2 = node.inputs[0].owner\n\n # host ->\n if isinstance(n2.op, GpuFromHost):\n return [as_gpuarray_variable(n2.inputs[0],\n node.op.context_name)]\n\n # gpuc ->\n if isinstance(n2.op, GpuToGpu):\n if node.op.context_name == n2.inputs[0].type.context_name:\n return [n2.inputs[0]]\n else:\n return [node.op(n2.inputs[0])]\n\n\ngpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers,\n 'fast_compile', 'fast_run', 'gpuarray')\ngpu_cut_copies.register('cut_gpua_constant_transfers',\n tensor.opt.constant_folding,\n 'fast_compile', 'fast_run', 'gpuarray')\noptdb['canonicalize'].register('local_cut_gpua_host_gpua',\n local_cut_gpu_transfers,\n 'fast_compile', 'fast_run', 'gpuarray')\n\n\n@register_opt('fast_compile')\n@local_optimizer([tensor.Alloc])\ndef local_gpua_alloc2(node):\n \"\"\"\n Join(axis, {Alloc or HostFromGPU}, ...) 
-> Join(axis, GpuAlloc, Alloc, ...)\n\n Moves an alloc that is an input to join to the gpu.\n\n \"\"\"\n try:\n get_context(None)\n except ContextNotDefined:\n # If there is no default context then we do not perform the move here.\n return\n if (isinstance(node.op, tensor.Alloc) and\n all(c != 'output' and\n isinstance(c.op, tensor.Join) and\n all(i.owner and\n i.owner.op in [host_from_gpu, tensor.alloc]\n for i in c.inputs[1:])\n for c, idx in node.outputs[0].clients)):\n return [GpuAlloc(None)(*node.inputs).transfer('cpu')]\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.Alloc])\n@register_opt2([tensor.Alloc], 'fast_compile')\ndef local_gpuaalloc(op, context_name, inputs, outputs):\n return GpuAlloc(context_name)(*inputs)\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.AllocEmpty])\n@register_opt2([tensor.AllocEmpty], 'fast_compile')\ndef local_gpua_alloc_empty(op, context_name, inputs, outputs):\n # We use _props_dict() to make sure that the GPU op know all the\n # CPU op props.\n return GpuAllocEmpty(context_name=context_name, **op._props_dict())(*inputs)\n\n\n@register_opt()\n@local_optimizer([GpuAlloc])\ndef local_gpualloc_memset_0(node):\n if isinstance(node.op, GpuAlloc) and not node.op.memset_0:\n inp = node.inputs[0]\n if (isinstance(inp, GpuArrayConstant) and\n inp.data.size == 1 and\n (np.asarray(inp.data) == 0).all()):\n new_op = GpuAlloc(node.op.context_name, memset_0=True)\n with inherit_stack_trace(node.outputs):\n return new_op(*node.inputs, return_list=True)\n\n\n# Don't register by default.\[email protected]_optimizer([GpuAllocEmpty])\ndef local_gpua_alloc_empty_to_zeros(node):\n if isinstance(node.op, GpuAllocEmpty):\n context_name = infer_context_name(*node.inputs)\n z = np.asarray(0, dtype=node.outputs[0].dtype)\n with inherit_stack_trace(node.outputs):\n return [GpuAlloc(context_name)(\n as_gpuarray_variable(z, context_name), *node.inputs)]\noptdb.register('local_gpua_alloc_empty_to_zeros',\n theano.tensor.opt.in2out(local_gpua_alloc_empty_to_zeros),\n # After move to gpu and merge2, before inplace.\n 49.3,\n 'alloc_empty_to_zeros',)\n\n\n@register_opt()\n@local_optimizer([GpuContiguous])\ndef local_gpu_contiguous_gpu_contiguous(node):\n \"\"\"\n gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x)\n\n \"\"\"\n if isinstance(node.op, GpuContiguous):\n inp = node.inputs[0]\n if inp.owner and isinstance(inp.owner.op, GpuContiguous):\n return [inp]\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.extra_ops.CpuContiguous])\n@register_opt2([tensor.extra_ops.CpuContiguous], 'fast_compile')\ndef local_gpua_contiguous(op, context_name, inputs, outputs):\n return gpu_contiguous\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.Reshape])\n@register_opt2([tensor.Reshape], 'fast_compile')\ndef local_gpua_reshape(op, context_name, inputs, outputs):\n res = GpuReshape(op.ndim)\n return res\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.Rebroadcast])\n@register_opt2([tensor.Rebroadcast], 'fast_compile')\ndef local_gpua_rebroadcast(op, context_name, inputs, outputs):\n return op(as_gpuarray_variable(inputs[0], context_name))\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.Flatten])\n@register_opt2([tensor.Flatten], 'fast_compile')\ndef local_gpua_flatten(op, context_name, inputs, outputs):\n shp = []\n if op.outdim != 1:\n shp = [inputs[0].shape[i] for i in range(op.outdim - 1)]\n shp += [-1]\n res = GpuReshape(op.outdim)\n o = res(inputs[0], theano.tensor.as_tensor_variable(shp))\n return 
o\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.Elemwise])\n@register_opt2([tensor.Elemwise], 'fast_compile')\ndef local_gpua_elemwise(op, context_name, inputs, outputs):\n scal_op = op.scalar_op\n name = op.name\n if name:\n name = 'Gpu' + name\n if len(outputs) > 1:\n return\n\n have_cuda = False\n have_opencl = False\n if inputs and isinstance(inputs[0].type, GpuArrayType):\n kind = inputs[0].type.context.kind\n if kind.startswith(b'opencl'):\n have_opencl = True\n elif kind.startswith(b'cuda'):\n have_cuda = True\n convert = {Erfinv: gpu_erfinv,\n Erfcinv: gpu_erfcinv}\n\n if scal_op.__class__ in convert:\n scal_op = convert[scal_op.__class__]\n if have_opencl:\n _logger.warning(\n 'Function \"%s\" is not supported with OpenCL. Use \"device=cuda\" instead.' %\n scal_op)\n if not have_cuda:\n return None\n if not scal_op.supports_c_code(inputs, outputs):\n return\n res = GpuElemwise(scal_op, name=name,\n inplace_pattern=copy.copy(op.inplace_pattern),\n nfunc_spec=op.nfunc_spec)\n\n # If the elemwise operation is a pow, casts might be required on the\n # inputs and or outputs because only the (float, float)->float and\n # (double, double)->double cases are implemented at the moment.\n if isinstance(op.scalar_op, Pow):\n\n # Only transfer the computation on the gpu if the output dtype is\n # floating point. Else, give up on the transfer to the gpu.\n out_dtype = outputs[0].dtype\n if out_dtype not in ['float16', 'float32', 'float64']:\n return\n\n # Transfer the inputs on the GPU and cast them to the right dtype.\n new_inputs = []\n for inp in inputs:\n if inp.dtype != out_dtype:\n gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))\n new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp, context_name)))\n else:\n new_inputs.append(as_gpuarray_variable(inp, context_name))\n\n # Perform the exponent on the gpu and transfer the output back to the\n # cpu.\n gpu_output = res(*new_inputs)\n return [gpu_output]\n elif op.scalar_op in (scalar.add, scalar.mul):\n try:\n return [split_inputs(inputs, max_inputs_to_GpuElemwise(outputs), res)]\n except ValueError:\n return False\n else:\n return res\n\n\ndef split_inputs(inputs, max_nb_inputs, op):\n \"\"\"\n For some ops like add and mul, a large number of inputs can make nvcc fail\n compilation of our current code. 
We don't want nodes in the graph that can't\n execute, as this breaks DebugMode.\n\n This should not happen for other GpuElemwise ops, as only the fusion\n optimization can generate ops with too many inputs, and it checks for that.\n\n Parameters\n ----------\n inputs: List of theano variables.\n List of inputs to node.\n max_nb_inputs: int\n Maximum number of inputs the node can handle without\n compilation failing.\n op : Theano operator instance.\n Operator that should be used to rebuild the computation graph with a\n smaller number of inputs per node.\n \"\"\"\n if max_nb_inputs <= 1 and len(inputs) > 1:\n raise ValueError(\"Can not split nodes because inputs' dimensionality and/or\"\n \" number of outputs is too large\")\n\n while len(inputs) > max_nb_inputs:\n inner_ops = []\n for i in range(0, len(inputs), max_nb_inputs):\n inner_ops.append(op(*inputs[i: i + max_nb_inputs]))\n inputs = inner_ops\n\n return op(*inputs)\n\n\ngpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(\n GpuElemwise,\n max_inputs_to_GpuElemwise)\noptdb.register('gpua_elemwise_fusion',\n # 48.5 move to gpu\n # 48.6 specialize\n # 49 cpu fusion\n # 49.5 add destroy handler\n tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 49,\n 'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')\n\ninplace_gpu_elemwise_opt = tensor.opt.InplaceElemwiseOptimizer(\n GpuElemwise)\noptdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,\n 'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')\n\nregister_opt(tensor.opt.local_useless_elemwise)\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.DimShuffle])\n@register_opt2([tensor.DimShuffle], 'fast_compile')\ndef local_gpua_dimshuffle(op, context_name, inputs, outputs):\n return GpuDimShuffle(op.input_broadcastable,\n op.new_order)\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.SpecifyShape])\n@register_opt2([tensor.SpecifyShape], 'fast_compile')\ndef local_gpua_specifyShape(op, context_name, inputs, outputs):\n if isinstance(inputs[0].type, GpuArrayType):\n return\n return local_gpua_specifyShape_graph(op, context_name, inputs, outputs)\n\n\n@register_opt2([tensor.SpecifyShape], 'fast_compile')\ndef local_gpua_specifyShape_graph(op, context_name, inputs, outputs):\n inp = [as_gpuarray_variable(inputs[0], context_name)]\n inp += inputs[1:]\n return tensor.specify_shape(*inp)\n\n\n@register_opt('fast_compile')\n@op_lifter([theano.compile.ops.Shape])\ndef local_gpua_shape(op, context_name, inputs, outputs):\n # op_lifter will call this opt too frequently as the output is\n # always on the CPU.\n if isinstance(inputs[0].type, GpuArrayType):\n return\n return local_gpua_shape_graph(op, context_name, inputs, outputs)\n\n\n@register_opt2([tensor.compile.ops.Shape], 'fast_compile')\ndef local_gpua_shape_graph(op, context_name, inputs, outputs):\n return [as_gpuarray_variable(inputs[0], context_name).shape]\n\n\ndef gpu_print_wrapper(op, cnda):\n op.old_op.global_fn(op.old_op, np.asarray(cnda))\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.printing.Print])\n@register_opt2([tensor.printing.Print], 'fast_compile')\ndef local_gpua_print_op(op, context_name, inputs, outputs):\n x, = inputs\n with inherit_stack_trace(outputs):\n gpu_x = as_gpuarray_variable(x, context_name=context_name)\n new_op = op.__class__(global_fn=gpu_print_wrapper)\n new_op.old_op = op\n return new_op(gpu_x)\n\n\n@register_opt('fast_compile')\n@local_optimizer([PdbBreakpoint])\ndef local_gpu_pdbbreakpoint_op(node):\n if isinstance(node.op, PdbBreakpoint):\n\n old_inputs = 
node.inputs\n old_outputs = node.outputs\n\n new_inputs = node.inputs[:1]\n input_transfered = []\n\n # Go through the monitored variables, only transferring on GPU those\n # for which the input comes from the GPU or the output will be\n # transferred on the GPU.\n nb_monitored_vars = len(node.outputs)\n for i in range(nb_monitored_vars):\n\n inp = old_inputs[i + 1]\n out = old_outputs[i]\n\n input_is_from_gpu = (inp.owner and\n isinstance(inp.owner.op, HostFromGpu))\n output_goes_to_gpu = False\n for c in out.clients:\n if c == 'output':\n continue\n if isinstance(c[0].op, GpuFromHost):\n output_goes_to_gpu = True\n context_name = c[0].op.context_name\n break\n\n if input_is_from_gpu:\n # The op should be applied on the GPU version of the input\n new_inputs.append(inp.owner.inputs[0])\n input_transfered.append(True)\n\n elif output_goes_to_gpu:\n # The input should be transferred to the gpu\n new_inputs.append(as_gpuarray_variable(inp, context_name))\n input_transfered.append(True)\n\n else:\n # No transfer is required.\n new_inputs.append(inp)\n input_transfered.append(False)\n\n # Only continue the optimization if at least one input has been\n # transferred to the gpu\n if not any(input_transfered):\n return False\n\n # Apply the op on the new inputs\n with inherit_stack_trace(node.outputs):\n new_op_outputs = node.op(*new_inputs, return_list=True)\n\n # Propagate the transfer to the gpu through the outputs that require\n # it\n new_outputs = []\n for i in range(len(new_op_outputs)):\n if input_transfered[i]:\n new_outputs.append(new_op_outputs[i].transfer('cpu'))\n else:\n new_outputs.append(new_op_outputs[i])\n\n return new_outputs\n\n return False\n\n\n@register_opt('fast_compile')\n@op_lifter([IfElse])\n@register_opt2([IfElse], 'fast_compile')\ndef local_gpua_lazy_ifelse(op, context_name, inputs, outputs):\n if op.gpu:\n return\n c = inputs[0]\n inps = []\n falses = []\n # ifelse need corresponding true/false inputs variables to be of the same type.\n # But we can't rely on inputs to respect that, as GraphToGPU don't enforce that.\n # So we need to take care of this here.\n for v1, v2 in zip(inputs[1:1 + op.n_outs], inputs[1 + op.n_outs:]):\n if ((isinstance(v1.type, tensor.TensorType) and move_to_gpu(v1)) or\n isinstance(v1.type, GpuArrayType) or\n isinstance(v2.type, GpuArrayType)):\n inps.append(as_gpuarray_variable(v1, context_name))\n falses.append(as_gpuarray_variable(v2, context_name))\n else:\n inps.append(v1)\n falses.append(v2)\n inps.extend(falses)\n return IfElse(op.n_outs, gpu=True)(c, *inps, return_list=True)\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.Join])\n@register_opt2([tensor.Join], 'fast_compile')\ndef local_gpua_join(op, context_name, inputs, outputs):\n return gpu_join\n\n\n@register_opt('fast_compile')\n@local_optimizer([GpuJoin])\ndef local_gpua_join_1(node):\n # join of a single element\n if (isinstance(node.op, GpuJoin) and\n len(node.inputs) == 2):\n return [node.inputs[1]]\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.Split])\n@register_opt2([tensor.Split], 'fast_compile')\ndef local_gpua_split(op, context_name, inputs, outputs):\n # TODO use props\n return GpuSplit(op.len_splits)\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.Subtensor])\ndef local_gpua_subtensor(op, context_name, inputs, outputs):\n x = inputs[0]\n if (x.owner and isinstance(x.owner.op, HostFromGpu)):\n gpu_x = x.owner.inputs[0]\n if (gpu_x.owner and\n isinstance(gpu_x.owner.op, GpuFromHost) and\n # And it is a shared var or an input of the graph.\n not 
gpu_x.owner.inputs[0].owner):\n if len(x.clients) == 1:\n if any([n == 'output' or any([isinstance(v.type, GpuArrayType)\n for v in n.inputs + n.outputs])\n for n, _ in outputs[0].clients]):\n return\n else:\n return [gpu_x.owner.op(outputs[0]).transfer('cpu')]\n\n return GpuSubtensor(op.idx_list)\n\n\n@register_opt2([tensor.Subtensor], 'fast_compile')\ndef local_gpua_subtensor_graph(op, context_name, inputs, outputs):\n # We need different code here, as the condition is different because the\n # inputs aren't the same.\n x = inputs[0]\n # We don't want to move the subtensor to the GPU if the input is\n # on the CPU and the only client of the CPU node is this\n # subtensor. This allows a smaller transfer.\n\n if (x.owner and isinstance(x.owner.op, GpuFromHost)):\n cpu_x = x.owner.inputs[0]\n # And it is a shared var or an input of the graph.\n # and is used by only 1 node.\n # x is in the new graph, so we can't test its number of clients.\n if not cpu_x.owner and len(cpu_x.clients) == 1:\n c = outputs[0].clients\n # If the subtensor has only 1 client, do it on the CPU.\n # We let the other optimizations take care of whether to move\n # the next node or not.\n if len(c) == 1:\n return\n return GpuSubtensor(op.idx_list)\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.IncSubtensor])\n@register_opt2([tensor.IncSubtensor], 'fast_compile')\ndef local_gpua_inc_subtensor(op, context_name, inputs, outputs):\n op = GpuIncSubtensor(op.idx_list, op.inplace,\n op.set_instead_of_inc,\n op.destroyhandler_tolerate_aliased)\n ret = op(*inputs)\n val = getattr(outputs[0].tag, 'nan_guard_mode_check', True)\n ret.tag.nan_guard_mode_check = val\n return ret\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.AdvancedSubtensor1])\n@register_opt2([tensor.AdvancedSubtensor1], 'fast_compile')\ndef local_gpua_advanced_subtensor1(op, context_name, inputs, outputs):\n return GpuAdvancedSubtensor1()\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.AdvancedSubtensor])\n@register_opt2([tensor.AdvancedSubtensor], 'fast_compile')\ndef local_gpua_advanced_subtensor(op, context_name, inputs, outputs):\n return GpuAdvancedSubtensor()\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.AdvancedBooleanSubtensor])\n@register_opt2([tensor.AdvancedBooleanSubtensor], 'fast_compile')\ndef local_gpua_advanced_boolean_subtensor(op, context_name, inputs, outputs):\n return GpuAdvancedBooleanSubtensor()\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.AdvancedIncSubtensor1])\n@register_opt2([tensor.AdvancedIncSubtensor1], 'fast_compile')\ndef local_gpua_advanced_incsubtensor1(op, context_name, inputs, outputs):\n x, y, ilist = inputs\n\n set_instead_of_inc = op.set_instead_of_inc\n\n if (x.ndim == 1 and y.ndim == 0 and\n config.deterministic == 'default'):\n x = x.dimshuffle(0, 'x')\n y = y.dimshuffle('x', 'x')\n ret = GpuAdvancedIncSubtensor1_dev20(\n set_instead_of_inc=set_instead_of_inc)(x, y, ilist)\n ret = GpuDimShuffle(ret.type.broadcastable, [0])(ret)\n return ret\n elif (x.ndim != 2 or y.ndim != 2 or\n config.deterministic == 'more'):\n return GpuAdvancedIncSubtensor1(\n set_instead_of_inc=set_instead_of_inc)\n else:\n return GpuAdvancedIncSubtensor1_dev20(\n set_instead_of_inc=set_instead_of_inc)\n\n\n# Do not register this optimization for now, as it slows down the\n# execution by a lot in important cases.\n# @register_opt('fast_compile')\n# @op_lifter([tensor.AdvancedIncSubtensor])\n# @register_opt2([tensor.AdvancedIncSubtensor], 'fast_compile')\ndef local_gpua_advanced_incsubtensor(op, context_name, inputs, 
outputs):\n if not op.set_instead_of_inc:\n return GpuAdvancedIncSubtensor()\n else:\n return False\n\n\n# Do not register this optimization for now, as it slows down the\n# execution by a lot in important cases.\n# @register_opt('fast_compile')\n# @op_lifter([tensor.AdvancedBooleanIncSubtensor])\n# @register_opt2([tensor.AdvancedBooleanIncSubtensor], 'fast_compile')\ndef local_gpua_advanced_boolean_incsubtensor(op, context_name, inputs, outputs):\n # GpuAdvancedIncSubtensor only works with a single boolean mask,\n # but not with fancy combinations.\n if not op.set_instead_of_inc and len(inputs) == 3:\n return GpuAdvancedBooleanIncSubtensor()\n else:\n return False\n\n\n@register_inplace()\n@local_optimizer([GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20])\ndef local_advincsub1_gpua_inplace(node):\n if isinstance(node.op, (GpuAdvancedIncSubtensor1,\n GpuAdvancedIncSubtensor1_dev20)):\n if not node.op.inplace:\n return [node.op.clone_inplace()(*node.inputs)]\n\n\n# AllocDiag\n@register_opt('fast_compile')\n@op_lifter([tensor.AllocDiag])\n@register_opt2([theano.tensor.AllocDiag], 'fast_compile')\ndef local_gpu_alloc_diag(op, context_name, inputs, outputs):\n if outputs[0].ndim != 2:\n # AllocDiag only supports 2d output\n return False\n return GpuAllocDiag(offset=op.offset)\n\n\n# ExtractDiag\n@register_opt('fast_compile')\n@op_lifter([tensor.ExtractDiag])\n@register_opt2([theano.tensor.ExtractDiag], 'fast_compile')\ndef local_gpu_extract_diag(op, context_name, inputs, outputs):\n return GpuExtractDiag(offset=op.offset, axis1=op.axis1, axis2=op.axis2, view=op.view)\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod])\n@register_opt2([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod], 'fast_compile')\ndef local_gpua_careduce(op, context_name, inputs, outputs):\n if isinstance(op.scalar_op, (scalar.Add, scalar.Mul,\n scalar.Maximum, scalar.Minimum)):\n\n ctx = get_context(context_name)\n if ctx.kind == b'opencl':\n op2 = GpuCAReduceCPY\n if op.scalar_op not in [scalar.add, scalar.mul]:\n # We don't support yet all reduction with cpy code.\n return\n elif ctx.kind == b'cuda':\n op2 = GpuCAReduceCuda\n else:\n return False\n x, = inputs\n idtype = x.dtype\n adtype = getattr(op, 'acc_dtype', None)\n odtype = getattr(op, 'dtype', outputs[0].dtype)\n\n # Force accumulator to float32 for float32 inputs since tree\n # reduction will not loose as much precision as linear\n # accumulation and float64 is much slower on GPU.\n if idtype == 'float32' and odtype == 'float32':\n adtype = 'float32'\n\n greduce = op2(\n op.scalar_op, axis=op.axis,\n dtype=odtype,\n acc_dtype=adtype)\n with inherit_stack_trace(outputs):\n gvar = greduce(x)\n # We need to have the make node called, otherwise the mask can\n # be None\n if (op2 is GpuCAReduceCPY or\n gvar.owner.op.supports_c_code([\n as_gpuarray_variable(x, context_name)])):\n return greduce\n else:\n # Try to make a simpler pattern based on reshaping\n # The principle is that if two adjacent dimensions have\n # the same value in the reduce_mask, then we can reshape\n # to make them a single dimension, do the reduction, and\n # then reshape to get them back.\n\n if op.axis is None:\n reduce_mask = [1] * x.type.ndim\n else:\n reduce_mask = [0] * x.type.ndim\n for a in op.axis:\n assert reduce_mask[a] == 0\n reduce_mask[a] = 1\n\n new_in_shp = [shape_i(x, 0)]\n new_mask = [reduce_mask[0]]\n for i in xrange(1, x.type.ndim):\n if reduce_mask[i] == reduce_mask[i - 1]:\n new_in_shp[-1] *= shape_i(x, i)\n else:\n 
new_mask.append(reduce_mask[i])\n new_in_shp.append(shape_i(x, i))\n new_axis = []\n for idx, m in enumerate(new_mask):\n if m == 1:\n new_axis.append(idx)\n greduce = op2(\n op.scalar_op,\n axis=new_axis, reduce_mask=new_mask,\n dtype=odtype,\n acc_dtype=adtype)\n with inherit_stack_trace(outputs):\n reshaped_x = x.reshape(tensor.stack(new_in_shp))\n gpu_reshaped_x = as_gpuarray_variable(reshaped_x, context_name)\n # We need to have the make node called, otherwise the mask can\n # be None\n gvar = greduce(gpu_reshaped_x)\n reshaped_gpu_inputs = [gpu_reshaped_x]\n if greduce.supports_c_code(reshaped_gpu_inputs):\n reduce_reshaped_x = greduce(gpu_reshaped_x)\n\n if reduce_reshaped_x.ndim != outputs[0].ndim:\n out_shp = []\n for i in range(x.ndim):\n if i not in op.axis:\n out_shp.append(shape_i(x, i))\n unreshaped_reduce = GpuReshape(len(out_shp))(\n reduce_reshaped_x,\n tensor.stack(out_shp))\n else:\n unreshaped_reduce = reduce_reshaped_x\n return [unreshaped_reduce]\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv])\n@register_opt2([tensor.blas.Gemv], 'fast_compile')\ndef local_gpua_gemv(op, context_name, inputs, outputs):\n if inputs[0].dtype == 'float16':\n # Use gemm implementation as cublas gemv don't support float16\n return gpugemm_no_inplace(inputs[0][:, None],\n inputs[1],\n inputs[2],\n inputs[3][:, None],\n inputs[4]).dimshuffle(0)\n\n if inputs[0].dtype not in ['float32', 'float64']:\n return\n if op.inplace:\n return gpugemv_inplace\n else:\n return gpugemv_no_inplace\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.blas.Gemm])\n@register_opt2([tensor.blas.Gemm], 'fast_compile')\ndef local_gpua_gemm(op, context_name, inputs, outputs):\n if inputs[0].dtype not in ['float16', 'float32', 'float64']:\n return\n if op.inplace:\n return gpugemm_inplace\n else:\n return gpugemm_no_inplace\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.blas.BatchedDot])\n@register_opt2([tensor.blas.BatchedDot], 'fast_compile')\ndef local_gpua_gemmbatch(op, context_name, inputs, outputs):\n if inputs[0].dtype not in ['float16', 'float32', 'float64']:\n return\n with inherit_stack_trace(outputs):\n a, b = inputs\n # Since GpuGemmBatch only supports 3D inputs and output,\n # we need to add broadcastable dims to the inputs, and drop\n # them from outputs\n output_dims = [0, 1, 2]\n if a.ndim == 2:\n a = GpuDimShuffle(a.broadcastable, (0, 'x', 1))(a)\n del output_dims[1]\n if b.ndim == 2:\n b = GpuDimShuffle(b.broadcastable, (0, 1, 'x'))(b)\n del output_dims[-1]\n # In case of mismatched dtypes, we also have to upcast\n out_dtype = outputs[0].dtype\n if a.dtype != out_dtype or b.dtype != out_dtype:\n gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype)))\n if a.dtype != out_dtype:\n a = gpu_cast_op(a)\n if b.dtype != out_dtype:\n b = gpu_cast_op(b)\n\n c = GpuAllocEmpty(out_dtype, context_name)(\n a.shape[0], a.shape[1], b.shape[2])\n out = gpugemmbatch_no_inplace(c, np.asarray(1.0, dtype=out_dtype),\n a, b, np.asarray(0.0, dtype=out_dtype))\n if len(output_dims) != 3:\n out = GpuDimShuffle(out.broadcastable, output_dims)(out)\n return out\n\n\n@register_opt()\n@alpha_merge(GpuGemm, alpha_in=1, beta_in=4)\ndef local_gpua_gemm_alpha_merge(node, *inputs):\n return [gpugemm_no_inplace(*inputs)]\n\n\n@register_opt()\n@output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0)\ndef local_gpua_gemm_output_merge(node, *inputs):\n return [gpugemm_no_inplace(*inputs)]\n\n\n@register_opt()\n@alpha_merge(GpuGemmBatch, alpha_in=1, beta_in=4)\ndef 
local_gpua_gemmbatch_alpha_merge(node, *inputs):\n return [gpugemmbatch_no_inplace(*inputs)]\n\n\n@register_opt()\n@output_merge(GpuGemmBatch, alpha_in=1, beta_in=4, out_in=0)\ndef local_gpua_gemmbatch_output_merge(node, *inputs):\n return [gpugemmbatch_no_inplace(*inputs)]\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer])\n@register_opt2([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer], 'fast_compile')\ndef local_gpua_ger(op, context_name, inputs, outputs):\n if inputs[0].dtype not in ['float32', 'float64']:\n return\n return GpuGer(inplace=op.destructive)\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.blas.Dot22])\n@register_opt2([tensor.blas.Dot22], 'fast_compile')\ndef local_gpua_dot22(op, context_name, inputs, outputs):\n return gpu_dot22\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.blas.Dot22Scalar])\n@register_opt2([tensor.blas.Dot22Scalar], 'fast_compile')\ndef local_gpua_dot22scalar(op, context_name, inputs, outputs):\n with inherit_stack_trace(outputs):\n x, y, a = inputs\n x = as_gpuarray_variable(x, context_name)\n y = as_gpuarray_variable(y, context_name)\n z = GpuAllocEmpty(x.dtype, context_name)(x.shape[0], y.shape[1])\n return [gpugemm_no_inplace(z, a, x, y, 0)]\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.basic.Eye])\n@register_opt2([tensor.basic.Eye], 'fast_compile')\ndef local_gpua_eye(op, context_name, inputs, outputs):\n return GpuEye(dtype=op.dtype, context_name=context_name)\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias])\n@register_opt2([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], 'fast_compile')\ndef local_gpua_crossentropysoftmaxargmax1hotwithbias(op, context_name, inputs, outputs):\n return gpu_crossentropy_softmax_argmax_1hot_with_bias\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx])\n@register_opt2([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], 'fast_compile')\ndef local_gpua_crossentropysoftmax1hotwithbiasdx(op, context_name, inputs, outputs):\n return gpu_crossentropy_softmax_1hot_with_bias_dx\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.nnet.Softmax])\n@register_opt2([tensor.nnet.Softmax], 'fast_compile')\ndef local_gpua_softmax(op, context_name, inputs, outputs):\n return gpu_softmax\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.nnet.SoftmaxWithBias])\n@register_opt2([tensor.nnet.SoftmaxWithBias], 'fast_compile')\ndef local_gpua_softmaxwithbias(op, context_name, inputs, outputs):\n return gpu_softmax_with_bias\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.nnet.CrossentropyCategorical1Hot])\n@register_opt2([tensor.nnet.CrossentropyCategorical1Hot], 'fast_compile')\ndef local_gpu_crossentropycategorical1hot(op, context_name, inputs, outputs):\n # There is no corresponding GPU Op, but we can express it as:\n # coding, one_of_n = inputs\n # -log(coding[arange(coding.shape[0]), one_of_n])\n coding, one_of_n = inputs\n idx0 = theano.tensor.arange(shape_i(coding, 0))\n return [gpu_neg(gpu_log(coding[idx0, one_of_n]))]\n\n\n@register_opt('fast_compile')\n@op_lifter([tensor.nnet.CrossentropyCategorical1HotGrad])\n@register_opt2([tensor.nnet.CrossentropyCategorical1HotGrad], 'fast_compile')\ndef local_gpu_crossentropycategorical1hotgrad(op, context_name, inputs, outputs):\n # There is no corresponding GPU Op, but we can express it as:\n # gy, coding, one_of_n = inputs\n # gcoding = zeros_like(coding)\n # 
gcoding[arange(coding.shape[0]), one_of_n] = -g / (\n # coding[arange(coding.shape[0]), one_of_n])\n gy, coding, one_of_n = inputs\n idx0 = theano.tensor.arange(shape_i(coding, 0))\n z = GpuAlloc(context_name, memset_0=True)(\n as_gpuarray_variable(np.zeros((), dtype=coding.dtype), context_name),\n *[shape_i(coding, i) for i in xrange(coding.ndim)])\n gcoding = tensor.set_subtensor(\n z[idx0, one_of_n],\n gpu_neg(gpu_true_div(gy, coding[idx0, one_of_n])))\n return [gcoding.transfer(context_name)]\n\n\n@register_opt('fast_compile')\n@op_lifter([theano.tensor.opt.Assert])\ndef local_gpua_assert(op, context_name, inputs, outputs):\n if isinstance(inputs[0].type, GpuArrayType):\n return\n return local_gpua_assert_graph(op, context_name, inputs, outputs)\n\n\n@register_opt2([theano.tensor.opt.Assert], 'fast_compile')\ndef local_gpua_assert_graph(op, context_name, inputs, outputs):\n return [op(as_gpuarray_variable(inputs[0], context_name),\n *inputs[1:])]\n\n\n@register_opt('fast_compile')\n@op_lifter([ConvOp])\n@register_opt2([ConvOp], 'fast_compile')\ndef local_gpua_error_convop(op, context_name, inputs, outputs):\n assert False, \"\"\"\nConvOp does not work with the gpuarray backend.\n\nUse the new convolution interface to have GPU convolution working:\ntheano.tensor.nnet.conv2d()\n\"\"\"\n\n\n@register_opt('fast_compile')\n@op_lifter([SparseBlockGemv])\n@register_opt2([SparseBlockGemv], 'fast_compile')\ndef local_gpua_sparseblockgemv(op, context_name, inputs, outputs):\n if inputs[0].dtype == 'float16':\n return\n if op.inplace:\n return gpu_sparse_block_gemv_inplace\n else:\n return gpu_sparse_block_gemv\n\n\n@register_opt('fast_compile')\n@op_lifter([SparseBlockOuter])\n@register_opt2([SparseBlockOuter], 'fast_compile')\ndef local_gpua_sparseblockouter(op, context_name, inputs, outputs):\n if inputs[0].dtype == 'float16':\n return\n if op.inplace:\n return gpu_sparse_block_outer_inplace\n else:\n return gpu_sparse_block_outer\n\n\n@register_inplace()\n@local_optimizer([GpuSparseBlockGemv], inplace=True)\ndef local_inplace_sparseblockgemv(node):\n if isinstance(node.op, GpuSparseBlockGemv) and not node.op.inplace:\n return [gpu_sparse_block_gemv_inplace(*node.inputs)]\n\n\n@register_inplace()\n@local_optimizer([GpuSparseBlockOuter], inplace=True)\ndef local_inplace_sparseblockouter(node):\n if isinstance(node.op, GpuSparseBlockOuter) and not node.op.inplace:\n return [GpuSparseBlockOuter(inplace=True)(*node.inputs)]\n\n\n# Move to Gpu optimization\n@local_optimizer([GpuFromHost,\n AbstractConv2d,\n AbstractConv2d_gradWeights,\n AbstractConv2d_gradInputs,\n AbstractConv3d,\n AbstractConv3d_gradWeights,\n AbstractConv3d_gradInputs])\ndef local_conv_gpu_conv(node):\n \"\"\"\n gpu_from_host(AbstractConv) -> AbstractConv(gpu_from_host)\n\n AbstractConv(host_from_gpu) -> host_from_gpu(AbstractConv)\n \"\"\"\n if isinstance(node.op, GpuFromHost):\n host_input = node.inputs[0]\n if host_input.owner and isinstance(host_input.owner.op,\n BaseAbstractConv):\n\n conv = host_input.owner.op\n inps = list(host_input.owner.inputs)\n ctx = infer_context_name(*inps)\n inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)\n inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)\n out = conv(*inps)\n # out is on the GPU because both inputs are.\n out = theano.tensor.patternbroadcast(out,\n node.outputs[0].broadcastable)\n return [out]\n\n if isinstance(node.op, BaseAbstractConv):\n # conv(host_from_gpu) -> host_from_gpu(gpu_conv)\n inp1 = node.inputs[0]\n inp2 = node.inputs[1]\n if 
((isinstance(inp1.type, GpuArrayType) and\n isinstance(inp2.type, GpuArrayType))):\n # Both inputs are already directly on the GPU, nothing to do\n return\n\n inp1_on_gpu = (isinstance(inp1.type, GpuArrayType) or\n (inp1.owner and isinstance(inp1.owner.op, HostFromGpu)))\n inp2_on_gpu = (isinstance(inp2.type, GpuArrayType) or\n (inp2.owner and isinstance(inp2.owner.op, HostFromGpu)))\n\n if inp1_on_gpu or inp2_on_gpu:\n conv = node.op\n inps = list(node.inputs)\n ctx = infer_context_name(*inps)\n inps[0] = as_gpuarray_variable(inps[0], context_name=ctx)\n inps[1] = as_gpuarray_variable(inps[1], context_name=ctx)\n out = conv(*inps)\n # out is on the GPU because both inputs are.\n out = theano.tensor.patternbroadcast(\n out,\n node.outputs[0].broadcastable)\n # If the original output was on CPU, we have to transfer it\n if isinstance(node.outputs[0].type, tensor.TensorType):\n return [tensor.as_tensor_variable(out)]\n else:\n return [out]\n\n\nregister_opt()(local_conv_gpu_conv)\n\n\n# CorrMM opt\n@local_optimizer([AbstractConv2d])\ndef local_abstractconv_gemm(node):\n if not isinstance(node.op, AbstractConv2d):\n return None\n img, kern = node.inputs\n if (not isinstance(img.type, GpuArrayType) or\n not isinstance(kern.type, GpuArrayType)):\n return None\n ctx = infer_context_name(img, kern)\n\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n unshared = node.op.unshared\n\n flip = (slice(None),) * (kern.ndim - 2) + \\\n (slice(None, None, -1),) * 2\n kern_axes = (1, 0) + tuple(i for i in range(2, kern.ndim))\n if ((border_mode == 'full') and (subsample == (1, 1)) and num_groups == 1 and not unshared):\n if not node.op.filter_flip:\n kern = kern[flip]\n # need to dimshuffle the kernel for full convolution\n kern = kern.dimshuffle(kern_axes)\n # call GpuCorrMM_gradInputs\n rval = GpuCorrMM_gradInputs('valid',\n subsample,\n filter_dilation)(\n gpu_contiguous(kern), gpu_contiguous(img))\n else:\n # need to flip the kernel if necessary\n if node.op.filter_flip:\n kern = kern[flip]\n # By default use GpuCorrMM\n rval = GpuCorrMM(border_mode,\n subsample,\n filter_dilation,\n num_groups,\n unshared)(gpu_contiguous(img),\n gpu_contiguous(kern))\n\n # call GpuCorrMM_gradWeights if good\n # (the latter is faster if batchsize * kernelHeight * kernelWidth\n # is larger than inputChannels * outputHeight * outputWidth.\n # GpuConv does not always store information on the batchsize and\n # channels, though, so we only use what information we have.)\n if ((subsample == (1, 1)) and (filter_dilation == (1, 1)) and\n (node.op.imshp is not None) and\n (None not in node.op.imshp[-2:]) and\n (node.op.kshp is not None) and\n (None not in node.op.kshp) and\n border_mode != \"half\" and\n num_groups == 1 and\n not unshared):\n # we know the kernel and output size\n prod1 = node.op.kshp[0] * node.op.kshp[-3]\n prod2 = ((node.op.imshp[-2] - node.op.kshp[0] + 1) *\n (node.op.imshp[-1] - node.op.kshp[-3] + 1))\n if (None not in node.op.imshp[:1]):\n # we also know batchsize and input channels\n prod1 *= node.op.imshp[0]\n prod2 *= node.op.imshp[1]\n # compare to decide\n if prod1 > prod2:\n rval = GpuCorrMM_gradWeights(border_mode,\n subsample,\n filter_dilation)(\n gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),\n gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))\n # (we need to wrap the result in as_gpuarray_variable,\n # because we are not allowed to replace a GpuArray with\n # a DimShuffle instance in a graph optimization)\n 
rval = as_gpuarray_variable(\n rval.dimshuffle(1, 0, 2, 3),\n context_name=ctx)\n return [rval]\n\n\n# CorrMM opt used for Meta-optimizer\n@local_optimizer([AbstractConv2d])\ndef local_abstractconv_gemm_def(node):\n if not isinstance(node.op, AbstractConv2d):\n return None\n img, kern = node.inputs\n if (not isinstance(img.type, GpuArrayType) or\n not isinstance(kern.type, GpuArrayType)):\n return None\n\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n unshared = node.op.unshared\n\n if node.op.filter_flip:\n flip = (slice(None),) * (kern.ndim - 2) + \\\n (slice(None, None, -1),) * 2\n kern = kern[flip]\n rval = GpuCorrMM(border_mode,\n subsample,\n filter_dilation,\n num_groups,\n unshared)(gpu_contiguous(img),\n gpu_contiguous(kern))\n return [rval]\n\n\n@local_optimizer([AbstractConv2d])\ndef local_abstractconv_gemm_alt(node):\n if not isinstance(node.op, AbstractConv2d):\n return None\n img, kern = node.inputs\n if (not isinstance(img.type, GpuArrayType) or\n not isinstance(kern.type, GpuArrayType)):\n return None\n ctx = infer_context_name(img, kern)\n\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n unshared = node.op.unshared\n\n if border_mode == 'full' and subsample == (1, 1) and num_groups == 1 and not unshared:\n if not node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1]\n\n kern = kern.dimshuffle(1, 0, 2, 3)\n rval = GpuCorrMM_gradInputs('valid',\n subsample,\n filter_dilation)(\n gpu_contiguous(kern), gpu_contiguous(img))\n\n elif (border_mode == 'valid' and subsample == (1, 1) and filter_dilation == (1, 1) and\n num_groups == 1 and not unshared):\n if node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1]\n\n rval = GpuCorrMM_gradWeights(border_mode,\n subsample,\n filter_dilation)(\n gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),\n gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))\n rval = as_gpuarray_variable(rval.dimshuffle(1, 0, 2, 3),\n context_name=ctx)\n else:\n return None\n\n return [rval]\n\n\n@local_optimizer([AbstractConv3d])\ndef local_abstractconv3d_gemm(node):\n if not isinstance(node.op, AbstractConv3d):\n return None\n img, kern = node.inputs\n if (not isinstance(img.type, GpuArrayType) or\n not isinstance(kern.type, GpuArrayType)):\n return None\n ctx = infer_context_name(img, kern)\n\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n if ((border_mode == 'full') and (subsample == (1, 1, 1)) and num_groups == 1):\n if not node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1, ::-1]\n # need to dimshuffle the kernel for full convolution\n kern = kern.dimshuffle(1, 0, 2, 3, 4)\n # call GpuCorr3dMM_gradInputs\n rval = GpuCorr3dMM_gradInputs('valid',\n subsample,\n filter_dilation)(\n gpu_contiguous(kern), gpu_contiguous(img))\n else:\n # need to flip the kernel if necessary\n if node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1, ::-1]\n # By default use GpuCorr3dMM\n rval = GpuCorr3dMM(border_mode,\n subsample,\n filter_dilation,\n num_groups)(gpu_contiguous(img),\n gpu_contiguous(kern))\n\n # call GpuCorr3dMM_gradWeights if good\n # (the latter is faster if batchsize * kernelHeight * kernelWidth * kernelDepth\n # is larger than inputChannels * outputHeight * outputWidth * outputDepth.\n # GpuConv does not always store information on the batchsize and\n # channels, though, so we 
only use what information we have.)\n if ((subsample == (1, 1, 1)) and (filter_dilation == (1, 1, 1)) and\n (node.op.imshp is not None) and\n (None not in node.op.imshp[-3:]) and\n (node.op.kshp is not None) and\n (None not in node.op.kshp) and\n border_mode != \"half\" and\n num_groups == 1):\n # we know the kernel and output size\n prod1 = node.op.kshp[0] * node.op.kshp[1] * node.op.kshp[2]\n prod2 = ((node.op.imshp[-3] - node.op.kshp[0] + 1) *\n (node.op.imshp[-2] - node.op.kshp[1] + 1) *\n (node.op.imshp[-1] - node.op.kshp[2] + 1))\n if (None not in node.op.imshp[:1]):\n # we also know batchsize and input channels\n prod1 *= node.op.imshp[0]\n prod2 *= node.op.imshp[1]\n # compare to decide\n if prod1 > prod2:\n rval = GpuCorr3dMM_gradWeights(border_mode,\n subsample,\n filter_dilation)(\n gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),\n gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))\n # (we need to wrap the result in as_gpuarray_variable,\n # because we are not allowed to replace a GpuArray with\n # a DimShuffle instance in a graph optimization)\n rval = as_gpuarray_variable(\n rval.dimshuffle(1, 0, 2, 3, 4),\n context_name=ctx)\n return [rval]\n\n\n# Corr3dMM opt used for Meta-optimizer\n@local_optimizer([AbstractConv3d])\ndef local_abstractconv3d_gemm_def(node):\n if not isinstance(node.op, AbstractConv3d):\n return None\n img, kern = node.inputs\n if (not isinstance(img.type, GpuArrayType) or\n not isinstance(kern.type, GpuArrayType)):\n return None\n\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n if node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1, ::-1]\n # By default use GpuCorr3dMM\n rval = GpuCorr3dMM(border_mode,\n subsample,\n filter_dilation,\n node.op.num_groups)(gpu_contiguous(img),\n gpu_contiguous(kern))\n return [rval]\n\n\n@local_optimizer([AbstractConv3d])\ndef local_abstractconv3d_alt(node):\n if not isinstance(node.op, AbstractConv3d):\n return None\n img, kern = node.inputs\n if (not isinstance(img.type, GpuArrayType) or\n not isinstance(kern.type, GpuArrayType)):\n return None\n ctx = infer_context_name(img, kern)\n\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n\n if((border_mode == 'full') and (subsample == (1, 1, 1)) and\n (num_groups == 1)):\n if not node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1, ::-1]\n kern = kern.dimshuffle(1, 0, 2, 3, 4)\n rval = GpuCorr3dMM_gradInputs('valid',\n subsample,\n filter_dilation)(\n gpu_contiguous(kern), gpu_contiguous(img))\n\n elif(subsample == (1, 1, 1) and filter_dilation == (1, 1, 1) and\n border_mode == 'valid' and num_groups == 1):\n if node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1, ::-1]\n rval = GpuCorr3dMM_gradWeights(border_mode,\n subsample,\n filter_dilation)(\n gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),\n gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))\n rval = as_gpuarray_variable(rval.dimshuffle(1, 0, 2, 3, 4),\n context_name=ctx)\n else:\n return None\n return [rval]\n\n\n@local_optimizer([AbstractConv3d])\ndef local_abstractconv3d2d(node):\n if not isinstance(node.op, AbstractConv3d):\n return None\n img, kern = node.inputs\n if (not isinstance(img.type, GpuArrayType) or\n not isinstance(kern.type, GpuArrayType)):\n return None\n\n ctx = infer_context_name(img, kern)\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n\n 
if(subsample == (1, 1, 1) and filter_dilation == (1, 1, 1) and\n num_groups == 1):\n reorder_array = [0, 2, 1, 3, 4]\n rval = conv3d2d.conv3d(gpu_contiguous(img.dimshuffle(*reorder_array)),\n gpu_contiguous(kern.dimshuffle(*reorder_array)),\n [node.op.imshp[i] for i in reorder_array],\n [node.op.kshp[i] for i in reorder_array],\n border_mode=border_mode)\n rval = as_gpuarray_variable(rval.dimshuffle(*reorder_array),\n context_name=ctx)\n\n return [rval]\n else:\n return None\n\n\n@local_optimizer([AbstractConv2d_gradWeights])\ndef local_abstractconv_gradweights_gemm(node):\n if not isinstance(node.op, AbstractConv2d_gradWeights):\n return None\n img, topgrad, shape = node.inputs\n if not isinstance(img.type, GpuArrayType) or \\\n not isinstance(topgrad.type, GpuArrayType):\n return None\n ctx = infer_context_name(img, topgrad)\n\n rval = GpuCorrMM_gradWeights(border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n filter_dilation=node.op.filter_dilation,\n num_groups=node.op.num_groups,\n unshared=node.op.unshared)(\n gpu_contiguous(img), gpu_contiguous(topgrad), shape)\n flip = (slice(None),) * (rval.ndim - 2) + \\\n (slice(None, None, -1),) * 2\n if node.op.filter_flip:\n rval = rval[flip]\n rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)\n rval = as_gpuarray_variable(rval, context_name=ctx)\n return [rval]\n\n\n@local_optimizer([AbstractConv2d_gradWeights])\ndef local_abstractconv_gemm_gradweights_alt(node):\n if not isinstance(node.op, AbstractConv2d_gradWeights):\n return None\n img, topgrad, shape = node.inputs\n if not isinstance(img.type, GpuArrayType) or \\\n not isinstance(topgrad.type, GpuArrayType):\n return None\n ctx = infer_context_name(img, topgrad)\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n unshared = node.op.unshared\n\n if(border_mode == 'valid' and subsample == (1, 1) and filter_dilation == (1, 1) and\n num_groups == 1 and not unshared):\n rval = GpuCorrMM(border_mode,\n subsample,\n filter_dilation)(\n gpu_contiguous(img.dimshuffle(1, 0, 2, 3)),\n gpu_contiguous(topgrad.dimshuffle(1, 0, 2, 3)))\n\n if node.op.filter_flip:\n rval = rval[:, :, ::-1, ::-1]\n\n rval = rval.dimshuffle(1, 0, 2, 3)\n rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)\n rval = as_gpuarray_variable(rval, context_name=ctx)\n return [rval]\n else:\n return None\n\n\n@local_optimizer([AbstractConv3d_gradWeights])\ndef local_abstractconv3d_gemm_gradweights_alt(node):\n if not isinstance(node.op, AbstractConv3d_gradWeights):\n return None\n img, topgrad, shape = node.inputs\n if not isinstance(img.type, GpuArrayType) or \\\n not isinstance(topgrad.type, GpuArrayType):\n return None\n ctx = infer_context_name(img, topgrad)\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n\n if(border_mode == 'valid' and subsample == (1, 1, 1) and\n filter_dilation == (1, 1, 1) and num_groups == 1):\n rval = GpuCorr3dMM(border_mode,\n subsample,\n filter_dilation)(\n gpu_contiguous(img.dimshuffle(1, 0, 2, 3, 4)),\n gpu_contiguous(topgrad.dimshuffle(1, 0, 2, 3, 4)))\n\n if node.op.filter_flip:\n rval = rval[:, :, ::-1, ::-1, ::-1]\n\n rval = rval.dimshuffle(1, 0, 2, 3, 4)\n rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)\n rval = as_gpuarray_variable(rval, context_name=ctx)\n return [rval]\n else:\n return 
None\n\n\n@local_optimizer([AbstractConv3d_gradWeights])\ndef local_abstractconv3d_gradweights_gemm(node):\n if not isinstance(node.op, AbstractConv3d_gradWeights):\n return None\n img, topgrad, shape = node.inputs\n if not isinstance(img.type, GpuArrayType) or \\\n not isinstance(topgrad.type, GpuArrayType):\n return None\n ctx = infer_context_name(img, topgrad)\n\n rval = GpuCorr3dMM_gradWeights(border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n filter_dilation=node.op.filter_dilation,\n num_groups=node.op.num_groups)(\n gpu_contiguous(img), gpu_contiguous(topgrad), shape)\n if node.op.filter_flip:\n rval = rval[:, :, ::-1, ::-1, ::-1]\n rval = tensor.patternbroadcast(rval, node.outputs[0].broadcastable)\n rval = as_gpuarray_variable(rval, context_name=ctx)\n return [rval]\n\n\n@local_optimizer([AbstractConv2d_gradInputs])\ndef local_abstractconv_gradinputs_gemm(node):\n if not isinstance(node.op, AbstractConv2d_gradInputs):\n return None\n kern, topgrad, shape = node.inputs\n if not isinstance(kern.type, GpuArrayType) or \\\n not isinstance(topgrad.type, GpuArrayType):\n return None\n\n if node.op.filter_flip:\n flip = (slice(None),) * (kern.ndim - 2) + \\\n (slice(None, None, -1),) * 2\n kern = kern[flip]\n\n rval = GpuCorrMM_gradInputs(border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n filter_dilation=node.op.filter_dilation,\n num_groups=node.op.num_groups,\n unshared=node.op.unshared)(\n gpu_contiguous(kern), gpu_contiguous(topgrad), shape)\n return [rval]\n\n\n@local_optimizer([AbstractConv2d_gradInputs])\ndef local_abstractconv_gradinputs_gemm_alt(node):\n if not isinstance(node.op, AbstractConv2d_gradInputs):\n return None\n kern, topgrad, shape = node.inputs\n if not isinstance(kern.type, GpuArrayType) or \\\n not isinstance(topgrad.type, GpuArrayType):\n return None\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n unshared = node.op.unshared\n\n if border_mode == 'valid' and subsample == (1, 1) and num_groups == 1 and not unshared:\n if not node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1]\n\n rval = GpuCorrMM(border_mode='full',\n subsample=subsample,\n filter_dilation=filter_dilation)(\n gpu_contiguous(topgrad),\n gpu_contiguous(kern.dimshuffle(1, 0, 2, 3)))\n return [rval]\n else:\n return None\n\n\n@local_optimizer([AbstractConv3d_gradInputs])\ndef local_abstractconv3d_gradinputs_gemm(node):\n if not isinstance(node.op, AbstractConv3d_gradInputs):\n return None\n kern, topgrad, shape = node.inputs\n if not isinstance(kern.type, GpuArrayType) or \\\n not isinstance(topgrad.type, GpuArrayType):\n return None\n\n if node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1, ::-1]\n\n rval = GpuCorr3dMM_gradInputs(border_mode=node.op.border_mode,\n subsample=node.op.subsample,\n filter_dilation=node.op.filter_dilation,\n num_groups=node.op.num_groups)(\n gpu_contiguous(kern), gpu_contiguous(topgrad), shape)\n return [rval]\n\n\n@local_optimizer([AbstractConv3d_gradInputs])\ndef local_abstractconv3d_gradinputs_gemm_alt(node):\n if not isinstance(node.op, AbstractConv3d_gradInputs):\n return None\n kern, topgrad, shape = node.inputs\n if not isinstance(kern.type, GpuArrayType) or \\\n not isinstance(topgrad.type, GpuArrayType):\n return None\n border_mode = node.op.border_mode\n subsample = node.op.subsample\n filter_dilation = node.op.filter_dilation\n num_groups = node.op.num_groups\n\n if(border_mode == 'valid' and subsample == (1, 1, 1) and\n 
num_groups == 1):\n if not node.op.filter_flip:\n kern = kern[:, :, ::-1, ::-1, ::-1]\n rval = GpuCorr3dMM(border_mode='full',\n subsample=subsample,\n filter_dilation=filter_dilation)(\n gpu_contiguous(topgrad),\n gpu_contiguous(kern.dimshuffle(1, 0, 2, 3, 4)))\n return [rval]\n else:\n return None\n\n\nclass ConvMetaOptimizer(LocalMetaOptimizer):\n\n def __init__(self):\n super(ConvMetaOptimizer, self).__init__()\n\n def time_call(self, fn):\n start = time.time()\n fn()[0].sync()\n return time.time() - start\n\n def provide_inputs(self, node, inputs):\n result = {}\n\n shapes = (node.op.imshp, node.op.kshp)\n if(node.op.imshp is None or node.op.kshp is None or\n any([s is None for shape in shapes for s in shape])):\n return result\n\n if type(node.op) in [AbstractConv2d, AbstractConv3d]:\n img, kern = node.inputs\n for(var, shape) in zip((img, kern), shapes):\n result[var] = theano.shared(np.random.random(shape).astype(var.dtype),\n var.name,\n broadcastable=var.broadcastable,\n borrow=True)\n\n if type(node.op) in [AbstractConv2d_gradWeights, AbstractConv3d_gradWeights]:\n img, top, kshape = node.inputs\n\n tshp = get_conv_output_shape(node.op.imshp,\n node.op.kshp,\n node.op.border_mode,\n node.op.subsample,\n node.op.filter_dilation)\n convdim = img.ndim - 2\n\n result[kshape] = theano.tensor.as_tensor_variable(node.op.kshp[-convdim:])\n\n for(var, shape) in zip((img, top), (node.op.imshp, tshp)):\n result[var] = theano.shared(np.random.random(shape).astype(var.dtype),\n var.name,\n broadcastable=var.broadcastable,\n borrow=True)\n\n if type(node.op) in [AbstractConv2d_gradInputs, AbstractConv3d_gradInputs]:\n kern, top, ishape = node.inputs\n\n tshp = get_conv_output_shape(node.op.imshp,\n node.op.kshp,\n node.op.border_mode,\n node.op.subsample,\n node.op.filter_dilation)\n\n result[ishape] = theano.tensor.as_tensor_variable(node.op.imshp[2:])\n\n for(var, shape) in zip((kern, top), (node.op.kshp, tshp)):\n result[var] = theano.shared(np.random.random(shape).astype(var.dtype),\n var.name,\n broadcastable=var.broadcastable,\n borrow=True)\n\n return result\n\n def get_opts(self, node):\n opts = Counter([opt for opt in self.track_dict[type(node.op)]\n if opt in self.tag_dict['default']])\n include_tags = config.metaopt.optimizer_including.split(':')\n exclude_tags = config.metaopt.optimizer_excluding.split(':')\n\n for in_opt in include_tags:\n opts.update([opt for opt in self.track_dict[type(node.op)]\n if opt in self.tag_dict[in_opt]])\n\n for ex_opt in exclude_tags:\n opts.subtract([opt for opt in self.track_dict[type(node.op)]\n if opt in self.tag_dict[ex_opt]])\n\n opts = list(opts + Counter())\n return opts\n\n\n# This deals with any abstract convs that have a transfer somewhere\n@register_opt('fast_compile', 'conv_dnn', 'cudnn')\n@op_lifter([AbstractConv2d,\n AbstractConv2d_gradWeights,\n AbstractConv2d_gradInputs,\n AbstractConv3d,\n AbstractConv3d_gradWeights,\n AbstractConv3d_gradInputs])\ndef local_gpua_abstractconv(op, context_name, inputs, outputs):\n if isinstance(outputs[0].type, GpuArrayType):\n # Don't handle this node here, it's already on the GPU.\n return\n return local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs)\n\n\n@register_opt2([AbstractConv2d,\n AbstractConv2d_gradWeights,\n AbstractConv2d_gradInputs,\n AbstractConv3d,\n AbstractConv3d_gradWeights,\n AbstractConv3d_gradInputs], 'fast_compile')\ndef local_gpua_lift_abstractconv_graph(op, context_name, inputs, outputs):\n inps = list(inputs)\n inps[0] = as_gpuarray_variable(inputs[0],\n 
context_name=context_name)\n inps[1] = as_gpuarray_variable(inputs[1],\n context_name=context_name)\n return [op(*inps)]\n\n\ndef local_gpu_pool(op, ctx_name, inputs, outputs):\n assert op.__props__ == ('ignore_border', 'mode', 'ndim')\n inp, ws, stride, pad = inputs\n nd = op.ndim\n if nd not in (2, 3):\n return\n inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))\n\n op = GpuPool(op.ignore_border, op.mode, op.ndim)\n if inp.ndim == nd + 2:\n return op(inp, ws, stride, pad)\n else:\n # reshape to 4D or 5D with 2 non-pooling dimensions\n inp_padded = pad_dims(inp, 2, nd)\n ret_padded = op(inp_padded, ws, stride, pad)\n return unpad_dims(ret_padded, inp, 2, nd)\n\n\npool_db = LocalGroupDB()\npool_db2 = LocalGroupDB(local_opt=theano.gof.opt.GraphToGPULocalOptGroup)\npool_db2.__name__ = \"pool_db2\"\nlifter = op_lifter([pool.Pool])(local_gpu_pool)\npool_db.register(\"local_gpu_pool\", lifter,\n 'gpuarray', 'fast_compile', 'fast_run',\n position=1)\npool_db2.register(\"local_gpu_pool\",\n local_optimizer([pool.Pool])(local_gpu_pool),\n 'gpuarray', 'fast_compile', 'fast_run',\n position=1)\nregister_opt('fast_compile', name='pool_db')(pool_db)\nregister_opt2([pool.Pool], 'fast_compile', name='pool_db2')(pool_db2)\n\n\ndef local_gpu_max_pool_grad(op, ctx_name, inputs, outputs):\n assert op.__props__ == ('ignore_border', 'mode', 'ndim')\n\n inp, out, out_grad, ws, stride, pad = inputs\n nd = op.ndim\n if nd not in (2, 3):\n return\n inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))\n out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))\n out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))\n\n op = GpuMaxPoolGrad(op.ignore_border, op.mode, op.ndim)\n if inp.ndim == nd + 2:\n return op(inp, out, out_grad, ws, stride, pad)\n else:\n # reshape to 4D or 5D with 2 non-pooling dimensions\n inp_padded = pad_dims(inp, 2, nd)\n out_padded = pad_dims(out, 2, nd)\n out_grad_padded = pad_dims(out_grad, 2, nd)\n ret_padded = op(inp_padded, out_padded, out_grad_padded,\n ws, stride, pad)\n return unpad_dims(ret_padded, inp, 2, nd)\n\n\nlifter = op_lifter([pool.MaxPoolGrad])(local_gpu_max_pool_grad)\npool_db.register(\"local_gpu_max_pool_grad\", lifter,\n 'gpuarray', 'fast_compile', 'fast_run',\n position=1)\npool_db2.register(\"local_gpu_max_pool_grad\",\n local_optimizer([pool.MaxPoolGrad])(local_gpu_max_pool_grad),\n 'gpuarray', 'fast_compile', 'fast_run',\n position=1)\n\n\ndef local_gpu_average_pool_grad(op, ctx_name, inputs, outputs):\n assert op.__props__ == ('ignore_border', 'mode', 'ndim')\n\n inp, out_grad, ws, stride, pad = inputs\n nd = op.ndim\n if nd not in (2, 3):\n return\n inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))\n out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))\n\n op = GpuAveragePoolGrad(op.ignore_border, op.mode, op.ndim)\n if inp.ndim == nd + 2:\n return op(inp, out_grad, ws, stride, pad)\n else:\n # reshape to 4D or 5D with 2 non-pooling dimensions\n inp_padded = pad_dims(inp, 2, nd)\n out_grad_padded = pad_dims(out_grad, 2, nd)\n ret_padded = op(inp_padded, out_grad_padded,\n ws, stride, pad)\n return unpad_dims(ret_padded, inp, 2, nd)\n\n\nlifter = op_lifter([pool.AveragePoolGrad])(local_gpu_average_pool_grad)\npool_db.register(\"local_gpu_average_pool_grad\", lifter,\n 'gpuarray', 'fast_compile', 'fast_run',\n position=1)\npool_db2.register(\"local_gpu_average_pool_grad\",\n local_optimizer([pool.AveragePoolGrad])(local_gpu_average_pool_grad),\n 'gpuarray', 'fast_compile', 'fast_run',\n 
position=1)\n\n\n@register_opt()\n@op_lifter([pool.DownsampleFactorMaxGradGrad])\n@register_opt2([pool.DownsampleFactorMaxGradGrad])\ndef local_gpu_downsample_factor_max_grad_grad(op, ctx_name, inputs, outputs):\n assert op.__props__ == ('ignore_border', 'mode', 'ndim')\n inp, out, out_grad, ws, stride, pad = inputs\n nd = op.ndim\n if nd not in (2, 3):\n return\n inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))\n out = gpu_contiguous(as_gpuarray_variable(out, ctx_name))\n out_grad = gpu_contiguous(as_gpuarray_variable(out_grad, ctx_name))\n\n op = GpuDownsampleFactorMaxGradGrad(op.ignore_border, op.mode, op.ndim)\n if inp.ndim == nd + 2:\n return op(inp, out, out_grad, ws, stride, pad)\n else:\n # reshape to 4D or 5D with 2 non-pooling dimensions\n inp_padded = pad_dims(inp, 2, nd)\n out_padded = pad_dims(out, 2, nd)\n out_grad_padded = pad_dims(out_grad, 2, nd)\n ret_padded = op(inp_padded, out_padded, out_grad_padded,\n ws, stride, pad)\n return unpad_dims(ret_padded, inp, 2, nd)\n\n\n@register_opt()\n@op_lifter([pool.MaxPoolRop])\n@register_opt2([pool.MaxPoolRop])\ndef local_gpu_max_pool_rop(op, ctx_name, inputs, outputs):\n assert op.__props__ == ('ignore_border', 'mode', 'ndim')\n inp, eval_inp, ws, stride, pad = inputs\n nd = op.ndim\n if nd not in (2, 3):\n return\n inp = gpu_contiguous(as_gpuarray_variable(inp, ctx_name))\n eval_inp = gpu_contiguous(as_gpuarray_variable(eval_inp, ctx_name))\n\n op = GpuMaxPoolRop(op.ignore_border, op.mode, op.ndim)\n if inp.ndim == nd + 2:\n return op(inp, eval_inp, ws, stride, pad)\n else:\n # reshape to 4D or 5D with 2 non-pooling dimensions\n inp_padded = pad_dims(inp, 2, nd)\n eval_inp_padded = pad_dims(eval_inp, 2, nd)\n ret_padded = op(inp_padded, eval_inp_padded, ws, stride, pad)\n return unpad_dims(ret_padded, inp, 2, nd)\n\n\n@register_opt(\"low_memory\")\n@local_optimizer([GpuCAReduceCuda])\ndef local_gpu_elemwise_careduce(node):\n \"\"\"\n Merge some GpuCAReduceCuda and GPUElemwise.\n Currently merged:\n - SUM(X^2)\n - SUM(ABS(X))\n\n \"\"\"\n if (isinstance(node.op, GpuCAReduceCuda) and\n node.op.pre_scalar_op is None and\n node.inputs[0].owner and\n isinstance(node.inputs[0].owner.op, GpuElemwise) and\n # The Op support all scalar with 1 inputs. 
We don't\n # automatically add more cases, as some, like trigonometric\n # operations with some reduction patterns, will probably result\n # in a slowdown.\n isinstance(node.inputs[0].owner.op.scalar_op, (scalar.basic.Sqr,\n scalar.basic.Abs))):\n inp = node.inputs[0].owner.inputs[0]\n props = node.op._props_dict()\n props[\"pre_scalar_op\"] = node.inputs[0].owner.op.scalar_op\n with inherit_stack_trace(node.outputs):\n out = GpuCAReduceCuda(**props)(inp)\n return [out]\n\n\n@local_optimizer(None)\ndef local_assert_no_cpu_op(node):\n if (all([var.owner and isinstance(var.owner.op, HostFromGpu)\n for var in node.inputs]) and\n any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)]\n for var in node.outputs])):\n\n if config.assert_no_cpu_op == \"warn\":\n _logger.warning((\"CPU Op %s is detected in the computation \"\n \"graph\") % node)\n elif config.assert_no_cpu_op == \"raise\":\n raise AssertionError(\"The Op %s is on CPU.\" % node)\n elif config.assert_no_cpu_op == \"pdb\":\n pdb.set_trace()\n\n\n# Register the local_assert_no_cpu_op:\nassert_no_cpu_op = theano.tensor.opt.in2out(local_assert_no_cpu_op,\n name='assert_no_cpu_op')\n# 49.2 is after device specialization & fusion optimizations for last transfers\noptdb.register('gpua_assert_no_cpu_op', assert_no_cpu_op, 49.2,\n 'assert_no_cpu_op')\n\n\ndef tensor_to_gpu(x, context_name):\n if isinstance(x.type, tensor.TensorType):\n y = GpuArrayType(broadcastable=x.type.broadcastable,\n context_name=context_name,\n dtype=x.type.dtype)()\n if x.name:\n y.name = x.name + '[Gpua]'\n return y\n else:\n return x\n\n\ndef gpu_safe_new(x, tag=''):\n \"\"\"\n Internal function that constructs a new variable from x with the same\n type, but with a different name (old name + tag). This function is used\n by gradient, or the R-op to construct new variables for the inputs of\n the inner graph such that there is no interference between the original\n graph and the newly constructed graph.\n\n \"\"\"\n if hasattr(x, 'name') and x.name is not None:\n nw_name = x.name + tag\n else:\n nw_name = None\n\n if isinstance(x, theano.Constant):\n return x.clone()\n\n nw_x = x.type()\n nw_x.name = nw_name\n return nw_x\n\n\ndef gpu_reconstruct_graph(inputs, outputs, tag=None):\n \"\"\"\n Different interface to clone, that allows you to pass inputs.\n Compared to clone, this method always replaces the inputs with\n new variables of the same type, and returns those (in the same\n order as the original inputs).\n\n \"\"\"\n if tag is None:\n tag = ''\n nw_inputs = [gpu_safe_new(x, tag) for x in inputs]\n givens = {}\n for nw_x, x in zip(nw_inputs, inputs):\n givens[x] = nw_x\n nw_outputs = scan_utils.clone(outputs, replace=givens)\n return (nw_inputs, nw_outputs)\n\n\n@register_opt('scan', 'fast_compile')\n@op_lifter([scan_op.Scan])\n@register_opt2([scan_op.Scan], 'fast_compile')\ndef local_gpua_scan_to_gpua(op, context_name, inputs, outputs):\n info = copy.deepcopy(op.info)\n if info.get('gpua', False):\n return\n info['gpua'] = True\n nw_ins = [inputs[0]]\n e = (1 +\n op.n_seqs +\n op.n_mit_mot +\n op.n_mit_sot +\n op.n_sit_sot +\n op.n_shared_outs)\n nw_ins += [safe_to_gpu(x, context_name) for x in inputs[1:e]]\n b = e\n e = e + op.n_nit_sot\n nw_ins += inputs[b:e]\n nw_ins += [safe_to_gpu(x, context_name) for x in inputs[e:]]\n scan_ins = [tensor_to_gpu(x, context_name) for x in op.inputs]\n\n # The inner output corresponding to the looping condition should not be\n # moved to the gpu\n if op.info['as_while']:\n scan_outs = [safe_to_gpu(x, context_name) for 
x in op.outputs[:-1]]\n scan_outs += [op.outputs[-1]]\n else:\n scan_outs = [safe_to_gpu(x, context_name) for x in op.outputs]\n scan_outs = scan_utils.clone(\n scan_outs,\n replace=list(zip(op.inputs,\n (safe_to_cpu(x) for x in scan_ins))))\n\n # We need to construct the hash here, because scan\n # __init__ does not know about the gpu and can not\n # handle graphs with inputs being on the gpu\n tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs)\n local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True)\n _cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, [])\n info['gpu_hash'] = hash(_cmodule_key)\n\n def typebuild(dtype, broadcastable, context_name=context_name):\n return GpuArrayType(dtype=dtype, broadcastable=broadcastable,\n context_name=context_name)\n\n nw_op = scan_op.Scan(scan_ins, scan_outs, info,\n typeConstructor=typebuild).make_node(*nw_ins)\n return nw_op.outputs\n\n\ndef _scan_type_infer(node):\n context_name = infer_context_name(*node.inputs)\n\n def typebuild(dtype, broadcastable, context_name=context_name):\n return GpuArrayType(dtype=dtype, broadcastable=broadcastable,\n context_name=context_name)\n return typebuild\n\n\n# Add optimization : maxandargmax (CPU -> GPU)\n@register_opt('fast_compile')\n@op_lifter([tensor.MaxAndArgmax])\n@register_opt2([tensor.MaxAndArgmax], 'fast_compile')\ndef local_gpu_maxandargmax(op, context_name, inputs, outputs):\n op = GpuMaxAndArgmax(op.get_params(None))\n if inputs[0].dtype == \"float16\":\n # For now it is better to copy/cast on the GPU then transfer to the CPU\n casted_inputs = inputs[0].astype('float32')\n ret = op(casted_inputs)\n return [ret[0].astype('float16'), ret[1]]\n return op\n\n\n@register_opt('fast_compile')\n@op_lifter([Images2Neibs])\n@register_opt2([Images2Neibs], 'fast_compile')\ndef local_gpua_images2neibs(op, context_name, inputs, outputs):\n if op.mode in ['valid', 'half', 'full', 'ignore_borders', 'wrap_centered']:\n return GpuImages2Neibs(op.mode)\n\n\n# solve\n@register_opt('fast_compile')\n@op_lifter([slinalg.Solve])\n@register_opt2([theano.tensor.slinalg.Solve], 'fast_compile')\ndef local_gpu_solve(op, context_name, inputs, outputs):\n if inputs[0].dtype not in ['float16', 'float32']:\n return\n if op.A_structure not in MATRIX_STRUCTURES_SOLVE:\n return\n\n if op.A_structure in ['lower_triangular', 'upper_triangular']:\n if not cublas_available:\n return\n lower = op.A_structure == 'lower_triangular'\n op = GpuCublasTriangularSolve(lower)\n else:\n if not cusolver_available:\n return\n op = GpuCusolverSolve(A_structure=op.A_structure)\n\n if inputs[0].dtype == 'float16':\n return op(inputs[0].astype('float32'),\n inputs[1].astype('float32')).astype('float16')\n return op\n\n\n@register_inplace()\n@local_optimizer([GpuCusolverSolve], inplace=True)\ndef local_inplace_gpu_solve(node):\n if isinstance(node.op, GpuCusolverSolve) and not node.op.inplace:\n with inherit_stack_trace(node.outputs):\n return [GpuCusolverSolve(A_structure=node.op.A_structure, trans=node.op.trans,\n inplace=True)(*node.inputs)]\n\n\n# Cholesky decomposition\ndef local_gpu_cholesky(op, context_name, inputs, outputs):\n if not cusolver_available:\n return\n if inputs[0].dtype not in ['float16', 'float32']:\n return\n op = GpuCholesky(lower=op.lower, inplace=op.destructive)\n if inputs[0].dtype == 'float16':\n return op(inputs[0].astype('float32')).astype('float16')\n\n return op\nmatrix_ops_db = LocalGroupDB()\nmatrix_ops_db2 = LocalGroupDB(local_opt=theano.gof.opt.GraphToGPULocalOptGroup)\nmatrix_ops_db2.__name__ = 
\"matrix_ops_db2\"\n\n# For Cholesky decomposition, magma 2.2 is slower than cusolver 8 (tested for\n# matrices of size 1000). Thus, cusolver is prioritized during graph\n# optimizations. To explicitly use magma, you should disable cusolver using\n# `optimizer_excluding=cusolver` in Theano config.\nlifter = op_lifter([slinalg.Cholesky])(local_gpu_cholesky)\nmatrix_ops_db.register(\"local_gpu_cholesky\", lifter,\n 'gpuarray', 'fast_compile', 'fast_run', 'cusolver',\n position=0)\nmatrix_ops_db2.register(\"local_gpu_cholesky\",\n local_optimizer([slinalg.Cholesky])(local_gpu_cholesky),\n 'gpuarray', 'fast_compile', 'fast_run', 'cusolver',\n position=0)\nregister_opt('fast_compile', name='matrix_ops_db')(matrix_ops_db)\nregister_opt2([slinalg.Solve], 'fast_compile', name='matrix_ops_db2')(matrix_ops_db2)\n\n\n@register_inplace()\n@local_optimizer([GpuCholesky], inplace=True)\ndef local_inplace_gpu_cholesky(node):\n if isinstance(node.op, GpuCholesky) and not node.op.inplace:\n with inherit_stack_trace(node.outputs):\n return [node.op.clone_inplace()(*node.inputs)]\n\n\ndef local_gpu_magma_cholesky(op, context_name, inputs, outputs):\n if not config.magma.enabled:\n return\n if inputs[0].dtype not in ['float16', 'float32']:\n return\n op = GpuMagmaCholesky(lower=op.lower, inplace=op.destructive)\n if inputs[0].dtype == 'float16':\n return op(inputs[0].astype('float32')).astype('float16')\n return op\nlifter = op_lifter([slinalg.Cholesky])(local_gpu_magma_cholesky)\nmatrix_ops_db.register(\"local_gpu_magma_cholesky\", lifter,\n 'gpuarray', 'fast_compile', 'fast_run', 'magma',\n position=1)\nmatrix_ops_db2.register(\"local_gpu_magma_cholesky\",\n local_optimizer([slinalg.Cholesky])(local_gpu_magma_cholesky),\n 'gpuarray', 'fast_compile', 'fast_run', 'magma',\n position=1)\n\n\n@register_inplace()\n@local_optimizer([GpuMagmaCholesky], inplace=True)\ndef local_inplace_gpu_magma_cholesky(node):\n if isinstance(node.op, GpuMagmaCholesky) and not node.op.inplace:\n return [node.op.clone_inplace()(*node.inputs)]\n\n\n# QR decomposition\n@register_opt('magma', 'fast_compile')\n@op_lifter([nlinalg.QRFull])\n@register_opt2([theano.tensor.nlinalg.QRFull], 'magma', 'fast_compile')\ndef local_gpu_magma_qr(op, context_name, inputs, outputs):\n if not config.magma.enabled or op.mode != 'reduced':\n return\n if inputs[0].dtype not in ['float16', 'float32']:\n return\n x = inputs[0]\n if inputs[0].dtype == 'float16':\n x = inputs[0].astype('float32')\n out = gpu_qr(x, complete=True)\n if inputs[0].dtype == 'float16':\n return [o.astype('float16') for o in out]\n return out\n\n\n@register_opt('magma', 'fast_compile')\n@op_lifter([nlinalg.QRIncomplete])\n@register_opt2([theano.tensor.nlinalg.QRIncomplete], 'magma', 'fast_compile')\ndef local_gpu_magma_qr_incomplete(op, context_name, inputs, outputs):\n if not config.magma.enabled:\n return\n if inputs[0].dtype not in ['float16', 'float32']:\n return\n x = inputs[0]\n if inputs[0].dtype == 'float16':\n x = inputs[0].astype('float32')\n out = gpu_qr(x, complete=False)\n if inputs[0].dtype == 'float16':\n return [out.astype('float16')]\n return out\n\n\n# Matrix inverse\n@register_opt('magma', 'fast_compile')\n@op_lifter([nlinalg.MatrixInverse])\n@register_opt2([theano.tensor.nlinalg.MatrixInverse], 'magma', 'fast_compile')\ndef local_gpu_magma_matrix_inverse(op, context_name, inputs, outputs):\n if not config.magma.enabled:\n return\n if inputs[0].dtype not in ['float16', 'float32']:\n return\n op = GpuMagmaMatrixInverse()\n if inputs[0].dtype == 'float16':\n 
return op(inputs[0].astype('float32')).astype('float16')\n return op\n\n\n@register_inplace()\n@local_optimizer([GpuMagmaMatrixInverse])\ndef local_inplace_gpu_magma_matrix_inverse(node):\n if isinstance(node.op, GpuMagmaMatrixInverse) and not node.op.inplace:\n with inherit_stack_trace(node.outputs):\n return [node.op.clone_inplace()(*node.inputs)]\n\n\n# Eigen decomposition of a symmetric matrix\n@register_opt('magma', 'fast_compile')\n@op_lifter([nlinalg.Eigh])\n@register_opt2([theano.tensor.nlinalg.Eigh], 'magma', 'fast_compile')\ndef local_gpu_magma_eigh(op, context_name, inputs, outputs):\n if not config.magma.enabled:\n return\n if inputs[0].dtype not in ['float16', 'float32']:\n return\n op = GpuMagmaEigh(UPLO=op.UPLO, compute_v=True)\n if inputs[0].dtype == 'float16':\n return op(inputs[0].astype('float32')).astype('float16')\n return op\n\n\n# Singular Value Decomposition\n@register_opt('magma', 'fast_compile')\n@op_lifter([nlinalg.SVD])\n@register_opt2([theano.tensor.nlinalg.SVD], 'magma', 'fast_compile')\ndef local_gpu_magma_svd(op, context_name, inputs, outputs):\n if not config.magma.enabled:\n return\n if inputs[0].dtype not in ['float16', 'float32']:\n return\n x = inputs[0]\n if inputs[0].dtype == 'float16':\n x = inputs[0].astype('float32')\n out = gpu_svd(x, compute_uv=op.compute_uv, full_matrices=op.full_matrices)\n if inputs[0].dtype == 'float16':\n if op.compute_uv:\n out = [o.astype('float16') for o in out]\n else:\n out = [out.astype('float16')]\n return out\n\n\n@register_opt('ctc', 'fast_compile')\n@op_lifter([theano.tensor.nnet.ctc.ConnectionistTemporalClassification])\n@register_opt2([ConnectionistTemporalClassification], 'ctc', 'fast_compile')\ndef local_gpu_ctc(op, context_name, inputs, outputs):\n op = GpuConnectionistTemporalClassification(compute_grad=op.compute_grad)\n return op.make_node(*inputs).outputs\n\n\n# Do not register in fast_run or fast_compile.\n# It will be added to fast_run if the GPU is enabled.\noptdb.register('gpua_scanOp_make_inplace',\n scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer,\n gpua_flag=True),\n 75,\n 'gpuarray',\n 'inplace',\n 'scan')\n\n\n# Register GPU convolution implementation\n# They are tried in a specific order so we can control\n# which ones take precedence over others.\nabstractconv_groupopt = theano.gof.optdb.LocalGroupDB()\nabstractconv_groupopt.__name__ = \"gpuarray_abstractconv_opts\"\nregister_opt('fast_compile')(abstractconv_groupopt)\n\n# We import these opts here instead of at the top of this file\n# to avoid a circular dependency problem with dnn\nfrom .dnn import (local_abstractconv_cudnn,\n local_abstractconv_gw_cudnn,\n local_abstractconv_gi_cudnn, # noqa: 402\n local_abstractconv_cudnn_alt,\n local_abstractconv3d_cudnn_alt)\n\nabstractconv_groupopt.register('local_abstractconv_dnn',\n local_abstractconv_cudnn, 20,\n 'conv_dnn',\n 'gpuarray', 'fast_compile', 'fast_run', 'cudnn')\nabstractconv_groupopt.register('local_abstractconv_gw_dnn',\n local_abstractconv_gw_cudnn, 20,\n 'conv_dnn',\n 'gpuarray', 'fast_compile', 'fast_run', 'cudnn')\nabstractconv_groupopt.register('local_abstractconv_gi_dnn',\n local_abstractconv_gi_cudnn, 20,\n 'conv_dnn',\n 'gpuarray', 'fast_compile', 'fast_run', 'cudnn')\n# The GEMM-based convolution comes last to catch all remaining cases.\n# It can be disabled by excluding 'conv_gemm'.\nabstractconv_groupopt.register('local_abstractconv_gemm', local_abstractconv_gemm, 30,\n 'conv_gemm',\n 'gpuarray', 'fast_compile', 
'fast_run')\nabstractconv_groupopt.register('local_abstractconv3d_gemm', local_abstractconv3d_gemm, 30,\n 'conv_gemm',\n 'gpuarray', 'fast_compile', 'fast_run')\nabstractconv_groupopt.register('local_abstractconv_gradweights_gemm',\n local_abstractconv_gradweights_gemm, 30,\n 'conv_gemm',\n 'gpuarray', 'fast_compile', 'fast_run')\nabstractconv_groupopt.register('local_abstractconv3d_gradweights_gemm',\n local_abstractconv3d_gradweights_gemm, 30,\n 'conv_gemm',\n 'gpuarray', 'fast_compile', 'fast_run')\nabstractconv_groupopt.register('local_abstractconv_gradinputs',\n local_abstractconv_gradinputs_gemm, 30,\n 'conv_gemm',\n 'gpuarray', 'fast_compile', 'fast_run')\nabstractconv_groupopt.register('local_abstractconv3d_gradinputs',\n local_abstractconv3d_gradinputs_gemm, 30,\n 'conv_gemm',\n 'gpuarray', 'fast_compile', 'fast_run')\n\nconv_metaopt = ConvMetaOptimizer()\n\nconv_metaopt.register(local_abstractconv_cudnn,\n ['default', 'cudnn', 'conv_dnn'])\nconv_metaopt.register(local_abstractconv_gw_cudnn,\n ['default', 'cudnn', 'conv_dnn'])\nconv_metaopt.register(local_abstractconv_gi_cudnn,\n ['default', 'cudnn', 'conv_dnn'])\nconv_metaopt.register(local_abstractconv_gemm_def,\n ['default', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv3d_gemm_def,\n ['default', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv_gradweights_gemm,\n ['default', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv3d_gradweights_gemm,\n ['default', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv_gradinputs_gemm,\n ['default', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv3d_gradinputs_gemm,\n ['default', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv_gemm_alt,\n ['default', 'alternative', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv_gemm_gradweights_alt,\n ['default', 'alternative', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv_gradinputs_gemm_alt,\n ['default', 'alternative', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv_cudnn_alt,\n ['default', 'alternative', 'cudnn', 'conv_dnn'])\nconv_metaopt.register(local_abstractconv3d_cudnn_alt,\n ['default', 'alternative', 'cudnn', 'conv_dnn'])\nconv_metaopt.register(local_abstractconv3d_alt,\n ['default', 'alternative', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv3d_gemm_gradweights_alt,\n ['default', 'alternative', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv3d_gradinputs_gemm_alt,\n ['default', 'alternative', 'conv_gemm'])\nconv_metaopt.register(local_abstractconv3d2d,\n ['alternative', 'conv3d2d'])\n\nabstractconv_groupopt.register('conv_metaopt', conv_metaopt, 'conv_meta', position=0)\n\n# Register cuDNN batch normalization implementation\n\n# We import these opts here instead of at the top of this file\n# to avoid a circular dependency problem with dnn\nfrom .dnn import (local_abstract_batch_norm_train_cudnn,\n local_abstract_batch_norm_train_grad_cudnn,\n local_abstract_batch_norm_inference_cudnn) # noqa: 402\n\nabstract_batch_norm_groupopt = theano.gof.optdb.LocalGroupDB()\nabstract_batch_norm_groupopt.__name__ = \"gpuarray_batchnorm_opts\"\nregister_opt('fast_compile')(abstract_batch_norm_groupopt)\n\nabstract_batch_norm_db = LocalGroupDB()\nabstract_batch_norm_db2 = LocalGroupDB(\n local_opt=theano.gof.opt.GraphToGPULocalOptGroup)\nabstract_batch_norm_db2.__name__ = \"abstract_batch_norm_db2\"\nregister_opt('fast_compile', name='abstract_batch_norm_db')(\n abstract_batch_norm_db)\nregister_opt2([bn.AbstractBatchNormTrain,\n bn.AbstractBatchNormTrainGrad,\n 
bn.AbstractBatchNormInference],\n 'fast_compile', name='abstract_batch_norm_db2')(\n abstract_batch_norm_db2)\n\nfor op, fct, cpu in [(bn.AbstractBatchNormTrain,\n local_abstract_batch_norm_train_cudnn,\n bn.local_abstract_batch_norm_train),\n (bn.AbstractBatchNormTrainGrad,\n local_abstract_batch_norm_train_grad_cudnn,\n bn.local_abstract_batch_norm_train_grad),\n (bn.AbstractBatchNormInference,\n local_abstract_batch_norm_inference_cudnn,\n bn.local_abstract_batch_norm_inference)]:\n lifter = op_lifter([op])(fct)\n abstract_batch_norm_db.register(fct.__name__,\n lifter,\n 'gpuarray', 'fast_compile', 'fast_run',\n 'cudnn', 'batchnorm_dnn',\n position=1)\n abstract_batch_norm_db2.register(fct.__name__,\n local_optimizer([op])(fct),\n 'gpuarray', 'fast_compile', 'fast_run',\n 'cudnn', 'batchnorm_dnn',\n position=1)\n # cpu is a normal optimization. We can't register it in\n # GraphToGPU. So for now, only add it to the slower EQ phase. If\n # there is no cuDNN, we still want to move it to the GPU now with\n # a Theano graph so to have this graph on the GPU.\n abstract_batch_norm_db.register(cpu.__name__, cpu,\n 'gpuarray', 'fast_compile', 'fast_run',\n position='last')\n"
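The local_gpu_crossentropycategorical1hot / local_gpu_crossentropycategorical1hotgrad lifters in the code field above note that no dedicated GPU op exists and rebuild the loss from plain advanced indexing. A minimal numpy sketch of the identity they construct, offered as an illustration only (shapes and values are made-up assumptions, not taken from this dataset row):

    # Cross-entropy against one-hot targets as indexing plus -log, and its
    # gradient, nonzero only at the target entries -- mirroring the rewrite
    # described in the lifters' comments.
    import numpy as np

    coding = np.array([[0.7, 0.2, 0.1],
                       [0.1, 0.8, 0.1]])   # softmax-like rows
    one_of_n = np.array([0, 1])            # integer class targets
    idx0 = np.arange(coding.shape[0])

    loss = -np.log(coding[idx0, one_of_n])

    gy = np.ones_like(loss)                # upstream gradient
    gcoding = np.zeros_like(coding)
    gcoding[idx0, one_of_n] = -gy / coding[idx0, one_of_n]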
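Likewise, local_abstractconv_gemm in the same payload switches to GpuCorrMM_gradWeights when, per its comment, batchsize * kernelHeight * kernelWidth exceeds inputChannels * outputHeight * outputWidth. A rough sketch of that cost comparison for 'valid' borders with unit subsampling; the helper name and example shapes are hypothetical:

    # Decide whether the gradWeights kernel is the cheaper GEMM formulation,
    # following the heuristic quoted in the optimizer's comments.
    def prefers_gradweights(batch, in_channels, img_h, img_w, k_h, k_w):
        out_h, out_w = img_h - k_h + 1, img_w - k_w + 1  # 'valid' output size
        return batch * k_h * k_w > in_channels * out_h * out_w

    print(prefers_gradweights(256, 3, 8, 8, 5, 5))     # huge batch, tiny maps -> True
    print(prefers_gradweights(1, 64, 224, 224, 3, 3))  # big feature maps -> False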
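The pooling lifters (local_gpu_pool, local_gpu_max_pool_grad, and friends) only handle inputs of rank nd + 2 directly; for higher ranks they call pad_dims/unpad_dims to, per the inline comment, reshape to 4D or 5D with 2 non-pooling dimensions and restore the shape afterwards. A numpy illustration of that reshape round-trip, based on my reading of the comment rather than the actual pad_dims implementation:

    import numpy as np

    x = np.random.rand(2, 3, 4, 6, 6)   # 5D input for a 2D pooling op
    nd = 2                              # number of pooled trailing dims
    lead = x.shape[:-nd]                # non-pooled leading axes
    x4d = x.reshape((lead[0], -1) + x.shape[-nd:])  # collapse to rank nd + 2
    # ... pooling would run on x4d here ...
    restored = x4d.reshape(x.shape)     # un-pad: restore the original shape
    assert restored.shape == (2, 3, 4, 6, 6)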
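The Cholesky registrations above place the cusolver lifter at position 0 and the magma one at position 1, and the accompanying comment says magma can be selected by excluding cusolver via `optimizer_excluding=cusolver`. One way to set that flag (standard Theano flag mechanics; it must be set before theano is imported):

    import os
    # Exclude the 'cusolver'-tagged optimizer so the magma one (if
    # config.magma.enabled is set) is picked for slinalg.Cholesky.
    os.environ['THEANO_FLAGS'] = 'optimizer_excluding=cusolver'
    import theano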
] | [
[
"numpy.add.at",
"numpy.can_cast",
"numpy.__version__.split",
"numpy.ones",
"numpy.all",
"numpy.any",
"numpy.zeros",
"numpy.empty"
],
[
"numpy.asarray",
"numpy.eye",
"numpy.int32",
"numpy.dtype",
"numpy.all",
"numpy.random.rand",
"numpy.array"
],
[
"numpy.ones"
],
[
"numpy.random.random",
"numpy.random.seed",
"numpy.nan_to_num",
"numpy.random.randn",
"numpy.random.rand"
],
[
"numpy.abs",
"scipy.signal.signaltools._valfrommode",
"scipy.signal.sigtools._convolve2d",
"numpy.asarray",
"scipy.signal.signaltools._bvalfromboundary",
"numpy.array",
"numpy.random.RandomState",
"numpy.zeros"
],
[
"numpy.asarray",
"numpy.random.random",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SamuelBrand1/covid-19-in-households-public | [
"a0740d85f8f9fb1ae67dbd9c5a92f1085e4d9ea1"
] | [
"examples/carehome_vacc_analysis/plot_no_vacc_results.py"
] | [
"from pickle import load\nfrom matplotlib.pyplot import subplots\nfrom matplotlib.cm import get_cmap\n\nwith open('carehome_no_vacc_sol.pkl','rb') as f:\n no_vacc_output, ave_hh_by_class = load(f)\n\nlgd=['S','E','M', 'C', 'R', 'D']\n\nt = no_vacc_output['t']\ndata_list = [no_vacc_output['S_no_vacc']/ave_hh_by_class,\n no_vacc_output['E_no_vacc']/ave_hh_by_class,\n no_vacc_output['M_no_vacc']/ave_hh_by_class,\n no_vacc_output['C_no_vacc']/ave_hh_by_class,\n no_vacc_output['R_no_vacc']/ave_hh_by_class,\n no_vacc_output['D_no_vacc']/ave_hh_by_class]\n\nfig, (axis_P, axis_S, axis_A) = subplots(3,1, sharex=True)\n\ncmap = get_cmap('tab20')\nalpha = 0.5\nfor i in range(1,len(data_list)):\n axis_P.plot(\n t, data_list[i][:,0], label=lgd[i],\n color=cmap(i/len(data_list)), alpha=alpha)\naxis_P.set_ylabel('Proportion of population')\naxis_P.set_title('Patients')\naxis_P.legend(ncol=1, bbox_to_anchor=(1,0.50))\n\nfor i in range(1,len(data_list)-2):\n axis_S.plot(\n t, data_list[i][:,1], label=lgd[i],\n color=cmap(i/len(data_list)), alpha=alpha)\naxis_S.set_ylabel('Proportion of population')\naxis_S.set_title('Permanent staff')\naxis_S.legend(ncol=1, bbox_to_anchor=(1,0.50))\n\nfor i in range(1,len(data_list)-2):\n axis_A.plot(\n t, data_list[i][:,2], label=lgd[i],\n color=cmap(i/len(data_list)), alpha=alpha)\naxis_A.set_ylabel('Proportion of population')\naxis_A.set_title('Agency workers')\naxis_A.legend(ncol=1, bbox_to_anchor=(1,0.50))\n\nfig.savefig('carehome-plot.png', bbox_inches='tight', dpi=300)\n\nfig, ax = subplots(1,1,sharex=True)\nax.plot(t,data_list[0][:,0] +\n data_list[1][:,0] +\n data_list[2][:,0] +\n data_list[3][:,0] +\n data_list[4][:,0])\nfig.savefig('total_patients.png', bbox_inches='tight', dpi=300)\n"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.cm.get_cmap"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rosa-yuri/my_projects | [
"8b430f5e5d6c3dc1f8e1a59251bf1cdd90e3501b"
] | [
"color/colorImg.py"
] | [
"# (2) 영상 처리 및 이미지 기반의 데이터 분석 통합 GUI 툴\n# --> RAW, GIF, JPG, PNG, BMP, TIF 영상 데이터 분석 및 처리\n# --> 영상 데이터와 CSV, SQLite, MySQL, XLS 저장 및 로딩 기능 지원\n# --> Image Processing 알고리즘 (화소점, 화소영역, 기하학 등)\n# --> 히스토그램을 통한 데이터 분석 및 영상 개선 알고리즘\n# --> 대량의 영상 빅데이터 자동변환 기능 (자동화)\n\n## 라이브러리\nfrom tkinter import *\nfrom tkinter.filedialog import *\nfrom tkinter.simpledialog import *\nimport operator\nimport os.path\nimport math\nimport threading\nimport numpy\nimport xlsxwriter\nfrom xlsxwriter import Workbook\nimport struct\nimport csv\nimport sqlite3\nimport pymysql\nimport xlwt\nimport glob\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nfrom wand.color import Color\nfrom wand.drawing import Drawing\nfrom wand.image import *\n\n## 함수선언부\n\n### 파일메뉴\n#### 이미지 로딩 및 디스플레이\n\ndef loadImage(fname) : # Gray-scale 이미지 로딩하기\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n fsize = os.path.getsize(fname) # 파일 크기 확인\n inH = inW = int(math.sqrt(fsize)) # 입력메모리 크기 결정! (중요)\n inImage = []; tmpList = []\n for i in range(inH) : # 입력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW) :\n tmpList.append(0)\n inImage.append(tmpList)\n # 파일 --> 메모리로 데이터 로딩\n fp = open(fname, 'rb') # 파일 열기(바이너리 모드)\n for i in range(inH) :\n for k in range(inW) :\n inImage[i][k] = int(ord(fp.read(1)))\n fp.close()\n\ndef display_raw() : #Gray-scale 이미지 출력하기\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 기존에 캐버스 있으면 뜯어내기.\n if canvas != None :\n canvas.destroy()\n # 화면 준비 (고정됨)\n VIEW_X, VIEW_Y = 256, 256\n if VIEW_X >= outW or VIEW_Y >= outH : # 영상이 128미만이면\n VIEW_X = outW\n VIEW_Y = outH\n step = 1 # 건너뛸숫자\n else :\n step = outW / VIEW_X # step을 실수도 인정. 128, 256, 512 단위가 아닌 것 고려.\n\n window.geometry(str(VIEW_X*2) + 'x' + str(VIEW_Y*2))\n canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)\n paper = PhotoImage(width=VIEW_X, height=VIEW_Y)\n canvas.create_image((VIEW_X/2, VIEW_X/2), image=paper, state='normal')\n # 화면에 출력. 
실수 step을 위해서 numpy 사용\n def putPixel() :\n for i in numpy.arange(0, outH,step) :\n for k in numpy.arange(0, outW,step) :\n i = int(i); k = int(k) # 첨자이므로 정수화\n data = outImage[i][k]\n paper.put('#%02x%02x%02x' % (data, data, data),\n ( int(k/step),int(i/step)))\n\n threading.Thread(target=putPixel).start()\n canvas.pack(expand=1, anchor =CENTER)\n status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH) )\n\n\ndef loadColorImage(fname) : # Color 이미지 로딩하기\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR,outImageG,outImageB , inW, inH, outW, outH\n\n photo = PhotoImage(file=filename)\n inW = photo.width(); inH = photo.height()\n\n inImageR, inImageG, inImageB = [], [],[]; tmpList = []\n for i in range(inH) : # 입력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW) :\n tmpList.append(0)\n inImageR.append(tmpList[:])\n for i in range(inH): # 입력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW):\n tmpList.append(0)\n inImageG.append(tmpList[:])\n for i in range(inH): # 입력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW):\n tmpList.append(0)\n inImageB.append(tmpList[:])\n\n # 파일 --> 메모리로 데이터 로딩\n for i in range(inH) :\n for k in range(inW) :\n r, g, b = photo.get(k,i)\n #print(r,g,b,end='/')\n inImageR[i][k] = r\n inImageG[i][k] = g\n inImageB[i][k] = b\n #print(inImageR[i][k], inImageG[i][k], inImageB[i][k], end='/')\n photo=None\n\ndef display_Color() :\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 기존에 캐버스 있으면 뜯어내기.\n if canvas != None:\n canvas.destroy()\n # 화면 준비 (고정됨)\n VIEW_X, VIEW_Y = 256, 256\n if VIEW_X >= outW or VIEW_Y >= outH: # 영상이 128미만이면\n VIEW_X = outW\n VIEW_Y = outH\n step = 1 # 건너뛸숫자\n else:\n step = outW / VIEW_X # step을 실수도 인정. 128, 256, 512 단위가 아닌 것 고려.\n\n window.geometry(str(VIEW_X * 2) + 'x' + str(VIEW_Y * 2))\n canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)\n paper = PhotoImage(width=VIEW_X, height=VIEW_Y)\n canvas.create_image((VIEW_X / 2, VIEW_X / 2), image=paper, state='normal')\n\n # 화면에 출력. 실수 step을 위해서 numpy 사용\n def putPixel():\n for i in numpy.arange(0, outH, step):\n for k in numpy.arange(0, outW, step):\n i = int(i)\n k = int(k) # 첨자이므로 정수화\n dataR = outImageR[i][k]\n dataG = outImageG[i][k]\n dataB = outImageB[i][k]\n # print(dataR, dataG, dataB, end='/')\n paper.put('#%02x%02x%02x' % (dataR, dataG, dataB), (int(k / step), int(i / step)))\n threading.Thread(target=putPixel).start()\n canvas.pack(expand=1, anchor=CENTER)\n status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH))\n\n\ndef display_first_Color():\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH, photo, paper_copy\n if canvas != None:\n canvas.destroy()\n # 화면 준비 (고정됨)\n VIEW_X, VIEW_Y = 256, 256\n if VIEW_X >= outW or VIEW_Y >= outH: # 영상이 128미만이면\n VIEW_X = outW\n VIEW_Y = outH\n step = 1 # 건너뛸숫자\n else:\n step = outW / VIEW_X # step을 실수도 인정. 128, 256, 512 단위가 아닌 것 고려.\n\n window.geometry(str(VIEW_X * 2) + 'x' + str(VIEW_Y * 2))\n canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)\n paper = PhotoImage(width=VIEW_X, height=VIEW_Y)\n canvas.create_image((VIEW_X / 2, VIEW_X / 2), image=paper, state='normal')\n\n # 화면에 출력. 
실수 step을 위해서 numpy 사용\n def putPixel():\n for i in numpy.arange(0, outH, step):\n for k in numpy.arange(0, outW, step):\n i = int(i)\n k = int(k) # 첨자이므로 정수화\n dataR = outImageR[i][k]\n dataG = outImageG[i][k]\n dataB = outImageB[i][k]\n paper.put('#%02x%02x%02x' % (dataR, dataG, dataB), (int(k / step), int(i / step)))\n paper_copy.put('#%02x%02x%02x' % (dataR, dataG, dataB), (int(k / step), int(i / step)))\n\n threading.Thread(target=putPixel).start()\n canvas.pack()\n\n\ndef display_copy_Color():\n global window, canvas, pLabel, paper, filename, inImage, outImage, inW, inH, outW, outH, photo, paper_copy\n if canvas != None:\n canvas.destroy()\n window.geometry(str(outH * 2) + 'x' + str(outW))\n canvas = Canvas(window, width=outW, height=outH)\n canvas.create_image((outW / 2, outH / 2), image=paper, state='normal')\n canvas.pack(side=RIGHT)\n photo = PhotoImage()\n pLabel = Label(window, image=photo)\n pLabel.pack(side=LEFT)\n pLabel.configure(image=paper_copy)\n\n\ndef rollback_gif():\n global window, canvas, paper, PLabel, filename, inImage, outImage, inW, inH, outW, outH, photo, paper_copy\n if pLabel != None:\n pLabel.destroy()\n loadColorImage(filename)\n equal_Color()\n\n\n#### 데이터 파일형식 로딩\n\ndef loadRawCSV(fname) :\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n fsize = -1\n fp = open(fname, 'r')\n for f in fp :\n fsize += 1\n fp.close()\n inH = inW = int(math.sqrt(fsize)) # 입력메모리 크기 결정! (중요)\n inImage = []; tmpList = []\n for i in range(inH) : # 입력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW) :\n tmpList.append(0)\n inImage.append(tmpList)\n # 파일 --> 메모리로 데이터 로딩\n fp = open(fname, 'r') # 파일 열기(바이너리 모드)\n csvFP = csv.reader(fp)\n next(csvFP)\n for row_list in csvFP :\n row= int(row_list[0]) ; col = int(row_list[1]) ; value=int(row_list[2])\n inImage[row][col] = value\n fp.close()\n\n#### 열기메뉴\n\ndef openRawFile() : #Gray-scale 이미지 불러오기\n global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH\n filename = askopenfilename(parent=window,\n filetypes=((\"RAW파일\", \"*.raw\"), (\"모든파일\", \"*.*\")))\n loadImage(filename) # 파일 --> 입력메모리\n equal_raw() # 입력메모리--> 출력메모리\n\n\ndef openColorFile() : #Color 이미지 불러오기\n global window, canvas, paper, filename,inImageR, inImageG, inImageB, outImageR,outImageG,outImageB ,inW, inH, outW, outH\n filename = askopenfilename(parent=window,\n filetypes=((\"그림파일\", \"*.gif;*.jpg;*.png;*.tif;*.bmp\"), (\"모든파일\", \"*.*\")))\n loadColorImage(filename) # 파일 --> 입력메모리\n equal_Color() # 입력메모리--> 출력메모리\n\n\ndef openRawCSV() :\n global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH\n filename = askopenfilename(parent=window,\n filetypes=((\"CSV파일\", \"*.csv\"), (\"모든파일\", \"*.*\")))\n loadRawCSV(filename) # 파일 --> 입력메모리\n equal_raw() # 입력메모리--> 출력메모리\n\n\n\ndef openRawSQLite() :\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n global csvList, input_file\n con = sqlite3.connect('imageDB') # 데이터베이스 지정(또는 연결)\n cur = con.cursor() # 연결 통로 생성 (쿼리문을 날릴 통로)\n try :\n sql = \"SELECT DISTINCT filename, resolution FROM imageTable\"\n cur.execute(sql)\n tableNameList = [] # ['강아지:128', '강아지:512' ....]\n while True :\n row = cur.fetchone()\n if row == None :\n break\n tableNameList.append( row[0] + ':' + str(row[1]) )\n\n ######## 내부 함수 (Inner Function) : 함수 안의 함수,지역함수 #######\n def selectTable() :\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n selectedIndex = listbox.curselection()[0]\n subWindow.destroy()\n fname, 
res = tableNameList[selectedIndex].split(':')\n filename = fname\n sql = \"SELECT row, col, value FROM imageTable WHERE filename='\" + \\\n fname + \"' AND resolution=\" + res\n print(sql)\n cur.execute(sql)\n\n inH = inW = int(res)\n inImage = []; tmpList = []\n for i in range(inH): # 입력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW):\n tmpList.append(0)\n inImage.append(tmpList)\n while True :\n row_tuple = cur.fetchone()\n if row_tuple == None :\n break\n row, col, value = row_tuple\n inImage[row][col] = value\n\n cur.close()\n con.close()\n equal_raw()\n print(\"Ok.\")\n\n ################################################################\n\n subWindow = Toplevel(window)\n listbox = Listbox(subWindow)\n button = Button(subWindow, text='선택', command=selectTable)\n listbox.pack(); button.pack()\n for sName in tableNameList :\n listbox.insert(END, sName)\n subWindow.lift()\n except :\n cur.close()\n con.close()\n print(\"Error.\")\n\ndef openRawMySQL() :\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n global csvList, input_file\n con = pymysql.connect(host='192.168.200.131', user='root', password='1234', db='imageDB', charset='utf8') # 데이터베이스 지정(또는 연결)\n cur = con.cursor() # 연결 통로 생성 (쿼리문을 날릴 통로)\n try :\n sql = \"SELECT DISTINCT filename, resolution FROM imageTable\"\n cur.execute(sql)\n tableNameList = [] # ['강아지:128', '강아지:512' ....]\n while True :\n row = cur.fetchone()\n if row == None :\n break\n tableNameList.append( row[0] + ':' + str(row[1]) )\n\n ######## 내부 함수 (Inner Function) : 함수 안의 함수,지역함수 #######\n def selectTable() :\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n selectedIndex = listbox.curselection()[0]\n subWindow.destroy()\n fname, res = tableNameList[selectedIndex].split(':')\n filename = fname\n sql = \"SELECT row, col, value FROM imageTable WHERE filename='\" + \\\n fname + \"' AND resolution=\" + res\n print(sql)\n cur.execute(sql)\n\n inH = inW = int(res)\n inImage = []; tmpList = []\n for i in range(inH): # 입력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW):\n tmpList.append(0)\n inImage.append(tmpList)\n while True :\n row_tuple = cur.fetchone()\n if row_tuple == None :\n break\n row, col, value = row_tuple\n inImage[row][col] = value\n\n cur.close()\n con.close()\n equal_raw()\n print(\"Ok! openMySQL\")\n\n ################################################################\n\n subWindow = Toplevel(window)\n listbox = Listbox(subWindow)\n button = Button(subWindow, text='선택', command=selectTable)\n listbox.pack(); button.pack()\n for sName in tableNameList :\n listbox.insert(END, sName)\n subWindow.lift()\n\n except :\n cur.close()\n con.close()\n print(\"Error! openMySQL\")\n\n\n#### 저장메뉴\n\ndef saveRawFile() :\n global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH\n saveFp = asksaveasfile(parent=window, mode='wb', defaultextension=\"*.raw\", filetypes=((\"RAW파일\", \"*.raw\"), (\"모든파일\", \"*.*\")))\n for i in range(outH):\n for k in range(outW):\n saveFp.write(struct.pack('B',outImage[i][k]))\n saveFp.close()\n\ndef saveColorFile() :\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n draw = Drawing() # 빈 판을 준비\n\n saveFp = asksaveasfile(parent=window, mode='w', defaultextension='.png'\n , filetypes=((\"그림파일\", \"*.gif;*.jpg;*.png;*.tif;*.bmp\"), (\"모든파일\", \"*.*\")))\n\n # 빈 판에 칼라찍기. 
'#000000~#FFFFFF'\n for i in range(outH):\n for k in range(outW):\n dataR = outImageR[i][k]\n dataG = outImageG[i][k]\n dataB = outImageB[i][k]\n hexStr = '#'\n if dataR > 15:\n hexStr += hex(dataR)[2:]\n else:\n hexStr += ('0' + hex(dataR)[2:])\n if dataG > 15:\n hexStr += hex(dataG)[2:]\n else:\n hexStr += ('0' + hex(dataG)[2:])\n if dataB > 15:\n hexStr += hex(dataB)[2:]\n else:\n hexStr += ('0' + hex(dataB)[2:])\n draw.fill_color = Color(hexStr)\n draw.color(k, i, 'replace')\n\n with Image(filename=filename) as img:\n draw(img)\n img.save(filename=saveFp.name)\n\n print('Save... OK!')\n\ndef saveRawCSV() :\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n output_file = asksaveasfile(parent=window, mode='w',\n defaultextension=\"*.csv\", filetypes=((\"CSV파일\", \"*.csv\"), (\"모든파일\", \"*.*\")))\n output_file = output_file.name\n\n header = ['Column', 'Row', 'Value']\n with open(output_file, 'w', newline='') as filewriter:\n csvWriter = csv.writer(filewriter)\n csvWriter.writerow(header)\n for row in range(outW):\n for col in range(outH):\n data = outImage[row][col]\n row_list = [row, col, data]\n csvWriter.writerow(row_list)\n\ndef saveRawSQLite() :\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n global csvList, input_file\n con = sqlite3.connect('imageDB') # 데이터베이스 지정(또는 연결)\n cur = con.cursor() # 연결 통로 생성 (쿼리문을 날릴 통로)\n # 열이름 리스트 만들기\n colList = []\n fname = os.path.basename(filename).split(\".\")[0]\n try:\n sql = \"CREATE TABLE imageTable( filename CHAR(20), resolution smallint\" + \\\n \", row smallint, col smallint, value smallint)\"\n cur.execute(sql)\n except:\n pass\n\n for i in range(inW) :\n for k in range(inH) :\n sql = \"INSERT INTO imageTable VALUES('\" + fname + \"',\" + str(inW) + \\\n \",\" + str(i) + \",\" + str(k) + \",\" + str(inImage[i][k]) +\")\"\n cur.execute(sql)\n con.commit()\n cur.close()\n con.close() # 데이터베이스 연결 종료\n print('Ok!')\n\ndef saveRawMySQL() :\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n global csvList, input_file\n con = pymysql.connect(host='192.168.200.131', user='root', password='1234', db='imageDB', charset='utf8') # 데이터베이스 지정(또는 연결)\n cur = con.cursor() # 연결 통로 생성 (쿼리문을 날릴 통로)\n # 열이름 리스트 만들기\n colList = []\n fname = os.path.basename(filename).split(\".\")[0]\n try:\n sql = \"CREATE TABLE imageTable( filename CHAR(20), resolution smallint\" + \\\n \", row smallint, col smallint, value smallint)\"\n cur.execute(sql)\n except:\n pass\n\n try:\n sql = \"DELETE FROM imageTable WHERE filename='\" + \\\n fname + \"' AND resolution=\" + str(outW)\n cur.execute(sql)\n con.commit()\n except:\n pass\n\n for i in range(inW) :\n for k in range(inH) :\n sql = \"INSERT INTO imageTable VALUES('\" + fname + \"',\" + str(outW) + \\\n \",\" + str(i) + \",\" + str(k) + \",\" + str(outImage[i][k]) +\")\"\n cur.execute(sql)\n con.commit()\n cur.close()\n con.close() # 데이터베이스 연결 종료\n print('Ok! 
saveMySQL')\n\n\ndef saveNumExcel():\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n output_file = asksaveasfile(parent=window, mode='w',\n defaultextension=\"*.xlsx\", filetypes=((\"EXCEL파일\", \"*.xls\"), (\"모든파일\", \"*.*\")))\n output_file = output_file.name\n\n sheetName = os.path.basename(output_file).split(\".\")[0]\n wb = xlwt.Workbook()\n ws = wb.add_sheet(sheetName)\n\n for rowNum in range(outH):\n for colNum in range(outW):\n data = outImage[rowNum][colNum]\n ws.write(rowNum, colNum, data)\n wb.save(output_file)\n\ndef saveRawExcel():\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n output_file = asksaveasfile(parent=window, mode='w',\n defaultextension=\"*.xlsx\", filetypes=((\"XLSX파일\", \"*.xls\"), (\"모든파일\", \"*.*\")))\n output_file = output_file.name\n\n sheetName = os.path.basename(output_file).split(\".\")[0]\n wb = xlsxwriter.Workbook(output_file)\n ws = wb.add_worksheet(sheetName)\n\n # 워크시트의 열 너비 및 행 높이를 지정\n ws.set_column(0, outW, 1.0) # 약 0.34 열 너비\n for r in range(outH):\n ws.set_row(r, 9.5) # 약 0.35 행 높이\n\n for rowNum in range(outW):\n for colNum in range(outH):\n data = outImage[rowNum][colNum]\n # data 값으로 셀의 배경색을 조절 #000000~#FFFFFF\n if data > 15:\n hexStr = '#' + (hex(data)[2:]) * 3\n else:\n hexStr = '#' + ('0' + hex(data)[2:]) * 3\n\n # 셀의 포맷을 준비\n cell_format = wb.add_format()\n cell_format.set_bg_color(hexStr)\n ws.write(rowNum, colNum, '', cell_format)\n wb.close()\n\n\ndef saveColorNumExcel():\n global window, canvas, paper, filename, inImage, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n output_file = asksaveasfile(parent=window, mode='w',\n defaultextension=\"*.xlsx\", filetypes=((\"EXCEL파일\", \"*.xls\"), (\"모든파일\", \"*.*\")))\n output_file = output_file.name\n\n sheetName = os.path.basename(output_file).split(\".\")[0]\n wb = xlwt.Workbook()\n ws = wb.add_sheet(sheetName)\n\n for rowNum in range(outH):\n for colNum in range(outW):\n data = outImageR[rowNum][colNum]\n data += outImageG[rowNum][colNum]\n data += outImageB[rowNum][colNum]\n ws.write(rowNum, colNum, data)\n wb.save(output_file)\n\ndef saveColorExcel():\n global window, canvas, paper, filename, inImage, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n output_file = asksaveasfile(parent=window, mode='w',\n defaultextension=\"*.xlsx\", filetypes=((\"XLSX파일\", \"*.xls\"), (\"모든파일\", \"*.*\")))\n output_file = output_file.name\n\n sheetName = os.path.basename(output_file).split(\".\")[0]\n wb = xlsxwriter.Workbook(output_file)\n ws = wb.add_worksheet(sheetName)\n\n # 워크시트의 열 너비 및 행 높이를 지정\n ws.set_column(0, outW, 1.0) # 약 0.34 열 너비\n for r in range(outH):\n ws.set_row(r, 9.5) # 약 0.35 행 높이\n\n for rowNum in range(outW):\n for colNum in range(outH):\n dataR = outImageR[rowNum][colNum]\n # data 값으로 셀의 배경색을 조절 #000000~#FFFFFF\n if dataR <= 15: # 15 이하일 경우, 1자리 수이기 때문에 0을 추가\n hexStr = '#' + ('0' + hex(dataR)[2:])\n else:\n hexStr = '#' + (hex(dataR)[2:]) # 16진수 변환 후, R(2자리)\n\n dataG = outImageG[rowNum][colNum]\n # data 값으로 셀의 배경색을 조절 #000000~#FFFFFF\n if dataG <= 15:\n hexStr += ('0' + hex(dataG)[2:]) # G(2자리)\n else:\n hexStr += hex(dataG)[2:]\n dataB = outImageB[rowNum][colNum]\n # data 값으로 셀의 배경색을 조절 #000000~#FFFFFF\n if dataB <= 15:\n hexStr += ('0' + hex(dataB)[2:]) # B(2자리)\n else:\n hexStr += hex(dataB)[2:]\n\n # 셀의 포맷을 준비\n cell_format = wb.add_format() # RGB코드는 #을 앞에\n cell_format.set_bg_color((hexStr))\n ws.write(rowNum, colNum, '', 
cell_format)

    wb.close()

def saveRawAllMySQL(): # save every raw file in a folder into the DB
    global window, canvas, paper, filename, inImage, inW, inH
    con = pymysql.connect(host='192.168.226.131', user='root', password='1234', db='imagedb',
                          charset='utf8') # pymySQL connection
    cur = con.cursor()
    dirName = askdirectory()
    file_list = glob.glob(os.path.join(dirName, "*.raw")) # pick the folder
    for input_file in file_list:
        filereader = open(input_file, 'rb')
        fsize = os.path.getsize(input_file) # size of the raw file
        inH = inW = int(math.sqrt(fsize))
        tableName = os.path.basename(input_file).split(".")[0] # short file name used as the table name
        colList = ["row", "col", "grayscale"] # columns of the table
        try:
            sql = "CREATE table " + tableName + "("
            for colname in colList:
                sql += colname + " int(5),"
            sql = sql[:-1]
            sql += ")"
            cur.execute(sql) # table query (row, col, grayscale)
        except:
            print("error --> ", input_file)
        for i in range(inW):
            for j in range(inH):
                sql = "INSERT into " + tableName + " Values("
                sql += str(i) + ", " + str(j) + ", " + str(int(ord(filereader.read(1))))
                sql += ")"
                try:
                    cur.execute(sql) # insert one pixel value
                except:
                    pass
        filereader.close()
    con.commit()
    cur.close()
    con.close()
    print("OK")

def saveRAW(): # write the RAW data (row / col / grayscale) stored in the DB back into files
    global window, canvas, paper, filename, inImage, inW, inH
    con = pymysql.connect(host='192.168.226.131', user='root', password='1234', db='imagedb',
                          charset='utf8') # pymySQL connection
    cur = con.cursor()
    sql = "SHOW TABLES" # returns the list of table names
    cur.execute(sql)
    dirName = askdirectory() # folder to save into
    tableNameList = []
    while True:
        row = cur.fetchone()
        if row == None:
            break
        tableNameList.append(row[0]) # tableNameList -> every table name
    for tableName in tableNameList:
        sql = "SELECT * FROM " + tableName
        cur.execute(sql)
        output_file = dirName + "/" + tableName + ".raw" # path of the saved file
        saveFp = open(output_file, "wb") # open once per table so every pixel is kept
        while True:
            row = cur.fetchone()
            if row == None:
                break
            saveFp.write(struct.pack('B', row[2]))
        saveFp.close()
    cur.close()
    con.close()


#### Exit menu
def exitFile():
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH
    window.quit()
    window.destroy()



### Pixel-point processing

#### Identical copy

def equal_raw() : # Gray-scale copy algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    #############################
    # The real image-processing algorithm
    ############################
    for i in range(inH) :
        for k in range(inW) :
            outImage[i][k] = inImage[i][k]
    display_raw()
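# Every routine below allocates its output planes with the same
# "H x W list of zero rows" pattern. A minimal sketch of that pattern as a
# standalone helper -- the name makeImage is our illustration only, the
# program itself builds the lists inline:
def makeImage(h, w, fill=0):
    """Return an h x w image as a list of independent row lists.

    Each row is created fresh inside the comprehension, so
    makeImage(4, 3)[0][0] = 255 changes only one cell (unlike the
    classic [[fill] * w] * h mistake, which aliases every row).
    """
    return [[fill] * w for _ in range(h)]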
def equal_Color() : # Color copy algorithm
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImageR = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    outImageG = [[0] * outW for _ in range(outH)]
    outImageB = [[0] * outW for _ in range(outH)]
    #############################
    # The real image-processing algorithm
    ############################
    for i in range(inH):
        for k in range(inW):
            outImageR[i][k] = inImageR[i][k]
            outImageG[i][k] = inImageG[i][k]
            outImageB[i][k] = inImageB[i][k]

    display_Color()

#### Brightness adjustment

## Gray-scale values
def raw_brightAdd() : # Gray-scale brightness (addition)
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    #############################
    # The real image-processing algorithm
    ############################
    value = askinteger('Brighten', 'Value to add -->', minvalue=1, maxvalue=255)
    for i in range(inH) :
        for k in range(inW) :
            if inImage[i][k] + value > 255 :
                outImage[i][k] = 255
            else :
                outImage[i][k] = inImage[i][k] + value
    display_raw()

def raw_brightSub() : # Gray-scale brightness (subtraction)
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    #############################
    # The real image-processing algorithm
    ############################
    value = askinteger('Darken', 'Value to subtract -->', minvalue=1, maxvalue=255)
    for i in range(inH) :
        for k in range(inW) :
            if inImage[i][k] - value < 0 :
                outImage[i][k] = 0
            else :
                outImage[i][k] = inImage[i][k] - value
    display_raw()

def raw_mulitply() : # Gray-scale brightness (multiplication)
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    #############################
    # The real image-processing algorithm
    ############################
    value = askinteger('Brighten more', 'Enter a number (1~10):', minvalue=1, maxvalue=10)
    for i in range(inH) :
        for k in range(inW) :
            if inImage[i][k] * value > 255 :
                outImage[i][k] = 255
            else :
                outImage[i][k] = int(inImage[i][k] * value)
    display_raw()

def raw_division() : # Gray-scale brightness (division)
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    #############################
    # The real image-processing algorithm
    ############################
    value = askinteger('Darken', 'Enter a number (1~10):', minvalue=1, maxvalue=10)
    for i in range(inH):
        for k in range(inW):
            # divide the input pixel; value >= 1, so the result stays inside 0..255
            outImage[i][k] = int(inImage[i][k] / value)
    display_raw()
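# The brightness operations above saturate at 0 and 255 with repeated
# if/else chains. A hedged sketch of that logic as one reusable function
# (clamp255 is our name for illustration; the program inlines the checks):
def clamp255(v):
    """Limit v to the displayable gray range 0..255.

    clamp255(300) -> 255, clamp255(-4) -> 0, clamp255(127.9) -> 127
    """
    return 0 if v < 0 else 255 if v > 255 else int(v)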
## Color values
def color_brightAdd() : # Color brightness (addition)
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR,outImageG,outImageB, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImageR = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    outImageG = [[0] * outW for _ in range(outH)]
    outImageB = [[0] * outW for _ in range(outH)]
    #############################
    # The real image-processing algorithm
    ############################
    value = askinteger('Brighten', 'Value to add -->', minvalue=1, maxvalue=255)
    for i in range(inH) :
        for k in range(inW) :
            outImageR[i][k] = 255 if inImageR[i][k] + value > 255 else inImageR[i][k] + value
            outImageG[i][k] = 255 if inImageG[i][k] + value > 255 else inImageG[i][k] + value
            outImageB[i][k] = 255 if inImageB[i][k] + value > 255 else inImageB[i][k] + value

    display_Color()

def color_brightSub() : # Color brightness (subtraction)
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR,outImageG,outImageB, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImageR = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    outImageG = [[0] * outW for _ in range(outH)]
    outImageB = [[0] * outW for _ in range(outH)]
    #############################
    # The real image-processing algorithm
    ############################
    value = askinteger('Darken', 'Value to subtract -->', minvalue=1, maxvalue=255)
    for i in range(inH) :
        for k in range(inW) :
            outImageR[i][k] = 0 if inImageR[i][k] - value < 0 else inImageR[i][k] - value
            outImageG[i][k] = 0 if inImageG[i][k] - value < 0 else inImageG[i][k] - value
            outImageB[i][k] = 0 if inImageB[i][k] - value < 0 else inImageB[i][k] - value

    display_Color()
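# The Color variants repeat identical per-pixel arithmetic for R, G and B.
# A sketch of how a single loop could serve all three planes; the helper
# name applyToChannels and the lambda below are our illustration, not part
# of the program:
def applyToChannels(planes, h, w, op):
    """planes: [R, G, B] as h x w lists; op: pixel value -> pixel value."""
    return [[[op(plane[i][k]) for k in range(w)] for i in range(h)]
            for plane in planes]

# Usage idea (brighten by 50 with saturation):
#   outR, outG, outB = applyToChannels([inImageR, inImageG, inImageB],
#                                      inH, inW, lambda p: min(p + 50, 255))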
def color_mulitply() : # Color brightness (multiplication)
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR,outImageG,outImageB, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImageR = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    outImageG = [[0] * outW for _ in range(outH)]
    outImageB = [[0] * outW for _ in range(outH)]
    #############################
    # The real image-processing algorithm
    ############################
    value = askinteger('Brighten more', 'Enter a number (1~10):', minvalue=1, maxvalue=10)
    for i in range(inH) :
        for k in range(inW) :
            outImageR[i][k] = 255 if inImageR[i][k] * value > 255 else int(inImageR[i][k] * value)
            outImageG[i][k] = 255 if inImageG[i][k] * value > 255 else int(inImageG[i][k] * value)
            outImageB[i][k] = 255 if inImageB[i][k] * value > 255 else int(inImageB[i][k] * value)

    display_Color()

def color_division() : # Color brightness (division)
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR,outImageG,outImageB, inW, inH, outW, outH
    # Important! Decide the size of the output memory
    outW = inW
    outH = inH
    outImageR = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    outImageG = [[0] * outW for _ in range(outH)]
    outImageB = [[0] * outW for _ in range(outH)]
    #############################
    # The real image-processing algorithm
    ############################
    value = askinteger('Darken', 'Enter a number (1~10):', minvalue=1, maxvalue=10)
    for i in range(inH):
        for k in range(inW):
            # divide every channel of the input pixel; value >= 1 keeps the 0..255 range
            outImageR[i][k] = int(inImageR[i][k] / value)
            outImageG[i][k] = int(inImageG[i][k] / value)
            outImageB[i][k] = int(inImageB[i][k] / value)

    display_Color()



#### Pixel-value processing

def raw_reverse(): # Gray-scale negative
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important!! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate a fresh output memory
    #####################################
    ## The real image-processing algorithm ##
    ####################################
    for i in range(inH):
        for k in range(inW):
            outImage[i][k] = 255 - inImage[i][k]
    display_raw()
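# raw_cap / raw_cup below apply the parabola look-up curves
#   cap: out = 255 - 255 * (in / 128 - 1)^2   (lifts the mid-tones)
#   cup: out =       255 * (in / 128 - 1)^2   (darkens the mid-tones)
# A small self-contained check of a few sample points (our sketch only,
# not called anywhere by the program):
def parabola_samples():
    for v in (0, 64, 128, 192, 255):
        cap = 255 - 255 * (v / 128.0 - 1.0) ** 2
        cup = 255 * (v / 128.0 - 1.0) ** 2
        print(v, int(cap), int(cup)) # e.g. 0 -> (0, 255), 128 -> (255, 0)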
def raw_cap(): # Gray-scale parabola (Cap)
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important!! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate a fresh output memory
    #####################################
    ## The real image-processing algorithm ##
    ####################################
    for i in range(inH):
        for k in range(inW):
            new_value = 255 - 255 * pow(inImage[i][k] / 128.0 - 1.0, 2)
            if new_value < 0:
                outImage[i][k] = 0
            elif new_value > 255:
                outImage[i][k] = 255
            else:
                outImage[i][k] = int(new_value)
    display_raw()

def raw_cup(): # Gray-scale parabola (Cup)
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important!! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate a fresh output memory
    #####################################
    ## The real image-processing algorithm ##
    ####################################
    for i in range(inH):
        for k in range(inW):
            new_value = 255 * pow(inImage[i][k] / 128.0 - 1.0, 2)
            if new_value < 0:
                outImage[i][k] = 0
            elif new_value > 255:
                outImage[i][k] = 255
            else:
                outImage[i][k] = int(new_value)
    display_raw()

def raw_gamma(): # Gray-scale gamma correction
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important!! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate a fresh output memory
    #####################################
    ## The real image-processing algorithm ##
    ####################################
    value = askfloat('Gamma', 'Enter a real value (0~1):', minvalue=0, maxvalue=1)
    for i in range(inH):
        for k in range(inW):
            new_value = pow(inImage[i][k], 1 / value)
            if new_value < 0:
                outImage[i][k] = 0
            elif new_value > 255:
                outImage[i][k] = 255
            else:
                outImage[i][k] = int(new_value)
    display_raw()


def raw_binarAdaptive(): # Gray-scale binarization with a user threshold
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important!! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate a fresh output memory
    #####################################
    ## The real image-processing algorithm ##
    ####################################
    value = askfloat('Binarization', 'Enter a number (1~255).', minvalue=1, maxvalue=255)
    for i in range(inH):
        for k in range(inW):
            outImage[i][k] = 255 if inImage[i][k] >= value else 0
    display_raw()

def raw_spotLight(): # Gray-scale range highlighting
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important!! Decide the size of the output memory
    outW = inW
    outH = inH
    outImage = [[0] * outW for _ in range(outH)] # allocate a fresh output memory
    #####################################
    ## The real image-processing algorithm ##
    ####################################
    startPoint = askinteger('Range to highlight', 'Start value (0~255):', minvalue=0, maxvalue=255)
    endPoint = askinteger('Range to highlight', 'End value (0~255):', minvalue=0, maxvalue=255)
    for i in range(inH):
        for k in range(inW):
            if startPoint <= inImage[i][k] <= endPoint:
                outImage[i][k] = 255
            else:
                outImage[i][k] = inImage[i][k]
    display_raw()


def raw_morphing() : # Gray-scale morphing (blend) algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # 중요! 
출력메모리의 크기를 결정\n outW = inW\n outH = inH\n\n #영상파일 선택\n filename2 = askopenfilename(parent=window,filetypes=((\"RAW파일\", \"*.raw\"), (\"모든파일\", \"*.*\")))\n if filename2 =='' or filename2 ==None:\n return\n inImage2=[]\n fsize2=os.path.getsize(filename2)\n inH2 = inW2 =int(math.sqrt(fsize2))\n if inH2 != inH:\n return\n fp2 = open(filename2, 'rb')\n for i in range(inH2): # 출력메모리 확보(0으로 초기화)\n tmpList=[]\n for k in range(inW2):\n data = int(ord(fp2.read(1)))\n tmpList.append(data)\n inImage2.append(tmpList)\n fp2.close()\n\n outImage = []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n\n ###########################\n # 진짜 영상처리 알고리즘을 구현#\n ###########################\n value = askinteger('합성비율', '두번째 영상의 가중치(%) 값-->', minvalue=1, maxvalue=99)\n w1 = (1-value/100) #첫번째 영상의 가중치\n w2 = 1-w1 #두번째 영상의 가중치\n for i in range(inH) :\n for k in range(inW) :\n data=int(inImage[i][k]*w1 +inImage2[i][k]*w2)\n if data > 255 :\n data = 255\n elif data <0:\n data =0\n outImage[i][k] = data\n display_raw()\n\n\ndef raw_endIn() : # 엔드-인 검색 알고리즘\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 출력메모리의 크기를 결정\n outW = inW; outH = inH;\n outImage = []; tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n minVal, maxVal, HIGH = 255, 0, 255\n for i in range(inH) :\n for k in range(inW) :\n data = inImage[i][k]\n if data > maxVal:\n maxVal = data\n if data < minVal:\n minVal = data\n limit = askinteger('엔드인','상하범위:', minvalue=1, maxvalue=127)\n maxVal -= limit\n minVal += limit\n\n #히스토그램 스트레칭시키기\n #new = (old - minVal) * HIGH / (maxVal - minVal)\n for i in range(inH):\n for k in range(inW):\n value = int((inImage[i][k] - minVal) * HIGH / (maxVal - minVal))\n if value < 0 :\n value = 0\n if value > 255:\n value = 255\n outImage[i][k] = value\n display_raw()\n\n\n### Color값\ndef color_reverse(): #Color 화소값 반전\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 출력메모리 크기 결정\n # 중요!! 출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], [];\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #####################################\n ## 이제부터 진짜 영상처리 알고리즘 구현 ##\n ####################################\n for i in range(inH):\n for k in range(inW):\n outImageR[i][k] = 255 - inImageR[i][k]\n for i in range(inH):\n for k in range(inW):\n outImageG[i][k] = 255 - inImageG[i][k]\n for i in range(inH):\n for k in range(inW):\n outImageB[i][k] = 255 - inImageB[i][k]\n display_Color()\n\n\ndef color_cap(): #color 파라볼라(Cap)\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 출력메모리 크기 결정\n # 중요!! 
출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #####################################\n ## 이제부터 진짜 영상처리 알고리즘 구현 ##\n ####################################\n for i in range(inH):\n for k in range(inW):\n new_value = 255 - 255 * pow((getdouble)(inImageR[i][k] / 128.0) - 1.0, 2)\n if new_value < 0:\n outImageR[i][k] = 0\n elif new_value > 255:\n outImageR[i][k] = 255\n else:\n outImageR[i][k] = int(new_value)\n for i in range(inH):\n for k in range(inW):\n new_value = 255 - 255 * pow((getdouble)(inImageG[i][k] / 128.0) - 1.0, 2)\n if new_value < 0:\n outImageG[i][k] = 0\n elif new_value > 255:\n outImageG[i][k] = 255\n else:\n outImageG[i][k] = int(new_value)\n for i in range(inH):\n for k in range(inW):\n new_value = 255 - 255 * pow((getdouble)(inImageB[i][k] / 128.0) - 1.0, 2)\n if new_value < 0:\n outImageB[i][k] = 0\n elif new_value > 255:\n outImageB[i][k] = 255\n else:\n outImageB[i][k] = int(new_value)\n\n display_Color()\n\ndef color_cup(): #color 파라볼라(Cup)\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 출력메모리 크기 결정\n # 중요!! 출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #####################################\n ## 이제부터 진짜 영상처리 알고리즘 구현 ##\n ####################################\n for i in range(inH):\n for k in range(inW):\n new_value = 255 * pow((getdouble)(inImageR[i][k] / 128.0) - 1.0, 2)\n if new_value < 0:\n outImageR[i][k] = 0\n elif new_value > 255:\n outImageR[i][k] = 255\n else:\n outImageR[i][k] = int(new_value)\n for i in range(inH):\n for k in range(inW):\n new_value = 255 * pow((getdouble)(inImageG[i][k] / 128.0) - 1.0, 2)\n if new_value < 0:\n outImageG[i][k] = 0\n elif new_value > 255:\n outImageG[i][k] = 255\n else:\n outImageG[i][k] = int(new_value)\n for i in range(inH):\n for k in range(inW):\n new_value = 255 * pow((getdouble)(inImageB[i][k] / 128.0) - 1.0, 2)\n if new_value < 0:\n outImageB[i][k] = 0\n elif new_value > 255:\n outImageB[i][k] = 255\n else:\n outImageB[i][k] = int(new_value)\n display_Color()\n\n\ndef color_gamma(): #Color 감마값\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 
출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #####################################\n ## 이제부터 진짜 영상처리 알고리즘 구현 ##\n ####################################\n value = askfloat('감마처리R', '실수값(0~1)을 입력하세요:', minvalue=0, maxvalue=1)\n for i in range(inH):\n for k in range(inW):\n new_valueR = pow(inImageR[i][k], 1 / value)\n if new_valueR < 0:\n outImageR[i][k] = 0\n elif new_valueR > 255:\n outImageR[i][k] = 255\n else:\n outImageR[i][k] = int(new_valueR)\n for i in range(inH):\n for k in range(inW):\n new_valueG = pow(inImageG[i][k], 1 / value)\n if new_valueG < 0:\n outImageG[i][k] = 0\n elif new_valueG > 255:\n outImageG[i][k] = 255\n else:\n outImageG[i][k] = int(new_valueG)\n for i in range(inH):\n for k in range(inW):\n new_valueB = pow(inImageB[i][k], 1 / value)\n if new_valueB < 0:\n outImageB[i][k] = 0\n elif new_valueB > 255:\n outImageB[i][k] = 255\n else:\n outImageB[i][k] = int(new_valueB)\n display_Color()\n\n\ndef color_binarAdaptive(): #Color 적응 이진화\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #####################################\n ## 이제부터 진짜 영상처리 알고리즘 구현 ##\n ####################################\n value = askfloat('이진화 처리', '숫자(1~255)를 입력하세요.', minvalue=1, maxvalue=255)\n for i in range(inH):\n for k in range(inW):\n if inImageR[i][k] >= value:\n outImageR[i][k] = 255\n else:\n outImageR[i][k] = 0\n for i in range(inH):\n for k in range(inW):\n if inImageG[i][k] >= value:\n outImageG[i][k] = 255\n else:\n outImageG[i][k] = 0\n for i in range(inH):\n for k in range(inW):\n if inImageB[i][k] >= value:\n outImageB[i][k] = 255\n else:\n outImageB[i][k] = 0\n display_Color()\n\ndef color_spotLight(): #Color 범위 강조 변환\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 
출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #####################################\n ## 이제부터 진짜 영상처리 알고리즘 구현 ##\n ####################################\n\n startPoint = askinteger('강조할 범위 값', '시작 값(0~255):', minvalue=0, maxvalue=255)\n endPoint = askinteger('강조할 범위 값', '마지막 값(0~255):', minvalue=0, maxvalue=255)\n startPointR = startPoint\n startPointG = startPoint\n startPointB = startPoint\n endPointR = endPoint\n endPointG = endPoint\n endPointB = endPoint\n\n for i in range(inH):\n for k in range(inW):\n if (inImageR[i][k] >= startPointR) & (inImageR[i][k] <= endPointR):\n outImageR[i][k] = 255\n else:\n outImageR[i][k] = inImageR[i][k]\n for i in range(inH):\n for k in range(inW):\n if (inImageG[i][k] >= startPointG) & (inImageG[i][k] <= endPointG):\n outImageG[i][k] = 255\n else:\n outImageG[i][k] = inImageG[i][k]\n for i in range(inH):\n for k in range(inW):\n if (inImageB[i][k] >= startPointB) & (inImageB[i][k] <= endPointB):\n outImageB[i][k] = 255\n else:\n outImageB[i][k] = inImageB[i][k]\n display_Color()\n\n\ndef color_morphing() : # Color 모핑(합성) 알고리즘\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n\n #영상파일 선택\n filename2 = askopenfilename(parent=window,filetypes=((\"그림파일\", \"*.gif;*.png;*.jpg;*.tif\"), (\"모든파일\", \"*.*\")))\n photo = PhotoImage(file=filename2)\n inW2 = photo.width()\n inH2 = photo.height()\n if filename2 =='' or filename2 ==None:\n return\n\n # 중요!! 
출력메모리 크기 결정\n inImageR2,inImageG2,inImageB2=[],[],[]\n for i in range(inH2): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW2):\n tmpList.append(0)\n inImageR2.append(tmpList[:])\n for i in range(inH2): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW2):\n tmpList.append(0)\n inImageG2.append(tmpList[:])\n for i in range(inH2): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(inW2):\n tmpList.append(0)\n inImageB2.append(tmpList[:])\n\n ## r,g,b 값 가져오기\n outW, outH = inW, inH\n if inH2 != inH:\n return\n fp2 = open(filename2, 'rb')\n for i in range(inH2):\n for k in range(inW2):\n r, g, b = photo.get(k, i)\n # print(r,g,b,end='/')\n inImageR2[i][k] = r\n inImageG2[i][k] = g\n inImageB2[i][k] = b\n # print(inImageR[i][k], inImageG[i][k], inImageB[i][k], end='/')\n photo = None\n\n ###########################\n # 진짜 영상처리 알고리즘을 구현#\n ###########################\n value = askinteger('합성비율', '두번째 영상의 가중치(%) 값-->', minvalue=1, maxvalue=99)\n wR1 = (1-value/100) #첫번째 영상의 가중치\n wR2 = 1-wR1 #두번째 영상의 가중치\n wG1 = (1-value/100) #첫번째 영상의 가중치\n wG2 = 1-wG1 #두번째 영상의 가중치\n wB1 = (1-value/100) #첫번째 영상의 가중치\n wB2 = 1-wB1 #두번째 영상의 가중치\n for i in range(inH) :\n for k in range(inW) :\n dataR=int(inImageR[i][k]*wR1 +inImageR2[i][k]*wR2)\n if dataR > 255 :\n dataR = 255\n elif dataR <0:\n dataR =0\n outImageR[i][k] = dataR\n for i in range(inH) :\n for k in range(inW) :\n dataG=int(inImageG[i][k]*wG1 +inImageG2[i][k]*wG2)\n if dataG > 255 :\n dataG = 255\n elif dataG <0:\n dataG =0\n outImageG[i][k] = dataG\n for i in range(inH) :\n for k in range(inW) :\n dataB=int(inImageB[i][k]*wB1 +inImageB2[i][k]*wB2)\n if dataB > 255 :\n dataB = 255\n elif dataB <0:\n dataB =0\n outImageB[i][k] = dataB\n fp2.close()\n display_Color()\n\n### 기하학처리\n\n#### Gray-scale\n\ndef raw_zoomIn():# Gray-scale 화면확대\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 출력메모리의 크기를 결정\n scale = askinteger('확대하기', '값(2~32)을 입력하세요:', minvalue=2, maxvalue=32)\n outW = int(inW * scale)\n outH = int(inH * scale)\n outImage = []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n for i in range(outH):\n for k in range(outW):\n outImage[i][k] = inImage[int(i / scale)][int(k / scale)]\n display_raw()\n\ndef raw_zoomOut(): # Gray-scale 화면축소\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 출력메모리의 크기를 결정\n scale = askinteger('축소하기', '값(2~32)을 입력하세요:', minvalue=2, maxvalue=32)\n outW = int(inW / scale)\n outH = int(inH / scale)\n outImage = []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n for i in range(inH):\n for k in range(inW):\n outImage[int(i / scale)][int(k / scale)] = inImage[i][k]\n display_raw()\n\n\ndef raw_upDown() : # Gray-scale 상하반전\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 
출력메모리의 크기를 결정\n outW = inW\n outH = inH\n outImage = []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n for i in range(inH) :\n for k in range(inW) :\n outImage[outW-1-i][k] = inImage[i][k]\n display_raw()\n\ndef raw_rightLeft(): #Gray-scale 좌우반전\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 출력메모리의 크기를 결정\n outW = inW\n outH = inH\n outImage = []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n for i in range(inH) :\n for k in range(inW) :\n outImage[i][outH-1-k] = inImage[i][k]\n display_raw()\n\n\n\n## 화면이동 서브 기능 함수\n\ndef raw_panImage() :\n global raw_panYN\n raw_panYN = True\n\ndef raw_mouseClick(event) : # Gray-scale 마우스 이벤트 알고리즘\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n global sx, sy, ex, ey, raw_panYN\n if not raw_panYN :\n return\n sx = event.x; sy = event.y\n\ndef raw_mouseDrop(event): # Gray-scale 마우스 이벤트 알고리즘\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n global sx, sy, ex, ey, raw_panYN\n if not raw_panYN:\n return\n ex = event.x; ey = event.y\n my = sx - ex ; mx = sy - ey\n\n # 중요! 출력메모리의 크기를 결정\n outW = inW; outH = inH\n outImage = []; tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n for i in range(inH) :\n for k in range(inW) :\n if 0<= i-mx <outH and 0<= k-my < outW :\n outImage[i-mx][k-my] = inImage[i][k]\n raw_panYN = False\n display_raw()\n\n\n## 영상 회전\ndef raw_rotate1(): #영상회전(포워딩)\n global inImage, outImage, inH, inW, outH, outW, window, canvas, paper, filename\n degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)\n # 출력 파일의 크기 결정.\n outW = inW;\n outH = inH\n # 출력 영상 메모리 확보\n outImage = []\n for i in range(0, inW):\n tmpList = []\n for k in range(0, inH):\n tmpList.append(0)\n outImage.append(tmpList)\n ### 진짜 영상 처리 알고리즘 ###\n radian = degree * 3.141592 / 180.0\n for i in range(0, inW):\n for k in range(0, inH):\n xs = i;\n ys = k\n xd = int(math.cos(radian) * xs - math.sin(radian) * ys)\n yd = int(math.sin(radian) * xs + math.cos(radian) * ys)\n if 0 <= xd < outW and 0 <= yd < outH:\n outImage[xd][yd] = inImage[xs][ys]\n ###############################\n display_raw()\n\n\ndef raw_rotate2(): #영상회전(백워딩 및 중앙)\n global inImage, outImage, inH, inW, outH, outW, window, canvas, paper, filename\n degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)\n # 출력 파일의 크기 결정.\n outW = inW;\n outH = inH\n # 출력 영상 메모리 확보\n outImage = []\n for i in range(0, inW):\n tmpList = []\n for k in range(0, inH):\n tmpList.append(0)\n outImage.append(tmpList)\n ### 진짜 영상 처리 알고리즘 ###\n radian = degree * 3.141592 / 180.0\n cx = int(inW / 2);\n cy = int(inH / 2)\n for i in range(0, outW):\n for k in range(0, outH):\n xs = i;\n ys = k\n xd = int(math.cos(radian) * (xs - cx)\n - math.sin(radian) * (ys - cy)) + cx\n yd = int(math.sin(radian) * (xs - cx)\n + math.cos(radian) * (ys - cy)) + cy\n if 0 <= xd < outW and 0 <= yd < outH:\n outImage[xs][ys] = inImage[xd][yd]\n else:\n outImage[xs][ys] = 255\n 
###############################\n display_raw()\n\n\ndef raw_rotate3(): # 영상회전(확대)\n global inImage, outImage, inH, inW, outH, outW, window, canvas, paper, filename\n degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)\n # 출력 파일의 크기 결정.\n radian90 = (90 - degree) * 3.141592 / 180.0\n radian = degree * 3.141592 / 180.0\n\n outW = int(inH * math.cos(radian90) + inW * math.cos(radian))\n outH = int(inH * math.cos(radian) + inW * math.cos(radian90))\n\n # outW = inW; outH = inH\n # 출력 영상 메모리 확보\n outImage = []\n for i in range(0, outW):\n tmpList = []\n for k in range(0, outH):\n tmpList.append(0)\n outImage.append(tmpList)\n ### 진짜 영상 처리 알고리즘 ###\n\n # inImage2 크기를 outImage와 동일하게\n inImage2 = []\n for i in range(0, outW):\n tmpList = []\n for k in range(0, outH):\n tmpList.append(255)\n inImage2.append(tmpList)\n\n # inImage --> inImage2의 중앙으로\n gap = int((outW - inW) / 2)\n for i in range(0, inW):\n for k in range(0, inH):\n inImage2[i + gap][k + gap] = inImage[i][k]\n\n ### 진짜 영상 처리 알고리즘 ###\n cx = int(outW / 2);\n cy = int(outH / 2)\n\n for i in range(0, outW):\n for k in range(0, outH):\n xs = i;\n ys = k\n xd = int(math.cos(radian) * (xs - cx)\n - math.sin(radian) * (ys - cy)) + cx\n yd = int(math.sin(radian) * (xs - cx)\n + math.cos(radian) * (ys - cy)) + cy\n # if 0 <= xd < outW and 0 <= yd < outH :\n if 0 <= xd < outW and 0 <= yd < outH:\n outImage[xs][ys] = inImage2[xd][yd]\n else:\n outImage[xs][ys] = 255\n ###############################\n display_raw()\n\n\n\n#### Color값\n\ndef color_zoomIn():# Color 화면확대\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요! 출력메모리의 크기를 결정\n scale = askinteger('축소하기', '값(2~32)을 입력하세요:', minvalue=2, maxvalue=32)\n outW = int(inW * scale)\n outH = int(inH * scale)\n outImageR, outImageG, outImageB = [], [], []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n for i in range(outH):\n for k in range(outW):\n outImageR[i][k] = inImageR[int(i / scale)][int(k / scale)]\n for i in range(outH):\n for k in range(outW):\n outImageG[i][k] = inImageG[int(i / scale)][int(k / scale)]\n for i in range(outH):\n for k in range(outW):\n outImageB[i][k] = inImageB[int(i / scale)][int(k / scale)]\n display_Color()\n\ndef color_zoomOut(): # Color 화면축소\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요! 
출력메모리의 크기를 결정
    scale = askinteger('Shrink', 'Enter a value (2~32):', minvalue=2, maxvalue=32)
    outW = int(inW / scale)
    outH = int(inH / scale)
    outImageR = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    outImageG = [[0] * outW for _ in range(outH)]
    outImageB = [[0] * outW for _ in range(outH)]
    #############################
    # The real image-processing algorithm
    ############################
    for i in range(inH):
        for k in range(inW):
            y = int(i / scale); x = int(k / scale)
            if y < outH and x < outW: # guard the edge when the size is not a multiple of scale
                outImageR[y][x] = inImageR[i][k]
                outImageG[y][x] = inImageG[i][k]
                outImageB[y][x] = inImageB[i][k]
    display_Color()
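# color_zoomOut above forward-maps every input pixel, so scale*scale input
# pixels land on each output cell and the last write wins. A backward-
# mapping sketch (our illustration, not the program's method) reads exactly
# one source pixel per output pixel instead:
def shrink_nearest(src, srcH, srcW, scale):
    """Nearest-neighbour downscale of one h x w plane by an integer factor."""
    dstH, dstW = srcH // scale, srcW // scale
    return [[src[i * scale][k * scale] for k in range(dstW)]
            for i in range(dstH)]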
def color_upDown() : # Color vertical flip
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH
    # Important!! Decide the size of the output memory
    outW = inW
    outH = inH
    outImageR = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    outImageG = [[0] * outW for _ in range(outH)]
    outImageB = [[0] * outW for _ in range(outH)]
    #############################
    # The real image-processing algorithm
    ############################
    for i in range(inH) :
        for k in range(inW) :
            # mirror the rows: row indices run over the height, not the width
            outImageR[outH-1-i][k] = inImageR[i][k]
            outImageG[outH-1-i][k] = inImageG[i][k]
            outImageB[outH-1-i][k] = inImageB[i][k]
    display_Color()

def color_rightLeft(): # Color horizontal flip
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH
    # Important!! Decide the size of the output memory
    outW = inW
    outH = inH
    outImageR = [[0] * outW for _ in range(outH)] # allocate the output memory (initialized to 0)
    outImageG = [[0] * outW for _ in range(outH)]
    outImageB = [[0] * outW for _ in range(outH)]
    #############################
    # The real image-processing algorithm
    ############################
    for i in range(inH) :
        for k in range(inW) :
            # mirror the columns: column indices run over the width, not the height
            outImageR[i][outW-1-k] = inImageR[i][k]
            outImageG[i][outW-1-k] = inImageG[i][k]
            outImageB[i][outW-1-k] = inImageB[i][k]
    display_Color()


## Panning helper functions

def color_panImage() :
    global color_panYN
    color_panYN = True

def color_mouseClick(event) : # Color mouse-press handler
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH
    global sx, sy, ex, ey, color_panYN
    if not color_panYN :
        return
    sx = event.x; sy = event.y

def color_mouseDrop(event): # Color mouse-release handler
    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH
    global sx, sy, ex, ey, color_panYN
    if not color_panYN:
        return
    ex = event.x; ey = event.y
    my = sx - ex ; mx = sy - ey

    # 중요!! 
출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n for i in range(inH) :\n for k in range(inW) :\n if 0<= i-mx <outH and 0<= k-my < outW :\n outImageR[i-mx][k-my] = inImageR[i][k]\n\n for i in range(inH) :\n for k in range(inW) :\n if 0<= i-mx <outH and 0<= k-my < outW :\n outImageG[i-mx][k-my] = inImageG[i][k]\n\n for i in range(inH) :\n for k in range(inW) :\n if 0<= i-mx <outH and 0<= k-my < outW :\n outImageB[i-mx][k-my] = inImageB[i][k]\n color_panYN = False\n display_Color()\n\n## 영상 회전\ndef color_rotate1(): #영상회전(포워딩)\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)\n # 출력 파일의 크기 결정.\n outW = inW\n outH = inH\n # 출력 영상 메모리 확보\n outImageR, outImageG, outImageB = [], [], []\n\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n ### 진짜 영상 처리 알고리즘 ###\n radian = degree * 3.141592 / 180.0\n for i in range(0, inW):\n for k in range(0, inH):\n xs = i\n ys = k\n xd = int(math.cos(radian) * xs - math.sin(radian) * ys)\n yd = int(math.sin(radian) * xs + math.cos(radian) * ys)\n if 0 <= xd < outW and 0 <= yd < outH:\n outImageR[xd][yd] = inImageR[xs][ys]\n for i in range(0, inW):\n for k in range(0, inH):\n xs = i\n ys = k\n xd = int(math.cos(radian) * xs - math.sin(radian) * ys)\n yd = int(math.sin(radian) * xs + math.cos(radian) * ys)\n if 0 <= xd < outW and 0 <= yd < outH:\n outImageG[xd][yd] = inImageG[xs][ys]\n for i in range(0, inW):\n for k in range(0, inH):\n xs = i\n ys = k\n xd = int(math.cos(radian) * xs - math.sin(radian) * ys)\n yd = int(math.sin(radian) * xs + math.cos(radian) * ys)\n if 0 <= xd < outW and 0 <= yd < outH:\n outImageB[xd][yd] = inImageB[xs][ys]\n ###############################\n display_Color()\n\n\ndef color_rotate2(): #영상회전(백워딩 및 중앙)\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)\n # 출력 파일의 크기 결정.\n outW = inW\n outH = inH\n # 출력 영상 메모리 확보\n outImageR, outImageG, outImageB = [], [], []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n ### 진짜 영상 처리 알고리즘 ###\n radian = degree * 3.141592 / 180.0\n cx = int(inW / 2);\n cy = int(inH / 2)\n for i in range(0, outW):\n for k in range(0, 
outH):\n xs = i\n ys = k\n xd = int(math.cos(radian) * (xs - cx)\n - math.sin(radian) * (ys - cy)) + cx\n yd = int(math.sin(radian) * (xs - cx)\n + math.cos(radian) * (ys - cy)) + cy\n if 0 <= xd < outW and 0 <= yd < outH:\n outImageR[xs][ys] = inImageR[xd][yd]\n else:\n outImageR[xs][ys] = 255\n for i in range(0, outW):\n for k in range(0, outH):\n xs = i\n ys = k\n xd = int(math.cos(radian) * (xs - cx)\n - math.sin(radian) * (ys - cy)) + cx\n yd = int(math.sin(radian) * (xs - cx)\n + math.cos(radian) * (ys - cy)) + cy\n if 0 <= xd < outW and 0 <= yd < outH:\n outImageG[xs][ys] = inImageG[xd][yd]\n else:\n outImageG[xs][ys] = 255\n for i in range(0, outW):\n for k in range(0, outH):\n xs = i\n ys = k\n xd = int(math.cos(radian) * (xs - cx)\n - math.sin(radian) * (ys - cy)) + cx\n yd = int(math.sin(radian) * (xs - cx)\n + math.cos(radian) * (ys - cy)) + cy\n if 0 <= xd < outW and 0 <= yd < outH:\n outImageB[xs][ys] = inImageB[xd][yd]\n else:\n outImageB[xs][ys] = 255\n ###############################\n display_Color()\n\n\ndef color_rotate3(): # 영상회전(확대)\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n degree = askinteger('각도', '값 입력', minvalue=0, maxvalue=360)\n # 출력 파일의 크기 결정.\n radian90 = (90 - degree) * 3.141592 / 180.0\n radian = degree * 3.141592 / 180.0\n\n outW = int(inH * math.cos(radian90) + inW * math.cos(radian))\n outH = int(inH * math.cos(radian) + inW * math.cos(radian90))\n\n # outW = inW; outH = inH\n # 출력 영상 메모리 확보\n outImageR = []\n outImageG = []\n outImageB = []\n for i in range(0, outW):\n tmpList = []\n for k in range(0, outH):\n tmpList.append(0)\n outImageR.append(tmpList)\n for i in range(0, outW):\n tmpList = []\n for k in range(0, outH):\n tmpList.append(0)\n outImageG.append(tmpList)\n for i in range(0, outW):\n tmpList = []\n for k in range(0, outH):\n tmpList.append(0)\n outImageB.append(tmpList)\n ### 진짜 영상 처리 알고리즘 ###\n\n # inImage2 크기를 outImage와 동일하게\n inImageR2 = []\n inImageG2 = []\n inImageB2 = []\n for i in range(0, outW):\n tmpList = []\n for k in range(0, outH):\n tmpList.append(255)\n inImageR2.append(tmpList)\n for i in range(0, outW):\n tmpList = []\n for k in range(0, outH):\n tmpList.append(255)\n inImageG2.append(tmpList)\n for i in range(0, outW):\n tmpList = []\n for k in range(0, outH):\n tmpList.append(255)\n inImageB2.append(tmpList)\n\n # inImage --> inImage2의 중앙으로\n gap = int((outW - inW) / 2)\n for i in range(0, inW):\n for k in range(0, inH):\n inImageR2[i + gap][k + gap] = inImageR[i][k]\n for i in range(0, inW):\n for k in range(0, inH):\n inImageG2[i + gap][k + gap] = inImageG[i][k]\n for i in range(0, inW):\n for k in range(0, inH):\n inImageB2[i + gap][k + gap] = inImageB[i][k]\n\n ### 진짜 영상 처리 알고리즘 ###\n cx = int(outW / 2)\n cy = int(outH / 2)\n\n for i in range(0, outW):\n for k in range(0, outH):\n xs = i\n ys = k\n xd = int(math.cos(radian) * (xs - cx)\n - math.sin(radian) * (ys - cy)) + cx\n yd = int(math.sin(radian) * (xs - cx)\n + math.cos(radian) * (ys - cy)) + cy\n # if 0 <= xd < outW and 0 <= yd < outH :\n if 0 <= xd < outW and 0 <= yd < outH:\n outImageR[xs][ys] = inImageR2[xd][yd]\n else:\n outImageR[xs][ys] = 255\n for i in range(0, outW):\n for k in range(0, outH):\n xs = i\n ys = k\n xd = int(math.cos(radian) * (xs - cx)\n - math.sin(radian) * (ys - cy)) + cx\n yd = int(math.sin(radian) * (xs - cx)\n + math.cos(radian) * (ys - cy)) + cy\n # if 0 <= xd < outW and 0 <= yd < outH :\n if 0 <= xd < outW and 0 <= yd < outH:\n outImageG[xs][ys] 
= inImageG2[xd][yd]\n else:\n outImageG[xs][ys] = 255\n for i in range(0, outW):\n for k in range(0, outH):\n xs = i\n ys = k\n xd = int(math.cos(radian) * (xs - cx)\n - math.sin(radian) * (ys - cy)) + cx\n yd = int(math.sin(radian) * (xs - cx)\n + math.cos(radian) * (ys - cy)) + cy\n # if 0 <= xd < outW and 0 <= yd < outH :\n if 0 <= xd < outW and 0 <= yd < outH:\n outImageB[xs][ys] = inImageB2[xd][yd]\n else:\n outImageB[xs][ys] = 255\n ###############################\n display_Color()\n\n\n\n\n### 화소영역처리\n\n#### Gray scale\n\ndef raw_embossing() : # Gray scale 마스크 활용 엠보싱 알고리즘\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 출력메모리의 크기를 결정\n outW = inW\n outH = inH\n outImage = []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n MSIZE=3 #마스크 사이즈\n mask = [[-1,0,0],[0,0,0],[0,0,1]]\n #임시 입력 영상 = inImage보다 2개열이 큼\n tmpInImage =[]\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImage.append(tmpList)\n tmpOutImage = []\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImage.append(tmpList)\n print(tmpOutImage)\n print(tmpInImage)\n #원래 입력--> 임시 입력\n for i in range(inH):\n for k in range(inW):\n tmpInImage[i+1][k+1] = inImage[i][k]\n\n # 회선연산하기. 마스크로 쭉 긁으면서 계산하기\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += mask[m][n] *tmpInImage[i+(m-1)][k+(n-1)]\n tmpOutImage[i-1][k-1] = s\n\n #127 더해주기(마스크의 합계가 0인 경우)\n for i in range(outW):\n for k in range(outH):\n tmpOutImage[i][k] +=127\n\n #임시출력 -> 원래 출력\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImage[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImage[i][k] = value\n display_raw()\n\n\ndef raw_bluring() : # Gray scale 마스크 활용 블러링 알고리즘\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 출력메모리의 크기를 결정\n outW = inW\n outH = inH\n outImage = []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n MSIZE=3 #마스크 사이즈\n mask = [[1./9,1./9,1./9],[1./9,1./9,1./9],[1./9,1./9,1./9]]\n #임시 입력 영상 = inImage보다 2개열이 큼\n tmpInImage =[]\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImage.append(tmpList)\n tmpOutImage = []\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImage.append(tmpList)\n print(tmpOutImage)\n print(tmpInImage)\n #원래 입력--> 임시 입력\n for i in range(inH):\n for k in range(inW):\n tmpInImage[i+1][k+1] = inImage[i][k]\n\n # 회선연산하기. 
마스크로 쭉 긁으면서 계산하기\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += mask[m][n] *tmpInImage[i+(m-1)][k+(n-1)]\n tmpOutImage[i-1][k-1] = s\n\n #임시출력 -> 원래 출력\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImage[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImage[i][k] = value\n display_raw()\n\n\ndef raw_GaussianFilter() : # Gray scale 마스크 활용 가우시안필터 알고리즘\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 출력메모리의 크기를 결정\n outW = inW\n outH = inH\n outImage = []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n MSIZE=3 #마스크 사이즈\n mask = [[1./16., 1./8., 1./16.],[1./8., 1./4., 1./8.],[1./16., 1./8., 1./16.]]\n #임시 입력 영상 = inImage보다 2개열이 큼\n tmpInImage =[]\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImage.append(tmpList)\n tmpOutImage = []\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImage.append(tmpList)\n print(tmpOutImage)\n print(tmpInImage)\n #원래 입력--> 임시 입력\n for i in range(inH):\n for k in range(inW):\n tmpInImage[i+1][k+1] = inImage[i][k]\n\n # 회선연산하기. 마스크로 쭉 긁으면서 계산하기\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += mask[m][n] *tmpInImage[i+(m-1)][k+(n-1)]\n tmpOutImage[i-1][k-1] = s\n\n #임시출력 -> 원래 출력\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImage[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImage[i][k] = value\n display_raw()\n\ndef raw_Sharpening() : # Gray scale 마스크 활용 샤프닝 알고리즘\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 출력메모리의 크기를 결정\n outW = inW\n outH = inH\n outImage = []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImage.append(tmpList)\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n MSIZE=3 #마스크 사이즈\n mask = [[0., -1., 0.],[-1., 5., -1.],[0., -1., 0.]]\n #임시 입력 영상 = inImage보다 2개열이 큼\n tmpInImage =[]\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImage.append(tmpList)\n tmpOutImage = []\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImage.append(tmpList)\n print(tmpOutImage)\n print(tmpInImage)\n #원래 입력--> 임시 입력\n for i in range(inH):\n for k in range(inW):\n tmpInImage[i+1][k+1] = inImage[i][k]\n\n # 회선연산하기. 마스크로 쭉 긁으면서 계산하기\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += mask[m][n] *tmpInImage[i+(m-1)][k+(n-1)]\n tmpOutImage[i-1][k-1] = s\n\n #임시출력 -> 원래 출력\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImage[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImage[i][k] = value\n display_raw()\n\ndef raw_HpfSharpening() : # Gray scale 마스크 활용 고주파 샤프닝 알고리즘\n global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n # 중요! 
decide the size of the output buffer first\n    outW = inW\n    outH = inH\n    outImage = [[0] * outW for _ in range(outH)]  # output buffer, zero-initialised\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    MSIZE = 3  # mask size\n    mask = [[-1./9., -1./9., -1./9.], [-1./9., 8./9., -1./9.], [-1./9., -1./9., -1./9.]]  # 3x3 high-pass mask (coefficients sum to 0)\n    # temporary input, padded by one pixel on every side (filled with 128)\n    tmpInImage = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpOutImage = [[0] * outW for _ in range(outH)]\n    # original input --> padded temporary input\n    for i in range(inH):\n        for k in range(inW):\n            tmpInImage[i+1][k+1] = inImage[i][k]\n\n    # convolution: sweep the mask across the image, multiply and accumulate\n    for i in range(1, inH):\n        for k in range(1, inW):\n            s = 0.0\n            for m in range(0, MSIZE):\n                for n in range(0, MSIZE):\n                    s += mask[m][n] * tmpInImage[i+(m-1)][k+(n-1)]\n            tmpOutImage[i-1][k-1] = s\n\n    # temporary output -> real output, clamped to [0, 255]\n    for i in range(outH):\n        for k in range(outW):\n            value = int(tmpOutImage[i][k])\n            if value > 255:\n                value = 255\n            if value < 0:\n                value = 0\n            outImage[i][k] = value\n    display_raw()\n\n\ndef raw_LpfSharpening():  # Gray-scale low-pass (box) filtering via mask\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n    # important! decide the size of the output buffer first\n    outW = inW\n    outH = inH\n    outImage = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    MSIZE = 3  # mask size\n    mask = [[1./9.] * 3 for _ in range(3)]  # 3x3 averaging (box) mask\n    tmpInImage = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpOutImage = [[0] * outW for _ in range(outH)]\n    # original input --> padded temporary input\n    for i in range(inH):\n        for k in range(inW):\n            tmpInImage[i+1][k+1] = inImage[i][k]\n\n    # convolution\n    for i in range(1, inH):\n        for k in range(1, inW):\n            s = 0.0\n            for m in range(0, MSIZE):\n                for n in range(0, MSIZE):\n                    s += mask[m][n] * tmpInImage[i+(m-1)][k+(n-1)]\n            tmpOutImage[i-1][k-1] = s\n\n    # temporary output -> real output, clamped to [0, 255]\n    for i in range(outH):\n        for k in range(outW):\n            value = int(tmpOutImage[i][k])\n            if value > 255:\n                value = 255\n            if value < 0:\n                value = 0\n            outImage[i][k] = value\n    display_raw()\n\n\ndef raw_HomogenOperator():  # Gray-scale homogeneity-operator edge detection\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n    # important!! decide the size of the output buffer first\n    outW = inW\n    outH = inH\n    outImage = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    tmpInImage = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpOutImage = [[0] * outW for _ in range(outH)]\n    # original input --> padded temporary input\n    for i in range(inH):\n        for k in range(inW):\n            tmpInImage[i+1][k+1] = inImage[i][k]\n\n    # homogeneity operator: each output pixel is the largest |center - neighbour|\n    # over its 3x3 block (the maximum is reset for every block position)\n    MSIZE = 3\n    for i in range(1, inH):\n        for k in range(1, inW):\n            maxDiff = 0\n            for m in range(0, MSIZE):\n                for n in range(0, MSIZE):\n                    diff = abs(tmpInImage[i][k] - tmpInImage[i+(m-1)][k+(n-1)])\n                    if diff > maxDiff:\n                        maxDiff = diff\n            tmpOutImage[i-1][k-1] = maxDiff\n\n    # temporary output -> real output, clamped to [0, 255]\n    for i in range(outH):\n        for k in range(outW):\n            value = int(tmpOutImage[i][k])\n            if value > 255:\n                value = 255\n            if value < 0:\n                value = 0\n            outImage[i][k] = value\n    display_raw()\n\n\n\n\n\n#### Color\n\ndef color_embossing():  # Color embossing via mask\n    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n    # important!! decide the size of the output buffers first\n    outW = inW\n    outH = inH\n    outImageR = [[0] * outW for _ in range(outH)]\n    outImageG = [[0] * outW for _ in range(outH)]\n    outImageB = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    MSIZE = 3  # mask size\n    maskR = [[-1, 0, 0], [0, 0, 0], [0, 0, 1]]\n    maskG = [[-1, 0, 0], [0, 0, 0], [0, 0, 1]]\n    maskB = [[-1, 0, 0], [0, 0, 0], [0, 0, 1]]\n    # temporary inputs, padded by one pixel on every side (filled with 128)\n    tmpInImageR = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageG = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageB = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpOutImageR = [[0] * outW for _ in range(outH)]\n    tmpOutImageG = [[0] * outW for _ in range(outH)]\n    tmpOutImageB = [[0] * outW for _ in range(outH)]\n\n    # original input --> padded temporary input\n    for i in range(inH):\n        for k in range(inW):\n            tmpInImageR[i+1][k+1] = inImageR[i][k]\n            tmpInImageG[i+1][k+1] = inImageG[i][k]\n            tmpInImageB[i+1][k+1] = inImageB[i][k]\n
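\n    # Worked example of the embossing mask on one 3x3 neighbourhood (illustrative\n    # comment only): for pixels [[10,10,10],[10,10,10],[10,10,90]] and the mask\n    # [[-1,0,0],[0,0,0],[0,0,1]] the weighted sum is -1*10 + 1*90 = 80, i.e. the\n    # diagonal intensity difference; the +127 bias added after the convolution\n    # shifts this signed difference into the visible 0..255 range.\n\n    # convolution — 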
마스크로 쭉 긁으면서 계산하기\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += maskR[m][n] *tmpInImageR[i+(m-1)][k+(n-1)]\n tmpOutImageR[i-1][k-1] = s\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += maskG[m][n] *tmpInImageG[i+(m-1)][k+(n-1)]\n tmpOutImageG[i-1][k-1] = s\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += maskB[m][n] *tmpInImageB[i+(m-1)][k+(n-1)]\n tmpOutImageB[i-1][k-1] = s\n\n #127 더해주기(마스크의 합계가 0인 경우)\n for i in range(outW):\n for k in range(outH):\n tmpOutImageR[i][k] +=127\n for i in range(outW):\n for k in range(outH):\n tmpOutImageG[i][k] +=127\n for i in range(outW):\n for k in range(outH):\n tmpOutImageB[i][k] +=127\n\n #임시출력 -> 원래 출력\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImageR[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImageR[i][k] = value\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImageG[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImageG[i][k] = value\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImageB[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImageB[i][k] = value\n display_Color()\n\n\ndef color_bluring() : # Color 마스크 활용 블러링 알고리즘\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n MSIZE=3 #마스크 사이즈\n maskR = [[1./9., 1./9.,1./9.],[1./9.,1./9.,1./9.],[1./9.,1./9.,1./9.]]\n maskG = [[1./9., 1./9.,1./9.],[1./9.,1./9.,1./9.],[1./9.,1./9.,1./9.]]\n maskB = [[1./9., 1./9.,1./9.],[1./9.,1./9.,1./9.],[1./9.,1./9.,1./9.]]\n #임시 입력 영상 = inImage보다 2개열이 큼\n tmpInImageR, tmpInImageG, tmpInImageB =[],[],[]\n\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImageR.append(tmpList)\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImageG.append(tmpList)\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImageB.append(tmpList)\n\n tmpOutImageR, tmpOutImageG, tmpOutImageB = [], [], []\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImageR.append(tmpList)\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImageG.append(tmpList)\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImageB.append(tmpList)\n\n #원래 입력--> 임시 입력\n for i in range(inH):\n for k in range(inW):\n tmpInImageR[i+1][k+1] = inImageR[i][k]\n for i in range(inH):\n for k in range(inW):\n tmpInImageG[i+1][k+1] 
= inImageG[i][k]\n for i in range(inH):\n for k in range(inW):\n tmpInImageB[i+1][k+1] = inImageB[i][k]\n\n # 회선연산하기. 마스크로 쭉 긁으면서 계산하기\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += maskR[m][n] *tmpInImageR[i+(m-1)][k+(n-1)]\n tmpOutImageR[i-1][k-1] = s\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += maskG[m][n] *tmpInImageG[i+(m-1)][k+(n-1)]\n tmpOutImageG[i-1][k-1] = s\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += maskB[m][n] *tmpInImageB[i+(m-1)][k+(n-1)]\n tmpOutImageB[i-1][k-1] = s\n\n #임시출력 -> 원래 출력\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImageR[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImageR[i][k] = value\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImageG[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImageG[i][k] = value\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImageB[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImageB[i][k] = value\n display_Color()\n\n\ndef color_GaussianFilter() : # Color 마스크 활용 가우시안필터 알고리즘\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n MSIZE=3 #마스크 사이즈\n maskR = [[1./16., 1./8., 1./16.],[1./8., 1./4., 1./8.],[1./16., 1./8., 1./16.]]\n maskG = [[1./16., 1./8., 1./16.],[1./8., 1./4., 1./8.],[1./16., 1./8., 1./16.]]\n maskB = [[1./16., 1./8., 1./16.],[1./8., 1./4., 1./8.],[1./16., 1./8., 1./16.]]\n #임시 입력 영상 = inImage보다 2개열이 큼\n tmpInImageR, tmpInImageG, tmpInImageB =[],[],[]\n\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImageR.append(tmpList)\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImageG.append(tmpList)\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImageB.append(tmpList)\n\n tmpOutImageR, tmpOutImageG, tmpOutImageB = [], [], []\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImageR.append(tmpList)\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImageG.append(tmpList)\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImageB.append(tmpList)\n\n #원래 입력--> 임시 입력\n for i in range(inH):\n for k in range(inW):\n tmpInImageR[i+1][k+1] = inImageR[i][k]\n for i in range(inH):\n for k in range(inW):\n tmpInImageG[i+1][k+1] = inImageG[i][k]\n for i in range(inH):\n for k in range(inW):\n tmpInImageB[i+1][k+1] = 
inImageB[i][k]\n\n # 회선연산하기. 마스크로 쭉 긁으면서 계산하기\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += maskR[m][n] *tmpInImageR[i+(m-1)][k+(n-1)]\n tmpOutImageR[i-1][k-1] = s\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += maskG[m][n] *tmpInImageG[i+(m-1)][k+(n-1)]\n tmpOutImageG[i-1][k-1] = s\n for i in range(1, inH):\n for k in range(1, inW):\n #1개 점을 처리하되 3x3 반복해서 처리: 마스크 연산은 모두 곱해서 더함.\n s = 0.0\n for m in range(0,MSIZE):\n for n in range(0,MSIZE):\n s += maskB[m][n] *tmpInImageB[i+(m-1)][k+(n-1)]\n tmpOutImageB[i-1][k-1] = s\n\n #임시출력 -> 원래 출력\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImageR[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImageR[i][k] = value\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImageG[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImageG[i][k] = value\n for i in range(outW):\n for k in range(outH):\n value = int(tmpOutImageB[i][k])\n if value > 255:\n value = 255\n if value < 0 :\n value = 0\n outImageB[i][k] = value\n display_Color()\n\n\ndef color_Sharpening() : # Color 마스크 활용 샤프닝 알고리즘\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n MSIZE=3 #마스크 사이즈\n maskR = [[0., -1., 0.],[-1., 5., -1.],[0., -1., 0.]]\n maskG = [[0., -1., 0.],[-1., 5., -1.],[0., -1., 0.]]\n maskB = [[0., -1., 0.],[-1., 5., -1.],[0., -1., 0.]]\n #임시 입력 영상 = inImage보다 2개열이 큼\n tmpInImageR, tmpInImageG, tmpInImageB =[],[],[]\n\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImageR.append(tmpList)\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImageG.append(tmpList)\n for i in range(inH +2):\n tmpList=[]\n for k in range(inW +2):\n tmpList.append(128)\n tmpInImageB.append(tmpList)\n\n tmpOutImageR, tmpOutImageG, tmpOutImageB = [], [], []\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImageR.append(tmpList)\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImageG.append(tmpList)\n for i in range(outH):\n tmpList=[]\n for k in range(outW):\n tmpList.append(0)\n tmpOutImageB.append(tmpList)\n\n #원래 입력--> 임시 입력\n for i in range(inH):\n for k in range(inW):\n tmpInImageR[i+1][k+1] = inImageR[i][k]\n for i in range(inH):\n for k in range(inW):\n tmpInImageG[i+1][k+1] = inImageG[i][k]\n for i in range(inH):\n for k in range(inW):\n tmpInImageB[i+1][k+1] = inImageB[i][k]\n\n # 회선연산하기. 
sweep the 3x3 mask across each channel, multiply and accumulate\n    for tmpIn, tmpOut, mask in ((tmpInImageR, tmpOutImageR, maskR), (tmpInImageG, tmpOutImageG, maskG), (tmpInImageB, tmpOutImageB, maskB)):\n        for i in range(1, inH):\n            for k in range(1, inW):\n                s = 0.0\n                for m in range(0, MSIZE):\n                    for n in range(0, MSIZE):\n                        s += mask[m][n] * tmpIn[i+(m-1)][k+(n-1)]\n                tmpOut[i-1][k-1] = s\n\n    # temporary output -> real output, clamped to [0, 255]\n    for tmpOut, outImg in ((tmpOutImageR, outImageR), (tmpOutImageG, outImageG), (tmpOutImageB, outImageB)):\n        for i in range(outH):\n            for k in range(outW):\n                value = int(tmpOut[i][k])\n                if value > 255:\n                    value = 255\n                if value < 0:\n                    value = 0\n                outImg[i][k] = value\n    display_Color()\n\n\ndef color_HpfSharpening():  # Color high-pass (HPF) sharpening via mask\n    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n    # important!! decide the size of the output buffers first\n    outW = inW\n    outH = inH\n    outImageR = [[0] * outW for _ in range(outH)]\n    outImageG = [[0] * outW for _ in range(outH)]\n    outImageB = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    MSIZE = 3  # mask size\n    hpf = [[-1./9., -1./9., -1./9.], [-1./9., 8./9., -1./9.], [-1./9., -1./9., -1./9.]]  # 3x3 high-pass mask (coefficients sum to 0)\n    maskR, maskG, maskB = hpf, hpf, hpf\n    # temporary inputs, padded by one pixel on every side (filled with 128)\n    tmpInImageR = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageG = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageB = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpOutImageR = [[0] * outW for _ in range(outH)]\n    tmpOutImageG = [[0] * outW for _ in range(outH)]\n    tmpOutImageB = [[0] * outW for _ in range(outH)]\n\n    # original input --> padded temporary input\n    for i in range(inH):\n        for k in range(inW):\n            tmpInImageR[i+1][k+1] = inImageR[i][k]\n            tmpInImageG[i+1][k+1] = inImageG[i][k]\n            tmpInImageB[i+1][k+1] = inImageB[i][k]\n\n    # convolution, then clamp into the output buffers\n    for tmpIn, tmpOut, mask in ((tmpInImageR, tmpOutImageR, maskR), (tmpInImageG, tmpOutImageG, maskG), (tmpInImageB, tmpOutImageB, maskB)):\n        for i in range(1, inH):\n            for k in range(1, inW):\n                s = 0.0\n                for m in range(0, MSIZE):\n                    for n in range(0, MSIZE):\n                        s += mask[m][n] * tmpIn[i+(m-1)][k+(n-1)]\n                tmpOut[i-1][k-1] = s\n    for tmpOut, outImg in ((tmpOutImageR, outImageR), (tmpOutImageG, outImageG), (tmpOutImageB, outImageB)):\n        for i in range(outH):\n            for k in range(outW):\n                value = int(tmpOut[i][k])\n                if value > 255:\n                    value = 255\n                if value < 0:\n                    value = 0\n                outImg[i][k] = value\n    display_Color()
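\n\n# Minimal stand-alone reference for the 3x3 convolution used by the mask filters\n# above (an illustrative sketch only; the menu callbacks do not call it).\n# `img` is a list of H rows of W pixel values and `mask` a 3x3 weight list;\n# the border is padded with 128, exactly like the tmpInImage buffers above.\ndef _convolve3x3_ref(img, mask, pad=128):\n    h, w = len(img), len(img[0])\n    padded = [[pad] * (w + 2) for _ in range(h + 2)]\n    for i in range(h):\n        for k in range(w):\n            padded[i+1][k+1] = img[i][k]\n    out = [[0.0] * w for _ in range(h)]\n    for i in range(1, h + 1):\n        for k in range(1, w + 1):\n            out[i-1][k-1] = sum(mask[m][n] * padded[i+m-1][k+n-1] for m in range(3) for n in range(3))\n    return out\n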
\n\ndef color_LpfSharpening():  # Color low-pass (box) filtering via mask\n    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n    # important!! decide the size of the output buffers first\n    outW = inW\n    outH = inH\n    outImageR = [[0] * outW for _ in range(outH)]\n    outImageG = [[0] * outW for _ in range(outH)]\n    outImageB = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    MSIZE = 3  # mask size\n    box = [[1./9.] * 3 for _ in range(3)]  # 3x3 averaging (box) mask\n    maskR, maskG, maskB = box, box, box\n    tmpInImageR = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageG = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageB = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpOutImageR = [[0] * outW for _ in range(outH)]\n    tmpOutImageG = [[0] * outW for _ in range(outH)]\n    tmpOutImageB = [[0] * outW for _ in range(outH)]\n    # original input --> padded temporary input\n    for i in range(inH):\n        for k in range(inW):\n            tmpInImageR[i+1][k+1] = inImageR[i][k]\n            tmpInImageG[i+1][k+1] = inImageG[i][k]\n            tmpInImageB[i+1][k+1] = inImageB[i][k]\n\n    # convolution, then clamp into the output buffers\n    for tmpIn, tmpOut, mask in ((tmpInImageR, tmpOutImageR, maskR), (tmpInImageG, tmpOutImageG, maskG), (tmpInImageB, tmpOutImageB, maskB)):\n        for i in range(1, inH):\n            for k in range(1, inW):\n                s = 0.0\n                for m in range(0, MSIZE):\n                    for n in range(0, MSIZE):\n                        s += mask[m][n] * tmpIn[i+(m-1)][k+(n-1)]\n                tmpOut[i-1][k-1] = s\n    for tmpOut, outImg in ((tmpOutImageR, outImageR), (tmpOutImageG, outImageG), (tmpOutImageB, outImageB)):\n        for i in range(outH):\n            for k in range(outW):\n                value = int(tmpOut[i][k])\n                if value > 255:\n                    value = 255\n                if value < 0:\n                    value = 0\n                outImg[i][k] = value\n    display_Color()\n\n\ndef color_DiffOperatorHor():  # Color horizontal shift-and-difference edge detection\n    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n    # important!! decide the size of the output buffers first\n    outW = inW\n    outH = inH\n    outImageR = [[0] * outW for _ in range(outH)]\n    outImageG = [[0] * outW for _ in range(outH)]\n    outImageB = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    MSIZE = 3  # mask size\n    diffMask = [[0., -1., 0.], [0., 1., 0.], [0., 0., 0.]]  # shift-and-difference mask\n    maskR, maskG, maskB = diffMask, diffMask, diffMask\n    tmpInImageR = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageG = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageB = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpOutImageR = [[0] * outW for _ in range(outH)]\n    tmpOutImageG = [[0] * outW for _ in range(outH)]\n    tmpOutImageB = [[0] * outW for _ in range(outH)]\n    # original input --> padded temporary input\n    for i in range(inH):\n        for k in range(inW):\n            tmpInImageR[i+1][k+1] = inImageR[i][k]\n            tmpInImageG[i+1][k+1] = inImageG[i][k]\n            tmpInImageB[i+1][k+1] = inImageB[i][k]\n\n    # convolution with the difference mask, then clamp into the output buffers\n    for tmpIn, tmpOut, mask in ((tmpInImageR, tmpOutImageR, maskR), (tmpInImageG, tmpOutImageG, maskG), (tmpInImageB, tmpOutImageB, maskB)):\n        for i in range(1, inH):\n            for k in range(1, inW):\n                s = 0.0\n                for m in range(0, MSIZE):\n                    for n in range(0, MSIZE):\n                        s += mask[m][n] * tmpIn[i+(m-1)][k+(n-1)]\n                tmpOut[i-1][k-1] = s\n    for tmpOut, outImg in ((tmpOutImageR, outImageR), (tmpOutImageG, outImageG), (tmpOutImageB, outImageB)):\n        for i in range(outH):\n            for k in range(outW):\n                value = int(tmpOut[i][k])\n                if value > 255:\n                    value = 255\n                if value < 0:\n                    value = 0\n                outImg[i][k] = value\n    display_Color()\n\n\n\ndef color_HomogenOperator():  # Color homogeneity-operator edge detection\n    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n    # important!! decide the size of the output buffers first\n    outW = inW\n    outH = inH\n    outImageR = [[0] * outW for _ in range(outH)]\n    outImageG = [[0] * outW for _ in range(outH)]\n    outImageB = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    tmpInImageR = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageG = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpInImageB = [[128] * (inW + 2) for _ in range(inH + 2)]\n    tmpOutImageR = [[0] * outW for _ in range(outH)]\n    tmpOutImageG = [[0] * outW for _ in range(outH)]\n    tmpOutImageB = [[0] * outW for _ in range(outH)]\n    # original input --> padded temporary input\n    for i in range(inH):\n        for k in range(inW):\n            tmpInImageR[i+1][k+1] = inImageR[i][k]\n            tmpInImageG[i+1][k+1] = inImageG[i][k]\n            tmpInImageB[i+1][k+1] = inImageB[i][k]\n\n    # homogeneity operator: each output pixel is the largest |center - neighbour|\n    # over its 3x3 block (the maximum is reset for every block position)\n    MSIZE = 3\n    for tmpIn, tmpOut in ((tmpInImageR, tmpOutImageR), (tmpInImageG, tmpOutImageG), (tmpInImageB, tmpOutImageB)):\n        for i in range(1, inH):\n            for k in range(1, inW):\n                maxDiff = 0\n                for m in range(0, MSIZE):\n                    for n in range(0, MSIZE):\n                        diff = abs(tmpIn[i][k] - tmpIn[i+(m-1)][k+(n-1)])\n                        if diff > maxDiff:\n                            maxDiff = diff\n                tmpOut[i-1][k-1] = maxDiff\n\n    # temporary output -> real output, clamped to [0, 255]\n    for tmpOut, outImg in ((tmpOutImageR, outImageR), (tmpOutImageG, outImageG), (tmpOutImageB, outImageB)):\n        for i in range(outH):\n            for k in range(outW):\n                value = int(tmpOut[i][k])\n                if value > 255:\n                    value = 255\n                if value < 0:\n                    value = 0\n                outImg[i][k] = value\n    display_Color()\n\n\n\n### Data analysis\n\ndef raw_data():  # Gray-scale pixel statistics\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n    rawDic = {}  # pixel value : count\n    for i in range(outH):\n        for k in range(outW):\n            rawData = outImage[i][k]\n            if rawData in rawDic:\n                rawDic[rawData] += 1\n            else:\n                rawDic[rawData] = 1\n\n    rawDataList = sorted(rawDic.items(), key=operator.itemgetter(1))\n    minCount = rawDataList[0]   # least frequent (value, count)\n    maxCount = rawDataList[-1]  # most frequent (value, count)\n    rawSum = 0\n    for item in rawDataList:\n        rawSum += item[0] * item[1]\n    rawAvg = rawSum / (outW * outH)\n\n    rawDataList = sorted(rawDic.items(), key=operator.itemgetter(0))\n    rawStream = []\n    for item in rawDataList:\n        for i in range(item[1]):\n            rawStream.append(item[0])\n    upperPos = int((outW * outH) / 10 / 100)\n    lowerPos = int((outW * outH) / -10 / 100)\n    midPos = int((outW * outH) / 2)\n    raw_upper = rawStream[upperPos]\n    raw_lower = rawStream[lowerPos]\n    raw_mid = rawStream[midPos]\n\n    subWindow = Toplevel(window)  # sub-window owned by the main window\n    subWindow.geometry('200x200')\n    Label(subWindow, text='pixel sum: ' + str(rawSum)).pack()\n    Label(subWindow, text='pixel mean: ' + str(rawAvg)).pack()\n    Label(subWindow, text='least frequent (value, count): ' + str(minCount)).pack()\n    Label(subWindow, text='most frequent (value, count): ' + str(maxCount)).pack()\n    Label(subWindow, text='upper percentile value: ' + str(raw_upper)).pack()\n    Label(subWindow, text='lower percentile value: ' + str(raw_lower)).pack()\n    Label(subWindow, text='median value: ' + str(raw_mid)).pack()\n    subWindow.mainloop()\n\n\ndef raw_histogram():  # histogram drawn onto a PhotoImage\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n    countList = [0] * 256\n    normalList = [0] * 256\n\n    for i in range(outH):\n        for k in range(outW):\n            value = outImage[i][k]\n            countList[value] += 1\n\n    # normalised value = (count - min) * HIGH / (max - min)\n    maxVal = max(countList)\n    minVal = min(countList)\n    for i in range(len(countList)):\n        normalList[i] = (countList[i] - minVal) * 256 / (maxVal - minVal)\n\n    # draw on screen\n    subWindow = Toplevel(window)\n    subWindow.geometry('256x256')\n    subCanvas = Canvas(subWindow, width=256, height=256)\n    subPaper = PhotoImage(width=256, height=256)\n    subCanvas.create_image((256/2, 256/2), image=subPaper, state='normal')\n\n    for i in range(0, 256):\n        for k in range(0, int(normalList[i])):\n            data = 0\n            subPaper.put('#%02x%02x%02x' % (data, data, data), (i, 255-k))\n    subCanvas.pack(expand=1, anchor=CENTER)\n    subWindow.mainloop()\n\n\ndef raw_histo_plt():  # histogram via matplotlib\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n    countList = [0] * 256\n\n    for i in range(outH):\n        for k in range(outW):\n            value = outImage[i][k]\n            countList[value] += 1\n    plt.plot(countList)\n    plt.show()
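\n\n# The stretching rule used below, as a tiny stand-alone sketch (illustrative\n# only, not called by the GUI): new = (old - minVal) * HIGH / (maxVal - minVal)\n# maps [minVal, maxVal] onto [0, HIGH]. E.g. for values 50..200 with HIGH=255:\n# 50 -> 0, 125 -> 127, 200 -> 255.\ndef _stretch_ref(values, high=255):\n    lo, hi = min(values), max(values)\n    if hi == lo:  # a flat input would divide by zero\n        return [0 for _ in values]\n    return [int((v - lo) * high / (hi - lo)) for v in values]\n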
\n\ndef raw_histoStretch():  # histogram stretching\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n    # important! decide the size of the output buffer first\n    outW = inW; outH = inH\n    outImage = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    minVal, maxVal, HIGH = 255, 0, 255\n    for i in range(inH):\n        for k in range(inW):\n            data = inImage[i][k]\n            if data > maxVal:\n                maxVal = data\n            if data < minVal:\n                minVal = data\n\n    # stretch the histogram: new = (old - minVal) * HIGH / (maxVal - minVal)\n    for i in range(inH):\n        for k in range(inW):\n            value = int((inImage[i][k] - minVal) * HIGH / (maxVal - minVal))\n            if value < 0:\n                value = 0\n            if value > 255:\n                value = 255\n            outImage[i][k] = value\n    display_raw()\n\n\ndef raw_endIn():  # end-in search (stretch after clipping both ends)\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n    # important! decide the size of the output buffer first\n    outW = inW; outH = inH\n    outImage = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    minVal, maxVal, HIGH = 255, 0, 255\n    for i in range(inH):\n        for k in range(inW):\n            data = inImage[i][k]\n            if data > maxVal:\n                maxVal = data\n            if data < minVal:\n                minVal = data\n    limit = askinteger('End-in', 'clip range:', minvalue=1, maxvalue=127)\n    maxVal -= limit\n    minVal += limit\n\n    # stretch the histogram: new = (old - minVal) * HIGH / (maxVal - minVal)\n    for i in range(inH):\n        for k in range(inW):\n            value = int((inImage[i][k] - minVal) * HIGH / (maxVal - minVal))\n            if value < 0:\n                value = 0\n            if value > 255:\n                value = 255\n            outImage[i][k] = value\n    display_raw()\n\n\ndef raw_histoEqual():  # histogram equalisation\n    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH\n    # important! decide the size of the output buffer first\n    outW = inW; outH = inH\n    outImage = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    HIGH = 255\n    histo = [0] * 256        # 256 bins so that pixel value 255 has a slot\n    sumHisto = [0] * 256\n    normalHisto = [0] * 256\n    # build the histogram\n    for i in range(inH):\n        for k in range(inW):\n            histo[inImage[i][k]] += 1\n\n    # cumulative histogram\n    sumVal = 0\n    for v in range(256):\n        sumVal += histo[v]\n        sumHisto[v] = sumVal\n\n    # normalised cumulative histogram: (cumulative sum / (rows*cols)) * HIGH\n    for v in range(256):\n        normalHisto[v] = int(sumHisto[v] / (outW * outH) * HIGH)\n\n    # map every input pixel through the normalised CDF\n    for i in range(inH):\n        for k in range(inW):\n            outImage[i][k] = normalHisto[inImage[i][k]]\n    display_raw()
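\n\n# Histogram equalisation in one line of maths (a sketch, not called by the GUI):\n# out = int(CDF(v) / N * 255), where CDF(v) is the cumulative pixel count up to\n# value v and N the number of pixels; frequently used grey levels get spread apart.\ndef _equalize_ref(pixels, high=255):\n    histo = [0] * 256\n    for v in pixels:\n        histo[v] += 1\n    cdf, total = [], 0\n    for c in histo:\n        total += c\n        cdf.append(total)\n    n = len(pixels)\n    return [int(cdf[v] / n * high) for v in pixels]\n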
\n\n\n## Color statistics\ndef color_data():  # Color pixel statistics\n    global window, canvas, paper, filename, outImage, inImage, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n    rDic, gDic, bDic = {}, {}, {}  # value : count per channel\n    photo = PhotoImage(file=filename)\n    YSIZE = photo.height()\n    XSIZE = photo.width()\n    for i in range(XSIZE):\n        for k in range(YSIZE):\n            r, g, b = photo.get(i, k)\n            rDic[r] = rDic.get(r, 0) + 1\n            gDic[g] = gDic.get(g, 0) + 1\n            bDic[b] = bDic.get(b, 0) + 1\n    # least / most frequent (value, count) per channel, sorted by count\n    rcountList = sorted(rDic.items(), key=operator.itemgetter(1))\n    gcountList = sorted(gDic.items(), key=operator.itemgetter(1))\n    bcountList = sorted(bDic.items(), key=operator.itemgetter(1))\n    rMin, rMax = rcountList[0], rcountList[-1]\n    gMin, gMax = gcountList[0], gcountList[-1]\n    bMin, bMax = bcountList[0], bcountList[-1]\n    rSum = sum(v * c for v, c in rcountList)\n    gSum = sum(v * c for v, c in gcountList)\n    bSum = sum(v * c for v, c in bcountList)\n    rAvg = rSum / (XSIZE * YSIZE)\n    gAvg = gSum / (XSIZE * YSIZE)\n    bAvg = bSum / (XSIZE * YSIZE)\n\n    # flat, value-sorted pixel streams for the percentile positions\n    rcountList = sorted(rDic.items(), key=operator.itemgetter(0))\n    gcountList = sorted(gDic.items(), key=operator.itemgetter(0))\n    bcountList = sorted(bDic.items(), key=operator.itemgetter(0))\n    rStream, gStream, bStream = [], [], []\n    for stream, countList in ((rStream, rcountList), (gStream, gcountList), (bStream, bcountList)):\n        for value, count in countList:\n            stream.extend([value] * count)\n    upperPos = int((XSIZE * YSIZE) / 10 / 100)\n    lowerPos = int((XSIZE * YSIZE) / -10 / 100)\n    midPos = int((XSIZE * YSIZE) / 2)\n\n    subWindow = Toplevel(window)  # sub-window owned by the main window\n    subWindow.geometry('500x500')\n    Label(subWindow, text='r,g,b pixel sums: %d, %d, %d' % (rSum, gSum, bSum)).pack()\n    Label(subWindow, text='r,g,b pixel means: %.2f, %.2f, %.2f' % (rAvg, gAvg, bAvg)).pack()\n    Label(subWindow, text='least frequent r,g,b (value, count): ' + str(rMin) + ',' + str(gMin) + ',' + str(bMin)).pack()\n    Label(subWindow, text='most frequent r,g,b (value, count): ' + str(rMax) + ',' + str(gMax) + ',' + str(bMax)).pack()\n    Label(subWindow, text='r,g,b upper percentile: %d, %d, %d' % (rStream[upperPos], gStream[upperPos], bStream[upperPos])).pack()\n    Label(subWindow, text='r,g,b lower percentile: %d, %d, %d' % (rStream[lowerPos], gStream[lowerPos], bStream[lowerPos])).pack()\n    Label(subWindow, text='r,g,b median: %d, %d, %d' % (rStream[midPos], gStream[midPos], bStream[midPos])).pack()\n    subWindow.mainloop()\n\n\ndef color_histo_normal():  # normalised per-channel histogram (matplotlib)\n    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n    countListR, countListG, countListB = [0] * 256, [0] * 256, [0] * 256\n    for i in range(outH):\n        for k in range(outW):\n            countListR[outImageR[i][k]] += 1\n            countListG[outImageG[i][k]] += 1\n            countListB[outImageB[i][k]] += 1\n\n    # normalised value = (count - min) * 255 / (max - min), per channel\n    normals = []\n    for countList in (countListR, countListG, countListB):\n        maxVal, minVal = max(countList), min(countList)\n        span = (maxVal - minVal) or 1  # guard against a flat histogram\n        normals.append([(c - minVal) * 255 / span for c in countList])\n\n    fig, axs = plt.subplots(3, tight_layout=True)\n    for ax, normal, col in zip(axs, normals, 'rgb'):\n        ax.plot(normal, color=col)\n    plt.show()\n\n\n\ndef color_histo_plt():  # per-channel histogram (matplotlib)\n    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, 
outH\n countListR = [0] * 256\n countListG = [0] * 256\n countListB = [0] * 256\n\n for i in range(outH) :\n for k in range(outW) :\n value = outImageR[i][k]\n countListR[value] += 1\n for i in range(outH) :\n for k in range(outW) :\n value = outImageG[i][k]\n countListG[value] += 1\n for i in range(outH) :\n for k in range(outW) :\n value = outImageB[i][k]\n countListB[value] += 1\n\n n_bins = 200\n fig, axs = plt.subplots(3, tight_layout=True)\n axs[0].hist(countListR, color='r', bins=n_bins)\n axs[1].hist(countListG, color='g', bins=n_bins)\n axs[2].hist(countListB, color='b', bins=n_bins)\n plt.show()\n\n\ndef color_histoStretch() : # 히스토그램 스트레칭 알고리즘\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 출력메모리 크기 결정\n outW = inW\n outH = inH\n outImageR, outImageG, outImageB = [], [], []\n tmpList = []\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageR.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageG.append(tmpList[:])\n for i in range(outH): # 출력메모리 확보(0으로 초기화)\n tmpList = []\n for k in range(outW):\n tmpList.append(0)\n outImageB.append(tmpList[:])\n #############################\n # 진짜 영상처리 알고리즘을 구현\n ############################\n minValR, maxValR, HIGHR = 255, 0, 255\n for i in range(inH) :\n for k in range(inW) :\n data = inImageR[i][k]\n if data > maxValR:\n maxValR = data\n if data < minValR:\n minValR = data\n minValG, maxValG, HIGHG = 255, 0, 255\n for i in range(inH) :\n for k in range(inW) :\n data = inImageG[i][k]\n if data > maxValG:\n maxValG = data\n if data < minValG:\n minValG = data\n minValB, maxValB, HIGHB = 255, 0, 255\n for i in range(inH) :\n for k in range(inW) :\n data = inImageB[i][k]\n if data > maxValB:\n maxValB = data\n if data < minValB:\n minValB = data\n\n #히스토그램 스트레칭시키기\n #new = (old - minVal) * HIGH / (maxVal - minVal)\n for i in range(inH):\n for k in range(inW):\n value = int((inImageR[i][k] - minValR) * HIGHR / (maxValR - minValR))\n if value < 0 :\n value = 0\n if value > 255:\n value = 255\n outImageR[i][k] = value\n for i in range(inH):\n for k in range(inW):\n value = int((inImageG[i][k] - minValG) * HIGHG / (maxValG - minValG))\n if value < 0 :\n value = 0\n if value > 255:\n value = 255\n outImageG[i][k] = value\n for i in range(inH):\n for k in range(inW):\n value = int((inImageB[i][k] - minValB) * HIGHB / (maxValB - minValB))\n if value < 0 :\n value = 0\n if value > 255:\n value = 255\n outImageB[i][k] = value\n display_Color()\n\ndef color_endIn() : # 엔드-인 검색 알고리즘\n global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n # 중요!! 
decide the size of the output buffers first\n    outW = inW\n    outH = inH\n    outImageR = [[0] * outW for _ in range(outH)]\n    outImageG = [[0] * outW for _ in range(outH)]\n    outImageB = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    # global min/max over all three channels\n    minVal, maxVal, HIGH = 255, 0, 255\n    for inImg in (inImageR, inImageG, inImageB):\n        for i in range(inH):\n            for k in range(inW):\n                data = inImg[i][k]\n                if data > maxVal:\n                    maxVal = data\n                if data < minVal:\n                    minVal = data\n    limit = askinteger('End-in', 'clip range:', minvalue=1, maxvalue=127)\n    maxVal -= limit\n    minVal += limit\n\n    # stretch the histogram: new = (old - minVal) * HIGH / (maxVal - minVal)\n    for inImg, outImg in ((inImageR, outImageR), (inImageG, outImageG), (inImageB, outImageB)):\n        for i in range(inH):\n            for k in range(inW):\n                value = int((inImg[i][k] - minVal) * HIGH / (maxVal - minVal))\n                if value < 0:\n                    value = 0\n                if value > 255:\n                    value = 255\n                outImg[i][k] = value\n    display_Color()\n\n\ndef color_histoEqual():  # Color histogram equalisation\n    global window, canvas, paper, filename, inImageR, inImageG, inImageB, outImageR, outImageG, outImageB, inW, inH, outW, outH\n    # important!! decide the size of the output buffers first\n    outW = inW\n    outH = inH\n    outImageR = [[0] * outW for _ in range(outH)]\n    outImageG = [[0] * outW for _ in range(outH)]\n    outImageB = [[0] * outW for _ in range(outH)]\n    #############################\n    # the actual image-processing algorithm\n    ############################\n    HIGH = 255\n    # one histogram / cumulative sum / normalised CDF per channel\n    for inImg, outImg in ((inImageR, outImageR), (inImageG, outImageG), (inImageB, outImageB)):\n        histo = [0] * 256  # 256 bins so that pixel value 255 has a slot\n        for i in range(inH):\n            for k in range(inW):\n                histo[inImg[i][k]] += 1\n        # cumulative histogram (the running sum restarts for every channel)\n        sumHisto = [0] * 256\n        sumVal = 0\n        for v in range(256):\n            sumVal += histo[v]\n            sumHisto[v] = sumVal\n        # normalised cumulative histogram: (cumulative sum / (rows*cols)) * HIGH\n        normalHisto = [int(sumHisto[v] / (outW * outH) * HIGH) for v in range(256)]\n        # map every input pixel through the normalised CDF\n        for i in range(inH):\n            for k in range(inW):\n                outImg[i][k] = normalHisto[inImg[i][k]]\n    display_Color()\n\n\n\n## global variables\nwindow, canvas, paper, filename = [None] * 4\ninImage, outImage = [], []\ninW, inH, outW, outH = [0] * 4\ncolor_panYN = False\nraw_panYN = False\nsx, sy, ex, ey = [0] * 4\nVIEW_X, VIEW_Y = 128, 128\nstatus = None\n\n## main code\nwindow = Tk()\nwindow.geometry('400x400')\nwindow.title('DATS(Digital Image Analysis & Processing Total Solution) Ver 0.91')\n# NB: a second bind on the same event replaces the earlier handler\nwindow.bind(\"<Button-1>\", raw_mouseClick)\nwindow.bind(\"<ButtonRelease-1>\", raw_mouseDrop)\nwindow.bind(\"<Button-1>\", color_mouseClick)\nwindow.bind(\"<ButtonRelease-1>\", color_mouseDrop)\n\nstatus = Label(window, text='Image info:', bd=1, relief=SUNKEN, anchor=W)\nstatus.pack(side=BOTTOM, fill=X)\n\n\n# menu widgets\nmainMenu = Menu(window)\nwindow.config(menu=mainMenu)\n\n### File menu\nfileMenu = Menu(mainMenu)\nmainMenu.add_cascade(label='File', menu=fileMenu)\n\nopenMenu = Menu(fileMenu)\nfileMenu.add_cascade(label='Open', menu=openMenu)\nopenMenu.add_command(label='Load gray-scale image', command=openRawFile)\nopenMenu.add_command(label='Load color image', command=openColorFile)\nopenMenu.add_command(label='Load from CSV', command=openRawCSV)\nopenMenu.add_command(label='Load from SQLite', command=openRawSQLite)\nopenMenu.add_command(label='Load from MySQL', command=openRawMySQL)\n\nsaveMenu = Menu(fileMenu)\nfileMenu.add_cascade(label='Save', menu=saveMenu)\nsaveMenu.add_command(label='Save gray-scale image', command=saveRawFile)\nsaveMenu.add_command(label='Save color image', 
command=saveColorFile)\nsaveMenu.add_command(label='Excel(숫자)형식으로 저장', command=saveNumExcel)\nsaveMenu.add_command(label='Excel(음영)형식으로 저장', command=saveRawExcel)\nsaveMenu.add_command(label='Excel(RGB숫자)형식으로 저장', command=saveColorNumExcel)\nsaveMenu.add_command(label='Excel(컬러)형식으로 저장', command=saveColorExcel)\nsaveMenu.add_command(label='CSV형식으로 저장', command=saveRawCSV)\n# saveMenu.add_command(label='CSV(셔플)형식으로 저장', command=saveShuffleCSV)\n\nsendMenu = Menu(fileMenu)\nfileMenu.add_cascade(label='DB에 내보내기', menu=sendMenu)\nsendMenu.add_command(label='SQLite에 내보내기', command=saveRawSQLite)\nsendMenu.add_command(label='MySQL에 내보내기', command=saveRawMySQL)\nsendMenu.add_separator()\nsendMenu.add_command(label='RAW폴더-MySQL에 내보내기', command=saveRawAllMySQL)\n\nfileMenu.add_separator()\nfileMenu.add_command(label='종료', command=exitFile)\n\n\n### 화소점처리\npixelMenu = Menu(mainMenu)\nmainMenu.add_cascade(label='화소점처리', menu=pixelMenu)\n\nequalMenu = Menu(pixelMenu)\npixelMenu.add_cascade(label='동일영상보기', menu=equalMenu)\nequalMenu.add_command(label='Gray-동일영상', command=equal_raw)\nequalMenu.add_command(label='Color-동일영상', command=equal_Color)\n\nraw_brightnessMenu = Menu(pixelMenu)\npixelMenu.add_cascade(label='Gray scale-밝기조정', menu=raw_brightnessMenu)\nraw_brightnessMenu.add_command(label='밝게(덧셈)', command=raw_brightAdd)\nraw_brightnessMenu.add_command(label='어둡게(뺄셈)', command=raw_brightSub)\nraw_brightnessMenu.add_command(label='더 밝게(곱셈)', command=raw_mulitply)\nraw_brightnessMenu.add_command(label='더 어둡게(나눗셈)', command=raw_division)\n\ncolor_brightnessMenu = Menu(pixelMenu)\npixelMenu.add_cascade(label='Color-밝기조정', menu=color_brightnessMenu)\ncolor_brightnessMenu.add_command(label='밝게(덧셈)', command=color_brightAdd)\ncolor_brightnessMenu.add_command(label='어둡게(뺄셈)', command=color_brightSub)\ncolor_brightnessMenu.add_command(label='더 밝게(곱셈)', command=color_mulitply)\ncolor_brightnessMenu.add_command(label='더 어둡게(나눗셈)', command=color_division)\n\nraw_pixelMenu = Menu(pixelMenu)\npixelMenu.add_cascade(label='Gray scale-화소값처리',menu=raw_pixelMenu)\nraw_pixelMenu.add_command(label='화소값반전', command=raw_reverse)\nraw_pixelMenu.add_command(label='파라볼라(Cap)', command=raw_cap)\nraw_pixelMenu.add_command(label='파라볼라(Cup)', command=raw_cup)\nraw_pixelMenu.add_command(label='감마', command=raw_gamma)\nraw_pixelMenu.add_command(label='적응이진화', command=raw_binarAdaptive)\nraw_pixelMenu.add_command(label='범위강조변환', command=raw_spotLight)\nraw_pixelMenu.add_command(label='합성', command=raw_morphing)\n\ncolor_pixelMenu = Menu(pixelMenu)\npixelMenu.add_cascade(label='Color-화소값처리',menu=color_pixelMenu)\ncolor_pixelMenu.add_command(label='화소값반전', command=color_reverse)\ncolor_pixelMenu.add_command(label='파라볼라(Cap)', command=color_cap)\ncolor_pixelMenu.add_command(label='파라볼라(Cup)', command=color_cup)\ncolor_pixelMenu.add_command(label='감마', command=color_gamma)\ncolor_pixelMenu.add_command(label='적응이진화', command=color_binarAdaptive)\ncolor_pixelMenu.add_command(label='범위강조변환', command=color_spotLight)\ncolor_pixelMenu.add_command(label='합성', command=color_morphing)\n\n\n### 기하학처리\ngeoMenu = Menu(mainMenu)\nmainMenu.add_cascade(label='기하학 처리', menu=geoMenu)\n\nraw_geoMenu = Menu(geoMenu)\ngeoMenu.add_cascade(label='Gray scale-기하학 처리', menu=raw_geoMenu)\nraw_geoMenu.add_command(label='화면확대', command=raw_zoomIn)\nraw_geoMenu.add_command(label='화면축소', command=raw_zoomOut)\nraw_geoMenu.add_command(label='상하반전', command=raw_upDown)\nraw_geoMenu.add_command(label='좌우반전', 
command=raw_rightLeft)\nraw_geoMenu.add_command(label='화면이동', command=raw_panImage)\nraw_geoMenu.add_separator()\nraw_geoMenu.add_command(label='영상회전(포워딩)', command=raw_rotate1)\nraw_geoMenu.add_command(label='영상회전(백워딩 및 중앙)', command=raw_rotate2)\nraw_geoMenu.add_command(label='영상회전(확대)', command=raw_rotate3)\n\ncolor_geoMenu = Menu(geoMenu)\ngeoMenu.add_cascade(label='Color-기하학 처리', menu=color_geoMenu)\ncolor_geoMenu.add_command(label='화면확대', command=color_zoomIn)\ncolor_geoMenu.add_command(label='화면축소', command=color_zoomOut)\ncolor_geoMenu.add_command(label='상하반전', command=color_upDown)\ncolor_geoMenu.add_command(label='좌우반전', command=color_rightLeft)\ncolor_geoMenu.add_command(label='화면이동', command=color_panImage)\ncolor_geoMenu.add_separator()\ncolor_geoMenu.add_command(label='영상회전(포워딩)', command=color_rotate1)\ncolor_geoMenu.add_command(label='영상회전(백워딩 및 중앙)', command=color_rotate2)\ncolor_geoMenu.add_command(label='영상회전(확대)', command=color_rotate3)\n\n### 화소영역처리\nareaMenu = Menu(mainMenu)\nmainMenu.add_cascade(label='화소영역처리', menu=areaMenu)\n\nraw_areaMenu = Menu(areaMenu)\nareaMenu.add_cascade(label='Gray scale-화소영역처리', menu=raw_areaMenu)\nraw_areaMenu.add_command(label='엠보싱', command=raw_embossing)\nraw_areaMenu.add_command(label='블러링', command=raw_bluring)\nraw_areaMenu.add_command(label='가우시안필터', command=raw_GaussianFilter)\nraw_areaMenu.add_command(label='샤프닝', command=raw_Sharpening)\nraw_areaMenu.add_command(label='고주파 샤프닝', command=raw_HpfSharpening)\nraw_areaMenu.add_command(label='저주파 샤프닝', command=raw_LpfSharpening)\nraw_areaMenu.add_command(label='유사 연산자 에지 검출', command=raw_HomogenOperator)\n\n\ncolor_areaMenu = Menu(areaMenu)\nareaMenu.add_cascade(label='Color-화소영역처리', menu=color_areaMenu)\ncolor_areaMenu.add_command(label='엠보싱', command=color_embossing)\ncolor_areaMenu.add_command(label='블러링', command=color_bluring)\ncolor_areaMenu.add_command(label='가우시안필터', command=color_GaussianFilter)\ncolor_areaMenu.add_command(label='샤프닝', command=color_Sharpening)\ncolor_areaMenu.add_command(label='고주파 샤프닝', command=color_HpfSharpening)\ncolor_areaMenu.add_command(label='저주파 샤프닝', command=color_LpfSharpening)\ncolor_areaMenu.add_command(label='유사 연산자 에지 검출', command=color_HomogenOperator)\n\n\n\n### 데이터 분석\nanalyzeMenu = Menu(mainMenu)\nmainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)\n\nraw_analyzeMenu = Menu(analyzeMenu)\nanalyzeMenu.add_cascade(label='Gray scale-데이터분석', menu=raw_analyzeMenu)\nraw_analyzeMenu.add_command(label='데이터값 분석', command=raw_data)\nraw_analyzeMenu.add_command(label='히스토그램', command=raw_histogram)\nraw_analyzeMenu.add_command(label='히스토그램(matplotlib)', command=raw_histo_plt)\nraw_analyzeMenu.add_separator()\nraw_analyzeMenu.add_command(label='히스토그램 평활화', command=raw_histoEqual)\nraw_analyzeMenu.add_command(label='히스토그램 스트레칭', command=raw_histoStretch)\nraw_analyzeMenu.add_command(label='히스토그램 엔드-인 탐색', command=raw_endIn)\n\ncolor_analyzeMenu = Menu(analyzeMenu)\nanalyzeMenu.add_cascade(label='Color-데이터분석', menu=color_analyzeMenu)\ncolor_analyzeMenu.add_command(label='테이터값 분석', command=color_data)\ncolor_analyzeMenu.add_command(label='히스토그램', command=color_histo_normal)\ncolor_analyzeMenu.add_command(label='히스토그램(matplotlib)', command=color_histo_plt)\ncolor_analyzeMenu.add_separator()\ncolor_analyzeMenu.add_command(label='히스토그램 평활화', command=color_histoEqual)\ncolor_analyzeMenu.add_command(label='히스토그램 스트레칭', command=color_histoStretch)\ncolor_analyzeMenu.add_command(label='히스토그램 엔드-인 탐색', command=color_endIn)\n\nwindow.mainloop()\n"
] | [
[
"matplotlib.pyplot.plot",
"numpy.arange",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
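The DATS record above implements end-in stretching and histogram equalization with hand-rolled nested loops and 256-entry lookup tables (one bin per possible 8-bit value, with the cumulative sum restarted for every channel). As a cross-check, the same per-channel equalization fits in a few lines of NumPy; this is a sketch under the assumption of 8-bit channels stored as 2-D uint8 arrays, and the names equalize_channel and rgb are illustrative rather than taken from the record:

import numpy as np

def equalize_channel(channel):
    # one bin per possible 8-bit value -> 256 bins, not 255
    histo = np.bincount(channel.ravel(), minlength=256)
    cdf = histo.cumsum()  # cumulative histogram, computed fresh per channel
    lut = (cdf / channel.size * 255).astype(np.uint8)  # normalized lookup table
    return lut[channel]  # apply the lookup table to every pixel

rgb = np.random.randint(0, 256, size=(128, 128, 3), dtype=np.uint8)
equalized = np.stack([equalize_channel(rgb[..., c]) for c in range(3)], axis=-1)

Computing the cumulative sum inside the function also makes it impossible to leak the running total from one channel into the next, which is the easiest mistake to make in the loop-based version.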
DJongstra/habitat-lab | [
"42c52f65ad0b758ec7d2dd14ec5d3d64f365d24b"
] | [
"habitat_baselines/rl/ppo/ppo_trainer.py"
] | [
"#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport contextlib\nimport os\nimport random\nimport time\nfrom collections import defaultdict, deque\nfrom typing import Any, Dict, List, Optional\n\nimport numpy as np\nimport torch\nimport tqdm\nfrom gym import spaces\nfrom torch import nn\nfrom torch.optim.lr_scheduler import LambdaLR\n\nfrom habitat import Config, VectorEnv, logger\nfrom habitat.utils import profiling_wrapper\nfrom habitat.utils.visualizations.utils import observations_to_image\nfrom habitat_baselines.common.base_trainer import BaseRLTrainer\nfrom habitat_baselines.common.baseline_registry import baseline_registry\nfrom habitat_baselines.common.environments import get_env_class\nfrom habitat_baselines.common.obs_transformers import (\n apply_obs_transforms_batch,\n apply_obs_transforms_obs_space,\n get_active_obs_transforms,\n)\nfrom habitat_baselines.common.rollout_storage import RolloutStorage\nfrom habitat_baselines.common.tensorboard_utils import TensorboardWriter\nfrom habitat_baselines.rl.ddppo.algo import DDPPO\nfrom habitat_baselines.rl.ddppo.algo.ddp_utils import (\n EXIT,\n REQUEUE,\n add_signal_handlers,\n get_distrib_size,\n init_distrib_slurm,\n is_slurm_batch_job,\n load_interrupted_state,\n rank0_only,\n requeue_job,\n save_interrupted_state,\n)\nfrom habitat_baselines.rl.ppo import PPO\nfrom habitat_baselines.rl.ppo.policy import Policy\nfrom habitat_baselines.utils.common import (\n ObservationBatchingCache,\n batch_obs,\n generate_video,\n)\nfrom habitat_baselines.utils.env_utils import construct_envs\n\n\n@baseline_registry.register_trainer(name=\"ddppo\")\n@baseline_registry.register_trainer(name=\"ppo\")\nclass PPOTrainer(BaseRLTrainer):\n r\"\"\"Trainer class for PPO algorithm\n Paper: https://arxiv.org/abs/1707.06347.\n \"\"\"\n supported_tasks = [\"Nav-v0\"]\n\n SHORT_ROLLOUT_THRESHOLD: float = 0.25\n _is_distributed: bool\n _obs_batching_cache: ObservationBatchingCache\n envs: VectorEnv\n agent: PPO\n actor_critic: Policy\n\n def __init__(self, config=None):\n interrupted_state = load_interrupted_state()\n if interrupted_state is not None:\n config = interrupted_state[\"config\"]\n\n super().__init__(config)\n self.actor_critic = None\n self.agent = None\n self.envs = None\n self.obs_transforms = []\n\n self._static_encoder = False\n self._encoder = None\n self._obs_space = None\n\n # Distirbuted if the world size would be\n # greater than 1\n self._is_distributed = get_distrib_size()[2] > 1\n self._obs_batching_cache = ObservationBatchingCache()\n\n @property\n def obs_space(self):\n if self._obs_space is None and self.envs is not None:\n self._obs_space = self.envs.observation_spaces[0]\n\n return self._obs_space\n\n @obs_space.setter\n def obs_space(self, new_obs_space):\n self._obs_space = new_obs_space\n\n def _all_reduce(self, t: torch.Tensor) -> torch.Tensor:\n r\"\"\"All reduce helper method that moves things to the correct\n device and only runs if distributed\n \"\"\"\n if not self._is_distributed:\n return t\n\n orig_device = t.device\n t = t.to(device=self.device)\n torch.distributed.all_reduce(t)\n\n return t.to(device=orig_device)\n\n def _setup_actor_critic_agent(self, ppo_cfg: Config) -> None:\n r\"\"\"Sets up actor critic and agent for PPO.\n\n Args:\n ppo_cfg: config node with relevant params\n\n Returns:\n None\n \"\"\"\n logger.add_filehandler(self.config.LOG_FILE)\n\n 
policy = baseline_registry.get_policy(self.config.RL.POLICY.name)\n observation_space = self.obs_space\n self.obs_transforms = get_active_obs_transforms(self.config)\n observation_space = apply_obs_transforms_obs_space(\n observation_space, self.obs_transforms\n )\n self.actor_critic = policy.from_config(\n self.config, observation_space, self.envs.action_spaces[0]\n )\n self.obs_space = observation_space\n self.actor_critic.to(self.device)\n\n if (\n self.config.RL.DDPPO.pretrained_encoder\n or self.config.RL.DDPPO.pretrained\n ):\n pretrained_state = torch.load(\n self.config.RL.DDPPO.pretrained_weights, map_location=\"cpu\"\n )\n\n if self.config.RL.DDPPO.pretrained:\n self.actor_critic.load_state_dict(\n {\n k[len(\"actor_critic.\") :]: v\n for k, v in pretrained_state[\"state_dict\"].items()\n }\n )\n elif self.config.RL.DDPPO.pretrained_encoder:\n prefix = \"actor_critic.net.visual_encoder.\"\n self.actor_critic.net.visual_encoder.load_state_dict(\n {\n k[len(prefix) :]: v\n for k, v in pretrained_state[\"state_dict\"].items()\n if k.startswith(prefix)\n }\n )\n\n if not self.config.RL.DDPPO.train_encoder:\n self._static_encoder = True\n for param in self.actor_critic.net.visual_encoder.parameters():\n param.requires_grad_(False)\n\n if self.config.RL.DDPPO.reset_critic:\n nn.init.orthogonal_(self.actor_critic.critic.fc.weight)\n nn.init.constant_(self.actor_critic.critic.fc.bias, 0)\n\n self.agent = (DDPPO if self._is_distributed else PPO)(\n actor_critic=self.actor_critic,\n clip_param=ppo_cfg.clip_param,\n ppo_epoch=ppo_cfg.ppo_epoch,\n num_mini_batch=ppo_cfg.num_mini_batch,\n value_loss_coef=ppo_cfg.value_loss_coef,\n entropy_coef=ppo_cfg.entropy_coef,\n lr=ppo_cfg.lr,\n eps=ppo_cfg.eps,\n max_grad_norm=ppo_cfg.max_grad_norm,\n use_normalized_advantage=ppo_cfg.use_normalized_advantage,\n )\n\n def _init_envs(self, config=None):\n if config is None:\n config = self.config\n\n self.envs = construct_envs(\n config,\n get_env_class(config.ENV_NAME),\n workers_ignore_signals=is_slurm_batch_job(),\n )\n\n def _init_train(self):\n if self.config.RL.DDPPO.force_distributed:\n self._is_distributed = True\n\n if is_slurm_batch_job():\n add_signal_handlers()\n\n if self._is_distributed:\n local_rank, tcp_store = init_distrib_slurm(\n self.config.RL.DDPPO.distrib_backend\n )\n if rank0_only():\n logger.info(\n \"Initialized DD-PPO with {} workers\".format(\n torch.distributed.get_world_size()\n )\n )\n\n self.config.defrost()\n self.config.TORCH_GPU_ID = local_rank\n self.config.SIMULATOR_GPU_ID = local_rank\n # Multiply by the number of simulators to make sure they also get unique seeds\n self.config.TASK_CONFIG.SEED += (\n torch.distributed.get_rank() * self.config.NUM_ENVIRONMENTS\n )\n self.config.freeze()\n\n random.seed(self.config.TASK_CONFIG.SEED)\n np.random.seed(self.config.TASK_CONFIG.SEED)\n torch.manual_seed(self.config.TASK_CONFIG.SEED)\n self.num_rollouts_done_store = torch.distributed.PrefixStore(\n \"rollout_tracker\", tcp_store\n )\n self.num_rollouts_done_store.set(\"num_done\", \"0\")\n\n if rank0_only() and self.config.VERBOSE:\n logger.info(f\"config: {self.config}\")\n\n profiling_wrapper.configure(\n capture_start_step=self.config.PROFILING.CAPTURE_START_STEP,\n num_steps_to_capture=self.config.PROFILING.NUM_STEPS_TO_CAPTURE,\n )\n\n self._init_envs()\n\n ppo_cfg = self.config.RL.PPO\n if torch.cuda.is_available():\n self.device = torch.device(\"cuda\", self.config.TORCH_GPU_ID)\n torch.cuda.set_device(self.device)\n else:\n self.device = torch.device(\"cpu\")\n\n if 
rank0_only() and not os.path.isdir(self.config.CHECKPOINT_FOLDER):\n os.makedirs(self.config.CHECKPOINT_FOLDER)\n\n self._setup_actor_critic_agent(ppo_cfg)\n if self._is_distributed:\n self.agent.init_distributed(find_unused_params=True)\n\n logger.info(\n \"agent number of parameters: {}\".format(\n sum(param.numel() for param in self.agent.parameters())\n )\n )\n\n obs_space = self.obs_space\n if self._static_encoder:\n self._encoder = self.actor_critic.net.visual_encoder\n obs_space = spaces.Dict(\n {\n \"visual_features\": spaces.Box(\n low=np.finfo(np.float32).min,\n high=np.finfo(np.float32).max,\n shape=self._encoder.output_shape,\n dtype=np.float32,\n ),\n **obs_space.spaces,\n }\n )\n\n self._nbuffers = 2 if ppo_cfg.use_double_buffered_sampler else 1\n self.rollouts = RolloutStorage(\n ppo_cfg.num_steps,\n self.envs.num_envs,\n obs_space,\n self.envs.action_spaces[0],\n ppo_cfg.hidden_size,\n num_recurrent_layers=self.actor_critic.net.num_recurrent_layers,\n is_double_buffered=ppo_cfg.use_double_buffered_sampler,\n )\n self.rollouts.to(self.device)\n\n observations = self.envs.reset()\n batch = batch_obs(\n observations, device=self.device, cache=self._obs_batching_cache\n )\n batch = apply_obs_transforms_batch(batch, self.obs_transforms)\n\n if self._static_encoder:\n with torch.no_grad():\n batch[\"visual_features\"] = self._encoder(batch)\n\n self.rollouts.buffers[\"observations\"][0] = batch\n\n self.current_episode_reward = torch.zeros(self.envs.num_envs, 1)\n self.running_episode_stats = dict(\n count=torch.zeros(self.envs.num_envs, 1),\n reward=torch.zeros(self.envs.num_envs, 1),\n )\n self.window_episode_stats = defaultdict(\n lambda: deque(maxlen=ppo_cfg.reward_window_size)\n )\n\n self.env_time = 0.0\n self.pth_time = 0.0\n self.t_start = time.time()\n\n @rank0_only\n @profiling_wrapper.RangeContext(\"save_checkpoint\")\n def save_checkpoint(\n self, file_name: str, extra_state: Optional[Dict] = None\n ) -> None:\n r\"\"\"Save checkpoint with specified name.\n\n Args:\n file_name: file name for checkpoint\n\n Returns:\n None\n \"\"\"\n checkpoint = {\n \"state_dict\": self.agent.state_dict(),\n \"config\": self.config,\n }\n if extra_state is not None:\n checkpoint[\"extra_state\"] = extra_state\n\n torch.save(\n checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name)\n )\n\n def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:\n r\"\"\"Load checkpoint of specified path as a dict.\n\n Args:\n checkpoint_path: path of target checkpoint\n *args: additional positional args\n **kwargs: additional keyword args\n\n Returns:\n dict containing checkpoint info\n \"\"\"\n return torch.load(checkpoint_path, *args, **kwargs)\n\n METRICS_BLACKLIST = {\"top_down_map\", \"collisions.is_collision\"}\n\n @classmethod\n def _extract_scalars_from_info(\n cls, info: Dict[str, Any]\n ) -> Dict[str, float]:\n result = {}\n for k, v in info.items():\n if k in cls.METRICS_BLACKLIST:\n continue\n\n if isinstance(v, dict):\n result.update(\n {\n k + \".\" + subk: subv\n for subk, subv in cls._extract_scalars_from_info(\n v\n ).items()\n if (k + \".\" + subk) not in cls.METRICS_BLACKLIST\n }\n )\n # Things that are scalar-like will have an np.size of 1.\n # Strings also have an np.size of 1, so explicitly ban those\n elif np.size(v) == 1 and not isinstance(v, str):\n result[k] = float(v)\n\n return result\n\n @classmethod\n def _extract_scalars_from_infos(\n cls, infos: List[Dict[str, Any]]\n ) -> Dict[str, List[float]]:\n\n results = defaultdict(list)\n for i in 
range(len(infos)):\n for k, v in cls._extract_scalars_from_info(infos[i]).items():\n results[k].append(v)\n\n return results\n\n def _compute_actions_and_step_envs(self, buffer_index: int = 0):\n num_envs = self.envs.num_envs\n env_slice = slice(\n int(buffer_index * num_envs / self._nbuffers),\n int((buffer_index + 1) * num_envs / self._nbuffers),\n )\n\n t_sample_action = time.time()\n\n # sample actions\n with torch.no_grad():\n step_batch = self.rollouts.buffers[\n self.rollouts.current_rollout_step_idxs[buffer_index],\n env_slice,\n ]\n\n profiling_wrapper.range_push(\"compute actions\")\n (\n values,\n actions,\n actions_log_probs,\n recurrent_hidden_states,\n ) = self.actor_critic.act(\n step_batch[\"observations\"],\n step_batch[\"recurrent_hidden_states\"],\n step_batch[\"prev_actions\"],\n step_batch[\"masks\"],\n )\n\n # NB: Move actions to CPU. If CUDA tensors are\n # sent in to env.step(), that will create CUDA contexts\n # in the subprocesses.\n # For backwards compatibility, we also call .item() to convert to\n # an int\n actions = actions.to(device=\"cpu\")\n self.pth_time += time.time() - t_sample_action\n\n profiling_wrapper.range_pop() # compute actions\n\n t_step_env = time.time()\n\n for index_env, act in zip(\n range(env_slice.start, env_slice.stop), actions.unbind(0)\n ):\n self.envs.async_step_at(index_env, act.item())\n\n self.env_time += time.time() - t_step_env\n\n self.rollouts.insert(\n next_recurrent_hidden_states=recurrent_hidden_states,\n actions=actions,\n action_log_probs=actions_log_probs,\n value_preds=values,\n buffer_index=buffer_index,\n )\n\n def _collect_environment_result(self, buffer_index: int = 0):\n num_envs = self.envs.num_envs\n env_slice = slice(\n int(buffer_index * num_envs / self._nbuffers),\n int((buffer_index + 1) * num_envs / self._nbuffers),\n )\n\n t_step_env = time.time()\n outputs = [\n self.envs.wait_step_at(index_env)\n for index_env in range(env_slice.start, env_slice.stop)\n ]\n\n observations, rewards_l, dones, infos = [\n list(x) for x in zip(*outputs)\n ]\n\n self.env_time += time.time() - t_step_env\n\n t_update_stats = time.time()\n batch = batch_obs(\n observations, device=self.device, cache=self._obs_batching_cache\n )\n batch = apply_obs_transforms_batch(batch, self.obs_transforms)\n\n rewards = torch.tensor(\n rewards_l,\n dtype=torch.float,\n device=self.current_episode_reward.device,\n )\n rewards = rewards.unsqueeze(1)\n\n not_done_masks = torch.tensor(\n [[not done] for done in dones],\n dtype=torch.bool,\n device=self.current_episode_reward.device,\n )\n done_masks = torch.logical_not(not_done_masks)\n\n self.current_episode_reward[env_slice] += rewards\n current_ep_reward = self.current_episode_reward[env_slice]\n self.running_episode_stats[\"reward\"][env_slice] += current_ep_reward.where(done_masks, current_ep_reward.new_zeros(())) # type: ignore\n self.running_episode_stats[\"count\"][env_slice] += done_masks.float() # type: ignore\n for k, v_k in self._extract_scalars_from_infos(infos).items():\n v = torch.tensor(\n v_k,\n dtype=torch.float,\n device=self.current_episode_reward.device,\n ).unsqueeze(1)\n if k not in self.running_episode_stats:\n self.running_episode_stats[k] = torch.zeros_like(\n self.running_episode_stats[\"count\"]\n )\n\n self.running_episode_stats[k][env_slice] += v.where(done_masks, v.new_zeros(())) # type: ignore\n\n self.current_episode_reward[env_slice].masked_fill_(done_masks, 0.0)\n\n if self._static_encoder:\n with torch.no_grad():\n batch[\"visual_features\"] = 
self._encoder(batch)\n\n self.rollouts.insert(\n next_observations=batch,\n rewards=rewards,\n next_masks=not_done_masks,\n buffer_index=buffer_index,\n )\n\n self.rollouts.advance_rollout(buffer_index)\n\n self.pth_time += time.time() - t_update_stats\n\n return env_slice.stop - env_slice.start\n\n @profiling_wrapper.RangeContext(\"_collect_rollout_step\")\n def _collect_rollout_step(self):\n self._compute_actions_and_step_envs()\n return self._collect_environment_result()\n\n @profiling_wrapper.RangeContext(\"_update_agent\")\n def _update_agent(self):\n ppo_cfg = self.config.RL.PPO\n t_update_model = time.time()\n with torch.no_grad():\n step_batch = self.rollouts.buffers[\n self.rollouts.current_rollout_step_idx\n ]\n\n next_value = self.actor_critic.get_value(\n step_batch[\"observations\"],\n step_batch[\"recurrent_hidden_states\"],\n step_batch[\"prev_actions\"],\n step_batch[\"masks\"],\n )\n\n self.rollouts.compute_returns(\n next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau\n )\n\n self.agent.train()\n\n value_loss, action_loss, dist_entropy = self.agent.update(\n self.rollouts\n )\n\n self.rollouts.after_update()\n self.pth_time += time.time() - t_update_model\n\n return (\n value_loss,\n action_loss,\n dist_entropy,\n )\n\n def _coalesce_post_step(\n self, losses: Dict[str, float], count_steps_delta: int\n ) -> Dict[str, float]:\n stats_ordering = sorted(self.running_episode_stats.keys())\n stats = torch.stack(\n [self.running_episode_stats[k] for k in stats_ordering], 0\n )\n\n stats = self._all_reduce(stats)\n\n for i, k in enumerate(stats_ordering):\n self.window_episode_stats[k].append(stats[i])\n\n if self._is_distributed:\n loss_name_ordering = sorted(losses.keys())\n stats = torch.tensor(\n [losses[k] for k in loss_name_ordering] + [count_steps_delta],\n device=\"cpu\",\n dtype=torch.float32,\n )\n stats = self._all_reduce(stats)\n count_steps_delta = int(stats[-1].item())\n stats /= torch.distributed.get_world_size()\n\n losses = {\n k: stats[i].item() for i, k in enumerate(loss_name_ordering)\n }\n\n if self._is_distributed and rank0_only():\n self.num_rollouts_done_store.set(\"num_done\", \"0\")\n\n self.num_steps_done += count_steps_delta\n\n return losses\n\n @rank0_only\n def _training_log(\n self, writer, losses: Dict[str, float], prev_time: int = 0\n ):\n deltas = {\n k: (\n (v[-1] - v[0]).sum().item()\n if len(v) > 1\n else v[0].sum().item()\n )\n for k, v in self.window_episode_stats.items()\n }\n deltas[\"count\"] = max(deltas[\"count\"], 1.0)\n\n writer.add_scalar(\n \"reward\",\n deltas[\"reward\"] / deltas[\"count\"],\n self.num_steps_done,\n )\n\n # Check to see if there are any metrics\n # that haven't been logged yet\n metrics = {\n k: v / deltas[\"count\"]\n for k, v in deltas.items()\n if k not in {\"reward\", \"count\"}\n }\n if len(metrics) > 0:\n writer.add_scalars(\"metrics\", metrics, self.num_steps_done)\n\n writer.add_scalars(\n \"losses\",\n losses,\n self.num_steps_done,\n )\n\n # log stats\n if self.num_updates_done % self.config.LOG_INTERVAL == 0:\n logger.info(\n \"update: {}\\tfps: {:.3f}\\t\".format(\n self.num_updates_done,\n self.num_steps_done\n / ((time.time() - self.t_start) + prev_time),\n )\n )\n\n logger.info(\n \"update: {}\\tenv-time: {:.3f}s\\tpth-time: {:.3f}s\\t\"\n \"frames: {}\".format(\n self.num_updates_done,\n self.env_time,\n self.pth_time,\n self.num_steps_done,\n )\n )\n\n logger.info(\n \"Average window size: {} {}\".format(\n len(self.window_episode_stats[\"count\"]),\n \" \".join(\n \"{}: {:.3f}\".format(k, v 
/ deltas[\"count\"])\n for k, v in deltas.items()\n if k != \"count\"\n ),\n )\n )\n\n def should_end_early(self, rollout_step) -> bool:\n if not self._is_distributed:\n return False\n # This is where the preemption of workers happens. If a\n # worker detects it will be a straggler, it preempts itself!\n return (\n rollout_step\n >= self.config.RL.PPO.num_steps * self.SHORT_ROLLOUT_THRESHOLD\n ) and int(self.num_rollouts_done_store.get(\"num_done\")) >= (\n self.config.RL.DDPPO.sync_frac * torch.distributed.get_world_size()\n )\n\n @profiling_wrapper.RangeContext(\"train\")\n def train(self) -> None:\n r\"\"\"Main method for training DD/PPO.\n\n Returns:\n None\n \"\"\"\n\n self._init_train()\n\n count_checkpoints = 0\n prev_time = 0\n\n lr_scheduler = LambdaLR(\n optimizer=self.agent.optimizer,\n lr_lambda=lambda x: 1 - self.percent_done(),\n )\n\n interrupted_state = load_interrupted_state()\n if interrupted_state is not None:\n self.agent.load_state_dict(interrupted_state[\"state_dict\"])\n self.agent.optimizer.load_state_dict(\n interrupted_state[\"optim_state\"]\n )\n lr_scheduler.load_state_dict(interrupted_state[\"lr_sched_state\"])\n\n requeue_stats = interrupted_state[\"requeue_stats\"]\n self.env_time = requeue_stats[\"env_time\"]\n self.pth_time = requeue_stats[\"pth_time\"]\n self.num_steps_done = requeue_stats[\"num_steps_done\"]\n self.num_updates_done = requeue_stats[\"num_updates_done\"]\n self._last_checkpoint_percent = requeue_stats[\n \"_last_checkpoint_percent\"\n ]\n count_checkpoints = requeue_stats[\"count_checkpoints\"]\n prev_time = requeue_stats[\"prev_time\"]\n\n self._last_checkpoint_percent = requeue_stats[\n \"_last_checkpoint_percent\"\n ]\n\n ppo_cfg = self.config.RL.PPO\n\n with (\n TensorboardWriter(\n self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs\n )\n if rank0_only()\n else contextlib.suppress()\n ) as writer:\n while not self.is_done():\n profiling_wrapper.on_start_step()\n profiling_wrapper.range_push(\"train update\")\n\n if ppo_cfg.use_linear_clip_decay:\n self.agent.clip_param = ppo_cfg.clip_param * (\n 1 - self.percent_done()\n )\n\n if EXIT.is_set():\n profiling_wrapper.range_pop() # train update\n\n self.envs.close()\n\n if REQUEUE.is_set() and rank0_only():\n requeue_stats = dict(\n env_time=self.env_time,\n pth_time=self.pth_time,\n count_checkpoints=count_checkpoints,\n num_steps_done=self.num_steps_done,\n num_updates_done=self.num_updates_done,\n _last_checkpoint_percent=self._last_checkpoint_percent,\n prev_time=(time.time() - self.t_start) + prev_time,\n )\n save_interrupted_state(\n dict(\n state_dict=self.agent.state_dict(),\n optim_state=self.agent.optimizer.state_dict(),\n lr_sched_state=lr_scheduler.state_dict(),\n config=self.config,\n requeue_stats=requeue_stats,\n )\n )\n\n requeue_job()\n return\n\n self.agent.eval()\n count_steps_delta = 0\n profiling_wrapper.range_push(\"rollouts loop\")\n\n profiling_wrapper.range_push(\"_collect_rollout_step\")\n for buffer_index in range(self._nbuffers):\n self._compute_actions_and_step_envs(buffer_index)\n\n for step in range(ppo_cfg.num_steps):\n is_last_step = (\n self.should_end_early(step + 1)\n or (step + 1) == ppo_cfg.num_steps\n )\n\n for buffer_index in range(self._nbuffers):\n count_steps_delta += self._collect_environment_result(\n buffer_index\n )\n\n if (buffer_index + 1) == self._nbuffers:\n profiling_wrapper.range_pop() # _collect_rollout_step\n\n if not is_last_step:\n if (buffer_index + 1) == self._nbuffers:\n profiling_wrapper.range_push(\n 
\"_collect_rollout_step\"\n )\n\n self._compute_actions_and_step_envs(buffer_index)\n\n if is_last_step:\n break\n\n profiling_wrapper.range_pop() # rollouts loop\n\n if self._is_distributed:\n self.num_rollouts_done_store.add(\"num_done\", 1)\n\n (\n value_loss,\n action_loss,\n dist_entropy,\n ) = self._update_agent()\n\n if ppo_cfg.use_linear_lr_decay:\n lr_scheduler.step() # type: ignore\n\n self.num_updates_done += 1\n losses = self._coalesce_post_step(\n dict(value_loss=value_loss, action_loss=action_loss),\n count_steps_delta,\n )\n\n self._training_log(writer, losses, prev_time)\n\n # checkpoint model\n if rank0_only() and self.should_checkpoint():\n self.save_checkpoint(\n f\"ckpt.{count_checkpoints}.pth\",\n dict(\n step=self.num_steps_done,\n wall_time=(time.time() - self.t_start) + prev_time,\n ),\n )\n count_checkpoints += 1\n\n profiling_wrapper.range_pop() # train update\n\n self.envs.close()\n\n def _eval_checkpoint(\n self,\n checkpoint_path: str,\n writer: TensorboardWriter,\n checkpoint_index: int = 0,\n ) -> None:\n r\"\"\"Evaluates a single checkpoint.\n\n Args:\n checkpoint_path: path of checkpoint\n writer: tensorboard writer object for logging to tensorboard\n checkpoint_index: index of cur checkpoint for logging\n\n Returns:\n None\n \"\"\"\n if self._is_distributed:\n raise RuntimeError(\"Evaluation does not support distributed mode\")\n\n # Map location CPU is almost always better than mapping to a CUDA device.\n ckpt_dict = self.load_checkpoint(checkpoint_path, map_location=\"cpu\")\n\n if self.config.EVAL.USE_CKPT_CONFIG:\n config = self._setup_eval_config(ckpt_dict[\"config\"])\n else:\n config = self.config.clone()\n\n ppo_cfg = config.RL.PPO\n\n config.defrost()\n config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT\n config.freeze()\n\n if len(self.config.VIDEO_OPTION) > 0:\n config.defrost()\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"TOP_DOWN_MAP\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"COLLISIONS\")\n config.freeze()\n\n if config.VERBOSE:\n logger.info(f\"env config: {config}\")\n\n self._init_envs(config)\n self._setup_actor_critic_agent(ppo_cfg)\n\n self.agent.load_state_dict(ckpt_dict[\"state_dict\"])\n self.actor_critic = self.agent.actor_critic\n\n observations = self.envs.reset()\n batch = batch_obs(\n observations, device=self.device, cache=self._obs_batching_cache\n )\n batch = apply_obs_transforms_batch(batch, self.obs_transforms)\n\n current_episode_reward = torch.zeros(\n self.envs.num_envs, 1, device=\"cpu\"\n )\n\n test_recurrent_hidden_states = torch.zeros(\n self.config.NUM_ENVIRONMENTS,\n self.actor_critic.net.num_recurrent_layers,\n ppo_cfg.hidden_size,\n device=self.device,\n )\n prev_actions = torch.zeros(\n self.config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.long,\n )\n not_done_masks = torch.zeros(\n self.config.NUM_ENVIRONMENTS,\n 1,\n device=self.device,\n dtype=torch.bool,\n )\n stats_episodes: Dict[\n Any, Any\n ] = {} # dict of dicts that stores stats per episode\n\n rgb_frames = [\n [] for _ in range(self.config.NUM_ENVIRONMENTS)\n ] # type: List[List[np.ndarray]]\n if len(self.config.VIDEO_OPTION) > 0:\n os.makedirs(self.config.VIDEO_DIR, exist_ok=True)\n\n number_of_eval_episodes = self.config.TEST_EPISODE_COUNT\n if number_of_eval_episodes == -1:\n number_of_eval_episodes = sum(self.envs.number_of_episodes)\n else:\n total_num_eps = sum(self.envs.number_of_episodes)\n if total_num_eps < number_of_eval_episodes:\n logger.warn(\n f\"Config specified {number_of_eval_episodes} eval episodes\"\n 
\", dataset only has {total_num_eps}.\"\n )\n logger.warn(f\"Evaluating with {total_num_eps} instead.\")\n number_of_eval_episodes = total_num_eps\n\n pbar = tqdm.tqdm(total=number_of_eval_episodes)\n self.actor_critic.eval()\n while (\n len(stats_episodes) < number_of_eval_episodes\n and self.envs.num_envs > 0\n ):\n current_episodes = self.envs.current_episodes()\n\n with torch.no_grad():\n (\n _,\n actions,\n _,\n test_recurrent_hidden_states,\n ) = self.actor_critic.act(\n batch,\n test_recurrent_hidden_states,\n prev_actions,\n not_done_masks,\n deterministic=False,\n )\n\n prev_actions.copy_(actions) # type: ignore\n\n # NB: Move actions to CPU. If CUDA tensors are\n # sent in to env.step(), that will create CUDA contexts\n # in the subprocesses.\n # For backwards compatibility, we also call .item() to convert to\n # an int\n step_data = [a.item() for a in actions.to(device=\"cpu\")]\n\n outputs = self.envs.step(step_data)\n\n observations, rewards_l, dones, infos = [\n list(x) for x in zip(*outputs)\n ]\n batch = batch_obs(\n observations,\n device=self.device,\n cache=self._obs_batching_cache,\n )\n batch = apply_obs_transforms_batch(batch, self.obs_transforms)\n\n not_done_masks = torch.tensor(\n [[not done] for done in dones],\n dtype=torch.bool,\n device=\"cpu\",\n )\n\n rewards = torch.tensor(\n rewards_l, dtype=torch.float, device=\"cpu\"\n ).unsqueeze(1)\n current_episode_reward += rewards\n next_episodes = self.envs.current_episodes()\n envs_to_pause = []\n n_envs = self.envs.num_envs\n for i in range(n_envs):\n if (\n next_episodes[i].scene_id,\n next_episodes[i].episode_id,\n ) in stats_episodes:\n envs_to_pause.append(i)\n\n # episode ended\n if not not_done_masks[i].item():\n pbar.update()\n episode_stats = {}\n episode_stats[\"reward\"] = current_episode_reward[i].item()\n episode_stats.update(\n self._extract_scalars_from_info(infos[i])\n )\n current_episode_reward[i] = 0\n # use scene_id + episode_id as unique id for storing stats\n stats_episodes[\n (\n current_episodes[i].scene_id,\n current_episodes[i].episode_id,\n )\n ] = episode_stats\n\n if len(self.config.VIDEO_OPTION) > 0:\n generate_video(\n video_option=self.config.VIDEO_OPTION,\n video_dir=self.config.VIDEO_DIR,\n images=rgb_frames[i],\n episode_id=current_episodes[i].episode_id,\n checkpoint_idx=checkpoint_index,\n metrics=self._extract_scalars_from_info(infos[i]),\n tb_writer=writer,\n )\n\n rgb_frames[i] = []\n\n # episode continues\n elif len(self.config.VIDEO_OPTION) > 0:\n # TODO move normalization / channel changing out of the policy and undo it here\n frame = observations_to_image(\n {k: v[i] for k, v in batch.items()}, infos[i]\n )\n rgb_frames[i].append(frame)\n\n not_done_masks = not_done_masks.to(device=self.device)\n (\n self.envs,\n test_recurrent_hidden_states,\n not_done_masks,\n current_episode_reward,\n prev_actions,\n batch,\n rgb_frames,\n ) = self._pause_envs(\n envs_to_pause,\n self.envs,\n test_recurrent_hidden_states,\n not_done_masks,\n current_episode_reward,\n prev_actions,\n batch,\n rgb_frames,\n )\n\n num_episodes = len(stats_episodes)\n aggregated_stats = {}\n for stat_key in next(iter(stats_episodes.values())).keys():\n aggregated_stats[stat_key] = (\n sum(v[stat_key] for v in stats_episodes.values())\n / num_episodes\n )\n\n for k, v in aggregated_stats.items():\n logger.info(f\"Average episode {k}: {v:.4f}\")\n\n step_id = checkpoint_index\n if \"extra_state\" in ckpt_dict and \"step\" in ckpt_dict[\"extra_state\"]:\n step_id = ckpt_dict[\"extra_state\"][\"step\"]\n\n 
writer.add_scalars(\n \"eval_reward\",\n {\"average reward\": aggregated_stats[\"reward\"]},\n step_id,\n )\n\n metrics = {k: v for k, v in aggregated_stats.items() if k != \"reward\"}\n if len(metrics) > 0:\n writer.add_scalars(\"eval_metrics\", metrics, step_id)\n\n self.envs.close()\n"
] | [
[
"torch.load",
"torch.zeros",
"torch.distributed.PrefixStore",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.distributed.get_rank",
"torch.logical_not",
"torch.tensor",
"numpy.finfo",
"numpy.size",
"torch.nn.init.constant_",
"torch.zeros_like",
"torch.stack",
"torch.distributed.get_world_size",
"numpy.random.seed",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.nn.init.orthogonal_",
"torch.distributed.all_reduce"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
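The embedded trainer flattens nested info dicts into dot-separated scalar keys before logging (see _extract_scalars_from_info in the code field above). A standalone sketch of that flattening, reusing the record's METRICS_BLACKLIST contents but substituting a try/except for the trainer's np.size check so the snippet has no NumPy dependency:

from typing import Any, Dict

METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}

def extract_scalars(info: Dict[str, Any], prefix: str = "") -> Dict[str, float]:
    result = {}
    for k, v in info.items():
        key = prefix + k
        if key in METRICS_BLACKLIST:
            continue
        if isinstance(v, dict):
            # recurse, joining nested keys with dots ("collisions.count")
            result.update(extract_scalars(v, key + "."))
        elif not isinstance(v, str):
            try:
                result[key] = float(v)
            except (TypeError, ValueError):
                pass  # skip values that are not scalar-like
    return result

print(extract_scalars({"spl": 0.8, "collisions": {"count": 3, "is_collision": True}}))
# -> {'spl': 0.8, 'collisions.count': 3.0}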
fminga/tiqs | [
"3e03658feb518025ec4b7c3c00d08bbda94d2b1e"
] | [
"tiqs-notebooks/example.py"
] | [
"from qutip import *\nimport numpy as np\nimport scipy as sc\nimport matplotlib.pyplot as plt\nimport time\n\n## this is for spcific configurations, default set to false\nparallel_active = False\nif parallel_active == True:\n from qutip.parallel import parfor, parallel_map, serial_map\n\n# Example: a spin chain with temperature and dissipation. \n# This chain has a U(1) symmetry.\n# In the reduced basis that means a Z_n symmetry with n=lattice_size+1).\n\nlattice_size=6\nN_max=2\noperators_list=[]\n\na = tensor(destroy(N_max), identity(N_max))\nb = tensor(identity(N_max), destroy(N_max))\nide = tensor(identity(N_max), identity(N_max))\n\nfor j in range(2,lattice_size):\n a = tensor(a, identity(N_max))\n b = tensor(b, identity(N_max))\n ide = tensor(ide, identity(N_max))\n\noperators_list.append(a)\noperators_list.append(b)\n\nfor i in range(2,lattice_size):\n c = tensor(identity(N_max), identity(N_max))\n for j in range(2,lattice_size):\n if i == j:\n c = tensor(c, destroy(N_max))\n else:\n c = tensor(c, identity(N_max))\n operators_list.append(c)\n\nomega = 1.0 # onsite energy\nJ = 0.5 # hopping term\ngamma_p = 1.0 # incohrent pump rate\ngamma_m = 1.4 # incohrent decay rate\n\nH = 0*a\nfor i in range(lattice_size):\n site = operators_list[i]\n nearest = operators_list[(i + 1)%lattice_size]\n H = H + omega*(ide - 2*site.dag()*site)\n H = H + J*(site*nearest.dag() + site.dag()*nearest)\n \nc_ops_minus = []\nc_ops_plus = []\n\nfor j in operators_list:\n c_ops_minus.append(np.sqrt(gamma_m)*j)\n c_ops_plus.append(np.sqrt(gamma_p)*j.dag())\n\nrepresentatives = find_representative_traslation_and_Zn(N_max, lattice_size, lattice_size + 1)\n[rotation, sectors] = rotation_matrix(N_max, lattice_size, representatives)\nrotation = Qobj(rotation, dims = H.dims)\nrotated_Hamiltonian = rotation*H*rotation.dag()\nappropriate_jumps_minus = build_appropriate_jumps(lattice_size, c_ops_minus,rotation)\nappropriate_jumps_plus = build_appropriate_jumps(lattice_size, c_ops_plus,rotation)\n\n# now you have the \"rotated_Hamiltonian\" which is correctly dived in the symmetry \n# sectors, and \"appropriate_jumps_minus\", which describe jump between symmetry\n# sectors\n\n### visualisation\nplt.matshow(np.abs(H.full()))\nplt.matshow(np.abs(rotated_Hamiltonian.full()))\nplt.matshow(np.abs(c_ops_minus[1].full()))\nplt.matshow(np.abs(appropriate_jumps_minus[1].full()))\nplt.matshow(np.abs(c_ops_plus[1].full()))\nplt.matshow(np.abs(appropriate_jumps_plus[1].full()))\n\n#check the eigenvalues graphically\nplt.figure(15)\nplt.plot(np.sort(rotated_Hamiltonian.eigenenergies()))\nplt.plot(np.sort(H.eigenenergies()))\n\n#and by comparing the eigenvalues\nsorted_eigenvalues = np.sort(rotated_Hamiltonian.eigenenergies()), -np.sort(H.eigenenergies())\nprint(np.sum(np.abs(np.add(sorted_eigenvalues))))\n\n#effect on the wavefunction\npsi0 = tensor(basis(N_max,0), basis(N_max,0))\n\nfor j in range(2, lattice_size):\n psi0 = tensor(psi0, basis(N_max,0))\n \nevol = -1.j*2*rotated_Hamiltonian\nevol = evol.expm() \npure_evolution = evol*psi0\npure_evolution = pure_evolution/np.sqrt(pure_evolution.norm())\n\nplt.matshow(np.abs(pure_evolution.full()))\n# effects of one jumps\n\nfor j in appropriate_jumps_plus:\n plt.matshow(np.abs(evol*j*pure_evolution.full()))\n\n# effects of several jumps\nfor j in appropriate_jumps_plus:\n for k in appropriate_jumps_plus:\n plt.matshow(np.abs(evol*k*evol*j*pure_evolution.full()))\n"
] | [
[
"numpy.add",
"numpy.sqrt",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
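The tiqs example validates its sector rotation by comparing sorted spectra before and after the unitary change of basis. That invariant is easy to check in isolation; below is a minimal NumPy sketch with a random Hermitian matrix standing in for the chain Hamiltonian (all names here are illustrative, not from the record):

import numpy as np

rng = np.random.default_rng(0)
n = 16
A = rng.standard_normal((n, n)) + 1j * rng.standard_normal((n, n))
H = (A + A.conj().T) / 2           # random Hermitian stand-in for the Hamiltonian
Q, _ = np.linalg.qr(A)             # QR of a complex matrix yields a unitary Q
H_rot = Q @ H @ Q.conj().T         # the same operator in the rotated basis
diff = np.sort(np.linalg.eigvalsh(H_rot)) - np.sort(np.linalg.eigvalsh(H))
print(np.sum(np.abs(diff)))        # ~1e-13: the spectrum is unchanged

Any basis rotation that block-diagonalizes H must pass this test, so a near-zero residual is a cheap sanity check before trusting the sector structure seen in the matshow plots.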
lambdabaa/tensorflow | [
"b7e7addbd06c3ba414565f1cd50d734a45f6e12d",
"b7e7addbd06c3ba414565f1cd50d734a45f6e12d",
"b7e7addbd06c3ba414565f1cd50d734a45f6e12d",
"b7e7addbd06c3ba414565f1cd50d734a45f6e12d"
] | [
"tensorflow/python/__init__.py",
"tensorflow/python/keras/saving/saved_model/save_impl.py",
"tensorflow/lite/tools/pip_package/setup.py",
"tensorflow/python/keras/optimizers.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Import core names of TensorFlow.\n\nPrograms that want to build TensorFlow Ops and Graphs without having to import\nthe constructors and utilities individually can import this file:\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\"\"\"\n\nimport ctypes\nimport importlib\nimport sys\nimport traceback\n\n# TODO(drpng): write up instructions for editing this file in a doc and point to\n# the doc instead.\n# If you want to edit this file to expose modules in public tensorflow API, you\n# need to follow these steps:\n# 1. Consult with tensorflow team and get approval for adding a new API to the\n# public interface.\n# 2. Document the module in the gen_docs_combined.py.\n# 3. Import the module in the main tensorflow namespace by adding an import\n# statement in this file.\n# 4. Sanitize the entry point by making sure that your module does not expose\n# transitively imported modules used for implementation, such as os, sys.\n\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import,g-bad-import-order,g-import-not-at-top\n\nimport numpy as np\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python import _pywrap_utils\nfrom tensorflow.python import _pywrap_tfprof\nfrom tensorflow.python import _pywrap_events_writer\nfrom tensorflow.python import _pywrap_util_port\nfrom tensorflow.python import _pywrap_stat_summarizer\n\n# Protocol buffers\nfrom tensorflow.core.framework.graph_pb2 import *\nfrom tensorflow.core.framework.node_def_pb2 import *\nfrom tensorflow.core.framework.summary_pb2 import *\nfrom tensorflow.core.framework.attr_value_pb2 import *\nfrom tensorflow.core.protobuf.meta_graph_pb2 import TensorInfo\nfrom tensorflow.core.protobuf.meta_graph_pb2 import MetaGraphDef\nfrom tensorflow.core.protobuf.config_pb2 import *\nfrom tensorflow.core.protobuf.tensorflow_server_pb2 import *\nfrom tensorflow.core.util.event_pb2 import *\n\n# Framework\nfrom tensorflow.python.framework.framework_lib import * # pylint: disable=redefined-builtin\nfrom tensorflow.python.framework.versions import *\nfrom tensorflow.python.framework import config\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import graph_util\n\n# Session\nfrom tensorflow.python.client.client_lib import *\n\n# Ops\nfrom tensorflow.python.ops.standard_ops import *\n\n# Namespaces\nfrom tensorflow.python.ops import initializers_ns as initializers\n\n# pylint: enable=wildcard-import\n\n# Bring in subpackages.\nfrom tensorflow.python import data\nfrom tensorflow.python import distribute\nfrom tensorflow.python import keras\nfrom tensorflow.python.feature_column import feature_column_lib as feature_column\nfrom tensorflow.python.layers import layers\nfrom tensorflow.python.module import module\nfrom 
tensorflow.python.ops import bitwise_ops as bitwise\nfrom tensorflow.python.ops import gradient_checker_v2\nfrom tensorflow.python.ops import image_ops as image\nfrom tensorflow.python.ops import manip_ops as manip\nfrom tensorflow.python.ops import metrics\nfrom tensorflow.python.ops import nn\nfrom tensorflow.python.ops import ragged\nfrom tensorflow.python.ops import sets\nfrom tensorflow.python.ops import stateful_random_ops\nfrom tensorflow.python.ops.distributions import distributions\nfrom tensorflow.python.ops.linalg import linalg\nfrom tensorflow.python.ops.losses import losses\nfrom tensorflow.python.ops.signal import signal\nfrom tensorflow.python.profiler import profiler\nfrom tensorflow.python.saved_model import saved_model\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.tpu import api\nfrom tensorflow.python.user_ops import user_ops\nfrom tensorflow.python.util import compat\n\n# Import to make sure the ops are registered.\nfrom tensorflow.python.ops import gen_audio_ops\nfrom tensorflow.python.ops import gen_boosted_trees_ops\nfrom tensorflow.python.ops import gen_cudnn_rnn_ops\nfrom tensorflow.python.ops import gen_rnn_ops\nfrom tensorflow.python.ops import gen_sendrecv_ops\n\n# Import the names from python/training.py as train.Name.\nfrom tensorflow.python.training import training as train\n\n# Sub-package for performing i/o directly instead of via ops in a graph.\nfrom tensorflow.python.lib.io import python_io\n\n# Make some application and test modules available.\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import sysconfig\nfrom tensorflow.python.platform import test\n\nfrom tensorflow.python.compat import v2_compat\n\nfrom tensorflow.python.util.all_util import make_all\nfrom tensorflow.python.util.tf_export import tf_export\n\n# Eager execution\nfrom tensorflow.python.eager.context import executing_eagerly\nfrom tensorflow.python.eager.remote import connect_to_remote_host\nfrom tensorflow.python.eager.def_function import function\nfrom tensorflow.python.framework.ops import enable_eager_execution\n\n# Check whether TF2_BEHAVIOR is turned on.\nfrom tensorflow.python.eager import monitoring as _monitoring\nfrom tensorflow.python import tf2 as _tf2\n_tf2_gauge = _monitoring.BoolGauge('/tensorflow/api/tf2_enable',\n 'Environment variable TF2_BEHAVIOR is set\".')\n_tf2_gauge.get_cell().set(_tf2.enabled())\n\n# Necessary for the symbols in this module to be taken into account by\n# the namespace management system (API decorators).\nfrom tensorflow.python.ops import rnn\nfrom tensorflow.python.ops import rnn_cell\n\n# TensorFlow Debugger (tfdbg).\nfrom tensorflow.python.debug.lib import check_numerics_callback\n\n# XLA JIT compiler APIs.\nfrom tensorflow.python.compiler.xla import jit\nfrom tensorflow.python.compiler.xla import xla\n\n# MLIR APIs.\nfrom tensorflow.python.compiler.mlir import mlir\n\n# Required due to `rnn` and `rnn_cell` not being imported in `nn` directly\n# (due to a circular dependency issue: rnn depends on layers).\nnn.dynamic_rnn = rnn.dynamic_rnn\nnn.static_rnn = rnn.static_rnn\nnn.raw_rnn = rnn.raw_rnn\nnn.bidirectional_dynamic_rnn = rnn.bidirectional_dynamic_rnn\nnn.static_state_saving_rnn = rnn.static_state_saving_rnn\nnn.rnn_cell = rnn_cell\n\n# Export protos\n# pylint: 
disable=undefined-variable\ntf_export(v1=['AttrValue'])(AttrValue)\ntf_export(v1=['ConfigProto'])(ConfigProto)\ntf_export(v1=['Event', 'summary.Event'])(Event)\ntf_export(v1=['GPUOptions'])(GPUOptions)\ntf_export(v1=['GraphDef'])(GraphDef)\ntf_export(v1=['GraphOptions'])(GraphOptions)\ntf_export(v1=['HistogramProto'])(HistogramProto)\ntf_export(v1=['LogMessage'])(LogMessage)\ntf_export(v1=['MetaGraphDef'])(MetaGraphDef)\ntf_export(v1=['NameAttrList'])(NameAttrList)\ntf_export(v1=['NodeDef'])(NodeDef)\ntf_export(v1=['OptimizerOptions'])(OptimizerOptions)\ntf_export(v1=['RunMetadata'])(RunMetadata)\ntf_export(v1=['RunOptions'])(RunOptions)\ntf_export(v1=['SessionLog', 'summary.SessionLog'])(SessionLog)\ntf_export(v1=['Summary', 'summary.Summary'])(Summary)\ntf_export(v1=['summary.SummaryDescription'])(SummaryDescription)\ntf_export(v1=['SummaryMetadata'])(SummaryMetadata)\ntf_export(v1=['summary.TaggedRunMetadata'])(TaggedRunMetadata)\ntf_export(v1=['TensorInfo'])(TensorInfo)\n# pylint: enable=undefined-variable\n\n# Special dunders that we choose to export:\n_exported_dunders = set([\n '__version__',\n '__git_version__',\n '__compiler_version__',\n '__cxx11_abi_flag__',\n '__monolithic_build__',\n])\n\n# Expose symbols minus dunders, unless they are whitelisted above.\n# This is necessary to export our dunders.\n__all__ = [s for s in dir() if s in _exported_dunders or not s.startswith('_')]\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras SavedModel serialization.\n\nTODO (kathywu): Move to layer_serialization.py. Some model-specific logic should\ngo to model_serialization.py.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport weakref\n\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.engine import input_spec\nfrom tensorflow.python.keras.saving import saving_utils\nfrom tensorflow.python.keras.saving.saved_model import constants\nfrom tensorflow.python.keras.saving.saved_model import load as keras_load\nfrom tensorflow.python.keras.saving.saved_model import serialized_attributes\nfrom tensorflow.python.keras.saving.saved_model import utils\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import layer_utils as trackable_layer_utils\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.python.util.lazy_loader import LazyLoader\n\n# To avoid circular dependencies between keras/engine and keras/saving,\n# code in keras/saving must delay imports.\n\n# TODO(b/134426265): Switch back to single-quotes to match the rest of the file\n# once the issue with copybara is fixed.\n# pylint:disable=g-inconsistent-quotes\nbase_layer = LazyLoader(\n \"base_layer\", globals(),\n \"tensorflow.python.keras.engine.base_layer\")\ninput_layer = LazyLoader(\n \"input_layer\", globals(),\n \"tensorflow.python.keras.engine.input_layer\")\ntraining_lib = LazyLoader(\n \"training_lib\", globals(),\n \"tensorflow.python.keras.engine.training\")\nsequential_lib = LazyLoader(\n \"sequential_lib\", globals(),\n \"tensorflow.python.keras.engine.sequential\")\n# pylint:enable=g-inconsistent-quotes\n\n\ndef should_skip_serialization(layer):\n \"\"\"Skip serializing extra objects and functions if layer inputs aren't set.\"\"\"\n if isinstance(layer, training_lib.Model):\n try:\n # pylint:disable=pointless-statement\n layer.inputs\n layer.input_names\n # pylint:enable=pointless-statement\n except AttributeError:\n # If the model does not have inputs set, because it was not called or its\n # input shapes were not recorded, we won't have a signature so can't trace\n # a function. 
But the user may still save an object with this Model\n # attached; we won't fail the whole tf.saved_model.save.\n logging.warning('Skipping full serialization of Keras model {}, because '\n 'its inputs are not defined.'.format(layer))\n return True\n else:\n return False\n else:\n if not layer.built:\n logging.warning('Skipping full serialization of Keras layer {}, because '\n 'it is not built.'.format(layer))\n return True\n return False\n\n\ndef wrap_layer_objects(layer, serialization_cache):\n \"\"\"Returns extra trackable objects to attach to the serialized layer.\n\n Args:\n layer: Keras Layer object.\n serialization_cache: Dictionary shared between all objects during\n serialization.\n\n Returns:\n A dictionary containing all checkpointable objects from a\n SerializedAttributes object. See LayerAttributes and ModelAttributes for\n entire list of objects\n \"\"\"\n # Wrap all regularization losses as tf.functions.\n # First, generate list of all regularization losses in this layer and\n # sublayers.\n all_losses = layer._callable_losses[:] # pylint: disable=protected-access\n for child_layer in _list_all_layers(layer):\n all_losses.extend(child_layer._callable_losses) # pylint: disable=protected-access\n # Next, wrap all loss functions as tf.functions. Use the serialization cache\n # to store already-wrapped functions.\n keras_loss_cache = serialization_cache.setdefault('keras_losses', {})\n wrapped_loss_functions = []\n for loss_fn in all_losses:\n if loss_fn in keras_loss_cache:\n wrapped_loss_functions.append(keras_loss_cache[loss_fn])\n else:\n wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))\n keras_loss_cache[loss_fn] = wrapped_loss\n wrapped_loss_functions.append(wrapped_loss)\n wrapped_layer_losses = [keras_loss_cache[fn]\n for fn in layer._callable_losses[:]] # pylint: disable=protected-access\n return dict(\n variables=data_structures.ListWrapper(layer.variables),\n trainable_variables=data_structures.ListWrapper(\n layer.trainable_variables),\n non_trainable_variables=data_structures.ListWrapper(\n layer.non_trainable_variables),\n layers=data_structures.ListWrapper(_list_all_layers(layer)),\n metrics=data_structures.ListWrapper(layer.metrics),\n regularization_losses=data_structures.ListWrapper(\n wrapped_loss_functions),\n layer_regularization_losses=data_structures.ListWrapper(\n wrapped_layer_losses))\n\n\ndef wrap_layer_functions(layer, serialization_cache):\n \"\"\"Returns dict of wrapped layer call function and losses in tf.functions.\n\n Args:\n layer: Keras Layer object.\n serialization_cache: Dictionary shared between all objects during\n serialization.\n\n Returns:\n A dictionary containing all keras tf.functions to serialize. See\n LayerAttributes and ModelAttributes for the list of all attributes.\n \"\"\"\n # Since Sequential models may be modified in place using model.add() or\n # model.pop(), don't use saved functions.\n if (isinstance(layer, keras_load.RevivedLayer) and\n not isinstance(layer, sequential_lib.Sequential)):\n return {fn_name: getattr(layer.keras_api, fn_name, None)\n for fn_name in serialized_attributes.LayerAttributes.all_functions}\n\n # Reset the losses of the layer and its children. 
The call function in each\n # child layer is replaced with tf.functions.\n original_fns = _replace_child_layer_functions(layer, serialization_cache)\n original_losses = _reset_layer_losses(layer)\n\n # Wrap all the layer call and activity regularizer functions.\n\n # Use LayerCallCollection to ensure that all layer call functions (__call__,\n # call with losses) are traced with the same inputs.\n call_collection = LayerCallCollection(layer)\n call_fn_with_losses = call_collection.add_function(\n _wrap_call_and_conditional_losses(layer),\n '{}_layer_call_and_return_conditional_losses'.format(layer.name))\n call_fn = call_collection.add_function(\n _extract_outputs_from_fn(layer, call_fn_with_losses),\n '{}_layer_call_fn'.format(layer.name))\n\n fns = {'call_and_return_conditional_losses': call_fn_with_losses,\n '__call__': call_fn}\n\n if layer.activity_regularizer is not None:\n fns['activity_regularizer_fn'] = _wrap_activity_regularizer(layer)\n fns['call_and_return_all_conditional_losses'] = (\n call_collection.add_function(\n _append_activity_regularizer_loss(layer,\n call_fn_with_losses,\n fns['activity_regularizer_fn']),\n '{}_layer_call_and_return_all_conditional_losses'.format(layer.name)\n ))\n else:\n fns['activity_regularizer_fn'] = None\n fns['call_and_return_all_conditional_losses'] = call_fn_with_losses\n\n # Manually trigger traces before restoring the overwritten functions. The\n # functions are traced within the layer call context to ensure that layer\n # functions (e.g. add_loss) behave as though running in graph mode.\n with base_layer_utils.call_context().enter(\n layer, inputs=None, build_graph=True, training=None, saving=True):\n for fn in fns.values():\n if fn is not None and fn.input_signature is not None:\n fn.get_concrete_function()\n\n # Restore overwritten functions and losses\n _restore_child_layer_functions(original_fns)\n _restore_layer_losses(original_losses)\n\n return fns\n\n\ndef default_save_signature(layer):\n original_losses = _reset_layer_losses(layer)\n fn = saving_utils.trace_model_call(layer)\n fn.get_concrete_function()\n _restore_layer_losses(original_losses)\n return fn\n\n\ndef _list_all_layers(obj):\n if isinstance(obj, training_lib.Model):\n return obj.layers\n else:\n return list(\n trackable_layer_utils.filter_empty_layer_containers(obj._layers)) # pylint: disable=protected-access\n\n\ndef _replace_child_layer_functions(layer, serialization_cache):\n \"\"\"Replaces functions in the children layers with wrapped tf.functions.\n\n This step allows functions from parent layers to reference the wrapped\n functions from their children layers instead of retracing the ops.\n\n This function also resets all losses stored in the layer. These are stored in\n the returned dictionary. 
Use `_restore_child_layer_functions` to restore\n the original attributes.\n\n Args:\n layer: Keras Layer object.\n serialization_cache: Dictionary shared between all objects during\n serialization.\n\n Returns:\n Dictionary mapping layer objects -> original functions and losses:\n { Child layer 1: {\n 'losses': Original losses,\n 'call': Original call function\n 'activity_regularizer': Original activity regularizer},\n Child layer 2: ...\n }\n \"\"\"\n # pylint: disable=protected-access\n original_fns = {}\n for child_layer in _list_all_layers(layer):\n if isinstance(child_layer, input_layer.InputLayer):\n continue\n\n if child_layer not in serialization_cache[constants.KERAS_CACHE_KEY]:\n layer_fns = (\n child_layer._trackable_saved_model_saver._get_serialized_attributes(\n serialization_cache).functions)\n else:\n layer_fns = (\n serialization_cache[constants.KERAS_CACHE_KEY][child_layer].functions)\n if not layer_fns:\n # This indicates either:\n # - circular dependency, which means the current layer's functions\n # should be wrapped first.\n # - Child layer's inputs are not defined, so its functions have not been\n # wrapped. In this case, no replacement is necessary so move on to the\n # next child.\n continue\n original_fns[child_layer] = {\n 'call': child_layer.call,\n 'activity_regularizer': child_layer.activity_regularizer\n }\n with trackable.no_automatic_dependency_tracking_scope(child_layer):\n try:\n child_layer.activity_regularizer = layer_fns.get(\n 'activity_regularizer_fn')\n except AttributeError:\n # Some layers have an unsettable activity regularizer.\n pass\n child_layer.call = utils.use_wrapped_call(\n child_layer, layer_fns['call_and_return_conditional_losses'],\n default_training_value=False)\n return original_fns\n # pylint: enable=protected-access\n\n\ndef _restore_child_layer_functions(original_fns):\n \"\"\"Restores attributes replaced with `_replace_child_layer_functions`.\"\"\"\n for child_layer, fns in original_fns.items():\n with trackable.no_automatic_dependency_tracking_scope(child_layer):\n child_layer.call = fns['call']\n try:\n child_layer.activity_regularizer = fns['activity_regularizer']\n except AttributeError:\n pass\n\n\n# pylint: disable=protected-access\ndef _reset_layer_losses(parent_layer):\n \"\"\"Resets losses of layer and its sublayers, and returns original losses.\"\"\"\n losses_dict = {}\n for layer in _list_all_layers(parent_layer) + [parent_layer]:\n losses_dict[layer] = {'losses': layer._losses[:],\n 'eager_losses': layer._eager_losses[:]}\n with trackable.no_automatic_dependency_tracking_scope(layer):\n layer._losses = []\n layer._eager_losses = []\n return losses_dict\n\n\ndef _restore_layer_losses(losses_dict):\n for layer in losses_dict:\n with trackable.no_automatic_dependency_tracking_scope(layer):\n layer._losses = losses_dict[layer]['losses']\n layer._eager_losses = losses_dict[layer]['eager_losses']\n# pylint: enable=protected-access\n\n\ndef layer_uses_training_bool(layer):\n \"\"\"Returns whether this layer or any of its children uses the training arg.\"\"\"\n if layer._expects_training_arg: # pylint: disable=protected-access\n return True\n visited = {layer}\n to_visit = _list_all_layers(layer)\n while to_visit:\n layer = to_visit.pop()\n if layer in visited:\n continue\n if layer._expects_training_arg: # pylint: disable=protected-access\n return True\n visited.add(layer)\n to_visit.extend(_list_all_layers(layer))\n return False\n\n\nclass LayerCallCollection(object):\n \"\"\"Groups wrapped layer call functions.\n\n This is 
used to ensure that all layer call functions are traced with the same\n inputs-\n - call\n - call_and_return_conditional_losses\n - call_and_return_all_conditional_losses\n \"\"\"\n\n def __init__(self, layer):\n self.layer = layer\n\n self.layer_call_method = _get_layer_call_method(layer)\n self._expects_training_arg = layer_uses_training_bool(layer)\n self._training_arg_index = utils.get_training_arg_index(\n self.layer_call_method)\n\n # If the layer call function has kwargs, then the traced function cannot\n # have an input signature.\n arg_spec = tf_inspect.getfullargspec(\n self.layer_call_method)\n self._has_kwargs = bool(self._expects_training_arg or\n arg_spec.defaults or\n arg_spec.kwonlyargs or\n arg_spec.varkw)\n\n self._input_signature = self._generate_input_signature(layer)\n self._functions = weakref.WeakValueDictionary()\n # Bool indicating whether this object is currently tracing the layer call\n # functions.\n self.tracing = False\n\n def _generate_input_signature(self, layer):\n \"\"\"Inspects layer object and returns the inferred input signature.\n\n Args:\n layer: Layer object.\n\n Returns:\n List of possibly nested TensorSpecs of the layer call function inputs.\n The list does not contain the `training` argument.\n \"\"\"\n if (isinstance(layer.call, def_function.Function) and\n layer.call.input_signature is not None):\n return layer.call.input_signature\n else:\n if isinstance(layer, training_lib.Model):\n return saving_utils.model_input_signature(layer)\n elif layer.input_spec is not None:\n\n def to_tensor_spec_or_none(x):\n spec = input_spec.to_tensor_spec(x, layer.dtype)\n # If the shape is too general (e.g. multiple dimensions are allowed),\n # return None so that separate functions can be generated for each\n # inferred input signature.\n # TODO(b/134962016): currently partial signatures are not supported.\n if spec.shape == tensor_shape.TensorShape(None):\n return None\n return spec\n input_signature = [nest.map_structure(\n to_tensor_spec_or_none, layer.input_spec)]\n\n return input_signature\n else:\n return None\n\n def add_trace(self, *args, **kwargs):\n \"\"\"Traces all functions with the same args and kwargs.\n\n Args:\n *args: Positional args passed to the original function.\n **kwargs: Keyword args passed to the original function.\n \"\"\"\n args = list(args)\n kwargs = kwargs.copy()\n self.tracing = True\n for fn in self._functions.values():\n # TODO(kathywu): Replace arguments with broader shapes defined in the\n # input signature.\n if self._expects_training_arg:\n def trace_with_training(value, fn=fn):\n utils.set_training_arg(value, self._training_arg_index, args, kwargs)\n with K.learning_phase_scope(value):\n fn.get_concrete_function(*args, **kwargs)\n\n trace_with_training(True)\n trace_with_training(False)\n else:\n fn.get_concrete_function(*args, **kwargs)\n self.tracing = False\n\n @property\n def fn_input_signature(self):\n \"\"\"Returns input signature for the wrapped layer call function.\"\"\"\n if self._has_kwargs:\n # Input signatures may only describe tensor arguments and kwargs are not\n # supported.\n return None\n if None in nest.flatten(self._input_signature):\n # TODO(b/134962016): If input signature cannot be partially defined.\n return None\n return self._input_signature\n\n def training_arg_was_passed(self, args, kwargs):\n if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access\n return (utils.get_training_arg(self._training_arg_index, args, kwargs)\n is not None)\n else:\n 
return self.layer._call_arg_was_passed( # pylint: disable=protected-access\n 'training', args, kwargs, inputs_in_args=True)\n\n def get_training_arg_value(self, args, kwargs):\n if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access\n return utils.get_training_arg(self._training_arg_index, args, kwargs)\n else:\n return self.layer._get_call_arg_value( # pylint: disable=protected-access\n 'training', args, kwargs, inputs_in_args=True)\n\n def _maybe_wrap_with_training_arg(self, call_fn):\n \"\"\"Wraps call function with added training argument if necessary.\"\"\"\n if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access\n # Add training arg to wrapper function.\n arg_spec = tf_inspect.getfullargspec(call_fn)\n args = arg_spec.args + ['training']\n defaults = list(arg_spec.defaults or [])\n defaults.append(False)\n new_arg_spec = tf_inspect.FullArgSpec(\n args=args,\n varargs=arg_spec.varargs,\n varkw=arg_spec.varkw,\n defaults=defaults,\n kwonlyargs=arg_spec.kwonlyargs,\n kwonlydefaults=arg_spec.kwonlydefaults,\n annotations=arg_spec.annotations)\n\n # Set new training arg index\n self._training_arg_index = len(args) - 1\n if tf_inspect.ismethod(call_fn):\n self._training_arg_index -= 1\n\n def wrap_with_training_arg(*args, **kwargs):\n # Remove the training value, since the original call_fn does not expect\n # a training arg. Instead, the training value will be propagated using\n # the call context created in LayerCall.\n args = list(args)\n kwargs = kwargs.copy()\n utils.remove_training_arg(self._training_arg_index, args, kwargs)\n return call_fn(*args, **kwargs)\n\n return tf_decorator.make_decorator(\n target=call_fn,\n decorator_func=wrap_with_training_arg,\n decorator_argspec=new_arg_spec)\n\n return call_fn\n\n def add_function(self, call_fn, name):\n \"\"\"Adds a layer call function to the collection.\"\"\"\n self._functions[name] = fn = LayerCall(\n self, self._maybe_wrap_with_training_arg(call_fn), name,\n input_signature=self.fn_input_signature)\n\n if (None not in nest.flatten(self._input_signature) and\n self._has_kwargs):\n # Manually add traces for layers that have keyword arguments and have\n # a fully defined input signature.\n self.add_trace(*self._input_signature)\n return fn\n\n\ndef layer_call_wrapper(call_collection, method):\n \"\"\"Ensures layer losses are kept the same, and runs method in call context.\"\"\"\n def wrapper(*args, **kwargs):\n \"\"\"Calls method within call context.\"\"\"\n layer = call_collection.layer\n training = None\n inputs = None\n # pylint: disable=protected-access\n if (args or kwargs) and call_collection.training_arg_was_passed(\n args, kwargs):\n inputs = args[0]\n training = call_collection.get_training_arg_value(args, kwargs)\n # pylint: enable=protected-access\n original_losses = _reset_layer_losses(layer)\n with base_layer_utils.call_context().enter(\n layer, inputs=inputs, build_graph=False, training=training,\n saving=True):\n ret = method(*args, **kwargs)\n _restore_layer_losses(original_losses)\n return ret\n return tf_decorator.make_decorator(target=method, decorator_func=wrapper)\n\n\nclass LayerCall(def_function.Function):\n \"\"\"Function that triggers traces of other functions in the same collection.\"\"\"\n\n def __init__(self, call_collection, python_function, *args, **kwargs):\n self.call_collection = call_collection\n self.original_call = call_collection.layer_call_method\n python_function = 
layer_call_wrapper(call_collection, python_function)\n super(LayerCall, self).__init__(python_function, *args, **kwargs)\n\n def __call__(self, *args, **kwargs):\n if not self.call_collection.tracing:\n self.call_collection.add_trace(*args, **kwargs)\n return super(LayerCall, self).__call__(*args, **kwargs)\n\n def get_concrete_function(self, *args, **kwargs):\n if not self.call_collection.tracing:\n self.call_collection.add_trace(*args, **kwargs)\n return super(LayerCall, self).get_concrete_function(*args, **kwargs)\n\n\ndef _wrap_call_and_conditional_losses(layer):\n \"\"\"Wraps call function that returns a tuple of (outputs, losses).\n\n The losses returned are conditional on the inputs passed to the call function.\n Unconditional losses (e.g. weight regularizeration) are wrapped separately.\n\n Args:\n layer: a Keras layer object\n\n Returns:\n python call function that returns outputs and conditional losses -- excludes\n activity regularizer\n \"\"\"\n # Create function that generates both outputs and losses\n layer_call = _get_layer_call_method(layer)\n def call_and_return_conditional_losses(inputs, *args, **kwargs):\n return layer_call(inputs, *args, **kwargs), layer.get_losses_for(inputs)\n return _create_call_fn_decorator(layer, call_and_return_conditional_losses)\n\n\ndef _extract_outputs_from_fn(layer, call_and_return_conditional_losses):\n \"\"\"Returns a function that returns only call function outputs.\"\"\"\n if isinstance(layer, keras_load.RevivedLayer):\n return layer.keras_api.__call__ # pylint: disable=protected-access\n def call(inputs, *args, **kwargs):\n return call_and_return_conditional_losses(inputs, *args, **kwargs)[0]\n return _create_call_fn_decorator(layer, call)\n\n\ndef _append_activity_regularizer_loss(\n layer, call_fn_with_losses, activity_regularizer_fn):\n \"\"\"Appends activity regularizer loss to losses returned by the wrapped fn.\"\"\"\n def fn(inputs, *args, **kwargs):\n outputs, losses = call_fn_with_losses(inputs, *args, **kwargs)\n losses.append(activity_regularizer_fn(outputs))\n return outputs, losses\n return _create_call_fn_decorator(layer, fn)\n\n\ndef _create_call_fn_decorator(layer, wrapped_call):\n call_fn = _get_layer_call_method(layer)\n fn, arg_spec = utils.maybe_add_training_arg(\n call_fn, wrapped_call, layer._expects_training_arg, # pylint: disable=protected-access\n default_training_value=False)\n return tf_decorator.make_decorator(\n target=call_fn,\n decorator_func=fn,\n decorator_argspec=arg_spec)\n\n\ndef _wrap_unconditional_loss(loss_fn, index):\n \"\"\"Wraps callable/unconditonal loss, returning a serializable function.\"\"\"\n # Extract original loss function from partial function\n fn = loss_fn.args[0] if isinstance(loss_fn, functools.partial) else loss_fn\n if isinstance(fn, def_function.Function):\n return fn\n else:\n return def_function.Function(\n fn, 'loss_fn_{}'.format(index), input_signature=[])\n\n\ndef _wrap_activity_regularizer(layer):\n \"\"\"Wraps the activity regularizer.\"\"\"\n if isinstance(layer.activity_regularizer, def_function.Function):\n return layer.activity_regularizer\n return def_function.Function(\n layer.activity_regularizer,\n '{}_activity_regularizer'.format(layer.name),\n input_signature=[tensor_spec.TensorSpec(None, layer.dtype or K.floatx())])\n\n\ndef _get_layer_call_method(layer):\n if isinstance(layer.call, (def_function.Function, function.ConcreteFunction)):\n return layer.call.python_function\n return layer.call\n",
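The wrapping above is what `tf.saved_model.save` exercises when it serializes a Keras model: each layer's call is traced as a tf.function together with its conditional losses, and the traced `__call__` is attached to the revived object. A minimal round-trip sketch, assuming TF 2.x; the architecture and save path are illustrative and not taken from the code above:

import tensorflow as tf

# Saving triggers the layer-call wrapping implemented above.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation='relu', input_shape=(8,)),
    tf.keras.layers.Dense(1),
])
tf.saved_model.save(model, '/tmp/demo_saved_model')

# The revived object exposes the traced __call__ (see the RevivedLayer handling above).
loaded = tf.saved_model.load('/tmp/demo_saved_model')
print(loaded(tf.ones((2, 8))))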
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"TensorFlow Lite is for mobile and embedded devices.\n\nTensorFlow Lite is the official solution for running machine learning models on\nmobile and embedded devices. It enables on-device machine learning inference\nwith low latency and a small binary size on Android, iOS, and other operating\nsystems.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport multiprocessing\nimport os\nimport subprocess\n\nfrom distutils.command.build_ext import build_ext\nimport numpy\n\nfrom setuptools import Extension\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.build_py import build_py\nPACKAGE_NAME = 'tflite_runtime'\nPACKAGE_VERSION = os.environ['TENSORFLOW_VERSION']\nDOCLINES = __doc__.split('\\n')\nTENSORFLOW_DIR = os.environ['TENSORFLOW_SRC_ROOT']\n\n# Setup cross compiling\nTARGET = os.environ.get('TENSORFLOW_TARGET', None)\nif TARGET == 'rpi':\n os.environ['CXX'] = 'arm-rpi-linux-gnueabihf-g++'\n os.environ['CC'] = 'arm-rpi-linux-gnueabihf-gcc'\nelif TARGET == 'aarch64':\n os.environ['CXX'] = 'aarch64-linux-gnu-g++'\n os.environ['CC'] = 'aarch64-linux-gnu-gcc'\nMAKE_CROSS_OPTIONS = ['TARGET=%s' % TARGET] if TARGET else []\n\nRELATIVE_MAKE_DIR = os.path.join('tensorflow', 'lite', 'tools', 'make')\nMAKE_DIR = os.path.join(TENSORFLOW_DIR, RELATIVE_MAKE_DIR)\nDOWNLOADS_DIR = os.path.join(MAKE_DIR, 'downloads')\nRELATIVE_MAKEFILE_PATH = os.path.join(RELATIVE_MAKE_DIR, 'Makefile')\nDOWNLOAD_SCRIPT_PATH = os.path.join(MAKE_DIR, 'download_dependencies.sh')\n\n\n# Check physical memory and if we are on a reasonable non small SOC machine\n# with more than 4GB, use all the CPUs, otherwisxe only 1.\ndef get_build_cpus():\n physical_bytes = os.sysconf('SC_PAGESIZE') * os.sysconf('SC_PHYS_PAGES')\n if physical_bytes < (1<<30) * 4:\n return 1\n else:\n return multiprocessing.cpu_count()\n\n\ndef make_args(target='', quiet=True):\n \"\"\"Construct make command line.\"\"\"\n args = (['make', 'SHELL=/bin/bash',\n 'BUILD_WITH_NNAPI=false', '-C', TENSORFLOW_DIR]\n + MAKE_CROSS_OPTIONS +\n ['-f', RELATIVE_MAKEFILE_PATH, '-j',\n str(get_build_cpus())])\n if quiet:\n args.append('--quiet')\n if target:\n args.append(target)\n return args\n\n\ndef make_output(target):\n \"\"\"Invoke make on the target and return output.\"\"\"\n return subprocess.check_output(make_args(target)).decode('utf-8').strip()\n\n\ndef make():\n \"\"\"Invoke make to build tflite C++ sources.\n\n Build dependencies:\n apt-get install swig libjpeg-dev zlib1g-dev python3-dev python3-nump\n \"\"\"\n subprocess.check_call(make_args(quiet=False))\n\n\ndef download_dependencies():\n \"\"\"Download build dependencies if haven't done yet.\"\"\"\n if not os.path.isdir(DOWNLOADS_DIR) or not os.listdir(DOWNLOADS_DIR):\n 
subprocess.check_call(DOWNLOAD_SCRIPT_PATH)\n\n\nclass CustomBuildExt(build_ext, object):\n \"\"\"Customized build extension.\"\"\"\n\n def get_ext_filename(self, ext_name):\n if TARGET:\n ext_path = ext_name.split('.')\n return os.path.join(*ext_path) + '.so'\n return super(CustomBuildExt, self).get_ext_filename(ext_name)\n\n def run(self):\n download_dependencies()\n make()\n\n return super(CustomBuildExt, self).run()\n\n\nclass CustomBuildPy(build_py, object):\n\n def run(self):\n self.run_command('build_ext')\n return super(CustomBuildPy, self).run()\n\n\nLIB_TFLITE = 'tensorflow-lite'\nLIB_TFLITE_DIR = make_output('libdir')\n\next = Extension(\n name='%s._interpreter_wrapper' % PACKAGE_NAME,\n language='c++',\n sources=['interpreter_wrapper/interpreter_wrapper.i',\n 'interpreter_wrapper/interpreter_wrapper.cc',\n 'interpreter_wrapper/numpy.cc',\n 'interpreter_wrapper/python_error_reporter.cc',\n 'interpreter_wrapper/python_utils.cc'],\n swig_opts=['-c++',\n '-I%s' % TENSORFLOW_DIR,\n '-module', 'interpreter_wrapper',\n '-outdir', PACKAGE_NAME],\n extra_compile_args=['-std=c++11'],\n include_dirs=[TENSORFLOW_DIR,\n os.path.join(TENSORFLOW_DIR, 'tensorflow', 'lite', 'tools',\n 'pip_package'),\n numpy.get_include(),\n os.path.join(DOWNLOADS_DIR, 'flatbuffers', 'include'),\n os.path.join(DOWNLOADS_DIR, 'absl')],\n libraries=[LIB_TFLITE],\n library_dirs=[LIB_TFLITE_DIR])\n\nsetup(\n name=PACKAGE_NAME.replace('_', '-'),\n version=PACKAGE_VERSION,\n description=DOCLINES[0],\n long_description='\\n'.join(DOCLINES[2:]),\n url='https://www.tensorflow.org/lite/',\n author='Google, LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n include_package_data=True,\n keywords='tflite tensorflow tensor machine learning',\n packages=find_packages(exclude=[]),\n ext_modules=[ext],\n install_requires=[\n 'numpy >= 1.12.1',\n ],\n cmdclass={\n 'build_ext': CustomBuildExt,\n 'build_py': CustomBuildPy,\n }\n)\n",
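Once the wheel produced by this setup script is installed, inference goes through the SWIG-wrapped interpreter under the package name defined above. A minimal usage sketch; `model.tflite` is a placeholder for any converted flatbuffer model:

import numpy as np
from tflite_runtime.interpreter import Interpreter

interpreter = Interpreter(model_path='model.tflite')  # hypothetical model file
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed a dummy input matching the model's declared shape and dtype.
dummy = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]['index']))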
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=invalid-name\n\"\"\"Built-in optimizer classes.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.distribute import distribution_strategy_context\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2\nfrom tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2\nfrom tensorflow.python.keras.optimizer_v2 import adam as adam_v2\nfrom tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2\nfrom tensorflow.python.keras.optimizer_v2 import ftrl\nfrom tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2\nfrom tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2\nfrom tensorflow.python.keras.optimizer_v2 import optimizer_v2\nfrom tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2\nfrom tensorflow.python.keras.utils.generic_utils import deserialize_keras_object\nfrom tensorflow.python.keras.utils.generic_utils import serialize_keras_object\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.training import optimizer as tf_optimizer_module\nfrom tensorflow.python.training import training_util\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.util.tf_export import keras_export\n\n\nclass Optimizer(object):\n \"\"\"Abstract optimizer base class.\n\n Note: this is the parent class of all optimizers, not an actual optimizer\n that can be used for training models.\n\n All Keras optimizers support the following keyword arguments:\n\n clipnorm: float >= 0. Gradients will be clipped\n when their L2 norm exceeds this value.\n clipvalue: float >= 0. Gradients will be clipped\n when their absolute value exceeds this value.\n \"\"\"\n\n def __init__(self, **kwargs):\n allowed_kwargs = {'clipnorm', 'clipvalue'}\n for k in kwargs:\n if k not in allowed_kwargs:\n raise TypeError('Unexpected keyword argument '\n 'passed to optimizer: ' + str(k))\n # checks that clipnorm >= 0 and clipvalue >= 0\n if kwargs[k] < 0:\n raise ValueError('Expected {} >= 0, received: {}'.format(k, kwargs[k]))\n self.__dict__.update(kwargs)\n self.updates = []\n self.weights = []\n\n def get_updates(self, loss, params):\n raise NotImplementedError\n\n def get_gradients(self, loss, params):\n \"\"\"Returns gradients of `loss` with respect to `params`.\n\n Arguments:\n loss: Loss tensor.\n params: List of variables.\n\n Returns:\n List of gradient tensors.\n\n Raises:\n ValueError: In case any gradient cannot be computed (e.g. 
if gradient\n function not implemented).\n \"\"\"\n grads = K.gradients(loss, params)\n if None in grads:\n raise ValueError('An operation has `None` for gradient. '\n 'Please make sure that all of your ops have a '\n 'gradient defined (i.e. are differentiable). '\n 'Common ops without gradient: '\n 'K.argmax, K.round, K.eval.')\n if hasattr(self, 'clipnorm'):\n grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]\n if hasattr(self, 'clipvalue'):\n grads = [\n clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)\n for g in grads\n ]\n return grads\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the optimizer, from Numpy arrays.\n\n Should only be called after computing the gradients\n (otherwise the optimizer has no weights).\n\n Arguments:\n weights: a list of Numpy arrays. The number of arrays and their shape\n must match number of the dimensions of the weights of the optimizer\n (i.e. it should match the output of `get_weights`).\n\n Raises:\n ValueError: in case of incompatible weight shapes.\n \"\"\"\n params = self.weights\n if len(params) != len(weights):\n raise ValueError('Length of the specified weight list (' +\n str(len(weights)) +\n ') does not match the number of weights '\n 'of the optimizer (' + str(len(params)) + ')')\n weight_value_tuples = []\n param_values = K.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError('Optimizer weight shape ' + str(pv.shape) +\n ' not compatible with '\n 'provided weight shape ' + str(w.shape))\n weight_value_tuples.append((p, w))\n K.batch_set_value(weight_value_tuples)\n\n def get_weights(self):\n \"\"\"Returns the current value of the weights of the optimizer.\n\n Returns:\n A list of numpy arrays.\n \"\"\"\n return K.batch_get_value(self.weights)\n\n def get_config(self):\n config = {}\n if hasattr(self, 'clipnorm'):\n config['clipnorm'] = self.clipnorm\n if hasattr(self, 'clipvalue'):\n config['clipvalue'] = self.clipvalue\n return config\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n\n\nclass SGD(Optimizer):\n \"\"\"Stochastic gradient descent optimizer.\n\n Includes support for momentum,\n learning rate decay, and Nesterov momentum.\n\n Arguments:\n lr: float >= 0. Learning rate.\n momentum: float >= 0. Parameter that accelerates SGD in the relevant\n direction and dampens oscillations.\n decay: float >= 0. Learning rate decay over each update.\n nesterov: boolean. Whether to apply Nesterov momentum.\n \"\"\"\n\n def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):\n super(SGD, self).__init__(**kwargs)\n with K.name_scope(self.__class__.__name__):\n self.iterations = K.variable(0, dtype='int64', name='iterations')\n self.lr = K.variable(lr, name='lr')\n self.momentum = K.variable(momentum, name='momentum')\n self.decay = K.variable(decay, name='decay')\n self.initial_decay = decay\n self.nesterov = nesterov\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. 
+\n self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))\n # momentum\n shapes = [K.int_shape(p) for p in params]\n moments = [K.zeros(shape) for shape in shapes]\n self.weights = [self.iterations] + moments\n for p, g, m in zip(params, grads, moments):\n v = self.momentum * m - lr * g # velocity\n self.updates.append(state_ops.assign(m, v))\n\n if self.nesterov:\n new_p = p + self.momentum * v - lr * g\n else:\n new_p = p + v\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(K.get_value(self.lr)),\n 'momentum': float(K.get_value(self.momentum)),\n 'decay': float(K.get_value(self.decay)),\n 'nesterov': self.nesterov\n }\n base_config = super(SGD, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass RMSprop(Optimizer):\n \"\"\"RMSProp optimizer.\n\n It is recommended to leave the parameters of this optimizer\n at their default values\n (except the learning rate, which can be freely tuned).\n\n Arguments:\n lr: float >= 0. Learning rate.\n rho: float >= 0.\n epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.\n decay: float >= 0. Learning rate decay over each update.\n \"\"\"\n\n def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0., **kwargs):\n super(RMSprop, self).__init__(**kwargs)\n with K.name_scope(self.__class__.__name__):\n self.lr = K.variable(lr, name='lr')\n self.rho = K.variable(rho, name='rho')\n self.decay = K.variable(decay, name='decay')\n self.iterations = K.variable(0, dtype='int64', name='iterations')\n if epsilon is None:\n epsilon = K.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n self.weights = accumulators\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. +\n self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))\n\n for p, g, a in zip(params, grads, accumulators):\n # update accumulator\n new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)\n self.updates.append(state_ops.assign(a, new_a))\n new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(K.get_value(self.lr)),\n 'rho': float(K.get_value(self.rho)),\n 'decay': float(K.get_value(self.decay)),\n 'epsilon': self.epsilon\n }\n base_config = super(RMSprop, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Adagrad(Optimizer):\n \"\"\"Adagrad optimizer.\n\n Adagrad is an optimizer with parameter-specific learning rates,\n which are adapted relative to how frequently a parameter gets\n updated during training. The more updates a parameter receives,\n the smaller the updates.\n\n It is recommended to leave the parameters of this optimizer\n at their default values.\n\n # Arguments\n lr: float >= 0. Initial learning rate.\n epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.\n decay: float >= 0. 
Learning rate decay over each update.\n\n # References\n - [Adaptive Subgradient Methods for Online Learning and Stochastic\n Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)\n \"\"\"\n\n def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):\n super(Adagrad, self).__init__(**kwargs)\n with K.name_scope(self.__class__.__name__):\n self.lr = K.variable(lr, name='lr')\n self.decay = K.variable(decay, name='decay')\n self.iterations = K.variable(0, dtype='int64', name='iterations')\n if epsilon is None:\n epsilon = K.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n shapes = [K.int_shape(p) for p in params]\n accumulators = [K.zeros(shape) for shape in shapes]\n self.weights = accumulators\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. +\n self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))\n\n for p, g, a in zip(params, grads, accumulators):\n new_a = a + math_ops.square(g) # update accumulator\n self.updates.append(state_ops.assign(a, new_a))\n new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(K.get_value(self.lr)),\n 'decay': float(K.get_value(self.decay)),\n 'epsilon': self.epsilon\n }\n base_config = super(Adagrad, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Adadelta(Optimizer):\n \"\"\"Adadelta optimizer.\n\n Adadelta is a more robust extension of Adagrad\n that adapts learning rates based on a moving window of gradient updates,\n instead of accumulating all past gradients. This way, Adadelta continues\n learning even when many updates have been done. Compared to Adagrad, in the\n original version of Adadelta you don't have to set an initial learning\n rate. In this version, initial learning rate and decay factor can\n be set, as in most other Keras optimizers.\n\n It is recommended to leave the parameters of this optimizer\n at their default values.\n\n # Arguments\n lr: float >= 0. Initial learning rate, defaults to 1.\n It is recommended to leave it at the default value.\n rho: float >= 0. Adadelta decay factor, corresponding to fraction of\n gradient to keep at each time step.\n epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.\n decay: float >= 0. 
Initial learning rate decay.\n\n # References\n - [Adadelta - an adaptive learning rate\n method](http://arxiv.org/abs/1212.5701)\n \"\"\"\n\n def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0., **kwargs):\n super(Adadelta, self).__init__(**kwargs)\n with K.name_scope(self.__class__.__name__):\n self.lr = K.variable(lr, name='lr')\n self.decay = K.variable(decay, name='decay')\n self.iterations = K.variable(0, dtype='int64', name='iterations')\n if epsilon is None:\n epsilon = K.epsilon()\n self.rho = rho\n self.epsilon = epsilon\n self.initial_decay = decay\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n shapes = [K.int_shape(p) for p in params]\n accumulators = [K.zeros(shape) for shape in shapes]\n delta_accumulators = [K.zeros(shape) for shape in shapes]\n self.weights = accumulators + delta_accumulators\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. +\n self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))\n\n for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):\n # update accumulator\n new_a = self.rho * a + (1. - self.rho) * math_ops.square(g)\n self.updates.append(state_ops.assign(a, new_a))\n\n # use the new accumulator and the *old* delta_accumulator\n update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)\n new_p = p - lr * update\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n\n # update delta_accumulator\n new_d_a = self.rho * d_a + (1 - self.rho) * math_ops.square(update)\n self.updates.append(state_ops.assign(d_a, new_d_a))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(K.get_value(self.lr)),\n 'rho': self.rho,\n 'decay': float(K.get_value(self.decay)),\n 'epsilon': self.epsilon\n }\n base_config = super(Adadelta, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Adam(Optimizer):\n \"\"\"Adam optimizer.\n\n Default parameters follow those provided in the original paper.\n\n Arguments:\n lr: float >= 0. Learning rate.\n beta_1: float, 0 < beta < 1. Generally close to 1.\n beta_2: float, 0 < beta < 1. Generally close to 1.\n epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.\n decay: float >= 0. Learning rate decay over each update.\n amsgrad: boolean. Whether to apply the AMSGrad variant of this algorithm\n from the paper \"On the Convergence of Adam and Beyond\".\n \"\"\"\n\n def __init__(self,\n lr=0.001,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=None,\n decay=0.,\n amsgrad=False,\n **kwargs):\n super(Adam, self).__init__(**kwargs)\n with K.name_scope(self.__class__.__name__):\n self.iterations = K.variable(0, dtype='int64', name='iterations')\n self.lr = K.variable(lr, name='lr')\n self.beta_1 = K.variable(beta_1, name='beta_1')\n self.beta_2 = K.variable(beta_2, name='beta_2')\n self.decay = K.variable(decay, name='decay')\n if epsilon is None:\n epsilon = K.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n self.amsgrad = amsgrad\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. 
+\n self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))\n\n with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):\n t = math_ops.cast(self.iterations, K.floatx())\n lr_t = lr * (\n K.sqrt(1. - math_ops.pow(self.beta_2, t)) /\n (1. - math_ops.pow(self.beta_1, t)))\n\n ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n if self.amsgrad:\n vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]\n else:\n vhats = [K.zeros(1) for _ in params]\n self.weights = [self.iterations] + ms + vs + vhats\n\n for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):\n m_t = (self.beta_1 * m) + (1. - self.beta_1) * g\n v_t = (self.beta_2 * v) + (1. - self.beta_2) * math_ops.square(g)\n if self.amsgrad:\n vhat_t = math_ops.maximum(vhat, v_t)\n p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)\n self.updates.append(state_ops.assign(vhat, vhat_t))\n else:\n p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)\n\n self.updates.append(state_ops.assign(m, m_t))\n self.updates.append(state_ops.assign(v, v_t))\n new_p = p_t\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(K.get_value(self.lr)),\n 'beta_1': float(K.get_value(self.beta_1)),\n 'beta_2': float(K.get_value(self.beta_2)),\n 'decay': float(K.get_value(self.decay)),\n 'epsilon': self.epsilon,\n 'amsgrad': self.amsgrad\n }\n base_config = super(Adam, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Adamax(Optimizer):\n \"\"\"Adamax optimizer from Adam paper's Section 7.\n\n It is a variant of Adam based on the infinity norm.\n Default parameters follow those provided in the paper.\n\n Arguments:\n lr: float >= 0. Learning rate.\n beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.\n epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.\n decay: float >= 0. Learning rate decay over each update.\n \"\"\"\n\n def __init__(self,\n lr=0.002,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=None,\n decay=0.,\n **kwargs):\n super(Adamax, self).__init__(**kwargs)\n with K.name_scope(self.__class__.__name__):\n self.iterations = K.variable(0, dtype='int64', name='iterations')\n self.lr = K.variable(lr, name='lr')\n self.beta_1 = K.variable(beta_1, name='beta_1')\n self.beta_2 = K.variable(beta_2, name='beta_2')\n self.decay = K.variable(decay, name='decay')\n if epsilon is None:\n epsilon = K.epsilon()\n self.epsilon = epsilon\n self.initial_decay = decay\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n\n lr = self.lr\n if self.initial_decay > 0:\n lr = lr * ( # pylint: disable=g-no-augmented-assignment\n 1. /\n (1. +\n self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))\n\n with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):\n t = math_ops.cast(self.iterations, K.floatx())\n lr_t = lr / (1. - math_ops.pow(self.beta_1, t))\n\n shapes = [K.int_shape(p) for p in params]\n # zero init of 1st moment\n ms = [K.zeros(shape) for shape in shapes]\n # zero init of exponentially weighted infinity norm\n us = [K.zeros(shape) for shape in shapes]\n self.weights = [self.iterations] + ms + us\n\n for p, g, m, u in zip(params, grads, ms, us):\n\n m_t = (self.beta_1 * m) + (1. 
- self.beta_1) * g\n u_t = math_ops.maximum(self.beta_2 * u, math_ops.abs(g))\n p_t = p - lr_t * m_t / (u_t + self.epsilon)\n\n self.updates.append(state_ops.assign(m, m_t))\n self.updates.append(state_ops.assign(u, u_t))\n new_p = p_t\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(K.get_value(self.lr)),\n 'beta_1': float(K.get_value(self.beta_1)),\n 'beta_2': float(K.get_value(self.beta_2)),\n 'decay': float(K.get_value(self.decay)),\n 'epsilon': self.epsilon\n }\n base_config = super(Adamax, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Nadam(Optimizer):\n \"\"\"Nesterov Adam optimizer.\n\n Much like Adam is essentially RMSprop with momentum,\n Nadam is Adam RMSprop with Nesterov momentum.\n\n Default parameters follow those provided in the paper.\n It is recommended to leave the parameters of this optimizer\n at their default values.\n\n Arguments:\n lr: float >= 0. Learning rate.\n beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.\n epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.\n \"\"\"\n\n def __init__(self,\n lr=0.002,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=None,\n schedule_decay=0.004,\n **kwargs):\n super(Nadam, self).__init__(**kwargs)\n with K.name_scope(self.__class__.__name__):\n self.iterations = K.variable(0, dtype='int64', name='iterations')\n self.m_schedule = K.variable(1., name='m_schedule')\n self.lr = K.variable(lr, name='lr')\n self.beta_1 = K.variable(beta_1, name='beta_1')\n self.beta_2 = K.variable(beta_2, name='beta_2')\n if epsilon is None:\n epsilon = K.epsilon()\n self.epsilon = epsilon\n self.schedule_decay = schedule_decay\n\n def get_updates(self, loss, params):\n grads = self.get_gradients(loss, params)\n self.updates = []\n\n with ops.control_dependencies([state_ops.assign_add(self.iterations, 1)]):\n t = math_ops.cast(self.iterations, K.floatx())\n\n # Due to the recommendations in [2], i.e. warming momentum schedule\n momentum_cache_t = self.beta_1 * (\n 1. - 0.5 *\n (math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))\n momentum_cache_t_1 = self.beta_1 * (\n 1. - 0.5 *\n (math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))\n m_schedule_new = self.m_schedule * momentum_cache_t\n m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1\n self.updates.append((self.m_schedule, m_schedule_new))\n\n shapes = [K.int_shape(p) for p in params]\n ms = [K.zeros(shape) for shape in shapes]\n vs = [K.zeros(shape) for shape in shapes]\n\n self.weights = [self.iterations, self.m_schedule] + ms + vs\n\n for p, g, m, v in zip(params, grads, ms, vs):\n # the following equations given in [1]\n g_prime = g / (1. - m_schedule_new)\n m_t = self.beta_1 * m + (1. - self.beta_1) * g\n m_t_prime = m_t / (1. - m_schedule_next)\n v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)\n v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))\n m_t_bar = (1. 
-\n momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime\n\n self.updates.append(state_ops.assign(m, m_t))\n self.updates.append(state_ops.assign(v, v_t))\n\n p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)\n new_p = p_t\n\n # Apply constraints.\n if getattr(p, 'constraint', None) is not None:\n new_p = p.constraint(new_p)\n\n self.updates.append(state_ops.assign(p, new_p))\n return self.updates\n\n def get_config(self):\n config = {\n 'lr': float(K.get_value(self.lr)),\n 'beta_1': float(K.get_value(self.beta_1)),\n 'beta_2': float(K.get_value(self.beta_2)),\n 'epsilon': self.epsilon,\n 'schedule_decay': self.schedule_decay\n }\n base_config = super(Nadam, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass TFOptimizer(Optimizer, trackable.Trackable):\n \"\"\"Wrapper class for native TensorFlow optimizers.\"\"\"\n\n def __init__(self, optimizer, iterations=None): # pylint: disable=super-init-not-called\n self.optimizer = optimizer\n self._track_trackable(optimizer, name='optimizer')\n if iterations is None:\n with K.name_scope(self.__class__.__name__):\n self.iterations = K.variable(0, dtype='int64', name='iterations')\n else:\n self.iterations = iterations\n self._track_trackable(self.iterations, name='global_step')\n\n def apply_gradients(self, grads):\n self.optimizer.apply_gradients(grads, global_step=self.iterations)\n\n def get_grads(self, loss, params):\n return self.optimizer.compute_gradients(loss, params)\n\n def get_updates(self, loss, params):\n if distribution_strategy_context.has_strategy():\n self.updates = []\n\n if not params:\n # After the model vars have been created, the second call to get_updates\n # is called with params as an empty list. This ensures that we call\n # compute_gradients with params=None.\n grads = self.optimizer.compute_gradients(loss)\n else:\n grads = self.optimizer.compute_gradients(loss, params)\n global_step = training_util.get_global_step()\n opt_update = self.optimizer.apply_gradients(grads, global_step)\n else:\n if not params:\n self.updates = [state_ops.assign_add(self.iterations, 1)]\n return self.updates\n\n # Updates list starts out empty because the iterations variable is\n # incremented in optimizer.apply_gradients()\n self.updates = []\n grads = self.optimizer.compute_gradients(loss, params)\n opt_update = self.optimizer.apply_gradients(\n grads, global_step=self.iterations)\n\n self.updates.append(opt_update)\n return self.updates\n\n @property\n def weights(self):\n raise NotImplementedError\n\n def get_config(self):\n raise NotImplementedError\n\n def from_config(self, config):\n raise NotImplementedError\n\n\n# Aliases.\n\nsgd = SGD\nrmsprop = RMSprop\nadagrad = Adagrad\nadadelta = Adadelta\nadam = Adam\nadamax = Adamax\nnadam = Nadam\n\n\n@keras_export('keras.optimizers.serialize')\ndef serialize(optimizer):\n return serialize_keras_object(optimizer)\n\n\n@keras_export('keras.optimizers.deserialize')\ndef deserialize(config, custom_objects=None):\n \"\"\"Inverse of the `serialize` function.\n\n Arguments:\n config: Optimizer configuration dictionary.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras Optimizer instance.\n \"\"\"\n from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer # pylint: disable=g-import-not-at-top\n all_classes = {\n 'adadelta': adadelta_v2.Adadelta,\n 'adagrad': adagrad_v2.Adagrad,\n 'adam': 
adam_v2.Adam,\n 'adamax': adamax_v2.Adamax,\n 'nadam': nadam_v2.Nadam,\n 'rmsprop': rmsprop_v2.RMSprop,\n 'sgd': gradient_descent_v2.SGD,\n 'ftrl': ftrl.Ftrl,\n 'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,\n }\n\n # Make deserialization case-insensitive for built-in optimizers.\n if config['class_name'].lower() in all_classes:\n config['class_name'] = config['class_name'].lower()\n return deserialize_keras_object(\n config,\n module_objects=all_classes,\n custom_objects=custom_objects,\n printable_module_name='optimizer')\n\n\n@keras_export('keras.optimizers.get')\ndef get(identifier):\n \"\"\"Retrieves a Keras Optimizer instance.\n\n Arguments:\n identifier: Optimizer identifier, one of\n - String: name of an optimizer\n - Dictionary: configuration dictionary. - Keras Optimizer instance (it\n will be returned unchanged). - TensorFlow Optimizer instance (it\n will be wrapped as a Keras Optimizer).\n\n Returns:\n A Keras Optimizer instance.\n\n Raises:\n ValueError: If `identifier` cannot be interpreted.\n \"\"\"\n if isinstance(identifier, (Optimizer, optimizer_v2.OptimizerV2)):\n return identifier\n # Wrap TF optimizer instances\n elif isinstance(identifier, tf_optimizer_module.Optimizer):\n opt = TFOptimizer(identifier)\n K.track_tf_optimizer(opt)\n return opt\n elif isinstance(identifier, dict):\n return deserialize(identifier)\n elif isinstance(identifier, six.string_types):\n config = {'class_name': str(identifier), 'config': {}}\n return deserialize(config)\n else:\n raise ValueError('Could not interpret optimizer identifier:', identifier)\n"
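The serialize/deserialize/get helpers at the bottom of this module round-trip optimizers through plain config dictionaries, and deserialization maps string names onto the v2 classes. A short sketch via the public tf.keras aliases:

import tensorflow as tf

opt = tf.keras.optimizers.get('adam')            # string identifier -> Adam instance
config = tf.keras.optimizers.serialize(opt)      # {'class_name': ..., 'config': {...}}
clone = tf.keras.optimizers.deserialize(config)  # reconstruct an equivalent optimizer
print(type(clone).__name__, clone.get_config()['learning_rate'])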
] | [
[
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.tf2.enabled",
"tensorflow.python.eager.monitoring.BoolGauge"
],
[
"tensorflow.python.util.tf_inspect.ismethod",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.training.tracking.layer_utils.filter_empty_layer_containers",
"tensorflow.python.util.tf_inspect.getfullargspec",
"tensorflow.python.util.tf_decorator.make_decorator",
"tensorflow.python.keras.backend.learning_phase_scope",
"tensorflow.python.keras.engine.input_spec.to_tensor_spec",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.util.nest.map_structure",
"tensorflow.python.training.tracking.base.no_automatic_dependency_tracking_scope",
"tensorflow.python.training.tracking.data_structures.ListWrapper",
"tensorflow.python.keras.engine.base_layer_utils.call_context",
"tensorflow.python.keras.saving.saved_model.utils.get_training_arg",
"tensorflow.python.keras.saving.saved_model.utils.remove_training_arg",
"tensorflow.python.util.tf_inspect.FullArgSpec",
"tensorflow.python.keras.saving.saved_model.utils.maybe_add_training_arg",
"tensorflow.python.keras.saving.saving_utils.model_input_signature",
"tensorflow.python.keras.saving.saved_model.utils.use_wrapped_call",
"tensorflow.python.keras.saving.saved_model.utils.get_training_arg_index",
"tensorflow.python.keras.saving.saving_utils.trace_model_call",
"tensorflow.python.keras.saving.saved_model.utils.set_training_arg",
"tensorflow.python.util.nest.flatten"
],
[
"numpy.get_include"
],
[
"tensorflow.python.keras.backend.zeros",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.keras.backend.track_tf_optimizer",
"tensorflow.python.keras.backend.batch_get_value",
"tensorflow.python.ops.state_ops.assign_add",
"tensorflow.python.keras.backend.int_shape",
"tensorflow.python.keras.backend.variable",
"tensorflow.python.keras.backend.dtype",
"tensorflow.python.distribute.distribution_strategy_context.has_strategy",
"tensorflow.python.ops.state_ops.assign",
"tensorflow.python.ops.math_ops.abs",
"tensorflow.python.training.training_util.get_global_step",
"tensorflow.python.util.tf_export.keras_export",
"tensorflow.python.ops.clip_ops.clip_by_value",
"tensorflow.python.keras.backend.gradients",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.keras.utils.generic_utils.serialize_keras_object",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.ops.math_ops.pow",
"tensorflow.python.ops.clip_ops.clip_by_norm",
"tensorflow.python.keras.utils.generic_utils.deserialize_keras_object",
"tensorflow.python.keras.backend.get_value",
"tensorflow.python.keras.backend.cast_to_floatx",
"tensorflow.python.keras.backend.batch_set_value",
"tensorflow.python.keras.backend.sqrt",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.keras.backend.epsilon"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.13",
"1.10",
"1.12"
]
}
] |
giraffe-tree/play-tf | [
"30f39f228d55fdeb35f1bd420b3bb29ecd3ade96",
"30f39f228d55fdeb35f1bd420b3bb29ecd3ade96",
"30f39f228d55fdeb35f1bd420b3bb29ecd3ade96"
] | [
"marvanZhouTutorial/04_try_variable.py",
"marvanZhouTutorial/06_try_activation_func_visualization.py",
"try_myself/basic/try_l2_regularizer2.py"
] | [
"import tensorflow as tf\nimport numpy as np\n\n# 本节主要讲解变量/常量\n\nstate = tf.Variable(0,name=\"counter\")\n# print(state.name)\none = tf.constant(1)\nstate2 = tf.add(state,one)\nupdate = tf.assign(state,state2)\n\n# 定义了变量后, 一定要用下面的\n# old -> tf.initialize_all_variables()\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n\tsess.run(init)\n\tfor _ in range(3):\n\t\tsess.run(update)\n\t\tprint(sess.run(state))\n\n\n",
"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n# 本节在 try_activation_func.py 的基础上, 做可视化\n\n\n# y = Wx\n# y = AF(Wx)\n# relu, sigmoid, tanh\n# 注意这些激励函数是要可以微分的\n# 梯度爆炸, 梯度消失\n\n# 少量层结构\n# CNN relu\n# RNN relu tanh\n\ndef add_layer(inputs, in_size, out_size, activation_function=None):\n '''\n 添加一个神经层\n '''\n # 定义一个矩阵 in_size行,out_size列矩阵\n Weights = tf.Variable(tf.random_normal([in_size, out_size]))\n # 列表\n biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n\n Wx_plus_bias = tf.matmul(inputs, Weights) + biases\n\n if activation_function is None:\n outputs = Wx_plus_bias\n else:\n outputs = activation_function(Wx_plus_bias)\n return outputs\n\n\n# 定义数据\nx_data = np.linspace(-1, 1, 300)[:, np.newaxis]\nnoise = np.random.normal(0, 0.05, x_data.shape)\ny_data = np.square(x_data) - 0.5 + noise\n\nxs = tf.placeholder(tf.float32,[None,1])\nys = tf.placeholder(tf.float32,[None,1])\n\n# 隐藏层\nl1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)\n\nprediction = add_layer(l1, 10, 1, activation_function=None)\n\n# 损失函数\nloss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),\n reduction_indices=[1]))\n\n# 学习效率, 一般小于1\ntrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\n# 定义了变量后, 一定要用下面的\n# old -> tf.initialize_all_variables()\ninit = tf.global_variables_initializer()\n\nsess = tf.Session()\nsess.run(init)\n\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nax.scatter(x_data,y_data)\n\n# 继续往下走\nplt.ion()\n# plt.show(block = False ) 旧版使用\nplt.show()\n\nfor i in range(1000):\n sess.run(train_step,feed_dict={xs: x_data,ys:y_data})\n if i % 50 ==0:\n # print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))\n try:\n ax.lines.remove(lines[0])\n except Exception:\n pass \n prediction_value = sess.run(prediction,feed_dict={xs:x_data})\n lines = ax.plot(x_data,prediction_value,'r-',lw=5)\n plt.pause(0.1)\n\n\n",
"import tensorflow as tf\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndata = []\nlabel = []\nnp.random.seed(0)\n\n# 以原点为圆心,半径为1的圆把散点划分成红蓝两部分,并加入随机噪音。\nfor i in range(150):\n x1 = np.random.uniform(-1, 1)\n x2 = np.random.uniform(0, 2)\n if x1 ** 2 + x2 ** 2 <= 1:\n data.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])\n label.append(0)\n else:\n data.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])\n label.append(1)\n\ndata = np.hstack(data).reshape(-1, 2)\nlabel = np.hstack(label).reshape(-1, 1)\nprint(data.shape)\nprint(label.shape)\n\n# 这里 c=label -> c=np.squeeze(label)\nplt.scatter(data[:, 0], data[:, 1], c=np.squeeze(label),\n cmap=\"RdBu\", vmin=-.2, vmax=1.2, edgecolor=\"white\")\nplt.show()\n\n\ndef get_weight(shape, lambda1):\n var = tf.Variable(tf.random_normal(shape), dtype=tf.float32)\n tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(lambda1)(var))\n return var\n\n\nx = tf.placeholder(tf.float32, shape=(None, 2))\ny_ = tf.placeholder(tf.float32, shape=(None, 1))\nsample_size = len(data)\n\n# 每层节点的个数\nlayer_dimension = [2, 10, 5, 3, 1]\n\nn_layers = len(layer_dimension)\n\ncur_layer = x\nin_dimension = layer_dimension[0]\n\n# 循环生成网络结构\nfor i in range(1, n_layers):\n out_dimension = layer_dimension[i]\n weight = get_weight([in_dimension, out_dimension], 0.003)\n bias = tf.Variable(tf.constant(0.1, shape=[out_dimension]))\n cur_layer = tf.nn.elu(tf.matmul(cur_layer, weight) + bias)\n in_dimension = layer_dimension[i]\n\ny = cur_layer\n\n# 损失函数的定义。\nmse_loss = tf.reduce_sum(tf.pow(y_ - y, 2)) / sample_size\ntf.add_to_collection('losses', mse_loss)\nloss = tf.add_n(tf.get_collection('losses'))\n\n# 定义训练的目标函数mse_loss,训练次数及训练模型\n# 训练不带正则项的\n# train_op = tf.train.AdamOptimizer(0.001).minimize(mse_loss)\n# 训练正则项\ntrain_op = tf.train.AdamOptimizer(0.001).minimize(loss)\n\nTRAINING_STEPS = 10000\n\nwith tf.Session() as sess:\n tf.global_variables_initializer().run()\n for i in range(TRAINING_STEPS):\n sess.run(train_op, feed_dict={x: data, y_: label})\n if i % 1000 == 0:\n print(\"After %d steps, mse_loss: %f\" % (i, sess.run(mse_loss, feed_dict={x: data, y_: label})))\n\n # 画出训练后的分割曲线\n xx, yy = np.mgrid[-1.2:1.2:.01, -0.2:2.2:.01]\n grid = np.c_[xx.ravel(), yy.ravel()]\n probs = sess.run(y, feed_dict={x: grid})\n probs = probs.reshape(xx.shape)\n\nplt.scatter(data[:, 0], data[:, 1], c=np.squeeze(label),\n cmap=\"RdBu\", vmin=-.2, vmax=1.2, edgecolor=\"white\")\nplt.contour(xx, yy, probs, levels=[.5], cmap=\"Greys\", vmin=0, vmax=.1)\nplt.show()\n"
] | [
[
"tensorflow.constant",
"tensorflow.Variable",
"tensorflow.assign",
"tensorflow.global_variables_initializer",
"tensorflow.add",
"tensorflow.Session"
],
[
"numpy.square",
"tensorflow.matmul",
"numpy.linspace",
"tensorflow.zeros",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"numpy.random.normal",
"tensorflow.random_normal",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.Session",
"tensorflow.square",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.hstack",
"tensorflow.matmul",
"tensorflow.constant",
"numpy.random.seed",
"tensorflow.pow",
"tensorflow.get_collection",
"numpy.squeeze",
"tensorflow.placeholder",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.global_variables_initializer",
"numpy.random.normal",
"matplotlib.pyplot.contour",
"tensorflow.Session",
"tensorflow.train.AdamOptimizer",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"tensorflow.add_to_collection",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
kgkang/deep-learning-from-scratch | [
"1b2c680cc69e9fd1bbc51ef9753f24c3d92890f1"
] | [
"test-ch05/model_classification.py"
] | [
"\nimport numpy as np\nimport matplotlib.pylab as plt\n\ndef sigmod(z):\n return 1 / (1 + np.exp(-z))\n\n\nsz = np.linspace(-10,10,100)\nsa = sigmod(sz)\n\nplt.plot(sz, sa)\nplt.show()\n\n"
] | [
[
"numpy.exp",
"matplotlib.pylab.show",
"numpy.linspace",
"matplotlib.pylab.plot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
christinarudolf/TheFluShot | [
"5a5d6c6d7b7bb9c46d48c35480ee32db5e7d64db"
] | [
"modeling/train.py"
] | [
"from logging import getLogger\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.linear_model import LinearRegression\nimport pickle\nimport warnings\nimport mlflow\nfrom mlflow.sklearn import save_model # , log_model\n\nfrom modeling.feature_engineering import (\n fill_missing_values,\n drop_column,\n transform_altitude,\n altitude_high_meters_mean,\n altitude_mean_log_mean,\n altitude_low_meters_mean,\n)\n\nfrom modeling.config import TRACKING_URI, EXPERIMENT_NAME\n\nwarnings.filterwarnings(\"ignore\")\nlogger = getLogger(__name__)\n\n\ndef __get_data():\n logger.info(\"Getting the data\")\n # coffee data\n url = \"https://github.com/jldbc/coffee-quality-database/raw/master/data/robusta_data_cleaned.csv\"\n coffee_features = pd.read_csv(url)\n\n # coffee score\n\n url = \"https://raw.githubusercontent.com/jldbc/coffee-quality-database/master/data/robusta_ratings_raw.csv\"\n coffee_quality = pd.read_csv(url)\n\n # cleaning data and preparing\n Y = coffee_quality[\"quality_score\"]\n X = coffee_features.select_dtypes([\"number\"])\n\n # splittin into train and test\n X_train, X_test, y_train, y_test = train_test_split(\n X, Y, test_size=0.30, random_state=42\n )\n ## in order to exemplify how the predict will work.. we will save the y_train\n logger.info(\"Saving test data in the data folder .. wo feat eng\")\n X_test.to_csv(\"data/X_test.csv\", index=False)\n y_test.to_csv(\"data/y_test.csv\", index=False)\n\n logger.info(\"Feature engineering on train\")\n X_train = transform_altitude(X_train)\n X_train = drop_column(X_train, col_name=\"Unnamed: 0\")\n X_train = drop_column(X_train, col_name=\"Quakers\")\n X_train = fill_missing_values(X_train)\n\n # feature eng on test data\n logger.info(\"Feature engineering on test\")\n X_test = transform_altitude(X_test)\n X_test = drop_column(X_test, col_name=\"Unnamed: 0\")\n X_test = drop_column(X_test, col_name=\"Quakers\")\n X_test = fill_missing_values(X_test)\n\n return X_train, X_test, y_train, y_test\n\n\ndef __compute_and_log_metrics(\n y_true: pd.Series, y_pred: pd.Series, prefix: str = \"train\"\n):\n mse = mean_squared_error(y_true, y_pred)\n r2 = r2_score(y_true, y_pred)\n\n logger.info(\n \"Linear Regression performance on \"\n + str(prefix)\n + \" set: MSE = {:.1f}, R2 = {:.1%},\".format(mse, r2)\n )\n mlflow.log_metric(prefix + \"-\" + \"MSE\", mse)\n mlflow.log_metric(prefix + \"-\" + \"R2\", r2)\n\n return mse, r2\n\n\ndef run_training():\n logger.info(f\"Getting the data\")\n X_train, X_test, y_train, y_test = __get_data()\n\n logger.info(\"Training simple model and tracking with MLFlow\")\n mlflow.set_tracking_uri(TRACKING_URI)\n mlflow.set_experiment(EXPERIMENT_NAME)\n # model\n logger.info(\"Training a simple linear regression\")\n with mlflow.start_run():\n reg = LinearRegression().fit(X_train, y_train)\n # taking some parameters out of the feature eng.. 
in your case you can use the params from CV\n params = {\n \"altitude_low_meters_mean\": altitude_low_meters_mean,\n \"altitude_high_meters_mean\": altitude_high_meters_mean,\n \"altitude_mean_log_mean\": altitude_mean_log_mean,\n \"fit_intercept\": True,\n }\n mlflow.log_params(params)\n mlflow.set_tag(\"worst_model\", \"True\")\n y_train_pred = reg.predict(X_train)\n\n __compute_and_log_metrics(y_train, y_train_pred)\n\n y_test_pred = reg.predict(X_test)\n __compute_and_log_metrics(y_test, y_test_pred, \"test\")\n\n logger.info(\"this is obviously fishy\")\n # saving the model\n logger.info(\"Saving model in the model folder\")\n path = \"models/linear\"\n #save_model(sk_model=reg, path=path)\n # logging the model to mlflow will not work without a AWS Connection setup.. too complex for now\n\n\nif __name__ == \"__main__\":\n import logging\n\n logger = logging.getLogger()\n logging.basicConfig(format=\"%(asctime)s: %(message)s\")\n logging.getLogger(\"pyhive\").setLevel(logging.CRITICAL) # avoid excessive logs\n logger.setLevel(logging.INFO)\n\n run_training()"
] | [
[
"pandas.read_csv",
"sklearn.metrics.r2_score",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.LinearRegression"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
datitran/falcon-prediction-app | [
"b63c72ea7c2cb9ffeaa1decf06ecdfa6b23de602"
] | [
"src/prediction_app/predict.py"
] | [
"import base64\nimport json\nimport falcon\nimport numpy as np\nfrom io import BytesIO\nfrom PIL import Image, ImageOps\n\n\ndef convert_image(image):\n img = Image.open(image).convert('L')\n inverted_img = ImageOps.invert(img)\n data = np.asarray(inverted_img, dtype='int32')\n rescaled_data = (data / 255).reshape(1, 28, 28, 1)\n return rescaled_data\n\n\nclass GetResource(object):\n\n def __init__(self, model):\n self.model = model\n\n def on_get(self, req, resp):\n resp.status = falcon.HTTP_200\n resp.body = 'Hello World!'\n\n def on_post(self, req, resp):\n \"\"\"\n (echo -n '{\"image\": \"'; four_test.png; echo '\"}') |\n curl -H \"Content-Type: application/json\" -d @- http://0.0.0.0:8080/predict\n \"\"\"\n image = json.loads(req.stream.read())\n decoded_image = base64.b64decode(image.get('image'))\n data = convert_image(BytesIO(decoded_image))\n predicted_data = self.model.predict_classes(data)[0]\n\n output = {'prediction': str(predicted_data)}\n resp.status = falcon.HTTP_200\n resp.body = json.dumps(output, ensure_ascii=False)\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wyddmw/RotPred | [
"18ca1a565fdbf90e8016e51ed5a3b84dc12109f3"
] | [
"pcdet/models/backbones_3d/vfe/prop_utils/voting_module.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.nn.functional as F\n\n\nclass PointNetEncoder(nn.Module):\n def __init__(self, in_channel=64):\n super(PointNetEncoder, self).__init__()\n self.conv1 = torch.nn.Conv1d(in_channel, 128, 1)\n self.conv2 = torch.nn.Conv1d(128, 1024, 1)\n self.bn1 = nn.BatchNorm1d(128)\n self.bn2 = nn.BatchNorm1d(1024)\n\n def forward(self, x):\n B, D, N = x.size()\n x = F.relu(self.bn1(self.conv1(x)))\n \n pointfeat = x\n x = F.relu(self.bn2(self.conv2(x)))\n x = torch.max(x, 2, keepdim=True)[0]\n x = x.view(-1, 1024)\n x = x.view(-1, 1024, 1).repeat(1, 1, N)\n return torch.cat([x, pointfeat], 1)\n \n\nclass PointNetBackbone(nn.Module):\n def __init__(self, in_channel=64):\n super(PointNetBackbone, self).__init__()\n self.feat = PointNetEncoder(in_channel=in_channel)\n self.conv1 = torch.nn.Conv1d(1152, 512, 1)\n self.conv2 = torch.nn.Conv1d(512, 256, 1)\n self.conv3 = torch.nn.Conv1d(256, 128, 1)\n self.bn1 = nn.BatchNorm1d(512)\n self.bn2 = nn.BatchNorm1d(256)\n self.bn3 = nn.BatchNorm1d(128)\n \n def forward(self, x):\n batchsize = x.size()[0]\n n_pts = x.size()[2]\n x = self.feat(x)\n x = F.relu(self.bn1(self.conv1(x)))\n x = F.relu(self.bn2(self.conv2(x)))\n x = F.relu(self.bn3(self.conv3(x)))\n return x\n\n\nclass VotingModule(nn.Module):\n def __init__(self, in_channel=64, feature_channel=128, num_voting=1):\n super(VotingModule, self).__init__()\n self.pointnet = PointNetBackbone(in_channel)\n self.conv1 = nn.Conv1d(feature_channel, feature_channel, 1, bias=False)\n self.conv2 = nn.Conv1d(feature_channel, feature_channel, 1, bias=False) \n self.offset = nn.Conv1d(feature_channel, 2, 1, bias=False)\n self.stride = nn.Conv1d(feature_channel, 1, 1, bias=False)\n self.prob = nn.Conv1d(feature_channel, 1, 1, bias=False)\n self.sigmoid = nn.Sigmoid()\n self.bn1 = nn.BatchNorm1d(feature_channel)\n self.bn2 = nn.BatchNorm1d(feature_channel)\n\n def forward(self, input_feature):\n voting_feature = self.pointnet(input_feature)\n voting_feature = F.relu(self.bn1(self.conv1(voting_feature)))\n voting_feature = F.relu(self.bn2(self.conv2(voting_feature)))\n centering_offset = self.offset(voting_feature)\n stride = F.relu(self.stride(voting_feature))\n prob = self.sigmoid(self.prob(voting_feature))\n return centering_offset, stride, prob\n\t\n\nif __name__ == '__main__':\n model = VotingModule()\n xyz = torch.rand(12, 64, 6000)\n data_dict = {'pillar_feature': xyz}\n output = model(data_dict)\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.max",
"torch.cat",
"torch.nn.Sigmoid",
"torch.rand",
"torch.nn.Conv1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
KMCzajkowski/pyscf | [
"e8af41d910cc0d3963655120c0b689590ad978e7",
"e8af41d910cc0d3963655120c0b689590ad978e7",
"e8af41d910cc0d3963655120c0b689590ad978e7",
"e8af41d910cc0d3963655120c0b689590ad978e7",
"e8af41d910cc0d3963655120c0b689590ad978e7"
] | [
"pyscf/cc/ccsd_t.py",
"pyscf/nao/m_log_mesh.py",
"pyscf/pbc/df/df_ao2mo.py",
"pyscf/nao/m_siesta2blanko_denvec.py",
"pyscf/nao/m_siesta_wfsx.py"
] | [
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport gc\nimport time\nimport ctypes\nimport tempfile\nimport numpy\nimport h5py\nfrom pyscf import lib\nfrom pyscf import symm\nfrom pyscf.lib import logger\nfrom pyscf.cc import _ccsd\n\n'''\nCCSD(T)\n'''\n\n# t3 as ijkabc\n\n# JCP, 94, 442. Error in Eq (1), should be [ia] >= [jb] >= [kc]\ndef kernel(mycc, eris, t1=None, t2=None, verbose=logger.NOTE):\n cpu1 = cpu0 = (time.clock(), time.time())\n log = logger.new_logger(mycc, verbose)\n if t1 is None: t1 = mycc.t1\n if t2 is None: t2 = mycc.t2\n\n nocc, nvir = t1.shape\n nmo = nocc + nvir\n\n _tmpfile = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)\n ftmp = h5py.File(_tmpfile.name)\n eris_vvop = ftmp.create_dataset('vvop', (nvir,nvir,nocc,nmo), 'f8')\n orbsym = _sort_eri(mycc, eris, nocc, nvir, eris_vvop, log)\n\n ftmp['t2'] = t2 # read back late. Cache t2T in t2 to reduce memory footprint\n mo_energy, t1T, t2T, vooo = _sort_t2_vooo_(mycc, orbsym, t1, t2, eris)\n cpu1 = log.timer_debug1('CCSD(T) sort_eri', *cpu1)\n\n cpu2 = list(cpu1)\n orbsym = numpy.hstack((numpy.sort(orbsym[:nocc]),numpy.sort(orbsym[nocc:])))\n o_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(orbsym[:nocc], minlength=8)))\n v_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(orbsym[nocc:], minlength=8)))\n o_sym = orbsym[:nocc]\n oo_sym = (o_sym[:,None] ^ o_sym).ravel()\n oo_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(oo_sym, minlength=8)))\n nirrep = max(oo_sym) + 1\n\n orbsym = orbsym.astype(numpy.int32)\n o_ir_loc = o_ir_loc.astype(numpy.int32)\n v_ir_loc = v_ir_loc.astype(numpy.int32)\n oo_ir_loc = oo_ir_loc.astype(numpy.int32)\n et_sum = [0]\n def contract(a0, a1, b0, b1, cache):\n cache_row_a, cache_col_a, cache_row_b, cache_col_b = cache\n drv = _ccsd.libcc.CCsd_t_contract\n drv.restype = ctypes.c_double\n et = drv(mo_energy.ctypes.data_as(ctypes.c_void_p),\n t1T.ctypes.data_as(ctypes.c_void_p),\n t2T.ctypes.data_as(ctypes.c_void_p),\n vooo.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_int(nocc), ctypes.c_int(nvir),\n ctypes.c_int(a0), ctypes.c_int(a1),\n ctypes.c_int(b0), ctypes.c_int(b1),\n ctypes.c_int(nirrep),\n o_ir_loc.ctypes.data_as(ctypes.c_void_p),\n v_ir_loc.ctypes.data_as(ctypes.c_void_p),\n oo_ir_loc.ctypes.data_as(ctypes.c_void_p),\n orbsym.ctypes.data_as(ctypes.c_void_p),\n cache_row_a.ctypes.data_as(ctypes.c_void_p),\n cache_col_a.ctypes.data_as(ctypes.c_void_p),\n cache_row_b.ctypes.data_as(ctypes.c_void_p),\n cache_col_b.ctypes.data_as(ctypes.c_void_p))\n cpu2[:] = log.timer_debug1('contract %d:%d,%d:%d'%(a0,a1,b0,b1), *cpu2)\n et_sum[0] += et\n return et\n\n # The rest 20% memory for cache b\n mem_now = lib.current_memory()[0]\n max_memory = max(2000, mycc.max_memory - mem_now)\n bufsize = max(1, (max_memory*1e6/8-nocc**3*100)*.7/(nocc*nmo))\n log.debug('max_memory %d MB (%d MB in use)', max_memory, mem_now)\n for a0, a1 in reversed(list(lib.prange_tril(0, nvir, bufsize))):\n with lib.call_in_background(contract) as async_contract:\n cache_row_a = numpy.asarray(eris_vvop[a0:a1,:a1], order='C')\n cache_col_a = numpy.asarray(eris_vvop[:a0,a0:a1], order='C')\n async_contract(a0, a1, a0, a1, (cache_row_a,cache_col_a,\n cache_row_a,cache_col_a))\n\n for b0, b1 in lib.prange_tril(0, a0, bufsize/6):\n cache_row_b = numpy.asarray(eris_vvop[b0:b1,:b1], order='C')\n cache_col_b = numpy.asarray(eris_vvop[:b0,b0:b1], order='C')\n async_contract(a0, a1, b0, b1, (cache_row_a,cache_col_a,\n cache_row_b,cache_col_b))\n cache_row_b = cache_col_b = None\n cache_row_a 
= cache_col_a = None\n\n t2[:] = ftmp['t2']\n ftmp.close()\n _tmpfile = None\n et = et_sum[0] * 2\n log.timer('CCSD(T)', *cpu0)\n log.note('CCSD(T) correction = %.15g', et)\n return et\n\ndef _sort_eri(mycc, eris, nocc, nvir, vvop, log):\n cpu1 = (time.clock(), time.time())\n mol = mycc.mol\n nmo = nocc + nvir\n\n if mol.symmetry:\n orbsym = symm.addons.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,\n eris.mo_coeff, check=False)\n orbsym = numpy.asarray(orbsym, dtype=numpy.int32) % 10\n else:\n orbsym = numpy.zeros(nmo, dtype=numpy.int32)\n\n o_sorted = _irrep_argsort(orbsym[:nocc])\n v_sorted = _irrep_argsort(orbsym[nocc:])\n vrank = numpy.argsort(v_sorted)\n\n max_memory = max(2000, mycc.max_memory - lib.current_memory()[0])\n max_memory = min(8000, max_memory*.9)\n blksize = min(nvir, max(16, int(max_memory*1e6/8/(nvir*nocc*nmo))))\n with lib.call_in_background(vvop.__setitem__) as save:\n bufopv = numpy.empty((nocc,nmo,nvir))\n buf1 = numpy.empty_like(bufopv)\n buf = numpy.empty((nocc,nvir,nvir))\n for j0, j1 in lib.prange(0, nvir, blksize):\n ovov = numpy.asarray(eris.ovov[:,j0:j1])\n ovvv = numpy.asarray(eris.ovvv[:,j0:j1])\n for j in range(j0,j1):\n oov = ovov[o_sorted,j-j0]\n ovv = lib.unpack_tril(ovvv[o_sorted,j-j0], out=buf)\n bufopv[:,:nocc,:] = oov[:,o_sorted][:,:,v_sorted]\n bufopv[:,nocc:,:] = ovv[:,v_sorted][:,:,v_sorted]\n save(vrank[j], bufopv.transpose(2,0,1))\n bufopv, buf1 = buf1, bufopv\n cpu1 = log.timer_debug1('transpose %d:%d'%(j0,j1), *cpu1)\n\n return orbsym\n\ndef _sort_t2_vooo_(mycc, orbsym, t1, t2, eris):\n ovoo = numpy.asarray(eris.ovoo)\n nocc, nvir = t1.shape\n if mycc.mol.symmetry:\n orbsym = numpy.asarray(orbsym, dtype=numpy.int32)\n o_sorted = _irrep_argsort(orbsym[:nocc])\n v_sorted = _irrep_argsort(orbsym[nocc:])\n mo_energy = eris.fock.diagonal()\n mo_energy = numpy.hstack((mo_energy[:nocc][o_sorted],\n mo_energy[nocc:][v_sorted]))\n t1T = numpy.asarray(t1.T[v_sorted][:,o_sorted], order='C')\n\n o_sym = orbsym[o_sorted]\n oo_sym = (o_sym[:,None] ^ o_sym).ravel()\n oo_sorted = _irrep_argsort(oo_sym)\n #:vooo = eris.ovoo.transpose(1,0,2,3)\n #:vooo = vooo[v_sorted][:,o_sorted][:,:,o_sorted][:,:,:,o_sorted]\n #:vooo = vooo.reshape(nvir,-1,nocc)[:,oo_sorted]\n oo_idx = numpy.arange(nocc**2).reshape(nocc,nocc)[o_sorted][:,o_sorted]\n oo_idx = oo_idx.ravel()[oo_sorted]\n oo_idx = (oo_idx[:,None]*nocc+o_sorted).ravel()\n vooo = lib.take_2d(ovoo.transpose(1,0,2,3).reshape(nvir,-1), v_sorted, oo_idx)\n\n #:t2T = t2.transpose(2,3,1,0)\n #:t2T = ref_t2T[v_sorted][:,v_sorted][:,:,o_sorted][:,:,:,o_sorted]\n #:t2T = ref_t2T.reshape(nvir,nvir,-1)[:,:,oo_sorted]\n t2T = lib.transpose(t2.reshape(nocc**2,-1))\n oo_idx = numpy.arange(nocc**2).reshape(nocc,nocc).T[o_sorted][:,o_sorted]\n oo_idx = oo_idx.ravel()[oo_sorted]\n vv_idx = (v_sorted[:,None]*nvir+v_sorted).ravel()\n t2T = lib.take_2d(t2T.reshape(nvir**2,-1), vv_idx, oo_idx, out=t2)\n t2T = t2T.reshape(nvir,nvir,nocc,nocc)\n else:\n t1T = t1.T.copy()\n t2T = lib.transpose(t2.reshape(nocc**2,-1))\n t2T = lib.transpose(t2T.reshape(-1,nocc,nocc), axes=(0,2,1), out=t2)\n vooo = ovoo.transpose(1,0,2,3).copy()\n mo_energy = numpy.asarray(eris.fock.diagonal(), order='C')\n vooo = vooo.reshape(nvir,nocc,nocc,nocc)\n t2T = t2T.reshape(nvir,nvir,nocc,nocc)\n return mo_energy, t1T, t2T, vooo\n\ndef _irrep_argsort(orbsym):\n return numpy.hstack([numpy.where(orbsym == i)[0] for i in range(8)])\n\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import scf\n from pyscf import cc\n\n mol = gto.M()\n 
numpy.random.seed(12)\n nocc, nvir = 5, 12\n eris = lambda :None\n eris.ovvv = numpy.random.random((nocc,nvir,nvir*(nvir+1)//2)) * .1\n eris.ovoo = numpy.random.random((nocc,nvir,nocc,nocc)) * .1\n eris.ovov = numpy.random.random((nocc,nvir,nocc,nvir)) * .1\n t1 = numpy.random.random((nocc,nvir)) * .1\n t2 = numpy.random.random((nocc,nocc,nvir,nvir)) * .1\n t2 = t2 + t2.transpose(1,0,3,2)\n mf = scf.RHF(mol)\n mcc = cc.CCSD(mf)\n mcc.mo_energy = mcc._scf.mo_energy = numpy.arange(0., nocc+nvir)\n eris.fock = numpy.diag(mcc.mo_energy)\n print(kernel(mcc, eris, t1, t2) + 8.4953387936460398)\n\n mol = gto.Mole()\n mol.atom = [\n [8 , (0. , 0. , 0.)],\n [1 , (0. , -.957 , .587)],\n [1 , (0.2, .757 , .487)]]\n\n mol.basis = 'ccpvdz'\n mol.build()\n rhf = scf.RHF(mol)\n rhf.conv_tol = 1e-14\n rhf.scf()\n mcc = cc.CCSD(rhf)\n mcc.conv_tol = 1e-14\n mcc.ccsd()\n e3a = kernel(mcc, mcc.ao2mo())\n print(e3a - -0.0033300722704016289)\n\n mol = gto.Mole()\n mol.atom = [\n [8 , (0. , 0. , 0.)],\n [1 , (0. , -.757 , .587)],\n [1 , (0. , .757 , .587)]]\n mol.symmetry = True\n\n mol.basis = 'ccpvdz'\n mol.build()\n rhf = scf.RHF(mol)\n rhf.conv_tol = 1e-14\n rhf.scf()\n mcc = cc.CCSD(rhf)\n mcc.conv_tol = 1e-14\n mcc.ccsd()\n e3a = kernel(mcc, mcc.ao2mo())\n print(e3a - -0.003060022611584471)\n",
"from __future__ import division, print_function\nimport numpy as np\n\n#\n#\n#\ndef get_default_log_mesh_param4gto(gto, tol_in=None):\n rmin_gcs = 10.0\n rmax_gcs = -1.0\n akmx_gcs = -1.0\n\n tol = 1e-7 if tol_in is None else tol_in\n seen_species = [] # this is auxiliary to organize the loop over species \n for ia in range(gto.natm):\n if gto.atom_symbol(ia) in seen_species: continue\n seen_species.append(gto.atom_symbol(ia))\n for sid in gto.atom_shell_ids(ia):\n for power,coeffs in zip(gto.bas_exp(sid), gto.bas_ctr_coeff(sid)):\n for coeff in coeffs:\n rmin_gcs = min(rmin_gcs, np.sqrt( abs(np.log(1.0-tol)/power )))\n rmax_gcs = max(rmax_gcs, np.sqrt( abs(np.log(abs(coeff))-np.log(tol))/power ))\n akmx_gcs = max(akmx_gcs, np.sqrt( abs(np.log(abs(coeff))-np.log(tol))*4*power ))\n\n if rmin_gcs<1e-9 : print('rmin_gcs<1e-9') # Last check \n if rmax_gcs>1e+2 : print('rmax_gcs>1e+2')\n if akmx_gcs>1e+4 : print('akmx_gcs>1e+4', __name__)\n return 1024,rmin_gcs,rmax_gcs,akmx_gcs\n\n#\n#\n#\ndef get_default_log_mesh_param4ion(sp2ion):\n from pyscf.nao.m_next235 import next235\n \"\"\" Determines the default (optimal) parameters for radial orbitals given on equidistant grid\"\"\"\n npts = max(max(ion[\"paos\"][\"npts\"]) for ion in sp2ion)\n nr_def = next235( max(2.0*npts, 1024.0) )\n rmin_def = min(min(ion[\"paos\"][\"delta\"]) for ion in sp2ion)\n rmax_def = 2.3*max(max(ion[\"paos\"][\"cutoff\"]) for ion in sp2ion)\n kmax_def = 1.0/rmin_def/np.pi\n return nr_def,rmin_def,rmax_def,kmax_def\n\n#\n#\n#\ndef get_default_log_mesh_param4gpaw(sp2dic):\n \"\"\" Determines the default (optimal) parameters for radial orbitals given on equidistant grid\"\"\"\n sp2key = sp2dic.keys()\n nr_def = 1024\n rmin_def = 1.0e100\n rmax_grid = -1.0e100\n for key in sp2key: \n rmin_def = min(rmin_def, sp2dic[key].basis.rgd.r_g[1])\n rmax_grid = max(rmax_grid, sp2dic[key].basis.rgd.r_g[-1])\n rmax_def = 2.3*rmax_grid\n kmax_def = 1.0/rmin_def/np.pi\n return nr_def,rmin_def,rmax_def,kmax_def\n\n# sp2dic = setups.setups\n# print('dir(r_g) ', dir(sp2dic[sp2id[1]].basis.rgd.r_g))\n# print(sp2dic[sp2id[0]].basis.rgd.r_g.size)\n# print(sp2dic[sp2id[1]].basis.rgd.r_g.size)\n \n\n#\n#\n#\ndef log_mesh(nr, rmin, rmax, kmax=None):\n \"\"\"\n Initializes log grid in real and reciprocal (momentum) spaces.\n These grids are used in James Talman's subroutines. \n \"\"\"\n assert(type(nr)==int and nr>2)\n \n rhomin=np.log(rmin)\n rhomax=np.log(rmax)\n kmax = 1.0/rmin/np.pi if kmax is None else kmax\n kapmin=np.log(kmax)-rhomax+rhomin\n\n rr=np.array(np.exp( np.linspace(rhomin, rhomax, nr)) )\n pp=np.array(rr*(np.exp(kapmin)/rr[0]))\n\n return rr, pp\n\n#\n#\n#\nclass log_mesh_c():\n ''' Constructor of the log grid used with NAOs.'''\n def __init__(self):\n self.state = 'call an initialize method...'\n return\n \n def init_log_mesh_gto(self, gto, rcut_tol=1e-7, nr=None, rmin=None, rmax=None, kmax=None):\n \"\"\" Initialize an optimal logarithmic mesh based on Gaussian orbitals from pySCF\"\"\"\n #self.gto = gto cannot copy GTO object here... 
because python3 + deepcopy in m_ao_log_hartree fails\n self.rcut_tol = rcut_tol\n nr_def,rmin_def,rmax_def,kmax_def = get_default_log_mesh_param4gto(gto, rcut_tol)\n self.nr = nr_def if nr is None else nr\n self.rmin = rmin_def if rmin is None else rmin\n self.rmax = rmax_def if rmax is None else rmax\n self.kmax = kmax_def if kmax is None else kmax\n assert(self.rmin>0.0); assert(self.kmax>0.0); assert(self.nr>2); assert(self.rmax>self.rmin);\n self.rr,self.pp = log_mesh(self.nr, self.rmin, self.rmax, self.kmax)\n self.state = 'can be useful for something'\n return self\n \n \n def init_log_mesh_ion(self, sp2ion, nr=None, rmin=None, rmax=None, kmax=None):\n \"\"\" Initialize an optimal logarithmic mesh based on information from SIESTA ion files\"\"\"\n self.sp2ion = sp2ion\n nr_def,rmin_def,rmax_def,kmax_def = get_default_log_mesh_param4ion(sp2ion)\n self.nr = nr_def if nr is None else nr\n self.rmin = rmin_def if rmin is None else rmin\n self.rmax = rmax_def if rmax is None else rmax\n self.kmax = kmax_def if kmax is None else kmax\n assert(self.rmin>0.0); assert(self.kmax>0.0); assert(self.nr>2); assert(self.rmax>self.rmin);\n self.rr,self.pp = log_mesh(self.nr, self.rmin, self.rmax, self.kmax)\n self.state = 'can be useful for something'\n return self\n\n def init_log_mesh_gpaw(self, setups, nr=None, rmin=None, rmax=None, kmax=None):\n \"\"\"\n This initializes an optimal logarithmic mesh based on setups from GPAW\n \"\"\"\n\n #self.setups = setups same problem than in m_ao_log\n nr_def,rmin_def,rmax_def,kmax_def = get_default_log_mesh_param4gpaw(setups.setups)\n self.nr = nr_def if nr is None else nr\n self.rmin = rmin_def if rmin is None else rmin\n self.rmax = rmax_def if rmax is None else rmax\n self.kmax = kmax_def if kmax is None else kmax\n assert self.rmin>0.0\n assert self.kmax>0.0 \n assert self.nr>2\n assert self.rmax>self.rmin\n self.rr,self.pp = log_mesh(self.nr, self.rmin, self.rmax, self.kmax)\n self.state = 'can be useful for something'\n return self\n\n def init_log_mesh(self, rr, pp):\n assert(len(pp)==len(rr))\n self.rr,self.pp = rr,pp\n self.nr = len(rr)\n self.rmin = rr[0]\n self.rmax = rr[-1]\n self.kmax = pp[-1]\n self.state = 'can be useful for something'\n return self\n",
"#!/usr/bin/env python\n#\n# Author: Qiming Sun <[email protected]>\n#\n\nimport numpy\nfrom pyscf import lib\nfrom pyscf import ao2mo\nfrom pyscf.ao2mo import _ao2mo\nfrom pyscf.ao2mo.incore import iden_coeffs, _conc_mos\nfrom pyscf.pbc.df.df_jk import zdotNN, zdotCN, zdotNC\nfrom pyscf.pbc.df.fft_ao2mo import _format_kpts\nfrom pyscf.pbc.lib.kpt_misc import is_zero, gamma_point\n\n\ndef get_eri(mydf, kpts=None, compact=True):\n if mydf._cderi is None:\n mydf.build()\n\n cell = mydf.cell\n kptijkl = _format_kpts(kpts)\n kpti, kptj, kptk, kptl = kptijkl\n nao = cell.nao_nr()\n nao_pair = nao * (nao+1) // 2\n max_memory = max(2000, mydf.max_memory-lib.current_memory()[0]-nao**4*8/1e6)\n\n####################\n# gamma point, the integral is real and with s4 symmetry\n if gamma_point(kptijkl):\n eriR = numpy.zeros((nao_pair,nao_pair))\n for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, True):\n lib.ddot(LpqR.T, LpqR, 1, eriR, 1)\n LpqR = LpqI = None\n if not compact:\n eriR = ao2mo.restore(1, eriR, nao).reshape(nao**2,-1)\n return eriR\n\n elif is_zero(kpti-kptk) and is_zero(kptj-kptl):\n eriR = numpy.zeros((nao*nao,nao*nao))\n eriI = numpy.zeros((nao*nao,nao*nao))\n for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):\n zdotNN(LpqR.T, LpqI.T, LpqR, LpqI, 1, eriR, eriI, 1)\n LpqR = LpqI = None\n return eriR + eriI*1j\n\n####################\n# (kpt) i == j == k == l != 0\n#\n# (kpt) i == l && j == k && i != j && j != k =>\n# both vbar and ovlp are zero. It corresponds to the exchange integral.\n#\n# complex integrals, N^4 elements\n elif is_zero(kpti-kptl) and is_zero(kptj-kptk):\n eriR = numpy.zeros((nao*nao,nao*nao))\n eriI = numpy.zeros((nao*nao,nao*nao))\n for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):\n zdotNC(LpqR.T, LpqI.T, LpqR, LpqI, 1, eriR, eriI, 1)\n LpqR = LpqI = None\n# transpose(0,1,3,2) because\n# j == k && i == l =>\n# (L|ij).transpose(0,2,1).conj() = (L^*|ji) = (L^*|kl) => (M|kl)\n eri = lib.transpose((eriR+eriI*1j).reshape(-1,nao,nao), axes=(0,2,1))\n return eri.reshape(nao**2,-1)\n\n####################\n# aosym = s1, complex integrals\n#\n# kpti == kptj => kptl == kptk\n# If kpti == kptj, (kptl-kptk)*a has to be multiples of 2pi because of the wave\n# vector symmetry. 
k is a fraction of reciprocal basis, 0 < k/b < 1, by definition.\n# So kptl/b - kptk/b must be -1 < k/b < 1.\n#\n else:\n eriR = numpy.zeros((nao*nao,nao*nao))\n eriI = numpy.zeros((nao*nao,nao*nao))\n for (LpqR, LpqI), (LrsR, LrsI) in \\\n lib.izip(mydf.sr_loop(kptijkl[:2], max_memory, False),\n mydf.sr_loop(kptijkl[2:], max_memory, False)):\n zdotNN(LpqR.T, LpqI.T, LrsR, LrsI, 1, eriR, eriI, 1)\n LpqR = LpqI = LrsR = LrsI = None\n return eriR + eriI*1j\n\n\ndef general(mydf, mo_coeffs, kpts=None, compact=True):\n if mydf._cderi is None:\n mydf.build()\n\n kptijkl = _format_kpts(kpts)\n kpti, kptj, kptk, kptl = kptijkl\n if isinstance(mo_coeffs, numpy.ndarray) and mo_coeffs.ndim == 2:\n mo_coeffs = (mo_coeffs,) * 4\n all_real = not any(numpy.iscomplexobj(mo) for mo in mo_coeffs)\n max_memory = max(2000, (mydf.max_memory - lib.current_memory()[0]) * .5)\n\n####################\n# gamma point, the integral is real and with s4 symmetry\n if gamma_point(kptijkl) and all_real:\n ijmosym, nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1], compact)\n klmosym, nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3], compact)\n eri_mo = numpy.zeros((nij_pair,nkl_pair))\n sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and\n iden_coeffs(mo_coeffs[1], mo_coeffs[3]))\n ijR = klR = None\n for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, True):\n ijR, klR = _dtrans(LpqR, ijR, ijmosym, moij, ijslice,\n LpqR, klR, klmosym, mokl, klslice, sym)\n lib.ddot(ijR.T, klR, 1, eri_mo, 1)\n LpqR = LpqI = None\n return eri_mo\n\n elif is_zero(kpti-kptk) and is_zero(kptj-kptl):\n mo_coeffs = _mo_as_complex(mo_coeffs)\n nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]\n nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3])[1:]\n eri_mo = numpy.zeros((nij_pair,nkl_pair), dtype=numpy.complex)\n sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and\n iden_coeffs(mo_coeffs[1], mo_coeffs[3]))\n\n zij = zkl = None\n for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):\n buf = LpqR+LpqI*1j\n zij, zkl = _ztrans(buf, zij, moij, ijslice,\n buf, zkl, mokl, klslice, sym)\n lib.dot(zij.T, zkl, 1, eri_mo, 1)\n LpqR = LpqI = buf = None\n return eri_mo\n\n####################\n# (kpt) i == j == k == l != 0\n# (kpt) i == l && j == k && i != j && j != k =>\n#\n elif is_zero(kpti-kptl) and is_zero(kptj-kptk):\n mo_coeffs = _mo_as_complex(mo_coeffs)\n nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]\n nlk_pair, molk, lkslice = _conc_mos(mo_coeffs[3], mo_coeffs[2])[1:]\n eri_mo = numpy.zeros((nij_pair,nlk_pair), dtype=numpy.complex)\n sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[3]) and\n iden_coeffs(mo_coeffs[1], mo_coeffs[2]))\n\n zij = zlk = None\n for LpqR, LpqI in mydf.sr_loop(kptijkl[:2], max_memory, False):\n buf = LpqR+LpqI*1j\n zij, zlk = _ztrans(buf, zij, moij, ijslice,\n buf, zlk, molk, lkslice, sym)\n lib.dot(zij.T, zlk.conj(), 1, eri_mo, 1)\n LpqR = LpqI = buf = None\n nmok = mo_coeffs[2].shape[1]\n nmol = mo_coeffs[3].shape[1]\n eri_mo = lib.transpose(eri_mo.reshape(-1,nmol,nmok), axes=(0,2,1))\n return eri_mo.reshape(nij_pair,nlk_pair)\n\n####################\n# aosym = s1, complex integrals\n#\n# If kpti == kptj, (kptl-kptk)*a has to be multiples of 2pi because of the wave\n# vector symmetry. k is a fraction of reciprocal basis, 0 < k/b < 1, by definition.\n# So kptl/b - kptk/b must be -1 < k/b < 1. 
=> kptl == kptk\n#\n else:\n mo_coeffs = _mo_as_complex(mo_coeffs)\n nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1])[1:]\n nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3])[1:]\n eri_mo = numpy.zeros((nij_pair,nkl_pair), dtype=numpy.complex)\n\n zij = zkl = None\n for (LpqR, LpqI), (LrsR, LrsI) in \\\n lib.izip(mydf.sr_loop(kptijkl[:2], max_memory, False),\n mydf.sr_loop(kptijkl[2:], max_memory, False)):\n zij, zkl = _ztrans(LpqR+LpqI*1j, zij, moij, ijslice,\n LrsR+LrsI*1j, zkl, mokl, klslice, False)\n lib.dot(zij.T, zkl, 1, eri_mo, 1)\n LpqR = LpqI = LrsR = LrsI = None\n return eri_mo\n\n\ndef _mo_as_complex(mo_coeffs):\n mos = []\n for c in mo_coeffs:\n if c.dtype == numpy.float64:\n mos.append(c+0j)\n else:\n mos.append(c)\n return mos\n\ndef _dtrans(Lpq, Lij, ijmosym, moij, ijslice,\n Lrs, Lkl, klmosym, mokl, klslice, sym):\n Lij = _ao2mo.nr_e2(Lpq, moij, ijslice, aosym='s2', mosym=ijmosym, out=Lij)\n if sym:\n Lkl = Lij\n else:\n Lkl = _ao2mo.nr_e2(Lrs, mokl, klslice, aosym='s2', mosym=klmosym, out=Lkl)\n return Lij, Lkl\n\ndef _ztrans(Lpq, zij, moij, ijslice, Lrs, zkl, mokl, klslice, sym):\n tao = []\n ao_loc = None\n zij = _ao2mo.r_e2(Lpq, moij, ijslice, tao, ao_loc, out=zij)\n if sym:\n zkl = zij\n else:\n zkl = _ao2mo.r_e2(Lrs, mokl, klslice, tao, ao_loc, out=zkl)\n return zij, zkl\n\n",
"import numpy as np\n\n#\n#\n#\ndef _siesta2blanko_denvec(orb2m, vec, orb_sc2orb_uc=None):\n\n n,nreim = vec.shape\n\n if orb_sc2orb_uc is None:\n orb_sc2m = orb2m\n else:\n orb_sc2m = np.zeros_like(orb_sc2orb_uc)\n for orb_sc,orb_uc in enumerate(orb_sc2orb_uc): orb_sc2m[orb_sc] = orb2m[orb_uc]\n\n orb2ph = (-1.0)**orb_sc2m\n \n if(nreim==1):\n vec[:,0] = vec[:,0]*orb2ph[:]\n\n elif(nreim==2):\n\n #print(vec[0:3,:], ' vec')\n cvec = vec.view(dtype=np.complex64)\n #print(cvec[0:3], 'cvec', cvec.shape) # I expected cvec.shape = (n), but got (n,1)...\n cvec[:,0] = cvec[:,0] * orb2ph\n #print(cvec[0:3], ' cvec2')\n vec = cvec.view(dtype=np.float32)\n #print(vec[0:3], ' vec2')\n\n #raise RuntimeError('debug')\n\n else:\n raise SystemError('!nreim')\n\n return(0)\n",
"from __future__ import print_function, division\nfrom ctypes import POINTER, c_int64, c_float, c_double, c_char_p, create_string_buffer\nimport os\nimport sys\nimport numpy as np\nfrom numpy import zeros, empty \nfrom pyscf.nao.m_libnao import libnao\n\n# interfacing with fortran subroutines \nlibnao.siesta_wfsx_book_size.argtypes = (c_char_p, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64))\nlibnao.siesta_wfsx_book_read.argtypes = (c_char_p, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64))\nlibnao.siesta_wfsx_dread.argtypes = (c_char_p, POINTER(c_int64), POINTER(c_double), POINTER(c_int64))\nlibnao.siesta_wfsx_sread.argtypes = (c_char_p, POINTER(c_int64), POINTER(c_float), POINTER(c_int64))\n# END of interfacing with fortran subroutines \n\n#\n#\n#\ndef siesta_wfsx_book_read_py(fname, nreim):\n \"\"\" Creates buffer for integer data from .WFSX files \"\"\"\n name = create_string_buffer(fname.encode())\n bufsize = c_int64(-999)\n ios = c_int64(22)\n libnao.siesta_wfsx_book_size(name, c_int64(nreim), bufsize, ios)\n if ios.value!=0 : return None\n idat = empty(bufsize.value, dtype=np.int64)\n libnao.siesta_wfsx_book_read(name, c_int64(nreim), idat.ctypes.data_as(POINTER(c_int64)), ios)\n if ios.value!=0 : return None\n return idat\n\n#\n#\n#\ndef siesta_wfsx_dread(w, nreim):\n ddata = empty(w.nkpoints*w.nspin*w.norbs + + w.nkpoints*3)\n ios = c_int64(-999)\n libnao.siesta_wfsx_dread(create_string_buffer(w.fname.encode()), c_int64(nreim), ddata.ctypes.data_as(POINTER(c_double)), ios)\n if ios.value!=0 : raise RuntimeError('ios!=0 %d'%(ios.value))\n return ddata\n\n#\n#\n#\ndef siesta_wfsx_sread(w, sdata, nreim):\n name = create_string_buffer(w.fname.encode())\n bufsize = w.nkpoints*w.nspin*w.norbs**2*w.nreim\n ios = c_int64(-999)\n libnao.siesta_wfsx_sread(name, c_int64(nreim), sdata.ctypes.data_as(POINTER(c_float)), ios)\n if ios.value!=0 : raise RuntimeError('ios!=0 %d'%(ios.value))\n\n\nclass siesta_wfsx_c():\n def __init__(self, label='siesta', chdir='.', force_gamma=None):\n\n nreim = -999\n if force_gamma is not None:\n if force_gamma : nreim = 1\n \n self.label = label\n ends = ['fullBZ.WFSX', 'WFSX']\n for end in ends:\n fname = chdir+'/'+label+'.'+end\n idat = siesta_wfsx_book_read_py(fname, nreim)\n if idat is None :\n print(fname, ' skip') \n continue\n self.fname = fname\n break\n \n if idat is None : raise RuntimeError('No .WFSX file found')\n \n i = 0\n self.nkpoints = idat[i]; i=i+1\n self.nspin = idat[i]; i=i+1\n self.norbs = idat[i]; i=i+1\n self.gamma = idat[i]>0 if force_gamma is None else force_gamma; i=i+1\n self.orb2atm = idat[i:i+self.norbs]; i=i+self.norbs\n self.orb2ao = idat[i:i+self.norbs]; i=i+self.norbs\n self.orb2n = idat[i:i+self.norbs]; i=i+self.norbs\n if(self.gamma) :\n self.nreim = 1;\n else: \n self.nreim = 2;\n \n # list of caracter that could be used to split the psf file name\n splen = idat[i]; i=i+1\n self.orb2strspecie = []\n for j in range(self.norbs):\n splabel = ''\n for k in range(splen):\n splabel = splabel + chr(idat[i]); i=i+1\n splabel = splabel.replace(\" \", \"\")\n ch = splabel\n self.orb2strspecie.append(ch)\n\n self.sp2strspecie = []\n for strsp in self.orb2strspecie:\n if strsp not in self.sp2strspecie:\n self.sp2strspecie.append(strsp)\n\n symlen = idat[i]; i=i+1\n self.orb2strsym = []\n for j in range(self.norbs):\n symlabel = '' # make oneliner here (to oneline)\n for k in range(symlen):\n symlabel = symlabel + chr(idat[i]); i=i+1\n self.orb2strsym.append(symlabel.strip())\n\n ### Read double precision data\n ddata = 
siesta_wfsx_dread(self, self.nreim)\n\n self.ksn2e = empty((self.nkpoints,self.nspin,self.norbs))\n self.k2xyz = empty((self.nkpoints,3))\n i = 0\n for k in range(self.nkpoints):\n for s in range(self.nspin):\n for n in range(self.norbs):\n self.ksn2e[k,s,n] = ddata[i]; i=i+1\n\n for k in range(self.nkpoints):\n for j in range(3):\n self.k2xyz[k,j] = ddata[i]; i=i+1\n\n ### Read single precision data\n \n self.x = np.require(zeros((self.nkpoints,self.nspin,self.norbs,self.norbs,self.nreim), dtype=np.float32), requirements='CW')\n siesta_wfsx_sread(self, self.x, self.nreim)\n"
] | [
[
"numpy.diag",
"numpy.hstack",
"numpy.random.random",
"numpy.random.seed",
"numpy.asarray",
"numpy.arange",
"numpy.empty_like",
"numpy.sort",
"numpy.bincount",
"numpy.argsort",
"numpy.zeros",
"numpy.where",
"numpy.empty"
],
[
"numpy.log",
"numpy.exp",
"numpy.linspace"
],
[
"numpy.zeros",
"numpy.iscomplexobj"
],
[
"numpy.zeros_like"
],
[
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Syuukakou/DeepLook_into_Android-type_ELF | [
"ef021f893b9c257aea77a06f5383857e382c74fe",
"ef021f893b9c257aea77a06f5383857e382c74fe"
] | [
"Linux_Android_Classification/Src/GridSearchCV/RF/importances_process.py",
"Linux_Android_Classification/Src/functions.py"
] | [
"import ast, os, json\nfrom pprint import pprint\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef transform_str2list(file):\n str_contents = []\n with open(file, \"r\") as f:\n for line in f:\n str_contents.append(line.strip('\\n'))\n results = []\n str_contents[0] = str_contents[0] + \"]\"\n str_contents[0] = str_contents[0].replace(\" \", \",\")\n results.append(ast.literal_eval(str_contents[0]))\n\n str_contents[217] = str_contents[217][1:]\n str_contents[217] = \"[\" + str_contents[217]\n str_contents[217] = str_contents[217].replace(\" \", \",\")\n\n i = 1\n while i < 217:\n str_contents[i] = str_contents[i][1:]\n # print(\"origin: \", contents[i])\n str_contents[i] = \"[\" + str_contents[i] + \"]\"\n str_contents[i] = str_contents[i].replace(\" \", \",\")\n # print(contents[i])\n rev = ast.literal_eval(str_contents[i])\n # print(rev)\n results.append(rev)\n i += 1\n results.append(ast.literal_eval(str_contents[217]))\n fin_results = []\n for item in results:\n fin_results.extend(item)\n\n return fin_results\n\n\ndef calculate_average(dict_data):\n total_average = {}\n total_len = len(dict_data[\"1.txt\"])\n for t in range(total_len):\n total_average[t] = 0\n for key in dict_data:\n for i in range(total_len):\n total_average[i] += dict_data[key][i]\n\n for v in total_average:\n total_average[v] = total_average[v] / 10\n\n return total_average\n\n\ndef get_features_names():\n names = []\n with open(r\"result1/features_importances.txt\", \"r\") as f:\n for line in f:\n names.append(line.strip('\\n'))\n names_results = ast.literal_eval(names[0])\n\n return names_results\n\n\nif __name__ == '__main__':\n with open(r\"../../../Files/opcodes2id_624.json\", \"r\") as f:\n data_opcode = json.load(f)\n print(data_opcode)\n with open(r\"../../../Files/word2id_249.json\", \"r\") as wf:\n data_words = json.load(wf)\n with open(r\"result1/importances.json\", \"r\") as iff:\n data_importances = json.load(iff)\n\n important_values = list(data_importances.values())[:50]\n some_data = {}\n for i in data_importances:\n if data_importances[i] in important_values:\n some_data[i] = data_importances[i]\n\n words_importances = {}\n opcodes_importances = {}\n for key in some_data:\n if key in data_opcode:\n opcodes_importances[key] = some_data[key]\n elif key in data_words:\n words_importances[key] = some_data[key]\n\n # opcode_importances = {}\n # word_importances = {}\n # for key in data_importances:\n # if key in data_opcode:\n # opcode_importances[key] = data_importances[key]\n # elif key in data_words:\n # word_importances[key] = data_importances[key]\n \n # plot data_importances by seaborn\n keywords = list(some_data.keys())\n importance_coef = list(some_data.values())\n # sns.set_style(\"darkgrid\")\n # ax = sns.barplot(x=keywords, y=importance_coef)\n # ax.set()\n # # ax.set(xlable=\"keywords\", ylabel=\"importances\")\n # ax.set_title(\"keywords importances\")\n\n # matplotlib\n plt.figure(figsize=(18, 6))\n plt.bar(keywords, importance_coef)\n plt.title(\"Importance of Keywords Extracted from Opcodes and Function Call Names\", fontsize=20)\n plt.xticks(rotation=30, ha='right', fontsize=15)\n plt.yticks(fontsize=15)\n\n plt.tight_layout()\n plt.show()\n\n # fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(25, 10))\n # # fig.subtitle(\"keywords importances from opcodes and function call names\")\n # op_names = list(opcodes_importances.keys())\n # op_imp = list(opcodes_importances.values())\n # ax1.bar(op_names, op_imp)\n # ax1.set_title(\"Importance of Keywords Extracted from Opcodes\", 
fontsize=20)\n # ax1.tick_params(axis='x', rotation=60, labelsize=15, labelleft=True)\n # ax1.tick_params(axis='y', labelsize=15)\n #\n #\n #\n # words_names = list(words_importances.keys())\n # words_imp = list(words_importances.values())\n # ax2.bar(words_names, words_imp)\n # ax2.yaxis.tick_right()\n # ax2.set_title(\"Importance of Keywords Extracted from Function Call Names\", fontsize=20)\n # ax2.tick_params(axis='x', rotation=60, labelsize=15, labelleft=True)\n # ax2.tick_params(axis='y', labelsize=15)\n #\n #\n # plt.tight_layout()\n # plt.show()\n\n\n\n # folder_path = r\"result1/importances\"\n # results = {}\n # for file in os.listdir(folder_path):\n # filepath = os.path.join(folder_path, file)\n # if os.path.isfile(filepath):\n # results[file] = transform_str2list(filepath)\n # # results = transform_str2list(r\"result1/importances/1.txt\")\n # # pprint(results)\n\n # average = calculate_average(results)\n # print(average)\n # names = get_features_names()\n # print(\"names: \", len(names), \"averages: \", len(average))\n # new_results = {}\n # for key in average:\n # new_results[names[key]] = average[key]\n # print(new_results)\n\n # # sort\n # sorted_results = dict(sorted(new_results.items(), key=lambda item: item[1], reverse=True))\n # print(sorted_results)\n # with open(r\"result1/importances.json\", \"w\") as jf:\n # json.dump(sorted_results, jf)\n",
"import csv\nimport json\nimport os.path\nimport pandas as pd\n\n\ndef get_sample_hashcodes(file):\n with open(file, \"r\") as f:\n data = json.load(f)\n\n return list(data.keys())\n\n\ndef cfg_hashcodes(file):\n hash_codes = []\n with open(file, \"r\") as f:\n for line in f:\n hash_codes.append(line.strip('\\n'))\n\n return hash_codes\n\n\ndef create_feature(dataset_folder):\n \"\"\"\n label: Linux ELF --> 1, Android ELF --> 0\n features: - the count of libraries(libraries), fcns_counts,\n - header_info entrypoint(header_info->entrypoint), number of sections(header_info->numberof_sections),\n number of segments(header_info->numberof_segments), program_header_size(header_info->program_header_size),\n section_header_size(header_info->section_header_size),\n - section_info total entrypoint and total size(section_info->entropy, section_info->entry_size)\n\n :param dataset_folder:\n :return:\n \"\"\"\n error_hash = set()\n csv_data = pd.read_csv(r\"../Files/opcodes624_fcns98-malicious.csv\", header=None)\n metrix = csv_data.values\n mal_hash = metrix[:, -1].tolist()\n\n # mal_hash = get_sample_hashcodes(r\"../../52617_cve_usage.json\")\n for h in mal_hash:\n filename = str(h) + \".json\"\n filepath = os.path.join(dataset_folder, filename)\n if os.path.isfile(filepath):\n with open(filepath, \"r\") as f:\n data = json.load(f)\n feature_list = []\n feature_list.append(len(data[\"libraries\"]))\n feature_list.append(data[\"fcns_counts\"])\n # header info\n if \"numberof_sections\" in data[\"header_info\"]:\n feature_list.append(data[\"header_info\"][\"numberof_sections\"])\n else:\n feature_list.append(len(data[\"section_info\"]))\n error_hash.add(h)\n\n if \"numberof_segments\" in data[\"header_info\"]:\n feature_list.append(data[\"header_info\"][\"numberof_segments\"])\n else:\n feature_list.append(len(data[\"segments_info\"]))\n error_hash.add(h)\n\n if \"program_header_size\" in data[\"header_info\"]:\n feature_list.append(data[\"header_info\"][\"program_header_size\"])\n else:\n feature_list.append(0)\n error_hash.add(h)\n\n if \"section_header_size\" in data[\"header_info\"]:\n feature_list.append(data[\"header_info\"][\"section_header_size\"])\n else:\n feature_list.append(0)\n error_hash.add(h)\n # section info\n section_info = data[\"section_info\"]\n entropy = 0\n entry_size = 0\n\n for section in section_info:\n if \"entropy\" in section_info and \"entry_size\" in section_info:\n entropy += section_info[section][\"entropy\"]\n entry_size += section_info[section][\"entry_size\"]\n else:\n entropy = 0\n entry_size = 0\n feature_list.append(entropy)\n feature_list.append(entry_size)\n\n if data[\"androidMal\"]:\n # feature_list.append(0)\n feature_list.append(\"android\")\n else: # Linux ELF\n # feature_list.append(1)\n feature_list.append(\"linux\")\n with open(r\"../Files/features_v3.csv\", \"a+\", newline=\"\") as ff:\n writer = csv.writer(ff, dialect=\"excel\")\n writer.writerow(feature_list)\n print(h, \" processed!!\")\n print(error_hash)\n\n\ndef cal_average(file):\n data = []\n with open(file, \"r\") as f:\n for line in f:\n num = float(line.strip('\\n'))\n data.append(num)\n average = sum(data) / len(data)\n return average\n\n\nif __name__ == '__main__':\n result = cal_average(r\"GridSearchCV/LRC/results/fcns_Rf_results.txt\")\n print(result)\n # create_feature(r\"C:\\Users\\Syuukakou\\Documents\\dataset_v1\")\n # hashcodes_list = get_sample_hashcodes(r\"../../52617_cve_usage.json\")\n # print(len(hashcodes_list))\n # cfg_hashlist = 
cfg_hashcodes(r\"../Files/reCFG_hashcodes.txt\")\n # print(len(set(cfg_hashlist)))\n # diff = list(set(hashcodes_list).difference(set(cfg_hashlist)))\n # # print(diff)\n # print(len(diff))\n # print(len(list(set(hashcodes_list).intersection(set(cfg_hashlist)))))\n # results = []\n # folder = r\"C:\\Users\\Syuukakou\\Documents\\dataset\"\n # for file in os.listdir(folder):\n # results.append(file.strip('.json'))\n\n\n # # with open(r\"C:\\Users\\Syuukakou\\Documents\\dataset\", \"r\") as f:\n # # for line in f:\n # # results.append(line.strip('.json'))\n # diff1 = list(set(cfg_hashlist).difference(set(results)))\n # common = list(set(cfg_hashlist).intersection(set(results)))\n # print(len(diff1))\n # print(len(common))\n # diff_h = []\n # for h in diff1:\n # for h1 in results:\n # if h in h1:\n # diff_h.append(h1)\n # print(len(diff_h))\n # common.extend(diff_h)\n # print(len(common))\n # with open(r\"../Files/reCFG_hashcodes.txt\", \"w\") as wf:\n # for item in common:\n # wf.write(\"%s\\n\" % item)\n"
] | [
[
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
pallgeuer/mmpose | [
"d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd",
"d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd",
"d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd",
"d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd",
"d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd",
"d3c17d5e6bdb9dbaca19f3bf53aa2802105355fd"
] | [
"tests/test_pipelines/test_pose3d_transform.py",
"mmpose/models/necks/fpn.py",
"mmpose/datasets/pipelines/bottom_up_transform.py",
"mmpose/datasets/datasets/hand/hand_coco_wholebody_dataset.py",
"tests/test_post_processing/test_group.py",
"tools/webcam/webcam_apis/nodes/helper_node.py"
] | [
"# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport os.path as osp\nimport tempfile\n\nimport mmcv\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\n\nfrom mmpose.core import SimpleCamera\nfrom mmpose.datasets.pipelines import Compose\n\nH36M_JOINT_IDX = [14, 2, 1, 0, 3, 4, 5, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]\n\n\ndef get_data_sample():\n\n def _parse_h36m_imgname(imgname):\n \"\"\"Parse imgname to get information of subject, action and camera.\n\n A typical h36m image filename is like:\n S1_Directions_1.54138969_000001.jpg\n \"\"\"\n subj, rest = osp.basename(imgname).split('_', 1)\n action, rest = rest.split('.', 1)\n camera, rest = rest.split('_', 1)\n return subj, action, camera\n\n ann_flle = 'tests/data/h36m/test_h36m.npz'\n camera_param_file = 'tests/data/h36m/cameras.pkl'\n\n data = np.load(ann_flle)\n cameras = mmcv.load(camera_param_file)\n\n _imgnames = data['imgname']\n _joints_2d = data['part'][:, H36M_JOINT_IDX].astype(np.float32)\n _joints_3d = data['S'][:, H36M_JOINT_IDX].astype(np.float32)\n _centers = data['center'].astype(np.float32)\n _scales = data['scale'].astype(np.float32)\n\n frame_ids = [0]\n target_frame_id = 0\n\n results = {\n 'frame_ids': frame_ids,\n 'target_frame_id': target_frame_id,\n 'input_2d': _joints_2d[frame_ids, :, :2],\n 'input_2d_visible': _joints_2d[frame_ids, :, -1:],\n 'input_3d': _joints_3d[frame_ids, :, :3],\n 'input_3d_visible': _joints_3d[frame_ids, :, -1:],\n 'target': _joints_3d[target_frame_id, :, :3],\n 'target_visible': _joints_3d[target_frame_id, :, -1:],\n 'imgnames': _imgnames[frame_ids],\n 'scales': _scales[frame_ids],\n 'centers': _centers[frame_ids],\n }\n\n # add camera parameters\n subj, _, camera = _parse_h36m_imgname(_imgnames[frame_ids[0]])\n results['camera_param'] = cameras[(subj, camera)]\n\n # add image size\n results['image_width'] = results['camera_param']['w']\n results['image_height'] = results['camera_param']['h']\n\n # add ann_info\n ann_info = {}\n ann_info['num_joints'] = 17\n ann_info['joint_weights'] = np.full(17, 1.0, dtype=np.float32)\n ann_info['flip_pairs'] = [[1, 4], [2, 5], [3, 6], [11, 14], [12, 15],\n [13, 16]]\n ann_info['upper_body_ids'] = (0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)\n ann_info['lower_body_ids'] = (1, 2, 3, 4, 5, 6)\n ann_info['use_different_joint_weights'] = False\n\n results['ann_info'] = ann_info\n\n return results\n\n\ndef test_joint_transforms():\n results = get_data_sample()\n\n mean = np.random.rand(16, 3).astype(np.float32)\n std = np.random.rand(16, 3).astype(np.float32) + 1e-6\n\n pipeline = [\n dict(\n type='RelativeJointRandomFlip',\n item='target',\n flip_cfg=dict(center_mode='root', center_index=0),\n visible_item='target_visible',\n flip_prob=1.,\n flip_camera=True),\n dict(\n type='GetRootCenteredPose',\n item='target',\n root_index=0,\n root_name='global_position',\n remove_root=True),\n dict(\n type='NormalizeJointCoordinate', item='target', mean=mean,\n std=std),\n dict(type='PoseSequenceToTensor', item='target'),\n dict(\n type='ImageCoordinateNormalization',\n item='input_2d',\n norm_camera=True),\n dict(type='CollectCameraIntrinsics'),\n dict(\n type='Collect',\n keys=[('input_2d', 'input'), ('target', 'output'), 'flip_pairs',\n 'intrinsics'],\n meta_name='metas',\n meta_keys=['camera_param'])\n ]\n\n pipeline = Compose(pipeline)\n output = pipeline(copy.deepcopy(results))\n\n # test transformation of target\n joints_0 = results['target']\n joints_1 = output['output'].numpy()\n # manually do transformations\n 
flip_pairs = output['flip_pairs']\n _joints_0_flipped = joints_0.copy()\n for _l, _r in flip_pairs:\n _joints_0_flipped[..., _l, :] = joints_0[..., _r, :]\n _joints_0_flipped[..., _r, :] = joints_0[..., _l, :]\n _joints_0_flipped[...,\n 0] = 2 * joints_0[..., 0:1, 0] - _joints_0_flipped[...,\n 0]\n joints_0 = _joints_0_flipped\n joints_0 = (joints_0[..., 1:, :] - joints_0[..., 0:1, :] - mean) / std\n # convert to [K*C, T]\n joints_0 = joints_0.reshape(-1)[..., None]\n np.testing.assert_array_almost_equal(joints_0, joints_1)\n\n # test transformation of input\n joints_0 = results['input_2d']\n joints_1 = output['input']\n # manually do transformations\n center = np.array(\n [0.5 * results['image_width'], 0.5 * results['image_height']],\n dtype=np.float32)\n scale = np.array(0.5 * results['image_width'], dtype=np.float32)\n joints_0 = (joints_0 - center) / scale\n np.testing.assert_array_almost_equal(joints_0, joints_1)\n\n # test transformation of camera parameters\n camera_param_0 = results['camera_param']\n camera_param_1 = output['metas'].data['camera_param']\n # manually flip and normalization\n camera_param_0['c'][0] *= -1\n camera_param_0['p'][0] *= -1\n camera_param_0['c'] = (camera_param_0['c'] -\n np.array(center)[:, None]) / scale\n camera_param_0['f'] = camera_param_0['f'] / scale\n np.testing.assert_array_almost_equal(camera_param_0['c'],\n camera_param_1['c'])\n np.testing.assert_array_almost_equal(camera_param_0['f'],\n camera_param_1['f'])\n\n # test CollectCameraIntrinsics\n intrinsics_0 = np.concatenate([\n results['camera_param']['f'].reshape(2),\n results['camera_param']['c'].reshape(2),\n results['camera_param']['k'].reshape(3),\n results['camera_param']['p'].reshape(2)\n ])\n intrinsics_1 = output['intrinsics']\n np.testing.assert_array_almost_equal(intrinsics_0, intrinsics_1)\n\n # test load mean/std from file\n with tempfile.TemporaryDirectory() as tmpdir:\n norm_param = {'mean': mean, 'std': std}\n norm_param_file = osp.join(tmpdir, 'norm_param.pkl')\n mmcv.dump(norm_param, norm_param_file)\n\n pipeline = [\n dict(\n type='NormalizeJointCoordinate',\n item='target',\n norm_param_file=norm_param_file),\n ]\n pipeline = Compose(pipeline)\n\n\ndef test_camera_projection():\n results = get_data_sample()\n pipeline_1 = [\n dict(\n type='CameraProjection',\n item='input_3d',\n output_name='input_3d_w',\n camera_type='SimpleCamera',\n mode='camera_to_world'),\n dict(\n type='CameraProjection',\n item='input_3d_w',\n output_name='input_3d_wp',\n camera_type='SimpleCamera',\n mode='world_to_pixel'),\n dict(\n type='CameraProjection',\n item='input_3d',\n output_name='input_3d_p',\n camera_type='SimpleCamera',\n mode='camera_to_pixel'),\n dict(type='Collect', keys=['input_3d_wp', 'input_3d_p'], meta_keys=[])\n ]\n camera_param = results['camera_param'].copy()\n camera_param['K'] = np.concatenate(\n (np.diagflat(camera_param['f']), camera_param['c']), axis=-1)\n pipeline_2 = [\n dict(\n type='CameraProjection',\n item='input_3d',\n output_name='input_3d_w',\n camera_type='SimpleCamera',\n camera_param=camera_param,\n mode='camera_to_world'),\n dict(\n type='CameraProjection',\n item='input_3d_w',\n output_name='input_3d_wp',\n camera_type='SimpleCamera',\n camera_param=camera_param,\n mode='world_to_pixel'),\n dict(\n type='CameraProjection',\n item='input_3d',\n output_name='input_3d_p',\n camera_type='SimpleCamera',\n camera_param=camera_param,\n mode='camera_to_pixel'),\n dict(\n type='CameraProjection',\n item='input_3d_w',\n output_name='input_3d_wc',\n 
camera_type='SimpleCamera',\n camera_param=camera_param,\n mode='world_to_camera'),\n dict(\n type='Collect',\n keys=['input_3d_wp', 'input_3d_p', 'input_2d'],\n meta_keys=[])\n ]\n\n output1 = Compose(pipeline_1)(results)\n output2 = Compose(pipeline_2)(results)\n\n np.testing.assert_allclose(\n output1['input_3d_wp'], output1['input_3d_p'], rtol=1e-6)\n\n np.testing.assert_allclose(\n output2['input_3d_wp'], output2['input_3d_p'], rtol=1e-6)\n\n np.testing.assert_allclose(\n output2['input_3d_p'], output2['input_2d'], rtol=1e-3, atol=1e-1)\n\n # test invalid camera parameters\n with pytest.raises(ValueError):\n # missing intrinsic parameters\n camera_param_wo_intrinsic = camera_param.copy()\n camera_param_wo_intrinsic.pop('K')\n camera_param_wo_intrinsic.pop('f')\n camera_param_wo_intrinsic.pop('c')\n _ = Compose([\n dict(\n type='CameraProjection',\n item='input_3d',\n camera_type='SimpleCamera',\n camera_param=camera_param_wo_intrinsic,\n mode='camera_to_pixel')\n ])\n\n with pytest.raises(ValueError):\n # invalid mode\n _ = Compose([\n dict(\n type='CameraProjection',\n item='input_3d',\n camera_type='SimpleCamera',\n camera_param=camera_param,\n mode='dummy')\n ])\n\n # test camera without undistortion\n camera_param_wo_undistortion = camera_param.copy()\n camera_param_wo_undistortion.pop('k')\n camera_param_wo_undistortion.pop('p')\n _ = Compose([\n dict(\n type='CameraProjection',\n item='input_3d',\n camera_type='SimpleCamera',\n camera_param=camera_param_wo_undistortion,\n mode='camera_to_pixel')\n ])\n\n # test pixel to camera transformation\n camera = SimpleCamera(camera_param_wo_undistortion)\n kpt_camera = np.random.rand(14, 3)\n kpt_pixel = camera.camera_to_pixel(kpt_camera)\n _kpt_camera = camera.pixel_to_camera(\n np.concatenate([kpt_pixel, kpt_camera[:, [2]]], -1))\n assert_array_almost_equal(_kpt_camera, kpt_camera, decimal=4)\n\n\ndef test_3d_heatmap_generation():\n ann_info = dict(\n image_size=np.array([256, 256]),\n heatmap_size=np.array([64, 64, 64]),\n heatmap3d_depth_bound=400.0,\n num_joints=17,\n joint_weights=np.ones((17, 1), dtype=np.float32),\n use_different_joint_weights=False)\n\n results = dict(\n joints_3d=np.zeros([17, 3]),\n joints_3d_visible=np.ones([17, 3]),\n ann_info=ann_info)\n\n pipeline = Compose([dict(type='Generate3DHeatmapTarget')])\n results_3d = pipeline(results)\n assert results_3d['target'].shape == (17, 64, 64, 64)\n assert results_3d['target_weight'].shape == (17, 1)\n\n # test joint_indices\n pipeline = Compose(\n [dict(type='Generate3DHeatmapTarget', joint_indices=[0, 8, 16])])\n results_3d = pipeline(results)\n assert results_3d['target'].shape == (3, 64, 64, 64)\n assert results_3d['target_weight'].shape == (3, 1)\n\n\ndef test_voxel3D_heatmap_generation():\n heatmap_size = [200, 160]\n cube_size = [8, 8, 2]\n ann_info = dict(\n image_size=np.array([800, 640]),\n heatmap_size=np.array([heatmap_size]),\n num_joints=17,\n num_scales=1,\n space_size=[12000.0, 12000.0, 2000.0],\n space_center=[3000.0, 4500.0, 1000.0],\n cube_size=cube_size)\n\n results = dict(\n joints_3d=np.ones([2, 17, 3]),\n joints_3d_visible=np.ones([2, 17, 3]),\n ann_info=ann_info)\n\n # test single joint index\n joint_indices = [[11, 12]]\n pipeline = Compose([\n dict(\n type='GenerateVoxel3DHeatmapTarget',\n sigma=200.0,\n joint_indices=joint_indices,\n ),\n ])\n results_ = pipeline(results)\n assert results_['targets_3d'].shape == (8, 8, 2)\n\n # test multiple joint indices\n joint_indices = [0, 8, 6]\n pipeline = Compose([\n dict(\n 
type='GenerateVoxel3DHeatmapTarget',\n            sigma=200.0,\n            joint_indices=joint_indices,\n        ),\n    ])\n    results_ = pipeline(results)\n    assert results_['targets_3d'].shape == (3, 8, 8, 2)\n\n\ndef test_input_heatmap_generation():\n    heatmap_size = [200, 160]\n    ann_info = dict(\n        image_size=np.array([800, 640]),\n        heatmap_size=np.array([heatmap_size]),\n        num_joints=17,\n        num_scales=1,\n    )\n\n    results = dict(\n        joints=np.zeros([2, 17, 3]),\n        joints_visible=np.ones([2, 17, 3]),\n        ann_info=ann_info)\n\n    pipeline = dict(\n        type='GenerateInputHeatmaps',\n        item='joints',\n        visible_item='joints_visible',\n        obscured=0.0,\n        from_pred=False,\n        sigma=3,\n        scale=1.0,\n        base_size=96,\n        target_type='gaussian',\n        heatmap_cfg=dict(\n            base_scale=0.9,\n            offset=0.03,\n            threshold=0.6,\n            extra=[\n                dict(joint_ids=[7, 8], scale_factor=0.5, threshold=0.1),\n                dict(\n                    joint_ids=[9, 10],\n                    scale_factor=0.2,\n                    threshold=0.1,\n                ),\n                dict(\n                    joint_ids=[0, 1, 2, 3, 4, 5, 6, 11, 12, 13, 14, 15, 16],\n                    scale_factor=0.5,\n                    threshold=0.05)\n            ]))\n\n    pipelines = Compose([pipeline])\n    results_ = pipelines(results)\n    assert results_['input_heatmaps'][0].shape == (17, heatmap_size[1],\n                                                   heatmap_size[0])\n\n    # test `obscured` (run the modified copy, not the original pipeline)\n    pipeline_copy = copy.deepcopy(pipeline)\n    pipeline_copy['obscured'] = 0.5\n    pipelines = Compose([pipeline_copy])\n    results_ = pipelines(results)\n    assert results_['input_heatmaps'][0].shape == (17, heatmap_size[1],\n                                                   heatmap_size[0])\n\n    # test `heatmap_cfg`\n    pipeline_copy = copy.deepcopy(pipeline)\n    pipeline_copy['heatmap_cfg'] = None\n    pipelines = Compose([pipeline_copy])\n    results_ = pipelines(results)\n    assert results_['input_heatmaps'][0].shape == (17, heatmap_size[1],\n                                                   heatmap_size[0])\n\n    # test `from_pred`\n    pipeline_copy = copy.deepcopy(pipeline)\n    pipeline_copy['from_pred'] = True\n    pipelines = Compose([pipeline_copy])\n    results_ = pipelines(results)\n    assert results_['input_heatmaps'][0].shape == (17, heatmap_size[1],\n                                                   heatmap_size[0])\n    # test `from_pred` & `scale`\n    pipeline_copy = copy.deepcopy(pipeline)\n    pipeline_copy['from_pred'] = True\n    pipeline_copy['scale'] = None\n    pipelines = Compose([pipeline_copy])\n    results_ = pipelines(results)\n    assert results_['input_heatmaps'][0].shape == (17, heatmap_size[1],\n                                                   heatmap_size[0])\n\n\ndef test_affine_joints():\n    ann_info = dict(image_size=np.array([800, 640]))\n\n    results = dict(\n        center=np.array([180, 144]),\n        scale=np.array([360, 288], dtype=np.float32),\n        rotation=0.0,\n        joints=np.ones((3, 17, 2)),\n        joints_visible=np.ones((3, 17, 2)),\n        ann_info=ann_info)\n\n    pipeline = Compose([\n        dict(\n            type='AffineJoints', item='joints', visible_item='joints_visible')\n    ])\n    results_ = pipeline(results)\n    assert results_['joints'].shape == (3, 17, 2)\n    assert results_['joints_visible'].shape == (3, 17, 2)\n\n    # test `joints_visible` is zero (run the pipeline on the zeroed copy)\n    results_copy = copy.deepcopy(results)\n    results_copy['joints_visible'] = np.zeros((3, 17, 2))\n    results_ = pipeline(results_copy)\n    assert results_['joints'].shape == (3, 17, 2)\n    assert results_['joints_visible'].shape == (3, 17, 2)\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, xavier_init\nfrom mmcv.runner import auto_fp16\n\nfrom ..builder import NECKS\n\n\[email protected]_module()\nclass FPN(nn.Module):\n r\"\"\"Feature Pyramid Network.\n\n This is an implementation of paper `Feature Pyramid Networks for Object\n Detection <https://arxiv.org/abs/1612.03144>`_.\n\n Args:\n in_channels (list[int]): Number of input channels per scale.\n out_channels (int): Number of output channels (used at each scale).\n num_outs (int): Number of output scales.\n start_level (int): Index of the start input backbone level used to\n build the feature pyramid. Default: 0.\n end_level (int): Index of the end input backbone level (exclusive) to\n build the feature pyramid. Default: -1, which means the last level.\n add_extra_convs (bool | str): If bool, it decides whether to add conv\n layers on top of the original feature maps. Default to False.\n If True, it is equivalent to `add_extra_convs='on_input'`.\n If str, it specifies the source feature map of the extra convs.\n Only the following options are allowed\n\n - 'on_input': Last feat map of neck inputs (i.e. backbone feature).\n - 'on_lateral': Last feature map after lateral convs.\n - 'on_output': The last output feature map after fpn convs.\n relu_before_extra_convs (bool): Whether to apply relu before the extra\n conv. Default: False.\n no_norm_on_lateral (bool): Whether to apply norm on lateral.\n Default: False.\n conv_cfg (dict): Config dict for convolution layer. Default: None.\n norm_cfg (dict): Config dict for normalization layer. Default: None.\n act_cfg (dict): Config dict for activation layer in ConvModule.\n Default: None.\n upsample_cfg (dict): Config dict for interpolate layer.\n Default: dict(mode='nearest').\n\n Example:\n >>> import torch\n >>> in_channels = [2, 3, 5, 7]\n >>> scales = [340, 170, 84, 43]\n >>> inputs = [torch.rand(1, c, s, s)\n ... for c, s in zip(in_channels, scales)]\n >>> self = FPN(in_channels, 11, len(in_channels)).eval()\n >>> outputs = self.forward(inputs)\n >>> for i in range(len(outputs)):\n ... 
print(f'outputs[{i}].shape = {outputs[i].shape}')\n outputs[0].shape = torch.Size([1, 11, 340, 340])\n outputs[1].shape = torch.Size([1, 11, 170, 170])\n outputs[2].shape = torch.Size([1, 11, 84, 84])\n outputs[3].shape = torch.Size([1, 11, 43, 43])\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n num_outs,\n start_level=0,\n end_level=-1,\n add_extra_convs=False,\n relu_before_extra_convs=False,\n no_norm_on_lateral=False,\n conv_cfg=None,\n norm_cfg=None,\n act_cfg=None,\n upsample_cfg=dict(mode='nearest')):\n super().__init__()\n assert isinstance(in_channels, list)\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.num_ins = len(in_channels)\n self.num_outs = num_outs\n self.relu_before_extra_convs = relu_before_extra_convs\n self.no_norm_on_lateral = no_norm_on_lateral\n self.fp16_enabled = False\n self.upsample_cfg = upsample_cfg.copy()\n\n if end_level == -1 or end_level == self.num_ins - 1:\n self.backbone_end_level = self.num_ins\n assert num_outs >= self.num_ins - start_level\n else:\n # if end_level is not the last level, no extra level is allowed\n self.backbone_end_level = end_level + 1\n assert end_level < self.num_ins\n assert num_outs == end_level - start_level + 1\n self.start_level = start_level\n self.end_level = end_level\n self.add_extra_convs = add_extra_convs\n assert isinstance(add_extra_convs, (str, bool))\n if isinstance(add_extra_convs, str):\n # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'\n assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')\n elif add_extra_convs: # True\n self.add_extra_convs = 'on_input'\n\n self.lateral_convs = nn.ModuleList()\n self.fpn_convs = nn.ModuleList()\n\n for i in range(self.start_level, self.backbone_end_level):\n l_conv = ConvModule(\n in_channels[i],\n out_channels,\n 1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n act_cfg=act_cfg,\n inplace=False)\n fpn_conv = ConvModule(\n out_channels,\n out_channels,\n 3,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n inplace=False)\n\n self.lateral_convs.append(l_conv)\n self.fpn_convs.append(fpn_conv)\n\n # add extra conv layers (e.g., RetinaNet)\n extra_levels = num_outs - self.backbone_end_level + self.start_level\n if self.add_extra_convs and extra_levels >= 1:\n for i in range(extra_levels):\n if i == 0 and self.add_extra_convs == 'on_input':\n in_channels = self.in_channels[self.backbone_end_level - 1]\n else:\n in_channels = out_channels\n extra_fpn_conv = ConvModule(\n in_channels,\n out_channels,\n 3,\n stride=2,\n padding=1,\n conv_cfg=conv_cfg,\n norm_cfg=norm_cfg,\n act_cfg=act_cfg,\n inplace=False)\n self.fpn_convs.append(extra_fpn_conv)\n\n def init_weights(self):\n \"\"\"Initialize model weights.\"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m, distribution='uniform')\n\n @auto_fp16()\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n assert len(inputs) == len(self.in_channels)\n\n # build laterals\n laterals = [\n lateral_conv(inputs[i + self.start_level])\n for i, lateral_conv in enumerate(self.lateral_convs)\n ]\n\n # build top-down path\n used_backbone_levels = len(laterals)\n for i in range(used_backbone_levels - 1, 0, -1):\n # In some cases, fixing `scale factor` (e.g. 
2) is preferred, but\n # it cannot co-exist with `size` in `F.interpolate`.\n if 'scale_factor' in self.upsample_cfg:\n # fix runtime error of \"+=\" inplace operation in PyTorch 1.10\n laterals[i - 1] = laterals[i - 1] + F.interpolate(\n laterals[i], **self.upsample_cfg)\n else:\n prev_shape = laterals[i - 1].shape[2:]\n laterals[i - 1] = laterals[i - 1] + F.interpolate(\n laterals[i], size=prev_shape, **self.upsample_cfg)\n\n # build outputs\n # part 1: from original levels\n outs = [\n self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n ]\n # part 2: add extra levels\n if self.num_outs > len(outs):\n # use max pool to get more levels on top of outputs\n # (e.g., Faster R-CNN, Mask R-CNN)\n if not self.add_extra_convs:\n for i in range(self.num_outs - used_backbone_levels):\n outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n # add conv layers on top of original feature maps (RetinaNet)\n else:\n if self.add_extra_convs == 'on_input':\n extra_source = inputs[self.backbone_end_level - 1]\n elif self.add_extra_convs == 'on_lateral':\n extra_source = laterals[-1]\n elif self.add_extra_convs == 'on_output':\n extra_source = outs[-1]\n else:\n raise NotImplementedError\n outs.append(self.fpn_convs[used_backbone_levels](extra_source))\n for i in range(used_backbone_levels + 1, self.num_outs):\n if self.relu_before_extra_convs:\n outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n else:\n outs.append(self.fpn_convs[i](outs[-1]))\n return outs\n",
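A note on the top-down merge inside FPN.forward above: each lateral map is summed with the next-coarser level after that level is upsampled to the lateral's spatial size, and only afterwards smoothed by the 3x3 fpn_convs. A minimal, self-contained sketch of that merge step in plain PyTorch (the channel count of 11 echoes the docstring example; the spatial sizes are illustrative assumptions):

    import torch
    import torch.nn.functional as F

    # Two lateral maps of a hypothetical pyramid; P4 has half the resolution of P3.
    lat_p3 = torch.rand(1, 11, 64, 64)
    lat_p4 = torch.rand(1, 11, 32, 32)

    # Top-down step: upsample the coarser map to the finer map's size, then add.
    # This mirrors the `size=prev_shape` branch of FPN.forward.
    merged_p3 = lat_p3 + F.interpolate(lat_p4, size=lat_p3.shape[2:], mode='nearest')
    assert merged_p3.shape == lat_p3.shape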
"# Copyright (c) OpenMMLab. All rights reserved.\nimport cv2\nimport numpy as np\n\nfrom mmpose.core.post_processing import (get_affine_transform, get_warp_matrix,\n warp_affine_joints)\nfrom mmpose.datasets.builder import PIPELINES\nfrom .shared_transform import Compose\n\n\ndef _ceil_to_multiples_of(x, base=64):\n \"\"\"Transform x to the integral multiple of the base.\"\"\"\n return int(np.ceil(x / base)) * base\n\n\ndef _get_multi_scale_size(image,\n input_size,\n current_scale,\n min_scale,\n use_udp=False):\n \"\"\"Get the size for multi-scale training.\n\n Args:\n image: Input image.\n input_size (np.ndarray[2]): Size (w, h) of the image input.\n current_scale (float): Scale factor.\n min_scale (float): Minimal scale.\n use_udp (bool): To use unbiased data processing.\n Paper ref: Huang et al. The Devil is in the Details: Delving into\n Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n\n Returns:\n tuple: A tuple containing multi-scale sizes.\n\n - (w_resized, h_resized) (tuple(int)): resized width/height\n - center (np.ndarray)image center\n - scale (np.ndarray): scales wrt width/height\n \"\"\"\n assert len(input_size) == 2\n h, w, _ = image.shape\n\n # calculate the size for min_scale\n min_input_w = _ceil_to_multiples_of(min_scale * input_size[0], 64)\n min_input_h = _ceil_to_multiples_of(min_scale * input_size[1], 64)\n if w < h:\n w_resized = int(min_input_w * current_scale / min_scale)\n h_resized = int(\n _ceil_to_multiples_of(min_input_w / w * h, 64) * current_scale /\n min_scale)\n if use_udp:\n scale_w = w - 1.0\n scale_h = (h_resized - 1.0) / (w_resized - 1.0) * (w - 1.0)\n else:\n scale_w = w / 200.0\n scale_h = h_resized / w_resized * w / 200.0\n else:\n h_resized = int(min_input_h * current_scale / min_scale)\n w_resized = int(\n _ceil_to_multiples_of(min_input_h / h * w, 64) * current_scale /\n min_scale)\n if use_udp:\n scale_h = h - 1.0\n scale_w = (w_resized - 1.0) / (h_resized - 1.0) * (h - 1.0)\n else:\n scale_h = h / 200.0\n scale_w = w_resized / h_resized * h / 200.0\n if use_udp:\n center = (scale_w / 2.0, scale_h / 2.0)\n else:\n center = np.array([round(w / 2.0), round(h / 2.0)])\n return (w_resized, h_resized), center, np.array([scale_w, scale_h])\n\n\ndef _resize_align_multi_scale(image, input_size, current_scale, min_scale):\n \"\"\"Resize the images for multi-scale training.\n\n Args:\n image: Input image\n input_size (np.ndarray[2]): Size (w, h) of the image input\n current_scale (float): Current scale\n min_scale (float): Minimal scale\n\n Returns:\n tuple: A tuple containing image info.\n\n - image_resized (np.ndarray): resized image\n - center (np.ndarray): center of image\n - scale (np.ndarray): scale\n \"\"\"\n assert len(input_size) == 2\n size_resized, center, scale = _get_multi_scale_size(\n image, input_size, current_scale, min_scale)\n\n trans = get_affine_transform(center, scale, 0, size_resized)\n image_resized = cv2.warpAffine(image, trans, size_resized)\n\n return image_resized, center, scale\n\n\ndef _resize_align_multi_scale_udp(image, input_size, current_scale, min_scale):\n \"\"\"Resize the images for multi-scale training.\n\n Args:\n image: Input image\n input_size (np.ndarray[2]): Size (w, h) of the image input\n current_scale (float): Current scale\n min_scale (float): Minimal scale\n\n Returns:\n tuple: A tuple containing image info.\n\n - image_resized (np.ndarray): resized image\n - center (np.ndarray): center of image\n - scale (np.ndarray): scale\n \"\"\"\n assert len(input_size) == 2\n size_resized, _, _ = 
_get_multi_scale_size(image, input_size,\n current_scale, min_scale, True)\n\n _, center, scale = _get_multi_scale_size(image, input_size, min_scale,\n min_scale, True)\n\n trans = get_warp_matrix(\n theta=0,\n size_input=np.array(scale, dtype=np.float32),\n size_dst=np.array(size_resized, dtype=np.float32) - 1.0,\n size_target=np.array(scale, dtype=np.float32))\n image_resized = cv2.warpAffine(\n image.copy(), trans, size_resized, flags=cv2.INTER_LINEAR)\n\n return image_resized, center, scale\n\n\nclass HeatmapGenerator:\n \"\"\"Generate heatmaps for bottom-up models.\n\n Args:\n num_joints (int): Number of keypoints\n output_size (np.ndarray): Size (w, h) of feature map\n sigma (int): Sigma of the heatmaps.\n use_udp (bool): To use unbiased data processing.\n Paper ref: Huang et al. The Devil is in the Details: Delving into\n Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n \"\"\"\n\n def __init__(self, output_size, num_joints, sigma=-1, use_udp=False):\n if not isinstance(output_size, np.ndarray):\n output_size = np.array(output_size)\n if output_size.size > 1:\n assert len(output_size) == 2\n self.output_size = output_size\n else:\n self.output_size = np.array([output_size, output_size],\n dtype=np.int)\n self.num_joints = num_joints\n if sigma < 0:\n sigma = self.output_size.prod()**0.5 / 64\n self.sigma = sigma\n size = 6 * sigma + 3\n self.use_udp = use_udp\n if use_udp:\n self.x = np.arange(0, size, 1, np.float32)\n self.y = self.x[:, None]\n else:\n x = np.arange(0, size, 1, np.float32)\n y = x[:, None]\n x0, y0 = 3 * sigma + 1, 3 * sigma + 1\n self.g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))\n\n def __call__(self, joints):\n \"\"\"Generate heatmaps.\"\"\"\n hms = np.zeros(\n (self.num_joints, self.output_size[1], self.output_size[0]),\n dtype=np.float32)\n\n sigma = self.sigma\n for p in joints:\n for idx, pt in enumerate(p):\n if pt[2] > 0:\n x, y = int(pt[0]), int(pt[1])\n if x < 0 or y < 0 or \\\n x >= self.output_size[0] or y >= self.output_size[1]:\n continue\n\n if self.use_udp:\n x0 = 3 * sigma + 1 + pt[0] - x\n y0 = 3 * sigma + 1 + pt[1] - y\n g = np.exp(-((self.x - x0)**2 + (self.y - y0)**2) /\n (2 * sigma**2))\n else:\n g = self.g\n\n ul = int(np.round(x - 3 * sigma -\n 1)), int(np.round(y - 3 * sigma - 1))\n br = int(np.round(x + 3 * sigma +\n 2)), int(np.round(y + 3 * sigma + 2))\n\n c, d = max(0,\n -ul[0]), min(br[0], self.output_size[0]) - ul[0]\n a, b = max(0,\n -ul[1]), min(br[1], self.output_size[1]) - ul[1]\n\n cc, dd = max(0, ul[0]), min(br[0], self.output_size[0])\n aa, bb = max(0, ul[1]), min(br[1], self.output_size[1])\n hms[idx, aa:bb,\n cc:dd] = np.maximum(hms[idx, aa:bb, cc:dd], g[a:b,\n c:d])\n return hms\n\n\nclass JointsEncoder:\n \"\"\"Encodes the visible joints into (coordinates, score); The coordinate of\n one joint and its score are of `int` type.\n\n (idx * output_size**2 + y * output_size + x, 1) or (0, 0).\n\n Args:\n max_num_people(int): Max number of people in an image\n num_joints(int): Number of keypoints\n output_size(np.ndarray): Size (w, h) of feature map\n tag_per_joint(bool): Option to use one tag map per joint.\n \"\"\"\n\n def __init__(self, max_num_people, num_joints, output_size, tag_per_joint):\n self.max_num_people = max_num_people\n self.num_joints = num_joints\n if not isinstance(output_size, np.ndarray):\n output_size = np.array(output_size)\n if output_size.size > 1:\n assert len(output_size) == 2\n self.output_size = output_size\n else:\n self.output_size = np.array([output_size, 
output_size],\n dtype=np.int)\n self.tag_per_joint = tag_per_joint\n\n def __call__(self, joints):\n \"\"\"\n Note:\n - number of people in image: N\n - number of keypoints: K\n - max number of people in an image: M\n\n Args:\n joints (np.ndarray[N,K,3])\n\n Returns:\n visible_kpts (np.ndarray[M,K,2]).\n \"\"\"\n visible_kpts = np.zeros((self.max_num_people, self.num_joints, 2),\n dtype=np.float32)\n for i in range(len(joints)):\n tot = 0\n for idx, pt in enumerate(joints[i]):\n x, y = int(pt[0]), int(pt[1])\n if (pt[2] > 0 and 0 <= y < self.output_size[1]\n and 0 <= x < self.output_size[0]):\n if self.tag_per_joint:\n visible_kpts[i][tot] = \\\n (idx * self.output_size.prod()\n + y * self.output_size[0] + x, 1)\n else:\n visible_kpts[i][tot] = (y * self.output_size[0] + x, 1)\n tot += 1\n return visible_kpts\n\n\nclass PAFGenerator:\n \"\"\"Generate part affinity fields.\n\n Args:\n output_size (np.ndarray): Size (w, h) of feature map.\n limb_width (int): Limb width of part affinity fields.\n skeleton (list[list]): connections of joints.\n \"\"\"\n\n def __init__(self, output_size, limb_width, skeleton):\n if not isinstance(output_size, np.ndarray):\n output_size = np.array(output_size)\n if output_size.size > 1:\n assert len(output_size) == 2\n self.output_size = output_size\n else:\n self.output_size = np.array([output_size, output_size],\n dtype=np.int)\n self.limb_width = limb_width\n self.skeleton = skeleton\n\n def _accumulate_paf_map_(self, pafs, src, dst, count):\n \"\"\"Accumulate part affinity fields between two given joints.\n\n Args:\n pafs (np.ndarray[2,H,W]): paf maps (2 dimensions:x axis and\n y axis) for a certain limb connection. This argument will\n be modified inplace.\n src (np.ndarray[2,]): coordinates of the source joint.\n dst (np.ndarray[2,]): coordinates of the destination joint.\n count (np.ndarray[H,W]): count map that preserves the number\n of non-zero vectors at each point. 
This argument will be\n modified inplace.\n \"\"\"\n limb_vec = dst - src\n norm = np.linalg.norm(limb_vec)\n if norm == 0:\n unit_limb_vec = np.zeros(2)\n else:\n unit_limb_vec = limb_vec / norm\n\n min_x = max(np.floor(min(src[0], dst[0]) - self.limb_width), 0)\n max_x = min(\n np.ceil(max(src[0], dst[0]) + self.limb_width),\n self.output_size[0] - 1)\n min_y = max(np.floor(min(src[1], dst[1]) - self.limb_width), 0)\n max_y = min(\n np.ceil(max(src[1], dst[1]) + self.limb_width),\n self.output_size[1] - 1)\n\n range_x = list(range(int(min_x), int(max_x + 1), 1))\n range_y = list(range(int(min_y), int(max_y + 1), 1))\n\n mask = np.zeros_like(count, dtype=bool)\n if len(range_x) > 0 and len(range_y) > 0:\n xx, yy = np.meshgrid(range_x, range_y)\n delta_x = xx - src[0]\n delta_y = yy - src[1]\n dist = np.abs(delta_x * unit_limb_vec[1] -\n delta_y * unit_limb_vec[0])\n mask_local = (dist < self.limb_width)\n mask[yy, xx] = mask_local\n\n pafs[0, mask] += unit_limb_vec[0]\n pafs[1, mask] += unit_limb_vec[1]\n count += mask\n\n return pafs, count\n\n def __call__(self, joints):\n \"\"\"Generate the target part affinity fields.\"\"\"\n pafs = np.zeros(\n (len(self.skeleton) * 2, self.output_size[1], self.output_size[0]),\n dtype=np.float32)\n\n for idx, sk in enumerate(self.skeleton):\n count = np.zeros((self.output_size[1], self.output_size[0]),\n dtype=np.float32)\n\n for p in joints:\n src = p[sk[0]]\n dst = p[sk[1]]\n if src[2] > 0 and dst[2] > 0:\n self._accumulate_paf_map_(pafs[2 * idx:2 * idx + 2],\n src[:2], dst[:2], count)\n\n pafs[2 * idx:2 * idx + 2] /= np.maximum(count, 1)\n\n return pafs\n\n\[email protected]_module()\nclass BottomUpRandomFlip:\n \"\"\"Data augmentation with random image flip for bottom-up.\n\n Args:\n flip_prob (float): Probability of flip.\n \"\"\"\n\n def __init__(self, flip_prob=0.5):\n self.flip_prob = flip_prob\n\n def __call__(self, results):\n \"\"\"Perform data augmentation with random image flip.\"\"\"\n image, mask, joints = results['img'], results['mask'], results[\n 'joints']\n self.flip_index = results['ann_info']['flip_index']\n self.output_size = results['ann_info']['heatmap_size']\n\n assert isinstance(mask, list)\n assert isinstance(joints, list)\n assert len(mask) == len(joints)\n assert len(mask) == len(self.output_size)\n\n if np.random.random() < self.flip_prob:\n image = image[:, ::-1].copy() - np.zeros_like(image)\n for i, _output_size in enumerate(self.output_size):\n if not isinstance(_output_size, np.ndarray):\n _output_size = np.array(_output_size)\n if _output_size.size > 1:\n assert len(_output_size) == 2\n else:\n _output_size = np.array([_output_size, _output_size],\n dtype=np.int)\n mask[i] = mask[i][:, ::-1].copy()\n joints[i] = joints[i][:, self.flip_index]\n joints[i][:, :, 0] = _output_size[0] - joints[i][:, :, 0] - 1\n results['img'], results['mask'], results[\n 'joints'] = image, mask, joints\n return results\n\n\[email protected]_module()\nclass BottomUpRandomAffine:\n \"\"\"Data augmentation with random scaling & rotating.\n\n Args:\n rot_factor (int): Rotating to [-rotation_factor, rotation_factor]\n scale_factor (float): Scaling to [1-scale_factor, 1+scale_factor]\n scale_type: wrt ``long`` or ``short`` length of the image.\n trans_factor: Translation factor.\n use_udp (bool): To use unbiased data processing.\n Paper ref: Huang et al. 
The Devil is in the Details: Delving into\n Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n \"\"\"\n\n def __init__(self,\n rot_factor,\n scale_factor,\n scale_type,\n trans_factor,\n use_udp=False):\n self.max_rotation = rot_factor\n self.min_scale = scale_factor[0]\n self.max_scale = scale_factor[1]\n self.scale_type = scale_type\n self.trans_factor = trans_factor\n self.use_udp = use_udp\n\n def _get_scale(self, image_size, resized_size):\n w, h = image_size\n w_resized, h_resized = resized_size\n if w / w_resized < h / h_resized:\n if self.scale_type == 'long':\n w_pad = h / h_resized * w_resized\n h_pad = h\n elif self.scale_type == 'short':\n w_pad = w\n h_pad = w / w_resized * h_resized\n else:\n raise ValueError(f'Unknown scale type: {self.scale_type}')\n else:\n if self.scale_type == 'long':\n w_pad = w\n h_pad = w / w_resized * h_resized\n elif self.scale_type == 'short':\n w_pad = h / h_resized * w_resized\n h_pad = h\n else:\n raise ValueError(f'Unknown scale type: {self.scale_type}')\n\n scale = np.array([w_pad, h_pad], dtype=np.float32)\n\n return scale\n\n def __call__(self, results):\n \"\"\"Perform data augmentation with random scaling & rotating.\"\"\"\n image, mask, joints = results['img'], results['mask'], results[\n 'joints']\n\n self.input_size = results['ann_info']['image_size']\n if not isinstance(self.input_size, np.ndarray):\n self.input_size = np.array(self.input_size)\n if self.input_size.size > 1:\n assert len(self.input_size) == 2\n else:\n self.input_size = [self.input_size, self.input_size]\n self.output_size = results['ann_info']['heatmap_size']\n\n assert isinstance(mask, list)\n assert isinstance(joints, list)\n assert len(mask) == len(joints)\n assert len(mask) == len(self.output_size), (len(mask),\n len(self.output_size),\n self.output_size)\n\n height, width = image.shape[:2]\n if self.use_udp:\n center = np.array(((width - 1.0) / 2, (height - 1.0) / 2))\n else:\n center = np.array((width / 2, height / 2))\n\n img_scale = np.array([width, height], dtype=np.float32)\n aug_scale = np.random.random() * (self.max_scale - self.min_scale) \\\n + self.min_scale\n img_scale *= aug_scale\n aug_rot = (np.random.random() * 2 - 1) * self.max_rotation\n\n if self.trans_factor > 0:\n dx = np.random.randint(-self.trans_factor * img_scale[0] / 200.0,\n self.trans_factor * img_scale[0] / 200.0)\n dy = np.random.randint(-self.trans_factor * img_scale[1] / 200.0,\n self.trans_factor * img_scale[1] / 200.0)\n\n center[0] += dx\n center[1] += dy\n if self.use_udp:\n for i, _output_size in enumerate(self.output_size):\n if not isinstance(_output_size, np.ndarray):\n _output_size = np.array(_output_size)\n if _output_size.size > 1:\n assert len(_output_size) == 2\n else:\n _output_size = [_output_size, _output_size]\n\n scale = self._get_scale(img_scale, _output_size)\n\n trans = get_warp_matrix(\n theta=aug_rot,\n size_input=center * 2.0,\n size_dst=np.array(\n (_output_size[0], _output_size[1]), dtype=np.float32) -\n 1.0,\n size_target=scale)\n mask[i] = cv2.warpAffine(\n (mask[i] * 255).astype(np.uint8),\n trans, (int(_output_size[0]), int(_output_size[1])),\n flags=cv2.INTER_LINEAR) / 255\n mask[i] = (mask[i] > 0.5).astype(np.float32)\n joints[i][:, :, 0:2] = \\\n warp_affine_joints(joints[i][:, :, 0:2].copy(), trans)\n if results['ann_info']['scale_aware_sigma']:\n joints[i][:, :, 3] = joints[i][:, :, 3] / aug_scale\n scale = self._get_scale(img_scale, self.input_size)\n mat_input = get_warp_matrix(\n theta=aug_rot,\n size_input=center * 2.0,\n 
size_dst=np.array((self.input_size[0], self.input_size[1]),\n dtype=np.float32) - 1.0,\n size_target=scale)\n image = cv2.warpAffine(\n image,\n mat_input, (int(self.input_size[0]), int(self.input_size[1])),\n flags=cv2.INTER_LINEAR)\n else:\n for i, _output_size in enumerate(self.output_size):\n if not isinstance(_output_size, np.ndarray):\n _output_size = np.array(_output_size)\n if _output_size.size > 1:\n assert len(_output_size) == 2\n else:\n _output_size = [_output_size, _output_size]\n scale = self._get_scale(img_scale, _output_size)\n mat_output = get_affine_transform(\n center=center,\n scale=scale / 200.0,\n rot=aug_rot,\n output_size=_output_size)\n mask[i] = cv2.warpAffine(\n (mask[i] * 255).astype(np.uint8), mat_output,\n (int(_output_size[0]), int(_output_size[1]))) / 255\n mask[i] = (mask[i] > 0.5).astype(np.float32)\n\n joints[i][:, :, 0:2] = \\\n warp_affine_joints(joints[i][:, :, 0:2], mat_output)\n if results['ann_info']['scale_aware_sigma']:\n joints[i][:, :, 3] = joints[i][:, :, 3] / aug_scale\n\n scale = self._get_scale(img_scale, self.input_size)\n mat_input = get_affine_transform(\n center=center,\n scale=scale / 200.0,\n rot=aug_rot,\n output_size=self.input_size)\n image = cv2.warpAffine(image, mat_input, (int(\n self.input_size[0]), int(self.input_size[1])))\n\n results['img'], results['mask'], results[\n 'joints'] = image, mask, joints\n\n return results\n\n\[email protected]_module()\nclass BottomUpGenerateHeatmapTarget:\n \"\"\"Generate multi-scale heatmap target for bottom-up.\n\n Args:\n sigma (int): Sigma of heatmap Gaussian\n max_num_people (int): Maximum number of people in an image\n use_udp (bool): To use unbiased data processing.\n Paper ref: Huang et al. The Devil is in the Details: Delving into\n Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n \"\"\"\n\n def __init__(self, sigma, use_udp=False):\n self.sigma = sigma\n self.use_udp = use_udp\n\n def _generate(self, num_joints, heatmap_size):\n \"\"\"Get heatmap generator.\"\"\"\n heatmap_generator = [\n HeatmapGenerator(output_size, num_joints, self.sigma, self.use_udp)\n for output_size in heatmap_size\n ]\n return heatmap_generator\n\n def __call__(self, results):\n \"\"\"Generate multi-scale heatmap target for bottom-up.\"\"\"\n heatmap_generator = \\\n self._generate(results['ann_info']['num_joints'],\n results['ann_info']['heatmap_size'])\n target_list = list()\n joints_list = results['joints']\n\n for scale_id in range(results['ann_info']['num_scales']):\n heatmaps = heatmap_generator[scale_id](joints_list[scale_id])\n target_list.append(heatmaps.astype(np.float32))\n results['target'] = target_list\n\n return results\n\n\[email protected]_module()\nclass BottomUpGenerateTarget:\n \"\"\"Generate multi-scale heatmap target for associate embedding.\n\n Args:\n sigma (int): Sigma of heatmap Gaussian\n max_num_people (int): Maximum number of people in an image\n use_udp (bool): To use unbiased data processing.\n Paper ref: Huang et al. 
The Devil is in the Details: Delving into\n Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n \"\"\"\n\n def __init__(self, sigma, max_num_people, use_udp=False):\n self.sigma = sigma\n self.max_num_people = max_num_people\n self.use_udp = use_udp\n\n def _generate(self, num_joints, heatmap_size):\n \"\"\"Get heatmap generator and joint encoder.\"\"\"\n heatmap_generator = [\n HeatmapGenerator(output_size, num_joints, self.sigma, self.use_udp)\n for output_size in heatmap_size\n ]\n joints_encoder = [\n JointsEncoder(self.max_num_people, num_joints, output_size, True)\n for output_size in heatmap_size\n ]\n return heatmap_generator, joints_encoder\n\n def __call__(self, results):\n \"\"\"Generate multi-scale heatmap target for bottom-up.\"\"\"\n heatmap_generator, joints_encoder = \\\n self._generate(results['ann_info']['num_joints'],\n results['ann_info']['heatmap_size'])\n target_list = list()\n mask_list, joints_list = results['mask'], results['joints']\n\n for scale_id in range(results['ann_info']['num_scales']):\n target_t = heatmap_generator[scale_id](joints_list[scale_id])\n joints_t = joints_encoder[scale_id](joints_list[scale_id])\n\n target_list.append(target_t.astype(np.float32))\n mask_list[scale_id] = mask_list[scale_id].astype(np.float32)\n joints_list[scale_id] = joints_t.astype(np.int32)\n\n results['masks'], results['joints'] = mask_list, joints_list\n results['targets'] = target_list\n\n return results\n\n\[email protected]_module()\nclass BottomUpGeneratePAFTarget:\n \"\"\"Generate multi-scale heatmaps and part affinity fields (PAF) target for\n bottom-up. Paper ref: Cao et al. Realtime Multi-Person 2D Human Pose\n Estimation using Part Affinity Fields (CVPR 2017).\n\n Args:\n limb_width (int): Limb width of part affinity fields\n \"\"\"\n\n def __init__(self, limb_width, skeleton=None):\n self.limb_width = limb_width\n self.skeleton = skeleton\n\n def _generate(self, heatmap_size, skeleton):\n \"\"\"Get PAF generator.\"\"\"\n paf_generator = [\n PAFGenerator(output_size, self.limb_width, skeleton)\n for output_size in heatmap_size\n ]\n return paf_generator\n\n def __call__(self, results):\n \"\"\"Generate multi-scale part affinity fields for bottom-up.\"\"\"\n if self.skeleton is None:\n assert results['ann_info']['skeleton'] is not None\n self.skeleton = results['ann_info']['skeleton']\n\n paf_generator = \\\n self._generate(results['ann_info']['heatmap_size'],\n self.skeleton)\n target_list = list()\n joints_list = results['joints']\n\n for scale_id in range(results['ann_info']['num_scales']):\n pafs = paf_generator[scale_id](joints_list[scale_id])\n target_list.append(pafs.astype(np.float32))\n\n results['target'] = target_list\n\n return results\n\n\[email protected]_module()\nclass BottomUpGetImgSize:\n \"\"\"Get multi-scale image sizes for bottom-up, including base_size and\n test_scale_factor. Keep the ratio and the image is resized to\n `results['ann_info']['image_size']×current_scale`.\n\n Args:\n test_scale_factor (List[float]): Multi scale\n current_scale (int): default 1\n use_udp (bool): To use unbiased data processing.\n Paper ref: Huang et al. 
The Devil is in the Details: Delving into\n Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n \"\"\"\n\n def __init__(self, test_scale_factor, current_scale=1, use_udp=False):\n self.test_scale_factor = test_scale_factor\n self.min_scale = min(test_scale_factor)\n self.current_scale = current_scale\n self.use_udp = use_udp\n\n def __call__(self, results):\n \"\"\"Get multi-scale image sizes for bottom-up.\"\"\"\n input_size = results['ann_info']['image_size']\n if not isinstance(input_size, np.ndarray):\n input_size = np.array(input_size)\n if input_size.size > 1:\n assert len(input_size) == 2\n else:\n input_size = np.array([input_size, input_size], dtype=np.int)\n img = results['img']\n\n h, w, _ = img.shape\n\n # calculate the size for min_scale\n min_input_w = _ceil_to_multiples_of(self.min_scale * input_size[0], 64)\n min_input_h = _ceil_to_multiples_of(self.min_scale * input_size[1], 64)\n if w < h:\n w_resized = int(min_input_w * self.current_scale / self.min_scale)\n h_resized = int(\n _ceil_to_multiples_of(min_input_w / w * h, 64) *\n self.current_scale / self.min_scale)\n if self.use_udp:\n scale_w = w - 1.0\n scale_h = (h_resized - 1.0) / (w_resized - 1.0) * (w - 1.0)\n else:\n scale_w = w / 200.0\n scale_h = h_resized / w_resized * w / 200.0\n else:\n h_resized = int(min_input_h * self.current_scale / self.min_scale)\n w_resized = int(\n _ceil_to_multiples_of(min_input_h / h * w, 64) *\n self.current_scale / self.min_scale)\n if self.use_udp:\n scale_h = h - 1.0\n scale_w = (w_resized - 1.0) / (h_resized - 1.0) * (h - 1.0)\n else:\n scale_h = h / 200.0\n scale_w = w_resized / h_resized * h / 200.0\n if self.use_udp:\n center = (scale_w / 2.0, scale_h / 2.0)\n else:\n center = np.array([round(w / 2.0), round(h / 2.0)])\n results['ann_info']['test_scale_factor'] = self.test_scale_factor\n results['ann_info']['base_size'] = (w_resized, h_resized)\n results['ann_info']['center'] = center\n results['ann_info']['scale'] = np.array([scale_w, scale_h])\n\n return results\n\n\[email protected]_module()\nclass BottomUpResizeAlign:\n \"\"\"Resize multi-scale size and align transform for bottom-up.\n\n Args:\n transforms (List): ToTensor & Normalize\n use_udp (bool): To use unbiased data processing.\n Paper ref: Huang et al. The Devil is in the Details: Delving into\n Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n \"\"\"\n\n def __init__(self, transforms, use_udp=False):\n self.transforms = Compose(transforms)\n if use_udp:\n self._resize_align_multi_scale = _resize_align_multi_scale_udp\n else:\n self._resize_align_multi_scale = _resize_align_multi_scale\n\n def __call__(self, results):\n \"\"\"Resize multi-scale size and align transform for bottom-up.\"\"\"\n input_size = results['ann_info']['image_size']\n if not isinstance(input_size, np.ndarray):\n input_size = np.array(input_size)\n if input_size.size > 1:\n assert len(input_size) == 2\n else:\n input_size = np.array([input_size, input_size], dtype=np.int)\n test_scale_factor = results['ann_info']['test_scale_factor']\n aug_data = []\n\n for _, s in enumerate(sorted(test_scale_factor, reverse=True)):\n _results = results.copy()\n image_resized, _, _ = self._resize_align_multi_scale(\n _results['img'], input_size, s, min(test_scale_factor))\n _results['img'] = image_resized\n _results = self.transforms(_results)\n transformed_img = _results['img'].unsqueeze(0)\n aug_data.append(transformed_img)\n\n results['ann_info']['aug_data'] = aug_data\n\n return results\n",
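HeatmapGenerator above pastes a precomputed Gaussian patch onto each keypoint's channel; the ul/br window arithmetic clips the patch wherever it overhangs the map border. A minimal NumPy sketch of that clipping for a single keypoint (map size, sigma and the keypoint location are illustrative assumptions):

    import numpy as np

    output_w, output_h, sigma = 64, 64, 2
    size = 6 * sigma + 3
    x = np.arange(0, size, 1, np.float32)
    y = x[:, None]
    x0 = y0 = 3 * sigma + 1
    g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * sigma**2))  # fixed Gaussian patch

    heatmap = np.zeros((output_h, output_w), dtype=np.float32)
    kx, ky = 2, 30  # keypoint near the left border, to exercise the clipping

    ul = int(np.round(kx - 3 * sigma - 1)), int(np.round(ky - 3 * sigma - 1))
    br = int(np.round(kx + 3 * sigma + 2)), int(np.round(ky + 3 * sigma + 2))
    c, d = max(0, -ul[0]), min(br[0], output_w) - ul[0]  # valid patch columns
    a, b = max(0, -ul[1]), min(br[1], output_h) - ul[1]  # valid patch rows
    cc, dd = max(0, ul[0]), min(br[0], output_w)         # target map columns
    aa, bb = max(0, ul[1]), min(br[1], output_h)         # target map rows
    heatmap[aa:bb, cc:dd] = np.maximum(heatmap[aa:bb, cc:dd], g[a:b, c:d])
    assert heatmap[ky, kx] == 1.0  # the peak still lands exactly on the keypoint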
"# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport tempfile\nimport warnings\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom mmcv import Config, deprecated_api_warning\n\nfrom mmpose.datasets.builder import DATASETS\nfrom ..base import Kpt2dSviewRgbImgTopDownDataset\n\n\[email protected]_module()\nclass HandCocoWholeBodyDataset(Kpt2dSviewRgbImgTopDownDataset):\n \"\"\"CocoWholeBodyDataset for top-down hand pose estimation.\n\n \"Whole-Body Human Pose Estimation in the Wild\", ECCV'2020.\n More details can be found in the `paper\n <https://arxiv.org/abs/2007.11858>`__ .\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n COCO-WholeBody Hand keypoint indexes::\n\n 0: 'wrist',\n 1: 'thumb1',\n 2: 'thumb2',\n 3: 'thumb3',\n 4: 'thumb4',\n 5: 'forefinger1',\n 6: 'forefinger2',\n 7: 'forefinger3',\n 8: 'forefinger4',\n 9: 'middle_finger1',\n 10: 'middle_finger2',\n 11: 'middle_finger3',\n 12: 'middle_finger4',\n 13: 'ring_finger1',\n 14: 'ring_finger2',\n 15: 'ring_finger3',\n 16: 'ring_finger4',\n 17: 'pinky_finger1',\n 18: 'pinky_finger2',\n 19: 'pinky_finger3',\n 20: 'pinky_finger4'\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n dataset_info (DatasetInfo): A class containing all dataset info.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n dataset_info=None,\n test_mode=False):\n\n if dataset_info is None:\n warnings.warn(\n 'dataset_info is missing. 
'\n 'Check https://github.com/open-mmlab/mmpose/pull/663 '\n 'for details.', DeprecationWarning)\n cfg = Config.fromfile(\n 'configs/_base_/datasets/coco_wholebody_hand.py')\n dataset_info = cfg._cfg_dict['dataset_info']\n\n super().__init__(\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n dataset_info=dataset_info,\n test_mode=test_mode)\n\n self.ann_info['use_different_joint_weights'] = False\n self.db = self._get_db()\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def _get_db(self):\n \"\"\"Load dataset.\"\"\"\n gt_db = []\n bbox_id = 0\n num_joints = self.ann_info['num_joints']\n for img_id in self.img_ids:\n\n ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)\n objs = self.coco.loadAnns(ann_ids)\n\n for obj in objs:\n for type in ['left', 'right']:\n if obj[f'{type}hand_valid'] and max(\n obj[f'{type}hand_kpts']) > 0:\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible = np.zeros((num_joints, 3),\n dtype=np.float32)\n\n keypoints = np.array(obj[f'{type}hand_kpts']).reshape(\n -1, 3)\n joints_3d[:, :2] = keypoints[:, :2]\n joints_3d_visible[:, :2] = np.minimum(\n 1, keypoints[:, 2:3])\n\n image_file = osp.join(self.img_prefix,\n self.id2name[img_id])\n\n gt_db.append({\n 'image_file': image_file,\n 'rotation': 0,\n 'joints_3d': joints_3d,\n 'joints_3d_visible': joints_3d_visible,\n 'dataset': self.dataset_name,\n 'bbox': obj[f'{type}hand_box'],\n 'bbox_score': 1,\n 'bbox_id': bbox_id\n })\n bbox_id = bbox_id + 1\n gt_db = sorted(gt_db, key=lambda x: x['bbox_id'])\n\n return gt_db\n\n @deprecated_api_warning(name_dict=dict(outputs='results'))\n def evaluate(self, results, res_folder=None, metric='PCK', **kwargs):\n \"\"\"Evaluate COCO-WholeBody Hand keypoint results. The pose prediction\n results will be saved in ``${res_folder}/result_keypoints.json``.\n\n Note:\n - batch_size: N\n - num_keypoints: K\n - heatmap height: H\n - heatmap width: W\n\n Args:\n results (list[dict]): Testing results containing the following\n items:\n\n - preds (np.ndarray[N,K,3]): The first two dimensions are \\\n coordinates, score is the third dimension of the array.\n - boxes (np.ndarray[N,6]): [center[0], center[1], scale[0], \\\n scale[1],area, score]\n - image_paths (list[str]): For example, ['Test/source/0.jpg']\n - output_heatmap (np.ndarray[N, K, H, W]): model outputs.\n res_folder (str, optional): The folder to save the testing\n results. 
If not specified, a temp folder will be created.\n Default: None.\n metric (str | list[str]): Metric to be performed.\n Options: 'PCK', 'AUC', 'EPE'.\n\n Returns:\n dict: Evaluation results for evaluation metric.\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCK', 'AUC', 'EPE']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n if res_folder is not None:\n tmp_folder = None\n res_file = osp.join(res_folder, 'result_keypoints.json')\n else:\n tmp_folder = tempfile.TemporaryDirectory()\n res_file = osp.join(tmp_folder.name, 'result_keypoints.json')\n\n kpts = []\n for result in results:\n preds = result['preds']\n boxes = result['boxes']\n image_paths = result['image_paths']\n bbox_ids = result['bbox_ids']\n\n batch_size = len(image_paths)\n for i in range(batch_size):\n image_id = self.name2id[image_paths[i][len(self.img_prefix):]]\n\n kpts.append({\n 'keypoints': preds[i].tolist(),\n 'center': boxes[i][0:2].tolist(),\n 'scale': boxes[i][2:4].tolist(),\n 'area': float(boxes[i][4]),\n 'score': float(boxes[i][5]),\n 'image_id': image_id,\n 'bbox_id': bbox_ids[i]\n })\n kpts = self._sort_and_unique_bboxes(kpts)\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file, metrics)\n name_value = OrderedDict(info_str)\n\n if tmp_folder is not None:\n tmp_folder.cleanup()\n\n return name_value\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmpose.core.post_processing.group import HeatmapParser\n\n\ndef test_group():\n cfg = {}\n cfg['num_joints'] = 17\n cfg['detection_threshold'] = 0.1\n cfg['tag_threshold'] = 1\n cfg['use_detection_val'] = True\n cfg['ignore_too_much'] = False\n cfg['nms_kernel'] = 5\n cfg['nms_padding'] = 2\n cfg['tag_per_joint'] = True\n cfg['max_num_people'] = 1\n parser = HeatmapParser(cfg)\n fake_heatmap = torch.zeros(1, 1, 5, 5)\n fake_heatmap[0, 0, 3, 3] = 1\n fake_heatmap[0, 0, 3, 2] = 0.8\n assert parser.nms(fake_heatmap)[0, 0, 3, 2] == 0\n fake_heatmap = torch.zeros(1, 17, 32, 32)\n fake_tag = torch.zeros(1, 17, 32, 32, 1)\n fake_heatmap[0, 0, 10, 10] = 0.8\n fake_heatmap[0, 1, 12, 12] = 0.9\n fake_heatmap[0, 4, 8, 8] = 0.8\n fake_heatmap[0, 8, 6, 6] = 0.9\n fake_tag[0, 0, 10, 10] = 0.8\n fake_tag[0, 1, 12, 12] = 0.9\n fake_tag[0, 4, 8, 8] = 0.8\n fake_tag[0, 8, 6, 6] = 0.9\n grouped, scores = parser.parse(fake_heatmap, fake_tag, True, True)\n assert grouped[0][0, 0, 0] == 10.25\n assert abs(scores[0] - 0.2) < 0.001\n cfg['tag_per_joint'] = False\n parser = HeatmapParser(cfg)\n grouped, scores = parser.parse(fake_heatmap, fake_tag, False, False)\n assert grouped[0][0, 0, 0] == 10.\n grouped, scores = parser.parse(fake_heatmap, fake_tag, False, True)\n assert grouped[0][0, 0, 0] == 10.\n\n\ndef test_group_score_per_joint():\n cfg = {}\n cfg['num_joints'] = 17\n cfg['detection_threshold'] = 0.1\n cfg['tag_threshold'] = 1\n cfg['use_detection_val'] = True\n cfg['ignore_too_much'] = False\n cfg['nms_kernel'] = 5\n cfg['nms_padding'] = 2\n cfg['tag_per_joint'] = True\n cfg['max_num_people'] = 1\n cfg['score_per_joint'] = True\n parser = HeatmapParser(cfg)\n fake_heatmap = torch.zeros(1, 1, 5, 5)\n fake_heatmap[0, 0, 3, 3] = 1\n fake_heatmap[0, 0, 3, 2] = 0.8\n assert parser.nms(fake_heatmap)[0, 0, 3, 2] == 0\n fake_heatmap = torch.zeros(1, 17, 32, 32)\n fake_tag = torch.zeros(1, 17, 32, 32, 1)\n fake_heatmap[0, 0, 10, 10] = 0.8\n fake_heatmap[0, 1, 12, 12] = 0.9\n fake_heatmap[0, 4, 8, 8] = 0.8\n fake_heatmap[0, 8, 6, 6] = 0.9\n fake_tag[0, 0, 10, 10] = 0.8\n fake_tag[0, 1, 12, 12] = 0.9\n fake_tag[0, 4, 8, 8] = 0.8\n fake_tag[0, 8, 6, 6] = 0.9\n grouped, scores = parser.parse(fake_heatmap, fake_tag, True, True)\n assert len(scores[0]) == 17\n\n\ndef test_group_ignore_too_much():\n cfg = {}\n cfg['num_joints'] = 17\n cfg['detection_threshold'] = 0.1\n cfg['tag_threshold'] = 1\n cfg['use_detection_val'] = True\n cfg['ignore_too_much'] = True\n cfg['nms_kernel'] = 5\n cfg['nms_padding'] = 2\n cfg['tag_per_joint'] = True\n cfg['max_num_people'] = 1\n cfg['score_per_joint'] = True\n parser = HeatmapParser(cfg)\n fake_heatmap = torch.zeros(1, 1, 5, 5)\n fake_heatmap[0, 0, 3, 3] = 1\n fake_heatmap[0, 0, 3, 2] = 0.8\n assert parser.nms(fake_heatmap)[0, 0, 3, 2] == 0\n fake_heatmap = torch.zeros(1, 17, 32, 32)\n fake_tag = torch.zeros(1, 17, 32, 32, 1)\n fake_heatmap[0, 0, 10, 10] = 0.8\n fake_heatmap[0, 1, 12, 12] = 0.9\n fake_heatmap[0, 4, 8, 8] = 0.8\n fake_heatmap[0, 8, 6, 6] = 0.9\n fake_tag[0, 0, 10, 10] = 0.8\n fake_tag[0, 1, 12, 12] = 0.9\n fake_tag[0, 4, 8, 8] = 0.8\n fake_tag[0, 8, 6, 6] = 2.0\n grouped, sc = parser.parse(fake_heatmap, fake_tag, True, True)\n assert len(grouped[0]) == 1\n\n cfg['ignore_too_much'] = False\n parser_noignore = HeatmapParser(cfg)\n grouped, sc = parser_noignore.parse(fake_heatmap, fake_tag, True, True)\n assert len(grouped[0]) == 2\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport logging\nimport time\nfrom queue import Full, Queue\nfrom threading import Thread\nfrom typing import List, Optional, Union\n\nimport cv2\nimport numpy as np\nfrom mmcv import color_val\n\nfrom mmpose.utils.timer import RunningAverage\nfrom .builder import NODES\nfrom .node import Node\n\ntry:\n import psutil\n psutil_proc = psutil.Process()\nexcept (ImportError, ModuleNotFoundError):\n psutil_proc = None\n\n\[email protected]_module()\nclass ModelResultBindingNode(Node):\n\n def __init__(self, name: str, frame_buffer: str, result_buffer: str,\n output_buffer: Union[str, List[str]]):\n super().__init__(name=name, enable=True)\n self.synchronous = None\n\n # Cache the latest model result\n self.last_result_msg = None\n self.last_output_msg = None\n\n # Inference speed analysis\n self.frame_fps = RunningAverage(window=10)\n self.frame_lag = RunningAverage(window=10)\n self.result_fps = RunningAverage(window=10)\n self.result_lag = RunningAverage(window=10)\n\n # Register buffers\n # The trigger buffer depends on the runner.synchronous attribute, thus\n # it will be set later in ``set_runner``.\n self.register_input_buffer(result_buffer, 'result', trigger=False)\n self.register_input_buffer(frame_buffer, 'frame', trigger=False)\n self.register_output_buffer(output_buffer)\n\n def set_runner(self, runner):\n super().set_runner(runner)\n\n # Set synchronous according to the runner\n if runner.synchronous:\n self.synchronous = True\n trigger = 'result'\n else:\n self.synchronous = False\n trigger = 'frame'\n\n # Set trigger input buffer according to the synchronous setting\n for buffer_info in self._input_buffers:\n if buffer_info.input_name == trigger:\n buffer_info.trigger = True\n\n def process(self, input_msgs):\n result_msg = input_msgs['result']\n\n # Update last result\n if result_msg is not None:\n # Update result FPS\n if self.last_result_msg is not None:\n self.result_fps.update(\n 1.0 /\n (result_msg.timestamp - self.last_result_msg.timestamp))\n # Update inference latency\n self.result_lag.update(time.time() - result_msg.timestamp)\n # Update last inference result\n self.last_result_msg = result_msg\n\n if not self.synchronous:\n # Asynchronous mode: Bind the latest result with the current frame.\n frame_msg = input_msgs['frame']\n\n self.frame_lag.update(time.time() - frame_msg.timestamp)\n\n # Bind result to frame\n if self.last_result_msg is not None:\n frame_msg.set_full_results(\n self.last_result_msg.get_full_results())\n frame_msg.merge_route_info(\n self.last_result_msg.get_route_info())\n\n output_msg = frame_msg\n\n else:\n # Synchronous mode: Directly output the frame that the model result\n # was obtained from.\n self.frame_lag.update(time.time() - result_msg.timestamp)\n output_msg = result_msg\n\n # Update frame fps and lag\n if self.last_output_msg is not None:\n self.frame_lag.update(time.time() - output_msg.timestamp)\n self.frame_fps.update(\n 1.0 / (output_msg.timestamp - self.last_output_msg.timestamp))\n self.last_output_msg = output_msg\n\n return output_msg\n\n def _get_node_info(self):\n info = super()._get_node_info()\n info['result_fps'] = self.result_fps.average()\n info['result_lag (ms)'] = self.result_lag.average() * 1000\n info['frame_fps'] = self.frame_fps.average()\n info['frame_lag (ms)'] = self.frame_lag.average() * 1000\n return info\n\n\[email protected]_module()\nclass MonitorNode(Node):\n\n _default_ignore_items = ['timestamp']\n\n def __init__(self,\n name: str,\n frame_buffer: str,\n 
output_buffer: Union[str, List[str]],\n enable_key: Optional[Union[str, int]] = None,\n enable: bool = False,\n x_offset=20,\n y_offset=20,\n y_delta=15,\n text_color='black',\n background_color=(255, 183, 0),\n text_scale=0.4,\n ignore_items: Optional[List[str]] = None):\n super().__init__(name=name, enable_key=enable_key, enable=enable)\n\n self.x_offset = x_offset\n self.y_offset = y_offset\n self.y_delta = y_delta\n self.text_color = color_val(text_color)\n self.background_color = color_val(background_color)\n self.text_scale = text_scale\n if ignore_items is None:\n self.ignore_items = self._default_ignore_items\n else:\n self.ignore_items = ignore_items\n\n self.register_input_buffer(frame_buffer, 'frame', trigger=True)\n self.register_output_buffer(output_buffer)\n\n def process(self, input_msgs):\n frame_msg = input_msgs['frame']\n\n frame_msg.update_route_info(\n node_name='System Info',\n node_type='dummy',\n info=self._get_system_info())\n\n img = frame_msg.get_image()\n route_info = frame_msg.get_route_info()\n img = self._show_route_info(img, route_info)\n\n frame_msg.set_image(img)\n return frame_msg\n\n def _get_system_info(self):\n sys_info = {}\n if psutil_proc is not None:\n sys_info['CPU(%)'] = psutil_proc.cpu_percent()\n sys_info['Memory(%)'] = psutil_proc.memory_percent()\n return sys_info\n\n def _show_route_info(self, img, route_info):\n canvas = np.full(img.shape, self.background_color, dtype=img.dtype)\n\n x = self.x_offset\n y = self.y_offset\n\n max_len = 0\n\n def _put_line(line=''):\n nonlocal y, max_len\n cv2.putText(canvas, line, (x, y), cv2.FONT_HERSHEY_DUPLEX,\n self.text_scale, self.text_color, 1)\n y += self.y_delta\n max_len = max(max_len, len(line))\n\n for node_info in route_info:\n title = f'{node_info[\"node\"]}({node_info[\"node_type\"]})'\n _put_line(title)\n for k, v in node_info['info'].items():\n if k in self.ignore_items:\n continue\n if isinstance(v, float):\n v = f'{v:.1f}'\n _put_line(f' {k}: {v}')\n\n x1 = max(0, self.x_offset)\n x2 = min(img.shape[1], int(x + max_len * self.text_scale * 20))\n y1 = max(0, self.y_offset - self.y_delta)\n y2 = min(img.shape[0], y)\n\n src1 = canvas[y1:y2, x1:x2]\n src2 = img[y1:y2, x1:x2]\n img[y1:y2, x1:x2] = cv2.addWeighted(src1, 0.5, src2, 0.5, 0)\n\n return img\n\n def bypass(self, input_msgs):\n return input_msgs['frame']\n\n\[email protected]_module()\nclass RecorderNode(Node):\n \"\"\"Record the frames into a local file.\"\"\"\n\n def __init__(\n self,\n name: str,\n frame_buffer: str,\n output_buffer: Union[str, List[str]],\n out_video_file: str,\n out_video_fps: int = 30,\n out_video_codec: str = 'mp4v',\n buffer_size: int = 30,\n ):\n super().__init__(name=name, enable_key=None, enable=True)\n\n self.queue = Queue(maxsize=buffer_size)\n self.out_video_file = out_video_file\n self.out_video_fps = out_video_fps\n self.out_video_codec = out_video_codec\n self.vwriter = None\n\n # Register buffers\n self.register_input_buffer(frame_buffer, 'frame', trigger=True)\n self.register_output_buffer(output_buffer)\n\n # Start a new thread to write frame\n self.t_record = Thread(target=self._record, args=(), daemon=True)\n self.t_record.start()\n\n def process(self, input_msgs):\n\n frame_msg = input_msgs['frame']\n img = frame_msg.get_image() if frame_msg is not None else None\n img_queued = False\n\n while not img_queued:\n try:\n self.queue.put(img, timeout=1)\n img_queued = True\n logging.info(f'{self.name}: recorder received one frame!')\n except Full:\n logging.info(f'{self.name}: recorder jamed!')\n\n 
return frame_msg\n\n def _record(self):\n\n while True:\n\n img = self.queue.get()\n\n if img is None:\n break\n\n if self.vwriter is None:\n fourcc = cv2.VideoWriter_fourcc(*self.out_video_codec)\n fps = self.out_video_fps\n frame_size = (img.shape[1], img.shape[0])\n self.vwriter = cv2.VideoWriter(self.out_video_file, fourcc,\n fps, frame_size)\n assert self.vwriter.isOpened()\n\n self.vwriter.write(img)\n\n logging.info('Video recorder released!')\n if self.vwriter is not None:\n self.vwriter.release()\n\n def on_exit(self):\n try:\n # Try putting a None into the output queue so the self.vwriter will\n # be released after all queue frames have been written to file.\n self.queue.put(None, timeout=1)\n self.t_record.join(timeout=1)\n except Full:\n pass\n\n if self.t_record.is_alive():\n # Force to release self.vwriter\n logging.info('Video recorder forced release!')\n if self.vwriter is not None:\n self.vwriter.release()\n"
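RecorderNode above decouples frame delivery from disk I/O: process() pushes each frame into a bounded Queue, a daemon thread drains it into the video writer, and on_exit() enqueues a None sentinel to shut the writer down. A stripped-down sketch of that producer/consumer pattern (no OpenCV; stand-in strings are collected into a list instead of a video file):

    from queue import Queue
    from threading import Thread

    written = []
    queue = Queue(maxsize=30)  # bounded, like RecorderNode's buffer_size

    def _record():
        while True:
            item = queue.get()
            if item is None:  # sentinel: stop after all queued frames are written
                break
            written.append(item)

    t = Thread(target=_record, daemon=True)
    t.start()

    for i in range(5):
        queue.put(f'frame-{i}', timeout=1)  # producer side, cf. process()

    queue.put(None, timeout=1)  # request shutdown, cf. on_exit()
    t.join(timeout=1)
    assert written == [f'frame-{i}' for i in range(5)]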
] | [
[
"numpy.diagflat",
"numpy.full",
"numpy.concatenate",
"numpy.ones",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"numpy.load",
"numpy.array",
"numpy.zeros",
"numpy.testing.assert_array_almost_equal"
],
[
"torch.nn.ModuleList",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.interpolate"
],
[
"numpy.maximum",
"numpy.abs",
"numpy.meshgrid",
"numpy.random.random",
"numpy.arange",
"numpy.linalg.norm",
"numpy.round",
"numpy.ceil",
"numpy.zeros_like",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.array",
"numpy.zeros",
"numpy.minimum"
],
[
"torch.zeros"
],
[
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xingjiepan/alpha_helix_generator | [
"2b35691b790e6363d5c4897a72c3efa8556d0143",
"2b35691b790e6363d5c4897a72c3efa8556d0143"
] | [
"ss_generator/basic.py",
"tests/test_ca_tracing_alpha_helix.py"
] | [
"import numpy as np\n\nfrom . import geometry\n\n\ndef transform_residue(residue, M, t):\n '''Transform a residue by a rotation M and a\n translation t. Return the transformed residue.\n '''\n new_res = {}\n\n for key in residue.keys():\n new_res[key] = np.dot(M, residue[key]) + t\n\n return new_res\n\ndef transform_residue_list(res_list, M, t):\n '''Transform a residue list by a rotation M and a\n translation t. Return the new list.\n '''\n return [transform_residue(res, M, t) for res in res_list]\n\ndef get_phi(strand, res_id):\n '''Get the phi torsions of a residue.'''\n return geometry.dihedral(strand[res_id - 1]['c'], strand[res_id]['n'],\n strand[res_id]['ca'], strand[res_id]['c'])\n\ndef get_psi(strand, res_id):\n '''Get the psi torsions of a residue.'''\n return geometry.dihedral(strand[res_id]['n'], strand[res_id]['ca'],\n strand[res_id]['c'], strand[res_id + 1]['n'])\n\ndef change_torsions(strand, res_id, phi, psi):\n '''Change the phi, psi angles of a residue in\n a strand. The input torsions should be in radians.\n '''\n\n # Rotate the psi torsion\n\n if 0 <= res_id < len(strand) - 1:\n \n psi_old = geometry.dihedral(strand[res_id]['n'], strand[res_id]['ca'], \n strand[res_id]['c'], strand[res_id + 1]['n'])\n\n # Get the rotation matrix\n\n axis = strand[res_id]['c'] - strand[res_id]['ca']\n M = geometry.rotation_matrix_from_axis_and_angle(axis, psi - psi_old)\n t = strand[res_id]['ca'] - np.dot(M, strand[res_id]['ca'])\n\n # Rotate subsequent atoms\n\n strand[res_id]['o'] = np.dot(M, strand[res_id]['o']) + t\n\n for i in range(res_id + 1, len(strand)):\n strand[i] = transform_residue(strand[i], M, t)\n\n # Rotate the phi torsion\n \n if 0 < res_id < len(strand):\n\n phi_old = geometry.dihedral(strand[res_id - 1]['c'], strand[res_id]['n'],\n strand[res_id]['ca'], strand[res_id]['c'])\n\n # Get the rotation matrix\n\n axis = strand[res_id]['ca'] - strand[res_id]['n']\n M = geometry.rotation_matrix_from_axis_and_angle(axis, phi - phi_old)\n t = strand[res_id]['ca'] - np.dot(M, strand[res_id]['ca'])\n\n # Rotate subsequent residues\n\n for key in strand[res_id].keys():\n if key != 'h':\n strand[res_id][key] = np.dot(M, strand[res_id][key]) + t\n\n for i in range(res_id + 1, len(strand)):\n strand[i] = transform_residue(strand[i], M, t)\n\ndef get_hb_co_coord_from_nh(n, h):\n '''Get the ideal coordinates of C and O from\n coordinates of N and H.\n '''\n v = geometry.normalize(h - n)\n return (n + 3.9 * v, n + 2.7 * v)\n\ndef get_hb_nh_coord_from_co(c, o):\n '''Get the ideal coordinates of N and H from\n coordinates of C and O.\n '''\n v = geometry.normalize(o - c)\n return (c + 3.9 * v, c + 2.9 * v)\n\ndef get_peptide_bond_transformation(phi, psi):\n '''Get the rotation matrix and translation vector of a peptide bond\n corresponding to a pair of phi psi torsions. 
The reference frame\n is built on the C, N and CA atoms.\n '''\n # Set the parameters\n\n n_ca_length = 1.47\n ca_c_length = 1.53\n c_n_length = 1.32\n n_ca_c_angle = np.radians(111.2)\n ca_c_n_angle = np.radians(114)\n c_n_ca_angle = np.radians(123)\n omega = np.pi\n\n # Get the coordinates\n\n c1 = c_n_length * np.array([np.sin(c_n_ca_angle), np.cos(c_n_ca_angle), 0])\n n1 = np.array([0, 0, 0])\n ca1 = np.array([0, n_ca_length, 0])\n c2 = geometry.cartesian_coord_from_internal_coord(c1, n1, ca1, \n ca_c_length, n_ca_c_angle, phi)\n n2 = geometry.cartesian_coord_from_internal_coord(n1, ca1, c2,\n c_n_length, ca_c_n_angle, psi)\n ca2 = geometry.cartesian_coord_from_internal_coord(ca1, c2, n2,\n n_ca_length, c_n_ca_angle, omega)\n\n # Get the transformation\n\n return np.transpose(geometry.create_frame_from_three_points(c2, n2, ca2)), n2 - n1\n",
"#!/usr/bin/env python3\n\nimport pytest\nimport numpy as np\nnp.seterr(all='raise')\n\nimport ss_generator as ssg\n\n\ndef test_transformations():\n print(\"test transformations.\")\n mean_theta = np.radians(91.8)\n std_theta = np.radians(3.35)\n mean_tau = np.radians(49.5)\n std_tau = np.radians(7.1)\n\n coef = [-1, 0, 1]\n\n for c1 in coef:\n for c2 in coef:\n theta = mean_theta + c1 * std_theta\n tau = mean_tau + c2 * std_tau\n\n axis, xi = ssg.geometry.rotation_matrix_to_axis_and_angle(\n ssg.ca_tracing.alpha_helix.theta_tau_to_rotation_matrix(theta, tau))\n\n c_theta, c_tau = ssg.ca_tracing.alpha_helix.axis_to_theta_tau(axis)\n\n print(\"theta = {0:.2f}\\ttau = {1:.2f}\\txi = {2:.2f}\\taxis = {3}\\tc_theta = {4:.2f}\\tc_tau = {5:.2f}\".format(\n np.degrees(theta), np.degrees(tau), np.degrees(xi), axis, np.degrees(c_theta), np.degrees(c_tau)))\n\ndef test_build_nexus():\n print(\"test build nexus.\")\n\n theta = np.radians(91.8)\n tau = np.radians(49.5)\n\n axis = ssg.geometry.rotation_matrix_to_axis_and_angle(\n ssg.ca_tracing.alpha_helix.theta_tau_to_rotation_matrix(theta, tau))[0]\n\n c_theta, c_tau = ssg.ca_tracing.alpha_helix.theta_tau_for_nexus(axis, axis)\n\n print(\"theta = {0:.2f}\\ttau = {1:.2f}\\taxis = {2}\\tc_theta = {3:.2f}\\tc_tau = {4:.2f}\".format(\n np.degrees(theta), np.degrees(tau), axis, np.degrees(c_theta), np.degrees(c_tau)))\n\ndef test_generate_alpha_helix():\n print(\"test generating alpha helices.\")\n\n #ds = 100 * [3.81]\n #thetas = 99 * [np.radians(91.8)]\n #taus = 98 * [np.radians(49.5)]\n\n #ca_list = ssg.ca_tracing.basic.generate_segment_from_internal_coordinates(ds, thetas, taus)\n #ssg.IO.save_ca_list(ca_list, \"straight_helix.pdb\")\n\n #ca_list = ssg.ca_tracing.basic.generate_segment_from_internal_coordinates(\n # ds, thetas + np.radians(3.35) * np.random.uniform(-1, 1, 99), taus + np.radians(7.1) * np.random.uniform(-1, 1, 98))\n #ssg.IO.save_ca_list(ca_list, \"random_helix.pdb\")\n\n #screw_axes = [np.array([0, 0, 1])] * 20\n #ca_list = ssg.ca_tracing.alpha_helix.generate_alpha_helix_from_screw_axes(screw_axes)\n #ssg.IO.save_ca_list(ca_list, \"z_helix.pdb\")\n\n #screw_axes = [np.array([0, 0, 1])]\n #for i in range(100):\n # screw_axes.append(ssg.geometry.normalize(screw_axes[i] + 0.001 * np.array([np.random.normal(), np.random.normal(), np.random.normal()])))\n\n #ca_list = ssg.ca_tracing.alpha_helix.generate_alpha_helix_from_screw_axes(screw_axes)\n #ssg.IO.save_ca_list(ca_list, \"random_screws.pdb\")\n \n ca_list = ssg.ca_tracing.alpha_helix.generate_super_coil(np.array([0, 0, 1]), np.radians(-3.6), np.radians(12), 1000)\n ssg.IO.save_ca_list(ca_list, \"super_coil.pdb\")\n\ndef test_perturb_alpha_helix():\n print(\"test perturb alpha helices.\")\n \n ds = 100 * [3.81]\n thetas = 99 * [np.radians(91.8)]\n taus = 98 * [np.radians(49.5)]\n\n ca_list_before = ssg.ca_tracing.basic.generate_segment_from_internal_coordinates(ds, thetas, taus)\n for ca in ca_list_before:\n ca += np.array([10, 0, 0])\n \n ssg.IO.save_ca_list(ca_list_before, \"helix_before_perturb.pdb\")\n ca_list_before_bb = ssg.ca_tracing.alpha_helix.thread_backbone_for_helix(ca_list_before)\n ssg.IO.save_residue_list(ca_list_before_bb, 'helix_before_perturb_bb.pdb')\n \n random_perturbed_ca_list = ssg.ca_tracing.alpha_helix.randomize_a_helix(ca_list_before, 0.1)\n ssg.IO.save_ca_list(random_perturbed_ca_list, \"helix_random_perturb.pdb\")\n random_perturbed_ca_list_bb = ssg.ca_tracing.alpha_helix.thread_backbone_for_helix(random_perturbed_ca_list)\n 
ssg.IO.save_residue_list(random_perturbed_ca_list_bb, 'helix_random_perturb_bb.pdb')\n\n phase_shifted_ca_list = ssg.ca_tracing.alpha_helix.shift_helix_phase(random_perturbed_ca_list, np.pi)\n ssg.IO.save_ca_list(phase_shifted_ca_list, \"helix_phase_shifted.pdb\")\n phase_shifted_ca_list_bb = ssg.ca_tracing.alpha_helix.thread_backbone_for_helix(phase_shifted_ca_list)\n ssg.IO.save_residue_list(phase_shifted_ca_list_bb, 'helix_phase_shifted_bb.pdb')\n\n ca_to_twist = random_perturbed_ca_list\n twisted_ca_list = ssg.ca_tracing.alpha_helix.twist_helix(ca_to_twist, ca_to_twist[-1] - ca_to_twist[0], np.radians(12), np.radians(-3.6), 0.5)\n ssg.IO.save_ca_list(twisted_ca_list, \"helix_twisted.pdb\")\n twisted_ca_list_bb = ssg.ca_tracing.alpha_helix.thread_backbone_for_helix(twisted_ca_list)\n ssg.IO.save_residue_list(twisted_ca_list_bb, 'helix_twisted_ca_list_bb.pdb')\n\ndef test_thread_bb():\n print(\"test thread bb.\")\n \n n = 100\n ds = n * [3.81]\n thetas = (n - 1) * [np.radians(91.8)]\n taus = (n - 2) * [np.radians(49.5)]\n\n ca_list = ssg.ca_tracing.basic.generate_segment_from_internal_coordinates(ds, thetas, taus)\n ssg.IO.save_ca_list(ca_list, 'straight_helix.pdb')\n res_list = ssg.ca_tracing.alpha_helix.thread_backbone_for_helix(ca_list)\n ssg.IO.save_residue_list(res_list, 'straight_helix_bb.pdb')\n"
] | [
[
"numpy.dot",
"numpy.radians",
"numpy.cos",
"numpy.sin",
"numpy.array"
],
[
"numpy.seterr",
"numpy.array",
"numpy.radians",
"numpy.degrees"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
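The apis cells above list fully-qualified call names, while the possible_versions cells key on top-level packages (matplotlib, numpy, pandas, scipy, tensorflow). A hedged sketch of the grouping step that connects the two; the tracked-package set is read off the dicts in this dump and may not be exhaustive:

from collections import defaultdict

# Packages the possible_versions cells key on in this dump.
TRACKED = {"matplotlib", "numpy", "pandas", "scipy", "tensorflow"}

def group_by_package(apis):
    # "numpy.radians" -> bucket "numpy"; order within a bucket is preserved.
    grouped = defaultdict(list)
    for name in apis:
        grouped[name.split(".")[0]].append(name)
    return dict(grouped)

# The tests/test_ca_tracing_alpha_helix.py cell above:
apis = ["numpy.seterr", "numpy.array", "numpy.radians", "numpy.degrees"]
assert group_by_package(apis) == {"numpy": apis}
assert group_by_package(apis).keys() & TRACKED == {"numpy"}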
mori0711/XenonPy | [
"e36ca0ea112b45ee629cd980c88e80cd6c96c514",
"e36ca0ea112b45ee629cd980c88e80cd6c96c514",
"e36ca0ea112b45ee629cd980c88e80cd6c96c514"
] | [
"xenonpy/mdl/base.py",
"tests/models/test_extension.py",
"xenonpy/model/utils/metrics.py"
] | [
"# Copyright (c) 2021. yoshida-lab. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\nimport json\nfrom os import remove\nfrom pathlib import Path\nfrom shutil import make_archive\n\nimport pandas as pd\nimport requests\nfrom requests import HTTPError\nfrom sklearn.base import BaseEstimator\n\nfrom xenonpy.utils import TimedMetaClass\n\n\nclass BaseQuery(BaseEstimator, metaclass=TimedMetaClass):\n queryable = None\n\n def __init__(self, variables, *, api_key: str = 'anonymous.user.key', endpoint: str = 'http://xenon.ism.ac.jp/api'):\n if self.queryable is None:\n raise RuntimeError('Query class must give a queryable field in list of string')\n\n self._results = None\n self._return_json = False\n self._endpoint = endpoint\n self._api_key = api_key\n self._variables = variables\n\n @property\n def api_key(self):\n return self._api_key\n\n @property\n def endpoint(self):\n return self._endpoint\n\n @property\n def variables(self):\n return self._variables\n\n @property\n def results(self):\n return self._results\n\n def gql(self, *query_vars: str):\n raise NotImplementedError()\n\n @staticmethod\n def _post(ret, return_json):\n if return_json:\n return ret\n\n if not isinstance(ret, list):\n ret = [ret]\n ret = pd.DataFrame(ret)\n return ret\n\n def check_query_vars(self, *query_vars: str):\n if not set(query_vars) <= set(self.queryable):\n raise RuntimeError(f'`query_vars` contains illegal variables, '\n f'available querying variables are: {self.queryable}')\n return query_vars\n\n def __call__(self, *querying_vars, file=None, return_json=None):\n if len(querying_vars) == 0:\n query = self.gql(*self.queryable)\n else:\n query = self.gql(*self.check_query_vars(*querying_vars))\n\n payload = json.dumps({'query': query, 'variables': self._variables})\n\n if file is None:\n ret = requests.post(url=self._endpoint,\n data=payload,\n headers={\n \"content-type\": \"application/json\",\n 'api_key': self._api_key\n })\n else:\n file = Path(file).resolve()\n file = make_archive(str(file), 'gztar', str(file))\n operations = ('operations', payload)\n maps = ('map', json.dumps({0: ['variables.model']}))\n payload_tuples = (operations, maps)\n files = {'0': open(file, 'rb')}\n try:\n ret = requests.post(url=self._endpoint,\n data=payload_tuples,\n headers={'api_key': self._api_key},\n files=files)\n finally:\n files['0'].close()\n remove(file)\n\n if ret.status_code != 200:\n try:\n message = ret.json()\n except json.JSONDecodeError:\n message = \"Server did not responce.\"\n\n raise HTTPError('status_code: %s, %s' % (ret.status_code, message))\n ret = ret.json()\n if 'errors' in ret:\n raise ValueError(ret['errors'][0]['message'])\n query_name = self.__class__.__name__\n ret = ret['data'][query_name[0].lower() + query_name[1:]]\n\n if not ret:\n return None\n\n if return_json is None:\n return_json = self._return_json\n\n ret = self._post(ret, return_json)\n self._results = ret\n return ret\n\n def __repr__(self, N_CHAR_MAX=700):\n queryable = '\\n '.join(self.queryable)\n return f'{super().__repr__(N_CHAR_MAX)}\\nQueryable: \\n {queryable}'\n",
"# Copyright (c) 2021. TsumiNa. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom scipy.special import softmax\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom shutil import rmtree\n\nimport torch\nimport os\n\nfrom xenonpy.model import SequentialLinear\nfrom xenonpy.model.training import Trainer\nfrom xenonpy.model.training.base import BaseExtension, BaseRunner\nfrom xenonpy.model.training.extension import TensorConverter, Validator, Persist\nfrom xenonpy.model.utils import regression_metrics, classification_metrics\n\n\[email protected](scope='module')\ndef data():\n # ignore numpy warning\n import warnings\n print('ignore NumPy RuntimeWarning\\n')\n warnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ndarray size changed\")\n\n dir_ = os.path.dirname(os.path.abspath(__file__))\n\n yield\n\n try:\n rmtree(str(Path('.').resolve() / 'test_model'))\n except:\n pass\n try:\n rmtree(str(Path('.').resolve() / 'test_model@1'))\n except:\n pass\n try:\n rmtree(str(Path('.').resolve() / 'test_model_1'))\n except:\n pass\n try:\n rmtree(str(Path('.').resolve() / 'test_model_2'))\n except:\n pass\n try:\n rmtree(str(Path('.').resolve() / 'test_model_3'))\n except:\n pass\n try:\n rmtree(str(Path('.').resolve() / Path(os.getcwd()).name))\n except:\n pass\n\n print('test over')\n\n\ndef test_base_runner_1():\n ext = BaseExtension()\n x, y = 1, 2\n assert ext.input_proc(x, y) == (x, y)\n assert ext.output_proc(y, None) == (y, None)\n\n x, y = (1,), 2\n assert ext.input_proc(x, y) == (x, y)\n assert ext.output_proc(y, None) == (y, None)\n\n x, y = (1,), (2,)\n assert ext.input_proc(x, y) == (x, y)\n assert ext.output_proc(y, y) == (y, y)\n\n\ndef test_tensor_converter_1():\n\n class _Trainer(BaseRunner):\n\n def __init__(self):\n super().__init__()\n self.non_blocking = False\n\n def predict(self, x_, y_): # noqa\n return x_, y_\n\n trainer = _Trainer()\n arr_1 = [1, 2, 3]\n np_1 = np.asarray(arr_1)\n se_1 = pd.Series(arr_1)\n pd_1 = pd.DataFrame(arr_1)\n np_ = np.asarray([arr_1, arr_1])\n pd_ = pd.DataFrame(np_)\n tensor_ = torch.Tensor(np_)\n\n # test auto reshape; #189\n converter = TensorConverter(auto_reshape=False)\n x, y = converter.input_proc(np_1, None, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (3,)\n\n x, y = converter.input_proc(se_1, None, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (3,)\n\n x, y = converter.input_proc(pd_1, None, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (3, 1)\n\n converter = TensorConverter()\n x, y = converter.input_proc(np_1, None, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (3, 1)\n\n x, y = converter.input_proc(se_1, None, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (3, 1)\n\n x, y = converter.input_proc(pd_1, None, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (3, 1)\n\n # normal tests\n x, y = converter.input_proc(np_, None, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (2, 3)\n assert torch.equal(x, tensor_)\n assert y is None\n\n x, y = converter.input_proc(pd_, None, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (2, 3)\n assert 
torch.equal(x, tensor_)\n assert y is None\n\n x, y = converter.input_proc(tensor_, None, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (2, 3)\n assert torch.equal(x, tensor_)\n assert y is None\n\n x, y = converter.input_proc(np_, np_, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (2, 3)\n assert torch.equal(x, tensor_)\n assert torch.equal(y, tensor_)\n\n x, y = converter.input_proc(pd_, pd_, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (2, 3)\n assert torch.equal(x, tensor_)\n assert torch.equal(y, tensor_)\n\n x, y = converter.input_proc(tensor_, tensor_, trainer=trainer) # noqa\n assert isinstance(x, torch.Tensor)\n assert x.shape == (2, 3)\n assert torch.equal(x, tensor_)\n assert torch.equal(y, tensor_)\n\n converter = TensorConverter(x_dtype=torch.long)\n x, y = converter.input_proc((np_, np_), np_, trainer=trainer) # noqa\n assert isinstance(x, tuple)\n assert len(x) == 2\n assert x[0].dtype == torch.long\n assert x[1].dtype == torch.long\n\n converter = TensorConverter(x_dtype=(torch.long, torch.float32), y_dtype=torch.long)\n x, y = converter.input_proc((np_, np_), np_, trainer=trainer) # noqa\n assert isinstance(x, tuple)\n assert len(x) == 2\n assert x[0].dtype == torch.long\n assert x[1].dtype == torch.float32\n assert y.dtype == torch.long\n\n converter = TensorConverter(x_dtype=(torch.long, torch.float32))\n x, y = converter.input_proc((pd_, pd_), pd_, trainer=trainer) # noqa\n assert isinstance(x, tuple)\n assert len(x) == 2\n assert x[0].dtype == torch.long\n assert x[1].dtype == torch.float32\n\n # for tensor input, dtype change will never be executed\n converter = TensorConverter(x_dtype=(torch.long, torch.long))\n x, y = converter.input_proc((tensor_, tensor_), tensor_, trainer=trainer) # noqa\n assert isinstance(x, tuple)\n assert len(x) == 2\n assert x[0].dtype == torch.float32\n assert x[1].dtype == torch.float32\n\n\ndef test_tensor_converter_2():\n\n class _Trainer(BaseRunner):\n\n def __init__(self):\n super().__init__()\n self.non_blocking = False\n\n def predict(self, x_, y_):\n return x_, y_\n\n trainer = _Trainer()\n converter = TensorConverter()\n np_ = np.asarray([[1, 2, 3], [4, 5, 6]])\n pd_ = pd.DataFrame(np_)\n tensor_ = torch.Tensor(np_) # noqa\n\n x, y = converter.input_proc(np_, np_[0], trainer=trainer) # noqa\n assert isinstance(y, torch.Tensor)\n assert y.shape == (3, 1)\n assert torch.equal(y, tensor_[0].unsqueeze(-1))\n\n x, y = converter.input_proc(pd_, pd_.iloc[0], trainer=trainer) # noqa\n assert isinstance(y, torch.Tensor)\n assert y.shape == (3, 1)\n assert torch.equal(y, tensor_[0].unsqueeze(-1))\n\n x, y = converter.input_proc(tensor_, tensor_[0], trainer=trainer) # noqa\n assert isinstance(y, torch.Tensor)\n assert y.shape == (3,)\n assert torch.equal(y, tensor_[0])\n\n\ndef test_tensor_converter_3():\n np_ = np.asarray([[1, 2, 3], [4, 5, 6]])\n tensor_ = torch.from_numpy(np_)\n\n converter = TensorConverter()\n y, y_ = converter.output_proc(tensor_, None, training=True)\n assert y_ is None\n assert isinstance(y, torch.Tensor)\n assert y.shape == (2, 3)\n assert torch.equal(y, tensor_)\n\n y, y_ = converter.output_proc(tensor_, tensor_, training=True)\n assert isinstance(y, torch.Tensor)\n assert isinstance(y_, torch.Tensor)\n assert y.equal(y_)\n assert y.shape == (2, 3)\n assert torch.equal(y, tensor_)\n\n y, _ = converter.output_proc((tensor_,), None, training=True)\n assert isinstance(y, tuple)\n assert isinstance(y[0], 
torch.Tensor)\n assert torch.equal(y[0], tensor_)\n\n y, y_ = converter.output_proc(tensor_, tensor_, training=False)\n assert isinstance(y, np.ndarray)\n assert isinstance(y_, np.ndarray)\n assert np.all(y == y_)\n assert y.shape == (2, 3)\n assert np.all(y == tensor_.numpy())\n\n y, _ = converter.output_proc((tensor_,), None, training=False)\n assert isinstance(y, tuple)\n assert isinstance(y[0], np.ndarray)\n assert np.all(y[0] == tensor_.numpy())\n\n converter = TensorConverter(argmax=True)\n y, y_ = converter.output_proc(tensor_, tensor_, training=False)\n assert isinstance(y, np.ndarray)\n assert isinstance(y_, np.ndarray)\n assert y.shape == (2,)\n assert y_.shape == (2, 3)\n assert np.all(y == np.argmax(np_, 1))\n\n y, y_ = converter.output_proc((tensor_, tensor_), None, training=False)\n assert isinstance(y, tuple)\n assert y_ is None\n assert y[0].shape == (2,)\n assert y[0].shape == y[1].shape\n assert np.all(y[0] == np.argmax(np_, 1))\n\n converter = TensorConverter(probability=True)\n y, y_ = converter.output_proc(tensor_, tensor_, training=False)\n assert isinstance(y, np.ndarray)\n assert isinstance(y_, np.ndarray)\n assert y.shape == (2, 3)\n assert y_.shape == (2, 3)\n assert np.all(y == softmax(np_, 1))\n\n y, y_ = converter.output_proc((tensor_, tensor_), None, training=False)\n assert isinstance(y, tuple)\n assert y_ is None\n assert y[0].shape == (2, 3)\n assert y[0].shape == y[1].shape\n assert np.all(y[0] == softmax(np_, 1))\n\n\ndef test_validator_1():\n x = np.random.randn(100) # input\n y = x + np.random.rand() * 0.001 # true values\n\n class _Trainer(BaseRunner):\n\n def __init__(self):\n super().__init__()\n self.x_val = x\n self.y_val = y\n self.loss_type = 'train_loss'\n\n def predict(self, x_, y_):\n return x_, y_\n\n val = Validator('regress', each_iteration=False)\n\n step_info = OrderedDict(train_loss=0, i_epoch=0)\n val.step_forward(trainer=_Trainer(), step_info=step_info) # noqa\n assert 'val_mae' not in step_info\n\n step_info = OrderedDict(train_loss=0, i_epoch=1)\n val.step_forward(trainer=_Trainer(), step_info=step_info) # noqa\n assert step_info['val_mae'] == regression_metrics(y, x)['mae']\n assert set(step_info.keys()) == {\n 'i_epoch', 'val_mae', 'val_mse', 'val_rmse', 'val_r2', 'val_pearsonr', 'val_spearmanr',\n 'val_p_value', 'val_max_ae', 'train_loss'\n }\n\n\ndef test_validator_2():\n y = np.random.randint(3, size=10) # true labels\n x = np.zeros((10, 3)) # input\n for i, j in enumerate(y):\n x[i, j] = 1\n\n class _Trainer(BaseRunner):\n\n def __init__(self):\n super().__init__()\n self.x_val = x\n self.y_val = y\n self.loss_type = 'train_loss'\n\n def predict(self, x_, y_): # noqa\n return x_, y_\n\n val = Validator('classify', each_iteration=False)\n\n step_info = OrderedDict(train_loss=0, i_epoch=0)\n val.step_forward(trainer=_Trainer(), step_info=step_info) # noqa\n assert 'val_f1' not in step_info\n\n step_info = OrderedDict(train_loss=0, i_epoch=1)\n val.step_forward(trainer=_Trainer(), step_info=step_info) # noqa\n assert step_info['val_f1'] == classification_metrics(y, x)['f1']\n assert set(step_info.keys()) == {\n 'i_epoch', 'val_accuracy', 'val_f1', 'val_precision', 'val_recall', 'val_macro_f1',\n 'val_macro_precision', 'val_macro_recall', 'train_loss'\n }\n\n\ndef test_persist_1(data):\n\n class _Trainer(BaseRunner):\n\n def __init__(self):\n super().__init__()\n self.model = SequentialLinear(50, 2)\n\n def predict(self, x_, y_): # noqa\n return x_, y_\n\n p = Persist()\n\n with pytest.raises(ValueError, match='can not access 
property `path` before training'):\n p.path\n\n p.before_proc(trainer=_Trainer())\n assert p.path == str(Path('.').resolve() / Path(os.getcwd()).name)\n with pytest.raises(ValueError, match='can not reset property `path` after training'):\n p.path = 'aa'\n\n p = Persist('test_model')\n p.before_proc(trainer=_Trainer())\n assert p.path == str(Path('.').resolve() / 'test_model')\n assert (Path('.').resolve() / 'test_model' / 'describe.pkl.z').exists()\n assert (Path('.').resolve() / 'test_model' / 'init_state.pth.s').exists()\n assert (Path('.').resolve() / 'test_model' / 'model.pth.m').exists()\n assert (Path('.').resolve() / 'test_model' / 'model_structure.pkl.z').exists()\n\n p = Persist('test_model', increment=True)\n p.before_proc(trainer=_Trainer())\n assert p.path == str(Path('.').resolve() / 'test_model@1')\n assert (Path('.').resolve() / 'test_model@1' / 'describe.pkl.z').exists()\n assert (Path('.').resolve() / 'test_model@1' / 'init_state.pth.s').exists()\n assert (Path('.').resolve() / 'test_model@1' / 'model.pth.m').exists()\n assert (Path('.').resolve() / 'test_model@1' / 'model_structure.pkl.z').exists()\n\n\ndef test_persist_save_checkpoints(data):\n\n class _Trainer(BaseRunner):\n\n def __init__(self):\n super().__init__()\n self.model = SequentialLinear(50, 2)\n\n def predict(self, x_, y_): # noqa\n return x_, y_\n\n cp_1 = Trainer.checkpoint_tuple(\n id='cp_1',\n iterations=111,\n model_state=SequentialLinear(50, 2).state_dict(),\n )\n cp_2 = Trainer.checkpoint_tuple(\n id='cp_2',\n iterations=111,\n model_state=SequentialLinear(50, 2).state_dict(),\n )\n\n # save checkpoint\n p = Persist('test_model_1', increment=False, only_best_states=False)\n p.before_proc(trainer=_Trainer())\n p.on_checkpoint(cp_1, trainer=_Trainer())\n p.on_checkpoint(cp_2, trainer=_Trainer())\n assert (Path('.').resolve() / 'test_model_1' / 'checkpoints' / 'cp_1.pth.s').exists()\n assert (Path('.').resolve() / 'test_model_1' / 'checkpoints' / 'cp_2.pth.s').exists()\n\n # reduced save checkpoint\n p = Persist('test_model_2', increment=False, only_best_states=True)\n p.before_proc(trainer=_Trainer())\n p.on_checkpoint(cp_1, trainer=_Trainer())\n p.on_checkpoint(cp_2, trainer=_Trainer())\n assert (Path('.').resolve() / 'test_model_2' / 'checkpoints' / 'cp.pth.s').exists()\n assert not (Path('.').resolve() / 'test_model_2' / 'checkpoints' / 'cp_1.pth.s').exists()\n assert not (Path('.').resolve() / 'test_model_2' / 'checkpoints' / 'cp_2.pth.s').exists()\n\n # no checkpoint will be saved\n p = Persist('test_model_3', increment=False, only_best_states=True)\n p.before_proc(trainer=_Trainer())\n p.on_checkpoint(cp_2, trainer=_Trainer())\n assert not (Path('.').resolve() / 'test_model_3' / 'checkpoints' / 'cp.pth.s').exists()\n assert not (Path('.').resolve() / 'test_model_3' / 'checkpoints' / 'cp_2.pth.s').exists()\n\n\nif __name__ == \"__main__\":\n pytest.main()\n",
"# Copyright (c) 2021. TsumiNa. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\nfrom collections import OrderedDict\nfrom typing import Union\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import pearsonr, spearmanr\nfrom sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error, max_error\nfrom sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score\n\n__all__ = ['regression_metrics', 'classification_metrics']\n\n\ndef regression_metrics(y_true: Union[np.ndarray, pd.Series],\n y_pred: Union[np.ndarray, pd.Series]) -> OrderedDict:\n \"\"\"\n Calculate most common regression scores.\n See Also: https://scikit-learn.org/stable/modules/model_evaluation.html\n \n Parameters\n ----------\n y_true\n True results.\n y_pred\n Predicted results.\n \n Returns\n -------\n OrderedDict\n An :class:`collections.OrderedDict` contains regression scores.\n These scores will be calculated: ``mae``, ``mse``, ``rmse``, ``r2``,\n ``pearsonr``, ``spearmanr``, ``p_value``, and ``max_ae``\n \"\"\"\n if len(y_true.shape) != 1:\n y_true = y_true.flatten()\n if len(y_pred.shape) != 1:\n y_pred = y_pred.flatten()\n\n mask = ~np.isnan(y_pred)\n y_true = y_true[mask]\n y_pred = y_pred[mask]\n\n mae = mean_absolute_error(y_true, y_pred)\n maxae = max_error(y_true, y_pred)\n mse = mean_squared_error(y_true, y_pred)\n rmse = np.sqrt(mse)\n r2 = r2_score(y_true, y_pred)\n pr, p_val = pearsonr(y_true, y_pred)\n sr, _ = spearmanr(y_true, y_pred)\n return OrderedDict(\n mae=mae,\n mse=mse,\n rmse=rmse,\n r2=r2,\n pearsonr=pr,\n spearmanr=sr,\n p_value=p_val,\n max_ae=maxae,\n )\n\n\ndef classification_metrics(\n y_true: Union[np.ndarray, pd.DataFrame, pd.Series],\n y_pred: Union[np.ndarray, pd.Series]) -> OrderedDict:\n \"\"\"\n Calculate most common classification scores.\n See Also: https://scikit-learn.org/stable/modules/model_evaluation.html\n \n Parameters\n ----------\n y_true\n True results.\n y_pred\n Predicted results.\n \n Returns\n -------\n OrderedDict\n An :class:`collections.OrderedDict` contains classification scores.\n These scores will be calculated: ``accuracy``, ``f1``, ``precision``, ``recall``,\n ``macro_f1``, ``macro_precision``, and ``macro_recall``\n \"\"\"\n if len(y_true.shape) != 1:\n y_true = np.argmax(y_true, 1)\n if len(y_pred.shape) != 1:\n y_pred = np.argmax(y_pred, 1)\n\n mask = ~np.isnan(y_pred)\n y_true = y_true[mask]\n y_pred = y_pred[mask]\n\n accuracy = accuracy_score(y_true, y_pred)\n f1 = f1_score(y_true, y_pred, average='weighted')\n precision = precision_score(y_true, y_pred, average='weighted')\n recall = recall_score(y_true, y_pred, average='weighted')\n macro_f1 = f1_score(y_true, y_pred, average='macro')\n macro_precision = precision_score(y_true, y_pred, average='macro')\n macro_recall = recall_score(y_true, y_pred, average='macro')\n return OrderedDict(\n accuracy=accuracy,\n f1=f1,\n precision=precision,\n recall=recall,\n macro_f1=macro_f1,\n macro_precision=macro_precision,\n macro_recall=macro_recall,\n )\n"
] | [
[
"pandas.DataFrame"
],
[
"pandas.Series",
"torch.Tensor",
"numpy.asarray",
"scipy.special.softmax",
"torch.from_numpy",
"torch.equal",
"pandas.DataFrame",
"numpy.all",
"numpy.argmax",
"numpy.random.randn",
"numpy.random.rand",
"numpy.zeros",
"numpy.random.randint"
],
[
"sklearn.metrics.r2_score",
"numpy.sqrt",
"numpy.isnan",
"sklearn.metrics.mean_absolute_error",
"scipy.stats.pearsonr",
"sklearn.metrics.precision_score",
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.max_error",
"numpy.argmax",
"scipy.stats.spearmanr",
"sklearn.metrics.f1_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.2",
"1.7",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
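In the mori0711/XenonPy row above, the two non-empty pandas lists hold the same thirteen versions in different order, and the scipy list for xenonpy/model/utils/metrics.py is a strict superset of the one for tests/models/test_extension.py; intersecting the per-file candidate sets therefore yields repo-level candidates. A minimal sketch, under the assumption (not stated by the dump) that an empty list means "no constraint" rather than "no compatible version":

def repo_candidates(per_file_versions, package):
    # per_file_versions: the possible_versions dicts of one row's files.
    # Files with an empty list for `package` are skipped (assumed unconstrained).
    constrained = [set(d[package]) for d in per_file_versions if d.get(package)]
    return set.intersection(*constrained) if constrained else set()

# The two constrained scipy cells of the XenonPy row:
test_extension = {"scipy": ["1.6", "1.10", "1.4", "1.9", "1.5",
                            "1.2", "1.7", "1.3", "1.8"]}
metrics = {"scipy": ["0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3",
                     "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12",
                     "1.0", "0.17", "0.16", "1.8"]}
assert repo_candidates([test_extension, metrics], "scipy") == set(test_extension["scipy"])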
jzerfowski/mne-python | [
"963eea23740f0e697ac03dc1fd671ee91e15c162"
] | [
"mne/io/tests/test_meas_info.py"
] | [
"# -*- coding: utf-8 -*-\n# # Authors: MNE Developers\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD-3-Clause\n\nfrom datetime import datetime, timedelta, timezone, date\nimport hashlib\nimport os.path as op\nimport pickle\n\nimport pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_allclose\nfrom scipy import sparse\nimport string\n\nfrom mne import (Epochs, read_events, pick_info, pick_types, Annotations,\n read_evokeds, make_forward_solution, make_sphere_model,\n setup_volume_source_space, write_forward_solution,\n read_forward_solution, write_cov, read_cov, read_epochs,\n compute_covariance)\nfrom mne.channels import (read_polhemus_fastscan, make_standard_montage,\n equalize_channels)\nfrom mne.event import make_fixed_length_events\nfrom mne.datasets import testing\nfrom mne.io import (read_fiducials, write_fiducials, _coil_trans_to_loc,\n _loc_to_coil_trans, read_raw_fif, read_info, write_info,\n meas_info, Projection, BaseRaw, read_raw_ctf)\nfrom mne.io.constants import FIFF\nfrom mne.io.write import _generate_meas_id, DATE_NONE\nfrom mne.io.meas_info import (Info, create_info, _merge_info,\n _force_update_info, RAW_INFO_FIELDS,\n _bad_chans_comp, _get_valid_units,\n anonymize_info, _stamp_to_dt, _dt_to_stamp,\n _add_timedelta_to_stamp, _read_extended_ch_info)\nfrom mne.minimum_norm import (make_inverse_operator, write_inverse_operator,\n read_inverse_operator, apply_inverse)\nfrom mne.io._digitization import _write_dig_points, _make_dig_points, DigPoint\nfrom mne.transforms import Transform\nfrom mne.utils import catch_logging, assert_object_equal\n\nfiducials_fname = op.join(op.dirname(__file__), '..', '..', 'data',\n 'fsaverage', 'fsaverage-fiducials.fif')\nbase_dir = op.join(op.dirname(__file__), 'data')\nraw_fname = op.join(base_dir, 'test_raw.fif')\nchpi_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')\nevent_name = op.join(base_dir, 'test-eve.fif')\n\nkit_data_dir = op.join(op.dirname(__file__), '..', 'kit', 'tests', 'data')\nhsp_fname = op.join(kit_data_dir, 'test_hsp.txt')\nelp_fname = op.join(kit_data_dir, 'test_elp.txt')\n\ndata_path = testing.data_path(download=False)\nsss_path = op.join(data_path, 'SSS')\npre = op.join(sss_path, 'test_move_anon_')\nsss_ctc_fname = pre + 'crossTalk_raw_sss.fif'\nctf_fname = op.join(data_path, 'CTF', 'testdata_ctf.ds')\nraw_invalid_bday_fname = op.join(data_path, 'misc',\n 'sample_invalid_birthday_raw.fif')\n\n\[email protected]('kwargs, want', [\n (dict(meg=False, eeg=True), [0]),\n (dict(meg=False, fnirs=True), [5]),\n (dict(meg=False, fnirs='hbo'), [5]),\n (dict(meg=False, fnirs='hbr'), []),\n (dict(meg=False, misc=True), [1]),\n (dict(meg=True), [2, 3, 4]),\n (dict(meg='grad'), [2, 3]),\n (dict(meg='planar1'), [2]),\n (dict(meg='planar2'), [3]),\n (dict(meg='mag'), [4]),\n])\ndef test_create_info_grad(kwargs, want):\n \"\"\"Test create_info behavior with grad coils.\"\"\"\n info = create_info(6, 256, [\"eeg\", \"misc\", \"grad\", \"grad\", \"mag\", \"hbo\"])\n # Put these in an order such that grads get named \"2\" and \"3\", since\n # they get picked based first on coil_type then ch_name...\n assert [ch['ch_name'] for ch in info['chs']\n if ch['coil_type'] == FIFF.FIFFV_COIL_VV_PLANAR_T1] == ['2', '3']\n picks = pick_types(info, **kwargs)\n assert_array_equal(picks, want)\n\n\ndef test_get_valid_units():\n \"\"\"Test the valid units.\"\"\"\n valid_units = _get_valid_units()\n assert isinstance(valid_units, tuple)\n assert all(isinstance(unit, str) for unit in valid_units)\n assert \"n/a\" in 
valid_units\n\n\ndef test_coil_trans():\n \"\"\"Test loc<->coil_trans functions.\"\"\"\n rng = np.random.RandomState(0)\n x = rng.randn(4, 4)\n x[3] = [0, 0, 0, 1]\n assert_allclose(_loc_to_coil_trans(_coil_trans_to_loc(x)), x)\n x = rng.randn(12)\n assert_allclose(_coil_trans_to_loc(_loc_to_coil_trans(x)), x)\n\n\ndef test_make_info():\n \"\"\"Test some create_info properties.\"\"\"\n n_ch = np.longlong(1)\n info = create_info(n_ch, 1000., 'eeg')\n assert set(info.keys()) == set(RAW_INFO_FIELDS)\n\n coil_types = {ch['coil_type'] for ch in info['chs']}\n assert FIFF.FIFFV_COIL_EEG in coil_types\n\n pytest.raises(TypeError, create_info, ch_names='Test Ch', sfreq=1000)\n pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=-1000)\n pytest.raises(ValueError, create_info, ch_names=['Test Ch'], sfreq=1000,\n ch_types=['eeg', 'eeg'])\n pytest.raises(TypeError, create_info, ch_names=[np.array([1])],\n sfreq=1000)\n pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000,\n ch_types=np.array([1]))\n pytest.raises(KeyError, create_info, ch_names=['Test Ch'], sfreq=1000,\n ch_types='awesome')\n pytest.raises(TypeError, create_info, ['Test Ch'], sfreq=1000,\n montage=np.array([1]))\n m = make_standard_montage('biosemi32')\n info = create_info(ch_names=m.ch_names, sfreq=1000., ch_types='eeg')\n info.set_montage(m)\n ch_pos = [ch['loc'][:3] for ch in info['chs']]\n ch_pos_mon = m._get_ch_pos()\n ch_pos_mon = np.array(\n [ch_pos_mon[ch_name] for ch_name in info['ch_names']])\n # transform to head\n ch_pos_mon += (0., 0., 0.04014)\n assert_allclose(ch_pos, ch_pos_mon, atol=1e-5)\n\n\ndef test_duplicate_name_correction():\n \"\"\"Test duplicate channel names with running number.\"\"\"\n # When running number is possible\n info = create_info(['A', 'A', 'A'], 1000., verbose='error')\n assert info['ch_names'] == ['A-0', 'A-1', 'A-2']\n\n # When running number is not possible but alpha numeric is\n info = create_info(['A', 'A', 'A-0'], 1000., verbose='error')\n assert info['ch_names'] == ['A-a', 'A-1', 'A-0']\n\n # When a single addition is not sufficient\n with pytest.raises(ValueError, match='Adding a single alphanumeric'):\n ch_n = ['A', 'A']\n # add all options for first duplicate channel (0)\n ch_n.extend([f'{ch_n[0]}-{c}' for c in string.ascii_lowercase + '0'])\n create_info(ch_n, 1000., verbose='error')\n\n\ndef test_fiducials_io(tmp_path):\n \"\"\"Test fiducials i/o.\"\"\"\n pts, coord_frame = read_fiducials(fiducials_fname)\n assert pts[0]['coord_frame'] == FIFF.FIFFV_COORD_MRI\n assert pts[0]['ident'] == FIFF.FIFFV_POINT_CARDINAL\n\n temp_fname = tmp_path / 'test.fif'\n write_fiducials(temp_fname, pts, coord_frame)\n pts_1, coord_frame_1 = read_fiducials(temp_fname)\n assert coord_frame == coord_frame_1\n for pt, pt_1 in zip(pts, pts_1):\n assert pt['kind'] == pt_1['kind']\n assert pt['ident'] == pt_1['ident']\n assert pt['coord_frame'] == pt_1['coord_frame']\n assert_array_equal(pt['r'], pt_1['r'])\n assert isinstance(pt, DigPoint)\n assert isinstance(pt_1, DigPoint)\n\n # test safeguards\n pts[0]['coord_frame'] += 1\n pytest.raises(ValueError, write_fiducials, temp_fname, pts, coord_frame)\n\n\ndef test_info():\n \"\"\"Test info object.\"\"\"\n raw = read_raw_fif(raw_fname)\n event_id, tmin, tmax = 1, -0.2, 0.5\n events = read_events(event_name)\n event_id = int(events[0, 2])\n epochs = Epochs(raw, events[:1], event_id, tmin, tmax, picks=None)\n\n evoked = epochs.average()\n\n # Test subclassing was successful.\n info = Info(a=7, b='aaaaa')\n assert ('a' in info)\n 
assert ('b' in info)\n\n # Test info attribute in API objects\n for obj in [raw, epochs, evoked]:\n assert (isinstance(obj.info, Info))\n rep = repr(obj.info)\n assert '2002-12-03 19:01:10 UTC' in rep, rep\n assert '146 items (3 Cardinal, 4 HPI, 61 EEG, 78 Extra)' in rep\n dig_rep = repr(obj.info['dig'][0])\n assert 'LPA' in dig_rep, dig_rep\n assert '(-71.4, 0.0, 0.0) mm' in dig_rep, dig_rep\n assert 'head frame' in dig_rep, dig_rep\n # Test our BunchConstNamed support\n for func in (str, repr):\n assert '4 (FIFFV_COORD_HEAD)' == \\\n func(obj.info['dig'][0]['coord_frame'])\n\n # Test read-only fields\n info = raw.info.copy()\n nchan = len(info['chs'])\n ch_names = [ch['ch_name'] for ch in info['chs']]\n assert info['nchan'] == nchan\n assert list(info['ch_names']) == ch_names\n\n # Deleting of regular fields should work\n info['experimenter'] = 'bar'\n del info['experimenter']\n\n # Test updating of fields\n del info['chs'][-1]\n info._update_redundant()\n assert info['nchan'] == nchan - 1\n assert list(info['ch_names']) == ch_names[:-1]\n\n info['chs'][0]['ch_name'] = 'foo'\n info._update_redundant()\n assert info['ch_names'][0] == 'foo'\n\n # Test casting to and from a dict\n info_dict = dict(info)\n info2 = Info(info_dict)\n assert info == info2\n\n\ndef test_read_write_info(tmp_path):\n \"\"\"Test IO of info.\"\"\"\n info = read_info(raw_fname)\n temp_file = tmp_path / 'info.fif'\n # check for bug `#1198`\n info['dev_head_t']['trans'] = np.eye(4)\n t1 = info['dev_head_t']['trans']\n write_info(temp_file, info)\n info2 = read_info(temp_file)\n t2 = info2['dev_head_t']['trans']\n assert (len(info['chs']) == len(info2['chs']))\n assert_array_equal(t1, t2)\n # proc_history (e.g., GH#1875)\n creator = u'é'\n info = read_info(chpi_fname)\n info['proc_history'][0]['creator'] = creator\n info['hpi_meas'][0]['creator'] = creator\n info['subject_info']['his_id'] = creator\n info['subject_info']['weight'] = 11.1\n info['subject_info']['height'] = 2.3\n\n with info._unlock():\n if info['gantry_angle'] is None: # future testing data may include it\n info['gantry_angle'] = 0. 
# Elekta supine position\n gantry_angle = info['gantry_angle']\n\n meas_id = info['meas_id']\n write_info(temp_file, info)\n info = read_info(temp_file)\n assert info['proc_history'][0]['creator'] == creator\n assert info['hpi_meas'][0]['creator'] == creator\n assert info['subject_info']['his_id'] == creator\n assert info['gantry_angle'] == gantry_angle\n assert info['subject_info']['height'] == 2.3\n assert info['subject_info']['weight'] == 11.1\n for key in ['secs', 'usecs', 'version']:\n assert info['meas_id'][key] == meas_id[key]\n assert_array_equal(info['meas_id']['machid'], meas_id['machid'])\n\n # Test that writing twice produces the same file\n m1 = hashlib.md5()\n with open(temp_file, 'rb') as fid:\n m1.update(fid.read())\n m1 = m1.hexdigest()\n temp_file_2 = tmp_path / 'info2.fif'\n assert temp_file_2 != temp_file\n write_info(temp_file_2, info)\n m2 = hashlib.md5()\n with open(str(temp_file_2), 'rb') as fid:\n m2.update(fid.read())\n m2 = m2.hexdigest()\n assert m1 == m2\n\n info = read_info(raw_fname)\n with info._unlock():\n info['meas_date'] = None\n anonymize_info(info, verbose='error')\n assert info['meas_date'] is None\n tmp_fname_3 = tmp_path / 'info3.fif'\n write_info(tmp_fname_3, info)\n assert info['meas_date'] is None\n info2 = read_info(tmp_fname_3)\n assert info2['meas_date'] is None\n\n # Check that having a very old date in fine until you try to save it to fif\n with info._unlock(check_after=True):\n info['meas_date'] = datetime(1800, 1, 1, 0, 0, 0, tzinfo=timezone.utc)\n fname = tmp_path / 'test.fif'\n with pytest.raises(RuntimeError, match='must be between '):\n write_info(fname, info)\n\n\ndef test_io_dig_points(tmp_path):\n \"\"\"Test Writing for dig files.\"\"\"\n points = read_polhemus_fastscan(hsp_fname, on_header_missing='ignore')\n\n dest = tmp_path / 'test.txt'\n dest_bad = tmp_path / 'test.mne'\n with pytest.raises(ValueError, match='must be of shape'):\n _write_dig_points(dest, points[:, :2])\n with pytest.raises(ValueError, match='extension'):\n _write_dig_points(dest_bad, points)\n _write_dig_points(dest, points)\n points1 = read_polhemus_fastscan(\n dest, unit='m', on_header_missing='ignore')\n err = \"Dig points diverged after writing and reading.\"\n assert_array_equal(points, points1, err)\n\n points2 = np.array([[-106.93, 99.80], [99.80, 68.81]])\n np.savetxt(dest, points2, delimiter='\\t', newline='\\n')\n with pytest.raises(ValueError, match='must be of shape'):\n with pytest.warns(RuntimeWarning, match='FastSCAN header'):\n read_polhemus_fastscan(dest, on_header_missing='warn')\n\n\ndef test_io_coord_frame(tmp_path):\n \"\"\"Test round trip for coordinate frame.\"\"\"\n fname = tmp_path / 'test.fif'\n for ch_type in ('eeg', 'seeg', 'ecog', 'dbs', 'hbo', 'hbr'):\n info = create_info(\n ch_names=['Test Ch'], sfreq=1000., ch_types=[ch_type])\n info['chs'][0]['loc'][:3] = [0.05, 0.01, -0.03]\n write_info(fname, info)\n info2 = read_info(fname)\n assert info2['chs'][0]['coord_frame'] == FIFF.FIFFV_COORD_HEAD\n\n\ndef test_make_dig_points():\n \"\"\"Test application of Polhemus HSP to info.\"\"\"\n extra_points = read_polhemus_fastscan(\n hsp_fname, on_header_missing='ignore')\n info = create_info(ch_names=['Test Ch'], sfreq=1000.)\n assert info['dig'] is None\n\n with info._unlock():\n info['dig'] = _make_dig_points(extra_points=extra_points)\n assert (info['dig'])\n assert_allclose(info['dig'][0]['r'], [-.10693, .09980, .06881])\n\n elp_points = read_polhemus_fastscan(elp_fname, on_header_missing='ignore')\n nasion, lpa, rpa = elp_points[:3]\n 
info = create_info(ch_names=['Test Ch'], sfreq=1000.)\n assert info['dig'] is None\n\n with info._unlock():\n info['dig'] = _make_dig_points(nasion, lpa, rpa, elp_points[3:], None)\n assert (info['dig'])\n idx = [d['ident'] for d in info['dig']].index(FIFF.FIFFV_POINT_NASION)\n assert_allclose(info['dig'][idx]['r'], [.0013930, .0131613, -.0046967])\n pytest.raises(ValueError, _make_dig_points, nasion[:2])\n pytest.raises(ValueError, _make_dig_points, None, lpa[:2])\n pytest.raises(ValueError, _make_dig_points, None, None, rpa[:2])\n pytest.raises(ValueError, _make_dig_points, None, None, None,\n elp_points[:, :2])\n pytest.raises(ValueError, _make_dig_points, None, None, None, None,\n elp_points[:, :2])\n\n\ndef test_redundant():\n \"\"\"Test some of the redundant properties of info.\"\"\"\n # Indexing\n info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.)\n assert info['ch_names'][0] == 'a'\n assert info['ch_names'][1] == 'b'\n assert info['ch_names'][2] == 'c'\n\n # Equality\n assert info['ch_names'] == info['ch_names']\n assert info['ch_names'] == ['a', 'b', 'c']\n\n # No channels in info\n info = create_info(ch_names=[], sfreq=1000.)\n assert info['ch_names'] == []\n\n # List should be read-only\n info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.)\n\n\ndef test_merge_info():\n \"\"\"Test merging of multiple Info objects.\"\"\"\n info_a = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.)\n info_b = create_info(ch_names=['d', 'e', 'f'], sfreq=1000.)\n info_merged = _merge_info([info_a, info_b])\n assert info_merged['nchan'], 6\n assert info_merged['ch_names'], ['a', 'b', 'c', 'd', 'e', 'f']\n pytest.raises(ValueError, _merge_info, [info_a, info_a])\n\n # Testing for force updates before merging\n info_c = create_info(ch_names=['g', 'h', 'i'], sfreq=500.)\n # This will break because sfreq is not equal\n pytest.raises(RuntimeError, _merge_info, [info_a, info_c])\n _force_update_info(info_a, info_c)\n assert (info_c['sfreq'] == info_a['sfreq'])\n assert (info_c['ch_names'][0] != info_a['ch_names'][0])\n # Make sure it works now\n _merge_info([info_a, info_c])\n # Check that you must supply Info\n pytest.raises(ValueError, _force_update_info, info_a,\n dict([('sfreq', 1000.)]))\n # KIT System-ID\n info_a._unlocked = info_b._unlocked = True\n info_a['kit_system_id'] = 50\n assert _merge_info((info_a, info_b))['kit_system_id'] == 50\n info_b['kit_system_id'] = 50\n assert _merge_info((info_a, info_b))['kit_system_id'] == 50\n info_b['kit_system_id'] = 60\n pytest.raises(ValueError, _merge_info, (info_a, info_b))\n\n # hpi infos\n info_d = create_info(ch_names=['d', 'e', 'f'], sfreq=1000.)\n info_merged = _merge_info([info_a, info_d])\n assert not info_merged['hpi_meas']\n assert not info_merged['hpi_results']\n info_a['hpi_meas'] = [{'f1': 3, 'f2': 4}]\n assert _merge_info([info_a, info_d])['hpi_meas'] == info_a['hpi_meas']\n info_d._unlocked = True\n info_d['hpi_meas'] = [{'f1': 3, 'f2': 4}]\n assert _merge_info([info_a, info_d])['hpi_meas'] == info_d['hpi_meas']\n # This will break because of inconsistency\n info_d['hpi_meas'] = [{'f1': 3, 'f2': 5}]\n pytest.raises(ValueError, _merge_info, [info_a, info_d])\n\n info_0 = read_info(raw_fname)\n info_0['bads'] = ['MEG 2443', 'EEG 053']\n assert len(info_0['chs']) == 376\n assert len(info_0['dig']) == 146\n info_1 = create_info([\"STI YYY\"], info_0['sfreq'], ['stim'])\n assert info_1['bads'] == []\n info_out = _merge_info([info_0, info_1], force_update_to_first=True)\n assert len(info_out['chs']) == 377\n assert 
len(info_out['bads']) == 2\n assert len(info_out['dig']) == 146\n assert len(info_0['chs']) == 376\n assert len(info_0['bads']) == 2\n assert len(info_0['dig']) == 146\n\n\ndef test_check_consistency():\n \"\"\"Test consistency check of Info objects.\"\"\"\n info = create_info(ch_names=['a', 'b', 'c'], sfreq=1000.)\n\n # This should pass\n info._check_consistency()\n\n # Info without any channels\n info_empty = create_info(ch_names=[], sfreq=1000.)\n info_empty._check_consistency()\n\n # Bad channels that are not in the info object\n info2 = info.copy()\n info2['bads'] = ['b', 'foo', 'bar']\n pytest.raises(RuntimeError, info2._check_consistency)\n\n # Bad data types\n info2 = info.copy()\n with info2._unlock():\n info2['sfreq'] = 'foo'\n pytest.raises(ValueError, info2._check_consistency)\n\n info2 = info.copy()\n with info2._unlock():\n info2['highpass'] = 'foo'\n pytest.raises(ValueError, info2._check_consistency)\n\n info2 = info.copy()\n with info2._unlock():\n info2['lowpass'] = 'foo'\n pytest.raises(ValueError, info2._check_consistency)\n\n # Silent type conversion to float\n info2 = info.copy()\n with info2._unlock(check_after=True):\n info2['sfreq'] = 1\n info2['highpass'] = 2\n info2['lowpass'] = 2\n assert (isinstance(info2['sfreq'], float))\n assert (isinstance(info2['highpass'], float))\n assert (isinstance(info2['lowpass'], float))\n\n # Duplicate channel names\n info2 = info.copy()\n with info2._unlock():\n info2['chs'][2]['ch_name'] = 'b'\n pytest.raises(RuntimeError, info2._check_consistency)\n\n # Duplicates appended with running numbers\n with pytest.warns(RuntimeWarning, match='Channel names are not'):\n info3 = create_info(ch_names=['a', 'b', 'b', 'c', 'b'], sfreq=1000.)\n assert_array_equal(info3['ch_names'], ['a', 'b-0', 'b-1', 'c', 'b-2'])\n\n # a few bad ones\n idx = 0\n ch = info['chs'][idx]\n for key, bad, match in (('ch_name', 1., 'not a string'),\n ('loc', np.zeros(15), '12 elements'),\n ('cal', np.ones(1), 'float or int')):\n info._check_consistency() # okay\n old = ch[key]\n ch[key] = bad\n if key == 'ch_name':\n info['ch_names'][idx] = bad\n with pytest.raises(TypeError, match=match):\n info._check_consistency()\n ch[key] = old\n if key == 'ch_name':\n info['ch_names'][idx] = old\n\n # bad channel entries\n info2 = info.copy()\n info2['chs'][0]['foo'] = 'bar'\n with pytest.raises(KeyError, match='key errantly present'):\n info2._check_consistency()\n info2 = info.copy()\n del info2['chs'][0]['loc']\n with pytest.raises(KeyError, match='key missing'):\n info2._check_consistency()\n\n\ndef _test_anonymize_info(base_info):\n \"\"\"Test that sensitive information can be anonymized.\"\"\"\n pytest.raises(TypeError, anonymize_info, 'foo')\n\n default_anon_dos = datetime(2000, 1, 1, 0, 0, 0, tzinfo=timezone.utc)\n default_str = \"mne_anonymize\"\n default_subject_id = 0\n default_desc = (\"Anonymized using a time shift\" +\n \" to preserve age at acquisition\")\n\n # Test no error for incomplete info\n info = base_info.copy()\n info.pop('file_id')\n anonymize_info(info)\n\n # Fake some subject data\n meas_date = datetime(2010, 1, 1, 0, 0, 0, tzinfo=timezone.utc)\n with base_info._unlock():\n base_info['meas_date'] = meas_date\n base_info['subject_info'] = dict(id=1,\n his_id='foobar',\n last_name='bar',\n first_name='bar',\n birthday=(1987, 4, 8),\n sex=0, hand=1)\n\n # generate expected info...\n # first expected result with no options.\n # will move DOS from 2010/1/1 to 2000/1/1 which is 3653 days.\n exp_info = base_info.copy()\n exp_info._unlocked = True\n 
exp_info['description'] = default_desc\n exp_info['experimenter'] = default_str\n exp_info['proj_name'] = default_str\n exp_info['proj_id'] = np.array([0])\n exp_info['subject_info']['first_name'] = default_str\n exp_info['subject_info']['last_name'] = default_str\n exp_info['subject_info']['id'] = default_subject_id\n exp_info['subject_info']['his_id'] = str(default_subject_id)\n exp_info['subject_info']['sex'] = 0\n del exp_info['subject_info']['hand'] # there's no \"unknown\" setting\n\n # this bday is 3653 days different. the change in day is due to a\n # different number of leap days between 1987 and 1977 than between\n # 2010 and 2000.\n exp_info['subject_info']['birthday'] = (1977, 4, 7)\n exp_info['meas_date'] = default_anon_dos\n exp_info._unlocked = False\n\n # make copies\n exp_info_3 = exp_info.copy()\n\n # adjust each expected outcome\n delta_t = timedelta(days=3653)\n for key in ('file_id', 'meas_id'):\n value = exp_info.get(key)\n if value is not None:\n assert 'msecs' not in value\n tmp = _add_timedelta_to_stamp(\n (value['secs'], value['usecs']), -delta_t)\n value['secs'] = tmp[0]\n value['usecs'] = tmp[1]\n value['machid'][:] = 0\n\n # exp 2 tests the keep_his option\n exp_info_2 = exp_info.copy()\n with exp_info_2._unlock():\n exp_info_2['subject_info']['his_id'] = 'foobar'\n exp_info_2['subject_info']['sex'] = 0\n exp_info_2['subject_info']['hand'] = 1\n\n # exp 3 tests is a supplied daysback\n delta_t_2 = timedelta(days=43)\n with exp_info_3._unlock():\n exp_info_3['subject_info']['birthday'] = (1987, 2, 24)\n exp_info_3['meas_date'] = meas_date - delta_t_2\n for key in ('file_id', 'meas_id'):\n value = exp_info_3.get(key)\n if value is not None:\n assert 'msecs' not in value\n tmp = _add_timedelta_to_stamp(\n (value['secs'], value['usecs']), -delta_t_2)\n value['secs'] = tmp[0]\n value['usecs'] = tmp[1]\n value['machid'][:] = 0\n\n # exp 4 tests is a supplied daysback\n delta_t_3 = timedelta(days=223 + 364 * 500)\n\n new_info = anonymize_info(base_info.copy())\n assert_object_equal(new_info, exp_info)\n\n new_info = anonymize_info(base_info.copy(), keep_his=True)\n assert_object_equal(new_info, exp_info_2)\n\n new_info = anonymize_info(base_info.copy(), daysback=delta_t_2.days)\n assert_object_equal(new_info, exp_info_3)\n\n with pytest.raises(RuntimeError, match='anonymize_info generated'):\n anonymize_info(base_info.copy(), daysback=delta_t_3.days)\n # assert_object_equal(new_info, exp_info_4)\n\n # test with meas_date = None\n with base_info._unlock():\n base_info['meas_date'] = None\n exp_info_3._unlocked = True\n exp_info_3['meas_date'] = None\n exp_info_3['file_id']['secs'] = DATE_NONE[0]\n exp_info_3['file_id']['usecs'] = DATE_NONE[1]\n exp_info_3['meas_id']['secs'] = DATE_NONE[0]\n exp_info_3['meas_id']['usecs'] = DATE_NONE[1]\n exp_info_3['subject_info'].pop('birthday', None)\n exp_info_3._unlocked = False\n\n if base_info['meas_date'] is None:\n with pytest.warns(RuntimeWarning, match='all information'):\n new_info = anonymize_info(base_info.copy(),\n daysback=delta_t_2.days)\n else:\n new_info = anonymize_info(base_info.copy(), daysback=delta_t_2.days)\n assert_object_equal(new_info, exp_info_3)\n\n with pytest.warns(None): # meas_date is None\n new_info = anonymize_info(base_info.copy())\n assert_object_equal(new_info, exp_info_3)\n\n\[email protected]('stamp, dt', [\n [(1346981585, 835782), (2012, 9, 7, 1, 33, 5, 835782)],\n # test old dates for BIDS anonymization\n [(-1533443343, 24382), (1921, 5, 29, 19, 30, 57, 24382)],\n # gh-7116\n [(-908196946, 
988669), (1941, 3, 22, 11, 4, 14, 988669)],\n])\ndef test_meas_date_convert(stamp, dt):\n \"\"\"Test conversions of meas_date to datetime objects.\"\"\"\n meas_datetime = _stamp_to_dt(stamp)\n stamp2 = _dt_to_stamp(meas_datetime)\n assert stamp == stamp2\n assert meas_datetime == datetime(*dt, tzinfo=timezone.utc)\n # smoke test for info __repr__\n info = create_info(1, 1000., 'eeg')\n with info._unlock():\n info['meas_date'] = meas_datetime\n assert str(dt[0]) in repr(info)\n\n\ndef test_anonymize(tmp_path):\n \"\"\"Test that sensitive information can be anonymized.\"\"\"\n pytest.raises(TypeError, anonymize_info, 'foo')\n\n # Fake some subject data\n raw = read_raw_fif(raw_fname)\n raw.set_annotations(Annotations(onset=[0, 1],\n duration=[1, 1],\n description='dummy',\n orig_time=None))\n first_samp = raw.first_samp\n expected_onset = np.arange(2) + raw._first_time\n assert raw.first_samp == first_samp\n assert_allclose(raw.annotations.onset, expected_onset)\n\n # test mne.anonymize_info()\n events = read_events(event_name)\n epochs = Epochs(raw, events[:1], 2, 0., 0.1, baseline=None)\n _test_anonymize_info(raw.info.copy())\n _test_anonymize_info(epochs.info.copy())\n\n # test instance methods & I/O roundtrip\n for inst, keep_his in zip((raw, epochs), (True, False)):\n inst = inst.copy()\n\n subject_info = dict(his_id='Volunteer', sex=2, hand=1)\n inst.info['subject_info'] = subject_info\n inst.anonymize(keep_his=keep_his)\n\n si = inst.info['subject_info']\n if keep_his:\n assert si == subject_info\n else:\n assert si['his_id'] == '0'\n assert si['sex'] == 0\n assert 'hand' not in si\n\n # write to disk & read back\n inst_type = 'raw' if isinstance(inst, BaseRaw) else 'epo'\n fname = 'tmp_raw.fif' if inst_type == 'raw' else 'tmp_epo.fif'\n out_path = tmp_path / fname\n inst.save(out_path, overwrite=True)\n if inst_type == 'raw':\n read_raw_fif(out_path)\n else:\n read_epochs(out_path)\n\n # test that annotations are correctly zeroed\n raw.anonymize()\n assert raw.first_samp == first_samp\n assert_allclose(raw.annotations.onset, expected_onset)\n assert raw.annotations.orig_time == raw.info['meas_date']\n stamp = _dt_to_stamp(raw.info['meas_date'])\n assert raw.annotations.orig_time == _stamp_to_dt(stamp)\n\n with raw.info._unlock():\n raw.info['meas_date'] = None\n raw.anonymize(daysback=None)\n with pytest.warns(RuntimeWarning, match='None'):\n raw.anonymize(daysback=123)\n assert raw.annotations.orig_time is None\n assert raw.first_samp == first_samp\n assert_allclose(raw.annotations.onset, expected_onset)\n\n\ndef test_anonymize_with_io(tmp_path):\n \"\"\"Test that IO does not break anonymization.\"\"\"\n raw = read_raw_fif(raw_fname)\n\n temp_path = tmp_path / 'tmp_raw.fif'\n raw.save(temp_path)\n\n raw2 = read_raw_fif(temp_path)\n\n daysback = (raw2.info['meas_date'].date() - date(1924, 1, 1)).days\n raw2.anonymize(daysback=daysback)\n\n\[email protected]_testing_data\ndef test_csr_csc(tmp_path):\n \"\"\"Test CSR and CSC.\"\"\"\n info = read_info(sss_ctc_fname)\n info = pick_info(info, pick_types(info, meg=True, exclude=[]))\n sss_ctc = info['proc_history'][0]['max_info']['sss_ctc']\n ct = sss_ctc['decoupler'].copy()\n # CSC\n assert isinstance(ct, sparse.csc_matrix)\n fname = tmp_path / 'test.fif'\n write_info(fname, info)\n info_read = read_info(fname)\n ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler']\n assert isinstance(ct_read, sparse.csc_matrix)\n assert_array_equal(ct_read.toarray(), ct.toarray())\n # Now CSR\n csr = ct.tocsr()\n assert 
isinstance(csr, sparse.csr_matrix)\n assert_array_equal(csr.toarray(), ct.toarray())\n info['proc_history'][0]['max_info']['sss_ctc']['decoupler'] = csr\n fname = tmp_path / 'test1.fif'\n write_info(fname, info)\n info_read = read_info(fname)\n ct_read = info_read['proc_history'][0]['max_info']['sss_ctc']['decoupler']\n assert isinstance(ct_read, sparse.csc_matrix) # this gets cast to CSC\n assert_array_equal(ct_read.toarray(), ct.toarray())\n\n\[email protected]_testing_data\ndef test_check_compensation_consistency():\n \"\"\"Test check picks compensation.\"\"\"\n raw = read_raw_ctf(ctf_fname, preload=False)\n events = make_fixed_length_events(raw, 99999)\n picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=True)\n pick_ch_names = [raw.info['ch_names'][idx] for idx in picks]\n for (comp, expected_result) in zip([0, 1], [False, False]):\n raw.apply_gradient_compensation(comp)\n ret, missing = _bad_chans_comp(raw.info, pick_ch_names)\n assert ret == expected_result\n assert len(missing) == 0\n Epochs(raw, events, None, -0.2, 0.2, preload=False, picks=picks)\n\n picks = pick_types(raw.info, meg=True, exclude=[], ref_meg=False)\n pick_ch_names = [raw.info['ch_names'][idx] for idx in picks]\n\n for (comp, expected_result) in zip([0, 1], [False, True]):\n raw.apply_gradient_compensation(comp)\n ret, missing = _bad_chans_comp(raw.info, pick_ch_names)\n assert ret == expected_result\n assert len(missing) == 17\n with catch_logging() as log:\n Epochs(raw, events, None, -0.2, 0.2, preload=False,\n picks=picks, verbose=True)\n assert'Removing 5 compensators' in log.getvalue()\n\n\ndef test_field_round_trip(tmp_path):\n \"\"\"Test round-trip for new fields.\"\"\"\n info = create_info(1, 1000., 'eeg')\n with info._unlock():\n for key in ('file_id', 'meas_id'):\n info[key] = _generate_meas_id()\n info['device_info'] = dict(\n type='a', model='b', serial='c', site='d')\n info['helium_info'] = dict(\n he_level_raw=1., helium_level=2.,\n orig_file_guid='e', meas_date=(1, 2))\n fname = tmp_path / 'temp-info.fif'\n write_info(fname, info)\n info_read = read_info(fname)\n assert_object_equal(info, info_read)\n\n\ndef test_equalize_channels():\n \"\"\"Test equalization of channels for instances of Info.\"\"\"\n info1 = create_info(['CH1', 'CH2', 'CH3'], sfreq=1.)\n info2 = create_info(['CH4', 'CH2', 'CH1'], sfreq=1.)\n info1, info2 = equalize_channels([info1, info2])\n\n assert info1.ch_names == ['CH1', 'CH2']\n assert info2.ch_names == ['CH1', 'CH2']\n\n\ndef test_repr():\n \"\"\"Test Info repr.\"\"\"\n info = create_info(1, 1000, 'eeg')\n assert '7 non-empty values' in repr(info)\n\n t = Transform('meg', 'head', np.ones((4, 4)))\n info['dev_head_t'] = t\n assert 'dev_head_t: MEG device -> head transform' in repr(info)\n\n\ndef test_repr_html():\n \"\"\"Test Info HTML repr.\"\"\"\n info = read_info(raw_fname)\n assert 'Projections' in info._repr_html_()\n with info._unlock():\n info['projs'] = []\n assert 'Projections' not in info._repr_html_()\n info['bads'] = []\n assert 'None' in info._repr_html_()\n info['bads'] = ['MEG 2443', 'EEG 053']\n assert 'MEG 2443' in info._repr_html_()\n assert 'EEG 053' in info._repr_html_()\n\n html = info._repr_html_()\n for ch in ['204 Gradiometers', '102 Magnetometers', '9 Stimulus',\n '60 EEG', '1 EOG']:\n assert ch in html\n\n\[email protected]_testing_data\ndef test_invalid_subject_birthday():\n \"\"\"Test handling of an invalid birthday in the raw file.\"\"\"\n with pytest.warns(RuntimeWarning, match='No birthday will be set'):\n raw = 
read_raw_fif(raw_invalid_bday_fname)\n assert 'birthday' not in raw.info['subject_info']\n\n\[email protected]('fname', [\n pytest.param(ctf_fname, marks=testing._pytest_mark()),\n raw_fname,\n])\ndef test_channel_name_limit(tmp_path, monkeypatch, fname):\n \"\"\"Test that our remapping works properly.\"\"\"\n #\n # raw\n #\n if fname.endswith('fif'):\n raw = read_raw_fif(fname)\n raw.pick_channels(raw.ch_names[:3])\n ref_names = []\n data_names = raw.ch_names\n else:\n assert fname.endswith('.ds')\n raw = read_raw_ctf(fname)\n ref_names = [raw.ch_names[pick]\n for pick in pick_types(raw.info, meg=False, ref_meg=True)]\n data_names = raw.ch_names[32:35]\n proj = dict(data=np.ones((1, len(data_names))),\n col_names=data_names[:2].copy(), row_names=None, nrow=1)\n proj = Projection(\n data=proj, active=False, desc='test', kind=0, explained_var=0.)\n raw.add_proj(proj, remove_existing=True)\n raw.info.normalize_proj()\n raw.pick_channels(data_names + ref_names).crop(0, 2)\n long_names = ['123456789abcdefg' + name for name in raw.ch_names]\n fname = tmp_path / 'test-raw.fif'\n with catch_logging() as log:\n raw.save(fname)\n log = log.getvalue()\n assert 'truncated' not in log\n rename = dict(zip(raw.ch_names, long_names))\n long_data_names = [rename[name] for name in data_names]\n long_proj_names = long_data_names[:2]\n raw.rename_channels(rename)\n for comp in raw.info['comps']:\n for key in ('row_names', 'col_names'):\n for name in comp['data'][key]:\n assert name in raw.ch_names\n if raw.info['comps']:\n assert raw.compensation_grade == 0\n raw.apply_gradient_compensation(3)\n assert raw.compensation_grade == 3\n assert len(raw.info['projs']) == 1\n assert raw.info['projs'][0]['data']['col_names'] == long_proj_names\n raw.info['bads'] = bads = long_data_names[2:3]\n good_long_data_names = [\n name for name in long_data_names if name not in bads]\n with catch_logging() as log:\n raw.save(fname, overwrite=True, verbose=True)\n log = log.getvalue()\n assert 'truncated to 15' in log\n for name in raw.ch_names:\n assert len(name) > 15\n # first read the full way\n with catch_logging() as log:\n raw_read = read_raw_fif(fname, verbose=True)\n log = log.getvalue()\n assert 'Reading extended channel information' in log\n for ra in (raw, raw_read):\n assert ra.ch_names == long_names\n assert raw_read.info['projs'][0]['data']['col_names'] == long_proj_names\n del raw_read\n # next read as if no longer names could be read\n monkeypatch.setattr(\n meas_info, '_read_extended_ch_info', lambda x, y, z: None)\n with catch_logging() as log:\n raw_read = read_raw_fif(fname, verbose=True)\n log = log.getvalue()\n assert 'extended' not in log\n if raw.info['comps']:\n assert raw_read.compensation_grade == 3\n raw_read.apply_gradient_compensation(0)\n assert raw_read.compensation_grade == 0\n monkeypatch.setattr( # restore\n meas_info, '_read_extended_ch_info', _read_extended_ch_info)\n short_proj_names = [\n f'{name[:13 - bool(len(ref_names))]}-{len(ref_names) + ni}'\n for ni, name in enumerate(long_data_names[:2])]\n assert raw_read.info['projs'][0]['data']['col_names'] == short_proj_names\n #\n # epochs\n #\n epochs = Epochs(raw, make_fixed_length_events(raw))\n fname = tmp_path / 'test-epo.fif'\n epochs.save(fname)\n epochs_read = read_epochs(fname)\n for ep in (epochs, epochs_read):\n assert ep.info['ch_names'] == long_names\n assert ep.ch_names == long_names\n del raw, epochs_read\n # cov\n epochs.info['bads'] = []\n cov = compute_covariance(epochs, verbose='error')\n fname = tmp_path / 'test-cov.fif'\n 
write_cov(fname, cov)\n cov_read = read_cov(fname)\n for co in (cov, cov_read):\n assert co['names'] == long_data_names\n assert co['bads'] == []\n del cov_read\n\n #\n # evoked\n #\n evoked = epochs.average()\n evoked.info['bads'] = bads\n assert evoked.nave == 1\n fname = tmp_path / 'test-ave.fif'\n evoked.save(fname)\n evoked_read = read_evokeds(fname)[0]\n for ev in (evoked, evoked_read):\n assert ev.ch_names == long_names\n assert ev.info['bads'] == bads\n del evoked_read, epochs\n\n #\n # forward\n #\n with pytest.warns(None): # not enough points for CTF\n sphere = make_sphere_model('auto', 'auto', evoked.info)\n src = setup_volume_source_space(\n pos=dict(rr=[[0, 0, 0.04]], nn=[[0, 1., 0.]]))\n fwd = make_forward_solution(evoked.info, None, src, sphere)\n fname = tmp_path / 'temp-fwd.fif'\n write_forward_solution(fname, fwd)\n fwd_read = read_forward_solution(fname)\n for fw in (fwd, fwd_read):\n assert fw['sol']['row_names'] == long_data_names\n assert fw['info']['ch_names'] == long_data_names\n assert fw['info']['bads'] == bads\n del fwd_read\n\n #\n # inv\n #\n inv = make_inverse_operator(evoked.info, fwd, cov)\n fname = tmp_path / 'test-inv.fif'\n write_inverse_operator(fname, inv)\n inv_read = read_inverse_operator(fname)\n for iv in (inv, inv_read):\n assert iv['info']['ch_names'] == good_long_data_names\n apply_inverse(evoked, inv) # smoke test\n\n\[email protected]('fname_info', (raw_fname, 'create_info'))\[email protected]('unlocked', (True, False))\ndef test_pickle(fname_info, unlocked):\n \"\"\"Test that Info can be (un)pickled.\"\"\"\n if fname_info == 'create_info':\n info = create_info(3, 1000., 'eeg')\n else:\n info = read_info(fname_info)\n assert not info._unlocked\n info._unlocked = unlocked\n data = pickle.dumps(info)\n info_un = pickle.loads(data)\n assert isinstance(info_un, Info)\n assert_object_equal(info, info_un)\n assert info_un._unlocked == unlocked\n\n\ndef test_info_bad():\n \"\"\"Test our info sanity checkers.\"\"\"\n info = create_info(2, 1000., 'eeg')\n info['description'] = 'foo'\n info['experimenter'] = 'bar'\n info['line_freq'] = 50.\n info['bads'] = info['ch_names'][:1]\n info['temp'] = ('whatever', 1.)\n # After 0.24 these should be pytest.raises calls\n check, klass = pytest.raises, RuntimeError\n with check(klass, match=r\"info\\['temp'\\]\"):\n info['bad_key'] = 1.\n for (key, match) in ([\n ('sfreq', r'inst\\.resample'),\n ('chs', r'inst\\.add_channels')]):\n with check(klass, match=match):\n info[key] = info[key]\n with pytest.raises(ValueError, match='between meg<->head'):\n info['dev_head_t'] = Transform('mri', 'head', np.eye(4))\n\n\ndef test_info_pick_channels():\n \"\"\"Test that info.pick_channels emits a deprecation warning.\"\"\"\n info = create_info(2, 1000., 'eeg')\n with pytest.deprecated_call(match='use inst.pick_channels instead.'):\n info.pick_channels(['0'])\n"
] | [
[
"numpy.arange",
"numpy.eye",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.longlong",
"numpy.testing.assert_allclose",
"numpy.savetxt",
"numpy.array",
"numpy.zeros",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
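The MNE row above centers on round-tripping `meas_date` between `(seconds, microseconds)` stamps and timezone-aware datetimes, including a pre-1970 date (the 1941 parametrization). A minimal stdlib-only sketch of that round trip — the names and epoch arithmetic below are illustrative, not MNE's private `_stamp_to_dt`/`_dt_to_stamp` helpers:

    from datetime import datetime, timedelta, timezone

    _EPOCH = datetime(1970, 1, 1, tzinfo=timezone.utc)

    def stamp_to_dt(stamp):
        # timedelta arithmetic handles negative seconds (pre-1970 dates)
        # portably, unlike datetime.fromtimestamp on some platforms
        sec, usec = stamp
        return _EPOCH + timedelta(seconds=sec, microseconds=usec)

    def dt_to_stamp(dt):
        # timedelta normalizes so 0 <= seconds < 86400 and
        # 0 <= microseconds < 1e6; only days may go negative
        delta = dt - _EPOCH
        return delta.days * 86400 + delta.seconds, delta.microseconds

    assert dt_to_stamp(stamp_to_dt((0, 500000))) == (0, 500000)
    assert dt_to_stamp(stamp_to_dt((-1, 250000))) == (-1, 250000)  # pre-epoch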
mpharrigan/reference-qvm | [
"ed427b0a9f279a22a8d6e2f5c4979cc61e97b5b2",
"ed427b0a9f279a22a8d6e2f5c4979cc61e97b5b2"
] | [
"referenceqvm/tests/test_unitary_generator.py",
"referenceqvm/unitary_generator.py"
] | [
"import pytest\nimport warnings\nimport numpy as np\nfrom referenceqvm.unitary_generator import (lifted_gate, apply_gate,\n tensor_gates, tensor_up)\nfrom referenceqvm.gates import gate_matrix, utility_gates\nfrom pyquil.quil import Program\nfrom pyquil.gates import H as Hgate\nfrom pyquil.gates import RX as RXgate\nfrom pyquil.gates import CNOT as CNOTgate\nfrom pyquil.paulis import PauliTerm, PauliSum\n\n\ndef test_lifted_swap():\n # SWAP indexed at 0\n test_matrix = lifted_gate(0, gate_matrix['SWAP'], 2).toarray()\n result = gate_matrix['SWAP']\n assert np.allclose(test_matrix, result)\n\n test_matrix = lifted_gate(0, gate_matrix[\"SWAP\"], 3).toarray()\n result = np.kron(np.eye(2**1), gate_matrix['SWAP'])\n assert np.allclose(test_matrix, result)\n\n test_matrix = lifted_gate(0, gate_matrix[\"SWAP\"], 4).toarray()\n result = np.kron(np.eye(2**2), gate_matrix['SWAP'])\n assert np.allclose(test_matrix, result)\n\n # SWAP indexed at max num_qubits\n test_matrix = lifted_gate(1, gate_matrix[\"SWAP\"], 3).toarray()\n result = np.kron(gate_matrix['SWAP'], np.eye(2))\n assert np.allclose(test_matrix, result)\n\n # SWAP indexed outside of the range throws error\n with pytest.raises(ValueError):\n lifted_gate(2, gate_matrix['SWAP'], 3)\n with pytest.raises(ValueError):\n lifted_gate(3, gate_matrix['SWAP'], 3)\n with pytest.raises(ValueError):\n lifted_gate(-1, gate_matrix['SWAP'], 3)\n with pytest.raises(ValueError):\n lifted_gate(3, gate_matrix['SWAP'], 4)\n\n test_matrix = lifted_gate(1, gate_matrix['SWAP'], 4).toarray()\n result = np.kron(np.eye(2**1), np.kron(gate_matrix['SWAP'], np.eye(2**1)))\n assert np.allclose(test_matrix, result)\n\n test_matrix = lifted_gate(2, gate_matrix['SWAP'], 4).toarray()\n result = np.kron(np.eye(2**0), np.kron(gate_matrix['SWAP'], np.eye(2**2)))\n assert np.allclose(test_matrix, result)\n\n test_matrix = lifted_gate(8, gate_matrix['SWAP'], 10).toarray()\n result = np.kron(np.eye(2**0), np.kron(gate_matrix['SWAP'], np.eye(2**8)))\n assert np.allclose(test_matrix, result)\n\n\ndef test_two_qubit_gates():\n unitary_test = apply_gate(gate_matrix['CNOT'], [1, 0], 2).toarray()\n unitary_true = np.kron(utility_gates['P0'], np.eye(2)) + \\\n np.kron(utility_gates['P1'], gate_matrix['X'])\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['CNOT'], [0, 1], 2).toarray()\n unitary_true = np.kron(np.eye(2), utility_gates['P0']) + \\\n np.kron(gate_matrix['X'], utility_gates['P1'])\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['CNOT'], [2, 1], 3).toarray()\n unitary_true = np.kron(gate_matrix['CNOT'], np.eye(2 ** 1))\n assert np.allclose(unitary_test, unitary_true)\n\n with pytest.raises(ValueError):\n apply_gate(gate_matrix['CNOT'], [2, 1], 2)\n\n unitary_test = apply_gate(gate_matrix['ISWAP'], [0, 1], 3).toarray()\n unitary_true = np.kron(np.eye(2), gate_matrix['ISWAP'])\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['ISWAP'], [1, 0], 3).toarray()\n unitary_true = np.kron(np.eye(2), gate_matrix['ISWAP'])\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['ISWAP'], [1, 2], 4).toarray()\n unitary_true = np.kron(np.eye(2), np.kron(gate_matrix['ISWAP'], np.eye(2)))\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['ISWAP'], [3, 2], 4).toarray()\n unitary_true = np.kron(gate_matrix['ISWAP'], np.eye(4))\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = 
apply_gate(gate_matrix['ISWAP'], [2, 3], 4).toarray()\n unitary_true = np.kron(gate_matrix['ISWAP'], np.eye(4))\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['ISWAP'], [0, 3], 4).toarray()\n swap_01 = np.kron(np.eye(4), gate_matrix['SWAP'])\n swap_12 = np.kron(np.eye(2), np.kron(gate_matrix['SWAP'], np.eye(2)))\n swapper = swap_12.dot(swap_01)\n V = np.kron(gate_matrix['ISWAP'], np.eye(4))\n unitary_true = np.dot(np.conj(swapper.T), np.dot(V, swapper))\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['ISWAP'], [3, 0], 4).toarray()\n swap_01 = np.kron(np.eye(4), gate_matrix['SWAP'])\n swap_12 = np.kron(np.eye(2), np.kron(gate_matrix['SWAP'], np.eye(2)))\n swapper = swap_12.dot(swap_01)\n V = np.kron(gate_matrix['ISWAP'], np.eye(4))\n unitary_true = np.dot(np.conj(swapper.T), np.dot(V, swapper))\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['ISWAP'], [1, 3], 4).toarray()\n swap_12 = np.kron(np.eye(2), np.kron(gate_matrix['SWAP'], np.eye(2)))\n swapper = swap_12\n V = np.kron(gate_matrix['ISWAP'], np.eye(4))\n unitary_true = np.dot(np.conj(swapper.T), np.dot(V, swapper))\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['ISWAP'], [3, 1], 4).toarray()\n swap_12 = np.kron(np.eye(2), np.kron(gate_matrix['SWAP'], np.eye(2)))\n swapper = swap_12\n V = np.kron(gate_matrix['ISWAP'], np.eye(4))\n unitary_true = np.dot(np.conj(swapper.T), np.dot(V, swapper))\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['CNOT'], [3, 1], 4).toarray()\n swap_12 = np.kron(np.eye(2), np.kron(gate_matrix['SWAP'], np.eye(2)))\n swapper = swap_12\n V = np.kron(gate_matrix['CNOT'], np.eye(4))\n unitary_true = np.dot(np.conj(swapper.T), np.dot(V, swapper))\n assert np.allclose(unitary_test, unitary_true)\n\n unitary_test = apply_gate(gate_matrix['SWAP'], [3, 1], 4).toarray()\n swap_12 = np.kron(np.eye(2), np.kron(gate_matrix['SWAP'], np.eye(2)))\n swapper = swap_12\n V = np.kron(gate_matrix['SWAP'], np.eye(4))\n unitary_true = np.dot(np.conj(swapper.T), np.dot(V, swapper))\n assert np.allclose(unitary_test, unitary_true)\n\n\ndef test_single_qubit_gates():\n test_unitary = apply_gate(gate_matrix['H'], 0, 4).toarray()\n true_unitary = np.kron(np.eye(8), gate_matrix['H'])\n assert np.allclose(test_unitary, true_unitary)\n\n test_unitary = apply_gate(gate_matrix['H'], 1, 4).toarray()\n true_unitary = np.kron(np.eye(4), np.kron(gate_matrix['H'], np.eye(2)))\n assert np.allclose(test_unitary, true_unitary)\n\n test_unitary = apply_gate(gate_matrix['H'], 2, 4).toarray()\n true_unitary = np.kron(np.eye(2), np.kron(gate_matrix['H'], np.eye(4)))\n assert np.allclose(test_unitary, true_unitary)\n\n test_unitary = apply_gate(gate_matrix['H'], 3, 4).toarray()\n true_unitary = np.kron(gate_matrix['H'], np.eye(8))\n assert np.allclose(test_unitary, true_unitary)\n\n test_unitary = apply_gate(gate_matrix['H'], 0, 5).toarray()\n true_unitary = np.kron(np.eye(2**4), gate_matrix['H'])\n assert np.allclose(test_unitary, true_unitary)\n\n test_unitary = apply_gate(gate_matrix['H'], 1, 5).toarray()\n true_unitary = np.kron(np.eye(2**3), np.kron(gate_matrix['H'], np.eye(2)))\n assert np.allclose(test_unitary, true_unitary)\n\n test_unitary = apply_gate(gate_matrix['H'], 2, 5).toarray()\n true_unitary = np.kron(np.eye(2**2), np.kron(gate_matrix['H'], np.eye(2**2)))\n assert np.allclose(test_unitary, true_unitary)\n\n test_unitary = 
apply_gate(gate_matrix['H'], 3, 5).toarray()\n true_unitary = np.kron(np.eye(2**1), np.kron(gate_matrix['H'], np.eye(2**3)))\n assert np.allclose(test_unitary, true_unitary)\n\n test_unitary = apply_gate(gate_matrix['H'], 4, 5).toarray()\n true_unitary = np.kron(np.eye(2**0), np.kron(gate_matrix['H'], np.eye(2**4)))\n assert np.allclose(test_unitary, true_unitary)\n\n\ndef test_tensor_gates_single_qubit():\n prog = Program().inst([Hgate(0)])\n test_unitary = tensor_gates(gate_matrix, {}, prog.instructions[0], 1).toarray()\n true_unitary = gate_matrix['H']\n assert np.allclose(test_unitary, true_unitary)\n\n prog = Program().inst([Hgate(0)])\n test_unitary = tensor_gates(gate_matrix, {}, prog.instructions[0], 5).toarray()\n true_unitary = np.kron(np.eye(2**4), gate_matrix['H'])\n assert np.allclose(test_unitary, true_unitary)\n\n prog = Program().inst([RXgate(0.2)(3)])\n test_unitary = tensor_gates(gate_matrix, {}, prog.instructions[0], 5).toarray()\n true_unitary = np.kron(np.eye(2**1), np.kron(gate_matrix['RX'](0.2), np.eye(2**3)))\n assert np.allclose(test_unitary, true_unitary)\n\n prog = Program().inst([RXgate(0.5)(4)])\n test_unitary = tensor_gates(gate_matrix, {}, prog.instructions[0], 5).toarray()\n true_unitary = np.kron(np.eye(2**0), np.kron(gate_matrix['RX'](0.5), np.eye(2**4)))\n assert np.allclose(test_unitary, true_unitary)\n\n\ndef test_tensor_gates_two_qubit():\n prog = Program().inst([CNOTgate(0, 1)])\n test_unitary = tensor_gates(gate_matrix, {}, prog.instructions[0], 4).toarray()\n true_unitary = apply_gate(gate_matrix['CNOT'], [0, 1], 4).toarray()\n assert np.allclose(test_unitary, true_unitary)\n\n prog = Program().inst([CNOTgate(1, 0)])\n test_unitary = tensor_gates(gate_matrix, {}, prog.instructions[0], 4).toarray()\n true_unitary = apply_gate(gate_matrix['CNOT'], [1, 0], 4).toarray()\n assert np.allclose(test_unitary, true_unitary)\n\n prog = Program().inst([CNOTgate(1, 3)])\n test_unitary = tensor_gates(gate_matrix, {}, prog.instructions[0], 4).toarray()\n true_unitary = apply_gate(gate_matrix['CNOT'], [1, 3], 4).toarray()\n assert np.allclose(test_unitary, true_unitary)\n\n\ndef test_tensor_up_error_catch():\n \"\"\"Testing tensor up type checking\"\"\"\n x_term = PauliTerm(\"X\", 5)\n\n # testing type rejection\n with pytest.raises(TypeError):\n tensor_up(x_term, 5)\n\n # testing index rejection\n with pytest.raises(IndexError):\n tensor_up(PauliSum([x_term]), 3)\n\n\ndef test_tensor_up_correctness():\n \"\"\"Check the correctness of the tensor up routine\"\"\"\n xy_term = PauliSum([PauliTerm(\"X\", 0)*PauliTerm(\"Y\", 1)])\n\n # test correctness\n trial_matrix = tensor_up(xy_term, 2)\n true_matrix = np.kron(gate_matrix['Y'], gate_matrix['X'])\n np.testing.assert_allclose(trial_matrix, true_matrix)\n\n x1_term = PauliSum([PauliTerm(\"X\", 1)])\n trial_matrix = tensor_up(x1_term, 2)\n true_matrix = np.kron(gate_matrix['X'], gate_matrix['I'])\n np.testing.assert_allclose(trial_matrix, true_matrix)\n\n zpz_term = PauliTerm(\"Z\", 0) + PauliTerm(\"Z\", 1)\n trial_matrix = tensor_up(zpz_term, 2)\n true_matrix = np.zeros((4, 4))\n true_matrix[0, 0] = 2\n true_matrix[-1, -1] = -2\n np.testing.assert_allclose(trial_matrix, true_matrix)\n",
"#!/usr/bin/python\n##############################################################################\n# Copyright 2016-2017 Rigetti Computing\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\"\"\"\nUtility functions for generating gates for evolving states on the full Hilbert\nspace for qubits.\n\nNote: uses SciPy sparse diagonal (DIA) representation to increase space and\ntimeefficiency.\n\"\"\"\nimport numpy as np\nfrom collections import Sequence\nfrom numbers import Integral\n\nimport scipy.sparse as sps\nfrom pyquil.quilbase import *\nfrom pyquil.paulis import PauliSum\n\nfrom referenceqvm.gates import gate_matrix\n\n\"\"\"\nIf True, only physically-implementable operations allowed!\ni.e. local SWAPS only (topology of QPU is periodic with nearest-neighbor gate\noperations allowed, and a qubit architecture may be input as needed)\n\nFor now, implicitly assumes a linear chain of qubit connectivity, for ease &\nguaranteed termination in swap algorithm. Arbitrary SWAP operations to be\nimplemented in a future release.\n\"\"\"\ntopological_QPU = False\n\n\ndef lifted_gate(i, matrix, num_qubits):\n \"\"\"\n Lifts input k-qubit gate on adjacent qubits starting from qubit i\n to complete Hilbert space of dimension 2 ** num_qubits.\n\n Ex: 1-qubit gate, lifts from qubit i\n Ex: 2-qubit gate, lifts from qubits (i+1, i)\n Ex: 3-qubit gate, lifts from qubits (i+2, i+1, i), operating in that order\n\n In general, this takes a k-qubit gate (2D matrix 2^k x 2^k) and lifts\n it to the complete Hilbert space of dim 2^num_qubits, as defined by\n the rightward tensor product (1) in arXiv:1608.03355.\n\n Note that while the qubits are addressed in decreasing order,\n starting with num_qubit - 1 on the left and ending with qubit 0 on the\n right (in a little-endian fashion), gates are still lifted to apply\n on qubits in increasing index (right-to-left) order.\n\n :param int i: starting qubit to lift matrix from (incr. index order)\n :param np.array matrix: the matrix to be lifted\n :param int num_qubits: number of overall qubits present in space\n\n :return: matrix representation of operator acting on the\n complete Hilbert space of all num_qubits.\n :rtype: sparse_array\n \"\"\"\n # input is checked in parent function apply_gate()\n # Find gate size (number of qubits operated on)\n if (matrix.shape[0] & matrix.shape[0] - 1) != 0:\n raise TypeError(\"Invalid gate size. Must be power of 2! 
\"\n \"Received {} size\".format(matrix.shape))\n else:\n gate_size = np.log2(matrix.shape[0])\n # Is starting gate index out of range?\n if not (0 <= i < num_qubits + 1 - gate_size):\n raise ValueError(\"Gate index out of range!\")\n\n # Outer-product to lift gate to complete Hilbert space\n # bottom: i qubits below target\n bottom_matrix = sps.eye(2 ** i).astype(np.complex128)\n # top: Nq - i (bottom) - gate_size (gate) qubits above target\n top_qubits = num_qubits - i - gate_size\n top_matrix = sps.eye(2 ** top_qubits).astype(np.complex128)\n return sps.kron(top_matrix, sps.kron(matrix, bottom_matrix))\n\n\ndef swap_inds_helper(i, j, arr):\n \"\"\"\n Swaps indices in array, in-place.\n\n :param int i: index 1\n :param int j: index 2\n :param array-like arr: {list, np.array} array to be modified in-place\n \"\"\"\n tmp = arr[i]\n arr[i] = arr[j]\n arr[j] = tmp\n\n\ndef two_swap_helper(j, k, num_qubits, qubit_map):\n \"\"\"\n Generate the permutation matrix that permutes two single-particle Hilbert\n spaces into adjacent positions.\n\n ALWAYS swaps j TO k. Recall that Hilbert spaces are ordered in decreasing\n qubit index order. Hence, j > k implies that j is to the left of k.\n\n End results:\n j == k: nothing happens\n j > k: Swap j right to k, until j at ind (k) and k at ind (k+1).\n j < k: Swap j left to k, until j at ind (k) and k at ind (k-1).\n\n Done in preparation for arbitrary 2-qubit gate application on ADJACENT\n qubits.\n\n :param int j: starting qubit index\n :param int k: ending qubit index\n :param int num_qubits: number of qubits in Hilbert space\n :param np.array qubit_map: current index mapping of qubits\n\n :return: tuple of swap matrix for the specified permutation,\n and the new qubit_map, after permutation is made\n :rtype: tuple (np.array, np.array)\n \"\"\"\n if not (0 <= j < num_qubits and 0 <= k < num_qubits):\n raise ValueError(\"Permutation SWAP index not valid\")\n\n perm = sps.eye(2 ** num_qubits).astype(np.complex128)\n new_qubit_map = np.copy(qubit_map)\n\n if j == k:\n # nothing happens\n return perm, new_qubit_map\n elif j > k:\n # swap j right to k, until j at ind (k) and k at ind (k+1)\n for i in range(j, k, -1):\n perm = lifted_gate(i - 1, gate_matrix['SWAP'], num_qubits)\\\n .dot(perm)\n swap_inds_helper(i - 1, i, new_qubit_map)\n elif j < k:\n # swap j left to k, until j at ind (k) and k at ind (k-1)\n for i in range(j, k, 1):\n perm = lifted_gate(i, gate_matrix['SWAP'], num_qubits).dot(perm)\n swap_inds_helper(i, i + 1, new_qubit_map)\n\n return perm, new_qubit_map\n\n\ndef permutation_arbitrary(args, num_qubits):\n \"\"\"\n Generate the permutation matrix that permutes an arbitrary number of\n single-particle Hilbert spaces into adjacent positions.\n\n Transposes the qubit indices in the order they are passed to a\n contiguous region in the complete Hilbert space, in increasing\n qubit index order (preserving the order they are passed in).\n\n Gates are usually defined as `GATE 0 1 2`, with such an argument ordering\n dictating the layout of the matrix corresponding to GATE. 
If such an\n instruction is given, actual qubits (0, 1, 2) need to be swapped into the\n positions (2, 1, 0), because the lifting operation taking the 8 x 8 matrix\n of GATE is done in the little-endian (reverse) addressed qubit space.\n\n For example, suppose I have a Quil command CCNOT 20 15 10.\n The median of the qubit indices is 15 - hence, we permute qubits\n [20, 15, 10] into the final map [16, 15, 14] to minimize the number of\n swaps needed, and so we can directly operate with the final CCNOT, when\n lifted from indices [16, 15, 14] to the complete Hilbert space.\n\n Notes: assumes qubit indices are unique (assured in parent call).\n\n See documentation for further details and explanation.\n\n Done in preparation for arbitrary gate application on\n adjacent qubits.\n\n :param Sequence args: (int) Qubit indices in the order the gate is\n applied to.\n :param int num_qubits: Number of qubits in system\n\n :return:\n perm - permutation matrix providing the desired qubit reordering\n qubit_arr - new indexing of qubits presented in left to right\n decreasing index order. Should be identical to passed 'args'.\n start_i - starting index to lift gate from\n :rtype: tuple (sparse_array, np.array, int)\n \"\"\"\n # Don't permit NoneType or empty sequences, but allow 0\n if isinstance(args, Sequence):\n if not args:\n raise ValueError(\"Need at least one qubit index to perform\"\n \"permutation\")\n else:\n args = [args]\n\n inds = np.array([value_get(x) for x in args])\n for ind in inds:\n if not (0 <= ind < num_qubits):\n raise ValueError(\"Permutation SWAP index not valid\")\n\n # Begin construction of permutation\n perm = sps.eye(2 ** num_qubits).astype(np.complex128)\n\n # First, sort the list and find the median.\n sort_i = np.argsort(inds)\n sorted_inds = inds[sort_i]\n med_i = int(len(sort_i) / 2)\n med = sorted_inds[med_i]\n\n # The starting position of all specified Hilbert spaces begins at\n # the qubit at (median - med_i)\n start = med - med_i\n # Array of final indices the arguments are mapped to, from\n # high index to low index, left to right ordering\n final_map = np.arange(start, start + len(inds))[::-1]\n start_i = final_map[-1]\n\n # Note that the lifting operation takes a k-qubit gate operating\n # on the qubits i+k-1, i+k-2, ... i (left to right).\n # two_swap_helper can be used to build the\n # permutation matrix by filling out the final map by sweeping over\n # the args from left to right and back again, swapping qubits into\n # position. we loop over the args until the final mapping matches\n # the argument.\n qubit_arr = np.arange(num_qubits) # current qubit indexing\n\n made_it = False\n right = True\n while not made_it:\n array = range(len(inds)) if right else range(len(inds))[::-1]\n for i in array:\n pmod, qubit_arr = two_swap_helper(np.where(qubit_arr == inds[i])[0][0],\n final_map[i], num_qubits,\n qubit_arr)\n\n # update permutation matrix\n perm = pmod.dot(perm)\n if np.allclose(qubit_arr[final_map[-1]:final_map[0] + 1][::-1], inds):\n made_it = True\n break\n\n # for next iteration, go in opposite direction\n right = not right\n\n assert np.allclose(qubit_arr[final_map[-1]:final_map[0] + 1][::-1], inds)\n\n return perm, qubit_arr[::-1], start_i\n\n\ndef permutation_arbitrary_swap(args, num_qubits):\n \"\"\"\n Not yet implemented.\n \"\"\"\n raise NotImplementedError(\"Arbitrary topological QPU not yet implemented\")\n\n\ndef apply_gate(matrix, args, num_qubits):\n \"\"\"\n Apply k-qubit gate of size (2**k, 2**k) on the qubits in the order passed\n in args. 
e.g. GATE(arg[0], arg[1], ... arg[k-1]).\n\n If topological_QPU is True, we use local SWAP gates only as allowed by the\n qubit architecture --- as detailed in\n permutation_arbitrary() --- to permute the gate arguments to be adjacent to\n each other, and then lift the gate to the complete Hilbert space and\n perform the multiplication.\n\n :param np.array matrix: matrix specification of GATE\n :param tuple args: (int) qubit indices to operate gate on\n :param int num_qubits: number of qubits overall\n\n :return: transformed gate that acts on the specified qubits\n :rtype: np.array\n \"\"\"\n if not isinstance(num_qubits, Integral) or num_qubits < 1:\n raise ValueError(\"Improper number of qubits passed.\")\n if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:\n raise TypeError(\"Gate array must be two-dimensional and \"\n \"square matrix.\")\n\n # Find gate size (number of qubits operated on)\n if (matrix.shape[0] & matrix.shape[0] - 1) != 0:\n raise TypeError(\"Invalid gate size. Must be power of 2! \"\n \"Received {} size\".format(matrix.shape))\n else:\n gate_size = int(np.log2(matrix.shape[0]))\n\n # Is gate size proper?\n if not (1 <= gate_size <= num_qubits):\n raise TypeError(\"Invalid gate size. k-qubit gates supported, for \"\n \"k in [1, num_qubits]\")\n\n if not topological_QPU:\n # use local SWAPs\n pi_permutation_matrix, final_map, start_i = permutation_arbitrary(args, num_qubits)\n else:\n # assume fully-connected, arbitrary SWAPs allowed\n raise NotImplementedError(\"Arbitrary SWAPs not yet implemented\")\n\n # Transform qubit indices into ints\n if isinstance(args, Sequence):\n args = tuple(value_get(x) for x in args)\n else:\n args = value_get(args)\n\n if start_i:\n assert np.allclose(final_map[- gate_size - start_i: - start_i],\n np.array(args))\n else:\n assert np.allclose(final_map[- gate_size - start_i:], np.array(args))\n\n v_matrix = lifted_gate(start_i, matrix, num_qubits)\n return np.dot(np.conj(pi_permutation_matrix.T),\n np.dot(v_matrix, pi_permutation_matrix))\n\n\ndef tensor_gates(gate_set, defgate_set, pyquil_gate, num_qubits):\n \"\"\"\n Take a pyQuil_gate instruction (assumed in the Quil Standard Gate Set\n or in defined_gates dictionary), returns the unitary over the complete\n Hilbert space corresponding to the instruction.\n\n :param dict gate_set: gate dictionary (name, matrix) pairs\n :param dict defgate_set: defined gate dictionary (name, matrix) pairs\n :param Gate pyquil_gate: Instruction object for pyQuil gate\n :param int num_qubits: number of qubits in Hilbert space\n\n :return: input gate lifted to full Hilbert space and applied\n :rtype: np.array\n \"\"\"\n if pyquil_gate.name in gate_set:\n # Input gate set. 
Assumed to be standard gate set.\n        dict_check = gate_set\n    elif pyquil_gate.name in defgate_set:\n        # defined_gates\n        dict_check = defgate_set\n    else:\n        raise ValueError(\"Instruction (presumed a Gate or DefGate) is not \"\n                         \"found in standard gate set or defined \"\n                         \"gate set of program!\")\n\n    args = tuple(value_get(x) for x in pyquil_gate.qubits) \\\n        if dict_check == gate_matrix else tuple(pyquil_gate.qubits)\n\n    if pyquil_gate.params:\n        gate = apply_gate(dict_check[pyquil_gate.name]\n                          (*[value_get(p) for p in pyquil_gate.params]),\n                          args,\n                          num_qubits)\n    else:\n        gate = apply_gate(dict_check[pyquil_gate.name],\n                          args,\n                          num_qubits)\n    return gate\n\n\ndef tensor_up(pauli_terms, num_qubits):\n    \"\"\"\n    Takes a PauliSum object along with a total number of\n    qubits and returns a matrix corresponding to the tensor representation of\n    the object.\n\n    Useful for generating the full Hamiltonian after a particular fermion to\n    pauli transformation. For example:\n\n    Converting a PauliSum X0Y1 + Y1X0 into the matrix\n\n    .. code-block:: python\n\n        [[ 0.+0.j,  0.+0.j,  0.+0.j,  0.-2.j],\n         [ 0.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],\n         [ 0.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],\n         [ 0.+2.j,  0.+0.j,  0.+0.j,  0.+0.j]]\n\n\n    :param pauli_terms: (PauliSum) object of PauliTerm\n    :param num_qubits: (int) number of qubits in the system\n    :returns: (numpy array) representation of the pauli_terms operator\n    \"\"\"\n    from scipy.sparse import kron, csr_matrix\n\n    if not isinstance(pauli_terms, PauliSum):\n        raise TypeError(\"can only tensor PauliSum\")\n\n    # check if operator is valid w.r.t. the input number of qubits\n    for term in pauli_terms.terms:\n        if term._ops.keys():\n            if max(term._ops.keys()) >= num_qubits:\n                raise IndexError(\"pauli_terms has higher index than qubits\")\n\n    # big_hilbert = csr_matrix(np.zeros((2 ** num_qubits, 2 ** num_qubits), dtype=complex))\n    big_hilbert = csr_matrix((2 ** num_qubits, 2 ** num_qubits), dtype=complex)\n    # left kronecker product corresponds to the correct basis ordering\n    for term in pauli_terms.terms:\n        tmp_big_hilbert = csr_matrix(np.array([1]))\n        for index in range(num_qubits):\n            gate_sparse = csr_matrix(gate_matrix[term[index]])\n            tmp_big_hilbert = kron(gate_sparse, tmp_big_hilbert, format='csr')\n\n        big_hilbert += tmp_big_hilbert * term.coefficient\n\n    return np.asarray(big_hilbert.todense())\n\n\ndef value_get(param_obj):\n    \"\"\"\n    Function that returns the raw number / string stored in certain pyQuil\n    objects.\n    \"\"\"\n    if isinstance(param_obj, (float, int)):\n        return param_obj\n    elif isinstance(param_obj, Qubit):\n        return param_obj.index\n    elif isinstance(param_obj, Addr):\n        return param_obj.address\n    elif isinstance(param_obj, Slot):\n        return param_obj.value()\n    elif isinstance(param_obj, Label):\n        return param_obj.name\n"
] | [
[
"numpy.dot",
"numpy.allclose",
"numpy.conj",
"numpy.eye",
"numpy.kron",
"numpy.testing.assert_allclose",
"numpy.zeros"
],
[
"numpy.dot",
"numpy.log2",
"numpy.allclose",
"numpy.conj",
"scipy.sparse.eye",
"numpy.arange",
"scipy.sparse.csr_matrix",
"numpy.copy",
"numpy.argsort",
"scipy.sparse.kron",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
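All the `lifted_gate` assertions in the row above instantiate one Kronecker-product convention: a k-qubit gate starting at qubit `i` of an n-qubit register is embedded as `eye(2**(n-i-k)) (x) U (x) eye(2**i)`, with qubit 0 rightmost (little-endian). A dense NumPy sketch of that convention — `lift_dense` is an illustrative stand-in; the library version returns a SciPy sparse matrix:

    import numpy as np

    def lift_dense(i, U, n):
        # k-qubit gate acts on qubits (i+k-1, ..., i); identities pad the rest
        k = int(np.log2(U.shape[0]))
        return np.kron(np.eye(2 ** (n - i - k)), np.kron(U, np.eye(2 ** i)))

    H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
    # mirrors test_single_qubit_gates: H on qubit 1 of a 4-qubit register
    assert np.allclose(lift_dense(1, H, 4),
                       np.kron(np.eye(4), np.kron(H, np.eye(2))))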
fdp0525/seam-erasure | [
"fa3aece97b4a4ab0a2bdaf0ea96911253d8a38fc"
] | [
"SeamErasure/points_in_triangle.py"
] | [
"\"\"\"\nUtility file for testing if points are in a given triangle.\n\nWritten by Zachary Ferguson\n\"\"\"\n\nimport numpy\n\n\ndef points_in_triangle(tri, points, tol=1e-8):\n \"\"\"\n Test if the points are inside the triangle.\n Input:\n tri - the triangle as a matrix where the rows are the xy points.\n points - the points as a matrix where the rows are the xy points.\n Returns a vector of boolean values.\n \"\"\"\n # B is the transformation from xy to barycentric coordinates\n B = numpy.vstack([tri.T, numpy.ones(3)])\n\n vecs = numpy.vstack([points.T, numpy.ones((1, points.shape[0]))])\n\n # Convert the grid from XY locations to barycentric coordinates.\n # This will only fail of the triangle is degenerate.\n try:\n coords = numpy.linalg.solve(B, vecs)\n except:\n return numpy.zeros(points.shape[0]).astype(bool)\n\n return numpy.all(coords >= -tol, axis = 0)\n"
] | [
[
"numpy.all",
"numpy.linalg.solve",
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
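The barycentric test in `points_in_triangle` above reduces to a single linear solve: stack a row of ones under the transposed vertices and under the query points, solve, and a point lies inside iff all three coordinates are non-negative within tolerance. A small usage sketch with made-up coordinates:

    import numpy

    tri = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    points = numpy.array([[0.25, 0.25],   # inside
                          [0.90, 0.90]])  # outside
    B = numpy.vstack([tri.T, numpy.ones(3)])
    vecs = numpy.vstack([points.T, numpy.ones((1, points.shape[0]))])
    coords = numpy.linalg.solve(B, vecs)        # one column of barycentric coords per point
    print(numpy.all(coords >= -1e-8, axis=0))   # [ True False]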
AshishSardana/make-a-thon | [
"9a50ceca893c484812c07fbc210639182d9e6ff7"
] | [
"1-Neonatal Jaunice Detection/Scripts/skinDetect_2.py"
] | [
"# Required modules\r\nimport cv2\r\nimport numpy\r\n\r\n# Constants for finding range of skin color in YCrCb\r\nmin_YCrCb = numpy.array([0,133,77],numpy.uint8)\r\nmax_YCrCb = numpy.array([255,173,127],numpy.uint8)\r\n\r\n# Create a window to display the camera feed\r\ncv2.namedWindow('Camera Output')\r\n\r\n# Get pointer to video frames from primary device\r\nvideoFrame = cv2.VideoCapture(0)\r\n\r\n# Process the video frames\r\nkeyPressed = -1 # -1 indicates no key pressed\r\n\r\nwhile(keyPressed < 0): # any key pressed has a value >= 0\r\n\r\n # Grab video frame, decode it and return next video frame\r\n readSucsess, sourceImage = videoFrame.read()\r\n\r\n # Convert image to YCrCb\r\n imageYCrCb = cv2.cvtColor(sourceImage,cv2.COLOR_BGR2YCR_CB)\r\n\r\n # Find region with skin tone in YCrCb image\r\n skinRegion = cv2.inRange(imageYCrCb,min_YCrCb,max_YCrCb)\r\n\r\n # Do contour detection on skin region\r\n img2, contours, hierarchy = cv2.findContours(skinRegion, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n # Draw the contour on the source image\r\n for i, c in enumerate(contours):\r\n area = cv2.contourArea(c)\r\n if area > 1000:\r\n cv2.drawContours(sourceImage, contours, i, (0, 255, 0), 3)\r\n\r\n # Display the source image\r\n cv2.imshow('Camera Output',sourceImage)\r\n\r\n # Check for user input to close program\r\n keyPressed = cv2.waitKey(2) # wait 2 millisecond in each iteration of while loop\r\n\r\n# Close window and camera after exiting the while loop\r\ncv2.destroyWindow('Camera Output')\r\nvideoFrame.release()\r\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
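Stripped of the webcam loop, the detector above is a fixed-range threshold in YCrCb space. A minimal sketch of just that step on an in-memory image — the sample BGR values are made up; by the standard BGR-to-YCrCb conversion the first pixel should land inside the Cr/Cb skin band and the second outside:

    import cv2
    import numpy

    min_YCrCb = numpy.array([0, 133, 77], numpy.uint8)
    max_YCrCb = numpy.array([255, 173, 127], numpy.uint8)

    # 1x2 BGR test "image": a skin-toned pixel and a pure-blue pixel
    image = numpy.array([[[120, 160, 210], [255, 0, 0]]], numpy.uint8)
    imageYCrCb = cv2.cvtColor(image, cv2.COLOR_BGR2YCR_CB)
    skinRegion = cv2.inRange(imageYCrCb, min_YCrCb, max_YCrCb)
    print(skinRegion)  # 255 where in range, 0 elsewhere -> first pixel only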
mkmkryu/scanpy2 | [
"f3db32a142dc31c1b628380db1c969a6d0b9dc3a",
"f3db32a142dc31c1b628380db1c969a6d0b9dc3a",
"f3db32a142dc31c1b628380db1c969a6d0b9dc3a",
"f3db32a142dc31c1b628380db1c969a6d0b9dc3a",
"f3db32a142dc31c1b628380db1c969a6d0b9dc3a"
] | [
"scanpy/tools/_dendrogram.py",
"scanpy/external/tl/_palantir.py",
"scanpy/plotting/palettes.py",
"scanpy/plotting/_tools/paga.py",
"scanpy/tests/test_sim.py"
] | [
"\"\"\"\nComputes a dendrogram based on a given categorical observation.\n\"\"\"\n\nfrom typing import Optional, Sequence, Dict, Any\n\nimport pandas as pd\nfrom anndata import AnnData\nfrom pandas.api.types import is_categorical_dtype\n\nfrom .. import logging as logg\nfrom .._utils import _doc_params\nfrom ..tools._utils import _choose_representation, doc_use_rep, doc_n_pcs\n\n\n@_doc_params(n_pcs=doc_n_pcs, use_rep=doc_use_rep)\ndef dendrogram(\n adata: AnnData,\n groupby: str,\n n_pcs: Optional[int] = None,\n use_rep: Optional[str] = None,\n var_names: Optional[Sequence[str]] = None,\n use_raw: Optional[bool] = None,\n cor_method: str = 'pearson',\n linkage_method: str = 'complete',\n optimal_ordering: bool = False,\n key_added: Optional[str] = None,\n inplace: bool = True,\n) -> Optional[Dict[str, Any]]:\n \"\"\"\\\n Computes a hierarchical clustering for the given `groupby` categories.\n\n By default, the PCA representation is used unless `.X`\n has less than 50 variables.\n\n Alternatively, a list of `var_names` (e.g. genes) can be given.\n\n Average values of either `var_names` or components are used\n to compute a correlation matrix.\n\n The hierarchical clustering can be visualized using\n :func:`scanpy.pl.dendrogram` or multiple other visualizations that can\n include a dendrogram: :func:`~scanpy.pl.matrixplot`,\n :func:`~scanpy.pl.heatmap`, :func:`~scanpy.pl.dotplot`,\n and :func:`~scanpy.pl.stacked_violin`.\n\n .. note::\n The computation of the hierarchical clustering is based on predefined\n groups and not per cell. The correlation matrix is computed using by\n default pearson but other methods are available.\n\n Parameters\n ----------\n adata\n Annotated data matrix\n {n_pcs}\n {use_rep}\n var_names\n List of var_names to use for computing the hierarchical clustering.\n If `var_names` is given, then `use_rep` and `n_pcs` is ignored.\n use_raw\n Only when `var_names` is not None.\n Use `raw` attribute of `adata` if present.\n cor_method\n correlation method to use.\n Options are 'pearson', 'kendall', and 'spearman'\n linkage_method\n linkage method to use. See :func:`scipy.cluster.hierarchy.linkage`\n for more information.\n optimal_ordering\n Same as the optimal_ordering argument of :func:`scipy.cluster.hierarchy.linkage`\n which reorders the linkage matrix so that the distance between successive\n leaves is minimal.\n key_added\n By default, the dendrogram information is added to\n `.uns[f'dendrogram_{{groupby}}']`.\n Notice that the `groupby` information is added to the dendrogram.\n inplace\n If `True`, adds dendrogram information to `adata.uns[key_added]`,\n else this function returns the information.\n\n Returns\n -------\n If `inplace=False`, returns dendrogram information,\n else `adata.uns[key_added]` is updated with it.\n\n Examples\n --------\n >>> import scanpy as sc\n >>> adata = sc.datasets.pbmc68k_reduced()\n >>> sc.tl.dendrogram(adata, groupby='bulk_labels')\n >>> sc.pl.dendrogram(adata)\n >>> markers = ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ']\n >>> sc.pl.dotplot(adata, markers, groupby='bulk_labels', dendrogram=True)\n \"\"\"\n if isinstance(groupby, str):\n # if not a list, turn into a list\n groupby = [groupby]\n for group in groupby:\n if group not in adata.obs_keys():\n raise ValueError(\n 'groupby has to be a valid observation. '\n f'Given value: {group}, valid observations: {adata.obs_keys()}'\n )\n if not is_categorical_dtype(adata.obs[group]):\n raise ValueError(\n 'groupby has to be a categorical observation. 
'\n f'Given value: {group}, Column type: {adata.obs[group].dtype}'\n )\n\n if var_names is None:\n rep_df = pd.DataFrame(\n _choose_representation(adata, use_rep=use_rep, n_pcs=n_pcs)\n )\n categorical = adata.obs[groupby[0]]\n if len(groupby) > 1:\n for group in groupby[1:]:\n # create new category by merging the given groupby categories\n categorical = (\n categorical.astype(str) + \"_\" + adata.obs[group].astype(str)\n ).astype('category')\n categorical.name = \"_\".join(groupby)\n\n rep_df.set_index(categorical, inplace=True)\n categories = rep_df.index.categories\n else:\n gene_names = adata.raw.var_names if use_raw else adata.var_names\n from ..plotting._anndata import _prepare_dataframe\n\n categories, rep_df = _prepare_dataframe(adata, gene_names, groupby, use_raw)\n\n # aggregate values within categories using 'mean'\n mean_df = rep_df.groupby(level=0).mean()\n\n import scipy.cluster.hierarchy as sch\n\n corr_matrix = mean_df.T.corr(method=cor_method)\n z_var = sch.linkage(\n corr_matrix, method=linkage_method, optimal_ordering=optimal_ordering\n )\n dendro_info = sch.dendrogram(z_var, labels=list(categories), no_plot=True)\n\n dat = dict(\n linkage=z_var,\n groupby=groupby,\n use_rep=use_rep,\n cor_method=cor_method,\n linkage_method=linkage_method,\n categories_ordered=dendro_info['ivl'],\n categories_idx_ordered=dendro_info['leaves'],\n dendrogram_info=dendro_info,\n correlation_matrix=corr_matrix.values,\n )\n\n if inplace:\n if key_added is None:\n key_added = f'dendrogram_{\"_\".join(groupby)}'\n logg.info(f'Storing dendrogram info using `.uns[{key_added!r}]`')\n adata.uns[key_added] = dat\n else:\n return dat\n",
"\"\"\"\\\nRun Diffusion maps using the adaptive anisotropic kernel\n\"\"\"\nfrom typing import Optional, List\n\nimport pandas as pd\nfrom anndata import AnnData\n\nfrom ... import logging as logg\n\n\ndef palantir(\n adata: AnnData,\n n_components: int = 10,\n knn: int = 30,\n alpha: float = 0,\n use_adjacency_matrix: bool = False,\n distances_key: Optional[str] = None,\n n_eigs: int = None,\n impute_data: bool = True,\n n_steps: int = 3,\n copy: bool = False,\n) -> Optional[AnnData]:\n \"\"\"\\\n Run Diffusion maps using the adaptive anisotropic kernel [Setty18]_.\n\n Palantir is an algorithm to align cells along differentiation trajectories.\n Palantir models differentiation as a stochastic process where stem cells\n differentiate to terminally differentiated cells by a series of steps through\n a low dimensional phenotypic manifold. Palantir effectively captures the\n continuity in cell states and the stochasticity in cell fate determination.\n Palantir has been designed to work with multidimensional single cell data\n from diverse technologies such as Mass cytometry and single cell RNA-seq.\n\n .. note::\n More information and bug reports `here <https://github.com/dpeerlab/Palantir>`__.\n\n Parameters\n ----------\n adata\n An AnnData object.\n n_components\n Number of diffusion components.\n knn\n Number of nearest neighbors for graph construction.\n alpha\n Normalization parameter for the diffusion operator.\n use_adjacency_matrix\n Use adaptive anisotropic adjacency matrix, instead of PCA projections\n (default) to compute diffusion components.\n distances_key\n With `use_adjacency_matrix=True`, use the indicated distances key for `.obsp`.\n If `None`, `'distances'`.\n n_eigs\n Number of eigen vectors to use. If `None` specified, the number of eigen\n vectors will be determined using eigen gap. Passed to\n `palantir.utils.determine_multiscale_space`.\n impute_data\n Impute data using MAGIC.\n n_steps\n Number of steps in the diffusion operator. 
Passed to\n        `palantir.utils.run_magic_imputation`.\n    copy\n        Return a copy instead of writing to `adata`.\n\n    Returns\n    -------\n    Depending on `copy`, returns or updates `adata` with the following fields:\n\n    **Diffusion maps**,\n    used for magic imputation, and to generate multi-scale data matrix,\n\n    - X_palantir_diff_comp - :class:`~numpy.ndarray` (:attr:`~anndata.AnnData.obsm`, dtype `float`)\n        Array of Diffusion components.\n    - palantir_EigenValues - :class:`~numpy.ndarray` (:attr:`~anndata.AnnData.uns`, dtype `float`)\n        Array of corresponding eigen values.\n    - palantir_diff_op - :class:`~scipy.sparse.spmatrix` (:attr:`~anndata.AnnData.obsp`, dtype `float`)\n        The diffusion operator matrix.\n\n    **Multi scale space results**,\n    used to build tsne on diffusion components, and to compute branch probabilities\n    and waypoints,\n\n    - X_palantir_multiscale - :class:`~numpy.ndarray` (:attr:`~anndata.AnnData.obsm`, dtype `float`)\n        Multi scale data matrix.\n\n    **MAGIC imputation**,\n    used for plotting gene expression on tsne, and gene expression trends,\n\n    - palantir_imp - :class:`~numpy.ndarray` (:attr:`~anndata.AnnData.layers`, dtype `float`)\n        Imputed data matrix (MAGIC imputation).\n\n    Example\n    -------\n    >>> import scanpy.external as sce\n    >>> import scanpy as sc\n\n    Sample data is available `here <https://github.com/dpeerlab/Palantir/tree/master/data>`_.\n\n    **Load sample data**\n\n    >>> adata = sc.read_csv(filename=\"Palantir/data/marrow_sample_scseq_counts.csv.gz\")\n\n    *Cleanup and normalize*\n\n    >>> sc.pp.filter_cells(adata, min_counts=1000)\n    >>> sc.pp.filter_genes(adata, min_counts=10)\n    >>> sc.pp.normalize_per_cell(adata)\n    >>> sc.pp.log1p(adata)\n\n    **Data preprocessing**\n\n    Palantir builds diffusion maps using one of two optional inputs:\n\n    *Principal component analysis*\n\n    >>> sc.tl.pca(adata, n_comps=300)\n\n    or,\n\n    *Nearest neighbors graph*\n\n    >>> sc.pp.neighbors(adata, knn=30)\n\n    *Diffusion maps*\n\n    Palantir determines the diffusion maps of the data as an estimate of the low\n    dimensional phenotypic manifold of the data.\n\n    >>> sce.tl.palantir(adata, n_components=5, knn=30)\n\n    if pre-computed distances are to be used,\n\n    >>> sce.tl.palantir(\n    ...     adata,\n    ...     n_components=5,\n    ...     knn=30,\n    ...     use_adjacency_matrix=True,\n    ...     distances_key=\"distances\",\n    ... )\n\n    **Visualizing Palantir results**\n\n    *tSNE visualization*\n\n    important for Palantir!\n\n    Palantir constructs the tSNE map in the embedded space since these maps better\n    represent the differentiation trajectories.\n\n    >>> sc.tl.tsne(adata, n_pcs=2, use_rep='X_palantir_multiscale', perplexity=150)\n\n    *tsne by cell size*\n\n    >>> sc.pl.tsne(adata, color=\"n_counts\")\n\n    *Imputed gene expression visualized on tSNE maps*\n\n    >>> sc.pl.tsne(\n    ...     adata,\n    ...     gene_symbols=['CD34', 'MPO', 'GATA1', 'IRF8'],\n    ...     layer='palantir_imp',\n    ...     color=['CD34', 'MPO', 'GATA1', 'IRF8']\n    ... )\n\n    **Running Palantir**\n\n    Palantir can be run by specifying an approximate early cell. While Palantir\n    automatically determines the terminal states, they can also be specified using the\n    `terminal_states` parameter.\n\n    >>> start_cell = 'Run5_164698952452459'\n    >>> pr_res = sce.tl.palantir_results(\n    ...     adata,\n    ...     early_cell=start_cell,\n    ...     ms_data='X_palantir_multiscale',\n    ...     num_waypoints=500,\n    ... )\n\n    .. note::\n       A `start_cell` must be defined for every data set. 
The start cell for\n this dataset was chosen based on high expression of CD34.\n\n At this point the returned Palantir object `pr_res` can be used for all downstream\n analysis and plotting. Please consult this notebook\n `Palantir_sample_notebook.ipynb\n <https://github.com/dpeerlab/Palantir/blob/master/notebooks/Palantir_sample_notebook.ipynb>`_.\n It provides a comprehensive guide to draw *gene expression trends*, amongst other\n things.\n \"\"\"\n\n _check_import()\n from palantir.utils import (\n run_diffusion_maps,\n determine_multiscale_space,\n run_magic_imputation,\n )\n\n adata = adata.copy() if copy else adata\n\n logg.info('Palantir Diffusion Maps in progress ...')\n\n if use_adjacency_matrix:\n df = adata.obsp[distances_key] if distances_key else adata.obsp[\"distances\"]\n else:\n df = pd.DataFrame(adata.obsm['X_pca'], index=adata.obs_names)\n\n # Diffusion maps\n dm_res = run_diffusion_maps(\n data_df=df, n_components=n_components, knn=knn, alpha=alpha,\n )\n # Determine the multi scale space of the data\n ms_data = determine_multiscale_space(dm_res=dm_res, n_eigs=n_eigs)\n\n # MAGIC imputation\n if impute_data:\n imp_df = run_magic_imputation(\n data=adata.to_df(),\n dm_res=dm_res,\n n_steps=n_steps\n )\n adata.layers['palantir_imp'] = imp_df\n\n (\n adata.obsm['X_palantir_diff_comp'],\n adata.uns['palantir_EigenValues'],\n adata.obsp['palantir_diff_op'],\n adata.obsm['X_palantir_multiscale'],\n ) = (\n dm_res['EigenVectors'].to_numpy(),\n dm_res['EigenValues'].to_numpy(),\n dm_res['T'],\n ms_data.to_numpy(),\n )\n\n return adata if copy else None\n\n\ndef palantir_results(\n adata: AnnData,\n early_cell: str,\n ms_data: str = 'X_palantir_multiscale',\n terminal_states: List = None,\n knn: int = 30,\n num_waypoints: int = 1200,\n n_jobs: int = -1,\n scale_components: bool = True,\n use_early_cell_as_start: bool = False,\n max_iterations: int = 25,\n) -> Optional[AnnData]:\n \"\"\"\\\n **Running Palantir**\n\n A convenience function that wraps `palantir.core.run_palantir` to compute branch\n probabilities and waypoints.\n\n Parameters\n ----------\n adata\n An AnnData object.\n early_cell\n Start cell for pseudotime construction.\n ms_data\n Palantir multi scale data matrix,\n terminal_states\n List of user defined terminal states\n knn\n Number of nearest neighbors for graph construction.\n num_waypoints\n Number of waypoints to sample.\n n_jobs\n Number of jobs for parallel processing.\n scale_components\n Transform features by scaling each feature to a given range. 
Consult the\n documentation for `sklearn.preprocessing.minmax_scale`.\n use_early_cell_as_start\n Use `early_cell` as `start_cell`, instead of determining it from the boundary\n cells closest to the defined `early_cell`.\n max_iterations\n Maximum number of iterations for pseudotime convergence.\n\n Returns\n -------\n PResults\n PResults object with pseudotime, entropy, branch probabilities and waypoints.\n \"\"\"\n logg.info('Palantir computing waypoints..')\n\n _check_import()\n from palantir.core import run_palantir\n\n ms_data = pd.DataFrame(adata.obsm[ms_data], index=adata.obs_names)\n pr_res = run_palantir(\n ms_data=ms_data,\n early_cell=early_cell,\n terminal_states=terminal_states,\n knn=knn,\n num_waypoints=num_waypoints,\n n_jobs=n_jobs,\n scale_components=scale_components,\n use_early_cell_as_start=use_early_cell_as_start,\n max_iterations=max_iterations,\n )\n\n return pr_res\n\n\ndef _check_import():\n try:\n import palantir\n except ImportError:\n raise ImportError('\\nplease install palantir:\\n\\tpip install palantir')\n\n",
"\"\"\"Color palettes in addition to matplotlib's palettes.\"\"\"\n\nfrom matplotlib import cm, colors\n\n# Colorblindness adjusted vega_10\n# See https://github.com/theislab/scanpy/issues/387\nvega_10 = list(map(colors.to_hex, cm.tab10.colors))\nvega_10_scanpy = vega_10.copy()\nvega_10_scanpy[2] = '#279e68' # green\nvega_10_scanpy[4] = '#aa40fc' # purple\nvega_10_scanpy[8] = '#b5bd61' # kakhi\n\n# default matplotlib 2.0 palette\n# see 'category20' on https://github.com/vega/vega/wiki/Scales#scale-range-literals\nvega_20 = list(map(colors.to_hex, cm.tab20.colors))\n\n# reorderd, some removed, some added\nvega_20_scanpy = [\n *vega_20[0:14:2], *vega_20[16::2], # dark without grey\n *vega_20[1:15:2], *vega_20[17::2], # light without grey\n '#ad494a', '#8c6d31', # manual additions\n]\nvega_20_scanpy[2] = vega_10_scanpy[2]\nvega_20_scanpy[4] = vega_10_scanpy[4]\nvega_20_scanpy[7] = vega_10_scanpy[8] # kakhi shifted by missing grey\n# TODO: also replace pale colors if necessary\n\ndefault_20 = vega_20_scanpy\n\n# https://graphicdesign.stackexchange.com/questions/3682/where-can-i-find-a-large-palette-set-of-contrasting-colors-for-coloring-many-d\n# update 1\n# orig reference http://epub.wu.ac.at/1692/1/document.pdf\nzeileis_28 = [\n \"#023fa5\", \"#7d87b9\", \"#bec1d4\", \"#d6bcc0\", \"#bb7784\", \"#8e063b\", \"#4a6fe3\",\n \"#8595e1\", \"#b5bbe3\", \"#e6afb9\", \"#e07b91\", \"#d33f6a\", \"#11c638\", \"#8dd593\",\n \"#c6dec7\", \"#ead3c6\", \"#f0b98d\", \"#ef9708\", \"#0fcfc0\", \"#9cded6\", \"#d5eae7\",\n \"#f3e1eb\", \"#f6c4e1\", \"#f79cd4\",\n '#7f7f7f', \"#c7c7c7\", \"#1CE6FF\", \"#336600\", # these last ones were added,\n]\n\ndefault_28 = zeileis_28\n\n# from http://godsnotwheregodsnot.blogspot.de/2012/09/color-distribution-methodology.html\ngodsnot_102 = [\n # \"#000000\", # remove the black, as often, we have black colored annotation\n \"#FFFF00\", \"#1CE6FF\", \"#FF34FF\", \"#FF4A46\", \"#008941\", \"#006FA6\", \"#A30059\",\n \"#FFDBE5\", \"#7A4900\", \"#0000A6\", \"#63FFAC\", \"#B79762\", \"#004D43\", \"#8FB0FF\", \"#997D87\",\n \"#5A0007\", \"#809693\", \"#6A3A4C\", \"#1B4400\", \"#4FC601\", \"#3B5DFF\", \"#4A3B53\", \"#FF2F80\",\n \"#61615A\", \"#BA0900\", \"#6B7900\", \"#00C2A0\", \"#FFAA92\", \"#FF90C9\", \"#B903AA\", \"#D16100\",\n \"#DDEFFF\", \"#000035\", \"#7B4F4B\", \"#A1C299\", \"#300018\", \"#0AA6D8\", \"#013349\", \"#00846F\",\n \"#372101\", \"#FFB500\", \"#C2FFED\", \"#A079BF\", \"#CC0744\", \"#C0B9B2\", \"#C2FF99\", \"#001E09\",\n \"#00489C\", \"#6F0062\", \"#0CBD66\", \"#EEC3FF\", \"#456D75\", \"#B77B68\", \"#7A87A1\", \"#788D66\",\n \"#885578\", \"#FAD09F\", \"#FF8A9A\", \"#D157A0\", \"#BEC459\", \"#456648\", \"#0086ED\", \"#886F4C\",\n \"#34362D\", \"#B4A8BD\", \"#00A6AA\", \"#452C2C\", \"#636375\", \"#A3C8C9\", \"#FF913F\", \"#938A81\",\n \"#575329\", \"#00FECF\", \"#B05B6F\", \"#8CD0FF\", \"#3B9700\", \"#04F757\", \"#C8A1A1\", \"#1E6E00\",\n \"#7900D7\", \"#A77500\", \"#6367A9\", \"#A05837\", \"#6B002C\", \"#772600\", \"#D790FF\", \"#9B9700\",\n \"#549E79\", \"#FFF69F\", \"#201625\", \"#72418F\", \"#BC23FF\", \"#99ADC0\", \"#3A2465\", \"#922329\",\n \"#5B4534\", \"#FDE8DC\", \"#404E55\", \"#0089A3\", \"#CB7E98\", \"#A4E804\", \"#324E72\",\n]\n\ndefault_102 = godsnot_102\n\n\nfrom typing import Mapping, Sequence\n\n\ndef _plot_color_cycle(clists: Mapping[str, Sequence[str]]):\n import numpy as np\n import matplotlib.pyplot as plt\n from matplotlib.colors import ListedColormap, BoundaryNorm\n\n fig, axes = plt.subplots(nrows=len(clists)) # type: plt.Figure, plt.Axes\n 
fig.subplots_adjust(top=.95, bottom=.01, left=.3, right=.99)\n axes[0].set_title('Color Maps/Cycles', fontsize=14)\n\n for ax, (name, clist) in zip(axes, clists.items()):\n n = len(clist)\n ax.imshow(\n np.arange(n)[None, :].repeat(2, 0),\n aspect='auto',\n cmap=ListedColormap(clist),\n norm=BoundaryNorm(np.arange(n+1)-.5, n),\n )\n pos = list(ax.get_position().bounds)\n x_text = pos[0] - .01\n y_text = pos[1] + pos[3] / 2.\n fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)\n\n # Turn off all ticks & spines\n for ax in axes:\n ax.set_axis_off()\n fig.show()\n\n\nif __name__ == '__main__':\n _plot_color_cycle({\n name: colors\n for name, colors in globals().items()\n if isinstance(colors, list)\n })\n",
"import warnings\nimport collections.abc as cabc\nfrom pathlib import Path\nfrom types import MappingProxyType\nfrom typing import Optional, Union, List, Sequence, Mapping, Any, Tuple\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom anndata import AnnData\nfrom pandas.api.types import is_categorical_dtype\nfrom matplotlib import pyplot as pl, rcParams, ticker\nfrom matplotlib import patheffects\nfrom matplotlib.axes import Axes\nfrom matplotlib.colors import is_color_like, Colormap\n\nfrom .. import _utils\nfrom .._utils import matrix, _IGraphLayout, _FontWeight, _FontSize\nfrom ... import _utils as _sc_utils, logging as logg\nfrom ..._settings import settings\nfrom ..._compat import Literal\n\n\ndef paga_compare(\n adata: AnnData,\n basis=None,\n edges=False,\n color=None,\n alpha=None,\n groups=None,\n components=None,\n projection: Literal['2d', '3d'] = '2d',\n legend_loc='on data',\n legend_fontsize: Union[int, float, _FontSize, None] = None,\n legend_fontweight: Union[int, _FontWeight] = 'bold',\n legend_fontoutline=None,\n color_map=None,\n palette=None,\n frameon=False,\n size=None,\n title=None,\n right_margin=None,\n left_margin=0.05,\n show=None,\n save=None,\n title_graph=None,\n groups_graph=None,\n **paga_graph_params,\n):\n \"\"\"\\\n Scatter and PAGA graph side-by-side.\n\n Consists in a scatter plot and the abstracted graph. See\n :func:`~scanpy.pl.paga` for all related parameters.\n\n See :func:`~scanpy.pl.paga_path` for visualizing gene changes along paths\n through the abstracted graph.\n\n Additional parameters are as follows.\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n kwds_scatter\n Keywords for :func:`~scanpy.pl.scatter`.\n kwds_paga\n Keywords for :func:`~scanpy.pl.paga`.\n\n Returns\n -------\n A list of :class:`~matplotlib.axes.Axes` if `show` is `False`.\n \"\"\"\n axs, _, _, _ = _utils.setup_axes(panels=[0, 1], right_margin=right_margin,)\n if color is None:\n color = adata.uns['paga']['groups']\n suptitle = None # common title for entire figure\n if title_graph is None:\n suptitle = color if title is None else title\n title, title_graph = '', ''\n if basis is None:\n if 'X_draw_graph_fa' in adata.obsm.keys():\n basis = 'draw_graph_fa'\n elif 'X_umap' in adata.obsm.keys():\n basis = 'umap'\n elif 'X_tsne' in adata.obsm.keys():\n basis = 'tsne'\n elif 'X_draw_graph_fr' in adata.obsm.keys():\n basis = 'draw_graph_fr'\n else:\n basis = 'umap'\n from .scatterplots import embedding\n\n embedding(\n adata,\n ax=axs[0],\n basis=basis,\n color=color,\n edges=edges,\n alpha=alpha,\n groups=groups,\n components=components,\n legend_loc=legend_loc,\n legend_fontsize=legend_fontsize,\n legend_fontweight=legend_fontweight,\n legend_fontoutline=legend_fontoutline,\n color_map=color_map,\n palette=palette,\n frameon=frameon,\n size=size,\n title=title,\n show=False,\n save=False,\n )\n if 'pos' not in paga_graph_params:\n if color == adata.uns['paga']['groups']:\n paga_graph_params['pos'] = _utils._tmp_cluster_pos\n else:\n paga_graph_params['pos'] = adata.uns['paga']['pos']\n xlim, ylim = axs[0].get_xlim(), axs[0].get_ylim()\n axs[1].set_xlim(xlim)\n axs[1].set_ylim(ylim)\n if 'labels' in paga_graph_params:\n labels = paga_graph_params.pop('labels')\n else:\n labels = groups_graph\n if legend_fontsize is not None:\n paga_graph_params['fontsize'] = legend_fontsize\n if legend_fontweight is not None:\n paga_graph_params['fontweight'] = legend_fontweight\n if legend_fontoutline is not None:\n paga_graph_params['fontoutline'] = 
legend_fontoutline\n paga(\n adata,\n ax=axs[1],\n show=False,\n save=False,\n title=title_graph,\n labels=labels,\n colors=color,\n frameon=frameon,\n **paga_graph_params,\n )\n if suptitle is not None:\n pl.suptitle(suptitle)\n _utils.savefig_or_show('paga_compare', show=show, save=save)\n if show == False:\n return axs\n\n\ndef _compute_pos(\n adjacency_solid,\n layout=None,\n random_state=0,\n init_pos=None,\n adj_tree=None,\n root=0,\n layout_kwds: Mapping[str, Any] = MappingProxyType({}),\n):\n import networkx as nx\n\n nx_g_solid = nx.Graph(adjacency_solid)\n if layout is None:\n layout = 'fr'\n if layout == 'fa':\n try:\n from fa2 import ForceAtlas2\n except:\n logg.warning(\n \"Package 'fa2' is not installed, falling back to layout 'fr'.\"\n 'To use the faster and better ForceAtlas2 layout, '\n \"install package 'fa2' (`pip install fa2`).\"\n )\n layout = 'fr'\n if layout == 'fa':\n np.random.seed(random_state)\n if init_pos is None:\n init_coords = np.random.random((adjacency_solid.shape[0], 2))\n else:\n init_coords = init_pos.copy()\n forceatlas2 = ForceAtlas2(\n # Behavior alternatives\n outboundAttractionDistribution=False, # Dissuade hubs\n linLogMode=False, # NOT IMPLEMENTED\n adjustSizes=False, # Prevent overlap (NOT IMPLEMENTED)\n edgeWeightInfluence=1.0,\n # Performance\n jitterTolerance=1.0, # Tolerance\n barnesHutOptimize=True,\n barnesHutTheta=1.2,\n multiThreaded=False, # NOT IMPLEMENTED\n # Tuning\n scalingRatio=2.0,\n strongGravityMode=False,\n gravity=1.0,\n # Log\n verbose=False,\n )\n if 'maxiter' in layout_kwds:\n iterations = layout_kwds['maxiter']\n elif 'iterations' in layout_kwds:\n iterations = layout_kwds['iterations']\n else:\n iterations = 500\n pos_list = forceatlas2.forceatlas2(\n adjacency_solid, pos=init_coords, iterations=iterations\n )\n pos = {n: [p[0], -p[1]] for n, p in enumerate(pos_list)}\n elif layout == 'eq_tree':\n nx_g_tree = nx.Graph(adj_tree)\n pos = _utils.hierarchy_pos(nx_g_tree, root)\n if len(pos) < adjacency_solid.shape[0]:\n raise ValueError(\n 'This is a forest and not a single tree. 
'\n 'Try another `layout`, e.g., {\\'fr\\'}.'\n )\n else:\n # igraph layouts\n g = _sc_utils.get_igraph_from_adjacency(adjacency_solid)\n if 'rt' in layout:\n g_tree = _sc_utils.get_igraph_from_adjacency(adj_tree)\n pos_list = g_tree.layout(\n layout, root=root if isinstance(root, list) else [root]\n ).coords\n elif layout == 'circle':\n pos_list = g.layout(layout).coords\n else:\n # I don't know why this is necessary\n np.random.seed(random_state)\n if init_pos is None:\n init_coords = np.random.random((adjacency_solid.shape[0], 2)).tolist()\n else:\n init_pos = init_pos.copy()\n # this is a super-weird hack that is necessary as igraph’s\n # layout function seems to do some strange stuff here\n init_pos[:, 1] *= -1\n init_coords = init_pos.tolist()\n try:\n pos_list = g.layout(\n layout, seed=init_coords, weights='weight', **layout_kwds\n ).coords\n except AttributeError: # hack for empty graphs...\n pos_list = g.layout(layout, seed=init_coords, **layout_kwds).coords\n pos = {n: [p[0], -p[1]] for n, p in enumerate(pos_list)}\n if len(pos) == 1:\n pos[0] = (0.5, 0.5)\n pos_array = np.array([pos[n] for count, n in enumerate(nx_g_solid)])\n return pos_array\n\n\ndef paga(\n adata: AnnData,\n threshold: Optional[float] = None,\n color: Optional[Union[str, Mapping[Union[str, int], Mapping[Any, float]]]] = None,\n layout: Optional[_IGraphLayout] = None,\n layout_kwds: Mapping[str, Any] = MappingProxyType({}),\n init_pos: Optional[np.ndarray] = None,\n root: Union[int, str, Sequence[int], None] = 0,\n labels: Union[str, Sequence[str], Mapping[str, str], None] = None,\n single_component: bool = False,\n solid_edges: str = 'connectivities',\n dashed_edges: Optional[str] = None,\n transitions: Optional[str] = None,\n fontsize: Optional[int] = None,\n fontweight: str = 'bold',\n fontoutline: Optional[int] = None,\n text_kwds: Mapping[str, Any] = MappingProxyType({}),\n node_size_scale: float = 1.0,\n node_size_power: float = 0.5,\n edge_width_scale: float = 1.0,\n min_edge_width: Optional[float] = None,\n max_edge_width: Optional[float] = None,\n arrowsize: int = 30,\n title: Optional[str] = None,\n left_margin: float = 0.01,\n random_state: Optional[int] = 0,\n pos: Union[np.ndarray, str, Path, None] = None,\n normalize_to_color: bool = False,\n cmap: Union[str, Colormap] = None,\n cax: Optional[Axes] = None,\n colorbar=None, # TODO: this seems to be unused\n cb_kwds: Mapping[str, Any] = MappingProxyType({}),\n frameon: Optional[bool] = None,\n add_pos: bool = True,\n export_to_gexf: bool = False,\n use_raw: bool = True,\n colors=None, # backwards compat\n groups=None, # backwards compat\n plot: bool = True,\n show: Optional[bool] = None,\n save: Union[bool, str, None] = None,\n ax: Optional[Axes] = None,\n) -> Union[Axes, List[Axes], None]:\n \"\"\"\\\n Plot the PAGA graph through thresholding low-connectivity edges.\n\n Compute a coarse-grained layout of the data. Reuse this by passing\n `init_pos='paga'` to :func:`~scanpy.tl.umap` or\n :func:`~scanpy.tl.draw_graph` and obtain embeddings with more meaningful\n global topology [Wolf19]_.\n\n This uses ForceAtlas2 or igraph's layout algorithms for most layouts [Csardi06]_.\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n threshold\n Do not draw edges for weights below this threshold. Set to 0 if you want\n all edges. 
Discarding low-connectivity edges helps in getting a much\n clearer picture of the graph.\n color\n Gene name or `obs` annotation defining the node colors.\n Also plots the degree of the abstracted graph when\n passing {`'degree_dashed'`, `'degree_solid'`}.\n\n Can also be used to visualize a pie chart at each node in the following form:\n `{<group name or index>: {<color>: <fraction>, ...}, ...}`. If the fractions\n do not sum to 1, a new category called `'rest'` colored grey will be created.\n labels\n The node labels. If `None`, this defaults to the group labels stored in\n the categorical for which :func:`~scanpy.tl.paga` has been computed.\n pos\n Two-column array-like storing the x and y coordinates for drawing.\n Otherwise, path to a `.gdf` file that has been exported from Gephi or\n a similar graph visualization software.\n layout\n Plotting layout that computes positions.\n `'fa'` stands for “ForceAtlas2”,\n `'fr'` stands for “Fruchterman-Reingold”,\n `'rt'` stands for “Reingold-Tilford”,\n `'eq_tree'` stands for “equally spaced tree”.\n All but `'fa'` and `'eq_tree'` are igraph layouts.\n All other igraph layouts are also permitted.\n See also parameter `pos` and :func:`~scanpy.tl.draw_graph`.\n layout_kwds\n Keywords for the layout.\n init_pos\n Two-column array storing the x and y coordinates for initializing the\n layout.\n random_state\n For layouts with random initialization like `'fr'`, change this to use\n different initial states for the optimization. If `None`, the initial\n state is not reproducible.\n root\n If choosing a tree layout, this is the index of the root node or a list\n of root node indices. If this is a non-empty vector then the supplied\n node IDs are used as the roots of the trees (or a single tree if the\n graph is connected). If this is `None` or an empty list, the root\n vertices are automatically calculated based on topological sorting.\n transitions\n Key for `.uns['paga']` that specifies the matrix – for instance\n `'transistions_confidence'` – that stores the arrows.\n solid_edges\n Key for `.uns['paga']` that specifies the matrix that stores the edges\n to be drawn solid black.\n dashed_edges\n Key for `.uns['paga']` that specifies the matrix that stores the edges\n to be drawn dashed grey. If `None`, no dashed edges are drawn.\n single_component\n Restrict to largest connected component.\n fontsize\n Font size for node labels.\n fontoutline\n Width of the white outline around fonts.\n text_kwds\n Keywords for :meth:`~matplotlib.axes.Axes.text`.\n node_size_scale\n Increase or decrease the size of the nodes.\n node_size_power\n The power with which group sizes influence the radius of the nodes.\n edge_width_scale\n Edge width scale in units of `rcParams['lines.linewidth']`.\n min_edge_width\n Min width of solid edges.\n max_edge_width\n Max width of solid and dashed edges.\n arrowsize\n For directed graphs, choose the size of the arrow head's length and\n width. 
See :py:class: `matplotlib.patches.FancyArrowPatch` for attribute\n `mutation_scale` for more info.\n export_to_gexf\n Export to gexf format to be read by graph visualization programs such as\n Gephi.\n normalize_to_color\n Whether to normalize categorical plots to `color` or the underlying\n grouping.\n cmap\n The color map.\n cax\n A matplotlib axes object for a potential colorbar.\n cb_kwds\n Keyword arguments for :class:`~matplotlib.colorbar.ColorbarBase`,\n for instance, `ticks`.\n add_pos\n Add the positions to `adata.uns['paga']`.\n title\n Provide a title.\n frameon\n Draw a frame around the PAGA graph.\n plot\n If `False`, do not create the figure, simply compute the layout.\n save\n If `True` or a `str`, save the figure.\n A string is appended to the default filename.\n Infer the filetype if ending on \\\\{`'.pdf'`, `'.png'`, `'.svg'`\\\\}.\n ax\n A matplotlib axes object.\n\n Returns\n -------\n If `show==False`, one or more :class:`~matplotlib.axes.Axes` objects.\n Adds `'pos'` to `adata.uns['paga']` if `add_pos` is `True`.\n\n Notes\n -----\n When initializing the positions, note that – for some reason – igraph\n mirrors coordinates along the x axis... that is, you should increase the\n `maxiter` parameter by 1 if the layout is flipped.\n\n .. currentmodule:: scanpy\n\n See also\n --------\n tl.paga\n pl.paga_compare\n pl.paga_path\n \"\"\"\n\n if groups is not None: # backwards compat\n labels = groups\n logg.warning('`groups` is deprecated in `pl.paga`: use `labels` instead')\n if colors is None:\n colors = color\n\n groups_key = adata.uns['paga']['groups']\n\n def is_flat(x):\n has_one_per_category = isinstance(x, cabc.Collection) and len(x) == len(\n adata.obs[groups_key].cat.categories\n )\n return has_one_per_category or x is None or isinstance(x, str)\n\n if isinstance(colors, cabc.Mapping) and isinstance(\n colors[next(iter(colors))], cabc.Mapping\n ):\n # handle paga pie, remap string keys to integers\n names_to_ixs = {\n n: i for i, n in enumerate(adata.obs[groups_key].cat.categories)\n }\n colors = {names_to_ixs.get(n, n): v for n, v in colors.items()}\n if is_flat(colors):\n colors = [colors]\n\n if frameon is None:\n frameon = settings._frameon\n # labels is a list that contains no lists\n if is_flat(labels):\n labels = [labels for _ in range(len(colors))]\n\n if title is None and len(colors) > 1:\n title = [c for c in colors]\n elif isinstance(title, str):\n title = [title for c in colors]\n elif title is None:\n title = [None for c in colors]\n\n if colorbar is None:\n var_names = adata.var_names if adata.raw is None else adata.raw.var_names\n colorbars = [\n (\n (c in adata.obs_keys() and adata.obs[c].dtype.name != 'category')\n or (c in var_names)\n )\n for c in colors\n ]\n else:\n colorbars = [False for _ in colors]\n\n if isinstance(root, str):\n if root not in labels:\n raise ValueError(\n 'If `root` is a string, '\n f'it needs to be one of {labels} not {root!r}.'\n )\n root = list(labels).index(root)\n if isinstance(root, cabc.Sequence) and root[0] in labels:\n root = [list(labels).index(r) for r in root]\n\n # define the adjacency matrices\n adjacency_solid = adata.uns['paga'][solid_edges].copy()\n adjacency_dashed = None\n if threshold is None:\n threshold = 0.01 # default threshold\n if threshold > 0:\n adjacency_solid.data[adjacency_solid.data < threshold] = 0\n adjacency_solid.eliminate_zeros()\n if dashed_edges is not None:\n adjacency_dashed = adata.uns['paga'][dashed_edges].copy()\n if threshold > 0:\n adjacency_dashed.data[adjacency_dashed.data < 
threshold] = 0\n adjacency_dashed.eliminate_zeros()\n\n # compute positions\n if pos is None:\n adj_tree = None\n if layout in {'rt', 'rt_circular', 'eq_tree'}:\n adj_tree = adata.uns['paga']['connectivities_tree']\n pos = _compute_pos(\n adjacency_solid,\n layout=layout,\n random_state=random_state,\n init_pos=init_pos,\n layout_kwds=layout_kwds,\n adj_tree=adj_tree,\n root=root,\n )\n\n if plot:\n axs, panel_pos, draw_region_width, figure_width = _utils.setup_axes(\n ax=ax, panels=colors, colorbars=colorbars,\n )\n\n if len(colors) == 1 and not isinstance(axs, list):\n axs = [axs]\n\n for icolor, c in enumerate(colors):\n if title[icolor] is not None:\n axs[icolor].set_title(title[icolor])\n sct = _paga_graph(\n adata,\n axs[icolor],\n colors=colors if isinstance(colors, cabc.Mapping) else c,\n solid_edges=solid_edges,\n dashed_edges=dashed_edges,\n transitions=transitions,\n threshold=threshold,\n adjacency_solid=adjacency_solid,\n adjacency_dashed=adjacency_dashed,\n root=root,\n labels=labels[icolor],\n fontsize=fontsize,\n fontweight=fontweight,\n fontoutline=fontoutline,\n text_kwds=text_kwds,\n node_size_scale=node_size_scale,\n node_size_power=node_size_power,\n edge_width_scale=edge_width_scale,\n min_edge_width=min_edge_width,\n max_edge_width=max_edge_width,\n normalize_to_color=normalize_to_color,\n frameon=frameon,\n cmap=cmap,\n colorbar=colorbars[icolor],\n cb_kwds=cb_kwds,\n use_raw=use_raw,\n title=title[icolor],\n export_to_gexf=export_to_gexf,\n single_component=single_component,\n arrowsize=arrowsize,\n pos=pos,\n )\n if colorbars[icolor]:\n if cax is None:\n bottom = panel_pos[0][0]\n height = panel_pos[1][0] - bottom\n width = 0.006 * draw_region_width / len(colors)\n left = panel_pos[2][2 * icolor + 1] + 0.2 * width\n rectangle = [left, bottom, width, height]\n fig = pl.gcf()\n ax_cb = fig.add_axes(rectangle)\n else:\n ax_cb = cax[icolor]\n\n cb = pl.colorbar(\n sct, format=ticker.FuncFormatter(_utils.ticks_formatter), cax=ax_cb,\n )\n if add_pos:\n adata.uns['paga']['pos'] = pos\n logg.hint(\"added 'pos', the PAGA positions (adata.uns['paga'])\")\n if plot:\n _utils.savefig_or_show('paga', show=show, save=save)\n if len(colors) == 1 and isinstance(axs, list):\n axs = axs[0]\n if show is False:\n return axs\n\n\ndef _paga_graph(\n adata,\n ax,\n solid_edges=None,\n dashed_edges=None,\n adjacency_solid=None,\n adjacency_dashed=None,\n transitions=None,\n threshold=None,\n root=0,\n colors=None,\n labels=None,\n fontsize=None,\n fontweight=None,\n fontoutline=None,\n text_kwds: Mapping[str, Any] = MappingProxyType({}),\n node_size_scale=1.0,\n node_size_power=0.5,\n edge_width_scale=1.0,\n normalize_to_color='reference',\n title=None,\n pos=None,\n cmap=None,\n frameon=True,\n min_edge_width=None,\n max_edge_width=None,\n export_to_gexf=False,\n colorbar=None,\n use_raw=True,\n cb_kwds: Mapping[str, Any] = MappingProxyType({}),\n single_component=False,\n arrowsize=30,\n):\n import networkx as nx\n\n node_labels = labels # rename for clarity\n if (\n node_labels is not None\n and isinstance(node_labels, str)\n and node_labels != adata.uns['paga']['groups']\n ):\n raise ValueError(\n 'Provide a list of group labels for the PAGA groups {}, not {}.'.format(\n adata.uns['paga']['groups'], node_labels\n )\n )\n groups_key = adata.uns['paga']['groups']\n if node_labels is None:\n node_labels = adata.obs[groups_key].cat.categories\n\n if (colors is None or colors == groups_key) and groups_key is not None:\n if groups_key + '_colors' not in adata.uns or len(\n 
adata.obs[groups_key].cat.categories\n ) != len(adata.uns[groups_key + '_colors']):\n _utils.add_colors_for_categorical_sample_annotation(adata, groups_key)\n colors = adata.uns[groups_key + '_colors']\n for iname, name in enumerate(adata.obs[groups_key].cat.categories):\n if name in settings.categories_to_ignore:\n colors[iname] = 'grey'\n\n nx_g_solid = nx.Graph(adjacency_solid)\n if dashed_edges is not None:\n nx_g_dashed = nx.Graph(adjacency_dashed)\n\n # convert pos to array and dict\n if not isinstance(pos, (Path, str)):\n pos_array = pos\n else:\n pos = Path(pos)\n if pos.suffix != '.gdf':\n raise ValueError(\n 'Currently only supporting reading positions from .gdf files. '\n 'Consider generating them using, for instance, Gephi.'\n )\n s = '' # read the node definition from the file\n with pos.open() as f:\n f.readline()\n for line in f:\n if line.startswith('edgedef>'):\n break\n s += line\n from io import StringIO\n\n df = pd.read_csv(StringIO(s), header=-1)\n pos_array = df[[4, 5]].values\n\n # convert to dictionary\n pos = {n: [p[0], p[1]] for n, p in enumerate(pos_array)}\n\n # uniform color\n if isinstance(colors, str) and is_color_like(colors):\n colors = [colors for c in range(len(node_labels))]\n\n # color degree of the graph\n if isinstance(colors, str) and colors.startswith('degree'):\n # see also tools.paga.paga_degrees\n if colors == 'degree_dashed':\n colors = [d for _, d in nx_g_dashed.degree(weight='weight')]\n elif colors == 'degree_solid':\n colors = [d for _, d in nx_g_solid.degree(weight='weight')]\n else:\n raise ValueError('`degree` either \"degree_dashed\" or \"degree_solid\".')\n colors = (np.array(colors) - np.min(colors)) / (np.max(colors) - np.min(colors))\n\n # plot gene expression\n var_names = adata.var_names if adata.raw is None else adata.raw.var_names\n if isinstance(colors, str) and colors in var_names:\n x_color = []\n cats = adata.obs[groups_key].cat.categories\n for icat, cat in enumerate(cats):\n subset = (cat == adata.obs[groups_key]).values\n if adata.raw is not None and use_raw:\n adata_gene = adata.raw[:, colors]\n else:\n adata_gene = adata[:, colors]\n x_color.append(np.mean(adata_gene.X[subset]))\n colors = x_color\n\n # plot continuous annotation\n if (\n isinstance(colors, str)\n and colors in adata.obs\n and not is_categorical_dtype(adata.obs[colors])\n ):\n x_color = []\n cats = adata.obs[groups_key].cat.categories\n for icat, cat in enumerate(cats):\n subset = (cat == adata.obs[groups_key]).values\n x_color.append(adata.obs.loc[subset, colors].mean())\n colors = x_color\n\n # plot categorical annotation\n if (\n isinstance(colors, str)\n and colors in adata.obs\n and is_categorical_dtype(adata.obs[colors])\n ):\n asso_names, asso_matrix = _sc_utils.compute_association_matrix_of_groups(\n adata,\n prediction=groups_key,\n reference=colors,\n normalization='reference' if normalize_to_color else 'prediction',\n )\n _utils.add_colors_for_categorical_sample_annotation(adata, colors)\n asso_colors = _sc_utils.get_associated_colors_of_groups(\n adata.uns[colors + '_colors'], asso_matrix\n )\n colors = asso_colors\n\n if len(colors) != len(node_labels):\n raise ValueError(\n f'Expected `colors` to be of length `{len(node_labels)}`, '\n f'found `{len(colors)}`.'\n )\n\n # count number of connected components\n n_components, labels = scipy.sparse.csgraph.connected_components(adjacency_solid)\n if n_components > 1 and not single_component:\n logg.debug(\n 'Graph has more than a single connected component. 
'\n 'To restrict to this component, pass `single_component=True`.'\n )\n if n_components > 1 and single_component:\n component_sizes = np.bincount(labels)\n largest_component = np.where(component_sizes == component_sizes.max())[0][0]\n adjacency_solid = adjacency_solid.tocsr()[labels == largest_component, :]\n adjacency_solid = adjacency_solid.tocsc()[:, labels == largest_component]\n colors = np.array(colors)[labels == largest_component]\n node_labels = np.array(node_labels)[labels == largest_component]\n cats_dropped = (\n adata.obs[groups_key].cat.categories[labels != largest_component].tolist()\n )\n logg.info(\n 'Restricting graph to largest connected component by dropping categories\\n'\n f'{cats_dropped}'\n )\n nx_g_solid = nx.Graph(adjacency_solid)\n if dashed_edges is not None:\n raise ValueError('`single_component` only if `dashed_edges` is `None`.')\n\n # edge widths\n base_edge_width = edge_width_scale * 5 * rcParams['lines.linewidth']\n\n # draw dashed edges\n if dashed_edges is not None:\n widths = [x[-1]['weight'] for x in nx_g_dashed.edges(data=True)]\n widths = base_edge_width * np.array(widths)\n if max_edge_width is not None:\n widths = np.clip(widths, None, max_edge_width)\n nx.draw_networkx_edges(\n nx_g_dashed,\n pos,\n ax=ax,\n width=widths,\n edge_color='grey',\n style='dashed',\n alpha=0.5,\n )\n\n # draw solid edges\n if transitions is None:\n widths = [x[-1]['weight'] for x in nx_g_solid.edges(data=True)]\n widths = base_edge_width * np.array(widths)\n if min_edge_width is not None or max_edge_width is not None:\n widths = np.clip(widths, min_edge_width, max_edge_width)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n nx.draw_networkx_edges(\n nx_g_solid, pos, ax=ax, width=widths, edge_color='black'\n )\n # draw directed edges\n else:\n adjacency_transitions = adata.uns['paga'][transitions].copy()\n if threshold is None:\n threshold = 0.01\n adjacency_transitions.data[adjacency_transitions.data < threshold] = 0\n adjacency_transitions.eliminate_zeros()\n g_dir = nx.DiGraph(adjacency_transitions.T)\n widths = [x[-1]['weight'] for x in g_dir.edges(data=True)]\n widths = base_edge_width * np.array(widths)\n if min_edge_width is not None or max_edge_width is not None:\n widths = np.clip(widths, min_edge_width, max_edge_width)\n nx.draw_networkx_edges(\n g_dir, pos, ax=ax, width=widths, edge_color='black', arrowsize=arrowsize\n )\n\n if export_to_gexf:\n if isinstance(colors[0], tuple):\n from matplotlib.colors import rgb2hex\n\n colors = [rgb2hex(c) for c in colors]\n for count, n in enumerate(nx_g_solid.nodes()):\n nx_g_solid.node[count]['label'] = str(node_labels[count])\n nx_g_solid.node[count]['color'] = str(colors[count])\n nx_g_solid.node[count]['viz'] = dict(\n position=dict(x=1000 * pos[count][0], y=1000 * pos[count][1], z=0,)\n )\n filename = settings.writedir / 'paga_graph.gexf'\n logg.warning(f'exporting to {filename}')\n settings.writedir.mkdir(parents=True, exist_ok=True)\n nx.write_gexf(nx_g_solid, settings.writedir / 'paga_graph.gexf')\n\n ax.set_frame_on(frameon)\n ax.set_xticks([])\n ax.set_yticks([])\n\n # groups sizes\n if groups_key is not None and groups_key + '_sizes' in adata.uns:\n groups_sizes = adata.uns[groups_key + '_sizes']\n else:\n groups_sizes = np.ones(len(node_labels))\n base_scale_scatter = 2000\n base_pie_size = (\n base_scale_scatter / (np.sqrt(adjacency_solid.shape[0]) + 10) * node_size_scale\n )\n median_group_size = np.median(groups_sizes)\n groups_sizes = base_pie_size * np.power(\n groups_sizes / 
median_group_size, node_size_power\n )\n\n if fontsize is None:\n fontsize = rcParams['legend.fontsize']\n if fontoutline is not None:\n text_kwds = dict(text_kwds)\n text_kwds['path_effects'] = [\n patheffects.withStroke(linewidth=fontoutline, foreground='w')\n ]\n # usual scatter plot\n if not isinstance(colors[0], cabc.Mapping):\n n_groups = len(pos_array)\n sct = ax.scatter(\n pos_array[:, 0],\n pos_array[:, 1],\n c=colors[:n_groups],\n edgecolors='face',\n s=groups_sizes,\n cmap=cmap,\n )\n for count, group in enumerate(node_labels):\n ax.text(\n pos_array[count, 0],\n pos_array[count, 1],\n group,\n verticalalignment='center',\n horizontalalignment='center',\n size=fontsize,\n fontweight=fontweight,\n **text_kwds,\n )\n # else pie chart plot\n else:\n for ix, (xx, yy) in enumerate(zip(pos_array[:, 0], pos_array[:, 1])):\n if not isinstance(colors[ix], cabc.Mapping):\n raise ValueError(\n f'{colors[ix]} is neither a dict of valid '\n 'matplotlib colors nor a valid matplotlib color.'\n )\n color_single = colors[ix].keys()\n fracs = [colors[ix][c] for c in color_single]\n total = sum(fracs)\n\n if total < 1:\n color_single = list(color_single)\n color_single.append('grey')\n fracs.append(1 - sum(fracs))\n elif not np.isclose(total, 1):\n raise ValueError(\n f'Expected fractions for node `{ix}` to be '\n f'close to 1, found `{total}`.'\n )\n\n cumsum = np.cumsum(fracs)\n cumsum = cumsum / cumsum[-1]\n cumsum = [0] + cumsum.tolist()\n\n for r1, r2, color in zip(cumsum[:-1], cumsum[1:], color_single):\n angles = np.linspace(2 * np.pi * r1, 2 * np.pi * r2, 20)\n x = [0] + np.cos(angles).tolist()\n y = [0] + np.sin(angles).tolist()\n\n xy = np.column_stack([x, y])\n s = np.abs(xy).max()\n\n sct = ax.scatter(\n [xx], [yy], marker=xy, s=s ** 2 * groups_sizes[ix], color=color\n )\n\n if node_labels is not None:\n ax.text(\n xx,\n yy,\n node_labels[ix],\n verticalalignment='center',\n horizontalalignment='center',\n size=fontsize,\n fontweight=fontweight,\n **text_kwds,\n )\n\n return sct\n\n\ndef paga_path(\n adata: AnnData,\n nodes: Sequence[Union[str, int]],\n keys: Sequence[str],\n use_raw: bool = True,\n annotations: Sequence[str] = ('dpt_pseudotime',),\n color_map: Union[str, Colormap, None] = None,\n color_maps_annotations: Mapping[str, Union[str, Colormap]] = MappingProxyType(\n dict(dpt_pseudotime='Greys')\n ),\n palette_groups: Optional[Sequence[str]] = None,\n n_avg: int = 1,\n groups_key: Optional[str] = None,\n xlim: Tuple[Optional[int], Optional[int]] = (None, None),\n title: Optional[str] = None,\n left_margin=None,\n ytick_fontsize: Optional[int] = None,\n title_fontsize: Optional[int] = None,\n show_node_names: bool = True,\n show_yticks: bool = True,\n show_colorbar: bool = True,\n legend_fontsize: Union[int, float, _FontSize, None] = None,\n legend_fontweight: Union[int, _FontWeight, None] = None,\n normalize_to_zero_one: bool = False,\n as_heatmap: bool = True,\n return_data: bool = False,\n show: Optional[bool] = None,\n save: Union[bool, str, None] = None,\n ax: Optional[Axes] = None,\n) -> Optional[Axes]:\n \"\"\"\\\n Gene expression and annotation changes along paths in the abstracted graph.\n\n Parameters\n ----------\n adata\n An annotated data matrix.\n nodes\n A path through nodes of the abstracted graph, that is, names or indices\n (within `.categories`) of groups that have been used to run PAGA.\n keys\n Either variables in `adata.var_names` or annotations in\n `adata.obs`. 
They are plotted using `color_map`.\n use_raw\n Use `adata.raw` for retrieving gene expressions if it has been set.\n annotations\n Plot these keys with `color_maps_annotations`. Need to be keys for\n `adata.obs`.\n color_map\n Matplotlib colormap.\n color_maps_annotations\n Color maps for plotting the annotations. Keys of the dictionary must\n appear in `annotations`.\n palette_groups\n Ususally, use the same `sc.pl.palettes...` as used for coloring the\n abstracted graph.\n n_avg\n Number of data points to include in computation of running average.\n groups_key\n Key of the grouping used to run PAGA. If `None`, defaults to\n `adata.uns['paga']['groups']`.\n as_heatmap\n Plot the timeseries as heatmap. If not plotting as heatmap,\n `annotations` have no effect.\n show_node_names\n Plot the node names on the nodes bar.\n show_colorbar\n Show the colorbar.\n show_yticks\n Show the y ticks.\n normalize_to_zero_one\n Shift and scale the running average to [0, 1] per gene.\n return_data\n Return the timeseries data in addition to the axes if `True`.\n show\n Show the plot, do not return axis.\n save\n If `True` or a `str`, save the figure.\n A string is appended to the default filename.\n Infer the filetype if ending on \\\\{`'.pdf'`, `'.png'`, `'.svg'`\\\\}.\n ax\n A matplotlib axes object.\n\n Returns\n -------\n A :class:`~matplotlib.axes.Axes` object, if `ax` is `None`, else `None`.\n If `return_data`, return the timeseries data in addition to an axes.\n \"\"\"\n ax_was_none = ax is None\n\n if groups_key is None:\n if 'groups' not in adata.uns['paga']:\n raise KeyError(\n 'Pass the key of the grouping with which you ran PAGA, '\n 'using the parameter `groups_key`.'\n )\n groups_key = adata.uns['paga']['groups']\n groups_names = adata.obs[groups_key].cat.categories\n\n if 'dpt_pseudotime' not in adata.obs.keys():\n raise ValueError(\n '`pl.paga_path` requires computation of a pseudotime `tl.dpt` '\n 'for ordering at single-cell resolution'\n )\n\n if palette_groups is None:\n _utils.add_colors_for_categorical_sample_annotation(adata, groups_key)\n palette_groups = adata.uns[f'{groups_key}_colors']\n\n def moving_average(a):\n return _sc_utils.moving_average(a, n_avg)\n\n ax = pl.gca() if ax is None else ax\n\n X = []\n x_tick_locs = [0]\n x_tick_labels = []\n groups = []\n anno_dict = {anno: [] for anno in annotations}\n if isinstance(nodes[0], str):\n nodes_ints = []\n groups_names_set = set(groups_names)\n for node in nodes:\n if node not in groups_names_set:\n raise ValueError(\n f'Each node/group needs to be in {groups_names.tolist()} '\n f'(`groups_key`={groups_key!r}) not {node!r}.'\n )\n nodes_ints.append(groups_names.get_loc(node))\n nodes_strs = nodes\n else:\n nodes_ints = nodes\n nodes_strs = [groups_names[node] for node in nodes]\n\n adata_X = adata\n if use_raw and adata.raw is not None:\n adata_X = adata.raw\n\n for ikey, key in enumerate(keys):\n x = []\n for igroup, group in enumerate(nodes_ints):\n idcs = np.arange(adata.n_obs)[\n adata.obs[groups_key].values == nodes_strs[igroup]\n ]\n if len(idcs) == 0:\n raise ValueError(\n 'Did not find data points that match '\n f'`adata.obs[{groups_key!r}].values == {str(group)!r}`. 
'\n f'Check whether `adata.obs[{groups_key!r}]` '\n 'actually contains what you expect.'\n )\n idcs_group = np.argsort(\n adata.obs['dpt_pseudotime'].values[\n adata.obs[groups_key].values == nodes_strs[igroup]\n ]\n )\n idcs = idcs[idcs_group]\n if key in adata.obs_keys():\n x += list(adata.obs[key].values[idcs])\n else:\n x += list(adata_X[:, key].X[idcs])\n if ikey == 0:\n groups += [group for i in range(len(idcs))]\n x_tick_locs.append(len(x))\n for anno in annotations:\n series = adata.obs[anno]\n if is_categorical_dtype(series):\n series = series.cat.codes\n anno_dict[anno] += list(series.values[idcs])\n if n_avg > 1:\n x = moving_average(x)\n if ikey == 0:\n for key in annotations:\n if not isinstance(anno_dict[key][0], str):\n anno_dict[key] = moving_average(anno_dict[key])\n if normalize_to_zero_one:\n x -= np.min(x)\n x /= np.max(x)\n X.append(x)\n if not as_heatmap:\n ax.plot(x[xlim[0] : xlim[1]], label=key)\n if ikey == 0:\n for igroup, group in enumerate(nodes):\n if len(groups_names) > 0 and group not in groups_names:\n label = groups_names[group]\n else:\n label = group\n x_tick_labels.append(label)\n X = np.array(X)\n if as_heatmap:\n img = ax.imshow(X, aspect='auto', interpolation='nearest', cmap=color_map)\n if show_yticks:\n ax.set_yticks(range(len(X)))\n ax.set_yticklabels(keys, fontsize=ytick_fontsize)\n else:\n ax.set_yticks([])\n ax.set_frame_on(False)\n ax.set_xticks([])\n ax.tick_params(axis='both', which='both', length=0)\n ax.grid(False)\n if show_colorbar:\n pl.colorbar(img, ax=ax)\n left_margin = 0.2 if left_margin is None else left_margin\n pl.subplots_adjust(left=left_margin)\n else:\n left_margin = 0.4 if left_margin is None else left_margin\n if len(keys) > 1:\n pl.legend(\n frameon=False,\n loc='center left',\n bbox_to_anchor=(-left_margin, 0.5),\n fontsize=legend_fontsize,\n )\n xlabel = groups_key\n if not as_heatmap:\n ax.set_xlabel(xlabel)\n pl.yticks([])\n if len(keys) == 1:\n pl.ylabel(keys[0] + ' (a.u.)')\n else:\n import matplotlib.colors\n\n # groups bar\n ax_bounds = ax.get_position().bounds\n groups_axis = pl.axes(\n (\n ax_bounds[0],\n ax_bounds[1] - ax_bounds[3] / len(keys),\n ax_bounds[2],\n ax_bounds[3] / len(keys),\n )\n )\n groups = np.array(groups)[None, :]\n groups_axis.imshow(\n groups,\n aspect='auto',\n interpolation=\"nearest\",\n cmap=matplotlib.colors.ListedColormap(\n # the following line doesn't work because of normalization\n # adata.uns['paga_groups_colors'])\n palette_groups[np.min(groups).astype(int) :],\n N=int(np.max(groups) + 1 - np.min(groups)),\n ),\n )\n if show_yticks:\n groups_axis.set_yticklabels(['', xlabel, ''], fontsize=ytick_fontsize)\n else:\n groups_axis.set_yticks([])\n groups_axis.set_frame_on(False)\n if show_node_names:\n ypos = (groups_axis.get_ylim()[1] + groups_axis.get_ylim()[0]) / 2\n x_tick_locs = _sc_utils.moving_average(x_tick_locs, n=2)\n for ilabel, label in enumerate(x_tick_labels):\n groups_axis.text(\n x_tick_locs[ilabel],\n ypos,\n x_tick_labels[ilabel],\n fontdict=dict(\n horizontalalignment='center', verticalalignment='center',\n ),\n )\n groups_axis.set_xticks([])\n groups_axis.grid(False)\n groups_axis.tick_params(axis='both', which='both', length=0)\n # further annotations\n y_shift = ax_bounds[3] / len(keys)\n for ianno, anno in enumerate(annotations):\n if ianno > 0:\n y_shift = ax_bounds[3] / len(keys) / 2\n anno_axis = pl.axes(\n (\n ax_bounds[0],\n ax_bounds[1] - (ianno + 2) * y_shift,\n ax_bounds[2],\n y_shift,\n )\n )\n arr = np.array(anno_dict[anno])[None, :]\n if anno not in 
color_maps_annotations:\n color_map_anno = (\n 'Vega10' if is_categorical_dtype(adata.obs[anno]) else 'Greys'\n )\n else:\n color_map_anno = color_maps_annotations[anno]\n img = anno_axis.imshow(\n arr, aspect='auto', interpolation='nearest', cmap=color_map_anno,\n )\n if show_yticks:\n anno_axis.set_yticklabels(['', anno, ''], fontsize=ytick_fontsize)\n anno_axis.tick_params(axis='both', which='both', length=0)\n else:\n anno_axis.set_yticks([])\n anno_axis.set_frame_on(False)\n anno_axis.set_xticks([])\n anno_axis.grid(False)\n if title is not None:\n ax.set_title(title, fontsize=title_fontsize)\n if show is None and not ax_was_none:\n show = False\n else:\n show = settings.autoshow if show is None else show\n _utils.savefig_or_show('paga_path', show=show, save=save)\n if return_data:\n df = pd.DataFrame(data=X.T, columns=keys)\n df['groups'] = moving_average(groups) # groups is without moving average, yet\n if 'dpt_pseudotime' in anno_dict:\n df['distance'] = anno_dict['dpt_pseudotime'].T\n return ax, df if ax_was_none and not show else df\n else:\n return ax if ax_was_none and not show else None\n\n\ndef paga_adjacency(\n adata,\n adjacency='connectivities',\n adjacency_tree='connectivities_tree',\n as_heatmap=True,\n color_map=None,\n show=None,\n save=None,\n):\n \"\"\"Connectivity of paga groups.\"\"\"\n connectivity = adata.uns[adjacency].toarray()\n connectivity_select = adata.uns[adjacency_tree]\n if as_heatmap:\n matrix(connectivity, color_map=color_map, show=False)\n for i in range(connectivity_select.shape[0]):\n neighbors = connectivity_select[i].nonzero()[1]\n pl.scatter([i for j in neighbors], neighbors, color='black', s=1)\n # as a stripplot\n else:\n pl.figure()\n for i, cs in enumerate(connectivity):\n x = [i for j, d in enumerate(cs) if i != j]\n y = [c for j, c in enumerate(cs) if i != j]\n pl.scatter(x, y, color='gray', s=1)\n neighbors = connectivity_select[i].nonzero()[1]\n pl.scatter([i for j in neighbors], cs[neighbors], color='black', s=1)\n _utils.savefig_or_show('paga_connectivity', show=show, save=save)\n",
"import scanpy as sc\nimport numpy as np\n\n\ndef test_sim_toggleswitch():\n adata = sc.tl.sim('toggleswitch')\n np.allclose(adata.X, sc.datasets.toggleswitch().X, np.finfo(np.float32).eps)\n"
] | [
[
"scipy.cluster.hierarchy.linkage",
"pandas.api.types.is_categorical_dtype"
],
[
"pandas.DataFrame"
],
[
"numpy.arange",
"matplotlib.colors.ListedColormap"
],
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.linspace",
"numpy.cumsum",
"pandas.DataFrame",
"matplotlib.pyplot.axes",
"numpy.max",
"numpy.mean",
"matplotlib.pyplot.gca",
"matplotlib.colors.is_color_like",
"numpy.clip",
"numpy.arange",
"matplotlib.pyplot.gcf",
"numpy.sin",
"matplotlib.patheffects.withStroke",
"matplotlib.ticker.FuncFormatter",
"matplotlib.pyplot.subplots_adjust",
"numpy.column_stack",
"matplotlib.pyplot.figure",
"numpy.isclose",
"pandas.api.types.is_categorical_dtype",
"numpy.power",
"numpy.min",
"numpy.median",
"numpy.argsort",
"matplotlib.pyplot.suptitle",
"numpy.array",
"scipy.sparse.csgraph.connected_components",
"matplotlib.pyplot.ylabel",
"numpy.random.random",
"numpy.abs",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.cos",
"matplotlib.pyplot.colorbar",
"numpy.bincount",
"matplotlib.pyplot.yticks",
"matplotlib.colors.rgb2hex"
],
[
"numpy.finfo"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
srsohn/mtsgi | [
"31dc14b007758edc0aa340397184f47695705b1c"
] | [
"mtsgi/run_meta_loop.py"
] | [
"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Runs meta agent on toywob environment locally.\"\"\"\n\nimport datetime\nimport functools\nimport glob\nimport os\nimport numpy as np\nnp.set_printoptions(precision=3, suppress=True)\n\nfrom absl import app\nfrom absl import flags\nfrom acme import specs\nfrom acme.utils import paths\n\nfrom mtsgi.system import multiprocessing\n\nimport mtsgi\nfrom mtsgi import agents\nfrom mtsgi.utils import env_utils\nfrom mtsgi.utils import log_utils\nfrom mtsgi.utils import snt_utils\n\nimport tensorflow as tf\n\n# Environment loop flags.\nflags.DEFINE_integer('seed', 1, 'Random seed.')\nflags.DEFINE_integer('exp_id', 1, 'Experiment ID.')\nflags.DEFINE_integer('load_exp_id', -1, 'ID of experiment to load from.')\nflags.DEFINE_integer('num_envs', 1, 'Batch size of environments (parallel or serial_wrapper).')\nflags.DEFINE_boolean('use_multi_processing', False, 'multi-processing/serial.')\nflags.DEFINE_integer('num_trials', 10, 'Number updates to perform over tasks distribution.')\nflags.DEFINE_integer('num_adapt_steps', 1000, 'Number of adaptation steps to task in a trial.')\nflags.DEFINE_integer('num_test_episodes', 4, 'Number of test episodes to average performance over.')\nflags.DEFINE_integer('num_trial_splits', 40, 'Number of iterations of adaptation-test phases in a trial.')\nflags.DEFINE_enum('label', 'meta_eval', ['meta_train', 'meta_eval'],\n 'Environment label/mode (meta_train or meta_eval).')\n\n# Environment flags.\nflags.DEFINE_string('env_id', 'mining', 'Environment name/ID.')\nflags.DEFINE_string('graph_param', 'eval', 'Difficulty of subtask graph.')\nflags.DEFINE_string('load_graph_param', 'train', 'Difficulty of subtask graph.')\nflags.DEFINE_float('gamma', 0.99, 'Discount factor.')\n\n# Debugging\nflags.DEFINE_integer('task_idx_debug', -1, 'Fixing task index for debugging.')\nflags.DEFINE_integer('verbose_level', 0, 'verbose level.')\n\n# Agent flags.\n# TODO: add more agent types.\nflags.DEFINE_enum('algorithm', 'msgi', ['msgi', 'hrl', 'rlrl'],\n 'Name of the algorithm to run (e.g. 
msgi, rlrl).')\nflags.DEFINE_integer('n_step_horizon', 10, 'n-step learning horizon.')\nflags.DEFINE_float('learning_rate', 1e-3, 'Learning rate.')\nflags.DEFINE_float('gae_lambda', 0.99, 'GAE lambda parameter.')\nflags.DEFINE_float('entropy_cost', 1.0, 'Coefficient for entropy loss.')\nflags.DEFINE_float('baseline_cost', 0.5, 'Coefficient for critic/baseline loss.')\nflags.DEFINE_float('max_abs_reward', None, 'Reward clipping.')\nflags.DEFINE_float('max_gradient_norm', None, 'Maximum gradient norm.')\n\n# ILP\nflags.DEFINE_boolean('neccessary_first', True, 'Whether to use neccessary-first branching in CART.')\n\n# GRProp\nflags.DEFINE_float('grprop_temp', 200., 'GRProp parameter.')\nflags.DEFINE_float('w_a', 3., 'GRProp parameter.')\nflags.DEFINE_float('beta_a', 8., 'GRProp parameter.')\nflags.DEFINE_float('ep_or', 0.8, 'GRProp parameter.')\nflags.DEFINE_float('temp_or', 2., 'GRProp parameter.')\n\n# MSGI & MTSGI\nflags.DEFINE_enum('exploration', 'ucb', ['random', 'count', 'ucb', 'mtucb'],\n 'Adaptation exploration strategy for MSGI.')\nflags.DEFINE_float('ucb_temp', 1.0, 'UCB temperature parameter.')\n\n# Logger flags.\n# TODO: If False, CSVDumper won't be used but this will break logging\n# as the data (split_avg_log) to be logged is a list.\nflags.DEFINE_boolean('save_logs', True, 'Whether to save the loggings.')\n\n# Snapshot flags.\nflags.DEFINE_boolean('save_snapshots', False, 'Whether to save the model snapshots.')\n\n# MSGI flags.\nflags.DEFINE_boolean('visualize', False, 'Whether to visualize subtask graph.')\nFLAGS = flags.FLAGS\n\nAGENT_CLASS_DICT = {\n 'msgi': agents.MSGI,\n 'rlrl': agents.RLRL,\n 'hrl': agents.HRL\n}\nMETA_EVAL_ONLY = ['msgi', 'hrl']\nMETA_TRAIN = ['rlrl']\nUSE_GRPROP = ['msgi']\nUSE_UCB = ['msgi']\nUSE_ILP = ['msgi']\n\ndef main(_):\n # Assertions\n argument_assertions()\n\n ### runs: logs/{env_name}_{task_name}/{method_hparam}/{seed}\n hparam_str, load_hparam_str = get_hparam_str(FLAGS)\n env_str = f'{FLAGS.env_id}_{FLAGS.graph_param}'\n time_str = datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\")\n root_dir = f\"logs/{env_str}_{FLAGS.seed}/{FLAGS.algorithm}_{hparam_str}/run{FLAGS.exp_id}_{FLAGS.seed}_{time_str}\"\n if FLAGS.visualize:\n graph_dir = f\"visualize/{env_str}_{FLAGS.seed}/{FLAGS.algorithm}_{hparam_str}/run{FLAGS.exp_id}_{FLAGS.seed}_{time_str}\"\n else:\n graph_dir = None\n save_filename = f\"{root_dir}/train_data.npy\"\n\n ### Loading\n if FLAGS.load_exp_id >= 0:\n assert len(load_hparam_str) > 0, 'Error: hparam string for loading has not been set!'\n load_prefixes, load_postfixes = [], []\n load_filenames = []\n\n if FLAGS.env_id == 'webnav': # 3:7 fixed web envs.\n configs = mtsgi.envs.WEBNAV_TASKS[f'train_{FLAGS.seed}']\n configs = [config.environment_id for config in configs]\n for config in configs:\n load_prefixes.append(f\"logs/{config}_train_1\")\n load_postfixes.append(f\"run{FLAGS.load_exp_id}_{FLAGS.seed}_*/train_data.npy\")\n\n elif FLAGS.env_id in ('playground', 'mining'):\n load_prefixes.append(f'logs/{FLAGS.env_id}_{FLAGS.load_graph_param}_{FLAGS.seed}')\n load_postfixes.append(f\"run{FLAGS.load_exp_id}_{FLAGS.seed}_*/train_data.npy\")\n\n else:\n raise ValueError(FLAGS.env_id)\n\n # Collect all the files to load.\n for prefix, postfix in zip(load_prefixes, load_postfixes):\n load_glob_str = f\"{prefix}/{FLAGS.algorithm}_{load_hparam_str}/{postfix}\"\n matching_paths = glob.glob(load_glob_str)\n assert len(matching_paths) == 1, f'Error: cannot load from {len(matching_paths)} files: {load_glob_str}'\n 
load_filenames.append(matching_paths[0])\n\n # Create loggers for meta RL loop.\n environment_logger, agent_logger = log_utils.create_loggers(\n logdir=root_dir,\n label='meta',\n save_data=FLAGS.save_logs,\n )\n\n # TODO: support environment creation of individual web navigation tasks.\n # Prepare adaptation phase environment.\n adapt_environment = env_utils.create_environment(\n env_id=FLAGS.env_id,\n batch_size=FLAGS.num_envs,\n graph_param=FLAGS.graph_param,\n use_multi_processing=FLAGS.use_multi_processing,\n num_adapt_steps=FLAGS.num_adapt_steps,\n add_fewshot_wrapper=True,\n seed=FLAGS.seed,\n gamma=FLAGS.gamma,\n verbose_level=FLAGS.verbose_level,\n )\n environment_spec = specs.make_environment_spec(adapt_environment)\n\n # Prepare test phase environment.\n test_environment = env_utils.create_environment(\n env_id=FLAGS.env_id,\n batch_size=FLAGS.num_envs,\n graph_param=FLAGS.graph_param,\n use_multi_processing=FLAGS.use_multi_processing,\n num_adapt_steps=FLAGS.num_adapt_steps,\n add_fewshot_wrapper=True,\n seed=FLAGS.seed,\n gamma=FLAGS.gamma,\n verbose_level=FLAGS.verbose_level,\n )\n\n # Create directory for graph visualization.\n meta_agent_class = AGENT_CLASS_DICT[FLAGS.algorithm]\n agent_args = dict(\n environment_spec=environment_spec,\n logger=agent_logger,\n verbose_level=FLAGS.verbose_level,\n )\n if FLAGS.algorithm in USE_ILP:\n agent_args.update(\n num_adapt_steps=FLAGS.num_adapt_steps,\n num_trial_splits=FLAGS.num_trial_splits,\n exploration=FLAGS.exploration,\n visualize=FLAGS.visualize,\n directory=graph_dir,\n environment_id=FLAGS.env_id,\n branch_neccessary_first=FLAGS.neccessary_first,\n )\n if FLAGS.algorithm in USE_GRPROP:\n agent_args.update(\n temp=FLAGS.grprop_temp,\n w_a=FLAGS.w_a,\n beta_a=FLAGS.beta_a,\n ep_or=FLAGS.ep_or,\n temp_or=FLAGS.temp_or,\n )\n if FLAGS.algorithm in USE_UCB: # add ucb hparam\n agent_args.update(ucb_temp=FLAGS.ucb_temp)\n\n if 'mtsgi' in FLAGS.algorithm:\n agent_args.update(\n prior_sample_mode=FLAGS.prior_sample_mode if FLAGS.label == 'meta_eval' else None,\n posterior_mode=FLAGS.posterior_mode,\n )\n elif FLAGS.algorithm == 'hrl':\n assert FLAGS.num_envs == 1, 'Currently HRL only supports single worker.'\n assert FLAGS.n_step_horizon <= FLAGS.num_adapt_steps // FLAGS.num_trial_splits, \\\n 'Make sure n-step horizon is less than equal to adaptation_steps.'\n assert FLAGS.num_adapt_steps % FLAGS.n_step_horizon == 0, \\\n 'The number of training steps must be a multiple of n-step horizon.'\n\n if FLAGS.env_id in {'playground', 'mining'}:\n # spatial observation.\n network = snt_utils.CombinedNN(environment_spec.actions)\n else:\n network = snt_utils.RecurrentNN(environment_spec.actions)\n\n agent_args.update(\n network=network,\n n_step_horizon=FLAGS.n_step_horizon\n )\n\n elif FLAGS.algorithm == 'rlrl':\n assert FLAGS.num_adapt_steps % FLAGS.n_step_horizon == 0, \\\n 'Number of adaptation steps must be a multiple of n-step horizon.'\n minibatch_size = FLAGS.num_adapt_steps // FLAGS.n_step_horizon\n\n #snapshot_dir = paths.process_path(\n # 'snapshot',\n # FLAGS.env_id,\n # FLAGS.algorithm,\n # f'run_{FLAGS.exp_id}',\n # add_uid=False\n #) if FLAGS.save_snapshots else None\n if FLAGS.save_snapshots:\n snapshot_dir = root_dir.replace('logs', 'snapshot')\n else:\n snapshot_dir = None\n\n if FLAGS.env_id in {'playground', 'mining'}:\n # spatial observation.\n network = snt_utils.CombinedNN(environment_spec.actions)\n else:\n network = snt_utils.RecurrentNN(environment_spec.actions)\n\n agent_args.update(\n network=network,\n 
snapshot_dir=snapshot_dir,\n n_step_horizon=FLAGS.num_adapt_steps,\n minibatch_size=1,\n learning_rate=FLAGS.learning_rate,\n discount=FLAGS.gamma,\n gae_lambda=FLAGS.gae_lambda,\n entropy_cost=FLAGS.entropy_cost,\n baseline_cost=FLAGS.baseline_cost,\n max_abs_reward=FLAGS.max_abs_reward,\n max_gradient_norm=FLAGS.max_gradient_norm\n )\n elif 'msgi' in FLAGS.algorithm:\n pass\n else:\n raise NotImplementedError\n\n meta_agent = meta_agent_class(**agent_args)\n\n # Run meta RL loop\n meta_loop = mtsgi.EnvironmentMetaLoop(\n adapt_environment=adapt_environment,\n test_environment=test_environment,\n meta_agent=meta_agent,\n logger=environment_logger,\n label=FLAGS.label,\n verbose_level=FLAGS.verbose_level\n )\n if FLAGS.label == 'meta_train':\n assert FLAGS.algorithm in META_TRAIN, \\\n f'Error! {FLAGS.algorithm} does not support meta-training.'\n meta_loop.run(\n num_trials=FLAGS.num_trials,\n num_adapt_steps=FLAGS.num_adapt_steps,\n num_test_episodes=FLAGS.num_test_episodes,\n num_trial_splits=FLAGS.num_trial_splits,\n task_idx_debug=FLAGS.task_idx_debug\n ) # pytype: disable=attribute-error\n meta_agent.save(save_filename)\n else:\n if FLAGS.algorithm in META_TRAIN:\n meta_agent.load(load_filenames)\n meta_loop.run(\n num_trials=FLAGS.num_trials,\n num_adapt_steps=FLAGS.num_adapt_steps,\n num_test_episodes=FLAGS.num_test_episodes,\n num_trial_splits=FLAGS.num_trial_splits,\n task_idx_debug=FLAGS.task_idx_debug\n ) # pytype: disable=attribute-error\n\ndef get_hparam_str(FLAGS):\n hparam_str = \"\"\n load_hparam_str = \"\"\n # Exploration stretegy.\n if FLAGS.algorithm in {'msgi', 'mtsgi'}:\n hparam_str += f\"_explore={FLAGS.exploration}\"\n if FLAGS.algorithm == 'mtsgi':\n if FLAGS.exploration == 'mtucb':\n load_hparam_str += f\"_explore=ucb\" # loads ucb (not mtucb)\n else:\n load_hparam_str += f\"_explore={FLAGS.exploration}\"\n\n if FLAGS.exploration in {'ucb', 'mtucb'}:\n hparam_str += f\"_temp={FLAGS.ucb_temp}\"\n load_hparam_str += f\"_temp={FLAGS.ucb_temp}\"\n\n if FLAGS.algorithm in USE_ILP:\n if FLAGS.label == 'meta_eval':\n hparam_str += f\"_ILPnf={FLAGS.neccessary_first}\" # only matters for meta-eval\n\n if 'mtsgi' in FLAGS.algorithm:\n if FLAGS.label == 'meta_eval': # number of priors to load\n hparam_str += f\"_nprior={FLAGS.num_prior_load}\"\n hparam_str += f\"_posterior={FLAGS.posterior_mode}\"\n hparam_str += f\"_prior={FLAGS.prior_sample_mode}\"\n if FLAGS.load_exp_id >= 0:\n hparam_str += f\"_load={FLAGS.load_exp_id}-{FLAGS.seed}\"\n else:\n load_hparam_str = \"\"\n\n if len(hparam_str) == 0:\n hparam_str = \"_default\"\n return hparam_str, load_hparam_str\n\ndef argument_assertions():\n if FLAGS.algorithm in META_EVAL_ONLY:\n assert FLAGS.label == 'meta_eval', \\\n '%s only supports meta-eval, not meta-train'%(FLAGS.algorithm)\n\n if FLAGS.label == 'meta_train':\n assert FLAGS.num_trial_splits == 1\n assert 'train' in FLAGS.graph_param\n else:\n assert FLAGS.num_trial_splits > 1\n assert 'eval' in FLAGS.graph_param\n\n assert FLAGS.num_trial_splits > 0, 'Evaluation period must be greater than zero.'\n assert FLAGS.num_adapt_steps % FLAGS.num_trial_splits == 0, \\\n 'The number of training steps must be a multiple of num_trial_splits.'\n\n if FLAGS.exploration == 'mtucb':\n assert FLAGS.algorithm == 'mtsgi' and FLAGS.label == 'meta_eval'\n\nif __name__ == '__main__':\n multiprocessing.handle_main(functools.partial(app.run, main))\n"
] | [
[
"numpy.set_printoptions"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lpd6375/qlib | [
"3a911bc09ba5136cd7c61c2c8dcca8a63339e738",
"3a911bc09ba5136cd7c61c2c8dcca8a63339e738"
] | [
"examples/benchmarks_dynamic/DDG-DA/workflow.py",
"qlib/contrib/online/operator.py"
] | [
"# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\nfrom pathlib import Path\nfrom qlib.model.meta.task import MetaTask\nfrom qlib.contrib.meta.data_selection.model import MetaModelDS\nfrom qlib.contrib.meta.data_selection.dataset import InternalData, MetaDatasetDS\nfrom qlib.data.dataset.handler import DataHandlerLP\n\nimport pandas as pd\nimport fire\nimport sys\nfrom tqdm.auto import tqdm\nimport yaml\nimport pickle\nfrom qlib import auto_init\nfrom qlib.model.trainer import TrainerR, task_train\nfrom qlib.utils import init_instance_by_config\nfrom qlib.workflow.task.gen import RollingGen, task_generator\nfrom qlib.workflow import R\nfrom qlib.tests.data import GetData\n\nDIRNAME = Path(__file__).absolute().resolve().parent\nsys.path.append(str(DIRNAME.parent / \"baseline\"))\nfrom rolling_benchmark import RollingBenchmark # NOTE: sys.path is changed for import RollingBenchmark\n\n\nclass DDGDA:\n \"\"\"\n please run `python workflow.py run_all` to run the full workflow of the experiment\n\n **NOTE**\n before running the example, please clean your previous results with following command\n - `rm -r mlruns`\n \"\"\"\n\n def __init__(self, sim_task_model=\"linear\", forecast_model=\"linear\"):\n self.step = 20\n # NOTE:\n # the horizon must match the meaning in the base task template\n self.horizon = 20\n self.meta_exp_name = \"DDG-DA\"\n self.sim_task_model = sim_task_model # The model to capture the distribution of data.\n self.forecast_model = forecast_model # downstream forecasting models' type\n\n def get_feature_importance(self):\n # this must be lightGBM, because it needs to get the feature importance\n rb = RollingBenchmark(model_type=\"gbdt\")\n task = rb.basic_task()\n\n model = init_instance_by_config(task[\"model\"])\n dataset = init_instance_by_config(task[\"dataset\"])\n model.fit(dataset)\n\n fi = model.get_feature_importance()\n\n # Because the model use numpy instead of dataframe for training lightgbm\n # So the we must use following extra steps to get the right feature importance\n df = dataset.prepare(segments=slice(None), col_set=\"feature\", data_key=DataHandlerLP.DK_R)\n cols = df.columns\n fi_named = {cols[int(k.split(\"_\")[1])]: imp for k, imp in fi.to_dict().items()}\n\n return pd.Series(fi_named)\n\n def dump_data_for_proxy_model(self):\n \"\"\"\n Dump data for training meta model.\n The meta model will be trained upon the proxy forecasting model.\n This dataset is for the proxy forecasting model.\n \"\"\"\n topk = 30\n fi = self.get_feature_importance()\n col_selected = fi.nlargest(topk)\n\n rb = RollingBenchmark(model_type=self.sim_task_model)\n task = rb.basic_task()\n dataset = init_instance_by_config(task[\"dataset\"])\n prep_ds = dataset.prepare(slice(None), col_set=[\"feature\", \"label\"], data_key=DataHandlerLP.DK_L)\n\n feature_df = prep_ds[\"feature\"]\n label_df = prep_ds[\"label\"]\n\n feature_selected = feature_df.loc[:, col_selected.index]\n\n feature_selected = feature_selected.groupby(\"datetime\").apply(lambda df: (df - df.mean()).div(df.std()))\n feature_selected = feature_selected.fillna(0.0)\n\n df_all = {\n \"label\": label_df.reindex(feature_selected.index),\n \"feature\": feature_selected,\n }\n df_all = pd.concat(df_all, axis=1)\n df_all.to_pickle(DIRNAME / \"fea_label_df.pkl\")\n\n # dump data in handler format for aligning the interface\n handler = DataHandlerLP(\n data_loader={\n \"class\": \"qlib.data.dataset.loader.StaticDataLoader\",\n \"kwargs\": {\"config\": DIRNAME / \"fea_label_df.pkl\"},\n }\n )\n 
handler.to_pickle(DIRNAME / \"handler_proxy.pkl\", dump_all=True)\n\n @property\n def _internal_data_path(self):\n return DIRNAME / f\"internal_data_s{self.step}.pkl\"\n\n def dump_meta_ipt(self):\n \"\"\"\n Dump data for training meta model.\n This function will dump the input data for meta model\n \"\"\"\n # According to the experiments, the choice of the model type is very important for achieving good results\n rb = RollingBenchmark(model_type=self.sim_task_model)\n sim_task = rb.basic_task()\n\n if self.sim_task_model == \"gbdt\":\n sim_task[\"model\"].setdefault(\"kwargs\", {}).update({\"early_stopping_rounds\": None, \"num_boost_round\": 150})\n\n exp_name_sim = f\"data_sim_s{self.step}\"\n\n internal_data = InternalData(sim_task, self.step, exp_name=exp_name_sim)\n internal_data.setup(trainer=TrainerR)\n\n with self._internal_data_path.open(\"wb\") as f:\n pickle.dump(internal_data, f)\n\n def train_meta_model(self):\n \"\"\"\n training a meta model based on a simplified linear proxy model;\n \"\"\"\n\n # 1) leverage the simplified proxy forecasting model to train meta model.\n # - Only the dataset part is important, in current version of meta model will integrate the\n rb = RollingBenchmark(model_type=self.sim_task_model)\n sim_task = rb.basic_task()\n proxy_forecast_model_task = {\n # \"model\": \"qlib.contrib.model.linear.LinearModel\",\n \"dataset\": {\n \"class\": \"qlib.data.dataset.DatasetH\",\n \"kwargs\": {\n \"handler\": f\"file://{(DIRNAME / 'handler_proxy.pkl').absolute()}\",\n \"segments\": {\n \"train\": (\"2008-01-01\", \"2010-12-31\"),\n \"test\": (\"2011-01-01\", sim_task[\"dataset\"][\"kwargs\"][\"segments\"][\"test\"][1]),\n },\n },\n },\n # \"record\": [\"qlib.workflow.record_temp.SignalRecord\"]\n }\n # the proxy_forecast_model_task will be used to create meta tasks.\n # The test date of first task will be 2011-01-01. Each test segment will be about 20days\n # The tasks include all training tasks and test tasks.\n\n # 2) preparing meta dataset\n kwargs = dict(\n task_tpl=proxy_forecast_model_task,\n step=self.step,\n segments=0.62, # keep test period consistent with the dataset yaml\n trunc_days=1 + self.horizon,\n hist_step_n=30,\n fill_method=\"max\",\n rolling_ext_days=0,\n )\n # NOTE:\n # the input of meta model (internal data) are shared between proxy model and final forecasting model\n # but their task test segment are not aligned! 
It worked in my previous experiment.\n # So the misalignment will not affect the effectiveness of the method.\n with self._internal_data_path.open(\"rb\") as f:\n internal_data = pickle.load(f)\n md = MetaDatasetDS(exp_name=internal_data, **kwargs)\n\n # 3) train and logging meta model\n with R.start(experiment_name=self.meta_exp_name):\n R.log_params(**kwargs)\n mm = MetaModelDS(step=self.step, hist_step_n=kwargs[\"hist_step_n\"], lr=0.001, max_epoch=200, seed=43)\n mm.fit(md)\n R.save_objects(model=mm)\n\n @property\n def _task_path(self):\n return DIRNAME / f\"tasks_s{self.step}.pkl\"\n\n def meta_inference(self):\n \"\"\"\n Leverage meta-model for inference:\n - Given\n - baseline tasks\n - input for meta model(internal data)\n - meta model (its learnt knowledge on proxy forecasting model is expected to transfer to normal forecasting model)\n \"\"\"\n # 1) get meta model\n exp = R.get_exp(experiment_name=self.meta_exp_name)\n rec = exp.list_recorders(rtype=exp.RT_L)[0]\n meta_model: MetaModelDS = rec.load_object(\"model\")\n\n # 2)\n # we are transfer to knowledge of meta model to final forecasting tasks.\n # Create MetaTaskDataset for the final forecasting tasks\n # Aligning the setting of it to the MetaTaskDataset when training Meta model is necessary\n\n # 2.1) get previous config\n param = rec.list_params()\n trunc_days = int(param[\"trunc_days\"])\n step = int(param[\"step\"])\n hist_step_n = int(param[\"hist_step_n\"])\n fill_method = param.get(\"fill_method\", \"max\")\n\n rb = RollingBenchmark(model_type=self.forecast_model)\n task_l = rb.create_rolling_tasks()\n\n # 2.2) create meta dataset for final dataset\n kwargs = dict(\n task_tpl=task_l,\n step=step,\n segments=0.0, # all the tasks are for testing\n trunc_days=trunc_days,\n hist_step_n=hist_step_n,\n fill_method=fill_method,\n task_mode=MetaTask.PROC_MODE_TRANSFER,\n )\n\n with self._internal_data_path.open(\"rb\") as f:\n internal_data = pickle.load(f)\n mds = MetaDatasetDS(exp_name=internal_data, **kwargs)\n\n # 3) meta model make inference and get new qlib task\n new_tasks = meta_model.inference(mds)\n with self._task_path.open(\"wb\") as f:\n pickle.dump(new_tasks, f)\n\n def train_and_eval_tasks(self):\n \"\"\"\n Training the tasks generated by meta model\n Then evaluate it\n \"\"\"\n with self._task_path.open(\"rb\") as f:\n tasks = pickle.load(f)\n rb = RollingBenchmark(rolling_exp=\"rolling_ds\", model_type=self.forecast_model)\n rb.train_rolling_tasks(tasks)\n rb.ens_rolling()\n rb.update_rolling_rec()\n\n def run_all(self):\n # 1) file: handler_proxy.pkl\n self.dump_data_for_proxy_model()\n # 2)\n # file: internal_data_s20.pkl\n # mlflow: data_sim_s20, models for calculating meta_ipt\n self.dump_meta_ipt()\n # 3) meta model will be stored in `DDG-DA`\n self.train_meta_model()\n # 4) new_tasks are saved in \"tasks_s20.pkl\" (reweighter is added)\n self.meta_inference()\n # 5) load the saved tasks and train model\n self.train_and_eval_tasks()\n\n\nif __name__ == \"__main__\":\n GetData().qlib_data(exists_skip=True)\n auto_init()\n fire.Fire(DDGDA)\n",
"# Copyright (c) Microsoft Corporation.\r\n# Licensed under the MIT License.\r\n\r\n# pylint: skip-file\r\n\r\nimport fire\r\nimport pandas as pd\r\nimport pathlib\r\nimport qlib\r\nimport logging\r\n\r\nfrom ...data import D\r\nfrom ...log import get_module_logger\r\nfrom ...utils import get_pre_trading_date, is_tradable_date\r\nfrom ..evaluate import risk_analysis\r\nfrom ..backtest.backtest import update_account\r\n\r\nfrom .manager import UserManager\r\nfrom .utils import prepare\r\nfrom .utils import create_user_folder\r\nfrom .executor import load_order_list, save_order_list\r\nfrom .executor import SimulatorExecutor\r\nfrom .executor import save_score_series, load_score_series\r\n\r\n\r\nclass Operator:\r\n def __init__(self, client: str):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n client: str\r\n The qlib client config file(.yaml)\r\n \"\"\"\r\n self.logger = get_module_logger(\"online operator\", level=logging.INFO)\r\n self.client = client\r\n\r\n @staticmethod\r\n def init(client, path, date=None):\r\n \"\"\"Initial UserManager(), get predict date and trade date\r\n Parameters\r\n ----------\r\n client: str\r\n The qlib client config file(.yaml)\r\n path : str\r\n Path to save user account.\r\n date : str (YYYY-MM-DD)\r\n Trade date, when the generated order list will be traded.\r\n Return\r\n ----------\r\n um: UserManager()\r\n pred_date: pd.Timestamp\r\n trade_date: pd.Timestamp\r\n \"\"\"\r\n qlib.init_from_yaml_conf(client)\r\n um = UserManager(user_data_path=pathlib.Path(path))\r\n um.load_users()\r\n if not date:\r\n trade_date, pred_date = None, None\r\n else:\r\n trade_date = pd.Timestamp(date)\r\n if not is_tradable_date(trade_date):\r\n raise ValueError(\"trade date is not tradable date\".format(trade_date.date()))\r\n pred_date = get_pre_trading_date(trade_date, future=True)\r\n return um, pred_date, trade_date\r\n\r\n def add_user(self, id, config, path, date):\r\n \"\"\"Add a new user into the a folder to run 'online' module.\r\n\r\n Parameters\r\n ----------\r\n id : str\r\n User id, should be unique.\r\n config : str\r\n The file path (yaml) of user config\r\n path : str\r\n Path to save user account.\r\n date : str (YYYY-MM-DD)\r\n The date that user account was added.\r\n \"\"\"\r\n create_user_folder(path)\r\n qlib.init_from_yaml_conf(self.client)\r\n um = UserManager(user_data_path=path)\r\n add_date = D.calendar(end_time=date)[-1]\r\n if not is_tradable_date(add_date):\r\n raise ValueError(\"add date is not tradable date\".format(add_date.date()))\r\n um.add_user(user_id=id, config_file=config, add_date=add_date)\r\n\r\n def remove_user(self, id, path):\r\n \"\"\"Remove user from folder used in 'online' module.\r\n\r\n Parameters\r\n ----------\r\n id : str\r\n User id, should be unique.\r\n path : str\r\n Path to save user account.\r\n \"\"\"\r\n um = UserManager(user_data_path=path)\r\n um.remove_user(user_id=id)\r\n\r\n def generate(self, date, path):\r\n \"\"\"Generate order list that will be traded at 'date'.\r\n\r\n Parameters\r\n ----------\r\n date : str (YYYY-MM-DD)\r\n Trade date, when the generated order list will be traded.\r\n path : str\r\n Path to save user account.\r\n \"\"\"\r\n um, pred_date, trade_date = self.init(self.client, path, date)\r\n for user_id, user in um.users.items():\r\n dates, trade_exchange = prepare(um, pred_date, user_id)\r\n # get and save the score at predict date\r\n input_data = user.model.get_data_with_date(pred_date)\r\n score_series = user.model.predict(input_data)\r\n save_score_series(score_series, 
(pathlib.Path(path) / user_id), trade_date)\r\n\r\n # update strategy (and model)\r\n user.strategy.update(score_series, pred_date, trade_date)\r\n\r\n # generate and save order list\r\n order_list = user.strategy.generate_trade_decision(\r\n score_series=score_series,\r\n current=user.account.current_position,\r\n trade_exchange=trade_exchange,\r\n trade_date=trade_date,\r\n )\r\n save_order_list(\r\n order_list=order_list,\r\n user_path=(pathlib.Path(path) / user_id),\r\n trade_date=trade_date,\r\n )\r\n self.logger.info(\"Generate order list at {} for {}\".format(trade_date, user_id))\r\n um.save_user_data(user_id)\r\n\r\n def execute(self, date, exchange_config, path):\r\n \"\"\"Execute the orderlist at 'date'.\r\n\r\n Parameters\r\n ----------\r\n date : str (YYYY-MM-DD)\r\n Trade date, that the generated order list will be traded.\r\n exchange_config: str\r\n The file path (yaml) of exchange config\r\n path : str\r\n Path to save user account.\r\n \"\"\"\r\n um, pred_date, trade_date = self.init(self.client, path, date)\r\n for user_id, user in um.users.items():\r\n dates, trade_exchange = prepare(um, trade_date, user_id, exchange_config)\r\n executor = SimulatorExecutor(trade_exchange=trade_exchange)\r\n if str(dates[0].date()) != str(pred_date.date()):\r\n raise ValueError(\r\n \"The account data is not newest! last trading date {}, today {}\".format(\r\n dates[0].date(), trade_date.date()\r\n )\r\n )\r\n\r\n # load and execute the order list\r\n # will not modify the trade_account after executing\r\n order_list = load_order_list(user_path=(pathlib.Path(path) / user_id), trade_date=trade_date)\r\n trade_info = executor.execute(order_list=order_list, trade_account=user.account, trade_date=trade_date)\r\n executor.save_executed_file_from_trade_info(\r\n trade_info=trade_info,\r\n user_path=(pathlib.Path(path) / user_id),\r\n trade_date=trade_date,\r\n )\r\n self.logger.info(\"execute order list at {} for {}\".format(trade_date.date(), user_id))\r\n\r\n def update(self, date, path, type=\"SIM\"):\r\n \"\"\"Update account at 'date'.\r\n\r\n Parameters\r\n ----------\r\n date : str (YYYY-MM-DD)\r\n Trade date, that the generated order list will be traded.\r\n path : str\r\n Path to save user account.\r\n type : str\r\n which executor was been used to execute the order list\r\n 'SIM': SimulatorExecutor()\r\n \"\"\"\r\n if type not in [\"SIM\", \"YC\"]:\r\n raise ValueError(\"type is invalid, {}\".format(type))\r\n um, pred_date, trade_date = self.init(self.client, path, date)\r\n for user_id, user in um.users.items():\r\n dates, trade_exchange = prepare(um, trade_date, user_id)\r\n if type == \"SIM\":\r\n executor = SimulatorExecutor(trade_exchange=trade_exchange)\r\n else:\r\n raise ValueError(\"not found executor\")\r\n # dates[0] is the last_trading_date\r\n if str(dates[0].date()) > str(pred_date.date()):\r\n raise ValueError(\r\n \"The account data is not newest! 
last trading date {}, today {}\".format(\r\n dates[0].date(), trade_date.date()\r\n )\r\n )\r\n # load trade info and update account\r\n trade_info = executor.load_trade_info_from_executed_file(\r\n user_path=(pathlib.Path(path) / user_id), trade_date=trade_date\r\n )\r\n score_series = load_score_series((pathlib.Path(path) / user_id), trade_date)\r\n update_account(user.account, trade_info, trade_exchange, trade_date)\r\n\r\n portfolio_metrics = user.account.portfolio_metrics.generate_portfolio_metrics_dataframe()\r\n self.logger.info(portfolio_metrics)\r\n um.save_user_data(user_id)\r\n self.logger.info(\"Update account state {} for {}\".format(trade_date, user_id))\r\n\r\n def simulate(self, id, config, exchange_config, start, end, path, bench=\"SH000905\"):\r\n \"\"\"Run the ( generate_trade_decision -> execute_order_list -> update_account) process everyday\r\n from start date to end date.\r\n\r\n Parameters\r\n ----------\r\n id : str\r\n user id, need to be unique\r\n config : str\r\n The file path (yaml) of user config\r\n exchange_config: str\r\n The file path (yaml) of exchange config\r\n start : str \"YYYY-MM-DD\"\r\n The start date to run the online simulate\r\n end : str \"YYYY-MM-DD\"\r\n The end date to run the online simulate\r\n path : str\r\n Path to save user account.\r\n bench : str\r\n The benchmark that our result compared with.\r\n 'SH000905' for csi500, 'SH000300' for csi300\r\n \"\"\"\r\n # Clear the current user if exists, then add a new user.\r\n create_user_folder(path)\r\n um = self.init(self.client, path, None)[0]\r\n start_date, end_date = pd.Timestamp(start), pd.Timestamp(end)\r\n try:\r\n um.remove_user(user_id=id)\r\n except BaseException:\r\n pass\r\n um.add_user(user_id=id, config_file=config, add_date=pd.Timestamp(start_date))\r\n\r\n # Do the online simulate\r\n um.load_users()\r\n user = um.users[id]\r\n dates, trade_exchange = prepare(um, end_date, id, exchange_config)\r\n executor = SimulatorExecutor(trade_exchange=trade_exchange)\r\n for pred_date, trade_date in zip(dates[:-2], dates[1:-1]):\r\n user_path = pathlib.Path(path) / id\r\n\r\n # 1. load and save score_series\r\n input_data = user.model.get_data_with_date(pred_date)\r\n score_series = user.model.predict(input_data)\r\n save_score_series(score_series, (pathlib.Path(path) / id), trade_date)\r\n\r\n # 2. update strategy (and model)\r\n user.strategy.update(score_series, pred_date, trade_date)\r\n\r\n # 3. generate and save order list\r\n order_list = user.strategy.generate_trade_decision(\r\n score_series=score_series,\r\n current=user.account.current_position,\r\n trade_exchange=trade_exchange,\r\n trade_date=trade_date,\r\n )\r\n save_order_list(order_list=order_list, user_path=user_path, trade_date=trade_date)\r\n\r\n # 4. auto execute order list\r\n order_list = load_order_list(user_path=user_path, trade_date=trade_date)\r\n trade_info = executor.execute(trade_account=user.account, order_list=order_list, trade_date=trade_date)\r\n executor.save_executed_file_from_trade_info(\r\n trade_info=trade_info, user_path=user_path, trade_date=trade_date\r\n )\r\n # 5. 
update account state\r\n trade_info = executor.load_trade_info_from_executed_file(user_path=user_path, trade_date=trade_date)\r\n update_account(user.account, trade_info, trade_exchange, trade_date)\r\n portfolio_metrics = user.account.portfolio_metrics.generate_portfolio_metrics_dataframe()\r\n self.logger.info(portfolio_metrics)\r\n um.save_user_data(id)\r\n self.show(id, path, bench)\r\n\r\n def show(self, id, path, bench=\"SH000905\"):\r\n \"\"\"show the newly report (mean, std, information_ratio, annualized_return)\r\n\r\n Parameters\r\n ----------\r\n id : str\r\n user id, need to be unique\r\n path : str\r\n Path to save user account.\r\n bench : str\r\n The benchmark that our result compared with.\r\n 'SH000905' for csi500, 'SH000300' for csi300\r\n \"\"\"\r\n um = self.init(self.client, path, None)[0]\r\n if id not in um.users:\r\n raise ValueError(\"Cannot find user \".format(id))\r\n bench = D.features([bench], [\"$change\"]).loc[bench, \"$change\"]\r\n portfolio_metrics = um.users[id].account.portfolio_metrics.generate_portfolio_metrics_dataframe()\r\n portfolio_metrics[\"bench\"] = bench\r\n analysis_result = {}\r\n r = (portfolio_metrics[\"return\"] - portfolio_metrics[\"bench\"]).dropna()\r\n analysis_result[\"excess_return_without_cost\"] = risk_analysis(r)\r\n r = (portfolio_metrics[\"return\"] - portfolio_metrics[\"bench\"] - portfolio_metrics[\"cost\"]).dropna()\r\n analysis_result[\"excess_return_with_cost\"] = risk_analysis(r)\r\n print(\"Result:\")\r\n print(\"excess_return_without_cost:\")\r\n print(analysis_result[\"excess_return_without_cost\"])\r\n print(\"excess_return_with_cost:\")\r\n print(analysis_result[\"excess_return_with_cost\"])\r\n\r\n\r\ndef run():\r\n fire.Fire(Operator)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run()\r\n"
] | [
[
"pandas.concat",
"pandas.Series"
],
[
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
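
The DDG-DA record above hands intermediate artifacts between pipeline stages by pickling them to step-suffixed files (`internal_data_s{step}.pkl`, `tasks_s{step}.pkl`) so each stage, and any rerun, can pick up the previous stage's output. A minimal self-contained sketch of that hand-off pattern, independent of qlib — the `StagedPipeline` class and file names here are illustrative, not part of the library:

import pickle
from pathlib import Path

DIRNAME = Path(".")  # assumption: artifacts live next to the script, as above

class StagedPipeline:
    """Illustrative only: each stage persists its output so later stages
    (or a rerun after a crash) can resume from the last completed step."""

    def __init__(self, step: int = 20):
        self.step = step

    @property
    def _task_path(self) -> Path:
        return DIRNAME / f"tasks_s{self.step}.pkl"

    def produce_tasks(self, tasks) -> None:
        # stage N: write its result to disk
        with self._task_path.open("wb") as f:
            pickle.dump(tasks, f)

    def consume_tasks(self):
        # stage N+1: pick the result up again
        with self._task_path.open("rb") as f:
            return pickle.load(f)

if __name__ == "__main__":
    p = StagedPipeline()
    p.produce_tasks([{"model": "gbdt", "test": ("2011-01-01", "2011-01-20")}])
    print(p.consume_tasks())
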
luotigerlsx/models_archive | [
"c6578521ae61df7298003b42526b03e78d2d0d4b",
"c6578521ae61df7298003b42526b03e78d2d0d4b"
] | [
"research/mobilenet/mobilenet_trainer.py",
"research/mobilenet/mobilenet_v2.py"
] | [
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\"\"\"Runs an Image Classification task for MobileNet.\"\"\"\n\nimport os\nimport logging\nfrom typing import Mapping, Text, Any, Type, List, Dict, Optional\n\nfrom absl import app\nfrom absl import flags\nimport tensorflow as tf\nimport tensorflow_addons as tfa\n\nfrom research.mobilenet import dataset_loader\nfrom research.mobilenet import mobilenet_v1\nfrom research.mobilenet import mobilenet_v2\nfrom research.mobilenet import mobilenet_v3\nfrom research.mobilenet.configs import defaults\nfrom research.mobilenet.configs import archs\nfrom research.mobilenet.configs import dataset\n\nfrom official.vision.image_classification.configs import base_configs\nfrom official.vision.image_classification import optimizer_factory\nfrom official.vision.image_classification import dataset_factory\n\n\ndef _get_model_config() -> Mapping[Text, Type[archs.MobileNetConfig]]:\n return {\n 'mobilenet_v1': archs.MobileNetV1Config,\n 'mobilenet_v2': archs.MobileNetV2Config,\n 'mobilenet_v3_small': archs.MobileNetV3SmallConfig,\n 'mobilenet_v3_large': archs.MobileNetV3LargeConfig,\n 'mobilenet_v3_edge_tpu': archs.MobileNetV3EdgeTPUConfig\n }\n\n\ndef _get_model_builder() -> Mapping[Text, Any]:\n return {\n 'mobilenet_v1': mobilenet_v1.mobilenet_v1,\n 'mobilenet_v2': mobilenet_v2.mobilenet_v2,\n 'mobilenet_v3_small': mobilenet_v3.mobilenet_v3_small,\n 'mobilenet_v3_large': mobilenet_v3.mobilenet_v3_large,\n 'mobilenet_v3_edge_tpu': mobilenet_v3.mobilenet_v3_edge_tpu\n }\n\n\ndef _get_dataset_config() -> Mapping[Text, Type[dataset_factory.DatasetConfig]]:\n return {\n 'imagenet2012': dataset.ImageNetConfig,\n 'imagenette': dataset.ImageNetteConfig,\n 'mnist': dataset.MNISTConfig\n }\n\n\ndef _get_metrics(one_hot: bool) -> Mapping[Text, Any]:\n \"\"\"Get a dict of available metrics to track.\"\"\"\n if one_hot:\n return {\n # (name, metric_fn)\n 'acc': tf.keras.metrics.CategoricalAccuracy(),\n 'accuracy': tf.keras.metrics.CategoricalAccuracy(),\n 'top_1': tf.keras.metrics.CategoricalAccuracy(),\n 'top_5': tf.keras.metrics.TopKCategoricalAccuracy(\n k=5, name='top_5_accuracy'),\n }\n else:\n return {\n # (name, metric_fn)\n 'acc': tf.keras.metrics.SparseCategoricalAccuracy(),\n 'accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),\n 'top_1': tf.keras.metrics.SparseCategoricalAccuracy(),\n 'top_5': tf.keras.metrics.SparseTopKCategoricalAccuracy(\n k=5, name='top_5_accuracy'),\n }\n\n\ndef _get_callback(model_dir: Text) -> List[tf.keras.callbacks.Callback]:\n \"\"\"Create callbacks for Keras model training.\"\"\"\n check_point = tf.keras.callbacks.ModelCheckpoint(\n save_best_only=True,\n filepath=os.path.join(model_dir, 'model.ckpt-{epoch:04d}'),\n verbose=1)\n tensorboard = tf.keras.callbacks.TensorBoard(\n log_dir=model_dir, update_freq=100)\n return [check_point, tensorboard]\n\n\ndef _get_optimizer(\n batch_size: 
int,\n steps_per_epoch: int,\n lr_name: Text = defaults.LR_NAME_DEFAULT,\n optimizer_name: Text = defaults.OP_NAME_DEFAULT,\n lr_params: Dict[Text, Any] = defaults.LR_CONFIG_DEFAULT,\n optimizer_params: Dict[Text, Any] = defaults.OP_CONFIG_DEFAULT,\n) -> tf.keras.optimizers.Optimizer:\n \"\"\"Construct optimizer for model training.\n\n Args:\n batch_size: batch size\n steps_per_epoch: number of steps per epoch\n lr_name: learn rate scheduler name, e.g., exponential\n optimizer_name: optimizer name, e.g., adam, rmsprop\n lr_params: parameters for initiating learning rate scheduler object\n optimizer_params: parameters for initiating optimizer object\n\n Returns:\n Return a tf.keras.optimizers.Optimizer object.\n \"\"\"\n learning_rate_config = base_configs.LearningRateConfig(\n name=lr_name,\n **lr_params)\n optimizer_config = base_configs.OptimizerConfig(\n name=optimizer_name,\n **optimizer_params)\n learning_rate = optimizer_factory.build_learning_rate(\n params=learning_rate_config,\n batch_size=batch_size,\n train_steps=steps_per_epoch)\n optimizer = optimizer_factory.build_optimizer(\n optimizer_name=optimizer_config.name,\n base_learning_rate=learning_rate,\n params=optimizer_config.as_dict())\n return optimizer\n\n\ndef _resume_from_checkpoint(model: tf.keras.Model,\n model_dir: str,\n train_steps: int) -> int:\n \"\"\"Resumes from the latest checkpoint, if possible.\n\n Loads the model weights and optimizer settings from a checkpoint.\n This function should be used in case of preemption recovery.\n\n Args:\n model: The model whose weights should be restored.\n model_dir: The directory where model weights were saved.\n train_steps: The number of steps to train.\n\n Returns:\n The epoch of the latest checkpoint, or 0 if not restoring.\n\n \"\"\"\n logging.info('Load from checkpoint is enabled.')\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n logging.info('latest_checkpoint: %s', latest_checkpoint)\n if not latest_checkpoint:\n logging.info('No checkpoint detected.')\n return 0\n\n logging.info('Checkpoint file %s found and restoring from '\n 'checkpoint', latest_checkpoint)\n model.load_weights(latest_checkpoint)\n initial_epoch = model.optimizer.iterations // train_steps\n logging.info('Completed loading from checkpoint.')\n logging.info('Resuming from epoch %d', initial_epoch)\n return int(initial_epoch)\n\n\ndef get_flags():\n \"\"\"Initialize the data extraction parameters.\n\n Define the arguments with the default values and parses the arguments\n passed to the main program.\n\n \"\"\"\n flags.DEFINE_string(\n 'model_name',\n help='MobileNet version name: mobilenet_v1, mobilenet_v2, '\n 'mobilenet_v3_small and mobilenet_v3_large',\n default='mobilenet_v1'\n )\n flags.DEFINE_string(\n 'dataset_name',\n help='Dataset name from TDFS to train on: imagenette, imagenet2012',\n default='imagenette'\n )\n flags.DEFINE_string(\n 'model_dir',\n help='Working directory.',\n default='./tmp'\n )\n flags.DEFINE_string(\n 'data_dir',\n help='Directory for training data.',\n default=None\n )\n flags.DEFINE_bool(\n 'resume_checkpoint',\n help='Whether resume training from previous checkpoint.',\n default=False\n )\n flags.DEFINE_string(\n 'optimizer_name',\n help='Name of optimizer.',\n default='rmsprop'\n )\n flags.DEFINE_string(\n 'learning_scheduler_name',\n help='Name of learning rate scheduler.',\n default='exponential'\n )\n # for hyperparameter tuning\n flags.DEFINE_float(\n 'op_momentum',\n help='Optimizer momentum.',\n default=0.9\n )\n flags.DEFINE_float(\n 
'op_decay_rate',\n help='Optimizer discounting factor for gradient.',\n default=0.9\n )\n flags.DEFINE_float(\n 'lr',\n help='Base learning rate.',\n default=0.008\n )\n flags.DEFINE_float(\n 'lr_decay_rate',\n help='Magnitude of learning rate decay.',\n default=0.97\n )\n flags.DEFINE_float(\n 'lr_decay_epochs',\n help='Frequency of learning rate decay.',\n default=2.4\n )\n flags.DEFINE_float(\n 'label_smoothing',\n help='The amount of label smoothing.',\n default=0.0,\n )\n flags.DEFINE_float(\n 'ma_decay_rate',\n help='Exponential moving average decay rate.',\n default=None\n )\n flags.DEFINE_float(\n 'dropout_rate',\n help='Dropout rate.',\n default=0.2\n )\n flags.DEFINE_float(\n 'std_weight_decay',\n help='Standard weight decay.',\n default=0.00004\n )\n flags.DEFINE_float(\n 'truncated_normal_stddev',\n help='The standard deviation of the truncated normal weight initializer.',\n default=0.09\n )\n flags.DEFINE_float(\n 'batch_norm_decay',\n help='Batch norm decay.',\n default=0.9997\n )\n flags.DEFINE_integer(\n 'batch_size',\n help='Training batch size.',\n default=4 # for testing purpose\n )\n flags.DEFINE_integer(\n 'epochs',\n help='Number of epochs.',\n default=5\n )\n\n\ndef get_dataset(config: dataset_factory.DatasetConfig,\n slim_preprocess: bool = False) -> tf.data.Dataset:\n \"\"\"Build dataset for training, evaluation and test\"\"\"\n logging.info(\"Dataset Config: \")\n logging.info(config)\n if config.builder == 'tfds':\n raw_dataset = dataset_loader.load_tfds(\n dataset_name=config.name,\n data_dir=config.data_dir,\n download=config.download,\n split=config.split\n )\n elif config.builder == 'records':\n raw_dataset = dataset_loader.load_tfrecords(\n data_dir=config.data_dir,\n split=config.split,\n file_shuffle_buffer_size=config.file_shuffle_buffer_size\n )\n else:\n raise ValueError('Only support tfds and tfrecords builder.')\n\n processed_dataset = dataset_loader.pipeline(\n dataset=raw_dataset,\n config=config,\n slim_preprocess=slim_preprocess\n )\n\n return processed_dataset\n\n\ndef build_model(model_name: Text,\n model_config: archs.MobileNetConfig,\n dataset_config: Optional[dataset_factory.DatasetConfig] = None,\n ) -> tf.keras.models.Model:\n \"\"\"Build mobilenet model given configuration\"\"\"\n\n model_build_function = _get_model_builder().get(model_name)\n if model_build_function:\n if dataset_config:\n image_size = dataset_config.image_size\n channels = dataset_config.num_channels\n model_config.input_shape = (image_size, image_size, channels)\n model_config.num_classes = dataset_config.num_classes\n return model_build_function(config=model_config)\n else:\n raise ValueError('The model {} is not supported.'.format(model_name))\n\n\ndef train_and_eval(params: flags.FlagValues) -> tf.keras.callbacks.History:\n \"\"\"Runs the train and eval path using compile/fit.\"\"\"\n logging.info('Run training for {} with {}'.format(params.model_name,\n params.dataset_name))\n logging.info('The CLI params are: {}'.format(params.flag_values_dict()))\n d_config = _get_dataset_config().get(params.dataset_name)()\n m_config = _get_model_config().get(params.model_name)()\n\n logging.info('Training dataset configuration:', d_config)\n logging.info('Training model configuration:', m_config)\n\n # override the model params with CLI params\n m_config.num_classes = d_config.num_classes\n m_config.dropout_keep_prob = 1 - params.dropout_rate\n m_config.weight_decay = params.std_weight_decay\n m_config.stddev = params.truncated_normal_stddev\n m_config.batch_norm_decay = 
params.batch_norm_decay\n\n strategy = tf.distribute.MirroredStrategy()\n with strategy.scope():\n # override the dataset params with CLI params\n if params.data_dir:\n d_config.data_dir = params.data_dir\n global_batch_size = params.batch_size * strategy.num_replicas_in_sync\n\n # override the dataset params with CLI params\n # for distributed training, update batch size\n d_config.batch_size = global_batch_size\n # determine whether one_hot is used based on label_smoothing\n d_config.one_hot = params.label_smoothing and params.label_smoothing > 0\n\n # build train dataset\n train_dataset = get_dataset(d_config)\n # build validation dataset\n d_config.split = 'validation'\n eval_dataset = get_dataset(d_config)\n\n # compute number iterations per epoch\n steps_per_epoch = d_config.num_examples // d_config.batch_size\n eval_steps = d_config.num_eval_examples // d_config.batch_size\n\n # build the model\n keras_model = build_model(\n model_name=params.model_name,\n dataset_config=d_config,\n model_config=m_config\n )\n\n # build the optimizer\n learning_params = defaults.LR_CONFIG_DEFAULT\n learning_params.update({'initial_lr': params.lr,\n 'decay_epochs': params.lr_decay_epochs,\n 'decay_rate': params.lr_decay_rate})\n optimizer_params = defaults.OP_CONFIG_DEFAULT\n optimizer_params.update({'decay': params.op_decay_rate,\n 'momentum': params.op_momentum})\n optimizer = _get_optimizer(\n batch_size=global_batch_size,\n steps_per_epoch=steps_per_epoch,\n lr_name=params.learning_scheduler_name,\n optimizer_name=params.optimizer_name,\n lr_params=learning_params,\n optimizer_params=optimizer_params\n )\n\n logging.info('Exponential decay rate:{}'.format(params.ma_decay_rate))\n if params.ma_decay_rate:\n optimizer = tfa.optimizers.MovingAverage(\n optimizer=optimizer,\n average_decay=params.ma_decay_rate)\n\n # compile model\n if d_config.one_hot:\n loss_obj = tf.keras.losses.CategoricalCrossentropy(\n label_smoothing=params.label_smoothing)\n else:\n loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()\n\n keras_model.compile(\n optimizer=optimizer,\n loss=loss_obj,\n metrics=[_get_metrics(one_hot=d_config.one_hot)['acc']],\n )\n\n logging.info(keras_model.summary())\n\n initial_epoch = 0\n if params.resume_checkpoint:\n initial_epoch = _resume_from_checkpoint(model=keras_model,\n model_dir=params.model_dir,\n train_steps=steps_per_epoch)\n\n # Callbacks\n callbacks_to_use = _get_callback(model_dir=params.model_dir)\n\n # Train model\n history = keras_model.fit(\n train_dataset,\n steps_per_epoch=steps_per_epoch,\n epochs=params.epochs,\n validation_data=eval_dataset,\n validation_steps=eval_steps,\n initial_epoch=initial_epoch,\n verbose=1,\n callbacks=callbacks_to_use\n )\n\n return history\n\n\ndef main(_):\n history = train_and_eval(flags.FLAGS)\n if history:\n logging.info('Run history:\\n%s', history)\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n format='%(asctime)-15s:%(levelname)s:%(module)s:%(message)s',\n level=logging.INFO)\n get_flags()\n app.run(main)\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"MobileNet v2.\n\nAdapted from tf.keras.applications.mobilenet_v2.MobileNetV2().\n\nArchitecture: https://arxiv.org/abs/1801.04381\n\nThe base model gives 72.2% accuracy on ImageNet, with 300MMadds,\n3.4 M parameters.\n\"\"\"\n\nimport logging\n\nimport tensorflow as tf\n\nfrom research.mobilenet import common_modules\nfrom research.mobilenet.configs import archs\n\nlayers = tf.keras.layers\n\nMobileNetV2Config = archs.MobileNetV2Config\n\n\ndef mobilenet_v2(config: MobileNetV2Config = MobileNetV2Config()\n ) -> tf.keras.models.Model:\n \"\"\"Instantiates the MobileNet Model.\"\"\"\n\n model_name = config.name\n input_shape = config.input_shape\n\n img_input = layers.Input(shape=input_shape, name='Input')\n\n # build network base\n x = common_modules.mobilenet_base(img_input, config)\n\n # build classification head\n x = common_modules.mobilenet_head(x, config)\n\n return tf.keras.models.Model(inputs=img_input,\n outputs=x,\n name=model_name)\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n format='%(asctime)-15s:%(levelname)s:%(module)s:%(message)s',\n level=logging.INFO)\n model = mobilenet_v2()\n model.compile(\n optimizer='adam',\n loss=tf.keras.losses.categorical_crossentropy,\n metrics=[tf.keras.metrics.categorical_crossentropy])\n logging.info(model.summary())\n"
] | [
[
"tensorflow.keras.metrics.SparseTopKCategoricalAccuracy",
"tensorflow.keras.metrics.TopKCategoricalAccuracy",
"tensorflow.train.latest_checkpoint",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.distribute.MirroredStrategy"
],
[
"tensorflow.keras.models.Model"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
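
`_resume_from_checkpoint` in the trainer record above recovers the epoch to resume from by dividing the optimizer's global batch counter by the number of steps per epoch. A small sketch of just that arithmetic — the function name is ours; Keras itself only exposes the `optimizer.iterations` counter:

def epoch_from_iterations(optimizer_iterations: int, steps_per_epoch: int) -> int:
    """Map the optimizer's global step counter back to an epoch index.

    Keras increments optimizer.iterations once per batch, so integer
    division by steps_per_epoch yields the last fully completed epoch,
    which is what model.fit(initial_epoch=...) expects.
    """
    if steps_per_epoch <= 0:
        raise ValueError("steps_per_epoch must be positive")
    return optimizer_iterations // steps_per_epoch

assert epoch_from_iterations(0, 100) == 0    # fresh run, nothing restored
assert epoch_from_iterations(250, 100) == 2  # mid-epoch checkpoints round down
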
adymaharana/VLCStoryGan | [
"74112404689e8144c2ed2d375e1e5a1cde09debb"
] | [
"densecap/utils/data_loader.py"
] | [
"import os\nimport pickle\n\nimport h5py\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\nfrom prefetch_generator import BackgroundGenerator\n\n\n__all__ = [\n \"DataLoaderPFG\", \"DenseCapDataset\", 'DenseCapDatasetV2'\n]\n\n\nclass DataLoaderPFG(DataLoader):\n \"\"\"\n Prefetch version of DataLoader: https://github.com/IgorSusmelj/pytorch-styleguide/issues/5\n \"\"\"\n\n def __iter__(self):\n return BackgroundGenerator(super().__iter__())\n\n\nclass DenseCapDataset(Dataset):\n \"\"\"Images are loaded from by open specific file\n \"\"\"\n\n @staticmethod\n def collate_fn(batch):\n \"\"\"Use in torch.utils.data.DataLoader\n \"\"\"\n\n return tuple(zip(*batch)) # as tuples instead of stacked tensors\n\n @staticmethod\n def get_transform():\n \"\"\"More complicated transform utils in torchvison/references/detection/transforms.py\n \"\"\"\n\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n\n return transform\n\n def __init__(self, img_dir_root, vg_data_path, look_up_tables_path, dataset_type=None, transform=None):\n\n assert dataset_type in {None, 'train', 'test', 'val'}\n\n super(DenseCapDataset, self).__init__()\n\n self.img_dir_root = img_dir_root\n self.vg_data_path = vg_data_path\n self.look_up_tables_path = look_up_tables_path\n self.dataset_type = dataset_type # if dataset_type is None, all data will be use\n self.transform = transform\n\n # === load data here ====\n self.look_up_tables = pickle.load(open(look_up_tables_path, 'rb'))\n\n def set_dataset_type(self, dataset_type, verbose=True):\n\n assert dataset_type in {None, 'train', 'test', 'val'}\n\n if verbose:\n print('[DenseCapDataset]: {} switch to {}'.format(self.dataset_type, dataset_type))\n\n self.dataset_type = dataset_type\n\n def __getitem__(self, idx):\n\n with h5py.File(self.vg_data_path, 'r') as vg_data:\n\n vg_idx = self.look_up_tables['split'][self.dataset_type][idx] if self.dataset_type else idx\n\n img_path = os.path.join(self.img_dir_root, self.look_up_tables['idx_to_directory'][vg_idx],\n self.look_up_tables['idx_to_filename'][vg_idx])\n\n img = Image.open(img_path).convert(\"RGB\")\n if self.transform is not None:\n img = self.transform(img)\n else:\n img = transforms.ToTensor()(img)\n\n first_box_idx = vg_data['img_to_first_box'][vg_idx]\n last_box_idx = vg_data['img_to_last_box'][vg_idx]\n\n boxes = torch.as_tensor(vg_data['boxes'][first_box_idx: last_box_idx+1], dtype=torch.float32)\n caps = torch.as_tensor(vg_data['captions'][first_box_idx: last_box_idx+1], dtype=torch.long)\n caps_len = torch.as_tensor(vg_data['lengths'][first_box_idx: last_box_idx+1], dtype=torch.long)\n\n targets = {\n 'boxes': boxes,\n 'caps': caps,\n 'caps_len': caps_len,\n }\n\n info = {\n 'idx': vg_idx,\n 'dir': self.look_up_tables['idx_to_directory'][vg_idx],\n 'file_name': self.look_up_tables['idx_to_filename'][vg_idx]\n }\n\n return img, targets, info\n\n def __len__(self):\n\n if self.dataset_type:\n return len(self.look_up_tables['split'][self.dataset_type])\n else:\n return len(self.look_up_tables['filename_to_idx'])\n\n\nclass DenseCapDatasetV2(Dataset):\n \"\"\"Images are stored in VG-regions.h5\n \"\"\"\n\n @staticmethod\n def collate_fn(batch):\n \"\"\"Use in torch.utils.data.DataLoader\n \"\"\"\n\n return tuple(zip(*batch)) # as tuples instead of stacked tensors\n\n @staticmethod\n def get_transform():\n \"\"\"More complicated 
transform utils in torchvison/references/detection/transforms.py\n \"\"\"\n\n transform = transforms.Compose([\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n ])\n\n return transform\n\n def __init__(self, vg_data_path, look_up_tables_path, dataset_type=None, transform=None):\n\n assert dataset_type in {None, 'train', 'test', 'val'}\n\n super(DenseCapDatasetV2, self).__init__()\n\n self.vg_data_path = vg_data_path\n self.look_up_tables_path = look_up_tables_path\n self.dataset_type = dataset_type # if dataset_type is None, all data will be use\n self.transform = transform\n\n # === load data here ====\n self.look_up_tables = pickle.load(open(look_up_tables_path, 'rb'))\n\n def set_dataset_type(self, dataset_type, verbose=True):\n\n assert dataset_type in {None, 'train', 'test', 'val'}\n\n if verbose:\n print('[DenseCapDataset]: {} switch to {}'.format(self.dataset_type, dataset_type))\n\n self.dataset_type = dataset_type\n\n def __getitem__(self, idx):\n\n with h5py.File(self.vg_data_path, 'r') as vg_data:\n\n vg_idx = self.look_up_tables['split'][self.dataset_type][idx] if self.dataset_type else idx\n\n img = vg_data['images'][vg_idx]\n h = vg_data['image_heights'][vg_idx]\n w = vg_data['image_widths'][vg_idx]\n\n img = torch.tensor(img[:, :h, :w] / 255., dtype=torch.float32) # get rid of zero padding\n\n if self.transform is not None:\n img = self.transform(img)\n\n first_box_idx = vg_data['img_to_first_box'][vg_idx]\n last_box_idx = vg_data['img_to_last_box'][vg_idx]\n\n boxes = torch.as_tensor(vg_data['boxes'][first_box_idx: last_box_idx+1], dtype=torch.float32)\n caps = torch.as_tensor(vg_data['captions'][first_box_idx: last_box_idx+1], dtype=torch.long)\n caps_len = torch.as_tensor(vg_data['lengths'][first_box_idx: last_box_idx+1], dtype=torch.long)\n\n targets = {\n 'boxes': boxes,\n 'caps': caps,\n 'caps_len': caps_len,\n }\n\n info = {\n 'idx': vg_idx,\n 'dir': self.look_up_tables['idx_to_directory'][vg_idx],\n 'file_name': self.look_up_tables['idx_to_filename'][vg_idx]\n }\n\n return img, targets, info\n\n def __len__(self):\n\n if self.dataset_type:\n return len(self.look_up_tables['split'][self.dataset_type])\n else:\n return len(self.look_up_tables['filename_to_idx'])\n"
] | [
[
"torch.tensor",
"torch.as_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
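
Both dataset classes in the record above return a variable number of boxes and captions per image, which is why their `collate_fn` transposes the batch with `zip(*batch)` rather than letting PyTorch's default collate try to stack everything into fixed-shape tensors. A toy illustration of the transpose, with plain tuples standing in for images and tensors:

def collate_fn(batch):
    # [(img, targets, info), ...] -> ((img, ...), (targets, ...), (info, ...))
    return tuple(zip(*batch))

batch = [
    ("img0", {"boxes": [[0, 0, 5, 5]]}, {"idx": 0}),                 # one region
    ("img1", {"boxes": [[1, 1, 4, 4], [2, 2, 6, 6]]}, {"idx": 1}),   # two regions
]
imgs, targets, infos = collate_fn(batch)
assert imgs == ("img0", "img1")
assert [len(t["boxes"]) for t in targets] == [1, 2]  # ragged sizes survive
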
bltlab/mot | [
"4896c8e6b5e6895a27662b5e92897c0752b7adaa"
] | [
"qualitychecks/sentence_average.py"
] | [
"#! /usr/bin/env python\n\"\"\"\nScript to observe sentence counts by language\n\"\"\"\n\nimport os\nimport json\nfrom argparse import ArgumentParser\nfrom collections import Counter\nfrom multiprocessing import Pool\nfrom typing import Generator\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nMINIMUM_MEAN_SENTENCE_THRESHOLD = 1\n\n\ndef count_sentences(path: str) -> tuple[str, int]:\n \"\"\"Retrieves language and number of sentences for each file\"\"\"\n print(f\"Extracting data from {path}\")\n with open(path) as file:\n data = json.load(file)\n language = data.get(\"site_language\")\n sentences = data.get(\"sentences\")\n n_sentences = 0\n if sentences:\n n_sentences = len(sentences)\n return language, n_sentences\n\n\nclass SentenceCounter:\n def __init__(self) -> None:\n # Distribution of sentence counts by language\n self.sentence_distribution: Counter[str] = Counter()\n # Distribution of mean sentences by language\n self.mean_sentence_distribution: Counter[str] = Counter()\n\n def count(self, language: str, n_sentences: int) -> None:\n \"\"\"Calculates average number of sentences by language\"\"\"\n print(f\"Counting {language}\")\n self.sentence_distribution[language] += 1\n self.mean_sentence_distribution[language] = (\n (\n self.mean_sentence_distribution[language]\n * (self.sentence_distribution[language] - 1)\n )\n + n_sentences\n ) / self.sentence_distribution[language]\n\n def histogram(self) -> None:\n \"\"\"Creates histogram from sentence average distribution by language\"\"\"\n df = pd.DataFrame.from_records(\n self.mean_sentence_distribution.most_common(),\n columns=[\"Language\", \"Mean Sentence\"],\n )\n df.to_csv(\"sentence_average.csv\")\n df.drop(df[df[\"Language\"] == \"bam\"].index, inplace=True)\n print(df[df[\"Mean Sentence\"] < MINIMUM_MEAN_SENTENCE_THRESHOLD])\n df.drop(\n df[df[\"Mean Sentence\"] < MINIMUM_MEAN_SENTENCE_THRESHOLD].index,\n inplace=True,\n )\n print(df)\n sns.set_style(\"whitegrid\")\n sns.barplot(\n data=df.reset_index(), x=\"Language\", y=\"Mean Sentence\", color=\"#69b3a2\"\n )\n plt.xticks(\n rotation=90,\n horizontalalignment=\"center\"\n )\n plt.tight_layout()\n plt.autoscale()\n plt.savefig(\"sentence_average.png\", dpi=200)\n plt.show()\n\n\ndef find_docpaths(inputdir: str) -> Generator[str, None, None]:\n for root, dirs, files in os.walk(inputdir):\n if root.split(\"/\")[-1] == \"article\":\n for file in files:\n if file.endswith(\".json\"):\n yield os.path.join(root, file)\n\n\ndef run() -> None:\n parser = ArgumentParser(description=__doc__)\n parser.add_argument(\n \"inputdir\",\n help=\"Input directory containing json in directories by iso_site/type\",\n )\n parser.add_argument(\"--n-workers\", type=int, default=1)\n args = parser.parse_args()\n\n counter = SentenceCounter()\n\n if args.n_workers == 1:\n for path in find_docpaths(args.inputdir):\n language, n_sentences = count_sentences(path)\n counter.count(language, n_sentences)\n else:\n with Pool(args.n_workers) as pool:\n for language, n_sentences in pool.imap_unordered(count_sentences, find_docpaths(args.inputdir), chunksize=100):\n counter.count(language, n_sentences)\n\n counter.histogram()\n\n\nif __name__ == \"__main__\":\n run()\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.autoscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
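
`SentenceCounter.count` in the record above maintains a per-language mean without keeping per-document counts: it rescales the previous mean back to a sum, adds the new observation, and renormalises. The same incremental update in isolation — the container names here are ours:

from collections import Counter

doc_counts: Counter = Counter()  # documents seen per language
mean_sentences: dict = {}        # running mean of sentences per document

def update(language: str, n_sentences: int) -> None:
    doc_counts[language] += 1
    n = doc_counts[language]
    prev = mean_sentences.get(language, 0.0)
    # incremental mean: (prev_mean * (n - 1) + x) / n
    mean_sentences[language] = (prev * (n - 1) + n_sentences) / n

for x in (10, 20, 30):
    update("eng", x)
assert mean_sentences["eng"] == 20.0
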
danbirken/pandas | [
"bfd5348d824a721dd0d896bb06e63e4ad801ba51",
"fa8a5ca1dd27c4169727070ddbdcb248002fddb4"
] | [
"pandas/tseries/tests/test_timedeltas.py",
"pandas/tseries/offsets.py"
] | [
"# pylint: disable-msg=E1101,W0612\n\nfrom datetime import datetime, timedelta\nimport nose\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandas import (Index, Series, DataFrame, Timestamp, isnull, notnull,\n bdate_range, date_range, _np_version_under1p7)\nimport pandas.core.common as com\nfrom pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long\nfrom pandas import compat, to_timedelta, tslib\nfrom pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type as ct\nfrom pandas.util.testing import (assert_series_equal,\n assert_frame_equal,\n assert_almost_equal,\n ensure_clean)\nimport pandas.util.testing as tm\n\ndef _skip_if_numpy_not_friendly():\n # not friendly for < 1.7\n if _np_version_under1p7:\n raise nose.SkipTest(\"numpy < 1.7\")\n\nclass TestTimedeltas(tm.TestCase):\n _multiprocess_can_split_ = True\n\n def setUp(self):\n pass\n\n def test_numeric_conversions(self):\n _skip_if_numpy_not_friendly()\n\n self.assertEqual(ct(0), np.timedelta64(0,'ns'))\n self.assertEqual(ct(10), np.timedelta64(10,'ns'))\n self.assertEqual(ct(10,unit='ns'), np.timedelta64(10,'ns').astype('m8[ns]'))\n\n self.assertEqual(ct(10,unit='us'), np.timedelta64(10,'us').astype('m8[ns]'))\n self.assertEqual(ct(10,unit='ms'), np.timedelta64(10,'ms').astype('m8[ns]'))\n self.assertEqual(ct(10,unit='s'), np.timedelta64(10,'s').astype('m8[ns]'))\n self.assertEqual(ct(10,unit='d'), np.timedelta64(10,'D').astype('m8[ns]'))\n\n def test_timedelta_conversions(self):\n _skip_if_numpy_not_friendly()\n\n self.assertEqual(ct(timedelta(seconds=1)), np.timedelta64(1,'s').astype('m8[ns]'))\n self.assertEqual(ct(timedelta(microseconds=1)), np.timedelta64(1,'us').astype('m8[ns]'))\n self.assertEqual(ct(timedelta(days=1)), np.timedelta64(1,'D').astype('m8[ns]'))\n\n def test_short_format_converters(self):\n _skip_if_numpy_not_friendly()\n\n def conv(v):\n return v.astype('m8[ns]')\n\n self.assertEqual(ct('10'), np.timedelta64(10,'ns'))\n self.assertEqual(ct('10ns'), np.timedelta64(10,'ns'))\n self.assertEqual(ct('100'), np.timedelta64(100,'ns'))\n self.assertEqual(ct('100ns'), np.timedelta64(100,'ns'))\n\n self.assertEqual(ct('1000'), np.timedelta64(1000,'ns'))\n self.assertEqual(ct('1000ns'), np.timedelta64(1000,'ns'))\n self.assertEqual(ct('1000NS'), np.timedelta64(1000,'ns'))\n\n self.assertEqual(ct('10us'), np.timedelta64(10000,'ns'))\n self.assertEqual(ct('100us'), np.timedelta64(100000,'ns'))\n self.assertEqual(ct('1000us'), np.timedelta64(1000000,'ns'))\n self.assertEqual(ct('1000Us'), np.timedelta64(1000000,'ns'))\n self.assertEqual(ct('1000uS'), np.timedelta64(1000000,'ns'))\n\n self.assertEqual(ct('1ms'), np.timedelta64(1000000,'ns'))\n self.assertEqual(ct('10ms'), np.timedelta64(10000000,'ns'))\n self.assertEqual(ct('100ms'), np.timedelta64(100000000,'ns'))\n self.assertEqual(ct('1000ms'), np.timedelta64(1000000000,'ns'))\n\n self.assertEqual(ct('-1s'), -np.timedelta64(1000000000,'ns'))\n self.assertEqual(ct('1s'), np.timedelta64(1000000000,'ns'))\n self.assertEqual(ct('10s'), np.timedelta64(10000000000,'ns'))\n self.assertEqual(ct('100s'), np.timedelta64(100000000000,'ns'))\n self.assertEqual(ct('1000s'), np.timedelta64(1000000000000,'ns'))\n\n self.assertEqual(ct('1d'), conv(np.timedelta64(1,'D')))\n self.assertEqual(ct('-1d'), -conv(np.timedelta64(1,'D')))\n self.assertEqual(ct('1D'), conv(np.timedelta64(1,'D')))\n self.assertEqual(ct('10D'), conv(np.timedelta64(10,'D')))\n self.assertEqual(ct('100D'), conv(np.timedelta64(100,'D')))\n self.assertEqual(ct('1000D'), 
conv(np.timedelta64(1000,'D')))\n self.assertEqual(ct('10000D'), conv(np.timedelta64(10000,'D')))\n\n # space\n self.assertEqual(ct(' 10000D '), conv(np.timedelta64(10000,'D')))\n self.assertEqual(ct(' - 10000D '), -conv(np.timedelta64(10000,'D')))\n\n # invalid\n self.assertRaises(ValueError, ct, '1foo')\n self.assertRaises(ValueError, ct, 'foo')\n\n def test_full_format_converters(self):\n _skip_if_numpy_not_friendly()\n\n def conv(v):\n return v.astype('m8[ns]')\n d1 = np.timedelta64(1,'D')\n\n self.assertEqual(ct('1days'), conv(d1))\n self.assertEqual(ct('1days,'), conv(d1))\n self.assertEqual(ct('- 1days,'), -conv(d1))\n\n self.assertEqual(ct('00:00:01'), conv(np.timedelta64(1,'s')))\n self.assertEqual(ct('06:00:01'), conv(np.timedelta64(6*3600+1,'s')))\n self.assertEqual(ct('06:00:01.0'), conv(np.timedelta64(6*3600+1,'s')))\n self.assertEqual(ct('06:00:01.01'), conv(np.timedelta64(1000*(6*3600+1)+10,'ms')))\n\n self.assertEqual(ct('- 1days, 00:00:01'), -conv(d1+np.timedelta64(1,'s')))\n self.assertEqual(ct('1days, 06:00:01'), conv(d1+np.timedelta64(6*3600+1,'s')))\n self.assertEqual(ct('1days, 06:00:01.01'), conv(d1+np.timedelta64(1000*(6*3600+1)+10,'ms')))\n\n # invalid\n self.assertRaises(ValueError, ct, '- 1days, 00')\n\n def test_nat_converters(self):\n _skip_if_numpy_not_friendly()\n\n self.assertEqual(to_timedelta('nat',box=False).astype('int64'), tslib.iNaT)\n self.assertEqual(to_timedelta('nan',box=False).astype('int64'), tslib.iNaT)\n\n def test_to_timedelta(self):\n _skip_if_numpy_not_friendly()\n\n def conv(v):\n return v.astype('m8[ns]')\n d1 = np.timedelta64(1,'D')\n\n self.assertEqual(to_timedelta('1 days 06:05:01.00003',box=False), conv(d1+np.timedelta64(6*3600+5*60+1,'s')+np.timedelta64(30,'us')))\n self.assertEqual(to_timedelta('15.5us',box=False), conv(np.timedelta64(15500,'ns')))\n\n # empty string\n result = to_timedelta('',box=False)\n self.assertEqual(result.astype('int64'), tslib.iNaT)\n\n result = to_timedelta(['', ''])\n self.assertTrue(isnull(result).all())\n\n # pass thru\n result = to_timedelta(np.array([np.timedelta64(1,'s')]))\n expected = np.array([np.timedelta64(1,'s')])\n tm.assert_almost_equal(result,expected)\n\n # ints\n result = np.timedelta64(0,'ns')\n expected = to_timedelta(0,box=False)\n self.assertEqual(result, expected)\n\n # Series\n expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])\n result = to_timedelta(Series(['1d','1days 00:00:01']))\n tm.assert_series_equal(result, expected)\n\n # with units\n result = Series([ np.timedelta64(0,'ns'), np.timedelta64(10,'s').astype('m8[ns]') ],dtype='m8[ns]')\n expected = to_timedelta([0,10],unit='s')\n tm.assert_series_equal(result, expected)\n\n # single element conversion\n v = timedelta(seconds=1)\n result = to_timedelta(v,box=False)\n expected = np.timedelta64(timedelta(seconds=1))\n self.assertEqual(result, expected)\n\n v = np.timedelta64(timedelta(seconds=1))\n result = to_timedelta(v,box=False)\n expected = np.timedelta64(timedelta(seconds=1))\n self.assertEqual(result, expected)\n\n # arrays of various dtypes\n arr = np.array([1]*5,dtype='int64')\n result = to_timedelta(arr,unit='s')\n expected = Series([ np.timedelta64(1,'s') ]*5)\n tm.assert_series_equal(result, expected)\n\n arr = np.array([1]*5,dtype='int64')\n result = to_timedelta(arr,unit='m')\n expected = Series([ np.timedelta64(1,'m') ]*5)\n tm.assert_series_equal(result, expected)\n\n arr = np.array([1]*5,dtype='int64')\n result = to_timedelta(arr,unit='h')\n expected = Series([ np.timedelta64(1,'h') ]*5)\n 
tm.assert_series_equal(result, expected)\n\n arr = np.array([1]*5,dtype='timedelta64[s]')\n result = to_timedelta(arr)\n expected = Series([ np.timedelta64(1,'s') ]*5)\n tm.assert_series_equal(result, expected)\n\n arr = np.array([1]*5,dtype='timedelta64[D]')\n result = to_timedelta(arr)\n expected = Series([ np.timedelta64(1,'D') ]*5)\n tm.assert_series_equal(result, expected)\n\n # validate all units\n # GH 6855\n for unit in ['Y','M','W','D','y','w','d']:\n result = to_timedelta(np.arange(5),unit=unit)\n expected = Series([ np.timedelta64(i,unit.upper()) for i in np.arange(5).tolist() ])\n tm.assert_series_equal(result, expected)\n for unit in ['h','m','s','ms','us','ns','H','S','MS','US','NS']:\n result = to_timedelta(np.arange(5),unit=unit)\n expected = Series([ np.timedelta64(i,unit.lower()) for i in np.arange(5).tolist() ])\n tm.assert_series_equal(result, expected)\n\n # these will error\n self.assertRaises(ValueError, lambda : to_timedelta(['1h']))\n self.assertRaises(ValueError, lambda : to_timedelta(['1m']))\n\n def test_to_timedelta_via_apply(self):\n _skip_if_numpy_not_friendly()\n\n # GH 5458\n expected = Series([np.timedelta64(1,'s')])\n result = Series(['00:00:01']).apply(to_timedelta)\n tm.assert_series_equal(result, expected)\n\n result = Series([to_timedelta('00:00:01')])\n tm.assert_series_equal(result, expected)\n\n def test_timedelta_ops(self):\n _skip_if_numpy_not_friendly()\n\n # GH4984\n # make sure ops return timedeltas\n s = Series([Timestamp('20130101') + timedelta(seconds=i*i) for i in range(10) ])\n td = s.diff()\n\n result = td.mean()[0]\n # TODO This should have returned a scalar to begin with. Hack for now.\n expected = to_timedelta(timedelta(seconds=9))\n tm.assert_almost_equal(result, expected)\n\n result = td.quantile(.1)\n # This properly returned a scalar.\n expected = np.timedelta64(2599999999,'ns')\n tm.assert_almost_equal(result, expected)\n\n result = td.median()[0]\n # TODO This should have returned a scalar to begin with. 
Hack for now.\n expected = to_timedelta('00:00:08')\n tm.assert_almost_equal(result, expected)\n\n # GH 6462\n # consistency in returned values for sum\n result = td.sum()[0]\n expected = to_timedelta('00:01:21')\n tm.assert_almost_equal(result, expected)\n\n def test_timedelta_ops_scalar(self):\n _skip_if_numpy_not_friendly()\n\n # GH 6808\n base = pd.to_datetime('20130101 09:01:12.123456')\n expected_add = pd.to_datetime('20130101 09:01:22.123456')\n expected_sub = pd.to_datetime('20130101 09:01:02.123456')\n\n for offset in [pd.to_timedelta(10,unit='s'),\n timedelta(seconds=10),\n np.timedelta64(10,'s'),\n np.timedelta64(10000000000,'ns'),\n pd.offsets.Second(10)]:\n result = base + offset\n self.assertEqual(result, expected_add)\n\n result = base - offset\n self.assertEqual(result, expected_sub)\n\n base = pd.to_datetime('20130102 09:01:12.123456')\n expected_add = pd.to_datetime('20130103 09:01:22.123456')\n expected_sub = pd.to_datetime('20130101 09:01:02.123456')\n\n for offset in [pd.to_timedelta('1 day, 00:00:10'),\n pd.to_timedelta('1 days, 00:00:10'),\n timedelta(days=1,seconds=10),\n np.timedelta64(1,'D')+np.timedelta64(10,'s'),\n pd.offsets.Day()+pd.offsets.Second(10)]:\n result = base + offset\n self.assertEqual(result, expected_add)\n\n result = base - offset\n self.assertEqual(result, expected_sub)\n\n def test_to_timedelta_on_missing_values(self):\n _skip_if_numpy_not_friendly()\n\n # GH5438\n timedelta_NaT = np.timedelta64('NaT')\n\n actual = pd.to_timedelta(Series(['00:00:01', np.nan]))\n expected = Series([np.timedelta64(1000000000, 'ns'), timedelta_NaT], dtype='<m8[ns]')\n assert_series_equal(actual, expected)\n\n actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))\n assert_series_equal(actual, expected)\n\n actual = pd.to_timedelta(np.nan)\n self.assertEqual(actual.astype('int64'), timedelta_NaT.astype('int64'))\n\n actual = pd.to_timedelta(pd.NaT)\n self.assertEqual(actual.astype('int64'), timedelta_NaT.astype('int64'))\n\n def test_timedelta_ops_with_missing_values(self):\n _skip_if_numpy_not_friendly()\n\n # setup\n s1 = pd.to_timedelta(Series(['00:00:01']))\n s2 = pd.to_timedelta(Series(['00:00:02']))\n sn = pd.to_timedelta(Series([pd.NaT]))\n df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)\n df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)\n dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)\n scalar1 = pd.to_timedelta('00:00:01')\n scalar2 = pd.to_timedelta('00:00:02')\n timedelta_NaT = pd.to_timedelta('NaT')\n NA = np.nan\n\n actual = scalar1 + scalar1\n self.assertEqual(actual, scalar2)\n actual = scalar2 - scalar1\n self.assertEqual(actual, scalar1)\n\n actual = s1 + s1\n assert_series_equal(actual, s2)\n actual = s2 - s1\n assert_series_equal(actual, s1)\n\n actual = s1 + scalar1\n assert_series_equal(actual, s2)\n actual = s2 - scalar1\n assert_series_equal(actual, s1)\n\n actual = s1 + timedelta_NaT\n assert_series_equal(actual, sn)\n actual = s1 - timedelta_NaT\n assert_series_equal(actual, sn)\n\n actual = s1 + NA\n assert_series_equal(actual, sn)\n actual = s1 - NA\n assert_series_equal(actual, sn)\n\n actual = s1 + pd.NaT # NaT is datetime, not timedelta\n assert_series_equal(actual, sn)\n actual = s2 - pd.NaT\n assert_series_equal(actual, sn)\n\n actual = s1 + df1\n assert_frame_equal(actual, df2)\n actual = s2 - df1\n assert_frame_equal(actual, df1)\n actual = df1 + s1\n assert_frame_equal(actual, df2)\n actual = df2 - s1\n assert_frame_equal(actual, df1)\n\n actual = df1 + df1\n assert_frame_equal(actual, df2)\n actual = df2 - df1\n 
assert_frame_equal(actual, df1)\n\n actual = df1 + scalar1\n assert_frame_equal(actual, df2)\n actual = df2 - scalar1\n assert_frame_equal(actual, df1)\n\n actual = df1 + timedelta_NaT\n assert_frame_equal(actual, dfn)\n actual = df1 - timedelta_NaT\n assert_frame_equal(actual, dfn)\n\n actual = df1 + NA\n assert_frame_equal(actual, dfn)\n actual = df1 - NA\n assert_frame_equal(actual, dfn)\n\n actual = df1 + pd.NaT # NaT is datetime, not timedelta\n assert_frame_equal(actual, dfn)\n actual = df1 - pd.NaT\n assert_frame_equal(actual, dfn)\n\n def test_apply_to_timedelta(self):\n _skip_if_numpy_not_friendly()\n\n timedelta_NaT = pd.to_timedelta('NaT')\n\n list_of_valid_strings = ['00:00:01', '00:00:02']\n a = pd.to_timedelta(list_of_valid_strings)\n b = Series(list_of_valid_strings).apply(pd.to_timedelta)\n # Can't compare until apply on a Series gives the correct dtype\n # assert_series_equal(a, b)\n\n list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]\n a = pd.to_timedelta(list_of_strings)\n b = Series(list_of_strings).apply(pd.to_timedelta)\n # Can't compare until apply on a Series gives the correct dtype\n # assert_series_equal(a, b)\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n",
"from datetime import date, datetime, timedelta\nfrom pandas.compat import range\nfrom pandas import compat\nimport numpy as np\n\nfrom pandas.tseries.tools import to_datetime\n\n# import after tools, dateutil check\nfrom dateutil.relativedelta import relativedelta, weekday\nfrom dateutil.easter import easter\nimport pandas.tslib as tslib\nfrom pandas.tslib import Timestamp, OutOfBoundsDatetime\n\nfrom pandas import _np_version_under1p7\n\nimport functools\n\n__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',\n 'CBMonthEnd','CBMonthBegin',\n 'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',\n 'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',\n 'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',\n 'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',\n 'Week', 'WeekOfMonth', 'Easter',\n 'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano']\n\n# convert to/from datetime/timestamp to allow invalid Timestamp ranges to pass thru\ndef as_timestamp(obj):\n try:\n return Timestamp(obj)\n except (OutOfBoundsDatetime):\n pass\n return obj\n\ndef as_datetime(obj):\n f = getattr(obj,'to_pydatetime',None)\n if f is not None:\n obj = f()\n return obj\n\ndef apply_nat(func):\n @functools.wraps(func)\n def wrapper(self, other):\n if other is tslib.NaT:\n return tslib.NaT\n else:\n return func(self, other)\n return wrapper\n\n#----------------------------------------------------------------------\n# DateOffset\n\n\nclass ApplyTypeError(TypeError):\n # sentinel class for catching the apply error to return NotImplemented\n pass\n\n\nclass CacheableOffset(object):\n _cacheable = True\n\n\nclass DateOffset(object):\n \"\"\"\n Standard kind of date increment used for a date range.\n\n Works exactly like relativedelta in terms of the keyword args you\n pass in, use of the keyword n is discouraged-- you would be better\n off specifying n in the keywords you use, but regardless it is\n there for you. n is needed for DateOffset subclasses.\n\n DateOffets work as follows. Each offset specify a set of dates\n that conform to the DateOffset. For example, Bday defines this\n set to be the set of dates that are weekdays (M-F). To test if a\n date is in the set of a DateOffset dateOffset we can use the\n onOffset method: dateOffset.onOffset(date).\n\n If a date is not on a valid date, the rollback and rollforward\n methods can be used to roll the date to the nearest valid date\n before/after the date.\n\n DateOffsets can be created to move dates forward a given number of\n valid dates. For example, Bday(2) can be added to a date to move\n it two business days forward. If the date does not start on a\n valid date, first it is moved to a valid date. Thus psedo code\n is:\n\n def __add__(date):\n date = rollback(date) # does nothing if date is valid\n return date + <n number of periods>\n\n When a date offset is created for a negitive number of periods,\n the date is first rolled forward. The pseudo code is:\n\n def __add__(date):\n date = rollforward(date) # does nothing is date is valid\n return date + <n number of periods>\n\n Zero presents a problem. Should it roll forward or back? 
We\n arbitrarily have it rollforward:\n\n date + BDay(0) == BDay.rollforward(date)\n\n Since 0 is a bit weird, we suggest avoiding its use.\n \"\"\"\n _cacheable = False\n _normalize_cache = True\n\n def __init__(self, n=1, **kwds):\n self.n = int(n)\n self.kwds = kwds\n if len(kwds) > 0:\n self._offset = relativedelta(**kwds)\n else:\n self._offset = timedelta(1)\n\n @apply_nat\n def apply(self, other):\n other = as_datetime(other)\n if len(self.kwds) > 0:\n if self.n > 0:\n for i in range(self.n):\n other = other + self._offset\n else:\n for i in range(-self.n):\n other = other - self._offset\n return as_timestamp(other)\n else:\n return as_timestamp(other + timedelta(self.n))\n\n def isAnchored(self):\n return (self.n == 1)\n\n def copy(self):\n return self.__class__(self.n, **self.kwds)\n\n def _should_cache(self):\n return self.isAnchored() and self._cacheable\n\n def _params(self):\n attrs = [(k, v) for k, v in compat.iteritems(vars(self))\n if (k not in ['kwds', 'name', 'normalize',\n 'busdaycalendar']) and (k[0] != '_')]\n attrs.extend(list(self.kwds.items()))\n attrs = sorted(set(attrs))\n\n params = tuple([str(self.__class__)] + attrs)\n return params\n\n def __repr__(self):\n if hasattr(self, '_named'):\n return self._named\n className = getattr(self, '_outputName', type(self).__name__)\n exclude = set(['n', 'inc'])\n attrs = []\n for attr in sorted(self.__dict__):\n if ((attr == 'kwds' and len(self.kwds) == 0)\n or attr.startswith('_')):\n continue\n elif attr == 'kwds':\n kwds_new = {}\n for key in self.kwds:\n if not hasattr(self, key):\n kwds_new[key] = self.kwds[key]\n if len(kwds_new) > 0:\n attrs.append('='.join((attr, repr(kwds_new))))\n else:\n if attr not in exclude:\n attrs.append('='.join((attr, repr(getattr(self, attr)))))\n\n if abs(self.n) != 1:\n plural = 's'\n else:\n plural = ''\n\n n_str = \"\"\n if self.n != 1:\n n_str = \"%s * \" % self.n\n\n out = '<%s' % n_str + className + plural\n if attrs:\n out += ': ' + ', '.join(attrs)\n out += '>'\n return out\n\n @property\n def name(self):\n if hasattr(self, '_named'):\n return self._named\n else:\n return self.rule_code\n\n def __eq__(self, other):\n if other is None:\n return False\n\n if isinstance(other, compat.string_types):\n from pandas.tseries.frequencies import to_offset\n\n other = to_offset(other)\n\n if not isinstance(other, DateOffset):\n return False\n\n return self._params() == other._params()\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._params())\n\n def __call__(self, other):\n return self.apply(other)\n\n def __add__(self, other):\n try:\n return self.apply(other)\n except ApplyTypeError:\n return NotImplemented\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __sub__(self, other):\n if isinstance(other, datetime):\n raise TypeError('Cannot subtract datetime from offset.')\n elif type(other) == type(self):\n return self.__class__(self.n - other.n, **self.kwds)\n else: # pragma: no cover\n return NotImplemented\n\n def __rsub__(self, other):\n return self.__class__(-self.n, **self.kwds) + other\n\n def __mul__(self, someInt):\n return self.__class__(n=someInt * self.n, **self.kwds)\n\n def __rmul__(self, someInt):\n return self.__mul__(someInt)\n\n def __neg__(self):\n return self.__class__(-self.n, **self.kwds)\n\n def rollback(self, dt):\n \"\"\"Roll provided date backward to next offset only if not on offset\"\"\"\n if type(dt) == date:\n dt = datetime(dt.year, dt.month, dt.day)\n\n if not self.onOffset(dt):\n dt = dt - 
self.__class__(1, **self.kwds)\n return dt\n\n def rollforward(self, dt):\n \"\"\"Roll provided date forward to next offset only if not on offset\"\"\"\n if type(dt) == date:\n dt = datetime(dt.year, dt.month, dt.day)\n\n if not self.onOffset(dt):\n dt = dt + self.__class__(1, **self.kwds)\n return dt\n\n def onOffset(self, dt):\n # XXX, see #1395\n if type(self) == DateOffset or isinstance(self, Tick):\n return True\n\n # Default (slow) method for determining if some date is a member of the\n # date range generated by this offset. Subclasses may have this\n # re-implemented in a nicer way.\n a = dt\n b = ((dt + self) - self)\n return a == b\n\n # way to get around weirdness with rule_code\n @property\n def _prefix(self):\n raise NotImplementedError('Prefix not defined')\n\n @property\n def rule_code(self):\n return self._prefix\n\n @property\n def freqstr(self):\n try:\n code = self.rule_code\n except NotImplementedError:\n return repr(self)\n\n if self.n != 1:\n fstr = '%d%s' % (self.n, code)\n else:\n fstr = code\n\n return fstr\n\n\nclass SingleConstructorOffset(DateOffset):\n @classmethod\n def _from_name(cls, suffix=None):\n # default _from_name calls cls with no args\n if suffix:\n raise ValueError(\"Bad freq suffix %s\" % suffix)\n return cls()\n\n\nclass BusinessMixin(object):\n \"\"\" mixin to business types to provide related functions \"\"\"\n\n # TODO: Combine this with DateOffset by defining a whitelisted set of\n # attributes on each object rather than the existing behavior of iterating\n # over internal ``__dict__``\n def __repr__(self):\n if hasattr(self, '_named'):\n return self._named\n className = getattr(self, '_outputName', self.__class__.__name__)\n attrs = []\n\n if self.offset:\n attrs = ['offset=%s' % repr(self.offset)]\n\n if abs(self.n) != 1:\n plural = 's'\n else:\n plural = ''\n\n n_str = \"\"\n if self.n != 1:\n n_str = \"%s * \" % self.n\n\n out = '<%s' % n_str + className + plural\n if attrs:\n out += ': ' + ', '.join(attrs)\n out += '>'\n return out\n\nclass BusinessDay(BusinessMixin, SingleConstructorOffset):\n \"\"\"\n DateOffset subclass representing possibly n business days\n \"\"\"\n _prefix = 'B'\n\n def __init__(self, n=1, **kwds):\n self.n = int(n)\n self.kwds = kwds\n self.offset = kwds.get('offset', timedelta(0))\n self.normalize = kwds.get('normalize', False)\n\n @property\n def freqstr(self):\n try:\n code = self.rule_code\n except NotImplementedError:\n return repr(self)\n\n if self.n != 1:\n fstr = '%d%s' % (self.n, code)\n else:\n fstr = code\n\n if self.offset:\n fstr += self._offset_str()\n\n return fstr\n\n def _offset_str(self):\n def get_str(td):\n off_str = ''\n if td.days > 0:\n off_str += str(td.days) + 'D'\n if td.seconds > 0:\n s = td.seconds\n hrs = int(s / 3600)\n if hrs != 0:\n off_str += str(hrs) + 'H'\n s -= hrs * 3600\n mts = int(s / 60)\n if mts != 0:\n off_str += str(mts) + 'Min'\n s -= mts * 60\n if s != 0:\n off_str += str(s) + 's'\n if td.microseconds > 0:\n off_str += str(td.microseconds) + 'us'\n return off_str\n\n if isinstance(self.offset, timedelta):\n zero = timedelta(0, 0, 0)\n if self.offset >= zero:\n off_str = '+' + get_str(self.offset)\n else:\n off_str = '-' + get_str(-self.offset)\n return off_str\n else:\n return '+' + repr(self.offset)\n\n def isAnchored(self):\n return (self.n == 1)\n\n @apply_nat\n def apply(self, other):\n if isinstance(other, datetime):\n n = self.n\n\n if n == 0 and other.weekday() > 4:\n n = 1\n\n result = other\n\n # avoid slowness below\n if abs(n) > 5:\n k = n // 5\n result = 
result + timedelta(7 * k)\n if n < 0 and result.weekday() > 4:\n n += 1\n n -= 5 * k\n if n == 0 and result.weekday() > 4:\n n -= 1\n\n while n != 0:\n k = n // abs(n)\n result = result + timedelta(k)\n if result.weekday() < 5:\n n -= k\n\n if self.normalize:\n result = datetime(result.year, result.month, result.day)\n\n if self.offset:\n result = result + self.offset\n\n return as_timestamp(result)\n\n elif isinstance(other, (timedelta, Tick)):\n return BDay(self.n, offset=self.offset + other,\n normalize=self.normalize)\n else:\n raise ApplyTypeError('Only know how to combine business day with '\n 'datetime or timedelta.')\n\n @classmethod\n def onOffset(cls, dt):\n return dt.weekday() < 5\n\n\nclass CustomBusinessDay(BusinessDay):\n \"\"\"\n **EXPERIMENTAL** DateOffset subclass representing possibly n business days\n excluding holidays\n\n .. warning:: EXPERIMENTAL\n\n This class is not officially supported and the API is likely to change\n in future versions. Use this at your own risk.\n\n Parameters\n ----------\n n : int, default 1\n offset : timedelta, default timedelta(0)\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range\n weekmask : str, Default 'Mon Tue Wed Thu Fri'\n weekmask of valid business days, passed to ``numpy.busdaycalendar``\n holidays : list\n list/array of dates to exclude from the set of valid business days,\n passed to ``numpy.busdaycalendar``\n calendar : HolidayCalendar instance\n instance of AbstractHolidayCalendar that provide the list of holidays\n \"\"\"\n\n _cacheable = False\n _prefix = 'C'\n\n def __init__(self, n=1, **kwds):\n self.n = int(n)\n self.kwds = kwds\n self.offset = kwds.get('offset', timedelta(0))\n self.normalize = kwds.get('normalize', False)\n self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri')\n \n if 'calendar' in kwds:\n holidays = kwds['calendar'].holidays()\n else:\n holidays = kwds.get('holidays', [])\n holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in\n holidays]\n self.holidays = tuple(sorted(holidays))\n self.kwds['holidays'] = self.holidays\n\n self._set_busdaycalendar()\n\n def _set_busdaycalendar(self):\n if self.holidays:\n kwargs = {'weekmask':self.weekmask,'holidays':self.holidays}\n else:\n kwargs = {'weekmask':self.weekmask}\n try:\n self.busdaycalendar = np.busdaycalendar(**kwargs)\n except:\n # Check we have the required numpy version\n from distutils.version import LooseVersion\n\n if LooseVersion(np.__version__) < '1.7.0':\n raise NotImplementedError(\"CustomBusinessDay requires numpy >= \"\n \"1.7.0. 
Current version: \" +\n np.__version__)\n else:\n raise\n\n def __getstate__(self):\n \"\"\"Return a pickleable state\"\"\"\n state = self.__dict__.copy()\n del state['busdaycalendar']\n return state\n\n def __setstate__(self, state):\n \"\"\"Reconstruct an instance from a pickled state\"\"\"\n self.__dict__ = state\n self._set_busdaycalendar()\n\n @apply_nat\n def apply(self, other):\n if self.n <= 0:\n roll = 'forward'\n else:\n roll = 'backward'\n\n # Distinguish input cases to enhance performance\n if isinstance(other, datetime):\n date_in = other\n np_dt = np.datetime64(date_in.date())\n\n np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,\n busdaycal=self.busdaycalendar)\n\n dt_date = np_incr_dt.astype(datetime)\n if not self.normalize:\n result = datetime.combine(dt_date,date_in.time())\n else:\n result = dt_date\n\n if self.offset:\n result = result + self.offset\n\n return as_timestamp(result)\n\n elif isinstance(other, np.datetime64):\n date_in = other\n np_day = date_in.astype('datetime64[D]')\n np_time = date_in - np_day\n\n np_incr_dt = np.busday_offset(np_day, self.n, roll=roll,\n busdaycal=self.busdaycalendar)\n\n if not self.normalize:\n result = np_incr_dt + np_time\n else:\n result = np_incr_dt\n\n if self.offset:\n result = result + self.offset\n\n return as_timestamp(result)\n\n elif isinstance(other, (timedelta, Tick)):\n return BDay(self.n, offset=self.offset + other,\n normalize=self.normalize)\n else:\n raise ApplyTypeError('Only know how to combine trading day with '\n 'datetime, datetime64 or timedelta.')\n\n @staticmethod\n def _to_dt64(dt, dtype='datetime64'):\n # Currently\n # > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')\n # numpy.datetime64('2013-05-01T02:00:00.000000+0200')\n # Thus astype is needed to cast datetime to datetime64[D]\n dt = np.datetime64(dt)\n if dt.dtype.name != dtype:\n dt = dt.astype(dtype)\n return dt\n\n def onOffset(self, dt):\n day64 = self._to_dt64(dt,'datetime64[D]')\n return np.is_busday(day64, busdaycal=self.busdaycalendar)\n\n\nclass MonthOffset(SingleConstructorOffset):\n @property\n def name(self):\n if self.isAnchored:\n return self.rule_code\n else:\n return \"%s-%s\" % (self.rule_code, _int_to_month[self.n])\n\n\nclass MonthEnd(MonthOffset):\n \"\"\"DateOffset of one month end\"\"\"\n\n @apply_nat\n def apply(self, other):\n other = datetime(other.year, other.month, other.day,\n tzinfo=other.tzinfo)\n\n n = self.n\n _, days_in_month = tslib.monthrange(other.year, other.month)\n if other.day != days_in_month:\n other = as_datetime(other) + relativedelta(months=-1, day=31)\n if n <= 0:\n n = n + 1\n other = as_datetime(other) + relativedelta(months=n, day=31)\n return as_timestamp(other)\n\n @classmethod\n def onOffset(cls, dt):\n days_in_month = tslib.monthrange(dt.year, dt.month)[1]\n return dt.day == days_in_month\n\n _prefix = 'M'\n\n\nclass MonthBegin(MonthOffset):\n \"\"\"DateOffset of one month at beginning\"\"\"\n\n @apply_nat\n def apply(self, other):\n n = self.n\n\n if other.day > 1 and n <= 0: # then roll forward if n<=0\n n += 1\n\n other = as_datetime(other) + relativedelta(months=n, day=1)\n return as_timestamp(other)\n\n @classmethod\n def onOffset(cls, dt):\n return dt.day == 1\n\n _prefix = 'MS'\n\n\nclass BusinessMonthEnd(MonthOffset):\n \"\"\"DateOffset increments between business EOM dates\"\"\"\n\n def isAnchored(self):\n return (self.n == 1)\n\n @apply_nat\n def apply(self, other):\n other = datetime(other.year, other.month, other.day)\n\n n = self.n\n\n wkday, days_in_month = 
tslib.monthrange(other.year, other.month)\n lastBDay = days_in_month - max(((wkday + days_in_month - 1)\n % 7) - 4, 0)\n\n if n > 0 and not other.day >= lastBDay:\n n = n - 1\n elif n <= 0 and other.day > lastBDay:\n n = n + 1\n other = as_datetime(other) + relativedelta(months=n, day=31)\n\n if other.weekday() > 4:\n other = other - BDay()\n return as_timestamp(other)\n\n _prefix = 'BM'\n\n\nclass BusinessMonthBegin(MonthOffset):\n \"\"\"DateOffset of one business month at beginning\"\"\"\n\n @apply_nat\n def apply(self, other):\n n = self.n\n\n wkday, _ = tslib.monthrange(other.year, other.month)\n first = _get_firstbday(wkday)\n\n if other.day > first and n <= 0:\n # as if rolled forward already\n n += 1\n elif other.day < first and n > 0:\n other = as_datetime(other) + timedelta(days=first - other.day)\n n -= 1\n\n other = as_datetime(other) + relativedelta(months=n)\n wkday, _ = tslib.monthrange(other.year, other.month)\n first = _get_firstbday(wkday)\n result = datetime(other.year, other.month, first)\n return as_timestamp(result)\n\n @classmethod\n def onOffset(cls, dt):\n first_weekday, _ = tslib.monthrange(dt.year, dt.month)\n if first_weekday == 5:\n return dt.day == 3\n elif first_weekday == 6:\n return dt.day == 2\n else:\n return dt.day == 1\n\n _prefix = 'BMS'\n\n\n\nclass CustomBusinessMonthEnd(BusinessMixin, MonthOffset):\n \"\"\"\n **EXPERIMENTAL** DateOffset of one custom business month\n\n .. warning:: EXPERIMENTAL\n\n This class is not officially supported and the API is likely to change\n in future versions. Use this at your own risk.\n\n Parameters\n ----------\n n : int, default 1\n offset : timedelta, default timedelta(0)\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range\n weekmask : str, Default 'Mon Tue Wed Thu Fri'\n weekmask of valid business days, passed to ``numpy.busdaycalendar``\n holidays : list\n list/array of dates to exclude from the set of valid business days,\n passed to ``numpy.busdaycalendar``\n \"\"\"\n\n _cacheable = False\n _prefix = 'CBM'\n def __init__(self, n=1, **kwds):\n self.n = int(n)\n self.kwds = kwds\n self.offset = kwds.get('offset', timedelta(0))\n self.normalize = kwds.get('normalize', False)\n self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri')\n self.cbday = CustomBusinessDay(n=self.n,**kwds)\n self.m_offset = MonthEnd()\n\n @apply_nat\n def apply(self,other):\n n = self.n\n dt_in = other\n # First move to month offset\n cur_mend = self.m_offset.rollforward(dt_in)\n # Find this custom month offset\n cur_cmend = self.cbday.rollback(cur_mend)\n \n # handle zero case. arbitrarily rollforward\n if n == 0 and dt_in != cur_cmend:\n n += 1\n\n if dt_in < cur_cmend and n >= 1:\n n -= 1\n elif dt_in > cur_cmend and n <= -1:\n n += 1\n \n new = cur_mend + n * MonthEnd()\n result = self.cbday.rollback(new)\n return as_timestamp(result)\n \nclass CustomBusinessMonthBegin(BusinessMixin, MonthOffset):\n \"\"\"\n **EXPERIMENTAL** DateOffset of one custom business month\n\n .. warning:: EXPERIMENTAL\n\n This class is not officially supported and the API is likely to change\n in future versions. 
Use this at your own risk.\n\n Parameters\n ----------\n n : int, default 1\n offset : timedelta, default timedelta(0)\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range\n weekmask : str, Default 'Mon Tue Wed Thu Fri'\n weekmask of valid business days, passed to ``numpy.busdaycalendar``\n holidays : list\n list/array of dates to exclude from the set of valid business days,\n passed to ``numpy.busdaycalendar``\n \"\"\"\n\n _cacheable = False\n _prefix = 'CBMS'\n def __init__(self, n=1, **kwds):\n self.n = int(n)\n self.kwds = kwds\n self.offset = kwds.get('offset', timedelta(0))\n self.normalize = kwds.get('normalize', False)\n self.weekmask = kwds.get('weekmask', 'Mon Tue Wed Thu Fri')\n self.cbday = CustomBusinessDay(n=self.n,**kwds)\n self.m_offset = MonthBegin()\n\n @apply_nat\n def apply(self,other):\n n = self.n\n dt_in = other\n # First move to month offset\n cur_mbegin = self.m_offset.rollback(dt_in)\n # Find this custom month offset\n cur_cmbegin = self.cbday.rollforward(cur_mbegin)\n\n # handle zero case. arbitrarily rollforward\n if n == 0 and dt_in != cur_cmbegin:\n n += 1\n\n if dt_in > cur_cmbegin and n <= -1:\n n += 1\n elif dt_in < cur_cmbegin and n >= 1:\n n -= 1\n \n new = cur_mbegin + n * MonthBegin()\n result = self.cbday.rollforward(new)\n return as_timestamp(result)\n\nclass Week(DateOffset):\n \"\"\"\n Weekly offset\n\n Parameters\n ----------\n weekday : int, default None\n Always generate specific day of week. 0 for Monday\n \"\"\"\n\n def __init__(self, n=1, **kwds):\n self.n = n\n self.weekday = kwds.get('weekday', None)\n\n if self.weekday is not None:\n if self.weekday < 0 or self.weekday > 6:\n raise ValueError('Day must be 0<=day<=6, got %d' %\n self.weekday)\n\n self._inc = timedelta(weeks=1)\n self.kwds = kwds\n\n def isAnchored(self):\n return (self.n == 1 and self.weekday is not None)\n\n @apply_nat\n def apply(self, other):\n if self.weekday is None:\n return as_timestamp(as_datetime(other) + self.n * self._inc)\n\n if self.n > 0:\n k = self.n\n otherDay = other.weekday()\n if otherDay != self.weekday:\n other = as_datetime(other) + timedelta((self.weekday - otherDay) % 7)\n k = k - 1\n other = as_datetime(other)\n for i in range(k):\n other = other + self._inc\n else:\n k = self.n\n otherDay = other.weekday()\n if otherDay != self.weekday:\n other = as_datetime(other) + timedelta((self.weekday - otherDay) % 7)\n other = as_datetime(other)\n for i in range(-k):\n other = other - self._inc\n return as_timestamp(other)\n\n def onOffset(self, dt):\n return dt.weekday() == self.weekday\n\n _prefix = 'W'\n\n @property\n def rule_code(self):\n suffix = ''\n if self.weekday is not None:\n suffix = '-%s' % (_int_to_weekday[self.weekday])\n return self._prefix + suffix\n\n @classmethod\n def _from_name(cls, suffix=None):\n if not suffix:\n weekday = None\n else:\n weekday = _weekday_to_int[suffix]\n return cls(weekday=weekday)\n\nclass WeekDay(object):\n MON = 0\n TUE = 1\n WED = 2\n THU = 3\n FRI = 4\n SAT = 5\n SUN = 6\n\n_int_to_weekday = {\n WeekDay.MON: 'MON',\n WeekDay.TUE: 'TUE',\n WeekDay.WED: 'WED',\n WeekDay.THU: 'THU',\n WeekDay.FRI: 'FRI',\n WeekDay.SAT: 'SAT',\n WeekDay.SUN: 'SUN'\n}\n\n_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items())\n\n\nclass WeekOfMonth(DateOffset):\n \"\"\"\n Describes monthly dates like \"the Tuesday of the 2nd week of each month\"\n\n Parameters\n ----------\n n : int\n week : {0, 1, 2, 3, ...}\n 0 is 1st week of month, 1 2nd week, etc.\n weekday : {0, 1, 
..., 6}\n 0: Mondays\n 1: Tuesdays\n 2: Wednesdays\n 3: Thursdays\n 4: Fridays\n 5: Saturdays\n 6: Sundays\n \"\"\"\n\n def __init__(self, n=1, **kwds):\n self.n = n\n self.weekday = kwds['weekday']\n self.week = kwds['week']\n\n if self.n == 0:\n raise ValueError('N cannot be 0')\n\n if self.weekday < 0 or self.weekday > 6:\n raise ValueError('Day must be 0<=day<=6, got %d' %\n self.weekday)\n if self.week < 0 or self.week > 3:\n raise ValueError('Week must be 0<=day<=3, got %d' %\n self.week)\n\n self.kwds = kwds\n\n @apply_nat\n def apply(self, other):\n offsetOfMonth = self.getOffsetOfMonth(other)\n\n if offsetOfMonth > other:\n if self.n > 0:\n months = self.n - 1\n else:\n months = self.n\n elif offsetOfMonth == other:\n months = self.n\n else:\n if self.n > 0:\n months = self.n\n else:\n months = self.n + 1\n\n return self.getOffsetOfMonth(as_datetime(other) + relativedelta(months=months,\n day=1))\n\n def getOffsetOfMonth(self, dt):\n w = Week(weekday=self.weekday)\n d = datetime(dt.year, dt.month, 1)\n\n d = w.rollforward(d)\n\n for i in range(self.week):\n d = w.apply(d)\n\n return d\n\n def onOffset(self, dt):\n return dt == self.getOffsetOfMonth(dt)\n\n @property\n def rule_code(self):\n return '%s-%d%s' % (self._prefix, self.week + 1,\n _int_to_weekday.get(self.weekday, ''))\n\n _prefix = 'WOM'\n\n @classmethod\n def _from_name(cls, suffix=None):\n if not suffix:\n raise ValueError(\"Prefix %r requires a suffix.\" % (cls._prefix))\n # TODO: handle n here...\n # only one digit weeks (1 --> week 0, 2 --> week 1, etc.)\n week = int(suffix[0]) - 1\n weekday = _weekday_to_int[suffix[1:]]\n return cls(week=week, weekday=weekday)\n\nclass LastWeekOfMonth(DateOffset):\n \"\"\"\n Describes monthly dates in last week of month like \"the last Tuesday of each month\"\n\n Parameters\n ----------\n n : int\n weekday : {0, 1, ..., 6}\n 0: Mondays\n 1: Tuesdays\n 2: Wednesdays\n 3: Thursdays\n 4: Fridays\n 5: Saturdays\n 6: Sundays\n \"\"\"\n def __init__(self, n=1, **kwds):\n self.n = n\n self.weekday = kwds['weekday']\n\n if self.n == 0:\n raise ValueError('N cannot be 0')\n\n if self.weekday < 0 or self.weekday > 6:\n raise ValueError('Day must be 0<=day<=6, got %d' %\n self.weekday)\n\n self.kwds = kwds\n\n @apply_nat\n def apply(self, other):\n offsetOfMonth = self.getOffsetOfMonth(other)\n\n if offsetOfMonth > other:\n if self.n > 0:\n months = self.n - 1\n else:\n months = self.n\n elif offsetOfMonth == other:\n months = self.n\n else:\n if self.n > 0:\n months = self.n\n else:\n months = self.n + 1\n\n return self.getOffsetOfMonth(as_datetime(other) + relativedelta(months=months, day=1))\n\n def getOffsetOfMonth(self, dt):\n m = MonthEnd()\n d = datetime(dt.year, dt.month, 1)\n\n eom = m.rollforward(d)\n\n w = Week(weekday=self.weekday)\n\n return w.rollback(eom)\n\n def onOffset(self, dt):\n return dt == self.getOffsetOfMonth(dt)\n\n @property\n def rule_code(self):\n return '%s-%s' % (self._prefix, _int_to_weekday.get(self.weekday, ''))\n\n _prefix = 'LWOM'\n\n @classmethod\n def _from_name(cls, suffix=None):\n if not suffix:\n raise ValueError(\"Prefix %r requires a suffix.\" % (cls._prefix))\n # TODO: handle n here...\n weekday = _weekday_to_int[suffix]\n return cls(weekday=weekday)\n\n\nclass QuarterOffset(DateOffset):\n \"\"\"Quarter representation - doesn't call super\"\"\"\n\n #: default month for __init__\n _default_startingMonth = None\n #: default month in _from_name\n _from_name_startingMonth = None\n\n # TODO: Consider combining QuarterOffset and YearOffset __init__ at 
some\n # point\n def __init__(self, n=1, **kwds):\n self.n = n\n self.startingMonth = kwds.get('startingMonth',\n self._default_startingMonth)\n\n self.kwds = kwds\n\n def isAnchored(self):\n return (self.n == 1 and self.startingMonth is not None)\n\n @classmethod\n def _from_name(cls, suffix=None):\n kwargs = {}\n if suffix:\n kwargs['startingMonth'] = _month_to_int[suffix]\n else:\n if cls._from_name_startingMonth is not None:\n kwargs['startingMonth'] = cls._from_name_startingMonth\n return cls(**kwargs)\n\n @property\n def rule_code(self):\n return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth])\n\n\nclass BQuarterEnd(QuarterOffset):\n \"\"\"DateOffset increments between business Quarter dates\n startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...\n startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...\n startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...\n \"\"\"\n _outputName = 'BusinessQuarterEnd'\n _default_startingMonth = 3\n # 'BQ'\n _from_name_startingMonth = 12\n _prefix = 'BQ'\n\n @apply_nat\n def apply(self, other):\n n = self.n\n\n wkday, days_in_month = tslib.monthrange(other.year, other.month)\n lastBDay = days_in_month - max(((wkday + days_in_month - 1)\n % 7) - 4, 0)\n\n monthsToGo = 3 - ((other.month - self.startingMonth) % 3)\n if monthsToGo == 3:\n monthsToGo = 0\n\n if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):\n n = n - 1\n elif n <= 0 and other.day > lastBDay and monthsToGo == 0:\n n = n + 1\n\n other = as_datetime(other) + relativedelta(months=monthsToGo + 3 * n, day=31)\n\n if other.weekday() > 4:\n other = other - BDay()\n\n return as_timestamp(other)\n\n def onOffset(self, dt):\n modMonth = (dt.month - self.startingMonth) % 3\n return BMonthEnd().onOffset(dt) and modMonth == 0\n\n\n_int_to_month = {\n 1: 'JAN',\n 2: 'FEB',\n 3: 'MAR',\n 4: 'APR',\n 5: 'MAY',\n 6: 'JUN',\n 7: 'JUL',\n 8: 'AUG',\n 9: 'SEP',\n 10: 'OCT',\n 11: 'NOV',\n 12: 'DEC'\n}\n\n_month_to_int = dict((v, k) for k, v in _int_to_month.items())\n\n\n# TODO: This is basically the same as BQuarterEnd\nclass BQuarterBegin(QuarterOffset):\n _outputName = \"BusinessQuarterBegin\"\n # I suspect this is wrong for *all* of them.\n _default_startingMonth = 3\n _from_name_startingMonth = 1\n _prefix = 'BQS'\n\n @apply_nat\n def apply(self, other):\n n = self.n\n other = as_datetime(other)\n\n wkday, _ = tslib.monthrange(other.year, other.month)\n\n first = _get_firstbday(wkday)\n\n monthsSince = (other.month - self.startingMonth) % 3\n\n if n <= 0 and monthsSince != 0: # make sure to roll forward so negate\n monthsSince = monthsSince - 3\n\n # roll forward if on same month later than first bday\n if n <= 0 and (monthsSince == 0 and other.day > first):\n n = n + 1\n # pretend to roll back if on same month but before firstbday\n elif n > 0 and (monthsSince == 0 and other.day < first):\n n = n - 1\n\n # get the first bday for result\n other = other + relativedelta(months=3 * n - monthsSince)\n wkday, _ = tslib.monthrange(other.year, other.month)\n first = _get_firstbday(wkday)\n result = datetime(other.year, other.month, first,\n other.hour, other.minute, other.second,\n other.microsecond)\n return as_timestamp(result)\n\n\nclass QuarterEnd(QuarterOffset):\n \"\"\"DateOffset increments between business Quarter dates\n startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...\n startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...\n startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...\n 
\"\"\"\n _outputName = 'QuarterEnd'\n _default_startingMonth = 3\n _prefix = 'Q'\n\n def __init__(self, n=1, **kwds):\n self.n = n\n self.startingMonth = kwds.get('startingMonth', 3)\n\n self.kwds = kwds\n\n def isAnchored(self):\n return (self.n == 1 and self.startingMonth is not None)\n\n @apply_nat\n def apply(self, other):\n n = self.n\n other = as_datetime(other)\n\n wkday, days_in_month = tslib.monthrange(other.year, other.month)\n\n monthsToGo = 3 - ((other.month - self.startingMonth) % 3)\n if monthsToGo == 3:\n monthsToGo = 0\n\n if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):\n n = n - 1\n\n other = other + relativedelta(months=monthsToGo + 3 * n, day=31)\n\n return as_timestamp(other)\n\n def onOffset(self, dt):\n modMonth = (dt.month - self.startingMonth) % 3\n return MonthEnd().onOffset(dt) and modMonth == 0\n\n\nclass QuarterBegin(QuarterOffset):\n _outputName = 'QuarterBegin'\n _default_startingMonth = 3\n _from_name_startingMonth = 1\n _prefix = 'QS'\n\n def isAnchored(self):\n return (self.n == 1 and self.startingMonth is not None)\n\n @apply_nat\n def apply(self, other):\n n = self.n\n other = as_datetime(other)\n\n wkday, days_in_month = tslib.monthrange(other.year, other.month)\n\n monthsSince = (other.month - self.startingMonth) % 3\n\n if n <= 0 and monthsSince != 0:\n # make sure you roll forward, so negate\n monthsSince = monthsSince - 3\n\n if n < 0 and (monthsSince == 0 and other.day > 1):\n # after start, so come back an extra period as if rolled forward\n n = n + 1\n\n other = other + relativedelta(months=3 * n - monthsSince, day=1)\n return as_timestamp(other)\n\n\nclass YearOffset(DateOffset):\n \"\"\"DateOffset that just needs a month\"\"\"\n\n def __init__(self, n=1, **kwds):\n self.month = kwds.get('month', self._default_month)\n\n if self.month < 1 or self.month > 12:\n raise ValueError('Month must go from 1 to 12')\n\n DateOffset.__init__(self, n=n, **kwds)\n\n @classmethod\n def _from_name(cls, suffix=None):\n kwargs = {}\n if suffix:\n kwargs['month'] = _month_to_int[suffix]\n return cls(**kwargs)\n\n @property\n def rule_code(self):\n return '%s-%s' % (self._prefix, _int_to_month[self.month])\n\n\nclass BYearEnd(YearOffset):\n \"\"\"DateOffset increments between business EOM dates\"\"\"\n _outputName = 'BusinessYearEnd'\n _default_month = 12\n _prefix = 'BA'\n\n @apply_nat\n def apply(self, other):\n n = self.n\n other = as_datetime(other)\n\n wkday, days_in_month = tslib.monthrange(other.year, self.month)\n lastBDay = (days_in_month -\n max(((wkday + days_in_month - 1) % 7) - 4, 0))\n\n years = n\n if n > 0:\n if (other.month < self.month or\n (other.month == self.month and other.day < lastBDay)):\n years -= 1\n elif n <= 0:\n if (other.month > self.month or\n (other.month == self.month and other.day > lastBDay)):\n years += 1\n\n other = other + relativedelta(years=years)\n\n _, days_in_month = tslib.monthrange(other.year, self.month)\n result = datetime(other.year, self.month, days_in_month,\n other.hour, other.minute, other.second,\n other.microsecond)\n\n if result.weekday() > 4:\n result = result - BDay()\n\n return as_timestamp(result)\n\n\nclass BYearBegin(YearOffset):\n \"\"\"DateOffset increments between business year begin dates\"\"\"\n _outputName = 'BusinessYearBegin'\n _default_month = 1\n _prefix = 'BAS'\n\n @apply_nat\n def apply(self, other):\n n = self.n\n other = as_datetime(other)\n\n wkday, days_in_month = tslib.monthrange(other.year, self.month)\n\n first = _get_firstbday(wkday)\n\n years = n\n\n if n > 0: # 
roll back first for positive n\n if (other.month < self.month or\n (other.month == self.month and other.day < first)):\n years -= 1\n elif n <= 0: # roll forward\n if (other.month > self.month or\n (other.month == self.month and other.day > first)):\n years += 1\n\n # set first bday for result\n other = other + relativedelta(years=years)\n wkday, days_in_month = tslib.monthrange(other.year, self.month)\n first = _get_firstbday(wkday)\n return as_timestamp(datetime(other.year, self.month, first))\n\n\nclass YearEnd(YearOffset):\n \"\"\"DateOffset increments between calendar year ends\"\"\"\n _default_month = 12\n _prefix = 'A'\n\n @apply_nat\n def apply(self, other):\n def _increment(date):\n if date.month == self.month:\n _, days_in_month = tslib.monthrange(date.year, self.month)\n if date.day != days_in_month:\n year = date.year\n else:\n year = date.year + 1\n elif date.month < self.month:\n year = date.year\n else:\n year = date.year + 1\n _, days_in_month = tslib.monthrange(year, self.month)\n return datetime(year, self.month, days_in_month,\n date.hour, date.minute, date.second,\n date.microsecond)\n\n def _decrement(date):\n year = date.year if date.month > self.month else date.year - 1\n _, days_in_month = tslib.monthrange(year, self.month)\n return datetime(year, self.month, days_in_month,\n date.hour, date.minute, date.second,\n date.microsecond)\n\n def _rollf(date):\n if date.month != self.month or\\\n date.day < tslib.monthrange(date.year, date.month)[1]:\n date = _increment(date)\n return date\n\n n = self.n\n result = other\n if n > 0:\n while n > 0:\n result = _increment(result)\n n -= 1\n elif n < 0:\n while n < 0:\n result = _decrement(result)\n n += 1\n else:\n # n == 0, roll forward\n result = _rollf(result)\n\n return as_timestamp(result)\n\n def onOffset(self, dt):\n wkday, days_in_month = tslib.monthrange(dt.year, self.month)\n return self.month == dt.month and dt.day == days_in_month\n\n\nclass YearBegin(YearOffset):\n \"\"\"DateOffset increments between calendar year begin dates\"\"\"\n _default_month = 1\n _prefix = 'AS'\n\n @apply_nat\n def apply(self, other):\n def _increment(date):\n year = date.year\n if date.month >= self.month:\n year += 1\n return datetime(year, self.month, 1, date.hour, date.minute,\n date.second, date.microsecond)\n\n def _decrement(date):\n year = date.year\n if date.month < self.month or (date.month == self.month and\n date.day == 1):\n year -= 1\n return datetime(year, self.month, 1, date.hour, date.minute,\n date.second, date.microsecond)\n\n def _rollf(date):\n if (date.month != self.month) or date.day > 1:\n date = _increment(date)\n return date\n\n n = self.n\n result = other\n if n > 0:\n while n > 0:\n result = _increment(result)\n n -= 1\n elif n < 0:\n while n < 0:\n result = _decrement(result)\n n += 1\n else:\n # n == 0, roll forward\n result = _rollf(result)\n\n return as_timestamp(result)\n\n def onOffset(self, dt):\n return dt.month == self.month and dt.day == 1\n\n\nclass FY5253(DateOffset):\n \"\"\"\n Describes 52-53 week fiscal year. 
This is also known as a 4-4-5 calendar.\n\n It is used by companies that desire that their\n fiscal year always end on the same day of the week.\n\n It is a method of managing accounting periods.\n It is a common calendar structure for some industries,\n such as retail, manufacturing and parking industry.\n\n For more information see:\n http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar\n\n\n The year may either:\n - end on the last X day of the Y month.\n - end on the last X day closest to the last day of the Y month.\n\n X is a specific day of the week.\n Y is a certain month of the year\n\n Parameters\n ----------\n n : int\n weekday : {0, 1, ..., 6}\n 0: Mondays\n 1: Tuesdays\n 2: Wednesdays\n 3: Thursdays\n 4: Fridays\n 5: Saturdays\n 6: Sundays\n startingMonth : The month in which fiscal years end. {1, 2, ... 12}\n variation : str\n {\"nearest\", \"last\"} for \"LastOfMonth\" or \"NearestEndMonth\"\n \"\"\"\n\n _prefix = 'RE'\n _suffix_prefix_last = 'L'\n _suffix_prefix_nearest = 'N'\n\n def __init__(self, n=1, **kwds):\n self.n = n\n self.startingMonth = kwds['startingMonth']\n self.weekday = kwds[\"weekday\"]\n\n self.variation = kwds[\"variation\"]\n\n self.kwds = kwds\n\n if self.n == 0:\n raise ValueError('N cannot be 0')\n\n if self.variation not in [\"nearest\", \"last\"]:\n raise ValueError('%s is not a valid variation' % self.variation)\n\n if self.variation == \"nearest\":\n weekday_offset = weekday(self.weekday)\n self._rd_forward = relativedelta(weekday=weekday_offset)\n self._rd_backward = relativedelta(weekday=weekday_offset(-1))\n else:\n self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)\n\n def isAnchored(self):\n return self.n == 1 \\\n and self.startingMonth is not None \\\n and self.weekday is not None\n\n def onOffset(self, dt):\n year_end = self.get_year_end(dt)\n\n if self.variation == \"nearest\":\n # We have to check the year end of \"this\" cal year AND the previous\n return year_end == dt or \\\n self.get_year_end(dt - relativedelta(months=1)) == dt\n else:\n return year_end == dt\n\n @apply_nat\n def apply(self, other):\n n = self.n\n prev_year = self.get_year_end(\n datetime(other.year - 1, self.startingMonth, 1))\n cur_year = self.get_year_end(\n datetime(other.year, self.startingMonth, 1))\n next_year = self.get_year_end(\n datetime(other.year + 1, self.startingMonth, 1))\n\n if n > 0:\n if other == prev_year:\n year = other.year - 1\n elif other == cur_year:\n year = other.year\n elif other == next_year:\n year = other.year + 1\n elif other < prev_year:\n year = other.year - 1\n n -= 1\n elif other < cur_year:\n year = other.year\n n -= 1\n elif other < next_year:\n year = other.year + 1\n n -= 1\n else:\n assert False\n\n return self.get_year_end(datetime(year + n, self.startingMonth, 1))\n else:\n n = -n\n if other == prev_year:\n year = other.year - 1\n elif other == cur_year:\n year = other.year\n elif other == next_year:\n year = other.year + 1\n elif other > next_year:\n year = other.year + 1\n n -= 1\n elif other > cur_year:\n year = other.year\n n -= 1\n elif other > prev_year:\n year = other.year - 1\n n -= 1\n else:\n assert False\n\n return self.get_year_end(datetime(year - n, self.startingMonth, 1))\n\n def get_year_end(self, dt):\n if self.variation == \"nearest\":\n return self._get_year_end_nearest(dt)\n else:\n return self._get_year_end_last(dt)\n\n def get_target_month_end(self, dt):\n target_month = datetime(year=dt.year, month=self.startingMonth, day=1)\n next_month_first_of = target_month + 
relativedelta(months=+1)\n return next_month_first_of + relativedelta(days=-1)\n\n def _get_year_end_nearest(self, dt):\n target_date = self.get_target_month_end(dt)\n if target_date.weekday() == self.weekday:\n return target_date\n else:\n forward = target_date + self._rd_forward\n backward = target_date + self._rd_backward\n\n if forward - target_date < target_date - backward:\n return forward\n else:\n return backward\n\n def _get_year_end_last(self, dt):\n current_year = datetime(year=dt.year, month=self.startingMonth, day=1)\n return current_year + self._offset_lwom\n\n @property\n def rule_code(self):\n suffix = self.get_rule_code_suffix()\n return \"%s-%s\" % (self._get_prefix(), suffix)\n\n def _get_prefix(self):\n return self._prefix\n\n def _get_suffix_prefix(self):\n if self.variation == \"nearest\":\n return self._suffix_prefix_nearest\n else:\n return self._suffix_prefix_last\n\n def get_rule_code_suffix(self):\n return '%s-%s-%s' % (self._get_suffix_prefix(), \\\n _int_to_month[self.startingMonth], \\\n _int_to_weekday[self.weekday])\n\n @classmethod\n def _parse_suffix(cls, varion_code, startingMonth_code, weekday_code):\n if varion_code == \"N\":\n variation = \"nearest\"\n elif varion_code == \"L\":\n variation = \"last\"\n else:\n raise ValueError(\n \"Unable to parse varion_code: %s\" % (varion_code,))\n\n startingMonth = _month_to_int[startingMonth_code]\n weekday = _weekday_to_int[weekday_code]\n\n return {\n \"weekday\": weekday,\n \"startingMonth\": startingMonth,\n \"variation\": variation,\n }\n\n @classmethod\n def _from_name(cls, *args):\n return cls(**cls._parse_suffix(*args))\n\n\nclass FY5253Quarter(DateOffset):\n \"\"\"\n DateOffset increments between business quarter dates\n for 52-53 week fiscal year (also known as a 4-4-5 calendar).\n\n It is used by companies that desire that their\n fiscal year always end on the same day of the week.\n\n It is a method of managing accounting periods.\n It is a common calendar structure for some industries,\n such as retail, manufacturing and parking industry.\n\n For more information see:\n http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar\n\n The year may either:\n - end on the last X day of the Y month.\n - end on the last X day closest to the last day of the Y month.\n\n X is a specific day of the week.\n Y is a certain month of the year\n\n startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...\n startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...\n startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...\n\n Parameters\n ----------\n n : int\n weekday : {0, 1, ..., 6}\n 0: Mondays\n 1: Tuesdays\n 2: Wednesdays\n 3: Thursdays\n 4: Fridays\n 5: Saturdays\n 6: Sundays\n startingMonth : The month in which fiscal years end. {1, 2, ... 12}\n qtr_with_extra_week : The quarter number that has the leap\n or 14 week when needed. 
{1,2,3,4}\n variation : str\n {\"nearest\", \"last\"} for \"LastOfMonth\" or \"NearestEndMonth\"\n \"\"\"\n\n _prefix = 'REQ'\n\n def __init__(self, n=1, **kwds):\n self.n = n\n\n self.qtr_with_extra_week = kwds[\"qtr_with_extra_week\"]\n\n self.kwds = kwds\n\n if self.n == 0:\n raise ValueError('N cannot be 0')\n\n self._offset = FY5253( \\\n startingMonth=kwds['startingMonth'], \\\n weekday=kwds[\"weekday\"],\n variation=kwds[\"variation\"])\n\n def isAnchored(self):\n return self.n == 1 and self._offset.isAnchored()\n\n @apply_nat\n def apply(self, other):\n other = as_datetime(other)\n n = self.n\n\n if n > 0:\n while n > 0:\n if not self._offset.onOffset(other):\n qtr_lens = self.get_weeks(other)\n start = other - self._offset\n else:\n start = other\n qtr_lens = self.get_weeks(other + self._offset)\n\n for weeks in qtr_lens:\n start += relativedelta(weeks=weeks)\n if start > other:\n other = start\n n -= 1\n break\n\n else:\n n = -n\n while n > 0:\n if not self._offset.onOffset(other):\n qtr_lens = self.get_weeks(other)\n end = other + self._offset\n else:\n end = other\n qtr_lens = self.get_weeks(other)\n\n for weeks in reversed(qtr_lens):\n end -= relativedelta(weeks=weeks)\n if end < other:\n other = end\n n -= 1\n break\n\n return other\n\n def get_weeks(self, dt):\n ret = [13] * 4\n\n year_has_extra_week = self.year_has_extra_week(dt)\n\n if year_has_extra_week:\n ret[self.qtr_with_extra_week - 1] = 14\n\n return ret\n\n def year_has_extra_week(self, dt):\n if self._offset.onOffset(dt):\n prev_year_end = dt - self._offset\n next_year_end = dt\n else:\n next_year_end = dt + self._offset\n prev_year_end = dt - self._offset\n\n week_in_year = (next_year_end - prev_year_end).days / 7\n\n return week_in_year == 53\n\n def onOffset(self, dt):\n if self._offset.onOffset(dt):\n return True\n\n next_year_end = dt - self._offset\n\n qtr_lens = self.get_weeks(dt)\n\n current = next_year_end\n for qtr_len in qtr_lens[0:4]:\n current += relativedelta(weeks=qtr_len)\n if dt == current:\n return True\n return False\n\n @property\n def rule_code(self):\n suffix = self._offset.get_rule_code_suffix()\n return \"%s-%s\" % (self._prefix,\n \"%s-%d\" % (suffix, self.qtr_with_extra_week))\n\n @classmethod\n def _from_name(cls, *args):\n return cls(**dict(FY5253._parse_suffix(*args[:-1]),\n qtr_with_extra_week=int(args[-1])))\n\nclass Easter(DateOffset):\n '''\n DateOffset for the Easter holiday using\n logic defined in dateutil. 
Right now uses\n the revised method which is valid in years\n 1583-4099.\n '''\n def __init__(self, n=1, **kwds):\n super(Easter, self).__init__(n, **kwds)\n \n @apply_nat\n def apply(self, other):\n \n currentEaster = easter(other.year)\n currentEaster = datetime(currentEaster.year, currentEaster.month, currentEaster.day)\n \n # NOTE: easter returns a datetime.date so we have to convert to type of other\n if self.n >= 0:\n if other >= currentEaster:\n new = easter(other.year + self.n)\n else:\n new = easter(other.year + self.n - 1)\n else:\n if other > currentEaster:\n new = easter(other.year + self.n + 1)\n else:\n new = easter(other.year + self.n)\n \n # FIXME: There has to be a better way to do this, but I don't know what it is\n if isinstance(other, Timestamp):\n return as_timestamp(new)\n elif isinstance(other, datetime):\n return datetime(new.year, new.month, new.day)\n else:\n return new\n \n @classmethod\n def onOffset(cls, dt):\n return date(dt.year, dt.month, dt.day) == easter(dt.year)\n#----------------------------------------------------------------------\n# Ticks\n\nimport operator\n\n\ndef _tick_comp(op):\n def f(self, other):\n return op(self.delta, other.delta)\n\n return f\n\n\nclass Tick(SingleConstructorOffset):\n _inc = timedelta(microseconds=1000)\n\n __gt__ = _tick_comp(operator.gt)\n __ge__ = _tick_comp(operator.ge)\n __lt__ = _tick_comp(operator.lt)\n __le__ = _tick_comp(operator.le)\n __eq__ = _tick_comp(operator.eq)\n __ne__ = _tick_comp(operator.ne)\n\n def __add__(self, other):\n if isinstance(other, Tick):\n if type(self) == type(other):\n return type(self)(self.n + other.n)\n else:\n return _delta_to_tick(self.delta + other.delta)\n try:\n return self.apply(other)\n except ApplyTypeError:\n return NotImplemented\n\n def __eq__(self, other):\n if isinstance(other, compat.string_types):\n from pandas.tseries.frequencies import to_offset\n\n other = to_offset(other)\n\n if isinstance(other, Tick):\n return self.delta == other.delta\n else:\n return DateOffset.__eq__(self, other)\n\n # This is identical to DateOffset.__hash__, but has to be redefined here\n # for Python 3, because we've redefined __eq__.\n def __hash__(self):\n return hash(self._params())\n\n def __ne__(self, other):\n if isinstance(other, compat.string_types):\n from pandas.tseries.frequencies import to_offset\n\n other = to_offset(other)\n\n if isinstance(other, Tick):\n return self.delta != other.delta\n else:\n return DateOffset.__ne__(self, other)\n\n @property\n def delta(self):\n return self.n * self._inc\n\n @property\n def nanos(self):\n return _delta_to_nanoseconds(self.delta)\n\n @apply_nat\n def apply(self, other):\n if type(other) == date:\n other = datetime(other.year, other.month, other.day)\n\n if isinstance(other, (datetime, timedelta)):\n return other + self.delta\n elif isinstance(other, type(self)):\n return type(self)(self.n + other.n)\n else:\n raise ApplyTypeError('Unhandled type: %s' % type(other).__name__)\n\n _prefix = 'undefined'\n\n def isAnchored(self):\n return False\n\n\ndef _delta_to_tick(delta):\n if delta.microseconds == 0:\n if delta.seconds == 0:\n return Day(delta.days)\n else:\n seconds = delta.days * 86400 + delta.seconds\n if seconds % 3600 == 0:\n return Hour(seconds / 3600)\n elif seconds % 60 == 0:\n return Minute(seconds / 60)\n else:\n return Second(seconds)\n else:\n nanos = _delta_to_nanoseconds(delta)\n if nanos % 1000000 == 0:\n return Milli(nanos // 1000000)\n elif nanos % 1000 == 0:\n return Micro(nanos // 1000)\n else: # pragma: no cover\n 
return Nano(nanos)\n\n\ndef _delta_to_nanoseconds(delta):\n if isinstance(delta, np.timedelta64):\n return delta.astype('timedelta64[ns]').item()\n elif isinstance(delta, Tick):\n delta = delta.delta\n\n return (delta.days * 24 * 60 * 60 * 1000000\n + delta.seconds * 1000000\n + delta.microseconds) * 1000\n\n\nclass Day(Tick):\n _inc = timedelta(1)\n _prefix = 'D'\n\n\nclass Hour(Tick):\n _inc = timedelta(0, 3600)\n _prefix = 'H'\n\n\nclass Minute(Tick):\n _inc = timedelta(0, 60)\n _prefix = 'T'\n\n\nclass Second(Tick):\n _inc = timedelta(0, 1)\n _prefix = 'S'\n\n\nclass Milli(Tick):\n _prefix = 'L'\n\n\nclass Micro(Tick):\n _inc = timedelta(microseconds=1)\n _prefix = 'U'\n\n\nclass Nano(Tick):\n _inc = np.timedelta64(1, 'ns') if not _np_version_under1p7 else 1\n _prefix = 'N'\n\n\nBDay = BusinessDay\nBMonthEnd = BusinessMonthEnd\nBMonthBegin = BusinessMonthBegin\nCBMonthEnd = CustomBusinessMonthEnd\nCBMonthBegin = CustomBusinessMonthBegin\nCDay = CustomBusinessDay\n\n\ndef _get_firstbday(wkday):\n \"\"\"\n wkday is the result of monthrange(year, month)\n\n If it's a saturday or sunday, increment first business day to reflect this\n \"\"\"\n first = 1\n if wkday == 5: # on Saturday\n first = 3\n elif wkday == 6: # on Sunday\n first = 2\n return first\n\n\ndef generate_range(start=None, end=None, periods=None,\n offset=BDay(), time_rule=None):\n \"\"\"\n Generates a sequence of dates corresponding to the specified time\n offset. Similar to dateutil.rrule except uses pandas DateOffset\n objects to represent time increments\n\n Parameters\n ----------\n start : datetime (default None)\n end : datetime (default None)\n periods : int, optional\n time_rule : (legacy) name of DateOffset object to be used, optional\n Corresponds with names expected by tseries.frequencies.get_offset\n\n Notes\n -----\n * This method is faster for generating weekdays than dateutil.rrule\n * At least two of (start, end, periods) must be specified.\n * If both start and end are specified, the returned dates will\n satisfy start <= date <= end.\n * If both time_rule and offset are specified, time_rule supersedes offset.\n\n Returns\n -------\n dates : generator object\n\n \"\"\"\n if time_rule is not None:\n from pandas.tseries.frequencies import get_offset\n\n offset = get_offset(time_rule)\n\n start = to_datetime(start)\n end = to_datetime(end)\n\n if start and not offset.onOffset(start):\n start = offset.rollforward(start)\n\n if end and not offset.onOffset(end):\n end = offset.rollback(end)\n\n if periods is None and end < start:\n end = None\n periods = 0\n\n if end is None:\n end = start + (periods - 1) * offset\n\n if start is None:\n start = end - (periods - 1) * offset\n\n cur = start\n\n next_date = cur\n while cur <= end:\n yield cur\n\n # faster than cur + offset\n next_date = offset.apply(cur)\n if next_date <= cur:\n raise ValueError('Offset %s did not increment date' % offset)\n cur = next_date\n\nprefix_mapping = dict((offset._prefix, offset) for offset in [\n YearBegin, # 'AS'\n YearEnd, # 'A'\n BYearBegin, # 'BAS'\n BYearEnd, # 'BA'\n BusinessDay, # 'B'\n BusinessMonthBegin, # 'BMS'\n BusinessMonthEnd, # 'BM'\n BQuarterEnd, # 'BQ'\n BQuarterBegin, # 'BQS'\n CustomBusinessDay, # 'C'\n CustomBusinessMonthEnd, # 'CBM'\n CustomBusinessMonthBegin, # 'CBMS'\n MonthEnd, # 'M'\n MonthBegin, # 'MS'\n Week, # 'W'\n Second, # 'S'\n Minute, # 'T'\n Micro, # 'U'\n QuarterEnd, # 'Q'\n QuarterBegin, # 'QS'\n Milli, # 'L'\n Hour, # 'H'\n Day, # 'D'\n WeekOfMonth, # 'WOM'\n FY5253,\n FY5253Quarter,\n])\n\nif not 
_np_version_under1p7:\n # Only 1.7+ supports nanosecond resolution\n prefix_mapping['N'] = Nano\n\n\ndef _make_offset(key):\n \"\"\"Gets offset based on key. KeyError if prefix is bad, ValueError if\n suffix is bad. All handled by `get_offset` in tseries/frequencies. Not\n public.\"\"\"\n if key is None:\n return None\n split = key.replace('@', '-').split('-')\n klass = prefix_mapping[split[0]]\n # handles case where there's no suffix (and will TypeError if too many '-')\n obj = klass._from_name(*split[1:])\n obj._named = key\n return obj\n"
] | [
[
"pandas.to_datetime",
"pandas.Series",
"pandas.isnull",
"pandas.offsets.Day",
"pandas.Timestamp",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.util.testing.assert_almost_equal",
"pandas.tseries.timedeltas._coerce_scalar_to_timedelta_type",
"numpy.timedelta64",
"pandas.util.testing.assert_frame_equal",
"pandas.DataFrame",
"pandas.to_timedelta",
"pandas.offsets.Second",
"numpy.array",
"pandas.compat.range"
],
[
"numpy.busdaycalendar",
"pandas.tseries.frequencies.to_offset",
"pandas.tseries.frequencies.get_offset",
"pandas.tslib.Timestamp",
"numpy.datetime64",
"numpy.is_busday",
"numpy.timedelta64",
"numpy.busday_offset",
"pandas.tslib.monthrange",
"pandas.tseries.tools.to_datetime",
"pandas.compat.range"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
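A minimal usage sketch for the offset classes in the record above. This is illustrative only: it assumes the module is importable as pandas.tseries.offsets and a pandas version contemporary with this code (~0.13), where onOffset/rollforward were the public spellings; the dates are arbitrary.

from datetime import datetime
from pandas.tseries.offsets import BDay, Week, CustomBusinessDay, FY5253
from pandas.tseries.offsets import generate_range

sat = datetime(2014, 1, 4)           # a Saturday, so not a business day
print(BDay().onOffset(sat))          # False: weekends are off-offset
print(BDay().rollforward(sat))       # rolls to Monday 2014-01-06
print(sat + 2 * BDay())              # steps over the weekend to Tuesday 2014-01-07

# CustomBusinessDay additionally skips user-supplied holidays
cbd = CustomBusinessDay(holidays=[datetime(2014, 1, 6)])
print(sat + cbd)                     # Monday is a holiday, so 2014-01-07

print(Week(weekday=0).rollforward(sat))   # next Monday, 2014-01-06

# a 52-53 week fiscal year ending on the Saturday nearest the end of August
fy = FY5253(weekday=5, startingMonth=8, variation="nearest")
print(fy.rollforward(sat))           # 2014-08-30, the Saturday nearest 2014-08-31

# generate_range defaults to BDay(): three consecutive business days
print(list(generate_range(start=datetime(2014, 1, 1), periods=3)))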
233xw/Neural-Networks-and-Deep-Learning | [
"e6e0519d41ca3dc02da652e43fc799f736b60a2f"
] | [
"src/mnist_loader.py"
] | [
"\"\"\"\nmnist_loader\n~~~~~~~~~~~~\n\nA library to load the MNIST image data. For details of the data\nstructures that are returned, see the doc strings for ``load_data``\nand ``load_data_wrapper``. In practice, ``load_data_wrapper`` is the\nfunction usually called by our neural network code.\n\"\"\"\n\n#### Libraries\n# Standard library\nimport pickle\nimport gzip\n\n# Third-party libraries\nimport numpy as np\n\ndef load_data():\n \"\"\"Return the MNIST data as a tuple containing the training data,\n the validation data, and the test data.\n\n The ``training_data`` is returned as a tuple with two entries.\n The first entry contains the actual training images. This is a\n numpy ndarray with 50,000 entries. Each entry is, in turn, a\n numpy ndarray with 784 values, representing the 28 * 28 = 784\n pixels in a single MNIST image.\n\n The second entry in the ``training_data`` tuple is a numpy ndarray\n containing 50,000 entries. Those entries are just the digit\n values (0...9) for the corresponding images contained in the first\n entry of the tuple.\n\n The ``validation_data`` and ``test_data`` are similar, except\n each contains only 10,000 images.\n\n This is a nice data format, but for use in neural networks it's\n helpful to modify the format of the ``training_data`` a little.\n That's done in the wrapper function ``load_data_wrapper()``, see\n below.\n \"\"\"\n f = gzip.open('../data/mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding='latin1')\n f.close()\n return (training_data, validation_data, test_data)\n\ndef load_data_wrapper():\n \"\"\"Return a tuple containing ``(training_data, validation_data,\n test_data)``. Based on ``load_data``, but the format is more\n convenient for use in our implementation of neural networks.\n\n In particular, ``training_data`` is a list containing 50,000\n 2-tuples ``(x, y)``. ``x`` is a 784-dimensional numpy.ndarray\n containing the input image. ``y`` is a 10-dimensional\n numpy.ndarray representing the unit vector corresponding to the\n correct digit for ``x``.\n\n ``validation_data`` and ``test_data`` are lists containing 10,000\n 2-tuples ``(x, y)``. In each case, ``x`` is a 784-dimensional\n numpy.ndarry containing the input image, and ``y`` is the\n corresponding classification, i.e., the digit values (integers)\n corresponding to ``x``.\n\n Obviously, this means we're using slightly different formats for\n the training data and the validation / test data. These formats\n turn out to be the most convenient for use in our neural network\n code.\"\"\"\n tr_d, va_d, te_d = load_data()\n training_inputs = [np.reshape(x, (784, 1)) for x in tr_d[0]]\n training_results = [vectorized_result(y) for y in tr_d[1]]\n #training_data = zip(training_inputs, training_results)\n training_data = list(zip(training_inputs, training_results))\n validation_inputs = [np.reshape(x, (784, 1)) for x in va_d[0]]\n #validation_data = zip(validation_inputs, va_d[1])\n validation_data = list(zip(validation_inputs, va_d[1]))\n test_inputs = [np.reshape(x, (784, 1)) for x in te_d[0]]\n test_data = zip(test_inputs, te_d[1])\n test_data = list(zip(test_inputs, te_d[1]))\n return (training_data, validation_data, test_data)\n\ndef vectorized_result(j):\n \"\"\"Return a 10-dimensional unit vector with a 1.0 in the jth\n position and zeroes elsewhere. This is used to convert a digit\n (0...9) into a corresponding desired output from the neural\n network.\"\"\"\n e = np.zeros((10, 1))\n e[j] = 1.0\n return e\n"
] | [
[
"numpy.reshape",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
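A short usage sketch for the loader in the record above, grounded in its docstrings. It assumes mnist.pkl.gz sits at the relative path the module hard-codes (../data/) and that the file is importable as mnist_loader.

import numpy as np
import mnist_loader  # the module shown above

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
print(len(training_data), len(validation_data), len(test_data))  # 50000 10000 10000

x, y = training_data[0]
print(x.shape, y.shape)      # (784, 1) image column vector, (10, 1) one-hot label
print(int(np.argmax(y)))     # the digit 0-9 encoded by the one-hot vector

vx, vy = validation_data[0]  # validation/test labels stay plain integers
print(vx.shape, vy)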
caisarl76/alfred | [
"b73bdc1651e14c02440938b639fa3c7f3ab3d321"
] | [
"models/eval/eval.py"
] | [
"import json\nimport pprint\nimport random\nimport time\nimport torch\nimport torch.multiprocessing as mp\nfrom models.nn.resnet import Resnet\nfrom data.preprocess import Dataset\nfrom importlib import import_module\n\nclass Eval(object):\n\n # tokens\n STOP_TOKEN = \"<<stop>>\"\n SEQ_TOKEN = \"<<seg>>\"\n TERMINAL_TOKENS = [STOP_TOKEN, SEQ_TOKEN]\n\n def __init__(self, args, manager):\n # args and manager\n self.args = args\n self.manager = manager\n\n # load splits\n with open(self.args.splits) as f:\n self.splits = json.load(f)\n pprint.pprint({k: len(v) for k, v in self.splits.items()})\n\n # load model\n print(\"Loading: \", self.args.model_path)\n M = import_module(self.args.model)\n self.model, optimizer = M.Module.load(self.args.model_path)\n self.model.share_memory()\n self.model.eval()\n\n # updated args\n self.model.args.dout = self.args.model_path.replace(self.args.model_path.split('/')[-1], '')\n self.model.args.data = self.args.data if self.args.data else self.model.args.data\n\n # preprocess and save\n if args.preprocess:\n print(\"\\nPreprocessing dataset and saving to %s folders ... This is will take a while. Do this once as required:\" % self.model.args.pp_folder)\n self.model.args.fast_epoch = self.args.fast_epoch\n dataset = Dataset(self.model.args, self.model.vocab)\n dataset.preprocess_splits(self.splits)\n\n # load resnet\n args.visual_model = 'resnet18'\n self.resnet = Resnet(args, eval=True, share_memory=True, use_conv_feat=True)\n\n # gpu\n if self.args.gpu:\n self.model = self.model.to(torch.device('cuda'))\n\n # success and failure lists\n self.create_stats()\n\n # set random seed for shuffling\n random.seed(int(time.time()))\n\n def queue_tasks(self):\n '''\n create queue of trajectories to be evaluated\n '''\n task_queue = self.manager.Queue()\n files = self.splits[self.args.eval_split]\n\n # debugging: fast epoch\n if self.args.fast_epoch:\n files = files[:16]\n\n if self.args.shuffle:\n random.shuffle(files)\n for traj in files:\n task_queue.put(traj)\n return task_queue\n\n def spawn_threads(self):\n '''\n spawn multiple threads to run eval in parallel\n '''\n task_queue = self.queue_tasks()\n\n # start threads\n threads = []\n lock = self.manager.Lock()\n for n in range(self.args.num_threads):\n thread = mp.Process(target=self.run, args=(self.model, self.resnet, task_queue, self.args, lock,\n self.successes, self.failures, self.results))\n thread.start()\n threads.append(thread)\n\n for t in threads:\n t.join()\n\n # save\n self.save_results()\n\n @classmethod\n def setup_scene(cls, env, traj_data, r_idx, args, reward_type='dense'):\n '''\n intialize the scene and agent from the task info\n '''\n # scene setup\n scene_num = traj_data['scene']['scene_num']\n object_poses = traj_data['scene']['object_poses']\n dirty_and_empty = traj_data['scene']['dirty_and_empty']\n object_toggles = traj_data['scene']['object_toggles']\n\n scene_name = 'FloorPlan%d' % scene_num\n env.reset(scene_name)\n env.restore_scene(object_poses, object_toggles, dirty_and_empty)\n\n # initialize to start position\n env.step(dict(traj_data['scene']['init_action']))\n\n # print goal instr\n print(\"Task: %s\" % (traj_data['turk_annotations']['anns'][r_idx]['task_desc']))\n\n # setup task for reward\n env.set_task(traj_data, args, reward_type=reward_type)\n\n @classmethod\n def run(cls, model, resnet, task_queue, args, lock, successes, failures):\n raise NotImplementedError()\n\n @classmethod\n def evaluate(cls, env, model, r_idx, resnet, traj_data, args, lock, successes, failures):\n 
raise NotImplementedError()\n\n def save_results(self):\n raise NotImplementedError()\n\n def create_stats(self):\n raise NotImplementedError()"
] | [
[
"torch.device",
"torch.multiprocessing.Process"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
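The queue_tasks/spawn_threads code in the record above is a standard manager-queue fan-out. Below is a minimal self-contained sketch of the same pattern, using stdlib multiprocessing instead of torch.multiprocessing and a squaring stand-in in place of the model-specific run(); all names here are illustrative.

import multiprocessing as mp
from queue import Empty

def worker(task_queue, lock, results):
    # drain the shared queue until empty, mimicking Eval.run above
    while True:
        try:
            task = task_queue.get(timeout=1)
        except Empty:
            break
        with lock:                        # guard the shared results, as Eval does
            results.append(task * task)   # stand-in for evaluate()

if __name__ == '__main__':
    manager = mp.Manager()
    task_queue = manager.Queue()
    lock = manager.Lock()
    results = manager.list()
    for t in range(8):
        task_queue.put(t)
    procs = [mp.Process(target=worker, args=(task_queue, lock, results))
             for _ in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(sorted(results))                # [0, 1, 4, 9, 16, 25, 36, 49]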
Speech-Lab-IITM/espnet-1 | [
"fc62b1ce3e50c5ef8a2ac8cedb0d92ac41df54ca",
"fc62b1ce3e50c5ef8a2ac8cedb0d92ac41df54ca"
] | [
"espnet/nets/chainer_backend/transformer/subsampling.py",
"espnet/nets/pytorch_backend/nets_utils.py"
] | [
"# encoding: utf-8\n\"\"\"Class Declaration of Transformer's Input layers.\"\"\"\n\nimport logging\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport numpy as np\n\nfrom espnet.nets.chainer_backend.transformer.embedding import PositionalEncoding\n\n\nclass Conv2dSubsampling(chainer.Chain):\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\n\n :param int idim: input dim\n :param int odim: output dim\n :param flaot dropout_rate: dropout rate\n\n \"\"\"\n\n def __init__(\n self, channels, idim, dims, dropout=0.1, initialW=None, initial_bias=None\n ):\n \"\"\"Initialize Conv2dSubsampling.\"\"\"\n super(Conv2dSubsampling, self).__init__()\n self.dropout = dropout\n with self.init_scope():\n # Standard deviation for Conv2D with 1 channel and kernel 3 x 3.\n n = 1 * 3 * 3\n stvd = 1.0 / np.sqrt(n)\n self.conv1 = L.Convolution2D(\n 1,\n channels,\n 3,\n stride=2,\n pad=1,\n initialW=initialW(scale=stvd),\n initial_bias=initial_bias(scale=stvd),\n )\n n = channels * 3 * 3\n stvd = 1.0 / np.sqrt(n)\n self.conv2 = L.Convolution2D(\n channels,\n channels,\n 3,\n stride=2,\n pad=1,\n initialW=initialW(scale=stvd),\n initial_bias=initial_bias(scale=stvd),\n )\n stvd = 1.0 / np.sqrt(dims)\n self.out = L.Linear(\n idim,\n dims,\n initialW=initialW(scale=stvd),\n initial_bias=initial_bias(scale=stvd),\n )\n self.pe = PositionalEncoding(dims, dropout)\n\n def forward(self, xs, ilens):\n \"\"\"Subsample x.\n\n :param chainer.Variable x: input tensor\n :return: subsampled x and mask\n\n \"\"\"\n xs = self.xp.array(xs[:, None])\n xs = F.relu(self.conv1(xs))\n xs = F.relu(self.conv2(xs))\n batch, _, length, _ = xs.shape\n xs = self.out(F.swapaxes(xs, 1, 2).reshape(batch * length, -1))\n xs = self.pe(xs.reshape(batch, length, -1))\n # change ilens accordingly\n ilens = np.ceil(np.array(ilens, dtype=np.float32) / 2).astype(np.int64)\n ilens = np.ceil(np.array(ilens, dtype=np.float32) / 2).astype(np.int64)\n return xs, ilens\n\n\nclass LinearSampling(chainer.Chain):\n \"\"\"Linear 1D subsampling.\n\n :param int idim: input dim\n :param int odim: output dim\n :param flaot dropout_rate: dropout rate\n\n \"\"\"\n\n def __init__(self, idim, dims, dropout=0.1, initialW=None, initial_bias=None):\n \"\"\"Initialize LinearSampling.\"\"\"\n super(LinearSampling, self).__init__()\n stvd = 1.0 / np.sqrt(dims)\n self.dropout = dropout\n with self.init_scope():\n self.linear = L.Linear(\n idim,\n dims,\n initialW=initialW(scale=stvd),\n initial_bias=initial_bias(scale=stvd),\n )\n self.pe = PositionalEncoding(dims, dropout)\n\n def forward(self, xs, ilens):\n \"\"\"Subsample x.\n\n :param chainer.Variable x: input tensor\n :return: subsampled x and mask\n\n \"\"\"\n logging.info(xs.shape)\n xs = self.linear(xs, n_batch_axes=2)\n logging.info(xs.shape)\n xs = self.pe(xs)\n return xs, ilens\n",
"# -*- coding: utf-8 -*-\n\n\"\"\"Network related utility tools.\"\"\"\n\nimport logging\nfrom typing import Dict\n\nimport numpy as np\nimport torch\n\n\ndef to_device(m, x):\n \"\"\"Send tensor into the device of the module.\n\n Args:\n m (torch.nn.Module): Torch module.\n x (Tensor): Torch tensor.\n\n Returns:\n Tensor: Torch tensor located in the same place as torch module.\n\n \"\"\"\n if isinstance(m, torch.nn.Module):\n device = next(m.parameters()).device\n elif isinstance(m, torch.Tensor):\n device = m.device\n else:\n raise TypeError(\n \"Expected torch.nn.Module or torch.tensor, \" f\"bot got: {type(m)}\"\n )\n return x.to(device)\n\n\ndef pad_list(xs, pad_value):\n \"\"\"Perform padding for the list of tensors.\n\n Args:\n xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].\n pad_value (float): Value for padding.\n\n Returns:\n Tensor: Padded tensor (B, Tmax, `*`).\n\n Examples:\n >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]\n >>> x\n [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]\n >>> pad_list(x, 0)\n tensor([[1., 1., 1., 1.],\n [1., 1., 0., 0.],\n [1., 0., 0., 0.]])\n\n \"\"\"\n n_batch = len(xs)\n max_len = max(x.size(0) for x in xs)\n pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)\n\n for i in range(n_batch):\n pad[i, : xs[i].size(0)] = xs[i]\n\n return pad\n\n\ndef make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):\n \"\"\"Make mask tensor containing indices of padded part.\n\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor.\n If set, masks will be the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor.\n See the example.\n\n Returns:\n Tensor: Mask tensor containing indices of padded part.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n Examples:\n With only lengths.\n\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n\n With the reference tensor.\n\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 1],\n [0, 0, 0, 1]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n With the reference tensor and dimension indicator.\n\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_pad_mask(lengths, xs, 1)\n tensor([[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)\n >>> make_pad_mask(lengths, xs, 2)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n 
\"\"\"\n if length_dim == 0:\n raise ValueError(\"length_dim cannot be 0: {}\".format(length_dim))\n\n if not isinstance(lengths, list):\n lengths = lengths.long().tolist()\n\n bs = int(len(lengths))\n if maxlen is None:\n if xs is None:\n maxlen = int(max(lengths))\n else:\n maxlen = xs.size(length_dim)\n else:\n assert xs is None\n assert maxlen >= int(max(lengths))\n\n seq_range = torch.arange(0, maxlen, dtype=torch.int64)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)\n seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n\n if xs is not None:\n assert xs.size(0) == bs, (xs.size(0), bs)\n\n if length_dim < 0:\n length_dim = xs.dim() + length_dim\n # ind = (:, None, ..., None, :, , None, ..., None)\n ind = tuple(\n slice(None) if i in (0, length_dim) else None for i in range(xs.dim())\n )\n mask = mask[ind].expand_as(xs).to(xs.device)\n return mask\n\n\ndef make_non_pad_mask(lengths, xs=None, length_dim=-1):\n \"\"\"Make mask tensor containing indices of non-padded part.\n\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor.\n If set, masks will be the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor.\n See the example.\n\n Returns:\n ByteTensor: mask tensor containing indices of padded part.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n Examples:\n With only lengths.\n\n >>> lengths = [5, 3, 2]\n >>> make_non_pad_mask(lengths)\n masks = [[1, 1, 1, 1 ,1],\n [1, 1, 1, 0, 0],\n [1, 1, 0, 0, 0]]\n\n With the reference tensor.\n\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_non_pad_mask(lengths, xs)\n tensor([[[1, 1, 1, 1],\n [1, 1, 1, 1]],\n [[1, 1, 1, 0],\n [1, 1, 1, 0]],\n [[1, 1, 0, 0],\n [1, 1, 0, 0]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_non_pad_mask(lengths, xs)\n tensor([[[1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0]],\n [[1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0]],\n [[1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)\n\n With the reference tensor and dimension indicator.\n\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_non_pad_mask(lengths, xs, 1)\n tensor([[[1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0]],\n [[1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n [[1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]]], dtype=torch.uint8)\n >>> make_non_pad_mask(lengths, xs, 2)\n tensor([[[1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 0]],\n [[1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0]],\n [[1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0],\n [1, 1, 0, 0, 0, 0]]], dtype=torch.uint8)\n\n \"\"\"\n return ~make_pad_mask(lengths, xs, length_dim)\n\n\ndef mask_by_length(xs, lengths, fill=0):\n \"\"\"Mask tensor according to length.\n\n Args:\n xs (Tensor): Batch of input tensor (B, `*`).\n lengths (LongTensor or List): Batch of lengths (B,).\n fill (int or float): Value to fill masked part.\n\n Returns:\n Tensor: Batch of masked input tensor (B, `*`).\n\n Examples:\n >>> x = 
torch.arange(5).repeat(3, 1) + 1\n >>> x\n tensor([[1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5],\n [1, 2, 3, 4, 5]])\n >>> lengths = [5, 3, 2]\n >>> mask_by_length(x, lengths)\n tensor([[1, 2, 3, 4, 5],\n [1, 2, 3, 0, 0],\n [1, 2, 0, 0, 0]])\n\n \"\"\"\n assert xs.size(0) == len(lengths)\n ret = xs.data.new(*xs.size()).fill_(fill)\n for i, l in enumerate(lengths):\n ret[i, :l] = xs[i, :l]\n return ret\n\n\ndef th_accuracy(pad_outputs, pad_targets, ignore_label):\n \"\"\"Calculate accuracy.\n\n Args:\n pad_outputs (Tensor): Prediction tensors (B * Lmax, D).\n pad_targets (LongTensor): Target label tensors (B, Lmax, D).\n ignore_label (int): Ignore label id.\n\n Returns:\n float: Accuracy value (0.0 - 1.0).\n\n \"\"\"\n pad_pred = pad_outputs.view(\n pad_targets.size(0), pad_targets.size(1), pad_outputs.size(1)\n ).argmax(2)\n mask = pad_targets != ignore_label\n numerator = torch.sum(\n pad_pred.masked_select(mask) == pad_targets.masked_select(mask)\n )\n denominator = torch.sum(mask)\n return float(numerator) / float(denominator)\n\n\ndef to_torch_tensor(x):\n \"\"\"Change to torch.Tensor or ComplexTensor from numpy.ndarray.\n\n Args:\n x: Inputs. It should be one of numpy.ndarray, Tensor, ComplexTensor, and dict.\n\n Returns:\n Tensor or ComplexTensor: Type converted inputs.\n\n Examples:\n >>> xs = np.ones(3, dtype=np.float32)\n >>> xs = to_torch_tensor(xs)\n tensor([1., 1., 1.])\n >>> xs = torch.ones(3, 4, 5)\n >>> assert to_torch_tensor(xs) is xs\n >>> xs = {'real': xs, 'imag': xs}\n >>> to_torch_tensor(xs)\n ComplexTensor(\n Real:\n tensor([1., 1., 1.])\n Imag;\n tensor([1., 1., 1.])\n )\n\n \"\"\"\n # If numpy, change to torch tensor\n if isinstance(x, np.ndarray):\n if x.dtype.kind == \"c\":\n # Dynamically importing because torch_complex requires python3\n from torch_complex.tensor import ComplexTensor\n\n return ComplexTensor(x)\n else:\n return torch.from_numpy(x)\n\n # If {'real': ..., 'imag': ...}, convert to ComplexTensor\n elif isinstance(x, dict):\n # Dynamically importing because torch_complex requires python3\n from torch_complex.tensor import ComplexTensor\n\n if \"real\" not in x or \"imag\" not in x:\n raise ValueError(\"has 'real' and 'imag' keys: {}\".format(list(x)))\n # Relative importing because of using python3 syntax\n return ComplexTensor(x[\"real\"], x[\"imag\"])\n\n # If torch.Tensor, as it is\n elif isinstance(x, torch.Tensor):\n return x\n\n else:\n error = (\n \"x must be numpy.ndarray, torch.Tensor or a dict like \"\n \"{{'real': torch.Tensor, 'imag': torch.Tensor}}, \"\n \"but got {}\".format(type(x))\n )\n try:\n from torch_complex.tensor import ComplexTensor\n except Exception:\n # If PY2\n raise ValueError(error)\n else:\n # If PY3\n if isinstance(x, ComplexTensor):\n return x\n else:\n raise ValueError(error)\n\n\ndef get_subsample(train_args, mode, arch):\n \"\"\"Parse the subsampling factors from the args for the specified `mode` and `arch`.\n\n Args:\n train_args: argument Namespace containing options.\n mode: one of ('asr', 'mt', 'st')\n arch: one of ('rnn', 'rnn-t', 'rnn_mix', 'rnn_mulenc', 'transformer')\n\n Returns:\n np.ndarray / List[np.ndarray]: subsampling factors.\n \"\"\"\n if arch == \"transformer\":\n return np.array([1])\n\n elif mode == \"mt\" and arch == \"rnn\":\n # +1 means input (+1) and layers outputs (train_args.elayer)\n subsample = np.ones(train_args.elayers + 1, dtype=np.int64)\n logging.warning(\"Subsampling is not performed for machine translation.\")\n logging.info(\"subsample: \" + \" \".join([str(x) for x in subsample]))\n return 
subsample\n\n elif (\n (mode == \"asr\" and arch in (\"rnn\", \"rnn-t\"))\n or (mode == \"mt\" and arch == \"rnn\")\n or (mode == \"st\" and arch == \"rnn\")\n ):\n subsample = np.ones(train_args.elayers + 1, dtype=np.int64)\n if train_args.etype.endswith(\"p\") and not train_args.etype.startswith(\"vgg\"):\n ss = train_args.subsample.split(\"_\")\n for j in range(min(train_args.elayers + 1, len(ss))):\n subsample[j] = int(ss[j])\n else:\n logging.warning(\n \"Subsampling is not performed for vgg*. \"\n \"It is performed in max pooling layers at CNN.\"\n )\n logging.info(\"subsample: \" + \" \".join([str(x) for x in subsample]))\n return subsample\n\n elif mode == \"asr\" and arch == \"rnn_mix\":\n subsample = np.ones(\n train_args.elayers_sd + train_args.elayers + 1, dtype=np.int64\n )\n if train_args.etype.endswith(\"p\") and not train_args.etype.startswith(\"vgg\"):\n ss = train_args.subsample.split(\"_\")\n for j in range(\n min(train_args.elayers_sd + train_args.elayers + 1, len(ss))\n ):\n subsample[j] = int(ss[j])\n else:\n logging.warning(\n \"Subsampling is not performed for vgg*. \"\n \"It is performed in max pooling layers at CNN.\"\n )\n logging.info(\"subsample: \" + \" \".join([str(x) for x in subsample]))\n return subsample\n\n elif mode == \"asr\" and arch == \"rnn_mulenc\":\n subsample_list = []\n for idx in range(train_args.num_encs):\n subsample = np.ones(train_args.elayers[idx] + 1, dtype=np.int64)\n if train_args.etype[idx].endswith(\"p\") and not train_args.etype[\n idx\n ].startswith(\"vgg\"):\n ss = train_args.subsample[idx].split(\"_\")\n for j in range(min(train_args.elayers[idx] + 1, len(ss))):\n subsample[j] = int(ss[j])\n else:\n logging.warning(\n \"Encoder %d: Subsampling is not performed for vgg*. \"\n \"It is performed in max pooling layers at CNN.\",\n idx + 1,\n )\n logging.info(\"subsample: \" + \" \".join([str(x) for x in subsample]))\n subsample_list.append(subsample)\n return subsample_list\n\n else:\n raise ValueError(\"Invalid options: mode={}, arch={}\".format(mode, arch))\n\n\ndef rename_state_dict(\n old_prefix: str, new_prefix: str, state_dict: Dict[str, torch.Tensor]\n):\n \"\"\"Replace keys of old prefix with new prefix in state dict.\"\"\"\n # need this list not to break the dict iterator\n old_keys = [k for k in state_dict if k.startswith(old_prefix)]\n if len(old_keys) > 0:\n logging.warning(f\"Rename: {old_prefix} -> {new_prefix}\")\n for k in old_keys:\n v = state_dict.pop(k)\n new_k = k.replace(old_prefix, new_prefix)\n state_dict[new_k] = v\n\n\ndef get_activation(act):\n \"\"\"Return activation function.\"\"\"\n # Lazy load to avoid unused import\n from espnet.nets.pytorch_backend.conformer.swish import Swish\n\n activation_funcs = {\n \"hardtanh\": torch.nn.Hardtanh,\n \"tanh\": torch.nn.Tanh,\n \"relu\": torch.nn.ReLU,\n \"selu\": torch.nn.SELU,\n \"swish\": Swish,\n }\n\n return activation_funcs[act]()\n"
] | [
[
"numpy.array",
"numpy.sqrt"
],
[
"torch.sum",
"torch.from_numpy",
"numpy.ones",
"torch.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
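The second file in the row above, `nets_utils.py`, builds its padding masks by broadcasting an `arange` over the batch lengths. A reduced sketch of that idea, covering only the lengths-only case of `make_pad_mask` (no reference tensor, default `length_dim`), is shown below; it is an illustration, not the ESPnet implementation.

```python
import torch

def pad_mask(lengths):
    # True where a position is padding, as in the docstring examples above.
    maxlen = int(max(lengths))
    seq_range = torch.arange(maxlen).unsqueeze(0)   # (1, maxlen)
    seq_len = torch.tensor(lengths).unsqueeze(-1)   # (B, 1)
    return seq_range >= seq_len                     # broadcast to (B, maxlen)

print(pad_mask([5, 3, 2]).int())
# tensor([[0, 0, 0, 0, 0],
#         [0, 0, 0, 1, 1],
#         [0, 0, 1, 1, 1]], dtype=torch.int32)
```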
Devanshu-singh-VR/FaceRecognition | [
"f596d1964f4f43174ffe5bac6d6437a7d22c3593",
"f596d1964f4f43174ffe5bac6d6437a7d22c3593"
] | [
"network.py",
"extractor_triplet.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport torchvision.models as models\r\n\r\nclass Siamese(nn.Module):\r\n def __init__(self, in_channels, out_size, dropout):\r\n super(Siamese, self).__init__()\r\n self.dropout = dropout\r\n self.dropout_layer = nn.Dropout(dropout)\r\n #self.model = models.vgg16(pretrained=False)\r\n #self.model.classifier[6] = nn.Linear(4096, 2148)\r\n #self.fc_out = nn.Linear(2148, out_size)\r\n self.model = self.cnn(in_channels)\r\n self.fc = nn.Linear(256*8*8, 300)\r\n self.fc_out = nn.Linear(300, out_size)\r\n\r\n def cnn(self, in_channels):\r\n model = nn.Sequential(\r\n nn.Conv2d(in_channels, 32, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(32),\r\n nn.Dropout(self.dropout),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n\r\n nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(64),\r\n nn.Dropout(self.dropout),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n\r\n nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(128),\r\n nn.Dropout(self.dropout),\r\n nn.MaxPool2d(kernel_size=2, stride=2),\r\n\r\n nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),\r\n nn.BatchNorm2d(256),\r\n nn.Dropout(self.dropout),\r\n nn.MaxPool2d(kernel_size=2, stride=2)\r\n )\r\n return model\r\n\r\n def forward(self, x):\r\n batch_size = x.shape[0]\r\n x = self.model(x).reshape(batch_size, -1)\r\n x = self.dropout_layer(self.fc(x))\r\n x = self.fc_out(x)\r\n #x = self.model(x)\r\n #x = self.fc_out(x)\r\n return x\r\n\r\nif __name__ == '__main__':\r\n model = Siamese(3, 100, 0.1)\r\n x = torch.ones((64, 3, 128, 128))\r\n print(model(x).shape)",
"import pandas as pd\r\n\r\np = pd.read_csv('train.csv')\r\n\r\npath1 = p.iloc[:, 0]\r\npath2 = p.iloc[:, 1]\r\nlabel = p.iloc[:, 2]\r\n\r\nmat = []\r\nfor i in range(1, 12000):\r\n if label[i] == 1:\r\n gul = set()\r\n for j in range(i, 40+i):\r\n try:\r\n if path2[j].split('/')[1] != path1[i].split('/')[1] and path2[j] not in gul:\r\n mat.append([path1[i], path2[i], path2[j]])\r\n gul.add(path2[j])\r\n except:\r\n continue\r\n\r\ndf = pd.DataFrame(mat, columns=['face1', 'face2', 'face3'])\r\ndf.to_csv('train_triplet.csv', index=False)"
] | [
[
"torch.nn.Dropout",
"torch.ones",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d"
],
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
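The `Siamese` network in the row above flattens into `nn.Linear(256*8*8, 300)`, which only works because four stride-2 max-pools reduce a 128x128 input to 8x8 (128 -> 64 -> 32 -> 16 -> 8, so 256 * 8 * 8 = 16384 features). A toy shape check of that arithmetic, not the repo's model, follows.

```python
import torch
import torch.nn as nn

def block(cin, cout):
    # conv keeps spatial size (k=3, p=1); the pool halves it.
    return nn.Sequential(
        nn.Conv2d(cin, cout, 3, stride=1, padding=1),
        nn.BatchNorm2d(cout),
        nn.MaxPool2d(kernel_size=2, stride=2),
    )

cnn = nn.Sequential(block(3, 32), block(32, 64), block(64, 128), block(128, 256))
x = torch.ones((4, 3, 128, 128))
print(cnn(x).shape)  # torch.Size([4, 256, 8, 8]) -> 16384 when flattened
```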
karlhthompson/cwrc | [
"9fadf113712908c1bfa73bf8a696108a846e4677",
"9fadf113712908c1bfa73bf8a696108a846e4677",
"9fadf113712908c1bfa73bf8a696108a846e4677"
] | [
"rltools/rltools/distributions.py",
"rllab/rllab/distributions/recurrent_categorical.py",
"rllab/rllab/spaces/box.py"
] | [
"import numpy as np\nimport tensorflow as tf\n\nfrom rltools.rltools import tfutil\nfrom rltools.rltools import util\n\nTINY = 1e-10\n\n\nclass Distribution(object):\n\n @property\n def dim(self):\n raise NotImplementedError()\n\n def kl(self, old, new):\n raise NotImplementedError()\n\n def log_density(self, dist_params, x):\n raise NotImplementedError()\n\n def entropy(self, logprobs_N_K):\n raise NotImplementedError()\n\n def sample(self, logprobs_N_K):\n raise NotImplementedError()\n\n def kl_expr(self, logprobs1, logprobs2):\n raise NotImplementedError()\n\n def log_density_expr(self, dist_params, x):\n raise NotImplementedError()\n\n\nclass Categorical(Distribution):\n\n def __init__(self, dim):\n self._dim = dim\n\n @property\n def dim(self):\n return self._dim\n\n def log_density(self, dist_params_B_A, x_B_A):\n return util.lookup_last_idx(dist_params_B_A, x_B_A)\n\n def entropy(self, probs_N_K):\n tmp = -probs_N_K * np.log(probs_N_K + TINY)\n tmp[~np.isfinite(tmp)] = 0\n return tmp.sum(axis=1)\n\n def sample(self, probs_N_K):\n \"\"\"Sample from N categorical distributions, each over K outcomes\"\"\"\n N, K = probs_N_K.shape\n return np.array([np.random.choice(K, p=probs_N_K[i, :]) for i in range(N)])\n\n def kl_expr(self, logprobs1_B_A, logprobs2_B_A, name=None):\n \"\"\"KL divergence between categorical distributions, specified as log probabilities\"\"\"\n with tf.op_scope([logprobs1_B_A, logprobs2_B_A], name, 'categorical_kl') as scope:\n kl_B = tf.reduce_sum(\n tf.exp(logprobs1_B_A) * (logprobs1_B_A - logprobs2_B_A), 1, name=scope)\n return kl_B\n\n def log_density_expr(self, dist_params_B_A, x_B_A):\n \"\"\"Log density from categorical distribution params\"\"\"\n return tfutil.lookup_last_idx(dist_params_B_A, x_B_A)\n\n\nclass RecurrentCategorical(Distribution):\n\n def __init__(self, dim):\n self._dim = dim\n self._cat = Categorical(dim)\n\n @property\n def dim(self):\n return self._dim\n\n def log_density(self, dist_params_B_H_A, x_B_H_A):\n adim = dist_params_B_H_A.shape[-1]\n flat_logd = self._cat.log_density(\n dist_params_B_H_A.reshape((-1, adim)), x_B_H_A.reshape((-1, adim)))\n return flat_logd.reshape(dist_params_B_H_A.shape)\n\n def entropy(self, probs_N_H_K):\n tmp = -probs_N_H_K * np.log(probs_N_H_K + TINY)\n tmp[~np.isfinite(tmp)] = 0\n return tmp.sum(axis=-1)\n\n def sample(self, probs_N_K):\n \"\"\"Sample from N categorical distributions, each over K outcomes\"\"\"\n return self._cat.sample(probs_N_K)\n\n def kl_expr(self, logprobs1_B_H_A, logprobs2_B_H_A, name=None):\n \"\"\"KL divergence between categorical distributions, specified as log probabilities\"\"\"\n with tf.op_scope([logprobs1_B_H_A, logprobs2_B_H_A], name, 'categorical_kl') as scope:\n kl_B_H = tf.reduce_sum(\n tf.exp(logprobs1_B_H_A) * (logprobs1_B_H_A - logprobs2_B_H_A), 2, name=scope)\n return kl_B_H\n\n def log_density_expr(self, dist_params_B_H_A, x_B_H_A):\n adim = tf.shape(dist_params_B_H_A)[len(dist_params_B_H_A.get_shape()) - 1]\n flat_logd = self._cat.log_density_expr(\n tf.reshape(dist_params_B_H_A, tf.pack([-1, adim])),\n tf.reshape(x_B_H_A, tf.pack([-1, adim])))\n return tf.reshape(flat_logd, tf.shape(dist_params_B_H_A)[:2])\n\n\nclass Gaussian(Distribution):\n\n def __init__(self, dim):\n self._dim = dim\n\n @property\n def dim(self):\n return self._dim\n\n def entropy(self, stdevs):\n d = stdevs.shape[-1]\n return .5 * d * (1. + np.log(2. 
* np.pi)) + np.log(stdevs).sum(axis=-1)\n\n def kl_expr(self, means1_stdevs1, means2_stdevs2, name=None):\n \"\"\"KL divergence wbw diagonal covariant gaussians\"\"\"\n means1, stdevs1 = means1_stdevs1\n means2, stdevs2 = means2_stdevs2\n with tf.op_scope([means1, stdevs1, means2, stdevs2], name, 'gaussian_kl') as scope:\n D = tf.shape(means1)[len(means1.get_shape()) - 1]\n kl = tf.mul(.5, (tf.reduce_sum(tf.square(stdevs1 / stdevs2), -1) + tf.reduce_sum(\n tf.square((means2 - means1) / stdevs2), -1) + 2. * (tf.reduce_sum(\n tf.log(stdevs2), -1) - tf.reduce_sum(tf.log(stdevs1), -1)) - tf.to_float(D)),\n name=scope)\n return kl\n\n def log_density_expr(self, means, stdevs, x, name=None):\n \"\"\"Log density of diagonal gauss\"\"\"\n with tf.op_scope([means, stdevs, x], name, 'gauss_log_density') as scope:\n D = tf.shape(means)[len(means.get_shape()) - 1]\n lognormconsts = -.5 * tf.to_float(D) * np.log(2. * np.pi) + 2. * tf.reduce_sum(\n tf.log(stdevs), -1) # log norm consts\n logprobs = tf.add(-.5 * tf.reduce_sum(tf.square((x - means) / stdevs), -1),\n lognormconsts, name=scope)\n return logprobs\n\n\nRecurrentGaussian = Gaussian\n",
"import theano.tensor as TT\nimport numpy as np\nimport theano\nfrom rllab.rllab.distributions.categorical import Categorical\nfrom rllab.rllab.distributions.base import Distribution\n\nTINY = 1e-8\n\n\nclass RecurrentCategorical(Distribution):\n def __init__(self, dim):\n self._cat = Categorical(dim)\n self._dim = dim\n\n @property\n def dim(self):\n return self._dim\n\n def kl_sym(self, old_dist_info_vars, new_dist_info_vars):\n \"\"\"\n Compute the symbolic KL divergence of two categorical distributions\n \"\"\"\n old_prob_var = old_dist_info_vars[\"prob\"]\n new_prob_var = new_dist_info_vars[\"prob\"]\n # Assume layout is N * T * A\n return TT.sum(\n old_prob_var * (TT.log(old_prob_var + TINY) - TT.log(new_prob_var + TINY)),\n axis=2\n )\n\n def kl(self, old_dist_info, new_dist_info):\n \"\"\"\n Compute the KL divergence of two categorical distributions\n \"\"\"\n old_prob = old_dist_info[\"prob\"]\n new_prob = new_dist_info[\"prob\"]\n return np.sum(\n old_prob * (np.log(old_prob + TINY) - np.log(new_prob + TINY)),\n axis=2\n )\n\n def likelihood_ratio_sym(self, x_var, old_dist_info_vars, new_dist_info_vars):\n old_prob_var = old_dist_info_vars[\"prob\"]\n new_prob_var = new_dist_info_vars[\"prob\"]\n # Assume layout is N * T * A\n a_dim = x_var.shape[-1]\n flat_ratios = self._cat.likelihood_ratio_sym(\n x_var.reshape((-1, a_dim)),\n dict(prob=old_prob_var.reshape((-1, a_dim))),\n dict(prob=new_prob_var.reshape((-1, a_dim)))\n )\n return flat_ratios.reshape(old_prob_var.shape[:2])\n\n def entropy(self, dist_info):\n probs = dist_info[\"prob\"]\n return -np.sum(probs * np.log(probs + TINY), axis=2)\n\n def log_likelihood_sym(self, xs, dist_info_vars):\n probs = dist_info_vars[\"prob\"]\n # Assume layout is N * T * A\n a_dim = probs.shape[-1]\n # a_dim = TT.printing.Print(\"lala\")(a_dim)\n flat_logli = self._cat.log_likelihood_sym(xs.reshape((-1, a_dim)), dict(prob=probs.reshape((-1, a_dim))))\n return flat_logli.reshape(probs.shape[:2])\n\n def log_likelihood(self, xs, dist_info):\n probs = dist_info[\"prob\"]\n # Assume layout is N * T * A\n a_dim = probs.shape[-1]\n flat_logli = self._cat.log_likelihood_sym(xs.reshape((-1, a_dim)), dict(prob=probs.reshape((-1, a_dim))))\n return flat_logli.reshape(probs.shape[:2])\n\n @property\n def dist_info_keys(self):\n return [\"prob\"]\n",
"from rllab.rllab.core.serializable import Serializable\nfrom .base import Space\nimport numpy as np\nfrom rllab.rllab.misc import ext\nimport theano\n\n\nclass Box(Space):\n \"\"\"\n A box in R^n.\n I.e., each coordinate is bounded.\n \"\"\"\n\n def __init__(self, low, high, shape=None):\n \"\"\"\n Two kinds of valid input:\n Box(-1.0, 1.0, (3,4)) # low and high are scalars, and shape is provided\n Box(np.array([-1.0,-2.0]), np.array([2.0,4.0])) # low and high are arrays of the same shape\n \"\"\"\n if shape is None:\n assert low.shape == high.shape\n self.low = low\n self.high = high\n else:\n assert np.isscalar(low) and np.isscalar(high)\n self.low = low + np.zeros(shape)\n self.high = high + np.zeros(shape)\n\n def sample(self):\n return np.random.uniform(low=self.low, high=self.high, size=self.low.shape)\n\n def contains(self, x):\n return x.shape == self.shape and (x >= self.low).all() and (x <= self.high).all()\n\n @property\n def shape(self):\n return self.low.shape\n\n @property\n def flat_dim(self):\n return np.prod(self.low.shape)\n\n @property\n def bounds(self):\n return self.low, self.high\n\n def flatten(self, x):\n return np.asarray(x).flatten()\n\n def unflatten(self, x):\n return np.asarray(x).reshape(self.shape)\n\n def flatten_n(self, xs):\n xs = np.asarray(xs)\n return xs.reshape((xs.shape[0], -1))\n\n def unflatten_n(self, xs):\n xs = np.asarray(xs)\n return xs.reshape((xs.shape[0],) + self.shape)\n\n def __repr__(self):\n return \"Box\" + str(self.shape)\n\n def __eq__(self, other):\n return isinstance(other, Box) and np.allclose(self.low, other.low) and \\\n np.allclose(self.high, other.high)\n\n def __hash__(self):\n return hash((self.low, self.high))\n\n def new_tensor_variable(self, name, extra_dims):\n return ext.new_tensor(\n name=name,\n ndim=extra_dims+1,\n dtype=theano.config.floatX\n )\n\n"
] | [
[
"numpy.log",
"numpy.isfinite",
"numpy.random.choice",
"tensorflow.shape",
"tensorflow.exp",
"tensorflow.op_scope",
"tensorflow.to_float",
"tensorflow.log",
"tensorflow.square",
"tensorflow.pack"
],
[
"numpy.log"
],
[
"numpy.allclose",
"numpy.asarray",
"numpy.isscalar",
"numpy.prod",
"numpy.random.uniform",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"0.12",
"1.7"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
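The `rltools` distributions file in the row above pairs a numerically guarded entropy with per-row `np.random.choice` sampling for categorical distributions. A numpy-only sketch of that pair, mirroring `Categorical.entropy` and `Categorical.sample` with the same `TINY` guard, is given below.

```python
import numpy as np

TINY = 1e-10

def entropy(probs_N_K):
    # -sum_k p log p, with log(0) products zeroed out.
    tmp = -probs_N_K * np.log(probs_N_K + TINY)
    tmp[~np.isfinite(tmp)] = 0
    return tmp.sum(axis=1)

def sample(probs_N_K):
    # One draw per row: N categorical distributions over K outcomes.
    N, K = probs_N_K.shape
    return np.array([np.random.choice(K, p=probs_N_K[i]) for i in range(N)])

p = np.array([[0.5, 0.5], [1.0, 0.0]])
print(entropy(p))  # [~0.693, ~0.0]
print(sample(p))   # e.g. [1 0]; the second row always yields 0
```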
jzjonah/apollo | [
"df9113ae656e28e5374db32529d68e59455058a0"
] | [
"modules/tools/vehicle_calibration/process.py"
] | [
"#!/usr/bin/env python3\n\n###############################################################################\n# Copyright 2017 The Apollo Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\"\"\"\nThis module provide function to plot the speed control info from log csv file\n\"\"\"\n\nimport math\nimport warnings\n\nimport numpy as np\nimport scipy.signal as signal\n\n\nwarnings.simplefilter('ignore', np.RankWarning)\n\nSPEED_INTERVAL = 0.2\nSPEED_DELAY = 130 # Speed report delay relative to IMU information\n\n\ndef preprocess(filename):\n data = np.genfromtxt(filename, delimiter=',', names=True)\n data = data[np.where(data['io'] == 0)[0]]\n data = data[np.argsort(data['time'])]\n data['time'] = data['time'] - data['time'][get_start_index(data)]\n\n b, a = signal.butter(6, 0.05, 'low')\n data['imu'] = signal.filtfilt(b, a, data['imu'])\n\n data['imu'] = np.append(data['imu'][-SPEED_DELAY // 10:],\n data['imu'][0:-SPEED_DELAY // 10])\n return data\n\n\ndef get_start_index(data):\n if np.all(data['vehicle_speed'] == 0):\n return 0\n\n start_ind = np.where(data['brake_percentage'] == 40)\n\n if len(start_ind[0] > 0):\n ind = start_ind[0][0]\n while ind < len(data):\n if data['brake_percentage'][ind] == 40:\n ind += 1\n else:\n break\n return ind\n else:\n ind = 0\n while ind < len(data):\n if abs(data['vehicle_speed'][ind]) < 0.01:\n ind += 1\n else:\n break\n return ind\n\n\ndef process(data):\n \"\"\"\n process data\n \"\"\"\n np.set_printoptions(precision=3)\n\n if np.all(data['vehicle_speed'] == 0):\n print(\"All Speed = 0\")\n return [], [], [], [], [], []\n\n start_index = get_start_index(data)\n\n # print \"Start index: \", start_index\n data = data[start_index:]\n data['time'] = data['time'] - data['time'][0]\n\n transition = np.where(\n np.logical_or(\n np.diff(data['ctlbrake']) != 0, np.diff(data['ctlthrottle']) != 0))[\n 0]\n transition = np.insert(np.append(transition, len(data) - 1), 0, 0)\n # print \"Transition indexes: \", transition\n\n speedsegments = []\n timesegments = []\n accsegments = []\n tablespeed = []\n tableacc = []\n tablecmd = []\n\n for i in range(len(transition) - 1):\n # print \"process transition index:\", data['time'][transition[i]], \":\", data['time'][transition[i + 1]]\n speedsection = data['vehicle_speed'][transition[i]:transition[i +\n 1] + 1]\n timesection = data['time'][transition[i]:transition[i + 1] + 1]\n brake = data['ctlbrake'][transition[i] + 1]\n throttle = data['ctlthrottle'][transition[i] + 1]\n imusection = data['imu'][transition[i]:transition[i + 1] + 1]\n if brake == 0 and throttle == 0:\n continue\n # print \"Brake CMD: \", brake, \" Throttle CMD: \", throttle\n firstindex = 0\n\n while speedsection[firstindex] == 0:\n firstindex += 1\n firstindex = max(firstindex - 2, 0)\n speedsection = speedsection[firstindex:]\n timesection = timesection[firstindex:]\n imusection = imusection[firstindex:]\n\n if speedsection[0] < 
speedsection[-1]:\n is_increase = True\n lastindex = np.argmax(speedsection)\n else:\n is_increase = False\n lastindex = np.argmin(speedsection)\n\n speedsection = speedsection[0:lastindex + 1]\n timesection = timesection[0:lastindex + 1]\n imusection = imusection[0:lastindex + 1]\n\n speedmin = np.min(speedsection)\n speedmax = np.max(speedsection)\n speedrange = np.arange(\n max(0, round(speedmin / SPEED_INTERVAL) * SPEED_INTERVAL),\n min(speedmax, 10.01), SPEED_INTERVAL)\n # print \"Speed min, max\", speedmin, speedmax, is_increase, firstindex, lastindex, speedsection[-1]\n accvalue = []\n for value in speedrange:\n val_ind = 0\n if is_increase:\n while val_ind < len(\n speedsection) - 1 and value > speedsection[val_ind]:\n val_ind += 1\n else:\n while val_ind < len(\n speedsection) - 1 and value < speedsection[val_ind]:\n val_ind += 1\n if val_ind == 0:\n imu_value = imusection[val_ind]\n else:\n slope = (imusection[val_ind] - imusection[val_ind - 1]) / (\n speedsection[val_ind] - speedsection[val_ind - 1])\n imu_value = imusection[val_ind - 1] + slope * (\n value - speedsection[val_ind - 1])\n accvalue.append(imu_value)\n\n if brake == 0:\n cmd = throttle\n else:\n cmd = -brake\n # print \"Overall CMD: \", cmd\n # print \"Time: \", timesection\n # print \"Speed: \", speedrange\n # print \"Acc: \", accvalue\n # print cmd\n tablecmd.append(cmd)\n tablespeed.append(speedrange)\n tableacc.append(accvalue)\n\n speedsegments.append(speedsection)\n accsegments.append(imusection)\n timesegments.append(timesection)\n\n return tablecmd, tablespeed, tableacc, speedsegments, accsegments, timesegments\n"
] | [
[
"scipy.signal.filtfilt",
"numpy.min",
"numpy.set_printoptions",
"numpy.genfromtxt",
"numpy.all",
"numpy.max",
"scipy.signal.butter",
"numpy.append",
"numpy.argmax",
"numpy.argmin",
"numpy.diff",
"numpy.argsort",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
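The Apollo calibration script above smooths the IMU channel with a zero-phase 6th-order Butterworth low-pass (`Wn=0.05`) and then rolls the array by `SPEED_DELAY // 10` samples to compensate the speed-report delay. A sketch of both steps on synthetic data, standing in for the log CSV, follows.

```python
import numpy as np
from scipy import signal

t = np.linspace(0, 10, 1000)
imu = np.sin(t) + 0.3 * np.random.randn(t.size)  # noisy acceleration trace

# Zero-phase low-pass filtering, as in preprocess() above.
b, a = signal.butter(6, 0.05, 'low')
smooth = signal.filtfilt(b, a, imu)

# Shift by the report delay: 130 ms at a 10 ms sample period -> 13 samples.
delay_samples = 13
aligned = np.append(smooth[-delay_samples:], smooth[:-delay_samples])
print(aligned.shape)  # (1000,)
```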
preisach/animations | [
"07dc9598eee7ed3f73e35b99dd6c44c525a976d5"
] | [
"samples/samplesPY/arch/io_plots/ioPairs.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 19 14:34:30 2019\n\n@author: m\n\"\"\"\n\nfrom matplotlib import pyplot as plt\n\n# pairs = [ [ [ 0, 0 ], [ 0.4, 0 ] ], [ [ 0.2, 0.16666666666666666 ], [ 0.6000000000000001, 0.16666666666666666 ] ], [ [ 0.2, 0.3333333333333333 ], [ 0.6000000000000001, 0.3333333333333333 ] ], [ [ 0.4, 0.5 ], [ 0.8, 0.5 ] ], [ [ 0.2, 0.5 ], [ 0.6000000000000001, 0.5 ] ], [ [ 0.4, 0.6666666666666666 ], [ 0.8, 0.6666666666666666 ] ], [ [ 0.4, 0.8333333333333333 ], [ 0.8, 0.8333333333333333 ] ], [ [ 0.6000000000000001, 0.9999999999999999 ], [ 1, 0.9999999999999999 ] ] ]\n# pairs = [ [ [ 0, 0 ], [ 0.3333333333333333, 0 ] ],\n# [ [ 0.16666666666666666, 0.1 ], [ 0.5, 0.1 ] ],\n# [ [ 0.16666666666666666, 0.2 ], [ 0.5, 0.2 ] ],\n# [ [ 0.3333333333333333, 0.30000000000000004 ],\n# [ 0.6666666666666666, 0.30000000000000004 ] ],\n# [ [ 0.16666666666666666, 0.30000000000000004 ],\n# [ 0.5, 0.30000000000000004 ] ],\n# [ [ 0.3333333333333333, 0.4 ], [ 0.6666666666666666, 0.4 ] ],\n# [ [ 0.3333333333333333, 0.5 ], [ 0.6666666666666666, 0.5 ] ],\n# [ [ 0.5, 0.6 ], [ 0.8333333333333333, 0.6 ] ],\n# [ [ 0.16666666666666666, 0.4 ], [ 0.5, 0.4 ] ],\n# [ [ 0.3333333333333333, 0.5 ], [ 0.6666666666666666, 0.5 ] ],\n# [ [ 0.3333333333333333, 0.6 ], [ 0.6666666666666666, 0.6 ] ],\n# [ [ 0.5, 0.7 ], [ 0.8333333333333333, 0.7 ] ],\n# [ [ 0.3333333333333333, 0.7 ], [ 0.6666666666666666, 0.7 ] ],\n# [ [ 0.5, 0.7999999999999999 ],\n# [ 0.8333333333333333, 0.7999999999999999 ] ],\n# [ [ 0.5, 0.8999999999999999 ],\n# [ 0.8333333333333334, 0.8999999999999999 ] ],\n# [ [ 0.6666666666666666, 0.9999999999999999 ],\n# [ 1, 0.9999999999999999 ] ] ]\n\npairs = [ [ [ 0, 0 ], [ 0.25, 0 ] ],\n\t[ [ 0.125, 0.047619047619047616 ],\n\t\t[ 0.375, 0.047619047619047616 ] ],\n\t[ [ 0.125, 0.09523809523809523 ],\n\t\t[ 0.375, 0.09523809523809523 ] ],\n\t[ [ 0.25, 0.14285714285714285 ], [ 0.5, 0.14285714285714285 ] ],\n\t[ [ 0.125, 0.14285714285714285 ],\n\t\t[ 0.375, 0.14285714285714285 ] ],\n\t[ [ 0.25, 0.19047619047619047 ], [ 0.5, 0.19047619047619047 ] ],\n\t[ [ 0.25, 0.23809523809523808 ], [ 0.5, 0.23809523809523808 ] ],\n\t[ [ 0.375, 0.2857142857142857 ], [ 0.625, 0.2857142857142857 ] ],\n\t[ [ 0.125, 0.19047619047619047 ],\n\t\t[ 0.375, 0.19047619047619047 ] ],\n\t[ [ 0.25, 0.23809523809523808 ], [ 0.5, 0.23809523809523808 ] ],\n\t[ [ 0.25, 0.2857142857142857 ], [ 0.5, 0.2857142857142857 ] ],\n\t[ [ 0.375, 0.3333333333333333 ], [ 0.625, 0.3333333333333333 ] ],\n\t[ [ 0.25, 0.3333333333333333 ], [ 0.5, 0.3333333333333333 ] ],\n\t[ [ 0.375, 0.38095238095238093 ],\n\t\t[ 0.625, 0.38095238095238093 ] ],\n\t[ [ 0.375, 0.42857142857142855 ],\n\t\t[ 0.625, 0.42857142857142855 ] ],\n\t[ [ 0.5, 0.47619047619047616 ], [ 0.75, 0.47619047619047616 ] ],\n\t[ [ 0.125, 0.23809523809523808 ],\n\t\t[ 0.375, 0.23809523809523808 ] ],\n\t[ [ 0.25, 0.2857142857142857 ], [ 0.5, 0.2857142857142857 ] ],\n\t[ [ 0.25, 0.3333333333333333 ], [ 0.5, 0.3333333333333333 ] ],\n\t[ [ 0.375, 0.38095238095238093 ],\n\t\t[ 0.625, 0.38095238095238093 ] ],\n\t[ [ 0.25, 0.38095238095238093 ], [ 0.5, 0.38095238095238093 ] ],\n\t[ [ 0.375, 0.42857142857142855 ],\n\t\t[ 0.625, 0.42857142857142855 ] ],\n\t[ [ 0.375, 0.47619047619047616 ],\n\t\t[ 0.625, 0.47619047619047616 ] ],\n\t[ [ 0.5, 0.5238095238095237 ], [ 0.75, 0.5238095238095237 ] ],\n\t[ [ 0.25, 0.42857142857142855 ], [ 0.5, 0.42857142857142855 ] ],\n\t[ [ 0.375, 0.47619047619047616 ],\n\t\t[ 0.625, 0.47619047619047616 ] ],\n\t[ [ 0.375, 0.5238095238095237 
], [ 0.625, 0.5238095238095237 ] ],\n\t[ [ 0.5, 0.5714285714285714 ], [ 0.75, 0.5714285714285714 ] ],\n\t[ [ 0.375, 0.5714285714285714 ], [ 0.625, 0.5714285714285714 ] ],\n\t[ [ 0.5, 0.6190476190476191 ], [ 0.75, 0.6190476190476191 ] ],\n\t[ [ 0.5, 0.6666666666666667 ], [ 0.75, 0.6666666666666667 ] ],\n\t[ [ 0.625, 0.7142857142857144 ], [ 0.875, 0.7142857142857144 ] ],\n\t[ [ 0.125, 0.2857142857142857 ], [ 0.375, 0.2857142857142857 ] ],\n\t[ [ 0.25, 0.3333333333333333 ], [ 0.5, 0.3333333333333333 ] ],\n\t[ [ 0.25, 0.38095238095238093 ], [ 0.5, 0.38095238095238093 ] ],\n\t[ [ 0.375, 0.42857142857142855 ],\n\t\t[ 0.625, 0.42857142857142855 ] ],\n\t[ [ 0.25, 0.42857142857142855 ], [ 0.5, 0.42857142857142855 ] ],\n\t[ [ 0.375, 0.47619047619047616 ],\n\t\t[ 0.625, 0.47619047619047616 ] ],\n\t[ [ 0.375, 0.5238095238095237 ], [ 0.625, 0.5238095238095237 ] ],\n\t[ [ 0.5, 0.5714285714285714 ], [ 0.75, 0.5714285714285714 ] ],\n\t[ [ 0.25, 0.47619047619047616 ], [ 0.5, 0.47619047619047616 ] ],\n\t[ [ 0.375, 0.5238095238095237 ], [ 0.625, 0.5238095238095237 ] ],\n\t[ [ 0.375, 0.5714285714285714 ], [ 0.625, 0.5714285714285714 ] ],\n\t[ [ 0.5, 0.6190476190476191 ], [ 0.75, 0.6190476190476191 ] ],\n\t[ [ 0.375, 0.6190476190476191 ], [ 0.625, 0.6190476190476191 ] ],\n\t[ [ 0.5, 0.6666666666666667 ], [ 0.75, 0.6666666666666667 ] ],\n\t[ [ 0.5, 0.7142857142857144 ], [ 0.75, 0.7142857142857144 ] ],\n\t[ [ 0.625, 0.7619047619047621 ], [ 0.875, 0.7619047619047621 ] ],\n\t[ [ 0.25, 0.5238095238095237 ], [ 0.5, 0.5238095238095237 ] ],\n\t[ [ 0.375, 0.5714285714285714 ], [ 0.625, 0.5714285714285714 ] ],\n\t[ [ 0.375, 0.6190476190476191 ], [ 0.625, 0.6190476190476191 ] ],\n\t[ [ 0.5, 0.6666666666666667 ], [ 0.75, 0.6666666666666667 ] ],\n\t[ [ 0.375, 0.6666666666666667 ], [ 0.625, 0.6666666666666667 ] ],\n\t[ [ 0.5, 0.7142857142857144 ], [ 0.75, 0.7142857142857144 ] ],\n\t[ [ 0.5, 0.7619047619047621 ], [ 0.75, 0.7619047619047621 ] ],\n\t[ [ 0.625, 0.8095238095238098 ], [ 0.875, 0.8095238095238098 ] ],\n\t[ [ 0.375, 0.7142857142857144 ], [ 0.625, 0.7142857142857144 ] ],\n\t[ [ 0.5, 0.7619047619047621 ], [ 0.75, 0.7619047619047621 ] ],\n\t[ [ 0.5, 0.8095238095238098 ], [ 0.75, 0.8095238095238098 ] ],\n\t[ [ 0.625, 0.8571428571428574 ], [ 0.875, 0.8571428571428574 ] ],\n\t[ [ 0.5, 0.8571428571428574 ], [ 0.75, 0.8571428571428574 ] ],\n\t[ [ 0.625, 0.9047619047619051 ], [ 0.875, 0.9047619047619051 ] ],\n\t[ [ 0.625, 0.9523809523809528 ], [ 0.875, 0.9523809523809528 ] ],\n\t[ [ 0.75, 1.0000000000000004 ], [ 1, 1.0000000000000004 ] ] ]\n\n# x = [pairs[0][0][0], pairs[0][1][0]]\n# y = [pairs[0][0][1], pairs[0][1][1]]\n\nmpl = plt.figure()\nfor p in pairs:\n\t\tx = [p[0][0], p[1][0]]\n\t\ty = [p[0][1], p[1][1]]\n\t\tmpl = plt.plot(x, y)\n\t\t# plt.plot(p[:][0], p[:][1])\n\nplt.show(mpl)\n\n# different when using spyder and just running using py"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
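The plotting script above draws each `[[x0, y0], [x1, y1]]` entry of `pairs` as an individual line segment. A compact version of that loop with a tiny stand-in dataset is shown below; note that in current matplotlib `plt.show()` takes no figure argument, unlike the script's `plt.show(mpl)`.

```python
from matplotlib import pyplot as plt

pairs = [[[0.0, 0.0], [0.5, 0.0]],
         [[0.25, 0.5], [0.75, 0.5]],
         [[0.5, 1.0], [1.0, 1.0]]]

plt.figure()
for (x0, y0), (x1, y1) in pairs:
    plt.plot([x0, x1], [y0, y1])  # one segment per input/output pair
plt.show()
```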
apoorv698/MajorProject | [
"50162cc7eec676d662d7484c7c65046835faaadc"
] | [
"trypy.py"
] | [
"from flask import Flask, render_template, request, redirect, url_for\r\nimport base64\r\nimport re\r\nimport numpy as np\r\nfrom io import BytesIO\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk\r\nimport time\r\nimport threading\r\nimport cv2\r\nimport os\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nimport matplotlib.pyplot as plt\r\n\r\nimport tensorflow as tf\r\nfrom keras import backend as k\r\nfrom keras.models import load_model\r\n\r\n\r\napp = Flask(__name__)\r\n\r\nimg_w = 75\r\nimg_h = 75\r\nweight_dir = os.path.join(os.getcwd(), 'weights/')\r\nmodel_name = 'face_model.h5'\r\nmodel_dir = os.path.join(os.getcwd(), 'models/')\r\npredictedAge='NA'\r\ngraph = tf.get_default_graph()\r\n\r\n\r\[email protected]('/result' , methods=['GET'])\r\ndef result():\r\n\tglobal predictedAge\r\n\tprint(predictedAge)\r\n\treturn render_template('result.html',predictedAge=predictedAge)\r\n\r\n\r\[email protected]('/', methods=['POST','GET'])\r\ndef index():\r\n\tprint(request.method)\r\n\tif request.method == 'POST':\r\n\t\twith graph.as_default():\r\n\t\t\tglobal predictedAge\r\n\t\t\tprint(\"INSIDE POST\")\r\n\t\t\tprint(request.form['number'])\r\n\t\t\timage_b64 = request.form['image']\r\n\t\t\tprint(e)\r\n\t\t\timage_b64 = image_b64.split(',')[1]\r\n\t\t\tprint(image_b64[:100])\r\n\t\t\tsbuf = BytesIO()\r\n\t\t\tsbuf.write(base64.b64decode(image_b64))\r\n\t\t\tpimg = Image.open(sbuf)\r\n\t\t\timage = cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)\r\n\t\t\tprint('image produced')\r\n\t\t\tprint(image.shape)\r\n\t\t\t#cv2.imread('captured image', (image))\r\n\t\t\t#cv2.waitKey(0)\r\n\t\t\tglobal weight_dir, img_w, img_h\r\n\t\t\timg = image\r\n\t\t\tgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n\t\t\tface_cascade = cv2.CascadeClassifier('C:/Python35/Scripts/env/haarcascade_frontalface_default.xml')\r\n\t\t\tfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\r\n\t\t\tprint('displaying image')\r\n\t\t\t\r\n\t\t\troi = None\r\n\t\t\tfor (x,y,w,h) in faces: \r\n\t\t\t\troi = gray[y:y+h, x:x+w]\r\n\t\t\ttry:\r\n\t\t\t\tprint('using face only')\r\n\t\t\t\tgray_img = cv2.resize(roi, (img_w,img_h))\r\n\t\t\t\tgray_img = np.expand_dims(gray_img, axis=2)\r\n\t\t\t\tgray_img = np.array([gray_img])/255.0\r\n\t\t\t\t#cv2.imshow('face', (gray_img))\r\n\t\t\texcept:\r\n\t\t\t\tprint('Unable to find face')\r\n\t\t\t\tprint('using whole picture')\r\n\t\t\t\tgray = cv2.resize(gray, (img_w,img_h))\r\n\t\t\t\tgray = np.expand_dims(gray, axis=2)\r\n\t\t\t\tgray = np.array([gray])/255.0\r\n\t\t\t\tprint(gray.shape)\r\n\t\t\t\t#cv2.imshow('face', (gray))\r\n\t\t\t\r\n\t\t\tpredicted_age = 0\r\n\t\t\t\r\n\t\t\tsum=0.0\r\n\t\t\tcounter=0.0\r\n\t\t\ttry:\r\n\t\t\t\tfor wt in os.listdir(weight_dir):\r\n\t\t\t\t\tcounter+=1.0\r\n\t\t\t\t\tmodel.load_weights(weight_dir+wt)\r\n\t\t\t\t\tprint(\"wt: \",wt)\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tynew = model.predict_classes(gray_img)\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tynew = model.predict_classes(gray)\r\n\t\t\t\t\tsum+=ynew[0]\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint('line 217 ',e)\r\n\t\t\tpredicted_age = sum/counter\r\n\t\t\r\n\t\t\tpredictedAge = predicted_age\r\n\t\t\t# predictedAge = 22\r\n\t\t\tprint('predict_age=', predictedAge)\r\n\t\t\treturn redirect(url_for('result'))\r\n\telse:\r\n\t\treturn render_template('index.html')\r\n\r\n\r\nif __name__ ==\"__main__\":\r\n\tos.environ['TF_CPP_MIN_LOG_LEVEL']='3'\r\n\t\r\n\tmodel = load_model(model_dir+model_name)\r\n\tprint('model prepared')\r\n\r\n\tapp.run(debug=True,port=10080)"
] | [
[
"tensorflow.get_default_graph",
"numpy.array",
"numpy.expand_dims"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
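The Flask handler above receives a browser data URL, strips the `data:image/...;base64,` prefix, and converts it to an OpenCV BGR array via PIL (note that its stray `print(e)` references a name that is undefined at that point and would raise `NameError`). A self-contained sketch of just the decoding path follows; the 2x2 red image is generated in-code so no magic base64 string is needed.

```python
import base64
from io import BytesIO

import cv2
import numpy as np
from PIL import Image

# Build a fake data URL from a 2x2 red image, standing in for request.form['image'].
buf = BytesIO()
Image.new('RGB', (2, 2), (255, 0, 0)).save(buf, format='PNG')
image_b64 = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

# The decoding path used by the handler above.
payload = image_b64.split(',')[1]
pimg = Image.open(BytesIO(base64.b64decode(payload)))
bgr = cv2.cvtColor(np.array(pimg), cv2.COLOR_RGB2BGR)
print(bgr.shape)  # (2, 2, 3)
```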
brandondutra/cloudml-samples | [
"3d483593c070c4acd4a9648dbfd7db2be6524583",
"3d483593c070c4acd4a9648dbfd7db2be6524583"
] | [
"molecules/predict.py",
"census/tensorflowcore/trainer/task.py"
] | [
"#!/usr/bin/env python\n#\n# Copyright 2018 Google Inc. All Rights Reserved. Licensed under the Apache\n# License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n# This tool does either batch or streaming predictions on a trained model.\n\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport sys\nimport tempfile\n\nimport pubchem\n\nimport apache_beam as beam\nimport tensorflow as tf\n\nfrom apache_beam.options.pipeline_options import GoogleCloudOptions\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.options.pipeline_options import SetupOptions\nfrom apache_beam.options.pipeline_options import StandardOptions\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.saved_model import loader\n\n\nclass Predict(beam.DoFn):\n def __init__(self,\n model_dir,\n id_key,\n meta_tag='serve',\n meta_signature='predict',\n meta_predictions='predictions'):\n super(Predict, self).__init__()\n self.model_dir = model_dir\n self.id_key = id_key\n self.meta_tag = meta_tag\n self.meta_signature = meta_signature\n self.meta_predictions = meta_predictions\n self.session = None\n self.graph = None\n self.feed_tensors = None\n self.fetch_tensors = None\n\n def process(self, inputs):\n # Create a session for every worker only once. The session is not\n # pickleable, so it can't be created at the DoFn constructor.\n if not self.session:\n self.graph = ops.Graph()\n with self.graph.as_default():\n self.session = tf.Session()\n metagraph_def = loader.load(\n self.session, {self.meta_tag}, self.model_dir)\n signature_def = metagraph_def.signature_def[self.meta_signature]\n\n # inputs\n self.feed_tensors = {\n k: self.graph.get_tensor_by_name(v.name)\n for k, v in signature_def.inputs.items()\n }\n\n # outputs/predictions\n self.fetch_tensors = {\n k: self.graph.get_tensor_by_name(v.name)\n for k, v in signature_def.outputs.items()\n }\n\n # Create a feed_dict for a single element.\n feed_dict = {\n tensor: [inputs[key]]\n for key, tensor in self.feed_tensors.items()\n if key in inputs\n }\n results = self.session.run(self.fetch_tensors, feed_dict)\n\n yield {\n 'id': inputs[self.id_key],\n 'predictions': results[self.meta_predictions][0].tolist()\n }\n\n\n# [START run_definition]\ndef run(model_dir, feature_extraction, sink, beam_options=None):\n with beam.Pipeline(options=beam_options) as p:\n _ = (p\n | 'Feature extraction' >> feature_extraction\n | 'Predict' >> beam.ParDo(Predict(model_dir, 'ID'))\n | 'Format as JSON' >> beam.Map(json.dumps)\n | 'Write predictions' >> sink)\n# [END run_definition]\n\n\nif __name__ == '__main__':\n \"\"\"Main function\"\"\"\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n '--work-dir',\n type=str,\n default=os.path.join(\n tempfile.gettempdir(), 'cloudml-samples', 'molecules'),\n help='Directory for temporary files and preprocessed datasets to. 
'\n 'This can be a Google Cloud Storage path.')\n\n parser.add_argument(\n '--model-dir',\n type=str,\n required=True,\n help='Path to the exported TensorFlow model. '\n 'This can be a Google Cloud Storage path.')\n\n verbs = parser.add_subparsers(dest='verb')\n batch_verb = verbs.add_parser('batch', help='Batch prediction')\n batch_verb.add_argument(\n '--inputs-dir',\n type=str,\n required=True,\n help='Input directory where SDF data files are read from. '\n 'This can be a Google Cloud Storage path.')\n batch_verb.add_argument(\n '--outputs-dir',\n type=str,\n required=True,\n help='Directory to store prediction results. '\n 'This can be a Google Cloud Storage path.')\n\n stream_verb = verbs.add_parser('stream', help='Streaming prediction')\n stream_verb.add_argument(\n '--inputs-topic',\n type=str,\n default='molecules-inputs',\n help='PubSub topic to subscribe for molecules.')\n\n stream_verb.add_argument(\n '--outputs-topic',\n type=str,\n default='molecules-predictions',\n help='PubSub topic to publish predictions.')\n\n args, pipeline_args = parser.parse_known_args()\n\n beam_options = PipelineOptions(pipeline_args)\n beam_options.view_as(SetupOptions).save_main_session = True\n\n project = beam_options.view_as(GoogleCloudOptions).project\n\n # [START batch_or_stream]\n if args.verb == 'batch':\n data_files_pattern = os.path.join(args.inputs_dir, '*.sdf')\n results_prefix = os.path.join(args.outputs_dir, 'part')\n source = beam.io.Read(pubchem.ParseSDF(data_files_pattern))\n sink = beam.io.WriteToText(results_prefix)\n\n elif args.verb == 'stream':\n if not project:\n parser.print_usage()\n print('error: argument --project is required for streaming')\n sys.exit(1)\n\n beam_options.view_as(StandardOptions).streaming = True\n source = beam.io.ReadFromPubSub(topic='projects/{}/topics/{}'.format(\n project, args.inputs_topic))\n sink = beam.io.WriteStringsToPubSub(topic='projects/{}/topics/{}'.format(\n project, args.outputs_topic))\n # [END batch_or_stream]\n\n else:\n parser.print_usage()\n sys.exit(1)\n\n # [START call_run]\n run(\n args.model_dir,\n pubchem.SimpleFeatureExtraction(source),\n sink,\n beam_options)\n # [END call_run]\n",
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A Feed forward neural network using TensorFlow Core APIs.\n\nIt implements a binary classifier for Census Income Dataset using both single\nand distributed node cluster.\n\"\"\"\n\nimport argparse\nimport json\nimport os\nimport threading\nimport six\n\nimport tensorflow as tf\nfrom tensorflow.contrib.training.python.training import hparam\n\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.saved_model import signature_constants as sig_constants\n\nimport trainer.model as model\n\n\nclass EvaluationRunHook(tf.train.SessionRunHook):\n \"\"\"EvaluationRunHook performs continuous evaluation of the model.\n\n Args:\n checkpoint_dir (string): Dir to store model checkpoints\n metric_dir (string): Dir to store metrics like accuracy and auroc\n graph (tf.Graph): Evaluation graph\n eval_frequency (int): Frequency of evaluation every n train steps\n eval_steps (int): Evaluation steps to be performed\n \"\"\"\n\n def __init__(self,\n checkpoint_dir,\n metric_dict,\n graph,\n eval_frequency,\n eval_steps=None,\n **kwargs):\n\n self._eval_steps = eval_steps\n self._checkpoint_dir = checkpoint_dir\n self._kwargs = kwargs\n self._eval_every = eval_frequency\n self._latest_checkpoint = None\n self._checkpoints_since_eval = 0\n self._graph = graph\n\n # With the graph object as default graph.\n # See https://www.tensorflow.org/api_docs/python/tf/Graph#as_default\n # Adds ops to the graph object\n with graph.as_default():\n value_dict, update_dict = tf.contrib.metrics.aggregate_metric_map(\n metric_dict)\n\n # Op that creates a Summary protocol buffer by merging summaries\n self._summary_op = tf.summary.merge([\n tf.summary.scalar(name, value_op)\n for name, value_op in six.iteritems(value_dict)\n ])\n\n # Saver class add ops to save and restore\n # variables to and from checkpoint\n self._saver = tf.train.Saver()\n\n # Creates a global step to contain a counter for\n # the global training step\n self._gs = tf.contrib.framework.get_or_create_global_step()\n\n self._final_ops_dict = value_dict\n self._eval_ops = update_dict.values()\n\n # MonitoredTrainingSession runs hooks in background threads\n # and it doesn't wait for the thread from the last session.run()\n # call to terminate to invoke the next hook, hence locks.\n self._eval_lock = threading.Lock()\n self._checkpoint_lock = threading.Lock()\n self._file_writer = tf.summary.FileWriter(\n os.path.join(checkpoint_dir, 'eval'), graph=graph)\n\n def after_run(self, run_context, run_values):\n # Always check for new checkpoints in case a single evaluation\n # takes longer than checkpoint frequency and _eval_every is >1\n self._update_latest_checkpoint()\n\n if self._eval_lock.acquire(False):\n try:\n if self._checkpoints_since_eval > 
self._eval_every:\n self._checkpoints_since_eval = 0\n self._run_eval()\n finally:\n self._eval_lock.release()\n\n def _update_latest_checkpoint(self):\n \"\"\"Update the latest checkpoint file created in the output dir.\"\"\"\n if self._checkpoint_lock.acquire(False):\n try:\n latest = tf.train.latest_checkpoint(self._checkpoint_dir)\n if latest != self._latest_checkpoint:\n self._checkpoints_since_eval += 1\n self._latest_checkpoint = latest\n finally:\n self._checkpoint_lock.release()\n\n def end(self, session):\n \"\"\"Called at then end of session to make sure we always evaluate.\"\"\"\n self._update_latest_checkpoint()\n\n with self._eval_lock:\n self._run_eval()\n\n def _run_eval(self):\n \"\"\"Run model evaluation and generate summaries.\"\"\"\n coord = tf.train.Coordinator(clean_stop_exception_types=(\n tf.errors.CancelledError, tf.errors.OutOfRangeError))\n\n with tf.Session(graph=self._graph) as session:\n # Restores previously saved variables from latest checkpoint\n self._saver.restore(session, self._latest_checkpoint)\n\n session.run([\n tf.tables_initializer(),\n tf.local_variables_initializer()])\n tf.train.start_queue_runners(coord=coord, sess=session)\n train_step = session.run(self._gs)\n\n tf.logging.info('Starting Evaluation For Step: {}'.format(train_step))\n with coord.stop_on_exception():\n eval_step = 0\n while not coord.should_stop() and (self._eval_steps is None or\n eval_step < self._eval_steps):\n summaries, final_values, _ = session.run(\n [self._summary_op, self._final_ops_dict, self._eval_ops])\n if eval_step % 100 == 0:\n tf.logging.info('On Evaluation Step: {}'.format(eval_step))\n eval_step += 1\n\n # Write the summaries\n self._file_writer.add_summary(summaries, global_step=train_step)\n self._file_writer.flush()\n tf.logging.info(final_values)\n\n\ndef run(target, cluster_spec, is_chief, hparams):\n\n \"\"\"Runs the training and evaluation graph.\n\n Args:\n target (str): Tensorflow server target.\n cluster_spec: (cluster spec) Cluster specification.\n is_chief (bool): Boolean flag to specify a chief server.\n hparams (tf.hparams): Input Arguments.\n \"\"\"\n\n # Calculate the number of hidden units\n hidden_units = [\n max(2, int(hparams.first_layer_size * hparams.scale_factor**i))\n for i in range(hparams.num_layers)\n ]\n\n # If the server is chief which is `master`\n # In between graph replication Chief is one node in\n # the cluster with extra responsibility and by default\n # is worker task zero. 
We have assigned master as the chief.\n #\n # See https://youtu.be/la_M6bCV91M?t=1203 for details on\n # distributed TensorFlow and motivation about chief.\n if is_chief:\n tf.logging.info('Created DNN hidden units {}'.format(hidden_units))\n evaluation_graph = tf.Graph()\n with evaluation_graph.as_default():\n\n # Features and label tensors\n features, labels = model.input_fn(\n hparams.eval_files,\n num_epochs=None if hparams.eval_steps else 1,\n batch_size=hparams.eval_batch_size,\n shuffle=False\n )\n # Accuracy and AUROC metrics\n # model.model_fn returns the dict when EVAL mode\n metric_dict = model.model_fn(\n model.EVAL,\n features.copy(),\n labels,\n hidden_units=hidden_units,\n learning_rate=hparams.learning_rate\n )\n\n hooks = [EvaluationRunHook(\n hparams.job_dir,\n metric_dict,\n evaluation_graph,\n hparams.eval_frequency,\n eval_steps=hparams.eval_steps,\n )]\n else:\n hooks = []\n\n # Create a new graph and specify that as default.\n with tf.Graph().as_default():\n # Placement of ops on devices using replica device setter\n # which automatically places the parameters on the `ps` server\n # and the `ops` on the workers.\n #\n # See:\n # https://www.tensorflow.org/api_docs/python/tf/train/replica_device_setter\n with tf.device(tf.train.replica_device_setter(cluster=cluster_spec)):\n\n # Features and label tensors as read using filename queue.\n features, labels = model.input_fn(\n hparams.train_files,\n num_epochs=hparams.num_epochs,\n batch_size=hparams.train_batch_size\n )\n\n # Returns the training graph and global step tensor.\n train_op, global_step_tensor = model.model_fn(\n model.TRAIN,\n features.copy(),\n labels,\n hidden_units=hidden_units,\n learning_rate=hparams.learning_rate\n )\n\n # Creates a MonitoredSession for training.\n # MonitoredSession is a Session-like object that handles\n # initialization, recovery and hooks\n # https://www.tensorflow.org/api_docs/python/tf/train/MonitoredTrainingSession\n with tf.train.MonitoredTrainingSession(master=target,\n is_chief=is_chief,\n checkpoint_dir=hparams.job_dir,\n hooks=hooks,\n save_checkpoint_secs=20,\n save_summaries_steps=50) as session:\n # Global step to keep track of global number of steps particularly in\n # distributed setting\n step = global_step_tensor.eval(session=session)\n\n # Run the training graph which returns the step number as tracked by\n # the global step tensor.\n # When train epochs is reached, session.should_stop() will be true.\n while (hparams.train_steps is None or\n step < hparams.train_steps) and not session.should_stop():\n step, _ = session.run([global_step_tensor, train_op])\n\n # Find the filename of the latest saved checkpoint file\n latest_checkpoint = tf.train.latest_checkpoint(hparams.job_dir)\n\n # Only perform this if chief\n if is_chief:\n build_and_run_exports(latest_checkpoint,\n hparams.job_dir,\n model.SERVING_INPUT_FUNCTIONS[hparams.export_format],\n hidden_units)\n\n\ndef main_op():\n init_local = variables.local_variables_initializer()\n init_tables = lookup_ops.tables_initializer()\n return control_flow_ops.group(init_local, init_tables)\n\n\ndef build_and_run_exports(latest, job_dir, serving_input_fn, hidden_units):\n \"\"\"Given the latest checkpoint file export the saved model.\n\n Args:\n latest (str): Latest checkpoint file.\n job_dir (str): Location of checkpoints and model files.\n serving_input_fn (str): Serving Function\n hidden_units (list): Number of hidden units.\n \"\"\"\n\n prediction_graph = tf.Graph()\n # Create exporter.\n exporter = 
tf.saved_model.builder.SavedModelBuilder(\n os.path.join(job_dir, 'export'))\n with prediction_graph.as_default():\n features, inputs_dict = serving_input_fn()\n prediction_dict = model.model_fn(\n model.PREDICT,\n features.copy(),\n None, # labels\n hidden_units=hidden_units,\n learning_rate=None # learning_rate unused in prediction mode\n )\n saver = tf.train.Saver()\n\n inputs_info = {\n name: tf.saved_model.utils.build_tensor_info(tensor)\n for name, tensor in six.iteritems(inputs_dict)\n }\n output_info = {\n name: tf.saved_model.utils.build_tensor_info(tensor)\n for name, tensor in six.iteritems(prediction_dict)\n }\n signature_def = tf.saved_model.signature_def_utils.build_signature_def(\n inputs=inputs_info,\n outputs=output_info,\n method_name=sig_constants.PREDICT_METHOD_NAME\n )\n\n with tf.Session(graph=prediction_graph) as session:\n session.run([tf.local_variables_initializer(), tf.tables_initializer()])\n saver.restore(session, latest)\n exporter.add_meta_graph_and_variables(\n session,\n tags=[tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n sig_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_def\n },\n legacy_init_op=main_op()\n )\n exporter.save()\n\n\ndef train_and_evaluate(hparams):\n \"\"\"Parse TF_CONFIG to cluster_spec and call run() method.\n\n TF_CONFIG environment variable is available when running using\n gcloud either locally or on cloud. It has all the information required\n to create a ClusterSpec which is important for running distributed code.\n\n Args:\n hparams (tf.hparams): Input arguments.\n \"\"\"\n\n tf_config = os.environ.get('TF_CONFIG')\n # If TF_CONFIG is not available run local.\n if not tf_config:\n return run(target='', cluster_spec=None, is_chief=True, hparams=hparams)\n\n tf_config_json = json.loads(tf_config)\n cluster = tf_config_json.get('cluster')\n job_name = tf_config_json.get('task', {}).get('type')\n task_index = tf_config_json.get('task', {}).get('index')\n\n # If cluster information is empty run local.\n if job_name is None or task_index is None:\n return run(target='', cluster_spec=None, is_chief=True, hparams=hparams)\n\n cluster_spec = tf.train.ClusterSpec(cluster)\n server = tf.train.Server(cluster_spec,\n job_name=job_name,\n task_index=task_index)\n\n # Wait for incoming connections forever.\n # Worker ships the graph to the ps server.\n # The ps server manages the parameters of the model.\n #\n # See a detailed video on distributed TensorFlow\n # https://www.youtube.com/watch?v=la_M6bCV91M\n if job_name == 'ps':\n server.join()\n return\n elif job_name in ['master', 'worker']:\n return run(server.target, cluster_spec, is_chief=(job_name == 'master'),\n hparams=hparams)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # Input Arguments\n parser.add_argument(\n '--train-files',\n nargs='+',\n help='Training files local or GCS',\n default='gs://cloud-samples-data/ml-engine/census/data/adult.data.csv')\n parser.add_argument(\n '--eval-files',\n nargs='+',\n help='Evaluation files local or GCS',\n default='gs://cloud-samples-data/ml-engine/census/data/adult.test.csv')\n parser.add_argument(\n '--job-dir',\n type=str,\n help=\"\"\"GCS or local dir for checkpoints, exports, and summaries.\n Use an existing directory to load a trained model, or a new directory\n to retrain\"\"\",\n default='/tmp/census-tensorflowcore')\n parser.add_argument(\n '--train-steps',\n type=int,\n help='Maximum number of training steps to perform.')\n parser.add_argument(\n '--eval-steps',\n help=\"\"\"Number of 
steps to run evaluation for at each checkpoint.\n If unspecified, will run for 1 full epoch over evaluation data\"\"\",\n default=None,\n type=int)\n parser.add_argument(\n '--train-batch-size',\n type=int,\n default=40,\n help='Batch size for training steps')\n parser.add_argument(\n '--eval-batch-size',\n type=int,\n default=40,\n help='Batch size for evaluation steps')\n parser.add_argument(\n '--learning-rate',\n type=float,\n default=0.003,\n help='Learning rate for SGD')\n parser.add_argument(\n '--eval-frequency',\n type=int,\n default=50,\n help='Perform one evaluation per n steps')\n parser.add_argument(\n '--first-layer-size',\n type=int,\n default=256,\n help='Number of nodes in the first layer of DNN')\n parser.add_argument(\n '--num-layers',\n type=int,\n default=2,\n help='Number of layers in DNN')\n parser.add_argument(\n '--scale-factor',\n type=float,\n default=0.25,\n help=\"\"\"Rate of decay for the size of each successive layer in the DNN.\n max(2, int(first_layer_size * scale_factor**i)) \"\"\")\n parser.add_argument(\n '--num-epochs',\n type=int,\n help='Maximum number of epochs on which to train')\n parser.add_argument(\n '--export-format',\n help='The input format of the exported SavedModel binary',\n choices=['JSON', 'CSV', 'EXAMPLE'],\n default='JSON')\n parser.add_argument(\n '--verbosity',\n choices=['DEBUG', 'ERROR', 'FATAL', 'INFO', 'WARN'],\n default='INFO',\n help='Set logging verbosity')\n\n args, _ = parser.parse_known_args()\n\n # Set python level verbosity\n tf.logging.set_verbosity(args.verbosity)\n # Set C++ Graph Execution level verbosity\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(\n tf.logging.__dict__[args.verbosity] / 10)\n\n # Run the training job.\n hparams = hparam.HParams(**args.__dict__)\n train_and_evaluate(hparams)\n"
] | [
[
"tensorflow.python.saved_model.loader.load",
"tensorflow.python.framework.ops.Graph",
"tensorflow.Session"
],
[
"tensorflow.contrib.metrics.aggregate_metric_map",
"tensorflow.train.MonitoredTrainingSession",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.contrib.training.python.training.hparam.HParams",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.train.Server",
"tensorflow.python.ops.lookup_ops.tables_initializer",
"tensorflow.train.Coordinator",
"tensorflow.logging.info",
"tensorflow.saved_model.utils.build_tensor_info",
"tensorflow.train.latest_checkpoint",
"tensorflow.saved_model.signature_def_utils.build_signature_def",
"tensorflow.local_variables_initializer",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.train.start_queue_runners",
"tensorflow.train.ClusterSpec",
"tensorflow.train.replica_device_setter",
"tensorflow.contrib.framework.get_or_create_global_step",
"tensorflow.python.ops.variables.local_variables_initializer",
"tensorflow.tables_initializer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
NathanWoo/pulse2percept | [
"2a1e15159af234fb247092b88a465b7bdffd21db"
] | [
"pulse2percept/viz/axon_map.py"
] | [
"\"\"\"`plot_axon_map`, `plot_implant_on_axon_map`\"\"\"\n# https://stackoverflow.com/questions/21784641/installation-issue-with-matplotlib-python\nfrom sys import platform\nimport matplotlib as mpl\nif platform == \"darwin\": # OS X\n mpl.use('TkAgg')\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches\nimport logging\n\nfrom ..implants import ProsthesisSystem\nfrom ..utils import parfor\nfrom ..models import AxonMapModel, dva2ret\n\n\ndef plot_axon_map(eye='RE', loc_od=(15.5, 1.5), n_bundles=100, ax=None,\n upside_down=False, annotate_quadrants=True):\n \"\"\"Plot an axon map\n\n This function generates an axon map for a left/right eye and a given\n optic disc location.\n\n Parameters\n ----------\n eye : str\n Either 'LE' for left eye or 'RE' for right eye\n loc_od : (x_od, y_od), optional, default: (15.5, 1.5)\n Location of the optic disc center (deg).\n n_bundles : int, optional, default: 100\n Number of nerve fiber bundles to plot.\n ax : matplotlib.axes._subplots.AxesSubplot, optional, default: None\n A Matplotlib axes object. If None given, a new one will be created.\n upside_down : bool, optional, default: False\n Flag whether to plot the retina upside-down, such that the upper\n half of the plot corresponds to the upper visual field. In general,\n inferior retina == upper visual field (and superior == lower).\n annotate_quadrants : bool, optional, default: True\n Flag whether to annotate the four retinal quadrants\n (inferior/superior x temporal/nasal).\n \"\"\"\n loc_od = np.asarray(loc_od)\n if len(loc_od) != 2:\n raise ValueError(\"'loc_od' must specify the x and y coordinates of \"\n \"the optic disc.\")\n if eye not in ['LE', 'RE']:\n raise ValueError(\"'eye' must be either 'LE' or 'RE', not %s.\" % eye)\n if n_bundles < 1:\n raise ValueError('Number of nerve fiber bundles must be >= 1.')\n\n # Make sure x-coord of optic disc has the correct sign for LE/RE:\n if (eye == 'RE' and loc_od[0] <= 0 or eye == 'LE' and loc_od[0] > 0):\n logstr = (\"For eye==%s, expected opposite sign of x-coordinate of \"\n \"the optic disc; changing %.2f to %.2f\" % (eye, loc_od[0],\n -loc_od[0]))\n logging.getLogger(__name__).info(logstr)\n loc_od = (-loc_od[0], loc_od[1])\n if ax is None:\n # No axes object given: create\n fig, ax = plt.subplots(1, figsize=(10, 8))\n else:\n fig = ax.figure\n\n # Matplotlib<2 compatibility\n if hasattr(ax, 'set_facecolor'):\n ax.set_facecolor('black')\n elif hasattr(ax, 'set_axis_bgcolor'):\n ax.set_axis_bgcolor('black')\n\n # Draw axon pathways:\n axon_map = AxonMapModel(n_axons=n_bundles, loc_od_x=loc_od[0],\n loc_od_y=loc_od[1], eye=eye)\n axon_bundles = axon_map.grow_axon_bundles()\n for bundle in axon_bundles:\n ax.plot(bundle[:, 0], bundle[:, 1], c=(0.5, 1.0, 0.5))\n\n # Show circular optic disc:\n ax.add_patch(patches.Circle(dva2ret(loc_od), radius=900, alpha=1,\n color='black', zorder=10))\n\n xmin, xmax, ymin, ymax = dva2ret([-20, 20, -15, 15])\n ax.set_aspect('equal')\n ax.set_xlim(xmin, xmax)\n ax.set_xlabel('x (microns)')\n ax.set_ylim(ymin, ymax)\n ax.set_ylabel('y (microns)')\n ax.set_title('Axon map: %s, %s' % (eye, loc_od))\n ax.grid(False)\n\n # Annotate the four retinal quadrants near the corners of the plot:\n # superior/inferior x temporal/nasal\n if annotate_quadrants:\n if upside_down:\n topbottom = ['bottom', 'top']\n else:\n topbottom = ['top', 'bottom']\n if eye == 'RE':\n temporalnasal = ['temporal', 'nasal']\n else:\n temporalnasal = ['nasal', 'temporal']\n for yy, valign, si in zip([ymax, ymin], 
topbottom,\n ['superior', 'inferior']):\n for xx, halign, tn in zip([xmin, xmax], ['left', 'right'],\n temporalnasal):\n ax.text(xx, yy, si + ' ' + tn,\n color='black', fontsize=14,\n horizontalalignment=halign,\n verticalalignment=valign,\n backgroundcolor=(1, 1, 1, 0.8))\n\n # Need to flip y axis to have upper half == upper visual field\n if upside_down:\n ax.invert_yaxis()\n\n return fig, ax\n\n\ndef plot_implant_on_axon_map(implant, loc_od=(15.5, 1.5), n_bundles=100,\n ax=None, upside_down=False, annotate_implant=True,\n annotate_quadrants=True):\n \"\"\"Plot an implant on top of the axon map\n\n This function plots an electrode array on top of an axon map.\n\n Parameters\n ----------\n implant : p2p.implants.ProsthesisSystem\n A ProsthesisSystem object. If a stimulus is given, stimulating\n electrodes will be highlighted in yellow.\n loc_od : (x_od, y_od), optional, default: (15.5, 1.5)\n Location of the optic disc center (deg).\n n_bundles : int, optional, default: 100\n Number of nerve fiber bundles to plot.\n ax : matplotlib.axes._subplots.AxesSubplot, optional, default: None\n A Matplotlib axes object. If None given, a new one will be created.\n upside_down : bool, optional, default: False\n Flag whether to plot the retina upside-down, such that the upper\n half of the plot corresponds to the upper visual field. In general,\n inferior retina == upper visual field (and superior == lower).\n annotate_implant : bool, optional, default: True\n Flag whether to label electrodes in the implant.\n annotate_quadrants : bool, optional, default: True\n Flag whether to annotate the four retinal quadrants\n (inferior/superior x temporal/nasal).\n \"\"\"\n if not isinstance(implant, ProsthesisSystem):\n e_s = \"`implant` must be of type ProsthesisSystem\"\n raise TypeError(e_s)\n\n fig, ax = plot_axon_map(eye=implant.eye, loc_od=loc_od, ax=ax,\n n_bundles=n_bundles, upside_down=upside_down,\n annotate_quadrants=annotate_quadrants)\n\n # Highlight location of stimulated electrodes:\n if implant.stim is not None:\n for e in implant.stim.electrodes:\n ax.plot(implant[e].x, implant[e].y, 'oy',\n markersize=np.sqrt(implant[e].r) * 2)\n\n # Plot all electrodes and label them (optional):\n for name, el in implant.items():\n if annotate_implant:\n ax.text(el.x + 100, el.y + 50, name, color='white', size='x-large')\n ax.plot(el.x, el.y, 'ow', markersize=np.sqrt(el.r))\n\n ax.set_title(implant)\n\n return fig, ax\n"
] | [
[
"numpy.asarray",
"matplotlib.use",
"numpy.sqrt",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RenatoFarruggio/TextBoxes_plusplus_tf | [
"9d1bfb73ebe9eb147747daea3b9ec21b8cb0d6ba"
] | [
"train_stage_1.py"
] | [
"import os\n\nuse_gpu = True\n\nif use_gpu:\n os.environ['CUDA_VISIBLE_DEVICES']= '0' # using GPU 0\nelse:\n os.environ['CUDA_VISIBLE_DEVICES']= '-1' # using only CPU\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.python.ops import control_flow_ops\n\nfrom tf_extended import tf_utils\nfrom deployment import model_deploy\nfrom datasets import TFrecords2Dataset\nfrom nets import txtbox_384, txtbox_768\nfrom processing import ssd_vgg_preprocessing\n\n# =========================================================================== #\n# Textboxes++ Network flags.\n# =========================================================================== #\n# α in Lloc - smooth L1 loss --> Default set to 0.2 for quick convergence.\ntf.app.flags.DEFINE_float(\n\t'loss_alpha', 0.2,\n 'Alpha parameter in the loss function'\n)\n#TODO: On-line hard negative mining (OHNM) ratio, split into two values for the two training stages: 1. nr=3; 2. nr=6.\ntf.app.flags.DEFINE_float(\n\t'negative_ratio', 3., #6.\n 'On-line negative mining ratio in the loss function.'\n)\n# IoU threshold for matching\ntf.app.flags.DEFINE_float(\n\t'match_threshold', 0.5,\n 'Matching threshold in the loss function.'\n)\n#TODO: Multi-scale training divided into two stages: 1. size=384, lr=10^-4; 2. size=768, lr=10^-5.\ntf.app.flags.DEFINE_boolean(\n\t'large_training', False, #True\n\t'Use 768 to train'\n)\n# =========================================================================== #\n# Train & Deploy Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_string(\n 'train_dir', './checkpoints',\n 'Directory where checkpoints and event logs are written to.'\n)\n# TODO: GPU number configuration\ntf.app.flags.DEFINE_integer(\n\t'num_clones', 1,\n 'Number of model clones to deploy to GPUs.'\n)\ntf.app.flags.DEFINE_boolean(\n\t'clone_on_cpu', False,\n 'Use CPUs to deploy clones.'\n)\ntf.app.flags.DEFINE_integer(\n\t'num_readers', 8,\n 'The number of parallel readers that read data from the dataset.'\n)\ntf.app.flags.DEFINE_integer(\n\t'num_preprocessing_threads', 8,\n 'The number of threads used to create the batches.'\n)\ntf.app.flags.DEFINE_integer(\n\t'log_every_n_steps', 10,\n 'The frequency with which logs are printed.'\n)\ntf.app.flags.DEFINE_integer(\n\t'save_summaries_secs', 120,\n 'The frequency with which summaries are saved, in seconds.'\n)\ntf.app.flags.DEFINE_integer(\n\t'save_interval_secs', 1200,\n 'The frequency with which the model is saved, in seconds.'\n)\ntf.app.flags.DEFINE_float(\n\t'gpu_memory_fraction', 0.9,\n\t'GPU memory fraction to use.'\n)\n# =========================================================================== #\n# Optimization Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_float(\n\t'weight_decay', 0.0005,\n 'The weight decay on the model weights.'\n)\ntf.app.flags.DEFINE_string(\n\t'optimizer', 'adam',\n 'The name of the optimizer, one of \"adadelta\", \"adagrad\", \"adam\",\"ftrl\", \"momentum\", \"sgd\" or \"rmsprop\".'\n)\ntf.app.flags.DEFINE_float(\n\t'adadelta_rho', 0.95,\n 'The decay rate for adadelta.'\n)\ntf.app.flags.DEFINE_float(\n\t'adagrad_initial_accumulator_value', 0.1,\n 'Starting value for the AdaGrad accumulators.'\n)\ntf.app.flags.DEFINE_float(\n\t'adam_beta1', 0.9,\n 'The exponential decay rate for the 1st moment estimates.'\n)\ntf.app.flags.DEFINE_float(\n\t'adam_beta2', 0.999,\n 'The exponential decay rate for the 2nd moment 
estimates.'\n)\ntf.app.flags.DEFINE_float(\n\t'opt_epsilon', 1.0,\n 'Epsilon term for the optimizer.'\n)\ntf.app.flags.DEFINE_float(\n\t'ftrl_learning_rate_power', -0.5,\n 'The learning rate power.'\n)\ntf.app.flags.DEFINE_float(\n\t'ftrl_initial_accumulator_value', 0.1,\n 'Starting value for the FTRL accumulators.'\n)\ntf.app.flags.DEFINE_float(\n\t'ftrl_l1', 0.0,\n 'The FTRL l1 regularization strength.'\n)\ntf.app.flags.DEFINE_float(\n\t'ftrl_l2', 0.0,\n 'The FTRL l2 regularization strength.'\n)\ntf.app.flags.DEFINE_float(\n\t'momentum', 0.9,\n 'The momentum for the MomentumOptimizer and RMSPropOptimizer.'\n)\ntf.app.flags.DEFINE_float(\n\t'rmsprop_momentum', 0.9,\n\t'Momentum.'\n)\ntf.app.flags.DEFINE_float(\n\t'rmsprop_decay', 0.9,\n\t'Decay term for RMSProp.'\n)\n# =========================================================================== #\n# Learning Rate Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_string(\n\t'learning_rate_decay_type', 'exponential',\n 'Specifies how the learning rate is decayed. One of \"fixed\", \"exponential\",'' or \"polynomial\"'\n)\n# TODO: stage1 -> lr 10^-4; stage2 -> lr 10^-5\ntf.app.flags.DEFINE_float(\n\t'learning_rate', 1e-4, #0.00001\n 'Initial learning rate.'\n)\ntf.app.flags.DEFINE_float(\n\t'end_learning_rate', 1e-5, #0.00001\n 'The minimal end learning rate used by a polynomial decay learning rate.'\n)\ntf.app.flags.DEFINE_float(\n\t'label_smoothing', 0.0,\n 'The amount of label smoothing.'\n)\ntf.app.flags.DEFINE_float(\n\t'learning_rate_decay_factor', 0.1,\n 'Learning rate decay factor.'\n)\ntf.app.flags.DEFINE_float(\n\t'num_epochs_per_decay', 80000,\n 'Number of epochs after which learning rate decays.'\n)\ntf.app.flags.DEFINE_float(\n 'moving_average_decay', None,\n\t'The decay to use for the moving average. If left as None, then moving averages are not used.'\n)\n# =========================================================================== #\n# Dataset Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_string(\n\t'dataset_name', 'SynthText',\n 'The name of the dataset to load.'\n)\ntf.app.flags.DEFINE_integer(\n\t'num_classes', 2,\n 'Number of classes to use in the dataset.'\n)\ntf.app.flags.DEFINE_string(\n\t'dataset_split_name', 'train',\n 'The name of the train/test split.'\n)\ntf.app.flags.DEFINE_string(\n 'dataset_dir', './tfrecords',\n 'The directory where the dataset files are stored.'\n)\ntf.app.flags.DEFINE_integer(\n 'labels_offset', 0,\n 'An offset for the labels in the dataset. This flag is primarily used to '\n 'evaluate the VGG and ResNet architectures which do not use a background '\n 'class for the ImageNet dataset.'\n)\ntf.app.flags.DEFINE_string(\n\t'model_name', 'text_box_384',\n 'The name of the architecture to train.'\n)\ntf.app.flags.DEFINE_string(\n 'preprocessing_name', None,\n 'The name of the preprocessing to use. 
If left '\n 'as `None`, then the model_name flag is used.'\n)\ntf.app.flags.DEFINE_integer(\n\t'batch_size', 8,\n 'The number of samples in each batch.'\n)\ntf.app.flags.DEFINE_integer(\n\t'train_image_size', 384,\n\t'Train image size'\n)\ntf.app.flags.DEFINE_string(\n\t'training_image_crop_area', '0.1, 1.0',\n 'The crop area range of the image used for training.'\n)\n#TODO: stage1 -> 8k; stage2 -> 4k\ntf.app.flags.DEFINE_integer(\n\t'max_number_of_steps', 68000, #120000, #8000\n 'The maximum number of training steps.'\n)\n# =========================================================================== #\n# Fine-Tuning Flags.\n# =========================================================================== #\n#TODO: indicate ckpt path for continuing stage 2 training.\ntf.app.flags.DEFINE_string(\n 'checkpoint_path', '/tank/rfarruggio/TextBoxes_plusplus_tf/checkpoints/model.ckpt-60000', #'./model/model.ckpt-8000.ckpt'\n 'The path to a checkpoint from which to fine-tune.'\n)\ntf.app.flags.DEFINE_string(\n 'checkpoint_model_scope', None,\n 'Model scope in the checkpoint. None if the same as the trained model.'\n)\ntf.app.flags.DEFINE_string(\n 'checkpoint_exclude_scopes', None,\n 'Comma-separated list of scopes of variables to exclude when restoring '\n 'from a checkpoint.'\n)\ntf.app.flags.DEFINE_string(\n 'trainable_scopes', None,\n 'Comma-separated list of scopes to filter the set of variables to train.'\n 'By default, None would train all the variables.'\n)\ntf.app.flags.DEFINE_boolean(\n 'ignore_missing_vars', False,\n 'When restoring a checkpoint, ignore missing variables.'\n)\nFLAGS = tf.app.flags.FLAGS\n\n\n# =========================================================================== #\n# Main training routine.\n# =========================================================================== #\ndef main(_):\n if not FLAGS.dataset_dir:\n raise ValueError(\n 'You must supply the dataset directory with --dataset_dir'\n )\n # Sets the threshold for what messages will be logged. (DEBUG / INFO / WARN / ERROR / FATAL)\n tf.logging.set_verbosity(tf.logging.DEBUG)\n\n with tf.Graph().as_default():\n # Config model_deploy. 
Keep TF Slim Models structure.\n # Useful if we want to use multiple GPUs and/or servers in the future.\n deploy_config = model_deploy.DeploymentConfig(\n num_clones=FLAGS.num_clones,\n clone_on_cpu=FLAGS.clone_on_cpu,\n replica_id=0,\n num_replicas=1,\n num_ps_tasks=0)\n\n # Create global_step, the training iteration counter.\n with tf.device(deploy_config.variables_device()):\n global_step = slim.create_global_step()\n\n # Select the dataset.\n dataset = TFrecords2Dataset.get_datasets(FLAGS.dataset_dir)\n\n # Get the TextBoxes++ network and its anchors.\n text_net = txtbox_384.TextboxNet()\n\n # Stage 2 training using the 768x768 input size.\n if FLAGS.large_training:\n # Replace the input image shape and the feature map sizes extracted from\n # each indicated layer associated with each textbox layer.\n text_net.params = text_net.params._replace(img_shape = (768, 768))\n text_net.params = text_net.params._replace(feat_shapes = [(96, 96), (48,48), (24, 24), (12, 12), (10, 10), (8, 8)])\n\n img_shape = text_net.params.img_shape\n print('img_shape: ' + str(img_shape))\n\n # Compute the default anchor boxes with the given image shape, get anchor list.\n text_anchors = text_net.anchors(img_shape)\n\n # Print the training configuration before training.\n tf_utils.print_configuration(FLAGS.__flags, text_net.params, dataset.data_sources, FLAGS.train_dir)\n\n # =================================================================== #\n # Create a dataset provider and batches.\n # =================================================================== #\n with tf.device(deploy_config.inputs_device()):\n # Set up the dataset provider\n with tf.name_scope(FLAGS.dataset_name + '_data_provider'):\n provider = slim.dataset_data_provider.DatasetDataProvider(\n dataset,\n num_readers=FLAGS.num_readers,\n common_queue_capacity=1000 * FLAGS.batch_size,\n common_queue_min=300 * FLAGS.batch_size,\n shuffle=True\n )\n # Get inputs for the SSD network: image, labels, bboxes.\n [image, shape, glabels, gbboxes, x1, x2, x3, x4, y1, y2, y3,\n y4] = provider.get([\n 'image', 'shape', 'object/label', 'object/bbox',\n 'object/oriented_bbox/x1', 'object/oriented_bbox/x2',\n 'object/oriented_bbox/x3', 'object/oriented_bbox/x4',\n 'object/oriented_bbox/y1', 'object/oriented_bbox/y2',\n 'object/oriented_bbox/y3', 'object/oriented_bbox/y4'\n ])\n gxs = tf.transpose(tf.stack([x1, x2, x3, x4])) #shape = (N,4)\n gys = tf.transpose(tf.stack([y1, y2, y3, y4]))\n image = tf.identity(image, 'input_image')\n init_op = tf.global_variables_initializer()\n # tf.global_variables_initializer()\n\n # Pre-processing image, labels and bboxes.\n training_image_crop_area = FLAGS.training_image_crop_area\n area_split = training_image_crop_area.split(',')\n assert len(area_split) == 2\n training_image_crop_area = [\n float(area_split[0]),\n float(area_split[1])]\n\n image, glabels, gbboxes, gxs, gys= \\\n ssd_vgg_preprocessing.preprocess_for_train(image, glabels, gbboxes, gxs, gys,\n img_shape,\n data_format='NHWC', crop_area_range=training_image_crop_area)\n\n # Encode groundtruth labels and bboxes.\n image = tf.identity(image, 'processed_image')\n\n glocalisations, gscores, glabels = \\\n text_net.bboxes_encode( glabels, gbboxes, text_anchors, gxs, gys)\n batch_shape = [1] + [len(text_anchors)] * 3\n\n # Training batches and queue.\n r = tf.train.batch(\n tf_utils.reshape_list([image, glocalisations, gscores, glabels]),\n batch_size=FLAGS.batch_size,\n num_threads=FLAGS.num_preprocessing_threads,\n capacity=5 * FLAGS.batch_size)\n\n b_image, 
b_glocalisations, b_gscores, b_glabels= \\\n tf_utils.reshape_list(r, batch_shape)\n\n # Intermediate queueing: unique batch computation pipeline for all\n # GPUs running the training.\n batch_queue = slim.prefetch_queue.prefetch_queue(\n tf_utils.reshape_list(\n [b_image, b_glocalisations, b_gscores, b_glabels]),\n capacity=2 * deploy_config.num_clones)\n\n # =================================================================== #\n # Define the model running on every GPU.\n # =================================================================== #\n def clone_fn(batch_queue):\n\n #Allows data parallelism by creating multiple\n #clones of network_fn.\n # Dequeue batch.\n b_image, b_glocalisations, b_gscores, b_glabels = \\\n tf_utils.reshape_list(batch_queue.dequeue(), batch_shape)\n\n # Construct TextBoxes network.\n arg_scope = text_net.arg_scope(weight_decay=FLAGS.weight_decay)\n with slim.arg_scope(arg_scope):\n predictions,localisations, logits, end_points = \\\n text_net.net(b_image, is_training=True)\n # Add loss function.\n\n text_net.losses(\n logits,\n localisations,\n b_glabels,\n b_glocalisations,\n b_gscores,\n match_threshold=FLAGS.match_threshold,\n negative_ratio=FLAGS.negative_ratio,\n alpha=FLAGS.loss_alpha,\n label_smoothing=FLAGS.label_smoothing,\n batch_size=FLAGS.batch_size)\n return end_points\n\n # Gather initial tensorboard summaries.\n summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))\n\n # =================================================================== #\n # Add summaries from first clone.\n # =================================================================== #\n clones = model_deploy.create_clones(deploy_config, clone_fn,\n [batch_queue])\n first_clone_scope = deploy_config.clone_scope(0)\n # Gather update_ops from the first clone. 
These contain, for example,\n # the updates for the batch_norm variables created by network_fn.\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n # Add summaries for end_points.\n end_points = clones[0].outputs\n for end_point in end_points:\n x = end_points[end_point]\n summaries.add(tf.summary.histogram('activations/' + end_point, x))\n summaries.add(\n tf.summary.scalar('sparsity/' + end_point,\n tf.nn.zero_fraction(x)))\n # Add summaries for losses.\n for loss in tf.get_collection(tf.GraphKeys.LOSSES):\n summaries.add(tf.summary.scalar(loss.op.name, loss))\n # Add summaries for extra losses.\n for loss in tf.get_collection('EXTRA_LOSSES'):\n summaries.add(tf.summary.scalar(loss.op.name, loss))\n # Add summaries for variables.\n for variable in slim.get_model_variables():\n summaries.add(tf.summary.histogram(variable.op.name, variable))\n\n # =================================================================== #\n # Configure the moving averages.\n # =================================================================== #\n if FLAGS.moving_average_decay:\n moving_average_variables = slim.get_model_variables()\n variable_averages = tf.train.ExponentialMovingAverage(\n FLAGS.moving_average_decay, global_step)\n else:\n moving_average_variables, variable_averages = None, None\n\n # =================================================================== #\n # Configure the optimization procedure.\n # =================================================================== #\n with tf.device(deploy_config.optimizer_device()):\n learning_rate = tf_utils.configure_learning_rate(\n FLAGS, dataset.num_samples, global_step)\n optimizer = tf_utils.configure_optimizer(\n FLAGS, learning_rate)\n # Add summaries for learning_rate.\n summaries.add(tf.summary.scalar('learning_rate', learning_rate))\n\n if FLAGS.moving_average_decay:\n # Update ops executed locally by trainer.\n update_ops.append(\n variable_averages.apply(moving_average_variables))\n\n # Variables to train.\n variables_to_train = tf_utils.get_variables_to_train(FLAGS)\n\n # and returns a train_tensor and summary_op\n total_loss, clones_gradients = model_deploy.optimize_clones(\n clones, optimizer, var_list=variables_to_train)\n\n # Add total_loss to summary.\n summaries.add(tf.summary.scalar('total_loss', total_loss))\n\n # Create gradient updates.\n grad_updates = optimizer.apply_gradients(\n clones_gradients, global_step=global_step)\n update_ops.append(grad_updates)\n update_op = tf.group(*update_ops)\n train_tensor = control_flow_ops.with_dependencies(\n [update_op], total_loss, name='train_op')\n\n # Add the summaries from the first clone. 
These contain the summaries\n summaries |= set(\n tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))\n # Merge all summaries together.\n summary_op = tf.summary.merge(list(summaries), name='summary_op')\n\n # =================================================================== #\n # Kicks off the training.\n # =================================================================== #\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)\n\n config = tf.ConfigProto(\n log_device_placement=False,\n allow_soft_placement=True,\n gpu_options=gpu_options)\n\n saver = tf.train.Saver(\n max_to_keep=100,\n keep_checkpoint_every_n_hours=1.0,\n write_version=2,\n pad_step_number=False)\n\n slim.learning.train(\n train_tensor,\n logdir=FLAGS.train_dir,\n master='',\n is_chief=True,\n # init_op=init_op,\n init_fn=tf_utils.get_init_fn(FLAGS),\n summary_op=summary_op, ##output variables to logdir\n number_of_steps=FLAGS.max_number_of_steps,\n log_every_n_steps=FLAGS.log_every_n_steps,\n save_summaries_secs=FLAGS.save_summaries_secs,\n saver=saver,\n save_interval_secs=FLAGS.save_interval_secs,\n session_config=config,\n sync_optimizer=None)\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.stack",
"tensorflow.train.ExponentialMovingAverage",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.GPUOptions",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.group",
"tensorflow.summary.scalar",
"tensorflow.Graph",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.get_collection",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.ConfigProto",
"tensorflow.contrib.slim.create_global_step",
"tensorflow.logging.set_verbosity",
"tensorflow.name_scope",
"tensorflow.train.Saver",
"tensorflow.app.run",
"tensorflow.contrib.slim.arg_scope",
"tensorflow.contrib.slim.dataset_data_provider.DatasetDataProvider",
"tensorflow.identity",
"tensorflow.global_variables_initializer",
"tensorflow.summary.histogram",
"tensorflow.contrib.slim.get_model_variables",
"tensorflow.nn.zero_fraction",
"tensorflow.app.flags.DEFINE_float"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
genggng/Meta-DETR | [
"45f9dcb4d3402b0e308e06537284fe2c8443b3f6"
] | [
"models/meta_detr.py"
] | [
"import copy\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.weight_norm import WeightNorm\n\nfrom util import box_ops\nfrom util.misc import (NestedTensor, nested_tensor_from_tensor_list,\n accuracy, get_world_size, is_dist_avail_and_initialized, inverse_sigmoid)\n\nfrom .backbone import build_backbone\nfrom .matcher import build_matcher\nfrom .deformable_transformer import build_deforamble_transformer\nfrom .deformable_transformer import DeformableTransformerDecoderLayer, DeformableTransformerDecoder\nfrom .position_encoding import TaskPositionalEncoding, QueryEncoding\n\n\ndef _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\nclass distLinear(nn.Module):\n def __init__(self, indim, outdim):\n super(distLinear, self).__init__()\n self.L = nn.Linear(indim, outdim, bias=False)\n self.class_wise_learnable_norm = True\n if self.class_wise_learnable_norm:\n WeightNorm.apply(self.L, 'weight', dim=0)\n self.scale_factor = 10\n\n def forward(self, x):\n x_norm = torch.norm(x, p=2, dim=1).unsqueeze(1).expand_as(x)\n x_normalized = x.div(x_norm + 0.00001)\n if not self.class_wise_learnable_norm:\n L_norm = torch.norm(self.L.weight.data, p=2, dim=1).unsqueeze(1).expand_as(self.L.weight.data)\n self.L.weight.data = self.L.weight.data.div(L_norm + 0.00001)\n cos_dist = self.L(x_normalized)\n scores = self.scale_factor * cos_dist\n return scores\n\n\nclass MetaDETR(nn.Module):\n \"\"\" This is the Meta-DETR module that performs object detection \"\"\"\n def __init__(self, args, backbone, transformer, num_classes, num_queries, num_feature_levels, aux_loss=True, with_box_refine=False):\n \"\"\" Initializes the model.\n Parameters:\n backbone: torch module of the backbone to be used. See backbone.py\n transformer: the deform-transformer architecture. 
See deformable_transformer.py\n num_classes: number of object classes\n num_queries: number of object queries, ie, detection slot.\n aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n with_box_refine: iterative bounding box refinement.\n \"\"\"\n super().__init__()\n self.args = args\n self.num_classes = num_classes\n self.num_queries = num_queries\n self.hidden_dim = args.hidden_dim\n self.num_feature_levels = num_feature_levels\n\n self.transformer = transformer\n self.task_positional_encoding = TaskPositionalEncoding(self.hidden_dim, dropout=0., max_len=self.args.episode_size)\n self.class_embed = nn.Linear(self.hidden_dim, self.args.episode_size)\n self.bbox_embed = MLP(self.hidden_dim, self.hidden_dim, 4, 3)\n if args.category_codes_cls_loss:\n if num_feature_levels == 1:\n self.category_codes_cls = distLinear(self.hidden_dim, self.num_classes)\n elif num_feature_levels > 1:\n category_codes_cls_list = []\n for _ in range(self.num_feature_levels):\n category_codes_cls_list.append(distLinear(self.hidden_dim, self.num_classes))\n self.category_codes_cls = nn.ModuleList(category_codes_cls_list)\n else:\n raise RuntimeError\n\n # self.query_embed = nn.Embedding(self.num_queries, self.hidden_dim * 2)\n queryencoding = QueryEncoding(self.hidden_dim, dropout=0., max_len=self.num_queries)\n qe = queryencoding()\n self.query_embed = torch.cat([qe, qe], dim=1)\n\n if self.num_feature_levels > 1:\n num_backbone_outs = len(backbone.strides)\n input_proj_list = []\n for _ in range(num_backbone_outs):\n in_channels = backbone.num_channels[_]\n input_proj_list.append(nn.Sequential(\n nn.Conv2d(in_channels, self.hidden_dim, kernel_size=1),\n nn.GroupNorm(32, self.hidden_dim),\n ))\n for _ in range(self.num_feature_levels - num_backbone_outs):\n input_proj_list.append(nn.Sequential(\n nn.Conv2d(in_channels, self.hidden_dim, kernel_size=3, stride=2, padding=1),\n nn.GroupNorm(32, self.hidden_dim),\n ))\n in_channels = self.hidden_dim\n self.input_proj = nn.ModuleList(input_proj_list)\n else:\n self.input_proj = nn.ModuleList([\n nn.Sequential(\n nn.Conv2d(backbone.num_channels[0], self.hidden_dim, kernel_size=1),\n nn.GroupNorm(32, self.hidden_dim),\n )])\n self.backbone = backbone\n self.with_box_refine = with_box_refine\n self.aux_loss = aux_loss\n\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n self.class_embed.bias.data = torch.ones(1) * bias_value\n nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n for proj in self.input_proj:\n nn.init.xavier_uniform_(proj[0].weight, gain=1)\n nn.init.constant_(proj[0].bias, 0)\n\n assert args.hidden_dim == self.hidden_dim\n decoder_layer = DeformableTransformerDecoderLayer(args.hidden_dim,\n args.dim_feedforward,\n args.dropout,\n 'relu',\n args.num_feature_levels,\n args.nheads,\n args.dec_n_points)\n\n self.meta_decoder = DeformableTransformerDecoder(decoder_layer,\n args.dec_layers,\n return_intermediate=True)\n\n num_pred = self.meta_decoder.num_layers\n if with_box_refine:\n self.class_embed = _get_clones(self.class_embed, num_pred)\n self.bbox_embed = _get_clones(self.bbox_embed, num_pred)\n nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)\n # hack implementation for iterative bounding box refinement\n self.meta_decoder.bbox_embed = self.bbox_embed\n else:\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)\n self.class_embed = nn.ModuleList([self.class_embed for _ in 
range(num_pred)])\n self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])\n\n def forward(self, samples, targets=None, supp_samples=None, supp_class_ids=None, supp_targets=None, category_codes=None):\n\n if not isinstance(samples, NestedTensor):\n samples = nested_tensor_from_tensor_list(samples)\n\n batchsize = samples.tensors.shape[0]\n device = samples.tensors.device\n\n # During training, category_codes are generated from sampled (supp_samples, supp_class_ids, supp_targets)\n if self.training:\n assert supp_samples is not None\n assert supp_class_ids is not None\n assert supp_targets is not None\n # During training stage: we don't have to cover all categories, so there is only 1 episode\n num_support = supp_class_ids.shape[0]\n support_batchsize = self.args.episode_size\n assert num_support == (self.args.episode_size * self.args.episode_num)\n num_episode = self.args.episode_num\n category_codes = self.compute_category_codes(supp_samples, supp_targets)\n # During inference, category_codes should be provided and ready to use for all activated categories\n else:\n assert category_codes is not None\n assert supp_class_ids is not None\n # During inference stage: there are multiple episodes to cover all categories, including both base and novel\n num_support = supp_class_ids.shape[0]\n support_batchsize = self.args.episode_size\n num_episode = math.ceil(num_support / support_batchsize)\n\n features, pos = self.backbone(samples)\n\n srcs = []\n masks = []\n for l, feat in enumerate(features):\n src, mask = feat.decompose()\n srcs.append(self.input_proj[l](src))\n masks.append(mask)\n assert mask is not None\n if self.num_feature_levels > len(srcs):\n _len_srcs = len(srcs)\n for l in range(_len_srcs, self.num_feature_levels):\n if l == _len_srcs:\n src = self.input_proj[l](features[-1].tensors)\n else:\n src = self.input_proj[l](srcs[-1])\n m = samples.mask\n mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]\n pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)\n srcs.append(src)\n masks.append(mask)\n pos.append(pos_l)\n\n query_embeds = self.query_embed.to(device)\n\n # To store predictions for each episode\n meta_outputs_classes = []\n meta_outputs_coords = []\n meta_support_class_ids = []\n\n for i in range(num_episode):\n\n if self.num_feature_levels == 1:\n if (support_batchsize * (i + 1)) <= num_support:\n cc = [c[(support_batchsize * i): (support_batchsize * (i + 1)), :].unsqueeze(0).expand(batchsize, -1, -1) for c in category_codes]\n episode_class_ids = supp_class_ids[(support_batchsize * i): (support_batchsize * (i + 1))]\n else:\n cc = [c[-support_batchsize:, :].unsqueeze(0).expand(batchsize, -1, -1) for c in category_codes]\n episode_class_ids = supp_class_ids[-support_batchsize:]\n elif self.num_feature_levels == 4:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n _, init_reference, _, encoder_outputs = \\\n self.transformer(srcs, masks, pos, query_embeds, cc,\n self.task_positional_encoding(torch.zeros(self.args.episode_size, self.hidden_dim, device=device)).unsqueeze(0).expand(batchsize, -1, -1))\n\n (memory, spatial_shapes, level_start_index, valid_ratios, query_embed, mask_flatten, tgt) = encoder_outputs\n\n # Category-agnostic transformer decoder\n hs, inter_references = self.meta_decoder(\n tgt,\n init_reference,\n memory,\n spatial_shapes,\n level_start_index,\n valid_ratios,\n query_embed,\n mask_flatten,\n )\n\n # Final FFN to predict confidence scores and boxes coordinates\n outputs_classes 
= []\n outputs_coords = []\n for lvl in range(hs.shape[0]):\n if lvl == 0:\n reference = init_reference.reshape(batchsize, self.num_queries, 2)\n else:\n reference = inter_references[lvl - 1]\n reference = inverse_sigmoid(reference)\n outputs_class = self.class_embed[lvl](hs[lvl])\n tmp = self.bbox_embed[lvl](hs[lvl])\n if reference.shape[-1] == 4:\n tmp += reference\n else:\n assert reference.shape[-1] == 2\n tmp[..., :2] += reference\n outputs_coord = tmp.sigmoid()\n outputs_classes.append(outputs_class.view(batchsize, self.num_queries, self.args.episode_size))\n outputs_coords.append(outputs_coord.view(batchsize, self.num_queries, 4))\n\n meta_outputs_classes.append(torch.stack(outputs_classes))\n meta_outputs_coords.append(torch.stack(outputs_coords))\n meta_support_class_ids.append(episode_class_ids)\n\n # Calculate targets for the constructed meta-tasks\n # meta_targets are computed based on original targets and the sampled support images.\n meta_targets = []\n for b in range(batchsize):\n for episode_class_ids in meta_support_class_ids:\n meta_target = dict()\n target_indexes = [i for i, x in enumerate(targets[b]['labels'].tolist()) if x in episode_class_ids]\n meta_target['boxes'] = targets[b]['boxes'][target_indexes]\n meta_target['labels'] = targets[b]['labels'][target_indexes]\n meta_target['area'] = targets[b]['area'][target_indexes]\n meta_target['iscrowd'] = targets[b]['iscrowd'][target_indexes]\n meta_target['image_id'] = targets[b]['image_id']\n meta_target['size'] = targets[b]['size']\n meta_target['orig_size'] = targets[b]['orig_size']\n meta_targets.append(meta_target)\n\n # Create tensors for final outputs\n # default logits are -inf (default confidence scores are 0.00 after sigmoid)\n final_meta_outputs_classes = torch.ones(hs.shape[0], batchsize, num_episode, self.num_queries, self.num_classes, device=device) * (-999999.99)\n final_meta_outputs_coords = torch.zeros(hs.shape[0], batchsize, num_episode, self.num_queries, 4, device=device)\n # Fill in predicted logits into corresponding positions\n class_ids_already_filled_in = []\n for episode_index, (pred_classes, pred_coords, class_ids) in enumerate(zip(meta_outputs_classes, meta_outputs_coords, meta_support_class_ids)):\n for class_index, class_id in enumerate(class_ids):\n # During inference, we need to ignore the classes that already have predictions\n # During training, the same category might appear over different episodes, so no need to filter\n if self.training or (class_id.item() not in class_ids_already_filled_in):\n class_ids_already_filled_in.append(class_id.item())\n final_meta_outputs_classes[:, :, episode_index, :, class_id] = pred_classes[:, :, :, class_index]\n final_meta_outputs_coords[:, :, episode_index, :, :] = pred_coords[:, :, :, :]\n # Pretend we have a batchsize of (batchsize x num_support), and produce final predictions\n final_meta_outputs_classes = final_meta_outputs_classes.view(hs.shape[0], batchsize * num_episode, self.num_queries, self.num_classes)\n final_meta_outputs_coords = final_meta_outputs_coords.view(hs.shape[0], batchsize * num_episode, self.num_queries, 4)\n\n out = dict()\n\n out['pred_logits'] = final_meta_outputs_classes[-1]\n out['pred_boxes'] = final_meta_outputs_coords[-1]\n out['activated_class_ids'] = torch.stack(meta_support_class_ids).unsqueeze(0).expand(batchsize, -1, -1).reshape(batchsize * num_episode, -1)\n out['meta_targets'] = meta_targets # Add meta_targets into outputs for optimization\n\n out['batchsize'] = batchsize\n out['num_episode'] = num_episode\n 
out['num_queries'] = self.num_queries\n out['num_classes'] = self.num_classes\n\n if self.args.category_codes_cls_loss:\n if self.num_feature_levels == 1:\n # out['category_codes_cls_logits'] = self.category_codes_cls(category_codes)\n # out['category_codes_cls_targets'] = supp_class_ids\n # TODO: category_codes_cls_loss @ every encoder layer! THIS IS ONLY TRIAL!\n #out['category_codes_cls_logits'] = self.category_codes_cls(torch.cat(category_codes, dim=0))\n #out['category_codes_cls_targets'] = supp_class_ids.repeat(self.args.dec_layers)\n\n out['category_codes_cls_logits'] = self.category_codes_cls(category_codes[0])\n out['category_codes_cls_targets'] = supp_class_ids\n elif self.num_feature_levels == 4:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n if self.aux_loss:\n out['aux_outputs'] = self._set_aux_loss(final_meta_outputs_classes, final_meta_outputs_coords)\n for aux_output in out['aux_outputs']:\n aux_output['activated_class_ids'] = torch.stack(meta_support_class_ids).unsqueeze(0).expand(batchsize, -1, -1).reshape(batchsize * num_episode, -1)\n return out\n\n def compute_category_codes(self, supp_samples, supp_targets):\n num_supp = supp_samples.tensors.shape[0]\n\n if self.num_feature_levels == 1:\n features, pos = self.backbone.forward_supp_branch(supp_samples, return_interm_layers=False)\n srcs = []\n masks = []\n for l, feat in enumerate(features):\n src, mask = feat.decompose()\n srcs.append(self.input_proj[l](src))\n masks.append(mask)\n assert mask is not None\n\n boxes = [box_ops.box_cxcywh_to_xyxy(t['boxes']) for t in supp_targets]\n # and from relative [0, 1] to absolute [0, height] coordinates\n img_sizes = torch.stack([t[\"size\"] for t in supp_targets], dim=0)\n img_h, img_w = img_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n for b in range(num_supp):\n boxes[b] *= scale_fct[b]\n\n query_embeds = self.query_embed.to(src.device)\n\n tsp = self.task_positional_encoding(torch.zeros(self.args.episode_size, self.hidden_dim, device=src.device)).unsqueeze(0).expand(num_supp, -1, -1)\n\n category_codes_list = list()\n\n for i in range(num_supp // self.args.episode_size):\n category_codes_list.append(\n self.transformer.forward_supp_branch([srcs[0][i*self.args.episode_size: (i+1)*self.args.episode_size]],\n [masks[0][i*self.args.episode_size: (i+1)*self.args.episode_size]],\n [pos[0][i*self.args.episode_size: (i+1)*self.args.episode_size]],\n query_embeds,\n tsp[i*self.args.episode_size: (i+1)*self.args.episode_size],\n boxes[i*self.args.episode_size: (i+1)*self.args.episode_size])\n )\n\n final_category_codes_list = []\n for i in range(self.args.enc_layers):\n final_category_codes_list.append(\n torch.cat([ccl[i] for ccl in category_codes_list], dim=0)\n )\n\n return final_category_codes_list\n\n elif self.num_feature_levels == 4:\n raise NotImplementedError\n else:\n raise NotImplementedError\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]\n\n\ndef sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n 
targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n return loss.mean(1).sum() / num_boxes\n\n\nclass SetCriterion(nn.Module):\n \"\"\" This class computes the loss for Meta-DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n def __init__(self, args, num_classes, matcher, weight_dict, losses, focal_alpha=0.25):\n \"\"\" Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n focal_alpha: alpha in Focal Loss\n \"\"\"\n super().__init__()\n self.args = args\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.losses = losses\n self.focal_alpha = focal_alpha\n\n def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert 'pred_logits' in outputs\n src_logits = outputs['pred_logits']\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device)\n target_classes[idx] = target_classes_o\n\n target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],\n dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n\n target_classes_onehot = target_classes_onehot[:, :, :-1]\n\n # ################### Only Produce Loss for Activated Categories ###################\n activated_class_ids = outputs['activated_class_ids'] # (bs, num_support)\n activated_class_ids = activated_class_ids.unsqueeze(1).repeat(1, target_classes_onehot.shape[1], 1)\n loss_ce = sigmoid_focal_loss(src_logits.gather(2, activated_class_ids),\n target_classes_onehot.gather(2, activated_class_ids),\n num_boxes,\n alpha=self.focal_alpha,\n gamma=2)\n\n loss_ce = loss_ce * src_logits.shape[1]\n\n losses = {'loss_ce': loss_ce}\n\n if log:\n losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n\n return losses\n\n @torch.no_grad()\n def loss_cardinality(self, outputs, targets, indices, num_boxes):\n \"\"\" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n This is not really a 
loss, it is intended for logging purposes only. It doesn't propagate gradients\n \"\"\"\n pred_logits = outputs['pred_logits']\n device = pred_logits.device\n tgt_lengths = torch.as_tensor([len(v[\"labels\"]) for v in targets], device=device)\n # Count the number of predictions that are NOT \"no-object\" (which is the last class)\n card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)\n card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())\n losses = {'cardinality_error': card_err}\n return losses\n\n def loss_boxes(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size.\n \"\"\"\n assert 'pred_boxes' in outputs\n idx = self._get_src_permutation_idx(indices)\n src_boxes = outputs['pred_boxes'][idx]\n target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)\n\n loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')\n\n losses = dict()\n losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n\n loss_giou = 1 - torch.diag(\n box_ops.generalized_box_iou(\n box_ops.box_cxcywh_to_xyxy(src_boxes),\n box_ops.box_cxcywh_to_xyxy(target_boxes)\n )\n )\n losses['loss_giou'] = loss_giou.sum() / num_boxes\n\n return losses\n\n def loss_category_codes_cls(self, outputs, targets, indices, num_boxes):\n logits = outputs['category_codes_cls_logits']\n targets = outputs['category_codes_cls_targets']\n losses = {\n \"loss_category_codes_cls\": F.cross_entropy(logits, targets)\n }\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n loss_map = {\n 'labels': self.loss_labels,\n 'cardinality': self.loss_cardinality,\n 'boxes': self.loss_boxes,\n 'category_codes_cls': self.loss_category_codes_cls,\n }\n assert loss in loss_map, f'do you really want to compute {loss} loss?'\n return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n def forward(self, outputs):\n \"\"\" This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n \"\"\"\n # Since we are doing meta-learning over our constructed meta-tasks, the targets for these meta-tasks are\n # stored in outputs['meta_targets']. 
We don't use the original targets.\n targets = outputs['meta_targets']\n\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs' and k != 'enc_outputs'}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes across all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n kwargs = {}\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if 'aux_outputs' in outputs:\n for i, aux_outputs in enumerate(outputs['aux_outputs']):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n if loss == 'category_codes_cls':\n # meta-attention cls loss not for aux_outputs\n continue\n kwargs = {}\n if loss == 'labels':\n # Logging is enabled only for the last layer\n kwargs['log'] = False\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses\n\n\nclass PostProcess(nn.Module):\n \"\"\" This module converts the model's output into the format expected by the coco api\"\"\"\n @torch.no_grad()\n def forward(self, outputs, target_sizes):\n \"\"\" Perform the computation\n Parameters:\n outputs: raw outputs of the model\n target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch\n For evaluation, this must be the original image size (before any data augmentation)\n For visualization, this should be the image size after data augmentation, but before padding\n \"\"\"\n out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']\n\n batchsize = outputs['batchsize']\n num_episode = outputs['num_episode']\n num_queries = outputs['num_queries']\n num_classes = outputs['num_classes']\n\n out_logits = out_logits.view(batchsize, num_episode * num_queries, num_classes)\n out_bbox = out_bbox.view(batchsize, num_episode * num_queries, 4)\n\n assert len(out_logits) == len(target_sizes)\n assert target_sizes.shape[1] == 2\n\n prob = out_logits.sigmoid()\n topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1)\n scores = topk_values\n topk_boxes = topk_indexes // out_logits.shape[2]\n labels = topk_indexes % out_logits.shape[2]\n boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)\n boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n\n # and from relative [0, 1] to absolute [0, height] coordinates\n img_h, img_w = target_sizes.unbind(1)\n scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n boxes = boxes * scale_fct[:, None, :]\n\n results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n\n return results\n\n\nclass MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, 
k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x\n\n\ndef build(args):\n\n if args.dataset_file in ['coco', 'coco_base']:\n num_classes = 91\n elif args.dataset_file in ['voc', 'voc_base1', 'voc_base2', 'voc_base3']:\n num_classes = 21\n else:\n raise ValueError('Unknown args.dataset_file!')\n\n device = torch.device(args.device)\n\n backbone = build_backbone(args)\n transformer = build_deforamble_transformer(args)\n model = MetaDETR(\n args,\n backbone,\n transformer,\n num_classes=num_classes,\n num_queries=args.num_queries,\n num_feature_levels=args.num_feature_levels,\n aux_loss=args.aux_loss,\n with_box_refine=args.with_box_refine,\n )\n\n matcher = build_matcher(args)\n\n weight_dict = dict()\n weight_dict['loss_ce'] = args.cls_loss_coef\n weight_dict['loss_bbox'] = args.bbox_loss_coef\n weight_dict['loss_giou'] = args.giou_loss_coef\n\n if args.aux_loss:\n aux_weight_dict = {}\n for i in range(args.dec_layers - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})\n aux_weight_dict.update({k + f'_enc': v for k, v in weight_dict.items()})\n weight_dict.update(aux_weight_dict)\n\n if args.category_codes_cls_loss:\n weight_dict[\"loss_category_codes_cls\"] = args.category_codes_cls_loss_coef\n\n losses = ['labels', 'boxes', 'cardinality']\n\n if args.category_codes_cls_loss:\n losses += [\"category_codes_cls\"]\n\n criterion = SetCriterion(args, num_classes, matcher, weight_dict, losses, focal_alpha=args.focal_alpha)\n criterion.to(device)\n\n postprocessors = {'bbox': PostProcess()}\n\n return model, criterion, postprocessors\n\n"
] | [
[
"torch.nn.functional.l1_loss",
"torch.cat",
"torch.zeros",
"torch.no_grad",
"torch.full_like",
"torch.device",
"torch.nn.utils.weight_norm.WeightNorm.apply",
"torch.norm",
"torch.ones",
"torch.nn.GroupNorm",
"torch.full",
"torch.nn.init.constant_",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.stack",
"torch.nn.functional.cross_entropy",
"torch.nn.init.xavier_uniform_",
"torch.distributed.all_reduce"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ZhenyueQin/PANet | [
"a186d7ca06533a666299c1c07c632f44e4e34d48",
"a186d7ca06533a666299c1c07c632f44e4e34d48"
] | [
"lib/datasets/voc_dataset_evaluator.py",
"lib/modeling/collect_and_distribute_fpn_rpn_proposals.py"
] | [
"# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n\"\"\"PASCAL VOC dataset evaluation interface.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport numpy as np\nimport os\nimport shutil\nimport uuid\n\nfrom lib.core.config import cfg\nfrom lib.datasets.dataset_catalog import DATASETS\nfrom lib.datasets.dataset_catalog import DEVKIT_DIR\nfrom lib.datasets.voc_eval import voc_eval\nfrom lib.utils.io import save_object\n\nlogger = logging.getLogger(__name__)\n\n\ndef evaluate_boxes(\n json_dataset,\n all_boxes,\n output_dir,\n use_salt=True,\n cleanup=True,\n use_matlab=False\n):\n salt = '_{}'.format(str(uuid.uuid4())) if use_salt else ''\n filenames = _write_voc_results_files(json_dataset, all_boxes, salt)\n _do_python_eval(json_dataset, salt, output_dir)\n if use_matlab:\n _do_matlab_eval(json_dataset, salt, output_dir)\n if cleanup:\n for filename in filenames:\n shutil.copy(filename, output_dir)\n os.remove(filename)\n return None\n\n\ndef _write_voc_results_files(json_dataset, all_boxes, salt):\n filenames = []\n image_set_path = voc_info(json_dataset)['image_set_path']\n assert os.path.exists(image_set_path), \\\n 'Image set path does not exist: {}'.format(image_set_path)\n with open(image_set_path, 'r') as f:\n image_index = [x.strip() for x in f.readlines()]\n # Sanity check that order of images in json dataset matches order in the\n # image set\n roidb = json_dataset.get_roidb()\n for i, entry in enumerate(roidb):\n index = os.path.splitext(os.path.split(entry['image'])[1])[0]\n assert index == image_index[i]\n for cls_ind, cls in enumerate(json_dataset.classes):\n if cls == '__background__':\n continue\n logger.info('Writing VOC results for: {}'.format(cls))\n filename = _get_voc_results_file_template(json_dataset,\n salt).format(cls)\n filenames.append(filename)\n assert len(all_boxes[cls_ind]) == len(image_index)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(image_index):\n dets = all_boxes[cls_ind][im_ind]\n if type(dets) == list:\n assert len(dets) == 0, \\\n 'dets should be numpy.ndarray or empty list'\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n return filenames\n\n\ndef _get_voc_results_file_template(json_dataset, salt):\n info = voc_info(json_dataset)\n year = info['year']\n image_set = info['image_set']\n devkit_path = info['devkit_path']\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = 'comp4' + salt + '_det_' + image_set + '_{:s}.txt'\n return os.path.join(devkit_path, 'results', 'VOC' + year, 'Main', filename)\n\n\ndef _do_python_eval(json_dataset, salt, 
output_dir='output'):\n info = voc_info(json_dataset)\n year = info['year']\n anno_path = info['anno_path']\n image_set_path = info['image_set_path']\n devkit_path = info['devkit_path']\n cachedir = os.path.join(devkit_path, 'annotations_cache')\n aps = []\n # The PASCAL VOC metric changed in 2010\n use_07_metric = True if int(year) < 2010 else False\n logger.info('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for _, cls in enumerate(json_dataset.classes):\n if cls == '__background__':\n continue\n filename = _get_voc_results_file_template(\n json_dataset, salt).format(cls)\n rec, prec, ap = voc_eval(\n filename, anno_path, image_set_path, cls, cachedir, ovthresh=0.5,\n use_07_metric=use_07_metric)\n aps += [ap]\n logger.info('AP for {} = {:.4f}'.format(cls, ap))\n res_file = os.path.join(output_dir, cls + '_pr.pkl')\n save_object({'rec': rec, 'prec': prec, 'ap': ap}, res_file)\n logger.info('Mean AP = {:.4f}'.format(np.mean(aps)))\n logger.info('~~~~~~~~')\n logger.info('Results:')\n for ap in aps:\n logger.info('{:.3f}'.format(ap))\n logger.info('{:.3f}'.format(np.mean(aps)))\n logger.info('~~~~~~~~')\n logger.info('')\n logger.info('----------------------------------------------------------')\n logger.info('Results computed with the **unofficial** Python eval code.')\n logger.info('Results should be very close to the official MATLAB code.')\n logger.info('Use `./tools/reval.py --matlab ...` for your paper.')\n logger.info('-- Thanks, The Management')\n logger.info('----------------------------------------------------------')\n\n\ndef _do_matlab_eval(json_dataset, salt, output_dir='output'):\n import subprocess\n logger.info('-----------------------------------------------------')\n logger.info('Computing results with the official MATLAB eval code.')\n logger.info('-----------------------------------------------------')\n info = voc_info(json_dataset)\n path = os.path.join(\n cfg.ROOT_DIR, 'lib', 'datasets', 'VOCdevkit-matlab-wrapper')\n cmd = 'cd {} && '.format(path)\n cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)\n cmd += '-r \"dbstop if error; '\n cmd += 'voc_eval(\\'{:s}\\',\\'{:s}\\',\\'{:s}\\',\\'{:s}\\'); quit;\"' \\\n .format(info['devkit_path'], 'comp4' + salt, info['image_set'],\n output_dir)\n logger.info('Running:\\n{}'.format(cmd))\n subprocess.call(cmd, shell=True)\n\n\ndef voc_info(json_dataset):\n year = json_dataset.name[4:8]\n image_set = json_dataset.name[9:]\n devkit_path = DATASETS[json_dataset.name][DEVKIT_DIR]\n assert os.path.exists(devkit_path), \\\n 'Devkit directory {} not found'.format(devkit_path)\n anno_path = os.path.join(\n devkit_path, 'VOC' + year, 'Annotations', '{:s}.xml')\n image_set_path = os.path.join(\n devkit_path, 'VOC' + year, 'ImageSets', 'Main', image_set + '.txt')\n return dict(\n year=year,\n image_set=image_set,\n devkit_path=devkit_path,\n anno_path=anno_path,\n image_set_path=image_set_path)\n",
"import numpy as np\nfrom torch import nn\n\nfrom lib.core.config import cfg\nfrom lib.datasets import json_dataset\nimport lib.roi_data.fast_rcnn\nfrom lib.nn.parallel import utils as fpn_utils\n\n\nclass CollectAndDistributeFpnRpnProposalsOp(nn.Module):\n \"\"\"Merge RPN proposals generated at multiple FPN levels and then\n distribute those proposals to their appropriate FPN levels. An anchor\n at one FPN level may predict an RoI that will map to another level,\n hence the need to redistribute the proposals.\n\n This function assumes standard blob names for input and output blobs.\n\n Input blobs: [rpn_rois_fpn<min>, ..., rpn_rois_fpn<max>,\n rpn_roi_probs_fpn<min>, ..., rpn_roi_probs_fpn<max>]\n - rpn_rois_fpn<i> are the RPN proposals for FPN level i; see rpn_rois\n documentation from GenerateProposals.\n - rpn_roi_probs_fpn<i> are the RPN objectness probabilities for FPN\n level i; see rpn_roi_probs documentation from GenerateProposals.\n\n If used during training, then the input blobs will also include:\n [roidb, im_info] (see GenerateProposalLabels).\n\n Output blobs: [rois_fpn<min>, ..., rois_rpn<max>, rois,\n rois_idx_restore]\n - rois_fpn<i> are the RPN proposals for FPN level i\n - rois_idx_restore is a permutation on the concatenation of all\n rois_fpn<i>, i=min...max, such that when applied the RPN RoIs are\n restored to their original order in the input blobs.\n\n If used during training, then the output blobs will also include:\n [labels, bbox_targets, bbox_inside_weights, bbox_outside_weights].\n \"\"\"\n def __init__(self):\n super().__init__()\n\n def forward(self, inputs, roidb, im_info):\n \"\"\"\n Args:\n inputs: a list of [rpn_rois_fpn2, ..., rpn_rois_fpn6,\n rpn_roi_probs_fpn2, ..., rpn_roi_probs_fpn6]\n im_info: [[im_height, im_width, im_scale], ...]\n \"\"\"\n rois = collect(inputs, self.training)\n if self.training:\n # During training we reuse the data loader code. We populate roidb\n # entries on the fly using the rois generated by RPN.\n im_scales = im_info.data.numpy()[:, 2]\n # For historical consistency with the original Faster R-CNN\n # implementation we are *not* filtering crowd proposals.\n # This choice should be investigated in the future (it likely does\n # not matter).\n json_dataset.add_proposals(roidb, rois, im_scales, crowd_thresh=0)\n # Compute training labels for the RPN proposals; also handles\n # distributing the proposals over FPN levels\n output_blob_names = roi_data.fast_rcnn.get_fast_rcnn_blob_names()\n blobs = {k: [] for k in output_blob_names}\n roi_data.fast_rcnn.add_fast_rcnn_blobs(blobs, im_scales, roidb)\n else:\n # For inference we have a special code path that avoids some data\n # loader overhead\n blobs = distribute(rois, None)\n\n return blobs\n\n\ndef collect(inputs, is_training):\n cfg_key = 'TRAIN' if is_training else 'TEST'\n post_nms_topN = int(cfg[cfg_key].RPN_POST_NMS_TOP_N * cfg.FPN.RPN_COLLECT_SCALE + 0.5)\n k_max = cfg.FPN.RPN_MAX_LEVEL\n k_min = cfg.FPN.RPN_MIN_LEVEL\n num_lvls = k_max - k_min + 1\n roi_inputs = inputs[:num_lvls]\n score_inputs = inputs[num_lvls:]\n\n # rois are in [[batch_idx, x0, y0, x1, y2], ...] 
format\n # Combine predictions across all levels and retain the top scoring\n rois = np.concatenate(roi_inputs)\n scores = np.concatenate(score_inputs).squeeze()\n inds = np.argsort(-scores)[:post_nms_topN]\n rois = rois[inds, :]\n return rois\n\n\ndef distribute(rois, label_blobs):\n \"\"\"To understand the output blob order see return value of\n roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)\n \"\"\"\n lvl_min = cfg.FPN.ROI_MIN_LEVEL\n lvl_max = cfg.FPN.ROI_MAX_LEVEL\n lvls = fpn_utils.map_rois_to_fpn_levels(rois[:, 1:5], lvl_min, lvl_max)\n\n # Delete roi entries that have negative area\n # idx_neg = np.where(lvls == -1)[0]\n # rois = np.delete(rois, idx_neg, axis=0)\n # lvls = np.delete(lvls, idx_neg, axis=0)\n\n output_blob_names = roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)\n outputs = [None] * len(output_blob_names)\n outputs[0] = rois\n\n # Create new roi blobs for each FPN level\n # (See: utils.fpn.add_multilevel_roi_blobs which is similar but annoying\n # to generalize to support this particular case.)\n rois_idx_order = np.empty((0, ))\n for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):\n idx_lvl = np.where(lvls == lvl)[0]\n blob_roi_level = rois[idx_lvl, :]\n outputs[output_idx + 1] = blob_roi_level\n rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))\n rois_idx_restore = np.argsort(rois_idx_order)\n outputs[-1] = rois_idx_restore.astype(np.int32)\n\n return dict(zip(output_blob_names, outputs))\n"
] | [
[
"numpy.mean"
],
[
"numpy.concatenate",
"numpy.argsort",
"numpy.where",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
henninglebbaeus/simpletransformers | [
"99ede240385fb7999abd46f7cbd708d8951e945f"
] | [
"simpletransformers/classification/multi_modal_classification_model.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport logging\nimport math\nimport os\nimport random\nimport warnings\nfrom multiprocessing import cpu_count\nfrom dataclasses import asdict\n\nimport numpy as np\nfrom scipy.stats import mode, pearsonr\nfrom sklearn.metrics import (\n confusion_matrix,\n label_ranking_average_precision_score,\n matthews_corrcoef,\n mean_squared_error,\n)\nfrom tqdm.auto import tqdm, trange\n\nimport pandas as pd\nimport torch\nfrom simpletransformers.classification.classification_utils import (\n ImageEncoder,\n InputExample,\n JsonlDataset,\n collate_fn,\n convert_examples_to_features,\n get_image_transforms,\n)\nfrom simpletransformers.classification.transformer_models.mmbt_model import MMBTForClassification\nfrom simpletransformers.config.model_args import MultiModalClassificationArgs\nfrom simpletransformers.config.global_args import global_args\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import (\n WEIGHTS_NAME,\n AdamW,\n BertConfig,\n BertModel,\n BertTokenizer,\n get_linear_schedule_with_warmup,\n BERT_PRETRAINED_MODEL_ARCHIVE_LIST,\n)\nfrom transformers.configuration_mmbt import MMBTConfig\n\ntry:\n import wandb\n\n wandb_available = True\nexcept ImportError:\n wandb_available = False\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiModalClassificationModel:\n def __init__(\n self,\n model_type,\n model_name,\n multi_label=False,\n label_list=None,\n num_labels=None,\n pos_weight=None,\n args=None,\n use_cuda=True,\n cuda_device=-1,\n **kwargs,\n ):\n\n \"\"\"\n Initializes a MultiModalClassificationModel model.\n\n Args:\n model_type: The type of model (bert, xlnet, xlm, roberta, distilbert, albert)\n model_name: Default Transformer model name or path to a directory containing Transformer model file (pytorch_nodel.bin).\n multi_label (optional): Set to True for multi label tasks.\n label_list (optional) : A list of all the labels (str) in the dataset.\n num_labels (optional): The number of labels or classes in the dataset.\n pos_weight (optional): A list of length num_labels containing the weights to assign to each label for loss calculation.\n args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.\n use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.\n cuda_device (optional): Specific GPU that should be used. 
Will use the first available GPU by default.\n **kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.\n \"\"\" # noqa: ignore flake8\"\n\n MODEL_CLASSES = {\n \"bert\": (BertConfig, BertModel, BertTokenizer),\n }\n\n self.args = self._load_model_args(model_name)\n\n if isinstance(args, dict):\n self.args.update_from_dict(args)\n elif isinstance(args, MultiModalClassificationArgs):\n self.args = args\n\n if \"sweep_config\" in kwargs:\n sweep_config = kwargs.pop(\"sweep_config\")\n sweep_values = {key: value[\"value\"] for key, value in sweep_config.as_dict().items() if key != \"_wandb\"}\n self.args.update_from_dict(sweep_values)\n\n if self.args.manual_seed:\n random.seed(self.args.manual_seed)\n np.random.seed(self.args.manual_seed)\n torch.manual_seed(self.args.manual_seed)\n if self.args.n_gpu > 0:\n torch.cuda.manual_seed_all(self.args.manual_seed)\n\n config_class, model_class, tokenizer_class = MODEL_CLASSES[model_type]\n\n self.label_list = label_list\n if self.label_list and not num_labels:\n num_labels = len(self.label_list)\n elif self.label_list and num_labels:\n if len(self.label_list) != num_labels:\n raise ValueError(f\"Mismatch in num_labels ({num_labels}) and length of label_list ({len(label_list)})\")\n\n if num_labels and not self.label_list:\n self.label_list = [str(i) for i in range(num_labels)]\n\n if num_labels:\n self.transformer_config = config_class.from_pretrained(model_name, num_labels=num_labels, **kwargs)\n self.num_labels = num_labels\n else:\n self.transformer_config = config_class.from_pretrained(model_name, **kwargs)\n self.num_labels = self.transformer_config.num_labels\n\n self.multi_label = multi_label\n\n if use_cuda:\n if torch.cuda.is_available():\n if cuda_device == -1:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(f\"cuda:{cuda_device}\")\n else:\n raise ValueError(\n \"'use_cuda' set to True when cuda is unavailable.\"\n \" Make sure CUDA is available or set use_cuda=False.\"\n )\n else:\n self.device = \"cpu\"\n\n self.transformer = model_class.from_pretrained(model_name, config=self.transformer_config, **kwargs)\n self.config = MMBTConfig(self.transformer_config, num_labels=self.num_labels)\n self.results = {}\n\n self.img_encoder = ImageEncoder(self.args)\n self.model = MMBTForClassification(self.config, self.transformer, self.img_encoder)\n\n if model_name not in BERT_PRETRAINED_MODEL_ARCHIVE_LIST:\n try:\n self.model.load_state_dict(torch.load(os.path.join(model_name, \"pytorch_model.bin\")))\n except EnvironmentError:\n msg = (\n \"Model name '{}' was not found in model name list ({}). 
\"\n \"We assumed '{}' was a path or url to model weight files named one of {} but \"\n \"couldn't find any such file at this path or url.\".format(\n model_name, \", \".join(BERT_PRETRAINED_MODEL_ARCHIVE_LIST), model_name, \"pytorch_model.bin\",\n )\n )\n raise EnvironmentError(msg)\n\n self.tokenizer = tokenizer_class.from_pretrained(model_name, do_lower_case=self.args.do_lower_case, **kwargs)\n\n self.args.model_name = model_name\n self.args.model_type = model_type\n\n if model_type in [\"camembert\", \"xlmroberta\"]:\n warnings.warn(\n f\"use_multiprocessing automatically disabled as {model_type}\"\n \" fails when using multiprocessing for feature conversion.\"\n )\n self.args.use_multiprocessing = False\n\n self.args.model_name = model_name\n self.args.model_type = model_type\n\n if model_type in [\"camembert\", \"xlmroberta\"]:\n warnings.warn(\n f\"use_multiprocessing automatically disabled as {model_type}\"\n \" fails when using multiprocessing for feature conversion.\"\n )\n self.args.use_multiprocessing = False\n\n if self.args.wandb_project and not wandb_available:\n warnings.warn(\"wandb_project specified but wandb is not available. Wandb disabled.\")\n self.args.wandb_project = None\n\n if multi_label:\n self.criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)\n elif self.num_labels == 1:\n self.criterion = torch.nn.MSELoss()\n else:\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def train_model(\n self,\n train_data,\n files_list=None,\n image_path=None,\n text_label=None,\n labels_label=None,\n images_label=None,\n image_type_extension=None,\n data_type_extension=None,\n auto_weights=False,\n output_dir=None,\n show_running_loss=True,\n args=None,\n eval_data=None,\n verbose=True,\n **kwargs,\n ):\n \"\"\"\n Trains the model using 'train_df'\n\n Args:\n data: Path to data directory containing text files (JSON) and image files OR a Pandas DataFrame.\n If a DataFrame is given, it should contain the columns [text, labels, images]. When using a DataFrame,\n image_path MUST be specified. The image column of the DataFrame should contain the relative path from\n image_path to the image.\n E.g:\n For an image file 1.jpeg located in \"data/train/\";\n image_path = \"data/train/\"\n images = \"1.jpeg\"\n files_list (optional): If given, only the files specified in this list will be taken from data directory.\n files_list can be a Python list or the path (str) to a JSON file containing a list of files.\n image_path (optional): Must be specified when using DataFrame as input. Path to the directory containing the\n images.\n text_label (optional): Column name to look for instead of the default \"text\"\n labels_label (optional): Column name to look for instead of the default \"labels\"\n images_label (optional): Column name to look for instead of the default \"images\"\n image_type_extension (optional): If given, this will be added to the end of each value in \"images\".\n data_type_extension (optional): If given, this will be added to the end of each value in \"files_list\".\n auto_weights (optional): If True, weights will be used to balance the classes. Only implemented for multi label tasks currently.\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.\n args (optional): Optional changes to the args dict of the model. 
Any changes made will persist for the model.\n eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n None\n \"\"\" # noqa: ignore flake8\"\n\n if args:\n self.args.update_from_dict(args)\n\n if text_label:\n self.args.text_label = text_label\n\n if text_label:\n self.args.labels_label = labels_label\n\n if text_label:\n self.args.images_label = images_label\n\n if text_label:\n self.args.image_type_extension = image_type_extension\n\n if text_label:\n self.args.data_type_extension = data_type_extension\n\n if self.args.silent:\n show_running_loss = False\n\n if self.args.evaluate_during_training and eval_data is None:\n raise ValueError(\n \"evaluate_during_training is enabled but eval_df is not specified.\"\n \" Pass eval_df to model.train_model() if using evaluate_during_training.\"\n )\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:\n raise ValueError(\n \"Output directory ({}) already exists and is not empty.\"\n \" Set overwrite_output_dir to True to overcome.\".format(output_dir)\n )\n\n self._move_model_to_device()\n\n train_dataset = self.load_and_cache_examples(\n train_data,\n files_list=files_list,\n image_path=image_path,\n text_label=self.args.text_label,\n labels_label=self.args.labels_label,\n images_label=self.args.images_label,\n image_type_extension=self.args.image_type_extension,\n data_type_extension=self.args.data_type_extension,\n verbose=verbose,\n )\n\n if auto_weights:\n if self.multi_label:\n self.criterion = torch.nn.BCEWithLogitsLoss(pos_weight=self.calculate_weights(train_dataset))\n\n os.makedirs(output_dir, exist_ok=True)\n\n global_step, tr_loss = self.train(\n train_dataset,\n output_dir,\n show_running_loss=show_running_loss,\n eval_data=eval_data,\n verbose=verbose,\n **kwargs,\n )\n\n self._save_model(output_dir, model=self.model)\n\n if verbose:\n logger.info(\" Training of {} model complete. Saved to {}.\".format(self.args.model_type, output_dir))\n\n def train(\n self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,\n ):\n \"\"\"\n Trains the model on train_dataset.\n\n Utility function to be used by the train_model() method. 
Not intended to be used directly.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n multi_label = self.multi_label\n\n tb_writer = SummaryWriter(logdir=args.tensorboard_dir)\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(\n train_dataset,\n sampler=train_sampler,\n batch_size=args.train_batch_size,\n collate_fn=collate_fn,\n num_workers=args.process_count,\n )\n\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n\n warmup_steps = math.ceil(t_total * args.warmup_ratio)\n args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n global_step = 0\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(int(args.num_train_epochs), desc=\"Epoch\", disable=args.silent)\n epoch_number = 0\n best_eval_metric = None\n early_stopping_counter = 0\n\n if args.evaluate_during_training:\n training_progress_scores = self._create_training_progress_scores(multi_label, **kwargs)\n\n if args.wandb_project:\n wandb.init(project=args.wandb_project, config={**args}, **args.wandb_kwargs)\n wandb.watch(self.model)\n\n model.train()\n for _ in train_iterator:\n train_iterator.set_description(f\"Epoch {epoch_number} of {args.num_train_epochs}\")\n for step, batch in enumerate(\n tqdm(train_dataloader, desc=f\"Running Epoch {epoch_number}\", disable=args.silent)\n ):\n batch = tuple(t.to(device) for t in batch)\n labels = batch[5]\n\n inputs = self._get_inputs_dict(batch)\n outputs = model(**inputs)\n # model outputs are always tuple in pytorch-transformers (see doc)\n logits = outputs[0] # Different from default behaviour\n loss = self.criterion(logits, labels)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n current_loss = loss.item()\n\n if show_running_loss:\n print(\"\\rRunning loss: %f\" % loss, end=\"\")\n\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n # torch.nn.utils.clip_grad_norm_(\n # amp.master_params(optimizer), args.max_grad_norm\n # )\n else:\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(\n # model.parameters(), args.max_grad_norm\n # )\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate 
schedule\n model.zero_grad()\n global_step += 1\n\n if args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n if args.wandb_project:\n wandb.log(\n {\n \"Training loss\": current_loss,\n \"lr\": scheduler.get_lr()[0],\n \"global_step\": global_step,\n }\n )\n\n if args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n self._save_model(output_dir_current, model=model)\n\n if args.evaluate_during_training and (\n args.evaluate_during_training_steps > 0\n and global_step % args.evaluate_during_training_steps == 0\n ):\n # Only evaluate when single GPU otherwise metrics may not average well\n results, _ = self.eval_model(\n eval_data,\n verbose=verbose and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n **kwargs,\n )\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}\".format(global_step))\n\n if args.save_eval_checkpoints:\n self._save_model(output_dir_current, model=model, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(args.output_dir, \"training_progress_scores.csv\"), index=False,\n )\n\n if args.wandb_project:\n wandb.log(self._get_last_metrics(training_progress_scores))\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n self._save_model(args.best_model_dir, model=model, results=results)\n if best_eval_metric and args.early_stopping_metric_minimize:\n if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self._save_model(args.best_model_dir, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n else:\n if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self._save_model(args.best_model_dir, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps 
reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n\n epoch_number += 1\n output_dir_current = os.path.join(output_dir, \"checkpoint-{}-epoch-{}\".format(global_step, epoch_number))\n\n if args.save_model_every_epoch or args.evaluate_during_training:\n os.makedirs(output_dir_current, exist_ok=True)\n\n if args.save_model_every_epoch:\n self._save_model(output_dir_current, model=model)\n\n if args.evaluate_during_training:\n results, _ = self.eval_model(\n eval_data,\n verbose=verbose and args.evaluate_during_training_verbose,\n silent=args.evaluate_during_training_silent,\n **kwargs,\n )\n\n self._save_model(output_dir_current, results=results)\n\n training_progress_scores[\"global_step\"].append(global_step)\n training_progress_scores[\"train_loss\"].append(current_loss)\n for key in results:\n training_progress_scores[key].append(results[key])\n report = pd.DataFrame(training_progress_scores)\n report.to_csv(\n os.path.join(args.output_dir, \"training_progress_scores.csv\"), index=False,\n )\n\n if not best_eval_metric:\n best_eval_metric = results[args.early_stopping_metric]\n self._save_model(args.best_model_dir, model=model, results=results)\n if best_eval_metric and args.early_stopping_metric_minimize:\n if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self._save_model(args.best_model_dir, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping and args.early_stopping_consider_epochs:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n else:\n if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:\n best_eval_metric = results[args.early_stopping_metric]\n self._save_model(args.best_model_dir, model=model, results=results)\n early_stopping_counter = 0\n else:\n if args.use_early_stopping and args.early_stopping_consider_epochs:\n if early_stopping_counter < args.early_stopping_patience:\n early_stopping_counter += 1\n if verbose:\n logger.info(f\" No improvement in {args.early_stopping_metric}\")\n logger.info(f\" Current step: {early_stopping_counter}\")\n logger.info(f\" Early stopping patience: {args.early_stopping_patience}\")\n else:\n if verbose:\n logger.info(f\" Patience of {args.early_stopping_patience} steps reached\")\n logger.info(\" Training terminated.\")\n train_iterator.close()\n return global_step, tr_loss / global_step\n\n return global_step, tr_loss / global_step\n\n def eval_model(\n self,\n data,\n files_list=None,\n image_path=None,\n text_label=None,\n labels_label=None,\n images_label=None,\n image_type_extension=None,\n data_type_extension=None,\n output_dir=None,\n verbose=True,\n silent=False,\n **kwargs,\n ):\n \"\"\"\n Evaluates the model on eval_df. 
Saves results to output_dir.\n\n Args:\n data: Path to data directory containing text files (JSON) and image files OR a Pandas DataFrame.\n If a DataFrame is given, it should contain the columns [text, labels, images]. When using a DataFrame,\n image_path MUST be specified. The image column of the DataFrame should contain the relative path from\n image_path to the image.\n E.g:\n For an image file 1.jpeg located in \"data/train/\";\n image_path = \"data/train/\"\n images = \"1.jpeg\"\n files_list (optional): If given, only the files specified in this list will be taken from data directory.\n files_list can be a Python list or the path (str) to a JSON file containing a list of files.\n image_path (optional): Must be specified when using DataFrame as input. Path to the directory containing the\n images.\n text_label (optional): Column name to look for instead of the default \"text\"\n labels_label (optional): Column name to look for instead of the default \"labels\"\n images_label (optional): Column name to look for instead of the default \"images\"\n image_type_extension (optional): If given, this will be added to the end of each value in \"images\".\n data_type_extension (optional): If given, this will be added to the end of each value in \"files_list\".\n output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.\n verbose: If verbose, results will be printed to the console on completion of evaluation.\n silent: If silent, tqdm progress bars will be hidden.\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results. (Matthews correlation coefficient, tp, tn, fp, fn)\n model_outputs: List of model outputs for each row in eval_df\n \"\"\" # noqa: ignore flake8\"\n\n if text_label:\n self.args.text_label = text_label\n\n if text_label:\n self.args.labels_label = labels_label\n\n if text_label:\n self.args.images_label = images_label\n\n if text_label:\n self.args.image_type_extension = image_type_extension\n\n if text_label:\n self.args.data_type_extension = data_type_extension\n\n if not output_dir:\n output_dir = self.args.output_dir\n\n self._move_model_to_device()\n\n # If data is a tuple,\n # this is for early stopping and first element is data_path and second element is files_list\n if isinstance(data, tuple):\n data, files_list = data\n\n eval_dataset = self.load_and_cache_examples(\n data,\n files_list=files_list,\n image_path=image_path,\n text_label=self.args.text_label,\n labels_label=self.args.labels_label,\n images_label=self.args.images_label,\n image_type_extension=self.args.image_type_extension,\n data_type_extension=self.args.data_type_extension,\n verbose=verbose,\n silent=silent,\n )\n os.makedirs(output_dir, exist_ok=True)\n\n result, model_outputs = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)\n self.results.update(result)\n\n if verbose:\n logger.info(self.results)\n\n return result, model_outputs\n\n def evaluate(\n self, eval_dataset, output_dir, prefix=\"\", verbose=True, silent=False, **kwargs,\n ):\n \"\"\"\n Evaluates the model on eval_df.\n\n Utility function to be used by the eval_model() method. 
Not intended to be used directly.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n multi_label = self.multi_label\n eval_output_dir = output_dir\n\n results = {}\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset,\n sampler=eval_sampler,\n batch_size=args.eval_batch_size,\n collate_fn=collate_fn,\n num_workers=args.process_count,\n )\n\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n model.eval()\n\n for batch in tqdm(eval_dataloader, disable=args.silent or silent, desc=\"Running Evaluation\"):\n batch = tuple(t.to(device) for t in batch)\n labels = batch[5]\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch)\n\n outputs = model(**inputs)\n logits = outputs[0] # Different from default behaviour\n tmp_eval_loss = self.criterion(logits, labels)\n\n eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_steps += 1\n\n if preds is None:\n preds = torch.sigmoid(logits).detach().cpu().numpy()\n out_label_ids = labels.detach().cpu().numpy()\n else:\n preds = np.append(preds, torch.sigmoid(logits).detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n model_outputs = preds\n\n if args.regression is True:\n preds = np.squeeze(preds)\n model_outputs = preds\n\n model_outputs = preds\n if multi_label:\n preds = (preds > 0.5).astype(int)\n else:\n preds = np.argmax(preds, axis=1)\n\n result = self.compute_metrics(preds, out_label_ids, **kwargs)\n result[\"eval_loss\"] = eval_loss\n results.update(result)\n\n output_eval_file = os.path.join(eval_output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(result.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(result[key])))\n\n return results, model_outputs\n\n def load_and_cache_examples(\n self,\n data,\n files_list=None,\n image_path=None,\n text_label=None,\n labels_label=None,\n images_label=None,\n image_type_extension=None,\n data_type_extension=None,\n evaluate=False,\n no_cache=False,\n verbose=True,\n silent=False,\n ):\n \"\"\"\n Converts a list of InputExample objects to a TensorDataset containing InputFeatures. Caches the InputFeatures.\n\n Args:\n data: Path to data directory containing text files (JSON) and image files OR a Pandas DataFrame.\n If a DataFrame is given, it should contain the columns [text, labels, images]. When using a DataFrame,\n image_path MUST be specified. The image column of the DataFrame should contain the relative path from\n image_path to the image.\n E.g:\n For an image file 1.jpeg located in \"data/train/\";\n image_path = \"data/train/\"\n images = \"1.jpeg\"\n files_list (optional): If given, only the files specified in this list will be taken from data directory.\n files_list can be a Python list or the path (str) to a JSON file containing a list of files.\n image_path (optional): Must be specified when using DataFrame as input. 
Path to the directory containing the\n images.\n text_label (optional): Column name to look for instead of the default \"text\"\n labels_label (optional): Column name to look for instead of the default \"labels\"\n images_label (optional): Column name to look for instead of the default \"images\"\n image_type_extension (optional): If given, this will be added to the end of each value in \"images\".\n data_type_extension (optional): If given, this will be added to the end of each value in \"files_list\".\n\n Utility function for train() and eval() methods. Not intended to be used directly.\n \"\"\" # noqa: ignore flake8\"\n\n tokenizer = self.tokenizer\n args = self.args\n\n if not isinstance(data, str):\n if not image_path:\n raise ValueError(\n \"data is not a str and image_path is not given. image_path must be specified when input is a DF\"\n )\n else:\n data = data.rename(columns={text_label: \"text\", labels_label: \"labels\", images_label: \"images\"})\n\n transforms = get_image_transforms()\n\n if self.label_list:\n labels = self.label_list\n else:\n labels = [str(i) for i in range(self.num_labels)]\n\n dataset = JsonlDataset(\n data,\n tokenizer,\n transforms,\n labels,\n args.max_seq_length - args.num_image_embeds - 2,\n files_list=files_list,\n image_path=image_path,\n text_label=text_label,\n labels_label=labels_label,\n images_label=images_label,\n image_type_extension=image_type_extension,\n data_type_extension=data_type_extension,\n multi_label=self.multi_label,\n )\n return dataset\n\n def compute_metrics(self, preds, labels, **kwargs):\n \"\"\"\n Computes the evaluation metrics for the model predictions.\n\n Args:\n preds: Model predictions\n labels: Ground truth labels\n **kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use). E.g. f1=sklearn.metrics.f1_score.\n A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions.\n\n Returns:\n result: Dictionary containing evaluation results. 
(Matthews correlation coefficient, tp, tn, fp, fn)\n wrong: List of InputExample objects corresponding to each incorrect prediction by the model\n \"\"\" # noqa: ignore flake8\"\n assert len(preds) == len(labels)\n\n multi_label = self.multi_label\n extra_metrics = {}\n for metric, func in kwargs.items():\n extra_metrics[metric] = func(labels, preds)\n\n if self.args.regression:\n return {**extra_metrics}\n\n if multi_label:\n label_ranking_score = label_ranking_average_precision_score(labels, preds)\n return {**{\"LRAP\": label_ranking_score}, **extra_metrics}\n\n mcc = matthews_corrcoef(labels, preds)\n\n if self.model.num_labels == 2:\n tn, fp, fn, tp = confusion_matrix(labels, preds).ravel()\n return ({**{\"mcc\": mcc, \"tp\": tp, \"tn\": tn, \"fp\": fp, \"fn\": fn}, **extra_metrics},)\n else:\n return {**{\"mcc\": mcc}, **extra_metrics}\n\n def predict(self, to_predict, image_path, image_type_extension=None):\n \"\"\"\n Performs predictions on a list of text.\n\n Args:\n to_predict: A python dictionary to be sent to the model for prediction.\n The dictionary should be of the form {\"text\": [<list of sentences>], \"images\": [<list of images>]}.\n image_path: Path to the directory containing the image/images.\n image_type_extension (optional): If given, this will be added to the end of each value in \"images\".\n\n Returns:\n preds: A python list of the predictions (0 or 1) for each text.\n model_outputs: A python list of the raw model outputs for each text.\n \"\"\"\n\n device = self.device\n model = self.model\n args = self.args\n multi_label = self.multi_label\n\n self._move_model_to_device()\n\n to_predict.update({\"labels\": [\"0\" for i in range(len(to_predict[\"text\"]))]})\n to_predict = pd.DataFrame.from_dict(to_predict)\n\n eval_dataset = self.load_and_cache_examples(\n to_predict, image_path=image_path, evaluate=True, image_type_extension=image_type_extension, no_cache=True,\n )\n\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(\n eval_dataset,\n sampler=eval_sampler,\n batch_size=args.eval_batch_size,\n collate_fn=collate_fn,\n num_workers=args.process_count,\n )\n\n eval_loss = 0.0\n nb_eval_steps = 0\n preds = None\n out_label_ids = None\n\n for batch in tqdm(eval_dataloader, disable=args.silent, desc=\"Running Prediction\"):\n batch = tuple(t.to(device) for t in batch)\n labels = batch[5]\n with torch.no_grad():\n inputs = self._get_inputs_dict(batch)\n\n outputs = model(**inputs)\n logits = outputs[0] # Different from default behaviour\n tmp_eval_loss = self.criterion(logits, labels)\n\n eval_loss += tmp_eval_loss.mean().item()\n\n nb_eval_steps += 1\n\n if preds is None:\n preds = torch.sigmoid(logits).detach().cpu().numpy()\n out_label_ids = labels.detach().cpu().numpy()\n else:\n preds = np.append(preds, torch.sigmoid(logits).detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)\n\n eval_loss = eval_loss / nb_eval_steps\n model_outputs = preds\n\n if multi_label:\n preds = (preds > 0.5).astype(int)\n else:\n preds = np.argmax(preds, axis=1)\n\n return preds, model_outputs\n\n def calculate_weights(self, train_dataset):\n label_frequences = train_dataset.get_label_frequencies()\n label_frequences = [label_frequences[label] if label_frequences[label] > 0 else 1 for label in self.label_list]\n label_weights = (\n torch.tensor(label_frequences, device=self.device, dtype=torch.float) / len(train_dataset)\n ) ** -1\n\n return label_weights\n\n def _threshold(self, x, threshold):\n 
if x >= threshold:\n return 1\n return 0\n\n def _move_model_to_device(self):\n self.model.to(self.device)\n\n def _get_inputs_dict(self, batch):\n inputs = {\n \"input_ids\": batch[0],\n \"input_modal\": batch[2],\n \"attention_mask\": batch[1],\n \"modal_start_tokens\": batch[3],\n \"modal_end_tokens\": batch[4],\n }\n\n return inputs\n\n def _get_last_metrics(self, metric_values):\n return {metric: values[-1] for metric, values in metric_values.items()}\n\n def _create_training_progress_scores(self, multi_label, **kwargs):\n extra_metrics = {key: [] for key in kwargs}\n\n if multi_label:\n training_progress_scores = {\n \"global_step\": [],\n \"LRAP\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n else:\n if self.model.num_labels == 2:\n training_progress_scores = {\n \"global_step\": [],\n \"tp\": [],\n \"tn\": [],\n \"fp\": [],\n \"fn\": [],\n \"mcc\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n elif self.model.num_labels == 1:\n training_progress_scores = {\n \"global_step\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n else:\n training_progress_scores = {\n \"global_step\": [],\n \"mcc\": [],\n \"train_loss\": [],\n \"eval_loss\": [],\n **extra_metrics,\n }\n\n return training_progress_scores\n\n def _save_model(self, output_dir, model=None, results=None):\n os.makedirs(output_dir, exist_ok=True)\n\n if model and not self.args.no_save:\n # Take care of distributed/parallel training\n model_to_save = model.module if hasattr(model, \"module\") else model\n torch.save(model_to_save.state_dict(), os.path.join(output_dir, \"pytorch_model.bin\"))\n self.tokenizer.save_pretrained(output_dir)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n self.transformer_config.architectures = [model_to_save.__class__.__name__]\n self.transformer_config.save_pretrained(output_dir)\n self._save_model_args(output_dir)\n\n if results:\n output_eval_file = os.path.join(output_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n for key in sorted(results.keys()):\n writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n def _save_model_args(self, output_dir):\n os.makedirs(output_dir, exist_ok=True)\n self.args.save(output_dir)\n\n def _load_model_args(self, input_dir):\n args = MultiModalClassificationArgs()\n args.load(input_dir)\n return args\n"
] | [
[
"numpy.squeeze",
"sklearn.metrics.matthews_corrcoef",
"torch.utils.data.DataLoader",
"sklearn.metrics.confusion_matrix",
"pandas.DataFrame",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.device",
"torch.nn.CrossEntropyLoss",
"torch.tensor",
"numpy.argmax",
"torch.sigmoid",
"pandas.DataFrame.from_dict",
"numpy.random.seed",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel",
"torch.nn.MSELoss",
"sklearn.metrics.label_ranking_average_precision_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
kamatsuoka/gpt-2 | [
"ac326eb5e704316d16f2a5421666a71e986c24a8"
] | [
"src/interactive_conditional_samples.py"
] | [
"#!/usr/bin/env python3\n\nimport json\nimport os\n\nimport fire\nimport numpy as np\nimport tensorflow as tf\n\nimport encoder\nimport model\nimport sample\n\n\ndef interact_model(\n model_name='345M',\n seed=None,\n nsamples=1,\n batch_size=1,\n length=None,\n temperature=1,\n top_k=0,\n models_dir='models',\n):\n \"\"\"\n Interactively run the model\n :model_name=345M : String, which model to use\n :seed=None : Integer seed for random number generators, fix seed to reproduce\n results\n :nsamples=1 : Number of samples to return total\n :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.\n :length=None : Number of tokens in generated text, if None (default), is\n determined by model hyperparameters\n :temperature=1 : Float value controlling randomness in boltzmann\n distribution. Lower temperature results in less random completions. As the\n temperature approaches zero, the model will become deterministic and\n repetitive. Higher temperature results in more random completions.\n :top_k=0 : Integer value controlling diversity. 1 means only 1 word is\n considered for each step (token), resulting in deterministic completions,\n while 40 means 40 words are considered at each step. 0 (default) is a\n special setting meaning no restrictions. 40 generally is a good value.\n :models_dir : path to parent folder containing model subfolders\n (i.e. contains the <model_name> folder) \n \"\"\"\n models_dir = os.path.expanduser(os.path.expandvars(models_dir))\n if batch_size is None:\n batch_size = 1\n assert nsamples % batch_size == 0\n\n enc = encoder.get_encoder(model_name, models_dir)\n hparams = model.default_hparams()\n with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n\n if length is None:\n length = hparams.n_ctx // 2\n elif length > hparams.n_ctx:\n raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n\n with tf.Session(graph=tf.Graph()) as sess:\n context = tf.placeholder(tf.int32, [batch_size, None])\n np.random.seed(seed)\n tf.set_random_seed(seed)\n output = sample.sample_sequence(\n hparams=hparams, length=length,\n context=context,\n batch_size=batch_size,\n temperature=temperature, top_k=top_k\n )\n\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))\n saver.restore(sess, ckpt)\n\n while True:\n raw_text = input(\"Model prompt >>> \")\n while not raw_text:\n print('Prompt should not be empty!')\n raw_text = input(\"Model prompt >>> \")\n context_tokens = enc.encode(raw_text)\n generated = 0\n for _ in range(nsamples // batch_size):\n out = sess.run(output, feed_dict={\n context: [context_tokens for _ in range(batch_size)]\n })[:, len(context_tokens):]\n for i in range(batch_size):\n generated += 1\n text = enc.decode(out[i])\n print(\"=\" * 40 + \" SAMPLE \" + str(generated) + \" \" + \"=\" * 40)\n print(text)\n print(\"=\" * 80)\n\n\nif __name__ == '__main__':\n fire.Fire(interact_model)\n"
] | [
[
"tensorflow.Graph",
"numpy.random.seed",
"tensorflow.placeholder",
"tensorflow.set_random_seed",
"tensorflow.train.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
cshenry/MergeMetabolicAnnotations-1 | [
"d0b9bce13de16bcd816bfca789bf624323b2a239"
] | [
"lib/MergeMetabolicAnnotations/utils/functions.py"
] | [
"import pandas as pd\nimport os\nimport logging\nimport yaml\nimport datetime\n\n\ndef df_to_ontology(params, staging_dir, pass_df=None):\n '''\n Takes the text file from staging, or the pandas df passed from the merge\n app, and converts to an ontology dictionary suitable from the annotation\n ontology API add_annotation_ontology_events() method\n '''\n\n if isinstance(pass_df, pd.DataFrame):\n annotations = pass_df\n method = \"Merge Annotations\"\n else:\n if 'debug' in params and params['debug'] is True:\n annotations_file_path = os.path.join(\n '/kb/module/test/test_data', params['annotation_file'])\n else:\n annotations_file_path = os.path.join(staging_dir, params['annotation_file'])\n\n annotations = pd.read_csv(annotations_file_path,\n sep='\\t',\n header=None,\n names=['gene', 'term']\n )\n\n method = \"Import Annotations\"\n\n # remove duplicate rows, if any\n annotations = annotations.drop_duplicates()\n\n ontology = {\n 'event_id': params['description'],\n 'ontology_id': params['ontology'],\n 'method': method, # from above\n 'method_version': get_app_version(),\n \"timestamp\": datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\"),\n 'ontology_terms': {}\n }\n\n # add imported terms\n for index, row in annotations.iterrows():\n if pd.notnull(row['term']):\n if row['gene'] in ontology['ontology_terms']:\n ontology['ontology_terms'][row['gene']].append(\n {'term': row['term']}\n )\n else:\n ontology['ontology_terms'][row['gene']] = [\n {'term': row['term']}\n ]\n\n return ontology\n\n\ndef get_app_version():\n with open(\"/kb/module/kbase.yml\", 'r') as stream:\n data_loaded = yaml.load(stream)\n return str(data_loaded['module-version'])\n"
] | [
[
"pandas.notnull",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
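A toy re-run of the grouping step inside `df_to_ontology` above: deduplicated (gene, term) rows collapse into the `ontology_terms` mapping; the two-column frame here is fabricated for illustration.

import pandas as pd

annotations = pd.DataFrame({'gene': ['g1', 'g1', 'g2'],
                            'term': ['GO:0001', 'GO:0002', 'GO:0001']})
ontology_terms = {}
for _, row in annotations.drop_duplicates().iterrows():
    if pd.notnull(row['term']):  # skip genes with no term, as the app does
        ontology_terms.setdefault(row['gene'], []).append({'term': row['term']})
print(ontology_terms)  # {'g1': [{'term': 'GO:0001'}, {'term': 'GO:0002'}], 'g2': [{'term': 'GO:0001'}]}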
mrernst/rl_robotics_research | [
"0bc446cfb69591cb4ee3ce8d39815c463090a5f6"
] | [
"env/plane.py"
] | [
    "import gym\nimport numpy as np\nimport cv2\nfrom gym import spaces\n\n\ndef line_intersection(line1, line2):\n    # calculate the intersection point\n    xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n    ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n    def det(a, b):\n        return a[0] * b[1] - a[1] * b[0]\n\n    div = det(xdiff, ydiff)\n    if div == 0:\n        raise Exception('lines do not intersect')\n\n    d = (det(*line1), det(*line2))\n    x = det(d, xdiff) / div\n    y = det(d, ydiff) / div\n    return x, y\n\n\ndef check_cross(x0, y0, x1, y1):\n    x0 = np.array(x0)\n    y0 = np.array(y0)\n    x1 = np.array(x1)\n    y1 = np.array(y1)\n    return np.cross(x1 - x0, y0 - x0), np.cross(y0 - x0, y1 - x0)\n\n\ndef check_itersection(x0, y0, x1, y1):\n    EPS = 1e-10\n\n    def sign(x):\n        if x > EPS:\n            return 1\n        if x < -EPS:\n            return -1\n        return 0\n\n    f1, f2 = check_cross(x0, y0, x1, y1)\n    f3, f4 = check_cross(x1, y1, x0, y0)\n    if sign(f1) == sign(f2) and sign(f3) == sign(f4) and sign(f1) != 0 and sign(f3) != 0:\n        return True\n    return False\n\n\nclass PlaneBase(gym.Env):\n    def __init__(self, rects, R, is_render=False, size=512):\n        self.rects = rects\n        self.n = len(self.rects)\n        self.size = size\n        self.map = np.ones((size, size, 3), dtype=np.uint8) * 255\n        self.R = R\n        self.R2 = R ** 2\n        self.board = np.array(\n            [[0, 0],\n             [1, 1]],\n            dtype='float32')\n\n        self.action_space = gym.spaces.Box(\n            low=-R, high=R, shape=(2,), dtype='float32')\n        self.observation_space = gym.spaces.Box(\n            low=0., high=1., shape=(2,), dtype='float32')\n\n        if is_render:\n            cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n            self.image_name = 'image'\n\n        for i in range(self.n):\n            for j in range(i + 1, self.n):\n                if check_itersection(self.rects[i][0], self.rects[i][1], self.rects[j][0], self.rects[j][1]):\n                    raise Exception(\"Rectangle interaction with each other\")\n\n        for ((x0, y0), (x1, y1)) in rects:\n            x0, y0 = int(x0 * size), int(y0 * size)\n            x1, y1 = int(x1 * size), int(y1 * size)\n            cv2.rectangle(self.map, (x0, y0), (x1, y1), (0, 255, 0), 1)\n\n            ps = np.array([\n                [x0, y0],\n                [x1, y0],\n                [x1, y1],\n                [x0, y1],\n            ], dtype=np.int32)\n            cv2.fillConvexPoly(self.map, ps, (127, 127, 127))\n\n        self.state = (0, 0)\n        self.reset()\n\n    def restore(self, obs):\n        self.state = (float(obs[0]), float(obs[1]))\n\n    def rect_lines(self, rect):\n        (x0, y0), (x1, y1) = rect\n        yield (x0, y0), (x1, y0)\n        yield (x1, y0), (x1, y1)\n        yield (x1, y1), (x0, y1)\n        yield (x0, y1), (x0, y0)\n\n    def l2dist(self, x, y):\n        return ((y[0] - x[0]) ** 2) + ((y[1] - x[1]) ** 2)\n\n    def check_inside(self, p):\n        EPS = 1e-10\n        for i in self.rects:\n            if p[0] > i[0][0] + EPS and p[0] < i[1][0] - EPS and p[1] > i[0][1] + EPS and p[1] < i[1][1] - EPS:\n                return True\n        return False\n\n    def step(self, action):\n        dx, dy = action\n        l = 0.0001\n        p = (self.state[0] + dx * l, self.state[1] + dy * l)\n        if self.check_inside(p) or p[0] > 1 or p[1] > 1 or p[0] < 0 or p[1] < 0:\n            return np.array(self.state), 0, False, {}\n\n        dest = (self.state[0] + dx, self.state[1] + dy)\n\n        md = self.l2dist(self.state, dest)\n\n        _dest = dest\n        line = (self.state, dest)\n\n        for i in list(self.rects) + [self.board]:\n            for l in self.rect_lines(i):\n                if check_itersection(self.state, dest, l[0], l[1]):\n                    inter_point = line_intersection(line, l)\n                    d = self.l2dist(self.state, inter_point)\n                    if d < md:\n                        md = d\n                        _dest = inter_point\n\n        self.restore(_dest)\n        return np.array(self.state), -md, False, {}\n\n    def render(self, mode='human'):\n        image = self.map.copy()\n        x, y = self.state\n        x = int(x * self.size)\n        
y = int(y * self.size)\n cv2.circle(image, (x, y), 5, (255, 0, 255), -1)\n if mode == 'human':\n cv2.imshow('image', image)\n cv2.waitKey(2)\n else:\n return image\n\n def reset(self):\n inside_rect = True\n while inside_rect:\n a, b = np.random.random(), np.random.random()\n inside_rect = self.check_inside((a, b))\n self.state = (a, b)\n return np.array(self.state)\n\n\nclass NaivePlane(PlaneBase):\n def __init__(self, is_render=True, R=300, size=512):\n PlaneBase.__init__(self,\n [\n np.array([[128, 128], [300, 386]]) / 512,\n np.array([[400, 400], [500, 500]]) / 512,\n ],\n R, is_render=is_render, size=size),\n\n\nclass NaivePlane2(PlaneBase):\n # two rectangle\n def __init__(self, is_render=True, R=300, size=512):\n PlaneBase.__init__(self,\n [\n np.array([[64, 64], [256, 256]]) / 512,\n np.array([[300, 128], [400, 500]]) / 512,\n ],\n R, is_render=is_render, size=size),\n\n\nclass NaivePlane3(PlaneBase):\n # four rectangle\n def __init__(self, is_render=True, R=300, size=512):\n PlaneBase.__init__(self,\n [\n np.array([[64, 64], [192, 192]]) / 512,\n np.array([[320, 64], [448, 192]]) / 512,\n np.array([[320, 320], [448, 448]]) / 512,\n np.array([[64, 320], [192, 448]]) / 512,\n ],\n R, is_render=is_render, size=size),\n\n\nclass NaivePlane4(PlaneBase):\n # four rectangle\n def __init__(self, is_render=True, R=300, size=512):\n PlaneBase.__init__(self,\n [\n np.array([[64, 64], [192, 512]]) / 512,\n np.array([[320, 64], [448, 512]]) / 512,\n ],\n R, is_render=is_render, size=size),\n\n\nclass NaivePlane5(PlaneBase):\n # four rectangle\n def __init__(self, is_render=False, R=300, size=512):\n PlaneBase.__init__(self,\n [\n np.array([[0, 1. / 3], [2. / 3, 2. / 3]]),\n ],\n R, is_render=is_render, size=size),\n\n\nclass NaivePlane6(PlaneBase):\n # four rectangle\n def __init__(self, is_render=False, R=300, size=512):\n PlaneBase.__init__(self,\n [\n # np.array([[0, 1. / 3], [2. / 3, 2. / 3]]),\n ],\n R, is_render=is_render, size=size),\n\n\nif __name__ == '__main__':\n env = NaivePlane5()\n obs = env.reset()\n while True:\n print(obs)\n env.render()\n while True:\n try:\n print('entering the dir (x, y)')\n act = input().strip().split(' ')\n act = float(act[0]) / 512, float(act[1]) / 512\n break\n except KeyboardInterrupt as e:\n raise e\n except:\n continue\n\n obs, reward, _, _ = env.step(act)\n"
] | [
[
"numpy.ones",
"numpy.array",
"numpy.random.random",
"numpy.cross"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
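The determinant construction in `line_intersection` can be checked standalone (no gym or cv2 needed); this sketch reuses the same formula on two toy segments.

def det(a, b):
    return a[0] * b[1] - a[1] * b[0]

def intersect(line1, line2):
    # same cross-product formulation as plane.py's line_intersection
    xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
    ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])
    div = det(xdiff, ydiff)
    if div == 0:
        raise ValueError('lines do not intersect')
    d = (det(*line1), det(*line2))
    return det(d, xdiff) / div, det(d, ydiff) / div

print(intersect(((0, 0), (1, 1)), ((0, 1), (1, 0))))  # (0.5, 0.5)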
OpenMDAO/zappy | [
"2c72048b4c4e0ce0ae83221e4ee5788978254340"
] | [
"zappy/LF_elements/bus.py"
] | [
"import math, cmath\nimport numpy as np\n\nfrom openmdao.api import ImplicitComponent\n\nclass ACbus(ImplicitComponent):\n \"\"\"\n Determines the voltage of an AC bus\n \"\"\"\n\n def initialize(self):\n\n self.options.declare('num_nodes', types=int)\n self.options.declare('lines', default=['1', '2'], desc='Names of electrical lines connecting to the bus')\n self.options.declare('Vbase', default=5000.0, desc='Base voltage in units of volts')\n self.options.declare('Sbase', default=10.0E6, desc='Base power in units of watts')\n\n def setup(self):\n\n nn = self.options['num_nodes']\n lines = self.options['lines']\n Ibase = self.options['Sbase']/self.options['Vbase']\n ar = np.arange(nn)\n\n self.add_output('Vr', val=np.ones(nn), units='V', desc='Voltage (real) of the bus',\n res_ref=Ibase, res_units='A')\n self.add_output('Vi', val=np.zeros(nn), units='V', desc='Voltage (imaginary) of the bus',\n res_ref=Ibase, res_units='A')\n \n for name in lines:\n self.add_input(name+':Ir', val=np.zeros(nn), units='A', desc='Current (real) of line '+name)\n self.add_input(name+':Ii', val=np.zeros(nn), units='A', desc='Current (imaginary) of line '+name)\n\n self.declare_partials('Vr', name+':Ir', rows=ar, cols=ar, val=1.0)\n self.declare_partials('Vi', name+':Ii', rows=ar, cols=ar, val=1.0)\n\n def guess_nonlinear(self, inputs, outputs, resids):\n\n outputs['Vr'] = self.options['Vbase']\n outputs['Vi'] = 0.0\n\n def apply_nonlinear(self, inputs, outputs, resids):\n\n lines = self.options['lines']\n resids['Vr'] = 0.0\n resids['Vi'] = 0.0\n \n for name in lines:\n resids['Vr'] += inputs[name+':Ir']\n resids['Vi'] += inputs[name+':Ii']\n\n def linearize(self, inputs, outputs, J):\n\n pass\n\nclass DCbus(ImplicitComponent):\n \"\"\"\n Determines the voltage of a DC bus\n \"\"\"\n\n def initialize(self):\n self.options.declare('num_nodes', types=int)\n self.options.declare('lines', default=['1', '2'], desc='names of electrical lines connecting to the bus')\n self.options.declare('Vbase', default=5000.0, desc='Base voltage in units of volts')\n self.options.declare('Sbase', default=10.0E6, desc='Base power in units of watts')\n\n def setup(self):\n\n nn = self.options['num_nodes']\n lines = self.options['lines']\n Ibase = self.options['Sbase']/self.options['Vbase']\n ar = np.arange(nn)\n\n self.add_output('V', val=np.ones(nn), units='V', desc='Voltage of the bus',\n res_ref=Ibase, res_units='A')\n \n for name in lines:\n self.add_input(name+':I', val=np.zeros(nn), units='A', desc='Current of line '+name)\n\n self.declare_partials('V', name+':I', rows=ar, cols=ar, val=1.0)\n\n def guess_nonlinear(self, inputs, outputs, resids):\n\n outputs['V'] = self.options['Vbase']\n\n def apply_nonlinear(self, inputs, outputs, resids):\n\n lines = self.options['lines']\n resids['V'] = 0.0\n \n for name in lines:\n resids['V'] += inputs[name+':I']\n\n def linearize(self, inputs, outputs, J):\n\n pass\n\n\nif __name__ == \"__main__\":\n from openmdao.api import Problem, Group, IndepVarComp\n\n p = Problem()\n p.model = Group()\n des_vars = p.model.add_subsystem('des_vars', IndepVarComp(), promotes=['*'])\n\n des_vars.add_output('In1:Ir', 1.90003278522448*np.ones(3), units='A')\n des_vars.add_output('In1:Ii', 0.800107961803713*np.ones(3), units='A')\n des_vars.add_output('In2:Ir', 1.99999059394351*np.ones(3), units='A')\n des_vars.add_output('In2:Ii', 0.999977006616166*np.ones(3), units='A')\n des_vars.add_output('Out1:Ir', -3.9*np.ones(3), units='A')\n des_vars.add_output('Out1:Ii', -1.8*np.ones(3), units='A')\n\n 
p.model.add_subsystem('acbus', ACbus(num_nodes=3, lines=['In1', 'In2', 'Out1']), promotes=['*'])\n\n des_vars.add_output('In1:I', 1.90003278522448*np.ones(3), units='A')\n des_vars.add_output('In2:I', 1.99999059394351*np.ones(3), units='A')\n des_vars.add_output('Out1:I', -3.9*np.ones(3), units='A')\n\n p.model.add_subsystem('dcbus', DCbus(num_nodes=3, lines=['In1', 'In2', 'Out1']), promotes=['*'])\n\n\n p.setup(check=False)\n\n p.check_partials(compact_print=False)\n\n\n"
] | [
[
"numpy.arange",
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
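A toy numeric check of the bus residual logic (Kirchhoff's current law), reusing the currents from the entry's `__main__` block: `apply_nonlinear` simply sums the line currents, so a balanced bus drives the residual to zero.

import numpy as np

line_currents = {'In1': 1.90003278522448, 'In2': 1.99999059394351, 'Out1': -3.9}
resid_V = sum(np.full(3, I) for I in line_currents.values())  # num_nodes = 3
print(resid_V)  # ~2.3e-05 at every node, i.e. the bus is nearly balanced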
kobewangSky/deep-person-reid | [
"628885841585536a09f40542bebbb77c3edcb8b1"
] | [
"torchreid/data/transforms.py"
] | [
"from __future__ import division, print_function, absolute_import\nimport math\nimport random\nfrom collections import deque\nimport torch\nfrom PIL import Image\nfrom torchvision.transforms import (\n Resize, Compose, ToTensor, Normalize, ColorJitter, RandomHorizontalFlip\n)\nfrom ..utils.augumentation import SSD_PhotometricDistort\n\nclass Random2DTranslation(object):\n \"\"\"Randomly translates the input image with a probability.\n\n Specifically, given a predefined shape (height, width), the input is first\n resized with a factor of 1.125, leading to (height*1.125, width*1.125), then\n a random crop is performed. Such operation is done with a probability.\n\n Args:\n height (int): target image height.\n width (int): target image width.\n p (float, optional): probability that this operation takes place.\n Default is 0.5.\n interpolation (int, optional): desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n \"\"\"\n\n def __init__(self, height, width, p=0.5, interpolation=Image.BILINEAR):\n self.height = height\n self.width = width\n self.p = p\n self.interpolation = interpolation\n\n def __call__(self, img):\n if random.uniform(0, 1) > self.p:\n return img.resize((self.width, self.height), self.interpolation)\n\n new_width, new_height = int(round(self.width * 1.125)\n ), int(round(self.height * 1.125))\n resized_img = img.resize((new_width, new_height), self.interpolation)\n x_maxrange = new_width - self.width\n y_maxrange = new_height - self.height\n x1 = int(round(random.uniform(0, x_maxrange)))\n y1 = int(round(random.uniform(0, y_maxrange)))\n croped_img = resized_img.crop(\n (x1, y1, x1 + self.width, y1 + self.height)\n )\n return croped_img\n\n\nclass RandomErasing(object):\n \"\"\"Randomly erases an image patch.\n\n Origin: `<https://github.com/zhunzhong07/Random-Erasing>`_\n\n Reference:\n Zhong et al. Random Erasing Data Augmentation.\n\n Args:\n probability (float, optional): probability that this operation takes place.\n Default is 0.5.\n sl (float, optional): min erasing area.\n sh (float, optional): max erasing area.\n r1 (float, optional): min aspect ratio.\n mean (list, optional): erasing value.\n \"\"\"\n\n def __init__(\n self,\n probability=0.5,\n sl=0.02,\n sh=0.4,\n r1=0.3,\n mean=[0.4914, 0.4822, 0.4465]\n ):\n self.probability = probability\n self.mean = mean\n self.sl = sl\n self.sh = sh\n self.r1 = r1\n\n def __call__(self, img):\n if random.uniform(0, 1) > self.probability:\n return img\n\n for attempt in range(100):\n area = img.size()[1] * img.size()[2]\n\n target_area = random.uniform(self.sl, self.sh) * area\n aspect_ratio = random.uniform(self.r1, 1 / self.r1)\n\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w < img.size()[2] and h < img.size()[1]:\n x1 = random.randint(0, img.size()[1] - h)\n y1 = random.randint(0, img.size()[2] - w)\n if img.size()[0] == 3:\n img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]\n img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]\n img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]\n else:\n img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]\n return img\n\n return img\n\n\nclass ColorAugmentation(object):\n \"\"\"Randomly alters the intensities of RGB channels.\n\n Reference:\n Krizhevsky et al. ImageNet Classification with Deep ConvolutionalNeural\n Networks. 
NIPS 2012.\n\n Args:\n p (float, optional): probability that this operation takes place.\n Default is 0.5.\n \"\"\"\n\n def __init__(self, p=0.5):\n self.p = p\n self.eig_vec = torch.Tensor(\n [\n [0.4009, 0.7192, -0.5675],\n [-0.8140, -0.0045, -0.5808],\n [0.4203, -0.6948, -0.5836],\n ]\n )\n self.eig_val = torch.Tensor([[0.2175, 0.0188, 0.0045]])\n\n def _check_input(self, tensor):\n assert tensor.dim() == 3 and tensor.size(0) == 3\n\n def __call__(self, tensor):\n if random.uniform(0, 1) > self.p:\n return tensor\n alpha = torch.normal(mean=torch.zeros_like(self.eig_val)) * 0.1\n quatity = torch.mm(self.eig_val * alpha, self.eig_vec)\n tensor = tensor + quatity.view(3, 1, 1)\n return tensor\n\n\nclass RandomPatch(object):\n \"\"\"Random patch data augmentation.\n\n There is a patch pool that stores randomly extracted pathces from person images.\n \n For each input image, RandomPatch\n 1) extracts a random patch and stores the patch in the patch pool;\n 2) randomly selects a patch from the patch pool and pastes it on the\n input (at random position) to simulate occlusion.\n\n Reference:\n - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.\n - Zhou et al. Learning Generalisable Omni-Scale Representations\n for Person Re-Identification. arXiv preprint, 2019.\n \"\"\"\n\n def __init__(\n self,\n prob_happen=0.5,\n pool_capacity=50000,\n min_sample_size=100,\n patch_min_area=0.01,\n patch_max_area=0.5,\n patch_min_ratio=0.1,\n prob_rotate=0.5,\n prob_flip_leftright=0.5,\n ):\n self.prob_happen = prob_happen\n\n self.patch_min_area = patch_min_area\n self.patch_max_area = patch_max_area\n self.patch_min_ratio = patch_min_ratio\n\n self.prob_rotate = prob_rotate\n self.prob_flip_leftright = prob_flip_leftright\n\n self.patchpool = deque(maxlen=pool_capacity)\n self.min_sample_size = min_sample_size\n\n def generate_wh(self, W, H):\n area = W * H\n for attempt in range(100):\n target_area = random.uniform(\n self.patch_min_area, self.patch_max_area\n ) * area\n aspect_ratio = random.uniform(\n self.patch_min_ratio, 1. 
/ self.patch_min_ratio\n )\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n if w < W and h < H:\n return w, h\n return None, None\n\n def transform_patch(self, patch):\n if random.uniform(0, 1) > self.prob_flip_leftright:\n patch = patch.transpose(Image.FLIP_LEFT_RIGHT)\n if random.uniform(0, 1) > self.prob_rotate:\n patch = patch.rotate(random.randint(-10, 10))\n return patch\n\n def __call__(self, img):\n W, H = img.size # original image size\n\n # collect new patch\n w, h = self.generate_wh(W, H)\n if w is not None and h is not None:\n x1 = random.randint(0, W - w)\n y1 = random.randint(0, H - h)\n new_patch = img.crop((x1, y1, x1 + w, y1 + h))\n self.patchpool.append(new_patch)\n\n if len(self.patchpool) < self.min_sample_size:\n return img\n\n if random.uniform(0, 1) > self.prob_happen:\n return img\n\n # paste a randomly selected patch on a random position\n patch = random.sample(self.patchpool, 1)[0]\n patchW, patchH = patch.size\n x1 = random.randint(0, W - patchW)\n y1 = random.randint(0, H - patchH)\n patch = self.transform_patch(patch)\n img.paste(patch, (x1, y1))\n\n return img\n\n\ndef build_transforms(\n height,\n width,\n transforms='random_flip',\n norm_mean=[0.485, 0.456, 0.406],\n norm_std=[0.229, 0.224, 0.225],\n **kwargs\n):\n \"\"\"Builds train and test transform functions.\n\n Args:\n height (int): target image height.\n width (int): target image width.\n transforms (str or list of str, optional): transformations applied to model training.\n Default is 'random_flip'.\n norm_mean (list or None, optional): normalization mean values. Default is ImageNet means.\n norm_std (list or None, optional): normalization standard deviation values. Default is\n ImageNet standard deviation values.\n \"\"\"\n if transforms is None:\n transforms = []\n\n if isinstance(transforms, str):\n transforms = [transforms]\n\n if not isinstance(transforms, list):\n raise ValueError(\n 'transforms must be a list of strings, but found to be {}'.format(\n type(transforms)\n )\n )\n\n if len(transforms) > 0:\n transforms = [t.lower() for t in transforms]\n\n if norm_mean is None or norm_std is None:\n norm_mean = [0.485, 0.456, 0.406] # imagenet mean\n norm_std = [0.229, 0.224, 0.225] # imagenet std\n normalize = Normalize(mean=norm_mean, std=norm_std)\n\n print('Building train transforms ...')\n transform_tr = []\n\n print('+ resize to {}x{}'.format(height, width))\n transform_tr += [Resize((height, width))]\n\n if 'random_flip' in transforms:\n print('+ random flip')\n transform_tr += [RandomHorizontalFlip()]\n\n if 'random_crop' in transforms:\n print(\n '+ random crop (enlarge to {}x{} and '\n 'crop {}x{})'.format(\n int(round(height * 1.125)), int(round(width * 1.125)), height,\n width\n )\n )\n transform_tr += [Random2DTranslation(height, width)]\n\n if 'random_patch' in transforms:\n print('+ random patch')\n transform_tr += [RandomPatch()]\n\n if 'color_jitter' in transforms:\n print('+ color jitter')\n transform_tr += [\n ColorJitter(brightness=0.2, contrast=0.15, saturation=0, hue=0)\n ]\n\n print('+ to torch tensor of range [0, 1]')\n transform_tr += [ToTensor()]\n\n print('+ normalization (mean={}, std={})'.format(norm_mean, norm_std))\n transform_tr += [normalize]\n\n if 'random_erase' in transforms:\n print('+ random erase')\n transform_tr += [RandomErasing(mean=norm_mean)]\n\n transform_tr = Compose(transform_tr)\n\n print('Building test transforms ...')\n print('+ resize to {}x{}'.format(height, width))\n print('+ to 
torch tensor of range [0, 1]')\n print('+ normalization (mean={}, std={})'.format(norm_mean, norm_std))\n\n transform_te = Compose([\n Resize((height, width)),\n ToTensor(),\n normalize,\n ])\n\n return transform_tr, transform_te\n\ndef build_SSDAugment_transforms(height, width, norm_mean=[0.485, 0.456, 0.406],\n norm_std=[0.229, 0.224, 0.225], **kwargs):\n \"\"\"Builds train and test transform functions.\n\n Args:\n height (int): target image height.\n width (int): target image width.\n transforms (str or list of str, optional): transformations applied to model training.\n Default is 'random_flip'.\n norm_mean (list or None, optional): normalization mean values. Default is ImageNet means.\n norm_std (list or None, optional): normalization standard deviation values. Default is\n ImageNet standard deviation values.\n \"\"\"\n print(\"use build_SSDAugment_transforms\")\n # import time\n # time.sleep(10)\n\n if norm_mean is None or norm_std is None:\n norm_mean = [0.485, 0.456, 0.406] # imagenet mean\n norm_std = [0.229, 0.224, 0.225] # imagenet std\n normalize = Normalize(mean=norm_mean, std=norm_std)\n\n print('Building train transforms ...')\n transform_tr = []\n transform_tr += [Resize((height, width))]\n print('+ resize to {}x{}'.format(height, width))\n transform_tr += [RandomHorizontalFlip()]\n transform_tr += [Random2DTranslation(height, width)]\n transform_tr += [RandomPatch()]\n transform_tr += [SSD_PhotometricDistort()]\n\n print('+ to torch tensor of range [0, 1]')\n transform_tr += [ToTensor()]\n print('+ normalization (mean={}, std={})'.format(norm_mean, norm_std))\n transform_tr += [normalize]\n transform_tr += [RandomErasing()]\n transform_tr = Compose(transform_tr)\n\n print('Building test transforms ...')\n print('+ resize to {}x{}'.format(height, width))\n print('+ to torch tensor of range [0, 1]')\n print('+ normalization (mean={}, std={})'.format(norm_mean, norm_std))\n transform_te = Compose([\n Resize((height, width)),\n ToTensor(),\n normalize,\n ])\n\n return transform_tr, transform_te"
] | [
[
"torch.mm",
"torch.zeros_like",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
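A stripped-down sketch of the `RandomErasing` geometry on a random CHW tensor (standalone; the real transform above also retries up to 100 times and handles single-channel images).

import math
import random
import torch

img = torch.rand(3, 256, 128)                   # C x H x W
area = img.size(1) * img.size(2)
target_area = random.uniform(0.02, 0.4) * area  # sl..sh defaults
aspect = random.uniform(0.3, 1 / 0.3)           # r1 default
h = int(round(math.sqrt(target_area * aspect)))
w = int(round(math.sqrt(target_area / aspect)))
if h < img.size(1) and w < img.size(2):
    x1 = random.randint(0, img.size(1) - h)
    y1 = random.randint(0, img.size(2) - w)
    for c, m in enumerate([0.4914, 0.4822, 0.4465]):
        img[c, x1:x1 + h, y1:y1 + w] = m        # erase with per-channel mean
print(img.shape)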
TrasgoGroup/Cell-Viewer | [
"4f2421e278b22926cd34e0600d0ecb12026d58c9"
] | [
"figmap/cellmap.py"
] | [
"\"\"\"\n A P A C H E L I C E N S E\n ------------ \n Version 2.0, January 2004\n\n Copyright 2021 Miguel Cruces Fernández\n\n Licensed under the Apache License, Version 2.0 (the \n\"License\"); you may not use this file except in compliance \nwith the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, \nsoftware distributed under the License is distributed on an \n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, \neither express or implied. See the License for the specific \nlanguage governing permissions and limitations under the \nLicense.\n\n [email protected]\n [email protected]\n\"\"\"\n\nfrom kitchen.cook_root import CookDataROOT\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom matplotlib.colors import to_hex, Normalize\nfrom os.path import join as join_path\n\n\nclass Cellmap:\n \"\"\"\n Matplotlibilization of the cell map.\n \"\"\"\n storage_dir = \"store/saves/\"\n fig = None\n\n def __init__(self):\n\n self._mapper = None\n self.cooked = None\n self.save_cm = True\n self.show_cm = False\n self.hz = False\n\n self.filename = None\n\n def update(self, cooked: CookDataROOT, save: bool = True,\n show: bool = False, hz: bool = False):\n \"\"\"\n Update the current cell map\n\n :param cooked: Cooked data from an instnce of a class that inherits from Chef.\n :param save: True (default) to save_cm the file, False to skip.\n :param show: True to show_cm cellmap, False (default) to not.\n :param hz: True to show cellmap in Hz units, False (default) to show it\n in default units (number of counts).\n \"\"\"\n self.cooked = cooked\n self.save_cm = save\n self.show_cm = show\n self.hz = hz\n\n if self._mapper is None:\n self.set_mapper()\n\n @property\n def mapper(self):\n \"\"\"\n Mapper Getter\n \"\"\"\n return self._mapper\n\n @mapper.setter\n def mapper(self, mapper: cm.ScalarMappable):\n \"\"\"\n Mapper setter\n \"\"\"\n self._mapper = mapper\n\n def create(self):\n \"\"\"\n Create the Cellmap\n \"\"\"\n # Title and Filename:\n if len(self.cooked.used_filenames) > 1:\n files_str = f\"{self.cooked.used_filenames[0]}-{self.cooked.used_filenames[-1]}\"\n else:\n files_str = f\"{self.cooked.used_filenames[0]}\"\n\n self.filename = f\"{files_str}-\"\\\n f\"{self.cooked.plane_name}-\"\\\n f\"{self.cooked.current_var}\"\n\n title = f\"Plane: {self.cooked.plane_name}, Branch: {self.cooked.current_var}\\n\"\\\n f\"Files: {files_str}\"\n\n # Create Figure:\n self.fig, (cells, cmap) = plt.subplots(\n ncols=2,\n figsize=(7, 5),\n gridspec_kw={\n \"width_ratios\": [15, 1]\n }\n )\n self.fig.tight_layout()\n\n # Cellmap:\n cipher = lambda n: int(np.log10(n) + 1)\n\n for (i, j), z in np.ndenumerate(self.cooked.plane_event):\n if not self.hz:\n txt = f\"{z}\"\n if z >= 1e2:\n font_size = 32 / cipher(z)\n else:\n font_size = 12\n else:\n txt = f\"{z:.3f}\"\n font_size = 9\n\n _, foreground = self.set_value_color(z)\n cells.text(\n j, i,\n txt,\n fontsize=font_size,\n ha=\"center\", va=\"center\",\n color=foreground\n )\n\n cells.plot([5.5, 5.5], [-0.5, 9.5], \"#ffffff\")\n cells.plot([-0.5, 11.5], [4.5, 4.5], \"#ffffff\")\n\n cells.axis(\"off\")\n cells.set_title(title)\n\n # Color map bar:\n c_min, c_max = self.mapper.get_clim()\n im = cells.matshow(\n self.cooked.plane_event,\n interpolation=None,\n aspect='auto',\n cmap=self.mapper.get_cmap(),\n vmin=c_min, vmax=c_max,\n )\n\n cmap.tick_params(labelrotation=270)\n self.fig.colorbar(im, 
cmap)\n\n def set_mapper(self, set_max: bool = False, max_val: float = 0.8, cmap_name: str = \"jet\"):\n \"\"\"\n Normalize item number values to colormap.\n Create a matplotlib.cm.ScalarMappable called self.mapper.\n\n (min, max) = self.mapper.get_clim()\n color_map = self.mapper.get_cmap()\n\n :param set_max: True to set a maximum value in color map, False (default) to\n calculate maximum automatically.\n :param max_val: If set_max=True, max_val is the maximum value to set maximum\n color in cellmap.\n :param cmap_name: Name of the Color Map Gradient.\n \"\"\"\n numpy_value = self.cooked.plane_event\n if set_max:\n min_val = 0\n max_val = float(max_val)\n else:\n min_val = np.min(numpy_value)\n max_val = np.max(numpy_value)\n norm = Normalize(vmin=min_val, vmax=max_val)\n self.mapper = cm.ScalarMappable(norm=norm, cmap=cmap_name)\n\n def set_value_color(self, val: float):\n \"\"\"\n This method is used for choose the color for each value in the cell buttons.\n :param val: Value of each button.\n :return: Colors for button background (bg_color) and button value (fg_color)\n \"\"\"\n\n # Management of background color (button)\n rgba_color = self._mapper.to_rgba(val)\n rgb_color = rgba_color[:-1]\n bg_color = to_hex(rgb_color)\n\n # Management of foreground color (words)\n inv_rgb_color = (1 - rgb_color[0], 1 - rgb_color[1], 1 - rgb_color[2])\n fg_color = to_hex(inv_rgb_color) # Foreground is inverted color of background\n\n return bg_color, fg_color\n\n def save_file(self, out_path: str = None, ext: str = \"png\", re_create: bool = True, label: str = \"\"):\n \"\"\"\n Save the created figure.\n\n :param out_path: Path to to the directory to save the file.\n :param ext: Extension of the output file.\n :param re_create: True to create the figure again (default), False to use de previous\n calculated figure. If any figure exists, it will raise an error.\n :param label: Label to the end of the filename (optional).\n \"\"\"\n if re_create:\n self.create()\n\n if ext.startswith(\".\"):\n ext = ext[1:]\n\n if out_path is None:\n out_path = self.storage_dir\n\n self.fig.savefig(\n f\"{join_path(out_path, self.filename)}{label}.{ext}\",\n bbox_inches='tight'\n )\n"
] | [
[
"numpy.min",
"matplotlib.colors.to_hex",
"matplotlib.pyplot.subplots",
"matplotlib.colors.Normalize",
"numpy.max",
"numpy.log10",
"matplotlib.cm.ScalarMappable",
"numpy.ndenumerate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
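A standalone sketch of the `set_value_color` trick used above: map a cell value through the `ScalarMappable`, then invert the RGB components to get a readable foreground color.

from matplotlib import cm
from matplotlib.colors import Normalize, to_hex

mapper = cm.ScalarMappable(norm=Normalize(vmin=0, vmax=100), cmap='jet')
r, g, b, _ = mapper.to_rgba(42.0)
print(to_hex((r, g, b)), to_hex((1 - r, 1 - g, 1 - b)))  # background, foreground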
zhuoyuhe/CenterTrack | [
"43a2e641b7d3f4d19279eab24f2e4c6e8a6d7ae1"
] | [
"src/lib/dataset/datasets/nuscenes.py"
] | [
    "# Copyright (c) Xingyi Zhou. All Rights Reserved\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pycocotools.coco as coco\nfrom pycocotools.cocoeval import COCOeval\nfrom pyquaternion import Quaternion\nimport numpy as np\nimport torch\nimport json\nimport cv2\nimport os\nimport math\nimport copy\n\nfrom ..generic_dataset import GenericDataset\nfrom utils.ddd_utils import compute_box_3d, project_to_image\n\nclass nuScenes(GenericDataset):\n  default_resolution = [448, 800]\n  num_categories = 10\n  class_name = [\n    'car', 'truck', 'bus', 'trailer', \n    'construction_vehicle', 'pedestrian', 'motorcycle', 'bicycle',\n    'traffic_cone', 'barrier']\n  cat_ids = {i + 1: i + 1 for i in range(num_categories)}\n  focal_length = 1200\n  max_objs = 128\n  _tracking_ignored_class = ['construction_vehicle', 'traffic_cone', 'barrier']\n  _vehicles = ['car', 'truck', 'bus', 'trailer', 'construction_vehicle']\n  _cycles = ['motorcycle', 'bicycle']\n  _pedestrians = ['pedestrian']\n  attribute_to_id = {\n    '': 0, 'cycle.with_rider' : 1, 'cycle.without_rider' : 2,\n    'pedestrian.moving': 3, 'pedestrian.standing': 4, \n    'pedestrian.sitting_lying_down': 5,\n    'vehicle.moving': 6, 'vehicle.parked': 7, \n    'vehicle.stopped': 8}\n  id_to_attribute = {v: k for k, v in attribute_to_id.items()}\n\n  def __init__(self, opt, split):\n    split_names = {'train': 'train', 'val': 'val'}\n    split_name = split_names[split]\n    data_dir = os.path.join(opt.data_dir, 'nuscenes')\n    img_dir = os.path.join(\n      data_dir, 'v1.0-mini' if split_name == 'mini' else '')\n    print('Dataset version', opt.dataset_version)\n    if opt.dataset_version == 'test':\n      ann_path = os.path.join(data_dir, \n        'annotations', 'test.json')\n      img_dir = os.path.join(data_dir, 'v1.0-test')\n    else:\n      ann_path = os.path.join(data_dir, \n        'annotations', '{}{}.json').format(opt.dataset_version, split_name)\n\n    self.images = None\n    super(nuScenes, self).__init__(opt, split, ann_path, img_dir)\n\n    self.alpha_in_degree = False \n    self.num_samples = len(self.images)\n\n    print('Loaded {} {} samples'.format(split, self.num_samples))\n\n\n  def __len__(self):\n    return self.num_samples\n\n\n  def _to_float(self, x):\n    return float(\"{:.2f}\".format(x))\n\n\n  def convert_coco_format(self, all_bboxes):\n    detections = []\n    for image_id in all_bboxes:\n      if type(all_bboxes[image_id]) != type({}):\n        # newest format\n        for j in range(len(all_bboxes[image_id])):\n          item = all_bboxes[image_id][j] \n          category_id = item['class']\n          bbox = item['bbox']\n          bbox[2] -= bbox[0]\n          bbox[3] -= bbox[1]\n          bbox_out = list(map(self._to_float, bbox[0:4]))\n          detection = {\n              \"image_id\": int(image_id),\n              \"category_id\": int(category_id),\n              \"bbox\": bbox_out,\n              \"score\": float(\"{:.2f}\".format(item['score']))\n          }\n          detections.append(detection)\n    return detections\n\n\n  def convert_eval_format(self, results):\n    from nuscenes.utils.data_classes import Box\n    ret = {'meta': {'use_camera': True, 'use_lidar': False, 'use_radar': False, \n      'use_map': False, 'use_external': False}, 'results': {}}\n    print('Converting nuscenes format...')\n    for image_id in self.images:\n      if not (image_id in results):\n        continue\n      image_info = self.coco.loadImgs(ids=[image_id])[0]\n      sample_token = image_info['sample_token']\n      trans_matrix = np.array(image_info['trans_matrix'], np.float32)\n      sensor_id = image_info['sensor_id']\n      sample_results = []\n      for item in results[image_id]:\n        class_name = self.class_name[int(item['class'] - 1)] \\\n          if not ('detection_name' in item) 
else item['detection_name']\n if self.opt.tracking and class_name in self._tracking_ignored_class:\n continue\n score = float(item['score']) \\\n if not ('detection_score' in item) else item['detection_score']\n if 'size' in item:\n size = item['size']\n else:\n size = [float(item['dim'][1]), float(item['dim'][2]), \\\n float(item['dim'][0])]\n for i in range(3):\n size[i] = max(0.001, size[i])\n if 'translation' in item:\n translation = item['translation']\n else:\n translation = np.dot(trans_matrix, np.array(\n [item['loc'][0], item['loc'][1] - size[2], item['loc'][2], 1], \n np.float32))\n\n det_id = item['det_id'] if 'det_id' in item else -1\n tracking_id = item['tracking_id'] if 'tracking_id' in item else 1\n \n if not ('rotation' in item):\n rot_cam = Quaternion(\n axis=[0, 1, 0], angle=item['rot_y'])\n loc = np.array(\n [item['loc'][0], item['loc'][1], item['loc'][2]], np.float32)\n box = Box(loc, size, rot_cam, name='2', token='1')\n box.translate(np.array([0, - box.wlh[2] / 2, 0]))\n box.rotate(Quaternion(image_info['cs_record_rot']))\n box.translate(np.array(image_info['cs_record_trans']))\n box.rotate(Quaternion(image_info['pose_record_rot']))\n box.translate(np.array(image_info['pose_record_trans']))\n rotation = box.orientation\n rotation = [float(rotation.w), float(rotation.x), \\\n float(rotation.y), float(rotation.z)]\n else:\n rotation = item['rotation']\n \n nuscenes_att = np.array(item['nuscenes_att'], np.float32) \\\n if 'nuscenes_att' in item else np.zeros(8, np.float32)\n att = ''\n if class_name in self._cycles:\n att = self.id_to_attribute[np.argmax(nuscenes_att[0:2]) + 1]\n elif class_name in self._pedestrians:\n att = self.id_to_attribute[np.argmax(nuscenes_att[2:5]) + 3]\n elif class_name in self._vehicles:\n att = self.id_to_attribute[np.argmax(nuscenes_att[5:8]) + 6]\n if 'velocity' in item and len(item['velocity']) == 2:\n velocity = item['velocity']\n else:\n velocity = item['velocity'] if 'velocity' in item else [0, 0, 0]\n velocity = np.dot(trans_matrix, np.array(\n [velocity[0], velocity[1], velocity[2], 0], np.float32))\n velocity = [float(velocity[0]), float(velocity[1])]\n result = {\n 'sample_token': sample_token, \n 'translation': [float(translation[0]), float(translation[1]), \\\n float(translation[2])],\n 'size': size,\n 'rotation': rotation,\n 'velocity': velocity,\n 'detection_name': class_name,\n 'attribute_name': att \\\n if not ('attribute_name' in item) else item['attribute_name'],\n 'detection_score': score,\n 'tracking_name': class_name,\n 'tracking_score': score,\n 'tracking_id': tracking_id,\n 'sensor_id': sensor_id,\n 'det_id': det_id}\n\n sample_results.append(result)\n if sample_token in ret['results']:\n ret['results'][sample_token] = ret['results'][sample_token] + \\\n sample_results\n else:\n ret['results'][sample_token] = sample_results\n\n for sample_token in ret['results'].keys():\n confs = sorted([(-d['detection_score'], ind) \\\n for ind, d in enumerate(ret['results'][sample_token])])\n ret['results'][sample_token] = [ret['results'][sample_token][ind] \\\n for _, ind in confs[:min(500, len(confs))]]\n\n return ret\n\n\n def save_results(self, results, save_dir, task):\n json.dump(self.convert_eval_format(results), \n open('{}/results_nuscenes_{}.json'.format(save_dir, task), 'w'))\n\n\n def run_eval(self, results, save_dir):\n task = 'tracking' if self.opt.tracking else 'det'\n self.save_results(results, save_dir, task)\n if task == 'det':\n os.system('python ' + \\\n 
'../tools/nuscenes-devkit/python-sdk/nuscenes/eval/detection/evaluate.py ' +\\\n '{}/results_nuscenes_{}.json '.format(save_dir, task) + \\\n '--output_dir {}/nuscenes_eval_det_output/ '.format(save_dir) + \\\n '--dataroot ../data/nuscenes/v1.0-trainval/')\n else:\n os.system('python ' + \\\n 'tools/nuscenes-devkit/python-sdk/nuscenes/eval/tracking/evaluate.py ' +\\\n '{}/results_nuscenes_{}.json '.format(save_dir, task) + \\\n '--output_dir {}/nuscenes_evaltracl__output/ '.format(save_dir) + \\\n '--dataroot ../data/nuscenes/v1.0-trainval/')\n os.system('python ' + \\\n 'tools/nuscenes-devkit/python-sdk-alpha02/nuscenes/eval/tracking/evaluate.py ' +\\\n '{}/results_nuscenes_{}.json '.format(save_dir, task) + \\\n '--output_dir {}/nuscenes_evaltracl__output/ '.format(save_dir) + \\\n '--dataroot ../data/nuscenes/v1.0-trainval/')\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
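A toy illustration of the attribute lookup in `convert_eval_format`: the 8-dim `nuscenes_att` vector is sliced per class group and argmax'd back into `id_to_attribute` space (ids copied from the class definition above; the scores are made up).

import numpy as np

id_to_attribute = {1: 'cycle.with_rider', 2: 'cycle.without_rider',
                   3: 'pedestrian.moving', 4: 'pedestrian.standing',
                   5: 'pedestrian.sitting_lying_down', 6: 'vehicle.moving',
                   7: 'vehicle.parked', 8: 'vehicle.stopped'}
nuscenes_att = np.array([0, 0, 0.1, 0.7, 0.2, 0, 0, 0], np.float32)
# the pedestrian group occupies slots 2:5, offset by attribute id 3
print(id_to_attribute[np.argmax(nuscenes_att[2:5]) + 3])  # pedestrian.standing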
ishine/StyleSpeech-1 | [
"f939cf9cb981db7b738fa9c9c9a7fea2dfdd0766"
] | [
"train_meta.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport torch.distributed as dist\r\nfrom torch.utils.tensorboard import SummaryWriter\r\nimport torch.multiprocessing as mp\r\nimport torch.distributed as dist\r\nimport torch.utils.data.distributed\r\n\r\nimport argparse\r\nimport os\r\nimport json\r\n\r\nfrom models.StyleSpeech import StyleSpeech\r\nfrom models.Discriminators import Discriminator\r\nfrom dataloader import prepare_dataloader\r\nfrom optimizer import ScheduledOptim\r\nfrom evaluate import evaluate\r\nimport utils\r\n\r\ndef load_checkpoint(checkpoint_path, model, discriminator, G_optim, D_optim, rank, distributed=False):\r\n assert os.path.isfile(checkpoint_path)\r\n print(\"Starting model from checkpoint '{}'\".format(checkpoint_path))\r\n checkpoint_dict = torch.load(checkpoint_path, map_location='cuda:{}'.format(rank))\r\n if 'model' in checkpoint_dict:\r\n if distributed:\r\n state_dict = {}\r\n for k,v in checkpoint_dict['model'].items():\r\n state_dict['module.{}'.format(k)] = v\r\n model.load_state_dict(state_dict)\r\n else:\r\n model.load_state_dict(checkpoint_dict['model'])\r\n print('Model is loaded!') \r\n if 'discriminator' in checkpoint_dict:\r\n if distributed:\r\n state_dict = {}\r\n for k,v in checkpoint_dict['discriminator'].items():\r\n state_dict['module.{}'.format(k)] = v\r\n discriminator.load_state_dict(state_dict)\r\n else:\r\n discriminator.load_state_dict(checkpoint_dict['discriminator'])\r\n print('Discriminator is loaded!')\r\n if 'G_optim' in checkpoint_dict or 'optimizer' in checkpoint_dict:\r\n if 'optimizer' in checkpoint_dict:\r\n G_optim.load_state_dict(checkpoint_dict['optimizer'])\r\n if 'G_optim' in checkpoint_dict:\r\n G_optim.load_state_dict(checkpoint_dict['G_optim'])\r\n print('G_optim is loaded!')\r\n if 'D_optim' in checkpoint_dict:\r\n D_optim.load_state_dict(checkpoint_dict['D_optim'])\r\n print('D_optim is loaded!')\r\n current_step = checkpoint_dict['step'] + 1\r\n del checkpoint_dict\r\n return model, discriminator, G_optim, D_optim, current_step\r\n\r\n\r\ndef main(rank, args, c):\r\n\r\n print('Use GPU: {} for training'.format(rank))\r\n\r\n ngpus = args.ngpus\r\n if args.distributed:\r\n torch.cuda.set_device(rank % ngpus)\r\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=rank)\r\n\r\n # Define model & loss\r\n model = StyleSpeech(c).cuda()\r\n discriminator = Discriminator(c).cuda()\r\n num_param = utils.get_param_num(model)\r\n D_num_param = utils.get_param_num(discriminator)\r\n\r\n if rank==0:\r\n print('Number of Meta-StyleSpeech Parameters:', num_param)\r\n print('Number of Discriminator Parameters:', D_num_param)\r\n with open(os.path.join(args.save_path, \"model.txt\"), \"w\") as f_log:\r\n f_log.write(str(model))\r\n f_log.write(str(discriminator))\r\n print(\"Model Has Been Defined\")\r\n \r\n model_without_ddp = model\r\n discriminator_without_ddp = discriminator\r\n if args.distributed:\r\n c.meta_batch_size = c.meta_batch_size // ngpus\r\n model = nn.parallel.DistributedDataParallel(model, device_ids=[rank])\r\n model_without_ddp = model.module\r\n discriminator = nn.parallel.DistributedDataParallel(discriminator, device_ids=[rank])\r\n discriminator_without_ddp = discriminator.module\r\n\r\n # Optimizer\r\n G_optim = torch.optim.Adam(model.parameters(), betas=c.betas, eps=c.eps)\r\n D_optim = torch.optim.Adam(discriminator.parameters(), lr=2e-4, betas=c.betas, eps=c.eps)\r\n # Loss\r\n Loss = model_without_ddp.get_criterion()\r\n adversarial_loss = 
discriminator_without_ddp.get_criterion()\r\n print(\"Optimizer and Loss Function Defined.\")\r\n\r\n # Get dataset\r\n data_loader = prepare_dataloader(args.data_path, \"train.txt\", batch_size=c.meta_batch_size, meta_learning=True, seed=rank) \r\n print(\"Data Loader is Prepared\")\r\n\r\n # Load checkpoint if exists\r\n if args.checkpoint_path is not None:\r\n assert os.path.exists(args.checkpoint_path)\r\n model, discriminator, G_optim, D_optim, current_step = load_checkpoint(\r\n args.checkpoint_path, model, discriminator, G_optim, D_optim, rank, args.distributed)\r\n print(\"\\n---Model Restored at Step {}---\\n\".format(current_step))\r\n else:\r\n print(\"\\n---Start New Training---\\n\")\r\n current_step = 0\r\n if rank == 0:\r\n checkpoint_path = os.path.join(args.save_path, 'ckpt')\r\n os.makedirs(checkpoint_path, exist_ok=True)\r\n \r\n # scheduled optimizer\r\n G_optim = ScheduledOptim(G_optim, c.decoder_hidden, c.n_warm_up_step, current_step)\r\n \r\n # Init logger\r\n if rank == 0:\r\n log_path = os.path.join(args.save_path, 'log')\r\n logger = SummaryWriter(os.path.join(log_path, 'board'))\r\n with open(os.path.join(log_path, \"log.txt\"), \"a\") as f_log:\r\n f_log.write(\"Dataset :{}\\n Number of Parameters: {}\\n\".format(c.dataset, num_param))\r\n\r\n # Init synthesis directory\r\n if rank == 0:\r\n synth_path = os.path.join(args.save_path, 'synth')\r\n os.makedirs(synth_path, exist_ok=True)\r\n\r\n model.train()\r\n while current_step < args.max_iter:\r\n # Get Training Loader\r\n for idx, batch in enumerate(data_loader):\r\n\r\n if current_step == args.max_iter:\r\n break\r\n\r\n losses = {}\r\n #### Generator ####\r\n G_optim.zero_grad()\r\n # Get Support Data\r\n sid, text, mel_target, D, log_D, f0, energy, \\\r\n src_len, mel_len, max_src_len, max_mel_len = model_without_ddp.parse_batch(batch)\r\n \r\n # Support Forward\r\n mel_output, src_output, style_vector, log_duration_output, f0_output, energy_output, src_mask, mel_mask, _ = model(\r\n text, src_len, mel_target, mel_len, D, f0, energy, max_src_len, max_mel_len)\r\n src_target, _, _ = model_without_ddp.variance_adaptor.length_regulator(src_output, D)\r\n\r\n # Reconstruction loss\r\n mel_loss, d_loss, f_loss, e_loss = Loss(mel_output, mel_target, \r\n log_duration_output, log_D, f0_output, f0, energy_output, energy, src_len, mel_len)\r\n losses['G_recon'] = mel_loss\r\n losses['d_loss'] = d_loss\r\n losses['f_loss'] = f_loss\r\n losses['e_loss'] = e_loss\r\n \r\n\r\n #### META LEARNING ####\r\n # Get query text \r\n B = mel_target.shape[0]\r\n perm_idx = torch.randperm(B)\r\n q_text, q_src_len = text[perm_idx], src_len[perm_idx]\r\n # Generate query speech\r\n q_mel_output, q_src_output, q_log_duration_output, \\\r\n _, _, q_src_mask, q_mel_mask, q_mel_len = model_without_ddp.inference(style_vector, q_text, q_src_len)\r\n # Legulate length of query src\r\n q_duration_rounded = torch.clamp(torch.round(torch.exp(q_log_duration_output.detach())-1.), min=0)\r\n q_duration = q_duration_rounded.masked_fill(q_src_mask, 0).long()\r\n q_src, _, _ = model_without_ddp.variance_adaptor.length_regulator(q_src_output, q_duration)\r\n # Adverserial loss \r\n t_val, s_val, _= discriminator(q_mel_output, q_src, None, sid, q_mel_mask)\r\n losses['G_GAN_query_t'] = adversarial_loss(t_val, is_real=True)\r\n losses['G_GAN_query_s'] = adversarial_loss(s_val, is_real=True)\r\n\r\n # Total generator loss\r\n alpha = 10.0\r\n G_loss = alpha*losses['G_recon'] + losses['d_loss'] + losses['f_loss'] + losses['e_loss'] + \\\r\n 
losses['G_GAN_query_t'] + losses['G_GAN_query_s']\r\n # Backward loss\r\n G_loss.backward()\r\n # Update weights\r\n G_optim.step_and_update_lr()\r\n\r\n\r\n #### Discriminator ####\r\n D_optim.zero_grad()\r\n # Real\r\n real_t_pred, real_s_pred, cls_loss = discriminator(\r\n mel_target, src_target.detach(), style_vector.detach(), sid, mask=mel_mask)\r\n # Fake\r\n fake_t_pred, fake_s_pred, _ = discriminator(\r\n q_mel_output.detach(), q_src.detach(), None, sid, mask=q_mel_mask)\r\n losses['D_t_loss'] = adversarial_loss(real_t_pred, is_real=True) + adversarial_loss(fake_t_pred, is_real=False)\r\n losses['D_s_loss'] = adversarial_loss(real_s_pred, is_real=True) + adversarial_loss(fake_s_pred, is_real=False)\r\n losses['cls_loss'] = cls_loss\r\n # Total discriminator Loss\r\n D_loss = losses['D_t_loss'] + losses['D_s_loss'] + losses['cls_loss']\r\n # Backward\r\n D_loss.backward()\r\n # Update weights\r\n D_optim.step()\r\n \r\n # Print log\r\n if current_step % args.log_step == 0 and current_step != 0 and rank == 0 :\r\n m_l = losses['G_recon'].item()\r\n d_l = losses['d_loss'].item()\r\n f_l = losses['f_loss'].item() \r\n e_l = losses['e_loss'].item() \r\n g_t_l = losses['G_GAN_query_t'].item()\r\n g_s_l = losses['G_GAN_query_s'].item()\r\n d_t_l = losses['D_t_loss'].item() / 2\r\n d_s_l = losses['D_s_loss'].item() / 2\r\n cls_l = losses['cls_loss'].item()\r\n\r\n str1 = \"Step [{}/{}]:\".format(current_step, args.max_iter)\r\n str2 = \"Mel Loss: {:.4f},\\n\" \\\r\n \"Duration Loss: {:.4f}, F0 Loss: {:.4f}, Energy Loss: {:.4f}\\n\" \\\r\n \"T G Loss: {:.4f}, T D Loss: {:.4f}, S G Loss: {:.4f}, S D Loss: {:.4f} \\n\" \\\r\n \"cls_Loss: {:.4f};\" \\\r\n .format(m_l, d_l, f_l, e_l, g_t_l, d_t_l, g_s_l, d_s_l, cls_l)\r\n print(str1 + \"\\n\" + str2 +\"\\n\")\r\n with open(os.path.join(log_path, \"log.txt\"), \"a\") as f_log:\r\n f_log.write(str1 + \"\\n\" + str2 +\"\\n\")\r\n \r\n logger.add_scalar('Train/mel_loss', m_l, current_step)\r\n logger.add_scalar('Train/duration_loss', d_l, current_step)\r\n logger.add_scalar('Train/f0_loss', f_l, current_step)\r\n logger.add_scalar('Train/energy_loss', e_l, current_step)\r\n logger.add_scalar('Train/G_t_loss', g_t_l, current_step)\r\n logger.add_scalar('Train/D_t_loss', d_t_l, current_step)\r\n logger.add_scalar('Train/G_s_loss', g_s_l, current_step)\r\n logger.add_scalar('Train/D_s_loss', d_s_l, current_step)\r\n logger.add_scalar('Train/cls_loss', cls_l, current_step)\r\n \r\n # Save Checkpoint\r\n if current_step % args.save_step == 0 and current_step != 0 and rank == 0:\r\n torch.save({'model': model_without_ddp.state_dict(), \r\n 'discriminator': discriminator_without_ddp.state_dict(), \r\n 'G_optim': G_optim.state_dict(),'D_optim': D_optim.state_dict(),\r\n 'step': current_step}, \r\n os.path.join(checkpoint_path, 'checkpoint_{}.pth.tar'.format(current_step)))\r\n print(\"*** Save Checkpoint ***\")\r\n print(\"Save model at step {}...\\n\".format(current_step))\r\n\r\n if current_step % args.synth_step == 0 and current_step != 0 and rank == 0:\r\n length = mel_len[0].item() \r\n mel_target = mel_target[0, :length].detach().cpu().transpose(0, 1)\r\n mel = mel_output[0, :length].detach().cpu().transpose(0, 1)\r\n q_length = q_mel_len[0].item()\r\n q_mel = q_mel_output[0, :q_length].detach().cpu().transpose(0, 1)\r\n # plotting\r\n utils.plot_data([q_mel.numpy(), mel.numpy(), mel_target.numpy()], \r\n ['Query Spectrogram', 'Recon Spectrogram', 'Ground-Truth Spectrogram'], filename=os.path.join(synth_path, 'step_{}.png'.format(current_step)))\r\n 
print(\"Synth audios at step {}...\\n\".format(current_step))\r\n \r\n # Evaluate\r\n if current_step % args.eval_step == 0 and current_step != 0 and rank == 0:\r\n model.eval()\r\n with torch.no_grad():\r\n m_l, d_l, f_l, e_l = evaluate(args, model_without_ddp, current_step)\r\n str_v = \"*** Validation ***\\n\" \\\r\n \"Meta-StyleSpeech Step {},\\n\" \\\r\n \"Mel Loss: {}\\nDuration Loss:{}\\nF0 Loss: {}\\nEnergy Loss: {}\" \\\r\n .format(current_step, m_l, d_l, f_l, e_l)\r\n print(str_v + \"\\n\" )\r\n with open(os.path.join(log_path, \"eval.txt\"), \"a\") as f_log:\r\n f_log.write(str_v + \"\\n\")\r\n logger.add_scalar('Validation/mel_loss', m_l, current_step)\r\n logger.add_scalar('Validation/duration_loss', d_l, current_step)\r\n logger.add_scalar('Validation/f0_loss', f_l, current_step)\r\n logger.add_scalar('Validation/energy_loss', e_l, current_step)\r\n model.train()\r\n\r\n current_step += 1 \r\n \r\n if rank == 0:\r\n print(\"Training Done at Step : {}\".format(current_step))\r\n torch.save({'model': model_without_ddp.state_dict(), \r\n 'discriminator': discriminator_without_ddp.state_dict(), \r\n 'G_optim': G_optim.state_dict(), 'D_optim': D_optim.state_dict(),\r\n 'step': current_step}, \r\n os.path.join(checkpoint_path, 'checkpoint_last_{}.pth.tar'.format(current_step)))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--data_path', default='dataset/LibriTTS/preprocessed')\r\n parser.add_argument('--save_path', default='exp_meta_stylespeech')\r\n parser.add_argument('--config', default='configs/config.json')\r\n parser.add_argument('--max_iter', default=100000, type=int)\r\n parser.add_argument('--save_step', default=5000, type=int)\r\n parser.add_argument('--synth_step', default=1000, type=int)\r\n parser.add_argument('--eval_step', default=5000, type=int)\r\n parser.add_argument('--log_step', default=100, type=int)\r\n parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to pretrained model') \r\n parser.add_argument('--dist-url', default='tcp://127.0.0.1:3456', type=str, help='url for setting up distributed training')\r\n parser.add_argument('--world-size', default=-1, type=int, help='number of nodes for distributed training')\r\n parser.add_argument('--rank', default=-1, type=int, help='distributed backend')\r\n parser.add_argument('--dist-backend', default='nccl', type=str, help='node rank for distributed training')\r\n\r\n args = parser.parse_args()\r\n\r\n torch.backends.cudnn.enabled = True\r\n\r\n with open(args.config) as f:\r\n data = f.read()\r\n json_config = json.loads(data)\r\n config = utils.AttrDict(json_config)\r\n utils.build_env(args.config, 'config.json', args.save_path)\r\n\r\n ngpus = torch.cuda.device_count()\r\n args.ngpus = ngpus\r\n args.distributed = ngpus > 1\r\n\r\n if args.distributed:\r\n args.world_size = ngpus\r\n mp.spawn(main, nprocs=ngpus, args=(args, config))\r\n else:\r\n main(0, args, config)\r\n\r\n\r\n"
] | [
[
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn",
"torch.cuda.set_device",
"torch.randperm",
"torch.no_grad",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
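A minimal sketch of the episodic trick in `main` above: the query batch is just the support batch permuted with `torch.randperm`, so each extracted style vector has to synthesize a different speaker's sentence.

import torch

B = 4
text = torch.arange(B * 3).view(B, 3)  # stand-in for a batch of token ids
perm_idx = torch.randperm(B)
q_text = text[perm_idx]                # query sentences, support styles
print(perm_idx.tolist(), q_text.shape)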
git-disl/HQ-Ensemble | [
"c588f2979476e50efd429c9d9d5bb2f854b9ad41"
] | [
"baselineDiversityPruning.py"
] | [
    "#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nimport time\nimport timeit\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom collections import Counter\n\nfrom itertools import combinations\n\n\nfrom pytorchUtility import *\nimport numpy as np\nfrom operator import itemgetter\nfrom EnsembleBench.groupMetrics import *\nfrom EnsembleBench.teamSelection import *\n\n\n# Dataset Configuration\npredictionDir = './cifar10/prediction'\ntrainPredictionDir = './cifar10/train'\nmodels = ['densenet-L190-k40', 'densenetbc-100-12', 'resnext8x64d', 'wrn-28-10-drop', 'vgg19_bn', \n          'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110']\nsuffix = '.pt'\n\n# baseline\nmaxModel = 0\nthresholdAcc = 96.33\n\n\nlabelVectorsList = list()\npredictionVectorsList = list()\ntmpAccList = list()\nfor m in models:\n    predictionPath = os.path.join(predictionDir, m+suffix)\n    prediction = torch.load(predictionPath)\n    predictionVectors = prediction['predictionVectors']\n    predictionVectorsList.append(nn.functional.softmax(predictionVectors, dim=-1).cpu())\n    labelVectors = prediction['labelVectors']\n    labelVectorsList.append(labelVectors.cpu())\n    tmpAccList.append(calAccuracy(predictionVectors, labelVectors))\n    print(tmpAccList[-1])\n\n\nminAcc = np.min(tmpAccList)\navgAcc = np.mean(tmpAccList)\nmaxAcc = np.max(tmpAccList)\n\n\ntrainLabelVectorsList = list()\ntrainPredictionVectorsList = list()\nfor m in models:\n    trainPredictionPath = os.path.join(trainPredictionDir, m+suffix)\n    trainPrediction = torch.load(trainPredictionPath)\n    trainPredictionVectors = trainPrediction['predictionVectors']\n    trainPredictionVectorsList.append(nn.functional.softmax(trainPredictionVectors, dim=-1).cpu())\n    trainLabelVectors = trainPrediction['labelVectors']\n    trainLabelVectorsList.append(trainLabelVectors.cpu())\n\n\nsampleID, sampleTarget, predictions, predVectors = calDisagreementSamplesNoGroundTruth(trainPredictionVectorsList, trainLabelVectorsList[0])\n\n\nsampleID = np.array(sampleID)\nsampleTarget = np.array(sampleTarget)\npredictions = np.array(predictions)\npredVectors = np.array([np.array([np.array(pp) for pp in p]) for p in predVectors])\n\nnModels = len(predictions[0])\nmodelIdx = list(range(nModels))\n\n\n# statistics for different metrics\nnp.random.seed(0)\ncrossValidationTimes = 3\nnRandomSamples = 100\n\naccuracyList = list()\n#negAccuracyList = list()\nkappaList = list()\nbinaryDisagreementList = list()\nkwVarianceList = list()\nGDList = list()\n\nteamSizeList = list()\nteamList = list()\n\nstartTime = timeit.default_timer()\nfor n in range(2, nModels+1):\n    kappa_scores = []\n    comb = combinations(modelIdx, n)\n    best_comb = None\n    best_kappa_score = 1.0\n    best_accuracy = 0.0\n    best_nSamples = len(predictions)\n    accuracies = []\n    for selectedModels in list(comb):\n        teamSampleID, teamSampleTarget, teamPredictions, teamPredVectors = filterModelsFixed(sampleID, sampleTarget, predictions, predVectors, selectedModels) \n        \n        if len(teamPredictions) == 0:\n            continue\n        \n        cur_kappa_scores = list()\n        cur_binary_disagreements = list()\n        cur_kw_variances = list()\n        cur_GDs = list()\n        \n        for _ in range(crossValidationTimes):\n            randomIdx = np.random.choice(np.arange(teamPredictions.shape[0]), nRandomSamples)\n            cur_kappa_scores.append(group_kappa_score(teamPredictions[randomIdx]))\n            
cur_binary_disagreements.append(group_binary_disagreement(teamPredictions[randomIdx], teamSampleTarget[randomIdx]))\n cur_kw_variances.append(group_KW_variance(teamPredictions[randomIdx], teamSampleTarget[randomIdx]))\n cur_GDs.append(group_generalized_diversity(teamPredictions[randomIdx], teamSampleTarget[randomIdx]))\n \n kappaList.append(np.mean(cur_kappa_scores))\n binaryDisagreementList.append(np.mean(cur_binary_disagreements))\n kwVarianceList.append(np.mean(cur_kw_variances))\n GDList.append(np.mean(cur_GDs))\n \n tmpAccuracy = calAveragePredictionVectorAccuracy(predictionVectorsList, labelVectorsList[0], modelsList=selectedModels)[0].cpu().numpy()\n accuracyList.append(tmpAccuracy)\n teamSizeList.append(n)\n teamList.append(selectedModels)\nendTime = timeit.default_timer()\nprint(\"Time: \", endTime-startTime)\n\n\naccuracyList = np.array(accuracyList)\nkappaList = np.array(kappaList)\nbinaryDisagreementList = np.array(binaryDisagreementList)\nkwVarianceList = np.array(kwVarianceList)\nGDList = np.array(GDList)\n\nteamSizeList = np.array(teamSizeList)\nteamList = np.array(teamList)\n\nQData = {\"Acc\": accuracyList, \n \"CK\": kappaList,\n \"BD\": binaryDisagreementList,\n \"KW\": kwVarianceList,\n \"GD\": GDList,\n \"teamSizeList\": teamSizeList,\n \"teamList\": teamList}\ndiversityMetricsList = ['CK', 'BD', 'KW', 'GD']\n\n\nteamAccuracyDict = {}\nfor acc, t in zip(accuracyList, teamList):\n teamAccuracyDict[\"\".join(map(str, t))] = acc\n\n\nQMetrics = {}\nQMetricsThreshold = {}\nteamSelectedQAllDict = {}\n\n\nfor j, dm in enumerate(diversityMetricsList):\n if dm in [\"CK\", \"QS\", \"FK\"]:\n QMetricsThreshold[dm] = np.mean(QData[dm])\n elif dm in [\"BD\", \"KW\", \"GD\"]:\n QMetricsThreshold[dm] = np.mean(1.0-QData[dm])\n\nprint(QMetricsThreshold)\n\nfor i, t in enumerate(QData[\"teamList\"]):\n teamName = \"\".join(map(str, t))\n for j, dm in enumerate(diversityMetricsList):\n QMetricsDM = QMetrics.get(dm, {})\n if dm in [\"CK\", \"QS\", \"FK\"]:\n QMetricsDM[teamName] = QData[dm][i]\n elif dm in [\"BD\", \"KW\", \"GD\"]:\n QMetricsDM[teamName] = 1.0 - QData[dm][i]\n QMetrics[dm] = QMetricsDM\n if QMetricsDM[teamName] < round(QMetricsThreshold[dm], 3):\n teamSelectedQAllSet = teamSelectedQAllDict.get(dm, set())\n teamSelectedQAllSet.add(teamName)\n teamSelectedQAllDict[dm] = teamSelectedQAllSet\n\nfor dm in diversityMetricsList:\n print(dm, getNTeamStatisticsTeamName(list(teamSelectedQAllDict[dm]), \n teamAccuracyDict, minAcc, avgAcc, maxAcc, \n targetModel=maxModel, thresholdAcc=thresholdAcc))\n"
] | [
[
"torch.nn.functional.softmax",
"numpy.random.seed",
"numpy.min",
"torch.load",
"numpy.arange",
"numpy.max",
"numpy.mean",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
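A standalone sketch of the team enumeration driving the baseline above: every size-n subset of models via `itertools.combinations`, which is why the loop cost grows quickly with the model pool size.

from itertools import combinations

nModels = 5
for n in range(2, nModels + 1):
    teams = list(combinations(range(nModels), n))
    print(n, len(teams))  # C(5,2)=10, C(5,3)=10, C(5,4)=5, C(5,5)=1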
medtray/IEEEBigData2020-DSRMM-Table-Retrieval | [
"ff274f0c029c856b584d29ee1590c971cb12b051"
] | [
"dsrmm_model.py"
] | [
"import torch\nimport torch.nn as nn\n\n\nimport numpy as np\nimport torch.nn.functional as f\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\nclass DSRMM(nn.Module):\n\n def __init__(self, args):\n \"\"\"\"Constructor of the class.\"\"\"\n super(DSRMM, self).__init__()\n\n self.wv=args.wv\n self.index_to_word=args.index_to_word\n\n self.input_dim=args.emsize\n self.device=args.device\n\n self.STR=args.STR\n\n self.nbins = args.nbins\n #self.bins = [-1.0, -0.5, 0, 0.5, 1.0, 1.0]\n self.bins = [-0.75, -0.25, 0.25, 0.75, 1.0, 1.0]\n\n self.gating_network = GatingNetwork(args.emsize)\n\n\n self.conv1 = nn.Conv2d(self.input_dim, args.k1, (3, 3), padding=1)\n self.conv2 = nn.Conv2d(self.input_dim, args.k2, (3, 5), padding=(1, 2))\n self.conv3 = nn.Conv2d(self.input_dim, args.k3, (3, 7), padding=(1, 3))\n self.relu = nn.ReLU()\n self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1)\n self.conv_all = nn.Conv2d(args.k1+args.k2+args.k3, args.k4, (3, 3), padding=1)\n self.conv_dim = nn.Conv2d(args.k4, args.sem_feature, (1, 1))\n\n self.conv_uni = nn.Sequential(\n nn.Conv2d(1, args.emsize, (1, self.input_dim)),\n nn.ReLU()\n )\n\n tensor_mu = torch.FloatTensor(args.mu).to(self.device)\n tensor_sigma = torch.FloatTensor(args.sigma).to(self.device)\n\n self.mu = Variable(tensor_mu, requires_grad=False).view(1, 1, 1, self.nbins)\n self.sigma = Variable(tensor_sigma, requires_grad=False).view(1, 1, 1, self.nbins)\n\n if args.STR:\n self.output3 = nn.Linear(args.sem_feature+args.nbins*args.max_query_len+39, 1,True)\n else:\n self.output3 = nn.Linear(args.sem_feature+args.nbins*args.max_query_len, 1,True)\n\n def get_intersect_matrix(self, q_embed, d_embed):\n sim = torch.bmm(q_embed, d_embed).view(q_embed.size()[0], q_embed.size()[1], d_embed.size()[2], 1)\n pooling_value = torch.exp((- ((sim - self.mu) ** 2) / (self.sigma ** 2) / 2))\n pooling_sum = torch.sum(pooling_value, 2)\n log_pooling_sum = torch.log(torch.clamp(pooling_sum, min=1e-10)) * 0.01\n #log_pooling_sum = torch.sum(log_pooling_sum, 1)\n return log_pooling_sum\n\n def get_intersect_matrix_with_cos(self, q_embed, d_embed):\n\n\n sim = f.cosine_similarity(q_embed, d_embed, 3).view(q_embed.size()[0], q_embed.size()[1], d_embed.size()[2], 1)\n pooling_value = torch.exp((- ((sim - self.mu) ** 2) / (self.sigma ** 2) / 2))\n pooling_sum = torch.sum(pooling_value, 2)\n log_pooling_sum = torch.log(torch.clamp(pooling_sum, min=1e-10)) * 0.01\n #log_pooling_sum = torch.sum(log_pooling_sum, 1)\n return log_pooling_sum\n\n\n def to_embedding(self,input):\n shape_input = list(input.shape)\n\n em = input.view(-1)\n list_of_embeddings = []\n for key in em:\n list_of_embeddings += self.wv[self.index_to_word[key]].tolist()\n list_of_embeddings = torch.Tensor(list_of_embeddings)\n embeds = list_of_embeddings.view(shape_input[0], shape_input[1],\n self.input_dim).to(self.device)\n\n return embeds\n\n\n def forward(self, batch_queries, batch_docs,batch_values_struct,batch_semantic):\n\n\n\n num_docs, dlen = batch_docs.shape[0], batch_docs.shape[1]\n\n\n emb_query = self.to_embedding(batch_queries)\n emb_desc = self.to_embedding(batch_docs)\n\n all_tables = []\n\n for sample in batch_values_struct:\n l_table = []\n for instance in sample:\n instance = torch.Tensor(instance).unsqueeze(0)\n instance = instance.type(torch.int64)\n emb_instance = self.to_embedding(instance)\n emb_instance = torch.mean(emb_instance, dim=1)\n l_table.append(emb_instance.tolist())\n all_tables.append(l_table)\n\n all_tables = 
torch.Tensor(all_tables).squeeze(2).to(self.device)\n emb_desc=torch.cat([emb_desc,all_tables],dim=1)\n\n desc_att_shape = emb_desc.shape\n query_shape = emb_query.shape\n\n embedded_docs = torch.stack([emb_desc] * query_shape[1], dim=1).to(self.device)\n embedded_queries = torch.stack([emb_query] * desc_att_shape[1], dim=2).to(self.device)\n\n qwu_embed = torch.transpose(\n torch.squeeze(self.conv_uni(emb_query.view(emb_query.size()[0], 1, -1, self.input_dim))), 1,\n 2) + 0.000000001\n\n dwu_embed = torch.squeeze(\n self.conv_uni(emb_desc.view(emb_desc.size()[0], 1, -1, self.input_dim))) + 0.000000001\n\n\n qwu_embed_norm = F.normalize(qwu_embed, p=2, dim=2, eps=1e-10)\n dwu_embed_norm = F.normalize(dwu_embed, p=2, dim=1, eps=1e-10)\n\n #log_pooling_sum_wwuu = self.get_intersect_matrix(qwu_embed_norm, dwu_embed_norm)\n\n dwu_embed_norm = torch.stack([dwu_embed_norm] * query_shape[1], dim=1).to(self.device)\n dwu_embed_norm = dwu_embed_norm.permute(0, 1, 3, 2)\n qwu_embed_norm = torch.stack([qwu_embed_norm] * desc_att_shape[1], dim=2).to(self.device)\n\n log_pooling_sum_wwuu = self.get_intersect_matrix_with_cos(qwu_embed_norm, dwu_embed_norm)\n\n term_weights = self.gating_network(emb_query)\n term_weights = torch.stack([term_weights] * self.nbins, dim=2).to(self.device)\n hist_feat=term_weights*log_pooling_sum_wwuu\n hist_feat=hist_feat.view([num_docs,-1])\n\n new_input = embedded_docs * embedded_queries\n\n new_input = new_input.permute(0, 3, 1, 2)\n convoluted_feat1 = self.conv1(new_input)\n convoluted_feat2 = self.conv2(new_input)\n convoluted_feat3 = self.conv3(new_input)\n convoluted_feat = self.relu(torch.cat((convoluted_feat1, convoluted_feat2, convoluted_feat3), 1))\n\n pooled_feat = self.pool(convoluted_feat)\n conv_all_feat = self.conv_all(pooled_feat)\n conv_all_feat = self.relu(conv_all_feat)\n\n conv_all_feat = self.conv_dim(conv_all_feat)\n\n conv_all_feat = conv_all_feat.permute(0, 2, 3, 1)\n\n max_pooled_feat = torch.max(conv_all_feat, 2)[0]\n max_pooled_feat = torch.max(max_pooled_feat, 1)[0]\n\n semantic_input = batch_semantic.type(torch.float32)\n if self.STR:\n final_feat = torch.cat((max_pooled_feat,hist_feat, semantic_input), dim=1)\n\n else:\n final_feat = torch.cat((max_pooled_feat, hist_feat), dim=1)\n\n final_score = self.output3(final_feat).squeeze(-1)\n\n\n return final_score\n\n\nclass GatingNetwork(nn.Module):\n \"\"\"Term gating network\"\"\"\n\n def __init__(self, emsize):\n \"\"\"\"Constructor of the class\"\"\"\n super(GatingNetwork, self).__init__()\n self.weight = nn.Linear(emsize, 1)\n\n def forward(self, term_embeddings):\n \"\"\"\"Defines the forward computation of the gating network layer.\"\"\"\n dot_out = self.weight(term_embeddings).squeeze(2)\n return f.softmax(dot_out, 1)\n"
] | [
[
"torch.nn.functional.normalize",
"torch.mean",
"torch.nn.functional.softmax",
"torch.clamp",
"torch.max",
"torch.Tensor",
"torch.cat",
"torch.nn.Conv2d",
"torch.sum",
"torch.exp",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.bmm",
"torch.nn.functional.cosine_similarity",
"torch.stack",
"torch.nn.ReLU",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
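The `get_intersect_matrix` methods in `dsrmm_model.py` above apply Gaussian (RBF) kernel pooling to a query-document similarity tensor, in the style of kernel-based neural ranking models. Below is a minimal standalone sketch of that pooling step, assuming toy shapes and the same bin layout as the model; `kernel_pool` and the toy sizes are illustrative names, not part of the repo.

```python
import torch

def kernel_pool(sim, mu, sigma):
    """RBF kernel pooling: soft-count similarity values into bins.

    sim: (batch, q_len, d_len, 1) cosine similarities
    mu, sigma: (1, 1, 1, n_bins) kernel centers and widths
    returns: (batch, q_len, n_bins) log-pooled kernel features
    """
    # Gaussian kernel response of every (query term, doc term) similarity to every bin
    pooling_value = torch.exp(-((sim - mu) ** 2) / (sigma ** 2) / 2)
    # soft match count: sum kernel responses over document terms
    pooling_sum = pooling_value.sum(dim=2)
    # log transform (clamped for stability), scaled as in the model above
    return torch.log(torch.clamp(pooling_sum, min=1e-10)) * 0.01

# toy example: batch of 2, query length 3, doc length 5, 5 kernels
mu = torch.tensor([-0.75, -0.25, 0.25, 0.75, 1.0]).view(1, 1, 1, 5)
sigma = torch.tensor([0.1, 0.1, 0.1, 0.1, 0.001]).view(1, 1, 1, 5)
sim = torch.rand(2, 3, 5, 1) * 2 - 1  # fake cosine similarities in [-1, 1]
print(kernel_pool(sim, mu, sigma).shape)  # torch.Size([2, 3, 5])
```

The clamp before the log keeps bins with no matching terms from driving the pooled feature to negative infinity.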
entn-at/TTS-Cube | [
"ac244ac1aa7cf2ce485e40ea54100329bc5961a7"
] | [
"cube/models/clarinet/modules.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom models.clarinet.loss import gaussian_loss, KL_gaussians\nimport numpy as np\nimport math\n\n\nclass Conv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, dilation=1, causal=False, mode='SAME'):\n super(Conv, self).__init__()\n\n self.causal = causal\n self.mode = mode\n if self.causal and self.mode == 'SAME':\n self.padding = dilation * (kernel_size - 1)\n elif self.mode == 'SAME':\n self.padding = dilation * (kernel_size - 1) // 2\n else:\n self.padding = 0\n self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, dilation=dilation, padding=self.padding)\n self.conv = nn.utils.weight_norm(self.conv)\n nn.init.kaiming_normal_(self.conv.weight)\n\n def forward(self, tensor):\n out = self.conv(tensor)\n if self.causal and self.padding is not 0:\n out = out[:, :, :-self.padding]\n return out\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_channels, out_channels, skip_channels, kernel_size, dilation,\n cin_channels=None, local_conditioning=True, causal=False, mode='SAME'):\n super(ResBlock, self).__init__()\n self.causal = causal\n self.local_conditioning = local_conditioning\n self.cin_channels = cin_channels\n self.mode = mode\n\n self.filter_conv = Conv(in_channels, out_channels, kernel_size, dilation, causal, mode)\n self.gate_conv = Conv(in_channels, out_channels, kernel_size, dilation, causal, mode)\n self.res_conv = nn.Conv1d(out_channels, in_channels, kernel_size=1)\n self.skip_conv = nn.Conv1d(out_channels, skip_channels, kernel_size=1)\n self.res_conv = nn.utils.weight_norm(self.res_conv)\n self.skip_conv = nn.utils.weight_norm(self.skip_conv)\n nn.init.kaiming_normal_(self.res_conv.weight)\n nn.init.kaiming_normal_(self.skip_conv.weight)\n\n if self.local_conditioning:\n self.filter_conv_c = nn.Conv1d(cin_channels, out_channels, kernel_size=1)\n self.gate_conv_c = nn.Conv1d(cin_channels, out_channels, kernel_size=1)\n self.filter_conv_c = nn.utils.weight_norm(self.filter_conv_c)\n self.gate_conv_c = nn.utils.weight_norm(self.gate_conv_c)\n nn.init.kaiming_normal_(self.filter_conv_c.weight)\n nn.init.kaiming_normal_(self.gate_conv_c.weight)\n\n def forward(self, tensor, c=None):\n h_filter = self.filter_conv(tensor)\n h_gate = self.gate_conv(tensor)\n\n if self.local_conditioning:\n h_filter += self.filter_conv_c(c)\n h_gate += self.gate_conv_c(c)\n\n out = torch.tanh(h_filter) * torch.sigmoid(h_gate)\n\n res = self.res_conv(out)\n skip = self.skip_conv(out)\n if self.mode == 'SAME':\n return (tensor + res) * math.sqrt(0.5), skip\n else:\n return (tensor[:, :, 1:] + res) * math.sqrt(0.5), skip\n\n\nclass GaussianLoss(nn.Module):\n def __init__(self):\n super(GaussianLoss, self).__init__()\n\n def forward(self, input, target, size_average=True):\n losses = gaussian_loss(input, target)\n if size_average:\n return losses.mean()\n else:\n return losses.mean(1).sum(0)\n\n\nclass KL_Loss(nn.Module):\n def __init__(self):\n super(KL_Loss, self).__init__()\n\n def forward(self, mu_q, logs_q, mu_p, logs_p, regularization=True, size_average=True):\n KL_loss, reg_loss = KL_gaussians(mu_q, logs_q, mu_p, logs_p, regularization=regularization)\n loss_tot = KL_loss + reg_loss * 4.\n\n if size_average:\n return loss_tot.mean(), KL_loss.mean(), reg_loss.mean()\n else:\n return loss_tot.sum(), KL_loss.sum(), reg_loss.sum()\n\n\nclass ExponentialMovingAverage(object):\n def __init__(self, decay):\n self.decay = decay\n self.shadow = {}\n\n def register(self, name, val):\n self.shadow[name] 
= val.clone()\n\n def update(self, name, x):\n assert name in self.shadow\n new_average = self.decay * x + (1.0 - self.decay) * self.shadow[name]\n self.shadow[name] = new_average.clone()\n\ndef stft(y, scale='linear'):\n D = torch.stft(y, n_fft=1024, hop_length=256, win_length=1024, window=torch.hann_window(1024).cuda())\n D = torch.sqrt(D.pow(2).sum(-1) + 1e-10)\n # D = torch.sqrt(torch.clamp(D.pow(2).sum(-1), min=1e-10))\n if scale == 'linear':\n return D\n elif scale == 'log':\n S = 2 * torch.log(torch.clamp(D, 1e-10, float(\"inf\")))\n return S\n else:\n pass\n\n# STFT code is adapted from: https://github.com/pseeth/pytorch-stft\nclass STFT(torch.nn.Module):\n def __init__(self, filter_length=1024, hop_length=256):\n super(STFT, self).__init__()\n\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.forward_transform = None\n scale = self.filter_length / self.hop_length\n fourier_basis = np.fft.fft(np.eye(self.filter_length))\n\n cutoff = int((self.filter_length / 2 + 1))\n fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),\n np.imag(fourier_basis[:cutoff, :])])\n forward_basis = torch.tensor(fourier_basis[:, None, :])\n inverse_basis = torch.tensor(np.linalg.pinv(scale * fourier_basis).T[:, None, :])\n\n self.register_buffer('forward_basis', forward_basis.float())\n self.register_buffer('inverse_basis', inverse_basis.float())\n\n def forward(self, input_data):\n num_batches, _, num_samples = input_data.size()\n\n self.num_samples = num_samples\n\n forward_transform = F.conv1d(input_data,\n self.forward_basis,\n stride=self.hop_length,\n padding=self.filter_length)\n cutoff = int((self.filter_length / 2) + 1)\n real_part = forward_transform[:, :cutoff, :]\n imag_part = forward_transform[:, cutoff:, :]\n\n magnitude = torch.sqrt(real_part**2 + imag_part**2)\n phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data))\n return magnitude, phase\n\n def inverse(self, magnitude, phase):\n recombine_magnitude_phase = torch.cat([magnitude*torch.cos(phase),\n magnitude*torch.sin(phase)], dim=1)\n\n inverse_transform = F.conv_transpose1d(recombine_magnitude_phase,\n self.inverse_basis,\n stride=self.hop_length,\n padding=0)\n inverse_transform = inverse_transform[:, :, self.filter_length:]\n inverse_transform = inverse_transform[:, :, :self.num_samples]\n return inverse_transform\n"
] | [
[
"torch.cos",
"torch.sigmoid",
"numpy.imag",
"torch.sin",
"torch.sqrt",
"torch.nn.utils.weight_norm",
"numpy.eye",
"torch.nn.functional.conv1d",
"torch.nn.functional.conv_transpose1d",
"torch.tensor",
"torch.tanh",
"numpy.linalg.pinv",
"numpy.real",
"torch.nn.Conv1d",
"torch.hann_window",
"torch.atan2",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
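The `Conv` module in the clarinet `modules.py` above makes a 1D convolution causal by padding with `dilation * (kernel_size - 1)` and slicing that many steps off the tail. A small self-contained sketch of the same trick, with a check that perturbing a future input cannot change past outputs (the class name `CausalConv1d` is mine):

```python
import torch
import torch.nn as nn

class CausalConv1d(nn.Module):
    """Left-padded 1D convolution: output[t] depends only on input[:t+1]."""
    def __init__(self, channels, kernel_size, dilation=1):
        super().__init__()
        self.padding = dilation * (kernel_size - 1)
        self.conv = nn.Conv1d(channels, channels, kernel_size,
                              dilation=dilation, padding=self.padding)

    def forward(self, x):
        out = self.conv(x)
        # symmetric padding added `padding` steps on both sides;
        # dropping the tail keeps only the causal (left) context
        if self.padding != 0:
            out = out[:, :, :-self.padding]
        return out

# sanity check: changing a future input must not change earlier outputs
conv = CausalConv1d(channels=1, kernel_size=3, dilation=2).eval()
x = torch.zeros(1, 1, 10)
y1 = conv(x)
x2 = x.clone()
x2[0, 0, -1] = 1.0                    # perturb only the last timestep
y2 = conv(x2)
print(torch.allclose(y1[..., :-1], y2[..., :-1]))  # True
```

Note the sketch uses `self.padding != 0`; the original file's `self.padding is not 0` relies on small-integer identity and raises a SyntaxWarning on Python 3.8+.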
biergaiqiao/Oriole-Thwarting-Privacy-against-Trustworthy-Deep-Learning-Models | [
"ffadb82b666e8c1561a036a10d9922db8a3266cc"
] | [
"oriole/function.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 10 08:45:03 2020\r\n\r\n@author: 一叶之秋\r\n\"\"\"\r\nimport numpy as np\r\nfrom fawkes.utils import extractor_ls_predict\r\nfrom fawkes.utils import pairwise_l2_distance\r\n\r\n\r\n####这里的loaded_images必须进行归一化处理\r\ndef compute_points(loaded_images,feature_extractors_ls):\r\n \r\n points = np.zeros((len(loaded_images),1024))\r\n points = extractor_ls_predict(feature_extractors_ls,loaded_images)###这是对所有加载的图片进行预测\r\n mean = np.average(points[1:],axis = 0)\r\n radius = pairwise_l2_distance(mean.reshape((1,1024)),points[1:,:])\r\n \r\n original_distance = pairwise_l2_distance(points[0,:],mean.reshape(1,1024))\r\n \r\n return points"
] | [
[
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
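`compute_points` in `oriole/function.py` above leans on `fawkes.utils.pairwise_l2_distance`, whose implementation is not shown in this record. A plausible broadcast-based numpy equivalent, under the assumption (mine, not confirmed by the source) that it returns the (m, n) matrix of row-wise Euclidean distances:

```python
import numpy as np

def pairwise_l2_distance(A, B):
    """Euclidean distance between every row of A (m, d) and every row of B (n, d) -> (m, n)."""
    A = np.atleast_2d(A)
    B = np.atleast_2d(B)
    diff = A[:, None, :] - B[None, :, :]      # (m, n, d) via broadcasting
    return np.sqrt((diff ** 2).sum(axis=-1))  # (m, n)

points = np.random.rand(5, 1024)
mean = np.average(points[1:], axis=0)          # centroid of points 1..4
radius = pairwise_l2_distance(mean.reshape(1, -1), points[1:])
print(radius.shape)  # (1, 4)
```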
Joseph-ML/training-data-analyst | [
"a96d73f0b9630935337bab7bb624da87160d1865"
] | [
"blogs/rl-on-gcp/cartpole_policy_gradients/rl_model_code/trainer/model.py"
] | [
"# Copyright 2018 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Simple policy gradients for Reinforcement learning on Google Cloud.\n\nAlso includes code for hyperparameter tuning. Adapted from:\nhttps://github.com/ageron/handson-ml/blob/master/16_reinforcement_learning.ipynb\n\"\"\"\n\nimport json\nimport os\n\nimport gym\nimport numpy as np\nimport tensorflow as tf\n\ntf.reset_default_graph()\n\n# task.py arguments.\nN_GAMES_PER_UPDATE = None\nDISCOUNT_RATE = None\nN_HIDDEN = None\nLEARNING_RATE = None\n\n# Currently hardcoded.\nn_max_steps = 1000\nn_iterations = 30\nsave_iterations = 5\n\n# For cartpole.\nenv = gym.make('CartPole-v0')\nn_inputs = 4\nn_outputs = 1\n\n\ndef discount_rewards(rewards, discount_rate):\n discounted_rewards = np.zeros(len(rewards))\n cumulative_rewards = 0\n for step in reversed(range(len(rewards))):\n cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate\n discounted_rewards[step] = cumulative_rewards\n return discounted_rewards\n\n\ndef discount_and_normalize_rewards(all_rewards, discount_rate):\n all_discounted_rewards = [\n discount_rewards(rewards, discount_rate) for rewards in all_rewards\n ]\n flat_rewards = np.concatenate(all_discounted_rewards)\n reward_mean = flat_rewards.mean()\n reward_std = flat_rewards.std()\n return [(discounted_rewards - reward_mean) / reward_std\n for discounted_rewards in all_discounted_rewards]\n\n\ndef hp_directory(model_dir):\n \"\"\"If running a hyperparam job, create subfolder name with trial ID.\n\n If not running a hyperparam job, just keep original model_dir.\n \"\"\"\n trial_id = json.loads(os.environ.get('TF_CONFIG', '{}')).get('task', {}).get(\n 'trial', '')\n return os.path.join(model_dir, trial_id)\n\n\n# Play games and train agent. Or evaluate and make gifs.\ndef run(outdir, train_mode):\n\n # Build network.\n initializer = tf.keras.initializers.VarianceScaling()\n X = tf.placeholder(tf.float32, shape=[None, n_inputs])\n hidden = tf.layers.dense(\n X, N_HIDDEN, activation=tf.nn.elu, kernel_initializer=initializer)\n logits = tf.layers.dense(hidden, n_outputs)\n outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)\n p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])\n action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)\n\n # Optimizer, gradients.\n y = 1. 
- tf.to_float(action)\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=y, logits=logits)\n optimizer = tf.train.AdamOptimizer(LEARNING_RATE)\n grads_and_vars = optimizer.compute_gradients(cross_entropy)\n gradients = [grad for grad, variable in grads_and_vars]\n gradient_placeholders = []\n grads_and_vars_feed = []\n for grad, variable in grads_and_vars:\n gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())\n gradient_placeholders.append(gradient_placeholder)\n grads_and_vars_feed.append((gradient_placeholder, variable))\n training_op = optimizer.apply_gradients(grads_and_vars_feed)\n\n # For TensorBoard.\n episode_reward = tf.placeholder(dtype=tf.float32, shape=[])\n tf.summary.scalar('reward', episode_reward)\n\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n\n if train_mode:\n hp_save_dir = hp_directory(outdir)\n with tf.Session() as sess:\n init.run()\n # For TensorBoard.\n print('hp_save_dir')\n train_writer = tf.summary.FileWriter(hp_save_dir, sess.graph)\n for iteration in range(n_iterations):\n all_rewards = []\n all_gradients = []\n for game in range(N_GAMES_PER_UPDATE):\n current_rewards = []\n current_gradients = []\n obs = env.reset()\n for _ in range(n_max_steps):\n action_val, gradients_val = sess.run(\n [action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})\n obs, reward, done, info = env.step(action_val[0][0])\n current_rewards.append(reward)\n current_gradients.append(gradients_val)\n if done:\n break\n all_rewards.append(current_rewards)\n all_gradients.append(current_gradients)\n avg_reward = np.mean(([np.sum(r) for r in all_rewards]))\n\n print('\\rIteration: {}, Reward: {}'.format(\n iteration, avg_reward, end=''))\n all_rewards = discount_and_normalize_rewards(\n all_rewards, discount_rate=DISCOUNT_RATE)\n feed_dict = {}\n for var_index, gradient_placeholder in enumerate(gradient_placeholders):\n mean_gradients = np.mean([\n reward * all_gradients[game_index][step][var_index]\n for game_index, rewards in enumerate(all_rewards)\n for step, reward in enumerate(rewards)\n ],\n axis=0)\n feed_dict[gradient_placeholder] = mean_gradients\n sess.run(training_op, feed_dict=feed_dict)\n if iteration % save_iterations == 0:\n print('Saving model to ', hp_save_dir)\n model_file = '{}/my_policy_net_pg.ckpt'.format(hp_save_dir)\n saver.save(sess, model_file)\n # Also save event files for TB.\n merge = tf.summary.merge_all()\n summary = sess.run(merge, feed_dict={episode_reward: avg_reward})\n train_writer.add_summary(summary, iteration)\n obs = env.reset()\n steps = []\n done = False\n else: # Make a gif.\n from moviepy.editor import ImageSequenceClip\n model_file = '{}/my_policy_net_pg.ckpt'.format(outdir)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.restore(sess, save_path=model_file)\n # Run model.\n obs = env.reset()\n done = False\n steps = []\n rewards = []\n while not done:\n s = env.render('rgb_array')\n steps.append(s)\n action_val = sess.run(action, feed_dict={X: obs.reshape(1, n_inputs)})\n obs, reward, done, info = env.step(action_val[0][0])\n rewards.append(reward)\n print('Final reward :', np.mean(rewards))\n clip = ImageSequenceClip(steps, fps=30)\n clip.write_gif('cartpole.gif', fps=30)\n"
] | [
[
"tensorflow.concat",
"numpy.concatenate",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"numpy.mean",
"tensorflow.keras.initializers.VarianceScaling",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar",
"tensorflow.layers.dense",
"tensorflow.reset_default_graph",
"tensorflow.to_float",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.nn.sigmoid",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"numpy.sum",
"tensorflow.summary.FileWriter",
"tensorflow.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
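`discount_rewards` in the cartpole `model.py` above accumulates returns backwards, G[t] = r[t] + gamma * G[t+1]. A worked toy example of that recursion, using only numpy:

```python
import numpy as np

def discount_rewards(rewards, discount_rate):
    """Backward pass: G[t] = r[t] + gamma * G[t+1]."""
    discounted = np.zeros(len(rewards))
    cumulative = 0.0
    for step in reversed(range(len(rewards))):
        cumulative = rewards[step] + cumulative * discount_rate
        discounted[step] = cumulative
    return discounted

# worked example: rewards [1, 1, 1], gamma = 0.9
# G[2] = 1, G[1] = 1 + 0.9 * 1 = 1.9, G[0] = 1 + 0.9 * 1.9 = 2.71
out = discount_rewards([1.0, 1.0, 1.0], 0.9)
assert np.allclose(out, [2.71, 1.9, 1.0])
print(out)
```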
W-yk/privacy | [
"5f21b040ded4132346e025b73246ecf4dfa32604"
] | [
"tutorials/cifar.py"
] | [
"# Copyright 2019, The TensorFlow Authors.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"Training a CNN on MNIST with Keras and the DP SGD optimizer.\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom absl import app\r\nfrom absl import flags\r\nfrom absl import logging\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport os\r\n\r\nfrom tensorflow.python.ops.gen_math_ops import TruncateDiv\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"2\"\r\nimport sys\r\nsys.path.append(\"..\")\r\n\r\nfrom tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp\r\nfrom tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent\r\nfrom tensorflow_privacy.privacy.optimizers.dp_optimizer_keras_vectorized_ import VectorizedDPKerasSGDOptimizer\r\nfrom tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Input, BatchNormalization, Dropout, Activation\r\nfrom tensorflow.keras import Model, regularizers, Sequential\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\n\r\nflags.DEFINE_boolean(\r\n 'dpsgd', True, 'If True, train with DP-SGD. If False, '\r\n 'train with vanilla SGD.')\r\nflags.DEFINE_float('learning_rate', 0.1, 'Learning rate for training')\r\nflags.DEFINE_float('noise_multiplier', 0.45,\r\n 'Ratio of the standard deviation to the clipping norm')\r\nflags.DEFINE_float('l2_norm_clip', 10.0, 'Clipping norm')\r\nflags.DEFINE_integer('batch_size', 16, 'Batch size')\r\nflags.DEFINE_integer('lot_size', 256, 'Lot size')\r\nflags.DEFINE_integer('epochs', 100, 'Number of epochs')\r\nflags.DEFINE_integer(\r\n 'microbatches', 16, 'Number of microbatches '\r\n '(must evenly divide batch_size)')\r\nflags.DEFINE_string('model_dir', None, 'Model directory')\r\n\r\nFLAGS = flags.FLAGS\r\n\r\n\r\ndef compute_epsilon(steps):\r\n \"\"\"Computes epsilon value for given hyperparameters.\"\"\"\r\n if FLAGS.noise_multiplier == 0.0:\r\n return float('inf')\r\n orders = [1 + x / 10. 
for x in range(1, 100)] + list(range(12, 64))\r\n sampling_probability = FLAGS.batch_size / 60000\r\n rdp = compute_rdp(q=sampling_probability,\r\n noise_multiplier=FLAGS.noise_multiplier,\r\n steps=steps,\r\n orders=orders)\r\n # Delta is set to 1e-5 because MNIST has 60000 training points.\r\n return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]\r\n\r\n\r\ndef load_cifar():\r\n \"\"\"Loads MNIST and preprocesses to combine training and validation data.\"\"\"\r\n train, test = tf.keras.datasets.cifar10.load_data()\r\n train_data, train_labels = train\r\n test_data, test_labels = test\r\n\r\n train_data = np.array(train_data, dtype=np.float32) / 255\r\n test_data = np.array(test_data, dtype=np.float32) / 255\r\n\r\n # train_data = train_data.reshape((train_data.shape[0], 32, 32, 3))\r\n # test_data = test_data.reshape((test_data.shape[0], 32, 32, 3))\r\n\r\n train_labels = np.array(train_labels, dtype=np.int32)\r\n test_labels = np.array(test_labels, dtype=np.int32)\r\n\r\n train_labels = tf.keras.utils.to_categorical(train_labels, num_classes=10)\r\n test_labels = tf.keras.utils.to_categorical(test_labels, num_classes=10)\r\n print(len(train_labels),'****************************')\r\n assert train_data.min() == 0.\r\n assert train_data.max() == 1.\r\n assert test_data.min() == 0.\r\n assert test_data.max() == 1.\r\n\r\n return train_data, train_labels, test_data, test_labels\r\n\r\n\r\nclass MobileNetv2(Model):\r\n def __init__(self,batch_per_lot):\r\n super(MobileNetv2, self).__init__()\r\n self.batch_per_lot = batch_per_lot\r\n self.apply_flag = tf.Variable(False, dtype=tf.bool, trainable=False)\r\n self.weight_decay = 0.0005\r\n\r\n self.base_model=tf.keras.applications.mobilenet_v2.MobileNetV2(alpha=1,include_top=False,input_shape=(32,32,3),weights=None)\r\n # self.base_model.trainable = False,,weights=None\r\n self.flatten = Flatten()\r\n self.dense = Dense(10, activation='softmax', name='fc')\r\n\r\n def call(self, x):\r\n x=self.base_model(x)\r\n x = self.flatten(x)\r\n x = self.dense(x)\r\n return x\r\n\r\n def build_model(self):\r\n x = Input(shape=(32,32,3))\r\n model = Model(inputs=[x], outputs=self.call(x))\r\n self.accumulated_grads = [tf.Variable(tf.zeros_like(var), trainable=False) for var in self.trainable_variables]\r\n return model\r\n \r\n def apply(self):\r\n gradients = [g / self.batch_per_lot for g in self.accumulated_grads]\r\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\r\n for g in self.accumulated_grads:\r\n g.assign(tf.zeros_like(g))\r\n return\r\n\r\n def not_apply(self):\r\n return\r\n\r\n def train_step(self, data):\r\n images, labels = data\r\n with tf.GradientTape() as tape:\r\n predictions = self(images, training=True)\r\n loss = self.compiled_loss(labels, predictions)\r\n # add the regularization\r\n regularization = sum(self.losses)\r\n loss += regularization\r\n\r\n noised_gradients = list(zip(*self.optimizer._compute_gradients(\r\n loss, self.trainable_variables, tape=tape)))[0]\r\n\r\n for g, new_grad in zip(self.accumulated_grads, noised_gradients):\r\n g.assign_add(new_grad)\r\n\r\n tf.cond(self.apply_flag, lambda: self.apply(), lambda: self.not_apply())\r\n\r\n # self.optimizer.apply_gradients(zip(noised_gradients, self.trainable_variables))\r\n self.compiled_metrics.update_state(labels, predictions)\r\n\r\n return {m.name: m.result() for m in self.metrics}\r\n\r\n def test_step(self, data):\r\n images, labels = data\r\n predictions = self(images, training=False)\r\n self.compiled_metrics.update_state(labels, 
predictions)\r\n\r\n return {m.name: m.result() for m in self.metrics}\r\n\r\n\r\nclass VGG16(Model):\r\n def __init__(self, batch_per_lot):\r\n super(VGG16, self).__init__()\r\n self.batch_per_lot = batch_per_lot\r\n self.apply_flag = tf.Variable(False, dtype=tf.bool, trainable=False)\r\n self.weight_decay = 0.0005\r\n\r\n # Block 1\r\n self.conv1_1 = Conv2D(64, 3, activation='relu', padding='same', input_shape=[\r\n 32, 32, 3], kernel_regularizer=regularizers.l2(self.weight_decay), name='block1_conv1', trainable=True)\r\n self.bn1_1 = BatchNormalization()\r\n self.drop1 = Dropout(0.3)\r\n self.conv1_2 = Conv2D(64, 3, activation='relu', padding='same', kernel_regularizer=regularizers.l2(\r\n self.weight_decay), name='block1_conv2', trainable=True)\r\n self.bn1_2 = BatchNormalization()\r\n self.pool1 = MaxPool2D(pool_size=(2, 2))\r\n\r\n # Block 2\r\n self.conv2_1 = Conv2D(128, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block2_conv1', trainable=True)\r\n self.bn2_1 = BatchNormalization()\r\n self.drop2 = Dropout(0.4)\r\n self.conv2_2 = Conv2D(128, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block2_conv2', trainable=True)\r\n self.bn2_2 = BatchNormalization()\r\n self.pool2 = MaxPool2D(pool_size=(2, 2))\r\n\r\n # Block 3\r\n self.conv3_1 = Conv2D(256, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block3_conv1', trainable=True)\r\n self.bn3_1 = BatchNormalization()\r\n self.drop3 = Dropout(0.4)\r\n self.conv3_2 = Conv2D(256, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block3_conv2', trainable=True)\r\n self.bn3_2 = BatchNormalization()\r\n self.drop4 = Dropout(0.4)\r\n self.conv3_3 = Conv2D(256, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block3_conv3', trainable=True)\r\n self.bn3_3 = BatchNormalization()\r\n self.pool3 = MaxPool2D(pool_size=(2, 2))\r\n\r\n # Block 4\r\n self.conv4_1 = Conv2D(512, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block4_conv1', trainable=True)\r\n self.bn4_1 = BatchNormalization()\r\n self.drop5 = Dropout(0.4)\r\n self.conv4_2 = Conv2D(512, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block4_conv2', trainable=True)\r\n self.bn4_2 = BatchNormalization()\r\n self.drop6 = Dropout(0.4)\r\n self.conv4_3 = Conv2D(512, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block4_conv3', trainable=True)\r\n self.bn4_3 = BatchNormalization()\r\n self.pool4 = MaxPool2D(pool_size=(2, 2))\r\n\r\n # Block 5\r\n self.conv5_1 = Conv2D(512, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block5_conv1', trainable=True)\r\n self.bn5_1 = BatchNormalization()\r\n self.drop7 = Dropout(0.4)\r\n self.conv5_2 = Conv2D(512, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block5_conv2', trainable=True)\r\n self.bn5_2 = BatchNormalization()\r\n self.drop8 = Dropout(0.4)\r\n self.conv5_3 = Conv2D(512, 3, activation='relu', padding='same',\r\n kernel_regularizer=regularizers.l2(self.weight_decay), name='block5_conv3', trainable=True)\r\n self.bn5_3 = BatchNormalization()\r\n self.pool5 = MaxPool2D(pool_size=(2, 2))\r\n self.drop9 = 
Dropout(0.5)\r\n\r\n self.flatten = Flatten()\r\n self.d1 = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(\r\n self.weight_decay), name='fc1')\r\n self.bn1 = BatchNormalization()\r\n self.drop10 = Dropout(0.5)\r\n self.d2 = Dense(10, activation='softmax', name='fc2')\r\n\r\n def call(self, x):\r\n x = self.conv1_1(x)\r\n x = self.bn1_1(x)\r\n x = self.drop1(x)\r\n x = self.conv1_2(x)\r\n x = self.bn1_2(x)\r\n x = self.pool1(x)\r\n x = self.conv2_1(x)\r\n x = self.bn2_1(x)\r\n x = self.drop2(x)\r\n x = self.conv2_2(x)\r\n x = self.bn2_2(x)\r\n x = self.pool2(x)\r\n x = self.conv3_1(x)\r\n x = self.bn3_1(x)\r\n x = self.drop3(x)\r\n x = self.conv3_2(x)\r\n x = self.bn3_2(x)\r\n x = self.drop4(x)\r\n x = self.conv3_3(x)\r\n x = self.bn3_3(x)\r\n x = self.pool3(x)\r\n x = self.conv4_1(x)\r\n x = self.bn4_1(x)\r\n x = self.drop5(x)\r\n x = self.conv4_2(x)\r\n x = self.bn4_2(x)\r\n x = self.drop6(x)\r\n x = self.conv4_3(x)\r\n x = self.bn4_3(x)\r\n x = self.pool4(x)\r\n x = self.conv5_1(x)\r\n x = self.bn5_1(x)\r\n x = self.drop7(x)\r\n x = self.conv5_2(x)\r\n x = self.bn5_2(x)\r\n x = self.drop8(x)\r\n x = self.conv5_3(x)\r\n x = self.bn5_3(x)\r\n x = self.pool5(x)\r\n x = self.drop9(x)\r\n x = self.flatten(x)\r\n x = self.d1(x)\r\n x = self.bn1(x)\r\n x = self.drop10(x)\r\n return self.d2(x)\r\n\r\n def build_model(self):\r\n x = Input(shape=(32, 32, 3))\r\n model = Model(inputs=[x], outputs=self.call(x))\r\n self.accumulated_grads = [tf.Variable(tf.zeros_like(\r\n var), trainable=False) for var in self.trainable_variables]\r\n return model\r\n\r\n def apply(self):\r\n gradients = [g / self.batch_per_lot for g in self.accumulated_grads]\r\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\r\n for g in self.accumulated_grads:\r\n g.assign(tf.zeros_like(g))\r\n return\r\n\r\n def not_apply(self):\r\n return\r\n\r\n def train_step(self, data):\r\n images, labels = data\r\n with tf.GradientTape() as tape:\r\n predictions = self(images, training=True)\r\n loss = self.compiled_loss(labels, predictions)\r\n # add the regularization\r\n regularization = sum(self.losses)\r\n loss += regularization\r\n\r\n noised_gradients = list(zip(*self.optimizer._compute_gradients(\r\n loss, self.trainable_variables, tape=tape)))[0]\r\n\r\n for g, new_grad in zip(self.accumulated_grads, noised_gradients):\r\n g.assign_add(new_grad)\r\n\r\n tf.cond(self.apply_flag, lambda: self.apply(), lambda: self.not_apply())\r\n\r\n # self.optimizer.apply_gradients(zip(noised_gradients, self.trainable_variables))\r\n self.compiled_metrics.update_state(labels, predictions)\r\n\r\n return {m.name: m.result() for m in self.metrics}\r\n\r\n def test_step(self, data):\r\n images, labels = data\r\n predictions = self(images, training=False)\r\n self.compiled_metrics.update_state(labels, predictions)\r\n\r\n return {m.name: m.result() for m in self.metrics}\r\n\r\nclass VGG16_v2(Model):\r\n def __init__(self, batch_per_lot):\r\n super(VGG16_v2, self).__init__()\r\n self.batch_per_lot = batch_per_lot\r\n self.apply_flag = tf.Variable(False, dtype=tf.bool, trainable=False)\r\n self.weight_decay = 0.005\r\n\r\n self.base_model=tf.keras.applications.VGG16(\r\n include_top=False, weights='imagenet', input_tensor=None,\r\n input_shape=(32,32,3), pooling=None, classes=10,\r\n classifier_activation='softmax'\r\n )\r\n self.flatten = Flatten()\r\n self.d1 = Dense(4096, activation='relu', kernel_regularizer=regularizers.l2(\r\n self.weight_decay), name='fc1')\r\n self.bn1 = BatchNormalization()\r\n 
self.drop10 = Dropout(0.2)\r\n self.d2 = Dense(512, activation='relu', kernel_regularizer=regularizers.l2(\r\n self.weight_decay), name='fc2')\r\n self.drop11 = Dropout(0.4)\r\n self.d3 = Dense(10, activation='softmax', name='fc3')\r\n\r\n def call(self, x):\r\n x=self.base_model(x)\r\n x = self.flatten(x)\r\n x = self.d1(x)\r\n x = self.bn1(x)\r\n x = self.drop10(x)\r\n x = self.d2(x)\r\n x = self.drop11(x)\r\n return self.d3(x)\r\n\r\n def build_model(self):\r\n x = Input(shape=(32,32,3))\r\n model = Model(inputs=[x], outputs=self.call(x))\r\n self.accumulated_grads = [tf.Variable(tf.zeros_like(var), trainable=False) for var in self.trainable_variables]\r\n self.back_grads = [tf.Variable(tf.zeros_like(\r\n var), trainable=False) for var in self.trainable_variables]\r\n \r\n return model\r\n \r\n def apply(self):\r\n gradients = [g / self.batch_per_lot for g in self.accumulated_grads]\r\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\r\n for g in self.accumulated_grads:\r\n g.assign(tf.zeros_like(g))\r\n return\r\n\r\n def not_apply(self):\r\n return\r\n\r\n def train_step(self, data):\r\n images, labels = data\r\n with tf.GradientTape() as tape:\r\n predictions = self(images, training=True)\r\n loss = self.compiled_loss(labels, predictions)\r\n # add the regularization\r\n regularization = sum(self.losses)\r\n loss += regularization\r\n\r\n noised_gradients = list(zip(*self.optimizer._compute_gradients(\r\n loss, self.trainable_variables, tape=tape)))[0]\r\n\r\n for g, new_grad in zip(self.accumulated_grads, noised_gradients):\r\n g.assign_add(new_grad)\r\n\r\n tf.cond(self.apply_flag, lambda: self.apply(), lambda: self.not_apply())\r\n\r\n # self.optimizer.apply_gradients(zip(noised_gradients, self.trainable_variables))\r\n self.compiled_metrics.update_state(labels, predictions)\r\n\r\n return {m.name: m.result() for m in self.metrics}\r\n\r\n def test_step(self, data):\r\n images, labels = data\r\n predictions = self(images, training=False)\r\n self.compiled_metrics.update_state(labels, predictions)\r\n\r\n return {m.name: m.result() for m in self.metrics}\r\n\r\nclass TestCallback(tf.keras.callbacks.Callback):\r\n def on_train_batch_begin(self, batch, logs=None):\r\n if (batch + 1) % self.model.batch_per_lot == 0:\r\n self.model.apply_flag.assign(True)\r\n else:\r\n self.model.apply_flag.assign(False)\r\n # print('\\nStep: {}, Apply Flag: {}\\n'.format(batch, self.model.apply_flag))\r\n\r\n # def on_epoch_begin(self, epoch, logs={}):\r\n # if epoch > 1:\r\n # for l in self.model.layers:\r\n # if 'conl' in l.name:\r\n # l.trainable = False\r\n # if 'conv' in l.name:\r\n # l.trainable = True\r\n\r\n\r\ndef main(unused_argv):\r\n logging.set_verbosity(logging.INFO)\r\n if FLAGS.dpsgd and FLAGS.batch_size % FLAGS.microbatches != 0:\r\n raise ValueError('Number of microbatches should divide evenly batch_size')\r\n\r\n # Load training and test data.\r\n train_data, train_labels, test_data, test_labels = load_cifar()\r\n datagen = ImageDataGenerator(\r\n rotation_range=15,\r\n width_shift_range=0.1,\r\n height_shift_range=0.1,\r\n horizontal_flip=True)\r\n\r\n # Define a sequential Keras model\r\n batch_per_lot = int(FLAGS.lot_size / FLAGS.batch_size)\r\n model = VGG16_v2(batch_per_lot)\r\n m = model.build_model()\r\n m.summary()\r\n model(test_data)\r\n for l in model.layers:\r\n print(l.name, l.trainable)\r\n\r\n # # Loading pretrain model\r\n # initial_weights = [layer.get_weights() for layer in model.layers]\r\n # #model.load_weights('pretrained_vgg16.h5', 
by_name=True)\r\n # for layer, initial in zip(model.layers, initial_weights):\r\n # weights = layer.get_weights()\r\n # if weights and all(tf.nest.map_structure(np.array_equal, weights, initial)):\r\n # print(f'Checkpoint contained no weights for layer {layer.name}!')\r\n # else:\r\n # print(f'Loading weights for layer {layer.name}!')\r\n\r\n if FLAGS.dpsgd:\r\n optimizer = VectorizedDPKerasSGDOptimizer(\r\n l2_norm_clip=FLAGS.l2_norm_clip,\r\n noise_multiplier=FLAGS.noise_multiplier,\r\n num_microbatches=FLAGS.microbatches,\r\n learning_rate=tf.keras.optimizers.schedules.ExponentialDecay(0.1, 4000, 0.5, staircase=True))\r\n # Compute vector of per-example loss rather than its mean over a minibatch.\r\n loss = tf.keras.losses.CategoricalCrossentropy(\r\n reduction=tf.losses.Reduction.NONE)\r\n else:\r\n optimizer = tf.keras.optimizers.SGD(learning_rate=FLAGS.learning_rate)\r\n loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\r\n\r\n # Compile model with Keras\r\n model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\r\n\r\n # Compute the privacy budget expended.\r\n if FLAGS.dpsgd:\r\n eps = compute_epsilon(FLAGS.epochs * 60000 // FLAGS.batch_size)\r\n print('For delta=1e-5, the current epsilon is: %.2f' % eps)\r\n\r\n\r\n else:\r\n print('Trained with vanilla non-private SGD optimizer')\r\n\r\n # Train model with Keras\r\n model.fit(datagen.flow(train_data, train_labels, batch_size=FLAGS.batch_size),\r\n steps_per_epoch=(50000/FLAGS.batch_size),\r\n epochs=FLAGS.epochs,\r\n validation_data=(test_data, test_labels),\r\n callbacks=[TestCallback()])\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(main)\r\n"
] | [
[
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.Variable",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.applications.mobilenet_v2.MobileNetV2",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"tensorflow.zeros_like",
"tensorflow.keras.applications.VGG16",
"numpy.array",
"tensorflow.GradientTape",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"tensorflow.keras.datasets.cifar10.load_data",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
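The `train_step`/`apply` pattern in `cifar.py` above accumulates per-batch DP-noised gradients and applies their mean once per lot of `batch_per_lot` batches (the `tf.cond` fires on lot boundaries via `TestCallback`). A framework-neutral sketch of that accumulation schedule, with `LotAccumulator` as an illustrative name rather than anything from the repo:

```python
import numpy as np

class LotAccumulator:
    """Accumulate per-batch gradients; release their mean once per lot."""
    def __init__(self, shapes, batch_per_lot):
        self.batch_per_lot = batch_per_lot
        self.acc = [np.zeros(s) for s in shapes]
        self.seen = 0

    def add(self, grads):
        for a, g in zip(self.acc, grads):
            a += g
        self.seen += 1
        if self.seen == self.batch_per_lot:
            mean_grads = [a / self.batch_per_lot for a in self.acc]
            for a in self.acc:
                a[...] = 0.0          # reset for the next lot
            self.seen = 0
            return mean_grads          # caller applies these to the weights
        return None                    # keep accumulating

acc = LotAccumulator(shapes=[(2, 2)], batch_per_lot=4)
for i in range(8):
    step = acc.add([np.full((2, 2), float(i))])
    if step is not None:
        print('apply', step[0][0, 0])  # mean of 4 batch gradients: 1.5, then 5.5
```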
masstalde/uwb | [
"a220d03e604f47d6b884d21f8180f9d3998615a5"
] | [
"src/uwb_tracker_node.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"uwb_tracker_node.py: Streams tracked positions based on UWB multi-range messages.\"\"\"\n\nfrom __future__ import print_function\n\n__author__ = \"Benjamin Hepp\"\n__email__ = \"[email protected]\"\n__copyright__ = \"Copyright 2016 Benjamin Hepp\"\n\nimport select\nimport numpy as np\nimport roslib\nimport scipy.stats\nroslib.load_manifest('uwb')\nimport rospy\nimport tf\n\nimport uwb.msg\n\n\nclass UWBTracker(object):\n \"\"\"Position tracker for ultra-wideband range measurements.\n\n By default the z-coordinate of the state is ignored.\n This can be modified with the ROS parameter `ignore_z_position`.\n Topics and other options can also be modified with ROS parameters (see the `_read_configuration` method).\n \"\"\"\n\n class StateEstimate(object):\n \"\"\"State estimate consisting of a state vector and a covariance matrix.\n \"\"\"\n def __init__(self, state, covariance):\n \"\"\"Initialize a new state estimate.\n\n Args:\n state (numpy.ndarray): State vector.\n covariance (numpy.ndarray): Covariance matrix.\n \"\"\"\n self.state = state\n self.covariance = covariance\n\n def __init__(self):\n \"\"\"Initialize tracker.\n \"\"\"\n self._read_configuration()\n\n self.estimates = {}\n self.estimate_times = {}\n self.ikf_prev_outlier_flags = {}\n self.ikf_outlier_counts = {}\n self.outlier_thresholds = {}\n\n rospy.loginfo(\"Receiving multi-range-with-offsets messages from: {}\".format(self.uwb_multi_range_topic))\n rospy.loginfo(\"Publishing tracker messages to {}\".format(self.uwb_tracker_topic))\n rospy.loginfo(\"Publishing tracker transform as {} -> {}\".format(self.tracker_frame, self.target_frame))\n\n # ROS publishers and subscribers\n self.tracker_frame = self.tracker_frame\n self.target_frame = self.target_frame\n self.uwb_pub = rospy.Publisher(self.uwb_tracker_topic, uwb.msg.UWBTracker, queue_size=1)\n self.tf_broadcaster = tf.TransformBroadcaster()\n self.uwb_multi_range_sub = rospy.Subscriber(self.uwb_multi_range_topic, uwb.msg.UWBMultiRangeWithOffsets,\n self.handle_multi_range_message)\n\n def _read_configuration(self):\n \"\"\"Initialize configuration from ROS parameters.\n \"\"\"\n self.uwb_multi_range_topic = rospy.get_param('~multi_range_with_offsets_topic', '/uwb/multi_range_with_offsets')\n self.uwb_tracker_topic = rospy.get_param('~tracker_topic', '/uwb/tracker')\n self.tracker_frame = rospy.get_param('~tracker_frame', 'uwb')\n self.target_frame = rospy.get_param('~target_frame', 'target')\n\n # Get parameters for covariance matrices\n self.initial_position_covariance = rospy.get_param('~initial_position_covariance', 10)\n self.process_covariance_position = rospy.get_param('~process_covariance_position', 0)\n self.process_covariance_velocity = rospy.get_param('~process_covariance_velocity', 1)\n self.measurement_covariance = rospy.get_param('~measurement_covariance', 0.1 ** 2)\n\n # Get parameters for filter update and initial gauss-newton estimation\n self.ignore_z_position = rospy.get_param('~ignore_z_position', True)\n # The default value of 7.779 represents the 0.9 quantile of a Chi-Square distribution\n # with 4 degrees of freedom (4 UWB measurements).\n self.outlier_threshold_quantile = rospy.get_param('~outlier_threshold_quantile', 0.1)\n self.ikf_iterations = rospy.get_param('~ikf_iterations', 2)\n self.initial_guess_position = np.empty((3, 1), dtype=np.float)\n self.initial_guess_position[0] = rospy.get_param('~initial_guess_position_x', 0)\n self.initial_guess_position[1] = rospy.get_param('~initial_guess_position_y', 0)\n 
self.initial_guess_position[2] = rospy.get_param('~initial_guess_position_z', 0)\n self.initial_guess_iterations = rospy.get_param('~initial_guess_iterations', 200)\n self.initial_guess_tolerance = rospy.get_param('~initial_guess_tolerance', 1e-5)\n self.initial_guess_residuals_threshold = rospy.get_param('~initial_guess_residuals_threshold', 0.1)\n self.ikf_max_outlier_count = rospy.get_param('~ikf_max_outlier_count', 200)\n\n def handle_multi_range_message(self, multi_range_msg):\n \"\"\"Handle a ROS multi-range message by updating and publishing the state.\n\n Args:\n multi_range_msg (uwb.msg.UWBMultiRangeWithOffsets): ROS multi-range message.\n \"\"\"\n # Update tracker position based on time-of-flight measurements\n new_estimate = self.update_estimate(multi_range_msg)\n if new_estimate is None:\n rospy.logwarn('Could not compute initial estimate: address={}, remote_address={}'.format(\n multi_range_msg.address, multi_range_msg.remote_address))\n else:\n # Publish tracker message\n ros_msg = uwb.msg.UWBTracker()\n ros_msg.header.stamp = rospy.get_rostime()\n ros_msg.address = multi_range_msg.address\n ros_msg.remote_address = multi_range_msg.remote_address\n ros_msg.state = new_estimate.state\n ros_msg.covariance = np.ravel(new_estimate.covariance)\n self.uwb_pub.publish(ros_msg)\n\n # Publish target transform (rotation is identity)\n self.tf_broadcaster.sendTransform(\n (new_estimate.state[0], new_estimate.state[1], new_estimate.state[2]),\n tf.transformations.quaternion_from_euler(0, 0, 0),\n rospy.get_rostime(),\n self.target_frame,\n self.tracker_frame\n )\n\n def initialize_estimate(self, estimate_id, initial_state):\n \"\"\"Initialize a state estimate with identity covariance.\n\n The initial estimate is saved in the `self.estimates` dictionary.\n The timestamp in the `self.estimate_times` is updated.\n\n Args:\n estimate_id (int): ID of the tracked target.\n initial_state (int): Initial state of the estimate.\n\n Returns:\n X (numpy.ndarray): Solution of equation.\n \"\"\"\n x = initial_state\n P = self.initial_position_covariance * np.eye(6)\n P[3:6, 3:6] = 0\n estimate = UWBTracker.StateEstimate(x, P)\n self.estimates[estimate_id] = estimate\n self.estimate_times[estimate_id] = rospy.get_time()\n self.ikf_prev_outlier_flags[estimate_id] = False\n self.ikf_outlier_counts[estimate_id] = 0\n\n def _solve_equation_least_squares(self, A, B):\n \"\"\"Solve system of linear equations A X = B.\n Currently using Pseudo-inverse because it also allows for singular matrices.\n\n Args:\n A (numpy.ndarray): Left-hand side of equation.\n B (numpy.ndarray): Right-hand side of equation.\n\n Returns:\n X (numpy.ndarray): Solution of equation.\n \"\"\"\n # Pseudo-inverse\n X = np.dot(np.linalg.pinv(A), B)\n # LU decomposition\n # lu, piv = scipy.linalg.lu_factor(A)\n # X = scipy.linalg.lu_solve((lu, piv), B)\n # Vanilla least-squares from numpy\n # X, _, _, _ = np.linalg.lstsq(A, B)\n # QR decomposition\n # Q, R, P = scipy.linalg.qr(A, mode='economic', pivoting=True)\n # # Find first zero element in R\n # out = np.where(np.diag(R) == 0)[0]\n # if out.size == 0:\n # i = R.shape[0]\n # else:\n # i = out[0]\n # B_prime = np.dot(Q.T, B)\n # X = np.zeros((A.shape[1], B.shape[1]), dtype=A.dtype)\n # X[P[:i], :] = scipy.linalg.solve_triangular(R[:i, :i], B_prime[:i, :])\n return X\n\n def _compute_measurements_and_jacobians(self, ranges, position, h, H, z):\n \"\"\"Computes the predicted measurements and the jacobian of the measurement model based on the current state.\n\n Args:\n ranges (list of 
uwb.msg.UWBMultiRange): Range measurement message.\n position (numpy.ndarray): Current position state.\n h (``Output``) (numpy.ndarray): Vector for the predicted measurements.\n H (``Output``) (numpy.ndarray): Vector for the computed jacobian of the measurement model.\n z (``Output``) (numpy.ndarray): Vector for the range measurements.\n\n TODO:\n Could be sped up a bit using Cython\n \"\"\"\n for j in xrange(len(ranges)):\n offset = ranges[j].offset\n offset = np.array([[offset.x], [offset.y], [offset.z]])\n # Observation\n if self.ignore_z_position:\n h[j] = np.linalg.norm(position[0:2] - offset[0:2])\n else:\n h[j] = np.linalg.norm(position - offset)\n\n # Jacobians\n # squared distance by position\n hs_to_x = 2 * position - 2 * offset\n # distance by squared distance\n h_to_hs = 1 / (2 * h[j])\n # distance by position\n h_to_x = h_to_hs[0] * hs_to_x\n H[j, 0:3] = h_to_x[:, 0]\n if self.ignore_z_position:\n H[j, 2] = 0\n z[j] = ranges[j].range\n\n def initial_guess(self, ranges):\n \"\"\"Computes an initial position guess based on range measurements.\n\n The initial position is computed using Gauss-Newton method.\n The behavior can be modified with some parameters: `self.initial_guess_...`.\n\n Args:\n ranges (list of floats): Range measurements.\n\n Returns:\n initial_state (numpy.ndarray): Initial state vector (velocity components are zero).\n \"\"\"\n num_of_units = len(ranges)\n position = self.initial_guess_position\n H = np.zeros((num_of_units, position.size))\n z = np.zeros((num_of_units, 1))\n h = np.zeros((num_of_units, 1))\n residuals = np.zeros((num_of_units, 1))\n for i in xrange(self.initial_guess_iterations):\n self._compute_measurements_and_jacobians(ranges, position, h, H, z)\n new_residuals = z - h\n position = position + np.dot(self._solve_equation_least_squares(np.dot(H.T, H), H.T), new_residuals)\n if np.sum((new_residuals - residuals) ** 2) < self.initial_guess_tolerance:\n break\n residuals = new_residuals\n rospy.loginfo('initial guess residuals: {}'.format(residuals))\n if np.any(np.abs(residuals) > self.initial_guess_residuals_threshold):\n # This initial guess is not good enough\n return None\n initial_state = np.zeros((6, 1))\n initial_state[0:3] = position\n return initial_state\n\n def update_estimate(self, multi_range_msg):\n \"\"\"Update tracker based on a multi-range message.\n\n Updates estimate and timestamp in the `self.estimate` and `self.estimate_times` dictionaries.\n\n Args:\n multi_range_msg (uwb.msg.UWBMultiRangeWithOffsets): ROS multi-range message.\n\n Returns:\n new_estimate (StateEstimate): Updated position estimate.\n \"\"\"\n estimate_id = (multi_range_msg.address, multi_range_msg.remote_address)\n if estimate_id not in self.estimates:\n initial_state = self.initial_guess(multi_range_msg.ranges)\n if initial_state is None:\n return None\n self.initialize_estimate(estimate_id, initial_state)\n\n current_time = rospy.get_time()\n timestep = current_time - self.estimate_times[estimate_id]\n estimate = self.estimates[estimate_id]\n new_estimate, outlier_flag = self.update_filter(timestep, estimate, multi_range_msg.ranges)\n if not outlier_flag:\n self.estimates[estimate_id] = new_estimate\n self.estimate_times[estimate_id] = current_time\n if self.ikf_prev_outlier_flags[estimate_id]:\n self.ikf_prev_outlier_flags[estimate_id] = False\n # If too many outliers are encountered in a row the estimate is deleted.\n # This will lead to a new initial guess for the next multi-range message.\n if outlier_flag:\n if not 
self.ikf_prev_outlier_flags[estimate_id]:\n self.ikf_prev_outlier_flags[estimate_id] = True\n self.ikf_outlier_counts[estimate_id] = 0\n self.ikf_outlier_counts[estimate_id] += 1\n if self.ikf_outlier_counts[estimate_id] >= self.ikf_max_outlier_count:\n del self.estimates[estimate_id]\n rospy.loginfo('Too many outliers in a row. Resetting estimate for address={}, remote_address={}'.format(\n multi_range_msg.address, multi_range_msg.remote_address\n ))\n\n return new_estimate\n\n def _ikf_iteration(self, x, n, ranges, h, H, z, estimate, R):\n \"\"\"Update tracker based on a multi-range message.\n\n Args:\n multi_range_msg (uwb.msg.UWBMultiRangeWithOffsets): ROS multi-range message.\n\n Returns:\n new_estimate (StateEstimate): Updated position estimate.\n \"\"\"\n new_position = n[0:3]\n self._compute_measurements_and_jacobians(ranges, new_position, h, H, z)\n res = z - h\n S = np.dot(np.dot(H, estimate.covariance), H.T) + R\n K = np.dot(estimate.covariance, self._solve_equation_least_squares(S.T, H).T)\n mahalanobis = np.sqrt(np.dot(self._solve_equation_least_squares(S.T, res).T, res))\n if res.size not in self.outlier_thresholds:\n self.outlier_thresholds[res.size] = scipy.stats.chi2.isf(self.outlier_threshold_quantile, res.size)\n outlier_threshold = self.outlier_thresholds[res.size]\n if mahalanobis < outlier_threshold:\n n = x + np.dot(K, (res - np.dot(H, x - n)))\n outlier_flag = False\n else:\n outlier_flag = True\n return n, K, outlier_flag\n\n def _compute_process_and_covariance_matrices(self, dt):\n \"\"\"Computes the transition and covariance matrix of the process model and measurement model.\n\n Args:\n dt (float): Timestep of the discrete transition.\n\n Returns:\n F (numpy.ndarray): Transition matrix.\n Q (numpy.ndarray): Process covariance matrix.\n R (numpy.ndarray): Measurement covariance matrix.\n \"\"\"\n F = np.array(np.bmat([[np.eye(3), dt * np.eye(3)], [np.zeros((3, 3)), np.eye(3)]]))\n self.process_matrix = F\n q_p = self.process_covariance_position\n q_v = self.process_covariance_velocity\n Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt\n r = self.measurement_covariance\n R = r * np.eye(4)\n self.process_covariance = Q\n self.measurement_covariance = R\n return F, Q, R\n\n def update_filter(self, timestep, estimate, ranges):\n \"\"\"Update position filter.\n\n Args:\n timestep (float): Time elapsed since last update.\n estimate (StateEstimate): Position estimate to update.\n ranges (list of floats): Range measurements.\n\n Returns:\n new_estimate (StateEstimate): Updated position estimate.\n outlier_flag (bool): Flag indicating whether the measurement was rejected as an outlier.\n \"\"\"\n num_of_units = len(ranges)\n x = estimate.state\n P = estimate.covariance\n # Compute process matrix and covariance matrices\n F, Q, R = self._compute_process_and_covariance_matrices(timestep)\n # rospy.logdebug('F: {}'.format(F))\n # rospy.logdebug('Q: {}'.format(Q))\n # rospy.logdebug('R: {}'.format(R))\n # Prediction\n x = np.dot(F, x)\n P = np.dot(F, np.dot(P, F.T)) + Q\n # Update\n n = np.copy(x)\n H = np.zeros((num_of_units, x.size))\n z = np.zeros((num_of_units, 1))\n h = np.zeros((num_of_units, 1))\n for i in xrange(self.ikf_iterations):\n n, K, outlier_flag = self._ikf_iteration(x, n, ranges, h, H, z, estimate, R)\n if outlier_flag:\n new_estimate = estimate\n else:\n new_state = n\n new_covariance = np.dot((np.eye(6) - np.dot(K, H)), P)\n new_estimate = UWBTracker.StateEstimate(new_state, new_covariance)\n return new_estimate, outlier_flag\n\n def exec_(self):\n 
rospy.spin()\n\n def stop(self):\n rospy.signal_shutdown('User request')\n\n\ndef main():\n import signal\n\n rospy.init_node('uwb_tracker_node')\n u = UWBTracker()\n\n def sigint_handler(sig, _):\n if sig == signal.SIGINT:\n u.stop()\n signal.signal(signal.SIGINT, sigint_handler)\n\n try:\n u.exec_()\n except (rospy.ROSInterruptException, select.error):\n rospy.logwarn(\"Interrupted... Stopping.\")\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.abs",
"numpy.eye",
"numpy.linalg.norm",
"numpy.linalg.pinv",
"numpy.copy",
"numpy.ravel",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
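`_compute_process_and_covariance_matrices` in `uwb_tracker_node.py` above builds a constant-velocity model over a `[position(3), velocity(3)]` state. A compact numpy sketch of the same F/Q construction (using `np.block`, the modern equivalent of the file's `np.bmat`) plus the Kalman prediction step; the nonlinear range-measurement update is the iterated part handled in the node itself:

```python
import numpy as np

def constant_velocity_model(dt, q_p, q_v):
    """Transition F and process covariance Q for a [pos(3), vel(3)] state."""
    F = np.block([[np.eye(3), dt * np.eye(3)],
                  [np.zeros((3, 3)), np.eye(3)]])
    Q = np.diag([q_p] * 3 + [q_v] * 3) ** 2 * dt
    return F, Q

def predict(x, P, F, Q):
    """Kalman prediction: propagate state and covariance one step."""
    return F @ x, F @ P @ F.T + Q

x = np.array([[0.0], [0.0], [0.0], [1.0], [0.0], [0.0]])  # moving along x
P = np.eye(6)
F, Q = constant_velocity_model(dt=0.1, q_p=0.0, q_v=1.0)
x, P = predict(x, P, F, Q)
print(x[:3].ravel())  # position advanced by v*dt -> [0.1, 0.0, 0.0]
```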
HamsterBiz/Automatic-Video-Colorization-1 | [
"e110650d130d3eadfe29073bf445513d10a5b3f6",
"e110650d130d3eadfe29073bf445513d10a5b3f6"
] | [
"fully_colorization/pytorch_pwc/comparison/comparison.py",
"fully_colorization/psnr.py"
] | [
"#!/usr/bin/env python\n\nimport math\nimport moviepy\nimport moviepy.editor\nimport numpy\nimport PIL\nimport PIL.Image\nimport PIL.ImageFont\nimport PIL.ImageDraw\n\nintX = 32\nintY = 436 - 64\n\nobjImages = [ {\n\t'strFile': 'official - caffe.png',\n\t'strText': 'official - Caffe'\n}, {\n\t'strFile': 'this - pytorch.png',\n\t'strText': 'this - PyTorch'\n} ]\n\nnpyImages = []\n\nfor objImage in objImages:\n\tobjOutput = PIL.Image.open(objImage['strFile']).convert('RGB')\n\n\tfor intU in [ intShift - 10 for intShift in range(20) ]:\n\t\tfor intV in [ intShift - 10 for intShift in range(20) ]:\n\t\t\tif math.sqrt(math.pow(intU, 2.0) + math.pow(intV, 2.0)) <= 5.0:\n\t\t\t\tPIL.ImageDraw.Draw(objOutput).text((intX + intU, intY + intV), objImage['strText'], (255, 255, 255), PIL.ImageFont.truetype('freefont/FreeSerifBold.ttf', 32))\n\t\t\t# end\n\t\t# end\n\t# end\n\n\tPIL.ImageDraw.Draw(objOutput).text((intX, intY), objImage['strText'], (0, 0, 0), PIL.ImageFont.truetype('freefont/FreeSerifBold.ttf', 32))\n\n\tnpyImages.append(numpy.array(objOutput))\n# end\n\nmoviepy.editor.ImageSequenceClip(sequence=npyImages, fps=1).write_gif(filename='comparison.gif', program='ImageMagick', opt='optimizeplus')",
"import numpy\nimport math\nimport cv2\nimport pandas as pd\nimport glob\nimport argparse\ndef psnr (original, colorized) :\n mse = numpy.mean((original-colorized)**2)\n if mse == 0 :\n return 100\n else :\n return 20*math.log10(255.0/math.sqrt(mse))\n\n#원본 이미지(ground truth)들이 모여있는 폴더 path 지정(해당 path에는 이미지만 있어야함)\n# 끝에 /* 꼭 붙이기\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--original\", default='./data/test/', type=str)\nparser.add_argument(\"--color\", default='./model/0100/001/prediction0', type=str, help=\"Test dir path\")\nparser.add_argument(\"--output\", default='result', type=str)\nargs = parser.parse_args()\nprint(args)\noriginal_path = args.original\n\n#모듈을 통과한 이미지들이 모여있는 폴더 path 지정(해당 path에는 이미지만 있어야함)\n# 끝에 /* 꼭 붙이기\ncolorized_path = args.color\n\n#original 이미지가 모여 있는 폴더에서 사진들 이름 다 불러오기\noriginal_list = glob.glob(original_path)\n#colorized 이미지가 모여 있는 폴더에서 사진들 이름 다 불러오기\ncolorized_list = glob.glob(colorized_path)\n\n#psnr 결과 저장 리스트\npsnr_list = []\n\n#psnr 계산\nindex = 0\nwhile index < len(original_list) :\n original_img = cv2.imread(original_list[index])\n colorized_img = cv2.imread(colorized_list[index])\n original_img = cv2.resize(original_img, dsize=(colorized_img.shape[1], colorized_img.shape[0]))\n psnr_list.append(psnr(original_img, colorized_img))\n index += 1\n\n#DataFrame 생성\ndata = {'original' : original_list,\n 'colorized' : colorized_list,\n 'psnr' : psnr_list\n }\ndf = pd.DataFrame(data)\n\n#csv에 저장\ndf.to_csv(\"%s.csv\"%(args.output), mode='w')\n"
] | [
[
"numpy.array"
],
[
"numpy.mean",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
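The `psnr` helper in `psnr.py` above converts MSE to decibels as 20 * log10(255 / sqrt(MSE)). A self-contained check with a hand-computable answer, casting to float first as in the cleaned-up script:

```python
import numpy as np

def psnr(original, colorized, peak=255.0):
    """Peak signal-to-noise ratio in dB; inputs are cast to float first."""
    mse = np.mean((original.astype(np.float64) - colorized.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images
    return 20 * np.log10(peak / np.sqrt(mse))

a = np.zeros((4, 4), dtype=np.uint8)
b = np.full((4, 4), 16, dtype=np.uint8)  # constant error of 16 -> MSE = 256
# 20 * log10(255 / 16) is about 24.05 dB
print(round(psnr(a, b), 2))
```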
levaid/open_lth | [
"43c91e83cf17524702b80df3a252dffc83fb532e"
] | [
"models/charcnn.py"
] | [
"import torch.nn as nn\n\nimport torch.nn as nn\n\nfrom foundations import hparams\nfrom lottery.desc import LotteryDesc\nfrom models import base\nfrom pruning import sparse_global\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Model(base.Model):\n \"\"\"Character-level CNN designed for text classification.\"\"\"\n\n def __init__(self, num_classes=4):\n super(Model, self).__init__()\n\n # I do not support setting hparams since this model is very different from image classifiers.\n # AGNews is 70 dim on input, it has 70 different characters\n hparams = {'num_features': 70, 'dropout': 0.5, }\n\n self.conv1 = nn.Sequential(\n nn.Conv1d(hparams['num_features'], 256, kernel_size=7, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=3, stride=3)\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=7, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=3, stride=3)\n ) \n \n self.conv3 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=3, stride=1),\n nn.ReLU()\n )\n\n self.conv4 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=3, stride=1),\n nn.ReLU() \n )\n \n self.conv5 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=3, stride=1),\n nn.ReLU()\n )\n\n self.conv6 = nn.Sequential(\n nn.Conv1d(256, 256, kernel_size=3, stride=1),\n nn.ReLU(),\n nn.MaxPool1d(kernel_size=3, stride=3)\n )\n \n \n self.fc1 = nn.Sequential(\n nn.Linear(8704, 1024),\n nn.ReLU(),\n nn.Dropout(p=hparams['dropout'])\n )\n \n self.fc2 = nn.Sequential(\n nn.Linear(1024, 1024),\n nn.ReLU(),\n nn.Dropout(p=hparams['dropout'])\n )\n\n self.fc3 = nn.Linear(1024, num_classes)\n\n self.grads = {}\n self.criterion = nn.CrossEntropyLoss()\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n x = self.conv5(x)\n x = self.conv6(x)\n\n # collapse\n x = x.view(x.size(0), -1)\n # linear layer\n x = self.fc1(x)\n # linear layer\n x = self.fc2(x)\n # linear layer\n x = self.fc3(x)\n \n return x\n\n @property\n def output_layer_names(self):\n return ['fc3.weight', 'fc3.bias']\n\n @staticmethod\n def is_valid_model_name(model_name):\n return model_name=='charcnn'\n\n @staticmethod\n def get_model_from_name(model_name, initializer, outputs=4):\n \"\"\"Name is charcnn.\"\"\"\n\n if not Model.is_valid_model_name(model_name):\n raise ValueError('Invalid model name: {}'.format(model_name))\n \n return(Model(num_classes=outputs))\n\n @property\n def loss_criterion(self):\n return self.criterion\n\n def default_hparams():\n model_hparams = hparams.ModelHparams(\n model_name='charcnn',\n model_init='kaiming_normal',\n batchnorm_init='uniform',\n )\n\n dataset_hparams = hparams.DatasetHparams(\n dataset_name='agnews',\n batch_size=32,\n )\n\n training_hparams = hparams.TrainingHparams(\n optimizer_name='sgd',\n momentum=0.9,\n milestone_steps='120ep,180ep',\n lr=0.1,\n gamma=0.5,\n weight_decay=0,\n training_steps='200ep',\n )\n\n pruning_hparams = sparse_global.PruningHparams(\n pruning_strategy='sparse_global',\n pruning_fraction=0.2\n )\n\n return LotteryDesc(model_hparams, dataset_hparams, training_hparams, pruning_hparams)\n\n\n\n\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.Dropout",
"torch.nn.Linear",
"torch.nn.MaxPool1d",
"torch.nn.Conv1d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
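A quick shape check on the charcnn row above: the hard-coded nn.Linear(8704, 1024) only works out if the one-hot character sequences entering the network are 1014 characters long (8704 = 256 channels * 34 positions after the six conv blocks). The length 1014 is an assumption — the classic character-level-CNN setting — since the data pipeline is not part of this file. A minimal sketch of just the convolutional stack under that assumption:

import torch
import torch.nn as nn

# replica of the six conv blocks from models/charcnn.py above
features = nn.Sequential(
    nn.Conv1d(70, 256, kernel_size=7), nn.ReLU(), nn.MaxPool1d(3, 3),
    nn.Conv1d(256, 256, kernel_size=7), nn.ReLU(), nn.MaxPool1d(3, 3),
    nn.Conv1d(256, 256, kernel_size=3), nn.ReLU(),
    nn.Conv1d(256, 256, kernel_size=3), nn.ReLU(),
    nn.Conv1d(256, 256, kernel_size=3), nn.ReLU(),
    nn.Conv1d(256, 256, kernel_size=3), nn.ReLU(), nn.MaxPool1d(3, 3),
)

x = torch.rand(1, 70, 1014)        # (batch, num_features, assumed seq_len)
out = features(x)
assert out.shape == (1, 256, 34)   # 256 * 34 = 8704, matching fc1's input size
print(out.flatten(1).shape)        # torch.Size([1, 8704])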
Rowing0914/TF_RL | [
"68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7",
"68e5e9a23e38ed2d8ac5f97d380567b919a3d2e7"
] | [
"examples/Sutton_RL_Intro/ch5_MC/first_visit_MC_ES.py",
"test/Other_test/gpu_test.py"
] | [
"# 5.3 Monte Carlo Control\n# Monte Carlo ES (Exploring Starts) with first visit constraint\n# reference: https://github.com/dennybritz/reinforcement-learning/blob/master/MC/MC%20Control%20with%20Epsilon-Greedy%20Policies%20Solution.ipynb\n\nfrom collections import defaultdict\nimport numpy as np\nimport sys\n\nif \"../\" not in sys.path:\n sys.path.append(\"../\")\n\nfrom utils.envs.blackjack import BlackjackEnv\n\n\ndef make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy\n\n\ndef First_Visit_MC_ES(env, action_value, discount_factor=1.0, num_episodes=1000):\n Returns = defaultdict(float)\n Returns_count = defaultdict(float)\n policy = make_epsilon_greedy_policy(action_value, discount_factor, env.nA)\n\n for i in range(num_episodes):\n # observe the environment and store the observation\n experience = []\n # this satisfies the exploraing start condition\n observation = env.reset()\n # generate an episode\n for t in range(100):\n action = np.random.choice(np.arange(env.nA), p=policy(observation))\n next_observation, reward, done, _ = env.step(action)\n experience.append((observation, action, reward))\n observation = next_observation\n if done:\n break\n\n # remove duplicated state-action pairs in the episode\n state_action_in_experience = set([(x[0], x[1]) for x in experience])\n # update the state-value function using the obtained episode\n for row in state_action_in_experience:\n state, action = row[0], row[1]\n # Find the first occurance of the state-action pair in the episode\n first_occurence_idx = next(i for i, x in enumerate(experience) if ((x[0] == state) and (x[1] == action)))\n # Sum up all discounted rewards over time since the first occurance in the episode\n G = sum([x[2] * (discount_factor ** i) for i, x in enumerate(experience[first_occurence_idx:])])\n # Calculate average return for this state over all sampled experiences\n Returns[row] += G\n Returns_count[row] += 1.0\n action_value[state][action] = Returns[row] / Returns_count[row]\n\n return action_value, policy\n\n\nif __name__ == '__main__':\n env = BlackjackEnv()\n action_value = defaultdict(lambda: np.zeros(env.action_space.n))\n discount_factor = 1.0\n num_episodes = 100\n action_value, policy = First_Visit_MC_ES(env, action_value, discount_factor=1.0, num_episodes=num_episodes)\n print(action_value)\n",
"# https://stackoverflow.com/questions/38009682/how-to-tell-if-tensorflow-is-using-gpu-acceleration-from-inside-python-shell\n\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\n\nprint(device_lib.list_local_devices())\nprint(tf.__version__)\n\nwith tf.device('/gpu:0'):\n a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')\n b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')\n c = tf.matmul(a, b)\n\nwith tf.Session() as sess:\n print(sess.run(c))"
] | [
[
"numpy.arange",
"numpy.argmax",
"numpy.zeros",
"numpy.ones"
],
[
"tensorflow.device",
"tensorflow.matmul",
"tensorflow.constant",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
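Worth flagging in the first_visit_MC_ES.py row above: make_epsilon_greedy_policy is defined as (Q, epsilon, nA) but is called with the discount factor in the epsilon slot, so epsilon becomes 1.0, the greedy bonus (1.0 - epsilon) vanishes, and the behaviour policy stays uniform random for the whole run. A self-contained sketch of the difference (the 0.1 exploration rate is hypothetical, not a value from the script):

import numpy as np

def make_epsilon_greedy_policy(Q, epsilon, nA):
    # same helper as in the script above
    def policy(observation):
        A = np.ones(nA, dtype=float) * epsilon / nA
        A[np.argmax(Q[observation])] += 1.0 - epsilon
        return A
    return policy

Q = {0: np.array([0.0, 1.0])}                      # toy action values
as_called = make_epsilon_greedy_policy(Q, 1.0, 2)  # discount_factor passed as epsilon
intended = make_epsilon_greedy_policy(Q, 0.1, 2)   # hypothetical exploration rate
print(as_called(0))  # [0.5 0.5] -- never exploits the greedy action
print(intended(0))   # [0.05 0.95]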
michael-snower/d2go | [
"614336e421baef3c61bcbc2135a21c912db25cc5"
] | [
"projects_oss/detr/test_detr_export.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport unittest\n\nimport torch\nfrom d2go.runner import create_runner\nfrom detr.util.misc import nested_tensor_from_tensor_list\nfrom fvcore.nn import flop_count_table, FlopCountAnalysis\n\n\nclass Tester(unittest.TestCase):\n @staticmethod\n def _set_detr_cfg(cfg, enc_layers, dec_layers, num_queries, dim_feedforward):\n cfg.MODEL.META_ARCHITECTURE = \"Detr\"\n cfg.MODEL.DETR.NUM_OBJECT_QUERIES = num_queries\n cfg.MODEL.DETR.ENC_LAYERS = enc_layers\n cfg.MODEL.DETR.DEC_LAYERS = dec_layers\n cfg.MODEL.DETR.DEEP_SUPERVISION = False\n cfg.MODEL.DETR.DIM_FEEDFORWARD = dim_feedforward # 2048\n\n def _assert_model_output(self, model, scripted_model):\n x = nested_tensor_from_tensor_list(\n [torch.rand(3, 200, 200), torch.rand(3, 200, 250)]\n )\n out = model(x)\n out_script = scripted_model(x)\n self.assertTrue(out[\"pred_logits\"].equal(out_script[\"pred_logits\"]))\n self.assertTrue(out[\"pred_boxes\"].equal(out_script[\"pred_boxes\"]))\n\n def test_detr_res50_export(self):\n runner = create_runner(\"d2go.projects.detr.runner.DETRRunner\")\n cfg = runner.get_default_cfg()\n cfg.MODEL.DEVICE = \"cpu\"\n # DETR\n self._set_detr_cfg(cfg, 6, 6, 100, 2048)\n # backbone\n cfg.MODEL.BACKBONE.NAME = \"build_resnet_backbone\"\n cfg.MODEL.RESNETS.DEPTH = 50\n cfg.MODEL.RESNETS.STRIDE_IN_1X1 = False\n cfg.MODEL.RESNETS.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n # build model\n model = runner.build_model(cfg).eval()\n model = model.detr\n scripted_model = torch.jit.script(model)\n self._assert_model_output(model, scripted_model)\n\n def test_detr_fbnet_export(self):\n runner = create_runner(\"d2go.projects.detr.runner.DETRRunner\")\n cfg = runner.get_default_cfg()\n cfg.MODEL.DEVICE = \"cpu\"\n # DETR\n self._set_detr_cfg(cfg, 3, 3, 50, 256)\n # backbone\n cfg.MODEL.BACKBONE.NAME = \"FBNetV2C4Backbone\"\n cfg.MODEL.FBNET_V2.ARCH = \"FBNetV3_A_dsmask_C5\"\n cfg.MODEL.FBNET_V2.WIDTH_DIVISOR = 8\n cfg.MODEL.FBNET_V2.OUT_FEATURES = [\"trunk4\"]\n # build model\n model = runner.build_model(cfg).eval()\n model = model.detr\n print(model)\n scripted_model = torch.jit.script(model)\n self._assert_model_output(model, scripted_model)\n # print flops\n table = flop_count_table(FlopCountAnalysis(model, ([torch.rand(3, 224, 320)],)))\n print(table)\n"
] | [
[
"torch.jit.script",
"torch.rand"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
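The test_detr_export.py row above follows a common TorchScript validation pattern: build the eager model, compile it with torch.jit.script, run both on the same input, and assert exact tensor equality. A stripped-down sketch of that idiom with a toy module (Toy is a placeholder, not anything from d2go):

import torch
import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.fc(x)

model = Toy().eval()
scripted = torch.jit.script(model)      # compile the module to TorchScript
x = torch.rand(3, 4)
with torch.no_grad():
    assert model(x).equal(scripted(x))  # eager and scripted outputs agree exactly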
jojoelfe/napari | [
"b52a136dad392c091b0008c0b8d7fcc5ef460f66",
"b52a136dad392c091b0008c0b8d7fcc5ef460f66",
"48cdf4d1c4bcf6f76603e90b1c0c7498e2aba6c0"
] | [
"examples/dev/plot_2d_edge_meshes.py",
"napari/benchmarks/benchmark_points_layer.py",
"napari/layers/utils/_color_encoding.py"
] | [
"from napari.layers.shapes._shapes_utils import (\n generate_2D_edge_meshes,\n) # , old_generate_2D_edge_meshes\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\n\nfig, axes = plt.subplots(2, 3)\n# fig.set_figwidth(15)\n# fig.set_figheight(10)\ncolors = iter(['red', 'green', 'blue', 'yellow'])\nitaxes = iter(axes.flatten())\nsup = axes.flatten()[4]\nfor closed in [False, True]:\n for beveled in [False, True]:\n ax = next(itaxes)\n c = next(colors)\n centers, offsets, triangles = generate_2D_edge_meshes(\n [[0, 3], [1, 0], [2, 3], [5, 0], [2.5, 5]],\n closed=closed,\n limit=3,\n bevel=beveled,\n )\n points = centers + 0.3 * offsets\n for t in triangles:\n trp = points[t]\n ax.add_patch(Polygon(trp, ec='#000000', fc=c, alpha=0.2))\n sup.add_patch(Polygon(trp, ec='#000000', fc=c, alpha=0.1))\n ax.scatter(*(points).T)\n ax.scatter(*(centers).T)\n ax.set_aspect('equal')\n ax.set_title(f' {closed=}, {beveled=}')\n ax.set_xlim(-1, 6)\n ax.set_ylim(-1, 6)\n sup.set_xlim(-1, 6)\n sup.set_ylim(-1, 6)\nplt.show()\n",
"# See \"Writing benchmarks\" in the asv docs for more information.\n# https://asv.readthedocs.io/en/latest/writing_benchmarks.html\n# or the napari documentation on benchmarking\n# https://github.com/napari/napari/blob/main/docs/BENCHMARKS.md\nimport numpy as np\n\nfrom napari.layers import Points\n\n\nclass Points2DSuite:\n \"\"\"Benchmarks for the Points layer with 2D data\"\"\"\n\n params = [2**i for i in range(4, 18, 2)]\n\n def setup(self, n):\n np.random.seed(0)\n self.data = np.random.random((n, 2))\n self.layer = Points(self.data)\n\n def time_create_layer(self, n):\n \"\"\"Time to create layer.\"\"\"\n Points(self.data)\n\n def time_refresh(self, n):\n \"\"\"Time to refresh view.\"\"\"\n self.layer.refresh()\n\n def time_set_view_slice(self, n):\n \"\"\"Time to set view slice.\"\"\"\n self.layer._set_view_slice()\n\n def time_update_thumbnail(self, n):\n \"\"\"Time to update thumbnail.\"\"\"\n self.layer._update_thumbnail()\n\n def time_get_value(self, n):\n \"\"\"Time to get current value.\"\"\"\n self.layer.get_value((0,) * 2)\n\n def time_add(self, n):\n self.layer.add(self.data)\n\n def mem_layer(self, n):\n \"\"\"Memory used by layer.\"\"\"\n return self.layer\n\n def mem_data(self, n):\n \"\"\"Memory used by raw data.\"\"\"\n return self.data\n\n\nclass Points3DSuite:\n \"\"\"Benchmarks for the Points layer with 3D data.\"\"\"\n\n params = [2**i for i in range(4, 18, 2)]\n\n def setup(self, n):\n np.random.seed(0)\n self.data = np.random.random((n, 3))\n self.layer = Points(self.data)\n\n def time_create_layer(self, n):\n \"\"\"Time to create layer.\"\"\"\n Points(self.data)\n\n def time_refresh(self, n):\n \"\"\"Time to refresh view.\"\"\"\n self.layer.refresh()\n\n def time_set_view_slice(self, n):\n \"\"\"Time to set view slice.\"\"\"\n self.layer._set_view_slice()\n\n def time_update_thumbnail(self, n):\n \"\"\"Time to update thumbnail.\"\"\"\n self.layer._update_thumbnail()\n\n def time_get_value(self, n):\n \"\"\"Time to get current value.\"\"\"\n self.layer.get_value((0,) * 3)\n\n def mem_layer(self, n):\n \"\"\"Memory used by layer.\"\"\"\n return self.layer\n\n def mem_data(self, n):\n \"\"\"Memory used by raw data.\"\"\"\n return self.data\n\n\nclass PointsSlicingSuite:\n \"\"\"Benchmarks for slicing the Points layer with 3D data.\"\"\"\n\n params = [True, False]\n\n def setup(self, flatten_slice_axis):\n np.random.seed(0)\n self.data = np.random.uniform(size=(20_000_000, 3), low=0, high=500)\n if flatten_slice_axis:\n self.data[:, 0] = np.round(self.data[:, 0])\n self.layer = Points(self.data)\n self.slice = np.s_[249, :, :]\n\n def time_slice_points(self, flatten_slice_axis):\n \"\"\"Time to take one slice of points\"\"\"\n self.layer._slice_data(self.slice)\n\n\nclass PointsToMaskSuite:\n \"\"\"Benchmarks for creating a binary image mask from points.\"\"\"\n\n param_names = ['num_points', 'mask_shape', 'point_size']\n params = [\n [64, 256, 1024, 4096, 16384],\n [\n (256, 256),\n (512, 512),\n (1024, 1024),\n (2048, 2048),\n (128, 128, 128),\n (256, 256, 256),\n (512, 512, 512),\n ],\n [5, 10],\n ]\n\n def setup(self, num_points, mask_shape, point_size):\n np.random.seed(0)\n data = np.random.random((num_points, len(mask_shape))) * mask_shape\n self.layer = Points(data, size=point_size)\n\n def time_to_mask(self, num_points, mask_shape, point_size):\n self.layer.to_mask(shape=mask_shape)\n",
"from typing import Any, Literal, Optional, Tuple, Union\n\nimport numpy as np\nfrom pydantic import Field, parse_obj_as, validator\nfrom typing_extensions import Protocol, runtime_checkable\n\nfrom ...utils import Colormap\nfrom ...utils.colormaps import ValidColormapArg, ensure_colormap\nfrom ...utils.colormaps.categorical_colormap import CategoricalColormap\nfrom ...utils.colormaps.standardize_color import transform_color\nfrom ...utils.translations import trans\nfrom .color_transformations import ColorType\nfrom .style_encoding import (\n StyleEncoding,\n _ConstantStyleEncoding,\n _DerivedStyleEncoding,\n _ManualStyleEncoding,\n)\n\n\nclass ColorValue(np.ndarray):\n \"\"\"A 4x1 array that represents one RGBA color value.\"\"\"\n\n @classmethod\n def __get_validators__(cls):\n yield cls.validate_type\n\n @classmethod\n def validate_type(cls, val):\n return transform_color(val)[0]\n\n\nclass ColorArray(np.ndarray):\n \"\"\"An Nx4 array where each row of N represents one RGBA color value.\"\"\"\n\n @classmethod\n def __get_validators__(cls):\n yield cls.validate_type\n\n @classmethod\n def validate_type(cls, val):\n return (\n np.empty((0, 4), np.float32)\n if len(val) == 0\n else transform_color(val)\n )\n\n\n@runtime_checkable\nclass ColorEncoding(StyleEncoding[ColorValue, ColorArray], Protocol):\n \"\"\"Encodes colors from features.\"\"\"\n\n @classmethod\n def __get_validators__(cls):\n yield cls.validate\n\n @classmethod\n def validate(\n cls, value: Union['ColorEncoding', dict, str, ColorType]\n ) -> 'ColorEncoding':\n \"\"\"Validates and coerces a value to a ColorEncoding.\n\n Parameters\n ----------\n value : ColorEncodingArgument\n The value to validate and coerce.\n If this is already a ColorEncoding, it is returned as is.\n If this is a dict, then it should represent one of the built-in color encodings.\n If this a string, then a DirectColorEncoding is returned.\n If this a single color, a ConstantColorEncoding is returned.\n If this is a sequence of colors, a ManualColorEncoding is returned.\n\n Returns\n -------\n ColorEncoding\n\n Raises\n ------\n TypeError\n If the value is not a supported type.\n ValidationError\n If the value cannot be parsed into a ColorEncoding.\n \"\"\"\n if isinstance(value, ColorEncoding):\n return value\n if isinstance(value, dict):\n return parse_obj_as(\n Union[\n ConstantColorEncoding,\n ManualColorEncoding,\n DirectColorEncoding,\n NominalColorEncoding,\n QuantitativeColorEncoding,\n ],\n value,\n )\n if isinstance(value, str):\n return DirectColorEncoding(feature=value, fallback=DEFAULT_COLOR)\n try:\n color_array = ColorArray.validate_type(value)\n except (ValueError, AttributeError, KeyError):\n raise TypeError(\n trans._(\n 'value should be a ColorEncoding, a dict, a string, a color, a sequence of colors, or None',\n deferred=True,\n )\n )\n if color_array.shape[0] == 1:\n return ConstantColorEncoding(constant=value)\n return ManualColorEncoding(array=color_array, default=DEFAULT_COLOR)\n\n\n\"\"\"The default color to use, which may also be used a safe fallback color.\"\"\"\nDEFAULT_COLOR = ColorValue.validate_type('cyan')\n\n\nclass ConstantColorEncoding(_ConstantStyleEncoding[ColorValue, ColorArray]):\n \"\"\"Encodes color values from a single constant color.\n\n Attributes\n ----------\n constant : ColorValue\n The constant color RGBA value.\n \"\"\"\n\n encoding_type: Literal['ConstantColorEncoding'] = 'ConstantColorEncoding'\n constant: ColorValue\n\n\nclass ManualColorEncoding(_ManualStyleEncoding[ColorValue, ColorArray]):\n 
\"\"\"Encodes color values manually in an array attribute.\n\n Attributes\n ----------\n array : ColorArray\n The array of color values. Can be written to directly to make\n persistent updates.\n default : ColorValue\n The default color value.\n \"\"\"\n\n encoding_type: Literal['ManualColorEncoding'] = 'ManualColorEncoding'\n array: ColorArray\n default: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)\n\n\nclass DirectColorEncoding(_DerivedStyleEncoding[ColorValue, ColorArray]):\n \"\"\"Encodes color values directly from a feature column.\n\n Attributes\n ----------\n feature : str\n The name of the feature that contains the desired color values.\n fallback : ColorArray\n The safe constant fallback color to use if the feature column\n does not contain valid color values.\n \"\"\"\n\n encoding_type: Literal['DirectColorEncoding'] = 'DirectColorEncoding'\n feature: str\n fallback: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)\n\n def __call__(self, features: Any) -> ColorArray:\n # A column-like may be a series or have an object dtype (e.g. color names),\n # neither of which transform_color handles, so convert to a list.\n return ColorArray.validate_type(list(features[self.feature]))\n\n\nclass NominalColorEncoding(_DerivedStyleEncoding[ColorValue, ColorArray]):\n \"\"\"Encodes color values from a nominal feature whose values are mapped to colors.\n\n Attributes\n ----------\n feature : str\n The name of the feature that contains the nominal values to be mapped to colors.\n colormap : CategoricalColormap\n Maps the feature values to colors.\n fallback : ColorValue\n The safe constant fallback color to use if mapping the feature values to\n colors fails.\n \"\"\"\n\n encoding_type: Literal['NominalColorEncoding'] = 'NominalColorEncoding'\n feature: str\n colormap: CategoricalColormap\n fallback: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)\n\n def __call__(self, features: Any) -> ColorArray:\n # map is not expecting some column-likes (e.g. pandas.Series), so ensure\n # this is a numpy array first.\n values = np.asarray(features[self.feature])\n return self.colormap.map(values)\n\n\nclass QuantitativeColorEncoding(_DerivedStyleEncoding[ColorValue, ColorArray]):\n \"\"\"Encodes color values from a quantitative feature whose values are mapped to colors.\n\n Attributes\n ----------\n feature : str\n The name of the feature that contains the nominal values to be mapped to colors.\n colormap : Colormap\n Maps feature values to colors.\n contrast_limits : Optional[Tuple[float, float]]\n The (min, max) feature values that should respectively map to the first and last\n colors in the colormap. If None, then this will attempt to calculate these values\n from the feature values each time this generates color values. 
If that attempt\n fails, these are effectively (0, 1).\n fallback : ColorValue\n The safe constant fallback color to use if mapping the feature values to\n colors fails.\n \"\"\"\n\n encoding_type: Literal[\n 'QuantitativeColorEncoding'\n ] = 'QuantitativeColorEncoding'\n feature: str\n colormap: Colormap\n contrast_limits: Optional[Tuple[float, float]] = None\n fallback: ColorValue = Field(default_factory=lambda: DEFAULT_COLOR)\n\n def __call__(self, features: Any) -> ColorArray:\n values = features[self.feature]\n contrast_limits = self.contrast_limits or _calculate_contrast_limits(\n values\n )\n if contrast_limits is not None:\n values = np.interp(values, contrast_limits, (0, 1))\n return self.colormap.map(values)\n\n @validator('colormap', pre=True, always=True)\n def _check_colormap(cls, colormap: ValidColormapArg) -> Colormap:\n return ensure_colormap(colormap)\n\n @validator('contrast_limits', pre=True, always=True)\n def _check_contrast_limits(\n cls, contrast_limits\n ) -> Optional[Tuple[float, float]]:\n if (contrast_limits is not None) and (\n contrast_limits[0] >= contrast_limits[1]\n ):\n raise ValueError(\n 'contrast_limits must be a strictly increasing pair of values'\n )\n return contrast_limits\n\n\ndef _calculate_contrast_limits(\n values: np.ndarray,\n) -> Optional[Tuple[float, float]]:\n contrast_limits = None\n if values.size > 0:\n min_value = np.min(values)\n max_value = np.max(values)\n # Use < instead of != to handle nans.\n if min_value < max_value:\n contrast_limits = (min_value, max_value)\n return contrast_limits\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.subplots"
],
[
"numpy.round",
"numpy.random.uniform",
"numpy.random.random",
"numpy.random.seed"
],
[
"numpy.min",
"numpy.asarray",
"numpy.max",
"numpy.interp",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
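One detail from the _color_encoding.py row above: QuantitativeColorEncoding normalises feature values with np.interp before the colormap lookup, which both maps the (min, max) contrast limits onto (0, 1) and clamps out-of-range values to the endpoints; _calculate_contrast_limits then returns None when min is not strictly below max (a comparison that is also False for NaNs, hence the "< instead of !=" comment). A small sketch of the rescaling step on its own:

import numpy as np

def rescale(values, contrast_limits):
    # np.interp maps the limits onto (0, 1) and clamps values outside them
    return np.interp(values, contrast_limits, (0, 1))

v = np.array([5.0, 10.0, 15.0, 25.0])
print(rescale(v, (10.0, 20.0)))  # [0.  0.  0.5 1. ]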
orestislampridis/Greek-Lyrics-Generation | [
"e37b33b048a8d38a83474b43e407552538fcf2ca"
] | [
"char_level_lstm/char_level_lstm.py"
] | [
"#Import dependencies\nimport tensorflow as tf\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Bidirectional, Activation, Dropout, Dense,CuDNNLSTM, Embedding,GRU, CuDNNGRU\nfrom keras.callbacks import *\nfrom keras.optimizers import Adam\nfrom keras.utils import np_utils\nimport numpy as np\nimport pandas as pd\nimport sys\n\n#Get data from google drive\ndef get_from_drive():\n from google.colab import drive\n drive.mount('/content/drive')\n with open('/content/drive/My Drive/Colab Notebooks/entexna.txt', 'r') as f: \n text = f.read()\n return text\n\n#Get file from text\ndef get_from_git():\n #get raw link of data on github\n url='https://raw.githubusercontent.com/orestislampridis/Greek-Lyric-Generation/master/char_level_lstm/entexna.txt?token=ANTGNDJE42Q36BYI7IFYRZS6G4TE6'\n path_to_file = tf.keras.utils.get_file('shakespeare.txt', url)\n text = open(path_to_file, 'rb').read().decode(encoding='utf-8')\n return text\n\ntext=get_from_git()\n\ndef check_text(t):\n print('the first 100 characters are:',repr(text[:100])) #read the first 100 characters of doc\n n=len(t)\n print ('Length of text: %i characters' %n) #lenght=number of characters in text\n v=sorted(set(t)) #making the vocabulary of characters\n n_v=len(v) \n print('number of unique characters: %i' %n)\n return n,v,n_v\n\nn_char,vocab,n_vocab=check_text(text)\n\nchar2int=dict((i, c) for c, i in enumerate(vocab)) #map characters to int\nint2char=dict((i, c) for i, c in enumerate(vocab)) #map int to char (for \"translation\")\n\n#print(char2int) #print the result of mapping the characters in the vocabulary\n\nprint('\\nMapping text...')\ntext_as_int=np.array([char2int[c] for c in text]) #map the data as int\n# Show a sample of our data mapped from text to integers\nprint ('%s --[mapped to] -- > %s' %(repr(text[100:119]), text_as_int[100:119]))\n\nprint('\\nMaking samples(sequences) and deviding data to input and target...')\nseq_length = 100 #how many characters per sequence\n#i.e seq_length=3 text=καλή, input=καλ, target=ή\ntarget=[]\ninput=[]\nstep=5 #this step determines how many sequences we want\nfor i in range (0,n_char-seq_length,step):\n\n input.append(text_as_int[i:i+seq_length]) \n target.append(text_as_int[i+seq_length])\n\nprint('Input and target data example:')\nprint(\"input 2:\", \"\".join([int2char[c] for c in input[2]]))\nprint(\"target 2:\", int2char[target[2]])\n\n\nn_samples=len(input)\nprint(\"\\nNumber of samples:\",n_samples)\n\nprint('\\nReshaping data to feed RNN...')\n#We can use the reshape() function on the NumPy array to reshape this one-dimensional array into a two dimensional array \ninputR=np.reshape(input,(n_samples, seq_length))\nprint(\"The input representation of: \", \"\".join([int2char[c] for c in input[0][:13]]),\"is now:\")\nprint(inputR[0][:13])\n#We represent the target values with One Hot Encoding.\ntargetE= np_utils.to_categorical(target)\nprint(\"The target representation of: \",int2char[target[60]],\" is now:\\n\",targetE[60])\nprint(\"/The shape of the input data is:\",inputR.shape)\nprint(\"The shape of the target data is:\",targetE.shape)\n\nprint('\\nBuilding model...')\nmodel= Sequential()\nrnn_size=512\n#embedding layer\nmodel.add(Embedding(n_samples, seq_length,input_length=seq_length, trainable=True))\n#input layer\nmodel.add(Bidirectional( CuDNNLSTM(rnn_size, return_sequences=True)))\n#Hidden layers \nmodel.add(Bidirectional( CuDNNLSTM(rnn_size)))\n#Dropout layer(avoid overfitting)\nmodel.add(Dropout(0.2))\n#Output 
layer\nmodel.add(Dense(targetE.shape[1]))\n#Activation function\nmodel.add(Activation('softmax'))\nadam = Adam(lr=0.001)\n#compile model\nmodel.compile(loss = 'categorical_crossentropy', optimizer = 'adam',metrics=['accuracy'])\n#model details\nmodel.summary()\n\nprint('\\nCreating callbacks..')\n\nfilepath=\"/content/drive/My Drive/Colab Notebooks/CheckpointsLyricsGen/epochs:{epoch:03d}-val_acc:{val_acc:.5f}.hdf5\"\n#Folder called CheckpointsLyricsGen in drive\n#each file will be stored with epoch number and validation accuracy\n#these files contain weights of your neural network\n\nprint('Callbacks created at:',filepath[:63])\n\ncheckpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose = 1, save_best_only = False, mode ='max')\n#the arguments passed in the above code it is monitoring validation accuracy \n\ncallbacks_list = [checkpoint]\n#a list so that you can append any other callbacks to this list and pass it in fit function while training \n#all the methods in the list will be called after every epoch\n\n#if we need to train more: uncomment the code below with the correct checkpoint \n\nmodel.load_weights('/content/drive/My Drive/Colab Notebooks/CheckpointsLyricsGen/epochs:015-val_acc:0.47429.hdf5')\n\nprint('\\nTraining model...')\n\n#fit the model\nmodel.fit(inputR,\n targetE,\n epochs=50,\n batch_size=128,\n shuffle= True,\n initial_epoch=16,\n callbacks=callbacks_list,\n validation_split = 0.2,\n validation_data = None,\n validation_steps = None)\n\n#Load weights for generation\n #choose the right filename!!!\nmodel.load_weights('/content/drive/My Drive/Colab Notebooks/CheckpointsLyricsGen/epochs:005-val_acc:0.50984.hdf5') \n#compile model \nmodel.compile(loss = 'categorical_crossentropy', optimizer = 'adam')\n\nprint('\\nGenerating text...')\n\ndef random_seed():\n start = np.random.randint(0, len(input)-1)\n random_pattern = input[start]\n print('Seed : ')\n print(\"\\\"\",''.join([int2char[v] for v in random_pattern]), \"\\\"\\n\")\n return random_pattern\n\ndef set_seed():\n seed=\"Θάλασσα\"\n seed_int=([char2int[c] for c in seed])\n pad_len=seq_length-len(seed_int) \n set_pattern=np.pad(seed_int,(pad_len,0),constant_values=char2int[\" \"]) #we need to pad the seed so it can be the correct shape\n return set_pattern\n\n\n\npattern = random_seed() #Choose what type of seed we want\n\n\n\n# How many characters you want to generate\ngenerated_characters = 300\n\nresults=[]\n\nfor i in range(generated_characters):\n x = np.reshape(pattern, ( 1, len(pattern)))\n \n prediction = model.predict(x,verbose = 0)\n \n index = np.argmax(prediction)\n\n result = int2char[index]\n\n results.append(result)\n \n pattern = np.append(pattern,index)\n \n pattern = pattern[1:len(pattern)]\nprint(\"Generated text:\")\nprint(\"\\\"\",''.join(results), \"\\\"\\n\") \nprint('\\nDone')\n"
] | [
[
"numpy.pad",
"numpy.reshape",
"numpy.append",
"numpy.argmax",
"tensorflow.keras.utils.get_file",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
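Two notes on the char_level_lstm.py row above: check_text prints the vocabulary size with the wrong variable ('number of unique characters: %i' % n uses the text length n rather than n_v), and the training samples come from a strided sliding window over the integer-encoded text, where step trades sample count against overlap between consecutive windows. A self-contained sketch of that windowing with a toy corpus (the script itself uses seq_length=100, step=5):

import numpy as np

text_as_int = np.arange(20)   # stand-in for the encoded corpus
seq_length, step = 5, 3       # toy window size and stride

inputs, targets = [], []
for i in range(0, len(text_as_int) - seq_length, step):
    inputs.append(text_as_int[i:i + seq_length])  # seq_length characters in...
    targets.append(text_as_int[i + seq_length])   # ...the very next one out

print(len(inputs))            # 5 windows from 20 characters
print(inputs[0], targets[0])  # [0 1 2 3 4] 5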
andreabragantini/pandapower | [
"5503873141ba6bf3dc1459ce8d57f4dac5160dbd"
] | [
"pandapower/build_branch.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.\n\nimport copy\nimport math\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandapower.auxiliary import get_values\nfrom pandapower.pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, BR_B, TAP, SHIFT, BR_STATUS, RATE_A, \\\n BR_R_ASYM, BR_X_ASYM, branch_cols\nfrom pandapower.pypower.idx_bus import BASE_KV, VM, VA\n\n\ndef _build_branch_ppc(net, ppc):\n \"\"\"\n Takes the empty ppc network and fills it with the branch values. The branch\n datatype will be np.complex 128 afterwards.\n\n .. note:: The order of branches in the ppc is:\n 1. Lines\n 2. Transformers\n 3. 3W Transformers (each 3W Transformer takes up three branches)\n 4. Impedances\n 5. Internal branch for extended ward\n\n **INPUT**:\n **net** -The pandapower format network\n\n **ppc** - The PYPOWER format network to fill in values\n\n \"\"\"\n length = _initialize_branch_lookup(net)\n lookup = net._pd2ppc_lookups[\"branch\"]\n mode = net._options[\"mode\"]\n ppc[\"branch\"] = np.zeros(shape=(length, branch_cols), dtype=np.complex128)\n if mode == \"sc\":\n from pandapower.shortcircuit.idx_brch import branch_cols_sc\n branch_sc = np.empty(shape=(length, branch_cols_sc), dtype=float)\n branch_sc.fill(np.nan)\n ppc[\"branch\"] = np.hstack((ppc[\"branch\"], branch_sc))\n ppc[\"branch\"][:, :13] = np.array([0, 0, 0, 0, 0, 250, 250, 250, 1, 0, 1, -360, 360])\n if \"line\" in lookup:\n _calc_line_parameter(net, ppc)\n if \"trafo\" in lookup:\n _calc_trafo_parameter(net, ppc)\n if \"trafo3w\" in lookup:\n _calc_trafo3w_parameter(net, ppc)\n if \"impedance\" in lookup:\n _calc_impedance_parameter(net, ppc)\n if \"xward\" in lookup:\n _calc_xward_parameter(net, ppc)\n if \"switch\" in lookup:\n _calc_switch_parameter(net, ppc)\n\n\ndef _initialize_branch_lookup(net):\n start = 0\n end = 0\n net._pd2ppc_lookups[\"branch\"] = {}\n for element in [\"line\", \"trafo\", \"trafo3w\", \"impedance\", \"xward\"]:\n if len(net[element]) > 0:\n if element == \"trafo3w\":\n end = start + len(net[element]) * 3\n else:\n end = start + len(net[element])\n net._pd2ppc_lookups[\"branch\"][element] = (start, end)\n start = end\n if \"_impedance_bb_switches\" in net and net._impedance_bb_switches.any():\n end = start + net._impedance_bb_switches.sum()\n net._pd2ppc_lookups[\"branch\"][\"switch\"] = (start, end)\n return end\n\n\ndef _calc_trafo3w_parameter(net, ppc):\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n branch = ppc[\"branch\"]\n f, t = net[\"_pd2ppc_lookups\"][\"branch\"][\"trafo3w\"]\n trafo_df = _trafo_df_from_trafo3w(net)\n hv_bus = get_trafo_values(trafo_df, \"hv_bus\").astype(int)\n lv_bus = get_trafo_values(trafo_df, \"lv_bus\").astype(int)\n in_service = get_trafo_values(trafo_df, \"in_service\").astype(int)\n branch[f:t, F_BUS] = bus_lookup[hv_bus]\n branch[f:t, T_BUS] = bus_lookup[lv_bus]\n r, x, y, ratio, shift = _calc_branch_values_from_trafo_df(net, ppc, trafo_df)\n branch[f:t, BR_R] = r\n branch[f:t, BR_X] = x\n branch[f:t, BR_B] = y\n branch[f:t, TAP] = ratio\n branch[f:t, SHIFT] = shift\n branch[f:t, BR_STATUS] = in_service\n if net[\"_options\"][\"mode\"] == \"opf\":\n if \"max_loading_percent\" in trafo_df:\n max_load = get_trafo_values(trafo_df, \"max_loading_percent\")\n sn_mva = 
get_trafo_values(trafo_df, \"sn_mva\")\n branch[f:t, RATE_A] = max_load / 100. * sn_mva\n else:\n branch[f:t, RATE_A] = np.nan\n\n\ndef _calc_line_parameter(net, ppc, elm=\"line\", ppc_elm=\"branch\"):\n \"\"\"\n calculates the line parameter in per unit.\n\n **INPUT**:\n **net** - The pandapower format network\n\n **ppc** - the ppc array\n\n **OPTIONAL**:\n **elm** - The pandapower element (normally \"line\")\n\n **ppc_elm** - The ppc element (normally \"branch\")\n\n **RETURN**:\n **t** - Temporary line parameter. Which is a complex128\n Nunmpy array. with the following order:\n 0:bus_a; 1:bus_b; 2:r_pu; 3:x_pu; 4:b_pu\n \"\"\"\n f, t = net._pd2ppc_lookups[ppc_elm][elm]\n branch = ppc[ppc_elm]\n mode = net[\"_options\"][\"mode\"]\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n line = net[elm]\n from_bus = bus_lookup[line[\"from_bus\"].values]\n to_bus = bus_lookup[line[\"to_bus\"].values]\n length_km = line[\"length_km\"].values\n parallel = line[\"parallel\"].values\n base_kv = ppc[\"bus\"][from_bus, BASE_KV]\n baseR = np.square(base_kv) / net.sn_mva\n\n branch[f:t, F_BUS] = from_bus\n branch[f:t, T_BUS] = to_bus\n\n branch[f:t, BR_R] = line[\"r_ohm_per_km\"].values * length_km / baseR / parallel\n branch[f:t, BR_X] = line[\"x_ohm_per_km\"].values * length_km / baseR / parallel\n\n if mode == \"sc\":\n # temperature correction\n if net[\"_options\"][\"case\"] == \"min\":\n branch[f:t, BR_R] *= _end_temperature_correction_factor(net, short_circuit=True)\n else:\n # temperature correction\n if net[\"_options\"][\"consider_line_temperature\"]:\n branch[f:t, BR_R] *= _end_temperature_correction_factor(net)\n\n b = 2 * net.f_hz * math.pi * line[\"c_nf_per_km\"].values * 1e-9 * baseR * length_km * parallel\n g = line[\"g_us_per_km\"].values * 1e-6 * baseR * length_km * parallel\n branch[f:t, BR_B] = b - g * 1j\n # in service of lines\n branch[f:t, BR_STATUS] = line[\"in_service\"].values\n if net._options[\"mode\"] == \"opf\":\n # RATE_A is conisdered by the (PowerModels) OPF. If zero -> unlimited\n max_load = line.max_loading_percent.values if \"max_loading_percent\" in line else 0.\n vr = net.bus.loc[line[\"from_bus\"].values, \"vn_kv\"].values * np.sqrt(3.)\n max_i_ka = line.max_i_ka.values\n df = line.df.values\n branch[f:t, RATE_A] = max_load / 100. * max_i_ka * df * parallel * vr\n\n\ndef _calc_trafo_parameter(net, ppc):\n '''\n Calculates the transformer parameter in per unit.\n\n **INPUT**:\n **net** - The pandapower format network\n\n **RETURN**:\n **temp_para** -\n Temporary transformer parameter. Which is a np.complex128\n Numpy array. with the following order:\n 0:hv_bus; 1:lv_bus; 2:r_pu; 3:x_pu; 4:b_pu; 5:tab, 6:shift\n '''\n\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n f, t = net[\"_pd2ppc_lookups\"][\"branch\"][\"trafo\"]\n branch = ppc[\"branch\"]\n temp_para = np.zeros(shape=(len(net[\"trafo\"].index), 9), dtype=np.complex128)\n trafo = net[\"trafo\"]\n parallel = trafo[\"parallel\"].values\n branch[f:t, F_BUS] = bus_lookup[trafo[\"hv_bus\"].values]\n branch[f:t, T_BUS] = bus_lookup[trafo[\"lv_bus\"].values]\n r, x, y, ratio, shift = _calc_branch_values_from_trafo_df(net, ppc)\n branch[f:t, BR_R] = r\n branch[f:t, BR_X] = x\n branch[f:t, BR_B] = y\n branch[f:t, TAP] = ratio\n branch[f:t, SHIFT] = shift\n branch[f:t, BR_STATUS] = trafo[\"in_service\"].values\n if any(trafo.df.values <= 0):\n raise UserWarning(\"Rating factor df must be positive. 
Transformers with false \"\n \"rating factors: %s\" % trafo.query('df<=0').index.tolist())\n if net._options[\"mode\"] == \"opf\":\n max_load = trafo.max_loading_percent.values if \"max_loading_percent\" in trafo else 0\n sn_mva = trafo.sn_mva.values\n df = trafo.df.values\n branch[f:t, RATE_A] = max_load / 100. * sn_mva * df * parallel\n\n\ndef get_trafo_values(trafo_df, par):\n if isinstance(trafo_df, dict):\n return trafo_df[par]\n else:\n return trafo_df[par].values\n\n\ndef _calc_branch_values_from_trafo_df(net, ppc, trafo_df=None):\n \"\"\"\n Calculates the MAT/PYPOWER-branch-attributes from the pandapower trafo dataframe.\n\n PYPOWER and MATPOWER uses the PI-model to model transformers.\n This function calculates the resistance r, reactance x, complex susceptance c and the tap ratio\n according to the given parameters.\n\n .. warning:: This function returns the subsceptance b as a complex number\n **(-img + -re*i)**. MAT/PYPOWER is only intended to calculate the\n imaginary part of the subceptance. However, internally c is\n multiplied by i. By using subsceptance in this way, it is possible\n to consider the ferromagnetic loss of the coil. Which would\n otherwise be neglected.\n\n\n .. warning:: Tab switches effect calculation as following:\n On **high-voltage** side(=1) -> only **tab** gets adapted.\n On **low-voltage** side(=2) -> **tab, x, r** get adapted.\n This is consistent with Sincal.\n The Sincal method in this case is questionable.\n\n\n **INPUT**:\n **pd_trafo** - The pandapower format Transformer Dataframe.\n The Transformer modell will only readfrom pd_net\n\n **RETURN**:\n **temp_para** - Temporary transformer parameter. Which is a complex128\n Nunmpy array. with the following order:\n 0:r_pu; 1:x_pu; 2:b_pu; 3:tab;\n\n \"\"\"\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n if trafo_df is None:\n trafo_df = net[\"trafo\"]\n lv_bus = get_trafo_values(trafo_df, \"lv_bus\")\n vn_lv = ppc[\"bus\"][bus_lookup[lv_bus], BASE_KV]\n ### Construct np.array to parse results in ###\n # 0:r_pu; 1:x_pu; 2:b_pu; 3:tab;\n vn_trafo_hv, vn_trafo_lv, shift = _calc_tap_from_dataframe(net, trafo_df)\n ratio = _calc_nominal_ratio_from_dataframe(ppc, trafo_df, vn_trafo_hv, vn_trafo_lv,\n bus_lookup)\n r, x, y = _calc_r_x_y_from_dataframe(net, trafo_df, vn_trafo_lv, vn_lv, net.sn_mva)\n return r, x, y, ratio, shift\n\n\ndef _calc_r_x_y_from_dataframe(net, trafo_df, vn_trafo_lv, vn_lv, sn_mva):\n mode = net[\"_options\"][\"mode\"]\n trafo_model = net[\"_options\"][\"trafo_model\"]\n\n r, x = _calc_r_x_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_mva)\n if mode == \"sc\":\n y = 0\n if isinstance(trafo_df, pd.DataFrame): # 2w trafo is dataframe, 3w trafo is dict\n from pandapower.shortcircuit.idx_bus import C_MAX\n bus_lookup = net._pd2ppc_lookups[\"bus\"]\n cmax = net._ppc[\"bus\"][bus_lookup[net.trafo.lv_bus.values], C_MAX]\n kt = _transformer_correction_factor(trafo_df.vk_percent, trafo_df.vkr_percent,\n trafo_df.sn_mva, cmax)\n r *= kt\n x *= kt\n else:\n y = _calc_y_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_mva)\n if trafo_model == \"pi\":\n return r, x, y\n elif trafo_model == \"t\":\n return _wye_delta(r, x, y)\n else:\n raise ValueError(\"Unkonwn Transformer Model %s - valid values ar 'pi' or 't'\" % trafo_model)\n\n\ndef _wye_delta(r, x, y):\n \"\"\"\n 20.05.2016 added by Lothar Löwer\n\n Calculate transformer Pi-Data based on T-Data\n\n \"\"\"\n tidx = np.where(y != 0)\n za_star = (r[tidx] + x[tidx] * 1j) / 2\n zc_star = -1j / y[tidx]\n zSum_triangle = za_star * za_star + 2 
* za_star * zc_star\n zab_triangle = zSum_triangle / zc_star\n zbc_triangle = zSum_triangle / za_star\n r[tidx] = zab_triangle.real\n x[tidx] = zab_triangle.imag\n y[tidx] = -2j / zbc_triangle\n return r, x, y\n\n\ndef _calc_y_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_mva):\n \"\"\"\n Calculate the subsceptance y from the transformer dataframe.\n\n INPUT:\n\n **trafo** (Dataframe) - The dataframe in net.trafo\n which contains transformer calculation values.\n\n OUTPUT:\n **subsceptance** (1d array, np.complex128) - The subsceptance in pu in\n the form (-b_img, -b_real)\n \"\"\"\n baseR = np.square(vn_lv) / sn_mva\n vn_lv_kv = get_trafo_values(trafo_df, \"vn_lv_kv\")\n pfe = get_trafo_values(trafo_df, \"pfe_kw\") * 1e-3\n parallel = get_trafo_values(trafo_df, \"parallel\")\n\n ### Calculate subsceptance ###\n vnl_squared = vn_lv_kv ** 2\n b_real = pfe / vnl_squared * baseR\n i0 = get_trafo_values(trafo_df, \"i0_percent\")\n sn = get_trafo_values(trafo_df, \"sn_mva\")\n b_img = (i0 / 100. * sn) ** 2 - pfe ** 2\n\n b_img[b_img < 0] = 0\n b_img = np.sqrt(b_img) * baseR / vnl_squared\n y = - b_real * 1j - b_img * np.sign(i0)\n return y / np.square(vn_trafo_lv / vn_lv_kv) * parallel\n\n\ndef _calc_tap_from_dataframe(net, trafo_df):\n \"\"\"\n Adjust the nominal voltage vnh and vnl to the active tab position \"tap_pos\".\n If \"side\" is 1 (high-voltage side) the high voltage vnh is adjusted.\n If \"side\" is 2 (low-voltage side) the low voltage vnl is adjusted\n\n INPUT:\n **net** - The pandapower format network\n\n **trafo** (Dataframe) - The dataframe in pd_net[\"structure\"][\"trafo\"]\n which contains transformer calculation values.\n\n OUTPUT:\n **vn_hv_kv** (1d array, float) - The adusted high voltages\n\n **vn_lv_kv** (1d array, float) - The adjusted low voltages\n\n **trafo_shift** (1d array, float) - phase shift angle\n\n \"\"\"\n calculate_voltage_angles = net[\"_options\"][\"calculate_voltage_angles\"]\n mode = net[\"_options\"][\"mode\"]\n vnh = copy.copy(get_trafo_values(trafo_df, \"vn_hv_kv\").astype(float))\n vnl = copy.copy(get_trafo_values(trafo_df, \"vn_lv_kv\").astype(float))\n trafo_shift = get_trafo_values(trafo_df, \"shift_degree\").astype(float) if calculate_voltage_angles else \\\n np.zeros(len(vnh))\n if mode == \"sc\":\n return vnh, vnl, trafo_shift\n\n tap_pos = get_trafo_values(trafo_df, \"tap_pos\")\n tap_neutral = get_trafo_values(trafo_df, \"tap_neutral\")\n tap_diff = tap_pos - tap_neutral\n tap_phase_shifter = get_trafo_values(trafo_df, \"tap_phase_shifter\")\n tap_side = get_trafo_values(trafo_df, \"tap_side\")\n tap_step_percent = get_trafo_values(trafo_df, \"tap_step_percent\")\n tap_step_degree = get_trafo_values(trafo_df, \"tap_step_degree\")\n\n cos = lambda x: np.cos(np.deg2rad(x))\n sin = lambda x: np.sin(np.deg2rad(x))\n arctan = lambda x: np.rad2deg(np.arctan(x))\n\n for side, vn, direction in [(\"hv\", vnh, 1), (\"lv\", vnl, -1)]:\n phase_shifters = tap_phase_shifter & (tap_side == side)\n tap_complex = np.isfinite(tap_step_percent) & np.isfinite(tap_pos) & (tap_side == side) & \\\n ~phase_shifters\n if tap_complex.any():\n tap_steps = tap_step_percent[tap_complex] * tap_diff[tap_complex] / 100\n tap_angles = _replace_nan(tap_step_degree[tap_complex])\n u1 = vn[tap_complex]\n du = u1 * _replace_nan(tap_steps)\n vn[tap_complex] = np.sqrt((u1 + du * cos(tap_angles)) ** 2 + (du * sin(tap_angles)) ** 2)\n trafo_shift[tap_complex] += (arctan(direction * du * sin(tap_angles) /\n (u1 + du * cos(tap_angles))))\n if phase_shifters.any():\n degree_is_set = 
_replace_nan(tap_step_degree[phase_shifters]) != 0\n percent_is_set = _replace_nan(tap_step_percent[phase_shifters]) != 0\n if (degree_is_set & percent_is_set).any():\n raise UserWarning(\"Both tap_step_degree and tap_step_percent set for ideal phase shifter\")\n trafo_shift[phase_shifters] += np.where(\n (degree_is_set),\n (direction * tap_diff[phase_shifters] * tap_step_degree[phase_shifters]),\n (direction * 2 * np.rad2deg(np.arcsin(tap_diff[phase_shifters] * \\\n tap_step_percent[phase_shifters] / 100 / 2)))\n )\n return vnh, vnl, trafo_shift\n\n\ndef _replace_nan(array, value=0):\n mask = np.isnan(array)\n array[mask] = value\n return array\n\n\ndef _calc_r_x_from_dataframe(trafo_df, vn_lv, vn_trafo_lv, sn_mva):\n \"\"\"\n Calculates (Vectorized) the resitance and reactance according to the\n transformer values\n\n \"\"\"\n parallel = get_trafo_values(trafo_df, \"parallel\")\n vk_percent = get_trafo_values(trafo_df, \"vk_percent\")\n vkr_percent = get_trafo_values(trafo_df, \"vkr_percent\")\n tap_lv = np.square(vn_trafo_lv / vn_lv) * sn_mva # adjust for low voltage side voltage converter\n sn_trafo_mva = get_trafo_values(trafo_df, \"sn_mva\")\n z_sc = vk_percent / 100. / sn_trafo_mva * tap_lv\n r_sc = vkr_percent / 100. / sn_trafo_mva * tap_lv\n x_sc = np.sign(z_sc) * np.sqrt(z_sc ** 2 - r_sc ** 2)\n return r_sc / parallel, x_sc / parallel\n\n\ndef _calc_nominal_ratio_from_dataframe(ppc, trafo_df, vn_hv_kv, vn_lv_kv, bus_lookup):\n \"\"\"\n Calculates (Vectorized) the off nominal tap ratio::\n\n (vn_hv_kv / vn_lv_kv) / (ub1_in_kv / ub2_in_kv)\n\n INPUT:\n **net** (Dataframe) - The net for which to calc the tap ratio.\n\n **vn_hv_kv** (1d array, float) - The adjusted nominal high voltages\n\n **vn_lv_kv** (1d array, float) - The adjusted nominal low voltages\n\n OUTPUT:\n **tab** (1d array, float) - The off-nominal tap ratio\n \"\"\"\n # Calculating tab (trasformer off nominal turns ratio)\n tap_rat = vn_hv_kv / vn_lv_kv\n hv_bus = get_trafo_values(trafo_df, \"hv_bus\")\n lv_bus = get_trafo_values(trafo_df, \"lv_bus\")\n nom_rat = get_values(ppc[\"bus\"][:, BASE_KV], hv_bus, bus_lookup) / \\\n get_values(ppc[\"bus\"][:, BASE_KV], lv_bus, bus_lookup)\n return tap_rat / nom_rat\n\n\ndef _calc_impedance_parameter(net, ppc):\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n f, t = net[\"_pd2ppc_lookups\"][\"branch\"][\"impedance\"]\n branch = ppc[\"branch\"]\n rij, xij, r_asym, x_asym = _calc_impedance_parameters_from_dataframe(net)\n branch[f:t, BR_R] = rij\n branch[f:t, BR_X] = xij\n branch[f:t, BR_R_ASYM] = r_asym\n branch[f:t, BR_X_ASYM] = x_asym\n branch[f:t, F_BUS] = bus_lookup[net.impedance[\"from_bus\"].values]\n branch[f:t, T_BUS] = bus_lookup[net.impedance[\"to_bus\"].values]\n branch[f:t, BR_STATUS] = net[\"impedance\"][\"in_service\"].values\n\n\ndef _calc_impedance_parameters_from_dataframe(net):\n impedance = net.impedance\n sn_impedance = impedance[\"sn_mva\"].values\n sn_net = net.sn_mva\n rij = impedance[\"rft_pu\"].values\n xij = impedance[\"xft_pu\"].values\n rji = impedance[\"rtf_pu\"].values\n xji = impedance[\"xtf_pu\"].values\n\n r = rij / sn_impedance * sn_net\n x = xij / sn_impedance * sn_net\n r_asym = (rji - rij) / sn_impedance * sn_net\n x_asym = (xji - xij) / sn_impedance * sn_net\n return r, x, r_asym, x_asym\n\n\ndef _calc_xward_parameter(net, ppc):\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n f, t = net[\"_pd2ppc_lookups\"][\"branch\"][\"xward\"]\n branch = ppc[\"branch\"]\n baseR = np.square(get_values(ppc[\"bus\"][:, BASE_KV], 
net[\"xward\"][\"bus\"].values, bus_lookup)) / \\\n net.sn_mva\n xw_is = net[\"_is_elements\"][\"xward\"]\n branch[f:t, F_BUS] = bus_lookup[net[\"xward\"][\"bus\"].values]\n branch[f:t, T_BUS] = bus_lookup[net._pd2ppc_lookups[\"aux\"][\"xward\"]]\n branch[f:t, BR_R] = net[\"xward\"][\"r_ohm\"] / baseR\n branch[f:t, BR_X] = net[\"xward\"][\"x_ohm\"] / baseR\n branch[f:t, BR_STATUS] = xw_is\n\n\ndef _gather_branch_switch_info(bus, branch_id, branch_type, net):\n # determine at which end the switch is located\n # 1 = to-bus/lv-bus; 0 = from-bus/hv-bus\n branch_id = int(branch_id)\n lookup = net._pd2ppc_lookups[\"branch\"]\n if branch_type == \"l\":\n side = \"to\" if net[\"line\"][\"to_bus\"].at[branch_id] == bus else \"from\"\n branch_idx = net[\"line\"].index.get_loc(branch_id)\n return side, int(bus), int(branch_idx)\n elif branch_type == \"t\":\n side = \"hv\" if net[\"trafo\"][\"hv_bus\"].at[branch_id] == bus else \"lv\"\n branch_idx = lookup[\"trafo\"][0] + net[\"trafo\"].index.get_loc(branch_id)\n return side, int(bus), int(branch_idx)\n elif branch_type == \"t3\":\n f, t = lookup[\"trafo3w\"]\n if net[\"trafo3w\"][\"hv_bus\"].at[branch_id] == bus:\n side = \"hv\"\n offset = 0\n elif net[\"trafo3w\"][\"mv_bus\"].at[branch_id] == bus:\n side = \"mv\"\n offset = (t - f) / 3\n elif net[\"trafo3w\"][\"lv_bus\"].at[branch_id] == bus:\n side = \"lv\"\n offset = (t - f) / 3 * 2\n branch_idx = lookup[\"trafo3w\"][0] + net[\"trafo3w\"].index.get_loc(branch_id) + offset\n return side, int(bus), int(branch_idx)\n\n\ndef _switch_branches(net, ppc):\n from pandapower.shortcircuit.idx_bus import C_MIN, C_MAX\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n calculate_voltage_angles = net._options[\"calculate_voltage_angles\"]\n neglect_open_switch_branches = net._options[\"neglect_open_switch_branches\"]\n mode = net._options[\"mode\"]\n open_switches = (net.switch.closed.values == False)\n n_bus = ppc[\"bus\"].shape[0]\n for et, element in [(\"l\", \"line\"), (\"t\", \"trafo\"), (\"t3\", \"trafo3w\")]:\n switch_mask = open_switches & (net.switch.et.values == et)\n if not switch_mask.any():\n continue\n nr_open_switches = np.count_nonzero(switch_mask)\n mapfunc = partial(_gather_branch_switch_info, branch_type=et, net=net)\n switch_element = net[\"switch\"][\"element\"].values[switch_mask]\n switch_buses = net[\"switch\"][\"bus\"].values[switch_mask]\n switch_info = np.array(list(map(mapfunc, switch_buses, switch_element)))\n sw_sides = switch_info[:, 0]\n sw_bus_index = bus_lookup[switch_info[:, 1].astype(int)]\n sw_branch_index = switch_info[:, 2].astype(int)\n if neglect_open_switch_branches:\n # deactivate switches which have an open switch instead of creating aux buses\n ppc[\"branch\"][sw_branch_index, BR_STATUS] = 0\n continue\n\n new_buses = np.zeros(shape=(nr_open_switches, ppc[\"bus\"].shape[1]), dtype=float)\n new_buses[:, :15] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.1, 0.9, 0, 0])\n new_indices = np.arange(n_bus, n_bus + nr_open_switches)\n new_buses[:, 0] = new_indices\n new_buses[:, BASE_KV] = ppc[\"bus\"][sw_bus_index, BASE_KV]\n ppc[\"bus\"] = np.vstack([ppc[\"bus\"], new_buses])\n n_bus += new_buses.shape[0]\n init_vm = net._options[\"init_vm_pu\"]\n init_va = net._options[\"init_va_degree\"]\n for location in np.unique(sw_sides):\n mask = sw_sides == location\n buses = new_indices[mask]\n side = F_BUS if location == \"hv\" or location == \"from\" else T_BUS\n for init, col in [(init_vm, VM), (init_va, VA)]:\n if isinstance(init, str) and init == \"results\":\n if col == 
VM:\n res_column = net[\"res_%s\" % element][\"vm_%s_pu\" % location]\n else:\n res_column = net[\"res_%s\" % element][\"va_%s_degree\" % location]\n init_values = res_column.loc[switch_element].values[mask]\n else:\n if element == \"line\":\n opposite_buses = ppc[\"branch\"][sw_branch_index[mask], side].real.astype(int)\n init_values = ppc[\"bus\"][opposite_buses, col]\n else:\n opposite_side = T_BUS if side == F_BUS else F_BUS\n opposite_buses = ppc[\"branch\"][sw_branch_index[mask], opposite_side].real.astype(int)\n if col == VM:\n taps = ppc[\"branch\"][sw_branch_index[mask], TAP].real\n init_values = ppc[\"bus\"][opposite_buses, col] * taps\n else:\n if calculate_voltage_angles:\n shift = ppc[\"branch\"][sw_branch_index[mask], SHIFT].real.astype(int)\n init_values = ppc[\"bus\"][opposite_buses, col] + shift\n else:\n init_values = ppc[\"bus\"][opposite_buses, col]\n ppc[\"bus\"][buses, col] = init_values\n if mode == \"sc\":\n ppc[\"bus\"][buses, C_MAX] = ppc[\"bus\"][opposite_buses, C_MAX]\n ppc[\"bus\"][buses, C_MIN] = ppc[\"bus\"][opposite_buses, C_MIN]\n ppc[\"branch\"][sw_branch_index[mask], side] = new_indices[mask]\n\n\ndef _branches_with_oos_buses(net, ppc):\n \"\"\"\n Updates the ppc[\"branch\"] matrix with the changed from or to values\n if the branch is connected to an out of service bus\n\n Adds auxiliary buses if branch is connected to an out of service bus\n Sets branch out of service if connected to two out of service buses\n\n **INPUT**:\n **n** - The pandapower format network\n\n **ppc** - The PYPOWER format network to fill in values\n **bus_is** - The in service buses\n \"\"\"\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n # get in service elements\n _is_elements = net[\"_is_elements\"]\n bus_is_idx = _is_elements['bus_is_idx']\n line_is_idx = _is_elements['line_is_idx']\n\n n_oos_buses = len(net['bus']) - len(bus_is_idx)\n\n # only filter lines at oos buses if oos buses exists\n if n_oos_buses > 0:\n n_bus = len(ppc[\"bus\"])\n future_buses = [ppc[\"bus\"]]\n # out of service buses\n bus_oos = np.setdiff1d(net['bus'].index.values, bus_is_idx)\n # from buses of line\n line_buses = net[\"line\"][[\"from_bus\", \"to_bus\"]].loc[line_is_idx].values\n f_bus = line_buses[:, 0]\n t_bus = line_buses[:, 1]\n\n # determine on which side of the line the oos bus is located\n mask_from = np.in1d(f_bus, bus_oos)\n mask_to = np.in1d(t_bus, bus_oos)\n\n mask_and = mask_to & mask_from\n if np.any(mask_and):\n mask_from[mask_and] = False\n mask_to[mask_and] = False\n\n # get lines that are connected to oos bus at exactly one side\n # buses that are connected to two oos buses will be removed by ext2int\n mask_or = mask_to | mask_from\n # check whether buses are connected to line\n oos_buses_at_lines = np.hstack([f_bus[mask_from], t_bus[mask_to]])\n n_oos_buses_at_lines = len(oos_buses_at_lines)\n\n # only if oos_buses are at lines (they could be isolated as well)\n if n_oos_buses_at_lines > 0:\n ls_info = np.zeros((n_oos_buses_at_lines, 3), dtype=int)\n ls_info[:, 0] = mask_to[mask_or] & ~mask_from[mask_or]\n ls_info[:, 1] = oos_buses_at_lines\n ls_info[:, 2] = np.nonzero(np.in1d(net['line'].index, line_is_idx[mask_or]))[0]\n\n # ls_info = list(map(mapfunc,\n # line_switches[\"bus\"].values,\n # line_switches[\"element\"].values))\n # we now have the following matrix\n # 0: 1 if switch is at to_bus, 0 else\n # 1: bus of the switch\n # 2: position of the line a switch is connected to\n # ls_info = np.array(ls_info, dtype=int)\n\n # build new buses\n new_ls_buses = 
np.zeros(shape=(n_oos_buses_at_lines, ppc[\"bus\"].shape[1]), dtype=float)\n new_indices = np.arange(n_bus, n_bus + n_oos_buses_at_lines)\n # the newly created buses\n new_ls_buses[:, :15] = np.array([0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1.1, 0.9, 0, 0])\n new_ls_buses[:, 0] = new_indices\n new_ls_buses[:, BASE_KV] = get_values(ppc[\"bus\"][:, BASE_KV], ls_info[:, 1], bus_lookup)\n\n future_buses.append(new_ls_buses)\n\n # re-route the end of lines to a new bus\n ppc[\"branch\"][ls_info[ls_info[:, 0].astype(bool), 2], 1] = \\\n new_indices[ls_info[:, 0].astype(bool)]\n ppc[\"branch\"][ls_info[np.logical_not(ls_info[:, 0]), 2], 0] = \\\n new_indices[np.logical_not(ls_info[:, 0])]\n\n ppc[\"bus\"] = np.vstack(future_buses)\n\n\n\ndef _calc_switch_parameter(net, ppc):\n \"\"\"\n calculates the line parameter in per unit.\n\n **INPUT**:\n **net** -The pandapower format network\n\n **RETURN**:\n **t** - Temporary line parameter. Which is a complex128\n Nunmpy array. with the following order:\n 0:bus_a; 1:bus_b; 2:r_pu; 3:x_pu; 4:b_pu\n \"\"\"\n rx_ratio = net[\"_options\"][\"switch_rx_ratio\"]\n rz_ratio = rx_ratio / np.sqrt(1 + rx_ratio ** 2)\n xz_ratio = 1 / np.sqrt(1 + rx_ratio ** 2)\n\n f, t = net[\"_pd2ppc_lookups\"][\"branch\"][\"switch\"]\n branch = ppc[\"branch\"]\n bus_lookup = net[\"_pd2ppc_lookups\"][\"bus\"]\n switch = net.switch[net._impedance_bb_switches]\n fb = bus_lookup[switch[\"bus\"].values]\n tb = bus_lookup[switch[\"element\"].values]\n baseR = np.square(ppc[\"bus\"][fb, BASE_KV]) / net.sn_mva\n branch[f:t, F_BUS] = fb\n branch[f:t, T_BUS] = tb\n\n z_switch = switch['z_ohm'].values\n # x_switch will have the same value of r_switch to avoid zero dividence\n branch[f:t, BR_R] = z_switch / baseR * rz_ratio\n branch[f:t, BR_X] = z_switch / baseR * xz_ratio\n\n\ndef _end_temperature_correction_factor(net, short_circuit=False):\n \"\"\"\n Function to calculate resistance correction factor for the given temperature (\"endtemp_degree\").\n When multiplied by the factor, the value of r_ohm_per_km will correspond to the resistance at\n the given temperature.\n\n In case of short circuit calculation, the relevant value for the temperature is\n \"endtemp_degree\", which stands for the final temperature of a line after the short circuit.\n The temperature coefficient \"alpha\" is a constant value of 0.004 in the short circuit\n calculation standard IEC 60909-0:2016.\n\n In case of a load flow calculation, the relelvant parameter is \"temperature_degree_celsius\",\n which is specified by the user and allows calculating load flow for a given operating\n temperature.\n\n The alpha value can be provided according to the used material for the purpose of load flow\n calculation, e.g. 0.0039 for copper or 0.00403 for aluminum. 
If alpha is not provided in the\n net.line table, the default value of 0.004 is used.\n\n The calculation of the electrical resistance is based on the formula R = R20(1+alpha*(T-20°C)),\n where R is the calculated resistance, R20 is the resistance at 20 °C, alpha is the temperature\n coefficient of resistance of the conducting material and T is the line temperature in °C.\n Accordingly, the resulting correction factor is (1+alpha*(T-20°C)).\n\n Args:\n net: pandapowerNet\n short_circuit: whether the factor is calculated in the scope of a short circuit calculation\n\n Returns:\n correction factor for line R, by which the line parameter should be multiplied to\n obtain the value of resistance at line temperature \"endtemp_degree\"\n\n \"\"\"\n\n if short_circuit:\n # endtemp_degree is line temperature that is reached as the result of a short circuit\n # this value is the property of the lines\n if \"endtemp_degree\" not in net.line.columns:\n raise UserWarning(\"Specify end temperature for lines in net.line.endtemp_degree\")\n\n delta_t_degree_celsius = net.line.endtemp_degree.values.astype(np.float64) - 20\n # alpha is the temperature correction factor for the electric resistance of the material\n # formula from standard, used this way in short-circuit calculation\n alpha = 4e-3\n else:\n # temperature_degree_celsius is line temperature for load flow calculation\n if \"temperature_degree_celsius\" not in net.line.columns:\n raise UserWarning(\"Specify line temperature in net.line.temperature_degree_celsius\")\n\n delta_t_degree_celsius = net.line.temperature_degree_celsius.values.astype(np.float64) - 20\n\n if 'alpha' in net.line.columns:\n alpha = net.line.alpha.values.astype(np.float64)\n else:\n alpha = 4e-3\n\n r_correction_for_temperature = 1 + alpha * delta_t_degree_celsius\n\n return r_correction_for_temperature\n\n\ndef _transformer_correction_factor(vk, vkr, sn, cmax):\n \"\"\"\n 2W-Transformer impedance correction factor in short circuit calculations,\n based on the IEC 60909-0:2016 standard.\n Args:\n vk: transformer short-circuit voltage, percent\n vkr: real-part of transformer short-circuit voltage, percent\n sn: transformer rating, kVA\n cmax: voltage factor to account for maximum worst-case currents, based on the lv side\n\n Returns:\n kt: transformer impedance correction factor for short-circuit calculations\n \"\"\"\n zt = vk / 100 / sn\n rt = vkr / 100 / sn\n xt = np.sqrt(zt ** 2 - rt ** 2)\n kt = 0.95 * cmax / (1 + .6 * xt * sn)\n return kt\n\n\ndef get_is_lines(net):\n \"\"\"\n get indices of lines that are in service and save that information in net\n \"\"\"\n _is_elements = net[\"_is_elements\"]\n _is_elements[\"line\"] = net[\"line\"][net[\"line\"][\"in_service\"].values.astype(bool)]\n\n\ndef _trafo_df_from_trafo3w(net):\n nr_trafos = len(net[\"trafo3w\"])\n trafo2 = dict()\n sides = [\"hv\", \"mv\", \"lv\"]\n mode = net._options[\"mode\"]\n loss_side = net._options[\"trafo3w_losses\"].lower()\n nr_trafos = len(net[\"trafo3w\"])\n t3 = net[\"trafo3w\"]\n _calculate_sc_voltages_of_equivalent_transformers(t3, trafo2, mode)\n _calculate_3w_tap_changers(t3, trafo2, sides)\n zeros = np.zeros(len(net.trafo3w))\n aux_buses = net._pd2ppc_lookups[\"aux\"][\"trafo3w\"]\n trafo2[\"hv_bus\"] = {\"hv\": t3.hv_bus.values, \"mv\": aux_buses, \"lv\": aux_buses}\n trafo2[\"lv_bus\"] = {\"hv\": aux_buses, \"mv\": t3.mv_bus.values, \"lv\": t3.lv_bus.values}\n trafo2[\"in_service\"] = {side: t3.in_service.values for side in sides}\n trafo2[\"i0_percent\"] = {side: t3.i0_percent.values 
if loss_side == side else zeros for side in sides}\n trafo2[\"pfe_kw\"] = {side: t3.pfe_kw.values if loss_side == side else zeros for side in sides}\n trafo2[\"vn_hv_kv\"] = {side: t3.vn_hv_kv.values for side in sides}\n trafo2[\"vn_lv_kv\"] = {side: t3[\"vn_%s_kv\" % side].values for side in sides}\n trafo2[\"shift_degree\"] = {\"hv\": np.zeros(nr_trafos), \"mv\": t3.shift_mv_degree.values,\n \"lv\": t3.shift_lv_degree.values}\n trafo2[\"tap_phase_shifter\"] = {side: np.zeros(nr_trafos).astype(bool) for side in sides}\n trafo2[\"parallel\"] = {side: np.ones(nr_trafos) for side in sides}\n trafo2[\"df\"] = {side: np.ones(nr_trafos) for side in sides}\n if net._options[\"mode\"] == \"opf\" and \"max_loading_percent\" in net.trafo3w:\n trafo2[\"max_loading_percent\"] = {side: net.trafo3w.max_loading_percent.values for side in sides}\n return {var: np.concatenate([trafo2[var][side] for side in sides]) for var in trafo2.keys()}\n\n\ndef _calculate_sc_voltages_of_equivalent_transformers(t3, t2, mode):\n vk_3w = np.stack([t3.vk_hv_percent.values, t3.vk_mv_percent.values, t3.vk_lv_percent.values])\n vkr_3w = np.stack([t3.vkr_hv_percent.values, t3.vkr_mv_percent.values, t3.vkr_lv_percent.values])\n sn = np.stack([t3.sn_hv_mva.values, t3.sn_mv_mva.values, t3.sn_lv_mva.values])\n\n vk_2w_delta = z_br_to_bus_vector(vk_3w, sn)\n vkr_2w_delta = z_br_to_bus_vector(vkr_3w, sn)\n if mode == \"sc\":\n kt = _transformer_correction_factor(vk_3w, vkr_3w, sn, 1.1)\n vk_2w_delta *= kt\n vkr_2w_delta *= kt\n vki_2w_delta = np.sqrt(vk_2w_delta ** 2 - vkr_2w_delta ** 2)\n vkr_2w = wye_delta_vector(vkr_2w_delta, sn)\n vki_2w = wye_delta_vector(vki_2w_delta, sn)\n vk_2w = np.sign(vki_2w) * np.sqrt(vki_2w ** 2 + vkr_2w ** 2)\n if np.any(vk_2w == 0):\n raise UserWarning(\"Equivalent transformer with zero impedance!\")\n t2[\"vk_percent\"] = {\"hv\": vk_2w[0, :], \"mv\": vk_2w[1, :], \"lv\": vk_2w[2, :]}\n t2[\"vkr_percent\"] = {\"hv\": vkr_2w[0, :], \"mv\": vkr_2w[1, :], \"lv\": vkr_2w[2, :]}\n t2[\"sn_mva\"] = {\"hv\": sn[0, :], \"mv\": sn[1, :], \"lv\": sn[2, :]}\n\n\ndef z_br_to_bus_vector(z, sn):\n return sn[0, :] * np.array([z[0, :] / sn[[0, 1], :].min(axis=0), z[1, :] /\n sn[[1, 2], :].min(axis=0), z[2, :] / sn[[0, 2], :].min(axis=0)])\n\n\ndef wye_delta(zbr_n, s):\n return .5 * s / s[0] * np.array([(zbr_n[0] + zbr_n[2] - zbr_n[1]),\n (zbr_n[1] + zbr_n[0] - zbr_n[2]),\n (zbr_n[2] + zbr_n[1] - zbr_n[0])])\n\n\ndef wye_delta_vector(zbr_n, s):\n return .5 * s / s[0, :] * np.array([(zbr_n[0, :] + zbr_n[2, :] - zbr_n[1, :]),\n (zbr_n[1, :] + zbr_n[0, :] - zbr_n[2, :]),\n (zbr_n[2, :] + zbr_n[1, :] - zbr_n[0, :])])\n\n\ndef _calculate_3w_tap_changers(t3, t2, sides):\n tap_variables = [\"tap_side\", \"tap_pos\", \"tap_neutral\", \"tap_max\", \"tap_min\", \"tap_step_percent\",\n \"tap_step_degree\"]\n sides = [\"hv\", \"mv\", \"lv\"]\n nr_trafos = len(t3)\n empty = np.zeros(nr_trafos)\n empty.fill(np.nan)\n tap_arrays = {var: {side: empty.copy() for side in sides} for var in tap_variables}\n tap_arrays[\"tap_side\"] = {side: np.array([None] * nr_trafos) for side in sides}\n at_star_point = t3.tap_at_star_point.values\n any_at_star_point = at_star_point.any()\n for side in sides:\n tap_mask = t3.tap_side.values == side\n for var in tap_variables:\n tap_arrays[var][side][tap_mask] = t3[var].values[tap_mask]\n\n # t3 trafos with tap changer at terminals\n tap_arrays[\"tap_side\"][side][tap_mask] = \"hv\" if side == \"hv\" else \"lv\"\n\n # t3 trafos with tap changer at star points\n if any_at_star_point:\n mask_star_point 
= tap_mask & at_star_point\n tap_arrays[\"tap_side\"][side][mask_star_point] = \"lv\" if side == \"hv\" else \"hv\"\n tap_arrays[\"tap_step_degree\"][side][mask_star_point] += 180\n t2.update(tap_arrays)\n"
] | [
[
"numpy.sqrt",
"numpy.arctan",
"numpy.in1d",
"numpy.concatenate",
"numpy.any",
"numpy.where",
"numpy.square",
"numpy.hstack",
"numpy.unique",
"numpy.arcsin",
"numpy.arange",
"numpy.stack",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.logical_not",
"numpy.isnan",
"numpy.deg2rad",
"numpy.array",
"numpy.isfinite",
"numpy.setdiff1d",
"numpy.ones",
"numpy.sign",
"numpy.empty",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
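The `_end_temperature_correction_factor` docstring above spells out the formula R = R20(1+alpha*(T-20°C)). Below is a minimal standalone sketch of just that correction factor, assuming nothing beyond numpy; the function name and the example temperature/alpha values are illustrative, not taken from pandapower:

import numpy as np

def r_correction_factor(t_degree_celsius, alpha=4e-3):
    # correction factor (1 + alpha*(T - 20 C)) by which R at 20 C is multiplied
    delta_t = np.asarray(t_degree_celsius, dtype=np.float64) - 20.0
    return 1.0 + alpha * delta_t

# e.g. a copper line (alpha ~= 0.0039) operating at 80 C:
print(r_correction_factor(80.0, alpha=3.9e-3))  # ~1.234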
loichuder/hsds | [
"729d39d423b4b52ef60985fb32d25468b211c1c2"
] | [
"hsds/async_lib.py"
] | [
"##############################################################################\n# Copyright by The HDF Group. #\n# All rights reserved. #\n# #\n# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #\n# Utilities. The full HSDS copyright notice, including #\n# terms governing use, modification, and redistribution, is contained in #\n# the file COPYING, which can be found at the root of the source code #\n# distribution tree. If you do not have access to this file, you may #\n# request a copy from [email protected]. #\n##############################################################################\n\nimport time\nimport hashlib\nimport numpy as np\nfrom aiohttp.client_exceptions import ClientError\nfrom aiohttp.web_exceptions import HTTPNotFound, HTTPInternalServerError, HTTPForbidden\nfrom .util.idUtil import isValidUuid, isSchema2Id, getS3Key, isS3ObjKey, getObjId, isValidChunkId, getCollectionForId\nfrom .util.chunkUtil import getDatasetId, getNumChunks, ChunkIterator\nfrom .util.hdf5dtype import getItemSize, createDataType\nfrom .util.arrayUtil import getShapeDims, getNumElements, bytesToArray\nfrom .util.dsetUtil import getHyperslabSelection, getFilterOps\n\nfrom .util.storUtil import getStorKeys, putStorJSONObj, getStorJSONObj, deleteStorObj, getStorBytes, isStorObj\nfrom . import hsds_logger as log\nfrom . import config\n\n\n# List all keys under given root and optionally update info.json\n# Note: only works with schema v2 domains!\n\nasync def getDatasetJson(app, dsetid, bucket=None):\n # try to read the dataset json from s3\n s3_key = getS3Key(dsetid)\n try:\n dset_json = await getStorJSONObj(app, s3_key, bucket=bucket)\n except HTTPNotFound:\n log.warn(f\"HTTPNotFound for {s3_key} bucket:{bucket}\")\n return None\n except HTTPForbidden:\n log.warn(f\"HTTPForbidden error for {s3_key} bucket:{bucket}\")\n return None\n except HTTPInternalServerError:\n log.warn(f\"HTTPInternalServerError error for {s3_key} bucket:{bucket}\")\n return None\n return dset_json\n\nasync def updateDatasetInfo(app, dset_id, dataset_info, bucket=None):\n # get dataset metadata and deteermine number logical)_bytes, linked_bytes, and num_linked_chunks\n\n dset_json = await getDatasetJson(app, dset_id, bucket=bucket)\n log.debug(f\"updateDatasetInfo - id: {dset_id} dataset_info: {dataset_info}\")\n if \"shape\" not in dset_json:\n log.debug(f\"updateDatasetInfo - no shape dataset_json for {dset_id} - skipping\")\n return # null dataspace\n shape_json = dset_json[\"shape\"]\n if \"class\" in shape_json and shape_json[\"class\"] == 'H5S_NULL':\n log.debug(f\"updatedDatasetInfo - null space for {dset_id} - skipping\")\n return\n if \"type\" not in dset_json:\n log.warn(f\"updateDatasetInfo - expected to find type in dataset_json for {dset_id}\")\n return\n type_json = dset_json[\"type\"]\n item_size = getItemSize(type_json)\n if \"layout\" not in dset_json:\n log.warn(f\"updateDatasetInfo - expected to find layout in dataset_json for {dset_id}\")\n return\n layout = dset_json[\"layout\"]\n log.info(f\"updateDatasetInfo - shape: {shape_json} type: {type_json} item size: {item_size} layout: {layout}\")\n\n dims = getShapeDims(shape_json) # returns None for HS_NULL dsets\n\n if dims is None:\n return # null dataspace\n\n if item_size == 'H5T_VARIABLE':\n # arbitrary lgoical size for vaariable, so just set to allocated size\n logical_bytes = dataset_info['allocated_bytes'] \n else:\n num_elements = getNumElements(dims)\n logical_bytes = num_elements * item_size\n dataset_info[\"logical_bytes\"] = 
logical_bytes \n log.debug(f\"dims: {dims}\")\n rank = len(dims)\n layout_class = layout[\"class\"]\n log.debug(f\"updateDatasetInfo - {dset_id} has layout_class: {layout_class}\")\n selection = getHyperslabSelection(dims) # select entire datashape\n linked_bytes = 0\n num_linked_chunks = 0\n\n if layout_class == 'H5D_CONTIGUOUS_REF':\n # In H5D_CONTIGUOUS_REF a non-compressed part of the HDF5 is divided into equal size chunks,\n # so we can just compute link bytes and num chunks based on the size of the contiguous dataset\n layout_dims = layout[\"dims\"]\n num_chunks = getNumChunks(selection, layout_dims)\n chunk_size = item_size\n for dim in layout_dims:\n chunk_size *= dim\n log.debug(f\"updateDatasetInfo, H5D_CONTIGUOUS_REF, num_chunks: {num_chunks} chunk_size: {chunk_size}\")\n linked_bytes = chunk_size * num_chunks\n num_linked_chunks = num_chunks\n elif layout_class == 'H5D_CHUNKED_REF': \n chunks = layout[\"chunks\"]\n # chunks is a dict with tuples (offset, size)\n for chunk_id in chunks:\n chunk_info = chunks[chunk_id]\n linked_bytes += chunk_info[1]\n num_linked_chunks = len(chunks)\n elif layout_class == 'H5D_CHUNKED_REF_INDIRECT':\n log.debug(\"chunk ref indirect\")\n if \"chunk_table\" not in layout:\n log.error(f\"Expected to find chunk_table in dataset layout for {dset_id}\")\n return\n chunktable_id = layout[\"chunk_table\"]\n # get state for dataset from DN.\n chunktable_json = await getDatasetJson(app, chunktable_id, bucket=bucket)\n log.debug(f\"chunktable_json: {chunktable_json}\")\n chunktable_dims = getShapeDims(chunktable_json[\"shape\"])\n if len(chunktable_dims) != rank:\n msg = f\"Expected rank of chunktable to be same as the dataset for {dset_id}\"\n log.warn(msg)\n return\n chunktable_layout = chunktable_json[\"layout\"]\n log.debug(f\"chunktable_layout: {chunktable_layout}\")\n if not isinstance(chunktable_layout, dict) or \"class\" not in chunktable_layout:\n log.warn(f\"expected chunktable_layout: {chunktable_id}\")\n return\n if chunktable_layout[\"class\"] != 'H5D_CHUNKED':\n log.warn(\"expected chunktable layout class to be chunked\")\n return\n if \"dims\" not in chunktable_layout:\n log.warn(\"expected chunktable layout to have dims key\")\n return\n chunktable_layout_dims = chunktable_layout[\"dims\"]\n chunktable_type_json = chunktable_json[\"type\"]\n chunktable_item_size = getItemSize(chunktable_type_json)\n chunktable_dt = createDataType(chunktable_type_json)\n chunktable_filter_ops = getFilterOps(app, chunktable_json, chunktable_item_size)\n \n # read chunktable one chunk at a time - this can be slow if there are a lot of chunks,\n # but this is only used by the async bucket scan task\n chunktable_selection = getHyperslabSelection(chunktable_dims)\n it = ChunkIterator(chunktable_id, chunktable_selection, chunktable_layout_dims)\n log.debug(f\"updateDatasetInfo - iterating over chunks in {chunktable_id}\")\n\n while True:\n try:\n chunktable_chunk_id = it.next()\n log.debug(f\"updateDatasetInfo - got chunktable chunk id: {chunktable_chunk_id}\")\n chunktable_chunk_s3key = getS3Key(chunktable_chunk_id)\n # read the chunk\n try:\n is_stor_obj = await isStorObj(app, chunktable_chunk_s3key, bucket=bucket)\n except HTTPInternalServerError as hse:\n log.warning(f\"updateDatasetInfo - got error checking for key: {chunktable_chunk_s3key}: {hse}\")\n continue\n if not is_stor_obj:\n log.debug(f\"updateDatasetInfo - no chunk found for chunktable id: {chunktable_chunk_id}\")\n else:\n try:\n chunk_bytes = await getStorBytes(app, chunktable_chunk_s3key, 
filter_ops=chunktable_filter_ops, bucket=bucket)\n except HTTPInternalServerError as hse:\n log.warning(f\"updateDatasetInfo - got error reading chunktable for key: {chunktable_chunk_s3key}: {hse}\")\n continue \n chunk_arr = bytesToArray(chunk_bytes, chunktable_dt, chunktable_layout_dims)\n if chunk_arr is None:\n log.warn(f\"updateDatasetInfo - expected to find chunk for: {chunktable_chunk_s3key}\")\n else:\n # convert to 1-d list\n try: \n nelements = getNumElements(chunk_arr.shape)\n chunk_arr = chunk_arr.reshape((nelements,))\n for i in range(nelements):\n e = chunk_arr[i]\n # elements should have 2 fields (offset and size) or 3 fields (offset, size, and path)\n chunk_size = int(e[1])\n if chunk_size > 0:\n linked_bytes += chunk_size\n num_linked_chunks += 1\n except Exception as e:\n log.error(f\"updateDatasetInfo - got exception parsing chunktable array {chunktable_chunk_id}: {e}\")\n \n except StopIteration:\n break\n log.debug(f\"updateDatasetInfo - done with chunktable iteration for {chunktable_id}\")\n elif layout_class == 'H5D_CHUNKED':\n log.debug(\"updateDatasetInfo - no linked bytes/chunks for H5D_CHUNKED layout\")\n else:\n log.error(f\"unexpected chunk layout: {layout_class}\")\n\n log.debug(f\"updateDatasetInfo - {dset_id} setting linked_bytes to {linked_bytes}, num_linked_chunks to {num_linked_chunks}\")\n dataset_info[\"linked_bytes\"] = linked_bytes\n dataset_info[\"num_linked_chunks\"] = num_linked_chunks\n\ndef scanRootCallback(app, s3keys):\n log.debug(f\"scanRoot - callback, {len(s3keys)} items\")\n if isinstance(s3keys, list):\n log.error(\"got list result for s3keys callback\")\n raise ValueError(\"unexpected callback format\")\n\n results = app[\"scanRoot_results\"]\n scanRoot_keyset = app[\"scanRoot_keyset\"]\n checksums = results[\"checksums\"]\n for s3key in s3keys.keys():\n\n if not isS3ObjKey(s3key):\n log.info(f\"not s3obj key, ignoring: {s3key}\")\n continue\n if s3key in scanRoot_keyset:\n log.warn(f\"scanRoot - dejavu for key: {s3key}\")\n continue\n scanRoot_keyset.add(s3key)\n\n objid = getObjId(s3key)\n etag = None\n obj_size = None\n lastModified = None\n item = s3keys[s3key]\n if \"ETag\" in item:\n etag = item[\"ETag\"]\n checksums[objid] = etag\n if \"Size\" in item:\n obj_size = item[\"Size\"]\n if \"LastModified\" in item:\n lastModified = item[\"LastModified\"]\n log.debug(f\"scanRoot - got key {objid}: {etag} {obj_size} {lastModified}\")\n\n if lastModified > results[\"lastModified\"]:\n log.debug(f\"scanRoot: changing lastModified from: {results['lastModified']} to {lastModified}\")\n results[\"lastModified\"] = lastModified\n is_chunk = False\n if isValidChunkId(objid):\n is_chunk = True\n results[\"num_chunks\"] += 1\n results[\"allocated_bytes\"] += obj_size\n else:\n results[\"metadata_bytes\"] += obj_size\n\n if is_chunk or getCollectionForId(objid) == \"datasets\":\n if is_chunk:\n dsetid = getDatasetId(objid)\n else:\n dsetid = objid\n datasets = results[\"datasets\"]\n if dsetid not in datasets:\n dataset_info = {}\n dataset_info[\"lastModified\"] = 0\n dataset_info[\"num_chunks\"] = 0\n dataset_info[\"allocated_bytes\"] = 0\n dataset_info[\"logical_bytes\"] = 0\n dataset_info[\"linked_bytes\"] = 0\n dataset_info[\"num_linked_chunks\"] = 0\n datasets[dsetid] = dataset_info\n dataset_info = datasets[dsetid]\n if lastModified > dataset_info[\"lastModified\"]:\n dataset_info[\"lastModified\"] = lastModified\n if is_chunk:\n dataset_info[\"num_chunks\"] += 1\n 
dataset_info[\"allocated_bytes\"] += obj_size\n elif getCollectionForId(objid) == \"groups\":\n results[\"num_groups\"] += 1\n elif getCollectionForId(objid) == \"datatypes\":\n results[\"num_datatypes\"] += 1\n else:\n log.error(f\"scanRoot - Unexpected collection type for id: {objid}\")\n\n\nasync def scanRoot(app, rootid, update=False, bucket=None):\n\n # iterate through all s3 keys under the given root.\n # Return dict with stats for the root.\n #\n # Note: not re-entrant! Only one scanRoot an be run at a time per app.\n log.info(f\"scanRoot for rootid: {rootid} bucket: {bucket}\")\n\n if not isValidUuid(rootid):\n raise ValueError(\"Invalid root id\")\n\n if not isSchema2Id(rootid):\n log.warn(f\"no tabulation for schema v1 id: {rootid} returning null results\")\n return {}\n\n if not bucket:\n bucket = config.get(\"bucket_name\")\n if not bucket:\n raise ValueError(f\"no bucket defined for scan of {rootid}\")\n\n root_key = getS3Key(rootid)\n\n if not root_key.endswith(\"/.group.json\"):\n raise ValueError(\"unexpected root key\")\n root_prefix = root_key[:-(len(\".group.json\"))]\n\n log.debug(f\"scanRoot - using prefix: {root_prefix}\")\n\n results = {}\n results[\"lastModified\"] = 0\n results[\"num_groups\"] = 0\n results[\"num_datatypes\"] = 0\n results[\"datasets\"] = {} # since we need per dataset info\n results[\"num_chunks\"] = 0\n results[\"allocated_bytes\"] = 0\n results[\"metadata_bytes\"] = 0\n results[\"num_linked_chunks\"] = 0\n results[\"linked_bytes\"] = 0\n results[\"logical_bytes\"] = 0\n results[\"checksums\"] = {} # map of objid to checksums\n results[\"bucket\"] = bucket\n results[\"scan_start\"] = time.time()\n\n app[\"scanRoot_results\"] = results\n app[\"scanRoot_keyset\"] = set()\n\n await getStorKeys(app, prefix=root_prefix, include_stats=True, bucket=bucket, callback=scanRootCallback)\n num_objects = results[\"num_groups\"] + results[\"num_datatypes\"] + len(results[\"datasets\"]) + results[\"num_chunks\"]\n log.info(f\"scanRoot - got {num_objects} keys for rootid: {rootid}\")\n\n dataset_results = results[\"datasets\"]\n for dsetid in dataset_results:\n dataset_info = dataset_results[dsetid]\n log.info(f\"got dataset: {dsetid}: {dataset_info}\")\n await updateDatasetInfo(app, dsetid, dataset_info, bucket=bucket)\n if dataset_info[\"logical_bytes\"] != \"variable\":\n results[\"logical_bytes\"] += dataset_info[\"logical_bytes\"]\n results[\"linked_bytes\"] += dataset_info[\"linked_bytes\"]\n results[\"num_linked_chunks\"] += dataset_info[\"num_linked_chunks\"]\n\n log.info(f\"scanRoot - scan complete for rootid: {rootid}\")\n\n # compute overall checksum\n checksums = results[\"checksums\"]\n \n if len(checksums) != num_objects:\n log.warn(f\"skipping domain checksum calculation - {len(checksums)} found but {num_objects} hdf objects\")\n else:\n # create a numpy array to store checksums\n log.debug(f\"creating numpy checksum array for {num_objects} checksums\")\n checksum_arr = np.zeros((num_objects,), dtype='S16')\n objids = list(checksums.keys())\n objids.sort()\n for i in range(num_objects):\n objid = objids[i]\n checksum_arr[i] = checksums[objid]\n log.debug(\"numpy array created\")\n hash_object = hashlib.md5(checksum_arr.tobytes())\n md5_sum = hash_object.hexdigest()\n log.debug(f\"got domain_checksum: {md5_sum}\")\n results[\"md5_sum\"] = md5_sum\n # free up memory used by the checksums\n del results[\"checksums\"]\n\n results[\"scan_complete\"] = time.time()\n\n if update:\n # write .info object back to S3\n info_key = root_prefix + 
\".info.json\"\n log.info(f\"scanRoot - updating info key: {info_key} with results: {results}\")\n await putStorJSONObj(app, info_key, results, bucket=bucket)\n return results\n\nasync def objDeleteCallback(app, s3keys):\n log.info(f\"objDeleteCallback, {len(s3keys)} items\")\n\n if not isinstance(s3keys, list):\n log.error(\"expected list result for objDeleteCallback\")\n raise ValueError(\"unexpected callback format\")\n\n\n if \"objDelete_prefix\" not in app or not app[\"objDelete_prefix\"]:\n log.error(\"Unexpected objDeleteCallback\")\n raise ValueError(\"Invalid objDeleteCallback\")\n\n prefix = app[\"objDelete_prefix\"]\n prefix_len = len(prefix)\n for s3key in s3keys:\n if not s3key.startswith(prefix):\n log.error(f\"Unexpected key {s3key} for prefix: {prefix}\")\n raise ValueError(\"invalid s3key for objDeleteCallback\")\n full_key = prefix + s3key[prefix_len:]\n log.info(f\"removeKeys - objDeleteCallback deleting key: {full_key}\")\n await deleteStorObj(app, full_key)\n\n\n log.info(\"objDeleteCallback complete\")\n\nasync def removeKeys(app, objid):\n # iterate through all s3 keys under the given root or dataset id and delete them\n #\n # Note: not re-entrant! Only one removeKeys an be run at a time per app.\n log.debug(f\"removeKeys: {objid}\")\n if not isSchema2Id(objid):\n log.warn(\"ignoring non-schema2 id\")\n raise KeyError(\"Invalid key\")\n s3key = getS3Key(objid)\n log.debug(f\"removeKeys - got s3key: {s3key}\")\n expected_suffixes = (\".dataset.json\", \".group.json\")\n s3prefix = None\n\n for suffix in expected_suffixes:\n if s3key.endswith(suffix):\n s3prefix = s3key[:-len(suffix)]\n if not s3prefix:\n log.error(\"removeKeys - unexpected s3key for delete_set\")\n raise KeyError(\"unexpected key suffix\")\n log.info(f\"removeKeys - delete for {objid} searching for s3prefix: {s3prefix}\")\n if app[\"objDelete_prefix\"]:\n log.error(\"removeKeys - objDelete_prefix is already set - improper use of non-reentrant call?\")\n # just continue and reset\n app[\"objDelete_prefix\"] = s3prefix\n try:\n await getStorKeys(app, prefix=s3prefix, include_stats=False, callback=objDeleteCallback)\n except ClientError as ce:\n log.error(f\"removeKeys - getS3Keys faiiled: {ce}\")\n except HTTPNotFound:\n log.warn(f\"removeKeys - HTTPNotFound error for getStorKeys with prefix: {s3prefix}\")\n except HTTPInternalServerError:\n log.error(f\"removeKeys - HTTPInternalServerError for getStorKeys with prefix: {s3prefix}\")\n except Exception as e:\n log.error(f\"removeKeys - Unexpected Exception for getStorKeys with prefix: {s3prefix}: {e}\")\n\n # reset the prefix\n app[\"objDelete_prefix\"] = None\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
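scanRoot above aggregates the per-object ETags into a single domain checksum by sorting the object ids, packing each checksum into a fixed-width numpy byte array, and hashing the raw buffer. A self-contained sketch of that step; the object ids and ETag values below are made up:

import hashlib
import numpy as np

checksums = {"g-0002": "0cc175b9c0f1b6a8", "d-0001": "92eb5ffee6ae2fec"}
objids = sorted(checksums.keys())                     # order must be deterministic
checksum_arr = np.zeros((len(objids),), dtype="S16")  # one 16-byte slot per object
for i, objid in enumerate(objids):
    checksum_arr[i] = checksums[objid]
md5_sum = hashlib.md5(checksum_arr.tobytes()).hexdigest()
print(md5_sum)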
serre-lab/deeplabcut_mgh | [
"8fa4a59f422ff0357552e290230838239edcfe1b"
] | [
"deeplabcut/utils/select_crop_parameters.py"
] | [
"\"\"\"\nDeepLabCut2.0 Toolbox (deeplabcut.org)\n© A. & M. Mathis Labs\nhttps://github.com/AlexEMG/DeepLabCut\nPlease see AUTHORS for contributors.\n\nhttps://github.com/AlexEMG/DeepLabCut/blob/master/AUTHORS\nLicensed under GNU Lesser General Public License v3.0\n\"\"\"\n\nfrom __future__ import print_function\nimport wx\nimport cv2\nimport matplotlib\nimport argparse\nfrom deeplabcut.utils import auxiliaryfunctions\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas\nfrom matplotlib.widgets import RectangleSelector\n\n# ###########################################################################\n# Class for GUI MainFrame\n# ###########################################################################\nclass ImagePanel(wx.Panel):\n\n def __init__(self, parent,config,gui_size,**kwargs):\n h=gui_size[0]/2\n w=gui_size[1]/3\n wx.Panel.__init__(self, parent, -1,style=wx.SUNKEN_BORDER,size=(h,w))\n\n self.figure = matplotlib.figure.Figure()\n self.axes = self.figure.add_subplot(1, 1, 1)\n self.canvas = FigureCanvas(self, -1, self.figure)\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n self.SetSizer(self.sizer)\n self.Fit()\n\n def getfigure(self):\n \"\"\"\n Returns the figure, axes and canvas\n \"\"\"\n return(self.figure,self.axes,self.canvas)\n\nclass WidgetPanel(wx.Panel):\n def __init__(self, parent):\n wx.Panel.__init__(self, parent, -1,style=wx.SUNKEN_BORDER)\n\nclass MainFrame(wx.Frame):\n \"\"\"Contains the main GUI and button boxes\"\"\"\n\n def __init__(self, parent,config,image):\n# Settting the GUI size and panels design\n displays = (wx.Display(i) for i in range(wx.Display.GetCount())) # Gets the number of displays\n screenSizes = [display.GetGeometry().GetSize() for display in displays] # Gets the size of each display\n index = 0 # For display 1.\n screenWidth = screenSizes[index][0]\n screenHeight = screenSizes[index][1]\n self.gui_size = (screenWidth*0.7,screenHeight*0.85)\n\n wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = 'DeepLabCut2.0 - Select Crop Parameters',\n size = wx.Size(self.gui_size), pos = wx.DefaultPosition, style = wx.RESIZE_BORDER|wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )\n self.statusbar = self.CreateStatusBar()\n self.statusbar.SetStatusText(\"\")\n\n self.SetSizeHints(wx.Size(self.gui_size)) # This sets the minimum size of the GUI. It can scale now!\n \n###################################################################################################################################################\n# Spliting the frame into top and bottom panels. Bottom panels contains the widgets. 
The top panel is for showing images and plotting!\n topSplitter = wx.SplitterWindow(self)\n\n self.image_panel = ImagePanel(topSplitter, config,self.gui_size)\n self.widget_panel = WidgetPanel(topSplitter)\n \n topSplitter.SplitHorizontally(self.image_panel, self.widget_panel,sashPosition=self.gui_size[1]*0.83)#0.9\n topSplitter.SetSashGravity(1)\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(topSplitter, 1, wx.EXPAND)\n self.SetSizer(sizer)\n\n###################################################################################################################################################\n# Add Buttons to the WidgetPanel and bind them to their respective functions.\n\n widgetsizer = wx.WrapSizer(orient=wx.HORIZONTAL)\n\n self.help = wx.Button(self.widget_panel, id=wx.ID_ANY, label=\"Help\")\n widgetsizer.Add(self.help , 1, wx.ALL, 15)\n self.help.Bind(wx.EVT_BUTTON, self.helpButton)\n\n self.quit = wx.Button(self.widget_panel, id=wx.ID_ANY, label=\"Save parameters and Quit\")\n widgetsizer.Add(self.quit , 1, wx.ALL, 15)\n self.quit.Bind(wx.EVT_BUTTON, self.quitButton)\n\n self.widget_panel.SetSizer(widgetsizer)\n self.widget_panel.SetSizerAndFit(widgetsizer)\n self.widget_panel.Layout()\n \n# Variables initialization\n self.image = image\n self.coords = []\n self.figure = Figure()\n self.axes = self.figure.add_subplot(111)\n self.cfg = auxiliaryfunctions.read_config(config)\n MainFrame.show_image(self)\n\n def quitButton(self, event):\n \"\"\"\n Quits the GUI\n \"\"\"\n self.statusbar.SetStatusText(\"\")\n dlg = wx.MessageDialog(None,\"Are you sure?\", \"Quit!\",wx.YES_NO | wx.ICON_WARNING)\n result = dlg.ShowModal()\n if result == wx.ID_YES:\n self.Destroy()\n\n def show_image(self):\n self.figure,self.axes,self.canvas = self.image_panel.getfigure()\n frame=cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)\n self.ax = self.axes.imshow(frame)\n self.figure.canvas.draw()\n self.cid = RectangleSelector(self.axes, self.line_select_callback,drawtype='box', useblit=False,button=[1], minspanx=5, minspany=5,spancoords='pixels',interactive=True)\n self.canvas.mpl_connect('key_press_event', self.cid)\n \n \n def line_select_callback(self,eclick, erelease):\n 'eclick and erelease are the press and release events'\n global coords\n new_x1, new_y1 = eclick.xdata, eclick.ydata\n new_x2, new_y2 = erelease.xdata, erelease.ydata\n coords = [str(int(new_x1)),str(int(new_x2)),str(int(new_y1)),str(int(new_y2))]\n self.coords = coords\n return(self.coords)\n \n def helpButton(self,event):\n \"\"\"\n Opens Instructions\n \"\"\"\n wx.MessageBox('1. Use left click to select the region of interest. A red box will be drawn around the selected region. \\n\\n2. Use the corner points to expand the box and center to move the box around the image. \\n\\n3. Click ''Save parameters and Quit'' to save the cropping parameters and close the GUI. \\n\\n Click OK to continue', 'Instructions to use!', wx.OK | wx.ICON_INFORMATION)\n\ndef show(config,image):\n import imageio\n imageio.plugins.ffmpeg.download()\n app = wx.App()\n MainFrame(None,config,image).Show()\n app.MainLoop()\n return(coords)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('config')\n parser.add_argument('image')\n cli_args = parser.parse_args()\n"
] | [
[
"matplotlib.backends.backend_wxagg.FigureCanvasWxAgg",
"matplotlib.widgets.RectangleSelector",
"matplotlib.figure.Figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
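The crop selection above is driven by matplotlib's RectangleSelector, with the box coordinates read from the press/release events. A minimal sketch of the same pattern without the wx scaffolding; note that the drawtype='box' argument used above was deprecated and later removed in newer matplotlib releases, so it is omitted here:

import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector

def line_select_callback(eclick, erelease):
    # eclick and erelease are the press and release events, as in the GUI above
    x1, y1 = eclick.xdata, eclick.ydata
    x2, y2 = erelease.xdata, erelease.ydata
    print([int(x1), int(x2), int(y1), int(y2)])  # same ordering as the GUI's coords

fig, ax = plt.subplots()
ax.imshow([[0, 1], [1, 0]])  # stand-in for the video frame
selector = RectangleSelector(ax, line_select_callback, useblit=False,
                             button=[1], minspanx=5, minspany=5,
                             spancoords='pixels', interactive=True)
plt.show()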
KnightZhang625/Project_SentimentAnalysis | [
"e68193b8811a12217f9ae117aafe7f363c9f7caa"
] | [
"show_acc.py"
] | [
"import matplotlib.pyplot as plt\nimport codecs as co\nimport os\nimport sys\nimport numpy as np\n\nif len(sys.argv) > 1:\n LF = sys.argv[1]\nelse: \n LF = './log/loss_record'\nAF = './infer_precision'\n# ADF = './ad_losses'\n# GNG = './gng_losses'\n\nplt.figure(figsize=(22,12))\n\ndef full_arr(arr, target):\n while(len(arr) < target):\n arr.append(arr[-1])\n return arr\n\ndef draw_new_loss():\n global plt\n if (not os.path.exists(LF)):\n print(LF + ' does not exists.')\n return\n lines = co.open(LF).readlines()\n _pre_losses = []\n _dis_losses = []\n _sup_losses = []\n _adv_osses = []\n _lanlosses = []\n c = 0\n cache = []\n for line in lines:\n line = line.replace('\\n', '')\n line = line.split(',')\n for l in line:\n l = l.strip()\n l = l.split(':')\n if (l[0].strip() == 'pre_avg'):\n _pre_losses.append(float(l[1].strip()))\n elif (l[0].strip() == 'dis_avg'):\n _dis_losses.append(float(l[1].strip()))\n elif (l[0].strip() == 'sup_avg'):\n if c == 10:\n _sup_losses.append(np.sum(cache) / len(cache))\n cache = []\n # _sup_losses.append(float(l[1].strip()))\n c = 0\n else:\n if float(l[1].strip()) < 300:\n cache.append(float(l[1].strip()))\n c +=1\n elif (l[0].strip() == 'adv_avg'):\n _adv_osses.append(float(l[1].strip()))\n ma = max([len(_pre_losses), len(_dis_losses), len(_sup_losses), len(_adv_osses), len(_lanlosses)])\n if (len(_pre_losses) > 0):\n _pre_losses = full_arr(_pre_losses, ma)\n if (len(_dis_losses) > 0):\n _dis_losses = full_arr(_dis_losses, ma)\n if (len(_sup_losses) > 0):\n _sup_losses = full_arr(_sup_losses, ma)\n if (len(_adv_osses) > 0):\n _adv_osses = full_arr(_adv_osses, ma)\n if (len(_lanlosses) > 0):\n _lanlosses = full_arr(_lanlosses, ma)\n\n plot_id = 220\n # if (len(_pre_losses) > 0):\n # plot_id += 1\n # pre_sp = plt.subplot(231)\n # pre_sp.set_title('pre loss')\n # plt.plot(range(len(_pre_losses)), _pre_losses, 'k-', lw=2)\n # if (len(_dis_losses) > 0):\n # plot_id += 1\n # cls_sp = plt.subplot(231)\n # cls_sp.set_title('ad loss')\n # plt.plot(range(len(_dis_losses)), _dis_losses, 'g-', lw=2)\n if (len(_sup_losses) > 0):\n plot_id += 1\n # ad_sp = plt.subplot(232)\n # ad_sp.set_title('final loss')\n plt.plot(range(len(_sup_losses)), _sup_losses, 'r-', lw=1)\n # if (len(_adv_osses) > 0):\n # plot_id += 1\n # c_sp = plt.subplot(234)\n # c_sp.set_title('c loss')\n # plt.plot(range(len(_adv_osses)), _adv_osses, 'r-', lw=2)\n # if (len(_lanlosses) > 0):\n # plot_id += 1\n # lan_sp = plt.subplot(235)\n # lan_sp.set_title('lan loss')\n # plt.plot(range(len(_lanlosses)), _lanlosses, 'k-', lw=2)\n\n# def draw_loss():\n# global plt\n# if (not os.path.exists(LF)):\n# print(LF + ' does not exists.')\n# return\n# lines = co.open(LF).readlines()\n# data1 = []\n# data2 = []\n# for i in range(len(lines)):\n# line = lines[i].strip()\n# line = line.split(' : ')[1]\n# line = line.split(',')\n# data1.append(float(line[0]))\n# data2.append(float(line[1]))\n# assert len(data1) == len(data2)\n# x = range(1, len(data1) + 1)\n# sp = plt.subplot(221)\n# sp.set_title('main loss and z loss')\n# plt.plot(x, data1, 'r-', lw=2)\n# plt.plot(x, data2, 'b-', lw=2)\n# print('MIN LOSS: ' + str(min(data1)))\n# print('MIN LANTENT: ' + str(min(data2)))\n# # plt.show()\n\ndef draw_acc():\n global plt\n if (not os.path.exists(AF)):\n print(AF + ' does not exists.')\n return\n lines = co.open(AF).readlines()\n data1 = []\n for i in range(len(lines)):\n line = lines[i].strip()\n line = line.split(' : ')[1]\n # line = line.split(',')\n try:\n data1.append(float(line))\n except:\n continue\n x = range(1, 
len(data1) + 1)\n sp = plt.subplot(236)\n sp.set_title('accuracy, 1.0 is not the best.')\n plt.plot(x, data1, 'g-', lw=2)\n print('MAX PRECISION: ' + str(max(data1)))\n # plt.show()\n\n# def draw_adlosses():\n# global plt\n# if (not os.path.exists(ADF)):\n# print(ADF + ' does not exist.')\n# return\n# lines = co.open(ADF).readlines()\n# data1 = []\n# for i in range(len(lines)):\n# line = lines[i].strip()\n# line = line.replace('\\n', '')\n# # line = line.split(',')\n# try:\n# data1.append(float(line))\n# except:\n# continue\n# x = range(1, len(data1) + 1)\n# sp = plt.subplot(223)\n# sp.set_title('advesarial loss')\n# plt.plot(x, data1, 'c-', lw=2)\n# print('MIN AD_LOSS: ' + str(min(data1)))\n\n# def draw_gng():\n# global plt\n# if (not os.path.exists(GNG)):\n# print(GNG + ' does not exist.')\n# return\n# lines = co.open(GNG).readlines()\n# data1 = []\n# data2 = []\n# for i in range(len(lines)):\n# line = lines[i].strip()\n# line = line.replace('\\n', '')\n# line = line.split(',')\n# try:\n# data1.append(float(line[0]))\n# data2.append(float(line[1]))\n# except:\n# continue\n# x = range(1, len(data1) + 1)\n# sp = plt.subplot(224)\n# sp.set_title('discriminator loss')\n# plt.plot(x, data1, 'm-', lw=2)\n# plt.plot(x, data2, 'k-', lw=2)\n\n# draw_loss()\n# draw_acc()\n# draw_adlosses()\n# draw_gng()\ndraw_new_loss()\n# draw_acc()\nplt.grid(True)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
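draw_new_loss above smooths the sup_avg series by accumulating values in a cache and emitting one average per block of ten, after discarding spikes of 300 or more. The same block-averaging can be written vectorised with numpy; the loss values here are synthetic stand-ins:

import numpy as np

losses = np.abs(np.random.randn(1000)) * 50   # synthetic stand-in for sup_avg
losses = losses[losses < 300]                 # drop outliers, as the parser does
n_blocks = len(losses) // 10
smoothed = losses[: n_blocks * 10].reshape(n_blocks, 10).mean(axis=1)
print(smoothed[:5])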
sajjad2014/vq-vae-2-pytorch | [
"ef5f67c46f93624163776caec9e0d95063910eca"
] | [
"pixelsnail_mnist.py"
] | [
"import numpy as np\r\nimport torch\r\nfrom torch import nn, optim\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision import datasets\r\nfrom tqdm import tqdm\r\n\r\nfrom pixelsnail import PixelSNAIL\r\n\r\n\r\ndef train(epoch, loader, model, optimizer, device):\r\n loader = tqdm(loader)\r\n\r\n criterion = nn.CrossEntropyLoss()\r\n\r\n for i, (img, label) in enumerate(loader):\r\n model.zero_grad()\r\n\r\n img = img.to(device)\r\n\r\n out = model(img)\r\n loss = criterion(out, img)\r\n loss.backward()\r\n\r\n optimizer.step()\r\n\r\n _, pred = out.max(1)\r\n correct = (pred == img).float()\r\n accuracy = correct.sum() / img.numel()\r\n\r\n loader.set_description(\r\n (f'epoch: {epoch + 1}; loss: {loss.item():.5f}; ' f'acc: {accuracy:.5f}')\r\n )\r\n\r\n\r\nclass PixelTransform:\r\n def __init__(self):\r\n pass\r\n\r\n def __call__(self, input):\r\n ar = np.array(input)\r\n\r\n return torch.from_numpy(ar).long()\r\n\r\n\r\nif __name__ == '__main__':\r\n device = 'cuda'\r\n epoch = 10\r\n\r\n dataset = datasets.MNIST('.', transform=PixelTransform(), download=True)\r\n loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)\r\n\r\n model = PixelSNAIL([28, 28], 256, 128, 5, 2, 4, 128)\r\n model = model.to(device)\r\n\r\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\r\n\r\n for i in range(10):\r\n train(i, loader, model, optimizer, device)\r\n torch.save(model.state_dict(), f'checkpoint/mnist_{str(i + 1).zfill(3)}.pt')\r\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"numpy.array",
"torch.utils.data.DataLoader",
"torch.from_numpy"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
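train() above treats each pixel as a 256-way classification: the logits carry a class axis of size 256 per pixel, and the integer image itself is the target, which nn.CrossEntropyLoss supports directly for K-dimensional inputs. A shape-level sketch with random stand-in tensors (batch size and image size follow the MNIST setup above):

import torch
from torch import nn

out = torch.randn(32, 256, 28, 28)           # logits: (N, C=256, H, W)
img = torch.randint(0, 256, (32, 28, 28))    # integer pixel targets: (N, H, W)
loss = nn.CrossEntropyLoss()(out, img)       # per-pixel cross entropy
_, pred = out.max(1)                         # argmax over the class axis
accuracy = (pred == img).float().sum() / img.numel()
print(loss.item(), accuracy.item())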
davidhwyllie/findNeighbour4 | [
"d42e10711e59e93ebf0e798fbb1598929f662c9c"
] | [
"findn/rdbmsstore.py"
] | [
"#!/usr/bin/env python\n\"\"\" provides a storage layer for meta-data and snv distances from the\nfindneighbour4 system in a RDBMS\n\nTested with:\n- Oracle Autonomous (ATP and ADW cloud service)\n- Sqlite (but, sqlite can't be used as a server backend)\n\nNot tested:\nMS SQL server, PostgreSQL\n\nTested but doesn't work at present\nMySQL - the issue is with storage of character large objects in TEXT fields. The SQL alchemy version used make TEXT, not LARGETEXT fields.\n - Workarounds described https://stackoverflow.com/questions/47644739/what-column-type-does-sqlalchemy-use-for-text-on-mysql did not fix this\n - probably a soluble problem\n - tested with MySQL 8 on Ubuntu 20. Connection string was \"mysql://root:root@localhost:3306/test_db\" with user/password root\n\nA component of the findNeighbour4 system for bacterial relatedness monitoring\nCopyright (C) 2021 David Wyllie [email protected]\nrepo: https://github.com/davidhwyllie/findNeighbour4\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the MIT License as published\nby the Free Software Foundation. See <https://opensource.org/licenses/MIT>, and the LICENSE file.\n\n\n\"\"\"\n# import gc\nimport bson # type: ignore\nfrom datetime import datetime, timedelta, date\nimport hashlib\nimport os\nimport json\nimport pandas as pd\nimport psutil\nimport logging\nimport numpy as np\nimport warnings\nimport uuid\nimport cx_Oracle\nfrom sentry_sdk import capture_exception\n\nimport progressbar\nfrom sqlalchemy import (\n Integer,\n Column,\n Float,\n MetaData,\n Text,\n String,\n DateTime,\n Identity,\n Index,\n TIMESTAMP,\n func,\n create_engine,\n inspect,\n)\n\n# from sqlalchemy.pool import NullPool\nfrom findn.seq2json import SeqDictConverter\nfrom sqlalchemy.sql.expression import desc\nfrom sqlalchemy.orm import (\n sessionmaker,\n scoped_session,\n)\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom typing import (\n Any,\n Dict,\n Iterable,\n List,\n NoReturn,\n Optional,\n Set,\n TypedDict,\n Union,\n)\n\nGuid2NeighboursFormat1 = List[Union[str, int]]\nGuid2NeighboursFormat3 = Union[str]\nGuid2NeighboursFormat4 = Dict[str, Union[str, int]]\nGuid2NeighboursFormats = Union[\n Guid2NeighboursFormat1, Guid2NeighboursFormat3, Guid2NeighboursFormat4\n]\n\n\nclass RecentDatabaseMonitoringRet(TypedDict, total=False):\n recompression_data: bool\n latest_stats: Dict[str, Union[int, np.float64]]\n trend_stats: List[Dict[str, Any]]\n\n\n# global: definition of database structure\n# classes mapping to persistence database inherit from this\ndb_pc = declarative_base() # global\nmetadata = MetaData()\n\n\nclass RDBMSError(Exception):\n \"\"\"a general purpose error used by the rdbms module.\"\"\"\n\n pass\n\n\n## define schema\nclass BulkLoadTest(db_pc):\n \"\"\"used only for testing bulk uploads as part of unit testing\"\"\"\n\n __tablename__ = \"fn4_bulk_load_test\"\n blt_int_id = Column(Integer, Identity(start=1), primary_key=True)\n bulk1 = Column(Integer)\n bulk2 = Column(Integer)\n\n\nclass FNLock(db_pc):\n \"\"\"used for storing details of one or more classes of lock\"\"\"\n\n __tablename__ = \"fn4lock\"\n lock_int_id = Column(\n Integer,\n primary_key=True,\n comment=\"an integer reflecting the kind of lock studied\",\n )\n sequence_id = Column(\n String(60),\n comment=\"the sample_id represented by the entry; sample_ids are typically guids\",\n )\n lock_set_date = Column(\n TIMESTAMP, index=True, comment=\"the date and time the lock was modified\"\n )\n uuid = Column(String(32))\n 
lock_status = Column(\n Integer, comment=\"whether the lock is in place (1) or not in place (0)\"\n )\n\n\nclass Config(db_pc):\n \"\"\"stores config data\"\"\"\n\n __tablename__ = \"config\"\n cfg_int_id = Column(Integer, Identity(start=1), primary_key=True)\n config_key = Column(String(56), index=True, unique=True)\n config_value = Column(Text(50000000)) # 50M limit. Required for Mysql\n\n\nclass RefCompressedSeq(db_pc):\n \"\"\"stores reference compressed sequences, which are large character objects, and their annotations.\n\n Note: the mongodb equivalent is the GridFS meta-collection refcompressedseq and the standard collection guid2meta.\n \"\"\"\n\n __tablename__ = \"refcompressedseq\"\n seq_int_id = Column(\n Integer,\n Identity(start=1),\n primary_key=True,\n comment=\"the primary key to the table\",\n )\n sequence_id = Column(\n String(60),\n index=True,\n unique=True,\n comment=\"the sample_id represented by the entry; sample_ids are typically guids\",\n )\n examination_date = Column(\n TIMESTAMP,\n index=True,\n comment=\"the date and time the record was examined and compressed\",\n )\n annotations = Column(\n Text, comment=\"a json string, representing metadata about the sequence\"\n )\n invalid = Column(\n Integer,\n default=-1,\n index=True,\n comment=\"whether the sequence is of sufficient quality to be analysed (invalid = 0) or is not (invalid = 1). Part of the annotations, but extracted into separate field for indexing.\",\n )\n prop_actg = Column(\n Float,\n index=True,\n comment=\"the proportion of A,C,G,T (as opposed to N,-, or IUPAC codes). Part of the annotations, but extracted into separate field for indexing.\",\n )\n content = Column(\n Text, comment=\"a json string, representing the reference compressed sequence\"\n )\n\n\nclass Edge(db_pc):\n \"\"\"represents a pair of RefCompressedSeq which are similar. Table is called 'edge' because it represents an edge in a network in which the vertices are RefCompressedSequences.\n Note\n - that an edge A -> B is represented twice in this data representation: as A -> B, and as B -> A.\n - This means that all the edges of A can be obtained by\n statements such as SELECT * from Edge where seq_int_id_1 = 217, where 217 is the seq_int_id of sequence A.\n\n - if insertion is fast enough, we could enable foreign key constraints here.\n - it is likely that insert speed will be the main determinant of server speed\n - and the faster the inserts work the better.\n - in the mongo implementation, FK constraints are not implemented, and the relationships are guaranteed by application logic, not at a database level .\n At present, this approach is being used here too.\n\n \"\"\"\n\n __tablename__ = \"edge\"\n edge_int_id = Column(\n Integer,\n Identity(start=1),\n primary_key=True,\n comment=\"the primary key to the table\",\n )\n sequence_id_1 = Column(\n String(60),\n comment=\"One of a pair of sequences. Note: foreign key constraint not enforced at a database level\",\n )\n sequence_id_2 = Column(\n String(60),\n comment=\"One of a pair of sequences. 
Note: foreign key constraint not enforced at a database level \",\n )\n dist = Column(Integer, comment=\"the SNV distance between sequences\")\n\n\nIndex(\"ix_Edge_1\", Edge.sequence_id_1, unique=False, oracle_compress=1)\n\n\nclass Cluster(db_pc):\n \"\"\"stores clusters, which are large character objects (json)\"\"\"\n\n __tablename__ = \"sequence_cluster\"\n cl_int_id = Column(\n Integer,\n Identity(start=1),\n primary_key=True,\n comment=\"the primary key to the table\",\n )\n cluster_build_id = Column(\n String(40),\n index=True,\n comment=\"an identifier for the contents; this is typically a sha-1 hash of the contents\",\n )\n upload_date = Column(\n DateTime, index=True, comment=\"the time the record was inserted\"\n )\n content = Column(Text, comment=\"a json string describing the cluster\")\n\n\nclass Monitor(db_pc):\n \"\"\"stores monitor entries, which are large character objects (html)\"\"\"\n\n __tablename__ = \"monitor\"\n mo_int_id = Column(\n Integer,\n Identity(start=1),\n primary_key=True,\n comment=\"the primary key to the table\",\n )\n mo_id = Column(\n String(40),\n index=True,\n unique=True,\n comment=\"an identifier for the contents; this is typically a sha-1 hash of the contents\",\n )\n content = Column(Text, comment=\"html data\")\n\n\nclass ServerMonitoring(db_pc):\n \"\"\"stores server monitoring entries, which are large character objects (json)\"\"\"\n\n __tablename__ = \"server_monitoring\"\n sm_int_id = Column(\n Integer,\n Identity(start=1),\n primary_key=True,\n comment=\"the primary key to the table\",\n )\n sm_id = Column(\n String(40),\n index=True,\n unique=True,\n comment=\"an identifier for the contents; this is typically a sha-1 hash of the contents\",\n )\n upload_date = Column(\n DateTime, index=True, comment=\"the time the record was inserted\"\n )\n content = Column(Text, comment=\"a json dictionary including \")\n\n\nclass MSA(db_pc):\n \"\"\"stores multisequence alignments, which are large character objects (json)\"\"\"\n\n __tablename__ = \"msa\"\n msa_int_id = Column(\n Integer,\n Identity(start=1),\n primary_key=True,\n comment=\"the primary key to the table\",\n )\n msa_id = Column(\n String(60),\n index=True,\n unique=True,\n comment=\"an identifier for the contents; this is typically a sha-1 hash of the contents\",\n )\n upload_date = Column(\n DateTime, index=True, comment=\"the time the record was inserted\"\n )\n content = Column(Text, comment=\"character large object containing a json string\")\n\n\nclass TreeStorage(db_pc):\n \"\"\"stores trees and related data, which are large character objects (json)\"\"\"\n\n __tablename__ = \"tree\"\n ts_int_id = Column(\n Integer,\n Identity(start=1),\n primary_key=True,\n comment=\"the primary key to the table\",\n )\n ts_id = Column(\n String(40),\n index=True,\n unique=True,\n comment=\"an identifier for the contents; this is typically a sha-1 hash of the contents\",\n )\n upload_date = Column(\n DateTime, index=True, comment=\"the time the record was inserted\"\n )\n content = Column(Text, comment=\"character large object containing a json string\")\n\n\nclass NPEncoder(json.JSONEncoder):\n \"\"\"encodes Numpy and datetime types as jsonisable equivalents\"\"\"\n\n def default(self, obj):\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n elif isinstance(obj, date):\n return obj.isoformat()\n elif isinstance(obj, datetime):\n return obj.isoformat()\n else:\n return 
super(NPEncoder, self).default(obj)\n\n\nclass fn3persistence_r:\n \"\"\"System for persisting results from large numbers of sequences stored in FindNeighbour 3+.\n Uses a generic rdbms, with optimisations for Oracle databases when using the cx_oracle package.\n\n the associated schema is defined using SQLalchemy tables, as above.\n\n Note that parts of this code are taken from the class pcadb.PCADatabaseManager.\n In future, it may be possible to integrate this class with that, if successful\n implementation of findNeighbour4 functionality using an RDBMS is possible\n\n See also the Persistence class in the persistence module.\n This provides an API which will either use this class, or the mongo based fn3persistence class, depending on\n software settings.\n\n \"\"\"\n\n def __init__(\n self, connection_config, debug=0, server_monitoring_min_interval_msec=0\n ):\n\n \"\"\"creates the RDBMS connection\n\n Parameters\n -----------\n connection_config:\n One of\n 1. a key to a dictionary containing one or more database configuration details: (e.g. 'prod', 'test')\n 2. a valid sqlalchemy database connection string (if this is sufficient for connections) e.g. 'pyodbc+mssql://myserver'\n 3. None. This is considered to mean 'sqlite://' i.e. an in memory sqlite database, which is not persisted when the program stops.\n\n if it is not none, a variable called DB_CONNECTION_CONFIG_FILE must be present. This must point to a file containing credentials.\n the name of an environment variable containing (in json format) a dictionary, or None if it is not required.\n An example of such a dictionary is as below:\n {\n 'prod':{'DBTYPE':'sqlite', 'ENGINE_NAME':'sqlite:///db/proddb.sqlite'}\n 'dev': {'DBTYPE':'sqlite', 'ENGINE_NAME':'sqlite:///db/devdb.sqlite'},\n 'test':{'DBTYPE':'sqlite', 'ENGINE_NAME':'sqlite://'}\n }\n The DBTYPE and ENGINE_NAME keys are essential.\n Other keys may also be present, and are required in some cases (for example by Oracle connections).\n {\n 'prod':{'DBTYPE':'oracle',\n 'ENGINE_NAME':''oracle+cx_oracle://PROD:97bxxxxxxxxX@(description: .........)))',\n 'TNS_ADMIN':'/secrets/oracle/pca_prod'\n },\n\n 'dev':{'DBTYPE':'oracle',\n 'ENGINE_NAME':''oracle+cx_oracle://PROD:97bxxxxxxxxX@(description: .........)))',\n 'TNS_ADMIN':'/secrets/oracle/pca_prod'\n }\n }\n Note, the bit after the @(description describes where your database is, and will be found in your cloud wallet, if you are using cloud databases. See below.\n In this case, TNS_ADMIN is the value you wish the TNS_ADMIN environment variable to be set to.\n The software will set TNS_ADMIN, and, if you are using a virtual environment, it will be scoped to the virtual environment.\n In summary, it will configure necessary settings to allow Oracle database connections.\n\n Note, if you are using a python virtual environment, this environment variable should be included in the .env file in the root of your project. The .env file should not be under source control.\n\n configuration engine_name: an SQLalchemy connect string, e.g. sqlite::// for temporary memory db, see https://docs.sqlalchemy.org/en/13/core/engines.html\n debug: if True, deletes any existing data on startup.\n show_bar: show a progress bar during long operations\n\n\n NOTE:\n This software has been tested with\n (i) Sqlite 3.3.2+\n (ii) Oracle Autonomous Database (cloud)\n https://blogs.oracle.com/oraclemagazine/getting-started-with-autonomous\n\n To connect to Oracle there are several steps.\n 0. 
Install dependencies, see\n https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html\n wget https://download.oracle.com/otn_software/linux/instantclient/211000/instantclient-basic-linux.x64-21.1.0.0.0.zip\n need to set the LD_LIBRARY_PATH variable, see\n https://www.oracle.com/database/technologies/instant-client/linux-x86-64-downloads.html\n e.g. export LD_LIBRARY_PATH=/data/software/instantclient_21_1:LD_LIBRARY_PATH\n\n these parameters have to go into the python .env file e.g.\n ----------------------------------------------------------------\n LD_LIBRARY_PATH=\"/software/instantclient_21_1\"\n PCADB_CONNECTION_CONFIGFILE=\"/secret/config.json\"\n\n Where config.json looks like\n {\n 'prod':{'DBTYPE':'oracle',\n 'ENGINE_NAME':''oracle+cx_oracle://PROD:97bxxxxxxxxX@(description: .........)))',\n 'TNS_ADMIN':'/secrets/oracle/pca_prod'\n }\n }\n ** NOTE: as per normal json conventions, escape quotes (i.e. \\\" not \" around the certificate name, otherwise SSL connections will fail) **\n\n 1. Download your OCI wallet, & unzip it somewhere\n 2. Set the TNS_ADMIN env var to point to this directory\n 3. Edit the WALLET_LOCATION in the sqlnet.ora file to point to the relevant directory, e.g. WALLET_LOCATION = (SOURCE = (METHOD = file) (METHOD_DATA = (DIRECTORY=\"/data/credentials/oci_test\")))\n 4. Create a user with relevant privileges see below)\n 5. Set the OCI_ENGINE_NAME env var.\n An example of this is as below (redacted so not live)\n oracle+cx_oracle://scott:tigerX22@(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1522)(host=host.oraclecloud.com))(connect_data=(service_name=redacted))(security=(ssl_server_cert_dn=\"redacted\")))\n\n\n This data can be found in the tnsnames.ora file: for details, see\n https://docs.sqlalchemy.org/en/14/dialects/oracle.html#dialect-oracle-cx_oracle-connect\n https://stackoverflow.com/questions/37471892/using-sqlalchemy-dburi-with-oracle-using-external-password-store\n https://stackoverflow.com/questions/14140902/using-oracle-service-names-with-sqlalchemy/35215324\n https://blogs.oracle.com/sql/how-to-create-users-grant-them-privileges-and-remove-them-in-oracle-database\n https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/GRANT.html#GUID-20B4E2C0-A7F8-4BC8-A5E8-BE61BDC41AC3\n\n Configuring interactions with external OCI databases\n ====================================================\n Your application will need to run as a user (we'll call it PCADB) which will need some privileges granted.\n The exact privileges required involve creating and dropping tables & indexes, as well as inserting and deleting data.\n CREATE USER PCADB IDENTIFIED BY 'MyPassword1234!';\n GRANT CONNECT TO PCADB;\n GRANT CREATE SESSION TO PCADB;\n GRANT CREATE SEQUENCE TO PCADB;\n GRANT CREATE TABLE TO PCADB;\n GRANT CREATE SYNONYM TO PCADB;\n ALTER USER PCADB DEFAULT TABLESPACE DATA quota unlimited on DATA;\n \"\"\"\n\n self.sjc = SeqDictConverter()\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.INFO)\n\n self.debug = debug\n self.storage_technology = \"rdbms\"\n\n self.logger.info(\"Storage technology is {0}\".format(self.storage_technology))\n\n self.server_monitoring_min_interval_msec = server_monitoring_min_interval_msec\n self.previous_server_monitoring_data = {}\n self.previous_server_monitoring_time = None\n\n # connect and create session. 
Validate inputs carefully.\n if connection_config is None:\n self.logger.info(\"Connection config is None: using in-memory sqlite.\")\n self.engine_name = \"sqlite://\"\n\n elif \"://\" in connection_config:\n # it's not None, and we assume what we are provided is an sqlalchemy database connection string\n self.logger.info(\n \"Connection config provided; using {0}\".format(connection_config)\n )\n self.engine_name = connection_config\n else:\n # we have been passed a token. this should be a key to a dictionary, stored in\n # DB_CONNECTION_CONFIG_FILE which contains credentials\n conn_detail_file = None\n try:\n conn_detail_file = os.environ[\"DB_CONNECTION_CONFIG_FILE\"]\n except KeyError:\n raise RDBMSError(\n \"Environment variable DB_CONNECTION_CONFIG_FILE does not exist; however, it is required. If you are using a python virtual environment, you need to set it in .env, not globally\"\n )\n\n if conn_detail_file is None:\n # we failed to set it\n raise RDBMSError(\n \"Tried to set conn_detail_file from environment variable DB_CONNECTION_CONFIG_FILE, but it is still None.\"\n )\n\n if not os.path.exists(conn_detail_file):\n raise FileNotFoundError(\n \"Connection file specified but not found: {0}\".format(\n conn_detail_file\n )\n )\n\n # read the config file\n with open(conn_detail_file, \"rt\") as f:\n conn_detail = json.load(f)\n\n if connection_config not in conn_detail.keys():\n raise RDBMSError(\n \"Connection {0} does not correspond to one of the keys {1} of the configuration json file at {2}\".format(\n connection_config, conn_detail.keys(), conn_detail_file\n )\n )\n\n # configure engine\n this_configuration = conn_detail[\n connection_config\n ] # extract the relevant part of the config dictionary\n\n # two keys are always present\n essential_keys = set([\"DBTYPE\", \"ENGINE_NAME\"])\n if len(essential_keys - set(this_configuration.keys())) > 0:\n raise RDBMSError(\n \"Provided keys for {0} are not correct. Required are {1}\".format(\n connection_config, essential_keys\n )\n )\n\n # if it's Oracle, then three keys are required.\n if this_configuration[\"DBTYPE\"] == \"oracle\":\n essential_keys = set([\"DBTYPE\", \"ENGINE_NAME\", \"TNS_ADMIN\"])\n if len(essential_keys - set(this_configuration.keys())) > 0:\n raise RDBMSError(\n \"Provided keys for oracle db in {0} are not correct. Required are {1}\".format(\n connection_config, essential_keys\n )\n )\n\n # set the TNS_ADMIN variable.\n self.logger.info(\n \"Set TNS_ADMIN to value specified in config file {0}\".format(\n this_configuration[\"TNS_ADMIN\"]\n )\n )\n os.environ[\"TNS_ADMIN\"] = this_configuration[\"TNS_ADMIN\"]\n\n self.logger.info(\"Set ENGINE_NAME configuration string from config file.\")\n self.engine_name = this_configuration[\"ENGINE_NAME\"]\n\n self.using_sqlite = self.engine_name.startswith(\"sqlite://\")\n\n # now we can start\n self.Base = db_pc\n self.logger.info(\"DatabaseManager: Connecting to database\")\n self.is_oracle = \"oracle+cx\" in self.engine_name\n self.is_sqlite = \"sqlite://\" in self.engine_name\n self.show_bar = True # maybe define a method to switch this off\n\n self._create_engine()\n\n self.logger.info(\n \"DatabaseManager: Database connection made; there are {0} tables. 
Oracle database = {1}\".format(\n len(self._table_names()), self.is_oracle\n )\n )\n\n self.Base.metadata.create_all(\n bind=self.engine\n ) # create the table(s) if they don't exist\n\n # create thread-local sessions, see https://docs.sqlalchemy.org/en/14/orm/contextual.html#using-custom-created-scopes\n session_factory = sessionmaker(bind=self.engine)\n\n # this object will call session factory when we create/request a thread local session\n # e.g. thread_local_session = self.Session()\n self.Session = scoped_session(session_factory)\n\n # drop existing tables if in debug mode\n # delete any pre-existing data if we are in debug mode.\n if debug == 2:\n logging.warning(\n \"Debug mode operational [DEBUG={0}]; deleting all data from tables.\".format(\n debug\n )\n )\n self._delete_existing_clustering_data()\n self._delete_existing_data()\n\n else:\n self.logger.info(\"Using stored data in rdbms\")\n\n def _create_engine(self):\n \"\"\"create database connection engine\"\"\"\n if self.is_oracle:\n # it is important to set arraysize, see\n\n # https://docs.sqlalchemy.org/en/14/dialects/oracle.html\n # https://cx-oracle.readthedocs.io/en/latest/user_guide/tuning.html#tuningfetch\n\n # but if you set it too large you can get DPI-1015 (allocated cursor size > 2G) see\n # https://cx-oracle.readthedocs.io/en/latest/api_manual/cursor.html\n\n # to disable connection pooling\n # https://docs.sqlalchemy.org/en/14/core/pooling.html#switching-pool-implementations\n\n # self.engine = create_engine(\n # self.engine_name, arraysize=1000000,\n # poolclass=NullPool\n # ) # fetch results in batches of 1m.\n\n self.engine = create_engine(\n self.engine_name, arraysize=1000000\n ) # fetch results in batches of 1m.\n\n logging.info(\"Created engine connecting to Oracle database\")\n else:\n # sqlalchemy generic pool manager, for non-Oracle databases\n self.engine = create_engine(self.engine_name)\n\n # oracle pool manager code\n # use cx_Oracle pool manager\n # u, p, dsn = self.oracle_connstring_parts(self.engine_name)\n # self.oracle_pool = cx_Oracle.SessionPool(\n # user = u,\n # password = p,\n # dsn = dsn,\n # min = 4,\n # max = 4,\n # encoding=\"UTF-8\",\n # nencoding=\"UTF-8\"\n # )\n # self.engine = create_engine(\"oracle://\", creator = self.oracle_pool.acquire, poolclass = NullPool)\n\n def closedown(self):\n \"\"\"closes the session(s) & disposes of any engine.\n Is required for unit testing\"\"\"\n try:\n self.engine.dispose()\n except AttributeError as e:\n # the class may not have an engine object attached, generates an AttributeError\n pass \n logging.info(\n \"Failed to dispose of engine during closedown(). AttributeError logged to sentry\"\n )\n capture_exception(e)\n except Exception as e1:\n logging.warning(\n \"Failed to dispose of engine during closedown(). Error logged to sentry\"\n )\n capture_exception(e1)\n\n def no_progressbar(self):\n \"\"\"don't use progress bars\"\"\"\n self.show_bar = False\n\n def _table_names(self):\n \"\"\"returns table names in the schema.\n If the schema's contents have not been created, returns an empty list\"\"\"\n return inspect(self.engine).get_table_names()\n\n def thread_local_session(self, n_retries=3, simulate_failure=\"no\", log_errors=True):\n \"\"\"generates, or selects a thread local session from a session factory or pool.\n\n For context, see https://docs.sqlalchemy.org/en/13/orm/contextual.html\n\n Checks that the session recovered from the session pool is still valid, since they can time out.\n If it is timed out, tries another. 
Will retry up to n_retries times.\n\n        Parameters:\n        n_retries: the number of attempts which will be made to generate a functional connection.\n        simulate_failure: simulates the failure of a connection (closes the connection before returning it) - used only for unittesting. Valid values:\n            'no' : normal operation\n            'once': fail once\n            'always': fail every time, even on repeated attempts to connect\n        log_errors: logs any errors to sentry & error log (recommended)\"\"\"\n\n        tries = n_retries\n        while tries > 0:\n            tries = tries - 1\n            tls = self.Session()\n\n            # simulate failure if required to do so; tries + 1 == n_retries is true on the first attempt only\n            if (simulate_failure == \"once\" and tries + 1 == n_retries) or (\n                simulate_failure == \"always\"\n            ):\n                tls = (\n                    None  # not a session object. Attempts to use it as such will fail.\n                )\n\n            # test whether it is working\n            try:\n                tls.query(Config).filter_by(\n                    config_key=\"config\"\n                ).first()  # try to connect\n\n                # if execution continues here, the session works\n                return tls\n\n            except Exception as e1:\n                logging.info(\n                    \"Failed to connect on trial {0}/{1}; error raised was {2}. Recreating engine\".format(\n                        n_retries - tries, n_retries, e1\n                    )\n                )\n\n                if log_errors:\n                    capture_exception(e1)\n                    logging.error(e1)\n\n                # try to remove the failing session, if it has been constructed properly\n                try:\n                    self.engine.dispose()\n                    tls.close()\n                except Exception:\n                    logging.info(\"Failed to remove session and dispose of engine\")\n\n                # dispose attempt complete; try reconnecting\n                self._create_engine()\n\n        # could not connect despite multiple attempts.\n        raise RDBMSError(\n            \"Could not connect to database. Tried {0} times with different sessions despite recreating database connection\".format(\n                n_retries\n            )\n        )\n\n    def _drop_existing_tables(self):\n        \"\"\"drops any existing tables, creating them first if necessary so the drop calls succeed\"\"\"\n\n        self.Base.metadata.create_all(\n            self.engine\n        )  # create the table(s) if they don't already exist\n\n        BulkLoadTest.__table__.drop(self.engine)\n        Config.__table__.drop(self.engine)\n        Edge.__table__.drop(self.engine)\n        Cluster.__table__.drop(self.engine)\n        Monitor.__table__.drop(self.engine)\n        ServerMonitoring.__table__.drop(self.engine)\n        MSA.__table__.drop(self.engine)\n        TreeStorage.__table__.drop(self.engine)\n        RefCompressedSeq.__table__.drop(self.engine)\n\n        remaining = len(self._table_names())\n        if remaining > 0:\n            warnings.warn(\n                \"Failed to remove all tables in the database. Is this database being used by another program? The following remain: {0}\".format(\n                    self._table_names()\n                )\n            )\n\n        return\n\n    def oracle_connstring_parts(self, connstring):\n        \"\"\"splits an oracle connection string into username, password and DSN\"\"\"\n        if connstring.startswith(\"oracle+cx_oracle://\"):\n            e1 = connstring.replace(\"oracle+cx_oracle://\", \"\")\n            up, dsn = e1.split(\"@\")\n            u, p = up.split(\":\")\n            return u, p, dsn\n        else:\n            return None\n\n    def _bulk_load(self, upload_df, target_table, max_batch=100000):\n        \"\"\"bulk uploads pandas dataframes.\n        If using an oracle database, uses Oracle's cx-oracle package to bulk upload data\n\n        upload_df: a pandas dataframe. 
Names **must match** the table target_table\n        target_table: the name of the table to upload into\n        (note: this is not the name of the class in the table definition, it's the name of the table in the SQL)\n\n        Returns:\n        number of items uploaded (integer)\n\n        Background:\n        - ORM is slow for large inserts\n        - Oracle is not currently supported by pandas .to_sql method='multi', so a bespoke method is needed\n        - Need custom code for bulk loading to Oracle. Methods are provided in the cx_oracle package, see\n        https://cx-oracle.readthedocs.io/en/latest/user_guide/batch_statement.html\n\n        The maximum size that can be transmitted to the Oracle server is 2G.\n        If this limit is exceeded, a cx_Oracle.DatabaseError\n        is raised with result DPI-1015. Code to prevent this by auto-setting max_batch is provided below.\n\n        \"\"\"\n\n        # verify data : ensure that target_table is a table [Essential: otherwise, can be a vector for SQL injection]\n        if target_table not in self._table_names():\n            raise ValueError(\n                \"target_table {0} does not exist: valid tables are {1}\".format(\n                    target_table, self._table_names()\n                )\n            )\n\n        # check that upload_df is a pandas dataframe\n        if not isinstance(upload_df, pd.DataFrame):\n            raise TypeError(\n                \"upload_df needs to be pandas DataFrame, not a {0}\".format(\n                    type(upload_df)\n                )\n            )\n\n        # check that we have been passed data\n        ncol = len(upload_df.columns)\n        if ncol == 0:\n            # we treat this as an error\n            raise RDBMSError(\n                \"Passed data frame to _bulk_load for {0} contains no columns {1}\".format(\n                    target_table, upload_df\n                )\n            )\n\n        self.logger.info(\"Bulk upload to {0} started\".format(target_table))\n\n        if self.is_sqlite:\n            # there is a max variable limit of 32,766 for Sqlite 3.32.0, see https://www.sqlite.org/limits.html\n            # set max_batch to keep below this.\n            max_batch = int(32000 / ncol)\n            self.logger.info(\n                \"Autoset max_batch to {0}, as running SQLite\".format(max_batch)\n            )\n\n        if self.is_oracle:\n            # ************* commit via cx_Oracle bulk upload syntax ****************\n            # parse the engine_name into dsn, database & password\n\n            u, p, dsn = self.oracle_connstring_parts(self.engine_name)\n\n            # get into the right format for loading: note: holds all data in ram\n            loadvar = list(upload_df.itertuples(index=False, name=None))\n            ncol = len(upload_df.columns.to_list())\n\n            # estimate the maximum buffer size required and auto-reduce the max_batch if required.\n            estimated_row_size = len(str(loadvar[0]))\n            estimated_max_batch = int(\n                1e7 / (estimated_row_size)\n            )  # large margin of safety: 10M batch size\n            if estimated_max_batch < max_batch:\n                max_batch = estimated_max_batch\n                self.logger.info(\n                    \"Reduced max_batch to keep estimated buffer size within acceptable limits (<= 10M target). 
max_batch is {0}\".format(\n max_batch\n )\n )\n\n # construct sql statment.\n # Should be injection-safe; we have checked the target_table is a table, and are incorporating integers and verified strings only.\n collabels = [\":\" + str(x + 1) for x in list(range(ncol))]\n insert_cols = \",\".join(collabels)\n target_cols = \",\".join(upload_df.columns.to_list())\n sql_statement = \"INSERT INTO {0} ({1}) VALUES ({2})\".format(\n target_table, target_cols, insert_cols\n )\n start_n = len(loadvar)\n if self.show_bar:\n bar = progressbar.ProgressBar(max_value=start_n)\n\n with cx_Oracle.connect(user=u, password=p, dsn=dsn) as conn:\n cursor = conn.cursor()\n\n while len(loadvar) > 0:\n if self.show_bar:\n bar.update(start_n - len(loadvar))\n try:\n\n cursor.executemany(sql_statement, loadvar[0:max_batch])\n loadvar = loadvar[max_batch:]\n conn.commit()\n except Exception:\n conn.rollback()\n raise\n\n if self.show_bar:\n bar.finish()\n\n else:\n\n # ***************************** ALL DATABASES OTHER THAN ORACLE ************************\n # note: there may be limits to the complexity of the statement permitted: a maximum number of parameters.\n # therefore, we do this in batches as well.\n start_n = len(upload_df.index)\n if self.show_bar:\n bar = progressbar.ProgressBar(max_value=start_n)\n\n while len(upload_df.index) > 0:\n self.logger.info(\n \"Bulk upload of {0} : {1} remain\".format(\n target_table, len(upload_df)\n )\n )\n to_upload = upload_df.head(n=max_batch)\n to_upload.to_sql(\n target_table,\n self.engine,\n if_exists=\"append\",\n index=False,\n method=\"multi\",\n ) # pandas method\n upload_df = upload_df.iloc[max_batch:]\n if self.show_bar:\n bar.update(start_n - len(upload_df.index))\n\n self.logger.info(\"Bulk upload to {0} complete\".format(target_table))\n if self.show_bar:\n bar.finish()\n return len(upload_df.index)\n\n def delete_server_monitoring_entries(self, before_seconds: int) -> None:\n \"\"\"deletes server monitoring entries more than before_seconds ago\"\"\"\n try:\n tls = self.thread_local_session()\n earliest_allowed = datetime.now() - timedelta(seconds=before_seconds)\n tls.query(ServerMonitoring).filter(\n ServerMonitoring.upload_date < earliest_allowed\n ).delete()\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n raise\n\n def summarise_stored_items(self) -> Dict[str, Any]:\n \"\"\"counts how many sequences exist of various types\"\"\"\n return {}\n\n def connect(self) -> None:\n \"\"\"test whether the database is connected, and if not, tries to connect.\n Does nothing here, just a stub.\"\"\"\n pass\n\n def rotate_log(self) -> None:\n \"\"\"forces rotation of the mongo log file; a stub here, does nothing\"\"\"\n pass\n\n def raise_error(self, token: str) -> NoReturn:\n \"\"\"raises a ZeroDivisionError, with token as the message.\n useful for unit tests of error logging\"\"\"\n raise ZeroDivisionError(token)\n\n def _delete_existing_data(self) -> None:\n \"\"\"deletes existing data from the databases\"\"\"\n try:\n tls = self.thread_local_session()\n\n tls.query(Config).delete()\n tls.query(Edge).delete()\n tls.query(RefCompressedSeq).delete()\n tls.query(Monitor).delete()\n tls.query(ServerMonitoring).delete()\n tls.query(BulkLoadTest).delete()\n tls.query(Cluster).delete()\n tls.query(MSA).delete()\n tls.query(TreeStorage).delete()\n tls.query(FNLock).delete()\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n\n def _delete_existing_clustering_data(self) -> None:\n \"\"\"deletes any clustering data from the 
databases\"\"\"\n\n try:\n tls = self.thread_local_session()\n tls.query(Cluster).delete()\n tls.query(MSA).delete()\n tls.query(TreeStorage).delete()\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n raise\n\n def first_run(self) -> bool:\n \"\"\"if there is no config entry, it is a first-run situation\"\"\"\n tls = self.thread_local_session()\n row = tls.query(Config).filter_by(config_key=\"config\").first()\n return row is None\n\n def __del__(self) -> None:\n \"\"\"closes any session\"\"\"\n self.closedown()\n\n def memory_usage(self) -> Dict[str, Union[int, float]]:\n \"\"\"returns memory usage by current python3 process\n Uses the psutil module, as the resource module is not available in windows.\n \"\"\"\n memdict = psutil.virtual_memory()._asdict()\n sm = {\"server|mstat|\" + k: v for k, v in memdict.items()}\n return sm\n\n # methods for the config collection\n def config_store(self, key: str, object: Dict[str, Any]) -> Any:\n \"\"\"stores object into config collection\n It is assumed object is a dictionary\n \"\"\"\n\n if not isinstance(object, dict):\n raise TypeError(\n \"Can only store dictionary objects, not {0}\".format(type(object))\n )\n tls = self.thread_local_session()\n if row := tls.query(Config).filter_by(config_key=key).first():\n row.config_value = json.dumps(object, cls=NPEncoder).encode(\"utf-8\")\n else:\n try:\n row = Config(\n config_key=key,\n config_value=json.dumps(object, cls=NPEncoder).encode(\"utf-8\"),\n )\n tls.add(row)\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n raise\n\n def config_read(self, key: str) -> Any:\n \"\"\"loads object from config.\n It is assumed object is a dictionary\"\"\"\n tls = self.thread_local_session()\n row = tls.query(Config).filter_by(config_key=key).first()\n if row is None:\n return None\n else:\n return dict(_id=key, **json.loads(row.config_value))\n\n # methods for the server and database monitoring\n def recent_database_monitoring(\n self, max_reported: int = 100\n ) -> RecentDatabaseMonitoringRet:\n \"\"\"computes trends in the number of records holding pairs (guid2neighbours) vs. records.\n This ratio is a measure of database health. Ratios > 100 indicate the database may become very large, and query slowly\"\"\"\n return {\"recompression_data\": False, \"latest_stats\": {\"storage_ratio\": 1}}\n\n def _to_string(self, x=[str, bytes]) -> str:\n \"\"\"returns a string version of x; if x is bytes, returns a string, assuming utf-8 encoding\n\n This function is required because some databases return bytes objects\n from 'text' fields (like sqlite) while others return str objects (like oracle)\n \"\"\"\n\n if isinstance(x, bytes):\n return x.decode(\"utf-8\")\n else:\n return x\n\n def recent_server_monitoring(\n self,\n max_reported: int = 100,\n selection_field: Optional[str] = None,\n selection_string: Optional[str] = None,\n ) -> List[dict]:\n \"\"\"returns a list containing recent server monitoring, in reverse order (i.e. tail first).\n The _id field is an integer reflecting the order added. Lowest numbers are most recent.\n\n Inputs\n max_reported - return this number of lines, at most.\n selection_field - if not None, will only return lines containing selection_string\n in the 'selection_field' key of the returned dictionary.\n selection_string -if selection_field is not None, only returns rows if\n selection_string is present in the 'selection_field' key of the\n monitoring element. 
If None, this constraint is ignored.\n \"\"\"\n\n if not isinstance(max_reported, int):\n raise TypeError(\n f\"limit must be an integer, but it is a {type(max_reported)}\"\n )\n if not max_reported >= 0:\n raise ValueError(\"limit must be more than or equal to zero\")\n\n if max_reported == 0:\n return []\n\n def row_to_dict(res: ServerMonitoring) -> dict:\n d = json.loads(res.content)\n d[\"_id\"] = res.sm_int_id\n if selection_field is None:\n return d\n else:\n if d[selection_field] == selection_string:\n return d\n else:\n return None\n\n tls = self.thread_local_session()\n return [\n d\n for res in tls.query(ServerMonitoring)\n .order_by(desc(ServerMonitoring.sm_int_id))\n .limit(max_reported)\n for d in (row_to_dict(res),)\n if d is not None\n ]\n\n def server_monitoring_store(\n self,\n message: str = \"No message provided\",\n what: Optional[str] = None,\n guid: Optional[str] = None,\n content: Dict[str, Any] = {},\n ) -> bool:\n \"\"\"stores content, a dictionary, into the server monitoring log\"\"\"\n\n if not isinstance(content, dict):\n raise TypeError(\n \"Can only store dictionary objects, not {0}\".format(type(content))\n )\n tls = self.thread_local_session()\n now = dict(content)\n if what is not None:\n now[\"content|activity|whatprocess\"] = what\n if guid is not None:\n now[\"content|activity|guid\"] = guid\n now[\"context|info|message\"] = message\n current_time = datetime.now()\n now[\"context|time|time_now\"] = str(current_time.isoformat())\n now[\"context|time|time_boot\"] = datetime.fromtimestamp(\n psutil.boot_time()\n ).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # should we write this data? We have the option not to log all messages, to prevent the store getting very full.\n write_content = False\n if self.previous_server_monitoring_time is None:\n # yes if this is the first record written.\n write_content = True\n else:\n time_since_last_write = current_time - self.previous_server_monitoring_time\n\n # yes if it's after the previous_server_monitoring_time in milliseconds\n t = (\n 1000 * float(time_since_last_write.seconds)\n + float(time_since_last_write.microseconds) / 1000\n )\n if t >= self.server_monitoring_min_interval_msec:\n write_content = True\n\n if write_content:\n try:\n json_now = json.dumps(now).encode(\"utf-8\")\n row = ServerMonitoring(\n sm_id=hashlib.sha1(json_now).hexdigest(),\n upload_date=current_time,\n content=json_now,\n )\n tls.add(row)\n self.previous_server_monitoring_time = current_time\n self.previous_server_monitoring_data = now\n tls.commit()\n # finished\n return True\n except Exception:\n tls.rollback()\n raise\n else:\n return False\n\n # methods for monitor, which store the contents of an html file\n # in a gridFS store.\n def monitor_store(self, monitoring_id: str, html: str) -> str:\n \"\"\"stores the monitor output string html. Overwrites any prior object.\"\"\"\n\n if not isinstance(html, str):\n raise TypeError(\"Can only store string objects, not {0}\".format(type(html)))\n try:\n tls = self.thread_local_session()\n tls.query(Monitor).filter_by(mo_id=monitoring_id).delete()\n\n row = Monitor(mo_id=monitoring_id, content=html.encode(\"utf-8\"))\n tls.add(row)\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n\n def monitor_read(self, monitoring_id: str) -> Optional[str]:\n \"\"\"loads stored string (e.g. 
html object) from the monitor collection.\"\"\"\n tls = self.thread_local_session()\n if res := tls.query(Monitor).filter_by(mo_id=monitoring_id).first():\n return self._to_string(res.content)\n else:\n return None\n\n # methods for multisequence alignments\n def msa_store(self, msa_token: str, msa: dict) -> Optional[str]:\n \"\"\"stores the msa object msa under token msa_token.\"\"\"\n tls = self.thread_local_session()\n if not isinstance(msa, dict):\n raise TypeError(\n \"Can only store dictionary objects, not {0}\".format(type(msa))\n )\n\n # we don't replace. These entries are write once.\n if tls.query(MSA).filter_by(msa_id=msa_token).one_or_none() is None:\n try:\n res = MSA(\n msa_id=msa_token,\n upload_date=datetime.now(),\n content=json.dumps(msa).encode(\"utf-8\"),\n )\n tls.add(res)\n tls.commit()\n except Exception:\n tls.rollback()\n raise\n\n def msa_read(self, msa_token: str) -> Optional[dict]:\n \"\"\"loads object from msa collection.\n It is assumed object is a dictionary\"\"\"\n tls = self.thread_local_session()\n if res := tls.query(MSA).filter_by(msa_id=msa_token).first():\n return json.loads(res.content)\n else:\n return None\n\n def msa_delete(self, msa_token: str) -> None:\n try:\n \"\"\"deletes the msa with token msa_token\"\"\"\n tls = self.thread_local_session()\n tls.query(MSA).filter_by(msa_id=msa_token).delete()\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n raise\n\n def msa_stored_ids(self) -> List[str]:\n \"\"\"returns a list of msa tokens of all objects stored\"\"\"\n tls = self.thread_local_session()\n return [res.msa_id for res in tls.query(MSA)]\n\n def msa_delete_unless_whitelisted(self, whitelist: Iterable[str]) -> None:\n try:\n \"\"\"deletes the msa unless the id is in whitelist\"\"\"\n tls = self.thread_local_session()\n tls.query(MSA).filter(MSA.msa_id.not_in(whitelist)).delete()\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n raise\n\n # methods for trees\n def tree_store(self, tree_token: str, tree: dict) -> Optional[str]:\n \"\"\"stores the tree object tree under token tree_token.\n\n Will not overwrite; requests to do so fail, silently.\"\"\"\n\n if not isinstance(tree, dict):\n raise TypeError(\n \"Can only store dictionary objects, not {0}\".format(type(tree))\n )\n\n tls = self.thread_local_session()\n\n if tls.query(TreeStorage).filter_by(ts_id=tree_token).one_or_none() is None:\n try:\n row = TreeStorage(\n ts_id=tree_token,\n upload_date=datetime.now(),\n content=json.dumps(tree).encode(\"utf-8\"),\n )\n tls.add(row)\n tls.commit()\n except Exception:\n tls.rollback()\n raise\n\n def tree_read(self, tree_token: str) -> Optional[dict]:\n \"\"\"loads object from tree collection.\n It is assumed object is a dictionary\"\"\"\n tls = self.thread_local_session()\n if res := tls.query(TreeStorage).filter_by(ts_id=tree_token).first():\n return json.loads(res.content)\n else:\n return None\n\n def tree_delete(self, tree_token: str) -> None:\n try:\n \"\"\"deletes the tree with token tree_token\"\"\"\n tls = self.thread_local_session()\n tls.query(TreeStorage).filter_by(ts_id=tree_token).delete()\n tls.commit()\n except Exception:\n tls.rollback()\n raise\n\n def tree_stored_ids(self) -> List[str]:\n \"\"\"returns a list of tree tokens of all objects stored\"\"\"\n tls = self.thread_local_session()\n return [res.ts_id for res in tls.query(TreeStorage)]\n\n def tree_delete_unless_whitelisted(self, whitelist: Iterable[str]) -> None:\n try:\n \"\"\"deletes the tree unless the id is in whitelist\"\"\"\n tls = 
self.thread_local_session()\n tls.query(TreeStorage).filter(TreeStorage.ts_id.not_in(whitelist)).delete()\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n raise\n\n # methods for clusters\n def cluster_store(self, clustering_key: str, obj: dict) -> int:\n \"\"\"stores the clustering object obj. retains previous version.\n\n obj: a dictionary to store\n clustering_key: the name of the clustering, e.g. TBSNP12-graph\n\n Returns:\n current cluster version\n\n Note: does not replace previous version, but stores a new one.\n Note: to delete legacy versions, call cluster_delete_legacy().\n \"\"\"\n tls = self.thread_local_session()\n if not isinstance(obj, dict):\n raise TypeError(f\"Can only store dictionary objects, not {type(obj)}\")\n\n try:\n json_repr = json.dumps(obj, cls=NPEncoder).encode(\"utf-8\")\n cluster = Cluster(\n cluster_build_id=clustering_key,\n upload_date=datetime.now(),\n content=json_repr,\n )\n tls.add(cluster)\n tls.commit()\n # finished\n return cluster.cl_int_id\n except Exception:\n tls.rollback()\n raise\n\n def cluster_read(self, clustering_key: str) -> Optional[dict]:\n \"\"\"loads object from clusters collection corresponding to the most recent version of\n the clustering identified by 'clustering_key'.\n\n Parameters:\n clustering_key: a string identifying a clustering result\n\n Returns:\n the clustering information, in the form of a dictionary if it exists, or None if it does not\n \"\"\"\n tls = self.thread_local_session()\n if (\n res := tls.query(Cluster)\n .filter_by(cluster_build_id=clustering_key)\n .order_by(desc(Cluster.cl_int_id))\n .first()\n ):\n return json.loads(res.content)\n else:\n return None\n\n def cluster_read_update(\n self, clustering_key: str, current_cluster_version: int\n ) -> Optional[dict]:\n \"\"\"loads object from clusters collection corresponding to the most recent version\n of the clustering, saved with cluster_build_id = 'clustering_key'.\n it will read only if the current version is different from current_cluster_version; other wise, it returns None\n\n\n Parameters:\n clustering_key: a string identifying the cluster\n current_cluster_version: an integer identifying a legacy cluster version\n\n Returns:\n the clustering information, in the form of a dictionary if it exists, or None if it does not\n \"\"\"\n tls = self.thread_local_session()\n if (\n res := tls.query(Cluster)\n .filter_by(cluster_build_id=clustering_key)\n .filter(Cluster.cl_int_id != current_cluster_version)\n .order_by(desc(Cluster.cl_int_id))\n .first()\n ):\n return json.loads(res.content)\n return None\n\n def cluster_latest_version(self, clustering_key: str) -> int:\n \"\"\"returns id of latest version, which is the maximum number\n\n Parameters:\n clustering_key: a string identifying the cluster\n\n Returns:\n cl_int_id, the primary key to the cluster table\"\"\"\n\n tls = self.thread_local_session()\n if (\n res := tls.query(func.max(Cluster.cl_int_id))\n .filter(Cluster.cluster_build_id == clustering_key)\n .first()\n ):\n retVal = res[0] # it's a tuple\n return retVal\n else:\n return None\n\n def cluster_keys(self, clustering_name: Optional[str] = None) -> List[str]:\n \"\"\"lists clustering keys beginning with clustering_name. 
If clustering_name is none, all clustering keys are returned.\"\"\"\n\n tls = self.thread_local_session()\n if clustering_name:\n return list(\n sorted(\n set(\n res.cluster_build_id\n for res in tls.query(Cluster).filter(\n Cluster.cluster_build_id.startswith(clustering_name)\n )\n )\n )\n )\n else:\n return list(sorted(set(res.cluster_build_id for res in tls.query(Cluster))))\n\n def cluster_versions(self, clustering_key: str) -> List[bson.objectid.ObjectId]:\n \"\"\"lists ids and storage dates corresponding to versions of clustering identifed by clustering_key.\n the newest version is first.\n \"\"\"\n tls = self.thread_local_session()\n return list(\n tls.query(Cluster)\n .filter_by(cluster_build_id=clustering_key)\n .order_by(desc(Cluster.upload_date))\n )\n\n def cluster_delete_all(self, clustering_key: str) -> None:\n \"\"\"delete all clustering objects, including the latest version, stored under clustering_key\"\"\"\n try:\n\n tls = self.thread_local_session()\n tls.query(Cluster).filter(\n Cluster.cluster_build_id == clustering_key\n ).delete()\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n raise\n\n def cluster_delete_legacy_by_key(self, clustering_key: str) -> None:\n \"\"\"delete all clustering objects, except latest version, stored with key clustering_key\"\"\"\n tls = self.thread_local_session()\n cl_int_ids = set()\n for (cl_int_id,) in tls.query(Cluster.cl_int_id).filter_by(\n cluster_build_id=clustering_key\n ):\n cl_int_ids.add(cl_int_id)\n if len(cl_int_ids) == 0:\n return\n else:\n latest_cl_int_id = max(cl_int_ids)\n for this_cl_int_id in cl_int_ids:\n if not this_cl_int_id == latest_cl_int_id:\n tls.query(Cluster).filter_by(cl_int_id=this_cl_int_id).delete()\n try:\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n raise\n\n def cluster_delete_legacy(self, clustering_name: str) -> None:\n \"\"\"delete all clustering objects, except latest version, stored with clustering_name\"\"\"\n for clustering_key in self.cluster_keys(clustering_name=clustering_name):\n self.cluster_delete_legacy_by_key(clustering_key)\n\n def refcompressedseq_store(self, guid: str, obj: dict) -> str:\n \"\"\"stores the json object obj with guid guid.\n\n Parameters:\n guid: the sequence identifer\n obj: a reference compressed sequence representation, as produced by py_seqComparer.compress().\n Here is an example:\n\n {\n 'A':set([1,2,3]), 'C':set([6]), 'T':set([4]), 'G':set([5]), 'M':{11:'Y', 12:'k'}, 'invalid':0\n }\n\n If the guid already exists in the database, raises a FileExistsError, as is the case with the mongo client.\"\"\"\n tls = self.thread_local_session()\n if not isinstance(obj, dict):\n raise TypeError(\n \"Can only store dictionary objects, not {0}\".format(type(obj))\n )\n\n if \"invalid\" not in obj.keys():\n raise KeyError(\n \"An invalid key must be present. 
Keys are: {0}\".format(obj.keys())\n )\n\n # if the record already exists, we don't re-add it\n res = (\n tls.query(RefCompressedSeq.seq_int_id)\n .filter_by(sequence_id=guid)\n .one_or_none()\n )\n if res is None: # it doesn't exits\n tls.add(\n RefCompressedSeq(\n sequence_id=guid,\n invalid=obj[\"invalid\"],\n examination_date=datetime.now(),\n content=self.sjc.to_json(obj),\n prop_actg=None,\n annotations=json.dumps({}),\n )\n )\n try:\n tls.commit()\n except Exception:\n tls.rollback()\n raise\n\n else: # it does exist\n raise FileExistsError(\"Attempting to overwrite {0}\".format(guid))\n\n # finished\n\n def refcompressedsequence_read(self, guid: str) -> Any:\n \"\"\"loads object from refcompressedseq collection.\n the object loaded is identified by guid.\n It is assumed object stored is a dictionary\n\n returns:\n dictionary containing referencecompressed sequences\"\"\"\n tls = self.thread_local_session()\n if (\n rcs := tls.query(RefCompressedSeq.content)\n .filter_by(sequence_id=guid)\n .first()\n ):\n return self.sjc.from_json(rcs.content)\n else:\n return None\n\n def refcompressedsequence_read_many(self, guids: Iterable) -> Any:\n \"\"\"loads objects identified by all members of guids from refcompressedseq collection.\n It is assumed object stored is a dictionary\n\n returns:\n generator, which yields a tuple\n (guid, referencecompressedsequence)\n\n raises:\n ValueError, if length of guids is > 1000\n \"\"\"\n\n if len(guids) > 1000:\n raise ValueError(\"Maximum number of samples which can be sought is 1000\")\n\n tls = self.thread_local_session()\n results = (\n tls.query(RefCompressedSeq.sequence_id, RefCompressedSeq.content)\n .filter(RefCompressedSeq.sequence_id.in_(guids))\n .all()\n )\n\n for result in results:\n yield (result.sequence_id, self.sjc.from_json(result.content))\n\n def refcompressedsequence_read_all(self, internal_batch_size=5000) -> Any:\n \"\"\"loads objects identified by all members of guids from refcompressedseq collection.\n It is assumed object stored is a dictionary\n\n parameters:\n internal_batch_size: how many samples are loaded into ram at a time. 
Default should be fine unless low memory\n\n returns:\n generator, which yields a tuple\n (guid, referencecompressedsequence)\n \"\"\"\n\n # sanity check\n if internal_batch_size < 1:\n raise ValueError(\"Internal batch size must be >= 1\")\n\n tls = self.thread_local_session()\n seq_int_ids = [\n seq_int_id\n for seq_int_id, in tls.query(RefCompressedSeq.seq_int_id)\n .order_by(RefCompressedSeq.seq_int_id)\n .all()\n ]\n\n start_position = 0\n while start_position < len(seq_int_ids):\n end_position = internal_batch_size + start_position - 1\n if end_position >= len(seq_int_ids):\n end_position = len(seq_int_ids) - 1\n start_seq_int_id = seq_int_ids[start_position]\n end_seq_int_id = seq_int_ids[end_position]\n\n # load batch\n results = (\n tls.query(RefCompressedSeq.sequence_id, RefCompressedSeq.content)\n .filter(RefCompressedSeq.seq_int_id >= start_seq_int_id)\n .filter(RefCompressedSeq.seq_int_id <= end_seq_int_id)\n .all()\n )\n\n for result in results:\n yield (result.sequence_id, self.sjc.from_json(result.content))\n\n # next batch\n start_position = end_position + 1\n\n def refcompressedsequence_guids(self) -> Set[str]:\n \"\"\"loads guids from refcompressedseq collection.\"\"\"\n tls = self.thread_local_session()\n return set(res.sequence_id for res in tls.query(RefCompressedSeq.sequence_id))\n\n def guid_annotate(self, guid: str, nameSpace: str, annotDict: dict) -> None:\n \"\"\"adds multiple annotations of guid from a dictionary;\n all annotations go into a namespace.\n updates the annotation if it exists\"\"\"\n tls = self.thread_local_session()\n if not isinstance(annotDict, dict):\n raise TypeError(\n \"Can only store dictionary objects, not {0}\".format(type(annotDict))\n )\n\n # The reference compressed sequence must exist.\n rcs = (\n tls.query(RefCompressedSeq)\n .filter(RefCompressedSeq.sequence_id == guid)\n .one_or_none()\n )\n if rcs is None:\n raise RDBMSError(\n \"Asked to annotate a record {0} but it does not exist\".format(guid)\n )\n\n if nameSpace == \"DNAQuality\":\n # coerce examination date to string\n if \"examinationDate\" in annotDict:\n rcs.examination_date = annotDict[\"examinationDate\"]\n if isinstance(annotDict[\"examinationDate\"], datetime):\n # convert to isoformat pre-jsonisation\n annotDict[\"examinationDate\"] = annotDict[\n \"examinationDate\"\n ].isoformat()\n\n if \"propACTG\" in annotDict:\n rcs.prop_actg = annotDict[\"propACTG\"]\n\n annotations = json.loads(rcs.annotations) # what's there now\n annotations[nameSpace] = annotDict # replace or add the new namespace\n rcs.annotations = json.dumps(annotations).encode(\"utf-8\")\n try:\n tls.commit()\n # finished\n except Exception:\n tls.rollback()\n raise\n\n def guids(self) -> Set[str]:\n \"\"\"returns all registered guids\"\"\"\n return self.refcompressedsequence_guids()\n\n def guids_added_after_sample(self, guid: str) -> Set[str]:\n \"\"\"returns all guids added after a sample\"\"\"\n tls = self.thread_local_session()\n rcs = (\n tls.query(RefCompressedSeq.seq_int_id)\n .filter(RefCompressedSeq.sequence_id == guid)\n .one_or_none()\n )\n if rcs is None:\n return None # does not exist\n\n (this_seq_int_id,) = rcs # the sequence int id of the sample\n retVal = []\n for (guid,) in tls.query(RefCompressedSeq.sequence_id).filter(\n RefCompressedSeq.seq_int_id > this_seq_int_id\n ):\n retVal.append(guid)\n return set(retVal)\n\n def guids_considered_after(self, addition_datetime: datetime) -> Set[str]:\n \"\"\"returns all registered guid added after addition_datetime\n addition_datetime: a 
date of datetime class.\"\"\"\n tls = self.thread_local_session()\n if not isinstance(addition_datetime, datetime):\n raise TypeError(\n \"addition_datetime must be a datetime value. It is {0}. Value = {1}\".format(\n type(addition_datetime), addition_datetime\n )\n )\n retVal = []\n for (guid,) in tls.query(RefCompressedSeq.sequence_id).filter(\n RefCompressedSeq.examination_date > addition_datetime\n ):\n retVal.append(guid)\n return set(retVal)\n\n def _guids_selected_by_validity(self, validity: int) -> Set[str]:\n \"\"\"returns registered guids, selected on their validity\n\n 0 = guid is valid\n 1 = guid is invalid\n\n \"\"\"\n tls = self.thread_local_session()\n return set(\n res.sequence_id\n for res in tls.query(RefCompressedSeq.sequence_id).filter_by(\n invalid=validity\n )\n )\n\n def singletons(\n self, method: str = \"approximate\", return_top: int = 1000\n ) -> pd.DataFrame:\n \"\"\"\n\n This method is not important in the RDBMS implementation of the fn3persistence store.\n\n Returns:\n An empty data frame.\n \"\"\"\n return pd.DataFrame()\n\n def guids_valid(self) -> set:\n \"\"\"return all registered valid guids.\n\n Validity is determined by the contents of the DNAQuality.invalid field, on which there is an index\"\"\"\n return self._guids_selected_by_validity(0)\n\n def guids_invalid(self) -> set:\n \"\"\"return all invalid guids\n\n Validity is determined by the contents of the DNAQuality.invalid field, on which there is an index\"\"\"\n return self._guids_selected_by_validity(1)\n\n def guid_exists(self, guid: str) -> bool:\n \"\"\"checks the presence of a single guid\"\"\"\n tls = self.thread_local_session()\n return (\n tls.query(RefCompressedSeq.sequence_id).filter_by(sequence_id=guid).first()\n is not None\n )\n\n def guid_valid(self, guid: str) -> int:\n \"\"\"checks the validity of a single guid\n\n Parameters:\n guid: the sequence identifier\n\n Returns\n -1 The guid does not exist\n 0 The guid exists and the sequence is valid\n 1 The guid exists and the sequence is invalid\n \"\"\"\n tls = self.thread_local_session()\n if (\n res := tls.query(RefCompressedSeq.invalid)\n .filter_by(sequence_id=guid)\n .first()\n ):\n if res.invalid == 0:\n return 0\n elif res.invalid == 1:\n return 1\n else:\n raise ValueError(\n \"invalid is neither 1 nor 0 but {0}\".format(res.invalid)\n )\n else:\n return -1\n\n def guid_examination_time(self, guid: str) -> Optional[datetime]:\n \"\"\"returns the examination time for a single guid\n\n Parameters:\n guid: the sequence identifier\n\n Returns either\n The examination datetime value for this guid OR\n None if the guid does not exist\n \"\"\"\n tls = self.thread_local_session()\n if (\n res := tls.query(RefCompressedSeq.examination_date)\n .filter_by(sequence_id=guid)\n .first()\n ):\n return res.examination_date\n else:\n return None\n\n def guids_considered_after_guid(self, guid: str) -> Set[str]:\n \"\"\"returns all registered guids added after guid\n guid: a sequence identifier\"\"\"\n if addition_datetime := self.guid_examination_time(guid):\n return self.guids_considered_after(addition_datetime)\n else:\n raise ValueError(\"guid is not valid: {0}\".format(guid))\n\n def guid_quality_check(\n self, guid: str, cutoff: Union[float, int]\n ) -> Optional[bool]:\n \"\"\"Checks whether the quality of one guid exceeds the cutoff.\n\n If the guid does not exist, returns None.\n If the guid does exist and has quality< cutoff, returns False.\n Otherwise, returns True.\n \"\"\"\n tls = self.thread_local_session()\n # test input\n if not 
type(cutoff) in [float, int]:\n            raise TypeError(\n                \"Cutoff should be either floating point or integer, but it is %s\"\n                % type(cutoff)\n            )\n        if not type(guid) == str:\n            raise TypeError(\"The guid passed should be a string, not %s\" % str(guid))\n\n        # recover record, compare with quality\n\n        res = tls.query(RefCompressedSeq).filter_by(sequence_id=guid).first()\n        if res is None:  # no entry for this guid\n            return None\n        else:\n            # report whether it is larger or smaller than cutoff\n            return res.prop_actg >= cutoff\n\n    def _guid2seq(self, guidList: Optional[List[str]]) -> Iterable[RefCompressedSeq]:\n        \"\"\"returns the annotations, sequence_id and prop_actg from each RefCompressedSeq for each guid in guidList\n        If guidList is None, all items are returned.\n        \"\"\"\n        tls = self.thread_local_session()\n        if guidList is None:  # return everything\n            return tls.query(\n                RefCompressedSeq.sequence_id,\n                RefCompressedSeq.annotations,\n                RefCompressedSeq.prop_actg,\n                RefCompressedSeq.examination_date,\n            )\n        else:\n            return tls.query(RefCompressedSeq).filter(\n                RefCompressedSeq.sequence_id.in_(guidList)\n            )\n\n    def guid2item(\n        self, guidList: Optional[List[str]], namespace: str, tag: str\n    ) -> dict:\n        \"\"\"returns the annotation (such as sequence quality, which is stored as an annotation)\n        in namespace:tag for all guids in guidlist.\n        If guidList is None, all items are returned.\n        An error is raised if namespace and tag are not present in each record.\n        \"\"\"\n        return {\n            res.sequence_id: json.loads(res.annotations)[namespace][tag]\n            for res in self._guid2seq(guidList)\n        }\n\n    def guid2ExaminationDateTime(self, guidList: Optional[List[str]] = None) -> dict:\n        \"\"\"returns the examinationDate for all guids in guidlist. If guidList is None, all results are returned.\"\"\"\n\n        return {\n            res.sequence_id: res.examination_date for res in self._guid2seq(guidList)\n        }\n\n    def guid2quality(self, guidList: Optional[List[str]] = None) -> Optional[dict]:\n        \"\"\"returns quality scores for all guids in guidlist (or all samples if guidList is None)\n        potentially expensive query if guidList is None.\"\"\"\n\n        return {res.sequence_id: res.prop_actg for res in self._guid2seq(guidList)}\n\n    def guid2propACTG_filtered(self, cutoff: Union[int, float] = 0) -> Dict[str, float]:\n        \"\"\"recover guids which have good quality, > cutoff.\n        These are in the majority, so we run a table scan to find these.\n\n        This query is potentially very inefficient - best avoided\n        \"\"\"\n        tls = self.thread_local_session()\n        query = tls.query(\n            RefCompressedSeq.sequence_id, RefCompressedSeq.prop_actg\n        ).filter(RefCompressedSeq.prop_actg >= cutoff)\n\n        return {res.sequence_id: res.prop_actg for res in query}\n\n    def guid2items(\n        self, guidList: Optional[List[str]], namespaces: Optional[Set[str]]\n    ) -> Dict[Any, Dict[str, Any]]:\n        \"\"\"returns all annotations in namespaces, which is a list\n        If namespaces is None, all namespaces are returned.\n        If guidList is None, all items are returned.\n        To do this, a table scan is performed - indices are not used.\n        \"\"\"\n\n        def select_namespaces(annotations: dict) -> dict:\n            if namespaces:\n                return {ns: annotations[ns] for ns in annotations.keys() & namespaces}\n            else:\n                return annotations\n\n        return {\n            res.sequence_id: select_namespaces(json.loads(res.annotations))\n            for res in self._guid2seq(guidList)\n        }\n\n    def guid_annotations(self) -> Optional[Dict[Any, Dict[str, Any]]]:\n        \"\"\"return all annotations of all guids\"\"\"\n\n        return self.guid2items(None, None)  # no restriction 
by namespace or by guid.\n\n def guid_annotation(self, guid: str) -> Optional[Dict[Any, Dict[str, Any]]]:\n \"\"\"return all annotations of one guid\"\"\"\n\n return self.guid2items([guid], None) # restriction by guid.\n\n def guid2neighbour_add_links(\n self,\n guid: str,\n targetguids: Dict[str, Dict[str, int]],\n use_update: bool = False,\n ) -> Dict[str, int]:\n \"\"\"adds links between guid and their neighbours ('targetguids')\n\n Parameters:\n guid: the 'source' guid for the matches eg 'guid1'\n targetguids: what is guid linked to, eg\n {\n 'guid2':{'dist':12},\n 'guid3':{'dist':2}\n }\n use_update - currently ignored, always False. Setting True yields NotImplementedError\n\n This stores links in the guid2neighbour collection.\n\n Returns:\n The number of records written\n\n Note:\n uses bulk upload methodology to write fast, as some samples may have thousands or tens of thousands of neighbours\n\n \"\"\"\n\n load_list = []\n for guid2, dist in targetguids.items():\n load_list.append(\n {\"sequence_id_1\": guid, \"sequence_id_2\": guid2, \"dist\": dist[\"dist\"]}\n )\n load_list.append(\n {\"sequence_id_1\": guid2, \"sequence_id_2\": guid, \"dist\": dist[\"dist\"]}\n )\n load_df = pd.DataFrame.from_records(load_list)\n\n if len(load_df.index) > 0:\n self._bulk_load(load_df, \"edge\")\n\n class Guid2NeighbourRepackRet(TypedDict):\n guid: str\n finished: str\n pre_optimisation: dict\n actions_taken: Dict[str, int]\n\n def guid2neighbour_repack(\n self, guid: str, always_optimise: bool = False, min_optimisable_size: int = 1\n ) -> Guid2NeighbourRepackRet:\n \"\"\"In the mongo implementation, alters the mongodb representation of the links of guid.\n\n In the rdbms implementation, this does nothing, and just returns a status report.\n Parameters:\n guid : the identifier of a sample to repack\n always_optimise: consider for optimisation even if there are no 's' (single item) records\n \"\"\"\n return {\n \"guid\": guid,\n \"finished\": datetime.now().isoformat(),\n \"pre_optimisation\": {\n \"s_records\": 0,\n \"msg\": \"Repacking not necessary on RDBMS\",\n },\n \"actions_taken\": {},\n }\n\n class Guid2NeighboursRet(TypedDict):\n guid: str\n neighbours: List[Guid2NeighboursFormats]\n\n def guid2neighbours(\n self, guid: str, cutoff: int = 20, returned_format: int = 2\n ) -> Guid2NeighboursRet:\n \"\"\"returns neighbours of guid with cutoff <=cutoff.\n\n Parameters:\n guid: the sequence identifier\n cutoff: a SNV distance cutoff\n returned_format: see below.\n\n Returns links either as\n\n format 1 [[otherGuid, distance],[otherGuid2, distance2],...]\n or as\n format 2 [[otherGuid, distance, N_just1, N_just2, N_either],[],...]\n or as\n format 3 [otherGuid1, otherGuid2, otherGuid3]\n or as\n format 4 [{'guid':otherguid, 'snv':distance}]\n\n Internally, the documents in guid2neighbour are of the form\n {'guid':'guid1', 'rstat':'s', 'neighbours':{'guid2':{'dist':12, ...}}} OR\n {'guid':'guid1', 'rstat':'m', 'neighbours':{'guid2':{'dist':12, ...}, 'guid3':{'dist':5, ...}} OR\n {'guid':'guid1', 'rstat':'f', 'neighbours':{'guid2':{'dist':12, ...}, 'guid3':{'dist':5, ...}}\n\n However, irrespective of their internal representation, this function always returns\n exactly one item for each link of 'guid'; duplicates are not possible.\n The last example occurs when the maximum number of neighbours permitted per record has been reached.\n \"\"\"\n\n def f(res):\n otherGuid = res.sequence_id_2\n dist = res.dist\n if returned_format == 1:\n return [otherGuid, dist]\n elif returned_format == 2:\n raise 
NotImplementedError(\"format 2 is no longer supported\")\n elif returned_format == 3:\n return otherGuid\n elif returned_format == 4:\n return {\"guid\": otherGuid, \"snv\": dist}\n else:\n raise ValueError(\n f\"Unable to understand returned_format = {returned_format}\"\n )\n\n tls = self.Session()\n\n return {\n \"guid\": guid,\n \"neighbours\": [\n f(res)\n for res in tls.query(Edge)\n .filter_by(sequence_id_1=guid)\n .filter(Edge.dist <= cutoff)\n ],\n }\n\n def _set_lock_status(self, lock_int_id, lock_status, sequence_id=\"-NotSpecified-\"):\n \"\"\"locks or unlocks resources identified by lock_int_id, allowing cross- process sequential processing (e.g. insertion)\n\n To lock, set lock_status =1 ; to unlock, set lock_status =0\n To return the relevant row, set lock_status to None\n\n See the acquire_lock() method for more details\n\n returns:\n\n If lock_status is either 1 or 0:\n True if update succeeded, false if it did not\n\n If lock_status is None:\n the lock row, as an Sqlalchemy object, from which field values can be accessed by dot notation, e.g. retVal.lock_set_date, or retVal.lock_status\n\n Technical notes:\n https://docs.sqlalchemy.org/en/14/orm/session_transaction.html\n https://www.amazon.com/Expert-Oracle-Database-Architecture-Thomas-dp-1430262982/dp/1430262982/ref=dp_ob_title_bk\n\n \"\"\"\n # make sure there is an entry for this lock\n tls = self.Session()\n\n lock_row = (\n tls.query(FNLock).filter(FNLock.lock_int_id == lock_int_id).one_or_none()\n )\n\n # if the row doesn't exist, we add it, with the lock not set.\n if lock_row is None:\n try:\n lock_row = FNLock(\n lock_int_id=lock_int_id,\n lock_status=0,\n sequence_id=sequence_id,\n lock_set_date=datetime.now(),\n uuid=uuid.uuid4().hex,\n )\n tls.add(lock_row)\n tls.commit()\n except Exception:\n tls.rollback()\n raise\n\n # analyse the record for this row\n lock_row = (\n tls.query(FNLock)\n .filter(FNLock.lock_int_id == lock_int_id)\n .with_for_update()\n .one()\n )\n if lock_status is None:\n retval = lock_row\n\n elif lock_row.lock_status == 0 and lock_status == 0:\n # it's already unlocked\n retval = True\n\n elif lock_row.lock_status == 1 and lock_status == 1:\n # it's already locked and we're asked to acquire a lock. 
We can't.\n retval = False\n\n elif lock_row.lock_status == 0 and lock_status == 1:\n # it's already unlocked, we can lock\n lock_row.lock_status = 1\n lock_row.lock_set_date = datetime.now()\n lock_row.sequence_id = sequence_id\n lock_row.uuid = uuid.uuid4().hex\n retval = True\n\n elif lock_row.lock_status == 1 and lock_status == 0:\n # it's already locked, we can unlock\n lock_row.lock_status = 0\n lock_row.lock_set_date = datetime.now()\n lock_row.sequence_id = \"-NotSpecified-\"\n lock_row.uuid = uuid.uuid4().hex\n retval = True\n try:\n tls.commit()\n return retval\n except Exception:\n tls.rollback()\n raise\n\n def lock_details(self, lock_int_id):\n \"\"\"returns details of the lock as a dictionary\n\n Parameters:\n lock_int_id: an integer identifier to the lock of interest\n\n Returns:\n None if there is no lock,\n or a dictionary containing details of the lock held including sequence_id, lock_status, lock_set_date, and uuid\"\"\"\n res = self.lock_status(lock_int_id)\n\n if res.lock_status == 0:\n return None\n else:\n return dict(\n sequence_id=res.sequence_id,\n lock_set_date=res.lock_set_date,\n uuid=res.uuid,\n )\n\n def lock_status(self, lock_int_id):\n \"\"\"determine whether a database-based lock is open (0) or closed (1).\n\n Parameters:\n lock_int_id: an integer identifier to the lock of interest\n\n Returns:\n a sqlalchemy object containing the lock row\"\"\"\n\n return self._set_lock_status(lock_int_id, None)\n\n def lock(self, lock_int_id, sequence_id):\n \"\"\"obtains a database-based lock.\n\n Parameters:\n lock_int_id: an integer identifier to the lock of interest\n sequence_id: the id (typically guid) of the sequence being added. Used if the inserting process crashes\n\n Returns:\n True if the lock is acquired\n False if it is not\"\"\"\n\n return self._set_lock_status(lock_int_id, 1, sequence_id)\n\n def unlock(self, lock_int_id, force=False):\n \"\"\"obtains a database-based lock.\n\n Parameters:\n lock_int_id: an integer identifier to the lock of interest\n force: if True, will unlock irrespective of current status, returning True\n\n Returns:\n True if the lock is acquired\n False if it is not\"\"\"\n\n res = self._set_lock_status(lock_int_id, 0)\n if force:\n return True\n else:\n return res\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
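A minimal sketch of the chunked bulk-upload pattern used by the non-Oracle branch of _bulk_load in the entry above. It assumes sqlalchemy and pandas are installed and uses an in-memory SQLite engine; the table name "edge" and the example columns are hypothetical stand-ins shaped like the entry's edge records, not definitions taken from it.

import pandas as pd
from sqlalchemy import create_engine

# in-memory SQLite, mirroring the no-config fallback in the entry above
engine = create_engine("sqlite://")

# hypothetical rows shaped like the records built by guid2neighbour_add_links
upload_df = pd.DataFrame.from_records(
    [{"sequence_id_1": "guid1", "sequence_id_2": "guid{0}".format(i), "dist": i} for i in range(10)]
)

# SQLite caps the number of bound variables per statement, so derive the
# batch size from the column count (cf. the 32,000 margin used in _bulk_load)
max_batch = 32000 // len(upload_df.columns)

while len(upload_df.index) > 0:
    # upload one chunk; "append" creates the table on first use
    upload_df.head(n=max_batch).to_sql(
        "edge", engine, if_exists="append", index=False, method="multi"
    )
    upload_df = upload_df.iloc[max_batch:]

Chunking keeps each multi-row INSERT below the backend's per-statement parameter limit; the same loop shape works for any database pandas can write to.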
dlshriver/tensorfuzz | [
"a81df1b9b62f4d3176af35cf545bef16cf65a05f"
] | [
"tensorfuzz/fuzz_utils.py"
] | [
"# Copyright 2018 Google LLC\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# https://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities for the fuzzer library.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport os\nimport random as random\nimport numpy as np\nimport scipy\nimport tensorflow as tf\nimport tensorfuzz.dataset as mnist\n\n\ndef basic_mnist_input_corpus(choose_randomly=False, data_dir=\"/tmp/mnist\"):\n \"\"\"Returns the first image and label from MNIST.\n\n Args:\n choose_randomly: a boolean indicating whether to choose randomly.\n data_dir: a string giving the location of the original MNIST data.\n Returns:\n A single image and a single label.\n \"\"\"\n\n dataset = mnist.train(data_dir)\n dataset = dataset.cache().shuffle(buffer_size=50000).batch(100).repeat()\n iterator = dataset.make_one_shot_iterator()\n images, integer_labels = iterator.get_next()\n images = tf.reshape(images, [-1, 28, 28, 1])\n # labels = tf.one_hot(integer_labels, 10)\n labels = integer_labels\n\n with tf.train.MonitoredTrainingSession() as sess:\n image_batch, label_batch = sess.run([images, labels])\n\n if choose_randomly:\n idx = random.choice(range(image_batch.shape[0]))\n else:\n idx = 0\n tf.logging.info(\"Seeding corpus with element at idx: %s\", idx)\n return image_batch[idx], label_batch[idx]\n\n\ndef imsave(image, path):\n \"\"\"Saves an image to a given path.\n\n This function has the side-effect of writing to disk.\n\n Args:\n image: The Numpy array representing the image.\n path: A Filepath.\n \"\"\"\n image = np.squeeze(image)\n with tf.gfile.Open(path, mode=\"w\") as fptr:\n scipy.misc.imsave(fptr, image)\n\n\ndef build_feed_dict(input_tensors, input_batches):\n \"\"\"Constructs a feed_dict to pass to the run method of TensorFlow Session.\n\n In the logic we assume all tensors should have the same batch size.\n However, we have to do some crazy stuff to deal with the case when\n some of the tensors have concrete shapes and some don't, especially\n when we're constructing the seed corpus.\n\n Args:\n input_tensors: The TF tensors into which we will feed the fuzzed inputs.\n input_batches: Numpy arrays that will be fed into the input tensors.\n\n Returns:\n The feed_dict described above.\n \"\"\"\n feed_dict = {}\n\n # If the tensor has concrete shape and we are feeding in something that has a\n # non-matching shape, we will need to tile it to make it work.\n tensor_bszs = [x.get_shape().as_list()[0] for x in input_tensors]\n should_tile = any([x is not None for x in tensor_bszs])\n if should_tile:\n max_tensor_bsz = max([x for x in tensor_bszs if x is not None])\n for idx in range(len(list(zip(input_tensors, input_batches)))):\n np_bsz = input_batches[idx].shape[0]\n if should_tile and np_bsz != max_tensor_bsz:\n tf.logging.info(\n \"Tiling feed_dict inputs due to concrete batch sizes.\"\n )\n this_shape = [max_tensor_bsz // np_bsz] + [\n 1 for _ in range(len(input_batches[idx].shape[1:]))\n ]\n input_batches[idx] = np.tile(input_batches[idx], 
this_shape)\n\n # Note that this will truncate one of input_tensors or input_batches\n # if either of them is longer. This is WAI right now, because we sometimes\n # want to store the label for an image classifier for which we don't have\n # a label placeholder in the checkpoint.\n for input_tensor, input_batch in list(zip(input_tensors, input_batches)):\n feed_dict[input_tensor] = input_batch\n return feed_dict\n\n\ndef get_tensors_from_checkpoint(sess, checkpoint_dir):\n \"\"\"Loads and returns the fuzzing tensors given a session and a directory.\n\n It's assumed that the checkpoint directory has checkpoints from a TensorFlow\n model, and moreoever that those checkpoints have 3 collections:\n 1. input_tensors: The tensors into which we will feed the fuzzed inputs.\n 2. coverage_tensors: The tensors from which we will fetch information needed\n to compute the coverage. The coverage will be used to guide the fuzzing\n process.\n 3. metadata_tensors: The tensors from which we will fetch information needed\n to compute the metadata. The metadata can be used for computing the fuzzing\n objective or just to track the progress of fuzzing.\n\n Args:\n sess: a TensorFlow Session object.\n checkpoint_dir: a directory containing the TensorFlow checkpoints.\n\n Returns:\n The 3 lists of tensorflow tensors described above.\n \"\"\"\n potential_files = tf.gfile.ListDirectory(checkpoint_dir)\n meta_files = [f for f in potential_files if f.endswith(\".meta\")]\n\n # Sort the meta files by global step\n meta_files.sort(key=lambda f: int(f[: -len(\".meta\")].split(\"-\")[-1]))\n meta_file = meta_files[-1]\n\n explicit_meta_path = os.path.join(checkpoint_dir, meta_file)\n explicit_checkpoint_path = explicit_meta_path[: -len(\".meta\")]\n tf.logging.info(\"Visualizing checkpoint: %s\", explicit_checkpoint_path)\n\n new_saver = tf.train.import_meta_graph(\n explicit_meta_path, clear_devices=True\n )\n new_saver.restore(sess, explicit_checkpoint_path)\n\n input_tensors = tf.get_collection(\"input_tensors\")\n coverage_tensors = tf.get_collection(\"coverage_tensors\")\n metadata_tensors = tf.get_collection(\"metadata_tensors\")\n\n tensor_map = {\n \"input\": input_tensors,\n \"coverage\": coverage_tensors,\n \"metadata\": metadata_tensors,\n }\n return tensor_map\n\n\ndef fetch_function(\n sess, input_tensors, coverage_tensors, metadata_tensors, input_batches\n):\n \"\"\"Fetches from the TensorFlow runtime given inputs.\n\n Args:\n sess: a TensorFlow Session object.\n input_tensors: TF tensors to which we feed input_batches.\n coverage_tensors: TF tensors we fetch for coverage.\n metadata_tensors: TF tensors we fetch for metadata.\n input_batches: numpy arrays we feed to input_tensors.\n\n Returns:\n Coverage and metadata as lists of numpy arrays.\n \"\"\"\n feed_dict = build_feed_dict(input_tensors, input_batches)\n fetched_data = sess.run(\n coverage_tensors + metadata_tensors, feed_dict=feed_dict\n )\n idx = len(coverage_tensors)\n coverage_batches = fetched_data[:idx]\n metadata_batches = fetched_data[idx:]\n return coverage_batches, metadata_batches\n\n\ndef build_fetch_function(sess, tensor_map):\n \"\"\"Constructs fetch function given session and tensors.\"\"\"\n\n def func(input_batches):\n \"\"\"The fetch function.\"\"\"\n return fetch_function(\n sess,\n tensor_map[\"input\"],\n tensor_map[\"coverage\"],\n tensor_map[\"metadata\"],\n input_batches,\n )\n\n return func\n"
] | [
[
"tensorflow.gfile.ListDirectory",
"tensorflow.gfile.Open",
"tensorflow.get_collection",
"scipy.misc.imsave",
"numpy.squeeze",
"tensorflow.reshape",
"numpy.tile",
"tensorflow.train.import_meta_graph",
"tensorflow.logging.info",
"tensorflow.train.MonitoredTrainingSession"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"0.16"
],
"tensorflow": [
"1.10"
]
}
] |
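The build_feed_dict helper in the entry above tiles a numpy batch along axis 0 whenever an input placeholder has a concrete batch size larger than the batch being fed. A standalone sketch of just that tiling arithmetic follows; the shapes are hypothetical examples, not values taken from the original checkpoints.

import numpy as np

max_tensor_bsz = 100  # concrete batch size baked into an input placeholder
input_batch = np.zeros((2, 28, 28, 1))  # e.g. a small seed-corpus batch

np_bsz = input_batch.shape[0]
if np_bsz != max_tensor_bsz:
    # repeat max_tensor_bsz // np_bsz times on axis 0, once on every other axis
    reps = [max_tensor_bsz // np_bsz] + [1] * (input_batch.ndim - 1)
    input_batch = np.tile(input_batch, reps)

assert input_batch.shape == (100, 28, 28, 1)

Note that, as in the original, the tiling is exact only when the placeholder batch size is a multiple of the fed batch size.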
tcbegley/numpyro | [
"e55f0d41c9eba48a10e88fb403a5e016f18857e6"
] | [
"numpyro/util.py"
] | [
"# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nimport os\nimport random\nimport re\nimport warnings\n\nimport numpy as np\nimport tqdm\nfrom tqdm.auto import tqdm as tqdm_auto\n\nimport jax\nfrom jax import device_put, jit, lax, ops, vmap\nfrom jax.core import Tracer\nfrom jax.experimental import host_callback\nfrom jax.flatten_util import ravel_pytree\nimport jax.numpy as jnp\nfrom jax.tree_util import tree_flatten, tree_map\n\n_DISABLE_CONTROL_FLOW_PRIM = False\n_CHAIN_RE = re.compile(r\"\\d+$\") # e.g. get '3' from 'TFRT_CPU_3'\n\n\ndef set_rng_seed(rng_seed):\n \"\"\"\n Initializes internal state for the Python and NumPy random number generators.\n\n :param int rng_seed: seed for Python and NumPy random states.\n \"\"\"\n random.seed(rng_seed)\n np.random.seed(rng_seed)\n\n\ndef enable_x64(use_x64=True):\n \"\"\"\n Changes the default array type to use 64 bit precision as in NumPy.\n\n :param bool use_x64: when `True`, JAX arrays will use 64 bits by default;\n else 32 bits.\n \"\"\"\n if not use_x64:\n use_x64 = os.getenv(\"JAX_ENABLE_X64\", 0)\n jax.config.update(\"jax_enable_x64\", use_x64)\n\n\ndef set_platform(platform=None):\n \"\"\"\n Changes platform to CPU, GPU, or TPU. This utility only takes\n effect at the beginning of your program.\n\n :param str platform: either 'cpu', 'gpu', or 'tpu'.\n \"\"\"\n if platform is None:\n platform = os.getenv(\"JAX_PLATFORM_NAME\", \"cpu\")\n jax.config.update(\"jax_platform_name\", platform)\n\n\ndef set_host_device_count(n):\n \"\"\"\n By default, XLA considers all CPU cores as one device. This utility tells XLA\n that there are `n` host (CPU) devices available to use. As a consequence, this\n allows parallel mapping in JAX :func:`jax.pmap` to work in CPU platform.\n\n .. note:: This utility only takes effect at the beginning of your program.\n Under the hood, this sets the environment variable\n `XLA_FLAGS=--xla_force_host_platform_device_count=[num_devices]`, where\n `[num_device]` is the desired number of CPU devices `n`.\n\n .. warning:: Our understanding of the side effects of using the\n `xla_force_host_platform_device_count` flag in XLA is incomplete. If you\n observe some strange phenomenon when using this utility, please let us\n know through our issue or forum page. 
More information is available in this\n `JAX issue <https://github.com/google/jax/issues/1408>`_.\n\n :param int n: number of CPU devices to use.\n \"\"\"\n xla_flags = os.getenv(\"XLA_FLAGS\", \"\")\n xla_flags = re.sub(\n r\"--xla_force_host_platform_device_count=\\S+\", \"\", xla_flags\n ).split()\n os.environ[\"XLA_FLAGS\"] = \" \".join(\n [\"--xla_force_host_platform_device_count={}\".format(n)] + xla_flags\n )\n\n\n@contextmanager\ndef optional(condition, context_manager):\n \"\"\"\n Optionally wrap inside `context_manager` if condition is `True`.\n \"\"\"\n if condition:\n with context_manager:\n yield\n else:\n yield\n\n\n@contextmanager\ndef control_flow_prims_disabled():\n global _DISABLE_CONTROL_FLOW_PRIM\n stored_flag = _DISABLE_CONTROL_FLOW_PRIM\n try:\n _DISABLE_CONTROL_FLOW_PRIM = True\n yield\n finally:\n _DISABLE_CONTROL_FLOW_PRIM = stored_flag\n\n\ndef cond(pred, true_operand, true_fun, false_operand, false_fun):\n if _DISABLE_CONTROL_FLOW_PRIM:\n if pred:\n return true_fun(true_operand)\n else:\n return false_fun(false_operand)\n else:\n return lax.cond(pred, true_operand, true_fun, false_operand, false_fun)\n\n\ndef while_loop(cond_fun, body_fun, init_val):\n if _DISABLE_CONTROL_FLOW_PRIM:\n val = init_val\n while cond_fun(val):\n val = body_fun(val)\n return val\n else:\n return lax.while_loop(cond_fun, body_fun, init_val)\n\n\ndef fori_loop(lower, upper, body_fun, init_val):\n if _DISABLE_CONTROL_FLOW_PRIM:\n val = init_val\n for i in range(int(lower), int(upper)):\n val = body_fun(i, val)\n return val\n else:\n return lax.fori_loop(lower, upper, body_fun, init_val)\n\n\ndef not_jax_tracer(x):\n \"\"\"\n Checks if `x` is not an array generated inside `jit`, `pmap`, `vmap`, or `lax_control_flow`.\n \"\"\"\n return not isinstance(x, Tracer)\n\n\ndef identity(x, *args, **kwargs):\n return x\n\n\ndef cached_by(outer_fn, *keys):\n # Restrict cache size to prevent ref cycles.\n max_size = 8\n outer_fn._cache = getattr(outer_fn, \"_cache\", OrderedDict())\n\n def _wrapped(fn):\n fn_cache = outer_fn._cache\n if keys in fn_cache:\n fn = fn_cache[keys]\n # update position\n del fn_cache[keys]\n fn_cache[keys] = fn\n else:\n fn_cache[keys] = fn\n if len(fn_cache) > max_size:\n fn_cache.popitem(last=False)\n return fn\n\n return _wrapped\n\n\ndef progress_bar_factory(num_samples, num_chains):\n \"\"\"Factory that builds a progress bar decorator along\n with the `set_tqdm_description` and `close_tqdm` functions\n \"\"\"\n\n if num_samples > 20:\n print_rate = int(num_samples / 20)\n else:\n print_rate = 1\n\n remainder = num_samples % print_rate\n\n tqdm_bars = {}\n finished_chains = []\n for chain in range(num_chains):\n tqdm_bars[chain] = tqdm_auto(range(num_samples), position=chain)\n tqdm_bars[chain].set_description(\"Compiling.. 
\", refresh=True)\n\n def _update_tqdm(arg, transform, device):\n chain_match = _CHAIN_RE.search(str(device))\n assert chain_match\n chain = int(chain_match.group())\n tqdm_bars[chain].set_description(f\"Running chain {chain}\", refresh=False)\n tqdm_bars[chain].update(arg)\n\n def _close_tqdm(arg, transform, device):\n chain_match = _CHAIN_RE.search(str(device))\n assert chain_match\n chain = int(chain_match.group())\n tqdm_bars[chain].update(arg)\n finished_chains.append(chain)\n if len(finished_chains) == num_chains:\n for chain in range(num_chains):\n tqdm_bars[chain].close()\n\n def _update_progress_bar(iter_num):\n \"\"\"Updates tqdm progress bar of a JAX loop only if the iteration number is a multiple of the print_rate\n Usage: carry = progress_bar((iter_num, print_rate), carry)\n \"\"\"\n\n _ = lax.cond(\n iter_num == 1,\n lambda _: host_callback.id_tap(\n _update_tqdm, 0, result=iter_num, tap_with_device=True\n ),\n lambda _: iter_num,\n operand=None,\n )\n _ = lax.cond(\n iter_num % print_rate == 0,\n lambda _: host_callback.id_tap(\n _update_tqdm, print_rate, result=iter_num, tap_with_device=True\n ),\n lambda _: iter_num,\n operand=None,\n )\n _ = lax.cond(\n iter_num == num_samples,\n lambda _: host_callback.id_tap(\n _close_tqdm, remainder, result=iter_num, tap_with_device=True\n ),\n lambda _: iter_num,\n operand=None,\n )\n\n def progress_bar_fori_loop(func):\n \"\"\"Decorator that adds a progress bar to `body_fun` used in `lax.fori_loop`.\n Note that `body_fun` must be looping over a tuple who's first element is `np.arange(num_samples)`.\n This means that `iter_num` is the current iteration number\n \"\"\"\n\n def wrapper_progress_bar(i, vals):\n result = func(i, vals)\n _update_progress_bar(i + 1)\n return result\n\n return wrapper_progress_bar\n\n return progress_bar_fori_loop\n\n\ndef fori_collect(\n lower,\n upper,\n body_fun,\n init_val,\n transform=identity,\n progbar=True,\n return_last_val=False,\n collection_size=None,\n thinning=1,\n **progbar_opts,\n):\n \"\"\"\n This looping construct works like :func:`~jax.lax.fori_loop` but with the additional\n effect of collecting values from the loop body. In addition, this allows for\n post-processing of these samples via `transform`, and progress bar updates.\n Note that, `progbar=False` will be faster, especially when collecting a\n lot of samples. Refer to example usage in :func:`~numpyro.infer.mcmc.hmc`.\n\n :param int lower: the index to start the collective work. In other words,\n we will skip collecting the first `lower` values.\n :param int upper: number of times to run the loop body.\n :param body_fun: a callable that takes a collection of\n `np.ndarray` and returns a collection with the same shape and\n `dtype`.\n :param init_val: initial value to pass as argument to `body_fun`. Can\n be any Python collection type containing `np.ndarray` objects.\n :param transform: a callable to post-process the values returned by `body_fn`.\n :param progbar: whether to post progress bar updates.\n :param bool return_last_val: If `True`, the last value is also returned.\n This has the same type as `init_val`.\n :param thinning: Positive integer that controls the thinning ratio for retained\n values. Defaults to 1, i.e. no thinning.\n :param int collection_size: Size of the returned collection. If not\n specified, the size will be ``(upper - lower) // thinning``. 
If the\n size is larger than ``(upper - lower) // thinning``, only the top\n ``(upper - lower) // thinning`` entries will be non-zero.\n :param `**progbar_opts`: optional additional progress bar arguments. A\n `diagnostics_fn` can be supplied which when passed the current value\n from `body_fun` returns a string that is used to update the progress\n bar postfix. Also a `progbar_desc` keyword argument can be supplied\n which is used to label the progress bar.\n :return: collection with the same type as `init_val` with values\n collected along the leading axis of `np.ndarray` objects.\n \"\"\"\n assert lower <= upper\n assert thinning >= 1\n collection_size = (\n (upper - lower) // thinning if collection_size is None else collection_size\n )\n assert collection_size >= (upper - lower) // thinning\n init_val_flat, unravel_fn = ravel_pytree(transform(init_val))\n start_idx = lower + (upper - lower) % thinning\n num_chains = progbar_opts.pop(\"num_chains\", 1)\n # host_callback does not work yet with multi-GPU platforms\n # See: https://github.com/google/jax/issues/6447\n if num_chains > 1 and jax.default_backend() == \"gpu\":\n warnings.warn(\n \"We will disable progress bar because it does not work yet on multi-GPUs platforms.\"\n )\n progbar = False\n\n @cached_by(fori_collect, body_fun, transform)\n def _body_fn(i, vals):\n val, collection, start_idx, thinning = vals\n val = body_fun(val)\n idx = (i - start_idx) // thinning\n collection = cond(\n idx >= 0,\n collection,\n lambda x: ops.index_update(x, idx, ravel_pytree(transform(val))[0]),\n collection,\n identity,\n )\n return val, collection, start_idx, thinning\n\n collection = jnp.zeros((collection_size,) + init_val_flat.shape)\n if not progbar:\n last_val, collection, _, _ = fori_loop(\n 0, upper, _body_fn, (init_val, collection, start_idx, thinning)\n )\n elif num_chains > 1:\n progress_bar_fori_loop = progress_bar_factory(upper, num_chains)\n _body_fn_pbar = progress_bar_fori_loop(_body_fn)\n last_val, collection, _, _ = fori_loop(\n 0, upper, _body_fn_pbar, (init_val, collection, start_idx, thinning)\n )\n else:\n diagnostics_fn = progbar_opts.pop(\"diagnostics_fn\", None)\n progbar_desc = progbar_opts.pop(\"progbar_desc\", lambda x: \"\")\n\n vals = (init_val, collection, device_put(start_idx), device_put(thinning))\n if upper == 0:\n # special case, only compiling\n jit(_body_fn)(0, vals)\n else:\n with tqdm.trange(upper) as t:\n for i in t:\n vals = jit(_body_fn)(i, vals)\n t.set_description(progbar_desc(i), refresh=False)\n if diagnostics_fn:\n t.set_postfix_str(diagnostics_fn(vals[0]), refresh=False)\n\n last_val, collection, _, _ = vals\n\n unravel_collection = vmap(unravel_fn)(collection)\n return (unravel_collection, last_val) if return_last_val else unravel_collection\n\n\ndef soft_vmap(fn, xs, batch_ndims=1, chunk_size=None):\n \"\"\"\n Vectorizing map that maps a function `fn` over `batch_ndims` leading axes\n of `xs`. This uses jax.vmap over smaller chunks of the batch dimensions\n to keep memory usage constant.\n\n :param callable fn: The function to map over.\n :param xs: JAX pytree (e.g. 
an array, a list/tuple/dict of arrays,...)\n :param int batch_ndims: The number of leading dimensions of `xs`\n to apply `fn` element-wise over them.\n :param int chunk_size: Size of each chunk of `xs`.\n Defaults to the size of batch dimensions.\n :returns: output of `fn(xs)`.\n \"\"\"\n flatten_xs = tree_flatten(xs)[0]\n batch_shape = np.shape(flatten_xs[0])[:batch_ndims]\n for x in flatten_xs[1:]:\n assert np.shape(x)[:batch_ndims] == batch_shape\n\n # we'll do map(vmap(fn), xs) and make xs.shape = (num_chunks, chunk_size, ...)\n num_chunks = batch_size = int(np.prod(batch_shape))\n prepend_shape = (-1,) if batch_size > 1 else ()\n xs = tree_map(\n lambda x: jnp.reshape(x, prepend_shape + jnp.shape(x)[batch_ndims:]), xs\n )\n # XXX: probably for the default behavior with chunk_size=None,\n # it is better to catch OOM error and reduce chunk_size by half until OOM disappears.\n chunk_size = batch_size if chunk_size is None else min(batch_size, chunk_size)\n if chunk_size > 1:\n pad = chunk_size - (batch_size % chunk_size)\n xs = tree_map(\n lambda x: jnp.pad(x, ((0, pad),) + ((0, 0),) * (np.ndim(x) - 1)), xs\n )\n num_chunks = batch_size // chunk_size + int(pad > 0)\n prepend_shape = (-1,) if num_chunks > 1 else ()\n xs = tree_map(\n lambda x: jnp.reshape(x, prepend_shape + (chunk_size,) + jnp.shape(x)[1:]),\n xs,\n )\n fn = vmap(fn)\n\n ys = lax.map(fn, xs) if num_chunks > 1 else fn(xs)\n map_ndims = int(num_chunks > 1) + int(chunk_size > 1)\n ys = tree_map(\n lambda y: jnp.reshape(y, (-1,) + jnp.shape(y)[map_ndims:])[:batch_size], ys\n )\n return tree_map(lambda y: jnp.reshape(y, batch_shape + jnp.shape(y)[1:]), ys)\n"
] | [
[
"numpy.prod",
"numpy.shape",
"numpy.random.seed",
"numpy.ndim"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
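The collection indexing in fori_collect from the numpyro row above is worth tracing once in plain Python: a value produced at loop iteration i targets slot (i - start_idx) // thinning, negative slots are the warm-up phase, and because every eligible iteration writes, the last write to a slot wins. A minimal re-enactment (the incrementing val is my stand-in for body_fun; the other names follow the repo):

import numpy as np

lower, upper, thinning = 3, 13, 2
collection_size = (upper - lower) // thinning      # 5 retained values
start_idx = lower + (upper - lower) % thinning     # first iteration eligible to write
collection = np.zeros(collection_size)

val = 0.0
for i in range(upper):
    val += 1.0                                     # stand-in for body_fun
    idx = (i - start_idx) // thinning
    if idx >= 0:                                   # negative idx: warm-up, not collected
        collection[idx] = val                      # last write to a slot wins

assert list(collection) == [5.0, 7.0, 9.0, 11.0, 13.0]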
dnacombo/mne-bids | [
"b6f7b80456e207005fb126c7136d7cf30190831c"
] | [
"examples/convert_ieeg_to_bids.py"
] | [
"\"\"\"\n.. currentmodule:: mne_bids\n\n====================================\n08. Convert iEEG data to BIDS format\n====================================\n\nIn this example, we use MNE-BIDS to create a BIDS-compatible directory of iEEG\ndata. Specifically, we will follow these steps:\n\n1. Download some iEEG data from the\n `MNE-ECoG example <https://mne.tools/stable/auto_tutorials/misc/plot_ecog>`_.\n\n2. Load the data, extract information, and save in a new BIDS directory.\n\n3. Check the result and compare it with the standard.\n\n4. Cite MNE-BIDS\n\n5. Confirm that written iEEG coordinates are the\n same before :func:`write_raw_bids` was called.\n\nThe iEEG data will be written by :func:`write_raw_bids` with\nthe addition of extra metadata elements in the following files:\n\n * the sidecar file ``ieeg.json``\n * ``electrodes.tsv``\n * ``coordsystem.json``\n * ``events.tsv``\n * ``channels.tsv``\n\nCompared to EEG data, the main differences are within the\n``coordsystem.json`` and ``electrodes.tsv`` files.\nFor more information on these files,\nrefer to the `iEEG part of the BIDS specification`_.\n\n.. _iEEG part of the BIDS specification: https://bids-specification.readthedocs.io/en/latest/04-modality-specific-files/04-intracranial-electroencephalography.html\n.. _appendix VIII: https://bids-specification.readthedocs.io/en/stable/99-appendices/08-coordinate-systems.html\n.. _background on FreeSurfer: https://mne.tools/dev/auto_tutorials/source-modeling/plot_background_freesurfer_mne\n.. _MNE-Python coordinate frames: https://mne.tools/dev/auto_tutorials/source-modeling/plot_source_alignment.html\n\n\"\"\" # noqa: E501\n\n# Authors: Adam Li <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os.path as op\nimport shutil\nfrom pprint import pprint\n\nimport numpy as np\n\nimport mne\nfrom mne_bids import (write_raw_bids, BIDSPath,\n read_raw_bids, print_dir_tree)\n\n\n###############################################################################\n# Step 1: Download the data\n# -------------------------\n#\n# First, we need some data to work with. We will use the\n# data downloaded via MNE-Python's ``datasets`` API:\n# :func:`mne.datasets.misc.data_path`\nmisc_path = mne.datasets.misc.data_path(force_update=True)\n\n# The electrode coords data are in the tsv file format\n# which is easily read in using numpy\nfname = misc_path + '/ecog/sample_ecog_electrodes.tsv'\ndata = np.loadtxt(fname, dtype=str, delimiter='\\t',\n comments=None, encoding='utf-8')\ncolumn_names = data[0, :]\ninfo = data[1:, :]\nelectrode_tsv = dict()\nfor i, name in enumerate(column_names):\n electrode_tsv[name] = info[:, i].tolist()\n\n# load in channel names\nch_names = electrode_tsv['name']\n\n# load in the xyz coordinates as a float\nelec = np.empty(shape=(len(ch_names), 3))\nfor ind, axis in enumerate(['x', 'y', 'z']):\n elec[:, ind] = list(map(float, electrode_tsv[axis]))\n\n###############################################################################\n# Now we make a montage stating that the iEEG contacts are in the MNI\n# coordinate system, which corresponds to the `fsaverage` subject in\n# FreeSurfer. 
For example, one can look at how MNE-Python deals with iEEG data\n# at `Working with SEEG\n# <https://mne.tools/stable/auto_tutorials/misc/plot_seeg.html>`_.\nmontage = mne.channels.make_dig_montage(ch_pos=dict(zip(ch_names, elec)),\n coord_frame='mni_tal')\nprint(f'Created {len(ch_names)} channel positions')\nprint(dict(zip(ch_names, elec)))\n\n###############################################################################\n# We will load a :class:`mne.io.Raw` object and\n# use the montage we created.\ninfo = mne.create_info(ch_names, 1000., 'ecog')\nraw = mne.io.read_raw_edf(misc_path + '/ecog/sample_ecog.edf')\nraw.info['line_freq'] = 60 # specify power line frequency as required by BIDS\nraw.set_channel_types({ch: 'ecog' for ch in raw.ch_names})\n\n# set the bad channels\nraw.info['bads'].extend(['BTM1', 'BTM2', 'BTM3', 'BTM4', 'BTM5', 'BTM6',\n 'BTP1', 'BTP2', 'BTP3', 'BTP4', 'BTP5', 'BTP6',\n 'EKGL', 'EKGR'])\n\n# set montage\nraw.set_montage(montage, on_missing='warn')\n\n###############################################################################\n# Let us confirm what our channel coordinates look like.\n\n# make a plot of the sensors in 2D plane\nraw.plot_sensors(ch_type='ecog')\n\n# Get the first 5 channels and show their locations.\npicks = mne.pick_types(raw.info, ecog=True)\ndig = [raw.info['dig'][pick] for pick in picks]\nchs = [raw.info['chs'][pick] for pick in picks]\npos = np.array([ch['r'] for ch in dig[:5]])\nch_names = np.array([ch['ch_name'] for ch in chs[:5]])\nprint(\"The channel coordinates before writing into BIDS: \")\npprint([x for x in zip(ch_names, pos)])\n\n###############################################################################\n# BIDS vs MNE-Python Coordinate Systems\n# -------------------------------------\n#\n# BIDS has many acceptable coordinate systems for iEEG, which can be viewed in\n# `appendix VIII`_ of the BIDS specification.\n# However, MNE-BIDS depends on MNE-Python and MNE-Python does not support all\n# these coordinate systems (yet).\n#\n# MNE-Python has a few tutorials on this topic:\n#\n# - `background on FreeSurfer`_\n# - `MNE-Python coordinate frames`_\n#\n# Currently, MNE-Python supports the ``mni_tal`` coordinate frame, which\n# corresponds to the ``fsaverage`` BIDS coordinate system. All other coordinate\n# frames in MNE-Python if written with :func:`mne_bids.write_raw_bids` are\n# written with coordinate system ``'Other'``. Note, then we suggest using\n# :func:`mne_bids.update_sidecar_json` to update the sidecar\n# ``*_coordsystem.json`` file to add additional information.\n\n###############################################################################\n# Step 2: Formatting as BIDS\n# --------------------------\n#\n# Now, let us format the `Raw` object into BIDS.\n#\n# With this step, we have everything to start a new BIDS directory using\n# our data. To do that, we can use :func:`write_raw_bids`\n# Generally, :func:`write_raw_bids` tries to extract as much\n# meta data as possible from the raw data and then formats it in a BIDS\n# compatible way. :func:`write_raw_bids` takes a bunch of inputs, most of\n# which are however optional. The required inputs are:\n#\n# * :code:`raw`\n# * :code:`bids_basename`\n# * :code:`bids_root`\n#\n# ... 
as you can see in the docstring:\nprint(write_raw_bids.__doc__)\n\n###############################################################################\n# Let us initialize some of the necessary data for the subject.\n\n# There is a subject, and specific task for the dataset.\nsubject_id = '001' # zero padding to account for >100 subjects in this dataset\ntask = 'testresteyes'\n\n# get MNE-Python directory w/ example data\nmne_data_dir = mne.get_config('MNE_DATASETS_MISC_PATH')\n\n# There is the root directory for where we will write our data.\nbids_root = op.join(mne_data_dir, 'ieegmmidb_bids')\n\n###############################################################################\n# To ensure the output path doesn't contain any leftover files from previous\n# tests and example runs, we simply delete it.\n#\n# .. warning:: Do not delete directories that may contain important data!\n#\n\nif op.exists(bids_root):\n shutil.rmtree(bids_root)\n\n###############################################################################\n# Now we just need to specify a few iEEG details to make things work:\n# We need the basename of the dataset. In addition, :func:`write_raw_bids`\n# requires the ``.filenames`` attribute of the Raw object to be non-empty,\n# so since we\n# initialized the dataset from an array, we need to do a hack where we\n# temporarily save the data to disc before reading it back in.\n\n# Now convert our data to be in a new BIDS dataset.\nbids_path = BIDSPath(subject=subject_id,\n task=task, acquisition=\"ecog\", root=bids_root)\n\n# write `raw` to BIDS and anonymize it into BrainVision format\nwrite_raw_bids(raw, bids_path, anonymize=dict(daysback=30000),\n overwrite=True)\n\n###############################################################################\n# Step 3: Check and compare with standard\n# ---------------------------------------\n\n# Now we have written our BIDS directory.\nprint_dir_tree(bids_root)\n\n###############################################################################\n# Step 4: Cite mne-bids\n# ---------------------\n# We can see that the appropriate citations are already written in the README.\n# If you are preparing a manuscript, please make sure to also cite MNE-BIDS\n# there.\nreadme = op.join(bids_root, 'README')\nwith open(readme, 'r', encoding='utf-8-sig') as fid:\n text = fid.read()\nprint(text)\n\n###############################################################################\n# MNE-BIDS has created a suitable directory structure for us, and among other\n# meta data files, it started an ``events.tsv``` and ``channels.tsv`` file,\n# and created an initial ``dataset_description.json`` file on top!\n#\n# Now it's time to manually check the BIDS directory and the meta files to add\n# all the information that MNE-BIDS could not infer. 
For instance, you must\n# describe ``iEEGReference`` and ``iEEGGround`` yourself.\n# It's easy to find these by searching for ``\"n/a\"`` in the sidecar files.\n#\n# ``$ grep -i 'n/a' <bids_root>```\n#\n# Remember that there is a convenient JavaScript tool to validate all your BIDS\n# directories called the \"BIDS-validator\", available as a web version and a\n# command line tool:\n#\n# Web version: https://bids-standard.github.io/bids-validator/\n#\n# Command line tool: https://www.npmjs.com/package/bids-validator\n\n###############################################################################\n# Step 5: Plot output channels and check that they match!\n# -------------------------------------------------------\n#\n# Now we have written our BIDS directory. We can use\n# :func:`read_raw_bids` to read in the data.\n\n# read in the BIDS dataset and plot the coordinates\nraw = read_raw_bids(bids_path=bids_path)\n\n# get the first 5 channels and show their locations\n# this should match what was printed earlier.\npicks = mne.pick_types(raw.info, ecog=True)\ndig = [raw.info['dig'][pick] for pick in picks]\nchs = [raw.info['chs'][pick] for pick in picks]\npos = np.array([ch['r'] for ch in dig[:5]])\nch_names = np.array([ch['ch_name'] for ch in chs[:5]])\n\nprint(\"The channel montage after writing into BIDS: \")\npprint(dig[0:5])\nprint(\"The channel coordinates after writing into BIDS: \")\npprint([x for x in zip(ch_names, pos)])\n\n# make a plot of the sensors in 2D plane\nraw.plot_sensors(ch_type='ecog')\n"
] | [
[
"numpy.array",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
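The electrodes.tsv parsing in the mne-bids example above is a plain column transposition followed by a float cast. A self-contained sketch of the same steps on a synthetic two-contact file (the contact names and coordinates below are made up, not from the real sample_ecog_electrodes.tsv):

import io
import numpy as np

tsv = io.StringIO("name\tx\ty\tz\nG1\t0.01\t0.02\t0.03\nG2\t0.04\t0.05\t0.06\n")
data = np.loadtxt(tsv, dtype=str, delimiter='\t', comments=None)

column_names = data[0, :]
info = data[1:, :]
electrode_tsv = {name: info[:, i].tolist() for i, name in enumerate(column_names)}

ch_names = electrode_tsv['name']
elec = np.empty(shape=(len(ch_names), 3))
for ind, axis in enumerate(['x', 'y', 'z']):
    elec[:, ind] = list(map(float, electrode_tsv[axis]))

print(dict(zip(ch_names, elec)))   # {'G1': array([0.01, 0.02, 0.03]), ...}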
y-mitsui/DensityRatioEstimation | [
"de84b60890e36087f09cdc9215e0a83610d61de5"
] | [
"py_lsif.py"
] | [
"# -*- coding:utf-8 -*-\n# uLSIF\nimport numpy as np\n\nclass LSIF:\n \"\"\"\n Density radio estimation of uLSIF (Sugiyama) using RBF kernel\n r(sample) = P_x(sample) / P_y(sample)\n \n example:\n LSIF(0.3, 0.1).fit(sample_molecule, sample_denominator).predict(sample_new)\n \"\"\"\n \n def __init__(self, band_width, regulation):\n \"\"\"\n @param band_width: parameter of RBF kernel\n @param regulation: regulation to prevent over fitting\n \"\"\"\n self.band_width = band_width\n self.regulation = regulation\n \n def kernel(self, X):\n result = []\n for X2 in self.sample_kernel_fold:\n diff_vec = X - X2\n result.append(np.exp(-np.dot(diff_vec, diff_vec) / (2*self.band_width**2)))\n return np.array(result)\n \n def estGHat(self, sample_hospital):\n g_hat = np.matrix(np.zeros((self.n_kernel_fold, self.n_kernel_fold)),dtype=float)\n for sample in sample_hospital:\n basis_result = np.matrix(self.kernel(sample))\n g_hat += basis_result.T * basis_result\n g_hat /= sample_hospital.shape[0]\n return g_hat\n\n def estHHat(self, sample_entry):\n h_hat = np.matrix(np.zeros(self.n_kernel_fold),dtype=float)\n for sample in sample_entry:\n basis_result = np.matrix(self.kernel(sample))\n h_hat += basis_result\n h_hat /= sample_entry.shape[0]\n return h_hat\n \n def _score(self, theta, g_hat, h_hat):\n return 0.5 * theta.T * g_hat * theta - theta.T * h_hat.T\n\n def score(self, sample_X, sample_Y):\n g_hat = self.estGHat(sample_Y)\n h_hat = self.estHHat(sample_X)\n return self._score(self.thetas, g_hat, h_hat) \n \n def fit(self, sample_X, sample_Y):\n self.n_kernel_fold = sample_X.shape[0]\n self.n_kernel_fold = 100\n self.sample_kernel_fold = sample_X[:self.n_kernel_fold]\n g_hat = self.estGHat(sample_Y)\n h_hat = self.estHHat(sample_X)\n self.thetas = np.linalg.inv(g_hat + self.regulation * np.matrix(np.identity(self.n_kernel_fold))) * h_hat.T\n self.thetas = np.maximum(self.thetas, 0)\n return self\n \n def predict(self, sample):\n result = []\n for x2 in sample:\n r = self.thetas.T * np.matrix(self.kernel(x2)).T\n result.append(r[0,0])\n return np.array(result)\n\n \n\n \n"
] | [
[
"numpy.dot",
"numpy.maximum",
"numpy.identity",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
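The fit in the uLSIF row above solves the regularized least-squares system theta = (G_hat + lambda*I)^{-1} h_hat and clips negative coefficients. A vectorized sketch of the same estimator on synthetic 1-D Gaussians (all names below are mine; np.linalg.solve replaces the explicit inverse, which is the numerically safer choice):

import numpy as np

rng = np.random.default_rng(0)
x_num = rng.normal(0.0, 1.0, size=(200, 1))    # samples from the numerator density P_x
x_den = rng.normal(0.5, 1.2, size=(200, 1))    # samples from the denominator density P_y

centers = x_num[:100]                          # kernel centers, as in LSIF.fit
bw, lam = 0.3, 0.1

def design_matrix(X):
    # phi[n, k] = exp(-||X_n - centers_k||^2 / (2 * bw**2))
    sq_dists = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)
    return np.exp(-sq_dists / (2.0 * bw ** 2))

G_hat = design_matrix(x_den).T @ design_matrix(x_den) / len(x_den)
h_hat = design_matrix(x_num).mean(axis=0)
theta = np.linalg.solve(G_hat + lam * np.eye(len(centers)), h_hat)
theta = np.maximum(theta, 0.0)                 # uLSIF clips negative weights

r_hat = design_matrix(np.array([[0.0]])) @ theta
print(r_hat[0])                                # estimated P_x(0) / P_y(0), should be > 1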
sultansidhu/Graphonomy | [
"6ca8ddf89c4b42a5c9869fe890df9fc807e1d4ce"
] | [
"exp/test/eval_show_cihp2pascal.py"
] | [
"import socket\nimport timeit\nimport numpy as np\nfrom PIL import Image\nfrom datetime import datetime\nimport os\nimport sys\nimport glob\nfrom collections import OrderedDict\n\nsys.path.append(\"../../\")\n# PyTorch includes\nimport torch\nimport pdb\nfrom torch.autograd import Variable\nimport torch.optim as optim\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import make_grid\nimport cv2\n\n# Tensorboard include\n# from tensorboardX import SummaryWriter\n\n# Custom includes\nfrom dataloaders import pascal\nfrom utils import util\nfrom networks import deeplab_xception_transfer, graph\nfrom dataloaders import custom_transforms as tr\n\n#\nimport argparse\nimport copy\nimport torch.nn.functional as F\nfrom test_from_disk import eval_\n\n\ngpu_id = 1\n\nlabel_colours = [\n (0, 0, 0)\n # 0=background\n ,\n (128, 0, 0),\n (0, 128, 0),\n (128, 128, 0),\n (0, 0, 128),\n (128, 0, 128),\n (0, 128, 128),\n]\n\n\ndef flip(x, dim):\n indices = [slice(None)] * x.dim()\n indices[dim] = torch.arange(\n x.size(dim) - 1, -1, -1, dtype=torch.long, device=x.device\n )\n return x[tuple(indices)]\n\n\n# def flip_cihp(tail_list):\n# '''\n#\n# :param tail_list: tail_list size is 1 x n_class x h x w\n# :return:\n# '''\n# # tail_list = tail_list[0]\n# tail_list_rev = [None] * 20\n# for xx in range(14):\n# tail_list_rev[xx] = tail_list[xx].unsqueeze(0)\n# tail_list_rev[14] = tail_list[15].unsqueeze(0)\n# tail_list_rev[15] = tail_list[14].unsqueeze(0)\n# tail_list_rev[16] = tail_list[17].unsqueeze(0)\n# tail_list_rev[17] = tail_list[16].unsqueeze(0)\n# tail_list_rev[18] = tail_list[19].unsqueeze(0)\n# tail_list_rev[19] = tail_list[18].unsqueeze(0)\n# return torch.cat(tail_list_rev,dim=0)\n\n\ndef decode_labels(mask, num_images=1, num_classes=20):\n \"\"\"Decode batch of segmentation masks.\n \n Args:\n mask: result of inference after taking argmax.\n num_images: number of images to decode from the batch.\n num_classes: number of classes to predict (including background).\n \n Returns:\n A batch with num_images RGB images of the same size as the input. 
\n \"\"\"\n n, h, w = mask.shape\n assert n >= num_images, (\n \"Batch size %d should be greater or equal than number of images to save %d.\"\n % (n, num_images)\n )\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n img = Image.new(\"RGB\", (len(mask[i, 0]), len(mask[i])))\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_, j_] = label_colours[k]\n outputs[i] = np.array(img)\n return outputs\n\n\ndef get_parser():\n \"\"\"argparse begin\"\"\"\n parser = argparse.ArgumentParser()\n LookupChoices = type(\n \"\",\n (argparse.Action,),\n dict(__call__=lambda a, p, n, v, o: setattr(n, a.dest, a.choices[v])),\n )\n\n parser.add_argument(\"--epochs\", default=100, type=int)\n parser.add_argument(\"--batch\", default=16, type=int)\n parser.add_argument(\"--lr\", default=1e-7, type=float)\n parser.add_argument(\"--numworker\", default=12, type=int)\n parser.add_argument(\"--step\", default=30, type=int)\n # parser.add_argument('--loadmodel',default=None,type=str)\n parser.add_argument(\"--classes\", default=7, type=int)\n parser.add_argument(\"--testepoch\", default=10, type=int)\n parser.add_argument(\"--loadmodel\", default=\"\", type=str)\n parser.add_argument(\"--txt_file\", default=\"\", type=str)\n parser.add_argument(\"--hidden_layers\", default=128, type=int)\n parser.add_argument(\"--gpus\", default=4, type=int)\n parser.add_argument(\"--output_path\", default=\"./results/\", type=str)\n parser.add_argument(\"--gt_path\", default=\"./results/\", type=str)\n opts = parser.parse_args()\n return opts\n\n\ndef main(opts):\n adj2_ = torch.from_numpy(graph.cihp2pascal_nlp_adj).float()\n adj2_test = adj2_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 20).cuda()\n\n adj1_ = Variable(torch.from_numpy(graph.preprocess_adj(graph.pascal_graph)).float())\n adj1_test = adj1_.unsqueeze(0).unsqueeze(0).expand(1, 1, 7, 7).cuda()\n\n cihp_adj = graph.preprocess_adj(graph.cihp_graph)\n adj3_ = Variable(torch.from_numpy(cihp_adj).float())\n adj3_test = adj3_.unsqueeze(0).unsqueeze(0).expand(1, 1, 20, 20).cuda()\n\n p = OrderedDict() # Parameters to include in report\n p[\"trainBatch\"] = opts.batch # Training batch size\n p[\"nAveGrad\"] = 1 # Average the gradient of several iterations\n p[\"lr\"] = opts.lr # Learning rate\n p[\"lrFtr\"] = 1e-5\n p[\"lraspp\"] = 1e-5\n p[\"lrpro\"] = 1e-5\n p[\"lrdecoder\"] = 1e-5\n p[\"lrother\"] = 1e-5\n p[\"wd\"] = 5e-4 # Weight decay\n p[\"momentum\"] = 0.9 # Momentum\n p[\"epoch_size\"] = 10 # How many epochs to change learning rate\n p[\"num_workers\"] = opts.numworker\n backbone = \"xception\" # Use xception or resnet as feature extractor,\n\n with open(opts.txt_file, \"r\") as f:\n img_list = f.readlines()\n\n max_id = 0\n save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))\n exp_name = os.path.dirname(os.path.abspath(__file__)).split(\"/\")[-1]\n runs = glob.glob(os.path.join(save_dir_root, \"run\", \"run_*\"))\n for r in runs:\n run_id = int(r.split(\"_\")[-1])\n if run_id >= max_id:\n max_id = run_id + 1\n # run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0\n\n # Network definition\n if backbone == \"xception\":\n net = deeplab_xception_transfer.deeplab_xception_transfer_projection(\n n_classes=opts.classes,\n os=16,\n hidden_layers=opts.hidden_layers,\n source_classes=20,\n )\n elif backbone == \"resnet\":\n # net = deeplab_resnet.DeepLabv3_plus(nInputChannels=3, n_classes=7, os=16, pretrained=True)\n raise 
NotImplementedError\n else:\n raise NotImplementedError\n\n if gpu_id >= 0:\n net.cuda()\n\n # net load weights\n if not opts.loadmodel == \"\":\n x = torch.load(opts.loadmodel)\n net.load_source_model(x)\n print(\"load model:\", opts.loadmodel)\n else:\n print(\"no model load !!!!!!!!\")\n\n ## multi scale\n scale_list = [1, 0.5, 0.75, 1.25, 1.5, 1.75]\n testloader_list = []\n testloader_flip_list = []\n for pv in scale_list:\n composed_transforms_ts = transforms.Compose(\n [tr.Scale_(pv), tr.Normalize_xception_tf(), tr.ToTensor_()]\n )\n\n composed_transforms_ts_flip = transforms.Compose(\n [\n tr.Scale_(pv),\n tr.HorizontalFlip(),\n tr.Normalize_xception_tf(),\n tr.ToTensor_(),\n ]\n )\n\n voc_val = pascal.VOCSegmentation(split=\"val\", transform=composed_transforms_ts)\n voc_val_f = pascal.VOCSegmentation(\n split=\"val\", transform=composed_transforms_ts_flip\n )\n\n testloader = DataLoader(\n voc_val, batch_size=1, shuffle=False, num_workers=p[\"num_workers\"]\n )\n testloader_flip = DataLoader(\n voc_val_f, batch_size=1, shuffle=False, num_workers=p[\"num_workers\"]\n )\n\n testloader_list.append(copy.deepcopy(testloader))\n testloader_flip_list.append(copy.deepcopy(testloader_flip))\n\n print(\"Eval Network\")\n\n if not os.path.exists(opts.output_path + \"pascal_output_vis/\"):\n os.makedirs(opts.output_path + \"pascal_output_vis/\")\n if not os.path.exists(opts.output_path + \"pascal_output/\"):\n os.makedirs(opts.output_path + \"pascal_output/\")\n\n start_time = timeit.default_timer()\n # One testing epoch\n total_iou = 0.0\n net.eval()\n for ii, large_sample_batched in enumerate(\n zip(*testloader_list, *testloader_flip_list)\n ):\n print(ii)\n # 1 0.5 0.75 1.25 1.5 1.75 ; flip:\n sample1 = large_sample_batched[:6]\n sample2 = large_sample_batched[6:]\n for iii, sample_batched in enumerate(zip(sample1, sample2)):\n inputs, labels = sample_batched[0][\"image\"], sample_batched[0][\"label\"]\n inputs_f, _ = sample_batched[1][\"image\"], sample_batched[1][\"label\"]\n inputs = torch.cat((inputs, inputs_f), dim=0)\n if iii == 0:\n _, _, h, w = inputs.size()\n # assert inputs.size() == inputs_f.size()\n\n # Forward pass of the mini-batch\n inputs, labels = Variable(inputs, requires_grad=False), Variable(labels)\n\n with torch.no_grad():\n if gpu_id >= 0:\n inputs, labels = inputs.cuda(), labels.cuda()\n # outputs = net.forward(inputs)\n # pdb.set_trace()\n outputs = net.forward(\n inputs, adj1_test.cuda(), adj3_test.cuda(), adj2_test.cuda()\n )\n outputs = (outputs[0] + flip(outputs[1], dim=-1)) / 2\n outputs = outputs.unsqueeze(0)\n\n if iii > 0:\n outputs = F.upsample(\n outputs, size=(h, w), mode=\"bilinear\", align_corners=True\n )\n outputs_final = outputs_final + outputs\n else:\n outputs_final = outputs.clone()\n ################ plot pic\n predictions = torch.max(outputs_final, 1)[1]\n prob_predictions = torch.max(outputs_final, 1)[0]\n results = predictions.cpu().numpy()\n prob_results = prob_predictions.cpu().numpy()\n vis_res = decode_labels(results)\n\n parsing_im = Image.fromarray(vis_res[0])\n parsing_im.save(\n opts.output_path + \"pascal_output_vis/{}.png\".format(img_list[ii][:-1])\n )\n cv2.imwrite(\n opts.output_path + \"pascal_output/{}.png\".format(img_list[ii][:-1]),\n results[0, :, :],\n )\n # np.save('../../cihp_prob_output/{}.npy'.format(img_list[ii][:-1]), prob_results[0, :, :])\n # pred_list.append(predictions.cpu())\n # label_list.append(labels.squeeze(1).cpu())\n # loss = criterion(outputs, labels, batch_average=True)\n # running_loss_ts += 
loss.item()\n\n # total_iou += utils.get_iou(predictions, labels)\n end_time = timeit.default_timer()\n print(\"time use for \" + str(ii) + \" is :\" + str(end_time - start_time))\n\n # Eval\n pred_path = opts.output_path + \"pascal_output/\"\n eval_(\n pred_path=pred_path,\n gt_path=opts.gt_path,\n classes=opts.classes,\n txt_file=opts.txt_file,\n )\n\n\nif __name__ == \"__main__\":\n opts = get_parser()\n main(opts)\n"
] | [
[
"torch.nn.functional.upsample",
"torch.max",
"torch.load",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.no_grad",
"numpy.array",
"numpy.zeros",
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
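The flip helper in the Graphonomy row above builds a reversed index along one dimension; test-time augmentation then averages the straight output with the re-flipped output of the mirrored batch, (outputs[0] + flip(outputs[1], dim=-1)) / 2. A NumPy restatement of the same indexing trick (torch-free so it runs anywhere; the restated flip is mine):

import numpy as np

def flip(x, dim):
    # Build a full slice for every axis, then replace axis `dim`
    # with a descending index, exactly as the torch version does.
    indices = [slice(None)] * x.ndim
    indices[dim] = np.arange(x.shape[dim] - 1, -1, -1)
    return x[tuple(indices)]

x = np.arange(12).reshape(3, 4)
assert (flip(x, 1) == x[:, ::-1]).all()    # same as a reversed slice
assert (flip(flip(x, 0), 0) == x).all()    # flipping twice is the identity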
jhfong/NeMo | [
"db7a2db5ee4e0e81a5640b1f8ff5e83e993bcb87"
] | [
"nemo/collections/asr/modules/audio_preprocessing.py"
] | [
"# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass\nfrom typing import Any, Optional\n\nimport torch\nfrom packaging import version\n\nfrom nemo.collections.asr.parts.numba.spec_augment import SpecAugmentNumba, spec_augment_launch_heuristics\nfrom nemo.collections.asr.parts.preprocessing.features import FilterbankFeatures\nfrom nemo.collections.asr.parts.submodules.spectr_augment import SpecAugment, SpecCutout\nfrom nemo.core.classes import NeuralModule, typecheck\nfrom nemo.core.neural_types import (\n AudioSignal,\n LengthsType,\n MelSpectrogramType,\n MFCCSpectrogramType,\n NeuralType,\n SpectrogramType,\n)\nfrom nemo.core.utils import numba_utils\nfrom nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__\nfrom nemo.utils import logging\n\ntry:\n import torchaudio\n import torchaudio.functional\n import torchaudio.transforms\n\n TORCHAUDIO_VERSION = version.parse(torchaudio.__version__)\n TORCHAUDIO_VERSION_MIN = version.parse('0.5')\n\n HAVE_TORCHAUDIO = True\nexcept ModuleNotFoundError:\n HAVE_TORCHAUDIO = False\n\n__all__ = [\n 'AudioToMelSpectrogramPreprocessor',\n 'AudioToMFCCPreprocessor',\n 'SpectrogramAugmentation',\n 'CropOrPadSpectrogramAugmentation',\n]\n\n\nclass AudioPreprocessor(NeuralModule, ABC):\n \"\"\"\n An interface for Neural Modules that performs audio pre-processing,\n transforming the wav files to features.\n \"\"\"\n\n def __init__(self, win_length, hop_length):\n super().__init__()\n\n self.win_length = win_length\n self.hop_length = hop_length\n\n self.torch_windows = {\n 'hann': torch.hann_window,\n 'hamming': torch.hamming_window,\n 'blackman': torch.blackman_window,\n 'bartlett': torch.bartlett_window,\n 'ones': torch.ones,\n None: torch.ones,\n }\n\n @typecheck()\n @torch.no_grad()\n def forward(self, input_signal, length):\n processed_signal, processed_length = self.get_features(input_signal, length)\n\n return processed_signal, processed_length\n\n @abstractmethod\n def get_features(self, input_signal, length):\n # Called by forward(). Subclasses should implement this.\n pass\n\n\nclass AudioToMelSpectrogramPreprocessor(AudioPreprocessor):\n \"\"\"Featurizer module that converts wavs to mel spectrograms.\n We don't use torchaudio's implementation here because the original\n implementation is not the same, so for the sake of backwards-compatibility\n this will use the old FilterbankFeatures for now.\n Args:\n sample_rate (int): Sample rate of the input audio data.\n Defaults to 16000\n window_size (float): Size of window for fft in seconds\n Defaults to 0.02\n window_stride (float): Stride of window for fft in seconds\n Defaults to 0.01\n n_window_size (int): Size of window for fft in samples\n Defaults to None. Use one of window_size or n_window_size.\n n_window_stride (int): Stride of window for fft in samples\n Defaults to None. 
Use one of window_stride or n_window_stride.\n window (str): Windowing function for fft. can be one of ['hann',\n 'hamming', 'blackman', 'bartlett']\n Defaults to \"hann\"\n normalize (str): Can be one of ['per_feature', 'all_features']; all\n other options disable feature normalization. 'all_features'\n normalizes the entire spectrogram to be mean 0 with std 1.\n 'pre_features' normalizes per channel / freq instead.\n Defaults to \"per_feature\"\n n_fft (int): Length of FT window. If None, it uses the smallest power\n of 2 that is larger than n_window_size.\n Defaults to None\n preemph (float): Amount of pre emphasis to add to audio. Can be\n disabled by passing None.\n Defaults to 0.97\n features (int): Number of mel spectrogram freq bins to output.\n Defaults to 64\n lowfreq (int): Lower bound on mel basis in Hz.\n Defaults to 0\n highfreq (int): Lower bound on mel basis in Hz.\n Defaults to None\n log (bool): Log features.\n Defaults to True\n log_zero_guard_type(str): Need to avoid taking the log of zero. There\n are two options: \"add\" or \"clamp\".\n Defaults to \"add\".\n log_zero_guard_value(float, or str): Add or clamp requires the number\n to add with or clamp to. log_zero_guard_value can either be a float\n or \"tiny\" or \"eps\". torch.finfo is used if \"tiny\" or \"eps\" is\n passed.\n Defaults to 2**-24.\n dither (float): Amount of white-noise dithering.\n Defaults to 1e-5\n pad_to (int): Ensures that the output size of the time dimension is\n a multiple of pad_to.\n Defaults to 16\n frame_splicing (int): Defaults to 1\n exact_pad (bool): If True, sets stft center to False and adds padding, such that num_frames = audio_length\n // hop_length. Defaults to False.\n stft_exact_pad (bool): If True, uses pytorch_stft and convolutions with\n padding such that num_frames = num_samples / hop_length. If False,\n stft_conv will be used to determine how stft will be performed.\n Defaults to False. TODO:This feature is deprecated and will be removed in 1.1.0\n stft_conv (bool): If True, uses pytorch_stft and convolutions. If\n False, uses torch.stft. 
TODO:This feature is deprecated and will be removed in 1.1.0\n Defaults to False\n pad_value (float): The value that shorter mels are padded with.\n Defaults to 0\n mag_power (float): The power that the linear spectrogram is raised to\n prior to multiplication with mel basis.\n Defaults to 2 for a power spec\n \"\"\"\n\n def save_to(self, save_path: str):\n pass\n\n @classmethod\n def restore_from(cls, restore_path: str):\n pass\n\n @property\n def input_types(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n \"input_signal\": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n def output_types(self):\n \"\"\"Returns definitions of module output ports.\n processed_signal:\n 0: AxisType(BatchTag)\n 1: AxisType(MelSpectrogramSignalTag)\n 2: AxisType(ProcessedTimeTag)\n processed_length:\n 0: AxisType(BatchTag)\n \"\"\"\n return {\n \"processed_signal\": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),\n \"processed_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n def __init__(\n self,\n sample_rate=16000,\n window_size=0.02,\n window_stride=0.01,\n n_window_size=None,\n n_window_stride=None,\n window=\"hann\",\n normalize=\"per_feature\",\n n_fft=None,\n preemph=0.97,\n features=64,\n lowfreq=0,\n highfreq=None,\n log=True,\n log_zero_guard_type=\"add\",\n log_zero_guard_value=2 ** -24,\n dither=1e-5,\n pad_to=16,\n frame_splicing=1,\n exact_pad=False,\n stft_exact_pad=False,\n stft_conv=False,\n pad_value=0,\n mag_power=2.0,\n ):\n super().__init__(n_window_size, n_window_stride)\n\n self._sample_rate = sample_rate\n if window_size and n_window_size:\n raise ValueError(f\"{self} received both window_size and \" f\"n_window_size. Only one should be specified.\")\n if window_stride and n_window_stride:\n raise ValueError(\n f\"{self} received both window_stride and \" f\"n_window_stride. Only one should be specified.\"\n )\n if window_size:\n n_window_size = int(window_size * self._sample_rate)\n if window_stride:\n n_window_stride = int(window_stride * self._sample_rate)\n\n self.featurizer = FilterbankFeatures(\n sample_rate=self._sample_rate,\n n_window_size=n_window_size,\n n_window_stride=n_window_stride,\n window=window,\n normalize=normalize,\n n_fft=n_fft,\n preemph=preemph,\n nfilt=features,\n lowfreq=lowfreq,\n highfreq=highfreq,\n log=log,\n log_zero_guard_type=log_zero_guard_type,\n log_zero_guard_value=log_zero_guard_value,\n dither=dither,\n pad_to=pad_to,\n frame_splicing=frame_splicing,\n exact_pad=exact_pad,\n stft_exact_pad=stft_exact_pad,\n stft_conv=stft_conv,\n pad_value=pad_value,\n mag_power=mag_power,\n )\n\n def get_features(self, input_signal, length):\n return self.featurizer(input_signal, length)\n\n @property\n def filter_banks(self):\n return self.featurizer.filter_banks\n\n\nclass AudioToMFCCPreprocessor(AudioPreprocessor):\n \"\"\"Preprocessor that converts wavs to MFCCs.\n Uses torchaudio.transforms.MFCC.\n Args:\n sample_rate: The sample rate of the audio.\n Defaults to 16000.\n window_size: Size of window for fft in seconds. Used to calculate the\n win_length arg for mel spectrogram.\n Defaults to 0.02\n window_stride: Stride of window for fft in seconds. Used to caculate\n the hop_length arg for mel spect.\n Defaults to 0.01\n n_window_size: Size of window for fft in samples\n Defaults to None. Use one of window_size or n_window_size.\n n_window_stride: Stride of window for fft in samples\n Defaults to None. 
Use one of window_stride or n_window_stride.\n window: Windowing function for fft. can be one of ['hann',\n 'hamming', 'blackman', 'bartlett', 'none', 'null'].\n Defaults to 'hann'\n n_fft: Length of FT window. If None, it uses the smallest power of 2\n that is larger than n_window_size.\n Defaults to None\n lowfreq (int): Lower bound on mel basis in Hz.\n Defaults to 0\n highfreq (int): Lower bound on mel basis in Hz.\n Defaults to None\n n_mels: Number of mel filterbanks.\n Defaults to 64\n n_mfcc: Number of coefficients to retain\n Defaults to 64\n dct_type: Type of discrete cosine transform to use\n norm: Type of norm to use\n log: Whether to use log-mel spectrograms instead of db-scaled.\n Defaults to True.\n \"\"\"\n\n @property\n def input_types(self):\n \"\"\"Returns definitions of module input ports.\n \"\"\"\n return {\n \"input_signal\": NeuralType(('B', 'T'), AudioSignal(freq=self._sample_rate)),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n def output_types(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n \"processed_signal\": NeuralType(('B', 'D', 'T'), MFCCSpectrogramType()),\n \"processed_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n def save_to(self, save_path: str):\n pass\n\n @classmethod\n def restore_from(cls, restore_path: str):\n pass\n\n def __init__(\n self,\n sample_rate=16000,\n window_size=0.02,\n window_stride=0.01,\n n_window_size=None,\n n_window_stride=None,\n window='hann',\n n_fft=None,\n lowfreq=0.0,\n highfreq=None,\n n_mels=64,\n n_mfcc=64,\n dct_type=2,\n norm='ortho',\n log=True,\n ):\n self._sample_rate = sample_rate\n if not HAVE_TORCHAUDIO:\n logging.error('Could not import torchaudio. Some features might not work.')\n\n raise ModuleNotFoundError(\n \"torchaudio is not installed but is necessary for \"\n \"AudioToMFCCPreprocessor. We recommend you try \"\n \"building it from source for the PyTorch version you have.\"\n )\n if window_size and n_window_size:\n raise ValueError(f\"{self} received both window_size and \" f\"n_window_size. Only one should be specified.\")\n if window_stride and n_window_stride:\n raise ValueError(\n f\"{self} received both window_stride and \" f\"n_window_stride. Only one should be specified.\"\n )\n # Get win_length (n_window_size) and hop_length (n_window_stride)\n if window_size:\n n_window_size = int(window_size * self._sample_rate)\n if window_stride:\n n_window_stride = int(window_stride * self._sample_rate)\n\n super().__init__(n_window_size, n_window_stride)\n\n mel_kwargs = {}\n\n mel_kwargs['f_min'] = lowfreq\n mel_kwargs['f_max'] = highfreq\n mel_kwargs['n_mels'] = n_mels\n\n mel_kwargs['n_fft'] = n_fft or 2 ** math.ceil(math.log2(n_window_size))\n\n mel_kwargs['win_length'] = n_window_size\n mel_kwargs['hop_length'] = n_window_stride\n\n # Set window_fn. 
None defaults to torch.ones.\n window_fn = self.torch_windows.get(window, None)\n if window_fn is None:\n raise ValueError(\n f\"Window argument for AudioProcessor is invalid: {window}.\"\n f\"For no window function, use 'ones' or None.\"\n )\n mel_kwargs['window_fn'] = window_fn\n\n # Use torchaudio's implementation of MFCCs as featurizer\n self.featurizer = torchaudio.transforms.MFCC(\n sample_rate=self._sample_rate,\n n_mfcc=n_mfcc,\n dct_type=dct_type,\n norm=norm,\n log_mels=log,\n melkwargs=mel_kwargs,\n )\n\n def get_features(self, input_signal, length):\n features = self.featurizer(input_signal)\n seq_len = torch.ceil(length.to(torch.float32) / self.hop_length).to(dtype=torch.long)\n return features, seq_len\n\n\nclass SpectrogramAugmentation(NeuralModule):\n \"\"\"\n Performs time and freq cuts in one of two ways.\n SpecAugment zeroes out vertical and horizontal sections as described in\n SpecAugment (https://arxiv.org/abs/1904.08779). Arguments for use with\n SpecAugment are `freq_masks`, `time_masks`, `freq_width`, and `time_width`.\n SpecCutout zeroes out rectangulars as described in Cutout\n (https://arxiv.org/abs/1708.04552). Arguments for use with Cutout are\n `rect_masks`, `rect_freq`, and `rect_time`.\n Args:\n freq_masks (int): how many frequency segments should be cut.\n Defaults to 0.\n time_masks (int): how many time segments should be cut\n Defaults to 0.\n freq_width (int): maximum number of frequencies to be cut in one\n segment.\n Defaults to 10.\n time_width (int): maximum number of time steps to be cut in one\n segment\n Defaults to 10.\n rect_masks (int): how many rectangular masks should be cut\n Defaults to 0.\n rect_freq (int): maximum size of cut rectangles along the frequency\n dimension\n Defaults to 5.\n rect_time (int): maximum size of cut rectangles along the time\n dimension\n Defaults to 25.\n \"\"\"\n\n @property\n def input_types(self):\n \"\"\"Returns definitions of module input types\n \"\"\"\n return {\n \"input_spec\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"length\": NeuralType(tuple('B'), LengthsType(), optional=True),\n }\n\n @property\n def output_types(self):\n \"\"\"Returns definitions of module output types\n \"\"\"\n return {\"augmented_spec\": NeuralType(('B', 'D', 'T'), SpectrogramType())}\n\n def __init__(\n self,\n freq_masks=0,\n time_masks=0,\n freq_width=10,\n time_width=10,\n rect_masks=0,\n rect_time=5,\n rect_freq=20,\n rng=None,\n mask_value=0.0,\n use_numba_spec_augment: bool = True,\n ):\n super().__init__()\n\n if rect_masks > 0:\n self.spec_cutout = SpecCutout(rect_masks=rect_masks, rect_time=rect_time, rect_freq=rect_freq, rng=rng,)\n # self.spec_cutout.to(self._device)\n else:\n self.spec_cutout = lambda input_spec: input_spec\n\n if freq_masks + time_masks > 0:\n self.spec_augment = SpecAugment(\n freq_masks=freq_masks,\n time_masks=time_masks,\n freq_width=freq_width,\n time_width=time_width,\n rng=rng,\n mask_value=mask_value,\n )\n else:\n self.spec_augment = lambda input_spec: input_spec\n\n # Check if numba is supported, and use a Numba kernel if it is\n if use_numba_spec_augment and numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__):\n self.spec_augment_numba = SpecAugmentNumba(\n freq_masks=freq_masks,\n time_masks=time_masks,\n freq_width=freq_width,\n time_width=time_width,\n rng=rng,\n mask_value=mask_value,\n )\n else:\n self.spec_augment_numba = None\n\n @typecheck()\n def forward(self, input_spec, length=None):\n augmented_spec = self.spec_cutout(input_spec=input_spec)\n\n # To run 
the Numba kernel, correct numba version is required as well as\n # tensor must be on GPU and length must be provided\n if self.spec_augment_numba is not None and spec_augment_launch_heuristics(augmented_spec, length):\n augmented_spec = self.spec_augment_numba(input_spec=augmented_spec, length=length)\n else:\n augmented_spec = self.spec_augment(input_spec=augmented_spec)\n return augmented_spec\n\n\nclass CropOrPadSpectrogramAugmentation(NeuralModule):\n \"\"\"\n Pad or Crop the incoming Spectrogram to a certain shape.\n Args:\n audio_length (int): the final number of timesteps that is required.\n The signal will be either padded or cropped temporally to this\n size.\n \"\"\"\n\n def __init__(self, audio_length):\n super(CropOrPadSpectrogramAugmentation, self).__init__()\n self.audio_length = audio_length\n\n @typecheck()\n @torch.no_grad()\n def forward(self, input_signal, length):\n image = input_signal\n num_images = image.shape[0]\n\n audio_length = self.audio_length\n image_len = image.shape[-1]\n\n # Crop long signal\n if image_len > audio_length: # randomly slice\n cutout_images = []\n offset = torch.randint(low=0, high=image_len - audio_length + 1, size=[num_images])\n\n for idx, offset in enumerate(offset):\n cutout_images.append(image[idx : idx + 1, :, offset : offset + audio_length])\n\n image = torch.cat(cutout_images, dim=0)\n del cutout_images\n\n else: # symmetrically pad short signal with zeros\n pad_left = (audio_length - image_len) // 2\n pad_right = (audio_length - image_len) // 2\n\n if (audio_length - image_len) % 2 == 1:\n pad_right += 1\n\n image = torch.nn.functional.pad(image, [pad_left, pad_right], mode=\"constant\", value=0)\n\n # Replace dynamic length sequences with static number of timesteps\n length = (length * 0) + audio_length\n\n return image, length\n\n @property\n def input_types(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n \"input_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n @property\n def output_types(self):\n \"\"\"Returns definitions of module output ports.\n \"\"\"\n return {\n \"processed_signal\": NeuralType(('B', 'D', 'T'), SpectrogramType()),\n \"processed_length\": NeuralType(tuple('B'), LengthsType()),\n }\n\n def save_to(self, save_path: str):\n pass\n\n @classmethod\n def restore_from(cls, restore_path: str):\n pass\n\n\n@dataclass\nclass AudioToMelSpectrogramPreprocessorConfig:\n _target_: str = \"nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor\"\n sample_rate: int = 16000\n window_size: float = 0.02\n window_stride: float = 0.01\n n_window_size: Optional[int] = None\n n_window_stride: Optional[int] = None\n window: str = \"hann\"\n normalize: str = \"per_feature\"\n n_fft: Optional[int] = None\n preemph: float = 0.97\n features: int = 64\n lowfreq: int = 0\n highfreq: Optional[int] = None\n log: bool = True\n log_zero_guard_type: str = \"add\"\n log_zero_guard_value: float = 2 ** -24\n dither: float = 1e-5\n pad_to: int = 16\n frame_splicing: int = 1\n exact_pad: bool = False\n stft_exact_pad: bool = False\n stft_conv: bool = False\n pad_value: int = 0\n mag_power: float = 2.0\n\n\n@dataclass\nclass AudioToMFCCPreprocessorConfig:\n _target_: str = 'nemo.collections.asr.modules.AudioToMFCCPreprocessor'\n sample_rate: int = 16000\n window_size: float = 0.02\n window_stride: float = 0.01\n n_window_size: Optional[int] = None\n n_window_stride: Optional[int] = None\n window: str = 'hann'\n n_fft: Optional[int] = None\n 
lowfreq: Optional[float] = 0.0\n highfreq: Optional[float] = None\n n_mels: int = 64\n n_mfcc: int = 64\n dct_type: int = 2\n norm: str = 'ortho'\n log: bool = True\n\n\n@dataclass\nclass SpectrogramAugmentationConfig:\n _target_: str = \"nemo.collections.asr.modules.SpectrogramAugmentation\"\n freq_masks: int = 0\n time_masks: int = 0\n freq_width: int = 0\n time_width: Optional[Any] = 0\n rect_masks: int = 0\n rect_time: int = 0\n rect_freq: int = 0\n mask_value: float = 0\n rng: Optional[Any] = None # random.Random() type\n use_numba_spec_augment: bool = True\n\n\n@dataclass\nclass CropOrPadSpectrogramAugmentationConfig:\n audio_length: int\n _target_: str = \"nemo.collections.asr.modules.CropOrPadSpectrogramAugmentation\"\n"
] | [
[
"torch.no_grad",
"torch.randint",
"torch.nn.functional.pad",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
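CropOrPadSpectrogramAugmentation in the NeMo row above pads short spectrograms symmetrically, giving the extra column to the right when the deficit is odd. The arithmetic in isolation (the helper name and shapes here are made up for illustration):

import numpy as np

def symmetric_pad_amounts(image_len, audio_length):
    # Mirrors the padding branch: split the deficit evenly,
    # with any odd remainder going to the right side.
    pad_left = (audio_length - image_len) // 2
    pad_right = (audio_length - image_len) // 2
    if (audio_length - image_len) % 2 == 1:
        pad_right += 1
    return pad_left, pad_right

spec = np.ones((1, 64, 37))                       # (batch, freq, time)
pad_left, pad_right = symmetric_pad_amounts(spec.shape[-1], 40)
padded = np.pad(spec, ((0, 0), (0, 0), (pad_left, pad_right)))
assert (pad_left, pad_right) == (1, 2) and padded.shape[-1] == 40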
openforcefield/bayes-implicit-solvent | [
"067239fcbb8af28eb6310d702804887662692ec2",
"067239fcbb8af28eb6310d702804887662692ec2",
"067239fcbb8af28eb6310d702804887662692ec2",
"067239fcbb8af28eb6310d702804887662692ec2"
] | [
"bayes_implicit_solvent/continuous_parameter_experiments/automatic_parameterization_figure/rwmh.py",
"bayes_implicit_solvent/rjmc_experiments/tree_rjmc2.py",
"bayes_implicit_solvent/rjmc_experiments/plot_branching_figure.py",
"bayes_implicit_solvent/utils.py"
] | [
"from jax_posterior import log_posterior, grad_log_posterior, sample_prior, get_predictions\nfrom jax import numpy as np\nimport numpy as onp\nfrom bayes_implicit_solvent.samplers import random_walk_mh\n\nif __name__ == '__main__':\n perturbation_sigma = 0.2\n pred_traj_thinning = 50\n #n_steps = 10000\n n_steps = 100\n step_size = 0.005\n\n import sys\n\n try:\n job_id = int(sys.argv[1])\n except:\n print(\"No valid job_id supplied! Selecting one at random\")\n job_id = onp.random.randint(10000)\n\n onp.random.seed(job_id)\n\n obc2_theta = np.array([\n 1.5, 1.2, 1.7, 1.55, 1.5, 1.5, 2.1, 1.85, 1.8,\n 0.8, 0.85, 0.72, 0.79, 0.85, 0.88, 0.8, 0.86, 0.96])\n\n x0 = obc2_theta + onp.random.randn(len(obc2_theta)) * perturbation_sigma\n\n\n prior_sample = sample_prior()\n\n rw_mh_traj, rw_mh_post_traj, accept_rate = random_walk_mh(x0, log_posterior, n_steps=n_steps, stepsize=step_size)\n\n prediction_traj = onp.array(list(map(get_predictions, rw_mh_traj[::pred_traj_thinning])))\n\n onp.savez('rw_mh_starting_from_obc2_perturbed_by_sigma={},job_id={}'.format(perturbation_sigma, job_id),\n random_seed=job_id,\n rw_mh_traj=onp.asarray(rw_mh_traj),\n rw_mh_post_traj=onp.asarray(rw_mh_post_traj),\n prediction_traj=prediction_traj)\n",
"import numpy as np\n\nfrom bayes_implicit_solvent.continuous_parameter_experiments.elemental_types_mh import log_prior, mols, ll, data_path, \\\n smiles\n\nsmiles_list = smiles\nfrom bayes_implicit_solvent.typers import RADIUS_UNIT\n\nfrom bayes_implicit_solvent.freesolv import smiles_list\nfrom bayes_implicit_solvent.typers import AtomSpecificationProposal\n\nnp.random.seed(0)\n\nfrom bayes_implicit_solvent.gb_models.obc2_parameters import mbondi_model\n\ninitial_tree = mbondi_model\ninitial_tree.remove_node('[#14]') # otherwise everything is -inf, because this type will be empty\ninitial_tree.proposal_sigmas['radius'] = 1e-2 * RADIUS_UNIT\ninitial_tree.proposal_sigmas['scale_factor'] = 1e-2\n\n# add one more parameter per element appearing in FreeSolv but not specified in obc2 parameter set to initial tree\nfor i in [17, 35, 53]:\n smirks = '[#{}]'.format(i)\n initial_tree.add_child(smirks, '*')\n initial_tree.un_delete_able_types.add(smirks)\n\nspecifiers = ['X1', 'X2', 'X3', 'X4', 'a', 'A', '-1', '+0', '+1', '+2']\natom_specification_proposal = AtomSpecificationProposal(atomic_specifiers=specifiers)\nsmirks_elaboration_proposal = atom_specification_proposal\n\nprint('initial tree:')\nprint(initial_tree)\n\nn_configuration_samples = 25\n\nimport os\n\nname = 'tree_rjmc_n_config={}_{}_ll'.format(n_configuration_samples, ll)\nsmiles_subset_fname = os.path.join(data_path,\n 'smiles_subset_{}.txt'.format(name))\nwith open(smiles_subset_fname, 'w') as f:\n f.writelines(['{}\\n'.format(s) for s in smiles_list])\n\nfrom bayes_implicit_solvent.prior_checking import check_no_empty_types\n\nerror_y_trees = []\n\n\ndef log_prob(tree):\n log_prior_value = check_no_empty_types(tree)\n\n theta = np.hstack([tree.get_radii(), tree.get_scale_factors()])\n\n log_prior_value += log_prior(theta)\n\n if log_prior_value > -np.inf:\n try:\n # TODO: Parallelize. Note that multiprocessing.Pool won't work here because it doesn't play nice with SwigPy objects\n # TODO: update to allow scale factors to be variable also\n log_likelihood_value = 0\n for mol in mols:\n radii = tree.assign_radii(mol.mol) / RADIUS_UNIT\n scale_factors = tree.assign_scale_factors(mol.mol)\n\n log_likelihood_value += mol.log_prob(radii, scale_factors)\n except:\n global error_y_trees\n error_y_trees.append(tree)\n print('Warning! Encountered un-anticipated exception!')\n return - np.inf\n return log_prior_value + log_likelihood_value\n else:\n return log_prior_value\n\n\nfrom bayes_implicit_solvent.samplers import tree_rjmc\nfrom pickle import dump\n\nn_iterations = 10000\n\nresult = tree_rjmc(initial_tree, log_prob, smirks_elaboration_proposal, n_iterations=n_iterations,\n fraction_cross_model_proposals=0.1)\nwith open('elaborate_tree_rjmc2_run_n_compounds={}_n_iter={}_gaussian_ll.pkl'.format(len(mols), n_iterations),\n 'wb') as f:\n dump(result, f)\n\nwith open('error_y_trees.pkl', 'wb') as f:\n dump(error_y_trees, f)\n",
"from bayes_implicit_solvent.utils import remove_top_right_spines\n\nfrom pickle import load\n\nexperiment_number = 5\n\nwith open('results/experiment_{}_radii_samples.pkl'.format(experiment_number), 'rb') as f:\n radii_samples = load(f)\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nlog_ps = np.load('results/experiment_{}_log_ps.npy'.format(experiment_number))\n\nn_types_trace = [len(r) for r in radii_samples]\n\nmax_n_dimensions = max(n_types_trace)\nprint(max_n_dimensions)\n\nfig = plt.figure(figsize=(4,8))\n\n\ntraces = []\nfor i in range(max_n_dimensions):\n trace = []\n for r in radii_samples:\n if len(r) > i:\n trace.append(r[i])\n else:\n trace.append(np.nan)\n traces.append(trace)\n\nax = plt.subplot(3,1,1)\n# plot branching\nfor trace in traces:\n plt.plot(trace)\n#plt.xlabel('iteration')\nplt.ylabel('radius')\nremove_top_right_spines(ax)\n\n# plot # types trace\nax = plt.subplot(3,1,2)\nplt.plot(n_types_trace)\n#plt.xlabel('iteration')\nplt.ylabel('# GB types')\nplt.yticks([1,10,20])\nremove_top_right_spines(ax)\n\n# plot log-probability trace\nax = plt.subplot(3,1,3)\nplt.plot(log_ps)\nplt.xlabel('iteration')\nplt.ylim(log_ps[10], max(log_ps) + 0.05 * (max(log_ps) - log_ps[10]))\nplt.ylabel('log posterior')\nremove_top_right_spines(ax)\n\nplt.tight_layout()\n\nplt.savefig('figures/experiment_{}_branching.png'.format(experiment_number),\n bbox_inches='tight', dpi=300)\nplt.close()",
"from functools import lru_cache\n\nimport numpy as np\nfrom openeye import oechem\nfrom simtk import unit\n\n\ndef smiles_to_mol(smiles):\n \"\"\"Create an openeye OEGraphMol object from this smiles string\"\"\"\n mol = oechem.OEGraphMol()\n oechem.OESmilesToMol(mol, smiles)\n return mol\n\n\ndef get_gbsa_force(system):\n \"\"\"Find and return the first force that contains 'GBSA' in its name\"\"\"\n for force in system.getForces():\n if \"GBSA\" in force.__class__.__name__:\n return force\n raise (Exception('No GBSA force found'))\n\n\ndef get_nb_force(system):\n \"\"\"Find and return the first force that contains 'NonbondedForce' in its name\"\"\"\n forces = system.getForces()\n for f in forces:\n if 'NonbondedForce' in f.__class__.__name__:\n return f\n raise (Exception('No nonbonded force found'))\n\n\ndef get_charges(system):\n \"\"\"Find and return the partial charges of all particles in the system\"\"\"\n nb_force = get_nb_force(system)\n return np.array(\n [nb_force.getParticleParameters(i)[0] / unit.elementary_charge for i in range(nb_force.getNumParticles())])\n\n\ndef apply_per_particle_params_to_GB_force(radii, scale_factors, gb_force):\n \"\"\"Given arrays of radii and scale_factors,\n each of length = gb_force.getNumParameters(),\n overwrite the per-particle radius and scalingFactor parameters of gb_force.\n (Retain charges.)\n \"\"\"\n\n for i in range(len(radii)):\n charge = gb_force.getParticleParameters(i)[0]\n gb_force.setParticleParameters(index=i,\n charge=charge,\n radius=radii[i],\n scalingFactor=scale_factors[i],\n )\n\n\ndef mdtraj_to_list_of_unitted_snapshots(traj):\n \"\"\"Create list of (n_atoms, 3) snapshots with simtk.units attached\"\"\"\n return [snapshot * unit.nanometer for snapshot in traj.xyz]\n\n\ndef npy_sample_path_to_unitted_snapshots(path_to_npy_samples):\n \"\"\"Given a path to a .npy file containing xyz coordinates in units of nanometers,\n create a list of (n_atoms, 3) snapshots with simtk.units attached\"\"\"\n xyz = np.load(path_to_npy_samples)\n traj = [snapshot * unit.nanometer for snapshot in xyz]\n return traj\n\ndef smarts_to_subsearch(smarts):\n \"\"\"Creates an oechem.OESubsearch object from a SMARTS pattern\"\"\"\n qmol = oechem.OEQMol()\n oechem.OEParseSmarts(qmol, smarts)\n subsearch = oechem.OESubSearch(qmol)\n return subsearch\n\n\ndef remove_top_right_spines(ax):\n \"\"\"Aesthetic tweak of matplotlib axes\"\"\"\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n\n\ndef get_substructure_matches(mol, subsearch):\n mol_ = oechem.OEMol(mol)\n n_atoms = len(list(mol_.GetAtoms()))\n matches = np.zeros(n_atoms, dtype=bool)\n\n for match in subsearch.Match(mol_, False):\n match_atoms = match.GetTargetAtoms()\n match_patterns = match.GetPatternAtoms()\n for a, p in zip(match_atoms, match_patterns):\n if p.GetIdx() == 0:\n matches[a.GetIdx()] = True\n return matches\n\n# observation: I usually am doing many subsearch queries on the same fixed dataset.\n# TODO: I should replace this function with one that uses a constant dataset and only has the subsearch string vary...\n\n\n@lru_cache(maxsize=2 ** 12)\ndef cached_substructure_matches(mol, subsearch_string):\n subsearch = smarts_to_subsearch(subsearch_string)\n return get_substructure_matches(mol, subsearch)\n\n\ndef make_cached_substructure_matcher(mol_list):\n @lru_cache(maxsize=2**12)\n def cached_substructure_matches(subsearch_string):\n \"\"\"returns a list of length mol_list, where each element of the list is an array of\n length \"\"\"\n subsearch = 
smarts_to_subsearch(subsearch_string)\n return [get_substructure_matches(mol, subsearch) for mol in mol_list]\n return cached_substructure_matches\n\n\nclass Dataset():\n def __init__(self, mol_list):\n \"\"\"\"\"\"\n self.mol_list = mol_list\n self.n_atoms_list = [self.mol_list[i].NumAtoms() for i in range(len(self.mol_list))]\n self.cached_substructure_matcher = make_cached_substructure_matcher(mol_list)\n\n def get_match_matrices(self, smarts_list):\n n_smarts = len(smarts_list)\n n_mols = len(self.mol_list)\n\n match_lists = list(map(self.cached_substructure_matcher, smarts_list))\n match_matrices = [np.vstack([l[i] for l in match_lists]).T for i in range(n_mols)]\n\n # check shapes, since this is a bit nested / tricky...\n assert(len(match_matrices) == n_mols)\n for i, m in enumerate(match_matrices):\n assert(m.shape[0] == self.n_atoms_list[i])\n assert(m.shape[1] == n_smarts)\n return match_matrices\n\n\n\ndef convert_to_unitd_array(unitd_quantities):\n \"\"\"Given an iterable of Quantities in compatible units, make a numpy\n array of unitless scalars, then multiply the array by the unit\"\"\"\n u = unitd_quantities[0].unit\n return np.array([q.value_in_unit(u) for q in unitd_quantities]) * u\n"
] | [
[
"numpy.asarray",
"numpy.random.seed",
"numpy.random.randint"
],
[
"numpy.random.seed"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylabel"
],
[
"numpy.load",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
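The first script in this row unpacks `rw_mh_traj, rw_mh_post_traj, accept_rate = random_walk_mh(x0, log_posterior, n_steps=n_steps, stepsize=step_size)`. The sampler itself lives in `bayes_implicit_solvent.samplers` and is not included in this row, so the following is only a minimal sketch of a random-walk Metropolis-Hastings loop consistent with that call site; the isotropic Gaussian proposal and the exact return convention are assumptions, and `random_walk_mh_sketch` is an illustrative name, not the library's function.

import numpy as np

def random_walk_mh_sketch(x0, log_prob_fun, n_steps=100, stepsize=0.005):
    """Hypothetical stand-in for samplers.random_walk_mh: returns
    (trajectory, log-probability trajectory, acceptance rate)."""
    x = np.array(x0, dtype=float)
    log_p = log_prob_fun(x)
    traj, log_p_traj, n_accept = [x.copy()], [log_p], 0
    for _ in range(n_steps):
        proposal = x + stepsize * np.random.randn(len(x))  # isotropic Gaussian move (assumed)
        log_p_new = log_prob_fun(proposal)
        # Metropolis criterion: accept with probability min(1, exp(log_p_new - log_p))
        if np.log(np.random.rand()) < log_p_new - log_p:
            x, log_p = proposal, log_p_new
            n_accept += 1
        traj.append(x.copy())
        log_p_traj.append(log_p)
    return np.array(traj), np.array(log_p_traj), n_accept / n_steps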
notplus/thinc | [
"9228eba1a0af20fb0de80970791c601549b40e26"
] | [
"thinc/backends/ops.py"
] | [
"import math\n\nfrom typing import Optional, List, Tuple, Sequence, Union, cast, TypeVar\nfrom typing import Iterator, overload\nimport numpy\nimport itertools\n\nfrom .. import registry\nfrom ..types import Xp, Shape, DTypes, DTypesInt, DTypesFloat, List2d, ArrayXd\nfrom ..types import Array3d, Floats1d, Floats2d, Floats3d, Floats4d\nfrom ..types import FloatsXd, Ints1d, Ints2d, Ints3d, Ints4d, IntsXd, _Floats\nfrom ..types import DeviceTypes, Generator, Padded, Batchable, SizedGenerator\nfrom ..util import get_array_module, is_xp_array, to_numpy\n\n\nArrayT = TypeVar(\"ArrayT\", bound=ArrayXd)\nFloatsT = TypeVar(\"FloatsT\", bound=_Floats)\nFloatsType = TypeVar(\"FloatsType\", bound=FloatsXd)\nSQRT2PI = math.sqrt(2.0 / math.pi)\nINV_SQRT2 = 1.0 / math.sqrt(2.0)\nINV_SQRT_2PI = 1.0 / math.sqrt(2.0 * math.pi)\n\n\nclass Ops:\n name: str = \"base\"\n xp: Xp = numpy\n\n def __init__(\n self, device_type: DeviceTypes = \"cpu\", device_id: int = -1, **kwargs\n ) -> None:\n self.device_type = device_type\n self.device_id = device_id\n\n def to_numpy(self, data, *, byte_order=None): # pragma: no cover\n if isinstance(data, numpy.ndarray):\n if byte_order:\n dtype = data.dtype.newbyteorder(byte_order)\n data = numpy.asarray(data, dtype=dtype)\n return data\n else:\n raise ValueError(\"Cannot convert non-numpy from base Ops class\")\n\n def minibatch(\n self,\n size: Union[int, Generator],\n sequence: Batchable,\n *,\n shuffle: bool = False,\n buffer: int = 1,\n ) -> SizedGenerator:\n \"\"\"Iterate slices from a sequence, optionally shuffled. Slices\n may be either views or copies of the underlying data.\n\n The `size` argument may be either an integer, or a sequence of integers.\n If a sequence, a new size is drawn before every output.\n\n If shuffle is True, shuffled batches are produced by first generating\n an index array, shuffling it, and then using it to slice into the\n sequence.\n\n An internal queue of `buffer` items is accumulated before being each\n output. Buffering is useful for some devices, to allow the\n network to run asynchronously without blocking on every batch.\n \"\"\"\n if not hasattr(sequence, \"__len__\"):\n err = f\"Can't minibatch data. Expected sequence, got {type(sequence)}\"\n raise ValueError(err)\n sizes = self._get_batch_sizes(\n len(sequence), itertools.repeat(size) if isinstance(size, int) else size\n )\n indices = numpy.arange(len(sequence))\n\n # This is a bit convoluted, but it's a time where convenience makes\n # trickery worthwhile: instead of being an actual generator, we\n # return our SizedGenerator object, which provides a __len__.\n def _iter_items():\n if shuffle:\n numpy.random.shuffle(indices)\n queue = []\n i = 0\n for size in sizes:\n size = int(size)\n queue.append(self._get_batch(sequence, indices[i : i + size]))\n if len(queue) >= buffer:\n yield from queue\n queue = []\n i += size\n yield from queue\n\n return SizedGenerator(_iter_items, len(sizes))\n\n def multibatch(\n self,\n size: Union[int, Generator],\n sequence: Batchable,\n *others: Batchable,\n shuffle: bool = False,\n buffer: int = 1,\n ) -> SizedGenerator:\n \"\"\"Minibatch one or more sequences of data, and yield\n lists with one batch per sequence. 
See ops.minibatch.\n \"\"\"\n # You'd think we could just do this by calling into minibatch and zip...\n # But the shuffling makes it really hard.\n sequences = (sequence,) + tuple(others)\n if not all(hasattr(seq, \"__len__\") for seq in sequences):\n values = \", \".join([f\"{type(seq)}\" for seq in sequences])\n err = f\"Can't multibatch data. Expected sequences, got {values}\"\n raise ValueError(err)\n sizes = self._get_batch_sizes(\n len(sequence), itertools.repeat(size) if isinstance(size, int) else size\n )\n indices = numpy.arange(len(sequence))\n\n def _iter_items():\n if shuffle:\n numpy.random.shuffle(indices)\n queue = []\n i = 0\n for size in sizes:\n size = int(size)\n idx_batch = indices[i : i + size]\n queue.append([])\n for sequence in sequences:\n queue[-1].append(self._get_batch(sequence, idx_batch))\n if len(queue) >= buffer:\n yield from queue\n queue = []\n i += size\n yield from queue\n\n return SizedGenerator(_iter_items, len(sizes))\n\n def _get_batch(self, sequence, indices):\n if isinstance(sequence, list):\n subseq = [sequence[i] for i in indices]\n elif isinstance(sequence, tuple):\n subseq = tuple(sequence[i] for i in indices) # type: ignore\n else:\n subseq = sequence[indices] # type: ignore\n if is_xp_array(subseq):\n subseq = self.as_contig(\n cast(ArrayXd, self.xp.asarray(subseq))\n ) # type: ignore\n return subseq\n\n def _get_batch_sizes(self, length: int, sizes: Iterator[int]):\n output = []\n i = 0\n while i < length:\n output.append(next(sizes))\n i += output[-1]\n return output\n\n def seq2col(\n self, seq: Floats2d, nW: int, *, lengths: Optional[Ints1d] = None\n ) -> Floats2d:\n \"\"\"Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1))\n sequence. The new sequence is constructed by concatenating nW preceding\n and succeeding vectors onto each column in the sequence, to extract a\n window of features.\n \"\"\"\n # This is a test implementation that only supports nW=1 and lengths=None\n assert nW == 1\n assert lengths == None\n B = seq.shape[0]\n I = seq.shape[1]\n cols = self.alloc3f(B, (nW * 2 + 1), I)\n # Copy left contexts. 
The last words aren't the left-context for anything.\n cols[nW:, :nW] = self.reshape3f(seq[:-nW], -1, nW, I)\n cols[:, nW] = seq\n cols[:-nW, nW + 1 :] = self.reshape3f(seq[nW:], -1, nW, I)\n return self.reshape2f(cols, B, I * (2 * nW + 1))\n\n def backprop_seq2col(\n self, dY: Floats2d, nW: int, *, lengths: Optional[Ints1d] = None\n ) -> Floats2d:\n \"\"\"The reverse/backward operation of the `seq2col` function: calculate\n the gradient of the original `(M, N)` sequence, as a function of the\n gradient of the output `(M, N*(nW*2+1))` sequence.\n \"\"\"\n # This is a test implementation that only supports nW=1 and lengths=None\n assert nW == 1\n assert lengths == None\n nF = nW * 2 + 1\n B = dY.shape[0]\n I = dY.shape[1] // nF\n # Having trouble getting the kernel to work...\n dX = self.alloc2f(B, I)\n dY3d = self.reshape3f(dY, B, nF, I)\n dX[:-nW] += self.reshape2f(dY3d[nW:, :nW], -1, I)\n dX += dY3d[:, nW]\n dX[nW:] += self.reshape2f(dY3d[:-nW, nW + 1 :], -1, I)\n return dX\n\n def gemm(\n self,\n x: Floats2d,\n y: Floats2d,\n out: Optional[Floats2d] = None,\n trans1: bool = False,\n trans2: bool = False,\n ) -> Floats2d:\n \"\"\"Perform General Matrix Multiplication (GeMM) and optionally store\n the result in the specified output variable.\n \"\"\"\n if trans1:\n x = x.T\n if trans2:\n y = y.T\n if out is None:\n return self.xp.dot(x, y)\n else:\n self.xp.dot(x, y, out=out)\n return out\n\n def tile(self, X: Floats2d, reps: int) -> Floats2d:\n return self.xp.tile(X, reps)\n\n def affine(self, X: Floats2d, W: Floats2d, b: Floats1d) -> Floats2d:\n \"\"\"Apply a weights layer and a bias to some inputs, i.e.\n Y = X @ W.T + b\n \"\"\"\n Y = self.gemm(X, W, trans2=True)\n Y += b\n return Y\n\n def flatten(\n self,\n X: Sequence[ArrayT],\n dtype: Optional[DTypes] = None,\n pad: int = 0,\n ndim_if_empty: int = 2,\n ) -> ArrayT:\n \"\"\"Flatten a list of arrays into one large array.\"\"\"\n if X is None or len(X) == 0:\n return self.alloc((0,) * ndim_if_empty, dtype=dtype or \"f\")\n xp = get_array_module(X[0])\n shape_if_empty = X[0].shape\n X = [x for x in X if x.size != 0]\n if len(X) == 0:\n return self.alloc(shape_if_empty, dtype=dtype or \"f\")\n if int(pad) >= 1:\n padded = []\n for x in X:\n padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))\n padded.append(x)\n padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))\n X = padded\n result = xp.concatenate(X)\n if dtype is not None:\n result = xp.asarray(result, dtype=dtype)\n return result\n\n def unflatten(self, X: Floats2d, lengths: Ints1d, pad: int = 0) -> List[Floats2d]:\n \"\"\"The reverse/backward operation of the `flatten` function: unflatten\n a large array into a list of arrays according to the given lengths.\n \"\"\"\n unflat = []\n pad = int(pad)\n for length in lengths:\n length = int(length)\n if pad >= 1 and length != 0:\n X = X[pad:]\n unflat.append(X[:length])\n X = X[length:]\n if pad >= 1:\n X = X[pad:]\n assert len(X) == 0\n assert len(unflat) == len(lengths)\n return unflat\n\n @overload\n def pad(self, seqs: List[Ints2d], round_to=1) -> Ints3d:\n ...\n\n @overload # noqa: F811\n def pad(self, seqs: List[Floats2d], round_to=1) -> Floats3d:\n ...\n\n def pad( # noqa: F811\n self, seqs: Union[List[Ints2d], List[Floats2d]], round_to=1\n ) -> Array3d:\n \"\"\"Perform padding on a list of arrays so that they each have the same\n length, by taking the maximum dimension across each axis. 
This only\n works on non-empty sequences with the same `ndim` and `dtype`.\n \"\"\"\n # TODO: This should be generalized to handle different ranks\n if not seqs:\n raise ValueError(\"Cannot pad empty sequence\")\n if len(set(seq.ndim for seq in seqs)) != 1:\n raise ValueError(\"Cannot pad sequences with different ndims\")\n if len(set(seq.dtype for seq in seqs)) != 1:\n raise ValueError(\"Cannot pad sequences with different dtypes\")\n if len(set(seq.shape[1:] for seq in seqs)) != 1:\n raise ValueError(\"Cannot pad sequences that differ on other dimensions\")\n # Find the maximum dimension along each axis. That's what we'll pad to.\n length = max(len(seq) for seq in seqs)\n # Round the length to nearest bucket -- helps on GPU, to make similar\n # array sizes.\n length = (length + (round_to - 1)) // round_to * round_to\n final_shape = (len(seqs), length) + seqs[0].shape[1:]\n output: Array3d = self.alloc(final_shape, dtype=seqs[0].dtype)\n for i, arr in enumerate(seqs):\n # It's difficult to convince this that the dtypes will match.\n output[i, : arr.shape[0]] = arr # type: ignore\n return output\n\n def unpad(self, padded: Array3d, lengths: List[int]) -> List2d:\n \"\"\"The reverse/backward operation of the `pad` function: transform an\n array back into a list of arrays, each with their original length.\n \"\"\"\n output = []\n for i, length in enumerate(lengths):\n output.append(padded[i, :length])\n return cast(List2d, output)\n\n def list2padded(self, seqs: List[Floats2d]) -> Padded:\n \"\"\"Pack a sequence of 2d arrays into a Padded datatype.\"\"\"\n if not seqs:\n return Padded(\n self.alloc3f(0, 0, 0), self.alloc1i(0), self.alloc1i(0), self.alloc1i(0)\n )\n elif len(seqs) == 1:\n data = self.reshape3f(seqs[0], seqs[0].shape[0], 1, seqs[0].shape[1])\n size_at_t = self.asarray1i([1] * data.shape[0])\n lengths = self.asarray1i([data.shape[0]])\n indices = self.asarray1i([0])\n return Padded(data, size_at_t, lengths, indices)\n lengths_indices = [(len(seq), i) for i, seq in enumerate(seqs)]\n lengths_indices.sort(reverse=True)\n indices_ = [i for length, i in lengths_indices]\n lengths_ = [length for length, i in lengths_indices]\n nS = max([seq.shape[0] for seq in seqs])\n nB = len(seqs)\n nO = seqs[0].shape[1]\n # Reorder the sequences, by length. 
This looks the same in either\n # direction: you're swapping elements between their original and sorted\n # position.\n seqs = [seqs[i] for i in indices_]\n arr: Floats3d = self.pad(seqs)\n assert arr.shape == (nB, nS, nO), (nB, nS, nO)\n arr = self.as_contig(arr.transpose((1, 0, 2)))\n assert arr.shape == (nS, nB, nO)\n # Build a lookup table so we can find how big the batch is at point t.\n batch_size_at_t_ = [0 for _ in range(nS)]\n current_size = len(lengths_)\n for t in range(nS):\n while current_size and t >= lengths_[current_size - 1]:\n current_size -= 1\n batch_size_at_t_[t] = current_size\n assert sum(lengths_) == sum(batch_size_at_t_)\n return Padded(\n cast(Floats3d, arr),\n self.asarray1i(batch_size_at_t_),\n self.asarray1i(lengths_),\n self.asarray1i(indices_),\n )\n\n def padded2list(self, padded: Padded) -> List2d:\n \"\"\"Unpack a Padded datatype to a list of 2-dimensional arrays.\"\"\"\n data = padded.data\n indices = to_numpy(padded.indices)\n lengths = to_numpy(padded.lengths)\n unpadded: List[Optional[Floats2d]] = [None] * len(lengths)\n # Transpose from (length, batch, data) to (batch, length, data)\n data = self.as_contig(data.transpose((1, 0, 2)))\n for i in range(data.shape[0]):\n unpadded[indices[i]] = data[i, : int(lengths[i])]\n return cast(List2d, unpadded)\n\n def get_dropout_mask(self, shape: Shape, drop: Optional[float]) -> FloatsXd:\n \"\"\"Create a random mask for applying dropout, with a certain percent of\n the mask (defined by `drop`) will contain zeros. The neurons at those\n positions will be deactivated during training, resulting in a more\n robust network and less overfitting.\n \"\"\"\n if drop is None or drop <= 0:\n return self.xp.ones(shape, dtype=\"f\")\n elif drop >= 1.0:\n return self.alloc(shape)\n coinflips = self.xp.random.uniform(0.0, 1.0, shape)\n mask = (coinflips >= drop) / (1.0 - drop)\n return cast(FloatsXd, self.asarray(mask, dtype=\"float32\"))\n\n def alloc1f(self, d0: int, *, dtype: Optional[DTypesFloat] = \"float32\") -> Floats1d:\n return self.alloc((d0,), dtype=dtype)\n\n def alloc2f(\n self, d0: int, d1: int, *, dtype: Optional[DTypesFloat] = \"float32\"\n ) -> Floats2d:\n return self.alloc((d0, d1), dtype=dtype)\n\n def alloc3f(\n self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesFloat] = \"float32\"\n ) -> Floats3d:\n return self.alloc((d0, d1, d2), dtype=dtype)\n\n def alloc4f(\n self,\n d0: int,\n d1: int,\n d2: int,\n d3: int,\n *,\n dtype: Optional[DTypesFloat] = \"float32\",\n ) -> Floats4d:\n return self.alloc((d0, d1, d2, d3), dtype=dtype)\n\n def alloc_f(\n self, shape: Shape, *, dtype: Optional[DTypesFloat] = \"float32\"\n ) -> FloatsXd:\n return self.alloc(shape, dtype=dtype)\n\n def alloc1i(self, d0: int, *, dtype: Optional[DTypesInt] = \"int32\") -> Ints1d:\n return self.alloc((d0,), dtype=dtype)\n\n def alloc2i(\n self, d0: int, d1: int, *, dtype: Optional[DTypesInt] = \"int32\"\n ) -> Ints2d:\n return self.alloc((d0, d1), dtype=dtype)\n\n def alloc3i(\n self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesInt] = \"int32\"\n ) -> Ints3d:\n return self.alloc((d0, d1, d2), dtype=dtype)\n\n def alloc4i(\n self,\n d0: int,\n d1: int,\n d2: int,\n d3: int,\n *,\n dtype: Optional[DTypesInt] = \"int32\",\n ) -> Ints4d:\n return self.alloc((d0, d1, d2, d3), dtype=dtype)\n\n def alloc_i(self, shape: Shape, *, dtype: Optional[DTypesInt] = \"int32\") -> IntsXd:\n return self.alloc(shape, dtype=dtype)\n\n def alloc(self, shape: Shape, *, dtype: Optional[DTypes] = \"float32\") -> ArrayT:\n \"\"\"Allocate an 
array of a certain shape.\"\"\"\n if isinstance(shape, int):\n shape = (shape,)\n return self.xp.zeros(shape, dtype=dtype)\n\n def reshape1f(self, array: FloatsXd, d0: int) -> Floats1d:\n return cast(Floats1d, self.reshape(array, (d0,)))\n\n def reshape2f(self, array: FloatsXd, d0: int, d1: int) -> Floats2d:\n return cast(Floats2d, self.reshape(array, (d0, d1)))\n\n def reshape3f(self, array: FloatsXd, d0: int, d1: int, d2: int) -> Floats3d:\n return cast(Floats3d, self.reshape(array, (d0, d1, d2)))\n\n def reshape4f(\n self, array: FloatsXd, d0: int, d1: int, d2: int, d3: int\n ) -> Floats4d:\n return cast(Floats4d, self.reshape(array, (d0, d1, d2, d3)))\n\n def reshape_f(self, array: FloatsXd, shape: Shape) -> FloatsXd:\n return self.reshape(array, shape)\n\n def reshape1i(self, array: IntsXd, d0: int) -> Ints1d:\n return cast(Ints1d, self.reshape(array, (d0,)))\n\n def reshape2i(self, array: IntsXd, d0: int, d1: int) -> Ints2d:\n return cast(Ints2d, self.reshape(array, (d0, d1)))\n\n def reshape3i(self, array: IntsXd, d0: int, d1: int, d2: int) -> Ints3d:\n return cast(Ints3d, self.reshape(array, (d0, d1, d2)))\n\n def reshape4i(self, array: IntsXd, d0: int, d1: int, d2: int, d3: int) -> Ints4d:\n return cast(Ints4d, self.reshape(array, (d0, d1, d2, d3)))\n\n def reshape_i(self, array: IntsXd, shape: Shape) -> IntsXd:\n return self.reshape(array, shape)\n\n def reshape(self, array: ArrayT, shape: Shape) -> ArrayT:\n \"\"\"Reshape an array.\"\"\"\n if isinstance(shape, int):\n shape = (shape,)\n return cast(ArrayT, array.reshape(shape))\n\n def asarray4f(\n self,\n data: Union[Floats4d, Sequence[int]],\n *,\n dtype: Optional[DTypes] = \"float32\",\n ) -> Floats4d:\n return cast(Floats4d, self.asarray(data, dtype=dtype))\n\n def asarray3f(\n self,\n data: Union[Floats3d, Sequence[int]],\n *,\n dtype: Optional[DTypes] = \"float32\",\n ) -> Floats3d:\n return cast(Floats3d, self.asarray(data, dtype=dtype))\n\n def asarray2f(\n self,\n data: Union[Floats2d, Sequence[int]],\n *,\n dtype: Optional[DTypes] = \"float32\",\n ) -> Floats2d:\n return cast(Floats2d, self.asarray(data, dtype=dtype))\n\n def asarray1f(\n self,\n data: Union[Floats1d, Sequence[int]],\n *,\n dtype: Optional[DTypes] = \"float32\",\n ) -> Floats1d:\n return cast(Floats1d, self.asarray(data, dtype=dtype))\n\n def asarray_f(\n self,\n data: Union[FloatsXd, Sequence[float]],\n *,\n dtype: Optional[DTypes] = \"float32\",\n ) -> FloatsXd:\n return cast(FloatsXd, self.asarray(data, dtype=dtype))\n\n def asarray1i(\n self, data: Union[Ints1d, Sequence[int]], *, dtype: Optional[DTypes] = \"int32\"\n ) -> Ints1d:\n return cast(Ints1d, self.asarray(data, dtype=dtype))\n\n def asarray2i(\n self, data: Union[Ints2d, Sequence[int]], *, dtype: Optional[DTypes] = \"int32\"\n ) -> Ints2d:\n return cast(Ints2d, self.asarray(data, dtype=dtype))\n\n def asarray3i(\n self, data: Union[Ints3d, Sequence[int]], *, dtype: Optional[DTypes] = \"int32\"\n ) -> Ints3d:\n return cast(Ints3d, self.asarray(data, dtype=dtype))\n\n def asarray4i(\n self, data: Union[Ints4d, Sequence[int]], *, dtype: Optional[DTypes] = \"int32\"\n ) -> Ints4d:\n return cast(Ints4d, self.asarray(data, dtype=dtype))\n\n def asarray_i(\n self, data: Union[IntsXd, Sequence[int]], *, dtype: Optional[DTypes] = \"int32\"\n ) -> IntsXd:\n return cast(IntsXd, self.asarray(data, dtype=dtype))\n\n def asarray(\n self,\n data: Union[ArrayXd, Sequence[ArrayXd], Sequence[float], Sequence[int]],\n *,\n dtype: Optional[DTypes] = None,\n ) -> ArrayXd:\n \"\"\"Ensure a given array is of 
the correct type.\"\"\"\n if isinstance(data, self.xp.ndarray):\n if dtype is None:\n return data\n elif data.dtype == dtype:\n return data\n else:\n return self.xp.asarray(data, dtype=dtype)\n elif hasattr(data, \"numpy\"):\n # Handles PyTorch Tensor\n return data.numpy() # type: ignore\n elif dtype is not None:\n return self.xp.array(data, dtype=dtype)\n else:\n return self.xp.array(data)\n\n def as_contig(self, data: ArrayT, dtype: Optional[DTypes] = None) -> ArrayT:\n \"\"\"Allow the backend to make a contiguous copy of an array.\n Implementations of `Ops` do not have to make a copy or make it\n contiguous if that would not improve efficiency for the execution engine.\n \"\"\"\n if data.flags[\"C_CONTIGUOUS\"] and dtype in (None, data.dtype):\n return data\n kwargs = {\"dtype\": dtype} if dtype is not None else {}\n return self.xp.ascontiguousarray(data, **kwargs)\n\n def sigmoid(self, X: FloatsType, *, inplace: bool = False) -> FloatsType:\n if inplace:\n self.xp.exp(-X, out=X)\n X += 1.0 # type: ignore\n X **= -1.0 # type: ignore\n return cast(FloatsType, X)\n else:\n return cast(FloatsType, 1.0 / (1.0 + self.xp.exp(-X)))\n\n def dsigmoid(self, Y: FloatsType, *, inplace: bool = False) -> FloatsType:\n if inplace:\n Y *= 1 - Y\n return Y\n else:\n return Y * (1.0 - Y)\n\n def dtanh(self, Y: FloatsT, *, inplace: bool = False) -> FloatsT:\n if inplace:\n Y **= 2\n Y *= -1.0\n Y += 1.0\n return Y\n else:\n return 1 - Y ** 2\n\n def softmax(\n self,\n x: FloatsT,\n *,\n inplace: bool = False,\n axis: int = -1,\n temperature: float = 1.0,\n ) -> FloatsT:\n if temperature != 1.0:\n x = x / temperature\n maxes = self.xp.max(x, axis=axis, keepdims=True)\n shifted = x - maxes\n new_x = self.xp.exp(shifted)\n new_x /= new_x.sum(axis=axis, keepdims=True)\n return new_x\n\n def softmax_sequences(\n self, Xs: Floats2d, lengths: Ints1d, *, inplace: bool = False, axis: int = -1\n ) -> Floats2d:\n if Xs.ndim >= 3:\n err = f\"Softmax currently only supports 2d. 
Got: {Xs.ndim}\"\n raise NotImplementedError(err)\n # This loses almost no fidelity, and helps the numerical stability.\n Xs = self.xp.clip(Xs, -20.0, 20.0)\n new_x = self.xp.exp(Xs)\n summed = self.backprop_reduce_sum(self.reduce_sum(new_x, lengths), lengths)\n new_x /= summed\n return new_x\n\n def backprop_softmax(\n self, Y: FloatsT, dY: FloatsT, *, axis: int = -1, temperature: float = 1.0\n ) -> FloatsT:\n if temperature != 1.0:\n dY = dY / temperature\n\n dX = Y * dY\n dX -= Y * dX.sum(axis=axis, keepdims=True)\n return dX\n\n def backprop_softmax_sequences(\n self, dY: Floats2d, Y: Floats2d, lengths: Ints1d\n ) -> Floats2d:\n dX = Y * dY\n sum_dX = self.backprop_reduce_sum(self.reduce_sum(dX, lengths), lengths)\n dX -= Y * sum_dX\n return dX\n\n def lstm_forward_training(\n self,\n params: Floats1d,\n H0: Floats3d,\n C0: Floats3d,\n X: Floats2d,\n size_at_t: Ints1d,\n ) -> Tuple[Floats2d, Tuple]:\n assert H0.shape == C0.shape\n assert H0.shape[1] == C0.shape[1]\n Y, fwd_state = lstm_forward_training(params, H0, C0, X, size_at_t)\n return Y, fwd_state\n\n def lstm_forward_inference(\n self,\n params: Floats1d,\n H0: Floats3d,\n C0: Floats3d,\n X: Floats2d,\n size_at_t: Ints1d,\n ) -> Floats2d:\n Y, _ = lstm_forward_training(params, H0, C0, X, size_at_t)\n return Y\n\n def backprop_lstm(\n self, dY: Floats2d, lengths: Ints1d, params: Floats1d, fwd_state: Tuple\n ) -> Tuple[Floats2d, Floats1d]:\n dX, d_params = backprop_lstm(dY, lengths, params, fwd_state)\n return dX, d_params\n\n def maxout(self, X: Floats3d) -> Tuple[Floats2d, Ints2d]:\n which = X.argmax(axis=-1, keepdims=False)\n return X.max(axis=-1), which\n\n def backprop_maxout(self, dY: Floats2d, which: Ints2d, P: int) -> Floats3d:\n dX = self.alloc3f(dY.shape[0], dY.shape[1], P)\n for b in range(dY.shape[0]):\n for o in range(dY.shape[1]):\n dX[b, o, which[b, o]] = dY[b, o]\n return dX\n\n def relu(self, X: Floats2d, inplace: bool = False) -> Floats2d:\n if not inplace:\n return X * (X > 0)\n else:\n X *= X > 0\n return X\n\n def backprop_relu(\n self, dY: Floats2d, Y: Floats2d, inplace: bool = False\n ) -> Floats2d:\n if not inplace:\n return dY * (Y > 0)\n dY *= Y > 0\n return dY\n\n def clipped_linear(\n self,\n X: FloatsType,\n slope: float = 1.0,\n offset: float = 0.0,\n min_val: float = 0.0,\n max_val: float = 1.0,\n inplace: bool = False,\n ) -> FloatsType:\n if inplace:\n X *= slope # type: ignore\n X += offset # type: ignore\n return cast(FloatsType, self.xp.clip(X, min_val, max_val, out=X))\n out = X * slope + offset # type: ignore\n return cast(FloatsType, self.xp.clip(out, min_val, max_val))\n\n def backprop_clipped_linear(\n self,\n dY: FloatsType,\n X: FloatsType,\n slope: float = 1.0,\n offset: float = 0.0,\n min_val: float = 0.0,\n max_val: float = 1.0,\n inplace: bool = False,\n ) -> FloatsType:\n low = (min_val - offset) / slope\n high = (max_val - offset) / slope\n slope = self.xp.float64(slope).astype(X.dtype)\n zero = self.xp.float64(0.0).astype(X.dtype)\n dX = self.xp.where((low < X) & (X < high), slope, zero)\n if inplace:\n dY *= dX\n return dY\n return dY * dX\n\n def relu_k(\n self, X: FloatsType, n: float = 6.0, inplace: bool = False\n ) -> FloatsType:\n return self.clipped_linear(X, max_val=n, inplace=inplace)\n\n def backprop_relu_k(\n self, dY: FloatsType, X: FloatsType, n: float = 6.0, inplace: bool = False\n ) -> FloatsType:\n return self.backprop_clipped_linear(dY, X, max_val=n, inplace=inplace)\n\n def hard_sigmoid(self, X: FloatsType, inplace: bool = False) -> FloatsType:\n return 
self.clipped_linear(X, slope=0.2, offset=0.5)\n\n def backprop_hard_sigmoid(\n self, dY: FloatsType, X: FloatsType, inplace: bool = False\n ) -> FloatsType:\n return self.backprop_clipped_linear(dY, X, slope=0.2, offset=0.5)\n\n def hard_tanh(self, X: FloatsType, inplace: bool = False) -> FloatsType:\n return self.clipped_linear(X, min_val=-1.0, max_val=1.0)\n\n def backprop_hard_tanh(\n self, dY: FloatsType, X: FloatsType, inplace: bool = False\n ) -> FloatsType:\n return self.backprop_clipped_linear(dY, X, min_val=-1.0, max_val=1.0)\n\n def swish(self, X: FloatsType, inplace: bool = False) -> FloatsType:\n if inplace:\n X *= self.sigmoid(X) # type: ignore\n return cast(FloatsType, X)\n out = X * self.sigmoid(X) # type: ignore\n return cast(FloatsType, out)\n\n def backprop_swish(\n self, dY: FloatsType, X: FloatsType, Y: FloatsType, inplace: bool = False\n ) -> FloatsType:\n Y = Y + self.sigmoid(X) * (1 - Y) # type: ignore\n if inplace:\n dY *= Y # type: ignore\n return cast(FloatsType, dY)\n out = dY * Y # type: ignore\n return cast(FloatsType, out)\n\n # Following https://www.scitepress.org/Papers/2019/74696/74696.pdf\n def hard_swish(self, X: FloatsType, inplace: bool = False) -> FloatsType:\n if inplace:\n X *= self.hard_sigmoid(X) # type: ignore\n return cast(FloatsType, X)\n out = X * self.hard_sigmoid(X) # type: ignore\n return cast(FloatsType, out)\n\n def backprop_hard_swish(\n self, dY: FloatsType, X: FloatsType, inplace: bool = False\n ) -> FloatsType:\n dX = X * 0.4 + 0.5\n dX[X > 2.5] = 1.0\n dX[X < -2.5] = 0\n if inplace:\n dY *= dX\n return dY\n return dY * dX\n\n # From https://arxiv.org/pdf/1905.02244v5.pdf\n def hard_swish_mobilenet(self, X: FloatsType, inplace: bool = False) -> FloatsType:\n if inplace:\n X *= self.relu_k(X + 3) / 6\n return X\n return X * (self.relu_k(X + 3) / 6)\n\n def backprop_hard_swish_mobilenet(\n self, dY: FloatsType, X: FloatsType, inplace: bool = False\n ) -> FloatsType:\n dX = (1 / 6) * (X * 2.0 + 3.0)\n dX[X > 3.0] = 1.0\n dX[X < -3.0] = 0\n if inplace:\n dY *= dX\n return dY\n return dX * dY\n\n # Code snippet taken from:\n # https://www.johndcook.com/blog/2009/01/19/stand-alone-error-function-erf/\n def erf(self, X: FloatsType) -> FloatsType:\n # save the sign of x\n sign = self.xp.sign(X)\n X = self.xp.abs(X)\n\n a1 = 0.254829592\n a2 = -0.284496736\n a3 = 1.421413741\n a4 = -1.453152027\n a5 = 1.061405429\n p = 0.3275911\n\n t = 1.0 / (1.0 + p * X)\n y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * self.xp.exp(\n -X * X\n )\n out = sign * y\n out = out.astype(X.dtype)\n return out\n\n def sechsq(self, X: FloatsType) -> FloatsType:\n return (1 / self.xp.cosh(X)) ** 2\n\n def gelu_approx(self, X: FloatsType, inplace: bool = False) -> FloatsType:\n tmp = 1.0 + self.xp.tanh(SQRT2PI * (X + 0.044715 * self.xp.power(X, 3)))\n tmp *= 0.5\n tmp = tmp.astype(X.dtype)\n if inplace:\n X *= tmp\n return X\n Y = self.xp.zeros_like(X)\n Y += tmp\n Y *= X\n return cast(FloatsType, Y)\n\n def backprop_gelu_approx(\n self, dY: FloatsType, X: FloatsType, inplace: bool = False\n ) -> FloatsType:\n dX = self.alloc_f(X.shape)\n Xp3 = self.xp.power(X, 3)\n tmp = 0.5 * self.xp.tanh(0.0356774 * Xp3 + 0.797885 * X)\n tmp += (0.0535161 * Xp3 + 0.398942 * X) * self.sechsq(\n 0.0356774 * Xp3 + 0.797885 * X\n )\n tmp += 0.5\n dX += tmp\n if inplace:\n dY *= dX\n return dY\n return dY * dX\n\n def gelu(self, X: FloatsType, inplace: bool = False) -> FloatsType:\n # GELU(x) = x · Φ(x)\n cdf = gaussian_cdf(self, X)\n if inplace:\n X *= cdf # type: 
ignore\n return X\n return X * cdf # type: ignore\n\n def backprop_gelu(\n self, dY: FloatsType, X: FloatsType, inplace: bool = False\n ) -> FloatsType:\n # GELU'(x) = Φ(x) + x · PDF(x)\n dX = gaussian_cdf(self, X) + X * gaussian_pdf(self, X) # type: ignore\n if inplace:\n dY *= dX\n return dY\n return dY * dX\n\n def mish(\n self, X: FloatsType, threshold: float = 20.0, inplace: bool = False\n ) -> FloatsType:\n tmp = X * self.xp.tanh(self.xp.log(1.0 + self.xp.exp(X)))\n Y = self.xp.where(X >= threshold, X, tmp)\n if inplace:\n X[:] = Y\n return X\n else:\n return Y\n\n def backprop_mish(\n self,\n dY: FloatsType,\n X: Floats2d,\n threshold: float = 20.0,\n inplace: bool = False,\n ) -> FloatsType:\n xp = get_array_module(X)\n indices = X < threshold\n Xsub = X[indices]\n dYsub = dY[indices]\n omega = 4.0 * (Xsub + 1.0)\n omega += 4.0 * xp.exp(2.0 * Xsub)\n omega += xp.exp(3.0 * Xsub)\n omega += xp.exp(Xsub) * ((4.0 * Xsub) + 6.0)\n delta = xp.exp(Xsub) + 1.0\n delta *= delta\n delta += 1.0\n dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta ** 2))\n # Gradient when above threshold will ignore softplus.\n if inplace:\n out = dY\n else:\n out = xp.copy(dY)\n out[indices] = dXsub\n return out\n\n def update_averages(\n self, ema: FloatsT, weights: FloatsT, t: int, max_decay: float = 0.9999\n ) -> None:\n # Internals for optimizer\n decay = (1.0 + t) / (10.0 + t)\n if decay > max_decay:\n decay = max_decay\n ema -= (1 - decay) * (ema - weights)\n\n def adam(\n self,\n weights: Floats1d,\n gradient: Floats1d,\n mom1: Floats1d,\n mom2: Floats1d,\n beta1: float,\n beta2: float,\n eps: float,\n learn_rate: float,\n mod_rate: float = 1.0,\n ) -> Tuple[Floats1d, Floats1d, Floats1d, Floats1d]:\n # Internals for optimizer\n mom1 *= beta1\n mom2 *= beta2\n mom1 += gradient * (1.0 - beta1)\n mom2 += gradient * gradient * (1.0 - beta2)\n # Here we assume learn rate is calculated by the caller.\n # cdef weight_t a_t = learn_rate * sqrt(1-beta2**hp.t) / (1-beta1**hp.t);\n weights -= learn_rate * (mom1 / (mod_rate * self.xp.sqrt(mom2) + eps))\n return weights, gradient, mom1, mom2\n\n def clip_gradient(self, gradient: FloatsT, threshold: float) -> FloatsT:\n # Internals for optimizer\n xp = get_array_module(gradient)\n grad_norm = xp.linalg.norm(gradient)\n if grad_norm >= threshold:\n gradient *= threshold / grad_norm\n return gradient\n\n def logloss(self, y_true: FloatsT, y_pred: FloatsT) -> float:\n # Currently not used\n log_yp = self.xp.log(y_pred + 1e-8)\n loss = (y_true * log_yp) + (1 - y_true) * self.xp.log((1 - y_pred) + 1e-8)\n return -loss\n\n def reduce_sum(self, X: Floats2d, lengths: Ints1d) -> Floats2d:\n Y = self.alloc2f(lengths.shape[0], X.shape[1])\n start = 0\n for i, length in enumerate(lengths):\n Y[i] = X[start : start + length].sum(axis=0)\n start += length\n return Y\n\n def reduce_mean(self, X: Floats2d, lengths: Ints1d) -> Floats2d:\n Y = self.alloc2f(lengths.shape[0], X.shape[1])\n start = 0\n for i, length in enumerate(lengths):\n if length:\n Y[i] = X[start : start + length].mean(axis=0)\n start += length\n return Y\n\n def reduce_max(self, X: Floats2d, lengths: Ints1d) -> Tuple[Floats2d, Ints2d]:\n Y = self.alloc2f(lengths.shape[0], X.shape[1])\n which = self.alloc2i(lengths.shape[0], X.shape[1])\n start = 0\n for i, length in enumerate(lengths):\n if length:\n which[i] = X[start : start + length].argmax(axis=0)\n Y[i] = X[start : start + length].max(axis=0)\n start += length\n return Y, which\n\n def backprop_reduce_sum(self, d_sums: Floats2d, lengths: Ints1d) -> Floats2d:\n 
dX = self.alloc2f(lengths.sum(), d_sums.shape[1])\n start = 0\n for i, length in enumerate(lengths):\n dX[start : start + length] = d_sums[i]\n start += length\n return dX\n\n def backprop_reduce_mean(self, d_means: Floats2d, lengths: Ints1d) -> Floats2d:\n dX = self.alloc2f(lengths.sum(), d_means.shape[1])\n start = 0\n for i, length in enumerate(lengths):\n dX[start : start + length] = d_means[i] / length\n start += length\n return dX\n\n def backprop_reduce_max(\n self, d_maxes: Floats2d, which: Ints2d, lengths: Ints1d\n ) -> Floats2d:\n dX = self.alloc2f(lengths.sum(), d_maxes.shape[1])\n start = 0\n for i, length in enumerate(lengths):\n self.xp.put_along_axis(\n dX[start : start + length], which[i].reshape((1, -1)), d_maxes[i], 0\n )\n start += length\n return dX\n\n def hash(self, ids: Ints1d, seed: int) -> Ints2d:\n \"\"\"Hash a sequence of 64-bit keys into a table with 4 32-bit keys, using\n murmurhash3.\n \"\"\"\n from .numpy_ops import NumpyOps\n\n numpy_ops = NumpyOps()\n return self.asarray2i(\n numpy_ops.hash(numpy_ops.asarray(ids, dtype=\"uint64\"), seed)\n )\n\n def ngrams(self, n: int, keys: Ints1d) -> Ints1d:\n from .numpy_ops import NumpyOps\n\n numpy_ops = NumpyOps()\n return self.asarray1i(\n numpy_ops.ngrams(n, numpy_ops.asarray(keys, dtype=\"uint64\"))\n )\n\n def position_encode(\n self, N: int, D: int, period: int = 10000, out: Optional[Floats2d] = None\n ) -> Floats2d:\n # Currently internals only\n from .numpy_ops import NumpyOps\n\n numpy_ops = NumpyOps()\n return self.asarray2f(numpy_ops.position_encode(N, D, period, out))\n\n def scatter_add(\n self, table: FloatsXd, indices: IntsXd, values: FloatsXd\n ) -> FloatsXd:\n return self.xp.add.at(table, indices, values)\n\n def insert_into(self, shape, Xs):\n \"\"\"Maybe don't need this? Just a quicky to get Jax working.\"\"\"\n output = self.alloc(shape, dtype=Xs[0].dtype)\n for i, x in enumerate(Xs):\n output[i, : x.shape[0]] = x\n return output\n\n\n\"\"\"\nLSTM Notation (kind of involved, but made it a lot easier to write)\n\nX: Inputs\nY: Outputs (aka hiddens)\nC: Cells\nG: Gates (Output of non-linearity, i.e. lstm_gates(X @ W.T)\nA: Activations (X @ W.T, before non-linearity)\n\nImagine we have the input:\nbatch = [\n [\"apple\", \"banana\", \"cantaloupe\", \"date\", \"elderberry\"],\n [\"aardvark\", \"bat\", \"capybara\", \"dingo\", \"elephant\"]\n]\n\nThe input variable X will have one vector per word, so X[0, 1] will be banana's\nvector, X[0, 1, 0] will be a float, the first element of that vector.\n\nWe're computing an output variable Y of shape (nL, nB, nO), so that Y[0, 1] is\nthe output variable of banana.\n\nA problem with variables for RNNs is keeping the timesteps straight. It's hard\nto distinguish the current, previous, and next timesteps. To solve this problem,\nwe follow the convention that **we are at timestep 3**.\n\nAdditionally, the variables for Y and C are offset by one, as the 0th elements\nhave the initial hiddens and initial cells. So:\n\n t=3\n Xt3: The input vectors for 'dingo' and 'date', i.e. X[t]\n Yt3: The output vectors for 'dingo' and 'date', i.e. 
Y[t+1] (Y is offset.)\n Ct2: The cells calculated at 'c...', that are the input for 'd...'\n Ct3: The cells calculated at 'd...', that are the input for 'e...'\n At3: The activations at 'd...'\n Gt3: The gates at 'd...'\n\"\"\"\n\n\ndef lstm_forward_training(\n params: Floats1d, c_init: Floats3d, h_init: Floats3d, X: Floats2d, lengths: Ints1d\n) -> Tuple[Floats2d, Tuple]:\n xp = get_array_module(params)\n depth, dirs, nO = c_init.shape\n N, nI = X.shape\n batch_size = lengths[0]\n # Preallocate these so we can pass them through for loop.\n G = cast(Floats4d, xp.zeros((depth, dirs, X.shape[0], nO * 4), dtype=\"f\"))\n Y = cast(Floats4d, xp.zeros((depth, dirs, X.shape[0], nO), dtype=\"f\"))\n C = cast(Floats4d, xp.zeros((depth, dirs, X.shape[0], nO), dtype=\"f\"))\n Yt2 = cast(Floats2d, xp.zeros((batch_size, nO), dtype=\"f\"))\n Ct2 = cast(Floats2d, xp.zeros((batch_size, nO), dtype=\"f\"))\n # Compute the start and end indices first.\n indices = []\n start = 0\n for batch_size in lengths:\n indices.append((start, start + batch_size))\n start += batch_size\n params_i = 0\n orig_X = X\n for i in range(depth):\n nI = X.shape[1]\n for d in range(dirs):\n # The inits are shaped (depth, dirs, nO). We add the internal dimension\n # to make them set correctly.\n Yt2 = h_init[i, d].reshape((1, nO)) # type: ignore\n Ct2 = c_init[i, d].reshape((1, nO)) # type: ignore\n layer_params, params_i = _split_weights(params, i, nO, nI, params_i)\n Wx, Wh, bias = _transpose_weights(layer_params)\n G[i, d] += xp.dot(X, Wx.T)\n G[i, d] += bias\n for start, end in indices if d == 0 else reversed(indices):\n # When we iterate left-to-right, t2 might be longer than t3.\n Yt2 = Yt2[: end - start]\n Ct2 = Ct2[: end - start]\n # But in right-to-left, it's the opposite: t3 can be longer.\n Gt3 = G[i, d, start:end]\n Gt3 = Gt3[: Yt2.shape[0]]\n Gt3 += xp.dot(Yt2, Wh.T)\n Gt3_ = cast(Floats3d, Gt3.reshape((-1, nO, 4)))\n hf = sigmoid(Gt3_[:, :, 0])\n hi = sigmoid(Gt3_[:, :, 1])\n ho = sigmoid(Gt3_[:, :, 2])\n hc = xp.tanh(Gt3_[:, :, 3])\n Ct3 = hf * Ct2\n Ct3 += hi * hc\n # Store results\n Gt3 = (\n xp.hstack((hf, hi, ho, hc))\n .reshape((-1, 4, nO))\n .transpose((0, 2, 1))\n .reshape((-1, nO * 4))\n )\n # Fix the endpoint to account for shorter slices when iterating\n # reversed. Not 100% sure this is right. If there's a bug, look\n # here?\n end = min(end, start + ho.shape[0])\n Y[i, d, start:end] = xp.tanh(Ct3) * ho\n G[i, d, start:end] = Gt3\n C[i, d, start:end] = Ct3\n # Set the t2 variables to the current t3 variables.\n Ct2 = Ct3\n Yt2 = Y[i, d, start:end]\n H = cast(Floats2d, Y[i].transpose((1, 0, 2)).reshape((N, -1)))\n if dirs == 2:\n H = xp.ascontiguousarray(H)\n X = H\n return H, (Y, G, C, orig_X)\n\n\ndef backprop_lstm(dY: Floats2d, lengths: Ints1d, params: Floats1d, fwd_state: Tuple):\n xp = get_array_module(params)\n\n Y: Floats4d\n G: Floats4d\n C: Floats4d\n X: Floats2d\n Wx: Floats2d\n Wh: Floats2d\n bias: Floats1d\n dWx: Floats2d\n dWh: Floats2d\n d_bias: Floats1d\n Y, G, C, X = fwd_state\n depth, dirs, N, nO = C.shape\n nI = X.shape[1]\n batch_size = lengths[0]\n # We don't need to store all the cells for all the layers.\n dC = cast(Floats2d, xp.zeros((N, nO), dtype=C.dtype))\n dG = cast(Floats2d, xp.zeros((N, nO * 4), dtype=C.dtype))\n d_params = cast(Floats1d, xp.zeros((params.shape[0],), dtype=params.dtype))\n # Collect the params and slices. 
It makes it a bit easier to get the indexing\n # right, when we're iterating backwards.\n params_i = 0\n all_layer_params: List[List[Tuple[Tuple[Floats2d, Floats2d, Floats1d], int]]] = []\n for i in range(depth):\n all_layer_params.append([])\n n_inputs = nI if i == 0 else (nO * dirs)\n for d in range(dirs):\n layer_params, params_i = _split_weights(params, i, nO, n_inputs, params_i)\n layer_params = _transpose_weights(layer_params)\n all_layer_params[-1].append((layer_params, params_i))\n params_i = 0\n all_layer_grads: List[List[Tuple[Tuple[Floats2d, Floats2d, Floats1d], int]]] = []\n for i in range(depth):\n all_layer_grads.append([])\n n_inputs = nI if i == 0 else (nO * dirs)\n for d in range(dirs):\n layer_grads, params_i = _split_weights(d_params, i, nO, n_inputs, params_i)\n layer_grads = _transpose_weights(layer_grads)\n all_layer_grads[-1].append((layer_grads, params_i))\n # Similarly, we want to compute the indices first\n indices = []\n start = 0\n for batch_size in lengths:\n indices.append((start, start + batch_size))\n start += batch_size\n\n Xs = [X] + [\n cast(Floats2d, Y[i].transpose((1, 0, 2)).reshape((N, -1)))\n for i in range(depth - 1)\n ]\n dXs = [xp.zeros((X.shape[0], X.shape[1]), dtype=X.dtype) for X in Xs]\n # Okay, now do the actual looping\n for i in reversed(range(depth)):\n dY3d = cast(Floats3d, dY.reshape((N, dirs, nO)).transpose((1, 0, 2)))\n dX = dXs[i]\n X = Xs[i]\n if dirs >= 2:\n dY3d = xp.ascontiguousarray(dY3d)\n for d in range(dirs):\n Wx, Wh, bias = all_layer_params[i][d][0]\n dWx, dWh, d_bias = all_layer_grads[i][d][0]\n if d == 0:\n start_t3, end_t3 = indices[-1]\n layer_indices = indices[:-1]\n layer_indices.reverse()\n else:\n start_t3, end_t3 = indices[0]\n layer_indices = indices[1:]\n for start_t2, end_t2 in layer_indices:\n size = min(end_t2 - start_t2, end_t3 - start_t3)\n dGt3, dCt2 = backprop_lstm_gates(\n dY3d[d, start_t3 : start_t3 + size],\n dC[start_t3 : start_t3 + size],\n G[i, d, start_t3 : start_t3 + size],\n C[i, d, start_t3 : start_t3 + size],\n C[i, d, start_t2 : start_t2 + size],\n )\n # Backprop hidden-to-hidden w.r.t. hidden.\n dY3d[d, start_t2 : start_t2 + size] += dGt3 @ Wh\n # Update iteration variables\n dC[start_t2 : start_t2 + size] = dCt2\n start_t3 = start_t2\n end_t3 = end_t2\n # Backprop input-to-hidden w.r.t. weights.\n dWx += dG.T @ X\n # Backprop hidden-to-hidden w.r.t. weights.\n dWh += dG.T @ Y[i, d]\n # Backprop bias\n d_bias += dG.sum(axis=0)\n # Backprop input-to-hidden w.r.t. input\n dX += dG @ Wx\n dY = dX\n assert dX.shape[1] == X.shape[1]\n grad_parts = []\n for layer_grads in all_layer_grads:\n for dir_grads, _ in layer_grads:\n grad_parts.append(_untranspose_unsplit_weights(dir_grads))\n return dX, xp.concatenate(grad_parts)\n\n\ndef _split_weights(params: Floats1d, i: int, nO: int, nI: int, params_i: int):\n Wx_size = 4 * nO * nI\n bx_size = 4 * nO\n Wh_size = 4 * nO * nO\n bh_size = 4 * nO\n Wx = params[params_i : params_i + Wx_size].reshape((4 * nO, nI))\n params_i += Wx_size\n bx = params[params_i : params_i + bx_size].reshape((4 * nO,))\n params_i += bx_size\n Wh = params[params_i : params_i + Wh_size].reshape((4 * nO, nO))\n params_i += Wh_size\n bh = params[params_i : params_i + bh_size].reshape((4 * nO,))\n params_i += bh_size\n return ((Wx, bx), (Wh, bh)), params_i\n\n\ndef _transpose_weights(params):\n # Transpose the parameters so that the gates are the last dimension. 
This\n # makes it easier to fuse.\n (Wx, bx), (Wh, bh) = params\n xp = get_array_module(Wx)\n Wx = Wx.reshape((4, -1, Wx.shape[-1]))\n Wx = Wx.transpose((1, 0, 2)).reshape((-1, Wx.shape[-1]))\n bx = bx.reshape((4, -1)).transpose((1, 0)).reshape((-1,))\n Wh = Wh.reshape((4, -1, Wh.shape[-1]))\n Wh = Wh.transpose((1, 0, 2)).reshape((-1, Wh.shape[-1]))\n bh = bh.reshape((4, -1)).transpose((1, 0)).reshape((-1,))\n ascontig = xp.ascontiguousarray\n Wx = ascontig(Wx)\n Wh = ascontig(Wh)\n bias = ascontig(bx) + bh\n return Wx, Wh, bias\n\n\ndef _untranspose_unsplit_weights(params):\n Wx, Wh, bias = params\n xp = get_array_module(Wx)\n nO = Wh.shape[1]\n nI = Wx.shape[1]\n Wx = Wx.reshape((-1, 4, nI)).transpose((1, 0, 2)).reshape((-1, nI))\n Wh = Wh.reshape((-1, 4, nO)).transpose((1, 0, 2)).reshape((-1, nO))\n bias = bias.reshape((-1, 4)).transpose((1, 0)).reshape((-1,))\n zeros = xp.zeros(bias.shape, dtype=\"f\")\n return xp.concatenate((Wx.ravel(), bias, Wh.ravel(), zeros))\n\n\ndef backprop_lstm_gates(\n dYt3: Floats2d, dCt3: Floats2d, Gt3: Floats2d, Ct3: Floats2d, Ct2: Floats2d\n) -> Tuple[Floats2d, Floats2d]:\n # See above for notation. Step numbering refers to forward_lstm_gates\n xp = get_array_module(dYt3)\n hf, hi, ho, hc = xp.split(Gt3, 4, axis=-1)\n assert hf.shape[0] == hi.shape[0] == ho.shape[0] == hc.shape[0]\n assert hf.shape[0] == dYt3.shape[0] == dCt3.shape[0] == Ct3.shape[0] == Ct2.shape[0]\n tanhCt3 = xp.tanh(Ct3)\n # 3b: Yt3 = tanhCt3 * ho\n d_ho = dYt3 * tanhCt3\n d_tanhCt3 = dYt3 * ho\n # 3a: tanhCt3 = tanh(Ct3)\n dCt3 += d_tanhCt3 * dtanh(tanhCt3)\n # 2b: Ct3 += hi * hc\n d_hi = dCt3 * hc\n d_hc = dCt3 * hi\n # 2a: Ct3 = hf * Ct2\n d_hf = dCt3 * Ct2\n dCt2 = dCt3 * hf\n d_At3_hc = d_hc * dtanh(hc) # 1d\n d_At3_ho = d_ho * dsigmoid(ho) # 1c\n d_At3_hi = d_hi * dsigmoid(hi) # 1b\n d_At3_hf = d_hf * dsigmoid(hf) # 1a\n dAt3 = xp.concatenate((d_At3_hf, d_At3_hi, d_At3_ho, d_At3_hc), axis=-1)\n return dAt3, dCt2\n\n\ndef sigmoid(X, out=None):\n xp = get_array_module(X)\n return 1.0 / (1.0 + xp.exp(-X))\n\n\ndef dsigmoid(Y: ArrayT) -> ArrayT:\n return Y * (1.0 - Y)\n\n\ndef dtanh(Y: ArrayT) -> ArrayT:\n return 1 - Y ** 2\n\n\ndef gaussian_cdf(ops: Ops, X: FloatsType) -> FloatsType:\n \"\"\"Gaussian CDF for distribution with mean 0 and stdev 1.\"\"\"\n return 0.5 * (1.0 + ops.erf(INV_SQRT2 * X))\n\n\ndef gaussian_pdf(ops: Ops, X: FloatsType) -> FloatsType:\n \"\"\"Gaussian PDF for distribution with mean 0 and stdev 1.\"\"\"\n return INV_SQRT_2PI * ops.xp.exp(-0.5 * X * X)\n"
] | [
[
"numpy.asarray",
"numpy.random.shuffle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
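The `Ops.seq2col` docstring in the row above describes turning an (M, N) sequence into (M, N*(nW*2+1)) by concatenating each row's neighbouring vectors. A plain-NumPy restatement of the nW=1 case may make the layout easier to see; zero vectors stand in at the sequence edges, matching the base implementation's zero-initialised columns. `seq2col_nw1` is an illustrative name, not part of thinc.

import numpy as np

def seq2col_nw1(seq):
    """(M, N) -> (M, 3*N): [left neighbour | row | right neighbour],
    with zeros where no neighbour exists."""
    left = np.zeros_like(seq)
    left[1:] = seq[:-1]   # row i sees row i-1 on its left
    right = np.zeros_like(seq)
    right[:-1] = seq[1:]  # row i sees row i+1 on its right
    return np.hstack([left, seq, right])

X = np.arange(8, dtype="f").reshape(4, 2)
assert seq2col_nw1(X).shape == (4, 6)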
jorisvandenbossche/scipy-lecture-notes | [
"689105f90db641eb1e1f82692f4d8b8492e8245d",
"689105f90db641eb1e1f82692f4d8b8492e8245d",
"689105f90db641eb1e1f82692f4d8b8492e8245d"
] | [
"advanced/image_processing/examples/plot_sharpen.py",
"intro/matplotlib/examples/plot_exercice_2.py",
"packages/3d_plotting/examples/compute_field.py"
] | [
"import scipy\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\n\nl = scipy.misc.lena()\nblurred_l = ndimage.gaussian_filter(l, 3)\n\nfilter_blurred_l = ndimage.gaussian_filter(blurred_l, 1)\n\nalpha = 30\nsharpened = blurred_l + alpha * (blurred_l - filter_blurred_l)\n\nplt.figure(figsize=(12, 4))\n\nplt.subplot(131)\nplt.imshow(l, cmap=plt.cm.gray)\nplt.axis('off')\nplt.subplot(132)\nplt.imshow(blurred_l, cmap=plt.cm.gray)\nplt.axis('off')\nplt.subplot(133)\nplt.imshow(sharpened, cmap=plt.cm.gray)\nplt.axis('off')\n\nplt.show()\n",
"import pylab as pl\nimport numpy as np\n\n# Create a new figure of size 8x6 points, using 100 dots per inch\npl.figure(figsize=(8, 6), dpi=80)\n\n# Create a new subplot from a grid of 1x1\npl.subplot(111)\n\nX = np.linspace(-np.pi, np.pi, 256, endpoint=True)\nC, S = np.cos(X), np.sin(X)\n\n# Plot cosine using blue color with a continuous line of width 1 (pixels)\npl.plot(X, C, color=\"blue\", linewidth=1.0, linestyle=\"-\")\n\n# Plot sine using green color with a continuous line of width 1 (pixels)\npl.plot(X, S, color=\"green\", linewidth=1.0, linestyle=\"-\")\n\n# Set x limits\npl.xlim(-4., 4.)\n\n# Set x ticks\npl.xticks(np.linspace(-4, 4, 9, endpoint=True))\n\n# Set y limits\npl.ylim(-1.0, 1.0)\n\n# Set y ticks\npl.yticks(np.linspace(-1, 1, 5, endpoint=True))\n\n# Show result on screen\npl.show()\n",
"\"\"\"\nA script that computes the magnetic field generated by a pair of Helmoltz\ncoils.\n\"\"\"\n\nimport numpy as np\nfrom scipy import special, linalg\n\n##############################################################################\n# Function to caculate the field of a loop\n\ndef base_vectors(n):\n \"\"\" Returns 3 orthognal base vectors, the first one colinear to n.\n \"\"\"\n # normalize n\n n = n / np.sqrt(np.square(n).sum(axis=-1))\n\n # choose two vectors perpendicular to n\n # choice is arbitrary since the coil is symetric about n\n if abs(n[0]) == 1 :\n l = np.r_[n[2], 0, -n[0]]\n else:\n l = np.r_[0, n[2], -n[1]]\n\n l = l / np.sqrt(np.square(l).sum(axis=-1))\n m = np.cross(n, l)\n return n, l, m\n\n\ndef B_field(r, n, r0, R):\n \"\"\"\n returns the magnetic field from an arbitrary current loop calculated from\n eqns (1) and (2) in Phys Rev A Vol. 35, N 4, pp. 1535-1546; 1987.\n\n Parameters\n ----------\n n is normal vector to the plane of the loop at the center, current\n is oriented by the right-hand-rule.\n r is a position vector where the Bfield is evaluated:\n [x1 y2 z3 ; x2 y2 z2 ; ... ]\n r is in units of d\n r0 is the location of the center of the loop in units of d: [x y z]\n R is the radius of the loop\n\n Returns\n -------\n B is a vector for the B field at point r in inverse units of\n (mu I) / (2 pi d)\n for I in amps and d in meters and mu = 4 pi * 10^-7 we get Tesla\n \"\"\"\n ### Translate the coordinates in the coil's frame\n n, l, m = base_vectors(n)\n\n # transformation matrix coil frame to lab frame\n trans = np.vstack((l, m, n))\n # transformation matrix to lab frame to coil frame\n inv_trans = linalg.inv(trans)\n\n r = r - r0\t #point location from center of coil\n r = np.dot(r, inv_trans) \t #transform vector to coil frame\n\n #### calculate field\n\n # express the coordinates in polar form\n x = r[:, 0]\n y = r[:, 1]\n z = r[:, 2]\n rho = np.sqrt(x**2 + y**2)\n theta = np.arctan(x / y)\n # NaNs are generated where y is zero.\n theta[y == 0] = np.pi / 2\n\n E = special.ellipe((4 * R * rho)/( (R + rho)**2 + z**2))\n K = special.ellipk((4 * R * rho)/( (R + rho)**2 + z**2))\n dist = ((R - rho)**2 + z**2)\n Bz = 1 / np.sqrt((R + rho)**2 + z**2) * (\n K\n + E * (R**2 - rho**2 - z**2) / dist\n )\n Brho = z / (rho*np.sqrt((R + rho)**2 + z**2)) * (\n -K\n + E * (R**2 + rho**2 + z**2)/ dist\n )\n # On the axis of the coil we get a divided by zero here. 
This returns a\n # NaN, where the field is actually zero :\n Brho[dist == 0] = 0\n Brho[rho == 0] = 0\n Bz[dist == 0] = 0\n\n B = np.c_[np.cos(theta)*Brho, np.sin(theta)*Brho, Bz ]\n\n # Rotate the field back in the lab's frame\n B = np.dot(B, trans)\n return B\n\n\n##############################################################################\n# The grid of points on which we want to evaluate the field\nX, Y, Z = np.mgrid[-0.15:0.15:31j, -0.15:0.15:31j, -0.15:0.15:31j]\n# Avoid rounding issues :\nf = 1e4 # this gives the precision we are interested in:\nX = np.round(X * f) / f\nY = np.round(Y * f) / f\nZ = np.round(Z * f) / f\n\n# The (x, y, z) position vector\nr = np.c_[np.ravel(X), np.ravel(Y), np.ravel(Z)]\n\n\n##############################################################################\n# The coil positions\n\n# The center of the coil\nr0 = np.r_[0, 0, 0.1]\n# The normal to the coils\nn = np.r_[0, 0, 1]\n# The radius\nR = 0.1\n\n# Add the mirror image of this coils relatively to the xy plane :\nr0 = np.vstack((r0, -r0 ))\nR = np.r_[R, R]\nn = np.vstack((n, n))\t # Helmoltz like configuration\n\n##############################################################################\n# Calculate field\n# First initialize a container matrix for the field vector :\nB = np.zeros_like(r)\n# Then loop through the different coils and sum the fields :\nfor this_n, this_r0, this_R in zip(n, r0, R):\n this_n = np.array(this_n)\n this_r0 = np.array(this_r0)\n this_R = np.array(this_R)\n B += B_field(r, this_n, this_r0, this_R)\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"scipy.misc.lena",
"scipy.ndimage.gaussian_filter",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.cos",
"numpy.linspace",
"numpy.sin"
],
[
"numpy.square",
"numpy.dot",
"numpy.sqrt",
"numpy.arctan",
"scipy.special.ellipe",
"scipy.special.ellipk",
"numpy.cos",
"numpy.round",
"numpy.sin",
"numpy.zeros_like",
"numpy.cross",
"scipy.linalg.inv",
"numpy.ravel",
"numpy.array",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.19",
"0.18",
"0.12",
"0.17",
"0.16"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
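For orientation, the loop-field entry above can be sanity-checked on the coil axis (rho = 0), where the elliptic-integral expression reduces to the textbook result Bz(z) = mu0 I R^2 / (2 (R^2 + (z - z0)^2)^(3/2)). A minimal standalone sketch, assuming unit current (I = 1 A is not specified in the entry) and reusing the script's geometry (R = 0.1, coil centers at z = ±0.1):

```python
import numpy as np

MU0 = 4 * np.pi * 1e-7  # vacuum permeability [T*m/A]

def on_axis_Bz(z, R, z0, I=1.0):
    """Analytic on-axis field of a single current loop of radius R
    centred at (0, 0, z0), evaluated at height z (all in meters)."""
    return MU0 * I * R**2 / (2.0 * (R**2 + (z - z0)**2) ** 1.5)

R = 0.1                                   # loop radius, as in the entry above
z = np.linspace(-0.15, 0.15, 31)          # same axial extent as the np.mgrid grid
Bz = on_axis_Bz(z, R, +0.1) + on_axis_Bz(z, R, -0.1)  # superpose the two mirror-image loops
print(Bz[len(z) // 2])                    # field at the midpoint, in Tesla
```

Note that the script places its coils a distance 2R apart (centers at z = ±0.1 with R = 0.1), so, as its own comment concedes, this is only a "Helmholtz-like" configuration; a true Helmholtz pair uses a separation equal to R, which maximizes field uniformity at the midpoint.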
DingXiaoH/GSM-SGD | [
"992dd0fb31ee47a79cb0891f4f231707abd0c5c6"
] | [
"utils/engine.py"
] | [
"# encoding: utf-8\n\nimport os\nimport os.path as osp\nimport time\nimport torch\nimport torch.distributed as dist\n\nfrom collections import OrderedDict\nfrom utils.pyt_utils import (\n parse_torch_devices, ensure_dir)\nfrom utils.logger import get_logger\n\nfrom utils.checkpoint import load_model\nfrom utils.misc import save_hdf5, read_hdf5\nimport numpy as np\n\nfrom collections import namedtuple\n\nNamedParamValue = namedtuple('NamedParamValue', ['name', 'value'])\n\n\nclass State(object):\n def __init__(self):\n self.iteration = 0\n self.model = None\n self.optimizer = None\n self.scheduler = None\n self.cfg = None\n\n def register(self, **kwargs):\n for k, v in kwargs.items():\n assert k in ['iteration', 'model', 'optimizer', 'scheduler', 'cfg']\n setattr(self, k, v)\n\n\n\nclass Engine(object):\n def __init__(self):\n self.version = 0.01\n self.state = State()\n self.devices = None\n self.distributed = False\n self.logger = None\n\n if 'WORLD_SIZE' in os.environ:\n self.distributed = int(os.environ['WORLD_SIZE']) >= 1\n\n if self.distributed:\n print('Initialize Engine for distributed training.')\n self.local_rank = 0 # TODO we only use single-machine-multi-gpus\n self.world_size = int(os.environ['WORLD_SIZE'])\n self.world_rank = int(os.environ['RANK'])\n torch.cuda.set_device(self.local_rank)\n dist.init_process_group(backend=\"nccl\", init_method='env://')\n dist.barrier()\n self.devices = [i for i in range(self.world_size)]\n else:\n # todo check non-distributed training\n print('Initialize Engine for non-distributed training.')\n self.world_size = 1\n self.world_rank = 1\n self.devices = parse_torch_devices('0') # TODO correct?\n torch.backends.cudnn.benchmark = True\n\n\n def setup_log(self, name='train', log_dir=None, file_name=None):\n if not self.logger:\n self.logger = get_logger(\n name, log_dir, distributed_rank=0, filename=file_name) #TODO self.args.local_rank=0?\n else:\n self.logger.warning('already exists logger')\n return self.logger\n\n # def inject_default_parser(self):\n # p = self.parser\n # p.add_argument(\n # '-d', '--devices', default='0',\n # help='set data parallel training')\n # p.add_argument(\n # '-c', '--continue', type=extant_file, metavar=\"FILE\",\n # dest=\"continue_fpath\",\n # help='continue from one certain checkpoint')\n # p.add_argument(\n # '--local_rank', default=0, type=int,\n # help='process rank on node')\n\n def register_state(self, **kwargs):\n self.state.register(**kwargs)\n\n def update_iteration(self, iteration):\n self.state.iteration = iteration\n\n def show_variables(self):\n print('---------- show variables -------------')\n for k, v in self.state.model.state_dict().items():\n print(k, v.shape)\n print('--------------------------------------')\n\n def save_hdf5(self, path):\n save_dict = {}\n num_params = 0\n for k, v in self.state.model.state_dict().items():\n key = k\n if k.split('.')[0] == 'module':\n key = k[7:]\n np_array = v.cpu().numpy()\n save_dict[key] = np_array\n num_params += np_array.size\n if self.state.cfg is not None and self.state.cfg.deps is not None:\n save_dict['deps'] = self.state.cfg.deps\n save_hdf5(save_dict, path)\n print('---------------saved {} numpy arrays to {}---------------'.format(len(save_dict), path))\n self.log('num of params in hdf5={}'.format(num_params))\n\n def set_value(self, param, value):\n assert tuple(param.size()) == tuple(value.shape)\n param.data = torch.from_numpy(value).cuda().type(torch.cuda.FloatTensor)\n\n def load_hdf5(self, path):\n hdf5_dict = read_hdf5(path)\n 
assigned_params = 0\n for k, v in self.state.model.named_parameters():\n if k in hdf5_dict:\n print('assign {} from hdf5'.format(k))\n self.set_value(v, hdf5_dict[k])\n else:\n print('param {} not found in hdf5'.format(k))\n for k, v in self.state.model.named_buffers():\n if k in hdf5_dict:\n self.set_value(v, hdf5_dict[k])\n else:\n print('buffer {} not found in hdf5'.format(k))\n assigned_params += 1\n print('Assigned {} params from hdf5: {}'.format(assigned_params, path))\n\n\n\n\n def save_checkpoint(self, path):\n # self.logger.info(\"Saving checkpoint to file {}\".format(path))\n t_start = time.time()\n\n state_dict = {}\n new_state_dict = OrderedDict()\n\n for k, v in self.state.model.state_dict().items():\n key = k\n if k.split('.')[0] == 'module':\n key = k[7:]\n new_state_dict[key] = v\n state_dict['model'] = new_state_dict\n\n if self.state.optimizer:\n state_dict['optimizer'] = self.state.optimizer.state_dict()\n if self.state.scheduler:\n state_dict['scheduler'] = self.state.scheduler.state_dict()\n if self.state.iteration:\n state_dict['iteration'] = self.state.iteration\n\n t_io_begin = time.time()\n try:\n torch.save(state_dict, path)\n except:\n print('save {} failed, continue training'.format(path))\n t_end = time.time()\n\n del state_dict\n del new_state_dict\n\n print('-----------save ckpt to {}----------'.format(path))\n\n # self.logger.info(\n # \"Save checkpoint to file {}, \"\n # \"Time usage:\\n\\tprepare snapshot: {}, IO: {}\".format(\n # path, t_io_begin - t_start, t_end - t_io_begin))\n\n def load_checkpoint(self, weights, just_weights=False):\n\n t_start = time.time()\n\n loaded = torch.load(weights, map_location=torch.device(\"cpu\"))\n\n t_io_end = time.time()\n if \"model\" not in loaded:\n loaded = dict(model=loaded)\n\n self.state.model = load_model(\n self.state.model, loaded['model'], self.logger)\n\n if not just_weights:\n if \"optimizer\" in loaded:\n self.state.optimizer.load_state_dict(loaded['optimizer'])\n if \"iteration\" in loaded:\n self.state.iteration = loaded['iteration']\n if \"scheduler\" in loaded:\n self.state.scheduler.load_state_dict(loaded[\"scheduler\"])\n\n\n del loaded\n\n t_end = time.time()\n self.logger.info(\n \"Load checkpoint from file {}, \"\n \"Time usage:\\n\\tIO: {}, restore snapshot: {}\".format(\n weights, t_io_end - t_start, t_end - t_io_end))\n\n def save_and_link_checkpoint(self, snapshot_dir):\n ensure_dir(snapshot_dir)\n current_iter_checkpoint = osp.join(\n snapshot_dir, 'iter-{}.pth'.format(self.state.iteration))\n self.save_checkpoint(current_iter_checkpoint)\n\n # last_iter_checkpoint = osp.join(\n # snapshot_dir, 'iter-last.pth')\n # link_file(current_iter_checkpoint, last_iter_checkpoint)\n\n # def restore_checkpoint(self):\n # self.load_checkpoint(self.continue_state_object, is_restore=True)\n\n def get_all_conv_kernel_namedvalue_as_list(self):\n result = []\n for k, v in self.state.model.state_dict().items():\n if v.dim() == 4:\n result.append(NamedParamValue(name=k, value=v.cpu().numpy()))\n return result\n\n def get_all_kernel_namedvalue_as_list(self):\n result = []\n for k, v in self.state.model.state_dict().items():\n if v.dim() in [2, 4]:\n result.append(NamedParamValue(name=k, value=v.cpu().numpy()))\n return result\n\n def get_param_value_by_name(self, name):\n state_dict = self.state.model.state_dict()\n if name not in state_dict:\n return None\n else:\n return state_dict[name].cpu().numpy()\n\n def state_values(self):\n result = OrderedDict()\n for k, v in self.state.model.state_dict().items():\n 
result[k] = v.cpu().numpy()\n return result\n\n\n\n\n\n\n\n\n def log(self, msg):\n self.logger.info(msg)\n\n def __exit__(self, type, value, tb):\n del self.state\n\n torch.cuda.empty_cache()\n if type is not None:\n self.logger.warning(\n \"A exception occurred during Engine initialization, \"\n \"give up running process\")\n return False\n\n\n\n def __enter__(self):\n return self\n"
] | [
[
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.cuda.empty_cache",
"torch.distributed.barrier",
"torch.from_numpy",
"torch.device",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
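The engine.py entry above defines a training harness rather than a documented API, but the State/Engine pair suggests an intended workflow: construct the engine as a context manager, register the model and optimizer, then log and checkpoint through it. A minimal usage sketch, assuming a single-GPU non-distributed run (the toy model, optimizer, and file paths here are invented for illustration):

```python
import torch
from utils.engine import Engine  # the module shown in the entry above

with Engine() as engine:                          # __enter__/__exit__ handle cleanup
    model = torch.nn.Linear(10, 2).cuda()         # placeholder model; assumes a CUDA device
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    engine.register_state(model=model, optimizer=opt)
    engine.setup_log(name='train', log_dir='./logs', file_name='train.log')
    for it in range(3):                           # stand-in for a real training loop
        engine.update_iteration(it)
    engine.save_hdf5('./weights.hdf5')            # weights as numpy arrays
    engine.save_and_link_checkpoint('./snapshot') # writes iter-<n>.pth
```

Registering through `register_state` matters because `save_checkpoint` and `save_hdf5` read the model, optimizer, and iteration off `engine.state` rather than taking them as arguments; `setup_log` must come before `save_hdf5`, which logs through `self.logger`.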