Columns: repo_name (string, length 8–130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence)
wlawt/synbiolic
[ "cbe9efc6d2992a9445dcd241a20321db9373e32e" ]
[ "score_test_joey.py" ]
[ "import csv\nimport time\nimport math\nimport numpy as np\nimport warnings\n\nfrom rdkit import Chem, DataStructs\nfrom rdkit.Chem import QED\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim.lr_scheduler import ExponentialLR, StepLR\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport random\n\nfrom tqdm import tqdm, trange\n\nimport threading\n\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom sklearn.externals import joblib\nfrom sklearn import metrics\nfrom sklearn.ensemble import RandomForestRegressor as RFR\nimport pickle\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom azureml.core.model import Model\n\ndef get_fp(smiles):\n fp = []\n processed_indices = []\n invalid_indices = []\n for i in range(len(smiles)):\n mol = smiles[i]\n tmp = np.array(mol2image(mol, n=2048))\n if np.isnan(tmp[0]):\n invalid_indices.append(i)\n else:\n fp.append(tmp)\n processed_indices.append(i)\n return np.array(fp), processed_indices, invalid_indices\n\ndef get_desc(smiles, calc):\n desc = []\n processed_indices = []\n invalid_indices = []\n for i in range(len(smiles)):\n sm = smiles[i]\n try:\n mol = Chem.MolFromSmiles(sm)\n tmp = np.array(calc(mol))\n desc.append(tmp)\n processed_indices.append(i)\n except:\n invalid_indices.append(i)\n\n desc_array = np.array(desc)\n return desc_array, processed_indices, invalid_indices\n\n\ndef normalize_desc(desc_array, desc_mean=None):\n desc_array = np.array(desc_array).reshape(len(desc_array), -1)\n ind = np.zeros(desc_array.shape)\n for i in range(desc_array.shape[0]):\n for j in range(desc_array.shape[1]):\n try:\n if np.isfinite(desc_array[i, j]):\n ind[i, j] = 1\n except:\n pass\n for i in range(desc_array.shape[0]):\n for j in range(desc_array.shape[1]):\n if ind[i, j] == 0:\n desc_array[i, j] = 0\n if desc_mean is None:\n desc_mean = np.mean(desc_array, axis=0)\n for i in range(desc_array.shape[0]):\n for j in range(desc_array.shape[1]):\n if ind[i, j] == 0:\n desc_array[i, j] = desc_mean[j]\n return desc_array, desc_mean\n\n\ndef mol2image(x, n=2048):\n try:\n m = Chem.MolFromSmiles(x)\n fp = Chem.RDKFingerprint(m, maxPath=4, fpSize=n)\n res = np.zeros(len(fp))\n DataStructs.ConvertToNumpyArray(fp, res)\n return res\n except:\n return [np.nan]\n\n\ndef sanitize_smiles(smiles, canonical=True, throw_warning=False):\n \"\"\"\n Takes list of SMILES strings and returns list of their sanitized versions.\n For definition of sanitized SMILES check\n http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol\n Parameters\n ----------\n smiles: list\n list of SMILES strings\n canonical: bool (default True)\n parameter specifying whether SMILES will be converted to canonical\n format\n throw_warning: bool (default False)\n parameter specifying whether warnings will be thrown if a SMILES is\n invalid\n Returns\n -------\n new_smiles: list\n list of SMILES and NaNs if SMILES string is invalid or unsanitized.\n If canonical is True, returns list of canonical SMILES.\n When canonical is True this function is analogous to:\n canonical_smiles(smiles, sanitize=True).\n \"\"\"\n new_smiles = []\n for sm in smiles:\n try:\n if canonical:\n new_smiles.append(Chem.MolToSmiles(Chem.MolFromSmiles(sm, sanitize=True)))\n else:\n new_smiles.append(sm)\n except:\n if throw_warning:\n warnings.warn('Unsanitized SMILES string: ' + sm, UserWarning)\n new_smiles.append('')\n return new_smiles\n\n\ndef canonical_smiles(smiles, 
sanitize=True, throw_warning=False):\n \"\"\"\n Takes list of SMILES strings and returns list of their canonical SMILES.\n Parameters\n ----------\n smiles: list\n list of SMILES strings to convert into canonical format\n sanitize: bool (default True)\n parameter specifying whether to sanitize SMILES or not.\n For definition of sanitized SMILES check\n http://www.rdkit.org/docs/api/rdkit.Chem.rdmolops-module.html#SanitizeMol\n throw_warning: bool (default False)\n parameter specifying whether warnings will be thrown if a SMILES is\n invalid\n Returns\n -------\n new_smiles: list\n list of canonical SMILES and NaNs if SMILES string is invalid or\n unsanitized (when sanitize is True)\n When sanitize is True the function is analogous to:\n sanitize_smiles(smiles, canonical=True).\n \"\"\"\n new_smiles = []\n for sm in smiles:\n try:\n mol = Chem.MolFromSmiles(sm, sanitize=sanitize)\n new_smiles.append(Chem.MolToSmiles(mol))\n except:\n if throw_warning:\n warnings.warn(sm + ' can not be canonized: invalid '\n 'SMILES string!', UserWarning)\n new_smiles.append('')\n return new_smiles\n\n\ndef save_smi_to_file(filename, smiles, unique=True):\n \"\"\"\n Takes path to file and list of SMILES strings and writes SMILES to the specified file.\n Args:\n filename (str): path to the file\n smiles (list): list of SMILES strings\n unique (bool): parameter specifying whether to write only unique copies or not.\n Output:\n success (bool): defines whether operation was successfully completed or not.\n \"\"\"\n if unique:\n smiles = list(set(smiles))\n else:\n smiles = list(smiles)\n f = open(filename, 'w')\n for mol in smiles:\n f.writelines([mol, '\\n'])\n f.close()\n return f.closed\n\n\ndef read_smi_file(filename, unique=True, add_start_end_tokens=False):\n \"\"\"\n Reads SMILES from file. 
File must contain one SMILES string per line\n with \\n token in the end of the line.\n Args:\n filename (str): path to the file\n unique (bool): return only unique SMILES\n Returns:\n smiles (list): list of SMILES strings from specified file.\n success (bool): defines whether operation was successfully completed or not.\n If 'unique=True' this list contains only unique copies.\n \"\"\"\n f = open(filename, 'r')\n molecules = []\n for line in f:\n if add_start_end_tokens:\n molecules.append('<' + line[:-1] + '>')\n else:\n molecules.append(line[:-1])\n if unique:\n molecules = list(set(molecules))\n else:\n molecules = list(molecules)\n f.close()\n return molecules, f.closed\n\n\ndef tokenize(smiles, tokens=None):\n \"\"\"\n Returns list of unique tokens, token-2-index dictionary and number of\n unique tokens from the list of SMILES\n Parameters\n ----------\n smiles: list\n list of SMILES strings to tokenize.\n tokens: list, str (default None)\n list of unique tokens\n Returns\n -------\n tokens: list\n list of unique tokens/SMILES alphabet.\n token2idx: dict\n dictionary mapping token to its index.\n num_tokens: int\n number of unique tokens.\n \"\"\"\n if tokens is None:\n tokens = list(set(''.join(smiles)))\n tokens = list(np.sort(tokens))\n tokens = ''.join(tokens)\n token2idx = dict((token, i) for i, token in enumerate(tokens))\n num_tokens = len(tokens)\n return tokens, token2idx, num_tokens\n\n\ndef time_since(since):\n s = time.time() - since\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\ndef cross_validation_split(x, y, n_folds=5, split='random', folds=None):\n assert(len(x) == len(y))\n x = np.array(x)\n y = np.array(y)\n if split not in ['random', 'stratified', 'fixed']:\n raise ValueError('Invalid value for argument \\'split\\': '\n 'must be either \\'random\\', \\'stratified\\' '\n 'or \\'fixed\\'')\n if split == 'random':\n cv_split = KFold(n_splits=n_folds, shuffle=True)\n folds = list(cv_split.split(x, y))\n elif split == 'stratified':\n cv_split = StratifiedKFold(n_splits=n_folds, shuffle=True)\n folds = list(cv_split.split(x, y))\n elif split == 'fixed' and folds is None:\n raise TypeError(\n 'Invalid type for argument \\'folds\\': found None, but must be list')\n cross_val_data = []\n cross_val_labels = []\n if len(folds) == n_folds:\n for fold in folds:\n cross_val_data.append(x[fold[1]])\n cross_val_labels.append(y[fold[1]])\n elif len(folds) == len(x) and np.max(folds) == n_folds:\n for f in range(n_folds):\n left = np.where(folds == f)[0].min()\n right = np.where(folds == f)[0].max()\n cross_val_data.append(x[left:right + 1])\n cross_val_labels.append(y[left:right + 1])\n\n return cross_val_data, cross_val_labels\n\n\ndef read_object_property_file(path, delimiter=',', cols_to_read=[0, 1],\n keep_header=False):\n f = open(path, 'r')\n reader = csv.reader(f, delimiter=delimiter)\n data_full = np.array(list(reader))\n if keep_header:\n start_position = 0\n else:\n start_position = 1\n assert len(data_full) > start_position\n data = [[] for _ in range(len(cols_to_read))]\n for i in range(len(cols_to_read)):\n col = cols_to_read[i]\n data[i] = data_full[start_position:, col]\n f.close()\n if len(cols_to_read) == 1:\n data = data[0]\n return data\n\n\"\"\"### **Data Process**\"\"\"\n\nclass GeneratorData(object):\n \"\"\"\n Docstring coming soon...\n \"\"\"\n def __init__(self, training_data_path, tokens=None, start_token='<', \n end_token='>', max_len=120, use_cuda=None, **kwargs):\n \"\"\"\n Constructor for the GeneratorData object.\n 
Parameters\n ----------\n training_data_path: str\n path to file with training dataset. Training dataset must contain\n a column with training strings. The file also may contain other\n columns.\n tokens: list (default None)\n list of characters specifying the language alphabet. Of left\n unspecified, tokens will be extracted from data automatically.\n start_token: str (default '<')\n special character that will be added to the beginning of every\n sequence and encode the sequence start.\n end_token: str (default '>')\n special character that will be added to the end of every\n sequence and encode the sequence end.\n max_len: int (default 120)\n maximum allowed length of the sequences. All sequences longer than\n max_len will be excluded from the training data.\n use_cuda: bool (default None)\n parameter specifying if GPU is used for computations. If left\n unspecified, GPU will be used if available\n kwargs: additional positional arguments\n These include cols_to_read (list, default [0]) specifying which\n column in the file with training data contains training sequences\n and delimiter (str, default ',') that will be used to separate\n columns if there are multiple of them in the file.\n \"\"\"\n super(GeneratorData, self).__init__()\n\n if 'cols_to_read' not in kwargs:\n kwargs['cols_to_read'] = []\n\n data = read_object_property_file(training_data_path,\n **kwargs)\n self.start_token = start_token\n self.end_token = end_token\n self.file = []\n for i in range(len(data)):\n if len(data[i]) <= max_len:\n self.file.append(self.start_token + data[i] + self.end_token) \n self.file_len = len(self.file)\n self.all_characters, self.char2idx, \\\n self.n_characters = tokenize(self.file, tokens)\n self.use_cuda = use_cuda\n if self.use_cuda is None:\n self.use_cuda = torch.cuda.is_available()\n\n def load_dictionary(self, tokens, char2idx):\n self.all_characters = tokens\n self.char2idx = char2idx\n self.n_characters = len(tokens)\n\n def random_chunk(self):\n \"\"\"\n Samples random SMILES string from generator training data set.\n Returns:\n random_smiles (str).\n \"\"\"\n index = random.randint(0, self.file_len-1)\n return self.file[index]\n\n def char_tensor(self, string):\n \"\"\"\n Converts SMILES into tensor of indices wrapped into torch.autograd.Variable.\n Args:\n string (str): input SMILES string\n Returns:\n tokenized_string (torch.autograd.Variable(torch.tensor))\n \"\"\"\n tensor = torch.zeros(len(string)).long()\n for c in range(len(string)):\n tensor[c] = self.all_characters.index(string[c])\n if self.use_cuda:\n return torch.tensor(tensor).cuda()\n else:\n return torch.tensor(tensor)\n\n def random_training_set(self, smiles_augmentation):\n chunk = self.random_chunk()\n if smiles_augmentation is not None:\n chunk = '<' + smiles_augmentation.randomize_smiles(chunk[1:-1]) + '>'\n inp = self.char_tensor(chunk[:-1])\n target = self.char_tensor(chunk[1:])\n return inp, target\n\n def read_sdf_file(self, path, fields_to_read):\n raise NotImplementedError\n \n def update_data(self, path):\n self.file, success = read_smi_file(path, unique=True)\n self.file_len = len(self.file)\n assert success\n\n\nclass PredictorData(object):\n def __init__(self, path, delimiter=',', cols=[0, 1], get_features=None,\n has_label=True, labels_start=1, **kwargs):\n super(PredictorData, self).__init__()\n data = read_object_property_file(path, delimiter, cols_to_read=cols)\n if has_label:\n self.objects = np.array(data[:labels_start]).reshape(-1)\n self.y = np.array(data[labels_start:], dtype='float32')\n 
self.y = self.y.reshape(-1, len(cols) - labels_start)\n if self.y.shape[1] == 1:\n self.y = self.y.reshape(-1)\n else:\n self.objects = np.array(data[:labels_start]).reshape(-1)\n self.y = [None]*len(self.object)\n assert len(self.objects) == len(self.y)\n if get_features is not None:\n self.x, processed_indices, invalid_indices = \\\n get_features(self.objects, **kwargs)\n self.invalid_objects = self.objects[invalid_indices]\n self.objects = self.objects[processed_indices]\n self.invalid_y = self.y[invalid_indices]\n self.y = self.y[processed_indices]\n else:\n self.x = self.objects\n self.invalid_objects = None\n self.invalid_y = None\n self.binary_y = None\n\n def binarize(self, threshold):\n self.binary_y = np.array(self.y >= threshold, dtype='int32')\n\n\"\"\"### **Smiles Enumerator**\"\"\"\n\nclass Iterator(object):\n \"\"\"Abstract base class for data iterators.\n # Arguments\n n: Integer, total number of samples in the dataset to loop over.\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seeding for data shuffling.\n \"\"\"\n\n def __init__(self, n, batch_size, shuffle, seed):\n self.n = n\n self.batch_size = batch_size\n self.shuffle = shuffle\n self.batch_index = 0\n self.total_batches_seen = 0\n self.lock = threading.Lock()\n self.index_generator = self._flow_index(n, batch_size, shuffle, seed)\n if n < batch_size:\n raise ValueError('Input data length is shorter than batch_size\\nAdjust batch_size')\n\n def reset(self):\n self.batch_index = 0\n\n def _flow_index(self, n, batch_size=32, shuffle=False, seed=None):\n # Ensure self.batch_index is 0.\n self.reset()\n while 1:\n if seed is not None:\n np.random.seed(seed + self.total_batches_seen)\n if self.batch_index == 0:\n index_array = np.arange(n)\n if shuffle:\n index_array = np.random.permutation(n)\n\n current_index = (self.batch_index * batch_size) % n\n if n > current_index + batch_size:\n current_batch_size = batch_size\n self.batch_index += 1\n else:\n current_batch_size = n - current_index\n self.batch_index = 0\n self.total_batches_seen += 1\n yield (index_array[current_index: current_index + current_batch_size],\n current_index, current_batch_size)\n\n def __iter__(self):\n # Needed if we want to do something like:\n # for x, y in data_gen.flow(...):\n return self\n\n def __next__(self, *args, **kwargs):\n return self.next(*args, **kwargs)\n\n\nclass SmilesIterator(Iterator):\n \"\"\"Iterator yielding data from a SMILES array.\n # Arguments\n x: Numpy array of SMILES input data.\n y: Numpy array of targets data.\n smiles_data_generator: Instance of `SmilesEnumerator`\n to use for random SMILES generation.\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n dtype: dtype to use for returned batch. Set to keras.backend.floatx if using Keras\n \"\"\"\n\n def __init__(self, x, y, smiles_data_generator,\n batch_size=32, shuffle=False, seed=None,\n dtype=np.float32\n ):\n if y is not None and len(x) != len(y):\n raise ValueError('X (images tensor) and y (labels) '\n 'should have the same length. 
'\n 'Found: X.shape = %s, y.shape = %s' %\n (np.asarray(x).shape, np.asarray(y).shape))\n\n self.x = np.asarray(x)\n\n if y is not None:\n self.y = np.asarray(y)\n else:\n self.y = None\n self.smiles_data_generator = smiles_data_generator\n self.dtype = dtype\n super(SmilesIterator, self).__init__(x.shape[0], batch_size, shuffle, seed)\n\n def next(self):\n \"\"\"For python 2.x.\n # Returns\n The next batch.\n \"\"\"\n # Keeps under lock only the mechanism which advances\n # the indexing of each batch.\n with self.lock:\n index_array, current_index, current_batch_size = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n batch_x = np.zeros(\n tuple([current_batch_size] + [self.smiles_data_generator.pad, self.smiles_data_generator._charlen]),\n dtype=self.dtype)\n for i, j in enumerate(index_array):\n smiles = self.x[j:j + 1]\n x = self.smiles_data_generator.transform(smiles)\n batch_x[i] = x\n\n if self.y is None:\n return batch_x\n batch_y = self.y[index_array]\n return batch_x, batch_y\n\n\nclass SmilesEnumerator(object):\n \"\"\"SMILES Enumerator, vectorizer and devectorizer\n #Arguments\n charset: string containing the characters for the vectorization\n can also be generated via the .fit() method\n pad: Length of the vectorization\n leftpad: Add spaces to the left of the SMILES\n isomericSmiles: Generate SMILES containing information about stereogenic centers\n enum: Enumerate the SMILES during transform\n canonical: use canonical SMILES during transform (overrides enum)\n \"\"\"\n\n def __init__(self, charset='@C)(=cOn1S2/H[N]\\\\', pad=120, leftpad=True, isomericSmiles=True, enum=True,\n canonical=False):\n self._charset = None\n self.charset = charset\n self.pad = pad\n self.leftpad = leftpad\n self.isomericSmiles = isomericSmiles\n self.enumerate = enum\n self.canonical = canonical\n\n @property\n def charset(self):\n return self._charset\n\n @charset.setter\n def charset(self, charset):\n self._charset = charset\n self._charlen = len(charset)\n self._char_to_int = dict((c, i) for i, c in enumerate(charset))\n self._int_to_char = dict((i, c) for i, c in enumerate(charset))\n\n def fit(self, smiles, extra_chars=[], extra_pad=5):\n \"\"\"Performs extraction of the charset and length of a SMILES datasets and sets self.pad and self.charset\n #Arguments\n smiles: Numpy array or Pandas series containing smiles as strings\n extra_chars: List of extra chars to add to the charset (e.g. 
\"\\\\\\\\\" when \"/\" is present)\n extra_pad: Extra padding to add before or after the SMILES vectorization\n \"\"\"\n charset = set(\"\".join(list(smiles)))\n self.charset = \"\".join(charset.union(set(extra_chars)))\n self.pad = max([len(smile) for smile in smiles]) + extra_pad\n\n def randomize_smiles(self, smiles):\n \"\"\"Perform a randomization of a SMILES string\n must be RDKit sanitizable\"\"\"\n m = Chem.MolFromSmiles(smiles)\n ans = list(range(m.GetNumAtoms()))\n np.random.shuffle(ans)\n nm = Chem.RenumberAtoms(m, ans)\n return Chem.MolToSmiles(nm, canonical=self.canonical, isomericSmiles=self.isomericSmiles)\n\n def transform(self, smiles):\n \"\"\"Perform an enumeration (randomization) and vectorization of a Numpy array of smiles strings\n #Arguments\n smiles: Numpy array or Pandas series containing smiles as strings\n \"\"\"\n one_hot = np.zeros((smiles.shape[0], self.pad, self._charlen), dtype=np.int8)\n\n for i, ss in enumerate(smiles):\n if self.enumerate: ss = self.randomize_smiles(ss)\n for j, c in enumerate(ss):\n one_hot[i, j, self._char_to_int[c]] = 1\n return one_hot\n\n def reverse_transform(self, vect):\n \"\"\" Performs a conversion of a vectorized SMILES to a smiles strings\n charset must be the same as used for vectorization.\n #Arguments\n vect: Numpy array of vectorized SMILES.\n \"\"\"\n smiles = []\n for v in vect:\n # mask v\n v = v[v.sum(axis=1) == 1]\n # Find one hot encoded index with argmax, translate to char and join to string\n smile = \"\".join(self._int_to_char[i] for i in v.argmax(axis=1))\n smiles.append(smile)\n return np.array(smiles)\n\n\nif __name__ == \"__main__\":\n smiles = np.array([\"CCC(=O)O[C@@]1(CC[NH+](C[C@H]1CC=C)C)c2ccccc2\",\n \"CCC[S@@](=O)c1ccc2c(c1)[nH]/c(=N/C(=O)OC)/[nH]2\"] * 10\n )\n # Test canonical SMILES vectorization\n sm_en = SmilesEnumerator(canonical=True, enum=False)\n sm_en.fit(smiles, extra_chars=[\"\\\\\"])\n v = sm_en.transform(smiles)\n transformed = sm_en.reverse_transform(v)\n if len(set(transformed)) > 2: print(\"Too many different canonical SMILES generated\")\n\n # Test enumeration\n sm_en.canonical = False\n sm_en.enumerate = True\n v2 = sm_en.transform(smiles)\n transformed = sm_en.reverse_transform(v2)\n if len(set(transformed)) < 3: print(\"Too few enumerated SMILES generated\")\n\n # Reconstruction\n reconstructed = sm_en.reverse_transform(v[0:5])\n for i, smile in enumerate(reconstructed):\n if smile != smiles[i]:\n print(\"Error in reconstruction %s %s\" % (smile, smiles[i]))\n break\n\n # test Pandas\n import pandas as pd\n\n df = pd.DataFrame(smiles)\n v = sm_en.transform(df[0])\n if v.shape != (20, 52, 18): print(\"Possible error in pandas use\")\n\n # BUG, when batchsize > x.shape[0], then it only returns x.shape[0]!\n # Test batch generation\n sm_it = SmilesIterator(smiles, np.array([1, 2] * 10), sm_en, batch_size=10, shuffle=True)\n X, y = sm_it.next()\n if sum(y == 1) - sum(y == 2) > 1:\n print(\"Unbalanced generation of batches\")\n if len(X) != 10: print(\"Error in batchsize generation\")\n\n\"\"\"### **StackRNN Code**\"\"\"\n\nclass StackAugmentedRNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, layer_type='GRU',\n n_layers=1, is_bidirectional=False, has_stack=False,\n stack_width=None, stack_depth=None, use_cuda=None,\n optimizer_instance=torch.optim.Adadelta, lr=0.01):\n \"\"\"\n Constructor for the StackAugmentedRNN object.\n Parameters\n ----------\n input_size: int\n number of characters in the alphabet\n hidden_size: int\n size of the RNN layer(s)\n 
output_size: int\n again number of characters in the alphabet\n layer_type: str (default 'GRU')\n type of the RNN layer to be used. Could be either 'LSTM' or 'GRU'.\n n_layers: int (default 1)\n number of RNN layers\n is_bidirectional: bool (default False)\n parameter specifying if RNN is bidirectional\n has_stack: bool (default False)\n parameter specifying if augmented memory stack is used\n stack_width: int (default None)\n if has_stack is True then this parameter defines width of the\n augmented stack memory\n stack_depth: int (default None)\n if has_stack is True then this parameter define depth of the augmented\n stack memory. Hint: no need fo stack depth to be larger than the\n length of the longest sequence you plan to generate\n use_cuda: bool (default None)\n parameter specifying if GPU is used for computations. If left\n unspecified, GPU will be used if available\n optimizer_instance: torch.optim object (default torch.optim.Adadelta)\n optimizer to be used for training\n lr: float (default 0.01)\n learning rate for the optimizer\n \"\"\"\n super(StackAugmentedRNN, self).__init__()\n \n if layer_type not in ['GRU', 'LSTM']:\n raise InvalidArgumentError('Layer type must be GRU or LSTM')\n self.layer_type = layer_type\n self.is_bidirectional = is_bidirectional\n if self.is_bidirectional:\n self.num_dir = 2\n else:\n self.num_dir = 1\n if layer_type == 'LSTM':\n self.has_cell = True\n else:\n self.has_cell = False\n self.has_stack = has_stack\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.output_size = output_size\n if self.has_stack:\n self.stack_width = stack_width\n self.stack_depth = stack_depth\n\n self.use_cuda = use_cuda\n if self.use_cuda is None:\n self.use_cuda = torch.cuda.is_available()\n\n self.n_layers = n_layers\n \n if self.has_stack:\n self.stack_controls_layer = nn.Linear(in_features=self.hidden_size *\n self.num_dir,\n out_features=3)\n\n self.stack_input_layer = nn.Linear(in_features=self.hidden_size *\n self.num_dir,\n out_features=self.stack_width)\n\n self.encoder = nn.Embedding(input_size, hidden_size)\n if self.has_stack:\n rnn_input_size = hidden_size + stack_width\n else:\n rnn_input_size = hidden_size\n if self.layer_type == 'LSTM':\n self.rnn = nn.LSTM(rnn_input_size, hidden_size, n_layers,\n bidirectional=self.is_bidirectional)\n self.decoder = nn.Linear(hidden_size * self.num_dir, output_size)\n elif self.layer_type == 'GRU':\n self.rnn = nn.GRU(rnn_input_size, hidden_size, n_layers,\n bidirectional=self.is_bidirectional)\n self.decoder = nn.Linear(hidden_size * self.num_dir, output_size)\n self.log_softmax = torch.nn.LogSoftmax(dim=1)\n \n if self.use_cuda:\n self = self.cuda()\n self.criterion = nn.CrossEntropyLoss()\n self.lr = lr\n self.optimizer_instance = optimizer_instance\n self.optimizer = self.optimizer_instance(self.parameters(), lr=lr,\n weight_decay=0.00001)\n \n def load_model(self, path):\n \"\"\"\n Loads pretrained parameters from the checkpoint into the model.\n Parameters\n ----------\n path: str\n path to the checkpoint file model will be loaded from.\n \"\"\"\n weights = torch.load(path)\n self.load_state_dict(weights)\n\n def save_model(self, path):\n \"\"\"\n Saves model parameters into the checkpoint file.\n Parameters\n ----------\n path: str\n path to the checkpoint file model will be saved to.\n \"\"\"\n torch.save(self.state_dict(), path)\n\n def change_lr(self, new_lr):\n \"\"\"\n Updates learning rate of the optimizer.\n Parameters\n ----------\n new_lr: float\n new learning rate value\n \"\"\"\n 
self.optimizer = self.optimizer_instance(self.parameters(), lr=new_lr)\n self.lr = new_lr\n\n def forward(self, inp, hidden, stack):\n \"\"\"\n Forward step of the model. Generates probability of the next character\n given the prefix.\n Parameters\n ----------\n inp: torch.tensor\n input tensor that contains prefix string indices\n hidden: torch.tensor or tuple(torch.tensor, torch.tensor)\n previous hidden state of the model. If layer_type is 'LSTM',\n then hidden is a tuple of hidden state and cell state, otherwise\n hidden is torch.tensor\n stack: torch.tensor\n previous state of the augmented memory stack\n Returns\n -------\n output: torch.tensor\n tensor with non-normalized probabilities of the next character\n next_hidden: torch.tensor or tuple(torch.tensor, torch.tensor)\n next hidden state of the model. If layer_type is 'LSTM',\n then next_hidden is a tuple of hidden state and cell state,\n otherwise next_hidden is torch.tensor\n next_stack: torch.tensor\n next state of the augmented memory stack\n \"\"\"\n inp = self.encoder(inp.view(1, -1))\n if self.has_stack:\n if self.has_cell:\n hidden_ = hidden[0]\n else:\n hidden_ = hidden\n if self.is_bidirectional:\n hidden_2_stack = torch.cat((hidden_[0], hidden_[1]), dim=1)\n else:\n hidden_2_stack = hidden_.squeeze(0)\n stack_controls = self.stack_controls_layer(hidden_2_stack)\n stack_controls = F.softmax(stack_controls, dim=1)\n stack_input = self.stack_input_layer(hidden_2_stack.unsqueeze(0))\n stack_input = torch.tanh(stack_input)\n stack = self.stack_augmentation(stack_input.permute(1, 0, 2),\n stack, stack_controls)\n stack_top = stack[:, 0, :].unsqueeze(0)\n inp = torch.cat((inp, stack_top), dim=2)\n output, next_hidden = self.rnn(inp.view(1, 1, -1), hidden)\n output = self.decoder(output.view(1, -1))\n return output, next_hidden, stack\n\n def stack_augmentation(self, input_val, prev_stack, controls):\n \"\"\"\n Augmentation of the tensor into the stack. For more details see\n https://arxiv.org/abs/1503.01007\n Parameters\n ----------\n input_val: torch.tensor\n tensor to be added to stack\n prev_stack: torch.tensor\n previous stack state\n controls: torch.tensor\n predicted probabilities for each operation in the stack, i.e\n PUSH, POP and NO_OP. Again, see https://arxiv.org/abs/1503.01007\n Returns\n -------\n new_stack: torch.tensor\n new stack state\n \"\"\"\n batch_size = prev_stack.size(0)\n\n controls = controls.view(-1, 3, 1, 1)\n zeros_at_the_bottom = torch.zeros(batch_size, 1, self.stack_width)\n if self.use_cuda:\n zeros_at_the_bottom = Variable(zeros_at_the_bottom.cuda())\n else:\n zeros_at_the_bottom = Variable(zeros_at_the_bottom)\n a_push, a_pop, a_no_op = controls[:, 0], controls[:, 1], controls[:, 2]\n stack_down = torch.cat((prev_stack[:, 1:], zeros_at_the_bottom), dim=1)\n stack_up = torch.cat((input_val, prev_stack[:, :-1]), dim=1)\n new_stack = a_no_op * prev_stack + a_push * stack_up + a_pop * stack_down\n return new_stack\n\n def init_hidden(self):\n \"\"\"\n Initialization of the hidden state of RNN.\n Returns\n -------\n hidden: torch.tensor\n tensor filled with zeros of an appropriate size (taking into\n account number of RNN layers and directions)\n \"\"\"\n if self.use_cuda:\n return Variable(torch.zeros(self.n_layers * self.num_dir, 1,\n self.hidden_size).cuda())\n else:\n return Variable(torch.zeros(self.n_layers * self.num_dir, 1,\n self.hidden_size))\n\n def init_cell(self):\n \"\"\"\n Initialization of the cell state of LSTM. 
Only used when layers_type is\n 'LSTM'\n Returns\n -------\n cell: torch.tensor\n tensor filled with zeros of an appropriate size (taking into\n account number of RNN layers and directions)\n \"\"\"\n if self.use_cuda:\n return Variable(torch.zeros(self.n_layers * self.num_dir, 1,\n self.hidden_size).cuda())\n else:\n return Variable(torch.zeros(self.n_layers * self.num_dir, 1,\n self.hidden_size))\n\n def init_stack(self):\n \"\"\"\n Initialization of the stack state. Only used when has_stack is True\n Returns\n -------\n stack: torch.tensor\n tensor filled with zeros\n \"\"\"\n result = torch.zeros(1, self.stack_depth, self.stack_width)\n if self.use_cuda:\n return Variable(result.cuda())\n else:\n return Variable(result)\n\n def train_step(self, inp, target):\n \"\"\"\n One train step, i.e. forward-backward and parameters update, for\n a single training example.\n Parameters\n ----------\n inp: torch.tensor\n tokenized training string from position 0 to position (seq_len - 1)\n target:\n tokenized training string from position 1 to position seq_len\n Returns\n -------\n loss: float\n mean value of the loss function (averaged through the sequence\n length)\n \"\"\"\n hidden = self.init_hidden()\n if self.has_cell:\n cell = self.init_cell()\n hidden = (hidden, cell)\n if self.has_stack:\n stack = self.init_stack()\n else:\n stack = None\n self.optimizer.zero_grad()\n loss = 0\n for c in range(len(inp)):\n output, hidden, stack = self(inp[c], hidden, stack)\n loss += self.criterion(output, target[c].unsqueeze(0))\n\n loss.backward()\n self.optimizer.step()\n\n return loss.item() / len(inp)\n \n def evaluate(self, data, prime_str='<', end_token='>', predict_len=100):\n \"\"\"\n Generates new string from the model distribution.\n Parameters\n ----------\n data: object of type GeneratorData\n stores information about the generator data format such alphabet, etc\n prime_str: str (default '<')\n prime string that will be used as prefix. Deafult value is just the\n START_TOKEN\n end_token: str (default '>')\n when end_token is sampled from the model distribution,\n the generation of a new example is finished\n predict_len: int (default 100)\n maximum length of the string to be generated. If the end_token is\n not sampled, the generation will be aborted when the length of the\n generated sequence is equal to predict_len\n Returns\n -------\n new_sample: str\n Newly generated sample from the model distribution.\n \"\"\"\n hidden = self.init_hidden()\n if self.has_cell:\n cell = self.init_cell()\n hidden = (hidden, cell)\n if self.has_stack:\n stack = self.init_stack()\n else:\n stack = None\n prime_input = data.char_tensor(prime_str)\n new_sample = prime_str\n\n # Use priming string to \"build up\" hidden state\n for p in range(len(prime_str)-1):\n _, hidden, stack = self.forward(prime_input[p], hidden, stack)\n inp = prime_input[-1]\n\n for p in range(predict_len):\n output, hidden, stack = self.forward(inp, hidden, stack)\n\n # Sample from the network as a multinomial distribution\n probs = torch.softmax(output, dim=1)\n top_i = torch.multinomial(probs.view(-1), 1)[0].cpu().numpy()\n\n # Add predicted character to string and use as next input\n predicted_char = data.all_characters[top_i]\n new_sample += predicted_char\n inp = data.char_tensor(predicted_char)\n if predicted_char == end_token:\n break\n\n return new_sample\n\n def fit(self, data, n_iterations, all_losses=[], print_every=100,\n plot_every=10, augment=False):\n \"\"\"\n This methods fits the parameters of the model. 
Training is performed to\n minimize the cross-entropy loss when predicting the next character\n given the prefix.\n Parameters\n ----------\n data: object of type GeneratorData\n stores information about the generator data format such alphabet, etc\n n_iterations: int\n how many iterations of training will be performed\n all_losses: list (default [])\n list to store the values of the loss function\n print_every: int (default 100)\n feedback will be printed to std_out once every print_every\n iterations of training\n plot_every: int (default 10)\n value of the loss function will be appended to all_losses once every\n plot_every iterations of training\n augment: bool (default False)\n parameter specifying if SMILES enumeration will be used. For mode\n details on SMILES enumeration see https://arxiv.org/abs/1703.07076\n Returns\n -------\n all_losses: list\n list that stores the values of the loss function (learning curve)\n \"\"\"\n start = time.time()\n loss_avg = 0\n\n if augment:\n smiles_augmentation = SmilesEnumerator()\n else:\n smiles_augmentation = None\n\n for epoch in trange(1, n_iterations + 1, desc='Training in progress...'):\n inp, target = data.random_training_set(smiles_augmentation)\n loss = self.train_step(inp, target)\n loss_avg += loss\n\n if epoch % print_every == 0:\n print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch,\n epoch / n_iterations * 100, loss)\n )\n print(self.evaluate(data=data, prime_str = '<',\n predict_len=100), '\\n')\n\n if epoch % plot_every == 0:\n all_losses.append(loss_avg / plot_every)\n loss_avg = 0\n return all_losses\n\n\"\"\"### **Predictor**\"\"\"\n\nclass VanillaQSAR(object):\n def __init__(self, model_instance=None, model_params=None,\n model_type='classifier', ensemble_size=5, normalization=False):\n super(VanillaQSAR, self).__init__()\n self.model_instance = model_instance\n self.model_params = model_params\n self.ensemble_size = ensemble_size\n self.model = []\n self.normalization = normalization\n if model_type not in ['classifier', 'regressor']:\n raise InvalidArgumentError(\"model type must be either\"\n \"classifier or regressor\")\n self.model_type = model_type\n if isinstance(self.model_instance, list):\n assert(len(self.model_instance) == self.ensemble_size)\n assert(isinstance(self.model_params, list))\n assert(len(self.model_params) == self.ensemble_size)\n for i in range(self.ensemble_size):\n self.model.append(self.model_instance[i](**model_params[i]))\n else:\n for _ in range(self.ensemble_size):\n self.model.append(self.model_instance(**model_params))\n if self.normalization:\n self.desc_mean = [0]*self.ensemble_size\n self.metrics_type = None\n\n def fit_model(self, data, cv_split='stratified'):\n eval_metrics = []\n x = data.x\n if self.model_type == 'classifier' and data.binary_y is not None:\n y = data.binary_y\n else:\n y = data.y\n cross_val_data, cross_val_labels = cross_validation_split(x=x, y=y,\n split=cv_split,\n n_folds=self.ensemble_size)\n for i in range(self.ensemble_size):\n train_x = np.concatenate(cross_val_data[:i] +\n cross_val_data[(i + 1):])\n test_x = cross_val_data[i]\n train_y = np.concatenate(cross_val_labels[:i] +\n cross_val_labels[(i + 1):])\n test_y = cross_val_labels[i]\n if self.normalization:\n train_x, desc_mean = normalize_desc(train_x)\n self.desc_mean[i] = desc_mean\n test_x, _ = normalize_desc(test_x, desc_mean)\n self.model[i].fit(train_x, train_y.ravel())\n predicted = self.model[i].predict(test_x)\n if self.model_type == 'classifier':\n eval_metrics.append(metrics.f1_score(test_y, 
predicted))\n self.metrics_type = 'F1 score'\n elif self.model_type == 'regressor':\n r2 = metrics.r2_score(test_y, predicted)\n eval_metrics.append(r2)\n self.metrics_type = 'R^2 score'\n else:\n raise RuntimeError()\n return eval_metrics, self.metrics_type\n\n def load_model(self, path):\n # TODO: add iterable path object instead of static path \n m = joblib.load(path)\n if self.normalization:\n arr = np.load(path + 'desc_mean.npy')\n self.desc_mean = arr\n\n def save_model(self, path):\n joblib.dump(self.model, path + '.pkl')\n if self.normalization:\n np.save(path + 'desc_mean.npy', self.desc_mean)\n\n def predict(self, objects=None, average=True, get_features=None,\n **kwargs):\n objects = np.array(objects)\n invalid_objects = []\n processed_objects = []\n if get_features is not None:\n x, processed_indices, invalid_indices = get_features(objects,\n **kwargs)\n processed_objects = objects[processed_indices]\n invalid_objects = objects[invalid_indices]\n else:\n x = objects\n if len(x) == 0:\n processed_objects = []\n prediction = []\n invalid_objects = objects\n else:\n prediction = []\n for i in range(self.ensemble_size):\n m = self.model[i]\n if self.normalization:\n x, _ = normalize_desc(x, self.desc_mean[i])\n prediction.append(m.predict(x))\n prediction = np.array(prediction)\n if average:\n prediction = prediction.mean(axis=0)\n return processed_objects, prediction, invalid_objects\n\n\"\"\"### **Demo JAK2**\"\"\"\n\ndef plot_hist(prediction, n_to_generate):\n print(\"Mean value of predictions:\", prediction.mean())\n print(\"Proportion of valid SMILES:\", len(prediction)/n_to_generate)\n ax = sns.kdeplot(prediction, shade=True)\n ax.set(xlabel='Predicted pIC50', \n title='Distribution of predicted pIC50 for generated molecules')\n plt.show()\n\ndef estimate_and_update(generator, predictor, n_to_generate, **kwargs):\n generated = []\n pbar = tqdm(range(n_to_generate))\n for i in pbar:\n pbar.set_description(\"Generating molecules...\")\n generated.append(generator.evaluate(gen_data, predict_len=120)[1:-1])\n\n sanitized = canonical_smiles(generated, sanitize=False, throw_warning=False)[:-1]\n unique_smiles = list(np.unique(sanitized))[1:]\n smiles, prediction, nan_smiles = predictor.predict(unique_smiles, get_features=get_fp) \n \n return smiles, prediction\n\ndef init():\n global use_cuda\n global my_generator\n global my_predictor\n global gen_data\n global optimizer_instance\n\n #variables in for gen model \n hidden_size = 1500\n stack_width = 1500\n stack_depth = 200\n layer_type = 'GRU'\n n_characters = 45\n lr = 0.001\n\n optimizer_instance = torch.optim.Adadelta\n use_cuda = torch.cuda.is_available()\n\n #get models path from registered model\n gen_model_path = Model.get_model_path(model_name='gen_model')\n pred_model_path = Model.get_model_path(model_name='pred_model')\n\n #define smiles strings dataset path\n model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), './deploy_files')\n gen_data_path = model_path +'/1000smiles.csv' \n \n tokens = ['<', '>', '#', '%', ')', '(', '+', '-', '/', '.', '1', '0', '3', '2', '5', '4', '7',\n '6', '9', '8', '=', 'A', '@', 'C', 'B', 'F', 'I', 'H', 'O', 'N', 'P', 'S', '[', ']',\n '\\\\', 'c', 'e', 'i', 'l', 'o', 'n', 'p', 's', 'r', '\\n']\n\n gen_data = GeneratorData(training_data_path=gen_data_path, delimiter='\\t', \n cols_to_read=[0], keep_header=True, tokens=tokens)\n\n my_generator = StackAugmentedRNN(input_size=gen_data.n_characters, hidden_size=hidden_size,\n output_size=gen_data.n_characters, layer_type=layer_type,\n 
n_layers=1, is_bidirectional=False, has_stack=True,\n stack_width=stack_width, stack_depth=stack_depth, \n use_cuda=use_cuda, \n optimizer_instance=optimizer_instance, lr=lr)\n\n #loading gen_model_1\n my_generator.load_model(gen_model_path)\n\n #my_predict = predictor model \n model_instance = RFR\n model_params = {'n_estimators': 250, 'n_jobs': 10} \n\n my_predictor = VanillaQSAR(model_instance=model_instance,\n model_params=model_params,\n model_type='regressor')\n \n my_predictor.load_model(path = pred_model_path)\n\ndef run(n_to_generate):\n #put this in run function\n smiles, pic50 = estimate_and_update(my_generator, \n my_predictor,\n n_to_generate=n_to_generate)\n \n molecules = [Chem.MolFromSmiles(x) for x in smiles]\n qed_list = [] \n \n for x in molecules:\n try:\n qed_list.append(QED.qed(x))\n except ValueError:\n pass\n\n return smiles.tolist(), pic50.tolist(), qed_list.tolist()\n" ]
[ [ "numpy.save", "torch.nn.functional.softmax", "numpy.random.seed", "numpy.asarray", "torch.nn.GRU", "torch.cuda.is_available", "torch.cat", "torch.softmax", "numpy.isfinite", "torch.autograd.Variable", "sklearn.metrics.f1_score", "torch.tanh", "sklearn.model_selection.KFold", "numpy.isnan", "numpy.where", "numpy.unique", "numpy.mean", "numpy.load", "torch.nn.LSTM", "torch.load", "numpy.zeros", "torch.tensor", "torch.nn.LogSoftmax", "numpy.arange", "numpy.max", "numpy.sort", "sklearn.metrics.r2_score", "numpy.array", "numpy.random.shuffle", "torch.nn.Linear", "sklearn.model_selection.StratifiedKFold", "numpy.random.permutation", "pandas.DataFrame", "torch.nn.Embedding", "torch.nn.CrossEntropyLoss", "sklearn.externals.joblib.load", "matplotlib.pyplot.show", "torch.zeros", "sklearn.externals.joblib.dump", "numpy.concatenate" ] ]
tom-kuchler/vhive
[ "ae1f2f5920e7607e9902ed1060bda62b56e332ac" ]
[ "function-images/rnn_serving/rnn.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size, all_categories, n_categories, all_letters, n_letters):\n super(RNN, self).__init__()\n self.hidden_size = hidden_size\n\n self.all_categories = all_categories\n self.n_categories = n_categories\n self.all_letters = all_letters\n self.n_letters = n_letters\n\n self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)\n self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)\n self.o2o = nn.Linear(hidden_size + output_size, output_size)\n self.dropout = nn.Dropout(0.1)\n self.softmax = nn.LogSoftmax(dim=1)\n\n def forward(self, category, input_tensor, hidden):\n input_combined = torch.cat((category, input_tensor, hidden), 1)\n hidden = self.i2h(input_combined)\n output = self.i2o(input_combined)\n output_combined = torch.cat((hidden, output), 1)\n output = self.o2o(output_combined)\n output = self.dropout(output)\n output = self.softmax(output)\n return output, hidden\n\n def init_hidden(self):\n return Variable(torch.zeros(1, self.hidden_size))\n\n @staticmethod\n def gen_input_tensor(all_letters, n_letters, line):\n tensor = torch.zeros(len(line), 1, n_letters)\n for li in range(len(line)):\n letter = line[li]\n tensor[li][0][all_letters.find(letter)] = 1\n return tensor\n\n @staticmethod\n def gen_category_tensor(all_categories, n_categories, category):\n li = all_categories.index(category)\n tensor = torch.zeros(1, n_categories)\n tensor[0][li] = 1\n return tensor\n\n # Sample from a category and starting letter\n def sample(self, category, start_letter='A'):\n category_tensor = Variable(self.gen_category_tensor(self.all_categories, self.n_categories, category))\n input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, start_letter))\n hidden = self.init_hidden()\n\n output_name = start_letter\n\n max_length = 20\n for i in range(max_length):\n output, hidden = self.forward(category_tensor, input_tensor[0], hidden)\n topv, topi = output.data.topk(1)\n topi = topi[0][0]\n\n if topi == self.n_letters - 1:\n break\n else:\n letter = self.all_letters[topi]\n output_name += letter\n\n input_tensor = Variable(self.gen_input_tensor(self.all_letters, self.n_letters, letter))\n\n return output_name\n\n # Get multiple samples from one category and multiple starting letters\n def samples(self, category, start_letters='ABC'):\n for start_letter in start_letters:\n yield self.sample(category, start_letter)\n" ]
[ [ "torch.nn.Linear", "torch.nn.LogSoftmax", "torch.zeros", "torch.cat", "torch.nn.Dropout" ] ]
gaurav272333/fastestimator
[ "d49e48d7375e19416ef337b3517dcfe42bb56589" ]
[ "fastestimator/op/numpyop/univariate/expand_dims.py" ]
[ "# Copyright 2019 The FastEstimator Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nfrom typing import Any, Dict, Iterable, List, Union\n\nimport numpy as np\n\nfrom fastestimator.op.numpyop.numpyop import NumpyOp\nfrom fastestimator.util.traceability_util import traceable\n\n\n@traceable()\nclass ExpandDims(NumpyOp):\n \"\"\"Expand the dimension of inputs by inserting a new axis to the specified axis position.\n\n Args:\n inputs: Key(s) of inputs to be modified.\n outputs: Key(s) into which to write the modified inputs.\n mode: What mode(s) to execute this Op in. For example, \"train\", \"eval\", \"test\", or \"infer\". To execute\n regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument\n like \"!infer\" or \"!train\".\n axis: The axis to expand.\n \"\"\"\n def __init__(self,\n inputs: Union[str, Iterable[str]],\n outputs: Union[str, Iterable[str]],\n mode: Union[None, str, Iterable[str]] = None,\n axis: int = -1):\n super().__init__(inputs=inputs, outputs=outputs, mode=mode)\n self.axis = axis\n self.in_list, self.out_list = True, True\n\n def forward(self, data: List[np.ndarray], state: Dict[str, Any]) -> List[np.ndarray]:\n return [np.expand_dims(elem, self.axis) for elem in data]\n" ]
[ [ "numpy.expand_dims" ] ]
mikofski/FlyingCircus
[ "77641afd6683db4ef1ba351b78fb80604a655609" ]
[ "carousel/tests/test_calcs.py" ]
[ "\"\"\"\ntest calculations\n\"\"\"\n\nfrom nose.tools import ok_, eq_\nfrom carousel.core.calculations import Calc, CalcParameter\nfrom carousel.core.calculators import Calculator\nfrom carousel.tests import PROJ_PATH, sandia_performance_model\nimport os\nimport uncertainties\nfrom pvlib.solarposition import get_solarposition as solpos\nimport logging\nimport numpy as np\n\nLOGGER = logging.getLogger(__name__)\n\n\ndef test_calc_metaclass():\n \"\"\"\n Test calculation class is created with params file using metaclass\n \"\"\"\n\n class CalcTest1(Calc):\n class Meta:\n calcs_file = 'utils.json'\n calcs_path = os.path.join(PROJ_PATH, 'calculations')\n\n calc_test1 = CalcTest1()\n ok_(isinstance(calc_test1, Calc))\n eq_(calc_test1.param_file,\n os.path.join(PROJ_PATH, 'calculations', 'utils.json'))\n\n class CalcTest2(Calc):\n energy = CalcParameter(\n is_dynamic=False,\n dependencies=[\"ac_power\", \"daterange\"],\n formula=\"f_energy\",\n args={\"outputs\": {\"ac_power\": \"Pac\", \"times\": \"timestamps\"}},\n returns=[\"hourly_energy\", \"hourly_timeseries\"]\n )\n monthly_rollup = CalcParameter(\n is_dynamic=False,\n dependencies=[\"energy\"],\n formula=\"f_rollup\",\n args={\n \"data\": {\"freq\": \"MONTHLY\"},\n \"outputs\": {\"items\": \"hourly_energy\",\n \"times\": \"hourly_timeseries\"}\n },\n returns=[\"monthly_energy\"]\n )\n yearly_rollup = CalcParameter(\n is_dynamic=False,\n dependencies=[\"energy\"],\n formula=\"f_rollup\",\n args={\"data\": {\"freq\": \"YEARLY\"},\n \"outputs\": {\"items\": \"hourly_energy\",\n \"times\": \"hourly_timeseries\"}},\n returns=[\"annual_energy\"]\n )\n\n calc_test2 = CalcTest2()\n ok_(isinstance(calc_test2, Calc))\n for k, v in calc_test1.parameters.iteritems():\n eq_(calc_test2.parameters[k], v)\n\n\ndef test_static_calc_unc():\n \"\"\"\n Test uncertainty propagation in static calculations using Uncertainties.\n \"\"\"\n\n # FIXME: this shouldn't have to run a model to test the uncertainty\n test_model_file = os.path.join(PROJ_PATH, 'models',\n 'sandia_performance_model-Tuscon.json')\n test_model = sandia_performance_model.SAPM(test_model_file) # create model\n test_model.command('start') # start simulation\n # get parameters from model\n dt = test_model.outputs.reg['timestamps'] # timestamps\n latitude = test_model.data.reg['latitude'].m # latitude [degrees]\n longitude = test_model.data.reg['longitude'].m # longitude [degrees]\n zenith = test_model.outputs.reg['solar_zenith'].m # zenith [degrees]\n s_ze_ze = test_model.outputs.reg.variance['solar_zenith']['solar_zenith']\n azimuth = test_model.outputs.reg['solar_azimuth'].m # azimuth [degrees]\n s_az_az = test_model.outputs.reg.variance['solar_azimuth']['solar_azimuth']\n # get uncertainties percentages in base units\n lat_unc = test_model.data.reg.uncertainty['latitude']['latitude']\n lat_unc = lat_unc.to_base_units().m\n lon_unc = test_model.data.reg.uncertainty['longitude']['longitude']\n lon_unc = lon_unc.to_base_units().m\n # create ufloat Uncertainties from parameters\n lat_unc = uncertainties.ufloat(latitude, np.abs(latitude * lat_unc))\n lon_unc = uncertainties.ufloat(longitude, np.abs(longitude * lon_unc))\n test_unc = [] # empty list to collect return values\n for n in xrange(96):\n # Uncertainties wrapped functions must return only scalar float\n f_ze_unc = uncertainties.wrap(\n lambda lat, lon: solpos(dt[n], lat, lon)['apparent_zenith'].item()\n )\n f_az_unc = uncertainties.wrap(\n lambda lat, lon: solpos(dt[n], lat, lon)['azimuth'].item()\n )\n ze_unc, az_unc = 
f_ze_unc(lat_unc, lon_unc), f_az_unc(lat_unc, lon_unc)\n LOGGER.debug(\n '%s: ze = %g +/- %g%%, az = %g +/- %g%%', dt[n].isoformat(),\n zenith[n], np.sqrt(s_ze_ze[n]) * 100,\n azimuth[n], np.sqrt(s_az_az[n]) * 100\n )\n LOGGER.debug(\n 'Uncertainties test %2d: ze = %g +/- %g%%, az = %g +/- %g%%', n,\n ze_unc.n, ze_unc.s / ze_unc.n * 100,\n az_unc.n, az_unc.s / az_unc.n * 100\n )\n assert np.isclose(zenith[n], ze_unc.n)\n assert np.isclose(np.sqrt(s_ze_ze[n]), ze_unc.s / ze_unc.n)\n assert np.isclose(azimuth[n], az_unc.n)\n assert np.isclose(np.sqrt(s_az_az[n]), az_unc.s / az_unc.n)\n test_unc.append({'ze': ze_unc, 'az': az_unc})\n return test_model, test_unc\n\n\nif __name__ == '__main__':\n tm, tu = test_static_calc_unc()\n test_calc_metaclass()\n" ]
[ [ "numpy.sqrt", "numpy.isclose", "numpy.abs" ] ]
CasualDan/ose-course-scientific-computing
[ "28c9595aabf47d98406c05dd9519245aa58fac05" ]
[ "lectures/integration/integration_algorithms.py" ]
[ "\"\"\"Algorithms for integration lecture.\"\"\"\nimport chaospy as cp\nimport numpy as np\n\n\ndef quadrature_newton_trapezoid_one(f, a, b, n):\n \"\"\"Return quadrature newton trapezoid example.\"\"\"\n xvals = np.linspace(a, b, n + 1)\n fvals = np.tile(np.nan, n + 1)\n h = xvals[1] - xvals[0]\n\n weights = np.tile(h, n + 1)\n weights[0] = weights[-1] = 0.5 * h\n\n for i, xval in enumerate(xvals):\n fvals[i] = f(xval)\n\n return np.sum(weights * fvals)\n\n\ndef quadrature_newton_simpson_one(f, a, b, n):\n \"\"\"Return quadrature newton simpson example.\"\"\"\n if n % 2 == 0:\n raise Warning(\"n must be an odd integer. Increasing by 1\")\n n += 1\n\n xvals = np.linspace(a, b, n)\n fvals = np.tile(np.nan, n)\n\n h = xvals[1] - xvals[0]\n\n weights = np.tile(np.nan, n)\n weights[0::2] = 2 * h / 3\n weights[1::2] = 4 * h / 3\n weights[0] = weights[-1] = h / 3\n\n for i, xval in enumerate(xvals):\n fvals[i] = f(xval)\n\n return np.sum(weights * fvals)\n\n\ndef quadrature_gauss_legendre_one(f, a, b, n):\n \"\"\"Return quadrature gauss legendre example.\"\"\"\n xvals, weights = np.polynomial.legendre.leggauss(n)\n xval_trans = (b - a) * (xvals + 1.0) / 2.0 + a\n\n fvals = np.tile(np.nan, n)\n for i, xval in enumerate(xval_trans):\n fvals[i] = ((b - a) / 2.0) * f(xval)\n\n return np.sum(weights * fvals)\n\n\ndef quadrature_gauss_legendre_two(f, a=-1, b=1, n=10):\n \"\"\"Return quadrature gauss legendre example.\"\"\"\n n_dim = int(np.sqrt(n))\n\n xvals, weight_uni = np.polynomial.legendre.leggauss(n_dim)\n xvals_transformed = (b - a) * (xvals + 1.0) / 2.0 + a\n\n weights = np.tile(np.nan, n_dim ** 2)\n fvals = np.tile(np.nan, n_dim ** 2)\n\n counter = 0\n for i, x in enumerate(xvals_transformed):\n for j, y in enumerate(xvals_transformed):\n weights[counter] = weight_uni[i] * weight_uni[j]\n fvals[counter] = f([x, y])\n counter += 1\n\n return ((b - a) / 2) ** 2 * np.sum(weights * np.array(fvals))\n\n\ndef monte_carlo_naive_one(f, a=0, b=1, n=10, seed=123):\n \"\"\"Return naive monte carlo example.\"\"\"\n np.random.seed(seed)\n xvals = np.random.uniform(size=n)\n fvals = np.tile(np.nan, n)\n weights = np.tile(1 / n, n)\n\n scale = b - a\n for i, xval in enumerate(xvals):\n fvals[i] = f(a + xval * (b - a))\n\n return scale * np.sum(weights * fvals)\n\n\ndef monte_carlo_naive_two_dimensions(f, a=0, b=1, n=10, seed=128):\n \"\"\"Return naive monte carlo example (two-dimensional).\n\n Restricted to same integration domain for both variables.\n \"\"\"\n np.random.seed(seed)\n xvals = np.random.uniform(low=a, high=b, size=2 * n).reshape(n, 2)\n volume = (b - a) ** 2\n\n fvals = np.tile(np.nan, n)\n weights = np.tile(1 / n, n)\n\n for i, xval in enumerate(xvals):\n fvals[i] = f(xval)\n\n return volume * np.sum(weights * fvals)\n\n\ndef monte_carlo_quasi_two_dimensions(f, a=0, b=1, n=10, rule=\"random\"):\n \"\"\"Return Monte Carlo example (two-dimensional).\n\n Corresponds to naive Monthe Carlo for `rule='random'`. Restricted to same\n integration domain for both variables.\n \"\"\"\n distribution = cp.J(cp.Uniform(a, b), cp.Uniform(a, b))\n samples = distribution.sample(n, rule=rule).T\n volume = (b - a) ** 2\n\n fvals = np.tile(np.nan, n)\n weights = np.tile(1 / n, n)\n\n for i, xval in enumerate(samples):\n fvals[i] = f(xval)\n\n return volume * np.sum(weights * fvals)\n" ]
[ [ "numpy.random.uniform", "numpy.tile", "numpy.sum", "numpy.array", "numpy.random.seed", "numpy.polynomial.legendre.leggauss", "numpy.sqrt", "numpy.linspace" ] ]
msakai/blueoil
[ "0c9160b524b17482d59ae48a0c11384f1d26dccc" ]
[ "blueoil/cmd/export.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright 2018 The Blueoil Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nimport os\nimport shutil\n\nimport click\nimport PIL\nimport numpy as np\nimport tensorflow as tf\n\nfrom blueoil import environment\nfrom blueoil.utils.image import load_image\nfrom blueoil.utils import config as config_util\nfrom blueoil.utils import executor\n\nDEFAULT_INFERENCE_TEST_DATA_IMAGE = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n \"fixtures\", \"export_inference_test_data_images\",\n \"5605039097_05baa93bfd_m.jpg\")\n\n\n# TODO(wakisaka): duplicated function with blueoil/cmd/measure_latency.py\ndef _pre_process(raw_image, pre_processor, data_format):\n image = pre_processor(image=raw_image)['image']\n if data_format == 'NCHW':\n image = np.transpose(image, [2, 0, 1])\n return image\n\n\ndef _save_all_operation_outputs(image_path, output_dir, image, raw_image, all_outputs, image_size):\n shutil.copy(image_path, os.path.join(output_dir))\n tmp_image = PIL.Image.open(image_path)\n tmp_image.save(os.path.join(output_dir, \"raw_image.png\"))\n np.save(os.path.join(output_dir, \"raw_image.npy\"), raw_image)\n\n np.save(os.path.join(output_dir, \"preprocessed_image.npy\"), image)\n\n for _output in all_outputs:\n np.save(os.path.join(output_dir, \"{}.npy\".format(_output['name'])), _output['val'])\n\n\ndef _minimal_operations(sess):\n \"\"\"Get inference operations.\"\"\"\n minimal_graph_def = executor.convert_variables_to_constants(sess)\n minimal_graph = tf.Graph()\n with minimal_graph.as_default():\n tf.import_graph_def(minimal_graph_def, name=\"\")\n ops = minimal_graph.get_operations()\n\n return ops\n\n\ndef _export(config, restore_path, image_path):\n if restore_path is None:\n restore_file = executor.search_restore_filename(environment.CHECKPOINTS_DIR)\n restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)\n\n print(\"Restore from {}\".format(restore_path))\n\n if not os.path.exists(\"{}.index\".format(restore_path)):\n raise Exception(\"restore file {} dont exists.\".format(restore_path))\n\n output_root_dir = os.path.join(environment.EXPERIMENT_DIR, \"export\")\n output_root_dir = os.path.join(output_root_dir, os.path.basename(restore_path))\n\n if not os.path.exists(output_root_dir):\n os.makedirs(output_root_dir)\n\n graph = tf.Graph()\n ModelClass = config.NETWORK_CLASS\n network_kwargs = dict((key.lower(), val) for key, val in config.NETWORK.items())\n\n with graph.as_default():\n\n model = ModelClass(\n classes=config.CLASSES,\n is_debug=config.IS_DEBUG,\n **network_kwargs,\n )\n\n is_training = tf.constant(False, name=\"is_training\")\n\n images_placeholder, _ = model.placeholders()\n model.inference(images_placeholder, is_training)\n init_op = tf.compat.v1.global_variables_initializer()\n\n saver = tf.compat.v1.train.Saver(max_to_keep=50)\n\n session_config = tf.compat.v1.ConfigProto()\n sess = 
tf.compat.v1.Session(graph=graph, config=session_config)\n sess.run(init_op)\n\n saver.restore(sess, restore_path)\n\n main_output_dir = os.path.join(output_root_dir, \"{}x{}\".format(config.IMAGE_SIZE[0], config.IMAGE_SIZE[1]))\n if not os.path.exists(main_output_dir):\n os.makedirs(main_output_dir)\n\n # save inference values as npy files for runtime inference test and debug.\n if image_path:\n all_ops = _minimal_operations(sess)\n inference_values_output_dir = os.path.join(main_output_dir, \"inference_test_data\")\n\n if not os.path.exists(inference_values_output_dir):\n os.makedirs(inference_values_output_dir)\n\n raw_image = load_image(image_path)\n image = _pre_process(raw_image, config.PRE_PROCESSOR, config.DATA_FORMAT)\n images = np.expand_dims(image, axis=0)\n feed_dict = {\n images_placeholder: images,\n }\n\n all_outputs = []\n index = 0\n for op in all_ops:\n for op_output in op.outputs:\n # HACK: This is a workaround for a TensorFlow bug.\n # We can remove the following 4 lines once it's been resolved in TensorFlow.\n # Issue link: https://github.com/tensorflow/tensorflow/issues/36456\n if (not tf.config.experimental.list_physical_devices('GPU')\n and \"FusedBatchNormV3\" in op_output.name\n and int(op_output.name.split(\":\")[1]) in set(range(1, 6))):\n continue\n val = sess.run(op_output.name, feed_dict=feed_dict)\n name = '%03d' % index + '_' + op_output.name.replace('/', '_')\n all_outputs.append({'val': val, 'name': name})\n index += 1\n\n _save_all_operation_outputs(\n image_path, inference_values_output_dir, image, raw_image, all_outputs, config.IMAGE_SIZE)\n\n yaml_names = config_util.save_yaml(main_output_dir, config)\n pb_name = executor.save_pb_file(sess, main_output_dir)\n\n message = \"\"\"\nCreate pb and yaml files in: {}\npb: {}\nyaml: {}, {}\n\"\"\".format(main_output_dir,\n pb_name,\n *yaml_names)\n\n if image_path:\n message += \"Create npy files under the `inference_test_data` folder \\n\"\n message += \"npy: {}\".format([d[\"name\"] for d in all_outputs] + [\"raw_image\", \"preprocessed_image\", ])\n\n print(message)\n print(\"finish\")\n\n return main_output_dir, config\n\n\ndef run(experiment_id,\n restore_path=None,\n image_size=(None, None),\n image=DEFAULT_INFERENCE_TEST_DATA_IMAGE,\n config_file=None):\n environment.init(experiment_id)\n\n config = config_util.load_from_experiment()\n\n if config_file:\n config = config_util.merge(config, config_util.load(config_file))\n\n config.BATCH_SIZE = 1\n config.NETWORK.BATCH_SIZE = 1\n config.DATASET.BATCH_SIZE = 1\n\n if list(image_size) != [None, None]:\n config.IMAGE_SIZE = list(image_size)\n config.NETWORK.IMAGE_SIZE = list(image_size)\n\n # override pre processes image size.\n if config.PRE_PROCESSOR:\n config.PRE_PROCESSOR.set_image_size(image_size)\n\n # override post processes image size.\n if config.POST_PROCESSOR:\n config.POST_PROCESSOR.set_image_size(image_size)\n\n print(\"Override IMAGE_SIZE\", config.IMAGE_SIZE)\n\n executor.init_logging(config)\n config_util.display(config)\n\n return _export(config, restore_path, image)\n\n\n@click.command(context_settings=dict(help_option_names=['-h', '--help']))\n@click.option(\n \"-i\",\n \"--experiment_id\",\n help=\"id of this experiment.\",\n required=True,\n)\n@click.option(\n \"--restore_path\",\n help=\"restore ckpt file base path. e.g. saved/experiment/checkpoints/save.ckpt-10001\",\n default=None,\n)\n@click.option(\n '--image_size',\n nargs=2,\n type=click.Tuple([int, int]),\n help=\"input image size height and width. 
if it is not provided, it is restored from the saved experiment config. \"\n \"e.g. --image_size 320 320\",\n # NOQA\n default=(None, None),\n)\n@click.option(\n \"--image\",\n help=\"path of target image\",\n default=DEFAULT_INFERENCE_TEST_DATA_IMAGE,\n)\n@click.option(\n \"-c\",\n \"--config_file\",\n help=\"config file path. override saved experiment config.\",\n)\ndef main(experiment_id, restore_path, image_size, image, config_file):\n \"\"\"Export a trained model to protocol buffer files and a meta config yaml.\n\n When the `image` option is given, create npy files of each layer's output values in\n `export/{restore_path}/{image_size}/inference_test_data/**.npy` as expected values for inference tests and debugging.\n \"\"\"\n run(experiment_id, restore_path, image_size, image, config_file)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.Session", "numpy.transpose", "tensorflow.compat.v1.ConfigProto", "tensorflow.config.experimental.list_physical_devices", "tensorflow.Graph", "numpy.expand_dims", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.import_graph_def", "tensorflow.constant" ] ]
SwapneelM/pyprob
[ "4d93441ea838c3491a49050ae05d218a34708e6d" ]
[ "pyprob/model.py" ]
[ "import torch\nimport time\nimport sys\nimport os\nimport math\nimport random\nfrom termcolor import colored\n\nfrom .distributions import Empirical\nfrom . import util, state, TraceMode, PriorInflation, InferenceEngine, InferenceNetwork, ImportanceWeighting, Optimizer, LearningRateScheduler, AddressDictionary\nfrom .nn import InferenceNetwork as InferenceNetworkBase\nfrom .nn import OnlineDataset, OfflineDataset, InferenceNetworkFeedForward, InferenceNetworkLSTM\nfrom .remote import ModelServer\n\n\nclass Model():\n def __init__(self, name='Unnamed pyprob model', address_dict_file_name=None):\n super().__init__()\n self.name = name\n self._inference_network = None\n if address_dict_file_name is None:\n self._address_dictionary = None\n else:\n self._address_dictionary = AddressDictionary(address_dict_file_name)\n\n def forward(self):\n raise NotImplementedError()\n\n def _trace_generator(self, trace_mode=TraceMode.PRIOR, prior_inflation=PriorInflation.DISABLED, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, inference_network=None, observe=None, metropolis_hastings_trace=None, likelihood_importance=1., importance_weighting=ImportanceWeighting.IW0, *args, **kwargs):\n state._init_traces(func=self.forward, trace_mode=trace_mode, prior_inflation=prior_inflation, inference_engine=inference_engine, inference_network=inference_network, observe=observe, metropolis_hastings_trace=metropolis_hastings_trace, address_dictionary=self._address_dictionary, likelihood_importance=likelihood_importance, importance_weighting=importance_weighting)\n while True:\n state._begin_trace()\n result = self.forward(*args, **kwargs)\n trace = state._end_trace(result)\n yield trace\n\n def _traces(self, num_traces=10, trace_mode=TraceMode.PRIOR, prior_inflation=PriorInflation.DISABLED, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, inference_network=None, map_func=None, silent=False, observe=None, file_name=None, likelihood_importance=1., *args, **kwargs):\n generator = self._trace_generator(trace_mode=trace_mode, prior_inflation=prior_inflation, inference_engine=inference_engine, inference_network=inference_network, observe=observe, likelihood_importance=likelihood_importance, *args, **kwargs)\n traces = Empirical(file_name=file_name)\n if map_func is None:\n map_func = lambda trace: trace\n time_start = time.time()\n if (util._verbosity > 1) and not silent:\n len_str_num_traces = len(str(num_traces))\n print('Time spent | Time remain.| Progress | {} | Traces/sec'.format('Trace'.ljust(len_str_num_traces * 2 + 1)))\n prev_duration = 0\n for i in range(num_traces):\n if (util._verbosity > 1) and not silent:\n duration = time.time() - time_start\n if (duration - prev_duration > util._print_refresh_rate) or (i == num_traces - 1):\n prev_duration = duration\n traces_per_second = (i + 1) / duration\n print('{} | {} | {} | {}/{} | {:,.2f} '.format(util.days_hours_mins_secs_str(duration), util.days_hours_mins_secs_str((num_traces - i) / traces_per_second), util.progress_bar(i+1, num_traces), str(i+1).rjust(len_str_num_traces), num_traces, traces_per_second), end='\\r')\n sys.stdout.flush()\n trace = next(generator)\n if trace_mode == TraceMode.PRIOR:\n log_weight = 1.\n else:\n log_weight = trace.log_importance_weight\n traces.add(map_func(trace), log_weight)\n if (util._verbosity > 1) and not silent:\n print()\n traces.finalize()\n return traces\n\n def get_trace(self, *args, **kwargs):\n return next(self._trace_generator(*args, **kwargs))\n\n def prior(self, num_traces=10, 
prior_inflation=PriorInflation.DISABLED, map_func=None, file_name=None, likelihood_importance=1., *args, **kwargs):\n prior = self._traces(num_traces=num_traces, trace_mode=TraceMode.PRIOR, prior_inflation=prior_inflation, map_func=map_func, file_name=file_name, likelihood_importance=likelihood_importance, *args, **kwargs)\n prior.rename('Prior, traces: {:,}'.format(prior.length))\n prior.add_metadata(op='prior', num_traces=num_traces, prior_inflation=str(prior_inflation), likelihood_importance=likelihood_importance)\n return prior\n\n def prior_results(self, num_traces=10, prior_inflation=PriorInflation.DISABLED, map_func=lambda trace: trace.result, file_name=None, likelihood_importance=1., *args, **kwargs):\n return self.prior(num_traces=num_traces, prior_inflation=prior_inflation, map_func=map_func, file_name=file_name, likelihood_importance=likelihood_importance, *args, **kwargs)\n\n def posterior(self, num_traces=10, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, initial_trace=None, map_func=None, observe=None, file_name=None, thinning_steps=None, likelihood_importance=1., *args, **kwargs):\n if inference_engine == InferenceEngine.IMPORTANCE_SAMPLING:\n posterior = self._traces(num_traces=num_traces, trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, inference_network=None, map_func=map_func, observe=observe, file_name=file_name, likelihood_importance=likelihood_importance, *args, **kwargs)\n posterior.rename('Posterior, IS, traces: {:,}, ESS: {:,.2f}'.format(posterior.length, posterior.effective_sample_size))\n posterior.add_metadata(op='posterior', num_traces=num_traces, inference_engine=str(inference_engine), effective_sample_size=posterior.effective_sample_size, likelihood_importance=likelihood_importance)\n elif inference_engine == InferenceEngine.IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK:\n if self._inference_network is None:\n raise RuntimeError('Cannot run inference engine IMPORTANCE_SAMPLING_WITH_INFERENCE_NETWORK because no inference network for this model is available. Use learn_inference_network or load_inference_network first.')\n with torch.no_grad():\n posterior = self._traces(num_traces=num_traces, trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, inference_network=self._inference_network, map_func=map_func, observe=observe, file_name=file_name, likelihood_importance=likelihood_importance, *args, **kwargs)\n posterior.rename('Posterior, IC, traces: {:,}, train. 
traces: {:,}, ESS: {:,.2f}'.format(posterior.length, self._inference_network._total_train_traces, posterior.effective_sample_size))\n posterior.add_metadata(op='posterior', num_traces=num_traces, inference_engine=str(inference_engine), effective_sample_size=posterior.effective_sample_size, likelihood_importance=likelihood_importance, train_traces=self._inference_network._total_train_traces)\n else: # inference_engine == InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS or inference_engine == InferenceEngine.RANDOM_WALK_METROPOLIS_HASTINGS\n posterior = Empirical(file_name=file_name)\n if map_func is None:\n map_func = lambda trace: trace\n if initial_trace is None:\n current_trace = next(self._trace_generator(trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, observe=observe, *args, **kwargs))\n else:\n current_trace = initial_trace\n\n time_start = time.time()\n traces_accepted = 0\n samples_reused = 0\n samples_all = 0\n if thinning_steps is None:\n thinning_steps = 1\n\n if util._verbosity > 1:\n len_str_num_traces = len(str(num_traces))\n print('Time spent | Time remain.| Progress | {} | Accepted|Smp reuse| Traces/sec'.format('Trace'.ljust(len_str_num_traces * 2 + 1)))\n prev_duration = 0\n for i in range(num_traces):\n if util._verbosity > 1:\n duration = time.time() - time_start\n if (duration - prev_duration > util._print_refresh_rate) or (i == num_traces - 1):\n prev_duration = duration\n traces_per_second = (i + 1) / duration\n print('{} | {} | {} | {}/{} | {} | {} | {:,.2f} '.format(util.days_hours_mins_secs_str(duration), util.days_hours_mins_secs_str((num_traces - i) / traces_per_second), util.progress_bar(i+1, num_traces), str(i+1).rjust(len_str_num_traces), num_traces, '{:,.2f}%'.format(100 * (traces_accepted / (i + 1))).rjust(7), '{:,.2f}%'.format(100 * samples_reused / max(1, samples_all)).rjust(7), traces_per_second), end='\\r')\n sys.stdout.flush()\n candidate_trace = next(self._trace_generator(trace_mode=TraceMode.POSTERIOR, inference_engine=inference_engine, metropolis_hastings_trace=current_trace, observe=observe, *args, **kwargs))\n log_acceptance_ratio = math.log(current_trace.length_controlled) - math.log(candidate_trace.length_controlled) + candidate_trace.log_prob_observed - current_trace.log_prob_observed\n for variable in candidate_trace.variables_controlled:\n if variable.reused:\n log_acceptance_ratio += torch.sum(variable.log_prob)\n log_acceptance_ratio -= torch.sum(current_trace.variables_dict_address[variable.address].log_prob)\n samples_reused += 1\n samples_all += candidate_trace.length_controlled\n\n if state._metropolis_hastings_site_transition_log_prob is None:\n print(colored('Warning: trace did not hit the Metropolis Hastings site, ensure that the model is deterministic except pyprob.sample calls', 'red', attrs=['bold']))\n else:\n log_acceptance_ratio += torch.sum(state._metropolis_hastings_site_transition_log_prob)\n\n # print(log_acceptance_ratio)\n if math.log(random.random()) < float(log_acceptance_ratio):\n traces_accepted += 1\n current_trace = candidate_trace\n # do thinning\n if i % thinning_steps == 0:\n posterior.add(map_func(current_trace))\n\n if util._verbosity > 1:\n print()\n\n posterior.finalize()\n posterior.rename('Posterior, {}, traces: {:,}{}, accepted: {:,.2f}%, sample reuse: {:,.2f}%'.format('LMH' if inference_engine == InferenceEngine.LIGHTWEIGHT_METROPOLIS_HASTINGS else 'RMH', posterior.length, '' if thinning_steps == 1 else ' (thinning steps: {:,})'.format(thinning_steps), 100 * (traces_accepted / num_traces), 
100 * samples_reused / samples_all))\n posterior.add_metadata(op='posterior', num_traces=num_traces, inference_engine=str(inference_engine), likelihood_importance=likelihood_importance, thinning_steps=thinning_steps, num_traces_accepted=traces_accepted, num_samples_reused=samples_reused, num_samples=samples_all)\n return posterior\n\n def posterior_results(self, num_traces=10, inference_engine=InferenceEngine.IMPORTANCE_SAMPLING, initial_trace=None, map_func=lambda trace: trace.result, observe=None, file_name=None, thinning_steps=None, *args, **kwargs):\n return self.posterior(num_traces=num_traces, inference_engine=inference_engine, initial_trace=initial_trace, map_func=map_func, observe=observe, file_name=file_name, thinning_steps=thinning_steps, *args, **kwargs)\n\n def reset_inference_network(self):\n self._inference_network = None\n\n def learn_inference_network(self, num_traces, num_traces_end=1e9, inference_network=InferenceNetwork.FEEDFORWARD, prior_inflation=PriorInflation.DISABLED, dataset_dir=None, dataset_valid_dir=None, observe_embeddings={}, batch_size=64, valid_size=None, valid_every=None, optimizer_type=Optimizer.ADAM, learning_rate_init=0.001, learning_rate_end=1e-6, learning_rate_scheduler_type=LearningRateScheduler.NONE, momentum=0.9, weight_decay=0., save_file_name_prefix=None, save_every_sec=600, pre_generate_layers=True, distributed_backend=None, distributed_params_sync_every_iter=10000, distributed_num_buckets=None, dataloader_offline_num_workers=0, stop_with_bad_loss=True, log_file_name=None, lstm_dim=512, lstm_depth=1, proposal_mixture_components=10):\n if dataset_dir is None:\n dataset = OnlineDataset(model=self, prior_inflation=prior_inflation)\n else:\n dataset = OfflineDataset(dataset_dir=dataset_dir)\n\n if dataset_valid_dir is None:\n dataset_valid = None\n else:\n dataset_valid = OfflineDataset(dataset_dir=dataset_valid_dir)\n\n if self._inference_network is None:\n print('Creating new inference network...')\n if inference_network == InferenceNetwork.FEEDFORWARD:\n self._inference_network = InferenceNetworkFeedForward(model=self, observe_embeddings=observe_embeddings, proposal_mixture_components=proposal_mixture_components)\n elif inference_network == InferenceNetwork.LSTM:\n self._inference_network = InferenceNetworkLSTM(model=self, observe_embeddings=observe_embeddings, lstm_dim=lstm_dim, lstm_depth=lstm_depth, proposal_mixture_components=proposal_mixture_components)\n else:\n raise ValueError('Unknown inference_network: {}'.format(inference_network))\n if pre_generate_layers:\n if dataset_valid_dir is not None:\n self._inference_network._pre_generate_layers(dataset_valid, save_file_name_prefix=save_file_name_prefix)\n if dataset_dir is not None:\n self._inference_network._pre_generate_layers(dataset, save_file_name_prefix=save_file_name_prefix)\n else:\n print('Continuing to train existing inference network...')\n print('Total number of parameters: {:,}'.format(self._inference_network._history_num_params[-1]))\n\n self._inference_network.to(device=util._device)\n self._inference_network.optimize(num_traces=num_traces, dataset=dataset, dataset_valid=dataset_valid, num_traces_end=num_traces_end, batch_size=batch_size, valid_every=valid_every, optimizer_type=optimizer_type, learning_rate_init=learning_rate_init, learning_rate_end=learning_rate_end, learning_rate_scheduler_type=learning_rate_scheduler_type, momentum=momentum, weight_decay=weight_decay, save_file_name_prefix=save_file_name_prefix, save_every_sec=save_every_sec, 
distributed_backend=distributed_backend, distributed_params_sync_every_iter=distributed_params_sync_every_iter, distributed_num_buckets=distributed_num_buckets, dataloader_offline_num_workers=dataloader_offline_num_workers, stop_with_bad_loss=stop_with_bad_loss, log_file_name=log_file_name)\n\n def save_inference_network(self, file_name):\n if self._inference_network is None:\n raise RuntimeError('The model has no trained inference network.')\n self._inference_network._save(file_name)\n\n def load_inference_network(self, file_name):\n self._inference_network = InferenceNetworkBase._load(file_name)\n # The following is due to a temporary hack related to https://github.com/pytorch/pytorch/issues/9981 and can be deprecated by using dill as pickler with torch > 0.4.1\n self._inference_network._model = self\n\n def save_dataset(self, dataset_dir, num_traces, num_traces_per_file, prior_inflation=PriorInflation.DISABLED, *args, **kwargs):\n if not os.path.exists(dataset_dir):\n print('Directory does not exist, creating: {}'.format(dataset_dir))\n os.makedirs(dataset_dir)\n dataset = OnlineDataset(self, None, prior_inflation=prior_inflation)\n dataset.save_dataset(dataset_dir=dataset_dir, num_traces=num_traces, num_traces_per_file=num_traces_per_file, *args, **kwargs)\n\n\nclass RemoteModel(Model):\n def __init__(self, server_address='tcp://127.0.0.1:5555', before_forward_func=None, after_forward_func=None, *args, **kwargs):\n self._server_address = server_address\n self._model_server = None\n self._before_forward_func = before_forward_func # Optional method to run before each forward call of the remote model (simulator)\n self._after_forward_func = after_forward_func # Optional method to run after each forward call of the remote model (simulator)\n super().__init__(*args, **kwargs)\n\n def close(self):\n if self._model_server is not None:\n self._model_server.close()\n super().close()\n\n def forward(self):\n if self._model_server is None:\n self._model_server = ModelServer(self._server_address)\n self.name = '{} running on {}'.format(self._model_server.model_name, self._model_server.system_name)\n\n if self._before_forward_func is not None:\n self._before_forward_func()\n ret = self._model_server.forward() # Calls the forward run of the remote model (simulator)\n if self._after_forward_func is not None:\n self._after_forward_func()\n return ret\n" ]
[ [ "torch.sum", "torch.no_grad" ] ]
moodyRahman/basenji
[ "0fb66324549bbd52c10ce714a1ad988e54ce2773" ]
[ "bin/basenji_test.py" ]
[ "#!/usr/bin/env python\n# Copyright 2017 Calico LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =========================================================================\n\nfrom __future__ import print_function\nfrom optparse import OptionParser\nimport json\nimport os\nimport pdb\nimport sys\nimport time\n\nimport h5py\nfrom intervaltree import IntervalTree\nimport joblib\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import poisson\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.metrics import precision_recall_curve, average_precision_score\nimport tensorflow as tf\n\nimport matplotlib\nmatplotlib.use('PDF')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom basenji import bed\nfrom basenji import dataset\nfrom basenji import plots\nfrom basenji import seqnn\nfrom basenji import trainer\n\nif tf.__version__[0] == '1':\n tf.compat.v1.enable_eager_execution()\n\n\"\"\"\nbasenji_test.py\n\nTest the accuracy of a trained model.\n\"\"\"\n\n################################################################################\n# main\n################################################################################\ndef main():\n usage = 'usage: %prog [options] <params_file> <model_file> <data_dir>'\n parser = OptionParser(usage)\n parser.add_option('--ai', dest='accuracy_indexes',\n help='Comma-separated list of target indexes to make accuracy scatter plots.')\n parser.add_option('--bi', dest='bedgraph_indexes',\n help='Comma-separated list of target indexes to write predictions and targets as bedgraph [Default: %default]')\n parser.add_option('--head', dest='head_i',\n default=0, type='int',\n help='Parameters head to test [Default: %default]')\n parser.add_option('--mc', dest='mc_n',\n default=0, type='int',\n help='Monte carlo test iterations [Default: %default]')\n parser.add_option('--peak','--peaks', dest='peaks',\n default=False, action='store_true',\n help='Compute expensive peak accuracy [Default: %default]')\n parser.add_option('-o', dest='out_dir',\n default='test_out',\n help='Output directory for test statistics [Default: %default]')\n parser.add_option('--rc', dest='rc',\n default=False, action='store_true',\n help='Average the fwd and rc predictions [Default: %default]')\n parser.add_option('--save', dest='save',\n default=False, action='store_true',\n help='Save targets and predictions numpy arrays [Default: %default]')\n parser.add_option('--shifts', dest='shifts',\n default='0',\n help='Ensemble prediction shifts [Default: %default]')\n parser.add_option('-t', dest='targets_file',\n default=None, type='str',\n help='File specifying target indexes and labels in table format')\n parser.add_option('--split', dest='split_label',\n default='test',\n help='Dataset split label for eg TFR pattern [Default: %default]')\n parser.add_option('--tfr', dest='tfr_pattern',\n default=None,\n help='TFR pattern string appended to data_dir/tfrecords for subsetting [Default: %default]')\n (options, args) = parser.parse_args()\n\n if len(args) != 
3:\n parser.error('Must provide parameters, model, and test data HDF5')\n else:\n params_file = args[0]\n model_file = args[1]\n data_dir = args[2]\n\n if not os.path.isdir(options.out_dir):\n os.mkdir(options.out_dir)\n\n # parse shifts to integers\n options.shifts = [int(shift) for shift in options.shifts.split(',')]\n\n #######################################################\n # inputs\n\n # read targets\n if options.targets_file is None:\n options.targets_file = '%s/targets.txt' % data_dir\n targets_df = pd.read_csv(options.targets_file, index_col=0, sep='\\t')\n\n # read model parameters\n with open(params_file) as params_open:\n params = json.load(params_open)\n params_model = params['model']\n params_train = params['train']\n \n # construct eval data\n eval_data = dataset.SeqDataset(data_dir,\n split_label=options.split_label,\n batch_size=params_train['batch_size'],\n mode='eval',\n tfr_pattern=options.tfr_pattern)\n\n # initialize model\n seqnn_model = seqnn.SeqNN(params_model)\n seqnn_model.restore(model_file, options.head_i)\n seqnn_model.build_ensemble(options.rc, options.shifts)\n\n #######################################################\n # evaluate\n\n loss_label = params_train.get('loss', 'poisson').lower()\n spec_weight = params_train.get('spec_weight', 1)\n loss_fn = trainer.parse_loss(loss_label, spec_weight=spec_weight)\n \n # evaluate\n test_loss, test_metric1, test_metric2 = seqnn_model.evaluate(eval_data, loss=loss_fn)\n\n # print summary statistics\n print('\\nTest Loss: %7.5f' % test_loss)\n\n if loss_label == 'bce':\n print('Test AUROC: %7.5f' % test_metric1.mean())\n print('Test AUPRC: %7.5f' % test_metric2.mean())\n\n # write target-level statistics\n targets_acc_df = pd.DataFrame({\n 'index': targets_df.index,\n 'auroc': test_metric1,\n 'auprc': test_metric2,\n 'identifier': targets_df.identifier,\n 'description': targets_df.description\n })\n\n else:\n print('Test PearsonR: %7.5f' % test_metric1.mean())\n print('Test R2: %7.5f' % test_metric2.mean())\n\n # write target-level statistics\n targets_acc_df = pd.DataFrame({\n 'index': targets_df.index,\n 'pearsonr': test_metric1,\n 'r2': test_metric2,\n 'identifier': targets_df.identifier,\n 'description': targets_df.description\n })\n\n targets_acc_df.to_csv('%s/acc.txt'%options.out_dir, sep='\\t',\n index=False, float_format='%.5f')\n \n #######################################################\n # predict?\n\n if options.save or options.peaks or options.accuracy_indexes is not None:\n # compute predictions\n test_preds = seqnn_model.predict(eval_data).astype('float16')\n\n # read targets\n test_targets = eval_data.numpy(return_inputs=False)\n\n if options.save:\n preds_h5 = h5py.File('%s/preds.h5' % options.out_dir, 'w')\n preds_h5.create_dataset('preds', data=test_preds)\n preds_h5.close()\n targets_h5 = h5py.File('%s/targets.h5' % options.out_dir, 'w')\n targets_h5.create_dataset('targets', data=test_targets)\n targets_h5.close()\n\n if options.bedgraph_indexes is not None:\n bedgraph_indexes = [int(ti) for ti in options.bedgraph_indexes.split(',')]\n bed.write_bedgraph(test_preds, test_targets, data_dir,\n options.out_dir, options.split_label, bedgraph_indexes)\n\n #######################################################\n # peak call accuracy\n\n if options.peaks:\n peaks_out_file = '%s/peaks.txt' % options.out_dir\n test_peaks(test_preds, test_targets, peaks_out_file)\n\n\n #######################################################\n # accuracy plots\n\n if options.accuracy_indexes is not None:\n accuracy_indexes 
= [int(ti) for ti in options.accuracy_indexes.split(',')]\n\n if not os.path.isdir('%s/scatter' % options.out_dir):\n os.mkdir('%s/scatter' % options.out_dir)\n\n if not os.path.isdir('%s/violin' % options.out_dir):\n os.mkdir('%s/violin' % options.out_dir)\n\n if not os.path.isdir('%s/roc' % options.out_dir):\n os.mkdir('%s/roc' % options.out_dir)\n\n if not os.path.isdir('%s/pr' % options.out_dir):\n os.mkdir('%s/pr' % options.out_dir)\n\n for ti in accuracy_indexes:\n test_targets_ti = test_targets[:, :, ti]\n\n ############################################\n # scatter\n\n # sample every few bins (adjust to plot the # points I want)\n ds_indexes = np.arange(0, test_preds.shape[1], 8)\n\n # subset and flatten\n test_targets_ti_flat = test_targets_ti[:, ds_indexes].flatten(\n ).astype('float32')\n test_preds_ti_flat = test_preds[:, ds_indexes, ti].flatten().astype(\n 'float32')\n\n # take log2\n test_targets_ti_log = np.log2(test_targets_ti_flat + 1)\n test_preds_ti_log = np.log2(test_preds_ti_flat + 1)\n\n # plot log2\n sns.set(font_scale=1.2, style='ticks')\n out_pdf = '%s/scatter/t%d.pdf' % (options.out_dir, ti)\n plots.regplot(\n test_targets_ti_log,\n test_preds_ti_log,\n out_pdf,\n poly_order=1,\n alpha=0.3,\n sample=500,\n figsize=(6, 6),\n x_label='log2 Experiment',\n y_label='log2 Prediction',\n table=True)\n\n ############################################\n # violin\n\n # call peaks\n test_targets_ti_lambda = np.mean(test_targets_ti_flat)\n test_targets_pvals = 1 - poisson.cdf(\n np.round(test_targets_ti_flat) - 1, mu=test_targets_ti_lambda)\n test_targets_qvals = np.array(ben_hoch(test_targets_pvals))\n test_targets_peaks = test_targets_qvals < 0.01\n test_targets_peaks_str = np.where(test_targets_peaks, 'Peak',\n 'Background')\n\n # violin plot\n sns.set(font_scale=1.3, style='ticks')\n plt.figure()\n df = pd.DataFrame({\n 'log2 Prediction': np.log2(test_preds_ti_flat + 1),\n 'Experimental coverage status': test_targets_peaks_str\n })\n ax = sns.violinplot(\n x='Experimental coverage status', y='log2 Prediction', data=df)\n ax.grid(True, linestyle=':')\n plt.savefig('%s/violin/t%d.pdf' % (options.out_dir, ti))\n plt.close()\n\n # ROC\n plt.figure()\n fpr, tpr, _ = roc_curve(test_targets_peaks, test_preds_ti_flat)\n auroc = roc_auc_score(test_targets_peaks, test_preds_ti_flat)\n plt.plot(\n [0, 1], [0, 1], c='black', linewidth=1, linestyle='--', alpha=0.7)\n plt.plot(fpr, tpr, c='black')\n ax = plt.gca()\n ax.set_xlabel('False positive rate')\n ax.set_ylabel('True positive rate')\n ax.text(\n 0.99, 0.02, 'AUROC %.3f' % auroc,\n horizontalalignment='right') # , fontsize=14)\n ax.grid(True, linestyle=':')\n plt.savefig('%s/roc/t%d.pdf' % (options.out_dir, ti))\n plt.close()\n\n # PR\n plt.figure()\n prec, recall, _ = precision_recall_curve(test_targets_peaks,\n test_preds_ti_flat)\n auprc = average_precision_score(test_targets_peaks, test_preds_ti_flat)\n plt.axhline(\n y=test_targets_peaks.mean(),\n c='black',\n linewidth=1,\n linestyle='--',\n alpha=0.7)\n plt.plot(recall, prec, c='black')\n ax = plt.gca()\n ax.set_xlabel('Recall')\n ax.set_ylabel('Precision')\n ax.text(\n 0.99, 0.95, 'AUPRC %.3f' % auprc,\n horizontalalignment='right') # , fontsize=14)\n ax.grid(True, linestyle=':')\n plt.savefig('%s/pr/t%d.pdf' % (options.out_dir, ti))\n plt.close()\n\n\ndef ben_hoch(p_values):\n \"\"\" Convert the given p-values to q-values using Benjamini-Hochberg FDR. 
\"\"\"\n m = len(p_values)\n\n # attach original indexes to p-values\n p_k = [(p_values[k], k) for k in range(m)]\n\n # sort by p-value\n p_k.sort()\n\n # compute q-value and attach original index to front\n k_q = [(p_k[i][1], p_k[i][0] * m // (i + 1)) for i in range(m)]\n\n # re-sort by original index\n k_q.sort()\n\n # drop original indexes\n q_values = [k_q[k][1] for k in range(m)]\n\n return q_values\n\n\ndef test_peaks(test_preds, test_targets, peaks_out_file):\n # sample every few bins to decrease correlations\n ds_indexes = np.arange(0, test_preds.shape[1], 8)\n # ds_indexes_preds = np.arange(0, test_preds.shape[1], 8)\n # ds_indexes_targets = ds_indexes_preds + (model.hp.batch_buffer // model.hp.target_pool)\n\n aurocs = []\n auprcs = []\n\n peaks_out = open(peaks_out_file, 'w')\n for ti in range(test_targets.shape[2]):\n test_targets_ti = test_targets[:, :, ti]\n\n # subset and flatten\n test_targets_ti_flat = test_targets_ti[:, ds_indexes].flatten(\n ).astype('float32')\n test_preds_ti_flat = test_preds[:, ds_indexes, ti].flatten().astype(\n 'float32')\n\n # call peaks\n test_targets_ti_lambda = np.mean(test_targets_ti_flat)\n test_targets_pvals = 1 - poisson.cdf(\n np.round(test_targets_ti_flat) - 1, mu=test_targets_ti_lambda)\n test_targets_qvals = np.array(ben_hoch(test_targets_pvals))\n test_targets_peaks = test_targets_qvals < 0.01\n\n if test_targets_peaks.sum() == 0:\n aurocs.append(0.5)\n auprcs.append(0)\n\n else:\n # compute prediction accuracy\n aurocs.append(roc_auc_score(test_targets_peaks, test_preds_ti_flat))\n auprcs.append(\n average_precision_score(test_targets_peaks, test_preds_ti_flat))\n\n print('%4d %6d %.5f %.5f' % (ti, test_targets_peaks.sum(),\n aurocs[-1], auprcs[-1]),\n file=peaks_out)\n\n peaks_out.close()\n\n print('Test AUROC: %7.5f' % np.mean(aurocs))\n print('Test AUPRC: %7.5f' % np.mean(auprcs))\n\n\n################################################################################\n# __main__\n################################################################################\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.log2", "pandas.read_csv", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "pandas.DataFrame", "tensorflow.compat.v1.enable_eager_execution", "sklearn.metrics.roc_curve", "matplotlib.pyplot.gca", "sklearn.metrics.precision_recall_curve", "numpy.arange", "sklearn.metrics.roc_auc_score", "matplotlib.pyplot.close", "matplotlib.use", "matplotlib.pyplot.plot", "numpy.where", "sklearn.metrics.average_precision_score", "numpy.round", "numpy.mean" ] ]
kconnour/planetary_disort
[ "ccdb0694d4146ef52003fdcec4deb5f65ec72306" ]
[ "pyrt/untested_utils/utilities/sporadic_particle_size_grid.py" ]
[ "# 3rd-party imports\nimport numpy as np\n\n\nclass SporadicParticleSizes:\n \"\"\" A SporadicParticleSizes object holds altitude info of particle sizes and can interpolate them onto a model\n grid\"\"\"\n\n def __init__(self, altitude_grid, particle_size_grid, model_grid):\n \"\"\"\n Parameters\n ----------\n altitude_grid: np.ndarray\n 1D array of altitudes where particle sizes are known\n particle_size_grid: np.ndarray\n 1D array of particle sizes\n model_grid: ModelGrid\n Model structure to interpolate particle sizes on to\n\n Attributes\n ----------\n altitude_grid: np.ndarray\n The input altitude grid\n particle_size_grid: np.ndarray\n The input particle size grid\n model_grid: ModelGrid\n The input model grid\n regridded_particle_sizes: np.ndarray\n The regridded particle sizes at the altitude layers in model_grid\n \"\"\"\n self.altitude_grid = altitude_grid\n self.particle_size_grid = particle_size_grid\n self.model_grid = model_grid\n\n self.__check_grids_are_physical()\n\n self.regridded_particle_sizes = self.__interp_particle_sizes_to_model_grid()\n\n def __check_grids_are_physical(self):\n self.__check_altitude_grid_is_physical()\n self.__check_particle_size_grid_is_physical()\n self.__check_altitude_grid_size_grid_have_same_shape()\n self.__check_model_grid_is_ModelGrid()\n\n def __check_altitude_grid_is_physical(self):\n pass\n #altitude_checker = ArrayChecker(self.altitude_grid, 'altitude_grid')\n #altitude_checker.check_object_is_array()\n #altitude_checker.check_ndarray_is_numeric()\n #altitude_checker.check_ndarray_is_non_negative()\n #altitude_checker.check_ndarray_is_finite()\n #altitude_checker.check_ndarray_is_1d()\n #altitude_checker.check_1d_array_is_monotonically_decreasing()\n\n def __check_particle_size_grid_is_physical(self):\n pass\n #size_checker = ArrayChecker(self.particle_size_grid, 'particle_size_grid')\n #size_checker.check_object_is_array()\n #size_checker.check_ndarray_is_numeric()\n #size_checker.check_ndarray_is_positive_finite()\n #size_checker.check_ndarray_is_1d()\n\n def __check_altitude_grid_size_grid_have_same_shape(self):\n if self.altitude_grid.shape != self.particle_size_grid.shape:\n raise ValueError('altitude_grid and particle_size_grid must have the same shape')\n\n def __check_model_grid_is_ModelGrid(self):\n pass\n #if not isinstance(self.model_grid, ModelGrid):\n # raise TypeError('model_grid must be an instance of ModelGrid')\n\n def __interp_particle_sizes_to_model_grid(self):\n # I must flip these since numpy.interp expects monotonically increasing xp\n return np.interp(self.model_grid.layer_altitudes, np.flip(self.altitude_grid), np.flip(self.particle_size_grid))\n" ]
[ [ "numpy.flip" ] ]
danzhewuju/candock
[ "ab78aec64086b68ab65c53438332772efd5df260" ]
[ "models/multi_scale_resnet.py" ]
[ "import torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\nclass ResidualBlock(nn.Module):\r\n def __init__(self, inchannel, outchannel,kernel_size,stride=2):\r\n super(ResidualBlock, self).__init__()\r\n self.stride = stride\r\n self.conv = nn.Sequential(\r\n nn.Conv2d(inchannel, outchannel, kernel_size=kernel_size, stride=stride, padding=int((kernel_size-1)/2), bias=False),\r\n nn.BatchNorm2d(outchannel),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(outchannel, outchannel, kernel_size=kernel_size, stride=1, padding=int((kernel_size-1)/2), bias=False),\r\n nn.BatchNorm2d(outchannel)\r\n )\r\n self.shortcut = nn.Sequential(\r\n nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=2, bias=False),\r\n nn.BatchNorm2d(outchannel)\r\n )\r\n\r\n def forward(self, x):\r\n out = self.conv(x)\r\n if self.stride != 1:\r\n out += self.shortcut(x)\r\n else:\r\n out += x\r\n out = F.relu(out,inplace=True)\r\n return out\r\n\r\nclass Route(nn.Module):\r\n def __init__(self, kernel_size):\r\n super(Route, self).__init__()\r\n self.block1 = ResidualBlock(64, 64, kernel_size, stride=1)\r\n self.block2 = ResidualBlock(64, 128, kernel_size)\r\n self.block3 = ResidualBlock(128, 256, kernel_size)\r\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\r\n\r\n def forward(self, x):\r\n x = self.block1(x)\r\n x = self.block2(x)\r\n x = self.block3(x)\r\n x = self.avgpool(x)\r\n return x\r\n\r\nclass Multi_Scale_ResNet(nn.Module):\r\n def __init__(self, inchannel, num_classes):\r\n super(Multi_Scale_ResNet, self).__init__()\r\n self.pre_conv = nn.Sequential(\r\n nn.Conv2d(inchannel, 64, kernel_size=7, stride=2, padding=3, bias=False),\r\n nn.BatchNorm2d(64),\r\n nn.ReLU(inplace=True),\r\n nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\r\n )\r\n self.Route1 = Route(3)\r\n self.Route2 = Route(5)\r\n self.Route3 = Route(7)\r\n self.fc = nn.Linear(256*3, num_classes)\r\n\r\n def forward(self, x):\r\n x = self.pre_conv(x)\r\n x1 = self.Route1(x)\r\n x2 = self.Route2(x)\r\n x3 = self.Route3(x)\r\n x = torch.cat((x1,x2,x3), 1)\r\n x = x.view(x.size(0), -1)\r\n x = self.fc(x)\r\n return x" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.relu", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.cat" ] ]
jefkine/zeta-learn
[ "04388f90093b52f5df2f334c898f3a1224f5a13f" ]
[ "ztlearn/toolkit/optviz.py" ]
[ "import numpy as np\n\nfrom ztlearn.utils import plot_opt_viz\nfrom ztlearn.initializers import InitializeWeights as init\nfrom ztlearn.optimizers import OptimizationFunction as optimize\n\nclass GbOptimization(object):\n\n def __init__(self, optimizer, init_method = 'ones'):\n self.optimizer = optimizer\n self.init_method = init_method\n\n def run(self, f, df, params = 1, epochs = 10, tol = 1e-4, scale_factor = 5, verbose = False):\n self.inputs = init(self.init_method).initialize_weights((params, 1)) * scale_factor\n self.f0 = f(self.inputs) # initial function value (fsolve)\n self.epochs = epochs\n\n self.fsolve = np.zeros((self.epochs, 1))\n self.weights = np.zeros((self.epochs, 1, params))\n\n for i in np.arange(self.epochs):\n self.inputs = optimize(self.optimizer).update(self.inputs, df(self.inputs))\n self.weights[i,:,:] = self.inputs.T\n\n f_solution = f(self.inputs)\n self.fsolve[i,:] = f_solution\n\n if verbose:\n if i%5 == 0:\n print('Epoch-{} weights: {:.20}'.format(i+1, self.npstring(self.inputs.T)))\n print('Epoch-{} eps: {:.20}'.format(i+1, self.npstring(self.f0 - f_solution)))\n # if np.linalg.norm(self.inputs, axis = 0) > tol: break\n\n def npstring(self, np_array):\n return np.array2string(np_array, formatter = {'float_kind':'{0:.4f}'.format})\n\n def plot_3d(self, f):\n \"\"\" plot a 3d visualization \"\"\"\n theta = np.arange(-4.0, 4.0, 0.1)\n\n x_grid = np.meshgrid(theta, theta)\n z = f(x_grid)\n\n weights = self.weights.reshape(self.epochs, -1)\n\n vis_type = ['wireframe', 'contour']\n for vis in vis_type:\n plot_opt_viz(3, x_grid, weights, z, self.fsolve, overlay = vis)\n\n def plot_2d(self, f):\n \"\"\" plot a 2d visualization \"\"\"\n theta = np.expand_dims(np.arange(-5.0, 6.0, 1.0), axis = 1)\n\n y = np.zeros_like(theta)\n for i in np.arange(theta.shape[0]):\n y[i,:] = f(theta[i,:])\n\n weights = self.weights.reshape(self.epochs, -1)\n\n plot_opt_viz(2, theta, y, weights, self.fsolve, overlay = 'plot')\n \n" ]
[ [ "numpy.zeros_like", "numpy.zeros", "numpy.array2string", "numpy.arange", "numpy.meshgrid" ] ]
guenteru/pandas
[ "172ab7aee433fed7780bf96f0c723af284755324" ]
[ "pandas/core/indexes/base.py" ]
[ "from datetime import datetime, timedelta\nimport warnings\nimport operator\nfrom textwrap import dedent\n\nimport numpy as np\nfrom pandas._libs import (lib, index as libindex, tslib as libts,\n algos as libalgos, join as libjoin,\n Timedelta)\nfrom pandas._libs.lib import is_datetime_array\n\nfrom pandas.compat import range, u, set_function_name\nfrom pandas.compat.numpy import function as nv\nfrom pandas import compat\n\nfrom pandas.core.accessor import CachedAccessor\nfrom pandas.core.arrays import ExtensionArray\nfrom pandas.core.dtypes.generic import (\n ABCSeries, ABCDataFrame,\n ABCMultiIndex,\n ABCPeriodIndex, ABCTimedeltaIndex,\n ABCDateOffset)\nfrom pandas.core.dtypes.missing import isna, array_equivalent\nfrom pandas.core.dtypes.common import (\n _ensure_int64,\n _ensure_object,\n _ensure_categorical,\n _ensure_platform_int,\n is_integer,\n is_float,\n is_dtype_equal,\n is_dtype_union_equal,\n is_object_dtype,\n is_categorical_dtype,\n is_interval_dtype,\n is_period_dtype,\n is_bool,\n is_bool_dtype,\n is_signed_integer_dtype,\n is_unsigned_integer_dtype,\n is_integer_dtype, is_float_dtype,\n is_datetime64_any_dtype,\n is_datetime64tz_dtype,\n is_timedelta64_dtype,\n is_hashable,\n needs_i8_conversion,\n is_iterator, is_list_like,\n is_scalar)\n\nfrom pandas.core.base import PandasObject, IndexOpsMixin\nimport pandas.core.common as com\nfrom pandas.core import ops\nfrom pandas.util._decorators import (\n Appender, Substitution, cache_readonly, deprecate_kwarg)\nfrom pandas.core.indexes.frozen import FrozenList\nimport pandas.core.dtypes.concat as _concat\nimport pandas.core.missing as missing\nimport pandas.core.algorithms as algos\nimport pandas.core.sorting as sorting\nfrom pandas.io.formats.printing import pprint_thing\nfrom pandas.core.ops import make_invalid_op\nfrom pandas.core.config import get_option\nfrom pandas.core.strings import StringMethods\n\n\n# simplify\ndefault_pprint = lambda x, max_seq_items=None: \\\n pprint_thing(x, escape_chars=('\\t', '\\r', '\\n'), quote_strings=True,\n max_seq_items=max_seq_items)\n\n__all__ = ['Index']\n\n_unsortable_types = frozenset(('mixed', 'mixed-integer'))\n\n_index_doc_kwargs = dict(klass='Index', inplace='',\n target_klass='Index',\n unique='Index', duplicated='np.ndarray')\n_index_shared_docs = dict()\n\n\ndef _try_get_item(x):\n try:\n return x.item()\n except AttributeError:\n return x\n\n\ndef _make_comparison_op(op, cls):\n def cmp_method(self, other):\n if isinstance(other, (np.ndarray, Index, ABCSeries)):\n if other.ndim > 0 and len(self) != len(other):\n raise ValueError('Lengths must match to compare')\n\n # we may need to directly compare underlying\n # representations\n if needs_i8_conversion(self) and needs_i8_conversion(other):\n return self._evaluate_compare(other, op)\n\n if is_object_dtype(self) and self.nlevels == 1:\n # don't pass MultiIndex\n with np.errstate(all='ignore'):\n result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)\n\n else:\n\n # numpy will show a DeprecationWarning on invalid elementwise\n # comparisons, this will raise in the future\n with warnings.catch_warnings(record=True):\n with np.errstate(all='ignore'):\n result = op(self.values, np.asarray(other))\n\n # technically we could support bool dtyped Index\n # for now just return the indexing array directly\n if is_bool_dtype(result):\n return result\n try:\n return Index(result)\n except TypeError:\n return result\n\n name = '__{name}__'.format(name=op.__name__)\n # TODO: docstring?\n return set_function_name(cmp_method, name, 
cls)\n\n\ndef _make_arithmetic_op(op, cls):\n def index_arithmetic_method(self, other):\n if isinstance(other, (ABCSeries, ABCDataFrame)):\n return NotImplemented\n elif isinstance(other, ABCTimedeltaIndex):\n # Defer to subclass implementation\n return NotImplemented\n\n other = self._validate_for_numeric_binop(other, op)\n\n # handle time-based others\n if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):\n return self._evaluate_with_timedelta_like(other, op)\n elif isinstance(other, (datetime, np.datetime64)):\n return self._evaluate_with_datetime_like(other, op)\n\n values = self.values\n with np.errstate(all='ignore'):\n result = op(values, other)\n\n result = missing.dispatch_missing(op, values, other, result)\n\n attrs = self._get_attributes_dict()\n attrs = self._maybe_update_attributes(attrs)\n if op is divmod:\n result = (Index(result[0], **attrs), Index(result[1], **attrs))\n else:\n result = Index(result, **attrs)\n return result\n\n name = '__{name}__'.format(name=op.__name__)\n # TODO: docstring?\n return set_function_name(index_arithmetic_method, name, cls)\n\n\nclass InvalidIndexError(Exception):\n pass\n\n\n_o_dtype = np.dtype(object)\n_Identity = object\n\n\ndef _new_Index(cls, d):\n \"\"\" This is called upon unpickling, rather than the default which doesn't\n have arguments and breaks __new__\n \"\"\"\n # required for backward compat, because PI can't be instantiated with\n # ordinals through __new__ GH #13277\n if issubclass(cls, ABCPeriodIndex):\n from pandas.core.indexes.period import _new_PeriodIndex\n return _new_PeriodIndex(cls, **d)\n return cls.__new__(cls, **d)\n\n\nclass Index(IndexOpsMixin, PandasObject):\n \"\"\"\n Immutable ndarray implementing an ordered, sliceable set. The basic object\n storing axis labels for all pandas objects\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n dtype : NumPy dtype (default: object)\n copy : bool\n Make a copy of input ndarray\n name : object\n Name to be stored in the index\n tupleize_cols : bool (default: True)\n When True, attempt to create a MultiIndex if possible\n\n Notes\n -----\n An Index instance can **only** contain hashable objects\n\n Examples\n --------\n >>> pd.Index([1, 2, 3])\n Int64Index([1, 2, 3], dtype='int64')\n\n >>> pd.Index(list('abc'))\n Index(['a', 'b', 'c'], dtype='object')\n\n See Also\n ---------\n RangeIndex : Index implementing a monotonic integer range\n CategoricalIndex : Index of :class:`Categorical` s.\n MultiIndex : A multi-level, or hierarchical, Index\n IntervalIndex : an Index of :class:`Interval` s.\n DatetimeIndex, TimedeltaIndex, PeriodIndex\n Int64Index, UInt64Index, Float64Index\n \"\"\"\n # To hand over control to subclasses\n _join_precedence = 1\n\n # Cython methods\n _left_indexer_unique = libjoin.left_join_indexer_unique_object\n _left_indexer = libjoin.left_join_indexer_object\n _inner_indexer = libjoin.inner_join_indexer_object\n _outer_indexer = libjoin.outer_join_indexer_object\n\n _typ = 'index'\n _data = None\n _id = None\n name = None\n asi8 = None\n _comparables = ['name']\n _attributes = ['name']\n _is_numeric_dtype = False\n _can_hold_na = True\n\n # would we like our indexing holder to defer to us\n _defer_to_indexing = False\n\n # prioritize current class for _shallow_copy_with_infer,\n # used to infer integers as datetime-likes\n _infer_as_myclass = False\n\n _engine_type = libindex.ObjectEngine\n\n _accessors = set(['str'])\n\n str = CachedAccessor(\"str\", StringMethods)\n\n def __new__(cls, data=None, dtype=None, copy=False, 
name=None,\n fastpath=False, tupleize_cols=True, **kwargs):\n\n if name is None and hasattr(data, 'name'):\n name = data.name\n\n if fastpath:\n return cls._simple_new(data, name)\n\n from .range import RangeIndex\n\n # range\n if isinstance(data, RangeIndex):\n return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)\n elif isinstance(data, range):\n return RangeIndex.from_range(data, copy=copy, dtype=dtype,\n name=name)\n\n # categorical\n if is_categorical_dtype(data) or is_categorical_dtype(dtype):\n from .category import CategoricalIndex\n return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,\n **kwargs)\n\n # interval\n if is_interval_dtype(data) or is_interval_dtype(dtype):\n from .interval import IntervalIndex\n closed = kwargs.get('closed', None)\n return IntervalIndex(data, dtype=dtype, name=name, copy=copy,\n closed=closed)\n\n # index-like\n elif isinstance(data, (np.ndarray, Index, ABCSeries)):\n\n if (is_datetime64_any_dtype(data) or\n (dtype is not None and is_datetime64_any_dtype(dtype)) or\n 'tz' in kwargs):\n from pandas.core.indexes.datetimes import DatetimeIndex\n result = DatetimeIndex(data, copy=copy, name=name,\n dtype=dtype, **kwargs)\n if dtype is not None and is_dtype_equal(_o_dtype, dtype):\n return Index(result.to_pydatetime(), dtype=_o_dtype)\n else:\n return result\n\n elif (is_timedelta64_dtype(data) or\n (dtype is not None and is_timedelta64_dtype(dtype))):\n from pandas.core.indexes.timedeltas import TimedeltaIndex\n result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)\n if dtype is not None and _o_dtype == dtype:\n return Index(result.to_pytimedelta(), dtype=_o_dtype)\n else:\n return result\n\n if dtype is not None:\n try:\n\n # we need to avoid having numpy coerce\n # things that look like ints/floats to ints unless\n # they are actually ints, e.g. 
'0' and 0.0\n # should not be coerced\n # GH 11836\n if is_integer_dtype(dtype):\n inferred = lib.infer_dtype(data)\n if inferred == 'integer':\n data = np.array(data, copy=copy, dtype=dtype)\n elif inferred in ['floating', 'mixed-integer-float']:\n if isna(data).any():\n raise ValueError('cannot convert float '\n 'NaN to integer')\n\n # If we are actually all equal to integers,\n # then coerce to integer.\n try:\n return cls._try_convert_to_int_index(\n data, copy, name, dtype)\n except ValueError:\n pass\n\n # Return an actual float index.\n from .numeric import Float64Index\n return Float64Index(data, copy=copy, dtype=dtype,\n name=name)\n\n elif inferred == 'string':\n pass\n else:\n data = data.astype(dtype)\n elif is_float_dtype(dtype):\n inferred = lib.infer_dtype(data)\n if inferred == 'string':\n pass\n else:\n data = data.astype(dtype)\n else:\n data = np.array(data, dtype=dtype, copy=copy)\n\n except (TypeError, ValueError) as e:\n msg = str(e)\n if 'cannot convert float' in msg:\n raise\n\n # maybe coerce to a sub-class\n from pandas.core.indexes.period import (\n PeriodIndex, IncompatibleFrequency)\n if isinstance(data, PeriodIndex):\n return PeriodIndex(data, copy=copy, name=name, **kwargs)\n if is_signed_integer_dtype(data.dtype):\n from .numeric import Int64Index\n return Int64Index(data, copy=copy, dtype=dtype, name=name)\n elif is_unsigned_integer_dtype(data.dtype):\n from .numeric import UInt64Index\n return UInt64Index(data, copy=copy, dtype=dtype, name=name)\n elif is_float_dtype(data.dtype):\n from .numeric import Float64Index\n return Float64Index(data, copy=copy, dtype=dtype, name=name)\n elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):\n subarr = data.astype('object')\n else:\n subarr = com._asarray_tuplesafe(data, dtype=object)\n\n # _asarray_tuplesafe does not always copy underlying data,\n # so need to make sure that this happens\n if copy:\n subarr = subarr.copy()\n\n if dtype is None:\n inferred = lib.infer_dtype(subarr)\n if inferred == 'integer':\n try:\n return cls._try_convert_to_int_index(\n subarr, copy, name, dtype)\n except ValueError:\n pass\n\n return Index(subarr, copy=copy,\n dtype=object, name=name)\n elif inferred in ['floating', 'mixed-integer-float']:\n from .numeric import Float64Index\n return Float64Index(subarr, copy=copy, name=name)\n elif inferred == 'interval':\n from .interval import IntervalIndex\n return IntervalIndex(subarr, name=name, copy=copy)\n elif inferred == 'boolean':\n # don't support boolean explicitly ATM\n pass\n elif inferred != 'string':\n if inferred.startswith('datetime'):\n if (lib.is_datetime_with_singletz_array(subarr) or\n 'tz' in kwargs):\n # only when subarr has the same tz\n from pandas.core.indexes.datetimes import (\n DatetimeIndex)\n try:\n return DatetimeIndex(subarr, copy=copy,\n name=name, **kwargs)\n except libts.OutOfBoundsDatetime:\n pass\n\n elif inferred.startswith('timedelta'):\n from pandas.core.indexes.timedeltas import (\n TimedeltaIndex)\n return TimedeltaIndex(subarr, copy=copy, name=name,\n **kwargs)\n elif inferred == 'period':\n try:\n return PeriodIndex(subarr, name=name, **kwargs)\n except IncompatibleFrequency:\n pass\n return cls._simple_new(subarr, name)\n\n elif hasattr(data, '__array__'):\n return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,\n **kwargs)\n elif data is None or is_scalar(data):\n cls._scalar_data_error(data)\n else:\n if tupleize_cols and is_list_like(data) and data:\n if is_iterator(data):\n data = list(data)\n # we must be all tuples, 
otherwise don't construct\n # 10697\n if all(isinstance(e, tuple) for e in data):\n from .multi import MultiIndex\n return MultiIndex.from_tuples(\n data, names=name or kwargs.get('names'))\n # other iterable of some kind\n subarr = com._asarray_tuplesafe(data, dtype=object)\n return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)\n\n \"\"\"\n NOTE for new Index creation:\n\n - _simple_new: It returns new Index with the same type as the caller.\n All metadata (such as name) must be provided by the caller.\n Using _shallow_copy is recommended because it fills these metadata\n unless otherwise specified.\n\n - _shallow_copy: It returns new Index with the same type (using\n _simple_new), but fills the caller's metadata unless otherwise specified. Passed\n kwargs will overwrite corresponding metadata.\n\n - _shallow_copy_with_infer: It returns new Index inferring its type\n from passed values. It fills the caller's metadata unless otherwise specified, the\n same as _shallow_copy.\n\n See each method's docstring.\n \"\"\"\n\n @classmethod\n def _simple_new(cls, values, name=None, dtype=None, **kwargs):\n \"\"\"\n we require that we have a dtype compat for the values\n if we are passed a non-dtype compat, then coerce using the constructor\n\n Must be careful not to recurse.\n \"\"\"\n if not hasattr(values, 'dtype'):\n if (values is None or not len(values)) and dtype is not None:\n values = np.empty(0, dtype=dtype)\n else:\n values = np.array(values, copy=False)\n if is_object_dtype(values):\n values = cls(values, name=name, dtype=dtype,\n **kwargs)._ndarray_values\n\n result = object.__new__(cls)\n result._data = values\n result.name = name\n for k, v in compat.iteritems(kwargs):\n setattr(result, k, v)\n return result._reset_identity()\n\n _index_shared_docs['_shallow_copy'] = \"\"\"\n create a new Index with the same class as the caller, don't copy the\n data, use the same object attributes with passed in attributes taking\n precedence\n\n *this is an internal non-public method*\n\n Parameters\n ----------\n values : the values to create the new Index, optional\n kwargs : updates the default attributes for this Index\n \"\"\"\n\n @Appender(_index_shared_docs['_shallow_copy'])\n def _shallow_copy(self, values=None, **kwargs):\n if values is None:\n values = self.values\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n if not len(values) and 'dtype' not in kwargs:\n attributes['dtype'] = self.dtype\n return self._simple_new(values, **attributes)\n\n def _shallow_copy_with_infer(self, values=None, **kwargs):\n \"\"\"\n create a new Index inferring the class with passed value, don't copy\n the data, use the same object attributes with passed in attributes\n taking precedence\n\n *this is an internal non-public method*\n\n Parameters\n ----------\n values : the values to create the new Index, optional\n kwargs : updates the default attributes for this Index\n \"\"\"\n if values is None:\n values = self.values\n attributes = self._get_attributes_dict()\n attributes.update(kwargs)\n attributes['copy'] = False\n if not len(values) and 'dtype' not in kwargs:\n attributes['dtype'] = self.dtype\n if self._infer_as_myclass:\n try:\n return self._constructor(values, **attributes)\n except (TypeError, ValueError):\n pass\n return Index(values, **attributes)\n\n def _deepcopy_if_needed(self, orig, copy=False):\n \"\"\"\n .. 
versionadded:: 0.19.0\n\n Make a copy of self if data coincides (in memory) with orig.\n Subclasses should override this if self._base is not an ndarray.\n\n Parameters\n ----------\n orig : ndarray\n other ndarray to compare self._data against\n copy : boolean, default False\n when False, do not run any check, just return self\n\n Returns\n -------\n A copy of self if needed, otherwise self : Index\n \"\"\"\n if copy:\n # Retrieve the \"base objects\", i.e. the original memory allocations\n if not isinstance(orig, np.ndarray):\n # orig is a DatetimeIndex\n orig = orig.values\n orig = orig if orig.base is None else orig.base\n new = self._data if self._data.base is None else self._data.base\n if orig is new:\n return self.copy(deep=True)\n\n return self\n\n def _update_inplace(self, result, **kwargs):\n # guard when called from IndexOpsMixin\n raise TypeError(\"Index can't be updated inplace\")\n\n def _sort_levels_monotonic(self):\n \"\"\" compat with MultiIndex \"\"\"\n return self\n\n _index_shared_docs['_get_grouper_for_level'] = \"\"\"\n Get index grouper corresponding to an index level\n\n Parameters\n ----------\n mapper: Group mapping function or None\n Function mapping index values to groups\n level : int or None\n Index level\n\n Returns\n -------\n grouper : Index\n Index of values to group on\n labels : ndarray of int or None\n Array of locations in level_index\n uniques : Index or None\n Index of unique values for level\n \"\"\"\n\n @Appender(_index_shared_docs['_get_grouper_for_level'])\n def _get_grouper_for_level(self, mapper, level=None):\n assert level is None or level == 0\n if mapper is None:\n grouper = self\n else:\n grouper = self.map(mapper)\n\n return grouper, None, None\n\n def is_(self, other):\n \"\"\"\n More flexible, faster check like ``is`` but that works through views\n\n Note: this is *not* the same as ``Index.identical()``, which checks\n that metadata is also the same.\n\n Parameters\n ----------\n other : object\n other object to compare against.\n\n Returns\n -------\n True if both have same underlying data, False otherwise : bool\n \"\"\"\n # use something other than None to be clearer\n return self._id is getattr(\n other, '_id', Ellipsis) and self._id is not None\n\n def _reset_identity(self):\n \"\"\"Initializes or resets ``_id`` attribute with new object\"\"\"\n self._id = _Identity()\n return self\n\n # ndarray compat\n def __len__(self):\n \"\"\"\n return the length of the Index\n \"\"\"\n return len(self._data)\n\n def __array__(self, dtype=None):\n \"\"\" the array interface, return my values \"\"\"\n return self._data.view(np.ndarray)\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc\n \"\"\"\n if is_bool_dtype(result):\n return result\n\n attrs = self._get_attributes_dict()\n attrs = self._maybe_update_attributes(attrs)\n return Index(result, **attrs)\n\n @cache_readonly\n def dtype(self):\n \"\"\" return the dtype object of the underlying data \"\"\"\n return self._data.dtype\n\n @cache_readonly\n def dtype_str(self):\n \"\"\" return the dtype str of the underlying data \"\"\"\n return str(self.dtype)\n\n @property\n def values(self):\n \"\"\" return the underlying data as an ndarray \"\"\"\n return self._data.view(np.ndarray)\n\n @property\n def _values(self):\n # type: () -> Union[ExtensionArray, Index]\n # TODO(EA): remove index types as they become extension arrays\n \"\"\"The best array representation.\n\n This is an ndarray, ExtensionArray, or Index subclass. 
This differs\n from ``_ndarray_values``, which always returns an ndarray.\n\n Both ``_values`` and ``_ndarray_values`` are consistent between\n ``Series`` and ``Index``.\n\n It may differ from the public '.values' method.\n\n index | values | _values | _ndarray_values |\n ----------------- | -------------- -| ----------- | --------------- |\n CategoricalIndex | Categorical | Categorical | codes |\n DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |\n\n For the following, the ``._values`` is currently ``ndarray[object]``,\n but will soon be an ``ExtensionArray``\n\n index | values | _values | _ndarray_values |\n ----------------- | --------------- | ------------ | --------------- |\n PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |\n IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |\n\n See Also\n --------\n values\n _ndarray_values\n \"\"\"\n return self.values\n\n def get_values(self):\n \"\"\"\n Return `Index` data as an `numpy.ndarray`.\n\n Returns\n -------\n numpy.ndarray\n A one-dimensional numpy array of the `Index` values.\n\n See Also\n --------\n Index.values : The attribute that get_values wraps.\n\n Examples\n --------\n Getting the `Index` values of a `DataFrame`:\n\n >>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n ... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])\n >>> df\n A B C\n a 1 2 3\n b 4 5 6\n c 7 8 9\n >>> df.index.get_values()\n array(['a', 'b', 'c'], dtype=object)\n\n Standalone `Index` values:\n\n >>> idx = pd.Index(['1', '2', '3'])\n >>> idx.get_values()\n array(['1', '2', '3'], dtype=object)\n\n `MultiIndex` arrays also have only one dimension:\n\n >>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],\n ... names=('number', 'letter'))\n >>> midx.get_values()\n array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)\n >>> midx.get_values().ndim\n 1\n \"\"\"\n return self.values\n\n @Appender(IndexOpsMixin.memory_usage.__doc__)\n def memory_usage(self, deep=False):\n result = super(Index, self).memory_usage(deep=deep)\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result\n\n # ops compat\n @deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')\n def repeat(self, repeats, *args, **kwargs):\n \"\"\"\n Repeat elements of an Index.\n\n Returns a new index where each element of the current index\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int\n The number of repetitions for each element.\n **kwargs\n Additional keywords have no effect but might be accepted for\n compatibility with numpy.\n\n Returns\n -------\n pandas.Index\n Newly created Index with repeated elements.\n\n See Also\n --------\n Series.repeat : Equivalent function for Series\n numpy.repeat : Underlying implementation\n\n Examples\n --------\n >>> idx = pd.Index([1, 2, 3])\n >>> idx\n Int64Index([1, 2, 3], dtype='int64')\n >>> idx.repeat(2)\n Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')\n >>> idx.repeat(3)\n Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')\n \"\"\"\n nv.validate_repeat(args, kwargs)\n return self._shallow_copy(self._values.repeat(repeats))\n\n _index_shared_docs['where'] = \"\"\"\n .. 
versionadded:: 0.19.0\n\n Return an Index of same shape as self and whose corresponding\n entries are from self where cond is True and otherwise are from\n other.\n\n Parameters\n ----------\n cond : boolean array-like with the same length as self\n other : scalar, or array-like\n \"\"\"\n\n @Appender(_index_shared_docs['where'])\n def where(self, cond, other=None):\n if other is None:\n other = self._na_value\n\n dtype = self.dtype\n values = self.values\n\n if is_bool(other) or is_bool_dtype(other):\n\n # bools force casting\n values = values.astype(object)\n dtype = None\n\n values = np.where(cond, values, other)\n\n if self._is_numeric_dtype and np.any(isna(values)):\n # We can't coerce to the numeric dtype of \"self\" (unless\n # it's float) if there are NaN values in our output.\n dtype = None\n\n return self._shallow_copy_with_infer(values, dtype=dtype)\n\n def ravel(self, order='C'):\n \"\"\"\n return an ndarray of the flattened values of the underlying data\n\n See also\n --------\n numpy.ndarray.ravel\n \"\"\"\n return self._ndarray_values.ravel(order=order)\n\n # construction helpers\n @classmethod\n def _try_convert_to_int_index(cls, data, copy, name, dtype):\n \"\"\"\n Attempt to convert an array of data into an integer index.\n\n Parameters\n ----------\n data : The data to convert.\n copy : Whether to copy the data or not.\n name : The name of the index returned.\n\n Returns\n -------\n int_index : data converted to either an Int64Index or a\n UInt64Index\n\n Raises\n ------\n ValueError if the conversion was not successful.\n \"\"\"\n\n from .numeric import Int64Index, UInt64Index\n if not is_unsigned_integer_dtype(dtype):\n # skip int64 conversion attempt if uint-like dtype is passed, as\n # this could return Int64Index when UInt64Index is what's desired\n try:\n res = data.astype('i8', copy=False)\n if (res == data).all():\n return Int64Index(res, copy=copy, name=name)\n except (OverflowError, TypeError, ValueError):\n pass\n\n # Conversion to int64 failed (possibly due to overflow) or was skipped,\n # so let's try now with uint64.\n try:\n res = data.astype('u8', copy=False)\n if (res == data).all():\n return UInt64Index(res, copy=copy, name=name)\n except (OverflowError, TypeError, ValueError):\n pass\n\n raise ValueError\n\n @classmethod\n def _scalar_data_error(cls, data):\n raise TypeError('{0}(...) must be called with a collection of some '\n 'kind, {1} was passed'.format(cls.__name__,\n repr(data)))\n\n @classmethod\n def _string_data_error(cls, data):\n raise TypeError('String dtype not supported, you may need '\n 'to explicitly cast to a numeric type')\n\n @classmethod\n def _coerce_to_ndarray(cls, data):\n \"\"\"coerces data to ndarray, raises on scalar data. Converts other\n iterables to list first and then to array. 
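(Illustrative aside, not upstream text -- scalar input is rejected via\n ``_scalar_data_error`` above, e.g.\n\n >>> pd.Index(5)\n Traceback (most recent call last):\n ...\n TypeError: Index(...) must be called with a collection of some kind, 5 was passed\n\n end of aside.) 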
Does not touch ndarrays.\n \"\"\"\n\n if not isinstance(data, (np.ndarray, Index)):\n if data is None or is_scalar(data):\n cls._scalar_data_error(data)\n\n # other iterable of some kind\n if not isinstance(data, (ABCSeries, list, tuple)):\n data = list(data)\n data = np.asarray(data)\n return data\n\n def _get_attributes_dict(self):\n \"\"\" return an attributes dict for my class \"\"\"\n return {k: getattr(self, k, None) for k in self._attributes}\n\n def view(self, cls=None):\n\n # we need to see if we are subclassing an\n # index type here\n if cls is not None and not hasattr(cls, '_typ'):\n result = self._data.view(cls)\n else:\n result = self._shallow_copy()\n if isinstance(result, Index):\n result._id = self._id\n return result\n\n def _coerce_scalar_to_index(self, item):\n \"\"\"\n we need to coerce a scalar to a compat for our index type\n\n Parameters\n ----------\n item : scalar item to coerce\n \"\"\"\n dtype = self.dtype\n\n if self._is_numeric_dtype and isna(item):\n # We can't coerce to the numeric dtype of \"self\" (unless\n # it's float) if there are NaN values in our output.\n dtype = None\n\n return Index([item], dtype=dtype, **self._get_attributes_dict())\n\n _index_shared_docs['copy'] = \"\"\"\n Make a copy of this object. Name and dtype sets those attributes on\n the new object.\n\n Parameters\n ----------\n name : string, optional\n deep : boolean, default False\n dtype : numpy dtype or pandas type\n\n Returns\n -------\n copy : Index\n\n Notes\n -----\n In most cases, there should be no functional difference from using\n ``deep``, but if ``deep`` is passed it will attempt to deepcopy.\n \"\"\"\n\n @Appender(_index_shared_docs['copy'])\n def copy(self, name=None, deep=False, dtype=None, **kwargs):\n if deep:\n new_index = self._shallow_copy(self._data.copy())\n else:\n new_index = self._shallow_copy()\n\n names = kwargs.get('names')\n names = self._validate_names(name=name, names=names, deep=deep)\n new_index = new_index.set_names(names)\n\n if dtype:\n new_index = new_index.astype(dtype)\n return new_index\n\n def __copy__(self, **kwargs):\n return self.copy(**kwargs)\n\n def __deepcopy__(self, memo=None):\n if memo is None:\n memo = {}\n return self.copy(deep=True)\n\n def _validate_names(self, name=None, names=None, deep=False):\n \"\"\"\n Handles the quirks of having a singular 'name' parameter for general\n Index and plural 'names' parameter for MultiIndex.\n \"\"\"\n from copy import deepcopy\n if names is not None and name is not None:\n raise TypeError(\"Can only provide one of `names` and `name`\")\n elif names is None and name is None:\n return deepcopy(self.names) if deep else self.names\n elif names is not None:\n if not is_list_like(names):\n raise TypeError(\"Must pass list-like as `names`.\")\n return names\n else:\n if not is_list_like(name):\n return [name]\n return name\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for this object.\n\n Invoked by unicode(df) in py2 only. 
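(Illustrative aside, not upstream text -- a quick sketch of the\n ``copy`` method defined above, repr shown in the 0.23-era style:\n\n >>> pd.Index([1, 2, 3], name='x').copy(name='y')\n Int64Index([1, 2, 3], dtype='int64', name='y')\n\n end of aside.) 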
Yields a Unicode String in both\n py2/py3.\n \"\"\"\n klass = self.__class__.__name__\n data = self._format_data()\n attrs = self._format_attrs()\n space = self._format_space()\n\n prepr = (u(\",%s\") %\n space).join(u(\"%s=%s\") % (k, v) for k, v in attrs)\n\n # no data provided, just attributes\n if data is None:\n data = ''\n\n res = u(\"%s(%s%s)\") % (klass, data, prepr)\n\n return res\n\n def _format_space(self):\n\n # using space here controls if the attributes\n # are line separated or not (the default)\n\n # max_seq_items = get_option('display.max_seq_items')\n # if len(self) > max_seq_items:\n # space = \"\\n%s\" % (' ' * (len(klass) + 1))\n return \" \"\n\n @property\n def _formatter_func(self):\n \"\"\"\n Return the formatted data as a unicode string\n \"\"\"\n return default_pprint\n\n def _format_data(self, name=None):\n \"\"\"\n Return the formatted data as a unicode string\n \"\"\"\n from pandas.io.formats.console import get_console_size\n from pandas.io.formats.format import _get_adjustment\n display_width, _ = get_console_size()\n if display_width is None:\n display_width = get_option('display.width') or 80\n if name is None:\n name = self.__class__.__name__\n\n space1 = \"\\n%s\" % (' ' * (len(name) + 1))\n space2 = \"\\n%s\" % (' ' * (len(name) + 2))\n\n n = len(self)\n sep = ','\n max_seq_items = get_option('display.max_seq_items') or n\n formatter = self._formatter_func\n\n # do we want to justify (only do so for non-objects)\n is_justify = not (self.inferred_type in ('string', 'unicode') or\n (self.inferred_type == 'categorical' and\n is_object_dtype(self.categories)))\n\n # are we a truncated display\n is_truncated = n > max_seq_items\n\n # adj can optionally handle unicode eastern asian width\n adj = _get_adjustment()\n\n def _extend_line(s, line, value, display_width, next_line_prefix):\n\n if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=\n display_width):\n s += line.rstrip()\n line = next_line_prefix\n line += value\n return s, line\n\n def best_len(values):\n if values:\n return max(adj.len(x) for x in values)\n else:\n return 0\n\n if n == 0:\n summary = '[], '\n elif n == 1:\n first = formatter(self[0])\n summary = '[%s], ' % first\n elif n == 2:\n first = formatter(self[0])\n last = formatter(self[-1])\n summary = '[%s, %s], ' % (first, last)\n else:\n\n if n > max_seq_items:\n n = min(max_seq_items // 2, 10)\n head = [formatter(x) for x in self[:n]]\n tail = [formatter(x) for x in self[-n:]]\n else:\n head = []\n tail = [formatter(x) for x in self]\n\n # adjust all values to max length if needed\n if is_justify:\n\n # however, if we are not truncated and we are only a single\n # line, then don't justify\n if (is_truncated or\n not (len(', '.join(head)) < display_width and\n len(', '.join(tail)) < display_width)):\n max_len = max(best_len(head), best_len(tail))\n head = [x.rjust(max_len) for x in head]\n tail = [x.rjust(max_len) for x in tail]\n\n summary = \"\"\n line = space2\n\n for i in range(len(head)):\n word = head[i] + sep + ' '\n summary, line = _extend_line(summary, line, word,\n display_width, space2)\n\n if is_truncated:\n # remove trailing space of last line\n summary += line.rstrip() + space2 + '...'\n line = space2\n\n for i in range(len(tail) - 1):\n word = tail[i] + sep + ' '\n summary, line = _extend_line(summary, line, word,\n display_width, space2)\n\n # last value: no sep added + 1 space of width used for trailing ','\n summary, line = _extend_line(summary, line, tail[-1],\n display_width - 2, space2)\n summary += line\n summary += 
'],'\n\n if len(summary) > (display_width):\n summary += space1\n else: # one row\n summary += ' '\n\n # remove initial space\n summary = '[' + summary[len(space2):]\n\n return summary\n\n def _format_attrs(self):\n \"\"\"\n Return a list of tuples of the (attr,formatted_value)\n \"\"\"\n attrs = []\n attrs.append(('dtype', \"'%s'\" % self.dtype))\n if self.name is not None:\n attrs.append(('name', default_pprint(self.name)))\n max_seq_items = get_option('display.max_seq_items') or len(self)\n if len(self) > max_seq_items:\n attrs.append(('length', len(self)))\n return attrs\n\n def to_series(self, index=None, name=None):\n \"\"\"\n Create a Series with both index and values equal to the index keys\n useful with map for returning an indexer based on an index\n\n Parameters\n ----------\n index : Index, optional\n index of resulting Series. If None, defaults to original index\n name : string, optional\n name of resulting Series. If None, defaults to name of original\n index\n\n Returns\n -------\n Series : dtype will be based on the type of the Index values.\n \"\"\"\n\n from pandas import Series\n\n if index is None:\n index = self._shallow_copy()\n if name is None:\n name = self.name\n\n return Series(self._to_embed(), index=index, name=name)\n\n def to_frame(self, index=True):\n \"\"\"\n Create a DataFrame with a column containing the Index.\n\n .. versionadded:: 0.21.0\n\n Parameters\n ----------\n index : boolean, default True\n Set the index of the returned DataFrame as the original Index.\n\n Returns\n -------\n DataFrame\n DataFrame containing the original Index data.\n\n See Also\n --------\n Index.to_series : Convert an Index to a Series.\n Series.to_frame : Convert Series to DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')\n >>> idx.to_frame()\n animal\n animal\n Ant Ant\n Bear Bear\n Cow Cow\n\n By default, the original Index is reused. To enforce a new Index:\n\n >>> idx.to_frame(index=False)\n animal\n 0 Ant\n 1 Bear\n 2 Cow\n \"\"\"\n\n from pandas import DataFrame\n result = DataFrame(self._shallow_copy(), columns=[self.name or 0])\n\n if index:\n result.index = self\n return result\n\n def _to_embed(self, keep_tz=False, dtype=None):\n \"\"\"\n *this is an internal non-public method*\n\n return an array repr of this object, potentially casting to object\n\n \"\"\"\n if dtype is not None:\n return self.astype(dtype)._to_embed(keep_tz=keep_tz)\n\n return self.values.copy()\n\n _index_shared_docs['astype'] = \"\"\"\n Create an Index with values cast to dtypes. The class of a new Index\n is determined by dtype. When conversion is impossible, a ValueError\n exception is raised.\n\n Parameters\n ----------\n dtype : numpy dtype or pandas type\n copy : bool, default True\n By default, astype always returns a newly allocated object.\n If copy is set to False and internal requirements on dtype are\n satisfied, the original data is used to create a new Index\n or the original Index is returned.\n\n .. 
versionadded:: 0.19.0\n\n \"\"\"\n\n @Appender(_index_shared_docs['astype'])\n def astype(self, dtype, copy=True):\n if is_dtype_equal(self.dtype, dtype):\n return self.copy() if copy else self\n elif is_categorical_dtype(dtype):\n from .category import CategoricalIndex\n return CategoricalIndex(self.values, name=self.name, dtype=dtype,\n copy=copy)\n try:\n return Index(self.values.astype(dtype, copy=copy), name=self.name,\n dtype=dtype)\n except (TypeError, ValueError):\n msg = 'Cannot cast {name} to dtype {dtype}'\n raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))\n\n def _to_safe_for_reshape(self):\n \"\"\" convert to object if we are a categorical \"\"\"\n return self\n\n def _assert_can_do_setop(self, other):\n if not is_list_like(other):\n raise TypeError('Input must be Index or array-like')\n return True\n\n def _convert_can_do_setop(self, other):\n if not isinstance(other, Index):\n other = Index(other, name=self.name)\n result_name = self.name\n else:\n result_name = self.name if self.name == other.name else None\n return other, result_name\n\n def _convert_for_op(self, value):\n \"\"\" Convert value to be insertable to ndarray \"\"\"\n return value\n\n def _assert_can_do_op(self, value):\n \"\"\" Check value is valid for scalar op \"\"\"\n if not is_scalar(value):\n msg = \"'value' must be a scalar, passed: {0}\"\n raise TypeError(msg.format(type(value).__name__))\n\n @property\n def nlevels(self):\n return 1\n\n def _get_names(self):\n return FrozenList((self.name, ))\n\n def _set_names(self, values, level=None):\n \"\"\"\n Set new names on index. Each name has to be a hashable type.\n\n Parameters\n ----------\n values : str or sequence\n name(s) to set\n level : int, level name, or sequence of int/level names (default None)\n If the index is a MultiIndex (hierarchical), level(s) to set (None\n for all levels). Otherwise level must be None\n\n Raises\n ------\n TypeError if each name is not hashable.\n \"\"\"\n if not is_list_like(values):\n raise ValueError('Names must be a list-like')\n if len(values) != 1:\n raise ValueError('Length of new names must be 1, got %d' %\n len(values))\n\n # GH 20527\n # All items in 'name' need to be hashable:\n for name in values:\n if not is_hashable(name):\n raise TypeError('{}.name must be a hashable type'\n .format(self.__class__.__name__))\n self.name = values[0]\n\n names = property(fset=_set_names, fget=_get_names)\n\n def set_names(self, names, level=None, inplace=False):\n \"\"\"\n Set new names on index. Defaults to returning new index.\n\n Parameters\n ----------\n names : str or sequence\n name(s) to set\n level : int, level name, or sequence of int/level names (default None)\n If the index is a MultiIndex (hierarchical), level(s) to set (None\n for all levels). 
Otherwise level must be None\n inplace : bool\n if True, mutates in place\n\n Returns\n -------\n new index (of same type and class...etc) [if inplace, returns None]\n\n Examples\n --------\n >>> Index([1, 2, 3, 4]).set_names('foo')\n Int64Index([1, 2, 3, 4], dtype='int64', name='foo')\n >>> Index([1, 2, 3, 4]).set_names(['foo'])\n Int64Index([1, 2, 3, 4], dtype='int64', name='foo')\n >>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),\n (2, u'one'), (2, u'two')],\n names=['foo', 'bar'])\n >>> idx.set_names(['baz', 'quz'])\n MultiIndex(levels=[[1, 2], [u'one', u'two']],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[u'baz', u'quz'])\n >>> idx.set_names('baz', level=0)\n MultiIndex(levels=[[1, 2], [u'one', u'two']],\n labels=[[0, 0, 1, 1], [0, 1, 0, 1]],\n names=[u'baz', u'bar'])\n \"\"\"\n\n if level is not None and self.nlevels == 1:\n raise ValueError('Level must be None for non-MultiIndex')\n\n if level is not None and not is_list_like(level) and is_list_like(\n names):\n raise TypeError(\"Names must be a string\")\n\n if not is_list_like(names) and level is None and self.nlevels > 1:\n raise TypeError(\"Must pass list-like as `names`.\")\n\n if not is_list_like(names):\n names = [names]\n if level is not None and not is_list_like(level):\n level = [level]\n\n if inplace:\n idx = self\n else:\n idx = self._shallow_copy()\n idx._set_names(names, level=level)\n if not inplace:\n return idx\n\n def rename(self, name, inplace=False):\n \"\"\"\n Set new names on index. Defaults to returning new index.\n\n Parameters\n ----------\n name : str or list\n name to set\n inplace : bool\n if True, mutates in place\n\n Returns\n -------\n new index (of same type and class...etc) [if inplace, returns None]\n \"\"\"\n return self.set_names([name], inplace=inplace)\n\n @property\n def _has_complex_internals(self):\n # to disable groupby tricks in MultiIndex\n return False\n\n def _summary(self, name=None):\n \"\"\"\n Return a summarized representation\n\n Parameters\n ----------\n name : str\n name to use in the summary representation\n\n Returns\n -------\n String with a summarized representation of the index\n \"\"\"\n if len(self) > 0:\n head = self[0]\n if (hasattr(head, 'format') and\n not isinstance(head, compat.string_types)):\n head = head.format()\n tail = self[-1]\n if (hasattr(tail, 'format') and\n not isinstance(tail, compat.string_types)):\n tail = tail.format()\n index_summary = ', %s to %s' % (pprint_thing(head),\n pprint_thing(tail))\n else:\n index_summary = ''\n\n if name is None:\n name = type(self).__name__\n return '%s: %s entries%s' % (name, len(self), index_summary)\n\n def summary(self, name=None):\n \"\"\"\n Return a summarized representation\n .. 
deprecated:: 0.23.0\n \"\"\"\n warnings.warn(\"'summary' is deprecated and will be removed in a \"\n \"future version.\", FutureWarning, stacklevel=2)\n return self._summary(name)\n\n def _mpl_repr(self):\n # how to represent ourselves to matplotlib\n return self.values\n\n _na_value = np.nan\n \"\"\"The expected NA value to use with this index.\"\"\"\n\n # introspection\n @property\n def is_monotonic(self):\n \"\"\" alias for is_monotonic_increasing (deprecated) \"\"\"\n return self.is_monotonic_increasing\n\n @property\n def is_monotonic_increasing(self):\n \"\"\"\n return if the index is monotonic increasing (only equal or\n increasing) values.\n\n Examples\n --------\n >>> Index([1, 2, 3]).is_monotonic_increasing\n True\n >>> Index([1, 2, 2]).is_monotonic_increasing\n True\n >>> Index([1, 3, 2]).is_monotonic_increasing\n False\n \"\"\"\n return self._engine.is_monotonic_increasing\n\n @property\n def is_monotonic_decreasing(self):\n \"\"\"\n return if the index is monotonic decreasing (only equal or\n decreasing) values.\n\n Examples\n --------\n >>> Index([3, 2, 1]).is_monotonic_decreasing\n True\n >>> Index([3, 2, 2]).is_monotonic_decreasing\n True\n >>> Index([3, 1, 2]).is_monotonic_decreasing\n False\n \"\"\"\n return self._engine.is_monotonic_decreasing\n\n @property\n def _is_strictly_monotonic_increasing(self):\n \"\"\"return if the index is strictly monotonic increasing\n (only increasing) values\n\n Examples\n --------\n >>> Index([1, 2, 3])._is_strictly_monotonic_increasing\n True\n >>> Index([1, 2, 2])._is_strictly_monotonic_increasing\n False\n >>> Index([1, 3, 2])._is_strictly_monotonic_increasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_increasing\n\n @property\n def _is_strictly_monotonic_decreasing(self):\n \"\"\"return if the index is strictly monotonic decreasing\n (only decreasing) values\n\n Examples\n --------\n >>> Index([3, 2, 1])._is_strictly_monotonic_decreasing\n True\n >>> Index([3, 2, 2])._is_strictly_monotonic_decreasing\n False\n >>> Index([3, 1, 2])._is_strictly_monotonic_decreasing\n False\n \"\"\"\n return self.is_unique and self.is_monotonic_decreasing\n\n def is_lexsorted_for_tuple(self, tup):\n return True\n\n @cache_readonly\n def is_unique(self):\n \"\"\" return if the index has unique values \"\"\"\n return self._engine.is_unique\n\n @property\n def has_duplicates(self):\n return not self.is_unique\n\n def is_boolean(self):\n return self.inferred_type in ['boolean']\n\n def is_integer(self):\n return self.inferred_type in ['integer']\n\n def is_floating(self):\n return self.inferred_type in ['floating', 'mixed-integer-float']\n\n def is_numeric(self):\n return self.inferred_type in ['integer', 'floating']\n\n def is_object(self):\n return is_object_dtype(self.dtype)\n\n def is_categorical(self):\n \"\"\"\n Check if the Index holds categorical data.\n\n Returns\n -------\n boolean\n True if the Index is categorical.\n\n See Also\n --------\n CategoricalIndex : Index for categorical data.\n\n Examples\n --------\n >>> idx = pd.Index([\"Watermelon\", \"Orange\", \"Apple\",\n ... 
\"Watermelon\"]).astype(\"category\")\n >>> idx.is_categorical()\n True\n\n >>> idx = pd.Index([1, 3, 5, 7])\n >>> idx.is_categorical()\n False\n\n >>> s = pd.Series([\"Peter\", \"Victor\", \"Elisabeth\", \"Mar\"])\n >>> s\n 0 Peter\n 1 Victor\n 2 Elisabeth\n 3 Mar\n dtype: object\n >>> s.index.is_categorical()\n False\n \"\"\"\n return self.inferred_type in ['categorical']\n\n def is_interval(self):\n return self.inferred_type in ['interval']\n\n def is_mixed(self):\n return self.inferred_type in ['mixed']\n\n def holds_integer(self):\n return self.inferred_type in ['integer', 'mixed-integer']\n\n _index_shared_docs['_convert_scalar_indexer'] = \"\"\"\n Convert a scalar indexer.\n\n Parameters\n ----------\n key : label of the slice bound\n kind : {'ix', 'loc', 'getitem', 'iloc'} or None\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_scalar_indexer'])\n def _convert_scalar_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n if kind == 'iloc':\n return self._validate_indexer('positional', key, kind)\n\n if len(self) and not isinstance(self, ABCMultiIndex,):\n\n # we can raise here if we are definitive that this\n # is positional indexing (eg. .ix on with a float)\n # or label indexing if we are using a type able\n # to be represented in the index\n\n if kind in ['getitem', 'ix'] and is_float(key):\n if not self.is_floating():\n return self._invalid_indexer('label', key)\n\n elif kind in ['loc'] and is_float(key):\n\n # we want to raise KeyError on string/mixed here\n # technically we *could* raise a TypeError\n # on anything but mixed though\n if self.inferred_type not in ['floating',\n 'mixed-integer-float',\n 'string',\n 'unicode',\n 'mixed']:\n return self._invalid_indexer('label', key)\n\n elif kind in ['loc'] and is_integer(key):\n if not self.holds_integer():\n return self._invalid_indexer('label', key)\n\n return key\n\n _index_shared_docs['_convert_slice_indexer'] = \"\"\"\n Convert a slice indexer.\n\n By definition, these are labels unless 'iloc' is passed in.\n Floats are not allowed as the start, step, or stop of the slice.\n\n Parameters\n ----------\n key : label of the slice bound\n kind : {'ix', 'loc', 'getitem', 'iloc'} or None\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_slice_indexer'])\n def _convert_slice_indexer(self, key, kind=None):\n assert kind in ['ix', 'loc', 'getitem', 'iloc', None]\n\n # if we are not a slice, then we are done\n if not isinstance(key, slice):\n return key\n\n # validate iloc\n if kind == 'iloc':\n return slice(self._validate_indexer('slice', key.start, kind),\n self._validate_indexer('slice', key.stop, kind),\n self._validate_indexer('slice', key.step, kind))\n\n # potentially cast the bounds to integers\n start, stop, step = key.start, key.stop, key.step\n\n # figure out if this is a positional indexer\n def is_int(v):\n return v is None or is_integer(v)\n\n is_null_slicer = start is None and stop is None\n is_index_slice = is_int(start) and is_int(stop)\n is_positional = is_index_slice and not self.is_integer()\n\n if kind == 'getitem':\n \"\"\"\n called from the getitem slicers, validate that we are in fact\n integers\n \"\"\"\n if self.is_integer() or is_index_slice:\n return slice(self._validate_indexer('slice', key.start, kind),\n self._validate_indexer('slice', key.stop, kind),\n self._validate_indexer('slice', key.step, kind))\n\n # convert the slice to an indexer here\n\n # if we are mixed and have integers\n try:\n if is_positional and self.is_mixed():\n # TODO: i, j are not used 
anywhere\n if start is not None:\n i = self.get_loc(start) # noqa\n if stop is not None:\n j = self.get_loc(stop) # noqa\n is_positional = False\n except KeyError:\n if self.inferred_type == 'mixed-integer-float':\n raise\n\n if is_null_slicer:\n indexer = key\n elif is_positional:\n indexer = key\n else:\n try:\n indexer = self.slice_indexer(start, stop, step, kind=kind)\n except Exception:\n if is_index_slice:\n if self.is_integer():\n raise\n else:\n indexer = key\n else:\n raise\n\n return indexer\n\n def _convert_listlike_indexer(self, keyarr, kind=None):\n \"\"\"\n Parameters\n ----------\n keyarr : list-like\n Indexer to convert.\n\n Returns\n -------\n tuple (indexer, keyarr)\n indexer is an ndarray or None if cannot convert\n keyarr are tuple-safe keys\n \"\"\"\n if isinstance(keyarr, Index):\n keyarr = self._convert_index_indexer(keyarr)\n else:\n keyarr = self._convert_arr_indexer(keyarr)\n\n indexer = self._convert_list_indexer(keyarr, kind=kind)\n return indexer, keyarr\n\n _index_shared_docs['_convert_arr_indexer'] = \"\"\"\n Convert an array-like indexer to the appropriate dtype.\n\n Parameters\n ----------\n keyarr : array-like\n Indexer to convert.\n\n Returns\n -------\n converted_keyarr : array-like\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_arr_indexer'])\n def _convert_arr_indexer(self, keyarr):\n keyarr = com._asarray_tuplesafe(keyarr)\n return keyarr\n\n _index_shared_docs['_convert_index_indexer'] = \"\"\"\n Convert an Index indexer to the appropriate dtype.\n\n Parameters\n ----------\n keyarr : Index (or sub-class)\n Indexer to convert.\n\n Returns\n -------\n converted_keyarr : Index (or sub-class)\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_index_indexer'])\n def _convert_index_indexer(self, keyarr):\n return keyarr\n\n _index_shared_docs['_convert_list_indexer'] = \"\"\"\n Convert a list-like indexer to the appropriate dtype.\n\n Parameters\n ----------\n keyarr : Index (or sub-class)\n Indexer to convert.\n kind : iloc, ix, loc, optional\n\n Returns\n -------\n positional indexer or None\n \"\"\"\n\n @Appender(_index_shared_docs['_convert_list_indexer'])\n def _convert_list_indexer(self, keyarr, kind=None):\n if (kind in [None, 'iloc', 'ix'] and\n is_integer_dtype(keyarr) and not self.is_floating() and\n not isinstance(keyarr, ABCPeriodIndex)):\n\n if self.inferred_type == 'mixed-integer':\n indexer = self.get_indexer(keyarr)\n if (indexer >= 0).all():\n return indexer\n # missing values are flagged as -1 by get_indexer and negative\n # indices are already converted to positive indices in the\n # above if-statement, so the negative flags are changed to\n # values outside the range of indices so as to trigger an\n # IndexError in maybe_convert_indices\n indexer[indexer < 0] = len(self)\n from pandas.core.indexing import maybe_convert_indices\n return maybe_convert_indices(indexer, len(self))\n\n elif not self.inferred_type == 'integer':\n keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)\n return keyarr\n\n return None\n\n def _invalid_indexer(self, form, key):\n \"\"\" consistent invalid indexer message \"\"\"\n raise TypeError(\"cannot do {form} indexing on {klass} with these \"\n \"indexers [{key}] of {kind}\".format(\n form=form, klass=type(self), key=key,\n kind=type(key)))\n\n def get_duplicates(self):\n \"\"\"\n Extract duplicated index elements.\n\n Returns a sorted list of index elements which appear more than once in\n the index.\n\n .. 
deprecated:: 0.23.0\n Use idx[idx.duplicated()].unique() instead\n\n Returns\n -------\n array-like\n List of duplicated indexes.\n\n See Also\n --------\n Index.duplicated : Return boolean array denoting duplicates.\n Index.drop_duplicates : Return Index with duplicates removed.\n\n Examples\n --------\n\n Works on different types of Index.\n\n >>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()\n [2, 3]\n >>> pd.Index([1., 2., 2., 3., 3., 3., 4.]).get_duplicates()\n [2.0, 3.0]\n >>> pd.Index(['a', 'b', 'b', 'c', 'c', 'c', 'd']).get_duplicates()\n ['b', 'c']\n\n Note that for a DatetimeIndex, it does not return a list but a new\n DatetimeIndex:\n\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',\n ... '2018-01-03', '2018-01-04', '2018-01-04'],\n ... format='%Y-%m-%d')\n >>> pd.Index(dates).get_duplicates()\n DatetimeIndex(['2018-01-03', '2018-01-04'],\n dtype='datetime64[ns]', freq=None)\n\n Sorts duplicated elements even when indexes are unordered.\n\n >>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()\n [2, 3]\n\n Return empty array-like structure when all elements are unique.\n\n >>> pd.Index([1, 2, 3, 4]).get_duplicates()\n []\n >>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],\n ... format='%Y-%m-%d')\n >>> pd.Index(dates).get_duplicates()\n DatetimeIndex([], dtype='datetime64[ns]', freq=None)\n \"\"\"\n warnings.warn(\"'get_duplicates' is deprecated and will be removed in \"\n \"a future release. You can use \"\n \"idx[idx.duplicated()].unique() instead\",\n FutureWarning, stacklevel=2)\n\n return self[self.duplicated()].unique()\n\n def _cleanup(self):\n self._engine.clear_mapping()\n\n @cache_readonly\n def _constructor(self):\n return type(self)\n\n @cache_readonly\n def _engine(self):\n # property, for now, slow to look up\n return self._engine_type(lambda: self._ndarray_values, len(self))\n\n def _validate_index_level(self, level):\n \"\"\"\n Validate index level.\n\n For single-level Index getting level number is a no-op, but some\n verification must be done like in MultiIndex.\n\n \"\"\"\n if isinstance(level, int):\n if level < 0 and level != -1:\n raise IndexError(\"Too many levels: Index has only 1 level,\"\n \" %d is not a valid level number\" % (level, ))\n elif level > 0:\n raise IndexError(\"Too many levels:\"\n \" Index has only 1 level, not %d\" %\n (level + 1))\n elif level != self.name:\n raise KeyError('Level %s must be same as name (%s)' %\n (level, self.name))\n\n def _get_level_number(self, level):\n self._validate_index_level(level)\n return 0\n\n @cache_readonly\n def inferred_type(self):\n \"\"\" return a string of the type inferred from the values \"\"\"\n return lib.infer_dtype(self)\n\n def _is_memory_usage_qualified(self):\n \"\"\" return a boolean if we need a qualified .info display \"\"\"\n return self.is_object()\n\n def is_type_compatible(self, kind):\n return kind == self.inferred_type\n\n @cache_readonly\n def is_all_dates(self):\n if self._data is None:\n return False\n return is_datetime_array(_ensure_object(self.values))\n\n def __reduce__(self):\n d = dict(data=self._data)\n d.update(self._get_attributes_dict())\n return _new_Index, (self.__class__, d), None\n\n def __setstate__(self, state):\n \"\"\"Necessary for making this object picklable\"\"\"\n\n if isinstance(state, dict):\n self._data = state.pop('data')\n for k, v in compat.iteritems(state):\n setattr(self, k, v)\n\n elif isinstance(state, tuple):\n\n if len(state) == 2:\n nd_state, own_state = state\n data = np.empty(nd_state[1], 
dtype=nd_state[2])\n np.ndarray.__setstate__(data, nd_state)\n self.name = own_state[0]\n\n else: # pragma: no cover\n data = np.empty(state)\n np.ndarray.__setstate__(data, state)\n\n self._data = data\n self._reset_identity()\n else:\n raise Exception(\"invalid pickle state\")\n\n _unpickle_compat = __setstate__\n\n def __nonzero__(self):\n raise ValueError(\"The truth value of a {0} is ambiguous. \"\n \"Use a.empty, a.bool(), a.item(), a.any() or a.all().\"\n .format(self.__class__.__name__))\n\n __bool__ = __nonzero__\n\n _index_shared_docs['__contains__'] = \"\"\"\n return a boolean if this key is IN the index\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n boolean\n \"\"\"\n\n @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)\n def __contains__(self, key):\n hash(key)\n try:\n return key in self._engine\n except (OverflowError, TypeError, ValueError):\n return False\n\n _index_shared_docs['contains'] = \"\"\"\n return a boolean if this key is IN the index\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n boolean\n \"\"\"\n\n @Appender(_index_shared_docs['contains'] % _index_doc_kwargs)\n def contains(self, key):\n hash(key)\n try:\n return key in self._engine\n except (TypeError, ValueError):\n return False\n\n def __hash__(self):\n raise TypeError(\"unhashable type: %r\" % type(self).__name__)\n\n def __setitem__(self, key, value):\n raise TypeError(\"Index does not support mutable operations\")\n\n def __getitem__(self, key):\n \"\"\"\n Override numpy.ndarray's __getitem__ method to work as desired.\n\n This function adds lists and Series as valid boolean indexers\n (ndarrays only supports ndarray with dtype=bool).\n\n If resulting ndim != 1, plain ndarray is returned instead of\n corresponding `Index` subclass.\n\n \"\"\"\n # There's no custom logic to be implemented in __getslice__, so it's\n # not overloaded intentionally.\n getitem = self._data.__getitem__\n promote = self._shallow_copy\n\n if is_scalar(key):\n return getitem(key)\n\n if isinstance(key, slice):\n # This case is separated from the conditional above to avoid\n # pessimization of basic indexing.\n return promote(getitem(key))\n\n if com.is_bool_indexer(key):\n key = np.asarray(key)\n\n key = com._values_from_object(key)\n result = getitem(key)\n if not is_scalar(result):\n return promote(result)\n else:\n return result\n\n def _can_hold_identifiers_and_holds_name(self, name):\n \"\"\"\n Faster check for ``name in self`` when we know `name` is a Python\n identifier (e.g. in NDFrame.__getattr__, which hits this to support\n . key lookup). 
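(Illustrative aside, not upstream text -- the ``__contains__`` defined\n just above swallows lookup errors, so membership tests always return\n a plain boolean:\n\n >>> 'a' in pd.Index([1, 2, 3])\n False\n\n end of aside.) 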
For indexes that can't hold identifiers (everything\n but object & categorical) we just return False.\n\n https://github.com/pandas-dev/pandas/issues/19764\n \"\"\"\n if self.is_object() or self.is_categorical():\n return name in self\n return False\n\n def append(self, other):\n \"\"\"\n Append a collection of Index options together\n\n Parameters\n ----------\n other : Index or list/tuple of indices\n\n Returns\n -------\n appended : Index\n \"\"\"\n\n to_concat = [self]\n\n if isinstance(other, (list, tuple)):\n to_concat = to_concat + list(other)\n else:\n to_concat.append(other)\n\n for obj in to_concat:\n if not isinstance(obj, Index):\n raise TypeError('all inputs must be Index')\n\n names = {obj.name for obj in to_concat}\n name = None if len(names) > 1 else self.name\n\n return self._concat(to_concat, name)\n\n def _concat(self, to_concat, name):\n\n typs = _concat.get_dtype_kinds(to_concat)\n\n if len(typs) == 1:\n return self._concat_same_dtype(to_concat, name=name)\n return _concat._concat_index_asobject(to_concat, name=name)\n\n def _concat_same_dtype(self, to_concat, name):\n \"\"\"\n Concatenate to_concat which has the same class\n \"\"\"\n # must be overridden in specific classes\n return _concat._concat_index_asobject(to_concat, name)\n\n _index_shared_docs['take'] = \"\"\"\n return a new %(klass)s of the values selected by the indices\n\n For internal compatibility with numpy arrays.\n\n Parameters\n ----------\n indices : list\n Indices to be taken\n axis : int, optional\n The axis over which to select values, always 0.\n allow_fill : bool, default True\n fill_value : bool, default None\n If allow_fill=True and fill_value is not None, indices specified by\n -1 is regarded as NA. If Index doesn't hold NA, raise ValueError\n\n See also\n --------\n numpy.ndarray.take\n \"\"\"\n\n @Appender(_index_shared_docs['take'] % _index_doc_kwargs)\n def take(self, indices, axis=0, allow_fill=True,\n fill_value=None, **kwargs):\n if kwargs:\n nv.validate_take(tuple(), kwargs)\n indices = _ensure_platform_int(indices)\n if self._can_hold_na:\n taken = self._assert_take_fillable(self.values, indices,\n allow_fill=allow_fill,\n fill_value=fill_value,\n na_value=self._na_value)\n else:\n if allow_fill and fill_value is not None:\n msg = 'Unable to fill values because {0} cannot contain NA'\n raise ValueError(msg.format(self.__class__.__name__))\n taken = self.values.take(indices)\n return self._shallow_copy(taken)\n\n def _assert_take_fillable(self, values, indices, allow_fill=True,\n fill_value=None, na_value=np.nan):\n \"\"\" Internal method to handle NA filling of take \"\"\"\n indices = _ensure_platform_int(indices)\n\n # only fill if we are passing a non-None fill_value\n if allow_fill and fill_value is not None:\n if (indices < -1).any():\n msg = ('When allow_fill=True and fill_value is not None, '\n 'all indices must be >= -1')\n raise ValueError(msg)\n taken = algos.take(values,\n indices,\n allow_fill=allow_fill,\n fill_value=na_value)\n else:\n taken = values.take(indices)\n return taken\n\n @cache_readonly\n def _isnan(self):\n \"\"\" return if each value is nan\"\"\"\n if self._can_hold_na:\n return isna(self)\n else:\n # shouldn't reach to this condition by checking hasnans beforehand\n values = np.empty(len(self), dtype=np.bool_)\n values.fill(False)\n return values\n\n @cache_readonly\n def _nan_idxs(self):\n if self._can_hold_na:\n w, = self._isnan.nonzero()\n return w\n else:\n return np.array([], dtype=np.int64)\n\n @cache_readonly\n def hasnans(self):\n \"\"\" return if 
I have any nans; enables various perf speedups \"\"\"\n if self._can_hold_na:\n return self._isnan.any()\n else:\n return False\n\n def isna(self):\n \"\"\"\n Detect missing values.\n\n Return a boolean same-sized object indicating if the values are NA.\n NA values, such as ``None``, :attr:`numpy.NaN` or :attr:`pd.NaT`, get\n mapped to ``True`` values.\n Everything else gets mapped to ``False`` values. Characters such as\n empty strings `''` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n numpy.ndarray\n A boolean array of whether my values are NA\n\n See Also\n --------\n pandas.Index.notna : boolean inverse of isna.\n pandas.Index.dropna : omit entries with missing values.\n pandas.isna : top-level isna.\n Series.isna : detect missing values in Series object.\n\n Examples\n --------\n Show which entries in a pandas.Index are NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.isna()\n array([False, False, True], dtype=bool)\n\n Empty strings are not considered NA values. None is considered an NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.isna()\n array([False, False, False, True], dtype=bool)\n\n For datetimes, `NaT` (Not a Time) is considered as an NA value.\n\n >>> idx = pd.DatetimeIndex([pd.Timestamp('1940-04-25'),\n ... pd.Timestamp(''), None, pd.NaT])\n >>> idx\n DatetimeIndex(['1940-04-25', 'NaT', 'NaT', 'NaT'],\n dtype='datetime64[ns]', freq=None)\n >>> idx.isna()\n array([False, True, True, True], dtype=bool)\n \"\"\"\n return self._isnan\n isnull = isna\n\n def notna(self):\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to ``True``. Characters such as empty\n strings ``''`` or :attr:`numpy.inf` are not considered NA values\n (unless you set ``pandas.options.mode.use_inf_as_na = True``).\n NA values, such as None or :attr:`numpy.NaN`, get mapped to ``False``\n values.\n\n .. versionadded:: 0.20.0\n\n Returns\n -------\n numpy.ndarray\n Boolean array to indicate which entries are not NA.\n\n See also\n --------\n Index.notnull : alias of notna\n Index.isna: inverse of notna\n pandas.notna : top-level notna\n\n Examples\n --------\n Show which entries in an Index are not NA. The result is an\n array.\n\n >>> idx = pd.Index([5.2, 6.0, np.NaN])\n >>> idx\n Float64Index([5.2, 6.0, nan], dtype='float64')\n >>> idx.notna()\n array([ True, True, False])\n\n Empty strings are not considered NA values. 
None is considered a NA\n value.\n\n >>> idx = pd.Index(['black', '', 'red', None])\n >>> idx\n Index(['black', '', 'red', None], dtype='object')\n >>> idx.notna()\n array([ True, True, True, False])\n \"\"\"\n return ~self.isna()\n notnull = notna\n\n def putmask(self, mask, value):\n \"\"\"\n return a new Index of the values set with the mask\n\n See also\n --------\n numpy.ndarray.putmask\n \"\"\"\n values = self.values.copy()\n try:\n np.putmask(values, mask, self._convert_for_op(value))\n return self._shallow_copy(values)\n except (ValueError, TypeError) as err:\n if is_object_dtype(self):\n raise err\n\n # coerces to object\n return self.astype(object).putmask(mask, value)\n\n def format(self, name=False, formatter=None, **kwargs):\n \"\"\"\n Render a string representation of the Index\n \"\"\"\n header = []\n if name:\n header.append(pprint_thing(self.name,\n escape_chars=('\\t', '\\r', '\\n')) if\n self.name is not None else '')\n\n if formatter is not None:\n return header + list(self.map(formatter))\n\n return self._format_with_header(header, **kwargs)\n\n def _format_with_header(self, header, na_rep='NaN', **kwargs):\n values = self.values\n\n from pandas.io.formats.format import format_array\n\n if is_categorical_dtype(values.dtype):\n values = np.array(values)\n\n elif is_object_dtype(values.dtype):\n values = lib.maybe_convert_objects(values, safe=1)\n\n if is_object_dtype(values.dtype):\n result = [pprint_thing(x, escape_chars=('\\t', '\\r', '\\n'))\n for x in values]\n\n # could have nans\n mask = isna(values)\n if mask.any():\n result = np.array(result)\n result[mask] = na_rep\n result = result.tolist()\n\n else:\n result = _trim_front(format_array(values, None, justify='left'))\n return header + result\n\n def to_native_types(self, slicer=None, **kwargs):\n \"\"\"\n Format specified values of `self` and return them.\n\n Parameters\n ----------\n slicer : int, array-like\n An indexer into `self` that specifies which values\n are used in the formatting process.\n kwargs : dict\n Options for specifying how the values should be formatted.\n These options include the following:\n\n 1) na_rep : str\n The value that serves as a placeholder for NULL values\n 2) quoting : bool or None\n Whether or not there are quoted values in `self`\n 3) date_format : str\n The format used to represent date-like values\n \"\"\"\n\n values = self\n if slicer is not None:\n values = values[slicer]\n return values._format_native_types(**kwargs)\n\n def _format_native_types(self, na_rep='', quoting=None, **kwargs):\n \"\"\" actually format my specific types \"\"\"\n mask = isna(self)\n if not self.is_object() and not quoting:\n values = np.asarray(self).astype(str)\n else:\n values = np.array(self, dtype=object, copy=True)\n\n values[mask] = na_rep\n return values\n\n def equals(self, other):\n \"\"\"\n Determines if two Index objects contain the same elements.\n \"\"\"\n if self.is_(other):\n return True\n\n if not isinstance(other, Index):\n return False\n\n if is_object_dtype(self) and not is_object_dtype(other):\n # if other is not object, use other's logic for coercion\n return other.equals(self)\n\n try:\n return array_equivalent(com._values_from_object(self),\n com._values_from_object(other))\n except Exception:\n return False\n\n def identical(self, other):\n \"\"\"Similar to equals, but check that other comparable attributes are\n also equal\n \"\"\"\n return (self.equals(other) and\n all((getattr(self, c, None) == getattr(other, c, None)\n for c in self._comparables)) and\n type(self) == 
type(other))\n\n def asof(self, label):\n \"\"\"\n For a sorted index, return the most recent label up to and including\n the passed label. Return NaN if not found.\n\n See also\n --------\n get_loc : asof is a thin wrapper around get_loc with method='pad'\n \"\"\"\n try:\n loc = self.get_loc(label, method='pad')\n except KeyError:\n return self._na_value\n else:\n if isinstance(loc, slice):\n loc = loc.indices(len(self))[-1]\n return self[loc]\n\n def asof_locs(self, where, mask):\n \"\"\"\n where : array of timestamps\n mask : array of booleans where data is not NA\n\n \"\"\"\n locs = self.values[mask].searchsorted(where.values, side='right')\n\n locs = np.where(locs > 0, locs - 1, 0)\n result = np.arange(len(self))[mask].take(locs)\n\n first = mask.argmax()\n result[(locs == 0) & (where < self.values[first])] = -1\n\n return result\n\n def sort_values(self, return_indexer=False, ascending=True):\n \"\"\"\n Return a sorted copy of the index.\n\n Return a sorted copy of the index, and optionally return the indices\n that sorted the index itself.\n\n Parameters\n ----------\n return_indexer : bool, default False\n Should the indices that would sort the index be returned.\n ascending : bool, default True\n Should the index values be sorted in an ascending order.\n\n Returns\n -------\n sorted_index : pandas.Index\n Sorted copy of the index.\n indexer : numpy.ndarray, optional\n The indices that the index itself was sorted by.\n\n See Also\n --------\n pandas.Series.sort_values : Sort values of a Series.\n pandas.DataFrame.sort_values : Sort values in a DataFrame.\n\n Examples\n --------\n >>> idx = pd.Index([10, 100, 1, 1000])\n >>> idx\n Int64Index([10, 100, 1, 1000], dtype='int64')\n\n Sort values in ascending order (default behavior).\n\n >>> idx.sort_values()\n Int64Index([1, 10, 100, 1000], dtype='int64')\n\n Sort values in descending order, and also get the indices `idx` was\n sorted by.\n\n >>> idx.sort_values(ascending=False, return_indexer=True)\n (Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2]))\n \"\"\"\n _as = self.argsort()\n if not ascending:\n _as = _as[::-1]\n\n sorted_index = self.take(_as)\n\n if return_indexer:\n return sorted_index, _as\n else:\n return sorted_index\n\n def sort(self, *args, **kwargs):\n raise TypeError(\"cannot sort an Index object in-place, use \"\n \"sort_values instead\")\n\n def sortlevel(self, level=None, ascending=True, sort_remaining=None):\n \"\"\"\n\n For internal compatibility with the Index API\n\n Sort the Index. 
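(Aside, not upstream text -- a hedged sketch of the ``asof`` method\n defined earlier, assuming a sorted integer index:\n\n >>> pd.Index([10, 20, 30]).asof(25)\n 20\n\n end of aside.) 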
This is for compat with MultiIndex\n\n Parameters\n ----------\n ascending : boolean, default True\n False to sort in descending order\n\n level, sort_remaining are compat parameters\n\n Returns\n -------\n sorted_index : Index\n \"\"\"\n return self.sort_values(return_indexer=True, ascending=ascending)\n\n def shift(self, periods=1, freq=None):\n \"\"\"\n Shift index by desired number of time frequency increments.\n\n This method is for shifting the values of datetime-like indexes\n by a specified time increment a given number of times.\n\n Parameters\n ----------\n periods : int, default 1\n Number of periods (or increments) to shift by,\n can be positive or negative.\n freq : pandas.DateOffset, pandas.Timedelta or string, optional\n Frequency increment to shift by.\n If None, the index is shifted by its own `freq` attribute.\n Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.\n\n Returns\n -------\n pandas.Index\n shifted index\n\n See Also\n --------\n Series.shift : Shift values of Series.\n\n Examples\n --------\n Put the first 5 month starts of 2011 into an index.\n\n >>> month_starts = pd.date_range('1/1/2011', periods=5, freq='MS')\n >>> month_starts\n DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01', '2011-04-01',\n '2011-05-01'],\n dtype='datetime64[ns]', freq='MS')\n\n Shift the index by 10 days.\n\n >>> month_starts.shift(10, freq='D')\n DatetimeIndex(['2011-01-11', '2011-02-11', '2011-03-11', '2011-04-11',\n '2011-05-11'],\n dtype='datetime64[ns]', freq=None)\n\n The default value of `freq` is the `freq` attribute of the index,\n which is 'MS' (month start) in this example.\n\n >>> month_starts.shift(10)\n DatetimeIndex(['2011-11-01', '2011-12-01', '2012-01-01', '2012-02-01',\n '2012-03-01'],\n dtype='datetime64[ns]', freq='MS')\n\n Notes\n -----\n This method is only implemented for datetime-like index classes,\n i.e., DatetimeIndex, PeriodIndex and TimedeltaIndex.\n \"\"\"\n raise NotImplementedError(\"Not supported for type %s\" %\n type(self).__name__)\n\n def argsort(self, *args, **kwargs):\n \"\"\"\n Return the integer indices that would sort the index.\n\n Parameters\n ----------\n *args\n Passed to `numpy.ndarray.argsort`.\n **kwargs\n Passed to `numpy.ndarray.argsort`.\n\n Returns\n -------\n numpy.ndarray\n Integer indices that would sort the index if used as\n an indexer.\n\n See also\n --------\n numpy.argsort : Similar method for NumPy arrays.\n Index.sort_values : Return sorted copy of Index.\n\n Examples\n --------\n >>> idx = pd.Index(['b', 'a', 'd', 'c'])\n >>> idx\n Index(['b', 'a', 'd', 'c'], dtype='object')\n\n >>> order = idx.argsort()\n >>> order\n array([1, 0, 3, 2])\n\n >>> idx[order]\n Index(['a', 'b', 'c', 'd'], dtype='object')\n \"\"\"\n result = self.asi8\n if result is None:\n result = np.array(self)\n return result.argsort(*args, **kwargs)\n\n def __add__(self, other):\n return Index(np.array(self) + other)\n\n def __radd__(self, other):\n return Index(other + np.array(self))\n\n def __iadd__(self, other):\n # alias for __add__\n return self + other\n\n def __sub__(self, other):\n raise TypeError(\"cannot perform __sub__ with this index type: \"\n \"{typ}\".format(typ=type(self).__name__))\n\n def __and__(self, other):\n return self.intersection(other)\n\n def __or__(self, other):\n return self.union(other)\n\n def __xor__(self, other):\n return self.symmetric_difference(other)\n\n def _get_consensus_name(self, other):\n \"\"\"\n Given 2 indexes, give a consensus name meaning\n we take the not None one, or None if the names 
differ.\n Return a new object if we are resetting the name\n \"\"\"\n if self.name != other.name:\n if self.name is None or other.name is None:\n name = self.name or other.name\n else:\n name = None\n if self.name != name:\n return self._shallow_copy(name=name)\n return self\n\n def union(self, other):\n \"\"\"\n Form the union of two Index objects and sorts if possible.\n\n Parameters\n ----------\n other : Index or array-like\n\n Returns\n -------\n union : Index\n\n Examples\n --------\n\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.union(idx2)\n Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')\n\n \"\"\"\n self._assert_can_do_setop(other)\n other = _ensure_index(other)\n\n if len(other) == 0 or self.equals(other):\n return self._get_consensus_name(other)\n\n if len(self) == 0:\n return other._get_consensus_name(self)\n\n # TODO: is_dtype_union_equal is a hack around\n # 1. buggy set ops with duplicates (GH #13432)\n # 2. CategoricalIndex lacking setops (GH #10186)\n # Once those are fixed, this workaround can be removed\n if not is_dtype_union_equal(self.dtype, other.dtype):\n this = self.astype('O')\n other = other.astype('O')\n return this.union(other)\n\n # TODO(EA): setops-refactor, clean all this up\n if is_period_dtype(self) or is_datetime64tz_dtype(self):\n lvals = self._ndarray_values\n else:\n lvals = self._values\n if is_period_dtype(other) or is_datetime64tz_dtype(other):\n rvals = other._ndarray_values\n else:\n rvals = other._values\n\n if self.is_monotonic and other.is_monotonic:\n try:\n result = self._outer_indexer(lvals, rvals)[0]\n except TypeError:\n # incomparable objects\n result = list(lvals)\n\n # worth making this faster? a very unusual case\n value_set = set(lvals)\n result.extend([x for x in rvals if x not in value_set])\n else:\n indexer = self.get_indexer(other)\n indexer, = (indexer == -1).nonzero()\n\n if len(indexer) > 0:\n other_diff = algos.take_nd(rvals, indexer,\n allow_fill=False)\n result = _concat._concat_compat((lvals, other_diff))\n\n try:\n lvals[0] < other_diff[0]\n except TypeError as e:\n warnings.warn(\"%s, sort order is undefined for \"\n \"incomparable objects\" % e, RuntimeWarning,\n stacklevel=3)\n else:\n types = frozenset((self.inferred_type,\n other.inferred_type))\n if not types & _unsortable_types:\n result.sort()\n\n else:\n result = lvals\n\n try:\n result = np.sort(result)\n except TypeError as e:\n warnings.warn(\"%s, sort order is undefined for \"\n \"incomparable objects\" % e, RuntimeWarning,\n stacklevel=3)\n\n # for subclasses\n return self._wrap_union_result(other, result)\n\n def _wrap_union_result(self, other, result):\n name = self.name if self.name == other.name else None\n return self.__class__(result, name=name)\n\n def intersection(self, other):\n \"\"\"\n Form the intersection of two Index objects.\n\n This returns a new Index with elements common to the index and `other`,\n preserving the order of the calling index.\n\n Parameters\n ----------\n other : Index or array-like\n\n Returns\n -------\n intersection : Index\n\n Examples\n --------\n\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.intersection(idx2)\n Int64Index([3, 4], dtype='int64')\n\n \"\"\"\n self._assert_can_do_setop(other)\n other = _ensure_index(other)\n\n if self.equals(other):\n return self._get_consensus_name(other)\n\n if not is_dtype_equal(self.dtype, other.dtype):\n this = self.astype('O')\n other = other.astype('O')\n return this.intersection(other)\n\n # TODO(EA): 
setops-refactor, clean all this up\n if is_period_dtype(self):\n lvals = self._ndarray_values\n else:\n lvals = self._values\n if is_period_dtype(other):\n rvals = other._ndarray_values\n else:\n rvals = other._values\n\n if self.is_monotonic and other.is_monotonic:\n try:\n result = self._inner_indexer(lvals, rvals)[0]\n return self._wrap_union_result(other, result)\n except TypeError:\n pass\n\n try:\n indexer = Index(rvals).get_indexer(lvals)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n except Exception:\n # duplicates\n indexer = algos.unique1d(\n Index(rvals).get_indexer_non_unique(lvals)[0])\n indexer = indexer[indexer != -1]\n\n taken = other.take(indexer)\n if self.name != other.name:\n taken.name = None\n return taken\n\n def difference(self, other):\n \"\"\"\n Return a new Index with elements from the index that are not in\n `other`.\n\n This is the set difference of two Index objects.\n It's sorted if sorting is possible.\n\n Parameters\n ----------\n other : Index or array-like\n\n Returns\n -------\n difference : Index\n\n Examples\n --------\n\n >>> idx1 = pd.Index([1, 2, 3, 4])\n >>> idx2 = pd.Index([3, 4, 5, 6])\n >>> idx1.difference(idx2)\n Int64Index([1, 2], dtype='int64')\n\n \"\"\"\n self._assert_can_do_setop(other)\n\n if self.equals(other):\n return self._shallow_copy([])\n\n other, result_name = self._convert_can_do_setop(other)\n\n this = self._get_unique_index()\n\n indexer = this.get_indexer(other)\n indexer = indexer.take((indexer != -1).nonzero()[0])\n\n label_diff = np.setdiff1d(np.arange(this.size), indexer,\n assume_unique=True)\n the_diff = this.values.take(label_diff)\n try:\n the_diff = sorting.safe_sort(the_diff)\n except TypeError:\n pass\n\n return this._shallow_copy(the_diff, name=result_name, freq=None)\n\n def symmetric_difference(self, other, result_name=None):\n \"\"\"\n Compute the symmetric difference of two Index objects.\n It's sorted if sorting is possible.\n\n Parameters\n ----------\n other : Index or array-like\n result_name : str\n\n Returns\n -------\n symmetric_difference : Index\n\n Notes\n -----\n ``symmetric_difference`` contains elements that appear in either\n ``idx1`` or ``idx2`` but not both. 
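(Illustrative aside, not upstream text -- via the operator hooks\n defined earlier, ``^`` dispatches here just as ``&`` and ``|``\n dispatch to ``intersection`` and ``union``:\n\n >>> pd.Index([1, 2]) ^ pd.Index([2, 3])\n Int64Index([1, 3], dtype='int64')\n\n end of aside.) 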
Equivalent to the Index created by\n        ``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates\n        dropped.\n\n        Examples\n        --------\n        >>> idx1 = Index([1, 2, 3, 4])\n        >>> idx2 = Index([2, 3, 4, 5])\n        >>> idx1.symmetric_difference(idx2)\n        Int64Index([1, 5], dtype='int64')\n\n        You can also use the ``^`` operator:\n\n        >>> idx1 ^ idx2\n        Int64Index([1, 5], dtype='int64')\n        \"\"\"\n        self._assert_can_do_setop(other)\n        other, result_name_update = self._convert_can_do_setop(other)\n        if result_name is None:\n            result_name = result_name_update\n\n        this = self._get_unique_index()\n        other = other._get_unique_index()\n        indexer = this.get_indexer(other)\n\n        # {this} minus {other}\n        common_indexer = indexer.take((indexer != -1).nonzero()[0])\n        left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,\n                                    assume_unique=True)\n        left_diff = this.values.take(left_indexer)\n\n        # {other} minus {this}\n        right_indexer = (indexer == -1).nonzero()[0]\n        right_diff = other.values.take(right_indexer)\n\n        the_diff = _concat._concat_compat([left_diff, right_diff])\n        try:\n            the_diff = sorting.safe_sort(the_diff)\n        except TypeError:\n            pass\n\n        attribs = self._get_attributes_dict()\n        attribs['name'] = result_name\n        if 'freq' in attribs:\n            attribs['freq'] = None\n        return self._shallow_copy_with_infer(the_diff, **attribs)\n\n    def _get_unique_index(self, dropna=False):\n        \"\"\"\n        Returns an index containing unique values.\n\n        Parameters\n        ----------\n        dropna : bool\n            If True, NaN values are dropped.\n\n        Returns\n        -------\n        uniques : index\n        \"\"\"\n        if self.is_unique and not dropna:\n            return self\n\n        values = self.values\n\n        if not self.is_unique:\n            values = self.unique()\n\n        if dropna:\n            try:\n                if self.hasnans:\n                    values = values[~isna(values)]\n            except NotImplementedError:\n                pass\n\n        return self._shallow_copy(values)\n\n    _index_shared_docs['get_loc'] = \"\"\"\n        Get integer location, slice or boolean mask for requested label.\n\n        Parameters\n        ----------\n        key : label\n        method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n            * default: exact matches only.\n            * pad / ffill: find the PREVIOUS index value if no exact match.\n            * backfill / bfill: use NEXT index value if no exact match\n            * nearest: use the NEAREST index value if no exact match. Tied\n              distances are broken by preferring the larger index value.\n        tolerance : optional\n            Maximum distance from index value for inexact matches. The value of\n            the index at the matching location must satisfy the equation\n            ``abs(index[loc] - key) <= tolerance``.\n\n            Tolerance may be a scalar\n            value, which applies the same tolerance to all values, or\n            list-like, which applies variable tolerance per element. List-like\n            includes list, tuple, array, and Series; it must be the same size\n            as the index, and its dtype must exactly match the index's type.\n\n            .. 
versionadded:: 0.21.0 (list-like tolerance)\n\n Returns\n -------\n loc : int if unique index, slice if monotonic index, else mask\n\n Examples\n ---------\n >>> unique_index = pd.Index(list('abc'))\n >>> unique_index.get_loc('b')\n 1\n\n >>> monotonic_index = pd.Index(list('abbc'))\n >>> monotonic_index.get_loc('b')\n slice(1, 3, None)\n\n >>> non_monotonic_index = pd.Index(list('abcb'))\n >>> non_monotonic_index.get_loc('b')\n array([False, True, False, True], dtype=bool)\n \"\"\"\n\n @Appender(_index_shared_docs['get_loc'])\n def get_loc(self, key, method=None, tolerance=None):\n if method is None:\n if tolerance is not None:\n raise ValueError('tolerance argument only valid if using pad, '\n 'backfill or nearest lookups')\n try:\n return self._engine.get_loc(key)\n except KeyError:\n return self._engine.get_loc(self._maybe_cast_indexer(key))\n\n indexer = self.get_indexer([key], method=method, tolerance=tolerance)\n if indexer.ndim > 1 or indexer.size > 1:\n raise TypeError('get_loc requires scalar valued input')\n loc = indexer.item()\n if loc == -1:\n raise KeyError(key)\n return loc\n\n def get_value(self, series, key):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray. Only use this if you\n know what you're doing\n \"\"\"\n\n # if we have something that is Index-like, then\n # use this, e.g. DatetimeIndex\n s = getattr(series, '_values', None)\n if isinstance(s, (ExtensionArray, Index)) and is_scalar(key):\n # GH 20825\n # Unify Index and ExtensionArray treatment\n # First try to convert the key to a location\n # If that fails, see if key is an integer, and\n # try that\n try:\n iloc = self.get_loc(key)\n return s[iloc]\n except KeyError:\n if is_integer(key):\n return s[key]\n\n s = com._values_from_object(series)\n k = com._values_from_object(key)\n\n k = self._convert_scalar_indexer(k, kind='getitem')\n try:\n return self._engine.get_value(s, k,\n tz=getattr(series.dtype, 'tz', None))\n except KeyError as e1:\n if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:\n raise\n\n try:\n return libindex.get_value_box(s, key)\n except IndexError:\n raise\n except TypeError:\n # generator/iterator-like\n if is_iterator(key):\n raise InvalidIndexError(key)\n else:\n raise e1\n except Exception: # pragma: no cover\n raise e1\n except TypeError:\n # python 3\n if is_scalar(key): # pragma: no cover\n raise IndexError(key)\n raise InvalidIndexError(key)\n\n def set_value(self, arr, key, value):\n \"\"\"\n Fast lookup of value from 1-dimensional ndarray. Only use this if you\n know what you're doing\n \"\"\"\n self._engine.set_value(com._values_from_object(arr),\n com._values_from_object(key), value)\n\n def _get_level_values(self, level):\n \"\"\"\n Return an Index of values for requested level, equal to the length\n of the index.\n\n Parameters\n ----------\n level : int or str\n ``level`` is either the integer position of the level in the\n MultiIndex, or the name of the level.\n\n Returns\n -------\n values : Index\n ``self``, as there is only one level in the Index.\n\n See also\n ---------\n pandas.MultiIndex.get_level_values : get values for a level of a\n MultiIndex\n \"\"\"\n\n self._validate_index_level(level)\n return self\n\n get_level_values = _get_level_values\n\n def droplevel(self, level=0):\n \"\"\"\n Return index with requested level(s) removed. If resulting index has\n only 1 level left, the result will be of Index type, not MultiIndex.\n\n .. 
versionadded:: 0.23.1 (support for non-MultiIndex)\n\n        Parameters\n        ----------\n        level : int, str, or list-like, default 0\n            If a string is given, it must be the name of a level.\n            If list-like, elements must be names or indexes of levels.\n\n        Returns\n        -------\n        index : Index or MultiIndex\n        \"\"\"\n        if not isinstance(level, (tuple, list)):\n            level = [level]\n\n        levnums = sorted(self._get_level_number(lev) for lev in level)[::-1]\n\n        if len(level) == 0:\n            return self\n        if len(level) >= self.nlevels:\n            raise ValueError(\"Cannot remove {} levels from an index with {} \"\n                             \"levels: at least one level must be \"\n                             \"left.\".format(len(level), self.nlevels))\n        # The two checks above guarantee that here self is a MultiIndex\n\n        new_levels = list(self.levels)\n        new_labels = list(self.labels)\n        new_names = list(self.names)\n\n        for i in levnums:\n            new_levels.pop(i)\n            new_labels.pop(i)\n            new_names.pop(i)\n\n        if len(new_levels) == 1:\n\n            # set nan if needed\n            mask = new_labels[0] == -1\n            result = new_levels[0].take(new_labels[0])\n            if mask.any():\n                result = result.putmask(mask, np.nan)\n\n            result.name = new_names[0]\n            return result\n        else:\n            from .multi import MultiIndex\n            return MultiIndex(levels=new_levels, labels=new_labels,\n                              names=new_names, verify_integrity=False)\n\n    _index_shared_docs['get_indexer'] = \"\"\"\n        Compute indexer and mask for new index given the current index. The\n        indexer should then be used as an input to ndarray.take to align the\n        current data to the new index.\n\n        Parameters\n        ----------\n        target : %(target_klass)s\n        method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional\n            * default: exact matches only.\n            * pad / ffill: find the PREVIOUS index value if no exact match.\n            * backfill / bfill: use NEXT index value if no exact match\n            * nearest: use the NEAREST index value if no exact match. Tied\n              distances are broken by preferring the larger index value.\n        limit : int, optional\n            Maximum number of consecutive labels in ``target`` to match for\n            inexact matches.\n        tolerance : optional\n            Maximum distance between original and new labels for inexact\n            matches. The values of the index at the matching locations must\n            satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n\n            Tolerance may be a scalar value, which applies the same tolerance\n            to all values, or list-like, which applies variable tolerance per\n            element. List-like includes list, tuple, array, and Series; it must\n            be the same size as the index, and its dtype must exactly match the\n            index's type.\n\n            .. versionadded:: 0.21.0 (list-like tolerance)\n\n        Examples\n        --------\n        >>> indexer = index.get_indexer(new_index)\n        >>> new_values = cur_values.take(indexer)\n\n        Returns\n        -------\n        indexer : ndarray of int\n            Integers from 0 to n - 1 indicating that the index at these\n            positions matches the corresponding target values. Missing values\n            in the target are marked by -1.\n        \"\"\"\n\n    @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)\n    def get_indexer(self, target, method=None, limit=None, tolerance=None):\n        method = missing.clean_reindex_fill_method(method)\n        target = _ensure_index(target)\n        if tolerance is not None:\n            tolerance = self._convert_tolerance(tolerance, target)\n\n        # Treat boolean labels passed to a numeric index as not found. 
Without\n # this fix False and True would be treated as 0 and 1 respectively.\n # (GH #16877)\n if target.is_boolean() and self.is_numeric():\n return _ensure_platform_int(np.repeat(-1, target.size))\n\n pself, ptarget = self._maybe_promote(target)\n if pself is not self or ptarget is not target:\n return pself.get_indexer(ptarget, method=method, limit=limit,\n tolerance=tolerance)\n\n if not is_dtype_equal(self.dtype, target.dtype):\n this = self.astype(object)\n target = target.astype(object)\n return this.get_indexer(target, method=method, limit=limit,\n tolerance=tolerance)\n\n if not self.is_unique:\n raise InvalidIndexError('Reindexing only valid with uniquely'\n ' valued Index objects')\n\n if method == 'pad' or method == 'backfill':\n indexer = self._get_fill_indexer(target, method, limit, tolerance)\n elif method == 'nearest':\n indexer = self._get_nearest_indexer(target, limit, tolerance)\n else:\n if tolerance is not None:\n raise ValueError('tolerance argument only valid if doing pad, '\n 'backfill or nearest reindexing')\n if limit is not None:\n raise ValueError('limit argument only valid if doing pad, '\n 'backfill or nearest reindexing')\n\n indexer = self._engine.get_indexer(target._ndarray_values)\n\n return _ensure_platform_int(indexer)\n\n def _convert_tolerance(self, tolerance, target):\n # override this method on subclasses\n tolerance = np.asarray(tolerance)\n if target.size != tolerance.size and tolerance.size > 1:\n raise ValueError('list-like tolerance size must match '\n 'target index size')\n return tolerance\n\n def _get_fill_indexer(self, target, method, limit=None, tolerance=None):\n if self.is_monotonic_increasing and target.is_monotonic_increasing:\n method = (self._engine.get_pad_indexer if method == 'pad' else\n self._engine.get_backfill_indexer)\n indexer = method(target._ndarray_values, limit)\n else:\n indexer = self._get_fill_indexer_searchsorted(target, method,\n limit)\n if tolerance is not None:\n indexer = self._filter_indexer_tolerance(target._ndarray_values,\n indexer,\n tolerance)\n return indexer\n\n def _get_fill_indexer_searchsorted(self, target, method, limit=None):\n \"\"\"\n Fallback pad/backfill get_indexer that works for monotonic decreasing\n indexes and non-monotonic targets\n \"\"\"\n if limit is not None:\n raise ValueError('limit argument for %r method only well-defined '\n 'if index and target are monotonic' % method)\n\n side = 'left' if method == 'pad' else 'right'\n\n # find exact matches first (this simplifies the algorithm)\n indexer = self.get_indexer(target)\n nonexact = (indexer == -1)\n indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],\n side)\n if side == 'left':\n # searchsorted returns \"indices into a sorted array such that,\n # if the corresponding elements in v were inserted before the\n # indices, the order of a would be preserved\".\n # Thus, we need to subtract 1 to find values to the left.\n indexer[nonexact] -= 1\n # This also mapped not found values (values of 0 from\n # np.searchsorted) to -1, which conveniently is also our\n # sentinel for missing values\n else:\n # Mark indices to the right of the largest value as not found\n indexer[indexer == len(self)] = -1\n return indexer\n\n def _get_nearest_indexer(self, target, limit, tolerance):\n \"\"\"\n Get the indexer for the nearest index labels; requires an index with\n values that can be subtracted from each other (e.g., not strings or\n tuples).\n \"\"\"\n left_indexer = self.get_indexer(target, 'pad', limit=limit)\n right_indexer = 
self.get_indexer(target, 'backfill', limit=limit)\n\n        target = np.asarray(target)\n        left_distances = abs(self.values[left_indexer] - target)\n        right_distances = abs(self.values[right_indexer] - target)\n\n        op = operator.lt if self.is_monotonic_increasing else operator.le\n        indexer = np.where(op(left_distances, right_distances) |\n                           (right_indexer == -1), left_indexer, right_indexer)\n        if tolerance is not None:\n            indexer = self._filter_indexer_tolerance(target, indexer,\n                                                     tolerance)\n        return indexer\n\n    def _filter_indexer_tolerance(self, target, indexer, tolerance):\n        distance = abs(self.values[indexer] - target)\n        indexer = np.where(distance <= tolerance, indexer, -1)\n        return indexer\n\n    _index_shared_docs['get_indexer_non_unique'] = \"\"\"\n        Compute indexer and mask for new index given the current index. The\n        indexer should then be used as an input to ndarray.take to align the\n        current data to the new index.\n\n        Parameters\n        ----------\n        target : %(target_klass)s\n\n        Returns\n        -------\n        indexer : ndarray of int\n            Integers from 0 to n - 1 indicating that the index at these\n            positions matches the corresponding target values. Missing values\n            in the target are marked by -1.\n        missing : ndarray of int\n            An indexer into the target of the values not found.\n            These correspond to the -1 in the indexer array.\n        \"\"\"\n\n    @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)\n    def get_indexer_non_unique(self, target):\n        target = _ensure_index(target)\n        pself, ptarget = self._maybe_promote(target)\n        if pself is not self or ptarget is not target:\n            return pself.get_indexer_non_unique(ptarget)\n\n        if self.is_all_dates:\n            self = Index(self.asi8)\n            tgt_values = target.asi8\n        else:\n            tgt_values = target._ndarray_values\n\n        indexer, missing = self._engine.get_indexer_non_unique(tgt_values)\n        return _ensure_platform_int(indexer), missing\n\n    def get_indexer_for(self, target, **kwargs):\n        \"\"\"\n        Guaranteed return of an indexer even when non-unique.\n        This dispatches to get_indexer or get_indexer_non_unique as appropriate\n        \"\"\"\n        if self.is_unique:\n            return self.get_indexer(target, **kwargs)\n        indexer, _ = self.get_indexer_non_unique(target, **kwargs)\n        return indexer\n\n    def _maybe_promote(self, other):\n        # A hack, but it works\n        from pandas.core.indexes.datetimes import DatetimeIndex\n        if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):\n            return DatetimeIndex(self), other\n        elif self.inferred_type == 'boolean':\n            if not is_object_dtype(self.dtype):\n                return self.astype('object'), other.astype('object')\n        return self, other\n\n    def groupby(self, values):\n        \"\"\"\n        Group the index labels by a given array of values.\n\n        Parameters\n        ----------\n        values : array\n            Values used to determine the groups.\n\n        Returns\n        -------\n        groups : dict\n            {group name -> group labels}\n        \"\"\"\n\n        # TODO: if we are a MultiIndex, we can do better\n        # than converting to tuples\n        from .multi import MultiIndex\n        if isinstance(values, MultiIndex):\n            values = values.values\n        values = _ensure_categorical(values)\n        result = values._reverse_indexer()\n\n        # map to the label\n        result = {k: self.take(v) for k, v in compat.iteritems(result)}\n\n        return result\n\n    def map(self, mapper, na_action=None):\n        \"\"\"\n        Map values using input correspondence (a dict, Series, or function).\n\n        Parameters\n        ----------\n        mapper : function, dict, or Series\n            Mapping correspondence.\n        na_action : {None, 'ignore'}\n            If 'ignore', propagate NA values, without passing them to the\n            mapping correspondence.\n\n        
Returns\n        -------\n        applied : Union[Index, MultiIndex], inferred\n            The output of the mapping function applied to the index.\n            If the function returns a tuple with more than one element,\n            a MultiIndex will be returned.\n        \"\"\"\n\n        from .multi import MultiIndex\n        new_values = super(Index, self)._map_values(\n            mapper, na_action=na_action)\n\n        attributes = self._get_attributes_dict()\n\n        # we can return a MultiIndex\n        if new_values.size and isinstance(new_values[0], tuple):\n            if isinstance(self, MultiIndex):\n                names = self.names\n            elif attributes.get('name'):\n                names = [attributes.get('name')] * len(new_values[0])\n            else:\n                names = None\n            return MultiIndex.from_tuples(new_values,\n                                          names=names)\n\n        attributes['copy'] = False\n        if not new_values.size:\n            # empty\n            attributes['dtype'] = self.dtype\n\n        return Index(new_values, **attributes)\n\n    def isin(self, values, level=None):\n        \"\"\"\n        Return a boolean array where the index values are in `values`.\n\n        Compute boolean array of whether each index value is found in the\n        passed set of values. The length of the returned boolean array matches\n        the length of the index.\n\n        Parameters\n        ----------\n        values : set or list-like\n            Sought values.\n\n            .. versionadded:: 0.18.1\n\n               Support for values as a set.\n\n        level : str or int, optional\n            Name or position of the index level to use (if the index is a\n            `MultiIndex`).\n\n        Returns\n        -------\n        is_contained : ndarray\n            NumPy array of boolean values.\n\n        See also\n        --------\n        Series.isin : Same for Series.\n        DataFrame.isin : Same method for DataFrames.\n\n        Notes\n        -----\n        In the case of `MultiIndex` you must either specify `values` as a\n        list-like object containing tuples that are the same length as the\n        number of levels, or specify `level`. Otherwise it will raise a\n        ``ValueError``.\n\n        If `level` is specified:\n\n        - if it is the name of one *and only one* index level, use that level;\n        - otherwise it should be a number indicating level position.\n\n        Examples\n        --------\n        >>> idx = pd.Index([1,2,3])\n        >>> idx\n        Int64Index([1, 2, 3], dtype='int64')\n\n        Check whether each index value is found in a list of values.\n\n        >>> idx.isin([1, 4])\n        array([ True, False, False])\n\n        >>> midx = pd.MultiIndex.from_arrays([[1,2,3],\n        ...                                  ['red', 'blue', 'green']],\n        ...                                  
names=('number', 'color'))\n >>> midx\n MultiIndex(levels=[[1, 2, 3], ['blue', 'green', 'red']],\n labels=[[0, 1, 2], [2, 0, 1]],\n names=['number', 'color'])\n\n Check whether the strings in the 'color' level of the MultiIndex\n are in a list of colors.\n\n >>> midx.isin(['red', 'orange', 'yellow'], level='color')\n array([ True, False, False])\n\n To check across the levels of a MultiIndex, pass a list of tuples:\n\n >>> midx.isin([(1, 'red'), (3, 'red')])\n array([ True, False, False])\n\n For a DatetimeIndex, string values in `values` are converted to\n Timestamps.\n\n >>> dates = ['2000-03-11', '2000-03-12', '2000-03-13']\n >>> dti = pd.to_datetime(dates)\n >>> dti\n DatetimeIndex(['2000-03-11', '2000-03-12', '2000-03-13'],\n dtype='datetime64[ns]', freq=None)\n\n >>> dti.isin(['2000-03-11'])\n array([ True, False, False])\n \"\"\"\n if level is not None:\n self._validate_index_level(level)\n return algos.isin(self, values)\n\n def _can_reindex(self, indexer):\n \"\"\"\n *this is an internal non-public method*\n\n Check if we are allowing reindexing with this particular indexer\n\n Parameters\n ----------\n indexer : an integer indexer\n\n Raises\n ------\n ValueError if its a duplicate axis\n \"\"\"\n\n # trying to reindex on an axis with duplicates\n if not self.is_unique and len(indexer):\n raise ValueError(\"cannot reindex from a duplicate axis\")\n\n def reindex(self, target, method=None, level=None, limit=None,\n tolerance=None):\n \"\"\"\n Create index with target's values (move/add/delete values as necessary)\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index\n indexer : np.ndarray or None\n Indices of output values in original index\n\n \"\"\"\n # GH6552: preserve names when reindexing to non-named target\n # (i.e. 
neither Index nor Series).\n preserve_names = not hasattr(target, 'name')\n\n # GH7774: preserve dtype/tz if target is empty and not an Index.\n target = _ensure_has_len(target) # target may be an iterator\n\n if not isinstance(target, Index) and len(target) == 0:\n attrs = self._get_attributes_dict()\n attrs.pop('freq', None) # don't preserve freq\n target = self._simple_new(None, dtype=self.dtype, **attrs)\n else:\n target = _ensure_index(target)\n\n if level is not None:\n if method is not None:\n raise TypeError('Fill method not supported if level passed')\n _, indexer, _ = self._join_level(target, level, how='right',\n return_indexers=True)\n else:\n if self.equals(target):\n indexer = None\n else:\n\n if self.is_unique:\n indexer = self.get_indexer(target, method=method,\n limit=limit,\n tolerance=tolerance)\n else:\n if method is not None or limit is not None:\n raise ValueError(\"cannot reindex a non-unique index \"\n \"with a method or limit\")\n indexer, missing = self.get_indexer_non_unique(target)\n\n if preserve_names and target.nlevels == 1 and target.name != self.name:\n target = target.copy()\n target.name = self.name\n\n return target, indexer\n\n def _reindex_non_unique(self, target):\n \"\"\"\n *this is an internal non-public method*\n\n Create a new index with target's values (move/add/delete values as\n necessary) use with non-unique Index and a possibly non-unique target\n\n Parameters\n ----------\n target : an iterable\n\n Returns\n -------\n new_index : pd.Index\n Resulting index\n indexer : np.ndarray or None\n Indices of output values in original index\n\n \"\"\"\n\n target = _ensure_index(target)\n indexer, missing = self.get_indexer_non_unique(target)\n check = indexer != -1\n new_labels = self.take(indexer[check])\n new_indexer = None\n\n if len(missing):\n length = np.arange(len(indexer))\n\n missing = _ensure_platform_int(missing)\n missing_labels = target.take(missing)\n missing_indexer = _ensure_int64(length[~check])\n cur_labels = self.take(indexer[check]).values\n cur_indexer = _ensure_int64(length[check])\n\n new_labels = np.empty(tuple([len(indexer)]), dtype=object)\n new_labels[cur_indexer] = cur_labels\n new_labels[missing_indexer] = missing_labels\n\n # a unique indexer\n if target.is_unique:\n\n # see GH5553, make sure we use the right indexer\n new_indexer = np.arange(len(indexer))\n new_indexer[cur_indexer] = np.arange(len(cur_labels))\n new_indexer[missing_indexer] = -1\n\n # we have a non_unique selector, need to use the original\n # indexer here\n else:\n\n # need to retake to have the same size as the indexer\n indexer[~check] = 0\n\n # reset the new indexer to account for the new size\n new_indexer = np.arange(len(self.take(indexer)))\n new_indexer[~check] = -1\n\n new_index = self._shallow_copy_with_infer(new_labels, freq=None)\n return new_index, indexer, new_indexer\n\n _index_shared_docs['join'] = \"\"\"\n *this is an internal non-public method*\n\n Compute join_index and indexers to conform data\n structures to the new index.\n\n Parameters\n ----------\n other : Index\n how : {'left', 'right', 'inner', 'outer'}\n level : int or level name, default None\n return_indexers : boolean, default False\n sort : boolean, default False\n Sort the join keys lexicographically in the result Index. If False,\n the order of the join keys depends on the join type (how keyword)\n\n .. 
versionadded:: 0.20.0\n\n Returns\n -------\n join_index, (left_indexer, right_indexer)\n \"\"\"\n\n @Appender(_index_shared_docs['join'])\n def join(self, other, how='left', level=None, return_indexers=False,\n sort=False):\n from .multi import MultiIndex\n self_is_mi = isinstance(self, MultiIndex)\n other_is_mi = isinstance(other, MultiIndex)\n\n # try to figure out the join level\n # GH3662\n if level is None and (self_is_mi or other_is_mi):\n\n # have the same levels/names so a simple join\n if self.names == other.names:\n pass\n else:\n return self._join_multi(other, how=how,\n return_indexers=return_indexers)\n\n # join on the level\n if level is not None and (self_is_mi or other_is_mi):\n return self._join_level(other, level, how=how,\n return_indexers=return_indexers)\n\n other = _ensure_index(other)\n\n if len(other) == 0 and how in ('left', 'outer'):\n join_index = self._shallow_copy()\n if return_indexers:\n rindexer = np.repeat(-1, len(join_index))\n return join_index, None, rindexer\n else:\n return join_index\n\n if len(self) == 0 and how in ('right', 'outer'):\n join_index = other._shallow_copy()\n if return_indexers:\n lindexer = np.repeat(-1, len(join_index))\n return join_index, lindexer, None\n else:\n return join_index\n\n if self._join_precedence < other._join_precedence:\n how = {'right': 'left', 'left': 'right'}.get(how, how)\n result = other.join(self, how=how, level=level,\n return_indexers=return_indexers)\n if return_indexers:\n x, y, z = result\n result = x, z, y\n return result\n\n if not is_dtype_equal(self.dtype, other.dtype):\n this = self.astype('O')\n other = other.astype('O')\n return this.join(other, how=how, return_indexers=return_indexers)\n\n _validate_join_method(how)\n\n if not self.is_unique and not other.is_unique:\n return self._join_non_unique(other, how=how,\n return_indexers=return_indexers)\n elif not self.is_unique or not other.is_unique:\n if self.is_monotonic and other.is_monotonic:\n return self._join_monotonic(other, how=how,\n return_indexers=return_indexers)\n else:\n return self._join_non_unique(other, how=how,\n return_indexers=return_indexers)\n elif self.is_monotonic and other.is_monotonic:\n try:\n return self._join_monotonic(other, how=how,\n return_indexers=return_indexers)\n except TypeError:\n pass\n\n if how == 'left':\n join_index = self\n elif how == 'right':\n join_index = other\n elif how == 'inner':\n join_index = self.intersection(other)\n elif how == 'outer':\n join_index = self.union(other)\n\n if sort:\n join_index = join_index.sort_values()\n\n if return_indexers:\n if join_index is self:\n lindexer = None\n else:\n lindexer = self.get_indexer(join_index)\n if join_index is other:\n rindexer = None\n else:\n rindexer = other.get_indexer(join_index)\n return join_index, lindexer, rindexer\n else:\n return join_index\n\n def _join_multi(self, other, how, return_indexers=True):\n from .multi import MultiIndex\n self_is_mi = isinstance(self, MultiIndex)\n other_is_mi = isinstance(other, MultiIndex)\n\n # figure out join names\n self_names = com._not_none(*self.names)\n other_names = com._not_none(*other.names)\n overlap = list(set(self_names) & set(other_names))\n\n # need at least 1 in common, but not more than 1\n if not len(overlap):\n raise ValueError(\"cannot join with no level specified and no \"\n \"overlapping names\")\n if len(overlap) > 1:\n raise NotImplementedError(\"merging with more than one level \"\n \"overlap on a multi-index is not \"\n \"implemented\")\n jl = overlap[0]\n\n # make the indices into 
mi's that match\n if not (self_is_mi and other_is_mi):\n\n flip_order = False\n if self_is_mi:\n self, other = other, self\n flip_order = True\n # flip if join method is right or left\n how = {'right': 'left', 'left': 'right'}.get(how, how)\n\n level = other.names.index(jl)\n result = self._join_level(other, level, how=how,\n return_indexers=return_indexers)\n\n if flip_order:\n if isinstance(result, tuple):\n return result[0], result[2], result[1]\n return result\n\n # 2 multi-indexes\n raise NotImplementedError(\"merging with both multi-indexes is not \"\n \"implemented\")\n\n def _join_non_unique(self, other, how='left', return_indexers=False):\n from pandas.core.reshape.merge import _get_join_indexers\n\n left_idx, right_idx = _get_join_indexers([self._ndarray_values],\n [other._ndarray_values],\n how=how,\n sort=True)\n\n left_idx = _ensure_platform_int(left_idx)\n right_idx = _ensure_platform_int(right_idx)\n\n join_index = np.asarray(self._ndarray_values.take(left_idx))\n mask = left_idx == -1\n np.putmask(join_index, mask, other._ndarray_values.take(right_idx))\n\n join_index = self._wrap_joined_index(join_index, other)\n\n if return_indexers:\n return join_index, left_idx, right_idx\n else:\n return join_index\n\n def _join_level(self, other, level, how='left', return_indexers=False,\n keep_order=True):\n \"\"\"\n The join method *only* affects the level of the resulting\n MultiIndex. Otherwise it just exactly aligns the Index data to the\n labels of the level in the MultiIndex. If `keep_order` == True, the\n order of the data indexed by the MultiIndex will not be changed;\n otherwise, it will tie out with `other`.\n \"\"\"\n from .multi import MultiIndex\n\n def _get_leaf_sorter(labels):\n \"\"\"\n returns sorter for the inner most level while preserving the\n order of higher levels\n \"\"\"\n if labels[0].size == 0:\n return np.empty(0, dtype='int64')\n\n if len(labels) == 1:\n lab = _ensure_int64(labels[0])\n sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())\n return sorter\n\n # find indexers of beginning of each set of\n # same-key labels w.r.t all but last level\n tic = labels[0][:-1] != labels[0][1:]\n for lab in labels[1:-1]:\n tic |= lab[:-1] != lab[1:]\n\n starts = np.hstack(([True], tic, [True])).nonzero()[0]\n lab = _ensure_int64(labels[-1])\n return lib.get_level_sorter(lab, _ensure_int64(starts))\n\n if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):\n raise TypeError('Join on level between two MultiIndex objects '\n 'is ambiguous')\n\n left, right = self, other\n\n flip_order = not isinstance(self, MultiIndex)\n if flip_order:\n left, right = right, left\n how = {'right': 'left', 'left': 'right'}.get(how, how)\n\n level = left._get_level_number(level)\n old_level = left.levels[level]\n\n if not right.is_unique:\n raise NotImplementedError('Index._join_level on non-unique index '\n 'is not implemented')\n\n new_level, left_lev_indexer, right_lev_indexer = \\\n old_level.join(right, how=how, return_indexers=True)\n\n if left_lev_indexer is None:\n if keep_order or len(left) == 0:\n left_indexer = None\n join_index = left\n else: # sort the leaves\n left_indexer = _get_leaf_sorter(left.labels[:level + 1])\n join_index = left[left_indexer]\n\n else:\n left_lev_indexer = _ensure_int64(left_lev_indexer)\n rev_indexer = lib.get_reverse_indexer(left_lev_indexer,\n len(old_level))\n\n new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],\n allow_fill=False)\n\n new_labels = list(left.labels)\n new_labels[level] = new_lev_labels\n\n 
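# Note: new_lev_labels re-codes the left's labels at this level in terms\n            # of the joined level; entries of -1 mark labels the join dropped. The\n            # keep_order branch below masks those rows out, while the re-sorting\n            # branch drops them before ordering the remaining rows.\n            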
new_levels = list(left.levels)\n new_levels[level] = new_level\n\n if keep_order: # just drop missing values. o.w. keep order\n left_indexer = np.arange(len(left), dtype=np.intp)\n mask = new_lev_labels != -1\n if not mask.all():\n new_labels = [lab[mask] for lab in new_labels]\n left_indexer = left_indexer[mask]\n\n else: # tie out the order with other\n if level == 0: # outer most level, take the fast route\n ngroups = 1 + new_lev_labels.max()\n left_indexer, counts = libalgos.groupsort_indexer(\n new_lev_labels, ngroups)\n\n # missing values are placed first; drop them!\n left_indexer = left_indexer[counts[0]:]\n new_labels = [lab[left_indexer] for lab in new_labels]\n\n else: # sort the leaves\n mask = new_lev_labels != -1\n mask_all = mask.all()\n if not mask_all:\n new_labels = [lab[mask] for lab in new_labels]\n\n left_indexer = _get_leaf_sorter(new_labels[:level + 1])\n new_labels = [lab[left_indexer] for lab in new_labels]\n\n # left_indexers are w.r.t masked frame.\n # reverse to original frame!\n if not mask_all:\n left_indexer = mask.nonzero()[0][left_indexer]\n\n join_index = MultiIndex(levels=new_levels, labels=new_labels,\n names=left.names, verify_integrity=False)\n\n if right_lev_indexer is not None:\n right_indexer = algos.take_nd(right_lev_indexer,\n join_index.labels[level],\n allow_fill=False)\n else:\n right_indexer = join_index.labels[level]\n\n if flip_order:\n left_indexer, right_indexer = right_indexer, left_indexer\n\n if return_indexers:\n left_indexer = (None if left_indexer is None\n else _ensure_platform_int(left_indexer))\n right_indexer = (None if right_indexer is None\n else _ensure_platform_int(right_indexer))\n return join_index, left_indexer, right_indexer\n else:\n return join_index\n\n def _join_monotonic(self, other, how='left', return_indexers=False):\n if self.equals(other):\n ret_index = other if how == 'right' else self\n if return_indexers:\n return ret_index, None, None\n else:\n return ret_index\n\n sv = self._ndarray_values\n ov = other._ndarray_values\n\n if self.is_unique and other.is_unique:\n # We can perform much better than the general case\n if how == 'left':\n join_index = self\n lidx = None\n ridx = self._left_indexer_unique(sv, ov)\n elif how == 'right':\n join_index = other\n lidx = self._left_indexer_unique(ov, sv)\n ridx = None\n elif how == 'inner':\n join_index, lidx, ridx = self._inner_indexer(sv, ov)\n join_index = self._wrap_joined_index(join_index, other)\n elif how == 'outer':\n join_index, lidx, ridx = self._outer_indexer(sv, ov)\n join_index = self._wrap_joined_index(join_index, other)\n else:\n if how == 'left':\n join_index, lidx, ridx = self._left_indexer(sv, ov)\n elif how == 'right':\n join_index, ridx, lidx = self._left_indexer(ov, sv)\n elif how == 'inner':\n join_index, lidx, ridx = self._inner_indexer(sv, ov)\n elif how == 'outer':\n join_index, lidx, ridx = self._outer_indexer(sv, ov)\n join_index = self._wrap_joined_index(join_index, other)\n\n if return_indexers:\n lidx = None if lidx is None else _ensure_platform_int(lidx)\n ridx = None if ridx is None else _ensure_platform_int(ridx)\n return join_index, lidx, ridx\n else:\n return join_index\n\n def _wrap_joined_index(self, joined, other):\n name = self.name if self.name == other.name else None\n return Index(joined, name=name)\n\n def _get_string_slice(self, key, use_lhs=True, use_rhs=True):\n # this is for partial string indexing,\n # overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex\n raise NotImplementedError\n\n def slice_indexer(self, 
start=None, end=None, step=None, kind=None):\n \"\"\"\n For an ordered or unique index, compute the slice indexer for input\n labels and step.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning\n end : label, default None\n If None, defaults to the end\n step : int, default None\n kind : string, default None\n\n Returns\n -------\n indexer : slice\n\n Raises\n ------\n KeyError : If key does not exist, or key is not unique and index is\n not ordered.\n\n Notes\n -----\n This function assumes that the data is sorted, so use at your own peril\n\n Examples\n ---------\n This is a method on all index types. For example you can do:\n\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_indexer(start='b', end='c')\n slice(1, 3)\n\n >>> idx = pd.MultiIndex.from_arrays([list('abcd'), list('efgh')])\n >>> idx.slice_indexer(start='b', end=('c', 'g'))\n slice(1, 3)\n \"\"\"\n start_slice, end_slice = self.slice_locs(start, end, step=step,\n kind=kind)\n\n # return a slice\n if not is_scalar(start_slice):\n raise AssertionError(\"Start slice bound is non-scalar\")\n if not is_scalar(end_slice):\n raise AssertionError(\"End slice bound is non-scalar\")\n\n return slice(start_slice, end_slice, step)\n\n def _maybe_cast_indexer(self, key):\n \"\"\"\n If we have a float key and are not a floating index\n then try to cast to an int if equivalent\n \"\"\"\n\n if is_float(key) and not self.is_floating():\n try:\n ckey = int(key)\n if ckey == key:\n key = ckey\n except (OverflowError, ValueError, TypeError):\n pass\n return key\n\n def _validate_indexer(self, form, key, kind):\n \"\"\"\n if we are positional indexer\n validate that we have appropriate typed bounds\n must be an integer\n \"\"\"\n assert kind in ['ix', 'loc', 'getitem', 'iloc']\n\n if key is None:\n pass\n elif is_integer(key):\n pass\n elif kind in ['iloc', 'getitem']:\n self._invalid_indexer(form, key)\n return key\n\n _index_shared_docs['_maybe_cast_slice_bound'] = \"\"\"\n This function should be overloaded in subclasses that allow non-trivial\n casting on label-slice bounds, e.g. datetime-like indices allowing\n strings containing formatted datetimes.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n\n \"\"\"\n\n @Appender(_index_shared_docs['_maybe_cast_slice_bound'])\n def _maybe_cast_slice_bound(self, label, side, kind):\n assert kind in ['ix', 'loc', 'getitem', None]\n\n # We are a plain index here (sub-class override this method if they\n # wish to have special treatment for floats/ints, e.g. 
Float64Index and\n # datetimelike Indexes\n # reject them\n if is_float(label):\n if not (kind in ['ix'] and (self.holds_integer() or\n self.is_floating())):\n self._invalid_indexer('slice', label)\n\n # we are trying to find integer bounds on a non-integer based index\n # this is rejected (generally .loc gets you here)\n elif is_integer(label):\n self._invalid_indexer('slice', label)\n\n return label\n\n def _searchsorted_monotonic(self, label, side='left'):\n if self.is_monotonic_increasing:\n return self.searchsorted(label, side=side)\n elif self.is_monotonic_decreasing:\n # np.searchsorted expects ascending sort order, have to reverse\n # everything for it to work (element ordering, search side and\n # resulting value).\n pos = self[::-1].searchsorted(label, side='right' if side == 'left'\n else 'left')\n return len(self) - pos\n\n raise ValueError('index must be monotonic increasing or decreasing')\n\n def _get_loc_only_exact_matches(self, key):\n \"\"\"\n This is overridden on subclasses (namely, IntervalIndex) to control\n get_slice_bound.\n \"\"\"\n return self.get_loc(key)\n\n def get_slice_bound(self, label, side, kind):\n \"\"\"\n Calculate slice bound that corresponds to given label.\n\n Returns leftmost (one-past-the-rightmost if ``side=='right'``) position\n of given label.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'ix', 'loc', 'getitem'}\n\n \"\"\"\n assert kind in ['ix', 'loc', 'getitem', None]\n\n if side not in ('left', 'right'):\n raise ValueError(\"Invalid value for side kwarg,\"\n \" must be either 'left' or 'right': %s\" %\n (side, ))\n\n original_label = label\n\n # For datetime indices label may be a string that has to be converted\n # to datetime boundary according to its resolution.\n label = self._maybe_cast_slice_bound(label, side, kind)\n\n # we need to look up the label\n try:\n slc = self._get_loc_only_exact_matches(label)\n except KeyError as err:\n try:\n return self._searchsorted_monotonic(label, side)\n except ValueError:\n # raise the original KeyError\n raise err\n\n if isinstance(slc, np.ndarray):\n # get_loc may return a boolean array or an array of indices, which\n # is OK as long as they are representable by a slice.\n if is_bool_dtype(slc):\n slc = lib.maybe_booleans_to_slice(slc.view('u1'))\n else:\n slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))\n if isinstance(slc, np.ndarray):\n raise KeyError(\"Cannot get %s slice bound for non-unique \"\n \"label: %r\" % (side, original_label))\n\n if isinstance(slc, slice):\n if side == 'left':\n return slc.start\n else:\n return slc.stop\n else:\n if side == 'right':\n return slc + 1\n else:\n return slc\n\n def slice_locs(self, start=None, end=None, step=None, kind=None):\n \"\"\"\n Compute slice locations for input labels.\n\n Parameters\n ----------\n start : label, default None\n If None, defaults to the beginning\n end : label, default None\n If None, defaults to the end\n step : int, defaults None\n If None, defaults to 1\n kind : {'ix', 'loc', 'getitem'} or None\n\n Returns\n -------\n start, end : int\n\n Notes\n -----\n This method only works if the index is monotonic or unique.\n\n Examples\n ---------\n >>> idx = pd.Index(list('abcd'))\n >>> idx.slice_locs(start='b', end='c')\n (1, 3)\n\n See Also\n --------\n Index.get_loc : Get location for a single label\n \"\"\"\n inc = (step is None or step >= 0)\n\n if not inc:\n # If it's a reverse slice, temporarily swap bounds.\n start, end = end, start\n\n start_slice = None\n if start is not 
None:\n start_slice = self.get_slice_bound(start, 'left', kind)\n if start_slice is None:\n start_slice = 0\n\n end_slice = None\n if end is not None:\n end_slice = self.get_slice_bound(end, 'right', kind)\n if end_slice is None:\n end_slice = len(self)\n\n if not inc:\n # Bounds at this moment are swapped, swap them back and shift by 1.\n #\n # slice_locs('B', 'A', step=-1): s='B', e='A'\n #\n # s='A' e='B'\n # AFTER SWAP: | |\n # v ------------------> V\n # -----------------------------------\n # | | |A|A|A|A| | | | | |B|B| | | | |\n # -----------------------------------\n # ^ <------------------ ^\n # SHOULD BE: | |\n # end=s-1 start=e-1\n #\n end_slice, start_slice = start_slice - 1, end_slice - 1\n\n # i == -1 triggers ``len(self) + i`` selection that points to the\n # last element, not before-the-first one, subtracting len(self)\n # compensates that.\n if end_slice == -1:\n end_slice -= len(self)\n if start_slice == -1:\n start_slice -= len(self)\n\n return start_slice, end_slice\n\n def delete(self, loc):\n \"\"\"\n Make new Index with passed location(-s) deleted\n\n Returns\n -------\n new_index : Index\n \"\"\"\n return self._shallow_copy(np.delete(self._data, loc))\n\n def insert(self, loc, item):\n \"\"\"\n Make new Index inserting new item at location. Follows\n Python list.append semantics for negative values\n\n Parameters\n ----------\n loc : int\n item : object\n\n Returns\n -------\n new_index : Index\n \"\"\"\n if is_scalar(item) and isna(item):\n # GH 18295\n item = self._na_value\n\n _self = np.asarray(self)\n item = self._coerce_scalar_to_index(item)._ndarray_values\n idx = np.concatenate((_self[:loc], item, _self[loc:]))\n return self._shallow_copy_with_infer(idx)\n\n def drop(self, labels, errors='raise'):\n \"\"\"\n Make new Index with passed list of labels deleted\n\n Parameters\n ----------\n labels : array-like\n errors : {'ignore', 'raise'}, default 'raise'\n If 'ignore', suppress error and existing labels are dropped.\n\n Returns\n -------\n dropped : Index\n\n Raises\n ------\n KeyError\n If none of the labels are found in the selected axis\n \"\"\"\n arr_dtype = 'object' if self.dtype == 'object' else None\n labels = com._index_labels_to_array(labels, dtype=arr_dtype)\n indexer = self.get_indexer(labels)\n mask = indexer == -1\n if mask.any():\n if errors != 'ignore':\n raise KeyError(\n 'labels %s not contained in axis' % labels[mask])\n indexer = indexer[~mask]\n return self.delete(indexer)\n\n _index_shared_docs['index_unique'] = (\n \"\"\"\n Return unique values in the index. Uniques are returned in order\n of appearance, this does NOT sort.\n\n Parameters\n ----------\n level : int or str, optional, default None\n Only return values from specified level (for MultiIndex)\n\n .. 
versionadded:: 0.23.0\n\n        Returns\n        -------\n        Index without duplicates\n\n        See Also\n        --------\n        unique\n        Series.unique\n        \"\"\")\n\n    @Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)\n    def unique(self, level=None):\n        if level is not None:\n            self._validate_index_level(level)\n        result = super(Index, self).unique()\n        return self._shallow_copy(result)\n\n    def drop_duplicates(self, keep='first'):\n        \"\"\"\n        Return Index with duplicate values removed.\n\n        Parameters\n        ----------\n        keep : {'first', 'last', ``False``}, default 'first'\n            - 'first' : Drop duplicates except for the first occurrence.\n            - 'last' : Drop duplicates except for the last occurrence.\n            - ``False`` : Drop all duplicates.\n\n        Returns\n        -------\n        deduplicated : Index\n\n        See Also\n        --------\n        Series.drop_duplicates : equivalent method on Series\n        DataFrame.drop_duplicates : equivalent method on DataFrame\n        Index.duplicated : related method on Index, indicating duplicate\n            Index values.\n\n        Examples\n        --------\n        Generate a pandas.Index with duplicate values.\n\n        >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'])\n\n        The `keep` parameter controls which duplicate values are removed.\n        The value 'first' keeps the first occurrence for each\n        set of duplicated entries. The default value of keep is 'first'.\n\n        >>> idx.drop_duplicates(keep='first')\n        Index(['lama', 'cow', 'beetle', 'hippo'], dtype='object')\n\n        The value 'last' keeps the last occurrence for each set of duplicated\n        entries.\n\n        >>> idx.drop_duplicates(keep='last')\n        Index(['cow', 'beetle', 'lama', 'hippo'], dtype='object')\n\n        The value ``False`` discards all sets of duplicated entries.\n\n        >>> idx.drop_duplicates(keep=False)\n        Index(['cow', 'beetle', 'hippo'], dtype='object')\n        \"\"\"\n        return super(Index, self).drop_duplicates(keep=keep)\n\n    def duplicated(self, keep='first'):\n        \"\"\"\n        Indicate duplicate index values.\n\n        Duplicated values are indicated as ``True`` values in the resulting\n        array. 
Either all duplicates, all except the first, or all except the\n        last occurrence of duplicates can be indicated.\n\n        Parameters\n        ----------\n        keep : {'first', 'last', False}, default 'first'\n            The value or values in a set of duplicates to mark as duplicated.\n\n            - 'first' : Mark duplicates as ``True`` except for the first\n              occurrence.\n            - 'last' : Mark duplicates as ``True`` except for the last\n              occurrence.\n            - ``False`` : Mark all duplicates as ``True``.\n\n        Examples\n        --------\n        By default, for each set of duplicated values, the first occurrence is\n        set to False and all others to True:\n\n        >>> idx = pd.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])\n        >>> idx.duplicated()\n        array([False, False,  True, False,  True])\n\n        which is equivalent to\n\n        >>> idx.duplicated(keep='first')\n        array([False, False,  True, False,  True])\n\n        By using 'last', the last occurrence of each set of duplicated values\n        is set to False and all others to True:\n\n        >>> idx.duplicated(keep='last')\n        array([ True, False,  True, False, False])\n\n        By setting keep to ``False``, all duplicates are True:\n\n        >>> idx.duplicated(keep=False)\n        array([ True, False,  True, False,  True])\n\n        Returns\n        -------\n        numpy.ndarray\n\n        See Also\n        --------\n        pandas.Series.duplicated : Equivalent method on pandas.Series\n        pandas.DataFrame.duplicated : Equivalent method on pandas.DataFrame\n        pandas.Index.drop_duplicates : Remove duplicate values from Index\n        \"\"\"\n        return super(Index, self).duplicated(keep=keep)\n\n    _index_shared_docs['fillna'] = \"\"\"\n        Fill NA/NaN values with the specified value\n\n        Parameters\n        ----------\n        value : scalar\n            Scalar value to use to fill holes (e.g. 0).\n            This value cannot be a list-like.\n        downcast : dict, default is None\n            a dict of item->dtype of what to downcast if possible,\n            or the string 'infer' which will try to downcast to an appropriate\n            equal type (e.g. 
float64 to int64 if possible)\n\n Returns\n -------\n filled : %(klass)s\n \"\"\"\n\n @Appender(_index_shared_docs['fillna'])\n def fillna(self, value=None, downcast=None):\n self._assert_can_do_op(value)\n if self.hasnans:\n result = self.putmask(self._isnan, value)\n if downcast is None:\n # no need to care metadata other than name\n # because it can't have freq if\n return Index(result, name=self.name)\n return self._shallow_copy()\n\n _index_shared_docs['dropna'] = \"\"\"\n Return Index without NA/NaN values\n\n Parameters\n ----------\n how : {'any', 'all'}, default 'any'\n If the Index is a MultiIndex, drop the value when any or all levels\n are NaN.\n\n Returns\n -------\n valid : Index\n \"\"\"\n\n @Appender(_index_shared_docs['dropna'])\n def dropna(self, how='any'):\n if how not in ('any', 'all'):\n raise ValueError(\"invalid how option: {0}\".format(how))\n\n if self.hasnans:\n return self._shallow_copy(self.values[~self._isnan])\n return self._shallow_copy()\n\n def _evaluate_with_timedelta_like(self, other, op):\n # Timedelta knows how to operate with np.array, so dispatch to that\n # operation and then wrap the results\n other = Timedelta(other)\n values = self.values\n\n with np.errstate(all='ignore'):\n result = op(values, other)\n\n attrs = self._get_attributes_dict()\n attrs = self._maybe_update_attributes(attrs)\n if op == divmod:\n return Index(result[0], **attrs), Index(result[1], **attrs)\n return Index(result, **attrs)\n\n def _evaluate_with_datetime_like(self, other, op):\n raise TypeError(\"can only perform ops with datetime like values\")\n\n def _evaluate_compare(self, other, op):\n raise com.AbstractMethodError(self)\n\n @classmethod\n def _add_comparison_methods(cls):\n \"\"\" add in comparison methods \"\"\"\n cls.__eq__ = _make_comparison_op(operator.eq, cls)\n cls.__ne__ = _make_comparison_op(operator.ne, cls)\n cls.__lt__ = _make_comparison_op(operator.lt, cls)\n cls.__gt__ = _make_comparison_op(operator.gt, cls)\n cls.__le__ = _make_comparison_op(operator.le, cls)\n cls.__ge__ = _make_comparison_op(operator.ge, cls)\n\n @classmethod\n def _add_numeric_methods_add_sub_disabled(cls):\n \"\"\" add in the numeric add/sub methods to disable \"\"\"\n cls.__add__ = make_invalid_op('__add__')\n cls.__radd__ = make_invalid_op('__radd__')\n cls.__iadd__ = make_invalid_op('__iadd__')\n cls.__sub__ = make_invalid_op('__sub__')\n cls.__rsub__ = make_invalid_op('__rsub__')\n cls.__isub__ = make_invalid_op('__isub__')\n\n @classmethod\n def _add_numeric_methods_disabled(cls):\n \"\"\" add in numeric methods to disable other than add/sub \"\"\"\n cls.__pow__ = make_invalid_op('__pow__')\n cls.__rpow__ = make_invalid_op('__rpow__')\n cls.__mul__ = make_invalid_op('__mul__')\n cls.__rmul__ = make_invalid_op('__rmul__')\n cls.__floordiv__ = make_invalid_op('__floordiv__')\n cls.__rfloordiv__ = make_invalid_op('__rfloordiv__')\n cls.__truediv__ = make_invalid_op('__truediv__')\n cls.__rtruediv__ = make_invalid_op('__rtruediv__')\n if not compat.PY3:\n cls.__div__ = make_invalid_op('__div__')\n cls.__rdiv__ = make_invalid_op('__rdiv__')\n cls.__mod__ = make_invalid_op('__mod__')\n cls.__divmod__ = make_invalid_op('__divmod__')\n cls.__neg__ = make_invalid_op('__neg__')\n cls.__pos__ = make_invalid_op('__pos__')\n cls.__abs__ = make_invalid_op('__abs__')\n cls.__inv__ = make_invalid_op('__inv__')\n\n def _maybe_update_attributes(self, attrs):\n \"\"\" Update Index attributes (e.g. 
freq) depending on op \"\"\"\n return attrs\n\n def _validate_for_numeric_unaryop(self, op, opstr):\n \"\"\" validate if we can perform a numeric unary operation \"\"\"\n\n if not self._is_numeric_dtype:\n raise TypeError(\"cannot evaluate a numeric op \"\n \"{opstr} for type: {typ}\"\n .format(opstr=opstr, typ=type(self).__name__))\n\n def _validate_for_numeric_binop(self, other, op):\n \"\"\"\n return valid other, evaluate or raise TypeError\n if we are not of the appropriate type\n\n internal method called by ops\n \"\"\"\n opstr = '__{opname}__'.format(opname=op.__name__)\n # if we are an inheritor of numeric,\n # but not actually numeric (e.g. DatetimeIndex/PeriodIndex)\n if not self._is_numeric_dtype:\n raise TypeError(\"cannot evaluate a numeric op {opstr} \"\n \"for type: {typ}\"\n .format(opstr=opstr, typ=type(self).__name__))\n\n if isinstance(other, Index):\n if not other._is_numeric_dtype:\n raise TypeError(\"cannot evaluate a numeric op \"\n \"{opstr} with type: {typ}\"\n .format(opstr=opstr, typ=type(other)))\n elif isinstance(other, np.ndarray) and not other.ndim:\n other = other.item()\n\n if isinstance(other, (Index, ABCSeries, np.ndarray)):\n if len(self) != len(other):\n raise ValueError(\"cannot evaluate a numeric op with \"\n \"unequal lengths\")\n other = com._values_from_object(other)\n if other.dtype.kind not in ['f', 'i', 'u']:\n raise TypeError(\"cannot evaluate a numeric op \"\n \"with a non-numeric dtype\")\n elif isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):\n # higher up to handle\n pass\n elif isinstance(other, (datetime, np.datetime64)):\n # higher up to handle\n pass\n else:\n if not (is_float(other) or is_integer(other)):\n raise TypeError(\"can only perform ops with scalar values\")\n\n return other\n\n @classmethod\n def _add_numeric_methods_binary(cls):\n \"\"\" add in numeric methods \"\"\"\n cls.__add__ = _make_arithmetic_op(operator.add, cls)\n cls.__radd__ = _make_arithmetic_op(ops.radd, cls)\n cls.__sub__ = _make_arithmetic_op(operator.sub, cls)\n cls.__rsub__ = _make_arithmetic_op(ops.rsub, cls)\n cls.__mul__ = _make_arithmetic_op(operator.mul, cls)\n cls.__rmul__ = _make_arithmetic_op(ops.rmul, cls)\n cls.__rpow__ = _make_arithmetic_op(ops.rpow, cls)\n cls.__pow__ = _make_arithmetic_op(operator.pow, cls)\n cls.__mod__ = _make_arithmetic_op(operator.mod, cls)\n cls.__floordiv__ = _make_arithmetic_op(operator.floordiv, cls)\n cls.__rfloordiv__ = _make_arithmetic_op(ops.rfloordiv, cls)\n cls.__truediv__ = _make_arithmetic_op(operator.truediv, cls)\n cls.__rtruediv__ = _make_arithmetic_op(ops.rtruediv, cls)\n if not compat.PY3:\n cls.__div__ = _make_arithmetic_op(operator.div, cls)\n cls.__rdiv__ = _make_arithmetic_op(ops.rdiv, cls)\n\n cls.__divmod__ = _make_arithmetic_op(divmod, cls)\n\n @classmethod\n def _add_numeric_methods_unary(cls):\n \"\"\" add in numeric unary methods \"\"\"\n\n def _make_evaluate_unary(op, opstr):\n\n def _evaluate_numeric_unary(self):\n\n self._validate_for_numeric_unaryop(op, opstr)\n attrs = self._get_attributes_dict()\n attrs = self._maybe_update_attributes(attrs)\n return Index(op(self.values), **attrs)\n\n return _evaluate_numeric_unary\n\n cls.__neg__ = _make_evaluate_unary(operator.neg, '__neg__')\n cls.__pos__ = _make_evaluate_unary(operator.pos, '__pos__')\n cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__')\n cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')\n\n @classmethod\n def _add_numeric_methods(cls):\n cls._add_numeric_methods_unary()\n cls._add_numeric_methods_binary()\n\n 
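# The class-level installers above and below are how each Index subclass\n    # picks its operator surface: arithmetic-capable subclasses are expected\n    # to call _add_numeric_methods(), while the plain Index installs the\n    # disabled numeric stubs at the bottom of this module.\n    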
@classmethod\n def _add_logical_methods(cls):\n \"\"\" add in logical methods \"\"\"\n\n _doc = \"\"\"\n %(desc)s\n\n Parameters\n ----------\n *args\n These parameters will be passed to numpy.%(outname)s.\n **kwargs\n These parameters will be passed to numpy.%(outname)s.\n\n Returns\n -------\n %(outname)s : bool or array_like (if axis is specified)\n A single element array_like may be converted to bool.\"\"\"\n\n _index_shared_docs['index_all'] = dedent(\"\"\"\n\n See Also\n --------\n pandas.Index.any : Return whether any element in an Index is True.\n pandas.Series.any : Return whether any element in a Series is True.\n pandas.Series.all : Return whether all elements in a Series are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n **all**\n\n True, because nonzero integers are considered True.\n\n >>> pd.Index([1, 2, 3]).all()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 1, 2]).all()\n False\n\n **any**\n\n True, because ``1`` is considered True.\n\n >>> pd.Index([0, 0, 1]).any()\n True\n\n False, because ``0`` is considered False.\n\n >>> pd.Index([0, 0, 0]).any()\n False\n \"\"\")\n\n _index_shared_docs['index_any'] = dedent(\"\"\"\n\n See Also\n --------\n pandas.Index.all : Return whether all elements are True.\n pandas.Series.all : Return whether all elements are True.\n\n Notes\n -----\n Not a Number (NaN), positive infinity and negative infinity\n evaluate to True because these are not equal to zero.\n\n Examples\n --------\n >>> index = pd.Index([0, 1, 2])\n >>> index.any()\n True\n\n >>> index = pd.Index([0, 0, 0])\n >>> index.any()\n False\n \"\"\")\n\n def _make_logical_function(name, desc, f):\n @Substitution(outname=name, desc=desc)\n @Appender(_index_shared_docs['index_' + name])\n @Appender(_doc)\n def logical_func(self, *args, **kwargs):\n result = f(self.values)\n if (isinstance(result, (np.ndarray, ABCSeries, Index)) and\n result.ndim == 0):\n # return NumPy type\n return result.dtype.type(result.item())\n else: # pragma: no cover\n return result\n\n logical_func.__name__ = name\n return logical_func\n\n cls.all = _make_logical_function('all', 'Return whether all elements '\n 'are True.',\n np.all)\n cls.any = _make_logical_function('any',\n 'Return whether any element is True.',\n np.any)\n\n @classmethod\n def _add_logical_methods_disabled(cls):\n \"\"\" add in logical methods to disable \"\"\"\n cls.all = make_invalid_op('all')\n cls.any = make_invalid_op('any')\n\n\nIndex._add_numeric_methods_disabled()\nIndex._add_logical_methods()\nIndex._add_comparison_methods()\n\n\ndef _ensure_index_from_sequences(sequences, names=None):\n \"\"\"Construct an index from sequences of data.\n\n A single sequence returns an Index. 
Many sequences returns a\n MultiIndex.\n\n Parameters\n ----------\n sequences : sequence of sequences\n names : sequence of str\n\n Returns\n -------\n index : Index or MultiIndex\n\n Examples\n --------\n >>> _ensure_index_from_sequences([[1, 2, 3]], names=['name'])\n Int64Index([1, 2, 3], dtype='int64', name='name')\n\n >>> _ensure_index_from_sequences([['a', 'a'], ['a', 'b']],\n names=['L1', 'L2'])\n MultiIndex(levels=[['a'], ['a', 'b']],\n labels=[[0, 0], [0, 1]],\n names=['L1', 'L2'])\n\n See Also\n --------\n _ensure_index\n \"\"\"\n from .multi import MultiIndex\n\n if len(sequences) == 1:\n if names is not None:\n names = names[0]\n return Index(sequences[0], name=names)\n else:\n return MultiIndex.from_arrays(sequences, names=names)\n\n\ndef _ensure_index(index_like, copy=False):\n \"\"\"\n Ensure that we have an index from some index-like object\n\n Parameters\n ----------\n index : sequence\n An Index or other sequence\n copy : bool\n\n Returns\n -------\n index : Index or MultiIndex\n\n Examples\n --------\n >>> _ensure_index(['a', 'b'])\n Index(['a', 'b'], dtype='object')\n\n >>> _ensure_index([('a', 'a'), ('b', 'c')])\n Index([('a', 'a'), ('b', 'c')], dtype='object')\n\n >>> _ensure_index([['a', 'a'], ['b', 'c']])\n MultiIndex(levels=[['a'], ['b', 'c']],\n labels=[[0, 0], [0, 1]])\n\n See Also\n --------\n _ensure_index_from_sequences\n \"\"\"\n if isinstance(index_like, Index):\n if copy:\n index_like = index_like.copy()\n return index_like\n if hasattr(index_like, 'name'):\n return Index(index_like, name=index_like.name, copy=copy)\n\n if is_iterator(index_like):\n index_like = list(index_like)\n\n # must check for exactly list here because of strict type\n # check in clean_index_list\n if isinstance(index_like, list):\n if type(index_like) != list:\n index_like = list(index_like)\n\n converted, all_arrays = lib.clean_index_list(index_like)\n\n if len(converted) > 0 and all_arrays:\n from .multi import MultiIndex\n return MultiIndex.from_arrays(converted)\n else:\n index_like = converted\n else:\n # clean_index_list does the equivalent of copying\n # so only need to do this if not list instance\n if copy:\n from copy import copy\n index_like = copy(index_like)\n\n return Index(index_like)\n\n\ndef _ensure_has_len(seq):\n \"\"\"If seq is an iterator, put its values into a list.\"\"\"\n try:\n len(seq)\n except TypeError:\n return list(seq)\n else:\n return seq\n\n\ndef _trim_front(strings):\n \"\"\"\n Trims zeros and decimal points\n \"\"\"\n trimmed = strings\n while len(strings) > 0 and all(x[0] == ' ' for x in trimmed):\n trimmed = [x[1:] for x in trimmed]\n return trimmed\n\n\ndef _validate_join_method(method):\n if method not in ['left', 'right', 'inner', 'outer']:\n raise ValueError('do not recognize join method %s' % method)\n" ]
[ [ "pandas.core.indexes.frozen.FrozenList", "pandas.io.formats.format._get_adjustment", "numpy.asarray", "pandas.core.ops.make_invalid_op", "pandas.io.formats.format.format_array", "pandas.util._decorators.deprecate_kwarg", "pandas.core.dtypes.common.is_unsigned_integer_dtype", "pandas.core.dtypes.common.is_datetime64_any_dtype", "pandas._libs.Timedelta", "pandas.core.indexes.period._new_PeriodIndex", "pandas._libs.lib.infer_dtype", "pandas.core.dtypes.common.is_period_dtype", "numpy.repeat", "pandas.core.dtypes.common.is_scalar", "pandas.util._decorators.Appender", "numpy.errstate", "pandas.io.formats.console.get_console_size", "numpy.array", "pandas.core.dtypes.common.is_dtype_union_equal", "pandas.compat.iteritems", "pandas.core.dtypes.concat._concat_index_asobject", "pandas.core.dtypes.concat._concat_compat", "pandas.core.dtypes.common.is_integer", "numpy.ndarray.__setstate__", "pandas.core.indexes.timedeltas.TimedeltaIndex", "pandas.core.dtypes.common.is_hashable", "pandas._libs.index.get_value_box", "pandas.io.formats.printing.pprint_thing", "numpy.delete", "pandas.core.dtypes.common.is_timedelta64_dtype", "numpy.hstack", "pandas.core.dtypes.missing.isna", "pandas.core.ops._comp_method_OBJECT_ARRAY", "pandas.core.common._index_labels_to_array", "pandas._libs.lib.clean_index_list", "pandas.core.dtypes.common.is_datetime64tz_dtype", "pandas.core.dtypes.common.is_object_dtype", "numpy.concatenate", "pandas.core.dtypes.common.is_integer_dtype", "pandas.core.common.AbstractMethodError", "pandas.core.dtypes.common.is_list_like", "pandas.core.dtypes.common.is_categorical_dtype", "pandas._libs.lib.is_datetime_with_singletz_array", "pandas.core.sorting.safe_sort", "pandas.core.dtypes.common._ensure_object", "pandas.core.dtypes.common._ensure_categorical", "numpy.arange", "numpy.sort", "pandas.core.dtypes.common.is_bool", "pandas.compat.u", "pandas.core.algorithms.take", "pandas.core.dtypes.common._ensure_platform_int", "pandas.core.indexes.period.PeriodIndex", "pandas.core.dtypes.common.is_signed_integer_dtype", "numpy.dtype", "pandas.core.accessor.CachedAccessor", "pandas.core.dtypes.common.is_float_dtype", "pandas.core.common._asarray_tuplesafe", "pandas._libs.lib.maybe_convert_objects", "pandas.compat.numpy.function.validate_repeat", "pandas.core.dtypes.common.is_float", "pandas.core.config.get_option", "pandas.core.dtypes.common.is_iterator", "pandas.core.common._values_from_object", "numpy.where", "pandas.core.dtypes.concat.get_dtype_kinds", "pandas.core.dtypes.common.needs_i8_conversion", "pandas.core.dtypes.common._ensure_int64", "pandas.core.common._not_none", "pandas.compat.set_function_name", "pandas.core.dtypes.common.is_bool_dtype", "pandas.core.dtypes.common.is_interval_dtype", "pandas.core.algorithms.take_nd", "pandas._libs.algos.groupsort_indexer", "pandas.core.common.is_bool_indexer", "pandas.core.algorithms.isin", "pandas.core.indexes.datetimes.DatetimeIndex", "pandas.util._decorators.Substitution", "numpy.empty", "pandas.core.dtypes.common.is_dtype_equal", "pandas.core.reshape.merge._get_join_indexers" ] ]
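The pandas record above ends with `Index._add_numeric_methods` / `_add_logical_methods`, which bolt dunder operators onto the class in bulk at import time. A minimal sketch of that class-patching pattern, using a toy `SimpleIndex` class and `_make_op` helper (both hypothetical illustrations, not pandas API):

```python
import operator

def _make_op(op):
    """Build a dunder method that applies `op` elementwise over wrapped values."""
    def method(self, other):
        return SimpleIndex(op(v, other) for v in self.values)
    method.__name__ = '__{}__'.format(op.__name__)
    return method

class SimpleIndex(object):
    """Toy stand-in for pandas Index; only the method-injection pattern matters."""
    def __init__(self, values):
        self.values = list(values)

    @classmethod
    def _add_numeric_methods(cls):
        # Same move as Index._add_numeric_methods_binary: attach ops in bulk.
        cls.__add__ = _make_op(operator.add)
        cls.__mul__ = _make_op(operator.mul)

SimpleIndex._add_numeric_methods()
print((SimpleIndex([1, 2, 3]) + 10).values)  # [11, 12, 13]
```

pandas performs the equivalent calls at module import (visible at the end of the record above), which is why plain `Index` objects support arithmetic without each operator being written out by hand.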
tkhan11/TextClassification
[ "4a2121c8c396b8c1e453d0d49f5a92194dac53ba" ]
[ "drugReviewLSTM.py" ]
[ "import string\nimport numpy as np\nimport pandas as pd\nfrom numpy import argmax\nfrom numpy import array\nimport re\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport seaborn as sns\n\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation\nfrom keras.layers.embeddings import Embedding\nfrom keras.utils.np_utils import to_categorical # convert to one-hot-encoding\n\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.snowball import SnowballStemmer\nfrom sklearn.metrics import cohen_kappa_score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nimport time\nimport itertools\nimport warnings\nfrom sklearn.model_selection import StratifiedKFold\n\nseconds= time.time()\ntime_start = time.ctime(seconds) # The time.ctime() function takes seconds passed since epoch\nprint(\"start time:\", time_start,\"\\n\") # as an argument and returns a string representing time.\n\n\n\n# Data Acquisition\n\nTrain_dataset=pd.read_csv('.\\\\drugLibTrain_raw.tsv', sep='\\t')\nTest_dataset=pd.read_csv('.\\\\drugLibTest_raw.tsv', sep='\\t')\n\n#print(\"Train dataset shape :\",Train_dataset.shape, \" Test dataset shape: \",Test_dataset.shape,\"\\n\")\n\n# DATA PREPROCESSING Phase\n\n\n# for Predicting Patient Satisfaction based on rating and all reviews\n\nPrediction_Train_data = pd.DataFrame({'rating':Train_dataset.rating,\n 'benefits_reviews':Train_dataset.benefitsReview,\n 'side_effects_reviews':Train_dataset.sideEffectsReview,\n 'comments':Train_dataset.commentsReview})\n\nPrediction_Test_data = pd.DataFrame({'rating':Test_dataset.rating,\n 'benefits_reviews':Test_dataset.benefitsReview,\n 'side_effects_reviews':Test_dataset.sideEffectsReview,\n 'comments':Test_dataset.commentsReview})\n\n\n# performing concatanation to join benifits_review, side_effects_review and comments into a Report attribute to predict the overall satisfaction of patients\n\nreport=['benefits_reviews','side_effects_reviews','comments']\n\nPrediction_Train_data['report'] = Prediction_Train_data[report].apply(lambda row: '_'.join(row.values.astype(str)), axis=1)\nPrediction_Test_data['report'] = Prediction_Test_data[report].apply(lambda row: '_'.join(row.values.astype(str)), axis=1)\n\n\n\n\n\n# Labeling of ratings as Postive, Negative and Neutral for sentiment classification\nPrediction_Train_data['Sentiment'] = [ 'Negative' if (x<=4) else 'Neutral' if (4<x<=7) else 'Positive' for x in Prediction_Train_data['rating']]\nPrediction_Test_data['Sentiment'] = [ 'Negative' if (x<=4) else 'Neutral' if (4<x<=7) else 'Positive' for x in Prediction_Test_data['rating']]\n\n\n# Dropping the columns that are not required for the neural network.\nPrediction_Train_data.drop(['rating', 'benefits_reviews', 'side_effects_reviews','comments'],axis=1,inplace=True)\nPrediction_Test_data.drop(['rating', 'benefits_reviews', 'side_effects_reviews','comments'],axis=1,inplace=True)\n\nprint(\"\\nTrain dataset: \",Prediction_Train_data.shape,\"Test dataset: \",Prediction_Test_data.shape)\n\n\n\n# Text Pre-Processing on Test and Train data\n\n# filtering out all the rows with empty comments.\nPrediction_Train_data = 
Prediction_Train_data[Prediction_Train_data.report.apply(lambda x: x !=\"\")]\nPrediction_Test_data = Prediction_Test_data[Prediction_Test_data.report.apply(lambda x: x !=\"\")]\n\n\ndef process_text(report):\n \n # Remove puncuation\n report = report.translate(string.punctuation)\n \n # Convert words to lower case and split them\n report = report.lower().split()\n \n # Remove stop words\n\n #stop_words = set(stopwords.words(\"english\"))\n #report = [w for w in report if not w in stop_words]\n report = \" \".join(report)\n\n # Clean the text\n report = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", report)\n report = re.sub(r\",\", \" \", report)\n report = re.sub(r\"\\.\", \" \", report)\n report = re.sub(r\"!\", \" \", report)\n report = re.sub(r\":\", \" \", report)\n\n # Stemming\n report = report.split()\n stemmer = SnowballStemmer('english')\n stemmed_words = [stemmer.stem(word) for word in report]\n report = \" \".join(stemmed_words)\n\n return report\n\n\n# Applying process_text function on Train and Test data for cleaning of text\nPrediction_Train_data['report'] = Prediction_Train_data['report'].map(lambda x: process_text(x))\nPrediction_Test_data['report'] = Prediction_Test_data['report'].map(lambda x: process_text(x))\n\n\n# Splitting data for Training and testing\n\nSentiment_train = Prediction_Train_data['Sentiment']\nReport_train = Prediction_Train_data['report']\n\nSentiment_test = Prediction_Test_data['Sentiment']\nReport_test = Prediction_Test_data['report']\n\n \n# One-Hot Encoding of Sentiment_Train\n\nSentiment_train = array(Sentiment_train)\n\n# integer encode\nlabel_encoder = LabelEncoder()\nSentiment_train_integer_encoded = label_encoder.fit_transform(Sentiment_train)\n\n# binary encode\nonehot_encoder = OneHotEncoder(sparse=False)\nSentiment_train_integer_encoded = Sentiment_train_integer_encoded.reshape(len(Sentiment_train_integer_encoded), 1)\nSentiment_train_onehot_encoded = onehot_encoder.fit_transform(Sentiment_train_integer_encoded)\n\n\n# One-Hot Encoding of Sentiment_Test\n\nSentiment_test = array(Sentiment_test)\n\n# integer encode\nlabel_encoder = LabelEncoder()\nSentiment_test_integer_encoded = label_encoder.fit_transform(Sentiment_test)\n\n# binary encode\nonehot_encoder = OneHotEncoder(sparse=False)\nSentiment_test_integer_encoded = Sentiment_test_integer_encoded.reshape(len(Sentiment_test_integer_encoded), 1)\nSentiment_test_onehot_encoded = onehot_encoder.fit_transform(Sentiment_test_integer_encoded)\n\n\n#print(\"Sentiment_Train shape after one-hot encoding : \",Sentiment_train_onehot_encoded.shape,\" \"\n # ,\"Sentiment_Test shape after one-hot encoding : \",Sentiment_test_onehot_encoded.shape,\"\\n\")\n\n# Tokenize and Create Sequence For Train set\n\ntokenizer = Tokenizer(num_words = 10000)\ntokenizer.fit_on_texts(Report_train)\n\nReport_train_sequences = tokenizer.texts_to_sequences(Report_train)\nReport_train_padded = pad_sequences(Report_train_sequences, maxlen=100, padding='post', truncating='post') # maxlen is the size of words in a review here it is 100\n\n\n# Tokenize and Create Sequence For Test set\n\nReport_test_sequences = tokenizer.texts_to_sequences(Report_test)\nReport_test_padded = pad_sequences(Report_test_sequences, maxlen=100, padding='post', truncating='post')\n\n\nprint(\"Report_Train shape after padding : \",Report_train_padded.shape,\" \",\"Report_Test shape after padding: \",Report_test_padded.shape)\n\nSentiment_labels = ['Negative', 'Neutral', 'Positive'] # 0:Negative 1: Neutral 2:Positive\n\n\n# Defining the LSTM 
model\n\nmodel = Sequential()\nmodel.add(Embedding(10000, 100, input_length=100))\nmodel.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))\nmodel.add(Dense(64, activation='sigmoid')) \nmodel.add(Dense(32, activation='sigmoid')) \nmodel.add(Dense(3, activation='softmax')) \n\n# Compile the model\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nnum_epochs = 10\nbatch_size = 128\n\n# Train the model\nhistory = model.fit(Report_train_padded, Sentiment_train_onehot_encoded,\n validation_split=0.1, batch_size = batch_size, epochs= num_epochs)\n \n\n\nprint(\"\\n\",\"****************MODEL EVALUATION ************************\\n\")\n\n\n\n\n# Model Evaluation on Test data\n\ntest_loss,test_acc = model.evaluate(Report_test_padded, Sentiment_test_onehot_encoded)\n\nprint(\"\\n Evaluated model accuracy on test data :\",test_acc)\n\nseconds= time.time()\ntime_stop = time.ctime(seconds)\nprint(\"\\n\",\"stop time:\", time_stop,\"\\n\")\n\n\n\n\n# Predict the values from the Test dataset\nSentiment_pred = model.predict(Report_test_padded)\n# Convert predictions classes to one hot vectors \nSentiment_pred_classes = np.argmax(Sentiment_pred,axis = 1) \n# computing the confusion matrix\nconfusion_mtx = confusion_matrix(Sentiment_test_integer_encoded, Sentiment_pred_classes) \n\n\n#Printing Classification Report\n\nprint(classification_report(Sentiment_test_integer_encoded, Sentiment_pred_classes, target_names = Sentiment_labels))\n\naccuracy = accuracy_score(Sentiment_test_integer_encoded, Sentiment_pred_classes)\nprint('Accuracy: %f' % accuracy)\n\n\ncohen_score = cohen_kappa_score(Sentiment_test_integer_encoded, Sentiment_pred_classes)\nprint('Cohen_score: %f' % cohen_score)\n\n\n# Training and validation curves\n\n# Plot the loss and accuracy curves for training and validation \nfig, ax = plt.subplots(2,1)\nax[0].plot(history.history['loss'], color='b', label=\"Training loss\")\nax[0].plot(history.history['val_loss'], color='r', label=\"validation loss\",axes =ax[0])\nlegend = ax[0].legend(loc='best', shadow=True)\n\nax[1].plot(history.history['accuracy'], color='b', label=\"Training accuracy\")\nax[1].plot(history.history['val_accuracy'], color='r',label=\"Validation accuracy\")\nlegend = ax[1].legend(loc='best', shadow=True)\n\nplt.show()\n\n\n\n# Defining function for plotting confusion matrix \n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n\n\n# plot the confusion matrix\nplot_confusion_matrix(confusion_mtx, classes = range(3)) \n\n\n\n" ]
[ [ "sklearn.metrics.classification_report", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.imshow", "matplotlib.pyplot.ylabel", "sklearn.preprocessing.LabelEncoder", "matplotlib.pyplot.xticks", "matplotlib.pyplot.title", "matplotlib.pyplot.text", "sklearn.preprocessing.OneHotEncoder", "pandas.read_csv", "numpy.argmax", "matplotlib.pyplot.subplots", "sklearn.metrics.confusion_matrix", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.colorbar", "sklearn.metrics.cohen_kappa_score", "pandas.DataFrame", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xlabel" ] ]
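The LSTM script above hinges on the tokenize-and-pad step that feeds the `Embedding` layer. A minimal sketch of just that step with toy review strings, using the same Keras calls as the script (on recent TensorFlow these modules live under `tensorflow.keras.preprocessing` instead):

```python
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

# Toy reviews standing in for the concatenated drug-review "report" column.
reports = ["the drug worked well", "severe side effects and headache"]

tokenizer = Tokenizer(num_words=10000)
tokenizer.fit_on_texts(reports)           # build the vocabulary

sequences = tokenizer.texts_to_sequences(reports)
# Fixed-length input for the Embedding/LSTM stack: pad or truncate at the end.
padded = pad_sequences(sequences, maxlen=100, padding='post', truncating='post')
print(padded.shape)  # (2, 100)
```

`padding='post'` matches the script: shorter reports are zero-padded at the end so every input is exactly `maxlen` tokens wide.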
stevehamwu/EmotionCauseExtraction
[ "b5a160f35f7b03bf3730b6885096dbc5f958df8b" ]
[ "metrics/ec/ec.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/2/22 17:21\n# @Author : Steve Wu\n# @Site : \n# @File : ec.py\n# @Software: PyCharm\n# @Github : https://github.com/stevehamwu\nimport itertools\nimport numpy as np\nfrom metrics.metrics import Metrics\nfrom utils.app.log import Logger\nfrom sklearn.metrics import roc_auc_score\n\n\nclass ECMetrics(Metrics):\n\n def __call__(self, pred_labels, true_labels, probs):\n \"\"\"\n Args:\n pred_labels: (bat, n(s)), 0-3\n true_labels: (bat, n(s)), 0-3\n \"\"\"\n if type(pred_labels[0]) != int:\n pred_labels = list(itertools.chain.from_iterable(pred_labels))\n true_labels = list(itertools.chain.from_iterable(true_labels))\n tp, tn, fp, fn = 0, 0, 0, 0\n all_pred, all_true, all_probs = [], [], []\n for i in range(len(pred_labels)):\n if true_labels[i] == self.config['ignore_index']:\n continue\n if pred_labels[i] == true_labels[i]:\n if true_labels[i] == 0:\n tn += 1\n else:\n tp += 1\n else:\n if true_labels[i] == 0:\n fp += 1\n else:\n fn += 1\n all_pred.append(pred_labels[i])\n all_true.append(true_labels[i])\n all_probs.append(probs[i])\n acc = (tp + tn) / (tp + tn + fp + fn)\n pre = tp / (tp + fp) if (tp + fp) > 0 else 0\n rec = tp / (tp + fn) if (tp + fn) > 0 else 0\n f1 = 2 * pre * rec / (pre + rec) if (pre + rec) > 0 else 0\n auc = roc_auc_score(all_true, all_probs) if sum(all_true) > 0 else 0.\n return tp + tn + fp + fn, acc, pre, rec, f1, auc\n" ]
[ [ "sklearn.metrics.roc_auc_score" ] ]
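`ECMetrics.__call__` reduces the 0-3 labels to a binary cause/no-cause decision and derives every score from the four confusion counts. A standalone sketch of that arithmetic with made-up counts (the `prf_from_counts` helper is illustrative, not from the repo):

```python
def prf_from_counts(tp, tn, fp, fn):
    """Accuracy/precision/recall/F1 with the same zero-division guards as ECMetrics."""
    acc = (tp + tn) / (tp + tn + fp + fn)
    pre = tp / (tp + fp) if (tp + fp) > 0 else 0
    rec = tp / (tp + fn) if (tp + fn) > 0 else 0
    f1 = 2 * pre * rec / (pre + rec) if (pre + rec) > 0 else 0
    return acc, pre, rec, f1

# Made-up counts: 8 causes found, 1 missed, 2 false alarms, 5 true rejections.
print(prf_from_counts(tp=8, tn=5, fp=2, fn=1))  # acc=0.8125, pre=0.8, rec~0.889, f1~0.842
```

The conditional guards mirror the class: a degenerate split (no predicted or no true positives) yields 0 instead of raising `ZeroDivisionError`.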
stefanoteso/pystruct
[ "3bafeab13eea82fa6f3077686b951bf76d810a8d" ]
[ "examples/plot_latent_crf.py" ]
[ "\"\"\"\n===================\nLatent Dynamics CRF\n===================\n\nSolving a 2d grid problem by introducing latent variable interactions.\nThe input data is the same as in plot_grid_crf, a cross pattern.\nBut now, the center is not given an extra state. That makes the problem\nmuch harder to solve for a pairwise model.\nWe can still solve it by introducing latent dynamics. In essence we allow\nan additional state with different interactions, that maps to the same\nstate (the cross) in the ground truth.\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.cross_validation import train_test_split\n\nfrom pystruct.models import LatentGridCRF\nfrom pystruct.learners import LatentSSVM, OneSlackSSVM\n\nfrom pystruct.datasets import generate_crosses\n\n\nX, Y = generate_crosses(n_samples=20, noise=5, n_crosses=1, total_size=8)\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5,\n force_arrays=False)\n\ncrf = LatentGridCRF(n_states_per_label=[1, 2])\nbase_ssvm = OneSlackSSVM(model=crf, C=10., n_jobs=-1, inference_cache=20,\n tol=.1)\nclf = LatentSSVM(base_ssvm=base_ssvm)\nclf.fit(X_train, Y_train)\nprint(\"Score training set: %f\" % clf.score(X_train, Y_train))\nprint(\"Score test set: %f\" % clf.score(X_test, Y_test))\n\nY_pred = clf.predict(X_test)\n\nx, y, y_pred = X_test[1], Y_test[1], Y_pred[1]\nfig, ax = plt.subplots(3, 2)\nax[0, 0].matshow(y, vmin=0, vmax=crf.n_labels - 1)\nax[0, 0].set_title(\"ground truth\")\nax[0, 1].matshow(np.argmax(x, axis=-1),\n vmin=0, vmax=crf.n_labels - 1)\nax[0, 1].set_title(\"unaries only\")\nax[1, 0].set_visible(False)\nax[1, 1].matshow(crf.latent(x, y, clf.w),\n vmin=0, vmax=crf.n_states - 1)\nax[1, 1].set_title(\"latent final\")\nax[2, 0].matshow(crf.inference(x, clf.w),\n vmin=0, vmax=crf.n_states - 1)\nax[2, 0].set_title(\"prediction latent\")\nax[2, 1].matshow(y_pred,\n vmin=0, vmax=crf.n_labels - 1)\nax[2, 1].set_title(\"prediction\")\nfor a in ax.ravel():\n a.set_xticks(())\n a.set_yticks(())\n\nplt.show()\n" ]
[ [ "numpy.argmax", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots", "sklearn.model_selection.train_test_split" ] ]
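The example's payoff is a panel grid comparing ground truth, latent states, and predictions via `matshow`. A minimal sketch of that layout with random toy grids, so it runs without pystruct (the arrays here are stand-ins, not CRF output):

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=(8, 8))   # toy stand-in for a cross-pattern labeling
y_pred = rng.randint(0, 2, size=(8, 8))

fig, ax = plt.subplots(1, 2)
ax[0].matshow(y_true, vmin=0, vmax=1)
ax[0].set_title("ground truth")
ax[1].matshow(y_pred, vmin=0, vmax=1)
ax[1].set_title("prediction")
for a in ax.ravel():        # same tick-stripping loop as the example
    a.set_xticks(())
    a.set_yticks(())
plt.show()
```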
lambertaurelle/dfcx-scrapi
[ "bff29c498f0d54701d651c2f99bcd51d4da5613c" ]
[ "src/dfcx_scrapi/tools/validation_util.py" ]
[ "\"\"\"Working with built in CX Validation featrure.\"\"\"\n\n# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport re\nfrom typing import Dict\nimport pandas as pd\n\nfrom dfcx_scrapi.core.scrapi_base import ScrapiBase\nfrom dfcx_scrapi.core.agents import Agents\nfrom dfcx_scrapi.core.flows import Flows\n\nSCOPES = [\n \"https://www.googleapis.com/auth/cloud-platform\",\n \"https://www.googleapis.com/auth/dialogflow\",\n]\n\n\nclass ValidationUtil(ScrapiBase):\n \"\"\"Class that extends the built in CX Validation feature.\"\"\"\n\n def __init__(\n self,\n creds_path: str = None,\n creds_dict: Dict = None,\n creds=None,\n scope=False,\n ):\n super().__init__(\n creds_path=creds_path,\n creds_dict=creds_dict,\n creds=creds,\n scope=scope,\n )\n\n self.agents = Agents(creds_path=creds_path, creds_dict=creds_dict)\n self.flows = Flows(creds_path=creds_path, creds_dict=creds_dict)\n\n def validation_results_to_dataframe(self, validation_results: Dict):\n \"\"\" \"Transform the Validation results into a dataframe.\n Note will not work if you call get_validation_result with a\n flow_id specified. For calling validate ensure lro is complete\n Args:\n validation_results: dictionary of validation results\n passed back from get_validation_result or validate functions\n\n Return:\n df: dataframe containing the validation results\n \"\"\"\n\n agent_id = \"/\".join(validation_results[\"name\"].split(\"/\")[0:6])\n\n flows_map = self.flows.get_flows_map(agent_id)\n max_cols_old = 0\n dataframe = pd.DataFrame()\n\n for flow in validation_results[\"flowValidationResults\"]:\n\n temp = \"/\".join(flow[\"name\"].split(\"/\")[:-1])\n val_msg = flow.get(\"validationMessages\", {})\n if bool(val_msg):\n temp_df = pd.DataFrame(val_msg)\n temp_df.insert(0, \"flow\", flows_map[temp])\n\n max_cols_new = max([len(x) for x in temp_df.resourceNames])\n\n if max_cols_new > max_cols_old:\n for i in range(1, max_cols_new + 1):\n temp_df[f\"resource{i}\"] = None\n max_cols_old = max_cols_new\n\n for index in temp_df.index:\n i = 1\n for frame in temp_df[\"resourceNames\"][index]:\n temp_df[f\"resource{i}\"][index] = frame[\n \"displayName\"\n ]\n i += 1\n\n dataframe = dataframe.append(temp_df)\n max_cols_old = 0\n\n return dataframe\n\n def intent_disambiguation(self, agent_id, refresh=False, flow=None):\n \"\"\"Obtains the intent disambiguation tasks from the validation tool\n Args:\n refresh: (optional) False means validation results are pulled\n as is. True means the validation tool is refreshed then\n results are pulled\n flow: (optional) If specified results are returned\n for the indicated flow display name\n\n\n Returns:\n Dictionary of intent disambiguation Validation results\n in two dataframes.\n extended: All intent disambiguation validtion results as\n seperate instances. If 5 training phrases conflict\n in 5 intents they will be shown as 5 rows.\n compact: Only showing the first instance of a conflict\n for each grouping. 
If 5 trainig phrases conflic in 5 intents\n only the first training phrase will show.\n \"\"\"\n\n if refresh:\n validation = self.agents.validate_agent(agent_id)\n else:\n validation = self.agents.get_validation_result(agent_id=agent_id)\n\n validation_df = self.validation_results_to_dataframe(validation)\n if flow:\n validation_df = validation_df[validation_df[\"flow\"] == flow]\n\n # Parse df\n resources = validation_df.columns\n resources = [r for r in resources if \"resource\" in r]\n validation_df = validation_df[[\"flow\", \"detail\"] + resources]\n\n disambig_id, intents_list, tp_list, id_ = [], [], [], 0\n flows = []\n phrase = \"Multiple intents share training phrases which are too similar\"\n for _, row in validation_df.iterrows():\n deets, flow = row[\"detail\"], row[\"flow\"]\n if bool(re.search(phrase, deets)):\n intents = re.findall(\"Intent '(.*)': training phrase \", deets)\n training_phrases = re.findall(\"training phrase '(.*)'\", deets)\n intents_list = intents_list + intents\n tp_list = tp_list + training_phrases\n disambig_id = disambig_id + ([id_] * len(training_phrases))\n flows = flows + ([flow] * len(training_phrases))\n id_ += 1\n\n\n\n extraction = pd.DataFrame()\n extraction[\"disambig_id\"] = disambig_id\n extraction.insert(0, \"flow\", flows)\n extraction[\"intent\"] = intents_list\n extraction[\"training_phrase\"] = tp_list\n\n if extraction.empty:\n logging.info(\n \"Validation results do not contain clashing intent phrases.\")\n return None\n\n intent_options = (\n extraction.groupby([\"disambig_id\"])[\"intent\"]\n .apply(list)\n .reset_index()\n .rename(columns={\"intent\": \"intents\"})\n )\n intent_options[\"intents\"] = intent_options.apply(\n lambda x: list(set(x[\"intents\"])), axis=1\n )\n\n extraction = pd.merge(\n extraction, intent_options, on=[\"disambig_id\"], how=\"left\"\n )\n\n internal = extraction.copy()\n\n internal[\"intent_count\"] = internal.apply(\n lambda x: len(x[\"intents\"]), axis=1\n )\n external = (\n extraction.groupby([\"flow\", \"disambig_id\"])\n .agg(\n {\n \"training_phrase\": \"first\",\n \"intents\": \"first\",\n \"intent\": \"count\",\n }\n )\n .reset_index()\n .rename(columns={\"intent\": \"conflicting_tp_count\"})\n )\n external[\"intent_count\"] = external.apply(\n lambda x: len(x[\"intents\"]), axis=1\n )\n\n return {\"extended\": internal, \"compact\": external}\n" ]
[ [ "pandas.DataFrame", "pandas.merge" ] ]
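Two notes on the record above: `DataFrame.append` (used in `validation_results_to_dataframe`) was removed in pandas 2.0 in favor of `pd.concat`, and the groupby-then-merge move in `intent_disambiguation` is easiest to see on a toy frame. A minimal sketch with hypothetical rows shaped like the intermediate `extraction` frame:

```python
import pandas as pd

# Hypothetical rows: one two-intent conflict group (0) and one singleton (1).
extraction = pd.DataFrame({
    "disambig_id": [0, 0, 1],
    "flow": ["Default", "Default", "Billing"],
    "intent": ["greet", "hello", "pay_bill"],
    "training_phrase": ["hi there", "hi there", "pay my bill"],
})

# Collect, per conflict group, the list of intents involved...
intent_options = (
    extraction.groupby(["disambig_id"])["intent"]
    .apply(list)
    .reset_index()
    .rename(columns={"intent": "intents"})
)
# ...then attach that list back onto every row of the group.
merged = pd.merge(extraction, intent_options, on=["disambig_id"], how="left")
print(merged[["disambig_id", "intent", "intents"]])
```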
lblogan14/rbcodes
[ "36c5c9e608c539ff1c0ae22468ab4a05d4fc29bd" ]
[ "GUIs/gui_dev/Spec_Inspect.py" ]
[ "import matplotlib\nmatplotlib.use('Qt5Agg')\nfrom astropy.io import fits\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom copy import deepcopy\nfrom astropy.convolution import convolve, Box1DKernel\nimport pdb\nimport sys\nimport os\n\n\nclass Spec_Inspect(object):\n\n def __init__(self,two_d_spec):\n self.two_d_spec=two_d_spec\n self.active_two_d_spec=two_d_spec\n fig, ax = plt.subplots(2, 1, sharex=True)\n self.fig=fig\n self.ax=ax\n self.pix=np.array([1.])\n #Sum in dispersion direction to do initial selection\n temp=np.sum(two_d_spec,axis=1)\n temp_cumsum=np.cumsum(temp)/np.sum(temp)\n xlist=np.arange(0,len(temp_cumsum),1)\n self.active_1d_spec=extract_1_d(self.active_two_d_spec)\n\n self.extration_y=[int(np.interp(0.05, temp_cumsum,xlist)), int(np.interp(0.95, temp_cumsum,xlist))]\n self.temp_extraction_y=[]\n print(self.extration_y)\n self.active_1d_spec=extract_1_d(self.active_two_d_spec[self.extration_y[0]:self.extration_y[1],:])\n self.master_plotter()\n\n \n\n # Connect the different functions to the different events\n self.fig.canvas.mpl_connect('key_press_event', self.ontype)\n #plt.gcf().canvas.mpl_connect('button_press_event',self.onclick)\n #plt.gcf().canvas.mpl_connect('pick_event',self.onpick)\n plt.show() # show the window\n\n\n def master_plotter(self,one_d_only=False):\n\n self.ax[1].cla()\n \n \n if one_d_only==False:\n self.ax[0].cla()\n im = self.ax[0].imshow(self.two_d_spec,origin = 'lower', vmin = -10, vmax = 65)\n xlim=self.ax[0].get_xlim()\n #self.fig.colorbar(im, ax=self.ax[0], label='Interactive colorbar',location='top')\n self.ax[0].hlines(self.extration_y[0],xlim[0],xlim[1],colors='r', linestyles='dashed',label='ext_pt_min')\n self.ax[0].hlines(self.extration_y[1],xlim[0],xlim[1],colors='r', linestyles='dashed',label='ext_pt_min')\n\n\n sp=self.ax[1].plot(self.active_1d_spec)\n\n self.ax[0].set_aspect('auto')\n\n\n\n\n def ontype(self,event): \n\n if event.key=='c':\n #Figure out the min max of extraction box\n vline=self.ax[0].plot(event.xdata,event.ydata,'r+')\n plt.draw()\n\n self.temp_extraction_y=np.append(self.temp_extraction_y,event.ydata)\n\n if len(self.temp_extraction_y)==2:\n #First remove previous extraction window lines HOW?\n\n while self.ax[0].collections:\n self.ax[0].collections.pop()\n \n\n ext_min_y=int(np.round(min(self.temp_extraction_y)))\n ext_max_y=int(np.round(max(self.temp_extraction_y)))\n xlim=self.ax[0].get_xlim()\n self.ax[0].hlines(ext_min_y,xlim[0],xlim[1],colors='r', linestyles='dashed',label='ext_pt_min')\n self.ax[0].hlines(ext_max_y,xlim[0],xlim[1],colors='r', linestyles='dashed',label='ext_pt_max')\n self.active_two_d_spec=self.two_d_spec[ext_min_y:ext_max_y,:]\n self.active_1d_spec=extract_1_d(self.active_two_d_spec)\n self.master_plotter(one_d_only=True)\n \n self.temp_extraction_y=[]\n plt.draw()\n\n #Reset Everything\n elif event.key=='r':\n self.active_two_d_spec=self.two_d_spec\n self.active_1d_spec=extract_1_d(self.active_two_d_spec)\n self.master_plotter()\n plt.draw()\n\n # Set top y max\n elif event.key=='t':\n xlim=self.ax[1].get_xlim()\n ylim=self.ax[1].get_ylim()\n self.ax[1].set_ylim([ylim[0],event.ydata])\n self.ax[1].set_xlim(xlim)\n plt.draw()\n # Set top y min\n elif event.key=='b':\n xlim=self.ax[1].get_xlim()\n ylim=self.ax[1].get_ylim()\n self.ax[1].set_ylim([event.ydata,ylim[1]])\n self.ax[1].set_xlim(xlim)\n plt.draw()\n\n\n # Smooth spectrum\n elif event.key=='S':\n self.pix[0] += 2\n Filter_size=np.int(self.pix[0]) \n xlim=self.ax[1].get_xlim()\n ylim=self.ax[1].get_ylim()\n 
self.active_1d_spec =convolve(extract_1_d(self.active_two_d_spec), Box1DKernel(Filter_size))#medfilt(flux,np.int(Filter_size))\n self.master_plotter(one_d_only=True)\n self.ax[1].set_ylim(ylim)\n self.ax[1].set_xlim(xlim)\n\n plt.draw()\n #Unsmooth Spectrum\n elif event.key=='U':\n self.pix[0] -= 2\n if self.pix[0] <= 0:\n self.pix[0]=1;\n Filter_size=np.int(self.pix[0]) \n xlim=self.ax[1].get_xlim()\n ylim=self.ax[1].get_ylim()\n\n self.active_1d_spec =convolve(extract_1_d(self.active_two_d_spec), Box1DKernel(Filter_size))#medfilt(flux,np.int(Filter_size))\n self.master_plotter(one_d_only=True)\n self.ax[1].set_ylim(ylim)\n self.ax[1].set_xlim(xlim)\n\n plt.draw()\n\n # Set X max\n elif event.key=='X':\n xlim=self.ax[1].get_xlim()\n ylim=self.ax[1].get_ylim()\n self.ax[1].set_xlim([xlim[0],event.xdata])\n self.ax[1].set_ylim(ylim)\n plt.draw()\n # Set x min\n elif event.key=='x':\n xlim=self.ax[1].get_xlim()\n ylim=self.ax[1].get_ylim()\n self.ax[1].set_xlim([event.xdata,xlim[1]])\n self.ax[1].set_ylim(ylim)\n plt.draw()\n\n # Set pan spectrum\n elif event.key==']':\n xlim=self.ax[1].get_xlim()\n ylim=self.ax[1].get_ylim()\n delx=(xlim[1]-xlim[0])\n self.ax[1].set_xlim([xlim[1],xlim[1]+delx])\n self.ax[1].set_ylim(ylim)\n plt.draw()\n\n # Set pan spectrum\n elif event.key=='[':\n xlim=self.ax[1].get_xlim()\n ylim=self.ax[1].get_ylim()\n delx=(xlim[1]-xlim[0])\n self.ax[1].set_xlim([xlim[0]-delx,xlim[0]])\n self.ax[1].set_ylim(ylim)\n plt.draw()\n\n \n\ndef extract_1_d(input_2d_spec):\n return np.sum(input_2d_spec,axis=0)\n\n\n \n" ]
[ [ "numpy.sum", "numpy.cumsum", "matplotlib.pyplot.draw", "numpy.append", "numpy.interp", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "matplotlib.use", "numpy.array", "numpy.int" ] ]
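One portability note on the record above: `np.int` (used in the smoothing handlers, and listed in the apis) was removed in NumPy 1.24; the builtin `int` does the same job. A minimal sketch of the same key-press plus boxcar-smoothing pattern on a toy spectrum (the variable names here are illustrative, not from the repo):

```python
import numpy as np
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Box1DKernel

spec = np.random.normal(1.0, 0.1, 500)   # toy 1-D spectrum
fig, ax = plt.subplots()
line, = ax.plot(spec)

def ontype(event):
    # 'S' smooths with a 5-pixel boxcar, same call pattern as Spec_Inspect.ontype.
    if event.key == 'S':
        line.set_ydata(convolve(spec, Box1DKernel(5)))
        plt.draw()

fig.canvas.mpl_connect('key_press_event', ontype)
plt.show()
```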
FiorenSt/AutoSourceID-Light
[ "6d1f4bf0a735c3de80bdba60d597c7d6dcfba4e3" ]
[ "LaplacianOfGaussian.py" ]
[ "import numpy as np\nfrom skimage.feature import blob_log\nfrom joblib import Parallel, delayed\n\n\n\n###############################\n# Loop for parallelization #\n###############################\n\ndef task(image, i, n_patches_x, n_patches_y):\n\n ###LoG step\n blobs_log = blob_log(image, min_sigma=1.43, max_sigma=1.43, num_sigma=1, threshold=.2,\n exclude_border=False, overlap=0.8)\n\n ###from patch coordinates to full image coordinates\n x_idx = (i % n_patches_x) * 256\n y_idx = int(i / n_patches_y) * 256\n x_coord = x_idx + blobs_log[:, 1]\n y_coord = y_idx + blobs_log[:, 0]\n return np.column_stack((x_coord,y_coord))\n\n\n######################\n# Parallelization #\n######################\n\ndef joblib_loop(pred,n_patches_x,n_patches_y,CPUs=1):\n return Parallel(n_jobs=CPUs)(delayed(task)(pred[i,:,:,0],i,n_patches_x,n_patches_y) for i in range(0,pred.shape[0]))\n\n\n\n\n" ]
[ [ "numpy.column_stack" ] ]
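The patch-to-image coordinate arithmetic plus the joblib fan-out is the whole trick in this record. A minimal sketch with a trivial task in place of `blob_log` (the `patch_offsets` helper is hypothetical), assuming 256-pixel patches laid out row-major:

```python
from joblib import Parallel, delayed

def patch_offsets(i, n_patches_x, patch=256):
    # Row-major grid: the column cycles every patch, the row advances once
    # per n_patches_x patches -- the same arithmetic as task() above.
    return (i % n_patches_x) * patch, (i // n_patches_x) * patch

offsets = Parallel(n_jobs=2)(delayed(patch_offsets)(i, 4) for i in range(8))
print(offsets)  # [(0, 0), (256, 0), (512, 0), (768, 0), (0, 256), ...]
```

Because each patch is independent, the `Parallel(...)(delayed(f)(...) for ...)` idiom parallelizes with no shared state; results come back in submission order.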
SatoshiShimada/intermittent_crawl_gait
[ "c7f0582c72f36df2f5d1753bdc789226c6d91056" ]
[ "test.py" ]
[ "import numpy as np\n\nfrom geometry import *\nimport plot\n\ndef in_circle(circle, p):\n x = p.x - circle.center.x\n y = p.y - circle.center.y\n if np.sqrt(x ** 2 + y ** 2) <= circle.r:\n return True\n else:\n return False\n\ndef in_ellipse(ellipse, p):\n f1 = Point(np.sqrt(ellipse.a ** 2 - ellipse.b ** 2), 0)\n f2 = Point(-np.sqrt(ellipse.a ** 2 - ellipse.b ** 2), 0)\n pp = p.x - ellipse.center.x\n qq = p.y - ellipse.center.y\n p = Point(pp, qq)\n th = distance(f1, p) + distance(f2, p)\n if th <= ellipse.a * 2:\n return True\n else:\n return False\n P = (p.x ** 2 / ellipse.a ** 2) + (p.y ** 2 / ellipse.b ** 2) - 1.0\n if P > 0:\n return False\n else:\n return True\n\ndef inside(ellipse, ls, pos, orig):\n p1, p2 = intersectionPointEllipseLine(ellipse, ls)\n for p in generate_q(ls, p1, p2):\n pp1 = Point(p.x + pos[0], p.y + pos[1])\n pp2 = Point(p.x + pos[2], p.y + pos[3])\n if in_ellipse(ellipse, pp1) and in_ellipse(ellipse, pp2):\n R = pp2\n if orig == orig_U:\n U = p\n Q = pp1\n elif orig == orig_Q:\n Q = p\n U = pp1\n return True, V(Q, R, U)\n return False, 0\n\n\ndef inside_old(circle, ls, pos, orig):\n p1, p2 = intersectionPointCircleLine(circle, ls)\n for p in generate_q(ls, p1, p2):\n pp1 = Point(p.x + pos[0], p.y + pos[1])\n pp2 = Point(p.x + pos[2], p.y + pos[3])\n if in_circle(circle, pp1) and in_circle(circle, pp2):\n R = pp2\n if orig == orig_U:\n U = p\n Q = pp1\n elif orig == orig_Q:\n Q = p\n U = pp1\n return True, V(Q, R, U)\n return False, 0\n\ndef drift(line, dist):\n dline = Line(line.a, line.b, line.c - dist * line.b)\n return dline\n\ndef generate_q(line, p1, p2):\n step = 1.0\n return [ Point(x, line(x)) for x in np.arange(min(p1.x, p2.x), max(p1.x, p2.x), step) ]\n\ndef calc_u(ellipse, p):\n pp = ellipse.center.x\n qq = ellipse.center.y\n d = ellipse.a ** 2 * (1.0 - (p.y - qq) ** 2 / ellipse.b ** 2)\n if d < 0:\n print(\"Error, x: {}, y: {}, p.x: {}, p.y: {}, b: {}\".format(pp, qq, p.x, p.y, ellipse.b))\n return []\n x0 = pp + np.sqrt(d)\n x1 = pp - np.sqrt(d)\n return Point(x0, p.y), Point(x1, p.y)\n\ndef calc_u_odl(circle, p):\n d = circle.center.y - p.y\n x0 = np.sqrt(circle.r ** 2 - d ** 2) + circle.center.x\n x1 = -np.sqrt(circle.r ** 2 - d ** 2) + circle.center.x\n y = -d + circle.center.y\n return Point(x0, y), Point(x1, y)\n\ndef touch(ellipse, line):\n return intersectionPointEllipseLine(ellipse, line)\n\ndef touch_old(circle, line):\n d = distanceLinePoint(line, circle.center)\n if d > circle.r:\n return False\n else:\n return True\n\ndef main():\n Sne = 50\n leg_x = 190\n leg_y = 140\n leg_radius = 130\n\n center = Point(0, 0)\n circle = Circle(center, Sne)\n\n center_f = Point(leg_x, leg_y)\n center_r = Point(-leg_x, leg_y)\n center_ff = Point(leg_x, -leg_y)\n center_rr = Point(-leg_x, -leg_y)\n\n a = 100\n b = 60\n ellipse_f = Ellipse(center_f, a, b)\n ellipse_r = Ellipse(center_r, a, b)\n ellipse_ff = Ellipse(center_ff, a, b)\n ellipse_rr = Ellipse(center_rr, a, b)\n\n rx = leg_x * 2 + leg_radius\n ry = leg_y * 2 + leg_radius\n fig = plot.Figure(((-rx, rx), (-ry, ry)))\n\n fig.drawCircle(circle, color='black')\n fig.drawEllipse(ellipse_f, color='black')\n fig.drawEllipse(ellipse_r, color='black')\n fig.drawEllipse(ellipse_ff, color='black')\n fig.drawEllipse(ellipse_rr, color='black')\n\n # draw robot\n fig.drawLineP(Point(leg_x, leg_y), Point(-leg_x, leg_y), color='orange')\n fig.drawLineP(Point(-leg_x, leg_y), Point(-leg_x, -leg_y), color='orange')\n fig.drawLineP(Point(-leg_x, -leg_y), Point(leg_x, -leg_y), color='orange')\n 
fig.drawLineP(Point(leg_x, -leg_y), Point(leg_x, leg_y), color='orange')\n\n EPPSL1, EPPSL2 = [], []\n for angle in np.arange(0, 180, 3.0): # from 0 to 180 degree by step 3.0 degree\n if angle == 0.0:\n continue\n theta1 = radian(angle)\n theta2 = radian(angle) + np.pi\n tangent_line1 = tangent(circle, theta1)\n tangent_line2 = tangent(circle, theta2)\n if touch(ellipse_f, tangent_line1) and touch(ellipse_rr, tangent_line1) and touch(ellipse_f, tangent_line2) and touch(ellipse_rr, tangent_line2):\n ls_r = tangent_line1\n ls_ff = tangent_line2\n EPPSL1.append((ls_r, ls_ff))\n if touch(ellipse_r, tangent_line1) and touch(ellipse_ff, tangent_line1) and touch(ellipse_r, tangent_line2) and touch(ellipse_ff, tangent_line2):\n ls_f = tangent_line1\n ls_rr = tangent_line2\n EPPSL2.append((ls_f, ls_rr))\n\n common_leg_V = None\n common_leg_len = 0.0\n for ls_r, ls_ff in EPPSL1:\n for ls_f, ls_rr in EPPSL2:\n # f leg\n best_Qf, best_Uf, best_Rf = None, None, None\n best_dist = 0.0\n ret = intersectionPointEllipseLine(ellipse_f, ls_r)\n if len(ret) == 2:\n p1, p2 = ret\n for Q in generate_q(ls_r, p1, p2):\n U = calc_u(ellipse_f, Q)[0]\n dist = distance(Q, U)\n if dist > best_dist:\n best_Qf = Q\n best_Uf = U\n best_dist = dist\n dist = best_Uf.y - ls_f(best_Uf.x)\n l = drift(ls_rr, dist)\n best_Rf = intersectionPointLineLine(l, ls_ff)\n Vf = V(best_Qf, best_Rf, best_Uf)\n # rr leg\n best_Qrr, best_Urr, best_Rrr = None, None, None\n best_dist = 0.0\n ret = intersectionPointEllipseLine(ellipse_rr, ls_ff)\n if len(ret) == 2:\n p1, p2 = ret\n for Q in generate_q(ls_ff, p1, p2):\n U = calc_u(ellipse_rr, Q)[1]\n dist = distance(Q, U)\n if dist > best_dist:\n best_Qrr = Q\n best_Urr = U\n best_dist = dist\n dist = best_Urr.y - ls_rr(best_Urr.x)\n l = drift(ls_f, dist)\n best_Rrr = intersectionPointLineLine(l, ls_r)\n Vrr = V(best_Qrr, best_Rrr, best_Urr)\n # ff leg\n best_Qff, best_Uff, best_Rff = None, None, None\n best_dist = 0.0\n ret = intersectionPointEllipseLine(ellipse_ff, ls_rr)\n if len(ret) == 2:\n p1, p2 = ret\n for U in generate_q(ls_rr, p1, p2):\n Q = calc_u(ellipse_ff, U)[0]\n dist = distance(Q, U)\n if dist > best_dist:\n best_Qff = Q\n best_Uff = U\n best_dist = dist\n dist = best_Qff.y - ls_ff(best_Qff.x)\n l = drift(ls_r, dist)\n best_Rff = intersectionPointLineLine(l, ls_f)\n Vff = V(best_Qff, best_Rff, best_Uff)\n # r leg\n best_Qr, best_Ur, best_Rr = None, None, None\n best_dist = 0.0\n ret = intersectionPointEllipseLine(ellipse_r, ls_f)\n if len(ret) == 2:\n p1, p2 = ret\n for U in generate_q(ls_f, p1, p2):\n Q = calc_u(ellipse_r, U)[1]\n dist = distance(Q, U)\n if dist > best_dist:\n best_Qr = Q\n best_Ur = U\n best_dist = dist\n dist = best_Qr.y - ls_r(best_Qr.x)\n l = drift(ls_ff, dist)\n best_Rr = intersectionPointLineLine(l, ls_rr)\n Vr = V(best_Qr, best_Rr, best_Ur)\n # common leg trajectry\n len_f = Vf.get_length()\n len_rr = Vrr.get_length()\n len_ff = Vff.get_length()\n len_r = Vr.get_length()\n best_len = min(len_f, len_rr, len_ff, len_r)\n if best_len == len_f:\n ref = reflect(Vf, orig_U, reflection=False)\n in_r, _Vr = inside(ellipse_r, ls_f, ref, orig_U)\n ref = reflect(Vf, orig_U)\n in_ff, _Vff = inside(ellipse_ff, ls_rr, ref, orig_U)\n ref = reflect(Vf, orig_Q)\n in_rr, _Vrr = inside(ellipse_rr, ls_ff, ref, orig_Q)\n if in_r and in_ff and in_rr:\n if best_len > common_leg_len:\n common_leg_V = (_Vf, _Vr, _Vff, Vrr, ls_f, ls_r, ls_ff, ls_rr)\n common_leg_len = best_len\n elif best_len == len_ff:\n ref = reflect(Vf, orig_Q)\n in_f, _Vf = inside(ellipse_f, ls_r, ref, 
orig_Q)\n ref = reflect(Vf, orig_U)\n in_r, _Vr = inside(ellipse_r, ls_f, ref, orig_U)\n ref = reflect(Vf, orig_Q, reflection=False)\n in_rr, _Vrr = inside(ellipse_rr, ls_ff, ref, orig_Q)\n if in_f and in_r and in_rr:\n if best_len > common_leg_len:\n common_leg_V = (_Vf, _Vr, Vff, _Vrr, ls_f, ls_r, ls_ff, ls_rr)\n common_leg_len = best_len\n elif best_len == len_rr:\n ref = reflect(Vf, orig_Q)\n in_f, _Vf = inside(ellipse_f, ls_r, ref, orig_Q)\n ref = reflect(Vf, orig_U, reflection=False)\n in_ff, _Vr = inside(ellipse_ff, ls_rr, ref, orig_U)\n ref = reflect(Vf, orig_Q)\n in_r, _Vrr = inside(ellipse_r, ls_f, ref, orig_Q)\n if in_f and in_r and in_ff:\n if best_len > common_leg_len:\n common_leg_V = (_Vf, _Vr, _Vff, Vrr, ls_f, ls_r, ls_ff, ls_rr)\n common_leg_len = best_len\n else:\n ref = reflect(Vr, orig_Q, reflection=False)\n in_f, _Vf = inside(ellipse_f, ls_r, ref, orig_Q)\n ref = reflect(Vr, orig_U)\n in_ff, _Vff = inside(ellipse_ff, ls_rr, ref, orig_U)\n ref = reflect(Vr, orig_Q)\n in_rr, _Vrr = inside(ellipse_rr, ls_ff, ref, orig_Q)\n if in_f and in_ff and in_rr:\n if best_len > common_leg_len:\n common_leg_V = (_Vf, Vr, _Vff, _Vrr, ls_f, ls_r, ls_ff, ls_rr)\n common_leg_len = best_len\n\n if not common_leg_V:\n print(\"No leg tragectry generated!\")\n fig.show()\n return\n\n Vf, Vr, Vff, Vrr, ls_f, ls_r, ls_ff, ls_rr = common_leg_V\n print(Vf.dump())\n print(Vr.dump())\n print(Vff.dump())\n print(Vrr.dump())\n\n fig.drawV(Vf)\n fig.drawV(Vr)\n fig.drawV(Vff)\n fig.drawV(Vrr)\n fig.drawLine(ls_f, color='green')\n fig.drawLine(ls_r, color='green')\n fig.drawLine(ls_ff, color='green')\n fig.drawLine(ls_rr, color='green')\n\n fig.set_title(\"Sne: {}\".format(Sne))\n fig.show()\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "numpy.arange", "numpy.sqrt" ] ]
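`in_ellipse` in the gait script tests membership via the focal-distance definition: a point is inside when its distances to the two foci sum to at most 2a. A self-contained sketch of that test, assuming an axis-aligned ellipse with a >= b (function signature here is illustrative):

```python
import numpy as np

def in_ellipse(a, b, cx, cy, px, py):
    """True if (px, py) lies in the axis-aligned ellipse centered at (cx, cy).

    Focal-distance definition, as in the script: d(F1, P) + d(F2, P) <= 2a.
    Assumes a >= b, so the foci sit on the major (x) axis.
    """
    c = np.sqrt(a ** 2 - b ** 2)      # center-to-focus distance
    x, y = px - cx, py - cy           # shift into ellipse-centered coordinates
    return np.hypot(x - c, y) + np.hypot(x + c, y) <= 2 * a

print(in_ellipse(100, 60, 0, 0, 50, 0))   # True: well inside
print(in_ellipse(100, 60, 0, 0, 120, 0))  # False: beyond the major axis
```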
pyGSTi-Developers/pyGSTi
[ "bfedc1de4d604f14b0f958615776fb80ddb59e33" ]
[ "test/unit/tools/test_optools.py" ]
[ "import functools\nfrom unittest import mock\n\nimport sys\nimport numpy as np\nimport scipy\nfrom pygsti.baseobjs.basis import Basis\nfrom pygsti.baseobjs.errorgenlabel import LocalElementaryErrorgenLabel as LEEL\n\nimport pygsti.tools.basistools as bt\nimport pygsti.tools.lindbladtools as lt\nimport pygsti.tools.optools as ot\nfrom pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock\nfrom pygsti.modelpacks.legacy import std2Q_XXYYII\nfrom ..util import BaseCase, needs_cvxpy\n\nSKIP_DIAMONDIST_ON_WIN = True\n\n\ndef fake_minimize(fn):\n \"\"\"Mock scipy.optimize.minimize in the underlying function call to reduce optimization overhead\"\"\"\n def side_effect(o, mx, **kwargs):\n return mock.MagicMock(x=mx)\n\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n with mock.patch.object(scipy.optimize, 'minimize', side_effect=side_effect):\n return fn(*args, **kwargs)\n\n return wrapper\n\n\nclass OpToolsTester(BaseCase):\n def test_unitary_to_pauligate(self):\n theta = np.pi\n sigmax = np.array([[0, 1], [1, 0]])\n ex = 1j * theta * sigmax / 2\n U = scipy.linalg.expm(ex)\n # U is 2x2 unitary matrix operating on single qubit in [0,1] basis (X(pi) rotation)\n\n op = ot.unitary_to_pauligate(U)\n op_ans = np.array([[ 1., 0., 0., 0.],\n [ 0., 1., 0., 0.],\n [ 0., 0., -1., 0.],\n [ 0., 0., 0., -1.]], 'd')\n self.assertArraysAlmostEqual(op, op_ans)\n\n U_2Q = np.identity(4, 'complex')\n U_2Q[2:, 2:] = U\n # U_2Q is 4x4 unitary matrix operating on isolated two-qubit space (CX(pi) rotation)\n\n op_2Q = ot.unitary_to_pauligate(U_2Q)\n op_2Q_inv = ot.process_mx_to_unitary(bt.change_basis(op_2Q, 'pp', 'std'))\n self.assertArraysAlmostEqual(U_2Q, op_2Q_inv)\n\n def test_decompose_gate_matrix(self):\n # decompose gate mx whose eigenvalues have a real but non-unit pair\n oneRealPair = np.array([\n [1+1j, 0, 0, 0], # Angle between 0 and 1 should give rotation\n [ 0, 1-1j, 0, 0],\n [ 0, 0, 2, 0], # should be picked out as fixed point (first real eigenval)\n [ 0, 0, 0, 2] # should be picked out as axis of rotation\n ], 'complex')\n decomp = ot.decompose_gate_matrix(oneRealPair)\n\n self.assertEqual(decomp['isValid'], True)\n self.assertEqual(decomp['isUnitary'], False)\n self.assertArraysAlmostEqual(decomp['fixed point'], [0, 0, 1, 0])\n self.assertArraysAlmostEqual(decomp['axis of rotation'], [0, 0, 0, 1])\n self.assertArraysAlmostEqual(decomp['rotating axis 1'], [1, 0, 0, 0])\n self.assertArraysAlmostEqual(decomp['rotating axis 2'], [0, 1, 0, 0])\n self.assertEqual(decomp['decay of diagonal rotation terms'], 1.0 - 2.0)\n self.assertEqual(decomp['decay of off diagonal rotation terms'], 1.0 - abs(1+1j))\n self.assertEqual(decomp['pi rotations'], np.angle(1+1j)/np.pi)\n\n dblRealPair = np.array([\n [ 3, 0, 0, 0],\n [ 0, 3, 0, 0],\n [ 0, 0, 2, 0], # still taken as fixed point because closest to identity (1.0)\n [ 0, 0, 0, 2]\n ], 'complex')\n decomp = ot.decompose_gate_matrix(dblRealPair)\n # decompose gate mx whose eigenvalues have two real but non-unit pairs\n \n self.assertEqual(decomp['isValid'], True)\n self.assertEqual(decomp['isUnitary'], False)\n self.assertArraysAlmostEqual(decomp['fixed point'], [0, 0, 1, 0])\n self.assertArraysAlmostEqual(decomp['axis of rotation'], [0, 0, 0, 1])\n self.assertArraysAlmostEqual(decomp['rotating axis 1'], [1, 0, 0, 0])\n self.assertArraysAlmostEqual(decomp['rotating axis 2'], [0, 1, 0, 0])\n self.assertEqual(decomp['decay of diagonal rotation terms'], 1.0 - 2.0)\n self.assertEqual(decomp['decay of off diagonal rotation terms'], 1.0 - 
3.0)\n self.assertEqual(decomp['pi rotations'], np.angle(3.0)/np.pi)\n\n def test_decompose_gate_matrix_invalidates_on_all_complex_eigval(self):\n unpairedMx = np.array([\n [1+1j, 0, 0, 0],\n [ 0, 2-1j, 0, 0],\n [ 0, 0, 2+2j, 0],\n [ 0, 0, 0, 1.0+3j]\n ], 'complex')\n decomp = ot.decompose_gate_matrix(unpairedMx)\n # decompose gate mx which has all complex eigenvalue -> bail out\n self.assertFalse(decomp['isValid'])\n\n def test_decompose_gate_matrix_invalidates_on_large_matrix(self):\n largeMx = np.identity(16, 'd')\n decomp = ot.decompose_gate_matrix(largeMx) # can only handle 1Q mxs\n self.assertFalse(decomp['isValid'])\n\n def test_hack_sqrt_m(self):\n expected = np.array([\n [ 0.55368857+0.46439416j, 0.80696073-0.21242648j],\n [ 1.21044109-0.31863972j, 1.76412966+0.14575444j]\n ])\n sqrt = ot._hack_sqrtm(np.array([[1, 2], [3, 4]]))\n self.assertArraysAlmostEqual(sqrt, expected)\n\n def test_unitary_to_process_mx(self):\n identity = np.identity(2)\n processMx = ot.unitary_to_process_mx(identity)\n self.assertArraysAlmostEqual(processMx, np.identity(4))\n\n\nclass ProjectModelTester(BaseCase):\n def setUp(self):\n self.projectionTypes = ('H', 'S', 'H+S', 'LND', 'LNDF')\n self.target_model = std2Q_XXYYII.target_model()\n self.model = self.target_model.depolarize(op_noise=0.01)\n\n @fake_minimize\n def test_log_diff_model_projection(self):\n self.skipTest(\"project_model for logG-logT is known to be inconsistent in testing (Gxx,Gxy,Gyx,Gyy gates). Skip tests until it gets fixed.\")\n basis = self.target_model.basis\n gen_type = 'logG-logT'\n proj_model, Np_dict = ot.project_model(self.model, self.target_model, self.projectionTypes, gen_type, logG_weight=0)\n # Project a second time and ensure models don't change\n for pm1, ptype in zip(proj_model, self.projectionTypes):\n proj2, _ = ot.project_model(pm1, self.target_model, [ptype], gen_type, logG_weight=0)\n pm2 = proj2[0]\n for pm1_op, pm2_op in zip(pm1.operations.values(), pm2.operations.values()):\n self.assertArraysAlmostEqual(pm1_op, pm2_op)\n\n def test_logTiG_model_projection(self):\n gen_type = 'logTiG'\n proj_model, Np_dict = ot.project_model(self.model, self.target_model, self.projectionTypes, gen_type)\n # Project a second time and ensure models don't change\n for pm1, ptype in zip(proj_model, self.projectionTypes):\n proj2, _ = ot.project_model(pm1, self.target_model, [ptype], gen_type, logG_weight=0)\n pm2 = proj2[0]\n for pm1_op, pm2_op in zip(pm1.operations.values(), pm2.operations.values()):\n self.assertArraysAlmostEqual(pm1_op, pm2_op)\n\n def test_logGTi_model_projection(self):\n gen_type = 'logGTi'\n proj_model, Np_dict = ot.project_model(self.model, self.target_model, self.projectionTypes, gen_type)\n # Project a second time and ensure models don't change\n for pm1, ptype in zip(proj_model, self.projectionTypes):\n proj2, _ = ot.project_model(pm1, self.target_model, [ptype], gen_type, logG_weight=0)\n pm2 = proj2[0]\n for pm1_op, pm2_op in zip(pm1.operations.values(), pm2.operations.values()):\n self.assertArraysAlmostEqual(pm1_op, pm2_op)\n\n def test_raises_on_basis_mismatch(self):\n with self.assertRaises(ValueError):\n mdl_target_gm = std2Q_XXYYII.target_model()\n mdl_target_gm.basis = Basis.cast(\"gm\", 16)\n ot.project_model(self.model, mdl_target_gm, self.projectionTypes, 'logGti') # basis mismatch\n\n\nclass ErrorGenTester(BaseCase):\n def setUp(self):\n self.target_model = std2Q_XXYYII.target_model()\n self.mdl_datagen = self.target_model.depolarize(op_noise=0.1, spam_noise=0.001)\n\n def 
test_std_errgens(self):\n projectionTypes = ['H', 'S', 'C', 'A']\n basisNames = ['gm', 'pp'] # , 'qt'] #dim must == 3 for qt\n # Note: bases must have first element == identity\n\n for projectionType in projectionTypes:\n #REMOVE ot.std_scale_factor(4, projectionType)\n for basisName in basisNames:\n #REMOVE ot.std_error_generators(4, projectionType, basisName)\n ot.elementary_errorgens_dual(4, projectionType, basisName)\n\n def test_std_errgens_raise_on_bad_projection_type(self):\n with self.assertRaises(AssertionError):\n #REMOVE ot.std_error_generators(4, \"foobar\", 'gm')\n ot.elementary_errorgens_dual(4, \"foobar\", 'gm')\n\n def test_lind_errgens(self):\n\n bases = [Basis.cast('gm', 4),\n Basis.cast('pp', 4),\n Basis.cast('PP', 4)]\n\n for basis in bases:\n print(basis)\n Hblk = LindbladCoefficientBlock('ham', basis)\n Hblk_superops = Hblk.create_lindblad_term_superoperators(mx_basis='std')\n\n for i, mi in enumerate(basis[1:]):\n Hi = lt.create_elementary_errorgen('H', mi)\n HiB = lt.create_lindbladian_term_errorgen('H', mi)\n self.assertArraysAlmostEqual(Hi, HiB)\n self.assertArraysAlmostEqual(Hi, Hblk_superops[i])\n\n ODblk = LindbladCoefficientBlock('other_diagonal', basis)\n ODblk_superops = ODblk.create_lindblad_term_superoperators(mx_basis='std')\n\n for i, mi in enumerate(basis[1:]):\n ODi = lt.create_elementary_errorgen('S', mi)\n ODiB = lt.create_lindbladian_term_errorgen('O', mi, mi)\n self.assertArraysAlmostEqual(ODi, ODiB)\n self.assertArraysAlmostEqual(ODi, ODblk_superops[i])\n\n Oblk = LindbladCoefficientBlock('other', basis)\n Oblk_superops = Oblk.create_lindblad_term_superoperators(mx_basis='std')\n\n for i, mi in enumerate(basis[1:]):\n for j, mj in enumerate(basis[1:]):\n Oij = lt.create_lindbladian_term_errorgen('O', mi, mj)\n self.assertArraysAlmostEqual(Oij, Oblk_superops[i][j])\n\n # C_PQ = NH_PQ + NH_QP\n # A_PQ = i(NH_PQ - NH_QP)\n if i < j:\n Cij = lt.create_elementary_errorgen('C', mi, mj)\n Aij = lt.create_elementary_errorgen('A', mi, mj)\n self.assertArraysAlmostEqual(Oij, (Cij + 1j * Aij) / 2.0)\n elif j < i:\n Cji = lt.create_elementary_errorgen('C', mj, mi)\n Aji = lt.create_elementary_errorgen('A', mj, mi)\n self.assertArraysAlmostEqual(Oij, (Cji - 1j * Aji) / 2.0)\n else: # i == j\n Sii = lt.create_elementary_errorgen('S', mi)\n self.assertArraysAlmostEqual(Oij, Sii)\n\n def test_lind_errgen_projects(self):\n mx_basis = Basis.cast('pp', 4)\n basis = Basis.cast('PP', 4)\n X = basis['X']\n Y = basis['Y']\n Z = basis['Z']\n\n # Build known combination to project back to\n errgen = 0.1 * lt.create_elementary_errorgen('H', Z) \\\n - 0.01 * lt.create_elementary_errorgen('H', X) \\\n + 0.2 * lt.create_elementary_errorgen('S', X) \\\n + 0.25 * lt.create_elementary_errorgen('S', Y) \\\n + 0.05 * lt.create_elementary_errorgen('C', X, Y) \\\n - 0.01 * lt.create_elementary_errorgen('A', X, Y)\n errgen = bt.change_basis(errgen, 'std', mx_basis)\n\n Hblk = LindbladCoefficientBlock('ham', basis)\n ODblk = LindbladCoefficientBlock('other_diagonal', basis)\n Oblk = LindbladCoefficientBlock('other', basis)\n\n Hblk.set_from_errorgen_projections(errgen, errorgen_basis=mx_basis)\n ODblk.set_from_errorgen_projections(errgen, errorgen_basis=mx_basis)\n Oblk.set_from_errorgen_projections(errgen, errorgen_basis=mx_basis)\n\n self.assertArraysAlmostEqual(Hblk.block_data, [-0.01, 0, 0.1])\n self.assertArraysAlmostEqual(ODblk.block_data, [0.2, 0.25, 0])\n self.assertArraysAlmostEqual(Oblk.block_data,\n np.array([[0.2, 0.05 + 0.01j, 0],\n [0.05 - 0.01j, 0.25, 0],\n [0, 0, 
0]]))\n\n def dicts_equal(d, f):\n f = {LEEL.cast(k): v for k, v in f.items()}\n if set(d.keys()) != set(f.keys()): return False\n for k in d:\n if abs(d[k] - f[k]) > 1e-12: return False\n return True\n\n self.assertTrue(dicts_equal(Hblk.elementary_errorgens, {('H','Z'): 0.1, ('H','X'): -0.01, ('H','Y'): 0}))\n self.assertTrue(dicts_equal(ODblk.elementary_errorgens, {('S','X'): 0.2, ('S','Y'): 0.25, ('S','Z'): 0}))\n self.assertTrue(dicts_equal(Oblk.elementary_errorgens,\n {('S', 'X'): 0.2,\n ('S', 'Y'): 0.25,\n ('S', 'Z'): 0.0,\n ('C', 'X', 'Y'): 0.05,\n ('A', 'X', 'Y'): -0.01,\n ('C', 'X', 'Z'): 0,\n ('A', 'X', 'Z'): 0,\n ('C', 'Y', 'Z'): 0,\n ('A', 'Y', 'Z'): 0,\n }))\n\n #TODO: test with sparse bases??\n\n #TODO: test basis from name (seems unnecessary)?\n\n @fake_minimize\n def test_err_gen(self):\n projectionTypes = ['hamiltonian', 'stochastic', 'affine']\n basisNames = ['std', 'gm', 'pp'] # , 'qt'] #dim must == 3 for qt\n\n for (lbl, gateTarget), gate in zip(self.target_model.operations.items(), self.mdl_datagen.operations.values()):\n errgen = ot.error_generator(gate, gateTarget, self.target_model.basis, 'logG-logT')\n altErrgen = ot.error_generator(gate, gateTarget, self.target_model.basis, 'logTiG')\n altErrgen2 = ot.error_generator(gate, gateTarget, self.target_model.basis, 'logGTi')\n with self.assertRaises(ValueError):\n ot.error_generator(gate, gateTarget, self.target_model.basis, 'adsf')\n\n #OLD: tested above\n #for projectionType in projectionTypes:\n # for basisName in basisNames:\n # ot.std_errorgen_projections(errgen, projectionType, basisName)\n\n originalGate = ot.operation_from_error_generator(errgen, gateTarget, self.target_model.basis, 'logG-logT')\n altOriginalGate = ot.operation_from_error_generator(altErrgen, gateTarget, self.target_model.basis, 'logTiG')\n altOriginalGate2 = ot.operation_from_error_generator(altErrgen, gateTarget, self.target_model.basis, 'logGTi')\n with self.assertRaises(ValueError):\n ot.operation_from_error_generator(errgen, gateTarget, self.target_model.basis, 'adsf')\n self.assertArraysAlmostEqual(originalGate, gate) # sometimes need to approximate the log for this one\n self.assertArraysAlmostEqual(altOriginalGate, gate)\n self.assertArraysAlmostEqual(altOriginalGate2, gate)\n\n @fake_minimize\n def test_err_gen_nonunitary(self):\n errgen_nonunitary = ot.error_generator(self.mdl_datagen.operations['Gxi'],\n self.mdl_datagen.operations['Gxi'],\n self.mdl_datagen.basis)\n # Perfect match, should get all 0s\n self.assertArraysAlmostEqual(np.zeros_like(self.mdl_datagen.operations['Gxi']), errgen_nonunitary)\n\n def test_err_gen_not_near_gate(self):\n # Both should warn\n with self.assertWarns(UserWarning):\n errgen_notsmall = ot.error_generator(self.mdl_datagen.operations['Gxi'], self.target_model.operations['Gix'],\n self.target_model.basis, 'logTiG')\n\n with self.assertWarns(UserWarning):\n errgen_notsmall = ot.error_generator(self.mdl_datagen.operations['Gxi'], self.target_model.operations['Gix'],\n self.target_model.basis, 'logGTi')\n\n def test_err_gen_raises_on_bad_type(self):\n with self.assertRaises(ValueError):\n ot.error_generator(self.mdl_datagen.operations['Gxi'], self.target_model.operations['Gxi'],\n self.target_model.basis, 'foobar')\n\n def test_err_gen_assert_shape_raises_on_ndims_too_high(self):\n # Check helper routine _assert_shape\n with self.assertRaises(NotImplementedError): # boundary case\n ot._assert_shape(np.zeros((2, 2, 2, 2, 2), 'd'), (2, 2, 2, 2, 2), sparse=True) # ndims must be <= 4\n\n\nclass 
GateOpsTester(BaseCase):\n def setUp(self):\n self.A = np.array([\n [ 0.9, 0, 0.1j, 0],\n [ 0, 0, 0, 0],\n [-0.1j, 0, 0, 0],\n [ 0, 0, 0, 0.1]\n ], 'complex')\n\n self.B = np.array([\n [ 0.5, 0, 0, -0.2j],\n [ 0, 0.25, 0, 0],\n [ 0, 0, 0.25, 0],\n [0.2j, 0, 0, 0.1]\n ], 'complex')\n\n def test_frobenius_distance(self):\n self.assertAlmostEqual(ot.frobeniusdist(self.A, self.A), 0.0)\n self.assertAlmostEqual(ot.frobeniusdist(self.A, self.B), (0.430116263352+0j))\n\n self.assertAlmostEqual(ot.frobeniusdist_squared(self.A, self.A), 0.0)\n self.assertAlmostEqual(ot.frobeniusdist_squared(self.A, self.B), (0.185+0j))\n\n def test_jtrace_distance(self):\n self.assertAlmostEqual(ot.jtracedist(self.A, self.A, mx_basis=\"std\"), 0.0)\n self.assertAlmostEqual(ot.jtracedist(self.A, self.B, mx_basis=\"std\"), 0.26430148) # OLD: 0.2601 ?\n\n @needs_cvxpy\n def test_diamond_distance(self):\n if SKIP_DIAMONDIST_ON_WIN and sys.platform.startswith('win'): return\n self.assertAlmostEqual(ot.diamonddist(self.A, self.A, mx_basis=\"std\"), 0.0)\n self.assertAlmostEqual(ot.diamonddist(self.A, self.B, mx_basis=\"std\"), 0.614258836298)\n\n def test_frobenius_norm_equiv(self):\n from pygsti.tools import matrixtools as mt\n self.assertAlmostEqual(ot.frobeniusdist(self.A, self.B), mt.frobeniusnorm(self.A - self.B))\n self.assertAlmostEqual(ot.frobeniusdist(self.A, self.B), np.sqrt(mt.frobeniusnorm_squared(self.A - self.B)))\n\n def test_entanglement_fidelity(self):\n fidelity = ot.entanglement_fidelity(self.A, self.B)\n self.assertAlmostEqual(fidelity, 0.42686642003)\n\n def test_fidelity_upper_bound(self):\n upperBound = ot.fidelity_upper_bound(self.A)\n expected = (\n np.array([[ 0.25]]),\n np.array([[ 1.00000000e+00, -8.27013523e-16, 8.57305616e-33, 1.95140273e-15],\n [ -8.27013523e-16, 1.00000000e+00, 6.28036983e-16, -8.74760501e-31],\n [ 5.68444574e-33, -6.28036983e-16, 1.00000000e+00, -2.84689309e-16],\n [ 1.95140273e-15, -9.27538795e-31, 2.84689309e-16, 1.00000000e+00]])\n )\n self.assertArraysAlmostEqual(upperBound[0], expected[0])\n self.assertArraysAlmostEqual(upperBound[1], expected[1])\n" ]
[ [ "numpy.zeros_like", "scipy.linalg.expm", "numpy.zeros", "numpy.angle", "numpy.array", "numpy.identity" ] ]
saishan27/Green_nmt
[ "e82e625056a7f1134729d4b8f18293e3f017c2cf" ]
[ "seqtoseq/nmt/nmt/utils/misc_utils.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\n\"\"\"Generally useful utility functions.\"\"\"\r\nfrom __future__ import print_function\r\n\r\nimport codecs\r\nimport collections\r\nimport json\r\nimport math\r\nimport os\r\nimport sys\r\nimport time\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\n\r\ndef check_tensorflow_version():\r\n min_tf_version = \"1.4.0\"\r\n if tf.__version__ < min_tf_version:\r\n raise EnvironmentError(\"Tensorflow version must >= %s\" % min_tf_version)\r\n\r\n\r\ndef safe_exp(value):\r\n \"\"\"Exponentiation with catching of overflow error.\"\"\"\r\n try:\r\n ans = math.exp(value)\r\n except OverflowError:\r\n ans = float(\"inf\")\r\n return ans\r\n\r\n\r\ndef print_time(s, start_time):\r\n \"\"\"Take a start time, print elapsed duration, and return a new time.\"\"\"\r\n print(\"%s, time %ds, %s.\" % (s, (time.time() - start_time), time.ctime()))\r\n sys.stdout.flush()\r\n return time.time()\r\n\r\n\r\ndef print_out(s, f=None, new_line=True):\r\n \"\"\"Similar to print but with support to flush and output to a file.\"\"\"\r\n if isinstance(s, bytes):\r\n s = s.decode(\"utf-8\")\r\n\r\n if f:\r\n f.write(s.encode(\"utf-8\"))\r\n if new_line:\r\n f.write(b\"\\n\")\r\n\r\n # stdout\r\n out_s = s.encode(sys.stdout.encoding, \"backslashreplace\")\r\n if not isinstance(out_s, str):\r\n out_s = out_s.decode(sys.stdout.encoding, \"backslashreplace\")\r\n print(out_s, end=\"\", file=sys.stdout)\r\n\r\n if new_line:\r\n sys.stdout.write(\"\\n\")\r\n sys.stdout.flush()\r\n\r\n\r\ndef print_hparams(hparams, skip_patterns=None, header=None):\r\n \"\"\"Print hparams, can skip keys based on pattern.\"\"\"\r\n if header: print_out(\"%s\" % header)\r\n values = hparams.values()\r\n for key in sorted(values.keys()):\r\n if not skip_patterns or all(\r\n [skip_pattern not in key for skip_pattern in skip_patterns]):\r\n print_out(\" %s=%s\" % (key, str(values[key])))\r\n\r\n\r\ndef load_hparams(model_dir):\r\n \"\"\"Load hparams from an existing model directory.\"\"\"\r\n hparams_file = os.path.join(model_dir, \"hparams\")\r\n if tf.gfile.Exists(hparams_file):\r\n print_out(\"# Loading hparams from %s\" % hparams_file)\r\n with codecs.getreader(\"utf-8\")(tf.gfile.GFile(hparams_file, \"rb\")) as f:\r\n try:\r\n hparams_values = json.load(f)\r\n hparams = tf.contrib.training.HParams(**hparams_values)\r\n except ValueError:\r\n print_out(\" can't load hparams file\")\r\n return None\r\n return hparams\r\n else:\r\n return None\r\n\r\n\r\ndef maybe_parse_standard_hparams(hparams, hparams_path):\r\n \"\"\"Override hparams values with existing standard hparams config.\"\"\"\r\n if not hparams_path:\r\n return hparams\r\n\r\n if tf.gfile.Exists(hparams_path):\r\n print_out(\"# Loading standard hparams from %s\" % hparams_path)\r\n with tf.gfile.GFile(hparams_path, \"r\") as f:\r\n 
hparams.parse_json(f.read())\r\n\r\n return hparams\r\n\r\n\r\ndef save_hparams(out_dir, hparams):\r\n \"\"\"Save hparams.\"\"\"\r\n hparams_file = os.path.join(out_dir, \"hparams\")\r\n print_out(\" saving hparams to %s\" % hparams_file)\r\n with codecs.getwriter(\"utf-8\")(tf.gfile.GFile(hparams_file, \"wb\")) as f:\r\n f.write(hparams.to_json())\r\n\r\n\r\ndef debug_tensor(s, msg=None, summarize=10):\r\n \"\"\"Print the shape and value of a tensor at test time. Return a new tensor.\"\"\"\r\n if not msg:\r\n msg = s.name\r\n return tf.Print(s, [tf.shape(s), s], msg + \" \", summarize=summarize)\r\n\r\n\r\ndef add_summary(summary_writer, global_step, tag, value):\r\n \"\"\"Add a new summary to the current summary_writer.\r\n Useful to log things that are not part of the training graph, e.g., tag=BLEU.\r\n \"\"\"\r\n summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])\r\n summary_writer.add_summary(summary, global_step)\r\n\r\n\r\ndef get_config_proto(log_device_placement=False, allow_soft_placement=True,\r\n num_intra_threads=0, num_inter_threads=0):\r\n # GPU options:\r\n # https://www.tensorflow.org/versions/r0.10/how_tos/using_gpu/index.html\r\n config_proto = tf.ConfigProto(\r\n log_device_placement=log_device_placement,\r\n allow_soft_placement=allow_soft_placement)\r\n config_proto.gpu_options.allow_growth = True\r\n\r\n # CPU threads options\r\n if num_intra_threads:\r\n config_proto.intra_op_parallelism_threads = num_intra_threads\r\n if num_inter_threads:\r\n config_proto.inter_op_parallelism_threads = num_inter_threads\r\n\r\n return config_proto\r\n\r\n\r\ndef format_text(words):\r\n \"\"\"Convert a sequence words into sentence.\"\"\"\r\n if (not hasattr(words, \"__len__\") and # for numpy array\r\n not isinstance(words, collections.Iterable)):\r\n words = [words]\r\n return b\" \".join(words)\r\n\r\n\r\ndef format_bpe_text(symbols, delimiter=b\"@@\"):\r\n \"\"\"Convert a sequence of bpe words into sentence.\"\"\"\r\n words = []\r\n word = b\"\"\r\n delimiter_len = len(delimiter)\r\n for symbol in symbols:\r\n if len(symbol) >= delimiter_len and symbol[-delimiter_len:] == delimiter:\r\n word += symbol[:-delimiter_len]\r\n else: # end of a word\r\n word += symbol\r\n words.append(word)\r\n word = b\"\"\r\n return b\" \".join(words)\r\n\r\n\r\ndef format_spm_text(symbols):\r\n \"\"\"Decode a text in SPM (https://github.com/google/sentencepiece) format.\"\"\"\r\n return u\"\".join(format_text(symbols).decode(\"utf-8\").split()).replace(\r\n u\"\\u2581\", u\" \").strip().encode(\"utf-8\")\r\n\r\ndef format_sentence(sentence, subword_option):\r\n \"\"\"Decode sentence using subword option\"\"\"\r\n if isinstance(sentence, str):\r\n sentence = sentence.encode(\"utf-8\").split(b' ')\r\n\r\n if subword_option == \"bpe\": # BPE\r\n sentence = format_bpe_text(sentence)\r\n elif subword_option == \"spm\": # SPM\r\n sentence = format_spm_text(sentence)\r\n else:\r\n sentence = format_text(sentence)\r\n\r\n return sentence\r\n" ]
[ [ "tensorflow.shape", "tensorflow.Summary.Value", "tensorflow.gfile.GFile", "tensorflow.contrib.training.HParams", "tensorflow.gfile.Exists", "tensorflow.ConfigProto" ] ]
zhuang-group/SAQ
[ "594e9c74944999766e119e7137f50583aeedf52b" ]
[ "utils/asam.py" ]
[ "from collections import defaultdict\n\nimport torch\n\n\nclass ASAM:\n def __init__(self, optimizer, model, rho=0.5, eta=0.01):\n self.optimizer = optimizer\n self.model = model\n self.rho = rho\n self.eta = eta\n self.state = defaultdict(dict)\n\n @torch.no_grad()\n def _grad_norm(self):\n # put everything on the same device, in case of model parallelism\n shared_device = self.optimizer.param_groups[0][\"params\"][0].device\n wgrads = []\n for n, p in self.model.named_parameters():\n if p.grad is None:\n continue\n if \"weight\" in n or \"clip_value\" in n:\n grad = (torch.abs(p) + self.eta) * p.grad\n else:\n grad = p.grad\n wgrads.append(torch.norm(grad, p=2).to(shared_device))\n wgrad_norm = torch.norm(torch.stack(wgrads), p=2)\n return wgrad_norm\n\n @torch.no_grad()\n def ascent_step(self):\n grad_norm = self._grad_norm()\n scale = self.rho / (grad_norm + 1e-12)\n for n, p in self.model.named_parameters():\n if p.grad is None:\n continue\n self.state[p][\"old_p\"] = p.data.clone()\n if \"weight\" in n or \"clip_value\" in n:\n e_w = torch.pow(p, 2) * p.grad * scale.to(p)\n else:\n e_w = p.grad * scale.to(p)\n p.add_(e_w)\n self.optimizer.zero_grad()\n\n @torch.no_grad()\n def descent_step(self):\n for n, p in self.model.named_parameters():\n if p.grad is None:\n continue\n # get back to \"w\" from \"w + e(w)\"\n p.data = self.state[p][\"old_p\"]\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n @torch.no_grad()\n def ascent_step_param(self, param_name):\n wgrads = []\n for n, p in self.model.named_parameters():\n if p.grad is None or n not in param_name:\n continue\n t_w = self.state[p].get(\"eps\")\n if t_w is None:\n t_w = torch.clone(p).detach()\n self.state[p][\"eps\"] = t_w\n if \"weight\" in n or \"clip_value\" in n:\n t_w[...] = p[...]\n # t_w + eta\n t_w.abs_().add_(self.eta)\n # t_w * grad\n p.grad.mul_(t_w)\n wgrads.append(torch.norm(p.grad, p=2))\n wgrad_norm = torch.norm(torch.stack(wgrads), p=2) + 1.0e-16\n for n, p in self.model.named_parameters():\n if p.grad is None or n not in param_name:\n continue\n t_w = self.state[p].get(\"eps\")\n if \"weight\" in n or \"clip_value\" in n:\n # t_w * t_w * grad\n p.grad.mul_(t_w)\n eps = t_w\n eps[...] = p.grad[...]\n eps.mul_(self.rho / wgrad_norm)\n p.add_(eps)\n self.optimizer.zero_grad()\n\n @torch.no_grad()\n def restore_step_param(self, param_name):\n for n, p in self.model.named_parameters():\n if p.grad is None or n not in param_name:\n continue\n p.sub_(self.state[p][\"eps\"])\n self.optimizer.zero_grad()\n" ]
[ [ "torch.stack", "torch.no_grad", "torch.norm", "torch.abs", "torch.clone", "torch.pow" ] ]
amjadalfawal/sineor-project-aiu
[ "a8e8456c18feddeab067d43862c6829c50d1555d" ]
[ "helper.py" ]
[ "import torch\nimport torch.utils.data\nimport h5py\nfrom datetime import datetime\nfrom helper import *\nimport PeptideBuilder\nimport Bio.PDB\nfrom Bio.PDB.vectors import Vector\nimport math\n\nAA_ID_DICT = {'A': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6, 'H': 7, 'I': 8, 'K': 9,\n 'L': 10, 'M': 11, 'N': 12, 'P': 13, 'Q': 14, 'R': 15, 'S': 16, 'T': 17,\n 'V': 18, 'W': 19,'Y': 20}\n\n\ndef contruct_data_loader_from_disk(filename, minibatch_size):\n return torch.utils.data.DataLoader(H5PytorchDataset(filename), batch_size=minibatch_size, shuffle=True)\n\n\nclass H5PytorchDataset(torch.utils.data.Dataset):\n def __init__(self, filename):\n super(H5PytorchDataset, self).__init__()\n\n self.h5pyfile = h5py.File(filename, 'r')\n self.nb_protiens, self.max_sequence_len = self.h5pyfile['primary'].shape\n\n def __getitem__(self, index):\n return self.h5pyfile['primary'][index,:] , self.h5pyfile['tertiary'][index,:] , self.h5pyfile['mask'][index,:]\n\n def __len__(self):\n return self.nb_protiens\n\ndef set_protien_experiments_id(data_set_identifier, learning_rate, minibatch_size):\n output_string = datetime.now().strftime('%Y-%m-%d_%H_%M_%S')\n output_string += \"-\" + data_set_identifier\n output_string += \"-LR\" + str(learning_rate).replace(\".\",\"_\")\n output_string += \"-MB\" + str(minibatch_size)\n globals().__setitem__(\"experiment_id\",output_string)\n\ndef write_out(*args, end='\\n'):\n output_string = datetime.now().strftime('%Y-%m-%d %H:%M:%S') + \": \" + str.join(\" \", [str(a) for a in args]) + end\n if globals().get(\"experiment_id\") is not None:\n with open(\"output/\"+globals().get(\"experiment_id\")+\".txt\", \"a+\") as output_file:\n output_file.write(output_string)\n output_file.flush()\n print(output_string, end=\"\")\n\ndef test_eval_model(data_loader, model):\n loss = 0\n data_total = []\n for i, data in enumerate(data_loader, 0):\n primary_sequence, tertiary_positions, mask = data\n\n predicted_positions = model(primary_sequence)\n\n minibatch_data = list(zip(primary_sequence,\n tertiary_positions,\n predicted_positions,\n mask))\n data_total.extend(minibatch_data)\n for primary_sequence, tertiary_positions,predicted_positions, mask in minibatch_data:\n error = 1\n loss += error\n loss /= data_loader.dataset.__len__()\n return (loss, data_total)\n\ndef save_model_on_disk_torch_version(model):\n path = \"output/models/\"+globals().get(\"experiment_id\")+\".model\"\n torch.save(model,path)\n return path\n\ndef draw_plot(fig, plt, validation_dataset_size, sample_num, train_loss_values,\n validation_loss_values):\n def draw_with_vars():\n ax = fig.gca()\n ax2 = ax.twinx()\n plt.grid(True)\n plt.title(\"Training progress (\" + str(validation_dataset_size) + \" samples in validation set)\")\n train_loss_plot, = ax.plot(sample_num, train_loss_values)\n ax.set_ylabel('Train Negative log likelihood')\n ax.yaxis.labelpad = 0\n validation_loss_plot, = ax2.plot(sample_num, validation_loss_values, color='black')\n ax2.set_ylabel('Validation loss')\n ax2.set_ylim(bottom=0)\n plt.legend([train_loss_plot, validation_loss_plot],\n ['Train loss on last batch', 'Validation loss'])\n ax.set_xlabel('Minibatches processed (=network updates)', color='black')\n return draw_with_vars\n\ndef logs(accuracy):\n output_string = globals().get(\"experiment_id\") + \": \" + str(accuracy) + \"\\n\"\n with open(\"output/logs.txt\", \"a+\") as output_file:\n output_file.write(output_string)\n output_file.flush()\n print(output_string, end=\"\")\n\n\ndef write_to_pdb_strcture(atomic_coords, aaSequence, 
prot_id):\n _aa_dict_inverse = {v: k for k, v in AA_ID_DICT.items()}\n atomic_coords = list([Vector(v) for v in atomic_coords.numpy()])\n aa_list = []\n phi_list = []\n psi_list = []\n omega_list = []\n for i, coord in enumerate(atomic_coords):\n if int(aaSequence[int(i/3)]) == 0:\n print(\"Reached end of protein, stopping\")\n break\n\n if i % 3 == 0:\n aa_symbol = _aa_dict_inverse[int(aaSequence[int(i/3)])]\n aa_list.append(aa_symbol)\n\n if i != 0:\n phi_list.append(math.degrees(Bio.PDB.calc_dihedral(atomic_coords[i - 1],\n atomic_coords[i],\n atomic_coords[i + 1],\n atomic_coords[i + 2])))\n if i+3 < len(atomic_coords) and int(aaSequence[int(i/3)+1]) != 0:\n psi_list.append(math.degrees(Bio.PDB.calc_dihedral(atomic_coords[i],\n atomic_coords[i + 1],\n atomic_coords[i + 2],\n atomic_coords[i + 3])))\n omega_list.append(math.degrees(Bio.PDB.calc_dihedral(atomic_coords[i + 1],\n atomic_coords[i + 2],\n atomic_coords[i + 3],\n atomic_coords[i + 4])))\n\n out = Bio.PDB.PDBIO()\n structure = PeptideBuilder.make_structure(aa_list, phi_list, psi_list, omega_list)\n out.set_structure(structure)\n out.save(\"output/protein_\" + str(prot_id) + \".pdb\")" ]
[ [ "torch.save" ] ]
GT-RIPL/robo-vln
[ "286870a7d1095fe2607b524572587a48854bc970" ]
[ "robo_vln_baselines/hierarchical_trainer.py" ]
[ "import copy\nimport gc\nimport json\nimport os\nimport random\nimport warnings\nfrom collections import defaultdict\nfrom typing import Dict\nimport matplotlib.pyplot as plt\nimport scipy.misc\nimport habitat_sim\nimport gc\nimport magnum as mn \nimport quaternion\nfrom habitat_sim.utils.common import quat_to_magnum, quat_from_magnum\nfrom fastdtw import fastdtw\nimport gzip \n\nfrom transformers.optimization import Adafactor\n\nimport lmdb\nimport msgpack_numpy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport tqdm\nfrom habitat import Config, logger\nfrom habitat.utils.visualizations.utils import append_text_to_image\nfrom habitat_baselines.common.base_trainer import BaseRLTrainer\nfrom habitat_baselines.common.baseline_registry import baseline_registry\nfrom habitat_baselines.common.environments import get_env_class\nfrom habitat_baselines.common.tensorboard_utils import TensorboardWriter\n\nfrom habitat_baselines.common.utils import generate_video\nfrom robo_vln_baselines.common.continuous_path_follower import (\n ContinuousPathFollower,\n track_waypoint\n)\n\nfrom habitat_extensions.utils import observations_to_image\nfrom robo_vln_baselines.common.aux_losses import AuxLosses\nfrom robo_vln_baselines.common.env_utils import (\n construct_env,\n construct_envs,\n construct_envs_auto_reset_false,\n SimpleRLEnv\n)\nfrom robo_vln_baselines.common.utils import transform_obs, batch_obs, batch_obs_data_collect, repackage_hidden, split_batch_tbptt, repackage_mini_batch\nfrom robo_vln_baselines.models.seq2seq_highlevel_cma import Seq2Seq_HighLevel_CMA as Seq2Seq_HighLevel\nfrom robo_vln_baselines.models.seq2seq_lowlevel import Seq2Seq_LowLevel\n\nwith warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=FutureWarning)\n import tensorflow as tf\n\n\nclass ObservationsDict(dict):\n def pin_memory(self):\n for k, v in self.items():\n self[k] = v.pin_memory()\n\n return self\n\n\ndef collate_fn(batch):\n \"\"\"Each sample in batch: (\n obs,\n prev_actions,\n oracle_actions,\n inflec_weight,\n )\n \"\"\"\n\n def _pad_helper(t, max_len, fill_val=0):\n pad_amount = max_len - t.size(0)\n if pad_amount == 0:\n return t\n \n pad = torch.full_like(t[0:1], fill_val).expand(pad_amount, *t.size()[1:])\n return torch.cat([t, pad], dim=0)\n \n def _pad_instruction(t, max_len, fill_val=0):\n pad_amount = max_len - t.size(1)\n if pad_amount == 0:\n return t\n pad = torch.full_like(t[:,0], fill_val).expand(*t.size()[:1], pad_amount)\n return torch.cat([t, pad], dim=1)\n\n transposed = list(zip(*batch))\n\n observations_batch = list(transposed[0])\n prev_actions_batch = list(transposed[1])\n corrected_actions_batch = list(transposed[2])\n oracle_stop_batch = list(transposed[3])\n N = len(corrected_actions_batch)\n B = len(prev_actions_batch)\n new_observations_batch = defaultdict(list)\n for sensor in observations_batch[0]:\n if sensor == 'instruction':\n for bid in range(N):\n new_observations_batch[sensor].append(observations_batch[bid][sensor])\n else: \n for bid in range(B):\n new_observations_batch[sensor].append(observations_batch[bid][sensor])\n\n observations_batch = new_observations_batch\n\n max_traj_len = max(ele.size(0) for ele in prev_actions_batch)\n max_insr_len = max(ele.size(1) for ele in observations_batch['instruction'])\n for bid in range(B):\n for sensor in observations_batch:\n if sensor == 'instruction': \n observations_batch[sensor][bid] = _pad_instruction(\n observations_batch[sensor][bid], max_insr_len, 
fill_val=0.0\n )\n continue\n observations_batch[sensor][bid] = _pad_helper(\n observations_batch[sensor][bid], max_traj_len, fill_val=0.0\n )\n prev_actions_batch[bid] = _pad_helper(prev_actions_batch[bid], max_traj_len)\n corrected_actions_batch[bid] = _pad_helper(\n corrected_actions_batch[bid], max_traj_len, fill_val=0.0\n )\n oracle_stop_batch[bid] = _pad_helper(oracle_stop_batch[bid], max_traj_len, fill_val=-1.0)\n \n\n for sensor in observations_batch:\n observations_batch[sensor] = torch.stack(observations_batch[sensor], dim=1)\n observations_batch[sensor] = observations_batch[sensor].transpose(1,0)\n observations_batch[sensor] = observations_batch[sensor].contiguous().view(\n -1, *observations_batch[sensor].size()[2:]\n )\n\n prev_actions_batch = torch.stack(prev_actions_batch, dim=1)\n corrected_actions_batch = torch.stack(corrected_actions_batch, dim=1)\n not_done_masks = torch.ones_like(corrected_actions_batch, dtype=torch.float)\n not_done_masks[0] = 0\n oracle_stop_batch = torch.stack(oracle_stop_batch, dim=1)\n\n prev_actions_batch = prev_actions_batch.transpose(1,0)\n not_done_masks = not_done_masks.transpose(1,0)\n corrected_actions_batch = corrected_actions_batch.transpose(1,0)\n oracle_stop_batch = oracle_stop_batch.transpose(1,0)\n\n observations_batch = ObservationsDict(observations_batch)\n\n return (\n observations_batch,\n prev_actions_batch.contiguous().view(-1, 2),\n not_done_masks.contiguous().view(-1, 2),\n corrected_actions_batch.contiguous().view(-1,2),\n oracle_stop_batch.contiguous().view(-1,1)\n )\n\n\ndef _block_shuffle(lst, block_size):\n blocks = [lst[i : i + block_size] for i in range(0, len(lst), block_size)]\n random.shuffle(blocks)\n\n return [ele for block in blocks for ele in block]\n\n\nclass IWTrajectoryDataset(torch.utils.data.IterableDataset):\n def __init__(\n self,\n lmdb_features_dir,\n use_iw,\n inflection_weight_coef=1.0,\n lmdb_map_size=1e9,\n batch_size=1,\n is_bert=False\n ):\n super().__init__()\n self.lmdb_features_dir = lmdb_features_dir\n self.lmdb_map_size = lmdb_map_size\n self.preload_size = batch_size * 100\n self._preload = []\n self.batch_size = batch_size\n self.is_bert = is_bert\n\n if use_iw:\n self.inflec_weights = torch.tensor([1.0, inflection_weight_coef])\n else:\n self.inflec_weights = torch.tensor([1.0, 1.0])\n\n with lmdb.open(\n self.lmdb_features_dir,\n map_size=int(self.lmdb_map_size),\n readonly=True,\n lock=False,\n ) as lmdb_env:\n self.length = lmdb_env.stat()[\"entries\"]\n\n def _load_next(self):\n if len(self._preload) == 0:\n if len(self.load_ordering) == 0:\n raise StopIteration\n\n new_preload = []\n lengths = []\n with lmdb.open(\n self.lmdb_features_dir,\n map_size=int(self.lmdb_map_size),\n readonly=True,\n lock=False,\n ) as lmdb_env, lmdb_env.begin(buffers=True) as txn:\n for _ in range(self.preload_size):\n if len(self.load_ordering) == 0:\n break\n\n new_preload.append(\n msgpack_numpy.unpackb(\n txn.get(str(self.load_ordering.pop()).encode()), raw=False\n )\n )\n\n lengths.append(len(new_preload[-1][0]))\n\n sort_priority = list(range(len(lengths)))\n random.shuffle(sort_priority)\n\n sorted_ordering = list(range(len(lengths)))\n sorted_ordering.sort(key=lambda k: (lengths[k], sort_priority[k]))\n\n for idx in _block_shuffle(sorted_ordering, self.batch_size):\n self._preload.append(new_preload[idx])\n\n return self._preload.pop()\n\n def __next__(self):\n obs, prev_actions, oracle_actions, stop_step = self._load_next()\n\n discrete_oracle_actions = obs['vln_oracle_action_sensor'].copy()\n val = 
int(stop_step[-1])-1\n discrete_oracle_actions[val:]=4\n obs['vln_oracle_action_sensor'] = discrete_oracle_actions\n oracle_stop = np.zeros_like(obs['vln_oracle_action_sensor'])\n oracle_stop[val:] = 1\n\n if self.is_bert: \n instruction_batch = obs['instruction'][0]\n instruction_batch = np.expand_dims(instruction_batch, axis=0)\n obs['instruction'] = instruction_batch\n else:\n instruction_batch = obs['glove_tokens'][0]\n instruction_batch = np.expand_dims(instruction_batch, axis=0)\n obs['instruction'] = instruction_batch\n del obs['glove_tokens']\n for k, v in obs.items():\n obs[k] = torch.from_numpy(v)\n\n prev_actions = torch.from_numpy(prev_actions)\n oracle_stop = torch.from_numpy(oracle_stop)\n oracle_actions = torch.from_numpy(oracle_actions)\n return (obs, prev_actions, oracle_actions, oracle_stop)\n\n def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is None:\n start = 0\n end = self.length\n else:\n per_worker = int(np.ceil(self.length / worker_info.num_workers))\n\n start = per_worker * worker_info.id\n end = min(start + per_worker, self.length)\n\n # Reverse so we can use .pop()\n self.load_ordering = list(\n reversed(_block_shuffle(list(range(start, end)), self.preload_size))\n )\n\n return self\n\n\n@baseline_registry.register_trainer(name=\"hierarchical_trainer\")\nclass RoboDaggerTrainer(BaseRLTrainer):\n def __init__(self, config=None):\n super().__init__(config)\n self.high_level = None\n self.low_level = None\n self.actor_critic = None\n self.envs = None\n\n self.device = (\n torch.device(\"cuda\", self.config.TORCH_GPU_ID)\n if torch.cuda.is_available()\n else torch.device(\"cpu\")\n )\n\n self.device2 = (\n torch.device(\"cuda:1\")\n if torch.cuda.is_available()\n else torch.device(\"cpu\")\n )\n self.lmdb_features_dir = self.config.DAGGER.LMDB_FEATURES_DIR.format(\n split=config.TASK_CONFIG.DATASET.SPLIT\n )\n self.lmdb_eval_dir = self.config.DAGGER.LMDB_EVAL_DIR\n\n def _setup_actor_critic_agent(\n self, config: Config, load_from_ckpt: bool, ckpt_path: str\n ) -> None:\n r\"\"\"Sets up actor critic and agent.\n Args:\n config: MODEL config\n Returns:\n None\n \"\"\"\n config.defrost()\n config.TORCH_GPU_ID = self.config.TORCH_GPU_ID\n config.freeze()\n\n self.high_level = Seq2Seq_HighLevel(\n observation_space=self.envs.observation_space,\n num_actions=self.envs.action_space.n,\n model_config=config,\n batch_size = self.config.DAGGER.BATCH_SIZE,\n )\n\n self.low_level = Seq2Seq_LowLevel(\n observation_space=self.envs.observation_space,\n num_actions=2,\n num_sub_tasks=self.envs.action_space.n,\n model_config=config,\n batch_size = self.config.DAGGER.BATCH_SIZE,\n )\n \n self.optimizer_high_level = torch.optim.AdamW(\n self.high_level.parameters(), lr=self.config.DAGGER.LR, weight_decay=self.config.MODEL.TRANSFORMER.weight_decay)\n\n self.optimizer_low_level = torch.optim.Adam(\n self.low_level.parameters(), lr=self.config.DAGGER.LR,weight_decay=self.config.MODEL.TRANSFORMER.weight_decay\n )\n\n self.scheduler_high_level = torch.optim.lr_scheduler.CyclicLR(self.optimizer_high_level, base_lr=2e-6, max_lr=1e-4, step_size_up=1000,step_size_down=30000, cycle_momentum=False)\n\n if not self.config.MODEL.TRANSFORMER.split_gpus:\n self.high_level.to(self.device) \n\n if load_from_ckpt:\n ckpt_dict = self.load_checkpoint(ckpt_path, map_location=\"cpu\")\n self.high_level.load_state_dict(ckpt_dict[\"high_level_state_dict\"])\n self.low_level.load_state_dict(ckpt_dict[\"low_level_state_dict\"])\n logger.info(f\"Loaded weights from 
checkpoint: {ckpt_path}\")\n logger.info(\"Finished setting up actor critic model.\")\n\n def save_checkpoint(self, file_name) -> None:\n r\"\"\"Save checkpoint with specified name.\n\n Args:\n file_name: file name for checkpoint\n\n Returns:\n None\n \"\"\"\n checkpoint = {\n \"high_level_state_dict\": self.high_level.state_dict(),\n \"low_level_state_dict\": self.low_level.state_dict(),\n \"config\": self.config,\n }\n torch.save(checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name))\n\n def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:\n r\"\"\"Load checkpoint of specified path as a dict.\n\n Args:\n checkpoint_path: path of target checkpoint\n *args: additional positional args\n **kwargs: additional keyword args\n\n Returns:\n dict containing checkpoint info\n \"\"\"\n return torch.load(checkpoint_path, *args, **kwargs)\n\n def _update_dataset(self, data_it):\n if torch.cuda.is_available():\n with torch.cuda.device(self.device):\n torch.cuda.empty_cache()\n\n prev_actions = np.zeros((1,2))\n done = False\n vel_control = habitat_sim.physics.VelocityControl()\n vel_control.controlling_lin_vel = True\n vel_control.lin_vel_is_local = True\n vel_control.controlling_ang_vel = True\n vel_control.ang_vel_is_local = True\n collected_eps = 0\n\n with tqdm.tqdm(total=self.config.DAGGER.UPDATE_SIZE) as pbar, lmdb.open(\n self.lmdb_features_dir, map_size=int(self.config.DAGGER.LMDB_MAP_SIZE)\n ) as lmdb_env, torch.no_grad():\n\n\n start_id = lmdb_env.stat()[\"entries\"]\n txn = lmdb_env.begin(write=True)\n stop_step=0\n for episode in range(self.config.DAGGER.UPDATE_SIZE):\n episode = []\n observations = self.envs.reset()\n observations = transform_obs(\n observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, is_bert=self.config.MODEL.INSTRUCTION_ENCODER.is_bert\n )\n\n reference_path = self.envs.habitat_env.current_episode.reference_path + [\n self.envs.habitat_env.current_episode.goals[0].position\n ]\n continuous_path_follower = ContinuousPathFollower(\n self.envs.habitat_env._sim, reference_path, waypoint_threshold=0.4)\n\n is_done = False\n steps=0\n stop_flag = False\n valid_trajectories = True\n while continuous_path_follower.progress < 1.0:\n steps+=1\n if is_done:\n break\n continuous_path_follower.update_waypoint()\n agent_state = self.envs.habitat_env._sim.get_agent_state()\n previous_rigid_state = habitat_sim.RigidState(\n quat_to_magnum(agent_state.rotation), agent_state.position\n )\n\n if np.isnan(continuous_path_follower.waypoint).any() or np.isnan(previous_rigid_state.translation).any() or np.isnan(quaternion.as_euler_angles(quat_from_magnum(previous_rigid_state.rotation))).any():\n valid_trajectories = False\n break\n vel,omega = track_waypoint(\n continuous_path_follower.waypoint,\n previous_rigid_state,\n vel_control,\n progress = continuous_path_follower.progress,\n dt=self.config.DAGGER.time_step,\n )\n observations, reward, done, info = self.envs.step(vel_control)\n episode_over, success = done\n\n if continuous_path_follower.progress >0.985 and not stop_flag:\n stop_step = steps\n stop_flag = True\n\n is_done = episode_over or (success and abs(vel)<0.005)\n\n observations = transform_obs(\n observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, is_bert=self.config.MODEL.INSTRUCTION_ENCODER.is_bert\n )\n actions = np.expand_dims(np.asarray([vel,omega]), axis=0)\n episode.append(\n (\n observations,\n prev_actions,\n actions,\n stop_step\n\n )\n )\n prev_actions = actions\n\n # Save episode to LMDB directory\n if 
valid_trajectories:\n traj_obs = batch_obs_data_collect([step[0] for step in episode], device=torch.device(\"cpu\"))\n for k, v in traj_obs.items():\n traj_obs[k] = v.numpy()\n transposed_ep = [\n traj_obs,\n np.array([step[1] for step in episode], dtype=float),\n np.array([step[2] for step in episode], dtype=float),\n [step[3] for step in episode],\n ]\n txn.put(\n str(start_id + collected_eps).encode(),\n msgpack_numpy.packb(transposed_ep, use_bin_type=True),\n )\n\n pbar.update()\n collected_eps += 1\n\n if (\n collected_eps % self.config.DAGGER.LMDB_COMMIT_FREQUENCY\n ) == 0:\n txn.commit()\n txn = lmdb_env.begin(write=True)\n\n episode = []\n prev_actions = np.zeros((1,2))\n txn.commit()\n self.envs.close()\n self.envs = None\n\n def _update_agent(\n self, observations, prev_actions, not_done_masks, corrected_actions, oracle_stop, high_recurrent_hidden_states, \n low_recurrent_hidden_states, detached_state_low\n ):\n self.optimizer_high_level.zero_grad()\n self.optimizer_low_level.zero_grad() \n high_level_criterion = nn.CrossEntropyLoss(ignore_index=-1, reduction=\"mean\")\n low_level_criterion = nn.MSELoss()\n low_level_stop_criterion = nn.BCEWithLogitsLoss()\n AuxLosses.clear()\n high_recurrent_hidden_states = repackage_hidden(high_recurrent_hidden_states)\n low_recurrent_hidden_states = repackage_hidden(low_recurrent_hidden_states)\n\n batch = (observations, high_recurrent_hidden_states, prev_actions, not_done_masks)\n output, high_recurrent_hidden_states = self.high_level(batch)\n del batch\n high_level_action_mask = observations['vln_oracle_action_sensor'] ==0\n output = output.masked_fill_(high_level_action_mask, 0)\n observations['vln_oracle_action_sensor'] = observations['vln_oracle_action_sensor'].squeeze(1).to(dtype=torch.int64)\n high_level_loss = high_level_criterion(output,(observations['vln_oracle_action_sensor']-1))\n high_level_loss.backward()\n self.optimizer_high_level.step()\n high_level_loss_data = high_level_loss.detach()\n del output\n\n self.low_level.to(self.device2)\n observations = {\n k: v.to(device=self.device2, non_blocking=True)\n for k, v in observations.items()\n }\n discrete_actions = observations['vln_oracle_action_sensor']\n discrete_action_mask = discrete_actions ==0\n discrete_actions = (discrete_actions-1).masked_fill_(discrete_action_mask, 4)\n\n del observations['vln_oracle_action_sensor']\n batch = (observations,\n low_recurrent_hidden_states,\n prev_actions.to(\n device=self.device2, non_blocking=True\n ),\n not_done_masks.to(\n device=self.device2, non_blocking=True\n ),\n discrete_actions.view(-1)) \n\n del observations, prev_actions, not_done_masks\n oracle_stop = oracle_stop.to(self.device2)\n output, stop_out, low_recurrent_hidden_states = self.low_level(batch)\n\n corrected_actions = corrected_actions.to(self.device2)\n\n action_mask = corrected_actions==0\n output = output.masked_fill_(action_mask, 0)\n output = output.to(dtype=torch.float)\n corrected_actions = corrected_actions.to(dtype=torch.float)\n low_level_action_loss = low_level_criterion(output, corrected_actions)\n\n mask = (oracle_stop!=-1)\n oracle_stop = torch.masked_select(oracle_stop, mask)\n stop_out = torch.masked_select(stop_out, mask)\n low_level_stop_loss = low_level_stop_criterion(stop_out, oracle_stop)\n low_level_loss = low_level_action_loss + low_level_stop_loss\n low_level_loss.backward()\n self.optimizer_low_level.step()\n\n aux_loss_data =0\n loss = (high_level_loss_data.item(), low_level_action_loss.detach().item(), \n low_level_stop_loss.detach().item(), 
aux_loss_data)\n return loss, high_recurrent_hidden_states, low_recurrent_hidden_states, detached_state_low\n\n def _update_agent_val(\n self, observations, prev_actions, not_done_masks, corrected_actions, oracle_stop, high_recurrent_hidden_states, \n low_recurrent_hidden_states, detached_state_low\n ):\n\n high_level_criterion = nn.CrossEntropyLoss(ignore_index=-1, reduction=\"mean\")\n low_level_criterion = nn.MSELoss()\n low_level_stop_criterion = nn.BCEWithLogitsLoss()\n AuxLosses.clear()\n\n high_recurrent_hidden_states = repackage_hidden(high_recurrent_hidden_states)\n low_recurrent_hidden_states = repackage_hidden(low_recurrent_hidden_states)\n\n batch = (observations, high_recurrent_hidden_states, prev_actions, not_done_masks)\n output, high_recurrent_hidden_states = self.high_level(batch)\n del batch\n high_level_action_mask = observations['vln_oracle_action_sensor'] ==0\n output = output.masked_fill_(high_level_action_mask, 0)\n observations['vln_oracle_action_sensor'] = observations['vln_oracle_action_sensor'].squeeze(1).to(dtype=torch.int64)\n high_level_loss = high_level_criterion(output,(observations['vln_oracle_action_sensor']-1))\n\n predicted = torch.argmax(output, dim=1)\n corrected_mask = ~high_level_action_mask\n correct = torch.masked_select((observations['vln_oracle_action_sensor']-1), corrected_mask)\n predicted = torch.masked_select(predicted, corrected_mask)\n accuracy = (predicted == correct).sum().item()\n total = predicted.size(0)\n del output\n\n self.low_level.to(self.device2)\n observations = {\n k: v.to(device=self.device2, non_blocking=True)\n for k, v in observations.items()\n }\n\n discrete_actions = observations['vln_oracle_action_sensor']\n discrete_action_mask = discrete_actions ==0\n discrete_actions = (discrete_actions-1).masked_fill_(discrete_action_mask, 4)\n\n batch = (observations,\n low_recurrent_hidden_states,\n prev_actions.to(\n device=self.device2, non_blocking=True\n ),\n not_done_masks.to(\n device=self.device2, non_blocking=True\n ),\n discrete_actions.view(-1)) \n\n del observations, prev_actions, not_done_masks\n oracle_stop = oracle_stop.to(self.device2)\n output, stop_out, low_recurrent_hidden_states = self.low_level(batch)\n\n corrected_actions = corrected_actions.to(self.device2)\n\n action_mask = corrected_actions==0\n output = output.masked_fill_(action_mask, 0)\n output = output.to(dtype=torch.float)\n corrected_actions = corrected_actions.to(dtype=torch.float)\n low_level_action_loss = low_level_criterion(output, corrected_actions)\n\n mask = (oracle_stop!=-1)\n oracle_stop = torch.masked_select(oracle_stop, mask)\n stop_out = torch.masked_select(stop_out, mask)\n low_level_stop_loss = low_level_stop_criterion(stop_out, oracle_stop)\n\n aux_loss_data =0\n loss = (high_level_loss.item(), low_level_action_loss.item(), \n low_level_stop_loss.item(), aux_loss_data)\n return loss, high_recurrent_hidden_states, low_recurrent_hidden_states, detached_state_low, accuracy, total\n\n\n def train_epoch(self, diter, length, batch_size, epoch, writer, train_steps):\n loss, action_loss, aux_loss = 0, 0, 0\n step_id = 0\n\n self.high_level.train()\n self.low_level.train()\n\n for batch in tqdm.tqdm(\n diter, total=length // batch_size, leave=False\n ):\n ( observations_batch,\n prev_actions_batch,\n not_done_masks,\n corrected_actions_batch,\n oracle_stop_batch\n ) = batch\n high_recurrent_hidden_states = torch.zeros(\n self.high_level.state_encoder.num_recurrent_layers,\n self.config.DAGGER.BATCH_SIZE,\n 
self.config.MODEL.STATE_ENCODER.hidden_size,\n device=self.device,\n )\n low_recurrent_hidden_states = torch.zeros(\n self.low_level.state_encoder.num_recurrent_layers,\n self.config.DAGGER.BATCH_SIZE,\n self.config.MODEL.STATE_ENCODER.hidden_size,\n device=self.device2,\n )\n detached_state_low = None\n batch_split = split_batch_tbptt(observations_batch, prev_actions_batch, not_done_masks, \n corrected_actions_batch, oracle_stop_batch, self.config.DAGGER.tbptt_steps, \n self.config.DAGGER.split_dim)\n del observations_batch, prev_actions_batch, not_done_masks, corrected_actions_batch, batch\n for split in batch_split:\n ( observations_batch,\n prev_actions_batch,\n not_done_masks,\n corrected_actions_batch,\n oracle_stop_batch\n ) = split \n observations_batch = {\n k: v.to(device=self.device, non_blocking=True)\n for k, v in observations_batch.items()\n }\n try:\n loss, high_recurrent_hidden_states, low_recurrent_hidden_states, detached_state_low= self._update_agent(\n observations_batch,\n prev_actions_batch.to(\n device=self.device, non_blocking=True\n ),\n not_done_masks.to(\n device=self.device, non_blocking=True\n ),\n corrected_actions_batch.to(\n device=self.device, non_blocking=True\n ),\n oracle_stop_batch.to(\n device=self.device, non_blocking=True\n ),\n high_recurrent_hidden_states,\n low_recurrent_hidden_states,\n detached_state_low\n )\n writer.add_scalar(f\"Train High Level Action Loss\", loss[0], train_steps)\n writer.add_scalar(f\"Train Low Level Action Loss\", loss[1], train_steps)\n writer.add_scalar(f\"Train Low Level Stop Loss\", loss[2], train_steps)\n writer.add_scalar(f\"Train Low_level Total Loss\", loss[1]+loss[2], train_steps)\n train_steps += 1\n except:\n logger.info(\n \"ERROR: failed to update agent. Updating agent with batch size of 1.\"\n )\n loss, action_loss, aux_loss = 0, 0, 0\n prev_actions_batch = prev_actions_batch.cpu()\n not_done_masks = not_done_masks.cpu()\n corrected_actions_batch = corrected_actions_batch.cpu()\n weights_batch = weights_batch.cpu()\n observations_batch = {\n k: v.cpu() for k, v in observations_batch.items()\n }\n\n for i in range(not_done_masks.size(0)):\n output = self._update_agent(\n {\n k: v[i].to(\n device=self.device, non_blocking=True\n )\n for k, v in observations_batch.items()\n },\n prev_actions_batch[i].to(\n device=self.device, non_blocking=True\n ),\n not_done_masks[i].to(\n device=self.device, non_blocking=True\n ),\n corrected_actions_batch[i].to(\n device=self.device, non_blocking=True\n ),\n weights_batch[i].to(\n device=self.device, non_blocking=True\n ),\n )\n loss += output[0]\n action_loss += output[1]\n aux_loss += output[2]\n self.scheduler_high_level.step()\n # self.scheduler_low_level.step()\n\n self.save_checkpoint(\n f\"ckpt.{self.config.DAGGER.EPOCHS + epoch}.pth\"\n )\n return train_steps\n\n def val_epoch(self, diter, length, batch_size, epoch, writer, val_steps):\n loss, aux_loss = 0, 0\n step_id = 0\n val_high_losses = []\n val_low_losses = []\n\n self.high_level.eval()\n self.low_level.eval()\n\n correct_labels = 0\n total_correct=0\n\n with torch.no_grad():\n for batch in tqdm.tqdm(\n diter, total=length // batch_size, leave=False\n ):\n ( observations_batch,\n prev_actions_batch,\n not_done_masks,\n corrected_actions_batch,\n oracle_stop_batch\n ) = batch\n\n high_recurrent_hidden_states = torch.zeros(\n self.high_level.state_encoder.num_recurrent_layers,\n self.config.DAGGER.BATCH_SIZE,\n self.config.MODEL.STATE_ENCODER.hidden_size,\n device=self.device,\n )\n low_recurrent_hidden_states = 
torch.zeros(\n self.low_level.state_encoder.num_recurrent_layers,\n self.config.DAGGER.BATCH_SIZE,\n self.config.MODEL.STATE_ENCODER.hidden_size,\n device=self.device2,\n )\n detached_state_low = None\n batch_split = split_batch_tbptt(observations_batch, prev_actions_batch, not_done_masks, \n corrected_actions_batch, oracle_stop_batch, self.config.DAGGER.tbptt_steps, \n self.config.DAGGER.split_dim)\n del observations_batch, prev_actions_batch, not_done_masks, corrected_actions_batch, batch\n for split in batch_split:\n ( observations_batch,\n prev_actions_batch,\n not_done_masks,\n corrected_actions_batch,\n oracle_stop_batch\n ) = split \n observations_batch = {\n k: v.to(device=self.device, non_blocking=True)\n for k, v in observations_batch.items()\n }\n loss, high_recurrent_hidden_states, low_recurrent_hidden_states, detached_state_low, correct, total= self._update_agent_val(\n observations_batch,\n prev_actions_batch.to(\n device=self.device, non_blocking=True\n ),\n not_done_masks.to(\n device=self.device, non_blocking=True\n ),\n corrected_actions_batch.to(\n device=self.device, non_blocking=True\n ),\n oracle_stop_batch.to(\n device=self.device, non_blocking=True\n ),\n high_recurrent_hidden_states,\n low_recurrent_hidden_states,\n detached_state_low\n )\n\n correct_labels+= correct \n total_correct+=total\n\n writer.add_scalar(f\"Val High Level Action Loss\", loss[0], val_steps)\n writer.add_scalar(f\"Val Low_level Total Loss\", loss[1]+loss[2], val_steps)\n val_steps += 1\n\n val_low_losses.append(loss[0])\n val_high_losses.append(loss[1]+loss[2])\n\n final_accuracy = 100 * correct_labels / total_correct\n writer.add_scalar(f\"Val High level Loss epoch\", np.mean(val_high_losses), epoch)\n writer.add_scalar(f\"Val Low level Loss epoch\", np.mean(val_low_losses), epoch)\n writer.add_scalar(f\"Validation Accuracy\", final_accuracy, epoch)\n return val_steps\n\n def train(self) -> None:\n r\"\"\"Main method for training DAgger.\n\n Returns:\n None\n \"\"\"\n os.makedirs(self.lmdb_features_dir, exist_ok=True)\n os.makedirs(self.config.CHECKPOINT_FOLDER, exist_ok=True)\n\n if self.config.DAGGER.PRELOAD_LMDB_FEATURES:\n try:\n lmdb.open(self.lmdb_features_dir, readonly=True)\n lmdb.open(self.lmdb_eval_dir, readonly=True)\n except lmdb.Error as err:\n logger.error(\"Cannot open database for teacher forcing preload.\")\n raise err\n else:\n with lmdb.open(\n self.lmdb_features_dir, map_size=int(self.config.DAGGER.LMDB_MAP_SIZE)\n ) as lmdb_env, lmdb_env.begin(write=True) as txn:\n txn.drop(lmdb_env.open_db())\n\n split = self.config.TASK_CONFIG.DATASET.SPLIT\n self.config.defrost()\n self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split\n self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split\n\n # if doing teacher forcing, don't switch the scene until it is complete\n if self.config.DAGGER.P == 1.0:\n self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (\n -1\n )\n self.config.freeze()\n\n if self.config.DAGGER.PRELOAD_LMDB_FEATURES:\n # when preloadeding features, its quicker to just load one env as we just\n # need the observation space from it.\n single_proc_config = self.config.clone()\n single_proc_config.defrost()\n single_proc_config.NUM_PROCESSES = 1\n single_proc_config.freeze()\n self.envs = construct_env(self.config)\n else:\n self.envs = construct_env(self.config)\n\n self._setup_actor_critic_agent(\n self.config.MODEL,\n self.config.DAGGER.LOAD_FROM_CKPT,\n self.config.DAGGER.CKPT_TO_LOAD,\n )\n logger.info(\n \"agent number of high level parameters: 
{}\".format(\n sum(param.numel() for param in self.high_level.parameters())\n )\n )\n\n logger.info(\n \"agent number of low level parameters: {}\".format(\n sum(param.numel() for param in self.low_level.parameters())\n )\n )\n if self.config.DAGGER.PRELOAD_LMDB_FEATURES:\n self.envs.close()\n del self.envs\n self.envs = None\n\n with TensorboardWriter(\n self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs, purge_step=0\n ) as writer:\n for dagger_it in range(self.config.DAGGER.ITERATIONS):\n step_id = 0\n if not self.config.DAGGER.PRELOAD_LMDB_FEATURES:\n self._update_dataset(\n dagger_it + (1 if self.config.DAGGER.LOAD_FROM_CKPT else 0)\n )\n\n if torch.cuda.is_available():\n with torch.cuda.device(self.device):\n torch.cuda.empty_cache()\n gc.collect()\n\n dataset = IWTrajectoryDataset(\n self.lmdb_features_dir,\n self.config.DAGGER.USE_IW,\n inflection_weight_coef=self.config.MODEL.inflection_weight_coef,\n lmdb_map_size=self.config.DAGGER.LMDB_MAP_SIZE,\n batch_size=self.config.DAGGER.BATCH_SIZE,\n is_bert = self.config.MODEL.INSTRUCTION_ENCODER.is_bert,\n )\n diter = torch.utils.data.DataLoader(\n dataset,\n batch_size=self.config.DAGGER.BATCH_SIZE,\n shuffle=False,\n collate_fn=collate_fn,\n pin_memory=True,\n drop_last=True, # drop last batch if smaller\n num_workers=1,\n )\n\n dataset_eval = IWTrajectoryDataset(\n self.lmdb_eval_dir,\n self.config.DAGGER.USE_IW,\n inflection_weight_coef=self.config.MODEL.inflection_weight_coef,\n lmdb_map_size=self.config.DAGGER.LMDB_EVAL_SIZE,\n batch_size=self.config.DAGGER.BATCH_SIZE,\n is_bert = self.config.MODEL.INSTRUCTION_ENCODER.is_bert,\n )\n diter_eval = torch.utils.data.DataLoader(\n dataset_eval,\n batch_size=self.config.DAGGER.BATCH_SIZE,\n shuffle=False,\n collate_fn=collate_fn,\n pin_memory=True,\n drop_last=True, # drop last batch if smaller\n num_workers=1,\n )\n\n train_steps = 0\n val_steps = 0\n\n AuxLosses.activate()\n print(\"starting training loop\")\n for epoch in tqdm.trange(self.config.DAGGER.EPOCHS):\n train_steps = self.train_epoch(diter, dataset.length, dataset.batch_size, epoch, writer, train_steps)\n val_steps = self.val_epoch(diter_eval, dataset_eval.length, dataset_eval.batch_size, epoch, writer, val_steps)\n AuxLosses.deactivate()\n\n @staticmethod\n def _pause_envs(\n envs_to_pause,\n envs,\n recurrent_hidden_states,\n not_done_masks,\n prev_actions,\n batch,\n ):\n # pausing self.envs with no new episode\n if len(envs_to_pause) > 0:\n state_index = list(range(envs.num_envs))\n\n for idx in reversed(envs_to_pause):\n state_index.pop(idx)\n envs.pause_at(idx)\n\n # indexing along the batch dimensions\n if recurrent_hidden_states:\n recurrent_hidden_states = recurrent_hidden_states[:, state_index]\n # recurrent_hidden_states = recurrent_hidden_states\n not_done_masks = not_done_masks[state_index]\n prev_actions = prev_actions[state_index]\n\n for k, v in batch.items():\n batch[k] = v[state_index]\n\n return (envs, recurrent_hidden_states, not_done_masks, prev_actions, batch)\n\n def _euclidean_distance(self, position_a, position_b):\n return np.linalg.norm(np.array(position_b) - np.array(position_a), ord=2)\n\n def _eval_checkpoint(\n self, checkpoint_path: str, writer: TensorboardWriter, checkpoint_index: int = 0\n ) -> None:\n r\"\"\"Evaluates a single checkpoint. 
Assumes episode IDs are unique.\n\n Args:\n checkpoint_path: path of checkpoint\n writer: tensorboard writer object for logging to tensorboard\n checkpoint_index: index of cur checkpoint for logging\n\n Returns:\n None\n \"\"\"\n logger.info(f\"checkpoint_path: {checkpoint_path}\")\n\n if self.config.EVAL.USE_CKPT_CONFIG:\n config = self._setup_eval_config(\n self.load_checkpoint(checkpoint_path, map_location=\"cpu\")[\"config\"]\n )\n else:\n config = self.config.clone()\n\n config.defrost()\n config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT\n config.TASK_CONFIG.TASK.NDTW.SPLIT = config.EVAL.SPLIT\n config.TASK_CONFIG.TASK.SDTW.SPLIT = config.EVAL.SPLIT\n config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = True\n config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1\n if len(config.VIDEO_OPTION) > 0:\n config.defrost()\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"TOP_DOWN_MAP\")\n config.TASK_CONFIG.TASK.MEASUREMENTS.append(\"COLLISIONS\")\n\n config.freeze()\n\n gt_path = config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(split=config.TASK_CONFIG.DATASET.SPLIT)\n with gzip.open(gt_path, \"rt\") as f:\n self.gt_json = json.load(f)\n\n # setup agent\n self.envs = construct_env(config)\n self.device = (\n torch.device(\"cuda\", config.TORCH_GPU_ID)\n if torch.cuda.is_available()\n else torch.device(\"cpu\")\n )\n\n self._setup_actor_critic_agent(config.MODEL, True, checkpoint_path)\n vc = habitat_sim.physics.VelocityControl()\n vc.controlling_lin_vel = True\n vc.lin_vel_is_local = True\n vc.controlling_ang_vel = True\n vc.ang_vel_is_local = True\n\n observations = self.envs.reset()\n observations = transform_obs(\n observations, config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, is_bert=self.config.MODEL.INSTRUCTION_ENCODER.is_bert\n )\n observations = batch_obs(observations, self.device)\n\n high_recurrent_hidden_states = torch.zeros(\n self.high_level.state_encoder.num_recurrent_layers,\n self.config.NUM_PROCESSES,\n self.config.MODEL.STATE_ENCODER.hidden_size,\n device=self.device,\n )\n low_recurrent_hidden_states = torch.zeros(\n self.low_level.state_encoder.num_recurrent_layers,\n self.config.NUM_PROCESSES,\n self.config.MODEL.STATE_ENCODER.hidden_size,\n device=self.device,\n )\n self.low_level.to(self.device)\n prev_actions = torch.zeros(\n config.NUM_PROCESSES, 2, device=self.device, dtype=torch.long\n )\n not_done_masks = torch.zeros(config.NUM_PROCESSES, 2, device=self.device)\n\n stats_episodes = {} # dict of dicts that stores stats per episode\n\n if len(config.VIDEO_OPTION) > 0:\n rgb_frames = []\n os.makedirs(config.VIDEO_DIR, exist_ok=True)\n\n if config.PLOT_ATTENTION:\n attention_weights = [[] for _ in range(config.NUM_PROCESSES)]\n save_actions = [[] for _ in range(config.NUM_PROCESSES)]\n\n self.high_level.eval()\n self.low_level.eval()\n k=0\n ep_count = 0\n min_2nd_dim = 1000\n steps=0\n locations=[]\n detached_state_low = None\n while (\n len(stats_episodes) < config.EVAL.EPISODE_COUNT\n ):\n \n current_episode = self.envs.habitat_env.current_episode\n is_done = False\n locations.append(self.envs.habitat_env._sim.get_agent_state().position.tolist())\n with torch.no_grad():\n batch = (observations, high_recurrent_hidden_states, prev_actions, not_done_masks)\n output, high_recurrent_hidden_states = self.high_level(batch)\n pred = torch.argmax(output, dim=1)\n batch = (observations, low_recurrent_hidden_states,prev_actions, not_done_masks,pred) \n output, stop_out, low_recurrent_hidden_states = self.low_level(batch)\n prev_actions = output\n\n 
not_done_masks = torch.ones(config.NUM_PROCESSES, 2, device=self.device)\n lin_vel = output[:, 0]\n vc.linear_velocity = mn.Vector3(0, 0, output[:,0].cpu().numpy())\n max_turn_speed = 1.0\n vc.angular_velocity = mn.Vector3(0, np.clip(output[:,1].cpu().numpy(), -max_turn_speed, max_turn_speed), 0)\n observations, _, done, info = self.envs.step(vc) \n episode_over, success = done\n\n stop_pred = torch.round(torch.sigmoid(stop_out))\n episode_success = success and (lin_vel<0.25 or stop_pred ==1)\n is_done = episode_over or episode_success \n steps+=1\n\n if len(config.VIDEO_OPTION) > 0:\n frame = observations_to_image(observations, info)\n frame = append_text_to_image(\n frame, current_episode.instruction.instruction_text\n )\n rgb_frames.append(frame)\n\n if is_done or steps==self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS:\n # calulcate NDTW here\n detached_state_low = None \n gt_locations = self.gt_json[str(current_episode.episode_id)][\"locations\"]\n dtw_distance = fastdtw(locations, gt_locations, dist=self._euclidean_distance)[0]\n nDTW = np.exp(-dtw_distance / (len(gt_locations) * config.TASK_CONFIG.TASK.NDTW.SUCCESS_DISTANCE))\n locations=[]\n\n is_done = False\n ep_count+=1\n steps=0\n stats_episodes[current_episode.episode_id] = info\n stats_episodes[current_episode.episode_id]['ndtw'] = nDTW\n if episode_success:\n stats_episodes[current_episode.episode_id]['actual_success'] = 1.0\n else: \n stats_episodes[current_episode.episode_id]['actual_success'] = 0.0\n \n print(\"Current episode ID:\", current_episode.episode_id)\n print(\"Episode Completed:\", ep_count)\n observations = self.envs.reset()\n prev_actions = torch.zeros(\n config.NUM_PROCESSES, 2, device=self.device, dtype=torch.long\n )\n not_done_masks = torch.zeros(config.NUM_PROCESSES, 2, device=self.device)\n high_recurrent_hidden_states = torch.zeros(\n self.high_level.state_encoder.num_recurrent_layers,\n self.config.NUM_PROCESSES,\n self.config.MODEL.STATE_ENCODER.hidden_size,\n device=self.device,\n )\n low_recurrent_hidden_states = torch.zeros(\n self.low_level.state_encoder.num_recurrent_layers,\n self.config.NUM_PROCESSES,\n self.config.MODEL.STATE_ENCODER.hidden_size,\n device=self.device,\n )\n metrics={\"SPL\":round(\n stats_episodes[current_episode.episode_id][\"spl\"], 6\n ) } \n if len(config.VIDEO_OPTION) > 0:\n time_step=30\n generate_video(\n video_option=config.VIDEO_OPTION,\n video_dir=config.VIDEO_DIR,\n images=rgb_frames,\n episode_id=current_episode.episode_id,\n checkpoint_idx=checkpoint_index,\n metrics=metrics,\n tb_writer=writer,\n fps = int (1.0/time_step),\n )\n del stats_episodes[current_episode.episode_id][\"top_down_map\"]\n del stats_episodes[current_episode.episode_id][\"collisions\"]\n rgb_frames =[]\n if config.PLOT_ATTENTION:\n for j in range(len(attention_weights[i])):\n attention_weights[i][j] = attention_weights[i][j][:,:min_2nd_dim]\n attention_weights[i]= torch.cat(attention_weights[i], dim=0).cpu().numpy()\n attention_to_image(\n image_dir = config.VIDEO_DIR,\n attention = attention_weights[i],\n episode_id=current_episodes[i].episode_id,\n checkpoint_idx=checkpoint_index,\n metrics=metrics,\n actions = save_actions[i]\n )\n attention_weights[i] = [] \n save_actions[i] =[]\n \n observations = transform_obs(\n observations, config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID, is_bert=self.config.MODEL.INSTRUCTION_ENCODER.is_bert\n )\n observations = batch_obs(observations, self.device)\n k+=1\n\n self.envs.close()\n\n aggregated_stats = {}\n num_episodes = len(stats_episodes)\n for 
stat_key in next(iter(stats_episodes.values())).keys():\n aggregated_stats[stat_key] = (\n sum([v[stat_key] for v in stats_episodes.values()]) / num_episodes\n )\n\n split = config.TASK_CONFIG.DATASET.SPLIT\n os.makedirs(config.EVAL.VAL_LOG_DIR, exist_ok=True)\n val_log_path = os.path.join(config.EVAL.VAL_LOG_DIR,f\"stats_ckpt_{checkpoint_index}_{split}.json\")\n with open(val_log_path, \"w\") as f:\n json.dump(aggregated_stats, f, indent=4)\n\n logger.info(f\"Episodes evaluated: {num_episodes}\")\n checkpoint_num = checkpoint_index + 1\n for k, v in aggregated_stats.items():\n logger.info(f\"Average episode {k}: {v:.6f}\")\n writer.add_scalar(f\"eval_{split}_{k}\", v, checkpoint_num)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.stack", "torch.no_grad", "numpy.asarray", "torch.cuda.is_available", "torch.utils.data.get_worker_info", "torch.cat", "torch.cuda.empty_cache", "torch.from_numpy", "numpy.expand_dims", "numpy.isnan", "torch.sigmoid", "torch.device", "numpy.mean", "torch.ones_like", "torch.ones", "torch.load", "numpy.zeros", "torch.argmax", "torch.optim.lr_scheduler.CyclicLR", "numpy.ceil", "torch.tensor", "torch.masked_select", "numpy.array", "torch.cuda.device", "numpy.zeros_like", "torch.nn.MSELoss", "torch.nn.CrossEntropyLoss", "torch.full_like", "torch.nn.BCEWithLogitsLoss", "torch.zeros" ] ]
BingyangWu/Antman
[ "e9323cc8ccda637d3962b0de29ce154317f17e7a" ]
[ "TensorFlow-with-dynamic-scaling/tensorflow/python/training/monitored_session_test.py" ]
[ "# pylint: disable=g-bad-file-header\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for monitored_session.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport glob\nimport os\nimport sys\nimport threading\nimport time\nimport traceback\n\nfrom tensorflow.contrib.framework.python.ops import variables as variables_lib\nfrom tensorflow.contrib.testing.python.framework import util_test\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import debug_pb2\nfrom tensorflow.python.client import session as session_lib\nfrom tensorflow.python.distribute import collective_all_reduce_strategy\nfrom tensorflow.python.distribute import distribute_coordinator\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors_impl\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import checkpoint_management\nfrom tensorflow.python.training import coordinator\nfrom tensorflow.python.training import monitored_session\nfrom tensorflow.python.training import saver as saver_lib\nfrom tensorflow.python.training import session_run_hook\n\n\nclass ScaffoldTest(test.TestCase):\n \"\"\"Scaffold tests.\"\"\"\n\n def test_nothing_created_before_finalize(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n self.assertEqual(None, scaffold.init_op)\n self.assertEqual(None, scaffold.init_feed_dict)\n self.assertEqual(None, scaffold.init_fn)\n self.assertEqual(None, scaffold.ready_op)\n self.assertEqual(None, scaffold.ready_for_local_init_op)\n self.assertEqual(None, scaffold.local_init_op)\n self.assertEqual(None, scaffold.saver)\n\n def test_defaults_empty_graph(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n variables.VariableV1(1, name='my_var')\n variables.VariableV1(\n 2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])\n scaffold.finalize()\n self.assertTrue(isinstance(scaffold.init_op, ops.Operation))\n self.assertEqual(None, scaffold.init_feed_dict)\n self.assertEqual(None, scaffold.init_fn)\n self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))\n self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))\n self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))\n 
self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))\n with self.cached_session() as sess:\n self.assertItemsEqual([b'my_var', b'my_local_var'],\n sess.run(scaffold.ready_op))\n self.assertItemsEqual([b'my_var'],\n sess.run(scaffold.ready_for_local_init_op))\n sess.run(scaffold.init_op)\n self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))\n sess.run(scaffold.local_init_op)\n self.assertEqual(0, len(sess.run(scaffold.ready_op)))\n\n def test_defaults_no_variables(self):\n with ops.Graph().as_default():\n scaffold = monitored_session.Scaffold()\n constant_op.constant(1, name='my_const')\n scaffold.finalize()\n self.assertTrue(isinstance(scaffold.init_op, ops.Operation))\n self.assertEqual(None, scaffold.init_feed_dict)\n self.assertEqual(None, scaffold.init_fn)\n self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))\n self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))\n self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))\n self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))\n\n def test_caches_values(self):\n with ops.Graph().as_default():\n variables.VariableV1([1])\n scaffold1 = monitored_session.Scaffold()\n scaffold1.finalize()\n scaffold2 = monitored_session.Scaffold()\n scaffold2.finalize()\n self.assertEqual(scaffold1.init_op, scaffold2.init_op)\n self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)\n self.assertEqual(scaffold1.ready_for_local_init_op,\n scaffold2.ready_for_local_init_op)\n self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)\n self.assertEqual(scaffold1.saver, scaffold2.saver)\n\n def test_raise_error_if_more_than_one_cached_item(self):\n with ops.Graph().as_default():\n variables.VariableV1([1])\n ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())\n ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())\n with self.assertRaisesRegexp(RuntimeError, 'More than one item'):\n monitored_session.Scaffold().finalize()\n\n def test_uses_passed_values(self):\n with ops.Graph().as_default():\n variables.VariableV1([1])\n saver = saver_lib.Saver()\n scaffold = monitored_session.Scaffold(\n init_op=2,\n init_feed_dict=3,\n init_fn=lambda scaffold, sess: 4,\n ready_op=5,\n ready_for_local_init_op=6,\n local_init_op=7,\n saver=saver)\n scaffold.finalize()\n self.assertEqual(2, scaffold.init_op)\n self.assertEqual(3, scaffold.init_feed_dict)\n self.assertTrue(callable(scaffold.init_fn))\n self.assertEqual(5, scaffold.ready_op)\n self.assertEqual(6, scaffold.ready_for_local_init_op)\n self.assertEqual(7, scaffold.local_init_op)\n self.assertEqual(saver, scaffold.saver)\n\n def test_graph_is_finalized(self):\n with ops.Graph().as_default():\n variables.VariableV1([1])\n monitored_session.Scaffold().finalize()\n with self.assertRaisesRegexp(RuntimeError,\n 'Graph is finalized and cannot be modified'):\n constant_op.constant([0])\n\n def test_new_scaffold_from_default_scaffold(self):\n scaffold1 = monitored_session.Scaffold()\n with ops.Graph().as_default():\n variables.VariableV1([1])\n saver = saver_lib.Saver()\n scaffold2 = monitored_session.Scaffold(\n init_op=2,\n init_feed_dict=3,\n init_fn=lambda scaffold, sess: 4,\n ready_op=5,\n ready_for_local_init_op=6,\n local_init_op=7,\n saver=saver,\n copy_from_scaffold=scaffold1)\n\n scaffold2.finalize()\n self.assertEqual(2, scaffold2.init_op)\n self.assertEqual(3, scaffold2.init_feed_dict)\n self.assertTrue(callable(scaffold2.init_fn))\n self.assertEqual(5, scaffold2.ready_op)\n self.assertEqual(6, 
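test_uses_passed_values and the copy_from_scaffold tests encode a precedence rule: pieces passed to the constructor survive finalize() unchanged, and copy_from_scaffold only fills the gaps. A hedged sketch of that rule (op and variable names are illustrative):

# Sketch only: explicit pieces win; copy_from_scaffold fills missing ones.
import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
  tf.Variable([1], name='v')
  saver = tf.train.Saver()
  base = tf.train.Scaffold(init_op=tf.no_op(name='my_init'))
  derived = tf.train.Scaffold(saver=saver, copy_from_scaffold=base)
  derived.finalize()
  assert derived.init_op.name == 'my_init'   # inherited from `base`
  assert derived.saver is saver              # explicitly passed, kept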
scaffold2.ready_for_local_init_op)\n self.assertEqual(7, scaffold2.local_init_op)\n self.assertEqual(saver, scaffold2.saver)\n\n def test_new_scaffold_from_existing_scaffold(self):\n with ops.Graph().as_default():\n variables.VariableV1([1])\n saver = saver_lib.Saver()\n scaffold1 = monitored_session.Scaffold(\n init_op=2,\n init_feed_dict=3,\n init_fn=lambda scaffold, sess: 4,\n ready_op=5,\n ready_for_local_init_op=6,\n local_init_op=7,\n saver=saver)\n\n scaffold2 = monitored_session.Scaffold(\n init_op=4,\n init_feed_dict=6,\n init_fn=lambda scaffold, sess: 8,\n ready_op=10,\n ready_for_local_init_op=12,\n local_init_op=14,\n saver=saver,\n copy_from_scaffold=scaffold1)\n\n scaffold2.finalize()\n self.assertEqual(4, scaffold2.init_op)\n self.assertEqual(6, scaffold2.init_feed_dict)\n self.assertTrue(callable(scaffold2.init_fn))\n self.assertEqual(10, scaffold2.ready_op)\n self.assertEqual(12, scaffold2.ready_for_local_init_op)\n self.assertEqual(14, scaffold2.local_init_op)\n self.assertEqual(saver, scaffold2.saver)\n\n def test_copy_from_scaffold_is_scaffold(self):\n with ops.Graph().as_default():\n with self.assertRaisesRegexp(\n TypeError, 'copy_from_scaffold is not a Scaffold instance'):\n monitored_session.Scaffold(copy_from_scaffold=1)\n\n\ndef _test_dir(temp_dir, test_name):\n \"\"\"Create an empty dir to use for tests.\n\n Args:\n temp_dir: Tmp directory path.\n test_name: Name of the test.\n\n Returns:\n Absolute path to the test directory.\n \"\"\"\n test_dir = os.path.join(temp_dir, test_name)\n if os.path.isdir(test_dir):\n for f in glob.glob('%s/*' % test_dir):\n os.remove(f)\n else:\n os.makedirs(test_dir)\n return test_dir\n\n\nclass FakeHook(session_run_hook.SessionRunHook):\n\n def __init__(self):\n self.should_stop = False\n self.request = None\n self.call_counter = collections.Counter()\n self.last_run_context = None\n self.last_run_values = None\n\n def begin(self):\n self.call_counter['begin'] += 1\n\n def after_create_session(self, session, coord): # pylint: disable=unused-argument\n self.call_counter['after_create_session'] += 1\n\n def before_run(self, run_context):\n self.call_counter['before_run'] += 1\n self.last_run_context = run_context\n return self.request\n\n def after_run(self, run_context, run_values):\n self.call_counter['after_run'] += 1\n self.last_run_values = run_values\n if self.should_stop:\n run_context.request_stop()\n\n def end(self, session):\n self.call_counter['end'] += 1\n\n\nclass MonitoredTrainingSessionTest(test.TestCase):\n \"\"\"Tests MonitoredTrainingSession.\"\"\"\n\n def test_saving_restoring_checkpoint(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(2, session.run(gstep))\n\n def test_save_checkpoint_steps(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_steps')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n with 
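FakeHook above simply counts its callbacks; the lifecycle it records is the public SessionRunHook contract: begin() once while the graph is still mutable, after_create_session() once per (re)created session, before_run()/after_run() around every run() call, and end() on a clean close. A runnable sketch of the same bookkeeping against the public API (TF 1.x assumed):

# Sketch: the SessionRunHook lifecycle, counted the way FakeHook does.
import tensorflow.compat.v1 as tf

class CountingHook(tf.train.SessionRunHook):
  def __init__(self):
    self.calls = {'begin': 0, 'after_create_session': 0,
                  'before_run': 0, 'after_run': 0, 'end': 0}

  def begin(self):
    self.calls['begin'] += 1

  def after_create_session(self, session, coord):
    self.calls['after_create_session'] += 1

  def before_run(self, run_context):
    self.calls['before_run'] += 1
    return None                     # no extra fetches or feeds requested

  def after_run(self, run_context, run_values):
    self.calls['after_run'] += 1

  def end(self, session):
    self.calls['end'] += 1

with tf.Graph().as_default():
  c = tf.constant(0)
  hook = CountingHook()
  with tf.train.MonitoredSession(hooks=[hook]) as sess:
    sess.run(c)
  assert hook.calls == {'begin': 1, 'after_create_session': 1,
                        'before_run': 1, 'after_run': 1, 'end': 1}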
monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n save_checkpoint_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(100):\n session.run(new_gstep)\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(100, session.run(gstep))\n\n def test_save_checkpoint_secs(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_secs')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n save_checkpoint_secs=0.1,\n log_step_count_steps=10) as session:\n session.run(new_gstep)\n time.sleep(0.2)\n for _ in range(10):\n session.run(new_gstep)\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(11, session.run(gstep))\n\n def test_summaries_steps(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n summary.scalar('my_summary_tag', new_gstep * 2)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n save_summaries_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(101):\n session.run(new_gstep)\n summaries = util_test.latest_summaries(logdir)\n tags = [s.summary.value[0].tag for s in summaries]\n self.assertIn('my_summary_tag', tags)\n self.assertIn('global_step/sec', tags)\n\n def test_summaries_secs(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n summary.scalar('my_summary_tag', new_gstep * 2)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n save_summaries_steps=None,\n save_summaries_secs=0.1,\n log_step_count_steps=10) as session:\n session.run(new_gstep)\n time.sleep(0.2)\n for _ in range(101):\n session.run(new_gstep)\n summaries = util_test.latest_summaries(logdir)\n tags = [s.summary.value[0].tag for s in summaries]\n self.assertIn('my_summary_tag', tags)\n self.assertIn('global_step/sec', tags)\n\n def test_custom_saving(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')\n fake_hook = FakeHook()\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n with monitored_session.MonitoredTrainingSession(\n is_chief=True,\n checkpoint_dir=logdir,\n chief_only_hooks=[fake_hook],\n save_checkpoint_secs=0) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n\n # Check whether custom hook called or not\n self.assertEqual(1, fake_hook.call_counter['begin'])\n # A restart will not find the checkpoint, since we didn't save.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(0, session.run(gstep))\n\n\nclass MockExtended(object):\n\n def __init__(self, between_graph, should_init, should_checkpoint,\n should_save_summary):\n 
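The four tests above are all about cadence: save_checkpoint_steps/save_checkpoint_secs and save_summaries_steps/save_summaries_secs are step- and time-based triggers for the same artifact (only one of each pair may be active), and log_step_count_steps controls the global_step/sec logging. Typical usage looks roughly like this (TF 1.x API; the directory path is illustrative):

# Sketch: checkpoint and summary cadence knobs of MonitoredTrainingSession.
import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
  gstep = tf.train.get_or_create_global_step()
  inc = tf.assign_add(gstep, 1)
  tf.summary.scalar('my_summary_tag', inc * 2)
  with tf.train.MonitoredTrainingSession(
      is_chief=True,
      checkpoint_dir='/tmp/mts_example',    # illustrative path
      save_checkpoint_steps=100,            # or save_checkpoint_secs=...
      save_summaries_steps=100,             # or save_summaries_secs=...
      log_step_count_steps=10) as session:
    for _ in range(101):
      session.run(inc)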
self.experimental_between_graph = between_graph\n self.experimental_should_init = should_init\n self.should_checkpoint = should_checkpoint\n self.should_save_summary = should_save_summary\n\n\nclass MockStrategy(object):\n\n def __init__(self,\n between_graph=False,\n should_init=True,\n should_checkpoint=None,\n should_save_summary=None):\n self.extended = MockExtended(between_graph, should_init, should_checkpoint,\n should_save_summary)\n\n\nclass MonitoredTrainingSessionWithDistributeCoordinatorTest(test.TestCase):\n \"\"\"Test distribute coordinator controls summary saving and checkpointing.\"\"\"\n\n def test_summary_hook_enabled(self):\n context = distribute_coordinator._WorkerContext(\n MockStrategy(should_save_summary=True), None, None, None)\n\n logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n summary.scalar('my_summary_tag', new_gstep * 2)\n with context, monitored_session.MonitoredTrainingSession(\n checkpoint_dir=logdir,\n save_summaries_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(101):\n session.run(new_gstep)\n\n summaries = util_test.latest_summaries(logdir)\n tags = [s.summary.value[0].tag for s in summaries]\n self.assertIn('my_summary_tag', tags)\n self.assertIn('global_step/sec', tags)\n\n def test_summary_hook_disabled(self):\n context = distribute_coordinator._WorkerContext(\n MockStrategy(should_save_summary=False), None, None, None)\n\n logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n summary.scalar('my_summary_tag', new_gstep * 2)\n with context, monitored_session.MonitoredTrainingSession(\n checkpoint_dir=logdir,\n save_summaries_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(101):\n session.run(new_gstep)\n\n # No summary is saved.\n summaries = util_test.latest_summaries(logdir)\n self.assertEqual(len(summaries), 0)\n\n def test_checkpoint_hook_enabled(self):\n context = distribute_coordinator._WorkerContext(\n MockStrategy(should_checkpoint=True), None, None, None)\n\n logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n with context, monitored_session.MonitoredTrainingSession(\n checkpoint_dir=logdir,\n save_checkpoint_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(100):\n session.run(new_gstep)\n\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredTrainingSession(\n is_chief=True, checkpoint_dir=logdir) as session:\n self.assertEqual(100, session.run(gstep))\n\n def test_checkpoint_hook_disabled(self):\n context = distribute_coordinator._WorkerContext(\n MockStrategy(should_checkpoint=False), None, None, None)\n\n logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n with context, monitored_session.MonitoredTrainingSession(\n checkpoint_dir=logdir,\n save_checkpoint_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(100):\n session.run(new_gstep)\n\n # No checkpoint is saved.\n checkpoint = checkpoint_management.latest_checkpoint(logdir)\n 
self.assertIsNone(checkpoint)\n\n def test_checkpoint_hook_enable_on_non_chief_with_collective_ops(self):\n strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()\n strategy.extended._is_chief = False\n\n context = distribute_coordinator._WorkerContext(strategy, None, 'worker', 1)\n\n logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n new_gstep = state_ops.assign_add(gstep, 1)\n with context, monitored_session.MonitoredTrainingSession(\n checkpoint_dir=logdir,\n save_checkpoint_steps=100,\n log_step_count_steps=10) as session:\n for _ in range(100):\n session.run(new_gstep)\n\n # No checkpoint is saved.\n checkpoint = checkpoint_management.latest_checkpoint(logdir)\n self.assertIsNone(checkpoint)\n\n # But saved to a temporary directory.\n checkpoint = checkpoint_management.latest_checkpoint(\n os.path.join(logdir, 'tmp_worker_1'))\n self.assertIsNotNone(checkpoint)\n\n\nclass StopAtNSession(monitored_session._WrappedSession):\n \"\"\"A wrapped session that stops at the N-th call to _check_stop.\"\"\"\n\n def __init__(self, sess, n):\n super(StopAtNSession, self).__init__(sess)\n self._count = n\n\n def _check_stop(self):\n if self._count == 0:\n return True\n self._count -= 1\n return False\n\n\nclass WrappedSessionTest(test.TestCase):\n \"\"\"_WrappedSession tests.\"\"\"\n\n @test_util.run_deprecated_v1\n def test_properties(self):\n with self.cached_session() as sess:\n constant_op.constant(0.0)\n wrapped_sess = monitored_session._WrappedSession(sess)\n self.assertEquals(sess.graph, wrapped_sess.graph)\n self.assertEquals(sess.sess_str, wrapped_sess.sess_str)\n\n @test_util.run_deprecated_v1\n def test_should_stop_on_close(self):\n with self.cached_session() as sess:\n wrapped_sess = monitored_session._WrappedSession(sess)\n self.assertFalse(wrapped_sess.should_stop())\n wrapped_sess.close()\n self.assertTrue(wrapped_sess.should_stop())\n\n @test_util.run_deprecated_v1\n def test_should_stop_uses_check_stop(self):\n with self.cached_session() as sess:\n wrapped_sess = StopAtNSession(sess, 3)\n self.assertFalse(wrapped_sess.should_stop())\n self.assertFalse(wrapped_sess.should_stop())\n self.assertFalse(wrapped_sess.should_stop())\n self.assertTrue(wrapped_sess.should_stop())\n\n @test_util.run_deprecated_v1\n def test_should_stop_delegates_to_wrapped_session(self):\n with self.cached_session() as sess:\n wrapped_sess0 = StopAtNSession(sess, 4)\n wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)\n self.assertFalse(wrapped_sess1.should_stop())\n self.assertFalse(wrapped_sess1.should_stop())\n self.assertFalse(wrapped_sess1.should_stop())\n self.assertFalse(wrapped_sess1.should_stop())\n self.assertTrue(wrapped_sess1.should_stop())\n\n @test_util.run_deprecated_v1\n def test_close_twice(self):\n with self.cached_session() as sess:\n wrapped_sess = monitored_session._WrappedSession(sess)\n wrapped_sess.close()\n self.assertTrue(wrapped_sess.should_stop())\n wrapped_sess.close()\n self.assertTrue(wrapped_sess.should_stop())\n\n @test_util.run_deprecated_v1\n def test_run(self):\n with self.cached_session() as sess:\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n self.assertEqual(42, sess.run(v, feed_dict={c: 42}))\n wrapped_sess = monitored_session._WrappedSession(sess)\n self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))\n\n\ndef busy_wait_for_coord_stop(coord):\n while not coord.should_stop():\n time.sleep(0.001)\n\n\nclass 
CoordinatedSessionTest(test.TestCase):\n \"\"\"_CoordinatedSession tests.\"\"\"\n\n @test_util.run_deprecated_v1\n def test_properties(self):\n with self.cached_session() as sess:\n constant_op.constant(0.0)\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertEquals(sess.graph, coord_sess.graph)\n self.assertEquals(sess.sess_str, coord_sess.sess_str)\n\n @test_util.run_deprecated_v1\n def test_run(self):\n with self.cached_session() as sess:\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))\n\n @test_util.run_deprecated_v1\n def test_should_stop_on_close(self):\n with self.cached_session() as sess:\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertFalse(coord_sess.should_stop())\n coord_sess.close()\n self.assertTrue(coord_sess.should_stop())\n\n @test_util.run_deprecated_v1\n def test_should_stop_on_coord_stop(self):\n with self.cached_session() as sess:\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertFalse(coord_sess.should_stop())\n coord.request_stop()\n self.assertTrue(coord_sess.should_stop())\n\n @test_util.run_deprecated_v1\n def test_dont_request_stop_on_exception_in_main_thread(self):\n with self.cached_session() as sess:\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n coord = coordinator.Coordinator()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertFalse(coord_sess.should_stop())\n self.assertEqual(0, coord_sess.run(c))\n self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))\n with self.assertRaisesRegexp(TypeError, 'None has invalid type'):\n coord_sess.run([None], feed_dict={c: 2})\n self.assertFalse(coord.should_stop())\n self.assertFalse(coord_sess.should_stop())\n\n @test_util.run_deprecated_v1\n def test_stop_threads_on_close_after_exception(self):\n with self.cached_session() as sess:\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n coord = coordinator.Coordinator()\n threads = [\n threading.Thread(\n target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)\n ]\n for t in threads:\n coord.register_thread(t)\n t.start()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n self.assertFalse(coord_sess.should_stop())\n for t in threads:\n self.assertTrue(t.is_alive())\n self.assertEqual(0, coord_sess.run(c))\n for t in threads:\n self.assertTrue(t.is_alive())\n self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))\n for t in threads:\n self.assertTrue(t.is_alive())\n with self.assertRaisesRegexp(TypeError, 'None has invalid type'):\n coord_sess.run([None], feed_dict={c: 2})\n coord_sess.close()\n for t in threads:\n self.assertFalse(t.is_alive())\n self.assertTrue(coord.should_stop())\n self.assertTrue(coord_sess.should_stop())\n\n def test_stop_threads_on_close(self):\n with self.cached_session() as sess:\n coord = coordinator.Coordinator()\n threads = [\n threading.Thread(\n target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)\n ]\n for t in threads:\n coord.register_thread(t)\n t.start()\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n coord_sess.close()\n for t in threads:\n self.assertFalse(t.is_alive())\n self.assertTrue(coord.should_stop())\n self.assertTrue(coord_sess.should_stop())\n\n 
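_CoordinatedSession glues a Session to a tf.train.Coordinator, and busy_wait_for_coord_stop above is the worker half of the standard coordinator protocol: loop on coord.should_stop(), and let whoever closes the session call request_stop(). The same protocol with only public API, as a minimal sketch:

# Sketch: the Coordinator stop protocol that _CoordinatedSession relies on.
import threading
import time
import tensorflow.compat.v1 as tf

def worker(coord):
  while not coord.should_stop():    # spin until a stop is requested
    time.sleep(0.001)

coord = tf.train.Coordinator()
threads = [threading.Thread(target=worker, args=(coord,)) for _ in range(3)]
for t in threads:
  t.start()
coord.request_stop()                # closing a coordinated session does this
coord.join(threads)                 # block until every worker has exited
assert not any(t.is_alive() for t in threads)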
@test_util.run_deprecated_v1\n def test_propagates_exception_trace(self):\n assertion = control_flow_ops.Assert(False, ['This should fail.'])\n with self.cached_session() as sess:\n coord = coordinator.Coordinator(clean_stop_exception_types=())\n coord_sess = monitored_session._CoordinatedSession(sess, coord)\n try:\n coord_sess.run([assertion])\n self.fail('No exception was raised by assertion.')\n except errors_impl.InvalidArgumentError:\n # Extract the name of the file where the exception was first raised.\n _, _, exc_traceback = sys.exc_info()\n tb = traceback.extract_tb(exc_traceback)\n exc_source_file = tb[-1][0]\n exc_source_basename = os.path.basename(exc_source_file)\n # If it's monitored_session.py then the original stack trace was not\n # correctly propagated.\n self.assertIn(\n exc_source_basename, ['session.py', 'monitored_session.py'],\n 'The exception was raised from an unrecognized file. This unit '\n 'test probably needs to be updated. Traceback:\\n%s\\n' % tb)\n self.assertEqual(\n exc_source_basename, 'session.py',\n 'Original stack trace was not propagated by MonitoredSession. '\n 'Traceback:\\n%s' % tb)\n\n\nclass AbortAtNSession(object):\n \"\"\"A mock session that aborts at the N-th run call.\"\"\"\n\n def __init__(self, sess, n):\n self._sess = sess\n self._count = n\n\n def close(self):\n pass\n\n def run(self, *args, **kwargs):\n if self._count == 0:\n raise errors_impl.AbortedError('Aborted at N', None, None)\n self._count -= 1\n return self._sess.run(*args, **kwargs)\n\n\nclass StopCoordinatorWithException(session_run_hook.SessionRunHook):\n \"\"\"With this hook Coordinator throws an exception after N-runs.\"\"\"\n\n def __init__(self, calls_before_stopping, exception_to_raise=None):\n self._started_the_side_thread_already = False\n self._lock = threading.Lock()\n self._stored_exception_event = threading.Event()\n self._calls_before_stopping = calls_before_stopping\n self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(\n None, None, 'Aborted at N'))\n\n def _maybe_stop_with_exception(self, coord):\n while True:\n with self._lock:\n if self._calls_before_stopping == 0:\n try:\n raise self._exception_to_raise\n except Exception as e: # pylint: disable=broad-except\n coord.request_stop(e)\n self._stored_exception_event.set()\n break\n\n def after_create_session(self, session, coord):\n if self._started_the_side_thread_already:\n return\n\n separate_thread = threading.Thread(\n target=self._maybe_stop_with_exception, args=(coord,))\n\n coord.register_thread(separate_thread)\n separate_thread.start()\n self._started_the_side_thread_already = True\n # Coordinator will take care of joining `separate_thread`.\n\n def after_run(self, run_context, run_values):\n stopping_now = False\n with self._lock:\n self._calls_before_stopping -= 1\n if self._calls_before_stopping == 0:\n stopping_now = True\n\n if stopping_now:\n self._stored_exception_event.wait()\n\n\nclass FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):\n \"\"\"With this hook training encounters an exception after N-runs.\"\"\"\n\n def __init__(self, calls_before_stopping):\n StopCoordinatorWithException.__init__(self, calls_before_stopping)\n self._coord = None\n\n def after_create_session(self, session, coord):\n self._coord = coord\n return StopCoordinatorWithException.after_create_session(\n self, session, coord)\n\n def after_run(self, run_context, run_values):\n StopCoordinatorWithException.after_run(self, run_context, run_values)\n try:\n # After a `run`, an 
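StopCoordinatorWithException leans on a Coordinator guarantee: an exception handed to request_stop() from a worker thread is stored and re-raised in the main thread by raise_requested_exception() (coord.join() does the same). A public-API sketch of that round trip, with an illustrative error:

# Sketch: exceptions given to request_stop() resurface in the main thread.
import tensorflow.compat.v1 as tf

coord = tf.train.Coordinator()
try:
  raise ValueError('failure inside a worker thread')   # illustrative error
except ValueError as e:
  coord.request_stop(e)            # stores the exception, flips should_stop()

assert coord.should_stop()
try:
  coord.raise_requested_exception()
except ValueError as e:
  print('re-raised in the main thread:', e)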
exception could have been stored inside the\n # coordinator.\n self._coord.raise_requested_exception()\n except errors_impl.AbortedError:\n # In real world, the main thread may or may not know about the exception\n # that stopped the coordinator. Because the coordinator has stopped, the\n # main thread could have gotten stuck as well (for example, the\n # coordinator was supposed to execute `FIFOQueue.enqueue` while the main\n # thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck,\n # the session is going to get garbage collected after some time with:\n raise errors_impl.CancelledError(None, None,\n 'Session got garbage-collected.')\n\n\nclass CountingSessionCreator(object):\n \"\"\"A creator that counts the number of created sessions.\"\"\"\n\n def __init__(self, session):\n self._initial_session = session\n # We only have one session per test case. We can't re-create it, thus\n # it shouldn't be closed.\n self._initial_session.close = lambda *args: None\n self._create_session_calls = 0\n\n @property\n def number_of_sessions_created(self):\n return self._create_session_calls\n\n def create_session(self):\n self._create_session_calls += 1\n return self._initial_session\n\n\nclass RecoverableSessionTest(test.TestCase):\n \"\"\"_RecoverableSession tests.\"\"\"\n\n class _SessionReturner(object):\n\n def __init__(self, sess):\n self._sess = sess\n\n def create_session(self):\n return self._sess\n\n @test_util.run_deprecated_v1\n def test_properties(self):\n with self.cached_session() as sess:\n constant_op.constant(0.0)\n recoverable_sess = monitored_session._RecoverableSession(\n self._SessionReturner(sess))\n self.assertEquals(sess.graph, recoverable_sess.graph)\n self.assertEquals(sess.sess_str, recoverable_sess.sess_str)\n\n @test_util.run_deprecated_v1\n def test_run(self):\n with self.cached_session() as sess:\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n recoverable_sess = monitored_session._RecoverableSession(\n self._SessionReturner(sess))\n self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))\n\n @test_util.run_deprecated_v1\n def test_recovery(self):\n with self.cached_session() as sess:\n\n class StackSessionCreator(object):\n\n def __init__(self, sess):\n self.sessions_to_use = [\n AbortAtNSession(sess, x + 1) for x in range(3)\n ]\n\n def create_session(self):\n return self.sessions_to_use.pop(0)\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n session_creator = StackSessionCreator(sess)\n # List of 3 sessions to use for recovery. 
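CountingSessionCreator shows the whole contract _RecoverableSession needs from a "creator": a create_session() method. User code normally fills that slot with tf.train.ChiefSessionCreator, and MonitoredSession calls create_session() again whenever a preemption-style error (AbortedError, UnavailableError) surfaces. A hedged sketch of the duck-typed hook-point (the wrapper class name is illustrative):

# Sketch: anything with create_session() can serve as a session_creator.
import tensorflow.compat.v1 as tf

class LoggingCreator(object):
  """Delegates to ChiefSessionCreator and counts (re)creations."""

  def __init__(self):
    self._inner = tf.train.ChiefSessionCreator()
    self.created = 0

  def create_session(self):
    self.created += 1               # incremented again on every recovery
    return self._inner.create_session()

with tf.Graph().as_default():
  c = tf.constant(7)
  creator = LoggingCreator()
  with tf.train.MonitoredSession(session_creator=creator) as sess:
    assert sess.run(c) == 7
  assert creator.created == 1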
The first one aborts\n # after 1 run() call, the second after 2 run calls, the third\n # after 3 run calls.\n self.assertEqual(3, len(session_creator.sessions_to_use))\n # Make the recoverable session use these 3 sessions in sequence by\n # passing a factory that pops from the sessions_to_use list.\n recoverable_sess = monitored_session._RecoverableSession(session_creator)\n self.assertEqual(\n 2, len(session_creator.sessions_to_use)) # One session popped.\n # Using first session.\n self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))\n self.assertEqual(\n 2, len(session_creator.sessions_to_use)) # Still 2 sessions available\n # This will fail and recover by picking up the second session.\n self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))\n self.assertEqual(\n 1, len(session_creator.sessions_to_use)) # Still 1 session available\n self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))\n self.assertEqual(\n 1, len(session_creator.sessions_to_use)) # Still 1 session available\n # This will fail and recover by picking up the last session.\n self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))\n self.assertEqual(\n 0, len(session_creator.sessions_to_use)) # All sessions used.\n self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))\n self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))\n # This will fail and throw a real error as the pop() will fail.\n with self.assertRaisesRegexp(IndexError, 'pop from empty list'):\n recoverable_sess.run(v, feed_dict={c: -12})\n\n @test_util.run_deprecated_v1\n def test_recovery_from_coordinator_exception(self):\n with self.cached_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = monitored_session.MonitoredSession(\n session_creator,\n [StopCoordinatorWithException(calls_before_stopping=2)])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run(v, feed_dict={c: 51}))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run(v, feed_dict={c: 42}))\n # Even though the coordinator was asked to stop, the underlying session is\n # recreated and is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n @test_util.run_deprecated_v1\n def test_recovery_from_non_preemption_in_coordinator(self):\n with self.cached_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n hook = StopCoordinatorWithException(\n calls_before_stopping=2,\n exception_to_raise=errors_impl.UnknownError(\n None, None, 'Some fatal exception inside the coordinator.'))\n session = monitored_session.MonitoredSession(session_creator, [hook])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run(v, feed_dict={c: 51}))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run(v, feed_dict={c: 42}))\n # The
coordinator was asked to stop due to non-redeemable error. Training\n # should stop and the session should not be recreated.\n self.assertTrue(session.should_stop())\n self.assertEqual(1, session_creator.number_of_sessions_created)\n with self.assertRaises(errors_impl.UnknownError):\n session.close()\n\n @test_util.run_deprecated_v1\n def test_recovery_from_session_getting_stuck(self):\n with self.cached_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = monitored_session.MonitoredSession(\n session_creator,\n [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n # Training will not fail, since it's the call number 0.\n self.assertEqual(51, session.run(v, feed_dict={c: 51}))\n self.assertFalse(session.should_stop())\n # Training will fail during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run(v, feed_dict={c: 42}))\n # Even though the coordinator stopped and training failed, the\n # underlying session is recreated and training is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n @test_util.run_deprecated_v1\n def test_step_fn_recovery_from_coordinator_exception_when_run_hooks(self):\n with self.cached_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = monitored_session.MonitoredSession(\n session_creator,\n [StopCoordinatorWithException(calls_before_stopping=2)])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n return step_fn\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # Even though the coordinator was asked to stop, the underlying session is\n # recreated and is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n @test_util.run_deprecated_v1\n def test_recovery_from_non_preemption_in_coordinator_when_run_hooks(self):\n with self.cached_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n hook = StopCoordinatorWithException(\n calls_before_stopping=2,\n exception_to_raise=errors_impl.UnknownError(\n None, None, 'Some fatal exception inside the coordinator.'))\n session = monitored_session.MonitoredSession(session_creator, [hook])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n return step_fn\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # The coordinator will
abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # The coordinator was asked to stop due to non-redeemable error. Training\n # should stop and the session should not be recreated.\n self.assertTrue(session.should_stop())\n self.assertEqual(1, session_creator.number_of_sessions_created)\n with self.assertRaises(errors_impl.UnknownError):\n session.close()\n\n @test_util.run_deprecated_v1\n def test_recovery_from_session_getting_stuck_when_run_hooks(self):\n with self.cached_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = monitored_session.MonitoredSession(\n session_creator,\n [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n return step_fn\n\n # Training will not fail, since it's the call number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # Training will fail during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # Even though the coordinator stopped and training failed, the\n # underlying session is recreated and training is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n def create_raw_session_with_failing_coordinator(self, session_creator, hook):\n \"\"\"Return MonitoredSession that triggers coordinator failures.\"\"\"\n session = monitored_session.MonitoredSession(session_creator, [hook])\n # We would like to test a situation where during fetches through the\n # raw session, the coordinator fails with an exception.
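The *_when_run_hooks variants exercise MonitoredSession.run_step_fn: user code receives a step_context whose run_with_hooks() behaves like session.run() with every attached hook applied, while step_context.session is the raw session that bypasses them. A minimal public-API sketch (TF 1.x assumed):

# Sketch: run_step_fn hands user code a step_context per step.
import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
  c = tf.placeholder(tf.int32)
  v = tf.identity(c)

  def step_fn(step_context):
    # Goes through before_run/after_run of any attached hooks;
    # step_context.session.run(...) would skip them instead.
    return step_context.run_with_hooks(fetches=v, feed_dict={c: 51})

  with tf.train.MonitoredSession() as session:
    assert session.run_step_fn(step_fn) == 51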
To do that, we\n # are going to use (raw_session + StopCoordinatorWithException) hook\n # combination that is stored in\n # `MonitoredSession._RecoverableSession._CoordinatedSession._sess`\n # at this point:\n session._tf_sess = lambda: session._sess._sess._sess\n # `run()` on such a session is equivalent to `run()` on the raw session\n # with separate coordinator threads independently stopping with an\n # exception.\n return session\n\n @test_util.run_deprecated_v1\n def test_step_fn_recovery_from_coordinator_exception_with_raw_session(self):\n with self.cached_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = self.create_raw_session_with_failing_coordinator(\n session_creator,\n StopCoordinatorWithException(calls_before_stopping=2))\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n\n def step_fn(step_context):\n return step_context.session.run(fetches=v, feed_dict={c: value})\n\n return step_fn\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # Even though the coordinator was asked to stop, the underlying session is\n # recreated and is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n @test_util.run_deprecated_v1\n def test_recovery_from_non_preemption_in_coordinator_with_raw_session(self):\n with self.cached_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = self.create_raw_session_with_failing_coordinator(\n session_creator,\n StopCoordinatorWithException(\n calls_before_stopping=2,\n exception_to_raise=errors_impl.UnknownError(\n None, None, 'Some fatal exception inside the coordinator.')))\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n\n return step_fn\n\n # The coordinator will not abort during this call, since it's the call\n # number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # The coordinator will abort during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # The coordinator was asked to stop due to non-redeemable error. 
Training\n # should stop and the session should not be recreated.\n self.assertTrue(session.should_stop())\n self.assertEqual(1, session_creator.number_of_sessions_created)\n with self.assertRaises(errors_impl.UnknownError):\n session.close()\n\n @test_util.run_deprecated_v1\n def test_recovery_from_session_getting_stuck_with_raw_session(self):\n with self.cached_session() as test_session:\n session_creator = CountingSessionCreator(test_session)\n session = self.create_raw_session_with_failing_coordinator(\n session_creator,\n FailTrainingAfterCoordinatorStopped(calls_before_stopping=2))\n\n self.assertEqual(1, session_creator.number_of_sessions_created)\n self.assertFalse(session.should_stop())\n\n c = constant_op.constant(0)\n v = array_ops.identity(c)\n\n def feed_step_fn(value):\n\n def step_fn(step_context):\n return step_context.run_with_hooks(fetches=v, feed_dict={c: value})\n\n return step_fn\n\n # Training will not fail, since it's the call number 0.\n self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))\n self.assertFalse(session.should_stop())\n # Training will fail during the next call, since it's the call\n # number 1.\n self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))\n # Even though the coordinator stopped and training failed, the\n # underlying session is recreated and training is to be continued.\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session_creator.number_of_sessions_created)\n\n\nclass FakeSession(monitored_session._WrappedSession):\n\n def __init__(self, sess):\n monitored_session._WrappedSession.__init__(self, sess)\n self.args_called = {}\n\n def run(self, fetches, **kwargs):\n self.args_called = dict(kwargs)\n # Call run only with fetches since we directly pass other arguments.\n return monitored_session._WrappedSession.run(self, fetches)\n\n\nclass HookedSessionTest(test.TestCase):\n \"\"\"Tests of _HookedSession.\"\"\"\n\n def testRunPassesAllArguments(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_run = FakeSession(sess)\n mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])\n a_tensor = constant_op.constant([0], name='a_tensor')\n self.evaluate(variables.global_variables_initializer())\n output = mon_sess.run(fetches=a_tensor,\n feed_dict='a_feed',\n options='an_option',\n run_metadata='a_metadata')\n self.assertEqual(output, [0])\n self.assertEqual(mock_run.args_called, {\n 'feed_dict': 'a_feed',\n 'options': 'an_option',\n 'run_metadata': 'a_metadata'\n })\n\n def testCallsHooksBeginEnd(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n self.evaluate(variables.global_variables_initializer())\n mon_sess.run(a_tensor)\n\n for hook in [mock_hook, mock_hook2]:\n self.assertEqual(\n hook.last_run_values,\n session_run_hook.SessionRunValues(\n results=None,\n options=config_pb2.RunOptions(),\n run_metadata=config_pb2.RunMetadata()))\n self.assertEqual(hook.last_run_context.original_args,\n session_run_hook.SessionRunArgs(a_tensor))\n self.assertEqual(hook.last_run_context.session, sess)\n self.assertEqual(hook.call_counter['begin'], 0)\n self.assertEqual(hook.call_counter['after_create_session'], 0)\n self.assertEqual(hook.call_counter['before_run'], 1)\n self.assertEqual(hook.call_counter['after_run'], 1)\n\n def testShouldStop(self):\n with
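The _HookedSession tests that follow pin down how hook-requested fetches and feeds are merged into each run() call (and that two parties feeding the same tensor raise 'Same tensor is fed'). The request side of that merge, using only the public hook API (class and tensor names are illustrative):

# Sketch: a hook adding its own fetches and feeds via SessionRunArgs.
import tensorflow.compat.v1 as tf

class FeedAndFetchHook(tf.train.SessionRunHook):
  def __init__(self, extra_fetch, feeds):
    self._extra_fetch = extra_fetch
    self._feeds = feeds
    self.last_result = None

  def before_run(self, run_context):
    # Merged with the caller's fetches/feed_dict for this run() call.
    return tf.train.SessionRunArgs(self._extra_fetch, feed_dict=self._feeds)

  def after_run(self, run_context, run_values):
    self.last_result = run_values.results   # value of the extra fetch

with tf.Graph().as_default():
  a = tf.placeholder_with_default(tf.constant(0), shape=[])
  b = tf.constant(5)
  hook = FeedAndFetchHook(extra_fetch=b, feeds={a: 1})
  with tf.train.MonitoredSession(hooks=[hook]) as sess:
    assert sess.run(a) == 1                 # the hook's feed was applied
    assert hook.last_result == 5            # the hook's fetch came back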
ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n constant_op.constant([0], name='a_tensor')\n self.evaluate(variables.global_variables_initializer())\n\n mon_sess.run(fetches='a_tensor')\n self.assertFalse(mon_sess.should_stop())\n\n mock_hook.should_stop = True\n mon_sess.run(fetches='a_tensor')\n self.assertTrue(mon_sess.should_stop())\n\n def testFetchesHookRequests(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n another_tensor = constant_op.constant([5], name='another_tensor')\n third_tensor = constant_op.constant([10], name='third_tensor')\n mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])\n mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])\n self.evaluate(variables.global_variables_initializer())\n\n output = mon_sess.run(fetches=a_tensor)\n self.assertEqual(output, [0])\n self.assertEqual(mock_hook.last_run_values.results, [5])\n self.assertEqual(mock_hook2.last_run_values.results, [10])\n\n def testOnlyHooksHaveFeeds(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n b_tensor = constant_op.constant([0], name='b_tensor')\n add_tensor = a_tensor + b_tensor\n mock_hook.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [5]})\n mock_hook2.request = session_run_hook.SessionRunArgs(\n None, feed_dict={b_tensor: [10]})\n self.evaluate(variables.global_variables_initializer())\n\n self.assertEqual(mon_sess.run(fetches=add_tensor), [15])\n\n def testBothHooksAndUserHaveFeeds(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n b_tensor = constant_op.constant([0], name='b_tensor')\n c_tensor = constant_op.constant([0], name='c_tensor')\n add_tensor = a_tensor + b_tensor + c_tensor\n mock_hook.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [5]})\n mock_hook2.request = session_run_hook.SessionRunArgs(\n None, feed_dict={b_tensor: [10]})\n self.evaluate(variables.global_variables_initializer())\n\n feed_dict = {c_tensor: [20]}\n self.assertEqual(\n mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])\n # User feed_dict should not be changed\n self.assertEqual(len(feed_dict), 1)\n\n def testHooksFeedConflicts(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n b_tensor = constant_op.constant([0], name='b_tensor')\n add_tensor = a_tensor + b_tensor\n mock_hook.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [5]})\n mock_hook2.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [10]})\n self.evaluate(variables.global_variables_initializer())\n\n with 
self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):\n mon_sess.run(fetches=add_tensor)\n\n def testHooksAndUserFeedConflicts(self):\n with ops.Graph().as_default(), session_lib.Session() as sess:\n mock_hook = FakeHook()\n mock_hook2 = FakeHook()\n mon_sess = monitored_session._HookedSession(\n sess=sess, hooks=[mock_hook, mock_hook2])\n a_tensor = constant_op.constant([0], name='a_tensor')\n b_tensor = constant_op.constant([0], name='b_tensor')\n add_tensor = a_tensor + b_tensor\n mock_hook.request = session_run_hook.SessionRunArgs(\n None, feed_dict={a_tensor: [5]})\n mock_hook2.request = session_run_hook.SessionRunArgs(\n None, feed_dict={b_tensor: [10]})\n self.evaluate(variables.global_variables_initializer())\n\n with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):\n mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})\n\n\nclass RaiseOnceAtCountN(session_run_hook.SessionRunHook):\n \"\"\"Hook that raises an Exception at step N.\"\"\"\n\n def __init__(self, n, ex):\n self.n = n\n self.ex = ex\n self.raised = False\n\n def before_run(self, run_context):\n # Raise the first time we reach step N.\n self.n -= 1\n if 0 == self.n and not self.raised:\n self.raised = True\n raise self.ex\n return None\n\n\nclass RunOptionsMetadataHook(session_run_hook.SessionRunHook):\n \"\"\"A hook that observes & optionally modifies RunOptions and RunMetadata.\"\"\"\n\n def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,\n debug_tensor_watch, report_tensor_allocations_upon_oom):\n self._trace_level = trace_level\n self._timeout_in_ms = timeout_in_ms\n self._output_partition_graphs = output_partition_graphs\n self._debug_tensor_watch = debug_tensor_watch\n self._report_tensor_allocations_upon_oom = (\n report_tensor_allocations_upon_oom)\n\n self.run_options_list = []\n self.run_metadata_list = []\n\n def before_run(self, run_context):\n options = config_pb2.RunOptions(\n trace_level=self._trace_level,\n timeout_in_ms=self._timeout_in_ms,\n output_partition_graphs=self._output_partition_graphs,\n report_tensor_allocations_upon_oom=self\n ._report_tensor_allocations_upon_oom)\n options.debug_options.debug_tensor_watch_opts.extend(\n [self._debug_tensor_watch])\n return session_run_hook.SessionRunArgs(None, None, options=options)\n\n def after_run(self, run_context, run_values):\n self.run_options_list.append(run_values.options)\n self.run_metadata_list.append(run_values.run_metadata)\n\n\nclass MonitoredSessionTest(test.TestCase):\n \"\"\"MonitoredSession tests.\"\"\"\n\n def test_defaults(self):\n with ops.Graph().as_default():\n a_var = variables.VariableV1(0)\n with monitored_session.MonitoredSession() as session:\n self.assertEqual(0, session.run(a_var))\n\n def test_last_step(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_last_step')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n # Run till step 3 and save.\n hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]\n with monitored_session.MonitoredSession(hooks=hooks) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertFalse(session.should_stop())\n self.assertEqual(1, session.run(do_step))\n self.assertFalse(session.should_stop())\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n self.assertEqual(3, session.run(do_step))\n self.assertTrue(session.should_stop())\n save_path = saver_lib._get_saver_or_default().save(\n session._coordinated_creator.tf_sess,\n 
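RunOptionsMetadataHook above checks the merge rules for per-run options; the everyday use of the same mechanism is a hook that switches on tracing and collects the resulting RunMetadata. A hedged public-API sketch (the class name is illustrative):

# Sketch: request RunOptions from a hook, read RunMetadata afterwards.
import tensorflow.compat.v1 as tf

class TracingHook(tf.train.SessionRunHook):
  def __init__(self):
    self.step_stats = []

  def before_run(self, run_context):
    opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    return tf.train.SessionRunArgs(None, options=opts)

  def after_run(self, run_context, run_values):
    # Populated because FULL_TRACE was requested above.
    self.step_stats.append(run_values.run_metadata.step_stats)

with tf.Graph().as_default():
  c = tf.constant(42)
  hook = TracingHook()
  with tf.train.MonitoredSession(hooks=[hook]) as sess:
    sess.run(c)
  assert len(hook.step_stats) == 1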
os.path.join(logdir, 'step-3'))\n # Run till step 5 and save.\n def load_ckpt(scaffold, sess):\n scaffold.saver.restore(sess, save_path)\n\n session_creator = monitored_session.ChiefSessionCreator(\n monitored_session.Scaffold(init_fn=load_ckpt))\n hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]\n with monitored_session.MonitoredSession(\n hooks=hooks, session_creator=session_creator) as session:\n self.assertEqual(3, session.run(gstep))\n self.assertFalse(session.should_stop())\n self.assertEqual(4, session.run(do_step))\n self.assertFalse(session.should_stop())\n self.assertEqual(5, session.run(do_step))\n self.assertTrue(session.should_stop())\n\n def test_num_steps(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n # Do 3 steps and save.\n hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]\n with monitored_session.MonitoredSession(hooks=hooks) as session:\n session.run(do_step)\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertTrue(session.should_stop())\n save_path = saver_lib._get_saver_or_default().save(\n session._coordinated_creator.tf_sess,\n os.path.join(logdir, 'step-3'))\n # Restore and do 4 steps.\n def load_ckpt(scaffold, sess):\n scaffold.saver.restore(sess, save_path)\n\n session_creator = monitored_session.ChiefSessionCreator(\n scaffold=monitored_session.Scaffold(init_fn=load_ckpt))\n hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]\n with monitored_session.MonitoredSession(\n hooks=hooks, session_creator=session_creator) as session:\n self.assertEqual(4, session.run(do_step))\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertFalse(session.should_stop())\n session.run(do_step)\n self.assertTrue(session.should_stop())\n\n # This set of tests, verifies the supervised session behavior when exceptions\n # are raised next to the innermost session run() call.\n\n @test_util.run_deprecated_v1\n def test_recovery(self):\n logdir = _test_dir(self.get_temp_dir(), 'test_recovery')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n scaffold = monitored_session.Scaffold()\n # Use a hook to save the model every 100 steps. 
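test_last_step versus test_num_steps is the difference between StopAtStepHook's two modes: last_step is an absolute global-step target (so a session restored at step 3 stops at 5 with last_step=5), while num_steps counts relative to wherever the session starts. A short sketch (TF 1.x API):

# Sketch: StopAtStepHook's absolute vs. relative stopping modes.
import tensorflow.compat.v1 as tf

with tf.Graph().as_default():
  gstep = tf.train.get_or_create_global_step()
  do_step = tf.assign_add(gstep, 1)
  # last_step=3: stop once the global step reaches 3 (absolute).
  # num_steps=3 would instead stop 3 steps after session creation.
  hook = tf.train.StopAtStepHook(last_step=3)
  with tf.train.MonitoredSession(hooks=[hook]) as session:
    while not session.should_stop():
      session.run(do_step)          # runs exactly three times here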
It also saves it at\n # the end.\n hooks = [\n basic_session_run_hooks.CheckpointSaverHook(\n logdir, save_steps=1, scaffold=scaffold)\n ]\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold, checkpoint_dir=logdir),\n hooks=hooks) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold, checkpoint_dir=logdir)) as session:\n self.assertEqual(2, session.run(gstep))\n # A restart will find the checkpoint and recover automatically.\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold,\n checkpoint_filename_with_path=checkpoint_management.\n latest_checkpoint(logdir))) as session:\n self.assertEqual(2, session.run(gstep))\n\n def test_retry_initialization_on_aborted_error(self):\n # Tests that we silently retry on abort during initialization.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n self.init_raised_aborted_error = False\n\n def _init_fn(scaffold, session):\n _, _ = scaffold, session\n if not self.init_raised_aborted_error:\n self.init_raised_aborted_error = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold=monitored_session.Scaffold(\n init_fn=_init_fn))) as session:\n self.assertFalse(session.should_stop())\n self.assertEqual(0, session.run(gstep))\n self.assertTrue(self.init_raised_aborted_error)\n\n def _retry_test(self, ex):\n # Tests that we silently retry on error. Note that this does not test\n # recovery as we do not use a CheckpointSaver in this test.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n hook = RaiseOnceAtCountN(4, ex)\n with monitored_session.MonitoredSession(hooks=[hook]) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n # Here at step 3, the hook triggers and raises AbortedError. The\n # MonitoredSession automatically retries and restart from a freshly\n # initialized session, so the step is back to 0 and running do_step\n # moves it to 1.\n self.assertEqual(1, session.run(do_step))\n self.assertFalse(session.should_stop())\n self.assertTrue(hook.raised)\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n\n def test_retry_on_aborted_error(self):\n self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))\n\n def test_retry_on_unavailable_error(self):\n self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))\n\n def test_recover_and_retry_on_aborted_error(self):\n # Tests that we silently retry and recover on abort. 
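test_recovery wires persistence by hand rather than through MonitoredTrainingSession: a CheckpointSaverHook writes checkpoints, and a ChiefSessionCreator pointed at the same directory restores them on the next start. A condensed sketch of that wiring (TF 1.x API; the directory path is illustrative):

# Sketch: manual CheckpointSaverHook + ChiefSessionCreator recovery loop.
import tensorflow.compat.v1 as tf

logdir = '/tmp/recovery_example'    # illustrative path
with tf.Graph().as_default():
  gstep = tf.train.get_or_create_global_step()
  do_step = tf.assign_add(gstep, 1)
  scaffold = tf.train.Scaffold()
  hooks = [tf.train.CheckpointSaverHook(logdir, save_steps=1,
                                        scaffold=scaffold)]
  creator = tf.train.ChiefSessionCreator(scaffold, checkpoint_dir=logdir)
  with tf.train.MonitoredSession(session_creator=creator,
                                 hooks=hooks) as session:
    session.run(do_step)
    session.run(do_step)
  # A fresh session against the same checkpoint_dir resumes at step 2.
  creator = tf.train.ChiefSessionCreator(scaffold, checkpoint_dir=logdir)
  with tf.train.MonitoredSession(session_creator=creator) as session:
    assert session.run(gstep) == 2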
This test uses\n # a CheckpointSaver to have something to recover from.\n logdir = _test_dir(self.get_temp_dir(),\n 'test_recover_and_retry_on_aborted_error')\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n scaffold = monitored_session.Scaffold()\n abort_hook = RaiseOnceAtCountN(\n 4, errors_impl.AbortedError(None, None, 'Abort'))\n # Save after each step.\n ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(\n logdir, save_steps=1, scaffold=scaffold)\n hooks = [abort_hook, ckpt_hook]\n with monitored_session.MonitoredSession(\n session_creator=monitored_session.ChiefSessionCreator(\n scaffold, checkpoint_dir=logdir),\n hooks=hooks) as session:\n self.assertEqual(0, session.run(gstep))\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n # Here at step 3, the hook triggers and raises AbortedError. The\n # MonitoredSession automatically restores and retries.\n self.assertEqual(3, session.run(do_step))\n self.assertTrue(abort_hook.raised)\n self.assertFalse(session.should_stop())\n self.assertEqual(4, session.run(do_step))\n self.assertFalse(session.should_stop())\n\n def test_exit_cleanly_on_out_of_range_exception(self):\n # Tests that we stop cleanly when OutOfRange is raised.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,\n 'EOI'))\n session = monitored_session.MonitoredSession(hooks=[hook])\n # session should cleanly exit from the context.\n with session:\n self.assertEqual(0, session.run(gstep))\n self.assertFalse(session.should_stop())\n # Here at step 1, the hook triggers and raises OutOfRange. The\n # session should go into should_stop() mode. It should raise the\n # exception. So next step should not be executed.\n session.run(do_step)\n self.assertTrue(False)\n self.assertTrue(session.should_stop())\n\n def test_exit_cleanly_on_stop_iteration_exception(self):\n # Tests that we stop cleanly when OutOfRange is raised.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n hook = RaiseOnceAtCountN(2, StopIteration)\n session = monitored_session.MonitoredSession(hooks=[hook])\n # session should cleanly exit from the context.\n with session:\n self.assertEqual(0, session.run(gstep))\n self.assertFalse(session.should_stop())\n # Here at step 1, the hook triggers and raises StopIteration. The\n # session should go into should_stop() mode. It should raise the\n # exception. 
So next step should not be executed.\n        session.run(do_step)\n        self.assertTrue(False)\n      self.assertTrue(session.should_stop())\n\n  def test_regular_exception_pass_through_run(self):\n    # Tests that regular exceptions just pass through a \"with\n    # MonitoredSession\" block and set the session in stop mode.\n    with ops.Graph().as_default():\n      gstep = variables_lib.get_or_create_global_step()\n      do_step = state_ops.assign_add(gstep, 1)\n      hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))\n      session = monitored_session.MonitoredSession(hooks=[hook])\n      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):\n        with session:\n          self.assertEqual(0, session.run(gstep))\n          self.assertEqual(1, session.run(do_step))\n          self.assertEqual(2, session.run(do_step))\n          self.assertFalse(session.should_stop())\n          # This triggers the hook and raises the exception\n          session.run(do_step)\n          # We should not hit this\n          self.assertFalse(True)\n      self.assertTrue(hook.raised)\n      self.assertTrue(session.should_stop())\n\n  def test_regular_exception_reported_to_coord_pass_through_run(self):\n    # Tests that regular exceptions reported to the coordinator from a thread\n    # pass through a \"run()\" call within a \"with MonitoredSession\" block and\n    # set the session in stop mode.\n    with ops.Graph().as_default():\n      gstep = variables_lib.get_or_create_global_step()\n      session = monitored_session.MonitoredSession()\n      run_performed_without_error = False\n      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):\n        with session:\n          self.assertEqual(0, session.run(gstep))\n          # Report an exception through the coordinator.\n          try:\n            raise RuntimeError('a thread wants to stop')\n          except RuntimeError as e:\n            session._coordinated_creator.coord.request_stop(e)\n          # Call run() which should perform normally.\n          self.assertEqual(0, session.run(gstep))\n          run_performed_without_error = True\n      self.assertTrue(run_performed_without_error)\n\n  def test_regular_exception_reported_to_coord_pass_through_return(self):\n    # Tests that regular exceptions reported to the coordinator from a thread\n    # pass through returning from a \"with MonitoredSession\" block and\n    # set the session in stop mode.\n    with ops.Graph().as_default():\n      gstep = variables_lib.get_or_create_global_step()\n      session = monitored_session.MonitoredSession()\n      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):\n        with session:\n          self.assertEqual(0, session.run(gstep))\n          # Report an exception through the coordinator.\n          try:\n            raise RuntimeError('a thread wants to stop')\n          except RuntimeError as e:\n            session._coordinated_creator.coord.request_stop(e)\n      self.assertTrue(session.should_stop())\n\n  # This set of tests verifies the session behavior when exceptions are raised\n  # from code inside a \"with MonitoredSession:\" context.\n\n  def test_stop_cleanly_when_no_exception_in_with_body(self):\n    # Tests that the session closes cleanly when the \"with\" body runs\n    # without error.\n    with ops.Graph().as_default():\n      gstep = variables_lib.get_or_create_global_step()\n      do_step = state_ops.assign_add(gstep, 1)\n      session = monitored_session.MonitoredSession()\n      with session:\n        self.assertEqual(1, session.run(do_step))\n        self.assertEqual(2, session.run(do_step))\n        self.assertFalse(session.should_stop())\n      # Should have closed.\n      self.assertTrue(session.should_stop())\n      self.assertTrue(session._is_closed())\n\n  def test_raises_regular_exceptions_in_with_body(self):\n    # Tests that regular exceptions in \"with body\" are seen outside.\n    with ops.Graph().as_default():\n      gstep = 
variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n session = monitored_session.MonitoredSession()\n # We should see that exception.\n with self.assertRaisesRegexp(RuntimeError, 'regular exception'):\n with session:\n self.assertEqual(1, session.run(do_step))\n self.assertEqual(2, session.run(do_step))\n self.assertFalse(session.should_stop())\n # Will be visible outside the \"with body\".\n raise RuntimeError('regular exception')\n # Should have closed.\n self.assertTrue(session.should_stop())\n self.assertTrue(session._is_closed())\n\n def test_graph(self):\n with ops.Graph().as_default() as g:\n with monitored_session.MonitoredSession() as session:\n self.assertEqual(g, session.graph)\n\n def test_graph_finalized_during_run_unfinalized_after_exit(self):\n with ops.Graph().as_default() as g:\n a_var = variables.VariableV1(0)\n with monitored_session.MonitoredSession() as session:\n self.assertEqual(0, session.run(a_var))\n self.assertTrue(g.finalized)\n self.assertFalse(g.finalized)\n\n def test_keep_finalized_graph_as_finalized(self):\n with ops.Graph().as_default() as g:\n a_var = variables.VariableV1(0)\n monitored_session.Scaffold().finalize()\n with monitored_session.MonitoredSession() as session:\n self.assertEqual(0, session.run(a_var))\n self.assertTrue(g.finalized)\n self.assertTrue(g.finalized)\n\n def test_merge_run_options_from_hooks(self):\n \"\"\"Test for rewriting RunOptions and observing RunMetadata with hooks.\"\"\"\n\n with ops.Graph().as_default():\n my_const = constant_op.constant(42, name='my_const')\n _ = constant_op.constant(24, name='my_const_2')\n\n watch_a = debug_pb2.DebugTensorWatch(\n node_name='my_const',\n output_slot=0,\n debug_ops=['DebugIdentity'],\n debug_urls=[])\n hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a, False)\n watch_b = debug_pb2.DebugTensorWatch(\n node_name='my_const_2',\n output_slot=0,\n debug_ops=['DebugIdentity'],\n debug_urls=[])\n hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b, True)\n with monitored_session.MonitoredSession(\n hooks=[hook_a, hook_b]) as session:\n self.assertEqual(42, session.run(my_const))\n\n # trace_level=3 should have overridden trace_level=2;\n # timeout_in_ms=60000 should have overridden 30000;\n # output_partition_graphs=True should have overridden False.\n # The two debug tensor watches should have been merged.\n self.assertEqual([\n config_pb2.RunOptions(\n trace_level=3,\n timeout_in_ms=60000,\n output_partition_graphs=True,\n debug_options=debug_pb2.DebugOptions(\n debug_tensor_watch_opts=[watch_a, watch_b]),\n report_tensor_allocations_upon_oom=True),\n ], hook_b.run_options_list)\n self.assertEqual(1, len(hook_b.run_metadata_list))\n self.assertTrue(\n isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))\n self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)\n\n def test_merge_caller_and_hook_run_options(self):\n \"\"\"Test that RunOptions from caller and hooks can be merged properly.\"\"\"\n\n with ops.Graph().as_default():\n my_const = constant_op.constant(42, name='my_const')\n _ = constant_op.constant(24, name='my_const_2')\n\n hook_watch = debug_pb2.DebugTensorWatch(\n node_name='my_const_2',\n output_slot=0,\n debug_ops=['DebugIdentity'],\n debug_urls=[])\n hook = RunOptionsMetadataHook(2, 60000, False, hook_watch, False)\n with monitored_session.MonitoredSession(hooks=[hook]) as session:\n caller_watch = debug_pb2.DebugTensorWatch(\n node_name='my_const',\n output_slot=0,\n debug_ops=['DebugIdentity'],\n 
debug_urls=[])\n        caller_options = config_pb2.RunOptions(\n            trace_level=3,\n            timeout_in_ms=30000,\n            output_partition_graphs=True,\n            report_tensor_allocations_upon_oom=True)\n        caller_options.debug_options.debug_tensor_watch_opts.extend(\n            [caller_watch])\n        self.assertEqual(42, session.run(my_const, options=caller_options))\n\n        # trace_level=3 from the caller should override 2 from the hook.\n        # timeout_in_ms=60000 from the hook should override 30000 from the\n        # caller.\n        # output_partition_graphs=True from the caller should override False\n        # from the hook.\n        # The two debug watches from the caller and the hook should be merged,\n        # in that order.\n        self.assertEqual([\n            config_pb2.RunOptions(\n                trace_level=3,\n                timeout_in_ms=60000,\n                output_partition_graphs=True,\n                debug_options=debug_pb2.DebugOptions(\n                    debug_tensor_watch_opts=[caller_watch, hook_watch]),\n                report_tensor_allocations_upon_oom=True),\n        ], hook.run_options_list)\n        self.assertEqual(1, len(hook.run_metadata_list))\n        self.assertTrue(\n            isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))\n        self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)\n\n  @test_util.run_deprecated_v1\n  def test_with_statement_and_close(self):\n    # Test case for https://github.com/tensorflow/tensorflow/issues/12224\n    # where close() inside the with should have a better error message.\n    with self.assertRaisesRegexp(RuntimeError, 'Session is already closed'):\n      with monitored_session.MonitoredSession() as session:\n        session.close()\n\n  def test_step_fn_example(self):\n    with ops.Graph().as_default():\n      c = array_ops.placeholder(dtypes.float32)\n      v = array_ops.identity(c)\n\n      def step_fn(step_context):\n        value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n        return value\n\n      with monitored_session.MonitoredSession() as session:\n        self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)\n\n  def test_step_function_stops(self):\n    with ops.Graph().as_default():\n\n      def step_fn(step_context):\n        step_context.request_stop()\n\n      with monitored_session.MonitoredSession() as session:\n        self.assertEqual(None, session.run_step_fn(step_fn))\n        self.assertTrue(session.should_stop())\n\n  def test_step_request_stop_without_a_with_block(self):\n    with ops.Graph().as_default():\n      was_stop_iteration_raised = False\n\n      def step_fn(step_context):\n        step_context.request_stop()\n\n      session = monitored_session.MonitoredSession()\n      try:\n        self.assertEqual(None, session.run_step_fn(step_fn))\n      except StopIteration:\n        was_stop_iteration_raised = True\n\n      self.assertTrue(was_stop_iteration_raised)\n      self.assertFalse(session.should_stop())\n\n  def test_step_request_stop_in_a_loop(self):\n    with ops.Graph().as_default():\n      def step_fn(step_context):\n        step_context.request_stop()\n\n      with monitored_session.MonitoredSession() as session:\n        while not session.should_stop():\n          _ = session.run_step_fn(step_fn)\n        self.fail('An exception should be raised on the line above.')\n\n  def test_step_request_stop_with_returning_a_type(self):\n    with ops.Graph().as_default():\n\n      def step_fn(step_context):\n        del step_context\n        return 'a type'\n\n      with monitored_session.MonitoredSession() as session:\n        self.assertEqual('a type', session.run_step_fn(step_fn))\n\n  def test_step_with_extra_arguments(self):\n    with ops.Graph().as_default():\n\n      def step_fn(step_context, extra_foo):\n        del step_context, extra_foo\n\n      with monitored_session.MonitoredSession() as session:\n        with self.assertRaisesRegexp(\n            ValueError,\n            '`step_fn` may either have one `step_context` argument'):\n          
self.assertEqual(None, session.run_step_fn(step_fn))\n\n  def test_step_fn_belongs_to_a_class(self):\n    with ops.Graph().as_default():\n      c = array_ops.placeholder(dtypes.float32)\n      v = array_ops.identity(c)\n\n      class Model(object):\n\n        def step_fn(self, step_context):\n          return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n\n      with monitored_session.MonitoredSession() as session:\n        model = Model()\n        self.assertNear(3.2, session.run_step_fn(model.step_fn), 0.1)\n\n  def test_step_fn_belongs_to_a_class_and_has_extra_methods(self):\n    with ops.Graph().as_default():\n\n      class Model(object):\n\n        def step_fn(self, step_context, extra_foo):\n          del step_context, extra_foo\n\n      with monitored_session.MonitoredSession() as session:\n        with self.assertRaisesRegexp(\n            ValueError,\n            '`step_fn` may either have one `step_context` argument'):\n          model = Model()\n          self.assertEqual(None, session.run_step_fn(model.step_fn))\n\n  def test_step_fn_with_hooks(self):\n    with ops.Graph().as_default():\n      var = resource_variable_ops.ResourceVariable(0.0)\n\n      # This test highlights the interaction of hooks with\n      # `MonitoredSession.run_step_fn`. The order of execution of operations\n      # below is:\n      # 0. stage_0\n      # 1. stage_1_0 or stage_1_1 in an undefined order\n      # 2. stage_2\n\n      stage_0 = state_ops.assign_add(var, 0.3)\n      stage_1_0 = state_ops.assign_add(var, 0.7)\n      # The order of `stage_1_0` and `stage_1_1` is undefined by\n      # `MonitoredSession`, but we should be able to assert when both of them\n      # are complete. To obtain a consistent result of adding two different\n      # constants to `var`, we rely on a control dependency and\n      # `ResourceVariable`. Otherwise, it is possible that one of the\n      # additions overwrites the result of the other addition.\n      with ops.control_dependencies([stage_1_0]):\n        stage_1_1 = state_ops.assign_add(var, 0.5)\n      stage_2 = state_ops.assign_add(var, 1.1)\n\n      class Hook(session_run_hook.SessionRunHook):\n\n        def __init__(self, testing):\n          self._testing = testing\n\n        def before_run(self, run_context):\n          return session_run_hook.SessionRunArgs(fetches=stage_1_0)\n\n        def after_run(self, run_context, run_values):\n          self._testing.assertNear(0.3 + 0.5 + 0.7,\n                                   run_context.session.run(var), 0.1)\n          self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,\n                                   run_context.session.run(stage_2), 0.1)\n\n      def step_fn(step_context):\n        self.assertNear(0.3, step_context.session.run(stage_0), 0.1)\n        return step_context.run_with_hooks(fetches=stage_1_1)\n\n      with monitored_session.MonitoredSession(hooks=[Hook(self)]) as session:\n        self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))\n\n  def test_step_fn_has_the_same_hooks_behavior_without_recovery(self):\n    with ops.Graph().as_default():\n      var = resource_variable_ops.ResourceVariable(0.0)\n\n      stage_0 = state_ops.assign_add(var, 0.3)\n      stage_1_0 = state_ops.assign_add(var, 0.7)\n      with ops.control_dependencies([stage_1_0]):\n        stage_1_1 = state_ops.assign_add(var, 0.5)\n      stage_2 = state_ops.assign_add(var, 1.1)\n\n      class Hook(session_run_hook.SessionRunHook):\n\n        def __init__(self, testing):\n          self._testing = testing\n\n        def before_run(self, run_context):\n          return session_run_hook.SessionRunArgs(fetches=stage_1_0)\n\n        def after_run(self, run_context, run_values):\n          self._testing.assertNear(0.3 + 0.5 + 0.7,\n                                   run_context.session.run(var), 0.1)\n          self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,\n                                   run_context.session.run(stage_2), 0.1)\n\n      def step_fn(step_context):\n        self.assertNear(0.3, step_context.session.run(stage_0), 0.1)\n        return 
step_context.run_with_hooks(fetches=stage_1_1)\n\n with monitored_session.SingularMonitoredSession(\n hooks=[Hook(self)]) as session:\n self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))\n\n def test_step_fn_with_hooks_and_request_stop(self):\n with ops.Graph().as_default():\n trace_the_hook = {'before_run': False, 'after_run': False}\n\n class Hook(session_run_hook.SessionRunHook):\n\n def before_run(self, run_context):\n trace_the_hook['before_run'] = True\n\n def after_run(self, run_context, run_values):\n trace_the_hook['after_run'] = True\n\n def step_fn(step_context):\n step_context.request_stop()\n\n with monitored_session.MonitoredSession(hooks=[Hook()]) as session:\n self.assertEqual(None, session.run_step_fn(step_fn))\n self.assertTrue(session.should_stop())\n # `step_context.request_stop()` in a step_fn interrupts the flow of\n # running the hooks.\n self.assertFalse(trace_the_hook['before_run'])\n self.assertFalse(trace_the_hook['after_run'])\n\n def test_recovers_from_an_exception_in_step_fn(self):\n trace_the_exception = {'run_already': False}\n\n with ops.Graph().as_default():\n c = array_ops.placeholder(dtypes.float32)\n v = array_ops.identity(c)\n\n def step_fn(step_context):\n if not trace_the_exception['run_already']:\n trace_the_exception['run_already'] = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n\n with monitored_session.MonitoredSession() as session:\n self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)\n self.assertTrue(trace_the_exception['run_already'])\n\n def test_recovers_from_an_exception_in_step_fn_after_hooks(self):\n trace_the_exception = {'run_already': False, 'side_effect_counter': 0}\n\n with ops.Graph().as_default():\n c = array_ops.placeholder(dtypes.float32)\n v = array_ops.identity(c)\n graph_state = variables.VariableV1(0.0)\n graph_side_effect = state_ops.assign_add(graph_state, 0.31)\n\n def step_fn(step_context):\n trace_the_exception['side_effect_counter'] += 1\n step_context.session.run(graph_side_effect)\n\n value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n\n if not trace_the_exception['run_already']:\n trace_the_exception['run_already'] = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n return value\n\n with self.cached_session() as test_session:\n with monitored_session.MonitoredSession(\n CountingSessionCreator(test_session)) as session:\n session.run(variables.global_variables_initializer())\n\n self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)\n self.assertTrue(trace_the_exception['run_already'])\n # Make sure the rest of the body of the step_fn is re-executed upon\n # AbortedError:\n self.assertEqual(2, trace_the_exception['side_effect_counter'])\n self.assertNear(0.62, session.run(graph_state), 0.1)\n\n def test_step_fn_doesnt_recover_when_it_wasnt_asked_to(self):\n trace_the_exception = {'run_already': False}\n\n with ops.Graph().as_default():\n c = array_ops.placeholder(dtypes.float32)\n v = array_ops.identity(c)\n\n def step_fn(step_context):\n if not trace_the_exception['run_already']:\n trace_the_exception['run_already'] = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})\n return value\n\n with monitored_session.SingularMonitoredSession() as session:\n with self.assertRaisesRegexp(errors_impl.AbortedError, 'Abort'):\n self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)\n self.fail()\n\n 
self.assertTrue(trace_the_exception['run_already'])\n\n def test_step_fn_exception_from_before_run(self):\n trace_the_exception = {'run_already': False, 'side_effect_counter': 0}\n\n with ops.Graph().as_default():\n c = array_ops.placeholder(dtypes.float32)\n v = array_ops.identity(c)\n vv = constant_op.constant(3.2)\n graph_state = variables.VariableV1(0.0)\n graph_side_effect = state_ops.assign_add(graph_state, 0.31)\n\n class Hook(session_run_hook.SessionRunHook):\n\n def __init__(self, testing):\n self._testing = testing\n\n def before_run(self, run_context):\n if not trace_the_exception['run_already']:\n trace_the_exception['run_already'] = True\n raise errors_impl.AbortedError(None, None, 'Abort')\n return session_run_hook.SessionRunArgs(fetches=vv)\n\n def after_run(self, run_context, run_values):\n self._testing.assertNear(3.2, run_values.results, 0.1)\n\n def step_fn(step_context):\n trace_the_exception['side_effect_counter'] += 1\n step_context.session.run(graph_side_effect)\n return step_context.run_with_hooks(fetches=v, feed_dict={c: 1.3})\n\n with self.cached_session() as test_session:\n with monitored_session.MonitoredSession(\n CountingSessionCreator(test_session),\n hooks=[Hook(self)]) as session:\n test_session.run(variables.global_variables_initializer())\n self.assertNear(1.3, session.run_step_fn(step_fn), 0.1)\n self.assertEqual(2, trace_the_exception['side_effect_counter'])\n self.assertNear(0.62, session.run(graph_state), 0.1)\n\n\nclass SingularMonitoredSessionTest(test.TestCase):\n \"\"\"Tests SingularMonitoredSession.\"\"\"\n\n def test_handles_initialization(self):\n with ops.Graph().as_default():\n a_var = variables.VariableV1(0)\n with monitored_session.SingularMonitoredSession() as session:\n # If it's not initialized, following statement raises an error.\n self.assertEqual(0, session.run(a_var))\n\n def test_do_not_handle_aborted_error(self):\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n\n class _RaiseAbortedHook(session_run_hook.SessionRunHook):\n\n def before_run(self, run_context):\n raise errors_impl.AbortedError(None, None, 'Abort')\n\n with monitored_session.SingularMonitoredSession(\n hooks=[_RaiseAbortedHook()]) as session:\n with self.assertRaises(errors_impl.AbortedError):\n self.assertEqual(0, session.run(gstep))\n\n with self.assertRaises(errors_impl.AbortedError):\n with monitored_session.SingularMonitoredSession(\n hooks=[_RaiseAbortedHook()]) as session:\n self.assertEqual(0, session.run(gstep))\n\n def test_exit_cleanly_on_out_of_range_exception(self):\n # Tests that we stop cleanly when OutOfRange is raised.\n with ops.Graph().as_default():\n gstep = variables_lib.get_or_create_global_step()\n do_step = state_ops.assign_add(gstep, 1)\n hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,\n 'EOI'))\n session = monitored_session.SingularMonitoredSession(hooks=[hook])\n # session should cleanly exit from the context.\n with session:\n self.assertEqual(0, session.run(gstep))\n self.assertFalse(session.should_stop())\n # Here at step 1, the hook triggers and raises OutOfRange. The\n # session should go into should_stop() mode. It should raise the\n # exception. 
So next step should not be executed.\n        session.run(do_step)\n        self.assertTrue(False)\n      self.assertTrue(session.should_stop())\n\n  def test_regular_exception_reported_to_coord_pass_through_run(self):\n    # Tests that regular exceptions reported to the coordinator from a thread\n    # pass through a \"run()\" call within a \"with MonitoredSession\" block and\n    # set the session in stop mode.\n    with ops.Graph().as_default():\n      gstep = variables_lib.get_or_create_global_step()\n      session = monitored_session.SingularMonitoredSession()\n      run_performed_without_error = False\n      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):\n        with session:\n          self.assertEqual(0, session.run(gstep))\n          # Report an exception through the coordinator.\n          try:\n            raise RuntimeError('a thread wants to stop')\n          except RuntimeError as e:\n            session._coordinated_creator.coord.request_stop(e)\n          # Call run() which should perform normally.\n          self.assertEqual(0, session.run(gstep))\n          run_performed_without_error = True\n      self.assertTrue(run_performed_without_error)\n\n  def test_stop_cleanly_when_no_exception_in_with_body(self):\n    # Tests that the session closes cleanly when the \"with\" body runs\n    # without error.\n    with ops.Graph().as_default():\n      gstep = variables_lib.get_or_create_global_step()\n      do_step = state_ops.assign_add(gstep, 1)\n      session = monitored_session.SingularMonitoredSession()\n      with session:\n        self.assertEqual(1, session.run(do_step))\n        self.assertEqual(2, session.run(do_step))\n        self.assertFalse(session.should_stop())\n      # Should have closed.\n      self.assertTrue(session.should_stop())\n      self.assertEqual(None, session.raw_session())\n\n  def test_graph(self):\n    with ops.Graph().as_default() as g:\n      with monitored_session.SingularMonitoredSession() as session:\n        self.assertEqual(g, session.graph)\n\n  def test_raw_session(self):\n    with ops.Graph().as_default():\n      with monitored_session.SingularMonitoredSession() as session:\n        self.assertTrue(isinstance(session.raw_session(), session_lib.Session))\n\n\nif __name__ == '__main__':\n  test.main()\n" ]
[ [ "tensorflow.python.training.monitored_session.MonitoredSession", "tensorflow.python.summary.summary.scalar", "tensorflow.python.training.monitored_session._WrappedSession.__init__", "tensorflow.python.training.monitored_session._RecoverableSession", "tensorflow.python.training.saver.Saver", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.training.checkpoint_management.latest_checkpoint", "tensorflow.python.framework.ops.Graph", "tensorflow.python.framework.errors_impl.AbortedError", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.training.monitored_session.SingularMonitoredSession", "tensorflow.python.training.monitored_session._CoordinatedSession", "tensorflow.python.client.session.Session", "tensorflow.python.distribute.collective_all_reduce_strategy.CollectiveAllReduceStrategy", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.framework.errors_impl.UnavailableError", "tensorflow.python.framework.errors_impl.UnknownError", "tensorflow.python.training.session_run_hook.SessionRunArgs", "tensorflow.python.framework.errors_impl.CancelledError", "tensorflow.python.ops.control_flow_ops.Assert", "tensorflow.python.training.monitored_session._WrappedSession.run", "tensorflow.python.training.monitored_session.MonitoredTrainingSession", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.training.monitored_session.Scaffold", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.core.protobuf.debug_pb2.DebugTensorWatch", "tensorflow.python.training.monitored_session._WrappedSession", "tensorflow.python.training.coordinator.Coordinator", "tensorflow.python.training.basic_session_run_hooks.CheckpointSaverHook", "tensorflow.contrib.testing.python.framework.util_test.latest_summaries", "tensorflow.python.training.monitored_session.ChiefSessionCreator", "tensorflow.python.training.basic_session_run_hooks.StopAtStepHook", "tensorflow.python.training.saver._get_saver_or_default", "tensorflow.python.training.monitored_session._HookedSession", "tensorflow.python.distribute.distribute_coordinator._WorkerContext", "tensorflow.python.platform.test.main", "tensorflow.python.ops.resource_variable_ops.ResourceVariable", "tensorflow.contrib.framework.python.ops.variables.get_or_create_global_step", "tensorflow.python.framework.errors_impl.OutOfRangeError", "tensorflow.python.ops.variables.VariableV1", "tensorflow.core.protobuf.debug_pb2.DebugOptions" ] ]
mdbernard/manim
[ "213cdede00fc1f90d1b22473112c3c4808b98349" ]
[ "manimlib/constants.py" ]
[ "import numpy as np\nimport os\n\n# Initialize directories\nenv_MEDIA_DIR = os.getenv(\"MEDIA_DIR\")\nif env_MEDIA_DIR:\n MEDIA_DIR = env_MEDIA_DIR\nelif os.path.isfile(\"media_dir.txt\"):\n with open(\"media_dir.txt\", 'rU') as media_file:\n MEDIA_DIR = media_file.readline().strip()\nelse:\n MEDIA_DIR = os.path.join(\n os.path.expanduser('~'),\n \"Dropbox (3Blue1Brown)/3Blue1Brown Team Folder\"\n )\nif not os.path.isdir(MEDIA_DIR):\n MEDIA_DIR = \"media\"\n # print(\n # f\"Media will be stored in {MEDIA_DIR + os.sep}. You can change \"\n # \"this behavior by writing a different directory to media_dir.txt.\"\n # )\n\nVIDEO_DIR = os.path.join(MEDIA_DIR, \"videos\")\nRASTER_IMAGE_DIR = os.path.join(MEDIA_DIR, \"designs\", \"raster_images\")\nSVG_IMAGE_DIR = os.path.join(MEDIA_DIR, \"designs\", \"svg_images\")\nSOUND_DIR = os.path.join(MEDIA_DIR, \"designs\", \"sounds\")\n###\nTHIS_DIR = os.path.dirname(os.path.realpath(__file__))\nFILE_DIR = os.path.join(os.getenv(\"FILE_DIR\", default=THIS_DIR), \"files\")\nTEX_DIR = os.path.join(FILE_DIR, \"Tex\")\n# These two may be depricated now.\nMOBJECT_DIR = os.path.join(FILE_DIR, \"mobjects\")\nIMAGE_MOBJECT_DIR = os.path.join(MOBJECT_DIR, \"image\")\n\nfor folder in [FILE_DIR, RASTER_IMAGE_DIR, SVG_IMAGE_DIR, VIDEO_DIR,\n TEX_DIR, MOBJECT_DIR, IMAGE_MOBJECT_DIR]:\n if not os.path.exists(folder):\n os.makedirs(folder)\n\nTEX_USE_CTEX = False\nTEX_TEXT_TO_REPLACE = \"YourTextHere\"\nTEMPLATE_TEX_FILE = os.path.join(\n THIS_DIR, \"tex_template.tex\" if not TEX_USE_CTEX\n else \"ctex_template.tex\"\n)\nwith open(TEMPLATE_TEX_FILE, \"r\") as infile:\n TEMPLATE_TEXT_FILE_BODY = infile.read()\n TEMPLATE_TEX_FILE_BODY = TEMPLATE_TEXT_FILE_BODY.replace(\n TEX_TEXT_TO_REPLACE,\n \"\\\\begin{align*}\\n\" + TEX_TEXT_TO_REPLACE + \"\\n\\\\end{align*}\",\n )\n\nHELP_MESSAGE = \"\"\"\n Usage:\n python extract_scene.py <module> [<scene name>]\n -p preview in low quality\n -s show and save picture of last frame\n -w write result to file [this is default if nothing else is stated]\n -o <file_name> write to a different file_name\n -l use low quality\n -m use medium quality\n -a run and save every scene in the script, or all args for the given scene\n -q don't print progress\n -f when writing to a movie file, export the frames in png sequence\n -t use transperency when exporting images\n -n specify the number of the animation to start from\n -r specify a resolution\n -c specify a background color\n\"\"\"\nSCENE_NOT_FOUND_MESSAGE = \"\"\"\n {} is not in the script\n\"\"\"\nCHOOSE_NUMBER_MESSAGE = \"\"\"\nChoose number corresponding to desired scene/arguments.\n(Use comma separated list for multiple entries)\nChoice(s): \"\"\"\nINVALID_NUMBER_MESSAGE = \"Fine then, if you don't want to give a valid number I'll just quit\"\n\nNO_SCENE_MESSAGE = \"\"\"\n There are no scenes inside that module\n\"\"\"\n\n# There might be other configuration than pixel shape later...\nPRODUCTION_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 1440,\n \"pixel_width\": 2560,\n \"frame_rate\": 60,\n}\n\nHIGH_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 1080,\n \"pixel_width\": 1920,\n \"frame_rate\": 30,\n}\n\nMEDIUM_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 720,\n \"pixel_width\": 1280,\n \"frame_rate\": 30,\n}\n\nLOW_QUALITY_CAMERA_CONFIG = {\n \"pixel_height\": 480,\n \"pixel_width\": 854,\n \"frame_rate\": 15,\n}\n\nDEFAULT_PIXEL_HEIGHT = PRODUCTION_QUALITY_CAMERA_CONFIG[\"pixel_height\"]\nDEFAULT_PIXEL_WIDTH = PRODUCTION_QUALITY_CAMERA_CONFIG[\"pixel_width\"]\nDEFAULT_FRAME_RATE = 
60\n\nDEFAULT_POINT_DENSITY_2D = 25\nDEFAULT_POINT_DENSITY_1D = 250\n\nDEFAULT_STROKE_WIDTH = 4\n\nFRAME_HEIGHT = 8.0\nFRAME_WIDTH = FRAME_HEIGHT * DEFAULT_PIXEL_WIDTH / DEFAULT_PIXEL_HEIGHT\nFRAME_Y_RADIUS = FRAME_HEIGHT / 2\nFRAME_X_RADIUS = FRAME_WIDTH / 2\n\nSMALL_BUFF = 0.1\nMED_SMALL_BUFF = 0.25\nMED_LARGE_BUFF = 0.5\nLARGE_BUFF = 1\n\nDEFAULT_MOBJECT_TO_EDGE_BUFFER = MED_LARGE_BUFF\nDEFAULT_MOBJECT_TO_MOBJECT_BUFFER = MED_SMALL_BUFF\n\n\n# All in seconds\nDEFAULT_POINTWISE_FUNCTION_RUN_TIME = 3.0\nDEFAULT_WAIT_TIME = 1.0\n\n\nORIGIN = np.array((0., 0., 0.))\nUP = np.array((0., 1., 0.))\nDOWN = np.array((0., -1., 0.))\nRIGHT = np.array((1., 0., 0.))\nLEFT = np.array((-1., 0., 0.))\nIN = np.array((0., 0., -1.))\nOUT = np.array((0., 0., 1.))\nX_AXIS = np.array((1., 0., 0.))\nY_AXIS = np.array((0., 1., 0.))\nZ_AXIS = np.array((0., 0., 1.))\n\n# Useful abbreviations for diagonals\nUL = UP + LEFT\nUR = UP + RIGHT\nDL = DOWN + LEFT\nDR = DOWN + RIGHT\n\nTOP = FRAME_Y_RADIUS * UP\nBOTTOM = FRAME_Y_RADIUS * DOWN\nLEFT_SIDE = FRAME_X_RADIUS * LEFT\nRIGHT_SIDE = FRAME_X_RADIUS * RIGHT\n\nPI = np.pi\nTAU = 2 * PI\nDEGREES = TAU / 360\n\nFFMPEG_BIN = \"ffmpeg\"\n\n# Colors\nCOLOR_MAP = {\n \"DARK_BLUE\": \"#236B8E\",\n \"DARK_BROWN\": \"#8B4513\",\n \"LIGHT_BROWN\": \"#CD853F\",\n \"BLUE_E\": \"#1C758A\",\n \"BLUE_D\": \"#29ABCA\",\n \"BLUE_C\": \"#58C4DD\",\n \"BLUE_B\": \"#9CDCEB\",\n \"BLUE_A\": \"#C7E9F1\",\n \"TEAL_E\": \"#49A88F\",\n \"TEAL_D\": \"#55C1A7\",\n \"TEAL_C\": \"#5CD0B3\",\n \"TEAL_B\": \"#76DDC0\",\n \"TEAL_A\": \"#ACEAD7\",\n \"GREEN_E\": \"#699C52\",\n \"GREEN_D\": \"#77B05D\",\n \"GREEN_C\": \"#83C167\",\n \"GREEN_B\": \"#A6CF8C\",\n \"GREEN_A\": \"#C9E2AE\",\n \"YELLOW_E\": \"#E8C11C\",\n \"YELLOW_D\": \"#F4D345\",\n \"YELLOW_C\": \"#FFFF00\",\n \"YELLOW_B\": \"#FFEA94\",\n \"YELLOW_A\": \"#FFF1B6\",\n \"GOLD_E\": \"#C78D46\",\n \"GOLD_D\": \"#E1A158\",\n \"GOLD_C\": \"#F0AC5F\",\n \"GOLD_B\": \"#F9B775\",\n \"GOLD_A\": \"#F7C797\",\n \"RED_E\": \"#CF5044\",\n \"RED_D\": \"#E65A4C\",\n \"RED_C\": \"#FC6255\",\n \"RED_B\": \"#FF8080\",\n \"RED_A\": \"#F7A1A3\",\n \"MAROON_E\": \"#94424F\",\n \"MAROON_D\": \"#A24D61\",\n \"MAROON_C\": \"#C55F73\",\n \"MAROON_B\": \"#EC92AB\",\n \"MAROON_A\": \"#ECABC1\",\n \"PURPLE_E\": \"#644172\",\n \"PURPLE_D\": \"#715582\",\n \"PURPLE_C\": \"#9A72AC\",\n \"PURPLE_B\": \"#B189C6\",\n \"PURPLE_A\": \"#CAA3E8\",\n \"WHITE\": \"#FFFFFF\",\n \"BLACK\": \"#000000\",\n \"LIGHT_GRAY\": \"#BBBBBB\",\n \"LIGHT_GREY\": \"#BBBBBB\",\n \"GRAY\": \"#888888\",\n \"GREY\": \"#888888\",\n \"DARK_GREY\": \"#444444\",\n \"DARK_GRAY\": \"#444444\",\n \"GREY_BROWN\": \"#736357\",\n \"PINK\": \"#D147BD\",\n \"GREEN_SCREEN\": \"#00FF00\",\n \"ORANGE\": \"#FF862F\",\n}\nPALETTE = list(COLOR_MAP.values())\nlocals().update(COLOR_MAP)\nfor name in [s for s in list(COLOR_MAP.keys()) if s.endswith(\"_C\")]:\n locals()[name.replace(\"_C\", \"\")] = locals()[name]\n\n# Streaming related configuration\nLIVE_STREAM_NAME = \"LiveStream\"\nTWITCH_STREAM_KEY = \"YOUR_STREAM_KEY\"\nSTREAMING_PROTOCOL = \"tcp\"\nSTREAMING_IP = \"127.0.0.1\"\nSTREAMING_PORT = \"2000\"\nSTREAMING_CLIENT = \"ffplay\"\nSTREAMING_URL = f\"{STREAMING_PROTOCOL}://{STREAMING_IP}:{STREAMING_PORT}?listen\"\nSTREAMING_CONSOLE_BANNER = \"\"\"\nManim is now running in streaming mode. Stream animations by passing\nthem to manim.play(), e.g.\n>>> c = Circle()\n>>> manim.play(ShowCreation(c))\n\"\"\"\n" ]
[ [ "numpy.array" ] ]
swords123/SSC-6D
[ "4e8ac58036e380fdc26a865747e1343e0ecee42b" ]
[ "eval/utils.py" ]
[ "import logging\nimport math\nimport os\n\nimport _pickle as cPickle\nimport cv2\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\n\ndef align_rotation(sRT):\n \"\"\" Align rotations for symmetric objects.\n Args:\n sRT: 4 x 4\n \"\"\"\n s = np.cbrt(np.linalg.det(sRT[:3, :3])) # det求行列式, cbrt求element-wise立方根\n R = sRT[:3, :3] / s # 这里为什么要除行列式的立方根\n T = sRT[:3, 3]\n\n theta_x = R[0, 0] + R[2, 2]\n theta_y = R[0, 2] - R[2, 0]\n r_norm = math.sqrt(theta_x**2 + theta_y**2)\n s_map = np.array([[theta_x/r_norm, 0.0, -theta_y/r_norm],\n [0.0, 1.0, 0.0 ],\n [theta_y/r_norm, 0.0, theta_x/r_norm]])\n rotation = R @ s_map\n aligned_sRT = np.identity(4, dtype=np.float32)\n aligned_sRT[:3, :3] = s * rotation\n aligned_sRT[:3, 3] = T\n return aligned_sRT\n\n \ndef setup_logger(logger_name, log_file, level=logging.INFO):\n logger = logging.getLogger(logger_name)\n formatter = logging.Formatter('%(asctime)s : %(message)s')\n fileHandler = logging.FileHandler(log_file, mode='a')\n fileHandler.setFormatter(formatter)\n logger.setLevel(level)\n logger.addHandler(fileHandler)\n streamHandler = logging.StreamHandler()\n streamHandler.setFormatter(formatter)\n logger.addHandler(streamHandler)\n return logger\n\n\ndef get_3d_bbox(size, shift=0):\n \"\"\"\n Args:\n size: [3] or scalar\n shift: [3] or scalar\n Returns:\n bbox_3d: [3, N]\n\n \"\"\"\n bbox_3d = np.array([[+size[0] / 2, +size[1] / 2, +size[2] / 2],\n [+size[0] / 2, +size[1] / 2, -size[2] / 2],\n [-size[0] / 2, +size[1] / 2, +size[2] / 2],\n [-size[0] / 2, +size[1] / 2, -size[2] / 2],\n [+size[0] / 2, -size[1] / 2, +size[2] / 2],\n [+size[0] / 2, -size[1] / 2, -size[2] / 2],\n [-size[0] / 2, -size[1] / 2, +size[2] / 2],\n [-size[0] / 2, -size[1] / 2, -size[2] / 2]]) + shift\n bbox_3d = bbox_3d.transpose()\n return bbox_3d\n\n\ndef transform_coordinates_3d(coordinates, sRT):\n \"\"\"\n Args:\n coordinates: [3, N]\n sRT: [4, 4]\n\n Returns:\n new_coordinates: [3, N]\n\n \"\"\"\n assert coordinates.shape[0] == 3\n coordinates = np.vstack([coordinates, np.ones((1, coordinates.shape[1]), dtype=np.float32)])\n new_coordinates = sRT @ coordinates\n new_coordinates = new_coordinates[:3, :] / new_coordinates[3, :]\n return new_coordinates\n\ndef Q2R(q):\n \"\"\"\n Q: 四元数, q = [q_0, q_1, q_2, q_3]\n R: 旋转矩阵,左乘\n \"\"\"\n R = np.zeros((3, 3), dtype=float)\n\n R[0, 0] = 1 - 2 * (q[2] ** 2 + q[3] ** 2)\n R[1, 1] = 1 - 2 * (q[1] ** 2 + q[3] ** 2)\n R[2, 2] = 1 - 2 * (q[1] ** 2 + q[2] ** 2)\n\n R[0, 1] = 2 * (q[1] * q[2] - q[0] * q[3])\n R[1, 0] = 2 * (q[1] * q[2] + q[0] * q[3])\n\n R[0, 2] = 2 * (q[1] * q[3] + q[0] * q[2])\n R[2, 0] = 2 * (q[1] * q[3] - q[0] * q[2])\n\n R[1, 2] = 2 * (q[2] * q[3] - q[0] * q[1])\n R[2, 1] = 2 * (q[2] * q[3] + q[0] * q[1])\n\n return R\n\ndef compute_3d_IoU(sRT_1, sRT_2, size_1, size_2, class_name_1, class_name_2, handle_visibility):\n \"\"\" Computes IoU overlaps between two 3D bboxes. 
\"\"\"\n def asymmetric_3d_iou(sRT_1, sRT_2, size_1, size_2):\n noc_cube_1 = get_3d_bbox(size_1, 0)\n bbox_3d_1 = transform_coordinates_3d(noc_cube_1, sRT_1)\n noc_cube_2 = get_3d_bbox(size_2, 0)\n bbox_3d_2 = transform_coordinates_3d(noc_cube_2, sRT_2)\n\n # new\n bbox_1_max = np.amax(bbox_3d_1, axis=1)\n bbox_1_min = np.amin(bbox_3d_1, axis=1)\n bbox_2_max = np.amax(bbox_3d_2, axis=1)\n bbox_2_min = np.amin(bbox_3d_2, axis=1)\n # old and wrong\n #bbox_1_max = np.amax(bbox_3d_1, axis=0)\n #bbox_1_min = np.amin(bbox_3d_1, axis=0)\n #bbox_2_max = np.amax(bbox_3d_2, axis=0)\n #bbox_2_min = np.amin(bbox_3d_2, axis=0) \n\n overlap_min = np.maximum(bbox_1_min, bbox_2_min)\n overlap_max = np.minimum(bbox_1_max, bbox_2_max)\n\n # intersections and union\n if np.amin(overlap_max - overlap_min) < 0:\n intersections = 0\n else:\n intersections = np.prod(overlap_max - overlap_min)\n union = np.prod(bbox_1_max - bbox_1_min) + np.prod(bbox_2_max - bbox_2_min) - intersections\n overlaps = intersections / union\n return overlaps\n\n if sRT_1 is None or sRT_2 is None:\n return -1\n\n if (class_name_1 in ['bottle', 'bowl', 'can'] and class_name_1 == class_name_2) or \\\n (class_name_1 == 'mug' and class_name_1 == class_name_2 and handle_visibility==0):\n def y_rotation_matrix(theta):\n return np.array([[ np.cos(theta), 0, np.sin(theta), 0],\n [ 0, 1, 0, 0],\n [-np.sin(theta), 0, np.cos(theta), 0],\n [ 0, 0, 0, 1]])\n n = 20\n max_iou = 0\n for i in range(n):\n rotated_RT_1 = sRT_1 @ y_rotation_matrix(2 * math.pi * i / float(n))\n max_iou = max(max_iou, asymmetric_3d_iou(rotated_RT_1, sRT_2, size_1, size_2))\n else:\n max_iou = asymmetric_3d_iou(sRT_1, sRT_2, size_1, size_2)\n\n return max_iou\n\n\ndef compute_IoU_matches(gt_class_ids, gt_sRT, gt_size, gt_handle_visibility,\n pred_class_ids, pred_sRT, pred_size, pred_scores,\n synset_names, iou_3d_thresholds, score_threshold=0):\n \"\"\" Find matches between NOCS prediction and ground truth instances.\n\n Args:\n size: 3D bounding box size\n bboxes: 2D bounding boxes\n\n Returns:\n gt_matches: 2-D array. For each GT box it has the index of the matched predicted box.\n pred_matches: 2-D array. For each predicted box, it has the index of the matched ground truth box.\n overlaps: IoU overlaps.\n indices:\n\n \"\"\"\n num_pred = len(pred_class_ids)\n num_gt = len(gt_class_ids)\n indices = np.zeros(0)\n if num_pred:\n # Sort predictions by score from high to low\n indices = np.argsort(pred_scores)[::-1]\n pred_class_ids = pred_class_ids[indices].copy()\n pred_size = pred_size[indices].copy()\n pred_sRT = pred_sRT[indices].copy()\n # compute IoU overlaps [pred_bboxs gt_bboxs]\n overlaps = np.zeros((num_pred, num_gt), dtype=np.float32)\n for i in range(num_pred):\n for j in range(num_gt):\n overlaps[i, j] = compute_3d_IoU(pred_sRT[i], gt_sRT[j], pred_size[i, :], gt_size[j],\n synset_names[pred_class_ids[i]], synset_names[gt_class_ids[j]], gt_handle_visibility[j])\n # loop through predictions and find matching ground truth boxes\n num_iou_3d_thres = len(iou_3d_thresholds)\n pred_matches = -1 * np.ones([num_iou_3d_thres, num_pred])\n gt_matches = -1 * np.ones([num_iou_3d_thres, num_gt])\n for s, iou_thres in enumerate(iou_3d_thresholds):\n for i in range(indices.shape[0]):\n # Find best matching ground truth box\n # 1. Sort matches by score\n sorted_ixs = np.argsort(overlaps[i])[::-1]\n # 2. Remove low scores\n low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]\n if low_score_idx.size > 0:\n sorted_ixs = sorted_ixs[:low_score_idx[0]]\n # 3. 
Find the match\n for j in sorted_ixs:\n # If ground truth box is already matched, go to next one\n if gt_matches[s, j] > -1:\n continue\n # If we reach IoU smaller than the threshold, end the loop\n iou = overlaps[i, j]\n if iou < iou_thres:\n break\n # Do we have a match?\n if not pred_class_ids[i] == gt_class_ids[j]:\n continue\n if iou > iou_thres:\n gt_matches[s, j] = i\n pred_matches[s, i] = j\n break\n return gt_matches, pred_matches, overlaps, indices\n\n\ndef compute_RT_errors(sRT_1, sRT_2, class_id, handle_visibility, synset_names):\n \"\"\"\n Args:\n sRT_1: [4, 4]. homogeneous affine transformation\n sRT_2: [4, 4]. homogeneous affine transformation\n\n Returns:\n theta: angle difference of R in degree\n shift: l2 difference of T in centimeter\n \"\"\"\n # make sure the last row is [0, 0, 0, 1]\n if sRT_1 is None or sRT_2 is None:\n return -1\n try:\n assert np.array_equal(sRT_1[3, :], sRT_2[3, :])\n assert np.array_equal(sRT_1[3, :], np.array([0, 0, 0, 1]))\n except AssertionError:\n print(sRT_1[3, :], sRT_2[3, :])\n exit()\n\n R1 = sRT_1[:3, :3] / np.cbrt(np.linalg.det(sRT_1[:3, :3]))\n T1 = sRT_1[:3, 3]\n R2 = sRT_2[:3, :3] / np.cbrt(np.linalg.det(sRT_2[:3, :3]))\n T2 = sRT_2[:3, 3]\n # symmetric when rotating around y-axis\n if synset_names[class_id] in ['bottle', 'can', 'bowl'] or \\\n (synset_names[class_id] == 'mug' and handle_visibility == 0):\n y = np.array([0, 1, 0])\n y1 = R1 @ y\n y2 = R2 @ y\n cos_theta = y1.dot(y2) / (np.linalg.norm(y1) * np.linalg.norm(y2))\n else:\n R = R1 @ R2.transpose()\n cos_theta = (np.trace(R) - 1) / 2\n\n theta = np.arccos(np.clip(cos_theta, -1.0, 1.0)) * 180 / np.pi\n shift = np.linalg.norm(T1 - T2) * 100\n result = np.array([theta, shift])\n\n return result\n\n\ndef compute_RT_overlaps(gt_class_ids, gt_sRT, gt_handle_visibility, pred_class_ids, pred_sRT, synset_names):\n \"\"\" Finds overlaps between prediction and ground truth instances.\n\n Returns:\n overlaps:\n\n \"\"\"\n num_pred = len(pred_class_ids)\n num_gt = len(gt_class_ids)\n overlaps = np.zeros((num_pred, num_gt, 2))\n\n for i in range(num_pred):\n for j in range(num_gt):\n overlaps[i, j, :] = compute_RT_errors(pred_sRT[i], gt_sRT[j], gt_class_ids[j],\n gt_handle_visibility[j], synset_names)\n return overlaps\n\n\ndef compute_RT_matches(overlaps, pred_class_ids, gt_class_ids, degree_thres_list, shift_thres_list):\n num_degree_thres = len(degree_thres_list)\n num_shift_thres = len(shift_thres_list)\n num_pred = len(pred_class_ids)\n num_gt = len(gt_class_ids)\n\n pred_matches = -1 * np.ones((num_degree_thres, num_shift_thres, num_pred))\n gt_matches = -1 * np.ones((num_degree_thres, num_shift_thres, num_gt))\n\n if num_pred == 0 or num_gt == 0:\n return gt_matches, pred_matches\n\n assert num_pred == overlaps.shape[0]\n assert num_gt == overlaps.shape[1]\n assert overlaps.shape[2] == 2\n\n for d, degree_thres in enumerate(degree_thres_list):\n for s, shift_thres in enumerate(shift_thres_list):\n for i in range(num_pred):\n # Find best matching ground truth box\n # 1. Sort matches by scores from low to high\n sum_degree_shift = np.sum(overlaps[i, :, :], axis=-1)\n sorted_ixs = np.argsort(sum_degree_shift)\n # 2. 
Find the match\n for j in sorted_ixs:\n # If ground truth box is already matched, go to next one\n if gt_matches[d, s, j] > -1 or pred_class_ids[i] != gt_class_ids[j]:\n continue\n # If we reach IoU smaller than the threshold, end the loop\n if overlaps[i, j, 0] > degree_thres or overlaps[i, j, 1] > shift_thres:\n continue\n gt_matches[d, s, j] = i\n pred_matches[d, s, i] = j\n break\n\n return gt_matches, pred_matches\n\n\ndef compute_ap_and_acc(pred_matches, pred_scores, gt_matches):\n # sort the scores from high to low\n assert pred_matches.shape[0] == pred_scores.shape[0]\n score_indices = np.argsort(pred_scores)[::-1]\n # pred_scores = pred_scores[score_indices]\n pred_matches = pred_matches[score_indices]\n precisions = np.cumsum(pred_matches > -1) / (np.arange(len(pred_matches)) + 1)\n recalls = np.cumsum(pred_matches > -1).astype(np.float32) / len(gt_matches)\n # Pad with start and end values to simplify the math\n precisions = np.concatenate([[0], precisions, [0]])\n recalls = np.concatenate([[0], recalls, [1]])\n # Ensure precision values decrease but don't increase. This way, the\n # precision value at each recall threshold is the maximum it can be\n # for all following recall thresholds, as specified by the VOC paper.\n for i in range(len(precisions) - 2, -1, -1):\n precisions[i] = np.maximum(precisions[i], precisions[i + 1])\n # compute mean AP over recall range\n indices = np.where(recalls[:-1] != recalls[1:])[0] + 1\n ap = np.sum((recalls[indices] - recalls[indices - 1]) * precisions[indices])\n # accuracy\n acc = np.sum(pred_matches > -1) / len(pred_matches)\n\n return ap, acc\n\n\ndef compute_mAP(pred_results, out_dir, degree_thresholds=[180], shift_thresholds=[100],\n iou_3d_thresholds=[0.1], iou_pose_thres=0.1, use_matches_for_pose=False):\n \"\"\" Compute mean Average Precision.\n\n Returns:\n iou_aps:\n pose_aps:\n iou_acc:\n pose_acc:\n\n \"\"\"\n synset_names = ['BG', 'bottle', 'bowl', 'camera', 'can', 'laptop', 'mug']\n num_classes = len(synset_names)\n degree_thres_list = list(degree_thresholds) + [360]\n num_degree_thres = len(degree_thres_list)\n shift_thres_list = list(shift_thresholds) + [100]\n num_shift_thres = len(shift_thres_list)\n iou_thres_list = list(iou_3d_thresholds)\n num_iou_thres = len(iou_thres_list)\n\n if use_matches_for_pose:\n assert iou_pose_thres in iou_thres_list\n\n # pre-allocate more than enough memory\n iou_aps = np.zeros((num_classes + 1, num_iou_thres))\n iou_acc = np.zeros((num_classes + 1, num_iou_thres))\n iou_pred_matches_all = [np.zeros((num_iou_thres, 30000)) for _ in range(num_classes)]\n iou_pred_scores_all = [np.zeros((num_iou_thres, 30000)) for _ in range(num_classes)]\n iou_gt_matches_all = [np.zeros((num_iou_thres, 30000)) for _ in range(num_classes)]\n iou_pred_count = [0 for _ in range(num_classes)]\n iou_gt_count = [0 for _ in range(num_classes)]\n\n pose_aps = np.zeros((num_classes + 1, num_degree_thres, num_shift_thres))\n pose_acc = np.zeros((num_classes + 1, num_degree_thres, num_shift_thres))\n pose_pred_matches_all = [np.zeros((num_degree_thres, num_shift_thres, 30000)) for _ in range(num_classes)]\n pose_pred_scores_all = [np.zeros((num_degree_thres, num_shift_thres, 30000)) for _ in range(num_classes)]\n pose_gt_matches_all = [np.zeros((num_degree_thres, num_shift_thres, 30000)) for _ in range(num_classes)]\n pose_pred_count = [0 for _ in range(num_classes)]\n pose_gt_count = [0 for _ in range(num_classes)]\n\n # loop over results to gather pred matches and gt matches for iou and pose metrics\n progress = 0\n 
for progress, result in enumerate(tqdm(pred_results)):\n gt_class_ids = result['gt_class_ids'].astype(np.int32)\n gt_sRT = np.array(result['gt_RTs'])\n gt_size = np.array(result['gt_scales'])\n gt_handle_visibility = result['gt_handle_visibility']\n\n pred_class_ids = result['pred_class_ids']\n pred_sRT = np.array(result['pred_RTs'])\n pred_size = result['pred_scales']\n pred_scores = result['pred_scores']\n\n if len(gt_class_ids) == 0 and len(pred_class_ids) == 0:\n continue\n\n for cls_id in range(1, num_classes):\n # get gt and predictions in this class\n cls_gt_class_ids = gt_class_ids[gt_class_ids==cls_id] if len(gt_class_ids) else np.zeros(0)\n cls_gt_sRT = gt_sRT[gt_class_ids==cls_id] if len(gt_class_ids) else np.zeros((0, 4, 4))\n cls_gt_size = gt_size[gt_class_ids==cls_id] if len(gt_class_ids) else np.zeros((0, 3))\n if synset_names[cls_id] != 'mug':\n cls_gt_handle_visibility = np.ones_like(cls_gt_class_ids)\n else:\n cls_gt_handle_visibility = gt_handle_visibility[gt_class_ids==cls_id] if len(gt_class_ids) else np.ones(0)\n\n cls_pred_class_ids = pred_class_ids[pred_class_ids==cls_id] if len(pred_class_ids) else np.zeros(0)\n cls_pred_sRT = pred_sRT[pred_class_ids==cls_id] if len(pred_class_ids) else np.zeros((0, 4, 4))\n cls_pred_size = pred_size[pred_class_ids==cls_id] if len(pred_class_ids) else np.zeros((0, 3))\n cls_pred_scores = pred_scores[pred_class_ids==cls_id] if len(pred_class_ids) else np.zeros(0)\n\n # calculate the overlap between each gt instance and pred instance\n iou_cls_gt_match, iou_cls_pred_match, _, iou_pred_indices = \\\n compute_IoU_matches(cls_gt_class_ids, cls_gt_sRT, cls_gt_size, cls_gt_handle_visibility,\n cls_pred_class_ids, cls_pred_sRT, cls_pred_size, cls_pred_scores,\n synset_names, iou_thres_list)\n if len(iou_pred_indices):\n cls_pred_class_ids = cls_pred_class_ids[iou_pred_indices]\n cls_pred_sRT = cls_pred_sRT[iou_pred_indices]\n cls_pred_scores = cls_pred_scores[iou_pred_indices]\n\n num_pred = iou_cls_pred_match.shape[1]\n pred_start = iou_pred_count[cls_id]\n pred_end = pred_start + num_pred\n iou_pred_count[cls_id] = pred_end\n iou_pred_matches_all[cls_id][:, pred_start:pred_end] = iou_cls_pred_match\n cls_pred_scores_tile = np.tile(cls_pred_scores, (num_iou_thres, 1))\n assert cls_pred_scores_tile.shape[1] == num_pred\n iou_pred_scores_all[cls_id][:, pred_start:pred_end] = cls_pred_scores_tile\n num_gt = iou_cls_gt_match.shape[1]\n gt_start = iou_gt_count[cls_id]\n gt_end = gt_start + num_gt\n iou_gt_count[cls_id] = gt_end\n iou_gt_matches_all[cls_id][:, gt_start:gt_end] = iou_cls_gt_match\n\n if use_matches_for_pose:\n thres_ind = list(iou_thres_list).index(iou_pose_thres)\n iou_thres_pred_match = iou_cls_pred_match[thres_ind, :]\n cls_pred_class_ids = cls_pred_class_ids[iou_thres_pred_match > -1] if len(iou_thres_pred_match) > 0 else np.zeros(0)\n cls_pred_sRT = cls_pred_sRT[iou_thres_pred_match > -1] if len(iou_thres_pred_match) > 0 else np.zeros((0, 4, 4))\n cls_pred_scores = cls_pred_scores[iou_thres_pred_match > -1] if len(iou_thres_pred_match) > 0 else np.zeros(0)\n iou_thres_gt_match = iou_cls_gt_match[thres_ind, :]\n cls_gt_class_ids = cls_gt_class_ids[iou_thres_gt_match > -1] if len(iou_thres_gt_match) > 0 else np.zeros(0)\n cls_gt_sRT = cls_gt_sRT[iou_thres_gt_match > -1] if len(iou_thres_gt_match) > 0 else np.zeros((0, 4, 4))\n cls_gt_handle_visibility = cls_gt_handle_visibility[iou_thres_gt_match > -1] if len(iou_thres_gt_match) > 0 else np.zeros(0)\n\n RT_overlaps = compute_RT_overlaps(cls_gt_class_ids, cls_gt_sRT, 
cls_gt_handle_visibility,\n cls_pred_class_ids, cls_pred_sRT, synset_names)\n pose_cls_gt_match, pose_cls_pred_match = compute_RT_matches(RT_overlaps, cls_pred_class_ids, cls_gt_class_ids,\n degree_thres_list, shift_thres_list)\n num_pred = pose_cls_pred_match.shape[2]\n pred_start = pose_pred_count[cls_id]\n pred_end = pred_start + num_pred\n pose_pred_count[cls_id] = pred_end\n pose_pred_matches_all[cls_id][:, :, pred_start:pred_end] = pose_cls_pred_match\n cls_pred_scores_tile = np.tile(cls_pred_scores, (num_degree_thres, num_shift_thres, 1))\n assert cls_pred_scores_tile.shape[2] == num_pred\n pose_pred_scores_all[cls_id][:, :, pred_start:pred_end] = cls_pred_scores_tile\n num_gt = pose_cls_gt_match.shape[2]\n gt_start = pose_gt_count[cls_id]\n gt_end = gt_start + num_gt\n pose_gt_count[cls_id] = gt_end\n pose_gt_matches_all[cls_id][:, :, gt_start:gt_end] = pose_cls_gt_match\n\n # trim zeros\n for cls_id in range(num_classes):\n # IoU\n iou_pred_matches_all[cls_id] = iou_pred_matches_all[cls_id][:, :iou_pred_count[cls_id]]\n iou_pred_scores_all[cls_id] = iou_pred_scores_all[cls_id][:, :iou_pred_count[cls_id]]\n iou_gt_matches_all[cls_id] = iou_gt_matches_all[cls_id][:, :iou_gt_count[cls_id]]\n # pose\n pose_pred_matches_all[cls_id] = pose_pred_matches_all[cls_id][:, :, :pose_pred_count[cls_id]]\n pose_pred_scores_all[cls_id] = pose_pred_scores_all[cls_id][:, :, :pose_pred_count[cls_id]]\n pose_gt_matches_all[cls_id] = pose_gt_matches_all[cls_id][:, :, :pose_gt_count[cls_id]]\n\n # compute 3D IoU mAP\n for cls_id in range(1, num_classes):\n for s, iou_thres in enumerate(iou_thres_list):\n iou_aps[cls_id, s], iou_acc[cls_id, s] = compute_ap_and_acc(iou_pred_matches_all[cls_id][s, :],\n iou_pred_scores_all[cls_id][s, :],\n iou_gt_matches_all[cls_id][s, :])\n iou_aps[-1, :] = np.mean(iou_aps[1:-1, :], axis=0)\n iou_acc[-1, :] = np.mean(iou_acc[1:-1, :], axis=0)\n # compute pose mAP\n for i, degree_thres in enumerate(degree_thres_list):\n for j, shift_thres in enumerate(shift_thres_list):\n for cls_id in range(1, num_classes):\n cls_pose_pred_matches_all = pose_pred_matches_all[cls_id][i, j, :]\n cls_pose_gt_matches_all = pose_gt_matches_all[cls_id][i, j, :]\n cls_pose_pred_scores_all = pose_pred_scores_all[cls_id][i, j, :]\n pose_aps[cls_id, i, j], pose_acc[cls_id, i, j] = compute_ap_and_acc(cls_pose_pred_matches_all,\n cls_pose_pred_scores_all,\n cls_pose_gt_matches_all)\n pose_aps[-1, i, j] = np.mean(pose_aps[1:-1, i, j])\n pose_acc[-1, i, j] = np.mean(pose_acc[1:-1, i, j])\n\n # save results to pkl\n result_dict = {}\n result_dict['iou_thres_list'] = iou_thres_list\n result_dict['degree_thres_list'] = degree_thres_list\n result_dict['shift_thres_list'] = shift_thres_list\n result_dict['iou_aps'] = iou_aps\n result_dict['pose_aps'] = pose_aps\n result_dict['iou_acc'] = iou_acc\n result_dict['pose_acc'] = pose_acc\n pkl_path = os.path.join(out_dir, 'mAP_Acc.pkl')\n with open(pkl_path, 'wb') as f:\n cPickle.dump(result_dict, f)\n return iou_aps, pose_aps, iou_acc, pose_acc\n\n\ndef plot_mAP(iou_aps, pose_aps, out_dir, iou_thres_list, degree_thres_list, shift_thres_list):\n \"\"\" Draw iou 3d AP vs. 
iou thresholds.\n \"\"\"\n import matplotlib.pyplot as plt\n\n labels = ['bottle', 'bowl', 'camera', 'can', 'laptop', 'mug', 'mean', 'nocs']\n colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:pink', 'tab:olive', 'tab:purple', 'tab:red', 'tab:gray']\n styles = ['-', '-', '-', '-', '-', '-', '--', ':']\n\n fig, (ax_iou, ax_degree, ax_shift) = plt.subplots(1, 3, figsize=(8, 3.5))\n # IoU subplot\n ax_iou.set_title('3D IoU', fontsize=10)\n ax_iou.set_ylabel('Average Precision')\n ax_iou.set_ylim(0, 100)\n ax_iou.set_xlabel('Percent')\n ax_iou.set_xlim(0, 100)\n ax_iou.xaxis.set_ticks([0, 25, 50, 75, 100])\n ax_iou.grid()\n for i in range(1, iou_aps.shape[0]):\n ax_iou.plot(100*np.array(iou_thres_list), 100*iou_aps[i, :],\n color=colors[i-1], linestyle=styles[i-1], label=labels[i-1])\n # rotation subplot\n ax_degree.set_title('Rotation', fontsize=10)\n ax_degree.set_ylim(0, 100)\n ax_degree.yaxis.set_ticklabels([])\n ax_degree.set_xlabel('Degree')\n ax_degree.set_xlim(0, 60)\n ax_degree.xaxis.set_ticks([0, 20, 40, 60])\n ax_degree.grid()\n for i in range(1, pose_aps.shape[0]):\n ax_degree.plot(np.array(degree_thres_list), 100*pose_aps[i, :len(degree_thres_list), -1],\n color=colors[i-1], linestyle=styles[i-1], label=labels[i-1])\n # translation subplot\n ax_shift.set_title('Translation', fontsize=10)\n ax_shift.set_ylim(0, 100)\n ax_shift.yaxis.set_ticklabels([])\n ax_shift.set_xlabel('Centimeter')\n ax_shift.set_xlim(0, 10)\n ax_shift.xaxis.set_ticks([0, 5, 10])\n ax_shift.grid()\n for i in range(1, pose_aps.shape[0]):\n ax_shift.plot(np.array(shift_thres_list), 100*pose_aps[i, -1, :len(shift_thres_list)],\n color=colors[i-1], linestyle=styles[i-1], label=labels[i-1])\n ax_shift.legend(loc='lower right', fontsize='small')\n plt.tight_layout()\n # plt.show()\n plt.savefig(os.path.join(out_dir, 'mAP.png'))\n plt.close(fig)\n return\n\n\ndef load_depth(img_path):\n \"\"\" Load depth image from img_path. \"\"\"\n depth_path = img_path + '_depth.png'\n depth = cv2.imread(depth_path, -1)\n if len(depth.shape) == 3:\n # This is encoded depth image, let's convert\n # NOTE: RGB is actually BGR in opencv\n depth16 = depth[:, :, 1]*256 + depth[:, :, 2]\n depth16 = np.where(depth16 == 32001, 0, depth16)\n depth16 = depth16.astype(np.uint16)\n elif len(depth.shape) == 2 and depth.dtype == 'uint16':\n depth16 = depth\n else:\n assert False, '[ Error ]: Unsupported depth type.'\n return depth16\n\n\ndef get_bbox(bbox, img_h, img_w):\n \"\"\" Compute square image crop window. 
\"\"\"\n y1, x1, y2, x2 = bbox\n img_width = img_h\n img_length = img_w\n window_size = (max(y2-y1, x2-x1) // 40 + 1) * 40\n window_size = min(window_size, 440)\n center = [(y1 + y2) // 2, (x1 + x2) // 2]\n rmin = center[0] - int(window_size / 2)\n rmax = center[0] + int(window_size / 2)\n cmin = center[1] - int(window_size / 2)\n cmax = center[1] + int(window_size / 2)\n if rmin < 0:\n delt = -rmin\n rmin = 0\n rmax += delt\n if cmin < 0:\n delt = -cmin\n cmin = 0\n cmax += delt\n if rmax > img_width:\n delt = rmax - img_width\n rmax = img_width\n rmin -= delt\n if cmax > img_length:\n delt = cmax - img_length\n cmax = img_length\n cmin -= delt\n return rmin, rmax, cmin, cmax\n\ndef q2R(pred_r):\n bs, _ = pred_r.size()\n pred_r = pred_r / (torch.norm(pred_r, dim=1).view(bs, 1))\n R_matrix = torch.cat(((1.0 - 2.0*(pred_r[:, 2]**2 + pred_r[:, 3]**2)).view(bs, 1),\n (2.0*pred_r[:, 1]*pred_r[:, 2] - 2.0*pred_r[:, 0]*pred_r[:, 3]).view(bs, 1),\n (2.0*pred_r[:, 0]*pred_r[:, 2] + 2.0*pred_r[:, 1]*pred_r[:, 3]).view(bs, 1),\n (2.0*pred_r[:, 1]*pred_r[:, 2] + 2.0*pred_r[:, 3]*pred_r[:, 0]).view(bs, 1),\n (1.0 - 2.0*(pred_r[:, 1]**2 + pred_r[:, 3]**2)).view(bs, 1),\n (-2.0*pred_r[:, 0]*pred_r[:, 1] + 2.0*pred_r[:, 2]*pred_r[:, 3]).view(bs, 1),\n (-2.0*pred_r[:, 0]*pred_r[:, 2] + 2.0*pred_r[:, 1]*pred_r[:, 3]).view(bs, 1),\n (2.0*pred_r[:, 0]*pred_r[:, 1] + 2.0*pred_r[:, 2]*pred_r[:, 3]).view(bs, 1),\n (1.0 - 2.0*(pred_r[:, 1]**2 + pred_r[:, 2]**2)).view(bs, 1)), dim=1).contiguous().view(bs, 3, 3)\n return R_matrix\n\ndef load_obj(path_to_file):\n vertices = []\n faces = []\n with open(path_to_file, 'r') as f:\n for line in f:\n if line[:2] == 'v ':\n vertex = line[2:].strip().split(' ')\n vertex = [float(xyz) for xyz in vertex]\n vertices.append(vertex)\n elif line[0] == 'f':\n face = line[1:].replace('//', '/').strip().split(' ')\n face = [int(idx.split('/')[0])-1 for idx in face]\n faces.append(face)\n else:\n continue\n vertices = np.asarray(vertices)\n faces = np.asarray(faces)\n return vertices, faces\n\n\ndef random_point(face_vertices):\n r1, r2 = np.random.random(2)\n sqrt_r1 = np.sqrt(r1)\n point = (1 - sqrt_r1) * face_vertices[0, :] + \\\n sqrt_r1 * (1 - r2) * face_vertices[1, :] + \\\n sqrt_r1 * r2 * face_vertices[2, :]\n\n return point\n\ndef uniform_sample(vertices, faces, n_samples, with_normal=False):\n sampled_points = np.zeros((n_samples, 3), dtype=float)\n normals = np.zeros((n_samples, 3), dtype=float)\n faces = vertices[faces]\n vec_cross = np.cross(faces[:, 1, :] - faces[:, 0, :],\n faces[:, 2, :] - faces[:, 0, :])\n face_area = 0.5 * np.linalg.norm(vec_cross, axis=1)\n cum_area = np.cumsum(face_area)\n for i in range(n_samples):\n face_id = np.searchsorted(cum_area, np.random.random() * cum_area[-1])\n sampled_points[i] = random_point(faces[face_id, :, :])\n normals[i] = vec_cross[face_id]\n normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)\n if with_normal:\n sampled_points = np.concatenate((sampled_points, normals), axis=1)\n return sampled_points\n\ndef pairwise_distance(A, B):\n diff = A[:, :, None] - B[:, :, None].T\n C = np.sqrt(np.sum(diff**2, axis=1))\n\n return C\n\ndef farthest_point_sampling(points, n_samples):\n selected_pts = np.zeros((n_samples,), dtype=int)\n dist_mat = pairwise_distance(points, points)\n # start from first point\n pt_idx = 0\n dist_to_set = dist_mat[:, pt_idx]\n for i in range(n_samples):\n selected_pts[i] = pt_idx\n dist_to_set = np.minimum(dist_to_set, dist_mat[:, pt_idx])\n pt_idx = np.argmax(dist_to_set)\n return 
selected_pts\n\ndef sample_points_from_mesh(path, n_pts, with_normal=False, fps=False, ratio=2):\n vertices, faces = load_obj(path)\n if fps:\n points = uniform_sample(vertices, faces, ratio*n_pts, with_normal)\n pts_idx = farthest_point_sampling(points[:, :3], n_pts)\n points = points[pts_idx]\n else:\n points = uniform_sample(vertices, faces, n_pts, with_normal)\n return points, (vertices, faces)\n" ]
[ [ "numpy.sum", "numpy.ones", "matplotlib.pyplot.tight_layout", "numpy.cross", "numpy.asarray", "numpy.argsort", "numpy.ones_like", "numpy.trace", "numpy.amax", "numpy.cos", "torch.norm", "numpy.where", "numpy.identity", "numpy.mean", "numpy.minimum", "numpy.tile", "numpy.zeros", "numpy.linalg.det", "matplotlib.pyplot.subplots", "numpy.argmax", "matplotlib.pyplot.close", "numpy.prod", "numpy.maximum", "numpy.array", "numpy.linalg.norm", "numpy.cumsum", "numpy.random.random", "numpy.amin", "numpy.clip", "numpy.array_equal", "numpy.sqrt", "numpy.sin", "numpy.concatenate" ] ]
mmfink/raster-functions
[ "55a33bdd2ac4f3333eca6ccd49de6f3d5d21f7ba" ]
[ "functions/Landsat_Image_Synthesis.py" ]
[ "import numpy as np\nimport datetime\nfrom datetime import timedelta\nimport sys\n\n\nimport os\nimport pickle\n\n#debug_logs_directory = r'C:\\PROJECTS\\TEMP'\n\n# Based on QA Band - https://landsat.usgs.gov/collectionqualityband\nQA_BAND_NUM = 7\nlandsat_5_clear_pix_vals = [672, 676, 680, 684]\n#landsat_8_clear_pix_vals = [2720, 2724, 2728, 2732]\nLANDSAT_CLEAR_PIX_VALS = landsat_5_clear_pix_vals #+ landsat_8_clear_pix_vals\n\n\nclass Landsat_Image_Synthesis():\n\n def __init__(self):\n self.name = 'Landsat 5 Scene Synthesis'\n self.description = 'This function takes as input a spatial and temporal '\\\n 'mosaic dataset of Landsat 5 TM images, selects images ' \\\n 'for user defined month, filters out cloudy '\\\n 'pixels from each image in the stack, then '\\\n 'averages the values along a spatial element to '\\\n 'create a synthetic Landsat 5 TM image for the '\\\n 'user define month.'\n\n self.times = []\n self.predict_month = None\n\n def getParameterInfo(self):\n return [\n {\n 'name': 'rasters',\n 'dataType': 'rasters',\n 'value': None,\n 'required': True,\n 'displayName': 'Rasters',\n 'description': 'The collection of overlapping rasters to aggregate.',\n },\n {\n 'name': 'predict_month',\n 'dataType': 'string',\n 'value': 'Jun',\n 'required': True,\n 'domain': ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),\n 'displayName': 'Month to Predict',\n 'description': 'Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, Nov, Dec'\n }\n ]\n\n def getConfiguration(self, **scalars):\n return {\n 'inheritProperties': 4 | 8, # inherit everything but the pixel type (1) and NoData (2)\n 'invalidateProperties': 2 | 4, # invalidate histogram and statistics because we are modifying pixel values\n 'inputMask': True, # need raster mask of all input rasters in .updatePixels().\n 'resampling': False, # process at native resolution\n 'keyMetadata': ['AcquisitionDate']\n }\n\n def updateRasterInfo(self, **kwargs):\n #outStats = {'minimum': -1, 'maximum': 1}\n self.outBandCount = 6\n\n kwargs['output_info']['pixelType'] = 'f4' # output pixels are floating-point values\n kwargs['output_info']['histogram'] = () # no statistics/histogram for output raster specified\n kwargs['output_info']['statistics'] = () # outStatsTuple\n kwargs['output_info']['bandCount'] = self.outBandCount # number of output bands.\n\n self.times = kwargs['rasters_keyMetadata']\n month_dict = {'Jan':1,\n 'Feb':2,\n 'Mar':3,\n 'Apr':4,\n 'May':5,\n 'Jun':6,\n 'Jul':7,\n 'Aug':8,\n 'Sep':9,\n 'Oct':10,\n 'Nov':11,\n 'Dec':12}\n\n self.predict_month = int(month_dict[kwargs['predict_month']])\n\n return kwargs\n\n def updateKeyMetadata(self, names, bandIndex, **keyMetadata):\n return keyMetadata\n\n\n def updatePixels(self, tlc, shape, props, **pixelBlocks):\n\n #fname = '{:%Y_%b_%d_%H_%M_%S}_t.txt'.format(datetime.datetime.now())\n #filename = os.path.join(debug_logs_directory, fname)\n\n #file = open(filename,\"w\")\n #file.write(\"File Open.\\n\")\n\n pix_time = [j['acquisitiondate'] for j in self.times]\n\n #pickle_filename = os.path.join(debug_logs_directory, fname)\n #pickle.dump(pix_time, open(pickle_filename[:-4]+'pix_time.p',\"wb\"))\n\n #file.write(str(len(pix_time))+ \"\\n\")\n\n pix_blocks = pixelBlocks['rasters_pixels']\n pix_array = np.asarray(pix_blocks)\n\n #pickle_filename = os.path.join(debug_logs_directory, fname)\n #pickle.dump(pix_blocks, open(pickle_filename[:-4]+'pix_blocks.p',\"wb\"))\n\n pix_array_dim = pix_array.shape\n num_bands = 7 # pix_array_dim[1]\n num_squares_x = 
pix_array_dim[2]\n        num_squares_y = pix_array_dim[3]\n\n        d = datetime.datetime(1900, 1, 1)\n\n        datetime_list = []\n        idx_list = []\n        for idx, t in enumerate(pix_time):\n            delta = timedelta(days=t)\n            date = d + delta\n            # Keep only the scenes acquired in the user-selected month\n            # (this was previously hard-coded to June).\n            if date.month == self.predict_month:\n                idx_list.append(idx)\n                datetime_list.append(date)\n\n        pix_array_within = pix_array[idx_list, :, :, :]\n        out_band_num = self.outBandCount\n        output_pixels = np.zeros((out_band_num, num_squares_x, num_squares_y))\n\n        QA_BAND_IND = QA_BAND_NUM - 1\n        for num_x in range(0, int(num_squares_x)):\n            for num_y in range(0, int(num_squares_y)):\n                # Scenes whose QA value marks this pixel as clear. This only\n                # needs to be computed once per pixel, not once per band.\n                clear_indices = [\n                    x for x in range(pix_array_within.shape[0])\n                    if pix_array_within[x, QA_BAND_IND, num_x, num_y]\n                    in LANDSAT_CLEAR_PIX_VALS\n                ]\n\n                if len(clear_indices) > 0:\n                    for num_b in range(0, out_band_num):\n                        output_pixels[num_b, num_x, num_y] = np.mean(\n                            pix_array_within[clear_indices, num_b, num_x, num_y])\n                else:\n                    output_pixels[:, num_x, num_y] = -1\n\n        mask = np.ones((out_band_num, num_squares_x, num_squares_y))\n        pixelBlocks['output_mask'] = mask.astype('u1', copy=False)\n        pixelBlocks['output_pixels'] = output_pixels.astype(props['pixelType'], copy=False)\n\n        return pixelBlocks\n" ]
[ [ "numpy.mean", "numpy.ones", "numpy.asarray", "numpy.zeros" ] ]
18jeffreyma/tensorflow
[ "421453ee0c7471af40bbaf254ecf91d6a3a320cf" ]
[ "tensorflow/python/kernel_tests/list_ops_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for ops which manipulate lists of tensors.\"\"\"\n\n# pylint: disable=g-bad-name\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\nimport numpy as np # pylint: disable=unused-import\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_list_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import list_ops\nfrom tensorflow.python.ops import map_fn\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.platform import test\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def _testPushPop(self, max_num_elements):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[],\n max_num_elements=max_num_elements)\n l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))\n l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)\n l = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n l, e = self.evaluate((l, e))\n self.assertAllEqual(l, [])\n self.assertAllEqual(e, 1.0)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 2))\n def testPushPop(self, max_num_elements):\n self._testPushPop(max_num_elements)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 2))\n @test_util.run_gpu_only\n def testPushPopGPU(self, max_num_elements):\n with context.device(\"gpu:0\"):\n self._testPushPop(max_num_elements)\n\n @test_util.run_deprecated_v1\n def testPushInFullListFails(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=[], max_num_elements=1)\n l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"Tried to push item into a full list\"):\n l = list_ops.tensor_list_push_back(l, 2.)\n self.evaluate(l)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n 
(\"WithMaxNumElements\", 2))\n @test_util.run_deprecated_v1\n def testPopFromEmptyTensorListFails(self, max_num_elements):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[],\n max_num_elements=max_num_elements)\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"Trying to pop from an empty list\"):\n l = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)\n self.evaluate(l)\n\n def testPopUninitializedTensorUseListElementShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[2, 3], num_elements=3)\n _, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)\n l = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n l, e = self.evaluate((l, e))\n self.assertAllEqual(e, np.zeros((2, 3)))\n self.assertAllEqual(l, np.zeros((3, 2, 3)))\n\n def testPopUninitializedTensorUseSpecifiedElementShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[None, 3], num_elements=3)\n _, e = gen_list_ops.tensor_list_pop_back(\n l, element_dtype=dtypes.float32, element_shape=[4, 3])\n self.assertAllEqual(e, np.zeros((4, 3)))\n\n def testPopUninitializedTensorWithInvalidElementShapeFails(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"Trying to read an uninitialized tensor but \"\n \"element_shape is not fully defined\"):\n _, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)\n self.evaluate(e)\n\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[None, 2], num_elements=3)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"Incompatible shapes during merge: \\[1,3\\] vs. \\[\\?,2\\]\"):\n _, e = gen_list_ops.tensor_list_pop_back(\n l, element_dtype=dtypes.float32, element_shape=[1, 3])\n self.evaluate(e)\n\n def testPushGetGrad(self):\n with backprop.GradientTape() as tape:\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=None)\n c0 = constant_op.constant(5.0)\n c1 = constant_op.constant([10.0, 20.0])\n tape.watch(c0)\n tape.watch(c1)\n l = list_ops.tensor_list_push_back(l, c0)\n l = list_ops.tensor_list_push_back(l, c1)\n t1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t1), [10.0, 20.0])\n # t1 == c1 so the gradient should be [0., [1., 1.]]\n # This tests that the gradient of push_back correctly converts DT_INVALID\n # tensors to zeros. 
The list returned by the gradient of GetItem will\n # have only have tensor at index 1 set and others set to DT_INVALID.\n dt0, dt1 = tape.gradient(t1, [c0, c1])\n self.assertAllEqual(self.evaluate(dt1), [1.0, 1.0])\n self.assertEqual(self.evaluate(dt0), 0.0)\n\n def _testStack(self, max_num_elements):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[],\n max_num_elements=max_num_elements)\n l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))\n l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n if not context.executing_eagerly():\n self.assertAllEqual(t.shape.as_list(), [None])\n self.assertAllEqual(self.evaluate(t), [1.0, 2.0])\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 2))\n def testStack(self, max_num_elements):\n self._testStack(max_num_elements)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 2))\n @test_util.run_gpu_only\n def testStackGPU(self, max_num_elements):\n with context.device(\"gpu:0\"):\n self._testStack(max_num_elements)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 3))\n @test_util.run_deprecated_v1\n def testStackWithUnknownElementShape(self, max_num_elements):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=None,\n max_num_elements=max_num_elements)\n l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))\n l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))\n\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [1.0, 2.0])\n\n # Should raise an error when the element tensors do not all have the same\n # shape.\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"Incompatible ranks during merge: 0 vs. 1\"):\n l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 3))\n @test_util.run_deprecated_v1\n def testStackWithPartiallyDefinedElementShape(self, max_num_elements):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[None],\n max_num_elements=max_num_elements)\n l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0]))\n l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0]))\n\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [[1.0], [2.0]])\n\n # Should raise an error when the element tensors do not all have the same\n # shape.\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"Incompatible shapes during merge: \\[1\\] vs. 
\\[2\\]\"):\n l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 2))\n @test_util.run_deprecated_v1\n def testStackEmptyList(self, max_num_elements):\n # Should be able to stack empty lists with fully defined element_shape.\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[1, 2],\n max_num_elements=max_num_elements)\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t).shape, (0, 1, 2))\n\n # Should not be able to stack empty lists with partially defined\n # element_shape.\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"non-fully-defined\"):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[None, 2],\n max_num_elements=max_num_elements)\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n # Should not be able to stack empty lists with undefined element_shape.\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"non-fully-defined\"):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=None,\n max_num_elements=max_num_elements)\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def _testStackWithUninitializedTensors(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[], num_elements=3)\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.assertAllEqual(t, [0., 0., 0.])\n\n def testStackWithUninitializedTensors(self):\n self._testStackWithUninitializedTensors()\n\n @test_util.run_gpu_only\n def testStackWithUninitializedTensorsGpu(self):\n with context.device(\"gpu:0\"):\n self._testStackWithUninitializedTensors()\n\n def _testStackWithUninitializedTensorsInferShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n l = list_ops.tensor_list_set_item(l, 1, [1., 2.])\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.assertAllEqual(t, [[0., 0.], [1., 2.], [0., 0.]])\n\n def testStackWithUninitializedTensorsInferShape(self):\n self._testStackWithUninitializedTensorsInferShape()\n\n @test_util.run_gpu_only\n def testStackWithUninitializedTensorsInferShapeGpu(self):\n with context.device(\"gpu:0\"):\n self._testStackWithUninitializedTensorsInferShape()\n\n def testStackReservedListWithNoElementsAndPartialElementShapeFails(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError, \"Tried to stack list which only contains \"\n \"uninitialized tensors and has a \"\n \"non-fully-defined element_shape: <unknown>\"):\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testStackUsingSpecifiedElementShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n t = gen_list_ops.tensor_list_stack(\n l, element_dtype=dtypes.float32, element_shape=[])\n if context.executing_eagerly():\n self.assertEqual(t.shape.as_list(), [3])\n else:\n self.assertEqual(t.shape.as_list(), [None])\n self.assertAllEqual(self.evaluate(t), np.zeros((3,)))\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n 
(\"WithMaxNumElements\", 2))\n def testGatherGrad(self, max_num_elements):\n with backprop.GradientTape() as tape:\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[],\n max_num_elements=max_num_elements)\n c0 = constant_op.constant(1.0)\n tape.watch(c0)\n l = list_ops.tensor_list_push_back(l, c0)\n l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))\n t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [2.0, 1.0])\n s = (t[0] + t[1]) * (t[0] + t[1])\n dt = tape.gradient(s, c0)\n self.assertAllEqual(self.evaluate(dt), 6.0)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 3))\n @test_util.run_deprecated_v1\n def testGatherWithUnknownElementShape(self, max_num_elements):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=None,\n max_num_elements=max_num_elements)\n l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))\n l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))\n l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))\n\n t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [2.0, 1.0])\n\n t = list_ops.tensor_list_gather(l, [2], element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [[3.0, 4.0]])\n\n # Should raise an error when the requested tensors do not all have the same\n # shape.\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"Incompatible ranks during merge: 0 vs. 1\"):\n t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)\n self.evaluate(t)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 3))\n @test_util.run_deprecated_v1\n def testGatherWithPartiallyDefinedElementShape(self, max_num_elements):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[None],\n max_num_elements=max_num_elements)\n l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0]))\n l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))\n l = list_ops.tensor_list_push_back(l, constant_op.constant([4.0, 5.0]))\n\n t = list_ops.tensor_list_gather(l, [0], element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [[1.0]])\n\n t = list_ops.tensor_list_gather(l, [1, 2], element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [[2.0, 3.0], [4.0, 5.0]])\n\n # Should raise an error when the requested tensors do not all have the same\n # shape.\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"Incompatible shapes during merge: \\[1\\] vs. 
\\[2\\]\"):\n t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)\n self.evaluate(t)\n\n @parameterized.named_parameters((\"NoMaxNumElements\", None),\n (\"WithMaxNumElements\", 3))\n @test_util.run_deprecated_v1\n def testGatherEmptyList(self, max_num_elements):\n # Should be able to gather from empty lists with fully defined\n # element_shape.\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[1, 2],\n max_num_elements=max_num_elements)\n t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)\n self.assertAllEqual((0, 1, 2), self.evaluate(t).shape)\n\n # Should not be able to gather from empty lists with partially defined\n # element_shape.\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"non-fully-defined\"):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=[None, 2],\n max_num_elements=max_num_elements)\n t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)\n self.evaluate(t)\n\n # Should not be able to gather from empty lists with undefined\n # element_shape.\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"non-fully-defined\"):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=None,\n max_num_elements=max_num_elements)\n t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testGatherGradWithNonContiguousIndices(self):\n with backprop.GradientTape(persistent=True) as tape:\n t = constant_op.constant([1.0, 2.0, 3.0])\n l = list_ops.tensor_list_from_tensor(t, element_shape=[])\n c = constant_op.constant(5.0)\n tape.watch(c)\n l = list_ops.tensor_list_set_item(l, 1, c)\n t = list_ops.tensor_list_gather(l, [1], element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [5.0])\n s = t[0] * t[0]\n dt = tape.gradient(s, c)\n self.assertAllEqual(self.evaluate(dt), 10.0)\n dl = tape.gradient(t, l)\n dl_length = list_ops.tensor_list_length(dl)\n self.assertAllEqual(self.evaluate(dl_length), 3)\n\n def _testGatherWithUninitializedTensors(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[], num_elements=3)\n t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [0., 0.])\n\n def testGatherWithUninitializedTensors(self):\n self._testGatherWithUninitializedTensors()\n\n @test_util.run_gpu_only\n def testGatherWithUninitializedTensorsGpu(self):\n with context.device(\"gpu:0\"):\n self._testGatherWithUninitializedTensors()\n\n def _testGatherWithUninitializedTensorsInferShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n l = list_ops.tensor_list_set_item(l, 1, [1., 2.])\n t = list_ops.tensor_list_gather(l, [1, 2], element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [[1., 2.], [0., 0.]])\n\n def testGatherWithUninitializedTensorsInferShape(self):\n self._testGatherWithUninitializedTensorsInferShape()\n\n @test_util.run_gpu_only\n def testGatherWithUninitializedTensorsInferShapeGpu(self):\n with context.device(\"gpu:0\"):\n self._testGatherWithUninitializedTensorsInferShape()\n\n def testGatherReservedListWithNoElementsAndPartialElementShapeFails(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"Tried to gather uninitialized tensors from a\"\n \" list with 
non-fully-defined element_shape\"):\n t = list_ops.tensor_list_gather(l, [0], element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testGatherUsingSpecifiedElementShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n t = gen_list_ops.tensor_list_gather(\n l, [0, 1, 2], element_dtype=dtypes.float32, element_shape=[])\n self.assertEqual(t.shape.as_list(), [3])\n self.assertAllEqual(self.evaluate(t), np.zeros((3,)))\n\n def testScatterOutputListSize(self):\n c0 = constant_op.constant([1.0, 2.0])\n l = list_ops.tensor_list_scatter(c0, [1, 3], [])\n # TensorListScatter should return a list with size largest index + 1.\n self.assertAllEqual(list_ops.tensor_list_length(l), 4)\n\n def testScatterOutputListSizeWithNumElementsSpecified(self):\n c0 = constant_op.constant([1.0, 2.0])\n l = gen_list_ops.tensor_list_scatter_v2(\n c0, [1, 3], list_ops._build_element_shape([]), num_elements=5)\n # TensorListScatter should return a list with size num_elements.\n self.assertAllEqual(list_ops.tensor_list_length(l), 5)\n\n def testScatterFailsWhenIndexLargerThanNumElements(self):\n c0 = constant_op.constant([1.0, 2.0])\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"TensorListScatter: Trying to scatter at index 3 in list with size 3\"):\n l = gen_list_ops.tensor_list_scatter_v2(\n c0, [1, 3], list_ops._build_element_shape([]), num_elements=3)\n self.evaluate(l)\n\n def testScatterFailsWithInvalidNumElements(self):\n c0 = constant_op.constant([1.0, 2.0])\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"TensorListScatter expects num_elements >= -1, found: -2\"):\n l = gen_list_ops.tensor_list_scatter_v2(\n c0, [1, 3], list_ops._build_element_shape([]), num_elements=-2)\n self.evaluate(l)\n\n def testScatterWithInvalidRowsInInputTensorFails(self):\n c0 = constant_op.constant([1.0, 2.0])\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"Invalid number of rows in input tensor. 
Expected: 3 Actual: 2\"):\n l = list_ops.tensor_list_scatter(c0, [1, 0, 2], [])\n self.evaluate(l)\n\n def testScatterWithNegativeIndicesFails(self):\n c0 = constant_op.constant([1.0, 2.0])\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"Indices in TensorListScatter must all be non-negative.\"):\n l = list_ops.tensor_list_scatter(c0, [-1, -2], element_shape=[])\n self.evaluate(l)\n\n def testScatterIntoExistingList(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[], num_elements=3)\n l = list_ops.tensor_list_scatter(tensor=[1.], indices=[0], element_shape=[])\n l = list_ops.tensor_list_scatter(\n tensor=[2., 3.], indices=[1, 2], element_shape=[], input_handle=l)\n self.assertAllEqual(\n list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),\n [1., 2., 3.])\n\n def testScatterGrad(self):\n with backprop.GradientTape() as tape:\n c0 = constant_op.constant([1.0, 2.0])\n tape.watch(c0)\n l = list_ops.tensor_list_scatter(c0, [1, 0], element_shape=[])\n t0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)\n t1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t0), 2.0)\n self.assertAllEqual(self.evaluate(t1), 1.0)\n loss = t0 * t0 + t1 * t1\n dt = tape.gradient(loss, c0)\n self.assertAllEqual(self.evaluate(dt), [2., 4.])\n\n def testScatterWithPartialReadGrad(self):\n with backprop.GradientTape() as tape:\n c0 = constant_op.constant([1.0, 2.0])\n tape.watch(c0)\n l = list_ops.tensor_list_scatter(c0, [1, 0], element_shape=[])\n t0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t0), 2.0)\n loss = t0 * t0\n dt = tape.gradient(loss, c0)\n self.assertAllEqual(self.evaluate(dt), [0., 4.])\n\n def testTensorListFromTensor(self):\n t = constant_op.constant([1.0, 2.0])\n l = list_ops.tensor_list_from_tensor(t, element_shape=[])\n e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)\n self.assertAllEqual(e, 1.0)\n l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)\n self.assertAllEqual(e, 2.0)\n l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)\n self.assertAllEqual(e, 1.0)\n self.assertAllEqual(list_ops.tensor_list_length(l), 0)\n\n @test_util.run_gpu_only\n def testFromTensorGPU(self):\n with context.device(\"gpu:0\"):\n self.testTensorListFromTensor()\n\n def testGetSetBool(self):\n t = constant_op.constant([True, False])\n l = list_ops.tensor_list_from_tensor(t, element_shape=[])\n e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.bool)\n self.assertAllEqual(self.evaluate(e0), True)\n l = list_ops.tensor_list_set_item(l, 0, False)\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.bool)\n self.assertAllEqual(self.evaluate(t), [False, False])\n\n @test_util.run_gpu_only\n def testGetSetBoolGPU(self):\n with context.device(\"gpu:0\"):\n self.testGetSetBool()\n\n def _testGetSetNumeric(self, dtype):\n t = constant_op.constant([1.0, 2.0], dtype=dtype)\n l = list_ops.tensor_list_from_tensor(t, element_shape=[])\n e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtype)\n self.assertAllEqual(self.evaluate(e0), 1.0)\n l = list_ops.tensor_list_set_item(\n l, 0, constant_op.constant(3.0, dtype=dtype))\n t = list_ops.tensor_list_stack(l, element_dtype=dtype)\n self.assertAllEqual(self.evaluate(t), [3.0, 2.0])\n\n @parameterized.parameters([dtypes.float32, dtypes.float64,\n dtypes.complex64, dtypes.complex128])\n def testGetSetNumeric(self, 
dtype):\n self._testGetSetNumeric(dtype)\n\n @parameterized.parameters([dtypes.float32, dtypes.float64,\n dtypes.complex64, dtypes.complex128])\n @test_util.run_gpu_only\n def testGetSetNumericGPU(self, dtype):\n with context.device(\"gpu:0\"):\n self._testGetSetNumeric(dtype)\n\n def testGetSetReserved(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[], num_elements=2)\n e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)\n self.assertAllEqual(e0, 0.0)\n l = list_ops.tensor_list_set_item(l, 0, 3.0)\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n self.assertAllEqual(t, [3.0, 0.0])\n\n @test_util.run_gpu_only\n def testGetSetReservedGPU(self):\n with context.device(\"gpu:0\"):\n self.testGetSetReserved()\n\n def testSetGetGrad(self):\n with backprop.GradientTape() as tape:\n t = constant_op.constant(5.)\n tape.watch(t)\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[], num_elements=3)\n l = list_ops.tensor_list_set_item(l, 1, 2. * t)\n e = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(e), 10.0)\n self.assertAllEqual(self.evaluate(tape.gradient(e, t)), 2.0)\n\n def testGetUninitializedTensorUseListElementShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[], num_elements=3)\n l = list_ops.tensor_list_set_item(l, 0, 5.)\n e1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)\n e2 = list_ops.tensor_list_get_item(l, 2, element_dtype=dtypes.float32)\n self.assertEqual(self.evaluate(e1), 0.)\n self.assertEqual(self.evaluate(e2), 0.)\n\n def testGetUninitializedTensorUseSpecifiedElementShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n e0 = gen_list_ops.tensor_list_get_item(\n l, 0, element_shape=[], element_dtype=dtypes.float32)\n e1 = gen_list_ops.tensor_list_get_item(\n l, 1, element_shape=[2, 3], element_dtype=dtypes.float32)\n self.assertEqual(e0.shape.as_list(), [])\n self.assertEqual(e1.shape.as_list(), [2, 3])\n self.assertEqual(self.evaluate(e0), 0.)\n self.assertAllEqual(self.evaluate(e1), np.zeros((2, 3)))\n\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[None, 3], num_elements=3)\n e1 = gen_list_ops.tensor_list_get_item(\n l, 1, element_shape=[2, 3], element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(e1), np.zeros((2, 3)))\n\n def testGetUninitializedTensorWithInvalidElementShapeFails(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"Trying to read an uninitialized tensor but \"\n \"element_shape is not fully defined\"):\n e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)\n self.evaluate(e0)\n\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[None, 2], num_elements=3)\n\n # In eager mode the shape mismatch is caught in the TensorListGetItem\n # kernel which raises an InvalidArgumentError.\n # In graph mode the shape mismatch is caught in the C++ shape inference\n # which raises a ValueError.\n if context.executing_eagerly():\n error_type = errors.InvalidArgumentError\n else:\n error_type = ValueError\n with self.assertRaisesRegex(error_type, r\"shapes\"):\n e0 = gen_list_ops.tensor_list_get_item(\n l, 0, element_dtype=dtypes.float32, element_shape=[1, 3])\n 
self.evaluate(e0)\n\n @test_util.run_deprecated_v1\n @test_util.enable_control_flow_v2\n def testSkipEagerSetItemIndexOutOfBounds(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=[])\n e0 = constant_op.constant(5.)\n l = list_ops.tensor_list_set_item(\n l, 0, 2. * e0, resize_if_index_out_of_bounds=True)\n l = list_ops.tensor_list_set_item(\n l, 1, 1., resize_if_index_out_of_bounds=True)\n t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n grad = gradients_impl.gradients(t, e0)[0]\n self.assertAllEqual(self.evaluate(grad), 2.)\n\n @test_util.run_deprecated_v1\n def testSetOnEmptyListWithMaxNumElementsFails(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=[], max_num_elements=3)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"Trying to modify element 0 in a list with 0 elements.\"):\n l = list_ops.tensor_list_set_item(l, 0, 1.)\n self.evaluate(l)\n\n def testUnknownShape(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=None)\n l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))\n l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0, 2.0]))\n l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(e), [1.0, 2.0])\n l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(e), 1.0)\n\n @test_util.run_gpu_only\n def testCPUGPUCopy(self):\n t = constant_op.constant([1.0, 2.0])\n l = list_ops.tensor_list_from_tensor(t, element_shape=[])\n with context.device(\"gpu:0\"):\n l_gpu = array_ops.identity(l)\n self.assertAllEqual(\n self.evaluate(\n list_ops.tensor_list_pop_back(\n l_gpu, element_dtype=dtypes.float32)[1]), 2.0)\n l_cpu = array_ops.identity(l_gpu)\n self.assertAllEqual(\n self.evaluate(\n list_ops.tensor_list_pop_back(\n l_cpu, element_dtype=dtypes.float32)[1]), 2.0)\n\n @test_util.run_gpu_only\n def testCPUGPUCopyNested(self):\n t = constant_op.constant([1.0, 2.0])\n child_l = list_ops.tensor_list_from_tensor(t, element_shape=[])\n l = list_ops.empty_tensor_list(\n element_shape=constant_op.constant([], dtype=dtypes.int32),\n element_dtype=dtypes.variant)\n l = list_ops.tensor_list_push_back(l, child_l)\n with context.device(\"gpu:0\"):\n l_gpu = array_ops.identity(l)\n _, child_l_gpu = list_ops.tensor_list_pop_back(\n l_gpu, element_dtype=dtypes.variant)\n self.assertAllEqual(\n self.evaluate(\n list_ops.tensor_list_pop_back(\n child_l_gpu, element_dtype=dtypes.float32)[1]), 2.0)\n l_cpu = array_ops.identity(l_gpu)\n _, child_l_cpu = list_ops.tensor_list_pop_back(\n l_cpu, element_dtype=dtypes.variant)\n self.assertAllEqual(\n self.evaluate(\n list_ops.tensor_list_pop_back(\n child_l_cpu, element_dtype=dtypes.float32)[1]), 2.0)\n\n def testGraphStack(self):\n with self.cached_session():\n tl = list_ops.empty_tensor_list(\n element_shape=constant_op.constant([1], dtype=dtypes.int32),\n element_dtype=dtypes.int32)\n tl = list_ops.tensor_list_push_back(tl, [1])\n self.assertAllEqual(\n self.evaluate(\n list_ops.tensor_list_stack(tl, element_dtype=dtypes.int32)),\n [[1]])\n\n def testSkipEagerStackInLoop(self):\n with self.cached_session():\n t1 = list_ops.empty_tensor_list(\n element_shape=constant_op.constant([], dtype=dtypes.int32),\n element_dtype=dtypes.int32)\n i = constant_op.constant(0, dtype=dtypes.int32)\n\n def body(i, t1):\n t1 = list_ops.tensor_list_push_back(t1, i)\n i += 1\n return i, t1\n\n i, t1 = 
control_flow_ops.while_loop(lambda i, t1: math_ops.less(i, 4),\n body, [i, t1])\n s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.int32)\n self.assertAllEqual(self.evaluate(s1), [0, 1, 2, 3])\n\n def testSkipEagerStackSwitchDtype(self):\n with self.cached_session():\n list_ = list_ops.empty_tensor_list(\n element_shape=constant_op.constant([], dtype=dtypes.int32),\n element_dtype=dtypes.int32)\n m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)\n\n def body(list_, m):\n list_ = control_flow_ops.cond(\n math_ops.equal(list_ops.tensor_list_length(list_), 0),\n lambda: list_ops.empty_tensor_list(m.shape, m.dtype), lambda: list_)\n list_ = list_ops.tensor_list_push_back(list_, m)\n return list_, m\n\n for _ in range(2):\n list_, m = body(list_, m)\n\n s1 = list_ops.tensor_list_stack(list_, element_dtype=dtypes.float32)\n np_s1 = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)\n self.assertAllEqual(self.evaluate(s1), np_s1)\n\n def testSkipEagerStackInLoopSwitchDtype(self):\n with self.cached_session():\n t1 = list_ops.empty_tensor_list(\n element_shape=constant_op.constant([], dtype=dtypes.int32),\n element_dtype=dtypes.int32)\n i = constant_op.constant(0, dtype=dtypes.float32)\n m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)\n\n def body(i, m, t1):\n t1 = control_flow_ops.cond(\n math_ops.equal(list_ops.tensor_list_length(t1), 0),\n lambda: list_ops.empty_tensor_list(m.shape, m.dtype), lambda: t1)\n\n t1 = list_ops.tensor_list_push_back(t1, m * i)\n i += 1.0\n return i, m, t1\n\n i, m, t1 = control_flow_ops.while_loop(\n lambda i, m, t1: math_ops.less(i, 4), body, [i, m, t1])\n s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.float32)\n np_s1 = np.vstack([np.arange(1, 4) * i for i in range(4)])\n self.assertAllEqual(self.evaluate(s1), np_s1)\n\n def testSerialize(self):\n worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]\n with ops.Graph().as_default(), session.Session(target=worker.target):\n with ops.device(\"/job:worker\"):\n t = constant_op.constant([[1.0], [2.0]])\n l = list_ops.tensor_list_from_tensor(t, element_shape=[1])\n with ops.device(\"/job:ps\"):\n l_ps = array_ops.identity(l)\n l_ps, e = list_ops.tensor_list_pop_back(\n l_ps, element_dtype=dtypes.float32)\n with ops.device(\"/job:worker\"):\n worker_e = array_ops.identity(e)\n self.assertAllEqual(self.evaluate(worker_e), [2.0])\n\n def testSerializeListWithInvalidTensors(self):\n worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]\n with ops.Graph().as_default(), session.Session(target=worker.target):\n with ops.device(\"/job:worker\"):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[], num_elements=2)\n l = list_ops.tensor_list_set_item(l, 0, 1.)\n with ops.device(\"/job:ps\"):\n l_ps = array_ops.identity(l)\n l_ps = list_ops.tensor_list_set_item(l_ps, 1, 2.)\n t = list_ops.tensor_list_stack(l_ps, element_dtype=dtypes.float32)\n with ops.device(\"/job:worker\"):\n worker_t = array_ops.identity(t)\n self.assertAllEqual(self.evaluate(worker_t), [1.0, 2.0])\n\n def testSerializeListWithUnknownRank(self):\n worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]\n with ops.Graph().as_default(), session.Session(target=worker.target):\n with ops.device(\"/job:worker\"):\n t = constant_op.constant([[1.0], [2.0]])\n l = list_ops.tensor_list_from_tensor(t, element_shape=None)\n with ops.device(\"/job:ps\"):\n l_ps = array_ops.identity(l)\n element_shape = list_ops.tensor_list_element_shape(\n l_ps, 
shape_type=dtypes.int32)\n with ops.device(\"/job:worker\"):\n element_shape = array_ops.identity(element_shape)\n self.assertEqual(self.evaluate(element_shape), -1)\n\n def testSerializeListWithMaxNumElements(self):\n worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]\n with ops.Graph().as_default(), session.Session(target=worker.target):\n with ops.device(\"/job:worker\"):\n l = list_ops.empty_tensor_list(\n element_shape=None,\n element_dtype=dtypes.float32,\n max_num_elements=2)\n l = list_ops.tensor_list_push_back(l, 1.)\n with ops.device(\"/job:ps\"):\n l_ps = array_ops.identity(l)\n l_ps = list_ops.tensor_list_push_back(l_ps, 2.)\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"Tried to push item into a full list\"):\n with ops.device(\"/job:worker\"):\n l_worker = array_ops.identity(l_ps)\n l_worker = list_ops.tensor_list_push_back(l_worker, 3.0)\n self.evaluate(l_worker)\n\n def testPushPopGradients(self):\n with backprop.GradientTape() as tape:\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=[])\n c = constant_op.constant(1.0)\n tape.watch(c)\n l = list_ops.tensor_list_push_back(l, c)\n l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)\n e = 2 * e\n self.assertAllEqual(self.evaluate(tape.gradient(e, [c])[0]), 2.0)\n\n def testStackFromTensorGradients(self):\n with backprop.GradientTape() as tape:\n c = constant_op.constant([1.0, 2.0])\n tape.watch(c)\n l = list_ops.tensor_list_from_tensor(c, element_shape=[])\n c2 = list_ops.tensor_list_stack(\n l, element_dtype=dtypes.float32, num_elements=2)\n result = c2 * 2.0\n grad = tape.gradient(result, [c])[0]\n self.assertAllEqual(self.evaluate(grad), [2.0, 2.0])\n\n def testGetSetGradients(self):\n with backprop.GradientTape() as tape:\n c = constant_op.constant([1.0, 2.0])\n tape.watch(c)\n l = list_ops.tensor_list_from_tensor(c, element_shape=[])\n c2 = constant_op.constant(3.0)\n tape.watch(c2)\n l = list_ops.tensor_list_set_item(l, 0, c2)\n e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)\n ee = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)\n y = e * e + ee * ee\n grad_c, grad_c2 = tape.gradient(y, [c, c2])\n self.assertAllEqual(self.evaluate(grad_c), [0.0, 4.0])\n self.assertAllEqual(self.evaluate(grad_c2), 6.0)\n\n @test_util.run_deprecated_v1\n def testSetOutOfBounds(self):\n c = constant_op.constant([1.0, 2.0])\n l = list_ops.tensor_list_from_tensor(c, element_shape=[])\n with self.assertRaises(errors.InvalidArgumentError):\n self.evaluate(list_ops.tensor_list_set_item(l, 20, 3.0))\n\n @test_util.run_deprecated_v1\n def testSkipEagerSetItemWithMismatchedShapeFails(self):\n with self.cached_session() as sess:\n ph = array_ops.placeholder(dtypes.float32)\n c = constant_op.constant([1.0, 2.0])\n l = list_ops.tensor_list_from_tensor(c, element_shape=[])\n # Set a placeholder with unknown shape to satisfy the shape inference\n # at graph building time.\n l = list_ops.tensor_list_set_item(l, 0, ph)\n l_0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"incompatible shape\"):\n sess.run(l_0, {ph: [3.0]})\n\n def testResourceVariableScatterGather(self):\n c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)\n l = list_ops.tensor_list_from_tensor(c, element_shape=[])\n v = vs.get_variable(\"var\", initializer=[l] * 10, use_resource=True)\n v_r_0_stacked = list_ops.tensor_list_stack(v[0], dtypes.float32)\n 
self.evaluate(v.initializer)\n self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_0_stacked))\n v_r_sparse_stacked = list_ops.tensor_list_stack(\n v.sparse_read(0), dtypes.float32)\n self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_sparse_stacked))\n l_new_0 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])\n l_new_1 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])\n updated_v = state_ops.scatter_update(v, [3, 5], [l_new_0, l_new_1])\n updated_v_elems = array_ops.unstack(updated_v)\n updated_v_stacked = [\n list_ops.tensor_list_stack(el, dtypes.float32) for el in updated_v_elems\n ]\n expected = ([[1.0, 2.0]] * 3 + [[3.0, 4.0], [1.0, 2.0], [5.0, 6.0]] +\n [[1.0, 2.0]] * 4)\n self.assertAllEqual(self.evaluate(updated_v_stacked), expected)\n\n @test_util.run_deprecated_v1\n def testConcat(self):\n c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)\n l0 = list_ops.tensor_list_from_tensor(c, element_shape=[])\n l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[])\n l_batch_0 = array_ops.stack([l0, l1])\n l_batch_1 = array_ops.stack([l1, l0])\n\n l_concat_01 = list_ops.tensor_list_concat_lists(\n l_batch_0, l_batch_1, element_dtype=dtypes.float32)\n l_concat_10 = list_ops.tensor_list_concat_lists(\n l_batch_1, l_batch_0, element_dtype=dtypes.float32)\n l_concat_00 = list_ops.tensor_list_concat_lists(\n l_batch_0, l_batch_0, element_dtype=dtypes.float32)\n l_concat_11 = list_ops.tensor_list_concat_lists(\n l_batch_1, l_batch_1, element_dtype=dtypes.float32)\n\n expected_0 = [[1.0, 2.0], [-1.0]]\n expected_1 = [[-1.0], [1.0, 2.0]]\n expected_00 = [[1.0, 2.0, 1.0, 2.0], [-1.0, -1.0]]\n expected_01 = [[1.0, 2.0, -1.0], [-1.0, 1.0, 2.0]]\n expected_10 = [[-1.0, 1.0, 2.0], [1.0, 2.0, -1.0]]\n expected_11 = [[-1.0, -1.0], [1.0, 2.0, 1.0, 2.0]]\n\n for i, (concat, expected) in enumerate(zip(\n [l_batch_0, l_batch_1,\n l_concat_00, l_concat_01, l_concat_10, l_concat_11],\n [expected_0, expected_1,\n expected_00, expected_01, expected_10, expected_11])):\n splitted = array_ops.unstack(concat)\n splitted_stacked_ret = self.evaluate(\n (list_ops.tensor_list_stack(splitted[0], dtypes.float32),\n list_ops.tensor_list_stack(splitted[1], dtypes.float32)))\n print(\"Test concat %d: %s, %s, %s, %s\"\n % (i, expected[0], splitted_stacked_ret[0],\n expected[1], splitted_stacked_ret[1]))\n self.assertAllClose(expected[0], splitted_stacked_ret[0])\n self.assertAllClose(expected[1], splitted_stacked_ret[1])\n\n # Concatenating mismatched shapes fails.\n with self.assertRaises((errors.InvalidArgumentError, ValueError)):\n self.evaluate(\n list_ops.tensor_list_concat_lists(\n l_batch_0,\n list_ops.empty_tensor_list([], dtypes.float32),\n element_dtype=dtypes.float32))\n\n if context.executing_eagerly():\n expected_error = (\n errors.InvalidArgumentError,\n \"element shapes are not identical at index 0\")\n else:\n expected_error = (ValueError, \"Shapes must be equal rank\")\n with self.assertRaisesRegex(*expected_error):\n l_batch_of_vec_tls = array_ops.stack(\n [list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])] * 2)\n self.evaluate(\n list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_vec_tls,\n element_dtype=dtypes.float32))\n\n if context.executing_eagerly():\n expected_error = (errors.InvalidArgumentError,\n r\"input_b\\[0\\].dtype != element_dtype.\")\n else:\n expected_error = (ValueError, \"input_b.type != element_dtype\")\n with self.assertRaisesRegex(*expected_error):\n l_batch_of_int_tls = array_ops.stack(\n 
[list_ops.tensor_list_from_tensor([1], element_shape=[])] * 2)\n self.evaluate(\n list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_int_tls,\n element_dtype=dtypes.float32))\n\n @test_util.run_deprecated_v1\n def testPushBackBatch(self):\n c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)\n l0 = list_ops.tensor_list_from_tensor(c, element_shape=[])\n l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[])\n l_batch = array_ops.stack([l0, l1])\n l_push = list_ops.tensor_list_push_back_batch(l_batch, [3.0, 4.0])\n l_unstack = array_ops.unstack(l_push)\n l0_ret = list_ops.tensor_list_stack(l_unstack[0], dtypes.float32)\n l1_ret = list_ops.tensor_list_stack(l_unstack[1], dtypes.float32)\n self.assertAllClose([1.0, 2.0, 3.0], self.evaluate(l0_ret))\n self.assertAllClose([-1.0, 4.0], self.evaluate(l1_ret))\n\n with ops.control_dependencies([l_push]):\n l_unstack_orig = array_ops.unstack(l_batch)\n l0_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[0],\n dtypes.float32)\n l1_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[1],\n dtypes.float32)\n\n # Check that without aliasing, push_back_batch still works; and\n # that it doesn't modify the input.\n l0_r_v, l1_r_v, l0_orig_v, l1_orig_v = self.evaluate(\n (l0_ret, l1_ret, l0_orig_ret, l1_orig_ret))\n self.assertAllClose([1.0, 2.0, 3.0], l0_r_v)\n self.assertAllClose([-1.0, 4.0], l1_r_v)\n self.assertAllClose([1.0, 2.0], l0_orig_v)\n self.assertAllClose([-1.0], l1_orig_v)\n\n # Pushing back mismatched shapes fails.\n with self.assertRaises((errors.InvalidArgumentError, ValueError)):\n self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, []))\n\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n \"incompatible shape to a list at index 0\"):\n self.evaluate(\n list_ops.tensor_list_push_back_batch(l_batch, [[3.0], [4.0]]))\n\n if context.executing_eagerly():\n expected_error = (errors.InvalidArgumentError, \"Invalid data type\")\n else:\n expected_error = (ValueError, \"wrong element dtype\")\n with self.assertRaisesRegex(*expected_error):\n self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [3, 4]))\n\n def testZerosLike(self):\n for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,\n dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,\n dtypes.float64, dtypes.complex64, dtypes.complex128,\n dtypes.bool):\n l_empty = list_ops.empty_tensor_list(\n element_dtype=dtype, element_shape=[])\n l_empty_zeros = array_ops.zeros_like(l_empty)\n t_empty_zeros = list_ops.tensor_list_stack(\n l_empty_zeros, element_dtype=dtype)\n\n l_full = list_ops.tensor_list_push_back(l_empty,\n math_ops.cast(0, dtype=dtype))\n l_full = list_ops.tensor_list_push_back(l_full,\n math_ops.cast(1, dtype=dtype))\n l_full_zeros = array_ops.zeros_like(l_full)\n t_full_zeros = list_ops.tensor_list_stack(\n l_full_zeros, element_dtype=dtype)\n\n self.assertAllEqual(self.evaluate(t_empty_zeros), [])\n self.assertAllEqual(\n self.evaluate(t_full_zeros), np.zeros(\n (2,), dtype=dtype.as_numpy_dtype))\n\n def testZerosLikeNested(self):\n for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,\n dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,\n dtypes.float64, dtypes.complex64, dtypes.complex128,\n dtypes.bool):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.variant, element_shape=[])\n\n sub_l = list_ops.empty_tensor_list(element_dtype=dtype, element_shape=[])\n l = list_ops.tensor_list_push_back(l, sub_l)\n sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(\n 1, 
dtype=dtype))\n l = list_ops.tensor_list_push_back(l, sub_l)\n sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(\n 2, dtype=dtype))\n l = list_ops.tensor_list_push_back(l, sub_l)\n\n # l : [[],\n # [1],\n # [1, 2]]\n #\n # l_zeros : [[],\n # [0],\n # [0, 0]]\n l_zeros = array_ops.zeros_like(l)\n\n outputs = []\n for _ in range(3):\n l_zeros, out = list_ops.tensor_list_pop_back(\n l_zeros, element_dtype=dtypes.variant)\n outputs.append(list_ops.tensor_list_stack(out, element_dtype=dtype))\n\n # Note: `outputs` contains popped values so the order is reversed.\n self.assertAllEqual(self.evaluate(outputs[2]), [])\n self.assertAllEqual(\n self.evaluate(outputs[1]), np.zeros((1,), dtype=dtype.as_numpy_dtype))\n self.assertAllEqual(\n self.evaluate(outputs[0]), np.zeros((2,), dtype=dtype.as_numpy_dtype))\n\n def testElementShape(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=None)\n shape = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32)\n self.assertEqual(self.evaluate(shape), -1)\n\n def testZerosLikeUninitialized(self):\n l0 = list_ops.tensor_list_reserve([], 3, element_dtype=dtypes.float32)\n l1 = list_ops.tensor_list_set_item(l0, 0, 1.) # [1., _, _]\n zeros_1 = array_ops.zeros_like(l1) # [0., _, _]\n l2 = list_ops.tensor_list_set_item(l1, 2, 2.) # [1., _, 2.]\n zeros_2 = array_ops.zeros_like(l2) # [0., _, 0.]\n\n # Gather indices with zeros in `zeros_1`.\n res_1 = list_ops.tensor_list_gather(\n zeros_1, [0], element_dtype=dtypes.float32)\n # Gather indices with zeros in `zeros_2`.\n res_2 = list_ops.tensor_list_gather(\n zeros_2, [0, 2], element_dtype=dtypes.float32)\n\n self.assertAllEqual(self.evaluate(res_1), [0.])\n self.assertAllEqual(self.evaluate(res_2), [0., 0.])\n\n @test_util.run_deprecated_v1\n def testSkipEagerTensorListGetItemGradAggregation(self):\n l = list_ops.tensor_list_reserve(\n element_shape=[], num_elements=1, element_dtype=dtypes.float32)\n x = constant_op.constant(1.0)\n l = list_ops.tensor_list_set_item(l, 0, x)\n l_read1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)\n l_read2 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)\n grad = gradients_impl.gradients([l_read1, l_read2], [x])\n with self.cached_session() as sess:\n self.assertSequenceEqual(self.evaluate(grad), [2.])\n\n @test_util.run_deprecated_v1\n def testSkipEagerBuildElementShape(self):\n fn = list_ops._build_element_shape\n # Unknown shape -> -1.\n self.assertEqual(fn(None), -1)\n self.assertEqual(fn(tensor_shape.unknown_shape()), -1)\n # Scalar shape -> [] with type int32.\n self.assertEqual(fn([]).dtype, dtypes.int32)\n self.assertEqual(fn(tensor_shape.TensorShape([])).dtype, dtypes.int32)\n self.assertAllEqual(self.evaluate(fn([])), np.array([], np.int32))\n self.assertAllEqual(\n self.evaluate(fn(tensor_shape.TensorShape([]))), np.array([], np.int32))\n # Tensor -> Tensor\n shape = constant_op.constant(1)\n self.assertIs(fn(shape), shape)\n # Shape with unknown dims -> shape list with -1's.\n shape = [None, 5]\n self.assertAllEqual(fn(shape), [-1, 5])\n self.assertAllEqual(fn(tensor_shape.TensorShape(shape)), [-1, 5])\n # Shape with unknown dims and tensor dims -> shape list with -1's and tensor\n # dims.\n t = array_ops.placeholder(dtypes.int32)\n shape = [None, 5, t]\n result = fn(shape)\n self.assertAllEqual(result[:2], [-1, 5])\n self.assertIs(result[2], t)\n\n def testAddN(self):\n l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])\n l2 = 
list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])\n l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])\n result = math_ops.add_n((l1, l2, l3))\n result_t = list_ops.tensor_list_stack(result, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(result_t), [9., 12.])\n\n def testAddNNestedList(self):\n l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])\n l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])\n l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])\n l4 = list_ops.tensor_list_from_tensor([7.0, 8.0], element_shape=[])\n a = list_ops.empty_tensor_list(\n element_dtype=dtypes.variant, element_shape=[])\n a = list_ops.tensor_list_push_back(a, l1)\n a = list_ops.tensor_list_push_back(a, l2)\n b = list_ops.empty_tensor_list(\n element_dtype=dtypes.variant, element_shape=[])\n b = list_ops.tensor_list_push_back(b, l3)\n b = list_ops.tensor_list_push_back(b, l4)\n result = math_ops.add_n((a, b))\n result_0 = list_ops.tensor_list_stack(\n list_ops.tensor_list_get_item(result, 0, element_dtype=dtypes.variant),\n element_dtype=dtypes.float32)\n result_1 = list_ops.tensor_list_stack(\n list_ops.tensor_list_get_item(result, 1, element_dtype=dtypes.variant),\n element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(result_0), [6., 8.])\n self.assertAllEqual(self.evaluate(result_1), [10., 12.])\n\n def testAddTensorListsFailsIfLeadingDimsMismatch(self):\n l1 = list_ops.tensor_list_reserve(\n element_shape=[], element_dtype=dtypes.float32, num_elements=2)\n l2 = list_ops.tensor_list_reserve(\n element_shape=[], element_dtype=dtypes.float32, num_elements=3)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"Trying to add two lists of tensors with different lengths\"):\n l = math_ops.add_n([l1, l2])\n self.evaluate(list_ops.tensor_list_stack(l, element_dtype=dtypes.float32))\n\n @test_util.run_v1_only(\"Uses placeholders\")\n def testSkipEagerAddTensorListsFailsIfElementShapesMismatch(self):\n with self.cached_session() as sess:\n # Use placeholders instead of constant values for shapes to prevent TF's\n # shape inference from catching this early.\n l1_element_shape = array_ops.placeholder(dtype=dtypes.int32)\n l2_element_shape = array_ops.placeholder(dtype=dtypes.int32)\n l1 = list_ops.tensor_list_reserve(\n element_shape=l1_element_shape,\n element_dtype=dtypes.float32,\n num_elements=3)\n l2 = list_ops.tensor_list_reserve(\n element_shape=l2_element_shape,\n element_dtype=dtypes.float32,\n num_elements=3)\n l = math_ops.add_n([l1, l2])\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"Trying to add two lists of tensors with incompatible element shapes\"\n ):\n sess.run(\n list_ops.tensor_list_stack(l, element_dtype=dtypes.float32), {\n l1_element_shape: [],\n l2_element_shape: [2]\n })\n\n @test_util.run_deprecated_v1\n def testSkipEagerConcatShapeInference(self):\n\n def BuildTensor(element_shape):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=element_shape)\n return list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n\n self.assertIsNone(BuildTensor(None).shape.rank)\n self.assertAllEqual(BuildTensor([None, 2, 3]).shape.as_list(), [None, 2, 3])\n self.assertAllEqual(\n BuildTensor([None, 2, None]).shape.as_list(), [None, 2, None])\n self.assertAllEqual(BuildTensor([1, 2, 3]).shape.as_list(), [None, 2, 3])\n\n def testConcatWithFullyDefinedElementShape(self):\n l = list_ops.empty_tensor_list(\n 
element_dtype=dtypes.float32, element_shape=[2, 2])\n l = list_ops.tensor_list_push_back(l, [[0., 1.], [2., 3.]])\n l = list_ops.tensor_list_push_back(l, [[4., 5.], [6., 7.]])\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.assertAllEqual(\n self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.], [6., 7.]])\n\n def testConcatWithNonFullyDefinedElementShape(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=[None, 2])\n l = list_ops.tensor_list_push_back(l, [[0., 1.]])\n l = list_ops.tensor_list_push_back(l, [[2., 3.], [4., 5.]])\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.]])\n\n def testConcatWithMismatchingTensorShapesFails(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=None)\n l = list_ops.tensor_list_push_back(l, [[0., 1.]])\n l = list_ops.tensor_list_push_back(l, [[2.], [4.]])\n with self.assertRaisesRegex(\n errors.InvalidArgumentError, r\"Incompatible shapes during merge: \"\n r\"\\[2\\] vs. \\[1\\]\"):\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testConcatEmptyListWithFullyDefinedElementShape(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=[5, 2])\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t).shape, (0, 2))\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=[None, 2])\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.assertAllEqual(self.evaluate(t).shape, (0, 2))\n\n def testConcatEmptyListWithUnknownElementShapeFails(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=None)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"All except the first dimension must be fully\"\n \" defined when concating an empty tensor list\"):\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testConcatEmptyListWithPartiallyDefinedElementShapeFails(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=[2, None])\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"All except the first dimension must be fully\"\n \" defined when concating an empty tensor list\"):\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testConcatListWithScalarElementShapeFails(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32,\n element_shape=tensor_shape.TensorShape([]))\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"Concat requires elements to be at least vectors, \"\n \"found scalars instead\"):\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testConcatListWithScalarElementsFails(self):\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=None)\n l1 = list_ops.tensor_list_push_back(l, 1.)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError, \"Concat saw a scalar shape at index 0\"\n \" but requires at least vectors\"):\n t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)\n self.evaluate(t)\n l1 = list_ops.tensor_list_push_back(l, [1.])\n l1 = list_ops.tensor_list_push_back(l1, 2.)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError, \"Concat saw a scalar shape at index 1\"\n \" but requires at least vectors\"):\n t = 
list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testConcatWithUninitializedTensorsUseListElementShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[2, 3], num_elements=3)\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.assertAllEqual(np.zeros((6, 3)), t)\n\n def testConcatWithUninitializedTensorsUseProvidedElementShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n t = list_ops.tensor_list_concat(\n l, element_dtype=dtypes.float32, element_shape=(2, 3))\n self.assertAllEqual(np.zeros((6, 3)), t)\n\n def testConcatWithUninitializedTensorsUseProvidedElementShapeAndLengths(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n t, _ = gen_list_ops.tensor_list_concat_v2(\n l,\n element_dtype=dtypes.float32,\n element_shape=list_ops._build_element_shape((None, 3)),\n leading_dims=[2, 3, 5])\n self.assertAllEqual(np.zeros((10, 3)), t)\n l = list_ops.tensor_list_set_item(l, 1, [[2., 3.], [4., 5.], [6., 7.]])\n t, _ = gen_list_ops.tensor_list_concat_v2(\n l,\n element_dtype=dtypes.float32,\n element_shape=list_ops._build_element_shape((None, 2)),\n leading_dims=[2, 3, 4])\n self.assertAllEqual([[0., 0.], [0., 0.], [2., 3.], [4., 5.], [6., 7.],\n [0., 0.], [0., 0.], [0., 0.], [0., 0.]], t)\n\n def testConcatWithUninitializedTensorsInferShapeFromElements(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n l = list_ops.tensor_list_set_item(l, 1, [[2., 3.], [4., 5.], [6., 7.]])\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.assertAllEqual([[0., 0.], [0., 0.], [0., 0.], [2., 3.], [4., 5.],\n [6., 7.], [0., 0.], [0., 0.], [0., 0.]], t)\n\n def testConcatWithUninitializedTensorsFailsIfNoElementShape(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=None, num_elements=3)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"Trying to concat list with only uninitialized tensors \"\n r\"but element_shape_except_first_dim_ is not fully defined\"):\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testConcatWithUninitializedTensorsFailsIfNoInputLengths(self):\n l = list_ops.tensor_list_reserve(\n element_dtype=dtypes.float32, element_shape=[None, 3], num_elements=3)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"List contains uninitialized tensor at index 0\"\n r\" but leading_dims has only 0 elements.\"):\n t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)\n self.evaluate(t)\n\n def testEvenSplit(self):\n\n def RunTest(input_tensor, lengths, expected_stacked_output):\n l = list_ops.tensor_list_split(\n input_tensor, element_shape=None, lengths=lengths)\n self.assertAllEqual(\n list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),\n expected_stacked_output)\n\n RunTest([1., 2., 3.], [1, 1, 1], [[1.], [2.], [3.]])\n RunTest([1., 2., 3., 4.], [2, 2], [[1., 2.], [3., 4.]])\n RunTest([[1., 2.], [3., 4.]], [1, 1], [[[1., 2.]], [[3., 4.]]])\n\n def testUnevenSplit(self):\n l = list_ops.tensor_list_split([1., 2., 3., 4., 5],\n element_shape=None,\n lengths=[3, 2])\n self.assertAllEqual(list_ops.tensor_list_length(l), 2)\n self.assertAllEqual(\n list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32),\n [1., 2., 3.])\n self.assertAllEqual(\n 
list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32),\n [4., 5.])\n\n @test_util.run_deprecated_v1\n def testSkipEagerSplitWithInvalidTensorShapeFails(self):\n with self.cached_session():\n tensor = array_ops.placeholder(dtype=dtypes.float32)\n l = list_ops.tensor_list_split(tensor, element_shape=None, lengths=[1])\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"Tensor must be at least a vector, but saw shape: \\[\\]\"):\n l.eval({tensor: 1})\n\n @test_util.run_deprecated_v1\n def testSkipEagerSplitWithInvalidLengthsShapeFails(self):\n with self.cached_session():\n lengths = array_ops.placeholder(dtype=dtypes.int64)\n l = list_ops.tensor_list_split([1., 2.],\n element_shape=None,\n lengths=lengths)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"Expected lengths to be a vector, received shape: \\[\\]\"):\n l.eval({lengths: 1})\n\n def testSplitWithInvalidLengthsFails(self):\n with self.assertRaisesRegex(errors.InvalidArgumentError,\n r\"Invalid value in lengths: -1\"):\n l = list_ops.tensor_list_split([1., 2.],\n element_shape=None,\n lengths=[1, -1])\n self.evaluate(l)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"Attempting to slice \\[0, 3\\] from tensor with length 2\"):\n l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[3])\n self.evaluate(l)\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"Unused values in tensor. Length of tensor: 2 Values used: 1\"):\n l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[1])\n self.evaluate(l)\n\n @test_util.run_deprecated_v1\n def testSkipEagerSplitWithScalarElementShapeFails(self):\n with self.assertRaisesRegex(ValueError,\n r\"Shapes must be equal rank, but are 1 and 0\"):\n l = list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1])\n with self.cached_session():\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"TensorListSplit requires element_shape to be at least of rank 1, \"\n r\"but saw: \\[\\]\"):\n element_shape = array_ops.placeholder(dtype=dtypes.int32)\n l = list_ops.tensor_list_split([1., 2.],\n element_shape=element_shape,\n lengths=[1, 1])\n l.eval({element_shape: []})\n\n def testEagerOnlySplitWithScalarElementShapeFails(self):\n if context.executing_eagerly():\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"TensorListSplit requires element_shape to be at least of rank 1, \"\n r\"but saw: \\[\\]\"):\n list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1])\n\n @test_util.run_deprecated_v1\n def testSkipEagerSplitWithIncompatibleTensorShapeAndElementShapeFails(self):\n with self.assertRaisesRegex(ValueError,\n r\"Shapes must be equal rank, but are 2 and 1\"):\n l = list_ops.tensor_list_split([[1.], [2.]],\n element_shape=[1],\n lengths=[1, 1])\n\n with self.cached_session():\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"tensor shape \\[2,1\\] is not compatible with element_shape \\[1\\]\"):\n element_shape = array_ops.placeholder(dtype=dtypes.int32)\n l = list_ops.tensor_list_split([[1.], [2.]],\n element_shape=element_shape,\n lengths=[1, 1])\n l.eval({element_shape: [1]})\n\n def testEagerOnlySplitWithIncompatibleTensorShapeAndElementShapeFails(self):\n if context.executing_eagerly():\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n r\"tensor shape \\[2,1\\] is not compatible with element_shape \\[1\\]\"):\n list_ops.tensor_list_split([[1.], [2.]],\n element_shape=[1],\n lengths=[1, 1])\n\n def 
testResizeGrow(self):\n l = list_ops.tensor_list_from_tensor([1., 2.], element_shape=[])\n l = list_ops.tensor_list_resize(l, 4)\n self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 4)\n self.assertEqual(\n self.evaluate(\n list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)),\n 1.)\n self.assertEqual(\n self.evaluate(\n list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)),\n 2.)\n\n def testResizeShrink(self):\n l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[])\n l = list_ops.tensor_list_resize(l, 2)\n self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 2)\n self.assertAllEqual(\n self.evaluate(\n list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)),\n [1., 2.])\n\n def testResizeWithInvalidSizeFails(self):\n with self.assertRaisesRegex(\n errors.InvalidArgumentError,\n \"TensorListSlice expects size to be non-negative\"):\n l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[])\n l = list_ops.tensor_list_resize(l, -1)\n self.evaluate(l)\n\n @test_util.run_deprecated_v1\n @test_util.enable_control_flow_v2\n def testSkipEagerResizeGrad(self):\n t = constant_op.constant([1., 2., 3.])\n l = list_ops.tensor_list_from_tensor(t, element_shape=[])\n l = list_ops.tensor_list_set_item(\n l, 3, 4., resize_if_index_out_of_bounds=True)\n t1 = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)\n grad = gradients_impl.gradients(t1, t)[0]\n self.assertAllEqual(self.evaluate(grad), [1., 1., 1.])\n\n def testHandleDataAcrossFunctionCall(self):\n\n @def_function.function\n def func():\n t = constant_op.constant([1., 2., 3.])\n l = list_ops.tensor_list_from_tensor(t, element_shape=[])\n return l\n\n tensor_list = func()\n element = list_ops.tensor_list_get_item(\n tensor_list, 0, element_dtype=dtypes.float32)\n self.assertAllEqual(element.shape.as_list(), [])\n\n @test_util.run_gpu_only\n def testNestedListDevicetoDeviceCopy(self):\n if context.num_gpus() < 2:\n self.skipTest(\"Need at least 2 GPUs for this test, found %d\" %\n context.num_gpus())\n with ops.device(\"gpu:0\"):\n t = constant_op.constant([1.0, 2.0, 3.0])\n inner_l = list_ops.tensor_list_from_tensor(t, element_shape=[])\n outer_l = list_ops.empty_tensor_list(\n element_dtype=dtypes.variant, element_shape=[])\n outer_l = list_ops.tensor_list_push_back(outer_l, inner_l)\n\n # Stress test.\n for _ in range(1024):\n with ops.device(\"gpu:1\"):\n outer_l = array_ops.identity(outer_l)\n with ops.device(\"gpu:0\"):\n outer_l = array_ops.identity(outer_l)\n\n with ops.device(\"gpu:1\"):\n _, inner_l = list_ops.tensor_list_pop_back(\n outer_l, element_dtype=dtypes.variant)\n t = list_ops.tensor_list_stack(inner_l, element_dtype=dtypes.float32)\n self.assertAllEqual(t, [1.0, 2.0, 3.0])\n\n def testTensorListStrings(self):\n @def_function.function\n def f():\n return map_fn.map_fn(string_ops.string_upper,\n constant_op.constant([\"a\", \"b\", \"c\"]))\n\n self.assertAllEqual(f(), [b\"A\", b\"B\", b\"C\"])\n\n def testTensorListStringsNoInline(self):\n # Generator function output type is a variant with a host-only underlying\n # data type. 
\"ColocationGraph::AddHostOnlyDataTypesConstraints\" needs to\n # have \"deep op inspection\" to be able to correctly place the while loop\n # generated from map_fn.\n self.skipTest(\"b/150742232\")\n\n @function.defun_with_attributes(attributes={\"_noinline\": True})\n def generator():\n c = constant_op.constant([\"a\", \"b\", \"c\"])\n return list_ops.tensor_list_from_tensor(c, element_shape=[])\n\n @def_function.function\n def f():\n l = generator()\n\n def upper(i):\n e = list_ops.tensor_list_get_item(l, i, element_dtype=dtypes.string)\n return string_ops.string_upper(e)\n\n return map_fn.map_fn(\n upper, constant_op.constant([0, 1, 2]), dtype=dtypes.string)\n\n self.assertAllEqual(f(), [b\"A\", b\"B\", b\"C\"])\n\n def testPopBackGrad(self):\n # https://github.com/tensorflow/tensorflow/issues/37230\n\n @def_function.function\n def g(x):\n x_prod = constant_op.constant([1.])\n for unused_i in math_ops.range(3):\n x_prod = x_prod * x\n return x_prod\n\n x = constant_op.constant(1.)\n with backprop.GradientTape() as t:\n t.watch(x)\n with backprop.GradientTape() as tt:\n tt.watch(x)\n loss = g(x)\n jac = tt.gradient(loss, x)\n hess = t.gradient(jac, x)\n self.assertAllEqual(hess, 6.)\n\n def testTensorListElementShapeShapeInference(self):\n\n @def_function.function\n def f():\n l = list_ops.empty_tensor_list(\n element_dtype=dtypes.float32, element_shape=None)\n l_element_shape = list_ops.tensor_list_element_shape(l, dtypes.int32)\n self.assertIsNone(l_element_shape.shape.rank)\n shape_l = list_ops.empty_tensor_list(\n element_dtype=dtypes.int32, element_shape=l_element_shape.shape)\n shape_l = list_ops.tensor_list_push_back(shape_l, l_element_shape)\n return list_ops.tensor_list_pop_back(shape_l, dtypes.int32)[1]\n\n self.assertAllEqual(f(), -1)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.ops.state_ops.scatter_update", "tensorflow.python.ops.list_ops.tensor_list_element_shape", "tensorflow.python.ops.gen_list_ops.tensor_list_pop_back", "tensorflow.python.ops.list_ops.tensor_list_set_item", "tensorflow.python.ops.list_ops.tensor_list_concat_lists", "tensorflow.python.ops.list_ops.tensor_list_concat", "tensorflow.python.ops.gradients_impl.gradients", "tensorflow.python.ops.gen_list_ops.tensor_list_stack", "tensorflow.python.ops.gen_list_ops.tensor_list_get_item", "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.ops.list_ops._build_element_shape", "tensorflow.python.framework.constant_op.constant", "tensorflow.python.eager.context.num_gpus", "tensorflow.python.ops.list_ops.tensor_list_reserve", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.eager.backprop.GradientTape", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.list_ops.empty_tensor_list", "tensorflow.python.ops.list_ops.tensor_list_push_back_batch", "tensorflow.python.client.session.Session", "tensorflow.python.ops.list_ops.tensor_list_push_back", "tensorflow.python.ops.list_ops.tensor_list_pop_back", "tensorflow.python.ops.list_ops.tensor_list_gather", "tensorflow.python.ops.array_ops.unstack", "tensorflow.python.framework.ops.device", "numpy.zeros", "tensorflow.python.ops.list_ops.tensor_list_stack", "tensorflow.python.eager.function.defun_with_attributes", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.list_ops.tensor_list_resize", "tensorflow.python.ops.math_ops.less", "tensorflow.python.ops.list_ops.tensor_list_from_tensor", "numpy.arange", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.eager.context.device", "tensorflow.python.ops.list_ops.tensor_list_get_item", "tensorflow.python.ops.list_ops.tensor_list_split", "tensorflow.python.ops.list_ops.tensor_list_scatter", "tensorflow.python.ops.string_ops.string_upper", "tensorflow.python.framework.test_util.create_local_cluster", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.list_ops.tensor_list_length", "tensorflow.python.platform.test.main", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.math_ops.add_n", "numpy.array", "tensorflow.python.ops.gen_list_ops.tensor_list_gather" ] ]
frogbam/sasa_img
[ "031768c02ce40149f9cd82c3301404d8890dd03f" ]
[ "pcdet/models/dense_heads/point_head_vote.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\nfrom ...ops.iou3d_nms import iou3d_nms_utils\nfrom ...ops.roiaware_pool3d import roiaware_pool3d_utils\nfrom ...ops.pointnet2.pointnet2_batch import pointnet2_modules\nfrom ...utils import box_coder_utils, box_utils, common_utils, loss_utils\nfrom .point_head_template import PointHeadTemplate\n\n\nclass PointHeadVote(PointHeadTemplate):\n \"\"\"\n A simple vote-based detection head, which is used for 3DSSD.\n Reference Paper: https://arxiv.org/abs/2002.10187\n 3DSSD: Point-based 3D Single Stage Object Detector\n \"\"\"\n def __init__(self, num_class, input_channels, model_cfg, predict_boxes_when_training=False, **kwargs):\n super().__init__(model_cfg=model_cfg, num_class=num_class)\n use_bn = self.model_cfg.USE_BN\n self.predict_boxes_when_training = predict_boxes_when_training\n\n self.vote_cfg = self.model_cfg.VOTE_CONFIG\n self.vote_layers = self.make_fc_layers(\n input_channels=input_channels,\n output_channels=3,\n fc_list=self.vote_cfg.VOTE_FC\n )\n\n self.sa_cfg = self.model_cfg.SA_CONFIG\n channel_in, channel_out = input_channels, 0\n\n mlps = self.sa_cfg.MLPS.copy()\n for idx in range(mlps.__len__()):\n mlps[idx] = [channel_in] + mlps[idx]\n channel_out += mlps[idx][-1]\n\n self.SA_module = pointnet2_modules.PointnetSAModuleFSMSG(\n radii=self.sa_cfg.RADIUS,\n nsamples=self.sa_cfg.NSAMPLE,\n mlps=mlps,\n use_xyz=True,\n bn=use_bn\n )\n\n channel_in = channel_out\n shared_fc_list = []\n for k in range(0, self.model_cfg.SHARED_FC.__len__()):\n shared_fc_list.extend([\n nn.Conv1d(channel_in, self.model_cfg.SHARED_FC[k], kernel_size=1, bias=False),\n nn.BatchNorm1d(self.model_cfg.SHARED_FC[k]),\n nn.ReLU()\n ])\n channel_in = self.model_cfg.SHARED_FC[k]\n\n self.shared_fc_layer = nn.Sequential(*shared_fc_list)\n channel_in = self.model_cfg.SHARED_FC[-1]\n\n self.cls_layers = self.make_fc_layers(\n input_channels=channel_in,\n output_channels=num_class if not self.model_cfg.LOSS_CONFIG.LOSS_CLS == 'CrossEntropy' else num_class + 1,\n fc_list=self.model_cfg.CLS_FC\n )\n\n target_cfg = self.model_cfg.TARGET_CONFIG\n self.box_coder = getattr(box_coder_utils, target_cfg.BOX_CODER)(\n **target_cfg.BOX_CODER_CONFIG\n )\n self.reg_layers = self.make_fc_layers(\n input_channels=channel_in,\n output_channels=self.box_coder.code_size,\n fc_list=self.model_cfg.REG_FC\n )\n\n self.init_weights(weight_init='xavier')\n\n def init_weights(self, weight_init='xavier'):\n if weight_init == 'kaiming':\n init_func = nn.init.kaiming_normal_\n elif weight_init == 'xavier':\n init_func = nn.init.xavier_normal_\n elif weight_init == 'normal':\n init_func = nn.init.normal_\n else:\n raise NotImplementedError\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv1d):\n if weight_init == 'normal':\n init_func(m.weight, mean=0, std=0.001)\n else:\n init_func(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def build_losses(self, losses_cfg):\n # classification loss\n if losses_cfg.LOSS_CLS.startswith('WeightedBinaryCrossEntropy'):\n self.add_module(\n 'cls_loss_func',\n loss_utils.WeightedBinaryCrossEntropyLoss()\n )\n elif losses_cfg.LOSS_CLS == 'WeightedCrossEntropy':\n self.add_module(\n 'cls_loss_func',\n loss_utils.WeightedCrossEntropyLoss()\n )\n elif losses_cfg.LOSS_CLS == 'FocalLoss':\n self.add_module(\n 'cls_loss_func',\n loss_utils.SigmoidFocalClassificationLoss(\n **losses_cfg.get('LOSS_CLS_CONFIG', {})\n )\n )\n else:\n raise NotImplementedError\n\n # 
regression loss\n if losses_cfg.LOSS_REG == 'WeightedSmoothL1Loss':\n self.add_module(\n 'reg_loss_func',\n loss_utils.WeightedSmoothL1Loss(\n code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None),\n **losses_cfg.get('LOSS_REG_CONFIG', {})\n )\n )\n elif losses_cfg.LOSS_REG == 'WeightedL1Loss':\n self.add_module(\n 'reg_loss_func',\n loss_utils.WeightedL1Loss(\n code_weights=losses_cfg.LOSS_WEIGHTS.get('code_weights', None)\n )\n )\n else:\n raise NotImplementedError\n\n # sasa loss\n loss_sasa_cfg = losses_cfg.get('LOSS_SASA_CONFIG', None)\n if loss_sasa_cfg is not None:\n self.enable_sasa = True\n self.add_module(\n 'loss_point_sasa',\n loss_utils.PointSASALoss(**loss_sasa_cfg)\n )\n else:\n self.enable_sasa = False\n\n def make_fc_layers(self, input_channels, output_channels, fc_list):\n fc_layers = []\n pre_channel = input_channels\n for k in range(0, fc_list.__len__()):\n fc_layers.extend([\n nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False),\n nn.BatchNorm1d(fc_list[k]),\n nn.ReLU()\n ])\n pre_channel = fc_list[k]\n fc_layers.append(nn.Conv1d(pre_channel, output_channels, kernel_size=1, bias=True))\n fc_layers = nn.Sequential(*fc_layers)\n return fc_layers\n\n def assign_stack_targets_simple(self, points, gt_boxes, extend_gt_boxes=None, set_ignore_flag=True):\n \"\"\"\n Args:\n points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]\n gt_boxes: (B, M, 8)\n extend_gt_boxes: (B, M, 8), required if set ignore flag\n set_ignore_flag:\n Returns:\n point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignore\n point_reg_labels: (N1 + N2 + N3 + ..., 3), corresponding object centroid\n \"\"\"\n assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)\n assert len(gt_boxes.shape) == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)\n assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3, \\\n 'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)\n assert not set_ignore_flag or extend_gt_boxes is not None\n batch_size = gt_boxes.shape[0]\n bs_idx = points[:, 0]\n point_cls_labels = points.new_zeros(points.shape[0]).long()\n point_reg_labels = gt_boxes.new_zeros((points.shape[0], 3))\n for k in range(batch_size):\n bs_mask = (bs_idx == k)\n points_single = points[bs_mask][:, 1:4]\n point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())\n box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(\n points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()\n ).long().squeeze(dim=0)\n box_fg_flag = (box_idxs_of_pts >= 0)\n\n if extend_gt_boxes is not None:\n extend_box_idx_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(\n points_single.unsqueeze(dim=0), extend_gt_boxes[k:k + 1, :, 0:7].contiguous()\n ).long().squeeze(dim=0)\n fg_flag = box_fg_flag\n ignore_flag = fg_flag ^ (extend_box_idx_of_pts >= 0)\n point_cls_labels_single[ignore_flag] = -1\n\n gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[box_fg_flag]]\n point_cls_labels_single[box_fg_flag] = 1\n point_cls_labels[bs_mask] = point_cls_labels_single\n\n point_reg_labels_single = point_reg_labels.new_zeros((bs_mask.sum(), 3))\n point_reg_labels_single[box_fg_flag] = gt_box_of_fg_points[:, 0:3]\n point_reg_labels[bs_mask] = point_reg_labels_single\n\n targets_dict = {\n 'point_cls_labels': point_cls_labels,\n 'point_reg_labels': point_reg_labels,\n }\n return targets_dict\n\n def assign_targets_simple(self, points, gt_boxes, extra_width=None, set_ignore_flag=True):\n \"\"\"\n Args:\n points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]\n 
gt_boxes: (B, M, 8)\n extra_width: (dx, dy, dz) extra width applied to gt boxes\n set_ignore_flag:\n Returns:\n point_vote_labels: (N1 + N2 + N3 + ..., 3)\n \"\"\"\n assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)\n assert points.shape.__len__() in [2], 'points.shape=%s' % str(points.shape)\n batch_size = gt_boxes.shape[0]\n extend_gt_boxes = box_utils.enlarge_box3d(\n gt_boxes.view(-1, gt_boxes.shape[-1]), extra_width=extra_width\n ).view(batch_size, -1, gt_boxes.shape[-1]) \\\n if extra_width is not None else gt_boxes\n if set_ignore_flag:\n targets_dict = self.assign_stack_targets_simple(points=points, gt_boxes=gt_boxes,\n extend_gt_boxes=extend_gt_boxes,\n set_ignore_flag=set_ignore_flag)\n else:\n targets_dict = self.assign_stack_targets_simple(points=points, gt_boxes=extend_gt_boxes,\n set_ignore_flag=set_ignore_flag)\n return targets_dict\n\n def assign_stack_targets_mask(self, points, gt_boxes, extend_gt_boxes=None,\n set_ignore_flag=True, use_ball_constraint=False, central_radius=2.0):\n \"\"\"\n Args:\n points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]\n gt_boxes: (B, M, 8)\n extend_gt_boxes: [B, M, 8]\n set_ignore_flag:\n use_ball_constraint:\n central_radius:\n Returns:\n point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored\n point_reg_labels: (N1 + N2 + N3 + ..., code_size)\n point_box_labels: (N1 + N2 + N3 + ..., 7)\n \"\"\"\n assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)\n assert len(gt_boxes.shape) == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)\n assert extend_gt_boxes is None or len(extend_gt_boxes.shape) == 3, \\\n 'extend_gt_boxes.shape=%s' % str(extend_gt_boxes.shape)\n assert set_ignore_flag != use_ball_constraint, 'Choose one only!'\n batch_size = gt_boxes.shape[0]\n bs_idx = points[:, 0]\n point_cls_labels = gt_boxes.new_zeros(points.shape[0]).long()\n point_reg_labels = gt_boxes.new_zeros((points.shape[0], self.box_coder.code_size))\n point_box_labels = gt_boxes.new_zeros((points.shape[0], gt_boxes.size(2) - 1))\n for k in range(batch_size):\n bs_mask = (bs_idx == k)\n points_single = points[bs_mask][:, 1:4]\n point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())\n box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(\n points_single.unsqueeze(dim=0), gt_boxes[k:k + 1, :, 0:7].contiguous()\n ).long().squeeze(dim=0)\n box_fg_flag = (box_idxs_of_pts >= 0)\n if set_ignore_flag:\n extend_box_idxs_of_pts = roiaware_pool3d_utils.points_in_boxes_gpu(\n points_single.unsqueeze(dim=0), extend_gt_boxes[k:k+1, :, 0:7].contiguous()\n ).long().squeeze(dim=0)\n fg_flag = box_fg_flag\n ignore_flag = fg_flag ^ (extend_box_idxs_of_pts >= 0)\n point_cls_labels_single[ignore_flag] = -1\n elif use_ball_constraint:\n box_centers = gt_boxes[k][box_idxs_of_pts][:, 0:3].clone()\n ball_flag = ((box_centers - points_single).norm(dim=1) < central_radius)\n fg_flag = box_fg_flag & ball_flag\n ignore_flag = fg_flag ^ box_fg_flag\n point_cls_labels_single[ignore_flag] = -1\n else:\n raise NotImplementedError\n\n gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]\n point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long()\n point_cls_labels[bs_mask] = point_cls_labels_single\n\n if gt_box_of_fg_points.shape[0] > 0:\n point_reg_labels_single = point_reg_labels.new_zeros((bs_mask.sum(), self.box_coder.code_size))\n fg_point_box_labels = self.box_coder.encode_torch(\n 
gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag],\n gt_classes=gt_box_of_fg_points[:, -1].long()\n )\n point_reg_labels_single[fg_flag] = fg_point_box_labels\n point_reg_labels[bs_mask] = point_reg_labels_single\n\n point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), gt_boxes.size(2) - 1))\n point_box_labels_single[fg_flag] = gt_box_of_fg_points[:, :-1]\n point_box_labels[bs_mask] = point_box_labels_single\n\n targets_dict = {\n 'point_cls_labels': point_cls_labels,\n 'point_reg_labels': point_reg_labels,\n 'point_box_labels': point_box_labels\n }\n return targets_dict\n\n def assign_stack_targets_iou(self, points, pred_boxes, gt_boxes,\n pos_iou_threshold=0.5, neg_iou_threshold=0.35):\n \"\"\"\n Args:\n points: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]\n pred_boxes: (N, 7/8)\n gt_boxes: (B, M, 8)\n pos_iou_threshold:\n neg_iou_threshold:\n Returns:\n point_cls_labels: (N1 + N2 + N3 + ...), long type, 0:background, -1:ignored\n point_reg_labels: (N1 + N2 + N3 + ..., code_size)\n point_box_labels: (N1 + N2 + N3 + ..., 7)\n \"\"\"\n assert len(points.shape) == 2 and points.shape[1] == 4, 'points.shape=%s' % str(points.shape)\n assert len(pred_boxes.shape) == 2 and pred_boxes.shape[1] >= 7, 'pred_boxes.shape=%s' % str(pred_boxes.shape)\n assert len(gt_boxes.shape) == 3 and gt_boxes.shape[2] == 8, 'gt_boxes.shape=%s' % str(gt_boxes.shape)\n batch_size = gt_boxes.shape[0]\n bs_idx = points[:, 0]\n point_cls_labels = gt_boxes.new_zeros(pred_boxes.shape[0]).long()\n point_reg_labels = gt_boxes.new_zeros((pred_boxes.shape[0], self.box_coder.code_size))\n point_box_labels = gt_boxes.new_zeros((pred_boxes.shape[0], 7))\n for k in range(batch_size):\n bs_mask = (bs_idx == k)\n points_single = points[bs_mask][:, 1:4]\n pred_boxes_single = pred_boxes[bs_mask]\n point_cls_labels_single = point_cls_labels.new_zeros(bs_mask.sum())\n pred_boxes_iou = iou3d_nms_utils.boxes_iou3d_gpu(\n pred_boxes_single,\n gt_boxes[k][:, :7]\n )\n pred_boxes_iou, box_idxs_of_pts = torch.max(pred_boxes_iou, dim=-1)\n fg_flag = pred_boxes_iou > pos_iou_threshold\n ignore_flag = (pred_boxes_iou > neg_iou_threshold) ^ fg_flag\n gt_box_of_fg_points = gt_boxes[k][box_idxs_of_pts[fg_flag]]\n point_cls_labels_single[fg_flag] = 1 if self.num_class == 1 else gt_box_of_fg_points[:, -1].long()\n point_cls_labels_single[ignore_flag] = -1\n point_cls_labels[bs_mask] = point_cls_labels_single\n\n if gt_box_of_fg_points.shape[0] > 0:\n point_reg_labels_single = point_reg_labels.new_zeros((bs_mask.sum(), self.box_coder.code_size))\n fg_point_box_labels = self.box_coder.encode_torch(\n gt_boxes=gt_box_of_fg_points[:, :-1], points=points_single[fg_flag],\n gt_classes=gt_box_of_fg_points[:, -1].long()\n )\n point_reg_labels_single[fg_flag] = fg_point_box_labels\n point_reg_labels[bs_mask] = point_reg_labels_single\n\n point_box_labels_single = point_box_labels.new_zeros((bs_mask.sum(), 7))\n point_box_labels_single[fg_flag] = gt_box_of_fg_points[:, :-1]\n point_box_labels[bs_mask] = point_box_labels_single\n\n targets_dict = {\n 'point_cls_labels': point_cls_labels,\n 'point_reg_labels': point_reg_labels,\n 'point_box_labels': point_box_labels\n }\n return targets_dict\n\n def assign_targets(self, input_dict):\n \"\"\"\n Args:\n input_dict:\n batch_size:\n point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]\n gt_boxes (optional): (B, M, 8)\n Returns:\n point_part_labels: (N1 + N2 + N3 + ..., 3)\n \"\"\"\n assign_method = self.model_cfg.TARGET_CONFIG.ASSIGN_METHOD # mask or iou\n if assign_method == 
'mask':\n points = input_dict['point_vote_coords']\n gt_boxes = input_dict['gt_boxes']\n assert points.shape.__len__() == 2, 'points.shape=%s' % str(points.shape)\n assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)\n central_radius = self.model_cfg.TARGET_CONFIG.get('GT_CENTRAL_RADIUS', 2.0)\n targets_dict = self.assign_stack_targets_mask(\n points=points, gt_boxes=gt_boxes,\n set_ignore_flag=False, use_ball_constraint=True, central_radius=central_radius\n )\n elif assign_method == 'iou':\n points = input_dict['point_vote_coords']\n pred_boxes = input_dict['point_box_preds']\n gt_boxes = input_dict['gt_boxes']\n assert points.shape.__len__() == 2, 'points.shape=%s' % str(points.shape)\n assert gt_boxes.shape.__len__() == 3, 'gt_boxes.shape=%s' % str(gt_boxes.shape)\n assert pred_boxes.shape.__len__() == 2, 'pred_boxes.shape=%s' % str(pred_boxes.shape)\n pos_iou_threshold = self.model_cfg.TARGET_CONFIG.POS_IOU_THRESHOLD\n neg_iou_threshold = self.model_cfg.TARGET_CONFIG.NEG_IOU_THRESHOLD\n targets_dict = self.assign_stack_targets_iou(\n points=points, pred_boxes=pred_boxes, gt_boxes=gt_boxes,\n pos_iou_threshold=pos_iou_threshold, neg_iou_threshold=neg_iou_threshold\n )\n else:\n raise NotImplementedError\n\n return targets_dict\n\n def get_vote_layer_loss(self, tb_dict=None):\n pos_mask = self.forward_ret_dict['vote_cls_labels'] > 0\n vote_reg_labels = self.forward_ret_dict['vote_reg_labels']\n vote_reg_preds = self.forward_ret_dict['point_vote_coords']\n\n reg_weights = pos_mask.float()\n pos_normalizer = pos_mask.sum().float()\n reg_weights /= torch.clamp(pos_normalizer, min=1.0)\n\n vote_loss_reg_src = self.reg_loss_func(\n vote_reg_preds[None, ...],\n vote_reg_labels[None, ...],\n weights=reg_weights[None, ...])\n vote_loss_reg = vote_loss_reg_src.sum()\n\n loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS\n vote_loss_reg = vote_loss_reg * loss_weights_dict['vote_reg_weight']\n if tb_dict is None:\n tb_dict = {}\n tb_dict.update({'vote_loss_reg': vote_loss_reg.item()})\n return vote_loss_reg, tb_dict\n\n @torch.no_grad()\n def generate_centerness_label(self, point_base, point_box_labels, pos_mask, epsilon=1e-6):\n \"\"\"\n Args:\n point_base: (N1 + N2 + N3 + ..., 3)\n point_box_labels: (N1 + N2 + N3 + ..., 7)\n pos_mask: (N1 + N2 + N3 + ...)\n epsilon:\n Returns:\n centerness_label: (N1 + N2 + N3 + ...)\n \"\"\"\n centerness = point_box_labels.new_zeros(pos_mask.shape)\n\n point_box_labels = point_box_labels[pos_mask, :]\n canonical_xyz = point_base[pos_mask, :] - point_box_labels[:, :3]\n rys = point_box_labels[:, -1]\n canonical_xyz = common_utils.rotate_points_along_z(\n canonical_xyz.unsqueeze(dim=1), -rys\n ).squeeze(dim=1)\n\n distance_front = point_box_labels[:, 3] / 2 - canonical_xyz[:, 0]\n distance_back = point_box_labels[:, 3] / 2 + canonical_xyz[:, 0]\n distance_left = point_box_labels[:, 4] / 2 - canonical_xyz[:, 1]\n distance_right = point_box_labels[:, 4] / 2 + canonical_xyz[:, 1]\n distance_top = point_box_labels[:, 5] / 2 - canonical_xyz[:, 2]\n distance_bottom = point_box_labels[:, 5] / 2 + canonical_xyz[:, 2]\n\n centerness_l = torch.min(distance_front, distance_back) / torch.max(distance_front, distance_back)\n centerness_w = torch.min(distance_left, distance_right) / torch.max(distance_left, distance_right)\n centerness_h = torch.min(distance_top, distance_bottom) / torch.max(distance_top, distance_bottom)\n centerness_pos = torch.clamp(centerness_l * centerness_w * centerness_h, min=epsilon) ** (1 / 3.0)\n\n 
centerness[pos_mask] = centerness_pos\n\n return centerness\n\n def get_axis_aligned_iou_loss_lidar(self, pred_boxes: torch.Tensor, gt_boxes: torch.Tensor):\n \"\"\"\n Args:\n pred_boxes: (N, 7) float Tensor.\n gt_boxes: (N, 7) float Tensor.\n Returns:\n iou_loss: (N) float Tensor.\n \"\"\"\n assert pred_boxes.shape[0] == gt_boxes.shape[0]\n\n pos_p, len_p, *cps = torch.split(pred_boxes, 3, dim=-1)\n pos_g, len_g, *cgs = torch.split(gt_boxes, 3, dim=-1)\n\n len_p = torch.clamp(len_p, min=1e-5)\n len_g = torch.clamp(len_g, min=1e-5)\n vol_p = len_p.prod(dim=-1)\n vol_g = len_g.prod(dim=-1)\n\n min_p, max_p = pos_p - len_p / 2, pos_p + len_p / 2\n min_g, max_g = pos_g - len_g / 2, pos_g + len_g / 2\n\n min_max = torch.min(max_p, max_g)\n max_min = torch.max(min_p, min_g)\n diff = torch.clamp(min_max - max_min, min=0)\n intersection = diff.prod(dim=-1)\n union = vol_p + vol_g - intersection\n iou_axis_aligned = intersection / torch.clamp(union, min=1e-5)\n\n iou_loss = 1 - iou_axis_aligned\n return iou_loss\n\n def get_corner_loss_lidar(self, pred_boxes: torch.Tensor, gt_boxes: torch.Tensor):\n \"\"\"\n Args:\n pred_boxes: (N, 7) float Tensor.\n gt_boxes: (N, 7) float Tensor.\n Returns:\n corner_loss: (N) float Tensor.\n \"\"\"\n assert pred_boxes.shape[0] == gt_boxes.shape[0]\n\n pred_box_corners = box_utils.boxes_to_corners_3d(pred_boxes)\n gt_box_corners = box_utils.boxes_to_corners_3d(gt_boxes)\n\n gt_boxes_flip = gt_boxes.clone()\n gt_boxes_flip[:, 6] += np.pi\n gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_boxes_flip)\n # (N, 8, 3)\n corner_loss = loss_utils.WeightedSmoothL1Loss.smooth_l1_loss(pred_box_corners - gt_box_corners, 1.0)\n corner_loss_flip = loss_utils.WeightedSmoothL1Loss.smooth_l1_loss(pred_box_corners - gt_box_corners_flip, 1.0)\n corner_loss = torch.min(corner_loss.sum(dim=2), corner_loss_flip.sum(dim=2))\n\n return corner_loss.mean(dim=1)\n\n def get_cls_layer_loss(self, tb_dict=None):\n point_cls_labels = self.forward_ret_dict['point_cls_labels'].view(-1)\n point_cls_preds = self.forward_ret_dict['point_cls_preds'].view(-1, self.num_class)\n\n positives = point_cls_labels > 0\n negatives = point_cls_labels == 0\n cls_weights = positives * 1.0 + negatives * 1.0\n\n one_hot_targets = point_cls_preds.new_zeros(*list(point_cls_labels.shape), self.num_class + 1)\n one_hot_targets.scatter_(-1, (point_cls_labels * (point_cls_labels >= 0).long()).unsqueeze(dim=-1).long(), 1.0)\n self.forward_ret_dict['point_cls_labels_onehot'] = one_hot_targets\n\n loss_cfgs = self.model_cfg.LOSS_CONFIG\n if 'WithCenterness' in loss_cfgs.LOSS_CLS:\n point_base = self.forward_ret_dict['point_vote_coords']\n point_box_labels = self.forward_ret_dict['point_box_labels']\n centerness_label = self.generate_centerness_label(point_base, point_box_labels, positives)\n \n loss_cls_cfg = loss_cfgs.get('LOSS_CLS_CONFIG', None)\n centerness_min = loss_cls_cfg['centerness_min'] if loss_cls_cfg is not None else 0.0\n centerness_max = loss_cls_cfg['centerness_max'] if loss_cls_cfg is not None else 1.0\n centerness_label = centerness_min + (centerness_max - centerness_min) * centerness_label\n \n one_hot_targets *= centerness_label.unsqueeze(dim=-1)\n\n point_loss_cls = self.cls_loss_func(point_cls_preds, one_hot_targets[..., 1:], weights=cls_weights)\n\n loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS\n point_loss_cls = point_loss_cls * loss_weights_dict['point_cls_weight']\n if tb_dict is None:\n tb_dict = {}\n tb_dict.update({\n 'point_pos_num': positives.sum().item()\n })\n return 
point_loss_cls, cls_weights, tb_dict # point_loss_cls: (N)\n\n def get_box_layer_loss(self, tb_dict=None):\n pos_mask = self.forward_ret_dict['point_cls_labels'] > 0\n point_reg_preds = self.forward_ret_dict['point_reg_preds']\n point_reg_labels = self.forward_ret_dict['point_reg_labels']\n\n reg_weights = pos_mask.float()\n\n loss_weights_dict = self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS\n if tb_dict is None:\n tb_dict = {}\n\n point_loss_offset_reg = self.reg_loss_func(\n point_reg_preds[None, :, :6],\n point_reg_labels[None, :, :6],\n weights=reg_weights[None, ...]\n )\n point_loss_offset_reg = point_loss_offset_reg.sum(dim=-1).squeeze()\n\n if hasattr(self.box_coder, 'pred_velo') and self.box_coder.pred_velo:\n point_loss_velo_reg = self.reg_loss_func(\n point_reg_preds[None, :, 6 + 2 * self.box_coder.angle_bin_num:8 + 2 * self.box_coder.angle_bin_num],\n point_reg_labels[None, :, 6 + 2 * self.box_coder.angle_bin_num:8 + 2 * self.box_coder.angle_bin_num],\n weights=reg_weights[None, ...]\n )\n point_loss_velo_reg = point_loss_velo_reg.sum(dim=-1).squeeze()\n point_loss_offset_reg = point_loss_offset_reg + point_loss_velo_reg\n\n point_loss_offset_reg *= loss_weights_dict['point_offset_reg_weight']\n\n if isinstance(self.box_coder, box_coder_utils.PointBinResidualCoder):\n point_angle_cls_labels = \\\n point_reg_labels[:, 6:6 + self.box_coder.angle_bin_num]\n point_loss_angle_cls = F.cross_entropy( # angle bin cls\n point_reg_preds[:, 6:6 + self.box_coder.angle_bin_num],\n point_angle_cls_labels.argmax(dim=-1), reduction='none') * reg_weights\n\n point_angle_reg_preds = point_reg_preds[:, 6 + self.box_coder.angle_bin_num:6 + 2 * self.box_coder.angle_bin_num]\n point_angle_reg_labels = point_reg_labels[:, 6 + self.box_coder.angle_bin_num:6 + 2 * self.box_coder.angle_bin_num]\n point_angle_reg_preds = (point_angle_reg_preds * point_angle_cls_labels).sum(dim=-1, keepdim=True)\n point_angle_reg_labels = (point_angle_reg_labels * point_angle_cls_labels).sum(dim=-1, keepdim=True)\n point_loss_angle_reg = self.reg_loss_func(\n point_angle_reg_preds[None, ...],\n point_angle_reg_labels[None, ...],\n weights=reg_weights[None, ...]\n )\n point_loss_angle_reg = point_loss_angle_reg.squeeze()\n\n point_loss_angle_cls *= loss_weights_dict['point_angle_cls_weight']\n point_loss_angle_reg *= loss_weights_dict['point_angle_reg_weight']\n\n point_loss_box = point_loss_offset_reg + point_loss_angle_cls + point_loss_angle_reg # (N)\n else:\n point_angle_reg_preds = point_reg_preds[:, 6:]\n point_angle_reg_labels = point_reg_labels[:, 6:]\n point_loss_angle_reg = self.reg_loss_func(\n point_angle_reg_preds[None, ...],\n point_angle_reg_labels[None, ...],\n weights=reg_weights[None, ...]\n )\n point_loss_angle_reg *= loss_weights_dict['point_angle_reg_weight']\n point_loss_box = point_loss_offset_reg + point_loss_angle_reg\n\n if reg_weights.sum() > 0:\n point_box_preds = self.forward_ret_dict['point_box_preds']\n point_box_labels = self.forward_ret_dict['point_box_labels']\n point_loss_box_aux = 0\n\n if self.model_cfg.LOSS_CONFIG.get('AXIS_ALIGNED_IOU_LOSS_REGULARIZATION', False):\n point_loss_iou = self.get_axis_aligned_iou_loss_lidar(\n point_box_preds[pos_mask, :],\n point_box_labels[pos_mask, :]\n )\n point_loss_iou *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['point_iou_weight']\n point_loss_box_aux = point_loss_box_aux + point_loss_iou\n\n if self.model_cfg.LOSS_CONFIG.get('CORNER_LOSS_REGULARIZATION', False):\n point_loss_corner = self.get_corner_loss_lidar(\n point_box_preds[pos_mask, 0:7],\n 
point_box_labels[pos_mask, 0:7]\n )\n point_loss_corner *= self.model_cfg.LOSS_CONFIG.LOSS_WEIGHTS['point_corner_weight']\n point_loss_box_aux = point_loss_box_aux + point_loss_corner\n \n point_loss_box[pos_mask] = point_loss_box[pos_mask] + point_loss_box_aux\n\n return point_loss_box, reg_weights, tb_dict # point_loss_box: (N)\n\n def get_sasa_layer_loss(self, tb_dict=None):\n if self.enable_sasa:\n point_loss_sasa_list = self.loss_point_sasa.loss_forward(\n self.forward_ret_dict['point_sasa_preds'],\n self.forward_ret_dict['point_sasa_labels']\n )\n point_loss_sasa = 0\n tb_dict = dict()\n for i in range(len(point_loss_sasa_list)):\n cur_point_loss_sasa = point_loss_sasa_list[i]\n if cur_point_loss_sasa is None:\n continue\n point_loss_sasa = point_loss_sasa + cur_point_loss_sasa\n tb_dict['point_loss_sasa_layer_%d' % i] = point_loss_sasa_list[i].item()\n tb_dict['point_loss_sasa'] = point_loss_sasa.item()\n return point_loss_sasa, tb_dict\n else:\n return None, None\n\n def get_loss(self, tb_dict=None):\n tb_dict = {} if tb_dict is None else tb_dict\n point_loss_vote, tb_dict_0 = self.get_vote_layer_loss()\n\n point_loss_cls, cls_weights, tb_dict_1 = self.get_cls_layer_loss()\n point_loss_box, box_weights, tb_dict_2 = self.get_box_layer_loss()\n\n point_loss_cls = point_loss_cls.sum() / torch.clamp(cls_weights.sum(), min=1.0)\n point_loss_box = point_loss_box.sum() / torch.clamp(box_weights.sum(), min=1.0)\n tb_dict.update({\n 'point_loss_vote': point_loss_vote.item(),\n 'point_loss_cls': point_loss_cls.item(),\n 'point_loss_box': point_loss_box.item()\n })\n\n point_loss = point_loss_vote + point_loss_cls + point_loss_box\n tb_dict.update(tb_dict_0)\n tb_dict.update(tb_dict_1)\n tb_dict.update(tb_dict_2)\n\n point_loss_sasa, tb_dict_3 = self.get_sasa_layer_loss()\n if point_loss_sasa is not None:\n tb_dict.update(tb_dict_3)\n point_loss += point_loss_sasa\n return point_loss, tb_dict\n\n def forward(self, batch_dict):\n \"\"\"\n Args:\n batch_dict:\n batch_size:\n point_features: (N1 + N2 + N3 + ..., C)\n point_coords: (N1 + N2 + N3 + ..., 4) [bs_idx, x, y, z]\n point_scores (optional): (B, N)\n gt_boxes (optional): (B, M, 8)\n Returns:\n batch_dict:\n point_cls_scores: (N1 + N2 + N3 + ..., 1)\n point_part_offset: (N1 + N2 + N3 + ..., 3)\n \"\"\"\n batch_size = batch_dict['batch_size']\n\n point_coords = batch_dict['point_coords']\n point_features = batch_dict['point_features']\n\n batch_idx, point_coords = point_coords[:, 0], point_coords[:, 1:4]\n batch_idx = batch_idx.view(batch_size, -1, 1)\n point_coords = point_coords.view(batch_size, -1, 3).contiguous()\n point_features = point_features.reshape(\n batch_size,\n point_coords.size(1),\n -1\n ).permute(0, 2, 1).contiguous()\n\n # candidate points sampling\n sample_range = self.model_cfg.SAMPLE_RANGE\n sample_batch_idx = batch_idx[:, sample_range[0]:sample_range[1], :].contiguous()\n\n candidate_coords = point_coords[:, sample_range[0]:sample_range[1], :].contiguous()\n candidate_features = point_features[:, :, sample_range[0]:sample_range[1]].contiguous()\n\n # generate vote points\n vote_offsets = self.vote_layers(candidate_features) # (B, 3, N)\n vote_translation_range = np.array(self.vote_cfg.MAX_TRANSLATION_RANGE, dtype=np.float32)\n vote_translation_range = torch.from_numpy(vote_translation_range).cuda().unsqueeze(dim=0).unsqueeze(dim=-1)\n vote_offsets = torch.max(vote_offsets, -vote_translation_range)\n vote_offsets = torch.min(vote_offsets, vote_translation_range)\n vote_coords = candidate_coords + vote_offsets.permute(0, 
2, 1).contiguous()\n\n ret_dict = {'batch_size': batch_size,\n 'point_candidate_coords': candidate_coords.view(-1, 3).contiguous(),\n 'point_vote_coords': vote_coords.view(-1, 3).contiguous()}\n\n sample_batch_idx_flatten = sample_batch_idx.view(-1, 1).contiguous() # (N, 1)\n batch_dict['batch_index'] = sample_batch_idx_flatten.squeeze(-1)\n batch_dict['point_candidate_coords'] = torch.cat( # (N, 4)\n (sample_batch_idx_flatten, ret_dict['point_candidate_coords']), dim=-1)\n batch_dict['point_vote_coords'] = torch.cat( # (N, 4)\n (sample_batch_idx_flatten, ret_dict['point_vote_coords']), dim=-1)\n\n if self.training: # assign targets for vote loss\n extra_width = self.model_cfg.TARGET_CONFIG.get('VOTE_EXTRA_WIDTH', None)\n targets_dict = self.assign_targets_simple(batch_dict['point_candidate_coords'],\n batch_dict['gt_boxes'],\n extra_width=extra_width,\n set_ignore_flag=False)\n ret_dict['vote_cls_labels'] = targets_dict['point_cls_labels'] # (N)\n ret_dict['vote_reg_labels'] = targets_dict['point_reg_labels'] # (N, 3)\n\n _, point_features, _ = self.SA_module(\n point_coords,\n point_features,\n new_xyz=vote_coords\n )\n\n point_features = self.shared_fc_layer(point_features)\n point_cls_preds = self.cls_layers(point_features)\n point_reg_preds = self.reg_layers(point_features)\n\n point_cls_preds = point_cls_preds.permute(0, 2, 1).contiguous()\n point_cls_preds = point_cls_preds.view(-1, point_cls_preds.shape[-1]).contiguous()\n point_reg_preds = point_reg_preds.permute(0, 2, 1).contiguous()\n point_reg_preds = point_reg_preds.view(-1, point_reg_preds.shape[-1]).contiguous()\n\n point_cls_scores = torch.sigmoid(point_cls_preds)\n batch_dict['point_cls_scores'] = point_cls_scores\n\n point_box_preds = self.box_coder.decode_torch(point_reg_preds,\n ret_dict['point_vote_coords'])\n batch_dict['point_box_preds'] = point_box_preds\n\n ret_dict.update({'point_cls_preds': point_cls_preds,\n 'point_reg_preds': point_reg_preds,\n 'point_box_preds': point_box_preds,\n 'point_cls_scores': point_cls_scores})\n\n if self.training:\n targets_dict = self.assign_targets(batch_dict)\n ret_dict['point_cls_labels'] = targets_dict['point_cls_labels']\n ret_dict['point_reg_labels'] = targets_dict['point_reg_labels']\n ret_dict['point_box_labels'] = targets_dict['point_box_labels']\n\n if self.enable_sasa:\n point_sasa_labels = self.loss_point_sasa(\n batch_dict['point_coords_list'],\n batch_dict['point_scores_list'],\n batch_dict['gt_boxes']\n )\n ret_dict.update({\n 'point_sasa_preds': batch_dict['point_scores_list'],\n 'point_sasa_labels': point_sasa_labels\n })\n\n if not self.training or self.predict_boxes_when_training:\n point_cls_preds, point_box_preds = self.generate_predicted_boxes(\n points=batch_dict['point_vote_coords'][:, 1:4],\n point_cls_preds=point_cls_preds, point_box_preds=point_reg_preds\n )\n batch_dict['batch_cls_preds'] = point_cls_preds\n batch_dict['batch_box_preds'] = point_box_preds\n batch_dict['cls_preds_normalized'] = False\n\n self.forward_ret_dict = ret_dict\n\n return batch_dict\n" ]
[ [ "torch.min", "torch.split", "torch.nn.BatchNorm1d", "torch.nn.init.constant_", "torch.no_grad", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.from_numpy", "torch.nn.Sequential", "torch.max", "numpy.array", "torch.sigmoid", "torch.cat", "torch.clamp" ] ]
vlimant/NADE
[ "e2446c73250a99979c8710a8acbb14823a54bce0" ]
[ "deepnade/buml/NADE/NADE.py" ]
[ "from __future__ import division\nimport numpy as np\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.shared_randomstreams import RandomStreams\nfrom Model import *\nfrom Utils.Estimation import Estimation\nimport Utils\n\n\nclass NADE(Model):\n \"\"\"A NADE abstract class\"\"\"\n def __init__(self, n_visible, n_hidden, nonlinearity=\"RLU\"):\n self.theano_rng = RandomStreams(np.random.randint(2 ** 30))\n self.add_parameter(SizeParameter(\"n_visible\"))\n self.add_parameter(SizeParameter(\"n_hidden\"))\n self.add_parameter(NonLinearityParameter(\"nonlinearity\"))\n self.n_visible = n_visible\n self.n_hidden = n_hidden\n self.parameters[\"nonlinearity\"].set_value(nonlinearity)\n\n def logdensity(self, x):\n return self.compiled_logdensity(x)\n\n def logdensity_new(self, x):\n return self.compiled_logdensity_new(x)\n\n def gradients(self, x):\n return self.compiled_gradients(x)\n\n def gradients_new(self, x):\n return self.compiled_gradients_new(x)\n\n def sample(self):\n return self.compiled_sample()\n\n def estimate_loglikelihood_for_dataset(self, x_dataset, minibatch_size=1000):\n loglikelihood = 0.0\n loglikelihood_sq = 0.0\n n = 0\n iterator = x_dataset.iterator(batch_size=minibatch_size, get_smaller_final_batch=True)\n for x in iterator:\n x = x.T # VxB\n n += x.shape[1]\n ld = self.logdensity(x)\n loglikelihood += ld.sum()\n loglikelihood_sq += (ld ** 2).sum()\n return Estimation.sample_mean_from_sum_and_sum_sq(loglikelihood, loglikelihood_sq, n)\n\n def recompile(self):\n x = T.matrix('x', dtype=theano.config.floatX)\n logdensity, updates = self.sym_logdensity(x)\n # self.compiled_logdensity = theano.function([x], logdensity, allow_input_downcast = True, updates = updates, mode=theano.compile.MonitorMode(post_func=Utils.theano_helpers.detect_nan))\n self.compiled_logdensity = theano.function([x], logdensity, allow_input_downcast=True, updates=updates)\n# gradients, updates = self.sym_gradients(x)\n# self.compiled_gradients = theano.function([x], gradients, allow_input_downcast=True, updates=updates)\n\n def sym_logdensity(self, X):\n pass\n\n def sym_neg_loglikelihood_gradient(self, X):\n ret = self.sym_logdensity(X)\n if isinstance(ret, tuple):\n assert(len(ret) == 2)\n loglikelihood, updates = ret\n else:\n loglikelihood = ret\n updates = dict()\n loss = -loglikelihood.mean()\n # Gradients\n gradients = dict([(param, T.grad(loss, self.get_parameter(param))) for param in self.get_parameters_to_optimise()])\n return (loss, gradients, updates)\n\n @classmethod\n def create_from_params(cls, params):\n model = cls(params[\"n_visible\"], params[\"n_hidden\"], params[\"nonlinearity\"])\n model.set_parameters(params)\n return model\n\n\nclass MixtureNADE(NADE):\n \"\"\" An abstract NADE model, that outputs a mixture model for each element \"\"\"\n def __init__(self, n_visible, n_hidden, n_components, nonlinearity=\"RLU\"):\n NADE.__init__(self, n_visible, n_hidden, nonlinearity)\n self.add_parameter(SizeParameter(\"n_components\"))\n self.n_components = n_components\n\n @classmethod\n def create_from_params(cls, params):\n model = cls(params[\"n_visible\"], params[\"n_hidden\"], params[\"n_components\"], params[\"nonlinearity\"])\n model.set_parameters(params)\n return model\n" ]
[ [ "numpy.random.randint" ] ]
thesouther/MachineLearningAndMatrixAnalysis
[ "d756ffc56319e11076afa4bf185ea5825938a79d" ]
[ "language_model/model.py" ]
[ "# -*- coding: UTF-8 -*-\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.nn.utils import clip_grad_norm_\n \n# RNN语言模型\nclass RNN(nn.Module): #RNNLM类继承nn.Module类\n def __init__(self, vocab_size, embed_size, hidden_size, num_layers):\n super(RNN, self).__init__()\n #嵌入层 one-hot形式(vocab_size,1) -> (embed_size,1)\n self.embed = nn.Embedding(vocab_size, embed_size)\n #LSTM单元/循环单元\n self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)\n #输出层的全联接操作 \n self.linear = nn.Linear(hidden_size, vocab_size)\n \n def forward(self, x, h):\n # 词嵌入\n x = self.embed(x)\n \n # LSTM前向运算\n out,(h,c) = self.lstm(x,h)\n \n # 每个时间步骤上LSTM单元都会有一个输出,batch_size个样本并行计算(每个样本/序列长度一致) out (batch_size,sequence_length,hidden_size)\n # 把LSTM的输出结果变更为(batch_size*sequence_length, hidden_size)的维度\n out = out.reshape(out.size(0)*out.size(1),out.size(2))\n # 全连接\n out = self.linear(out) #(batch_size*sequence_length, hidden_size)->(batch_size*sequence_length, vacab_size)\n return out,(h,c)\n\n\n# Truncated backpropagation\ndef detach(states):\n return [state.detach() for state in states] \n\ndef test():\n import torch\n import torch.nn as nn\n from torch.nn.utils import clip_grad_norm_\n import numpy as np\n\n from data_helper import Corpus\n from config import Config\n conf = Config()\n\n device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')\n\n C = Corpus(conf)\n word2id = C.word2id\n vocab_size = len(word2id)\n print(\"vocab_size\", vocab_size)\n\n # 导入数据\n print(\"extracting data... \")\n train_data, valid_data, test_data = C.build_dataset(conf)\n\n train_size = train_data.size(1)\n \n # 实例化模型\n model = RNN(vocab_size, conf.embed_size, conf.hidden_size, conf.num_layers).to(device)\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=conf.learning_rate)\n states = (torch.zeros(conf.num_layers, conf.batch_size, conf.hidden_size).to(device),\n torch.zeros(conf.num_layers, conf.batch_size, conf.hidden_size).to(device))\n for i in range(2):\n batch_x = train_data[:, i:(i+conf.seq_length)].to(device)\n batch_y = train_data[:, (i+1) : ((i+1+conf.seq_length)%train_size)].to(device)\n\n # 前传\n states = detach(states)\n outputs,states = model(batch_x, states)\n print(\"outputs.size()\",outputs.size())\n print(batch_y.reshape(-1).size())\n loss = criterion(outputs, batch_y.reshape(-1))\n\nif __name__ == \"__main__\":\n test()" ]
[ [ "torch.nn.LSTM", "torch.nn.Linear", "torch.nn.Embedding", "torch.nn.CrossEntropyLoss", "torch.cuda.is_available", "torch.zeros" ] ]
Jason-Khan/ubdvss
[ "76cabfa642af1f659920de32827ea6c3fe008588" ]
[ "semantic_segmentation/keras_callbacks.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (С) ABBYY (BIT Software), 1993 - 2019. All rights reserved.\n\"\"\"\nВсе что связано с callbacks для обучения и валидации\n\"\"\"\nimport os\n\nimport keras.backend as K\nimport tensorflow as tf\nfrom keras import callbacks\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n\nfrom semantic_segmentation.model_runner import ModelRunner\n\n\nclass SingleSplitLogCallback(callbacks.TensorBoard):\n \"\"\"\n Callback для отдельного сохранения метрик в трейне/валидации\n Используется для отрисовки графиков обучения/валидации на одной оси\n \"\"\"\n ONLY_TRAIN_LOGS_MODE = 'train'\n ONLY_VALID_LOGS_MODE = 'valid'\n\n def __init__(self, log_dir, mode=ONLY_TRAIN_LOGS_MODE):\n super().__init__(log_dir=log_dir)\n assert mode in (SingleSplitLogCallback.ONLY_VALID_LOGS_MODE, SingleSplitLogCallback.ONLY_TRAIN_LOGS_MODE)\n self.mode = mode\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n if self.is_train_log_mode():\n filter_fn = lambda k: not k.startswith('val')\n map_fn = lambda k, v: (k, v)\n else:\n filter_fn = lambda k: k.startswith('val')\n map_fn = lambda k, v: ('_'.join(k.split('_')[1:]), v)\n logs = dict(map_fn(k, v) for (k, v) in logs.items() if filter_fn(k))\n super().on_epoch_end(epoch=epoch, logs=logs)\n\n def is_train_log_mode(self):\n return self.mode == SingleSplitLogCallback.ONLY_TRAIN_LOGS_MODE\n\n @classmethod\n def get_callbacks(cls, train_log_dir, valid_log_dir):\n return [\n cls(train_log_dir, cls.ONLY_TRAIN_LOGS_MODE),\n cls(valid_log_dir, cls.ONLY_VALID_LOGS_MODE)\n ]\n\n\nclass EvaluationCallback(SingleSplitLogCallback):\n \"\"\"\n Служит для подсчета целевых метрик и визуализации картинок в тензорборде при обучении\n \"\"\"\n\n def __init__(self, log_dir, net_config, batch_generator, max_evaluated_images=-1,\n mode=SingleSplitLogCallback.ONLY_TRAIN_LOGS_MODE):\n \"\"\"\n :param log_dir: путь до папки с логами (тензорборда)\n :param net_config:\n :param batch_generator: объект типа BatchGenerator с оцениваемой выборкой\n :param max_evaluated_images: сколько максимум картинок использовать для оценки,\n если -1 все что есть в выборке, иначе min(картинок в выборке, max_evaluated_images)\n :param mode: 'train' / 'valid'\n \"\"\"\n super().__init__(log_dir=log_dir, mode=mode)\n self.__net_config = net_config\n self.__batch_generator = batch_generator.generate(add_metainfo=True)\n self.__n_evaluated_images = batch_generator.get_images_per_epoch()\n if max_evaluated_images >= 0:\n self.__n_evaluated_images = min(self.__n_evaluated_images, max_evaluated_images)\n self.__epochs_count = 0\n self.__model_runner = ModelRunner(net_config=net_config, pixel_threshold=0.5)\n\n def on_epoch_end(self, epoch, logs=None):\n scalar_logs, visualizations = self.__model_runner.run(model=self.model,\n data_generator=self.__batch_generator,\n n_images=self.__n_evaluated_images,\n save_dir=None,\n save_visualizations=False)\n if not self.is_train_log_mode():\n # это такой чит чтобы в родителе отфильтровалось то что надо, а то что не надо наоборот осталось\n scalar_logs = dict(('val_' + k, v) for k, v in scalar_logs.items())\n\n for k, v in logs.items():\n scalar_logs[k] = v\n\n image_logs = dict()\n for key, value in visualizations.items():\n image_logs[f\"{self.mode}_{key}\"] = value\n if epoch < 1:\n for name, value in image_logs.items():\n images_placeholder = K.placeholder(shape=(None, None, None, 3), dtype=None, name=name)\n tf.summary.image(name, images_placeholder, max_outputs=10)\n\n summary_str = 
tf.summary.merge_all(\n key=tf.GraphKeys.SUMMARIES,\n scope=f\"{self.mode}.*\"\n )\n feed_dict = dict((\"{}:0\".format(key), value) for key, value in image_logs.items())\n self.writer.add_summary(self.sess.run([summary_str], feed_dict=feed_dict)[0], epoch)\n super().on_epoch_end(epoch, scalar_logs)\n\n @classmethod\n def get_callbacks(cls, net_config,\n train_log_dir, valid_log_dir,\n train_generator, valid_generator,\n max_evaluated_images):\n return [\n cls(train_log_dir, net_config, train_generator, max_evaluated_images, mode=cls.ONLY_TRAIN_LOGS_MODE),\n cls(valid_log_dir, net_config, valid_generator, max_evaluated_images, mode=cls.ONLY_VALID_LOGS_MODE)\n ]\n\n\ndef build_callbacks_list(log_dir, net_config, training_generator, validation_generator, max_evaluated_images=-1):\n \"\"\"\n Assembles the list of all callbacks for training\n :param log_dir: root directory of the training run\n :param net_config:\n :param training_generator: BatchGenerator over the training data\n :param validation_generator: BatchGenerator over the validation data\n :param max_evaluated_images: maximum number of images used to evaluate the target metrics\n :return:\n \"\"\"\n backup_dir = os.path.join(log_dir, \"backup\")\n os.makedirs(backup_dir, exist_ok=True)\n backup_checkpoint_callback = ModelCheckpoint(filepath=os.path.join(backup_dir, \"model_{epoch:03d}.h5\"))\n\n last_checkpoint_callback = ModelCheckpoint(filepath=os.path.join(log_dir, \"model.h5\"))\n best_checkpoint_callback = ModelCheckpoint(filepath=os.path.join(log_dir, \"model_best.h5\"), save_best_only=True)\n reduce_lr_callback = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=20, verbose=1, min_lr=1e-4)\n\n basic_callbacks = [\n last_checkpoint_callback,\n best_checkpoint_callback,\n backup_checkpoint_callback,\n reduce_lr_callback\n ]\n\n train_log_dir = os.path.join(log_dir, 'train')\n valid_log_dir = os.path.join(log_dir, 'valid')\n tensorboard_callbacks = EvaluationCallback.get_callbacks(\n net_config, train_log_dir, valid_log_dir, training_generator, validation_generator, max_evaluated_images)\n\n return basic_callbacks + tensorboard_callbacks\n" ]
[ [ "tensorflow.summary.image", "tensorflow.summary.merge_all" ] ]
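Usage sketch for the keras_callbacks.py record above. net_config, train_gen, valid_gen, and model are assumed repo objects (a NetConfig, two BatchGenerators, and a compiled Keras model) that are not constructed here; this only shows how the assembled callback list plugs into the Keras 2.x training loop that this TF1-era code targets.

# Hypothetical wiring; net_config, train_gen, valid_gen and model come from the repo.
callbacks = build_callbacks_list(
    log_dir="logs/run_001",
    net_config=net_config,
    training_generator=train_gen,
    validation_generator=valid_gen,
    max_evaluated_images=50,   # cap per-epoch evaluation cost
)
model.fit_generator(
    train_gen.generate(),
    steps_per_epoch=100,       # placeholder; depends on the generator's batching
    epochs=100,
    callbacks=callbacks,
)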
pyspextool/pyspextool
[ "7a56584d85f1f20449c65c118ce65cb9f3865124" ]
[ "pyspextool/readuspexfits.py" ]
[ "from numpy import empty as npempty\nfrom numpy import int8 as npint8\nfrom numpy import stack as npstack\nfrom numpy import squeeze as npsqueeze\nfrom numpy import where as npwhere\nfrom numpy import absolute as npabsolute\nfrom numpy import shape as npshape\nfrom astropy.io import fits\nimport re\nimport sys\nfrom gethdrinfo import gethdrinfo\nfrom combflagstack import combflagstack\nfrom imgpoly import imgpoly\nfrom uspexampcor import uspexampcor\nfrom idlrotate import idlrotate\n\n\n#\n#=============================================================================\n#\ndef readuspexfits(files,lininfo,keywords=None,pair=False,rotate=0,\\\n lincor=None,ampcor=False,clupdate=False):\n\n \"\"\"\n To read an (upgraded) SpeX FITS image file.\n\n Parameters\n ----------------\n files : list of str\n A list of fullpaths to FITS files.\n\n lininfo : dict {'bias':str,'max':int,'bit':int}\n information to identify pixels beyond range of linearity correction\n\n 'bias' is the fullpath to the bias frame\n 'max' maximum value in DN\n 'bit' the bit to set for pixels beyond `max`\n\n keywords : list of str, optional\n A list of FITS keywords to retain \n\n pair : {False, True}, optional\n Set to pair subtract the images. \n\n rotate : {0,1,2,3,4,5,6,7}, optional \n Direction Transpose? Rotation Counterclockwise\n -------------------------------------------------\n\n 0 No None\n 1 No 90 deg\n 2 No 180 deg\n 3 No 270 deg\n 4 Yes None\n 5 Yes 90 deg\n 6 Yes 180 deg\n 7 Yes 270 deg\n\n The directions follow the IDL rotate function convention.\n \n lincor : str, optional\n the fullpath to the FITS file of linearity correction coefficients\n\n ampcor : {False, True}, optional \n Set to correct for amplifier drift (see uspexampcor.py)\n\n Returns\n --------\n tuple \n The results are returned as (data,var,hdrinfo,bitmask) where\n data = the image(s) in DN/s\n var = the variance image(s) in (DN/s)**2\n hdrinfo = a list where each element is a dict. The key is the FITS \n keyword and the value is a list consisting of the FITS value and FITS \n comment.\n\n Procedure\n ---------\n ?\n\n\n Example\n --------\n ?\n\n Modification History\n --------------------\n 2022-05-25 - Written by M. 
Cushing, University of Toledo.\n Based on the Spextool mc_readuspexfits.pro IDL program.\n \"\"\"\n\n#\n# Get setup information\n#\n\n NAXIS1=2048\n NAXIS2=2048\n \n nfiles = len(files)\n \n dolincor = [0,1][lincor is not None]\n\n# Correct for non-linearity?\n\n if dolincor:\n\n lc_coeffs = fits.getdata(lincor)\n \n else:\n\n lc_coeffs = None\n \n# Get set up for linearity check\n\n hdul = fits.open(lininfo['bias']) \n DIVISOR = hdul[0].header['DIVISOR']\n bias = (hdul[0].data)/DIVISOR\n hdul.close()\n\n if pair:\n\n# Check to make sure the right number of files\n\n if (nfiles % 2) != 0:\n\n print('mc_readuspexfits: Not an even number of images.')\n sys.exit(1)\n \n else:\n\n nimages = int(nfiles/2)\n\n else:\n\n nimages = nfiles\n\n# Make empty arrays\n\n data = npempty((nimages,NAXIS2,NAXIS1))\n var = npempty((nimages,NAXIS2,NAXIS1))\n hdrinfo = []\n bitmask = npempty((nimages,NAXIS2,NAXIS1),dtype=npint8)\n\n#\n# Load the data\n# \n if pair is True:\n\n# pair subtraction\n\n for i in range(0,nimages):\n\n A = loaddata(files[i*2],lininfo,bias,\\\n keywords=keywords,ampcor=ampcor,lccoeffs=lc_coeffs)\n\n B = loaddata(files[i*2+1],lininfo,bias,\\\n keywords=keywords,ampcor=ampcor,lccoeffs=lc_coeffs)\n\n combmask=combflagstack(npstack((A[3],B[3])),nbits=lininfo['bit']+1)\n \n data[i,:,:] = idlrotate(A[0]-B[0],rotate)\n var[i,:,:] = idlrotate(A[1]+B[1],rotate)\n bitmask[i,:,:] = idlrotate(combmask,rotate)\n\n hdrinfo.append(A[2])\n hdrinfo.append(B[2])\n \n if not pair:\n\n for i in range(0,nimages):\n\n im,va,hd,bm = loaddata(files[i],lininfo,bias,keywords=keywords,\\\n ampcor=ampcor,lccoeffs=lc_coeffs)\n\n data[i,:,:] = idlrotate(im,rotate)\n var[i,:,:] = idlrotate(va,rotate)\n bitmask[i,:,:] = idlrotate(bm,rotate)\n\n hdrinfo.append(hd)\n\n return(npsqueeze(data),npsqueeze(var),hdrinfo,npsqueeze(bitmask))\n\n#\n#=============================================================================\n#\ndef loaddata(file,lininfo,bias,keywords=None,ampcor=None,lccoeffs=None):\n \n\n readnoise = 12.0 # per single read\n gain = 1.5 # electrons per DN\n\n hdul = fits.open(file)\n hdul[0].verify('silentfix') # this was needed to correct hdr problems\n \n ITIME = hdul[0].header['ITIME']\n COADDS = hdul[0].header['CO_ADDS']\n NDRS = hdul[0].header['NDR']\n READTIME = hdul[0].header['TABLE_SE']\n DIVISOR = hdul[0].header['DIVISOR']\n \n# Get set up for error propagation and store total exposure time\n\n rdvar = (2.*readnoise**2)/NDRS/COADDS/ITIME**2/gain**2\n crtn = (1.0 - READTIME*(NDRS**2 -1.0)/3./ITIME/NDRS)\n\n# Read images, get into units of DN.\n \n img_P = (hdul[1].data)/DIVISOR\n img_S = (hdul[2].data)/DIVISOR\n \n# Check for linearity maximum\n\n mskP = (img_P < (bias-lininfo['max']))*2**lininfo['bit']\n mskS = (img_S < (bias-lininfo['max']))*2**lininfo['bit'] \n\n# Combine the masks \n \n bitmask=combflagstack(npstack((mskP,mskS)),nbits=lininfo['bit']+1)\n \n# Create the image\n\n img = img_P-img_S\n \n# Correct for amplifier offsets\n\n if ampcor:\n\n img = uspexampcor(img)\n\n# Determine the linearity correction for the image\n\n if lccoeffs is not None:\n\n cor = imgpoly(img,lccoeffs)\n cor = npwhere(cor == 0,1,cor) \n \n# Now set the corrections to unity for pixels > lincormax\n\n cor = npwhere(bitmask == 2**lininfo['bit'],1,cor)\n \n# Set black pixel corrections to unity as well.\n\n cor[:,0:3+1] = 1.0\n cor[:,2044:2047+1] = 1.0\n cor[0:3+1,:] = 1.0\n cor[2044:2047+1,:] = 1.0 \n\n# Apply the corrections\n\n img/=cor\n\n# Delete unnecessary arrays\n\n del cor,img_P,img_S\n\n# Create the actual 
image.\n# Convert image back to total DN for error propagation\n\n img = img*DIVISOR\n\n# Compute the variance and the final image\n\n var=npabsolute(img)*crtn/NDRS/(COADDS**2)/(ITIME**2)/gain + rdvar\n img = img/DIVISOR/ITIME\n \n# Collect header information\n\n hdr = gethdr(hdul[0].header)\n \n hdul.close()\n\n return[img,var,hdr,bitmask]\n\n#\n#=============================================================================\n#\ndef gethdr(hdr,keywords=None):\n\n \n# Grab keywords if requested \n\n if keywords:\n\n hdrinfo = gethdrinfo(hdr,keywords=keywords)\n \n else:\n\n hdrinfo = gethdrinfo(hdr) \n\n# Grab required keywords and convert to standard Spextool keywords\n\n# Airmass \n \n hdrinfo['AM'] = [hdr['TCS_AM'],' Airmass']\n\n# Hour angle\n \n val = hdr['TCS_HA']\n m = re.search('[-]','['+val+']')\n if not m: val = '+'+val.strip()\n hdrinfo['HA'] = [val,' Hour angle (hours)']\n\n# Position Angle\n \n hdrinfo['PA'] = [hdr['POSANGLE'],' Position Angle E of N (deg)']\n\n# Dec \n \n val = hdr['TCS_DEC']\n m = re.search('[-]','['+val+']')\n if not m: val = '+'+val.strip()\n hdrinfo['DEC'] = [val,' Declination, FK5 J2000']\n\n# RA\n \n hdrinfo['RA'] = [hdr['TCS_RA'].strip(),' Right Ascension, FK5 J2000']\n\n# COADDS, ITIME\n \n coadds = hdr['CO_ADDS']\n\n\n itime = hdr['ITIME']\n hdrinfo['ITIME'] = [itime,' Integration time (sec)']\n hdrinfo['NCOADDS'] = [coadds,' Number of COADDS'] \n hdrinfo['IMGITIME'] = [coadds*itime,\\\n ' Image integration time, NCOADDSxITIME (sec)']\n\n# Time\n\n \n hdrinfo['TIME'] = [hdr['TIME_OBS'].strip(),' Observation time in UTC']\n\n# Date\n \n hdrinfo['DATE'] = [hdr['DATE_OBS'].strip(),' Observation date in UTC'] \n\n# MJD\n \n hdrinfo['MJD'] = [hdr['MJD_OBS'],' Modified Julian date OBSDATE+TIME_OBS']\n\n# FILENAME\n \n hdrinfo['FILENAME'] = [hdr['IRAFNAME'].strip(),' Filename']\n\n# MODE\n \n hdrinfo['MODE'] = [hdr['GRAT'].strip(),' Instrument Mode']\n\n# INSTRUMENT\n \n hdrinfo['INSTR'] = ['SpeX',' Instrument'] \n\n return(hdrinfo)\n" ]
[ [ "numpy.empty", "numpy.squeeze", "numpy.absolute", "numpy.stack", "numpy.where" ] ]
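A hedged usage sketch for readuspexfits: the file paths and the lininfo values below are illustrative placeholders, not real uSpeX calibration numbers.

# Illustrative call; paths and the linearity limit are made-up placeholders.
lininfo = {'bias': 'cals/bias.fits',   # fullpath to the bias frame
           'max': 35000,               # saturation limit in DN (placeholder)
           'bit': 0}                   # bit to set for non-linear pixels
files = ['data/spc0001.a.fits', 'data/spc0002.b.fits']  # even count, required for pair=True
data, var, hdrinfo, bitmask = readuspexfits(files, lininfo, pair=True,
                                            rotate=0, ampcor=True)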
adamjanovsky/AndroidMalwareCrypto
[ "bacd30c2fa0dd00879604f713f6eedb9fa22e3cb" ]
[ "androidcrypto/analysis/training/logistic_regression.py" ]
[ "\"\"\"\nFile contains functions used to train Logistic Regression.\n\nAuthor: Dominik Macko\n\"\"\"\n\nfrom typing import Dict, Any, Union, Callable, Tuple, Optional\n\nimport numpy as np\nimport pandas as pd\n#from sklearn.linear_model import LogisticRegression\n# this should then be easier to interpret - just a simple scikit-learn wrapper\nfrom interpret.glassbox import LogisticRegression\nimport joblib\n\nfrom androidcrypto.analysis.training.config import TrainingTaskConfig\nfrom androidcrypto.analysis.training.utils import is_multiclass\nfrom androidcrypto.analysis.training.generic import train_model, train_gridsearchcv_model\n\ndef logistic_regression_best_params_surroundings(best_params: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Get best parameter surroundings for logistic regression.\"\"\"\n \n alpha = best_params[\"l1_ratio\"]\n C = best_params[\"C\"]\n return {\n \"l1_ratio\": [max(0.0, alpha - 0.05), alpha, min(alpha + 0.05, 1.0)],\n \"C\": [1/2 * C, C, 3/2 * C]\n }\n\ndef train_logistic_regression(train_X: np.array,\n train_y: np.array,\n scoring: Union[str, Callable[[Any, np.array, np.array], int]]=\"f1_macro\",\n n_jobs: int=8,\n verbose: int=3,\n seed: int=42,\n cv_splits: int=5\n ) -> Tuple[LogisticRegression, pd.DataFrame]:\n \"\"\"Trains logistic regression by searching for optimal l1 ratio and inverse regularization strength C.\n \n train_X - training set features\n train_y - training set targets\n scoring - scikit scoring function to use\n n_jobs - threads to use\n verbose - scikit verbose level\n seed - random seed to use\n cv_splits - number of cross-validation folds\n \n returns (model, history dataframe)\n \"\"\"\n \n grid = {\n \"l1_ratio\": [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],\n \"C\": [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n }\n return train_model(\n LogisticRegression(\n penalty=\"elasticnet\",\n solver=\"saga\", # because we want to be faster + possibly multinomial + l1 and l2\n class_weight=\"balanced\",\n random_state=seed,\n max_iter=1000\n ),\n train_gridsearchcv_model,\n train_gridsearchcv_model,\n grid,\n logistic_regression_best_params_surroundings,\n train_X,\n train_y,\n scoring=scoring,\n n_jobs=n_jobs,\n verbose=verbose,\n cv_splits=cv_splits\n )\n\ndef train_logistic_regression_based_on_config(train_X: pd.DataFrame, \n train_y: pd.Series, \n test_X: Optional[pd.DataFrame], \n task_config: TrainingTaskConfig,\n cv_splits: int,\n threads: int\n ) -> Tuple[LogisticRegression, Optional[pd.DataFrame]]:\n \n print(\"Training: Logistic Regression: Starting training.\")\n if is_multiclass(train_y):\n metric = \"f1_macro\"\n else:\n metric = \"f1\"\n model, _ = train_logistic_regression(train_X.values, train_y.values, scoring=metric, cv_splits=cv_splits, n_jobs=threads)\n if task_config.output_model_path:\n print(\"Training: Logistic Regression: Saving model.\")\n joblib.dump(model, task_config.output_model_path)\n\n test_pred = None\n if test_X is not None:\n print(\"Training: Logistic Regression: Predicting test labels.\")\n test_pred = model.predict(test_X)\n if task_config.output_prediction_path:\n print(\"Training: Logistic Regression: Saving predicted test labels.\")\n pd.Series(test_pred).to_csv(task_config.output_prediction_path)\n \n return model, test_pred\n " ]
[ [ "pandas.Series" ] ]
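A minimal sketch of driving train_logistic_regression with synthetic data. It assumes the repo's train_model/train_gridsearchcv_model helpers behave as the type hints suggest, and uses sklearn's make_classification purely as stand-in data.

# Sketch: synthetic binary-classification data fed to the repo's trainer.
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=200, n_features=10, random_state=42)
model, history = train_logistic_regression(X, y, scoring="f1", n_jobs=2,
                                           verbose=0, cv_splits=3)
print(history.head())          # grid-search history returned as a DataFrame
print(model.predict(X[:5]))    # interpret's glassbox wrapper mimics sklearn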
Laurentww/smt
[ "4a4df255b9259965439120091007f9852f41523e" ]
[ "smt/surrogate_models/kpls.py" ]
[ "\"\"\"\nAuthor: Dr. Mohamed A. Bouhlel <[email protected]>\n\nThis package is distributed under New BSD license.\n\"\"\"\n\nimport numpy as np\n\nfrom packaging import version\nfrom sklearn.cross_decomposition import PLSRegression as pls\n\nfrom smt.surrogate_models.krg_based import KrgBased\nfrom smt.utils.kriging_utils import componentwise_distance_PLS\n\n\nclass KPLS(KrgBased):\n name = \"KPLS\"\n\n def _initialize(self):\n super(KPLS, self)._initialize()\n declare = self.options.declare\n declare(\"n_comp\", 1, types=int, desc=\"Number of principal components\")\n # KPLS used only with \"abs_exp\" and \"squar_exp\" correlations\n declare(\n \"corr\",\n \"squar_exp\",\n values=(\"abs_exp\", \"squar_exp\"),\n desc=\"Correlation function type\",\n types=(str),\n )\n declare(\n \"eval_n_comp\",\n False,\n types=(bool),\n values=(True, False),\n desc=\"n_comp evaluation flag\",\n )\n declare(\n \"eval_comp_treshold\",\n 1.0,\n types=(float),\n desc=\"n_comp evaluation threshold for Wold's R criterion\",\n )\n\n def _compute_pls(self, X, y):\n _pls = pls(self.options[\"n_comp\"])\n # As of sklearn 0.24.1 zeroed outputs raise an exception while sklearn 0.23 returns zeroed x_rotations\n # For now the try/except below is a workaround to restore the 0.23 behaviour\n try:\n self.coeff_pls = _pls.fit(X.copy(), y.copy()).x_rotations_\n except StopIteration:\n self.coeff_pls = np.zeros((X.shape[1], self.options[\"n_comp\"]))\n return X, y\n\n def _componentwise_distance(self, dx, opt=0, theta=None, return_derivative=False):\n d = componentwise_distance_PLS(\n dx,\n self.options[\"corr\"],\n self.options[\"n_comp\"],\n self.coeff_pls,\n theta=theta,\n return_derivative=return_derivative,\n )\n return d\n\n def _estimate_number_of_components(self):\n \"\"\"\n self.options[\"n_comp\"] value from user is ignored and replaced by an estimated one wrt Wold's R criterion.\n \"\"\"\n eval_comp_treshold = self.options[\"eval_comp_treshold\"]\n X = self.training_points[None][0][0]\n y = self.training_points[None][0][1]\n k_fold = 4\n nbk = int(self.nt / k_fold)\n press_m = 0.0\n press_m1 = 0.0\n self.options[\"n_comp\"] = 0\n nextcomp = True\n while nextcomp:\n self.options[\"n_comp\"] += 1\n press_m = press_m1\n press_m1 = 0\n self.options[\"theta0\"] = [0.1]\n for fold in range(k_fold):\n self.nt = len(X) - nbk\n todel = np.arange(fold * nbk, (fold + 1) * nbk)\n Xfold = np.copy(X)\n Xfold = np.delete(X, todel, axis=0)\n yfold = np.copy(y)\n yfold = np.delete(y, todel, axis=0)\n Xtest = np.copy(X)[fold * nbk : (fold + 1) * nbk, :]\n ytest = np.copy(y)[fold * nbk : (fold + 1) * nbk, :]\n\n self.training_points[None][0][0] = Xfold\n self.training_points[None][0][1] = yfold\n try:\n self._new_train()\n except ValueError:\n self.options[\"n_comp\"] -= 1\n nextcomp = False\n break\n ye = self._predict_values(Xtest)\n press_m1 = press_m1 + np.sum(np.power((1 / len(X)) * (ye - ytest), 2))\n if self.options[\"n_comp\"] > 1 and press_m1 / press_m > eval_comp_treshold:\n self.options[\"n_comp\"] -= 1\n nextcomp = False\n self.training_points[None][0][0] = X\n self.training_points[None][0][1] = y\n self.nt = len(X)\n self.options[\"theta0\"] = [0.1]\n\n def _train(self):\n \"\"\"\n Train the model\n \"\"\"\n # outputs['sol'] = self.sol\n\n if self.options[\"eval_n_comp\"]:\n self._estimate_number_of_components()\n self._new_train()\n" ]
[ [ "sklearn.cross_decomposition.PLSRegression", "numpy.zeros", "numpy.copy", "numpy.arange", "numpy.delete" ] ]
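The KPLS subclass above follows smt's standard surrogate workflow; a short sketch of that documented usage with toy data, assuming smt and its dependencies are installed:

# Toy fit/predict cycle using smt's documented surrogate API.
import numpy as np
from smt.surrogate_models import KPLS

xt = np.random.rand(30, 6)                # 30 samples, 6 inputs
yt = np.sum(xt, axis=1, keepdims=True)    # toy response
sm = KPLS(n_comp=2)
sm.set_training_values(xt, yt)
sm.train()
y_pred = sm.predict_values(np.random.rand(5, 6))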
samie-hash/data-science-repo
[ "574ebad704e3f2ebce18f573af87cd95571b4cc9" ]
[ "credit-card-fraud/src/data/make_dataset.py" ]
[ "# -*- coding: utf-8 -*-\nimport sys\nsys.path.append('..')\n\nimport click\nimport logging\nfrom pathlib import Path\nfrom dotenv import find_dotenv, load_dotenv\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.pipeline import Pipeline\n\nimport features.build_features as buif\n\[email protected]()\[email protected]('input_filepath', type=click.Path(exists=True))\[email protected]('output_filepath', type=click.Path())\ndef main(input_filepath, output_filepath):\n \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n\n To run this file; navigate to this file location in the command line and run the command\n `python make_dataset.py ../../data/raw/creditcard.csv ../../data/processed/processed.csv`\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n \n # read the input file\n data = pd.read_csv(input_filepath)\n\n # create the data processing pipeline\n columns = [buif.correlation_columns(data, 'Class', k=.2)[0]]\n columns.extend(['Class'])\n\n pipeline = Pipeline(steps=[\n ('column_extractor', buif.ColumnExtractor(columns)),\n ])\n\n # fit the pipeline to data\n processed = pipeline.fit_transform(data)\n\n # save the processed data to disk\n processed.to_csv(output_filepath, index=None)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # not used in this stub but often useful for finding various files\n project_dir = Path(__file__).resolve().parents[2]\n\n # find .env automagically by walking up directories until it's found, then\n # load up the .env entries as environment variables\n load_dotenv(find_dotenv())\n\n main()\n" ]
[ [ "pandas.read_csv" ] ]
yhpark-knu/deeplearning.ai-Assignments
[ "1db8b4faf71e6ae2714420aec8b6b0f7516363e7" ]
[ "Improving Deep Neural Networks/Week2/testCases.py" ]
[ "import numpy as np\r\n\r\ndef compute_cost_with_regularization_test_case():\r\n np.random.seed(1)\r\n Y_assess = np.array([[1, 1, 0, 1, 0]])\r\n W1 = np.random.randn(2, 3)\r\n b1 = np.random.randn(2, 1)\r\n W2 = np.random.randn(3, 2)\r\n b2 = np.random.randn(3, 1)\r\n W3 = np.random.randn(1, 3)\r\n b3 = np.random.randn(1, 1)\r\n parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3}\r\n a3 = np.array([[ 0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]])\r\n return a3, Y_assess, parameters\r\n\r\ndef backward_propagation_with_regularization_test_case():\r\n np.random.seed(1)\r\n X_assess = np.random.randn(3, 5)\r\n Y_assess = np.array([[1, 1, 0, 1, 0]])\r\n cache = (np.array([[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],\r\n [-1.98043538, 4.1600994 , 0.79051021, 1.46493512, -0.45506242]]),\r\n np.array([[ 0. , 3.32524635, 2.13994541, 2.60700654, 0. ],\r\n [ 0. , 4.1600994 , 0.79051021, 1.46493512, 0. ]]),\r\n np.array([[-1.09989127, -0.17242821, -0.87785842],\r\n [ 0.04221375, 0.58281521, -1.10061918]]),\r\n np.array([[ 1.14472371],\r\n [ 0.90159072]]),\r\n np.array([[ 0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],\r\n [-0.69166075, -3.47645987, -2.25194702, -2.65416996, -0.69166075],\r\n [-0.39675353, -4.62285846, -2.61101729, -3.22874921, -0.39675353]]),\r\n np.array([[ 0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],\r\n [ 0. , 0. , 0. , 0. , 0. ],\r\n [ 0. , 0. , 0. , 0. , 0. ]]),\r\n np.array([[ 0.50249434, 0.90085595],\r\n [-0.68372786, -0.12289023],\r\n [-0.93576943, -0.26788808]]),\r\n np.array([[ 0.53035547],\r\n [-0.69166075],\r\n [-0.39675353]]),\r\n np.array([[-0.3771104 , -4.10060224, -1.60539468, -2.18416951, -0.3771104 ]]),\r\n np.array([[ 0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]]),\r\n np.array([[-0.6871727 , -0.84520564, -0.67124613]]),\r\n np.array([[-0.0126646]]))\r\n return X_assess, Y_assess, cache\r\n\r\ndef forward_propagation_with_dropout_test_case():\r\n np.random.seed(1)\r\n X_assess = np.random.randn(3, 5)\r\n W1 = np.random.randn(2, 3)\r\n b1 = np.random.randn(2, 1)\r\n W2 = np.random.randn(3, 2)\r\n b2 = np.random.randn(3, 1)\r\n W3 = np.random.randn(1, 3)\r\n b3 = np.random.randn(1, 1)\r\n parameters = {\"W1\": W1, \"b1\": b1, \"W2\": W2, \"b2\": b2, \"W3\": W3, \"b3\": b3}\r\n \r\n return X_assess, parameters\r\n\r\ndef backward_propagation_with_dropout_test_case():\r\n np.random.seed(1)\r\n X_assess = np.random.randn(3, 5)\r\n Y_assess = np.array([[1, 1, 0, 1, 0]])\r\n cache = (np.array([[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],\r\n [-1.98043538, 4.1600994 , 0.79051021, 1.46493512, -0.45506242]]), np.array([[ True, False, True, True, True],\r\n [ True, True, True, True, False]], dtype=bool), np.array([[ 0. , 0. , 4.27989081, 5.21401307, 0. ],\r\n [ 0. , 8.32019881, 1.58102041, 2.92987024, 0. ]]), np.array([[-1.09989127, -0.17242821, -0.87785842],\r\n [ 0.04221375, 0.58281521, -1.10061918]]), np.array([[ 1.14472371],\r\n [ 0.90159072]]), np.array([[ 0.53035547, 8.02565606, 4.10524802, 5.78975856, 0.53035547],\r\n [-0.69166075, -1.71413186, -3.81223329, -4.61667916, -0.69166075],\r\n [-0.39675353, -2.62563561, -4.82528105, -6.0607449 , -0.39675353]]), np.array([[ True, False, True, False, True],\r\n [False, True, False, True, True],\r\n [False, False, True, False, False]], dtype=bool), np.array([[ 1.06071093, 0. , 8.21049603, 0. , 1.06071093],\r\n [ 0. , 0. , 0. , 0. , 0. ],\r\n [ 0. , 0. , 0. , 0. , 0. 
]]), np.array([[ 0.50249434, 0.90085595],\r\n [-0.68372786, -0.12289023],\r\n [-0.93576943, -0.26788808]]), np.array([[ 0.53035547],\r\n [-0.69166075],\r\n [-0.39675353]]), np.array([[-0.7415562 , -0.0126646 , -5.65469333, -0.0126646 , -0.7415562 ]]), np.array([[ 0.32266394, 0.49683389, 0.00348883, 0.49683389, 0.32266394]]), np.array([[-0.6871727 , -0.84520564, -0.67124613]]), np.array([[-0.0126646]]))\r\n\r\n\r\n return X_assess, Y_assess, cache" ]
[ [ "numpy.array", "numpy.random.seed", "numpy.random.randn" ] ]
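These fixtures pair with the learner-implemented exercise functions from the course notebook; a sketch of how one is consumed (compute_cost_with_regularization is the learner's own function, assumed to exist):

# Sketch: feeding a fixture to the learner's implementation.
a3, Y_assess, parameters = compute_cost_with_regularization_test_case()
cost = compute_cost_with_regularization(a3, Y_assess, parameters, lambd=0.1)
print("cost = " + str(cost))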
harrisonized/sf-rent-petitions
[ "6199534366a18cb38c8ff254b8a961fcdcc6ba38" ]
[ "functions/plotting/mpl.py" ]
[ "import matplotlib.pyplot as plt\n\n\n# Objects included in this file:\n\n# Functions included in this file:\n# # plot_empty (mpl)\n\n\ndef plot_empty(xlabel=None, ylabel=None,\n title=None,\n figsize=(8, 5)):\n \"\"\"Initialize fig object for seaborn objects that do not include fig by default\n \"\"\"\n fig = plt.figure(figsize=figsize, dpi=80)\n\n ax = fig.gca()\n ax.set_xlabel(xlabel, fontsize=16)\n ax.set_ylabel(ylabel, fontsize=16)\n ax.set_title(title, fontsize=24)\n\n return fig, ax\n" ]
[ [ "matplotlib.pyplot.figure" ] ]
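Usage sketch for plot_empty: pre-size and label a figure, then hand its axes to a seaborn call that would otherwise not expose a fig handle (the data values are placeholders).

# Sketch: plot_empty supplies the fig/ax pair; seaborn draws onto ax.
import seaborn as sns

fig, ax = plot_empty(xlabel="Year", ylabel="Petitions filed",
                     title="SF Rent Petitions", figsize=(10, 6))
sns.lineplot(x=[2015, 2016, 2017], y=[120, 135, 150], ax=ax)  # placeholder data
fig.savefig("petitions.png", bbox_inches="tight")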
Romit-Maulik/Tutorials-Demos-Practice
[ "77eecdc2a202e6b333123cfd92e7db6dc0eea021" ]
[ "Other_Python/MAE_Tensorflow_Tutorial/MAE_TF_Tutorial.py" ]
[ "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import cm\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport tensorflow as tf\r\nimport os\r\nimport time\r\n\r\n\r\ndef make_training_data():\r\n n_samples = 500 #Number of training data samples\r\n n_inputs = 2 #Number of input parameters\r\n n_outputs = 3 #Number of output parameters\r\n\r\n #Define arrays for storing these\r\n input_data = np.zeros(shape=(n_samples, n_inputs), dtype='double')\r\n output_data = np.zeros(shape=(n_samples, n_outputs), dtype='double')\r\n\r\n #Populate arrays\r\n np.random.seed(1)\r\n for i in range(n_samples):\r\n x = np.random.uniform(low=0.0, high=2.0 * np.pi)\r\n y = np.random.uniform(low=0.0, high=2.0 * np.pi)\r\n\r\n input_data[i, 0] = x\r\n input_data[i, 1] = y\r\n\r\n output_data[i, 0] = np.sin(x)*np.sin(y)\r\n output_data[i, 1] = np.sin(x)+np.cos(y)\r\n output_data[i, 2] = np.sin(-x-y)\r\n\r\n return n_samples, n_inputs, n_outputs, input_data, output_data\r\n\r\n\r\ndef make_testing_data():\r\n n_samples = 5000 #Number of training data samples\r\n n_inputs = 2 #Number of input parameters\r\n\r\n #Define arrays for storing these\r\n input_data = np.zeros(shape=(n_samples, n_inputs), dtype='double')\r\n\r\n #Populate arrays\r\n np.random.seed(2)\r\n for i in range(n_samples):\r\n x = np.random.uniform(low=0.0, high=2.0 * np.pi)\r\n y = np.random.uniform(low=0.0, high=2.0 * np.pi)\r\n\r\n input_data[i, 0] = x\r\n input_data[i, 1] = y\r\n\r\n return input_data\r\n\r\n\r\ndef plot_data(inputs,outputs):\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(311, projection='3d')\r\n ax.plot_trisurf(inputs[:,0],inputs[:,1],outputs[:,0],cmap=cm.jet, linewidth=0.2)\r\n ax.set_title('Function 1')\r\n ax.grid(False)\r\n ax.axis('off')\r\n\r\n ax = fig.add_subplot(312, projection='3d')\r\n ax.plot_trisurf(inputs[:,0],inputs[:,1],outputs[:,1],cmap=cm.jet, linewidth=0.2)\r\n ax.set_title('Function 2')\r\n ax.grid(False)\r\n ax.axis('off')\r\n\r\n\r\n ax = fig.add_subplot(313, projection='3d')\r\n ax.plot_trisurf(inputs[:,0],inputs[:,1],outputs[:,2],cmap=cm.jet, linewidth=0.2)\r\n ax.set_title('Function 3')\r\n ax.grid(False)\r\n ax.axis('off')\r\n\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\n plt.figure()\r\n f1_true = outputs[:, 0].flatten()\r\n f2_true = outputs[:, 1].flatten()\r\n f3_true = outputs[:, 2].flatten()\r\n plt.hist(f1_true, bins=16, label=r'Function 1', histtype='step') # arguments are passed to np.histogram\r\n plt.hist(f2_true, bins=16, label=r'Function 2', histtype='step') # arguments are passed to np.histogram\r\n plt.hist(f3_true, bins=16, label=r'Function 3', histtype='step') # arguments are passed to np.histogram\r\n plt.legend()\r\n plt.show()\r\n\r\n\r\n\r\ndef define_weights_biases(n_inputs,n_outputs):\r\n\r\n l1_nodes = 50\r\n l2_nodes = 50\r\n\r\n #Truncated normal distribution - values greater than 2 standard deviations are dropped and repicked\r\n\r\n weights = {\r\n 'l1': tf.Variable(tf.truncated_normal(shape=[n_inputs, l1_nodes], seed=1, mean=0.0, stddev=0.1)),\r\n 'l2': tf.Variable(tf.truncated_normal([l1_nodes, l2_nodes], seed=1, mean=0.0, stddev=0.1)),\r\n 'l3': tf.Variable(tf.truncated_normal([l2_nodes, n_outputs], seed=1, mean=0.0, stddev=0.1))\r\n }\r\n\r\n biases = {\r\n 'l1': tf.Variable(tf.truncated_normal([l1_nodes], seed=1, mean=0.0, stddev=0.1)),\r\n 'l2': tf.Variable(tf.truncated_normal([l2_nodes], seed=1, mean=0.0, stddev=0.1)),\r\n 'l3': tf.Variable(tf.truncated_normal([n_outputs], seed=1, mean=0.0, stddev=0.1))\r\n }\r\n\r\n return weights, 
biases\r\n\r\n\r\ndef feed_forward(x,weights,biases):\r\n # Inputs of weights and biases as dictionaries\r\n l1 = tf.add(tf.matmul(x, weights['l1']), biases['l1'])\r\n l1 = tf.nn.sigmoid(l1)\r\n\r\n l2 = tf.add(tf.matmul(l1, weights['l2']), biases['l2'])\r\n l2 = tf.nn.sigmoid(l2)\r\n\r\n prediction = tf.add(tf.matmul(l2, weights['l3']), biases['l3'])\r\n\r\n return prediction\r\n\r\ndef our_cost(y,y_true):\r\n #Simple MSE error\r\n return tf.reduce_mean(tf.squared_difference(y,y_true))\r\n\r\n\r\ndef placeholders(n_inputs,n_outputs):\r\n #Inputs to DNN\r\n x_true = tf.placeholder(tf.float32,shape=[None, n_inputs])\r\n #Outputs\r\n y_true = tf.placeholder(tf.float32,shape=[None, n_outputs])\r\n\r\n return x_true, y_true\r\n\r\n\r\ndef train_my_neural_network():\r\n # Load data and sizes\r\n n_samples, n_inputs, n_outputs, input_data, output_data = make_training_data()\r\n\r\n #Check plots\r\n plot_data(input_data,output_data)\r\n\r\n t0 = time.time()\r\n #Start network code\r\n batch_size = 200\r\n x_true, y_true = placeholders(n_inputs, n_outputs)\r\n\r\n # Initialize weights and biases\r\n weights, biases = define_weights_biases(n_inputs, n_outputs)\r\n prediction = feed_forward(x_true,weights,biases)\r\n cost = our_cost(prediction,y_true)\r\n\r\n\r\n with tf.Session() as sess:\r\n #prediction = feed_forward(x_true,weights,biases)\r\n #cost = our_cost(prediction,y_true)\r\n train_step = tf.train.AdamOptimizer().minimize(cost)\r\n\r\n sess.run(tf.global_variables_initializer())\r\n\r\n hm_epochs = 10000\r\n epoch = 0\r\n\r\n # For visualization\r\n epoch_loss_array = np.zeros((hm_epochs, 3), dtype='double')\r\n\r\n while epoch < hm_epochs:\r\n epoch_loss = 0\r\n epoch_loss_val = 0\r\n\r\n\r\n for _ in range(int(n_samples / batch_size)):\r\n idx1 = np.random.randint(n_samples, size=batch_size)\r\n epoch_ip, epoch_op = input_data[idx1, :], output_data[idx1, :]\r\n\r\n #ycheck = np.ones(shape=(batch_size,n_outputs),dtype=float)\r\n\r\n # c = sess.run(func_check,feed_dict={x: epoch_ip, y_: ycheck})\r\n # print(c)\r\n # print(np.shape(c))\r\n # os.system('pause')\r\n\r\n train_step.run(session=sess,\r\n feed_dict={x_true: epoch_ip, y_true: epoch_op})\r\n c = sess.run(cost, feed_dict={x_true: epoch_ip, y_true: epoch_op})\r\n\r\n epoch_loss = epoch_loss + c\r\n\r\n idx1 = np.random.randint(n_samples, size=batch_size)\r\n epoch_ip_val, epoch_op_val = input_data[idx1, :], output_data[idx1, :]\r\n\r\n c_val = sess.run(cost, feed_dict={x_true: epoch_ip_val, y_true: epoch_op_val})\r\n\r\n epoch_loss_val = epoch_loss_val + c_val\r\n\r\n print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)\r\n print('Validation loss:', epoch_loss_val)\r\n\r\n epoch_loss_array[epoch, 0] = epoch\r\n epoch_loss_array[epoch, 1] = epoch_loss\r\n epoch_loss_array[epoch, 2] = epoch_loss_val\r\n\r\n epoch = epoch + 1\r\n\r\n t1 = time.time()\r\n print('Time = ',t1-t0)\r\n\r\n # Plotting training performance\r\n plt.figure()\r\n plt.title('Performance')\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.semilogx(epoch_loss_array[:, 0], epoch_loss_array[:, 1], label='Total training loss')\r\n plt.semilogx(epoch_loss_array[:, 0], epoch_loss_array[:, 2], label='Total validation loss')\r\n plt.legend()\r\n plt.show()\r\n\r\n # Training done - save weights\r\n w1_val = sess.run(weights['l1'])\r\n w2_val = sess.run(weights['l2'])\r\n w3_val = sess.run(weights['l3'])\r\n\r\n b1_val = sess.run(biases['l1'])\r\n b2_val = sess.run(biases['l2'])\r\n b3_val = sess.run(biases['l3'])\r\n\r\n return w1_val, w2_val, 
w3_val, b1_val, b2_val, b3_val\r\n\r\n\r\ndef network_prediction(x, w1, w2, w3, b1, b2, b3):\r\n # Inputs of weights and biases as dictionaries\r\n l1 = np.add(np.matmul(x, w1), b1)\r\n l1 = sigmoid(l1)\r\n\r\n l2 = np.add(np.matmul(l1, w2), b2)\r\n l2 = sigmoid(l2)\r\n\r\n f = np.add(np.matmul(l2, w3), b3)\r\n\r\n return f\r\n\r\ndef sigmoid(x):\r\n return 1/(1+np.exp(-x))\r\n\r\nif __name__ == \"__main__\":\r\n w1, w2, w3, b1, b2, b3 = train_my_neural_network()\r\n\r\n testing_data = make_testing_data()\r\n testing_outputs = network_prediction(testing_data, w1, w2, w3, b1, b2, b3)\r\n\r\n plot_data(testing_data,testing_outputs)\r\n" ]
[ [ "matplotlib.pyplot.semilogx", "numpy.random.seed", "tensorflow.matmul", "matplotlib.pyplot.ylabel", "tensorflow.global_variables_initializer", "matplotlib.pyplot.figure", "numpy.cos", "matplotlib.pyplot.title", "matplotlib.pyplot.hist", "numpy.random.uniform", "numpy.zeros", "tensorflow.Session", "tensorflow.nn.sigmoid", "tensorflow.placeholder", "matplotlib.pyplot.legend", "numpy.matmul", "tensorflow.squared_difference", "tensorflow.truncated_normal", "tensorflow.train.AdamOptimizer", "numpy.exp", "matplotlib.pyplot.show", "numpy.sin", "numpy.random.randint", "matplotlib.pyplot.xlabel" ] ]
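The tutorial above is written against the TF1 graph API (placeholders, Session, tf.train.AdamOptimizer); under TensorFlow 2.x it only runs through the standard v1 compatibility shim, sketched here:

# Standard workaround to run TF1-style code under TensorFlow 2.x.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()   # restores placeholder, Session, truncated_normal, etc.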
Infaehig/ThesisPython
[ "0365cda2d006fdc3e762535ec6d0491007a81928" ]
[ "old/FEniCS/homogenization/netgen_meshes/oht_8layers_3patches.py" ]
[ "import os, sys\nsys.path.insert(0,os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\nimport sa_utils\nimport netgen_csg\nimport numpy as np\n\nif __name__ == '__main__':\n prefix = 'oht_8layers_3patches'\n logger = sa_utils.LogWrapper(prefix+'/'+prefix)\n\n netgen_csg.create_patches(box = np.array([0., 0., 0., 6, 1.5, 0.0592]), hole_radius = 0.125, layers = 8, max_resolution = 0.25, max_refines = 5,\n num = 0, create_inclusions = False, prefix = prefix, logger = logger, alpha = 1.25, beta = 5.0, patch_nums = np.array([3, 1, 1]))\n" ]
[ [ "numpy.array" ] ]
LiqunChen0606/OT-Seq2Seq
[ "bde86f3e6291751aa79c03cd1c353fc65032b76a" ]
[ "texar/baseline_seq2seq_attn_ot.py" ]
[ "# Copyright 2018 The Texar Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAttentional Seq2seq.\nsame as examples/seq2seq_attn except that here Rouge is also supported.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\n# pylint: disable=invalid-name, too-many-arguments, too-many-locals\nimport os\nfrom io import open\nimport importlib\nimport tensorflow as tf\nimport texar as tx\nfrom rouge import Rouge\nimport OT\nimport pdb\n\nGPUID = 0\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(GPUID)\n\nflags = tf.flags\n\nflags.DEFINE_string(\"config_model\", \"configs.config_model\", \"The model config.\")\nflags.DEFINE_string(\"config_data\", \"configs.config_iwslt14\",\n \"The dataset config.\")\n\nflags.DEFINE_string('output_dir', '.', 'where to keep training logs')\n\nFLAGS = flags.FLAGS\n\nconfig_model = importlib.import_module(FLAGS.config_model)\nconfig_data = importlib.import_module(FLAGS.config_data)\n\nif not FLAGS.output_dir.endswith('/'):\n FLAGS.output_dir += '/'\nlog_dir = FLAGS.output_dir + 'training_log_baseline/'\ntx.utils.maybe_create_dir(log_dir)\n\n\ndef build_model(batch, train_data):\n \"\"\"Assembles the seq2seq model.\n \"\"\"\n source_embedder = tx.modules.WordEmbedder(\n vocab_size=train_data.source_vocab.size, hparams=config_model.embedder)\n\n encoder = tx.modules.BidirectionalRNNEncoder(\n hparams=config_model.encoder)\n\n enc_outputs, _ = encoder(source_embedder(batch['source_text_ids']))\n\n target_embedder = tx.modules.WordEmbedder(\n vocab_size=train_data.target_vocab.size, hparams=config_model.embedder)\n\n decoder = tx.modules.AttentionRNNDecoder(\n memory=tf.concat(enc_outputs, axis=2),\n memory_sequence_length=batch['source_length'],\n vocab_size=train_data.target_vocab.size,\n hparams=config_model.decoder)\n\n training_outputs, _, _ = decoder(\n decoding_strategy='train_greedy',\n inputs=target_embedder(batch['target_text_ids'][:, :-1]),\n sequence_length=batch['target_length'] - 1)\n\n # Modify loss\n MLE_loss = tx.losses.sequence_sparse_softmax_cross_entropy(\n labels=batch['target_text_ids'][:, 1:],\n logits=training_outputs.logits,\n sequence_length=batch['target_length'] - 1)\n\n # TODO: key words matching loss\n tgt_logits = training_outputs.logits\n tgt_words = target_embedder(soft_ids = tgt_logits)\n src_words = source_embedder(ids = batch['source_text_ids'])\n src_words = tf.nn.l2_normalize(src_words, 2, epsilon=1e-12)\n tgt_words = tf.nn.l2_normalize(tgt_words, 2, epsilon=1e-12)\n\n cosine_cost = 1 - tf.einsum(\n 'aij,ajk->aik', src_words, tf.transpose(tgt_words, [0,2,1]))\n # pdb.set_trace()\n OT_loss = tf.reduce_mean(OT.IPOT_distance2(cosine_cost))\n\n Total_loss = MLE_loss + 0.1 * OT_loss\n\n train_op = tx.core.get_train_op(\n Total_loss,\n hparams=config_model.opt)\n\n \n start_tokens = tf.ones_like(batch['target_length']) *\\\n train_data.target_vocab.bos_token_id\n beam_search_outputs, _, _ = 
\\\n tx.modules.beam_search_decode(\n decoder_or_cell=decoder,\n embedding=target_embedder,\n start_tokens=start_tokens,\n end_token=train_data.target_vocab.eos_token_id,\n beam_width=config_model.beam_width,\n max_decoding_length=60)\n\n return train_op, beam_search_outputs\n\n\ndef print_stdout_and_file(content, file):\n print(content)\n print(content, file=file)\n\n\ndef main():\n \"\"\"Entrypoint.\n \"\"\"\n train_data = tx.data.PairedTextData(hparams=config_data.train)\n val_data = tx.data.PairedTextData(hparams=config_data.val)\n test_data = tx.data.PairedTextData(hparams=config_data.test)\n # pdb.set_trace()\n data_iterator = tx.data.TrainTestDataIterator(\n train=train_data, val=val_data, test=test_data)\n\n batch = data_iterator.get_next()\n\n train_op, infer_outputs = build_model(batch, train_data)\n\n def _train_epoch(sess, epoch_no):\n data_iterator.switch_to_train_data(sess)\n training_log_file = \\\n open(log_dir + 'training_log' + str(epoch_no) + '.txt', 'w',\n encoding='utf-8')\n\n step = 0\n while True:\n try:\n loss = sess.run(train_op)\n print(\"step={}, loss={:.4f}\".format(step, loss),\n file=training_log_file)\n if step % config_data.observe_steps == 0:\n print(\"step={}, loss={:.4f}\".format(step, loss))\n training_log_file.flush()\n step += 1\n except tf.errors.OutOfRangeError:\n break\n\n def _eval_epoch(sess, mode, epoch_no):\n if mode == 'val':\n data_iterator.switch_to_val_data(sess)\n else:\n data_iterator.switch_to_test_data(sess)\n\n refs, hypos = [], []\n while True:\n try:\n fetches = [\n batch['target_text'][:, 1:],\n infer_outputs.predicted_ids[:, :, 0]\n ]\n feed_dict = {\n tx.global_mode(): tf.estimator.ModeKeys.EVAL\n }\n target_texts_ori, output_ids = \\\n sess.run(fetches, feed_dict=feed_dict)\n\n target_texts = tx.utils.strip_special_tokens(\n target_texts_ori.tolist(), is_token_list=True)\n target_texts = tx.utils.str_join(target_texts)\n output_texts = tx.utils.map_ids_to_strs(\n ids=output_ids, vocab=val_data.target_vocab)\n\n tx.utils.write_paired_text(\n target_texts, output_texts,\n log_dir + mode + '_results' + str(epoch_no) + '.txt',\n append=True, mode='h', sep=' ||| ')\n\n for hypo, ref in zip(output_texts, target_texts):\n if config_data.eval_metric == 'bleu':\n hypos.append(hypo)\n refs.append([ref])\n elif config_data.eval_metric == 'rouge':\n hypos.append(tx.utils.compat_as_text(hypo))\n refs.append(tx.utils.compat_as_text(ref))\n except tf.errors.OutOfRangeError:\n break\n\n if config_data.eval_metric == 'bleu':\n return tx.evals.corpus_bleu_moses(\n list_of_references=refs, hypotheses=hypos)\n elif config_data.eval_metric == 'rouge':\n rouge = Rouge()\n return rouge.get_scores(hyps=hypos, refs=refs, avg=True)\n\n def _calc_reward(score):\n \"\"\"\n Return the bleu score or the sum of (Rouge-1, Rouge-2, Rouge-L).\n \"\"\"\n if config_data.eval_metric == 'bleu':\n return score\n elif config_data.eval_metric == 'rouge':\n return sum([value['f'] for key, value in score.items()])\n\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.4\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n sess.run(tf.tables_initializer())\n\n best_val_score = -1.\n scores_file = open(log_dir + 'scores.txt', 'w', encoding='utf-8')\n for i in range(config_data.num_epochs):\n _train_epoch(sess, i)\n\n val_score = _eval_epoch(sess, 'val', i)\n test_score = _eval_epoch(sess, 'test', i)\n\n best_val_score = max(best_val_score, 
_calc_reward(val_score))\n\n if config_data.eval_metric == 'bleu':\n print_stdout_and_file(\n 'val epoch={}, BLEU={:.4f}; best-ever={:.4f}'.format(\n i, val_score, best_val_score), file=scores_file)\n\n print_stdout_and_file(\n 'test epoch={}, BLEU={:.4f}'.format(i, test_score),\n file=scores_file)\n print_stdout_and_file('=' * 50, file=scores_file)\n\n elif config_data.eval_metric == 'rouge':\n print_stdout_and_file(\n 'valid epoch {}:'.format(i), file=scores_file)\n for key, value in val_score.items():\n print_stdout_and_file(\n '{}: {}'.format(key, value), file=scores_file)\n print_stdout_and_file('fsum: {}; best_val_fsum: {}'.format(\n _calc_reward(val_score), best_val_score), file=scores_file)\n\n print_stdout_and_file(\n 'test epoch {}:'.format(i), file=scores_file)\n for key, value in test_score.items():\n print_stdout_and_file(\n '{}: {}'.format(key, value), file=scores_file)\n print_stdout_and_file('=' * 110, file=scores_file)\n\n scores_file.flush()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.global_variables_initializer", "tensorflow.ones_like", "tensorflow.local_variables_initializer", "tensorflow.Session", "tensorflow.concat", "tensorflow.transpose", "tensorflow.ConfigProto", "tensorflow.nn.l2_normalize", "tensorflow.tables_initializer" ] ]
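The tf.flags block at the top of the script defines its intended invocation; an illustrative command line (the configs.* modules ship with the repo's examples, and output_dir can be any writable directory under which training_log_baseline/ will be created):

python baseline_seq2seq_attn_ot.py \
    --config_model configs.config_model \
    --config_data configs.config_iwslt14 \
    --output_dir ./runs/iwslt14_ot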
ailabteam/clone_test_botnet-detection
[ "95f02d4967186440f60d83db04f139c197e178b7" ]
[ "botdet/models_pyg/graph_attention.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.nn import Parameter\nfrom torch_geometric.nn.inits import glorot, zeros\nfrom torch_geometric.utils import scatter_\n\nfrom .gcn_base_models import NodeModelBase\nfrom .common import activation, softmax\n\n\nclass NodeModelAttention(NodeModelBase):\n \"\"\"\n Multi-head soft attention over a node's neighborhood.\n Note:\n - Inheritance to :class:`NodeModelBase` is only for organization purpose, which is actually not necessary\n So deg_norm=None, edge_gate=None, aggr='add' (defaults), and they are not currently used.\n - When `att_combine` is 'cat', out_channels for 1 head is out_channels / nheads;\n otherwise, it is out_channels for every head.\n \"\"\"\n\n def __init__(self, in_channels, out_channels, in_edgedim=None,\n nheads=1, att_act='none', att_dropout=0, att_combine='cat', att_dir='in', bias=False, **kwargs):\n assert att_act in ['none', 'lrelu', 'relu']\n assert att_combine in ['cat', 'add', 'mean']\n assert att_dir in ['in', 'out']\n\n super(NodeModelAttention, self).__init__(in_channels, out_channels, in_edgedim)\n\n self.nheads = nheads\n if att_combine == 'cat':\n self.out_channels_1head = out_channels // nheads\n assert self.out_channels_1head * nheads == out_channels, 'out_channels should be divisible by nheads'\n else:\n self.out_channels_1head = out_channels\n\n self.att_combine = att_combine\n self.att_dir = att_dir\n\n if att_combine == 'cat':\n self.weight = Parameter(torch.Tensor(in_channels, out_channels))\n else: # 'add' or 'mean':\n self.weight = Parameter(torch.Tensor(in_channels, out_channels * nheads))\n self.att_weight = Parameter(torch.Tensor(1, nheads, 2 * self.out_channels_1head))\n self.att_act = activation(att_act)\n self.att_dropout = nn.Dropout(p=att_dropout)\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n glorot(self.weight)\n glorot(self.att_weight)\n zeros(self.bias)\n\n def forward(self, x, edge_index, edge_attr=None, deg=None, edge_weight=None, attn_store=None, **kwargs):\n \"\"\"\n 'deg' and 'edge_weight' are not used. 
Just to be consistent for API.\n \"\"\"\n x = torch.mm(x, self.weight).view(-1, self.nheads, self.out_channels_1head) # size (N, n_heads, C_out_1head)\n\n # lift the features to source and target nodes, size (E, nheads, C_out_1head) for each\n x_j = torch.index_select(x, 0, edge_index[0])\n x_i = torch.index_select(x, 0, edge_index[1])\n\n # calculate attention coefficients, size (E, nheads)\n alpha = self.att_act((torch.cat([x_j, x_i], dim=-1) * self.att_weight).sum(dim=-1))\n\n # softmax over each node's neighborhood, size (E, nheads)\n if self.att_dir == 'out':\n # random walk\n alpha = softmax(alpha, edge_index[0], num_nodes=x.size(0))\n else:\n # attend over nodes that all points to the current one\n alpha = softmax(alpha, edge_index[1], num_nodes=x.size(0))\n\n # dropout on attention coefficients (which means that during training, the neighbors are stochastically sampled)\n alpha = self.att_dropout(alpha)\n\n ''' \n # check attention entropy\n if self.att_dir == 'out':\n entropy = scatter_('add', -alpha * torch.log(alpha + 1e-16), edge_index[0], dim_size=x.size(0))\n else: # size (N, nheads)\n entropy = scatter_('add', -alpha * torch.log(alpha + 1e-16), edge_index[1], dim_size=x.size(0))\n # breakpoint()\n entropy = entropy[deg > 100, :].mean()\n entropy_max = (torch.log(deg[deg > 100] + 1e-16)).mean()\n print(f'average attention entropy {entropy.item()} (average max entropy {entropy_max.item()})')\n '''\n\n # normalize messages on each edges with attention coefficients\n x_j = x_j * alpha.view(-1, self.nheads, 1)\n\n # aggregate features to nodes, resulting in size (N, n_heads, C_out_1head)\n x = scatter_(self.aggr, x_j, edge_index[1], dim_size=x.size(0))\n\n # combine multi-heads, resulting in size (N, C_out)\n if self.att_combine == 'cat':\n x = x.view(-1, self.out_channels)\n elif self.att_combine == 'add':\n x = x.sum(dim=1)\n else:\n x = x.mean(dim=1)\n\n # add bias\n if self.bias is not None:\n x = x + self.bias\n \n if attn_store is not None: # attn_store is a callback list in case we want to get the attention scores out\n attn_store.append(alpha)\n\n return x\n\n def __repr__(self):\n return ('{} (in_channels: {}, out_channels: {}, in_edgedim: {}, nheads: {}, att_activation: {},'\n 'att_dropout: {}, att_combine: {}, att_dir: {} | number of parameters: {}').format(\n self.__class__.__name__, self.in_channels, self.out_channels, self.in_edgedim,\n self.nheads, self.att_act, self.att_dropout.p, self.att_combine, self.att_dir, self.num_parameters())\n" ]
[ [ "torch.mm", "torch.index_select", "torch.cat", "torch.nn.Dropout", "torch.Tensor" ] ]
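A minimal forward-pass sketch for NodeModelAttention on a toy graph, shapes only, no training loop; it assumes the older torch_geometric version this repo targets, which still exposes torch_geometric.utils.scatter_.

# Toy graph: 5 nodes, 4 directed edges; out_channels divisible by nheads for 'cat'.
import torch

model = NodeModelAttention(in_channels=8, out_channels=16, nheads=4,
                           att_act='lrelu', att_combine='cat')
x = torch.randn(5, 8)                     # node features
edge_index = torch.tensor([[0, 1, 2, 3],  # source nodes
                           [1, 2, 3, 4]]) # target nodes
out = model(x, edge_index)                # -> shape (5, 16)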
egvincent/rgbd-sem-seg
[ "054890ace318a883aac0ad1bfa3d2383939a6892" ]
[ "preprocessing/preprocess-dataset.py" ]
[ "#!/usr/bin/env python\n#######################################################################################\n# The MIT License\n\n# Copyright (c) 2014 Hannes Schulz, University of Bonn <[email protected]>\n# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <[email protected]>\n# Copyright (c) 2008-2009 Sebastian Nowozin <[email protected]>\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#######################################################################################\n# vim: set fileencoding=utf-8 :\n#\n# Helper script to convert the NYU Depth v2 dataset Matlab file into a set of\n# PNG images in the CURFIL dataset format.\n#\n# See https://github.com/deeplearningais/curfil/wiki/Training-and-Prediction-with-the-NYU-Depth-v2-Dataset\n\nfrom __future__ import print_function\n\nfrom joblib import Parallel, delayed\nfrom skimage import exposure\nfrom skimage.io import imsave\nimport h5py\nimport numpy as np\nimport os\nimport png\nimport scipy.io\nimport sys\nimport cv2\n\nfrom _structure_classes import get_structure_classes\nimport _solarized\nfrom utils.rgbd_util import *\nfrom utils.getCameraParam import *\nfrom getHHA import *\n\n\n### modified to collapse the ~950 original classes to 40 classes, not 4\n### and to not put it in a color map format\ndef process_ground_truth(ground_truth):\n ### added:\n collapsed_classes = {\n # void is already 0\n \"wall\" : 1,\n \"floor\" : 2,\n \"cabinet\" : 3,\n \"bed\" : 4,\n \"chair\" : 5,\n \"sofa\" : 6,\n \"table\" : 7,\n \"door\" : 8,\n \"window\" : 9,\n \"bookshelf\" : 10,\n \"picture\" : 11,\n \"counter\" : 12,\n \"blinds\" : 13,\n \"desk\" : 14,\n \"shelves\" : 15,\n \"curtain\" : 16,\n \"dresser\" : 17,\n \"pillow\" : 18,\n \"mirror\" : 19,\n \"floor mat\" : 20,\n \"clothes\" : 21,\n \"ceiling\" : 22,\n \"books\" : 23,\n \"refridgerator\" : 24,\n \"television\" : 25,\n \"paper\" : 26,\n \"towel\" : 27,\n \"shower curtain\" : 28,\n \"box\" : 29,\n \"whiteboard\" : 30,\n \"person\" : 31,\n \"night stand\" : 32,\n \"toilet\" : 33,\n \"sink\" : 34,\n \"lamp\" : 35,\n \"bathtub\" : 36,\n \"bag\" : 37,\n #\"otherstructure\" : 38,\n #\"otherfurniture\" : 39,\n #\"otherprop\" : 40\n }\n ###\n\n ### anything commented out below is code I removed from the initial implementation\n #colors = dict()\n #colors[\"structure\"] = _solarized.colors[5]\n #colors[\"prop\"] = _solarized.colors[8]\n #colors[\"furniture\"] = _solarized.colors[9]\n #colors[\"floor\"] = _solarized.colors[1]\n shape = ground_truth.shape # 
list(ground_truth.shape) + [3]\n img = np.ndarray(shape=shape, dtype=np.uint8)\n for i in xrange(shape[0]):\n for j in xrange(shape[1]):\n l = ground_truth[i, j]\n #if (l == 0):\n # img[i, j] = (0, 0, 0) # background\n #else:\n #name = classes[names[l - 1]]\n #assert name in colors, name\n #img[i, j] = colors[name]\n\n name = names[l - 1]\n class_name = classes[name]\n if name in collapsed_classes:\n img[i, j] = collapsed_classes[name]\n elif class_name == \"structure\":\n img[i, j] = 38\n elif class_name == \"furniture\":\n img[i, j] = 39\n elif class_name == \"prop\":\n img[i, j] = 40\n else:\n img[i, j] = 0\n return img\n\n\ndef visualize_depth_image(data):\n\n data[data == 0.0] = np.nan\n\n maxdepth = np.nanmax(data)\n mindepth = np.nanmin(data)\n data = data.copy()\n data -= mindepth\n data /= (maxdepth - mindepth)\n\n gray = np.zeros(list(data.shape) + [3], dtype=data.dtype)\n data = (1.0 - data)\n gray[..., :3] = np.dstack((data, data, data))\n\n # use a greenish color to visualize missing depth\n gray[np.isnan(data), :] = (97, 160, 123)\n gray[np.isnan(data), :] /= 255\n\n gray = exposure.equalize_hist(gray)\n\n # set alpha channel\n gray = np.dstack((gray, np.ones(data.shape[:2])))\n gray[np.isnan(data), -1] = 0.5\n\n return gray * 255\n\n\n### argument data_list_folder is added since the original. see main function\ndef convert_image(i, scene, img_depth, image, label, data_list_folder):\n\n write_filenames = data_list_folder != None\n\n idx = int(i) + 1\n if idx in train_images:\n train_test = \"training\"\n else:\n assert idx in test_images, \"index %d neither found in training set nor in test set\" % idx\n train_test = \"testing\"\n\n folder = \"%s/%s/%s\" % (out_folder, train_test, scene)\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n img_depth = img_depth * 1000.0\n\n depth_image_filename = \"%s/%05d_depth.png\" % (folder, i)\n png.from_array(img_depth, 'L;16').save(depth_image_filename)\n\n ### HHA processing added\n # depth image is in millimeters, and we need meters, so divide by 1000 ...\n D = img_depth / 1000.0 # lol\n\n HHA_depth_image_filename = \"%s/%05d_depth_HHA.png\" % (folder, i)\n HHA_depth_image = getHHA(getCameraParam('color'), D, D)\n cv2.imwrite(HHA_depth_image_filename, HHA_depth_image)\n\n ### block commented out\n #depth_visualization = visualize_depth_image(img_depth)\n #\n # workaround for a bug in the png module\n #depth_visualization = depth_visualization.copy() # makes it contiguous\n #shape = depth_visualization.shape\n #depth_visualization.shape = (shape[0], np.prod(shape[1:]))\n #\n #depth_image = png.from_array(depth_visualization, \"RGBA;8\") \n #depth_image.save(\"%s/%05d_depth_visualization.png\" % (folder, i))\n\n image_filename = \"%s/%05d_colors.png\" % (folder, i)\n imsave(image_filename, image)\n\n ground_truth = process_ground_truth(label)\n ground_truth_filename = \"%s/%05d_ground_truth.png\" % (folder, i)\n imsave(ground_truth_filename, ground_truth)\n\n ### new:\n data_list_file = data_list_folder + train_test + \".txt\"\n with open(data_list_file, 'a') as f:\n f.write(\"%s\\t%s\\t%s\\n\" % (image_filename, HHA_depth_image_filename, ground_truth_filename))\n ###\n\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) < 5:\n print(\"usage: %s <h5_file> <train_test_split> <out_folder> <data_list_folder>\" % sys.argv[0], file=sys.stderr)\n sys.exit(0)\n\n h5_file = h5py.File(sys.argv[1], \"r\")\n # h5py is not able to open that file. 
but scipy is\n train_test = scipy.io.loadmat(sys.argv[2])\n out_folder = sys.argv[3]\n\n ### added since original code:\n # folder to put training and testing data list files, training.txt and testing.txt\n # each contains a list of all filenames. each line: <rgb image>\\t<depth image>\\t<label image>\\n\n data_list_folder = sys.argv[4]\n ###\n\n ### replaced since original code ...\n #if len(sys.argv) >= 5:\n # raw_depth = bool(int(sys.argv[4]))\n #else:\n # raw_depth = False\n raw_depth = False\n\n ### replaced since original code ...\n #if len(sys.argv) >= 6:\n # num_threads = int(sys.argv[5])\n #else:\n # num_threads = -1\n num_threads = 1\n\n test_images = set([int(x) for x in train_test[\"testNdxs\"]])\n train_images = set([int(x) for x in train_test[\"trainNdxs\"]])\n print(\"%d training images\" % len(train_images))\n print(\"%d test images\" % len(test_images))\n\n if raw_depth:\n print(\"using raw depth images\")\n depth = h5_file['rawDepths']\n else:\n print(\"using filled depth images\")\n depth = h5_file['depths']\n\n print(\"reading\", sys.argv[1])\n\n labels = h5_file['labels']\n images = h5_file['images']\n\n rawDepthFilenames = [u''.join(unichr(c) for c in h5_file[obj_ref]) for obj_ref in h5_file['rawDepthFilenames'][0]]\n names = [u''.join(unichr(c) for c in h5_file[obj_ref]) for obj_ref in h5_file['names'][0]]\n scenes = [u''.join(unichr(c) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]\n rawRgbFilenames = [u''.join(unichr(c) for c in h5_file[obj_ref]) for obj_ref in h5_file['rawRgbFilenames'][0]]\n classes = get_structure_classes()\n\n print(\"processing images\")\n if num_threads == 1:\n print(\"single-threaded mode\")\n for i, image in enumerate(images):\n print(\"image\", i + 1, \"/\", len(images))\n convert_image(i, scenes[i], depth[i, :, :].T, image.T, labels[i, :, :].T, data_list_folder)\n else:\n Parallel(num_threads)(delayed(convert_image)(i, scenes[i], depth[i, :, :].T, images[i, :, :].T, labels[i, :, :].T, data_list_folder) for i in range(len(images)))\n\n print(\"finished\")\n" ]
[ [ "numpy.ones", "numpy.nanmax", "numpy.nanmin", "numpy.ndarray", "numpy.dstack", "numpy.isnan" ] ]
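An illustrative invocation of the converter (Python 2 only, given its use of unichr/xrange; the filenames are placeholders for the standard NYU Depth v2 .mat distribution):

python preprocess-dataset.py nyu_depth_v2_labeled.mat splits.mat ./out ./lists/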
paulinelemenkova/Python-script-029-SM-Quantile-Regression
[ "fde0e7455822fa532a5f23e1c5811d21e8b8606b" ]
[ "Script-029c-SM-Quantile regression (plate_maria-sedim_thick).py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function\n#get_ipython().run_line_magic('matplotlib', 'inline')\nimport os\nimport patsy\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\nfrom statsmodels.regression.quantile_regression import QuantReg\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set_style('whitegrid')\nsns.set_context('paper')\n\nos.chdir('/Users/pauline/Documents/Python')\ndata = pd.read_csv(\"Tab-Morph.csv\")\n\n# Least Absolute Deviation\nmod = smf.quantreg('profile ~ plate_maria', data)\nres = mod.fit(q=.5)\nprint(res.summary())\n\n# Placing the quantile regression results in a Pandas DataFrame, and the OLS results in a dictionary\nquantiles = np.arange(.05, .96, .1)\ndef fit_model(q):\n res = mod.fit(q=q)\n return [q, res.params['Intercept'], res.params['plate_maria']] + \\\n res.conf_int().loc['plate_maria'].tolist()\n\nmodels = [fit_model(x) for x in quantiles]\nmodels = pd.DataFrame(models, columns=['q', 'a', 'b','lb','ub'])\n\nols = smf.ols('profile ~ plate_maria', data).fit()\nols_ci = ols.conf_int().loc['plate_maria'].tolist()\nols = dict(a = ols.params['Intercept'],\n b = ols.params['plate_maria'],\n lb = ols_ci[0],\n ub = ols_ci[1])\n\nprint(models)\nprint(ols)\n\n# Plotting\nx = np.arange(data.plate_maria.min(), data.plate_maria.max(), 5)\nget_y = lambda a, b: a + b * x\n\nfig, ax = plt.subplots(figsize=(8, 6), dpi=300)\n\nfor i in range(models.shape[0]):\n y = get_y(models.a[i], models.b[i])\n ax.plot(x, y, linestyle='dotted', color='grey')\n\nbbox_props = dict(boxstyle='round, pad=0.3', fc='w',\n edgecolor='grey', linewidth=1, alpha=0.9)\n\ny = get_y(ols['a'], ols['b'])\n\nax.plot(x, y, color='red', label='OLS')\nax.scatter(data.plate_maria, data.profile, alpha=.5, c='#4d5aaf', s=70)\nax.set_xlim((0, 450))\nax.set_ylim((0, 25))\nlegend = ax.legend()\nax.set_xlabel('Sediment thickness at Mariana Plate, m', fontsize=14)\nax.set_ylabel('Profile, nr.', fontsize=14);\nplt.title(\"Mariana Trench: \\\n Quantile regression \\nof sediment thickness at\\\n Mariana Plate by 25 bathymetric profiles\",\n fontsize=14)\nplt.annotate('C', xy=(-0.01, 1.06), xycoords=\"axes fraction\",\n fontsize=18, bbox=bbox_props)\n\n# visualize and save\nplt.tight_layout()\nplt.subplots_adjust(top=0.85, bottom=0.15,\n left=0.10, right=0.95,\n hspace=0.25, wspace=0.35\n )\nfig.savefig('plot_QRc.png', dpi=300)\nplt.show()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.tight_layout", "pandas.DataFrame", "matplotlib.pyplot.annotate", "matplotlib.pyplot.subplots", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.show" ] ]
TeoZosa/deep-learning-v2-pytorch
[ "8e73c26f2ebf49769b798e9ff26bd90d7de69f7d" ]
[ "deep_learning_v2_pytorch/project-tv-script-generation/problem_unittests.py" ]
[ "from unittest.mock import MagicMock, patch\n\nimport numpy as np\nimport torch\n\n\nclass _TestNN(torch.nn.Module):\n def __init__(self, input_size, output_size):\n super().__init__()\n self.decoder = torch.nn.Linear(input_size, output_size)\n self.forward_called = False\n\n def forward(self, nn_input, hidden):\n self.forward_called = True\n output = self.decoder(nn_input)\n\n return output, hidden\n\n\ndef _print_success_message():\n print(\"Tests Passed\")\n\n\nclass AssertTest:\n def __init__(self, params):\n self.assert_param_message = \"\\n\".join(\n [str(k) + \": \" + str(v) + \"\" for k, v in params.items()]\n )\n\n def test(self, assert_condition, assert_message):\n assert assert_condition, (\n assert_message\n + \"\\n\\nUnit Test Function Parameters\\n\"\n + self.assert_param_message\n )\n\n\ndef test_create_lookup_tables(create_lookup_tables):\n test_text = \"\"\"\n Moe_Szyslak Moe's Tavern Where the elite meet to drink\n Bart_Simpson Eh yeah hello is Mike there Last name Rotch\n Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately\n Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick\n Moe_Szyslak Whats the matter Homer You're not your normal effervescent self\n Homer_Simpson I got my problems Moe Give me another one\n Moe_Szyslak Homer hey you should not drink to forget your problems\n Barney_Gumble Yeah you should only drink to enhance your social skills\"\"\"\n\n test_text = test_text.lower()\n test_text = test_text.split()\n\n vocab_to_int, int_to_vocab = create_lookup_tables(test_text)\n\n # Check types\n assert isinstance(vocab_to_int, dict), \"vocab_to_int is not a dictionary.\"\n assert isinstance(int_to_vocab, dict), \"int_to_vocab is not a dictionary.\"\n\n # Compare lengths of dicts\n assert len(vocab_to_int) == len(int_to_vocab), (\n \"Length of vocab_to_int and int_to_vocab don't match. \"\n \"vocab_to_int is length {}. 
int_to_vocab is length {}\".format(\n len(vocab_to_int), len(int_to_vocab)\n )\n )\n\n # Make sure the dicts have the same words\n vocab_to_int_word_set = set(vocab_to_int.keys())\n int_to_vocab_word_set = set(int_to_vocab.values())\n\n assert not (vocab_to_int_word_set - int_to_vocab_word_set), (\n \"vocab_to_int and int_to_vocab don't have the same words.\"\n \"{} found in vocab_to_int, but not in int_to_vocab\".format(\n vocab_to_int_word_set - int_to_vocab_word_set\n )\n )\n assert not (int_to_vocab_word_set - vocab_to_int_word_set), (\n \"vocab_to_int and int_to_vocab don't have the same words.\"\n \"{} found in int_to_vocab, but not in vocab_to_int\".format(\n int_to_vocab_word_set - vocab_to_int_word_set\n )\n )\n\n # Make sure the dicts have the same word ids\n vocab_to_int_word_id_set = set(vocab_to_int.values())\n int_to_vocab_word_id_set = set(int_to_vocab.keys())\n\n assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set), (\n \"vocab_to_int and int_to_vocab don't contain the same word ids.\"\n \"{} found in vocab_to_int, but not in int_to_vocab\".format(\n vocab_to_int_word_id_set - int_to_vocab_word_id_set\n )\n )\n assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set), (\n \"vocab_to_int and int_to_vocab don't contain the same word ids.\"\n \"{} found in int_to_vocab, but not in vocab_to_int\".format(\n int_to_vocab_word_id_set - vocab_to_int_word_id_set\n )\n )\n\n # Make sure the dicts make the same lookup\n missmatches = [\n (word, id, id, int_to_vocab[id])\n for word, id in vocab_to_int.items()\n if int_to_vocab[id] != word\n ]\n\n assert (\n not missmatches\n ), \"Found {} missmatche(s). First missmatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}\".format(\n len(missmatches), *missmatches[0]\n )\n\n assert (\n len(vocab_to_int) > len(set(test_text)) / 2\n ), \"The length of vocab seems too small. Found a length of {}\".format(\n len(vocab_to_int)\n )\n\n _print_success_message()\n\n\ndef test_tokenize(token_lookup):\n symbols = {\".\", \",\", '\"', \";\", \"!\", \"?\", \"(\", \")\", \"-\", \"\\n\"}\n token_dict = token_lookup()\n\n # Check type\n assert isinstance(token_dict, dict), f\"Returned type is {type(token_dict)}.\"\n\n # Check symbols\n missing_symbols = symbols - set(token_dict.keys())\n unknown_symbols = set(token_dict.keys()) - symbols\n\n assert not missing_symbols, f\"Missing symbols: {missing_symbols}\"\n assert not unknown_symbols, f\"Unknown symbols: {unknown_symbols}\"\n\n # Check values type\n bad_value_type = [\n type(val) for val in token_dict.values() if not isinstance(val, str)\n ]\n\n assert not bad_value_type, f\"Found token as {bad_value_type[0]} type.\"\n\n # Check for spaces\n key_has_spaces = [k for k in token_dict.keys() if \" \" in k]\n val_has_spaces = [val for val in token_dict.values() if \" \" in val]\n\n assert (\n not key_has_spaces\n ), 'The key \"{}\" includes spaces. Remove spaces from keys and values'.format(\n key_has_spaces[0]\n )\n assert (\n not val_has_spaces\n ), 'The value \"{}\" includes spaces. Remove spaces from keys and values'.format(\n val_has_spaces[0]\n )\n\n # Check for symbols in values\n symbol_val = ()\n for symbol in symbols:\n for val in token_dict.values():\n if symbol in val:\n symbol_val = (symbol, val)\n\n assert (\n not symbol_val\n ), \"Don't use a symbol that will be replaced in your tokens. 
Found the symbol {} in value {}\".format(\n *symbol_val\n )\n\n _print_success_message()\n\n\ndef test_rnn(RNN, train_on_gpu):\n batch_size = 50\n sequence_length = 3\n vocab_size = 20\n output_size = 20\n embedding_dim = 15\n hidden_dim = 10\n n_layers = 2\n\n # create test RNN\n # params: (vocab_size, output_size, embedding_dim, hidden_dim, n_layers)\n rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers)\n\n # create test input\n a = np.random.randint(vocab_size, size=(batch_size, sequence_length))\n # b = torch.LongTensor(a)\n b = torch.from_numpy(a)\n hidden = rnn.init_hidden(batch_size)\n\n if train_on_gpu:\n rnn.cuda()\n b = b.cuda()\n\n output, hidden_out = rnn(b, hidden)\n\n assert_test = AssertTest(\n {\n \"Input Size\": vocab_size,\n \"Output Size\": output_size,\n \"Hidden Dim\": hidden_dim,\n \"N Layers\": n_layers,\n \"Batch Size\": batch_size,\n \"Sequence Length\": sequence_length,\n \"Input\": b,\n }\n )\n\n # initialization\n correct_hidden_size = (n_layers, batch_size, hidden_dim)\n\n if type(hidden) == tuple:\n # LSTM\n assert_condition = hidden[0].size() == correct_hidden_size\n else:\n # GRU\n assert_condition = hidden.size() == correct_hidden_size\n\n assert_message = \"Wrong hidden state size. Expected type {}. Got type {}\".format(\n correct_hidden_size, hidden[0].size()\n )\n assert_test.test(assert_condition, assert_message)\n\n # output of rnn\n correct_hidden_size = (n_layers, batch_size, hidden_dim)\n\n if type(hidden) == tuple:\n # LSTM\n assert_condition = hidden_out[0].size() == correct_hidden_size\n else:\n # GRU\n assert_condition = hidden_out.size() == correct_hidden_size\n\n assert_message = \"Wrong hidden state size. Expected type {}. Got type {}\".format(\n correct_hidden_size, hidden_out[0].size()\n )\n assert_test.test(assert_condition, assert_message)\n\n correct_output_size = (batch_size, output_size)\n assert_condition = output.size() == correct_output_size\n assert_message = \"Wrong output size. Expected type {}. 
Got type {}\".format(\n correct_output_size, output.size()\n )\n assert_test.test(assert_condition, assert_message)\n\n _print_success_message()\n\n\ndef test_forward_back_prop(RNN, forward_back_prop, train_on_gpu):\n batch_size = 200\n input_size = 20\n output_size = 10\n sequence_length = 3\n embedding_dim = 15\n hidden_dim = 10\n n_layers = 2\n learning_rate = 0.01\n\n # create test RNN\n rnn = RNN(input_size, output_size, embedding_dim, hidden_dim, n_layers)\n\n mock_decoder = MagicMock(wraps=_TestNN(input_size, output_size))\n if train_on_gpu:\n mock_decoder.cuda()\n\n mock_decoder_optimizer = MagicMock(\n wraps=torch.optim.Adam(mock_decoder.parameters(), lr=learning_rate)\n )\n mock_criterion = MagicMock(wraps=torch.nn.CrossEntropyLoss())\n\n with patch.object(\n torch.autograd, \"backward\", wraps=torch.autograd.backward\n ) as mock_autograd_backward:\n inp = torch.FloatTensor(np.random.rand(batch_size, input_size))\n target = torch.LongTensor(np.random.randint(output_size, size=batch_size))\n\n hidden = rnn.init_hidden(batch_size)\n\n loss, hidden_out = forward_back_prop(\n mock_decoder, mock_decoder_optimizer, mock_criterion, inp, target, hidden\n )\n\n if type(hidden_out) == tuple:\n # LSTM\n assert (\n hidden_out[0][0] == hidden[0][0]\n ).sum() == batch_size * hidden_dim, (\n \"Returned hidden state is the incorrect size.\"\n )\n else:\n # GRU\n assert (\n hidden_out[0] == hidden[0]\n ).sum() == batch_size * hidden_dim, (\n \"Returned hidden state is the incorrect size.\"\n )\n\n assert (\n mock_decoder.zero_grad.called or mock_decoder_optimizer.zero_grad.called\n ), \"Didn't set the gradients to 0.\"\n assert mock_decoder.forward_called, \"Forward propagation not called.\"\n assert mock_autograd_backward.called, \"Backward propagation not called\"\n assert mock_decoder_optimizer.step.called, \"Optimization step not performed\"\n assert type(loss) == float, \"Wrong return type. Expected {}, got {}\".format(\n float, type(loss)\n )\n\n _print_success_message()\n" ]
[ [ "torch.nn.Linear", "torch.nn.CrossEntropyLoss", "torch.from_numpy", "numpy.random.rand", "numpy.random.randint" ] ]
ToJestKrzysio/ProcessVisualization
[ "9a359a31816bf1be65e3684a571509e3a2c2c0ac" ]
[ "src/bpmn_python/bpmn_process_csv_import.py" ]
[ "# coding=utf-8\n\"\"\"\nImplementation of exporting process to CSV functionality, as proposed in article \"Spreadsheet-Based Business\nProcess Modeling\" by Kluza k. and Wisniewski P.\n\"\"\"\nfrom __future__ import print_function\n\nimport copy\n\nimport pandas as pd\nimport re\nimport six\n\nfrom . import bpmn_python_consts as consts\nfrom . import bpmn_diagram_exception as bpmn_exception\n\nregex_pa_trailing_number = r'^(.*[a-z|A-Z]|[^0-9]?)([0-9]+)$'\nregex_pa_trailing_letter = r'(.+)([a-z|A-Z])'\nregex_pa_merge_node_finder = r'(.*?)([0-9]+[a-z|A-Z])(.*?)'\nregex_pa_num_let = r'([0-9]+)([a-z,A-Z])'\nregex_prefix_split_succ = r'^'\nregex_suffix_split_succ = r'([a-z|A-Z]|[a-z|A-Z][1]+)$'\n\ndefault_process_id = 'process_1'\ndefault_plane_id = 'plane_1'\n\n\ndef get_node_type(order, csv_line_dict):\n \"\"\"\n\n :param order:\n :param csv_line_dict:\n :return:\n \"\"\"\n if order == str(0):\n return consts.Consts.start_event\n if csv_line_dict[consts.Consts.csv_terminated] == 'yes':\n return consts.Consts.end_event\n if csv_line_dict[consts.Consts.csv_subprocess] == 'yes':\n return consts.Consts.subprocess\n else:\n return consts.Consts.task\n\n\ndef add_node_info_to_diagram_graph(order, node_type, activity, process_id, bpmn_diagram):\n \"\"\"\n\n :param order:\n :param node_type:\n :param activity:\n :param process_id:\n :param bpmn_diagram:\n \"\"\"\n if node_type == consts.Consts.start_event:\n bpmn_diagram.add_start_event_to_diagram(process_id, start_event_name=activity, node_id=order)\n elif node_type == consts.Consts.subprocess:\n bpmn_diagram.add_subprocess_to_diagram(process_id, subprocess_name=activity, node_id=order)\n elif node_type == consts.Consts.end_event:\n bpmn_diagram.add_end_event_to_diagram(process_id, node_id=order)\n elif node_type == consts.Consts.inclusive_gateway:\n bpmn_diagram.add_inclusive_gateway_to_diagram(process_id, node_id=order)\n elif node_type == consts.Consts.exclusive_gateway:\n bpmn_diagram.add_exclusive_gateway_to_diagram(process_id, node_id=order)\n elif node_type == consts.Consts.parallel_gateway:\n bpmn_diagram.add_parallel_gateway_to_diagram(process_id, node_id=order)\n else:\n bpmn_diagram.add_task_to_diagram(process_id, task_name=activity, node_id=order)\n\n\ndef import_nodes_info(process_dict, bpmn_diagram):\n \"\"\"\n\n :param process_dict:\n :param bpmn_diagram:\n \"\"\"\n for order, csv_line_dict in process_dict.items():\n node_type = get_node_type(order, csv_line_dict)\n activity = process_dict[order][consts.Consts.csv_activity]\n process_id = default_process_id\n add_node_info_to_diagram_graph(order, node_type, activity, process_id, bpmn_diagram)\n\n\ndef remove_white_spaces_in_orders(process_dict):\n \"\"\"\n\n :param process_dict:\n \"\"\"\n tmp_process_dict = copy.deepcopy(process_dict)\n for order, csv_line_dict in tmp_process_dict.items():\n del process_dict[order]\n if isinstance(order, six.string_types) and order.strip() != order:\n process_dict[order.strip()] = csv_line_dict\n else:\n process_dict[str(order)] = csv_line_dict\n\n\ndef get_possible_sequence_continuation_successor(node_id):\n \"\"\"\n\n :param node_id:\n :return:\n \"\"\"\n result = re.match(regex_pa_trailing_number, node_id)\n if result:\n last_number_in_order = result.group(2)\n next_number = str(int(last_number_in_order) + 1)\n prefix = result.group(1)\n return [prefix + next_number]\n else:\n # possible if e.g. 
4a\n return []\n\n\ndef get_possible_split_continuation_successor(node_id):\n \"\"\"\n\n :param node_id:\n :return:\n \"\"\"\n result = re.match(regex_pa_trailing_number, node_id)\n if result:\n trailing_number = result.group(2)\n prefix = result.group(1)\n new_trailing_number = str(int(trailing_number) + 1)\n new_node_id = prefix + new_trailing_number\n return [new_node_id + 'a', new_node_id + 'a1']\n else:\n return []\n\n\ndef get_possible_merge_continuation_successors(node_id_arg):\n \"\"\"\n\n :param node_id_arg:\n :return:\n \"\"\"\n node_id = copy.deepcopy(node_id_arg)\n result_trailing_number = re.match(regex_pa_trailing_number, node_id)\n if result_trailing_number:\n node_id = result_trailing_number.group(1)\n\n result_trailing_letter = re.match(regex_pa_trailing_letter, node_id)\n if result_trailing_letter:\n possible_successors = []\n for result in re.finditer(regex_pa_merge_node_finder, node_id):\n num_let_pair = result.group(2)\n prefix = result.group(1)\n num_let_result = re.match(regex_pa_num_let, num_let_pair)\n num = num_let_result.group(1)\n inc_num = str(int(num) + 1)\n possible_successors.append(prefix + inc_num)\n return possible_successors\n else:\n return []\n\n\ndef is_any_possible_successor_present_in_node_ids(possible_successors, nodes_ids):\n \"\"\"\n\n :param possible_successors:\n :param nodes_ids:\n :return:\n \"\"\"\n return bool(get_possible_successors_set_present_in_node_ids(possible_successors, nodes_ids))\n\n\ndef get_possible_successors_set_present_in_node_ids(possible_successors, nodes_ids):\n \"\"\"\n\n :param possible_successors:\n :param nodes_ids:\n :return:\n \"\"\"\n return set(possible_successors).intersection(set(nodes_ids))\n\n\ndef get_possible_successor_present_in_node_ids_or_raise_excp(poissible_successors_node_id, nodes_ids):\n \"\"\"\n\n :param poissible_successors_node_id:\n :param nodes_ids:\n :return:\n \"\"\"\n possible_successor_set = get_possible_successors_set_present_in_node_ids(poissible_successors_node_id, nodes_ids)\n if len(possible_successor_set) != 1:\n raise bpmn_exception.BpmnPythonError(\"Some error in program - there should be exactly one found successor.\")\n else:\n return possible_successor_set.pop()\n\n\ndef get_all_split_successors(node_id, nodes_ids):\n \"\"\"\n\n :param node_id:\n :param nodes_ids:\n :return:\n \"\"\"\n result = re.match(regex_pa_trailing_number, node_id)\n if not result:\n raise bpmn_exception.BpmnPythonError(\"Something wrong in program - look for \" + node_id)\n trailing_number = result.group(2)\n prefix = result.group(1)\n new_trailing_number = str(int(trailing_number) + 1)\n next_node_id = prefix + new_trailing_number\n\n pattern = regex_prefix_split_succ + next_node_id + regex_suffix_split_succ\n split_successors = []\n for elem in nodes_ids:\n if re.match(pattern, elem):\n split_successors.append(elem)\n return split_successors\n\n\ndef is_there_sequence_continuation(node_id, nodes_ids):\n \"\"\"\n\n :param node_id:\n :param nodes_ids:\n :return:\n \"\"\"\n possible_seq_succ = get_possible_sequence_continuation_successor(node_id)\n return is_any_possible_successor_present_in_node_ids(possible_seq_succ, nodes_ids)\n\n\ndef is_there_split_continuation(node_id, nodes_ids):\n \"\"\"\n\n :param node_id:\n :param nodes_ids:\n :return:\n \"\"\"\n possible_split_succ = get_possible_split_continuation_successor(node_id)\n return is_any_possible_successor_present_in_node_ids(possible_split_succ, nodes_ids)\n\n\ndef is_there_merge_continuation(node_id, nodes_ids):\n \"\"\"\n\n :param node_id:\n :param 
nodes_ids:\n :return:\n \"\"\"\n possible_merge_succ = get_possible_merge_continuation_successors(node_id)\n return is_any_possible_successor_present_in_node_ids(possible_merge_succ, nodes_ids)\n\n\ndef is_node_the_end_event(node_id, process_dict):\n \"\"\"\n\n :param node_id:\n :param process_dict:\n :return:\n \"\"\"\n return process_dict[node_id][consts.Consts.csv_terminated] == 'yes'\n\n\ndef add_outgoing_flow(node_id, successor_node_id, bpmn_diagram):\n \"\"\"\n\n :param node_id:\n :param successor_node_id:\n :param bpmn_diagram:\n \"\"\"\n if bpmn_diagram.diagram_graph.node[node_id].get(consts.Consts.outgoing_flow) is None:\n bpmn_diagram.diagram_graph.node[node_id][consts.Consts.outgoing_flow] = []\n bpmn_diagram.diagram_graph.node[node_id][consts.Consts.outgoing_flow].append(get_flow_id(node_id, successor_node_id))\n\n\ndef add_incoming_flow(node_id, from_node_id, bpmn_diagram):\n \"\"\"\n\n :param node_id:\n :param from_node_id:\n :param bpmn_diagram:\n \"\"\"\n if bpmn_diagram.diagram_graph.node[node_id].get(consts.Consts.incoming_flow) is None:\n bpmn_diagram.diagram_graph.node[node_id][consts.Consts.incoming_flow] = []\n bpmn_diagram.diagram_graph.node[node_id][consts.Consts.incoming_flow].append(get_flow_id(from_node_id, node_id))\n\n\ndef get_connection_condition_if_present(to_node_id, process_dict):\n \"\"\"\n\n :param to_node_id:\n :param process_dict:\n :return:\n \"\"\"\n if to_node_id in process_dict:\n return process_dict[to_node_id].get(consts.Consts.csv_condition)\n\n\ndef get_flow_id(from_node_id, to_node_id):\n \"\"\"\n\n :param from_node_id:\n :param to_node_id:\n :return:\n \"\"\"\n return from_node_id + \"__\" + to_node_id\n\n\ndef add_edge(from_node_id, to_node_id, process_dict, bpmn_diagram, sequence_flows):\n \"\"\"\n\n :param from_node_id:\n :param to_node_id:\n :param process_dict:\n :param bpmn_diagram:\n :param sequence_flows:\n \"\"\"\n condition = get_connection_condition_if_present(to_node_id, process_dict)\n bpmn_diagram.diagram_graph.add_edge(from_node_id, to_node_id)\n flow_id = get_flow_id(from_node_id, to_node_id)\n bpmn_diagram.diagram_graph[from_node_id][to_node_id][consts.Consts.id] = flow_id\n bpmn_diagram.diagram_graph[from_node_id][to_node_id][consts.Consts.process] = default_process_id\n bpmn_diagram.diagram_graph[from_node_id][to_node_id][consts.Consts.name] = \"\"\n bpmn_diagram.diagram_graph[from_node_id][to_node_id][consts.Consts.source_ref] = from_node_id\n bpmn_diagram.diagram_graph[from_node_id][to_node_id][consts.Consts.target_ref] = to_node_id\n if bool(condition):\n bpmn_diagram.diagram_graph[from_node_id][to_node_id][consts.Consts.condition_expression] = {\n consts.Consts.id: flow_id + \"_cond\",\n consts.Consts.condition_expression: condition\n }\n sequence_flows[flow_id] = {consts.Consts.name: flow_id, consts.Consts.source_ref: from_node_id,\n consts.Consts.target_ref: to_node_id}\n\n\ndef add_connection(from_node_id, to_node_id, process_dict, diagram_graph, sequence_flows):\n \"\"\"\n\n :param from_node_id:\n :param to_node_id:\n :param process_dict:\n :param diagram_graph:\n :param sequence_flows:\n \"\"\"\n add_outgoing_flow(from_node_id, to_node_id, diagram_graph)\n add_incoming_flow(to_node_id, from_node_id, diagram_graph)\n add_edge(from_node_id, to_node_id, process_dict, diagram_graph, sequence_flows)\n\n\ndef get_node_conditions(split_successors, process_dict):\n \"\"\"\n\n :param split_successors:\n :param process_dict:\n :return:\n \"\"\"\n conditions = []\n for succ in split_successors:\n 
conditions.append(process_dict[succ][consts.Consts.csv_condition].strip())\n return conditions\n\n\ndef yes_no_conditions(node_conditions):\n \"\"\"\n\n :param node_conditions:\n :return:\n \"\"\"\n return set(node_conditions) == {\"yes\", \"no\"}\n\n\ndef sth_else_conditions(node_conditions):\n \"\"\"\n\n :param node_conditions:\n :return:\n \"\"\"\n return \"else\" in node_conditions\n\n\ndef no_conditions(node_conditions):\n \"\"\"\n\n :param node_conditions:\n :return:\n \"\"\"\n for node in node_conditions:\n if bool(node):\n return False\n return True\n\n\ndef get_gateway_type(node_id_to_add_after, nodes_ids, process_dict):\n \"\"\"\n\n :param node_id_to_add_after:\n :param nodes_ids:\n :param process_dict:\n :return:\n \"\"\"\n split_successors = get_all_split_successors(node_id_to_add_after, nodes_ids)\n successors_conditions = get_node_conditions(split_successors, process_dict)\n if len(split_successors) == 2:\n if yes_no_conditions(successors_conditions) or sth_else_conditions(successors_conditions):\n return consts.Consts.exclusive_gateway\n if no_conditions(successors_conditions):\n return consts.Consts.parallel_gateway\n return consts.Consts.inclusive_gateway\n\n\ndef add_split_gateway(node_id_to_add_after, nodes_ids, process_dict, diagram_graph):\n \"\"\"\n\n :param node_id_to_add_after:\n :param nodes_ids:\n :param process_dict:\n :param diagram_graph:\n :return:\n \"\"\"\n split_gateway_id = node_id_to_add_after + \"_split\"\n process_id = default_process_id\n gateway_type = get_gateway_type(node_id_to_add_after, nodes_ids, process_dict)\n activity = \"\"\n add_node_info_to_diagram_graph(split_gateway_id, gateway_type, activity, process_id, diagram_graph)\n return split_gateway_id\n\n\ndef get_merge_node_type(merge_successor_id, bpmn_diagram):\n \"\"\"\n\n :param merge_successor_id:\n :param bpmn_diagram:\n :return:\n \"\"\"\n result = re.match(regex_pa_trailing_number, merge_successor_id)\n if result:\n trailing_number = result.group(2)\n prev_prev_number = int(trailing_number) - 2\n if prev_prev_number < 0:\n raise bpmn_exception.BpmnPythonError(\"Something wrong in csv file syntax - look for \" + merge_successor_id)\n prefix = result.group(1)\n split_node_id = prefix + str(prev_prev_number) + \"_split\"\n if bool(bpmn_diagram.diagram_graph.has_node(split_node_id)):\n node_type = bpmn_diagram.diagram_graph.node[split_node_id][consts.Consts.type]\n if bool(node_type):\n return node_type\n return consts.Consts.inclusive_gateway\n\n\ndef add_merge_gateway_if_not_exists(merge_successor_id, bpmn_diagram):\n \"\"\"\n\n :param merge_successor_id:\n :param bpmn_diagram:\n :return:\n \"\"\"\n merge_gateway_id = merge_successor_id + \"_join\"\n if bpmn_diagram.diagram_graph.has_node(merge_gateway_id):\n just_created = False\n return merge_gateway_id, just_created\n else:\n just_created = True\n process_id = default_process_id\n gateway_type = get_merge_node_type(merge_successor_id, bpmn_diagram)\n activity = \"\"\n add_node_info_to_diagram_graph(merge_gateway_id, gateway_type, activity, process_id, bpmn_diagram)\n return merge_gateway_id, just_created\n\n\ndef fill_graph_connections(process_dict, bpmn_diagram, sequence_flows):\n \"\"\"\n\n :param process_dict:\n :param bpmn_diagram:\n :param sequence_flows:\n \"\"\"\n nodes_ids = list(bpmn_diagram.diagram_graph.node.keys())\n nodes_ids_to_process = copy.deepcopy(nodes_ids)\n while bool(nodes_ids_to_process):\n node_id = str(nodes_ids_to_process.pop(0))\n if is_node_the_end_event(node_id, process_dict):\n pass\n elif 
is_there_sequence_continuation(node_id, nodes_ids):\n possible_sequence_successors = get_possible_sequence_continuation_successor(node_id)\n successor_node_id = get_possible_successor_present_in_node_ids_or_raise_excp(possible_sequence_successors,\n nodes_ids)\n add_connection(node_id, successor_node_id, process_dict, bpmn_diagram, sequence_flows)\n elif is_there_split_continuation(node_id, nodes_ids):\n split_gateway_id = add_split_gateway(node_id, nodes_ids, process_dict, bpmn_diagram)\n add_connection(node_id, split_gateway_id, process_dict, bpmn_diagram, sequence_flows)\n for successor_node_id in get_all_split_successors(node_id, nodes_ids):\n add_connection(split_gateway_id, successor_node_id, process_dict, bpmn_diagram, sequence_flows)\n pass\n elif is_there_merge_continuation(node_id, nodes_ids):\n possible_merge_successors = get_possible_merge_continuation_successors(node_id)\n merge_successor_id = get_possible_successor_present_in_node_ids_or_raise_excp(possible_merge_successors,\n nodes_ids)\n merge_gateway_id, just_created = add_merge_gateway_if_not_exists(merge_successor_id, bpmn_diagram)\n if just_created:\n add_connection(merge_gateway_id, merge_successor_id, process_dict, bpmn_diagram, sequence_flows)\n add_connection(node_id, merge_gateway_id, process_dict, bpmn_diagram, sequence_flows)\n else:\n raise bpmn_exception.BpmnPythonError(\"Something wrong in csv file syntax - look for \" + node_id)\n\n\ndef remove_outgoing_connection(base_node, bpmn_diagram, sequence_flows):\n \"\"\"\n\n :param base_node:\n :param bpmn_diagram:\n :param sequence_flows:\n :return:\n \"\"\"\n outgoing_flow_id = bpmn_diagram.diagram_graph.node[base_node][consts.Consts.outgoing_flow][0]\n neighbour_node = sequence_flows[outgoing_flow_id][consts.Consts.target_ref]\n bpmn_diagram.diagram_graph.node[neighbour_node][consts.Consts.incoming_flow].remove(outgoing_flow_id)\n del sequence_flows[outgoing_flow_id]\n bpmn_diagram.diagram_graph.remove_edge(base_node, neighbour_node)\n return neighbour_node\n\n\ndef remove_incoming_connection(base_node, bpmn_diagram, sequence_flows):\n \"\"\"\n\n :param base_node:\n :param bpmn_diagram:\n :param sequence_flows:\n :return:\n \"\"\"\n incoming_flow_id = bpmn_diagram.diagram_graph.node[base_node][consts.Consts.incoming_flow][0]\n neighbour_node = sequence_flows[incoming_flow_id][consts.Consts.source_ref]\n bpmn_diagram.diagram_graph.node[neighbour_node][consts.Consts.outgoing_flow].remove(incoming_flow_id)\n del sequence_flows[incoming_flow_id]\n bpmn_diagram.diagram_graph.remove_edge(neighbour_node, base_node)\n return neighbour_node\n\n\ndef remove_node(node_id_to_remove, process_dict, bpmn_diagram, sequence_flows):\n \"\"\"\n\n :param node_id_to_remove:\n :param process_dict:\n :param bpmn_diagram:\n :param sequence_flows:\n :return:\n \"\"\"\n new_source_node = remove_incoming_connection(node_id_to_remove, bpmn_diagram, sequence_flows)\n new_target_node = remove_outgoing_connection(node_id_to_remove, bpmn_diagram, sequence_flows)\n bpmn_diagram.diagram_graph.remove_node(node_id_to_remove)\n process_dict.pop(node_id_to_remove, None)\n # add new connection\n return new_source_node, new_target_node\n\n\ndef remove_unnecessary_merge_gateways(process_dict, bpmn_diagram, sequence_flows):\n \"\"\"\n\n :param process_dict:\n :param bpmn_diagram:\n :param sequence_flows:\n \"\"\"\n for node in list(bpmn_diagram.get_nodes()):\n gateway_type = node[1].get(consts.Consts.type)\n if gateway_type in [consts.Consts.inclusive_gateway, consts.Consts.exclusive_gateway,\n 
consts.Consts.parallel_gateway]:\n if len(node[1].get(consts.Consts.incoming_flow)) < 2 \\\n and len(node[1].get(consts.Consts.outgoing_flow)) < 2:\n new_source_node, new_target_node = remove_node(node[0], process_dict, bpmn_diagram, sequence_flows)\n add_connection(new_source_node, new_target_node, process_dict, bpmn_diagram, sequence_flows)\n\n\ndef remove_goto_nodes(process_dict, diagram_graph, sequence_flows):\n \"\"\"\n\n :param process_dict:\n :param diagram_graph:\n :param sequence_flows:\n \"\"\"\n for order, csv_line_dict in copy.deepcopy(process_dict).items():\n if csv_line_dict[consts.Consts.csv_activity].lower().startswith(\"goto\"):\n source_node, _ = remove_node(order, process_dict, diagram_graph, sequence_flows)\n target_node = csv_line_dict[consts.Consts.csv_activity].strip().split()[1]\n add_connection(source_node, target_node, process_dict, diagram_graph, sequence_flows)\n\n\nclass BpmnDiagramGraphCSVImport(object):\n \"\"\"\n Template\n \"\"\"\n @staticmethod\n def load_diagram_from_csv(filepath, bpmn_diagram):\n \"\"\"\n Reads an CSV file from given filepath and maps it into inner representation of BPMN diagram.\n Returns an instance of BPMNDiagramGraph class.\n\n :param filepath: string with output filepath,\n :param bpmn_diagram: an instance of BpmnDiagramGraph class.\n \"\"\"\n sequence_flows = bpmn_diagram.sequence_flows\n process_elements_dict = bpmn_diagram.process_elements\n diagram_attributes = bpmn_diagram.diagram_attributes\n plane_attributes = bpmn_diagram.plane_attributes\n\n process_dict = BpmnDiagramGraphCSVImport.import_csv_file_as_dict(filepath)\n\n BpmnDiagramGraphCSVImport.populate_diagram_elements_dict(diagram_attributes)\n BpmnDiagramGraphCSVImport.populate_process_elements_dict(process_elements_dict, process_dict)\n BpmnDiagramGraphCSVImport.populate_plane_elements_dict(plane_attributes)\n\n BpmnDiagramGraphCSVImport.import_nodes(process_dict, bpmn_diagram, sequence_flows)\n BpmnDiagramGraphCSVImport.representation_adjustment(process_dict, bpmn_diagram, sequence_flows)\n\n @staticmethod\n def import_csv_file_as_dict(filepath):\n \"\"\"\n\n :param filepath:\n :return:\n \"\"\"\n process_dict = pd.read_csv(filepath, index_col=0).fillna(\"\").to_dict('index')\n remove_white_spaces_in_orders(process_dict)\n return process_dict\n\n @staticmethod\n def get_given_task_as_dict(csv_df, order_val):\n \"\"\"\n\n :param csv_df:\n :param order_val:\n :return:\n \"\"\"\n return csv_df.loc[csv_df[consts.Consts.csv_order] == order_val].iloc[0].to_dict()\n\n @staticmethod\n def import_nodes(process_dict, bpmn_diagram, sequence_flows):\n \"\"\"\n\n :param process_dict:\n :param bpmn_diagram:\n :param sequence_flows:\n \"\"\"\n import_nodes_info(process_dict, bpmn_diagram)\n fill_graph_connections(process_dict, bpmn_diagram, sequence_flows)\n\n @staticmethod\n def populate_diagram_elements_dict(diagram_elements_dict):\n \"\"\"\n\n :param diagram_elements_dict:\n \"\"\"\n diagram_elements_dict[consts.Consts.id] = \"diagram1\"\n diagram_elements_dict[consts.Consts.name] = \"diagram_name\"\n\n @staticmethod\n def populate_process_elements_dict(process_elements_dict, process_dict):\n \"\"\"\n\n :param process_elements_dict:\n :param process_dict:\n \"\"\"\n process_id = default_process_id\n process_element_attributes = {consts.Consts.id: default_process_id,\n consts.Consts.name: \"\",\n consts.Consts.is_closed: \"false\",\n consts.Consts.is_executable: \"false\",\n consts.Consts.process_type: \"None\",\n consts.Consts.node_ids: list(process_dict.keys())}\n 
process_elements_dict[process_id] = process_element_attributes\n\n @staticmethod\n def populate_plane_elements_dict(plane_elements_dict):\n \"\"\"\n\n :param plane_elements_dict:\n \"\"\"\n plane_elements_dict[consts.Consts.id] = default_plane_id\n plane_elements_dict[consts.Consts.bpmn_element] = default_process_id\n\n @staticmethod\n def legacy_adjustment(bpmn_diagram):\n \"\"\"\n\n :param bpmn_diagram:\n \"\"\"\n for node in bpmn_diagram.get_nodes():\n if node[1].get(consts.Consts.incoming_flow) is None:\n node[1][consts.Consts.incoming_flow] = []\n if node[1].get(consts.Consts.outgoing_flow) is None:\n node[1][consts.Consts.outgoing_flow] = []\n if node[1].get(consts.Consts.event_definitions) is None:\n node[1][consts.Consts.event_definitions] = []\n\n @staticmethod\n def representation_adjustment(process_dict, diagram_graph, sequence_flows):\n \"\"\"\n\n :param process_dict:\n :param diagram_graph:\n :param sequence_flows:\n \"\"\"\n BpmnDiagramGraphCSVImport.legacy_adjustment(diagram_graph)\n remove_goto_nodes(process_dict, diagram_graph, sequence_flows)\n remove_unnecessary_merge_gateways(process_dict, diagram_graph, sequence_flows)\n" ]
[ [ "pandas.read_csv" ] ]
nicolenair/poke-env
[ "47f645093441c13448278290c872102da9339025" ]
[ "src/CEMAgent/rl_with_open_ai_gym_wrapper-cem.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport json\nimport random\n\nfrom poke_env.player_configuration import PlayerConfiguration\nfrom poke_env.player.env_player import Gen7EnvSinglePlayer\nfrom poke_env.player.random_player import RandomPlayer\nfrom poke_env.player.frozen_rl_player import FrozenRLPlayer\nfrom poke_env.player.frozen_rl_player_ratio import FrozenRLPlayerRatio\nfrom poke_env.server_configuration import LocalhostServerConfiguration\n\nfrom rl.agents.cem import CEMAgent\nfrom rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy\nfrom rl.memory import SequentialMemory, EpisodeParameterMemory\nfrom tensorflow.keras.layers import Dense, Flatten, Activation\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.optimizers import Adam\n\n\n# We define our RL player\n# It needs a state embedder and a reward computer, hence these two methods\nclass SimpleRLPlayer(Gen7EnvSinglePlayer):\n def embed_battle(self, battle):\n # -1 indicates that the move does not have a base power\n # or is not available\n moves_base_power = -np.ones(4)\n moves_dmg_multiplier = np.ones(4)\n for i, move in enumerate(battle.available_moves):\n moves_base_power[i] = (\n move.base_power / 100\n ) # Simple rescaling to facilitate learning\n if move.type:\n moves_dmg_multiplier[i] = move.type.damage_multiplier(\n battle.opponent_active_pokemon.type_1,\n battle.opponent_active_pokemon.type_2,\n )\n\n # We count how many pokemons have not fainted in each team\n remaining_mon_team = (\n len([mon for mon in battle.team.values() if mon.fainted]) / 6\n )\n remaining_mon_opponent = (\n len([mon for mon in battle.opponent_team.values() if mon.fainted]) / 6\n )\n\n # Final vector with 10 components\n return np.concatenate(\n [\n moves_base_power,\n moves_dmg_multiplier,\n [remaining_mon_team, remaining_mon_opponent],\n ]\n )\n\n def compute_reward(self, battle) -> float:\n return self.reward_computing_helper(\n battle, fainted_value=2, hp_value=1, victory_value=30\n )\n\n\nclass MaxDamagePlayer(RandomPlayer):\n def choose_move(self, battle):\n # If the player can attack, it will\n if battle.available_moves:\n # Finds the best move among available ones\n best_move = max(battle.available_moves, key=lambda move: move.base_power)\n return self.create_order(best_move)\n\n # If no attack is available, a random switch will be made\n else:\n return self.choose_random_move(battle)\n\n\n\n\nNB_TRAINING_STEPS = 10000\nNB_EVALUATION_EPISODES = 100\n\n# variable for naming .csv files.\n# Change this according to whether the training process was carried out against a random player or a max damage player or frozen player\nTRAINING_OPPONENT = 'RandomPlayer'\nFROZEN_RL_PRESENT = True\n\ntf.random.set_seed(0)\nnp.random.seed(0)\n\n\n# This is the function that will be used to train the agent\ndef agent_training(player, agent, nb_steps, filename):\n model = agent.fit(player, nb_steps=nb_steps)\n # save model history to csv\n save_file = f\"{filename}_trainlog_{nb_steps}eps.csv\"\n print(\"===============================================\")\n print(f\"Saving model history as {save_file}\")\n print(\"===============================================\")\n pd.DataFrame(model.history).to_csv(save_file)\n player.complete_current_battle()\n\n\ndef agent_evaluation(player, agent, nb_episodes, filename):\n # Reset battle statistics\n player.reset_battles()\n model = agent.test(player, nb_episodes=nb_episodes, visualize=False, verbose=False)\n\n # save model history to csv\n 
save_file = f\"{filename}_testlog_{nb_episodes}eps.csv\"\n print(\"===============================================\")\n print(f\"Saving model history as {save_file}\")\n print(\"===============================================\")\n pd.DataFrame(model.history).to_csv(save_file)\n \n print(\n \"CEM Evaluation: %d victories out of %d episodes\"\n % (player.n_won_battles, nb_episodes)\n )\n\n########################## Frozen RL Model variables ##########################\n\nif FROZEN_RL_PRESENT:\n ### CHANGE THIS IF YOU'RE NOT USING A CEM MODEL - REFER TO frozen_rl_player.py FOR MORE DETAILS\n MODEL_NAME = 'CEM'\n\n ### CHANGE THE LOAD MODEL DIRECTORY ACCORDING TO LOCAL SETUP ###\n loaded_model = tf.keras.models.load_model('/Users/nicarinanan/Desktop/poke-env/modelmax_20000')\n\n ### CHANGE AGENT DETAILS ACCORDING TO THE SAVED MODEL AGENT TYPE ###\n memory = EpisodeParameterMemory(limit=10000, window_length=1)\n\n # load saved model into CEMAgent class\n trained_agent = CEMAgent(model=loaded_model, nb_actions=18, memory=memory,\n batch_size=50, nb_steps_warmup=1000, train_interval=50, elite_frac=0.05, noise_ampl=4)\n\n##############################################################################\nif __name__ == \"__main__\":\n env_player = SimpleRLPlayer(\n player_configuration=PlayerConfiguration(\"satunicarina\", None),\n battle_format=\"gen7randombattle\",\n server_configuration=LocalhostServerConfiguration,\n )\n\n random_opponent = RandomPlayer(\n player_configuration=PlayerConfiguration(\"duanicarina\", None),\n battle_format=\"gen7randombattle\",\n server_configuration=LocalhostServerConfiguration,\n )\n\n maxdamage_opponent = MaxDamagePlayer(\n player_configuration=PlayerConfiguration(\"tiganicarina\", None),\n battle_format=\"gen7randombattle\",\n server_configuration=LocalhostServerConfiguration,\n )\n \n if FROZEN_RL_PRESENT:\n frozen_opponent = FrozenRLPlayer(player_configuration=PlayerConfiguration(\"empatnicarina\", None), battle_format=\"gen7randombattle\", server_configuration=LocalhostServerConfiguration, trained_rl_model=trained_agent,\n model_name = MODEL_NAME,)\n \n frozenratio_opponent = FrozenRLPlayerRatio(player_configuration=PlayerConfiguration(\"limanicarina\", None), battle_format=\"gen7randombattle\", server_configuration=LocalhostServerConfiguration, trained_rl_model=trained_agent,\n model_name = MODEL_NAME,)\n\n # Output dimension\n n_action = len(env_player.action_space)\n\n# model = Sequential()\n# model.add(Dense(128, activation=\"elu\", input_shape=(1, 10)))\n# model.add(Flatten())\n# model.add(Dense(n_action))\n# model.add(Activation('softmax'))\n memory = EpisodeParameterMemory(limit=10000, window_length=1)\n# Option 2: deep network\n model = Sequential()\n model.add(Flatten(input_shape=(1, 10)))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(16))\n model.add(Activation('relu'))\n model.add(Dense(n_action))\n model.add(Activation('softmax'))\n\n \n# #only uncomment below line for preserved model self-play\n# model = tf.keras.models.load_model('/Users/nicarinanan/Desktop/poke-env/modelpostmaxpreserved2_10000')\n\n # Defining our agent\n agent = CEMAgent(model=model, nb_actions=n_action, memory=memory,\n batch_size=50, nb_steps_warmup=1000, train_interval=50, elite_frac=0.05, noise_ampl=4)\n \n\n agent.compile()\n\n # Training\n env_player.play_against(\n env_algorithm=agent_training,\n opponent=random_opponent,\n env_algorithm_kwargs={\"agent\": agent, \"nb_steps\": 
NB_TRAINING_STEPS, \"filename\": TRAINING_OPPONENT},\n )\n model.save(\"model_%d\" % NB_TRAINING_STEPS)\n\n # Evaluation\n print(\"Results against random player:\")\n env_player.play_against(\n env_algorithm=agent_evaluation,\n opponent=random_opponent,\n env_algorithm_kwargs={\"agent\": agent, \"nb_episodes\": NB_EVALUATION_EPISODES, \"filename\": f'({TRAINING_OPPONENT}_{NB_TRAINING_STEPS})RandomPlayer'},\n )\n\n print(\"\\nResults against max player:\")\n env_player.play_against(\n env_algorithm=agent_evaluation,\n opponent=maxdamage_opponent,\n env_algorithm_kwargs={\"agent\": agent, \"nb_episodes\": NB_EVALUATION_EPISODES, \"filename\": f'({TRAINING_OPPONENT}_{NB_TRAINING_STEPS})MaxPlayer'},\n )\n\n if FROZEN_RL_PRESENT:\n\n print(\"\\nResults against frozen rl player:\")\n env_player.play_against(\n env_algorithm=agent_evaluation,\n opponent=frozen_opponent,\n env_algorithm_kwargs={\"agent\": agent, \"nb_episodes\": NB_EVALUATION_EPISODES, \"filename\": f'({TRAINING_OPPONENT}_{NB_TRAINING_STEPS})FrozenRLPlayer'},\n )\n\n\n\n" ]
[ [ "tensorflow.keras.models.Sequential", "numpy.ones", "tensorflow.keras.layers.Flatten", "tensorflow.keras.models.load_model", "pandas.DataFrame", "numpy.random.seed", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.Dense", "numpy.concatenate", "tensorflow.random.set_seed" ] ]
leehsiu/pyopenpose
[ "c4feef04a9e563fb91e18f745bc187c6f2aeb72c" ]
[ "openpose/model/rpn/loss.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\"\"\"\nThis file contains specific functions for computing losses on the RPN\nfile\n\"\"\"\n\nimport torch\nfrom torch.nn import functional as F\n\nfrom .utils import concat_box_prediction_layers\n\nfrom ..balanced_positive_negative_sampler import BalancedPositiveNegativeSampler\nfrom ..utils import cat\n\nfrom openpose.layers import smooth_l1_loss\nfrom openpose.model.matcher import Matcher\nfrom openpose.structures.boxlist_ops import boxlist_iou\nfrom openpose.structures.boxlist_ops import cat_boxlist\n\n\nclass RPNLossComputation(object):\n \"\"\"\n This class computes the RPN loss.\n \"\"\"\n\n def __init__(self, proposal_matcher, fg_bg_sampler, box_coder,\n generate_labels_func):\n \"\"\"\n Arguments:\n proposal_matcher (Matcher)\n fg_bg_sampler (BalancedPositiveNegativeSampler)\n box_coder (BoxCoder)\n \"\"\"\n # self.target_preparator = target_preparator\n self.proposal_matcher = proposal_matcher\n self.fg_bg_sampler = fg_bg_sampler\n self.box_coder = box_coder\n self.copied_fields = []\n self.generate_labels_func = generate_labels_func\n self.discard_cases = ['not_visibility', 'between_thresholds']\n\n def match_targets_to_anchors(self, anchor, target, copied_fields=[]):\n match_quality_matrix = boxlist_iou(target, anchor)\n matched_idxs = self.proposal_matcher(match_quality_matrix)\n # RPN doesn't need any fields from target\n # for creating the labels, so clear them all\n target = target.copy_with_fields(copied_fields)\n # get the targets corresponding GT for each anchor\n # NB: need to clamp the indices because we can have a single\n # GT in the image, and matched_idxs can be -2, which goes\n # out of bounds\n matched_targets = target[matched_idxs.clamp(min=0)]\n matched_targets.add_field(\"matched_idxs\", matched_idxs)\n return matched_targets\n\n def prepare_targets(self, anchors, targets):\n labels = []\n regression_targets = []\n for anchors_per_image, targets_per_image in zip(anchors, targets):\n matched_targets = self.match_targets_to_anchors(\n anchors_per_image, targets_per_image, self.copied_fields\n )\n\n matched_idxs = matched_targets.get_field(\"matched_idxs\")\n labels_per_image = self.generate_labels_func(matched_targets)\n labels_per_image = labels_per_image.to(dtype=torch.float32)\n\n # Background (negative examples)\n bg_indices = matched_idxs == Matcher.BELOW_LOW_THRESHOLD\n labels_per_image[bg_indices] = 0\n\n # discard anchors that go out of the boundaries of the image\n if \"not_visibility\" in self.discard_cases:\n labels_per_image[~anchors_per_image.get_field(\"visibility\")] = -1\n\n # discard indices that are between thresholds\n if \"between_thresholds\" in self.discard_cases:\n inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS\n labels_per_image[inds_to_discard] = -1\n\n # compute regression targets\n regression_targets_per_image = self.box_coder.encode(\n matched_targets.bbox, anchors_per_image.bbox\n )\n\n labels.append(labels_per_image)\n regression_targets.append(regression_targets_per_image)\n\n return labels, regression_targets\n\n\n def __call__(self, anchors, objectness, box_regression, targets):\n \"\"\"\n Arguments:\n anchors (list[BoxList])\n objectness (list[Tensor])\n box_regression (list[Tensor])\n targets (list[BoxList])\n\n Returns:\n objectness_loss (Tensor)\n box_loss (Tensor\n \"\"\"\n anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]\n labels, regression_targets = self.prepare_targets(anchors, targets)\n 
sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)\n sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)\n sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)\n\n sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)\n\n objectness, box_regression = \\\n concat_box_prediction_layers(objectness, box_regression)\n\n objectness = objectness.squeeze()\n\n labels = torch.cat(labels, dim=0)\n regression_targets = torch.cat(regression_targets, dim=0)\n\n box_loss = smooth_l1_loss(\n box_regression[sampled_pos_inds],\n regression_targets[sampled_pos_inds],\n beta=1.0 / 9,\n size_average=False,\n ) / (sampled_inds.numel())\n\n objectness_loss = F.binary_cross_entropy_with_logits(\n objectness[sampled_inds], labels[sampled_inds]\n )\n\n return objectness_loss, box_loss\n\n# This function should be overwritten in RetinaNet\ndef generate_rpn_labels(matched_targets):\n matched_idxs = matched_targets.get_field(\"matched_idxs\")\n labels_per_image = matched_idxs >= 0\n return labels_per_image\n\n\ndef make_rpn_loss_evaluator(cfg, box_coder):\n matcher = Matcher(\n cfg.MODEL.RPN.FG_IOU_THRESHOLD,\n cfg.MODEL.RPN.BG_IOU_THRESHOLD,\n allow_low_quality_matches=True,\n )\n\n fg_bg_sampler = BalancedPositiveNegativeSampler(\n cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE, cfg.MODEL.RPN.POSITIVE_FRACTION\n )\n\n loss_evaluator = RPNLossComputation(\n matcher,\n fg_bg_sampler,\n box_coder,\n generate_rpn_labels\n )\n return loss_evaluator\n" ]
[ [ "torch.cat", "torch.nn.functional.binary_cross_entropy_with_logits" ] ]
unendin/allennlp
[ "0dcbaea6dbc6cc43e24a3564d6d37f8a1421484c" ]
[ "tests/models/constituency_parser_test.py" ]
[ "# pylint: disable=no-self-use,invalid-name,no-value-for-parameter\nimport os\n\nfrom nltk import Tree\nimport torch\nfrom torch.autograd import Variable\n\n\nfrom allennlp.common.testing.model_test_case import ModelTestCase\nfrom allennlp.models.constituency_parser import SpanInformation\n\nclass SpanConstituencyParserTest(ModelTestCase):\n\n def setUp(self):\n os.system(\"cd ./scripts/EVALB/ && make && cd ../../\")\n super(SpanConstituencyParserTest, self).setUp()\n self.set_up_model(\"tests/fixtures/constituency_parser/constituency_parser.json\",\n \"tests/fixtures/data/example_ptb.trees\")\n\n def tearDown(self):\n os.system(\"rm scripts/EVALB/evalb\")\n super().tearDown()\n\n def test_span_parser_can_save_and_load(self):\n self.ensure_model_can_train_save_and_load(self.param_file)\n\n def test_batch_predictions_are_consistent(self):\n self.ensure_batch_predictions_are_consistent()\n\n def test_forward_can_handle_a_single_word_as_input(self):\n # A very annoying edge case: the PTB has several single word sentences.\n # when running with a batch size 1, we have to be very careful\n # about how we .squeeze/.unsqueeze things to make sure it still runs.\n text = {\"tokens\": Variable(torch.LongTensor([[1]]).long())}\n pos_tags = Variable(torch.LongTensor([[1]]).long())\n spans = Variable(torch.LongTensor([[[0, 0]]]))\n label = Variable(torch.LongTensor([[1]]))\n self.model(text, spans, [{\"tokens\": [\"hello\"]}], pos_tags, label)\n\n def test_decode_runs(self):\n self.model.eval()\n training_tensors = self.dataset.as_tensor_dict()\n output_dict = self.model(**training_tensors)\n decode_output_dict = self.model.decode(output_dict)\n assert set(decode_output_dict.keys()) == {'spans', 'class_probabilities', 'trees',\n 'tokens', 'pos_tags', 'num_spans', 'loss'}\n metrics = self.model.get_metrics(reset=True)\n metric_keys = set(metrics.keys())\n assert \"evalb_precision\" in metric_keys\n assert \"evalb_recall\" in metric_keys\n assert \"evalb_f1_measure\" in metric_keys\n\n def test_resolve_overlap_conflicts_greedily(self):\n spans = [SpanInformation(start=1, end=5, no_label_prob=0.7,\n label_prob=0.2, label_index=2),\n SpanInformation(start=2, end=7, no_label_prob=0.5,\n label_prob=0.3, label_index=4)]\n resolved_spans = self.model.resolve_overlap_conflicts_greedily(spans)\n assert resolved_spans == [SpanInformation(start=2, end=7, no_label_prob=0.5,\n label_prob=0.3, label_index=4)]\n\n def test_construct_tree_from_spans(self):\n # (S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))\n tree_spans = [((0, 1), 'D'), ((1, 2), 'N'), ((0, 2), 'NP'),\n ((2, 3), 'V'), ((3, 4), 'D'), ((4, 5), 'N'),\n ((3, 5), 'NP'), ((2, 5), 'VP'), ((0, 5), 'S')]\n sentence = [\"the\", \"dog\", \"chased\", \"the\", \"cat\"]\n tree = self.model.construct_tree_from_spans({x:y for x, y in tree_spans}, sentence)\n correct_tree = Tree.fromstring(\"(S (NP (D the) (N dog)) (VP (V chased) (NP (D the) (N cat))))\")\n assert tree == correct_tree\n\n def test_construct_tree_from_spans_handles_nested_labels(self):\n # The tree construction should split the \"S-NP\" into (S (NP ...)).\n tree_spans = [((0, 1), 'D'), ((1, 2), 'N'), ((0, 2), 'S-NP')]\n sentence = [\"the\", \"dog\"]\n tree = self.model.construct_tree_from_spans({x:y for x, y in tree_spans}, sentence)\n correct_tree = Tree.fromstring(\"(S (NP (D the) (N dog)))\")\n assert tree == correct_tree\n\n def test_tree_construction_with_too_few_spans_creates_trees_with_depth_one_word_nodes(self):\n # We only have a partial tree here: (S (NP (D the) (N dog)). 
Decoding should\n # recover this from the spans, whilst attaching all other words to the root node with\n # XX POS tag labels, as the right hand side splits will not occur in tree_spans.\n tree_spans = [((0, 1), 'D'), ((1, 2), 'N'), ((0, 2), 'NP'), ((0, 5), 'S')]\n sentence = [\"the\", \"dog\", \"chased\", \"the\", \"cat\"]\n tree = self.model.construct_tree_from_spans({x:y for x, y in tree_spans}, sentence)\n correct_tree = Tree.fromstring(\"(S (NP (D the) (N dog)) (XX chased) (XX the) (XX cat))\")\n assert tree == correct_tree\n" ]
[ [ "torch.LongTensor" ] ]
helene-todd/M2_thesis_code
[ "f844d6652229c6abe09bd40aa43f5002faa9e5ba" ]
[ "with_chemical_syn/simulation.py" ]
[ "from matplotlib import cm, rcParams\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib as matplotlib\nfrom matplotlib import patheffects\nimport numpy as np\nimport math as math\nimport random as rand\nimport os, sys, csv\nimport pandas as pd\n\n# matplotlib.pyplot.xkcd(scale=.5, length=100, randomness=2)\n# rcParams['path.effects'] = [patheffects.withStroke(linewidth=.5)]\n\ndef lif_euler(t, dt, v1, v2, I1, I2):\n if len(ts1) > 0 :\n syn2 = alpha*alpha*(t-ts1)*np.exp(-alpha*(t-ts1))\n else :\n syn2 = 0\n if len(ts2) > 0 :\n syn1 = alpha*alpha*(t-ts2)*np.exp(-alpha*(t-ts2))\n else :\n syn1 = 0\n res = [v1 + dt*(-v1 + gc*(v2-v1) + I1) + -gs*dt*np.sum(syn1), v2 + dt*(-v2 + gc*(v1-v2) + I2) + -gs*dt*np.sum(syn2)]\n print(t, res[0], res[1])\n return res\n\ngc, gs, beta, alpha = 0., 0.1, 0.2, 4\nI1, I2 = 1.6, 1.6\nVth, Vr = 1, 0\n\ndt = 0.001\nphis1, phis2 = [], []\n\nmaxtime = 200\n\nv1_0, v2_0 = 0.5, 0\nx1, x2 = [v1_0], [v2_0]\nts1, ts2 = np.array([]), np.array([]) #time spike 1, time spike 2\nt = [0]\n\nwhile t[-1] < maxtime :\n t.append(t[-1]+dt)\n next_values= lif_euler(t[-1], dt, x1[-1], x2[-1], I1, I2) # example of common input\n\n if (next_values[0] > 1 and (next_values[1] > 1 or next_values[1]+gc*beta > 1)) or (next_values[1] > 1 and (next_values[0] > 1 or next_values[0]+gc*beta > 1)) :\n x2.append(0)\n x1.append(0)\n ts2 = np.append(ts2, t[-1])\n ts1 = np.append(ts1, t[-1])\n\n elif next_values[1] > 1 :\n x2.append(0)\n ts2 = np.append(ts2, t[-1])\n if next_values[0] + gc*beta > 1 :\n x1.append(0)\n else :\n x1.append(next_values[0]+gc*beta)\n\n elif next_values[0] > 1 :\n x1.append(0)\n ts1 = np.append(ts1, t[-1])\n if next_values[1] + gc*beta > 1 :\n x2.append(0)\n else :\n x2.append(next_values[1]+gc*beta)\n\n else :\n x2.append(next_values[1])\n x1.append(next_values[0])\n\n# A spike occurs iff there was a reset\nfor i in range(1,len(x1)) :\n if abs(x1[i]-x1[i-1]) > (Vth-Vr)/2 and x1[i] < 1 and x1[i-1] < 1:\n x1.insert(i, Vth+0.5)\n x2.insert(i, x2[i])\n t.insert(i, t[i])\n\nfor i in range(1,len(x2)) :\n if abs(x2[i]-x2[i-1]) > (Vth-Vr)/2 and x2[i] < 1 and x2[i-1] < 1:\n x2.insert(i, Vth+0.5)\n x1.insert(i, x1[i])\n t.insert(i, t[i])\n\nplt.figure(figsize=(12,3.5))\n\nplt.plot(t, x1, label='$V_{1}$', color='#aa3863')\nplt.plot(t, x2, label='$V_{2}$', color='#3b7d86')\nplt.xlim(100, 200)\n\nplt.legend(loc='upper right', fontsize=10.5)\nplt.xlabel('Time ($10^{-2}$ s)', fontsize=11)\nplt.ylabel('Voltage $V_k, k \\in \\{1,2}$', fontsize=11)\nplt.title(f'Example of electrically & chemically coupled neurons, $I={I1}, \\gamma={gc}, \\\\beta={beta}, \\kappa={gs}, \\\\alpha={alpha}$', pad=15, size=14)\n\nplt.tight_layout()\nplt.savefig('example_elec_chem.svg')\nplt.show()\n" ]
[ [ "numpy.sum", "matplotlib.pyplot.legend", "numpy.append", "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.savefig", "numpy.exp", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
Brian-ZhenLiu/CNN_models
[ "7cc646b181d86facf19b4129762504ea7b8f1409" ]
[ "Model_Code/train_dnn_002.py" ]
[ "import tensorflow as tf\r\nimport os\r\n#from PIL import Image\r\nimport random\r\nimport numpy as np\r\nfrom datetime import datetime\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import classification_report\r\n\r\nslim = tf.contrib.slim\r\n\r\nglobal first\r\nfirst = True\r\n\r\nclassnum=12\r\ntestnum = tf.placeholder(tf.int32)\r\ntrainnum = tf.placeholder(tf.int32)\r\nvalidnum = tf.placeholder(tf.int32)\r\nlearnrate = tf.placeholder(tf.float32)\r\n\r\ndef getinputs(path):\r\n filename_queue=tf.train.string_input_producer([path])\r\n reader=tf.TFRecordReader()\r\n _,serialized_example=reader.read(filename_queue)\r\n features=tf.parse_single_example(serialized_example,\r\n features={\r\n 'label':tf.FixedLenFeature([], tf.int64),\r\n 'img_raw' : tf.FixedLenFeature([], tf.string),\r\n })\r\n image=tf.decode_raw(features['img_raw'],tf.uint8)\r\n label=tf.cast(features['label'],tf.int32)\r\n image=tf.reshape(image,[4096,1])\r\n return image,label\r\n\r\ndef get_batch(image,label,batch_size,crop_size):\r\n #print(image.shape)\r\n #print(label.shape)\r\n images,labels=tf.train.shuffle_batch([image,label],\r\n batch_size=batch_size,num_threads=10,capacity=10000,min_after_dequeue=200)\r\n return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])\r\n\r\ndef get_test_batch(image,label,batch_size):\r\n images,labels=tf.train.batch([image,label],batch_size=batch_size)\r\n return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])\r\n\r\ndef get_valid_batch(image,label,batch_size):\r\n images,labels=tf.train.batch([image,label],batch_size=batch_size)\r\n return tf.reshape(images,[batch_size,4096]),tf.reshape(labels,[batch_size])\r\n \r\nclass trainwork(object):\r\n def __init__(self):\r\n with tf.variable_scope('scop'):\r\n self.w1=tf.get_variable('w1', [4096,1024],initializer=tf.contrib.layers.xavier_initializer_conv2d())\r\n self.w2=tf.get_variable('w2', [1024,classnum],initializer=tf.contrib.layers.xavier_initializer_conv2d())\r\n self.b1 = tf.get_variable('b1', [1024],initializer=tf.constant_initializer(0.0))\r\n self.b2 = tf.get_variable('b2', [classnum],initializer=tf.constant_initializer(0.0))\r\n\r\n def inference(self,images):\r\n images=tf.cast(images,tf.float32)/255.0\r\n l1 = tf.matmul(images, self.w1)+self.b1\r\n l1=tf.nn.relu(l1)\r\n out = tf.matmul(l1, self.w2)+self.b2\r\n return out\r\n\r\n def test_inference(self,images):\r\n images=tf.cast(images,tf.float32)/255.0\r\n l1 = tf.matmul(images, self.w1)+self.b1\r\n l1=tf.nn.relu(l1)\r\n out = tf.matmul(l1, self.w2)+self.b2\r\n return out\r\n \r\n def valid_inference(self,images):\r\n images=tf.cast(images,tf.float32)/255.0\r\n l1 = tf.matmul(images, self.w1)+self.b1\r\n l1=tf.nn.relu(l1)\r\n out = tf.matmul(l1, self.w2)+self.b2\r\n return out\r\n \r\n def softmax_loss(self,predicts,labels):\r\n predicts=tf.nn.softmax(predicts)\r\n labels=tf.one_hot(labels,classnum)\r\n loss=-tf.reduce_sum(labels*tf.log(predicts))\r\n return loss\r\n\r\n def optimer(self,loss,lr=0.001):\r\n train_step=tf.train.GradientDescentOptimizer(lr).minimize(loss)\r\n return train_step\r\n\r\npath=r'C:\\JC\\test\\train_model.ckpt'\r\nimage,label=getinputs(r'C:\\JC\\tfrecord\\64_shuffle/train.tfrecords')\r\ntest_image,test_label=getinputs(r'C:\\JC\\tfrecord\\64_shuffle/test.tfrecords')\r\nvalid_image,valid_label= 
getinputs(r'C:\\JC\\tfrecord\\64_shuffle\\validation.tfrecords')\r\n\r\nbatch_image,batch_label=get_batch(image,label,trainnum,0)\r\nwork=trainwork()\r\ninf=work.inference(batch_image)\r\nloss=work.softmax_loss(inf,batch_label)\r\nopti=work.optimer(loss,learnrate)\r\n\r\ntest_image_batch,test_label_batch=get_test_batch(test_image,test_label,testnum)\r\ntest_inf=work.test_inference(test_image_batch)\r\ntest_labels=tf.one_hot(test_label_batch,classnum)\r\ntest_pre = tf.reshape(test_inf, [testnum, classnum])\r\ncorrect_prediction=tf.equal(tf.argmax(test_inf,1),tf.argmax(test_labels,1))\r\naccuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\r\ntest_pre = tf.argmax(test_pre, 1)\r\ntest_true = tf.argmax(test_labels, 1)\r\n\r\nvalid_image_batch,valid_label_batch=get_valid_batch(valid_image,valid_label,validnum)\r\nvalid_inf=work.valid_inference(valid_image_batch)\r\nvalid_labels=tf.one_hot(valid_label_batch,classnum)\r\n#train_step=tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)\r\nvalid_pre = tf.reshape(valid_inf, [validnum, classnum])\r\nvalid_correct_prediction=tf.equal(tf.argmax(valid_inf,1),tf.argmax(valid_labels,1))\r\nvalid_accuracy=tf.reduce_mean(tf.cast(valid_correct_prediction,tf.float32))\r\nvalid_pre = tf.argmax(valid_pre, 1)\r\nvalid_true = tf.argmax(valid_labels, 1)\r\n\r\ntarget_names = ['class sg', 'class bm', 'class wd', 'class wt', 'class wj', 'class wo', 'class ym', 'class shq', 'class shj',\r\n 'class no', 'class yh', 'class fb']\r\n\r\ninit = tf.initialize_all_variables()\r\nconfig=tf.ConfigProto()\r\nconfig.gpu_options.allow_growth=True\r\n\r\n#init=tf.initialize_all_variables()\r\ndef train(train_num=64,test_num=32,lr=1e-4,loop_count=10000,report_step=100,save_step=1000,restore=False):\r\n with tf.Session(config=config) as sess:\r\n sess.run(init)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(coord=coord)\r\n if restore:\r\n tf.train.Saver().restore(sess,path)\r\n feed_dict={\r\n testnum: test_num,\r\n trainnum: train_num,\r\n learnrate:lr\r\n }\r\n for i in range(loop_count):\r\n loss_np, _, label_np, image_np, inf_np = sess.run(\r\n [loss, opti, batch_label, batch_image, inf],feed_dict=feed_dict)\r\n if i > 0 and i % report_step == 0:\r\n accuracy_np = sess.run([accuracy],feed_dict=feed_dict)\r\n print(i, accuracy_np, loss_np)\r\n if i > 0 and i % save_step == 0:\r\n tf.train.Saver().save(sess, path)\r\n tf.train.Saver().save(sess, path)\r\n coord.request_stop()\r\n coord.join(threads)\r\n\r\n \r\ndef test_and_valid(test_loop=1,valid_loop=1,test_num=64,valid_num=64):\r\n feed_dict={\r\n testnum: test_num,\r\n validnum: valid_num\r\n }\r\n with tf.Session(config=config) as sess:\r\n sess.run(init)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(coord=coord)\r\n tf.train.Saver().restore(sess,path)\r\n #test\r\n test_acc_avg = 0.0\r\n test_true_total=np.array([])\r\n test_pre_total=np.array([])\r\n for i in range(0, test_loop):\r\n accuracy_np = sess.run([accuracy],feed_dict=feed_dict)\r\n test_pre_1, test_true_1 = sess.run([test_pre, test_true],feed_dict=feed_dict)\r\n test_pre_1 = np.array(test_pre_1)\r\n test_true_1 = np.array(test_true_1)\r\n \r\n test_acc_avg = test_acc_avg + accuracy_np[0]\r\n test_true_total = np.concatenate((test_true_total,test_true_1),axis=0)\r\n test_pre_total = np.concatenate((test_pre_total,test_pre_1), axis=0)\r\n print('------test_accuracy-----')\r\n print(test_acc_avg / test_loop)\r\n print('------test_accuracy-----')\r\n\r\n 
print('------test_classification_report-----')\r\n print(classification_report(test_true_total, test_pre_total, target_names=target_names))\r\n print('------test_classification_report-----')\r\n print('------test_confusion_matrix-----')\r\n cm = confusion_matrix(y_true=test_true_total, y_pred=test_pre_total)\r\n print(cm)\r\n print('------test_confusion_matrix-----')\r\n\r\n #valid\r\n if valid_loop>0:\r\n valid_acc_avg = 0.0\r\n valid_true_total=np.array([])\r\n valid_pre_total=np.array([])\r\n for i in range(0, valid_loop):\r\n accuracy_np = sess.run([valid_accuracy],feed_dict=feed_dict) \r\n valid_pre_1, valid_true_1 = sess.run([valid_pre, valid_true],feed_dict=feed_dict)\r\n valid_pre_1 = np.array(valid_pre_1)\r\n valid_true_1 = np.array(valid_true_1)\r\n \r\n valid_acc_avg = valid_acc_avg + accuracy_np[0]\r\n valid_true_total = np.concatenate((valid_true_total,valid_true_1),axis=0)\r\n valid_pre_total = np.concatenate((valid_pre_total,valid_pre_1), axis=0)\r\n \r\n print('------valid_accuracy-----')\r\n print(valid_acc_avg / valid_loop)\r\n print('------valid_accuracy-----')\r\n \r\n print('------valid_classification_report-----')\r\n print(classification_report(valid_true_total, valid_pre_total, target_names=target_names))\r\n print('------valid_classification_report-----')\r\n print('------valid_confusion_matrix-----')\r\n cm = confusion_matrix(y_true=valid_true_total, y_pred=valid_pre_total)\r\n print(cm)\r\n print('------valid_confusion_matrix-----')\r\n \r\n coord.request_stop()\r\n coord.join(threads)\r\n\r\ndef predict_time(loop=100):\r\n feed_dict={\r\n testnum:1\r\n }\r\n with tf.Session(config=config) as sess:\r\n sess.run(init)\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(coord=coord)\r\n tf.train.Saver().restore(sess,path)\r\n total=0.0\r\n for i in range(loop):\r\n a = datetime.now()\r\n accuracy_np = sess.run([accuracy],feed_dict=feed_dict)\r\n b = datetime.now()\r\n c = (b - a).microseconds\r\n total+=c\r\n print('predict_time(ms): ',total/(loop*1000))\r\n coord.request_stop()\r\n coord.join(threads)\r\n \r\n \r\ntrain(train_num=128,loop_count=1200)\r\ntest_and_valid(10,10,200,200)\r\npredict_time(1000)\r\n" ]
[ [ "tensorflow.initialize_all_variables", "tensorflow.reshape", "sklearn.metrics.classification_report", "tensorflow.decode_raw", "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.contrib.layers.xavier_initializer_conv2d", "tensorflow.one_hot", "tensorflow.train.batch", "tensorflow.nn.softmax", "numpy.concatenate", "tensorflow.FixedLenFeature", "tensorflow.train.shuffle_batch", "tensorflow.TFRecordReader", "tensorflow.constant_initializer", "tensorflow.train.string_input_producer", "sklearn.metrics.confusion_matrix", "tensorflow.cast", "tensorflow.train.start_queue_runners", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.ConfigProto", "tensorflow.train.Coordinator", "tensorflow.placeholder", "tensorflow.train.GradientDescentOptimizer", "tensorflow.argmax", "numpy.array", "tensorflow.log", "tensorflow.nn.relu" ] ]
Haichao-Zhang/alf_randperm_reproduce
[ "d5223b7534ab20ca725aac940ad274ef806d1d3e" ]
[ "alf/networks/mdq_critic_networks.py" ]
[ "# Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"MdqCriticNetworks\"\"\"\n\nimport gin\nimport functools\nimport math\nimport numpy as np\n\nimport torch\nimport torch.nn.functional as f\nimport torch.nn as nn\n\nimport alf\nimport alf.layers as layers\nimport alf.nest as nest\nfrom alf.networks import Network, EncodingNetwork, ParallelEncodingNetwork\nfrom alf.initializers import variance_scaling_init\nfrom alf.tensor_specs import TensorSpec, BoundedTensorSpec\nfrom alf.utils import common, spec_utils, tensor_utils\nimport alf.utils.math_ops as math_ops\nfrom alf.utils.action_quantizer import ActionQuantizer\n\n\[email protected]\nclass MdqCriticNetwork(Network):\n \"\"\"Create an instance of MdqCriticNetwork for estimating action-value\n of continuous actions and action sampling used in the MDQ algorithm.\"\"\"\n\n def __init__(self,\n input_tensor_spec,\n action_qt: ActionQuantizer = None,\n num_critic_replicas=2,\n obs_encoding_layer_params=None,\n pre_encoding_layer_params=None,\n mid_encoding_layer_params=None,\n post_encoding_layer_params=None,\n free_form_fc_layer_params=None,\n activation=torch.relu_,\n kernel_initializer=None,\n debug_summaries=False,\n name=\"MdqCriticNetwork\"):\n \"\"\"Creates an instance of `MdqCriticNetwork` for estimating action-value\n of continuous actions and action sampling.\n\n Currently there are two branches of networks:\n - free-form branch: a plain MLP for Q-learning\n - adv-form branch: an advantage form of network for action\n generation. It is trained by a target from the free-form net.\n\n The adv-form branch has the following structures for flexibility:\n obs -> [obs_encoding_net] -> encoded_obs\n encoded_obs, action ->\n [pre_encoding_nets] ->\n [mid_shared_encoding_nets] ->\n [post_encoding_nets] -> outputs\n where the pre_encoding_nets and post_encoding_nets do not share\n parameters across action dimensions while mid_shared_encoding_nets\n shares parameters across action dimensions.\n If the encoding_layer_params for a sub-net is None, that sub-net is\n effectively neglected.\n\n Furthermore, to enable parallel computation across action dimension in\n the case of value computation, we have both parallel and individual\n versions for the nets without parameter sharing. For exmaple, for\n post_encoding_nets, we also have post_encoding_parallel_net, which is\n essentially the equivalent form of post_encoding_nets but supports\n parallel forwarding. 
The parameters of the two versions are synced.\n The partial actions (a[0:i]) are zero-padded for both parallel and\n individual networks to enable parallel computation.\n\n\n For conciseness purpose, the following notations will be used when\n convenient:\n - B: batch size\n - d: dimensionality of feature\n - n: number of network replica\n - action_dim: the dimensionality of actions\n - action_bin: number of discrete bins for each action dim\n\n Args:\n input_tensor_spec: A tuple of TensorSpecs (observation_spec, action_spec)\n representing the inputs.\n action_qt (ActionQuantizer): action quantization module\n num_critic_replicas (int): number of critic networks\n obs_encoding_layer_params (tuple[int]): a tuple of integers\n representing hidden FC layer sizes for encoding observations.\n pre_encoding_layer_params (tuple[int]): a tuple of integers\n representing hidden FC layer sizes for encoding concatenated\n [encoded_observation, actions]. Parameters are not shared across\n action dimensions\n mid_encoding_layer_params (tuple[int]): a tuple of integers\n representing hidden FC layer for further encoding the outputs\n from pre_encoding_net. The parameters are shared across action\n dimentions.\n post_encoding_layer_params (tuple[int]): a tuple of integers\n representing hidden FC layer for further encoding the outputs\n from mid_encoding_net. The parameters are not shared across\n action dimentions.\n free_form_fc_layer_params (tuple[int]): a tuple of integers\n representing hidden FC layer for Q-learning. We refer it as\n the free form to differentiate it from the mdq-form of network\n which is structured.\n activation (nn.functional): activation used for hidden layers. The\n last layer will not be activated.\n kernel_initializer (Callable): initializer for all the layers but\n the last layer. 
If none is provided a variance_scaling_initializer\n with uniform distribution will be used.\n name (str):\n \"\"\"\n\n super().__init__(input_tensor_spec, name=name)\n\n observation_spec, action_spec = input_tensor_spec\n\n flat_action_spec = nest.flatten(action_spec)\n if len(flat_action_spec) > 1:\n raise ValueError(\n 'Only a single action is supported by this network')\n\n self._single_action_spec = flat_action_spec[0]\n\n if action_qt is None:\n action_qt = ActionQuantizer(action_spec, \"uniform\", 15)\n self._action_qt = action_qt\n self._action_bins = self._action_qt._action_bins\n\n # the logpi of the uniform prior used for KL computation\n self._log_pi_uniform_prior = -np.log(self._action_bins)\n\n self._action_dim = action_spec.shape[0] # control vector dim\n self._num_critic_replicas = num_critic_replicas\n\n self._obs_encoding_net = ParallelEncodingNetwork(\n observation_spec,\n self._num_critic_replicas,\n fc_layer_params=obs_encoding_layer_params,\n activation=activation,\n kernel_initializer=kernel_initializer)\n\n last_activation = math_ops.identity\n last_kernel_initializer = functools.partial(torch.nn.init.uniform_, \\\n a=-0.003, b=0.003)\n\n in_size = self._action_dim\n\n self._pre_encoding_nets = []\n for i in range(self._action_dim):\n # output_spec.shape: [n, d]\n self._pre_encoding_nets.append(\n ParallelEncodingNetwork(\n TensorSpec((self._obs_encoding_net.output_spec.shape[-1] +\n in_size, )),\n self._num_critic_replicas,\n fc_layer_params=pre_encoding_layer_params,\n activation=activation,\n kernel_initializer=kernel_initializer))\n\n # parallel along both critic and action dims without sharing parameters\n # for each action dimension.\n # input: [B, action_dim*n, d]: need to stack over dim1\n # output: [B, action_dim*n, d']: need to unstack over dim1 for\n # splitting over networks\n self._pre_encoding_parallel_net = ParallelEncodingNetwork(\n TensorSpec(\n (self._obs_encoding_net.output_spec.shape[-1] + in_size, )),\n self._num_critic_replicas * self._action_dim,\n fc_layer_params=pre_encoding_layer_params,\n activation=activation,\n kernel_initializer=kernel_initializer)\n\n # parallel along both critic and action dims with sharing parameters\n # for each action dimension.\n # input: [action_dim*B, n, d]: need to stack over dim0\n # output: [action_dim*B, n, d']: need to unstack over dim0 for\n # splitting over networks\n self._mid_shared_encoding_nets = ParallelEncodingNetwork(\n TensorSpec(\n (self._pre_encoding_parallel_net.output_spec.shape[-1], )),\n self._num_critic_replicas,\n fc_layer_params=mid_encoding_layer_params,\n activation=activation,\n kernel_initializer=kernel_initializer)\n out_size = self._mid_shared_encoding_nets.output_spec.shape[-1]\n\n post_enc_out_size = self._action_qt.action_bins\n\n self._post_encoding_nets = []\n for i in range(self._action_dim):\n self._post_encoding_nets.append(\n ParallelEncodingNetwork(\n TensorSpec((out_size, )),\n self._num_critic_replicas,\n fc_layer_params=post_encoding_layer_params,\n activation=activation,\n kernel_initializer=kernel_initializer,\n last_layer_size=post_enc_out_size,\n last_activation=last_activation,\n last_kernel_initializer=last_kernel_initializer))\n\n # parallel along both critic and action dims without sharing parameters\n # for each action dimension.\n # input: [B, action_dim*n, d]: need to stack over dim1\n # output: [B, action_dim*n, d']: need to unstack over dim1 for\n # splitting over networks\n self._post_encoding_parallel_net = ParallelEncodingNetwork(\n TensorSpec((out_size, 
)),\n self._num_critic_replicas * self._action_dim,\n fc_layer_params=post_encoding_layer_params,\n activation=activation,\n kernel_initializer=kernel_initializer,\n last_layer_size=post_enc_out_size,\n last_activation=last_activation,\n last_kernel_initializer=last_kernel_initializer)\n\n assert free_form_fc_layer_params is not None\n\n self._free_form_q_net = ParallelEncodingNetwork(\n TensorSpec((observation_spec.shape[-1] + self._action_dim, )),\n self._num_critic_replicas,\n fc_layer_params=free_form_fc_layer_params,\n activation=activation,\n kernel_initializer=kernel_initializer,\n last_layer_size=1,\n last_activation=math_ops.identity,\n last_kernel_initializer=last_kernel_initializer)\n\n MdqCriticNetwork._parallel_to_individual_network_sync(\n self._pre_encoding_parallel_net,\n self._pre_encoding_nets,\n step=self._num_critic_replicas)\n\n MdqCriticNetwork._parallel_to_individual_network_sync(\n self._post_encoding_parallel_net,\n self._post_encoding_nets,\n step=self._num_critic_replicas)\n\n self._output_spec = TensorSpec(())\n\n self._debug_summaries = debug_summaries\n\n @torch.no_grad()\n def get_action(self, inputs, alpha, greedy):\n \"\"\"Sample action from the distribution induced by the mdq-net.\n\n Args:\n inputs: A tuple of Tensors consistent with `input_tensor_spec`\n alpha: the temperature used for the advantage computation\n greedy (bool): If True, do greedy sampling by taking the mode of\n the distribution. If False, do direct sampling from the\n distribution.\n Returns:\n actions (torch.Tensor): a tensor of the shape [B, n, action_dim]\n log_pi_per_dim (torch.Tensor): a tensor of the shape\n [B, n, action_dim] representing the log_pi for each dimension\n of the sampled multi-dimensional action\n \"\"\"\n\n observations = inputs\n\n # [B, n, d]\n t_shape = (observations.shape[0], self._num_critic_replicas,\n self._action_dim)\n\n actions = torch.zeros(t_shape)\n log_pi_per_dim = torch.zeros(t_shape)\n\n # [B, n, d]\n encoded_obs, _ = self._obs_encoding_net(observations)\n\n if actions.ndim == 2:\n actions = tensor_utils.tensor_extend_new_dim(\n actions, dim=1, n=self._num_critic_replicas)\n\n action_padded = torch.zeros(t_shape)\n\n for i in range(self._action_dim):\n action_padded[..., 0:i] = actions[..., 0:i]\n joint = torch.cat((encoded_obs, action_padded.detach()), -1)\n\n action_values_i, _ = self._net_forward_individual(joint, alpha, i)\n\n trans_action_values_i = self._transform_action_value(\n action_values_i, alpha)\n sampled_indices, sampled_log_pi = self._sample_action_from_value(\n trans_action_values_i / alpha, alpha, greedy)\n # convert index to action\n actions[..., i] = self._action_qt.ind_to_action(sampled_indices)\n log_pi_per_dim[..., i] = sampled_log_pi\n\n return actions, log_pi_per_dim\n\n def forward(self, inputs, alpha, state=(), free_form=False):\n \"\"\"Computes action-value given an observation.\n\n Args:\n inputs: A tuple of Tensors consistent with `input_tensor_spec`\n alpha: the temperature used for the advantage computation\n state: empty for API consistenty\n free_form (bool): use the free-form branch for computation if True;\n default value is False\n\n Returns:\n Q_values (torch.Tensor):\n - if free_form is True, its shape is [B, n]\n - if free_form is False, its shape is [B, n, action_dim]\n state: empty\n \"\"\"\n\n if free_form:\n Q_values, state = self._free_form_q_net(inputs)\n Q_values = Q_values.squeeze(2)\n return Q_values, state\n\n observations, actions = inputs\n\n # observations: [B, d]\n # encoded_obs: [B, n, d']\n # 
Note that when obs_encoding_net is a dummy network\n # (i.e., layer_params is None), d' is the same as d.\n encoded_obs, _ = self._obs_encoding_net(observations)\n\n if actions.ndim == 2:\n # [B, action_dim] -> [B, n, action_dim]\n actions = tensor_utils.tensor_extend_new_dim(\n actions, dim=1, n=self._num_critic_replicas)\n\n # [B, n, action_dim]\n t_shape = (observations.shape[0], self._num_critic_replicas,\n self._action_dim)\n\n # [action_dim, B, n, 1]\n Q_values = torch.zeros(self._action_dim, observations.shape[0],\n self._num_critic_replicas, 1)\n\n joint = torch.empty(0)\n action_padded = torch.zeros(t_shape)\n\n # prepare parallel-forwarding inputs\n inputs_per_dim = []\n for i in range(self._action_dim):\n action_padded[..., 0:i] = actions[..., 0:i]\n # concat (obs, action) for each action dimension\n inputs_per_dim.append(\n torch.cat((encoded_obs, action_padded.detach()), dim=-1))\n\n # concat per dim input batch to a joint batch along dim1\n # [B, action_dim*n, d]\n joint = torch.cat(inputs_per_dim, dim=1)\n\n # forward the joint batch\n # action_values_per_dim: [action_dim, B, n, action_bin]\n action_values_per_dim, _ = self._net_forward_parallel(\n joint, alpha, batch_size=observations.shape[0])\n\n trans_action_values_per_dim = self._transform_action_value(\n action_values_per_dim, alpha)\n\n for i in range(self._action_dim):\n action_ind = self._action_qt.action_to_ind(actions[..., i])\n if i == 0:\n action_value_i = self._batched_index_select(\n action_values_per_dim[i], -1, action_ind.long())\n Q_values[i] = action_value_i\n # KL-divergence\n Q_values[i] = Q_values[i] - alpha * self._log_pi_uniform_prior\n else:\n selected_trans_action_value_i = self._batched_index_select(\n trans_action_values_per_dim[i], -1, action_ind.long())\n Q_values[i] = Q_values[i - 1] + selected_trans_action_value_i\n # KL-divergence\n Q_values[i] = Q_values[i] - alpha * self._log_pi_uniform_prior\n\n # [action_dim, B, n, 1] -> [B, n, action_dim]\n Q_values = Q_values.squeeze(3).permute(1, 2, 0)\n return Q_values, state\n\n def _net_forward_individual(self, inputs, alpha, i, state=()):\n \"\"\"Individiual forwarding for a specified action dims for value computation.\n Args:\n inputs (torch.Tensor): a tensor of the shape [B, n, d]\n alpha: the temperature used for the advantage computation\n i (int): the specified action dim to perform forwarding\n Returns:\n action_values_i (torch.Tensor): a tensor of the shape [B, n, action_bin]\n state: empty\n \"\"\"\n inputs, _ = self._pre_encoding_nets[i](inputs)\n action_values_i, state = self._mid_shared_encoding_nets(inputs)\n action_values_i, state = self._post_encoding_nets[i](action_values_i)\n return action_values_i, state\n\n def _net_forward_parallel(self, inputs, alpha, batch_size, state=()):\n \"\"\"Parallel forwarding across action dims for value computation.\n Args:\n inputs (torch.Tensor): a tensor of the shape [B, action_dim*n, d]\n with the data for each action dimension concanated along the\n dim1 for parallel computation\n alpha: the temperature used for the advantage computation\n batch_size: the size of the original batch without stacking\n all action dimensions\n Returns:\n action_values (torch.Tensor): a tensor of the shape\n [action_dim, B, n, action_bin]\n state: empty\n \"\"\"\n # [B, action_dim*n, d]\n action_values_pre, _ = self._pre_encoding_parallel_net(inputs)\n # [B, action_dim*n, d] -> [action_dim*B, n, d]\n action_values_pre = self._reshape_from_ensemble_to_batch(\n action_values_pre, batch_size)\n action_values_mid, state = 
self._mid_shared_encoding_nets(\n action_values_pre)\n # [action_dim*B, n, d] -> [B, action_dim*n, d]\n action_values_mid = self._reshape_from_batch_to_ensemble(\n action_values_mid, batch_size)\n action_values_final, _ = self._post_encoding_parallel_net(\n action_values_mid)\n # [B, action_dim*n, d]-> [B, action_dim, n, d] -> [action_dim, B, n, d]\n action_values = action_values_final.view(batch_size, self._action_dim,\n self._num_critic_replicas,\n -1).transpose(0, 1)\n\n return action_values, state\n\n def _reshape_from_batch_to_ensemble(self, joint_batch, batch_size):\n \"\"\"Reshape the joint batch of the shape [action_dim*B, n, d] to\n [B, action_dim*n, d], i.e., separate and move the action dimension\n axis from the batch dimension (dim0) to the ensemble dimension (dim1)\n Args:\n joint_batch (torch.Tensor): a tensor of the shape [action_dim*B, n, d]\n with the data for each action dimension concanated along the\n batch dimension (dim0)\n batch_size: the size of the original batch without stacking\n all action dimensions\n Returns:\n reshaped_batch (torch.Tensor): a tensor of the shape\n [B, action_dim*n, d]\n \"\"\"\n assert len(joint_batch.shape) == 3 and joint_batch.shape[:-1] == \\\n (self._action_dim * batch_size, self._num_critic_replicas)\n\n d = joint_batch.shape[-1]\n # [action_dim*B, n, d] -> [action_dim, B, n, d]\n reshaped_batch = joint_batch.view(self._action_dim, batch_size,\n self._num_critic_replicas, d)\n\n # [action_dim, B, n, d] -> [B, action_dim, n, d] -> [B, action_dim*n, d]\n reshaped_batch = reshaped_batch.transpose(0, 1).reshape(\n batch_size, -1, d)\n return reshaped_batch\n\n def _reshape_from_ensemble_to_batch(self, joint_batch, batch_size):\n \"\"\"Reshape the joint batch of the shape [B, action_dim*n, d] to\n [action_dim*B, n, d], i.e., separate and move the action dimension\n axis from the ensemble dimension (dim1) to the batch dimension (dim0)\n Args:\n joint_batch (torch.Tensor): a tensor of the shape [B, action_dim*n, d]\n with the data for each action dimension concanated along the\n ensemble dimension (dim1)\n batch_size: the size of the original batch without stacking\n all action dimensions\n Returns:\n reshaped_batch (torch.Tensor): a tensor of the shape\n [action_dim*B, n, d]\n \"\"\"\n assert len(joint_batch.shape) == 3 and joint_batch.shape[:-1] == \\\n (batch_size, self._action_dim * self._num_critic_replicas)\n\n d = joint_batch.shape[-1]\n # [B, action_dim*n, d] -> [B, action_dim, n, d]\n reshaped_batch = joint_batch.view(batch_size, self._action_dim,\n self._num_critic_replicas, d)\n\n # [B, action_dim, n, d] -> [action_dim, B, n, d] -> [action_dim*B, n, d]\n reshaped_batch = reshaped_batch.transpose(0, 1).reshape(\n -1, self._num_critic_replicas, d)\n return reshaped_batch\n\n def _transform_action_value(self, action_values, alpha):\n \"\"\"Transform raw action values to valid alpha * log_pi\n Args:\n action_values (torch.Tensor): raw action values computed from a\n network, with the last dim as the distribution dimension\n alpha: the temperature used for the transformation\n\n Returns:\n transformed_value (torch.Tensor): a tensor with value equals\n alpha * log_pi computed from input action_values\n \"\"\"\n v_value = alpha * torch.logsumexp(\n action_values / alpha, dim=-1, keepdim=True)\n transformed_value = action_values - v_value\n return transformed_value\n\n def _sample_action_from_value(self, logits, alpha, greedy=False):\n \"\"\"Sample discrete action from given logits\n Args:\n logits (torch.Tensor): log pi of the discrete 
distribution with\n the last dim as the distribution dimension\n alpha: the temperature used for the transformation\n greedy (bool): if True, do greedy sampling by taking the mode\n of the distribution; otherwise, sample according\n to the probability of the distribution\n Returns:\n sampled_ind (torch.Tensor): the indices of the sampled action\n sampled_log_pi (torch.Tensor): the log prob of the sampled action\n \"\"\"\n if greedy:\n sampled_log_pi, sampled_ind = torch.max(logits, dim=-1)\n else:\n batch_size = logits.shape[0]\n\n # logits [B, n, d] -> [B*n, d]\n batched_logits = logits.reshape(-1, self._action_bins)\n dist = torch.distributions.categorical.Categorical(\n logits=batched_logits)\n\n # [1, B*n] -> [B, n]\n sampled_ind = dist.sample((1, ))\n sampled_log_pi = dist.log_prob(sampled_ind)\n\n sampled_ind = sampled_ind.view(batch_size, -1)\n sampled_log_pi = sampled_log_pi.view(batch_size, -1)\n\n return sampled_ind, sampled_log_pi\n\n def _batched_index_select(self, t, dim, inds):\n expanded_ind = inds.unsqueeze(-1)\n out = t.gather(dim, expanded_ind)\n return out\n\n @staticmethod\n def _parallel_to_individual_network_sync(p_net, np_net, step):\n \"\"\"Sync parameters from parallel version to indivisual version\n Args:\n p_net (ParallelNetwork): the parallel version of network\n np_net (list[Network|ParallelNetwork]): a list of the individual\n networks. Note that each individual network can also be an\n instance of ParallelNetwork.\n step (int): the replica contained in the individual network.\n For exmaple:\n - if the individual net is a plain network, step=1\n - if the individual net is a parallel network, step = replica\n of the individual net\n \"\"\"\n split_num = len(np_net)\n for i in range(split_num):\n for ws, wt in zip(p_net.parameters(), np_net[i].parameters()):\n wt.data.copy_(ws[i * step:(i + 1) * step])\n\n def get_uniform_prior_logpi(self):\n return self._log_pi_uniform_prior\n\n def sync_net(self):\n MdqCriticNetwork._parallel_to_individual_network_sync(\n self._pre_encoding_parallel_net, self._pre_encoding_nets,\n self._num_critic_replicas)\n\n MdqCriticNetwork._parallel_to_individual_network_sync(\n self._post_encoding_parallel_net, self._post_encoding_nets,\n self._num_critic_replicas)\n" ]
[ [ "torch.empty", "torch.logsumexp", "torch.distributions.categorical.Categorical", "torch.no_grad", "numpy.log", "torch.max", "torch.zeros", "torch.cat" ] ]
simonlet/MissRateSimulator
[ "89f481a3408d243a61a6907f332e36ca9a2fb45d" ]
[ "printers/simulator_printer.py" ]
[ "import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.patches as mpatches\nimport random\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport itertools\nfrom matplotlib import rcParams\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nN = 20\nwidth = 0.15 # the width of the bars\nind = np.arange(N) # the x locations for the groups\n\n#fileName = 'diffrent_set_size'\nfileName = 'sims'\nfolder = 'final_plot/'\nperfault = []\n\n# plot in pdf\npp = PdfPages(folder + fileName + '.pdf')\n\npercentageU = 75\ntitle = 'Tasks: '+ repr(2) + ', $U^N_{SUM}$:'+repr(percentageU)+'%' + ', Fault Rate:'+repr(10**-4)\n\nplt.title(title, fontsize=20)\nplt.grid(True)\nplt.ylabel('Expected Miss Rate', fontsize=20)\nax = plt.subplot()\nax.set_yscale(\"log\")\n#ax.set_xlim([0, 11])\nax.set_ylim([10**-5,10**0])\nax.set_xticks(ind + width /2)\nax.set_xticklabels(('S1','S2','S3','S4','S5','S6','S7','S8','S9','S10','S11','S12','S13','S14','S15','S16','S17','S18','S19','S20'))\nax.tick_params(axis='both', which='major',labelsize=18)\n\nSIM = [8.45e-05, 9.4e-05, 9.35e-05, 9.7e-05, 0.000105, 0.000186, 0.000101, 0.0001055, 9.4e-05, 0.000184, 0.000182, 0.0001035, 0.000206, 0.000111, 0.000205, 0.000106, 9.25e-05, 0.000213, 0.000101, 8.95e-05, 8.85e-05, 0.0, 0.000207, 9.65e-05, 9.05e-05, 0.000243]\n#[8.45e-05, 9.4e-05, 9.35e-05, 9.7e-05, 0.000105, 0.000186, 0.000101, 0.0001055, 9.4e-05, 9.7e-05, 0.000184, 0.000182, 0.0001035, 0.000206, 0.000111, 0.000205, 0.0001025, 0.000106, 9.25e-05, 0.000213, 0.000101, 8.95e-05, 8.85e-05, 0.0, 0.000207, 9.65e-05, 9.05e-05, 0.000243]\nCONV =[0.0010053685517187949031, 0.0010050283581860040246, 0.0010050109468929518394, 0.0010030602488507495633, 0.0010050109468794092278, 0.0030148929435695472194, 0.0010020093387333380717, 0.0010050109472946719814, 0.0010050221798409180395, 0.0030148929435695472194, 0.0030148929435695472194, 0.0010050362727498393625, 0.0030148929435695472194, 0.0010020009989874518292, 0.0030148929435695472194, 0.0010020025172624208554, 0.0010050221798561057422, 0.0030148929435695472194, 0.0010068847464266960405, 0.0010050164813971228616, 0.0010000337727699791769, 6.5561497176718002027e-08, 0.0030148929435695472194, 0.0010020009999315654303, 0.0010050176623350693234, 0.0030148929435695472194]\n#[0.0010053685517187949031, 0.0010050283581860040246, 0.0010050109468929518394, 0.0010030602488507495633, 0.0010050109468794092278, 0.0030148929435695472194, 0.0010020093387333380717, 0.0010050109472946719814, 0.0010050221798409180395, 0.0010020009969879950499, 0.0030148929435695472194, 0.0030148929435695472194, 0.0010050362727498393625, 0.0030148929435695472194, 0.0010020009989874518292, 0.0030148929435695472194, 0.0010020009971426968134, 0.0010020025172624208554, 0.0010050221798561057422, 0.0030148929435695472194, 0.0010068847464266960405, 0.0010050164813971228616, 0.0010000337727699791769, 6.5561497176718002027e-08, 0.0030148929435695472194, 0.0010020009999315654303, 0.0010050176623350693234, 0.0030148929435695472194]\nEMR =[0.0139450629923560, 0.0162080930165612, 0.00839879405437142, 0.138437021519246, 0.00663733900833972, 0.0389274708069242, 0.00231197003297432, 0.00568520020691726, 0.0135173451330998, 0.0430670974824760, 0.0487444703580032, 0.0981640264251833, 0.0374159686448752, 0.00179877954996446, 0.0304324433713488, 0.00160735368262164, 0.0141213676838256, 0.0345369667805539, 0.0177943976360550, 0.00925867834967103, 0.000258134467239860, 0.00102879870692213, 0.105410030290883, 0.000854470767647069, 0.0170310283779263, 
0.0290942887226923]\n#[0.0139450629923560, 0.0162080930165612, 0.00839879405437142, 0.138437021519246, 0.00663733900833972, 0.0389274708069242, 0.00231197003297432, 0.00568520020691726, 0.0135173451330998, 0.000245024189468552, 0.0430670974824760, 0.0487444703580032, 0.0981640264251833, 0.0374159686448752, 0.00179877954996446, 0.0304324433713488, 0.000853877971175162, 0.00160735368262164, 0.0141213676838256, 0.0345369667805539, 0.0177943976360550, 0.00925867834967103, 0.000258134467239860, 0.00102879870692213, 0.105410030290883, 0.000854470767647069, 0.0170310283779263, 0.0290942887226923]\nprint(len(SIM))\nprint(len(CONV))\nprint(len(EMR))\nSIM = SIM[:20]\nCONV = CONV[:20]\nEMR = EMR[:20]\n\ntry:\n rects1 = ax.bar(ind-0.1, SIM, width, color='black', edgecolor='black')\n rects2 = ax.bar(ind+0.1, CONV, width, fill=False, edgecolor='black')\n rects3 = ax.bar(ind+0.3, EMR, width, edgecolor='black', hatch=\"/\")\n ax.legend((rects1[0], rects2[0], rects3[0]), ('SIM', 'CON', 'EMR'))\nexcept ValueError:\n print(\"ValueError\")\nfigure = plt.gcf()\nfigure.set_size_inches([14.5,6.5])\n\n#plt.legend(handles=[set1, set2, set3, set4, set5], fontsize=12, frameon=True, loc=3)\n\npp.savefig()\nplt.clf()\npp.close()\n\n" ]
[ [ "matplotlib.pyplot.grid", "matplotlib.pyplot.gcf", "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.clf", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.subplot", "matplotlib.pyplot.ylabel", "matplotlib.use" ] ]
oz90210/Pyto
[ "901ac307b68486d8289105c159ca702318bea5b0" ]
[ "site-packages/skimage/filters/rank/tests/test_rank.py" ]
[ "import os\nimport numpy as np\nfrom skimage._shared.testing import (assert_equal, assert_array_equal,\n assert_allclose)\nfrom skimage._shared import testing\n\nimport skimage\nfrom skimage.util import img_as_ubyte, img_as_float\nfrom skimage import data, util, morphology\nfrom skimage.morphology import grey, disk\nfrom skimage.filters import rank\nfrom skimage.filters.rank import __all__ as all_rank_filters\nfrom skimage._shared._warnings import expected_warnings\nfrom skimage._shared.testing import test_parallel, arch32, parametrize, xfail\nfrom pytest import param\n\n\ndef test_otsu_edge_case():\n # This is an edge case that causes OTSU to appear to misbehave\n # Pixel [1, 1] may take a value of of 41 or 81. Both should be considered\n # valid. The value will change depending on the particular implementation\n # of OTSU.\n # To better understand, see\n # https://mybinder.org/v2/gist/hmaarrfk/4afae1cfded1d78e44c9e4f58285d552/master\n\n selem = np.array([[0, 1, 0],\n [1, 1, 1],\n [0, 1, 0]], dtype=np.uint8)\n\n img = np.array([[ 0, 41, 0],\n [ 30, 81, 106],\n [ 0, 147, 0]], dtype=np.uint8)\n\n result = rank.otsu(img, selem)\n assert result[1, 1] in [41, 81]\n\n img = np.array([[ 0, 214, 0],\n [229, 104, 141],\n [ 0, 172, 0]], dtype=np.uint8)\n result = rank.otsu(img, selem)\n assert result[1, 1] in [141, 172]\n\n\n\nclass TestRank():\n def setup(self):\n np.random.seed(0)\n # This image is used along with @test_parallel\n # to ensure that the same seed is used for each thread.\n self.image = np.random.rand(25, 25)\n # Set again the seed for the other tests.\n np.random.seed(0)\n self.selem = morphology.disk(1)\n self.refs = np.load(os.path.join(skimage.data_dir,\n \"rank_filter_tests.npz\"))\n\n @parametrize('filter', all_rank_filters)\n def test_rank_filter(self, filter):\n @test_parallel()\n def check():\n expected = self.refs[filter]\n with expected_warnings(['Possible precision loss']):\n result = getattr(rank, filter)(self.image, self.selem)\n if filter == \"entropy\":\n # There may be some arch dependent rounding errors\n # See the discussions in\n # https://github.com/scikit-image/scikit-image/issues/3091\n # https://github.com/scikit-image/scikit-image/issues/2528\n assert_allclose(expected, result, atol=0, rtol=1E-15)\n elif filter == \"otsu\":\n # OTSU May also have some optimization dependent failures\n # See the discussions in\n # https://github.com/scikit-image/scikit-image/issues/3091\n # Pixel 3, 5 was found to be problematic. 
It can take either\n # a value of 41 or 81 depending on the specific optimizations\n # used.\n assert result[3, 5] in [41, 81]\n result[3, 5] = 81\n # Pixel [19, 18] is also found to be problematic for the same\n # reason.\n assert result[19, 18] in [141, 172]\n result[19, 18] = 172\n assert_array_equal(expected, result)\n else:\n assert_array_equal(expected, result)\n\n check()\n\n\n def test_random_sizes(self):\n # make sure the size is not a problem\n\n elem = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.uint8)\n for m, n in np.random.randint(1, 101, size=(10, 2)):\n mask = np.ones((m, n), dtype=np.uint8)\n\n image8 = np.ones((m, n), dtype=np.uint8)\n out8 = np.empty_like(image8)\n rank.mean(image=image8, selem=elem, mask=mask, out=out8,\n shift_x=0, shift_y=0)\n assert_equal(image8.shape, out8.shape)\n rank.mean(image=image8, selem=elem, mask=mask, out=out8,\n shift_x=+1, shift_y=+1)\n assert_equal(image8.shape, out8.shape)\n\n rank.geometric_mean(image=image8, selem=elem, mask=mask, out=out8,\n shift_x=0, shift_y=0)\n assert_equal(image8.shape, out8.shape)\n rank.geometric_mean(image=image8, selem=elem, mask=mask, out=out8,\n shift_x=+1, shift_y=+1)\n assert_equal(image8.shape, out8.shape)\n\n image16 = np.ones((m, n), dtype=np.uint16)\n out16 = np.empty_like(image8, dtype=np.uint16)\n rank.mean(image=image16, selem=elem, mask=mask, out=out16,\n shift_x=0, shift_y=0)\n assert_equal(image16.shape, out16.shape)\n rank.mean(image=image16, selem=elem, mask=mask, out=out16,\n shift_x=+1, shift_y=+1)\n assert_equal(image16.shape, out16.shape)\n\n rank.geometric_mean(image=image16, selem=elem, mask=mask, out=out16,\n shift_x=0, shift_y=0)\n assert_equal(image16.shape, out16.shape)\n rank.geometric_mean(image=image16, selem=elem, mask=mask, out=out16,\n shift_x=+1, shift_y=+1)\n assert_equal(image16.shape, out16.shape)\n\n rank.mean_percentile(image=image16, mask=mask, out=out16,\n selem=elem, shift_x=0, shift_y=0, p0=.1, p1=.9)\n assert_equal(image16.shape, out16.shape)\n rank.mean_percentile(image=image16, mask=mask, out=out16,\n selem=elem, shift_x=+1, shift_y=+1, p0=.1, p1=.9)\n assert_equal(image16.shape, out16.shape)\n\n\n def test_compare_with_grey_dilation(self):\n # compare the result of maximum filter with dilate\n\n image = (np.random.rand(100, 100) * 256).astype(np.uint8)\n out = np.empty_like(image)\n mask = np.ones(image.shape, dtype=np.uint8)\n\n for r in range(3, 20, 2):\n elem = np.ones((r, r), dtype=np.uint8)\n rank.maximum(image=image, selem=elem, out=out, mask=mask)\n cm = grey.dilation(image=image, selem=elem)\n assert_equal(out, cm)\n\n\n def test_compare_with_grey_erosion(self):\n # compare the result of maximum filter with erode\n\n image = (np.random.rand(100, 100) * 256).astype(np.uint8)\n out = np.empty_like(image)\n mask = np.ones(image.shape, dtype=np.uint8)\n\n for r in range(3, 20, 2):\n elem = np.ones((r, r), dtype=np.uint8)\n rank.minimum(image=image, selem=elem, out=out, mask=mask)\n cm = grey.erosion(image=image, selem=elem)\n assert_equal(out, cm)\n\n\n def test_bitdepth(self):\n # test the different bit depth for rank16\n\n elem = np.ones((3, 3), dtype=np.uint8)\n out = np.empty((100, 100), dtype=np.uint16)\n mask = np.ones((100, 100), dtype=np.uint8)\n\n for i in range(8, 13):\n max_val = 2 ** i - 1\n image = np.full((100, 100), max_val, dtype=np.uint16)\n if i > 10:\n expected = [\"Bad rank filter performance\"]\n else:\n expected = []\n with expected_warnings(expected):\n rank.mean_percentile(image=image, selem=elem, mask=mask,\n out=out, 
shift_x=0, shift_y=0, p0=.1, p1=.9)\n\n\n def test_population(self):\n # check the number of valid pixels in the neighborhood\n\n image = np.zeros((5, 5), dtype=np.uint8)\n elem = np.ones((3, 3), dtype=np.uint8)\n out = np.empty_like(image)\n mask = np.ones(image.shape, dtype=np.uint8)\n\n rank.pop(image=image, selem=elem, out=out, mask=mask)\n r = np.array([[4, 6, 6, 6, 4],\n [6, 9, 9, 9, 6],\n [6, 9, 9, 9, 6],\n [6, 9, 9, 9, 6],\n [4, 6, 6, 6, 4]])\n assert_equal(r, out)\n\n\n def test_structuring_element8(self):\n # check the output for a custom structuring element\n\n r = np.array([[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 255, 0, 0, 0],\n [0, 0, 255, 255, 255, 0],\n [0, 0, 0, 255, 255, 0],\n [0, 0, 0, 0, 0, 0]])\n\n # 8-bit\n image = np.zeros((6, 6), dtype=np.uint8)\n image[2, 2] = 255\n elem = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8)\n out = np.empty_like(image)\n mask = np.ones(image.shape, dtype=np.uint8)\n\n rank.maximum(image=image, selem=elem, out=out, mask=mask,\n shift_x=1, shift_y=1)\n assert_equal(r, out)\n\n # 16-bit\n image = np.zeros((6, 6), dtype=np.uint16)\n image[2, 2] = 255\n out = np.empty_like(image)\n\n rank.maximum(image=image, selem=elem, out=out, mask=mask,\n shift_x=1, shift_y=1)\n assert_equal(r, out)\n\n\n def test_pass_on_bitdepth(self):\n # should pass because data bitdepth is not too high for the function\n\n image = np.full((100, 100), 2 ** 11, dtype=np.uint16)\n elem = np.ones((3, 3), dtype=np.uint8)\n out = np.empty_like(image)\n mask = np.ones(image.shape, dtype=np.uint8)\n with expected_warnings([\"Bad rank filter performance\"]):\n rank.maximum(image=image, selem=elem, out=out, mask=mask)\n\n\n def test_inplace_output(self):\n # rank filters are not supposed to filter inplace\n\n selem = disk(20)\n image = (np.random.rand(500, 500) * 256).astype(np.uint8)\n out = image\n with testing.raises(NotImplementedError):\n rank.mean(image, selem, out=out)\n\n\n def test_compare_autolevels(self):\n # compare autolevel and percentile autolevel with p0=0.0 and p1=1.0\n # should returns the same arrays\n\n image = util.img_as_ubyte(data.camera())\n\n selem = disk(20)\n loc_autolevel = rank.autolevel(image, selem=selem)\n loc_perc_autolevel = rank.autolevel_percentile(image, selem=selem,\n p0=.0, p1=1.)\n\n assert_equal(loc_autolevel, loc_perc_autolevel)\n\n\n def test_compare_autolevels_16bit(self):\n # compare autolevel(16-bit) and percentile autolevel(16-bit) with p0=0.0\n # and p1=1.0 should returns the same arrays\n\n image = data.camera().astype(np.uint16) * 4\n\n selem = disk(20)\n loc_autolevel = rank.autolevel(image, selem=selem)\n loc_perc_autolevel = rank.autolevel_percentile(image, selem=selem,\n p0=.0, p1=1.)\n\n assert_equal(loc_autolevel, loc_perc_autolevel)\n\n\n def test_compare_ubyte_vs_float(self):\n\n # Create signed int8 image that and convert it to uint8\n image_uint = img_as_ubyte(data.camera()[:50, :50])\n image_float = img_as_float(image_uint)\n\n methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'threshold',\n 'subtract_mean', 'enhance_contrast', 'pop', 'tophat']\n\n for method in methods:\n func = getattr(rank, method)\n out_u = func(image_uint, disk(3))\n with expected_warnings([\"Possible precision loss\"]):\n out_f = func(image_float, disk(3))\n assert_equal(out_u, out_f)\n\n\n def test_compare_8bit_unsigned_vs_signed(self):\n # filters applied on 8-bit image ore 16-bit image (having only real 8-bit\n # of dynamic) should be identical\n\n # Create signed int8 image that and convert it to 
uint8\n image = img_as_ubyte(data.camera())[::2, ::2]\n image[image > 127] = 0\n image_s = image.astype(np.int8)\n image_u = img_as_ubyte(image_s)\n assert_equal(image_u, img_as_ubyte(image_s))\n\n methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'maximum',\n 'mean', 'geometric_mean', 'subtract_mean', 'median', 'minimum',\n 'modal', 'enhance_contrast', 'pop', 'threshold', 'tophat']\n\n for method in methods:\n func = getattr(rank, method)\n out_u = func(image_u, disk(3))\n with expected_warnings([\"Possible precision loss\"]):\n out_s = func(image_s, disk(3))\n assert_equal(out_u, out_s)\n\n @parametrize('method',\n ['autolevel', 'bottomhat', 'equalize', 'gradient', 'maximum',\n 'mean', 'subtract_mean', 'median', 'minimum', 'modal',\n 'enhance_contrast', 'pop', 'threshold', 'tophat'])\n def test_compare_8bit_vs_16bit(self, method):\n # filters applied on 8-bit image ore 16-bit image (having only real 8-bit\n # of dynamic) should be identical\n image8 = util.img_as_ubyte(data.camera())[::2, ::2]\n image16 = image8.astype(np.uint16)\n assert_equal(image8, image16)\n\n func = getattr(rank, method)\n f8 = func(image8, disk(3))\n f16 = func(image16, disk(3))\n assert_equal(f8, f16)\n\n\n def test_trivial_selem8(self):\n # check that min, max and mean returns identity if structuring element\n # contains only central pixel\n\n image = np.zeros((5, 5), dtype=np.uint8)\n out = np.zeros_like(image)\n mask = np.ones_like(image, dtype=np.uint8)\n image[2, 2] = 255\n image[2, 3] = 128\n image[1, 2] = 16\n\n elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)\n rank.mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.geometric_mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.minimum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.maximum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n\n\n def test_trivial_selem16(self):\n # check that min, max and mean returns identity if structuring element\n # contains only central pixel\n\n image = np.zeros((5, 5), dtype=np.uint16)\n out = np.zeros_like(image)\n mask = np.ones_like(image, dtype=np.uint8)\n image[2, 2] = 255\n image[2, 3] = 128\n image[1, 2] = 16\n\n elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)\n rank.mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.geometric_mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.minimum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.maximum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n\n\n def test_smallest_selem8(self):\n # check that min, max and mean returns identity if structuring element\n # contains only central pixel\n\n image = np.zeros((5, 5), dtype=np.uint8)\n out = np.zeros_like(image)\n mask = np.ones_like(image, dtype=np.uint8)\n image[2, 2] = 255\n image[2, 3] = 128\n image[1, 2] = 16\n\n elem = np.array([[1]], dtype=np.uint8)\n rank.mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.minimum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.maximum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, 
shift_y=0)\n assert_equal(image, out)\n\n\n def test_smallest_selem16(self):\n # check that min, max and mean returns identity if structuring element\n # contains only central pixel\n\n image = np.zeros((5, 5), dtype=np.uint16)\n out = np.zeros_like(image)\n mask = np.ones_like(image, dtype=np.uint8)\n image[2, 2] = 255\n image[2, 3] = 128\n image[1, 2] = 16\n\n elem = np.array([[1]], dtype=np.uint8)\n rank.mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.geometric_mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.minimum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.maximum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n\n\n def test_empty_selem(self):\n # check that min, max and mean returns zeros if structuring element is\n # empty\n\n image = np.zeros((5, 5), dtype=np.uint16)\n out = np.zeros_like(image)\n mask = np.ones_like(image, dtype=np.uint8)\n res = np.zeros_like(image)\n image[2, 2] = 255\n image[2, 3] = 128\n image[1, 2] = 16\n\n elem = np.array([[0, 0, 0], [0, 0, 0]], dtype=np.uint8)\n\n rank.mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(res, out)\n rank.geometric_mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(res, out)\n rank.minimum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(res, out)\n rank.maximum(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(res, out)\n\n\n def test_otsu(self):\n # test the local Otsu segmentation on a synthetic image\n # (left to right ramp * sinus)\n\n test = np.tile([128, 145, 103, 127, 165, 83, 127, 185, 63, 127, 205, 43,\n 127, 225, 23, 127],\n (16, 1))\n test = test.astype(np.uint8)\n res = np.tile([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1], (16, 1))\n selem = np.ones((6, 6), dtype=np.uint8)\n th = 1 * (test >= rank.otsu(test, selem))\n assert_equal(th, res)\n\n\n def test_entropy(self):\n # verify that entropy is coherent with bitdepth of the input data\n\n selem = np.ones((16, 16), dtype=np.uint8)\n # 1 bit per pixel\n data = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8)\n assert(np.max(rank.entropy(data, selem)) == 1)\n\n # 2 bit per pixel\n data = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8)\n assert(np.max(rank.entropy(data, selem)) == 2)\n\n # 3 bit per pixel\n data = np.tile(\n np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype(np.uint8)\n assert(np.max(rank.entropy(data, selem)) == 3)\n\n # 4 bit per pixel\n data = np.tile(\n np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)\n assert(np.max(rank.entropy(data, selem)) == 4)\n\n # 6 bit per pixel\n data = np.tile(\n np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8)\n assert(np.max(rank.entropy(data, selem)) == 6)\n\n # 8-bit per pixel\n data = np.tile(\n np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8)\n assert(np.max(rank.entropy(data, selem)) == 8)\n\n # 12 bit per pixel\n selem = np.ones((64, 64), dtype=np.uint8)\n data = np.zeros((65, 65), dtype=np.uint16)\n data[:64, :64] = np.reshape(np.arange(4096), (64, 64))\n with expected_warnings(['Bad rank filter performance']):\n assert(np.max(rank.entropy(data, selem)) == 12)\n\n # make sure output is of dtype double\n with expected_warnings(['Bad rank filter performance']):\n out 
= rank.entropy(data, np.ones((16, 16), dtype=np.uint8))\n assert out.dtype == np.double\n\n\n def test_selem_dtypes(self):\n\n image = np.zeros((5, 5), dtype=np.uint8)\n out = np.zeros_like(image)\n mask = np.ones_like(image, dtype=np.uint8)\n image[2, 2] = 255\n image[2, 3] = 128\n image[1, 2] = 16\n\n for dtype in (np.uint8, np.uint16, np.int32, np.int64,\n np.float32, np.float64):\n elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=dtype)\n rank.mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.geometric_mean(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n rank.mean_percentile(image=image, selem=elem, out=out, mask=mask,\n shift_x=0, shift_y=0)\n assert_equal(image, out)\n\n\n def test_16bit(self):\n image = np.zeros((21, 21), dtype=np.uint16)\n selem = np.ones((3, 3), dtype=np.uint8)\n\n for bitdepth in range(17):\n value = 2 ** bitdepth - 1\n image[10, 10] = value\n if bitdepth >= 11:\n expected = ['Bad rank filter performance']\n else:\n expected = []\n with expected_warnings(expected):\n assert rank.minimum(image, selem)[10, 10] == 0\n assert rank.maximum(image, selem)[10, 10] == value\n assert rank.mean(image, selem)[10, 10] == int(value / selem.size)\n\n\n def test_bilateral(self):\n image = np.zeros((21, 21), dtype=np.uint16)\n selem = np.ones((3, 3), dtype=np.uint8)\n\n image[10, 10] = 1000\n image[10, 11] = 1010\n image[10, 9] = 900\n\n assert rank.mean_bilateral(image, selem, s0=1, s1=1)[10, 10] == 1000\n assert rank.pop_bilateral(image, selem, s0=1, s1=1)[10, 10] == 1\n assert rank.mean_bilateral(image, selem, s0=11, s1=11)[10, 10] == 1005\n assert rank.pop_bilateral(image, selem, s0=11, s1=11)[10, 10] == 2\n\n\n def test_percentile_min(self):\n # check that percentile p0 = 0 is identical to local min\n img = data.camera()\n img16 = img.astype(np.uint16)\n selem = disk(15)\n # check for 8bit\n img_p0 = rank.percentile(img, selem=selem, p0=0)\n img_min = rank.minimum(img, selem=selem)\n assert_equal(img_p0, img_min)\n # check for 16bit\n img_p0 = rank.percentile(img16, selem=selem, p0=0)\n img_min = rank.minimum(img16, selem=selem)\n assert_equal(img_p0, img_min)\n\n\n def test_percentile_max(self):\n # check that percentile p0 = 1 is identical to local max\n img = data.camera()\n img16 = img.astype(np.uint16)\n selem = disk(15)\n # check for 8bit\n img_p0 = rank.percentile(img, selem=selem, p0=1.)\n img_max = rank.maximum(img, selem=selem)\n assert_equal(img_p0, img_max)\n # check for 16bit\n img_p0 = rank.percentile(img16, selem=selem, p0=1.)\n img_max = rank.maximum(img16, selem=selem)\n assert_equal(img_p0, img_max)\n\n\n def test_percentile_median(self):\n # check that percentile p0 = 0.5 is identical to local median\n img = data.camera()\n img16 = img.astype(np.uint16)\n selem = disk(15)\n # check for 8bit\n img_p0 = rank.percentile(img, selem=selem, p0=.5)\n img_max = rank.median(img, selem=selem)\n assert_equal(img_p0, img_max)\n # check for 16bit\n img_p0 = rank.percentile(img16, selem=selem, p0=.5)\n img_max = rank.median(img16, selem=selem)\n assert_equal(img_p0, img_max)\n\n\n def test_sum(self):\n # check the number of valid pixels in the neighborhood\n\n image8 = np.array([[0, 0, 0, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0]], dtype=np.uint8)\n image16 = 400 * np.array([[0, 0, 0, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0]], dtype=np.uint16)\n elem = np.ones((3, 3), 
dtype=np.uint8)\n out8 = np.empty_like(image8)\n out16 = np.empty_like(image16)\n mask = np.ones(image8.shape, dtype=np.uint8)\n\n r = np.array([[1, 2, 3, 2, 1],\n [2, 4, 6, 4, 2],\n [3, 6, 9, 6, 3],\n [2, 4, 6, 4, 2],\n [1, 2, 3, 2, 1]], dtype=np.uint8)\n rank.sum(image=image8, selem=elem, out=out8, mask=mask)\n assert_equal(r, out8)\n rank.sum_percentile(\n image=image8, selem=elem, out=out8, mask=mask, p0=.0, p1=1.)\n assert_equal(r, out8)\n rank.sum_bilateral(\n image=image8, selem=elem, out=out8, mask=mask, s0=255, s1=255)\n assert_equal(r, out8)\n\n r = 400 * np.array([[1, 2, 3, 2, 1],\n [2, 4, 6, 4, 2],\n [3, 6, 9, 6, 3],\n [2, 4, 6, 4, 2],\n [1, 2, 3, 2, 1]], dtype=np.uint16)\n rank.sum(image=image16, selem=elem, out=out16, mask=mask)\n assert_equal(r, out16)\n rank.sum_percentile(\n image=image16, selem=elem, out=out16, mask=mask, p0=.0, p1=1.)\n assert_equal(r, out16)\n rank.sum_bilateral(\n image=image16, selem=elem, out=out16, mask=mask, s0=1000, s1=1000)\n assert_equal(r, out16)\n\n\n def test_windowed_histogram(self):\n # check the number of valid pixels in the neighborhood\n\n image8 = np.array([[0, 0, 0, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0]], dtype=np.uint8)\n elem = np.ones((3, 3), dtype=np.uint8)\n outf = np.empty(image8.shape + (2,), dtype=float)\n mask = np.ones(image8.shape, dtype=np.uint8)\n\n # Population so we can normalize the expected output while maintaining\n # code readability\n pop = np.array([[4, 6, 6, 6, 4],\n [6, 9, 9, 9, 6],\n [6, 9, 9, 9, 6],\n [6, 9, 9, 9, 6],\n [4, 6, 6, 6, 4]], dtype=float)\n\n r0 = np.array([[3, 4, 3, 4, 3],\n [4, 5, 3, 5, 4],\n [3, 3, 0, 3, 3],\n [4, 5, 3, 5, 4],\n [3, 4, 3, 4, 3]], dtype=float) / pop\n r1 = np.array([[1, 2, 3, 2, 1],\n [2, 4, 6, 4, 2],\n [3, 6, 9, 6, 3],\n [2, 4, 6, 4, 2],\n [1, 2, 3, 2, 1]], dtype=float) / pop\n rank.windowed_histogram(image=image8, selem=elem, out=outf, mask=mask)\n assert_equal(r0, outf[:, :, 0])\n assert_equal(r1, outf[:, :, 1])\n\n # Test n_bins parameter\n larger_output = rank.windowed_histogram(image=image8, selem=elem,\n mask=mask, n_bins=5)\n assert larger_output.shape[2] == 5\n\n\n def test_median_default_value(self):\n a = np.zeros((3, 3), dtype=np.uint8)\n a[1] = 1\n full_selem = np.ones((3, 3), dtype=np.uint8)\n assert_equal(rank.median(a), rank.median(a, full_selem))\n assert rank.median(a)[1, 1] == 0\n assert rank.median(a, disk(1))[1, 1] == 1\n\n\n def test_majority(self):\n img = data.camera()\n elem = np.ones((3, 3), dtype=np.uint8)\n expected = rank.windowed_histogram(\n img, elem).argmax(-1).astype(np.uint8)\n assert_equal(expected, rank.majority(img, elem))\n" ]
[ [ "numpy.ones", "numpy.zeros_like", "numpy.tile", "numpy.empty", "numpy.zeros", "numpy.random.seed", "numpy.asarray", "numpy.ones_like", "numpy.empty_like", "numpy.arange", "numpy.random.rand", "numpy.full", "numpy.array", "numpy.random.randint" ] ]
must-11/ap_experiment
[ "50af0e239bfbd4e3b02c8a23fe1b110601715952" ]
[ "src/dm/func.py" ]
[ "from collections import deque\n\nimport numpy as np\n\n\ndef make_Gm(G, match):\n n = G.shape[0]\n Gm = np.zeros((2*n, 2*n), dtype=np.int8)\n\n Gm[: n, n: ] = G.copy()\n for i, j in enumerate(match):\n if j != -999:\n Gm[j, i] = 1\n return Gm\n\n\ndef trans_match(match):\n n = len(match)\n match_ = np.array([-999]*n)\n for i, j in enumerate(match):\n if j != -999:\n match_[j - n] = i\n return match_\n\n\ndef dfs(u, G, color, n, parents):\n color[u] = 1\n if u >= n:\n for v in range(n):\n if (G[u, v]==1) & (color[v]==0):\n parents[v] = u\n dfs(v, G, color, n, parents)\n else:\n for v in range(n, 2*n):\n if (G[u, v]==1) & (color[v]==0):\n parents[v] = u\n dfs(v, G, color, n, parents)\n\n color[u] = 2\n\n\ndef dfs_time(u, G, color, n, f, time):\n color[u] = 1\n if u >= n:\n for v in range(n):\n if (G[u, v]==1) & (color[v]==0):\n dfs_time(v, G, color, n, f, time)\n else:\n for v in range(n, 2*n):\n if (G[u, v]==1) & (color[v]==0):\n dfs_time(v, G, color, n, f, time)\n\n color[u] = 2\n f[u] = time.pop()\n time.append(f[u]+1)\n\n\ndef dfs_group(u, G, color, group, n):\n color[u] = 1\n if u >= n:\n for v in range(n):\n if (G[u, v]==1) & (color[v]==0):\n dfs_group(v, G, color, group, n)\n else:\n for v in range(n, 2*n):\n if (G[u, v]==1) & (color[v]==0):\n dfs_group(v, G, color, group, n)\n\n color[u] = 2\n group.append(u)\n\n\ndef max_matching(G):\n n = G.shape[0]\n match = np.array([-999]*n)\n Gm = make_Gm(G, match)\n for u in range(n):\n if match[u] == -999:\n color = [0]*(2*n)\n parents = [-1]*(2*n)\n dfs(u, Gm, color, n, parents)\n e = -1\n for i, v in enumerate(parents[n: ]):\n if (v != -1) & ((i+n) not in match):\n e = i + n\n break\n if e != -1:\n for _ in range(n):\n s = parents[e]\n match[s] = e\n if s == u:\n break\n e = parents[s]\n Gm = make_Gm(G, match)\n return match\n\n\ndef remove(Gm, match):\n n = len(match)\n U_0 = np.where(match == -999)[0]\n V_8 = np.where(trans_match(match) == -999)[0] + n\n\n Gm_ = Gm.copy()\n Gm_[U_0] = 0\n Gm_[:, V_8] = 0\n return Gm_\n\n\ndef scc(Gm_, n):\n color = [0]*(2*n)\n f = np.array([-1]*(2*n))\n time = deque()\n time = ([0])\n for u in range(2*n):\n if color[u] == 0:\n dfs_time(u, Gm_, color, n, f, time)\n\n order = np.argsort(f)[::-1]\n color = [0]*(2*n)\n group = []\n out = []\n for i in order:\n if i not in out:\n g = []\n dfs_group(i, Gm_.T, color, g, n)\n group.append(g)\n out.extend(g)\n\n rank = []\n for g in group:\n rank.append(f[g].max())\n rank = np.argsort(rank)[::-1]\n return rank, group\n" ]
[ [ "numpy.array", "numpy.where", "numpy.argsort", "numpy.zeros" ] ]
xincao79/tensorflow
[ "7fa0cf39f854d5fdaaa19ad6425dfed02f5fea64" ]
[ "tensorflow/contrib/learn/python/learn/estimators/head_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for head.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\n\n# pylint: disable=g-bad-todo,g-import-not-at-top\nimport numpy as np\nimport six\n\nfrom tensorflow.contrib.learn.python.learn.estimators import constants\nfrom tensorflow.contrib.learn.python.learn.estimators import head as head_lib\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn\nfrom tensorflow.contrib.learn.python.learn.estimators import prediction_key\nfrom tensorflow.core.framework import summary_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.losses import losses as losses_lib\nfrom tensorflow.python.platform import test\n\n\ndef _assert_variables(test_case,\n expected_global=None,\n expected_model=None,\n expected_trainable=None):\n test_case.assertItemsEqual(\n tuple([] if expected_global is None else expected_global),\n tuple([k.name for k in variables.global_variables()]))\n test_case.assertItemsEqual(\n tuple([] if expected_model is None else expected_model),\n tuple([k.name for k in variables.model_variables()]))\n test_case.assertItemsEqual(\n tuple([] if expected_trainable is None else expected_trainable),\n tuple([k.name for k in variables.trainable_variables()]))\n\n\ndef _assert_no_variables(test_case):\n _assert_variables(test_case)\n\n\n# This must be called from within a tf.Session.\ndef _assert_metrics(test_case, expected_loss, expected_eval_metrics,\n model_fn_ops):\n test_case.assertAlmostEqual(expected_loss, model_fn_ops.loss.eval(), places=4)\n for k in six.iterkeys(expected_eval_metrics):\n test_case.assertIn(k, six.iterkeys(model_fn_ops.eval_metric_ops))\n variables.initialize_local_variables().run()\n for key, expected_value in six.iteritems(expected_eval_metrics):\n value_tensor, update_tensor = model_fn_ops.eval_metric_ops[key]\n update = update_tensor.eval()\n test_case.assertAlmostEqual(\n expected_value,\n update,\n places=4,\n msg=\"%s: update, expected %s, got %s.\" % (key, expected_value, update))\n value = value_tensor.eval()\n test_case.assertAlmostEqual(\n expected_value,\n value,\n places=4,\n msg=\"%s: value, expected %s, got %s.\" % (key, expected_value, value))\n\n\n# This must be called from within a tf.Session.\ndef _assert_summary_tags(test_case, expected_tags=None):\n actual_tags = []\n for summary_op in ops.get_collection(ops.GraphKeys.SUMMARIES):\n summ = summary_pb2.Summary()\n summ.ParseFromString(summary_op.eval())\n actual_tags.append(summ.value[0].tag)\n test_case.assertItemsEqual(expected_tags or [], actual_tags)\n\n\ndef _sigmoid(x):\n return 
1. / (1. + math.exp(-1 * x))\n\n\nclass PoissonHeadTest(test.TestCase):\n\n def _assert_output_alternatives(self, model_fn_ops):\n self.assertEquals({\n None: constants.ProblemType.LINEAR_REGRESSION\n }, {\n k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)\n })\n\n def _log_poisson_loss(self, logits, labels):\n x = np.array([f[0] for f in logits])\n z = np.array([f[0] for f in labels])\n lpl = np.exp(x) - z * x\n stirling_approx = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)\n lpl += np.ma.masked_array(stirling_approx, mask=(z <= 1)).filled(0.)\n return sum(lpl)/len(lpl)\n\n def testPoissonWithLogits(self):\n head = head_lib.poisson_regression_head()\n labels = ((0.,), (1.,), (1.,))\n logits = ((0.,), (-1.,), (3.,))\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n labels=labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_summary_tags(self, [\"loss\"])\n _assert_no_variables(self)\n loss = self._log_poisson_loss(logits, labels)\n _assert_metrics(self, loss, {\"loss\": loss}, model_fn_ops)\n\n\nclass RegressionHeadTest(test.TestCase):\n\n def _assert_output_alternatives(self, model_fn_ops):\n self.assertEquals({\n None: constants.ProblemType.LINEAR_REGRESSION\n }, {\n k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)\n })\n\n # TODO(zakaria): test multilabel regression.\n def testRegressionWithLogits(self):\n head = head_lib.regression_head()\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n labels=((0.,), (1.,), (1.,)),\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((1.,), (1.,), (3.,)))\n self._assert_output_alternatives(model_fn_ops)\n _assert_summary_tags(self, [\"loss\"])\n _assert_no_variables(self)\n _assert_metrics(self, 5. / 3, {\"loss\": 5. / 3}, model_fn_ops)\n\n def testRegressionWithInvalidLogits(self):\n head = head_lib.regression_head()\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(ValueError, \"Dimensions.*not compatible\"):\n head.create_model_fn_ops(\n {},\n labels=((0.,), (1.,), (1.,)),\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((1., 1.), (1., 1.), (3., 1.)))\n\n def testRegressionWithLogitsInput(self):\n head = head_lib.regression_head()\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n labels=((0.,), (1.,), (1.,)),\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits_input=((0., 0.), (0., 0.), (0., 0.)))\n self._assert_output_alternatives(model_fn_ops)\n w = (\"regression_head/logits/weights:0\",\n \"regression_head/logits/biases:0\")\n _assert_variables(\n self, expected_global=w, expected_model=w, expected_trainable=w)\n variables.global_variables_initializer().run()\n _assert_summary_tags(self, [\"loss\"])\n _assert_metrics(self, 2. / 3, {\"loss\": 2. 
/ 3}, model_fn_ops)\n\n def testRegressionWithLogitsAndLogitsInput(self):\n head = head_lib.regression_head()\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(\n ValueError, \"Both logits and logits_input supplied\"):\n head.create_model_fn_ops(\n {},\n labels=((0.,), (1.,), (1.,)),\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits_input=((0., 0.), (0., 0.), (0., 0.)),\n logits=((1.,), (1.,), (3.,)))\n\n def testRegressionEvalMode(self):\n head = head_lib.regression_head()\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n labels=((1.,), (1.,), (3.,)),\n mode=model_fn.ModeKeys.EVAL,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((0.,), (1.,), (1.,)))\n self._assert_output_alternatives(model_fn_ops)\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n _assert_metrics(self, 5. / 3, {\"loss\": 5. / 3}, model_fn_ops)\n\n def testRegressionWithLabelName(self):\n label_name = \"my_label\"\n head = head_lib.regression_head(label_name=label_name)\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n labels={label_name: ((0.,), (1.,), (1.,))},\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((1.,), (1.,), (3.,)))\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n _assert_metrics(self, 5. / 3, {\"loss\": 5. / 3}, model_fn_ops)\n\n def testRegressionWithWeights(self):\n head = head_lib.regression_head(weight_column_name=\"label_weight\")\n with ops.Graph().as_default(), session.Session():\n weights = ((2.,), (5.,), (0.,))\n model_fn_ops = head.create_model_fn_ops(\n features={\"label_weight\": weights},\n labels=((0.,), (1.,), (1.,)),\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((1.,), (1.,), (3.,)))\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n _assert_metrics(self, 2. / len(weights), {\"loss\": 2. / np.sum(weights)},\n model_fn_ops)\n\n def testRegressionWithCenteredBias(self):\n head = head_lib.regression_head(enable_centered_bias=True)\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n labels=((0.,), (1.,), (1.,)),\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((1.,), (1.,), (3.,)))\n self._assert_output_alternatives(model_fn_ops)\n _assert_variables(\n self,\n expected_global=(\n \"regression_head/centered_bias_weight:0\",\n \"regression_head/regression_head/centered_bias_weight/Adagrad:0\",\n ),\n expected_trainable=(\"regression_head/centered_bias_weight:0\",))\n variables.global_variables_initializer().run()\n _assert_summary_tags(self, [\n \"loss\",\n \"regression_head/centered_bias/bias_0\"\n ])\n _assert_metrics(self, 5. / 3, {\"loss\": 5. 
/ 3}, model_fn_ops)\n\n def testRegressionErrorInSparseTensorLabels(self):\n head = head_lib.regression_head()\n with ops.Graph().as_default():\n labels = sparse_tensor.SparseTensorValue(\n indices=((0, 0), (1, 0), (2, 0)),\n values=(0., 1., 1.),\n dense_shape=(3, 1))\n with self.assertRaisesRegexp(ValueError,\n \"SparseTensor is not supported\"):\n head.create_model_fn_ops(\n {},\n labels=labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((1.,), (1.,), (3.,)))\n\n\nclass MultiLabelHeadTest(test.TestCase):\n\n def _assert_output_alternatives(self, model_fn_ops):\n self.assertEquals({\n None: constants.ProblemType.CLASSIFICATION\n }, {\n k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)\n })\n\n def setUp(self):\n self._logits = ((1., 0., 0.),)\n self._labels = ((0, 0, 1),)\n\n def _expected_eval_metrics(self, expected_loss):\n return {\n \"accuracy\": 1. / 3,\n \"loss\": expected_loss,\n \"auc\": 1. / 4,\n \"auc/class0\": 1.,\n \"auc/class1\": 1.,\n \"auc/class2\": 0.,\n \"auc_precision_recall\": 0.166667,\n \"auc_precision_recall/class0\": 0,\n \"auc_precision_recall/class1\": 0.,\n \"auc_precision_recall/class2\": 1.,\n \"labels/actual_label_mean/class0\": self._labels[0][0],\n \"labels/actual_label_mean/class1\": self._labels[0][1],\n \"labels/actual_label_mean/class2\": self._labels[0][2],\n \"labels/logits_mean/class0\": self._logits[0][0],\n \"labels/logits_mean/class1\": self._logits[0][1],\n \"labels/logits_mean/class2\": self._logits[0][2],\n \"labels/prediction_mean/class0\": self._logits[0][0],\n \"labels/prediction_mean/class1\": self._logits[0][1],\n \"labels/prediction_mean/class2\": self._logits[0][2],\n \"labels/probability_mean/class0\": _sigmoid(self._logits[0][0]),\n \"labels/probability_mean/class1\": _sigmoid(self._logits[0][1]),\n \"labels/probability_mean/class2\": _sigmoid(self._logits[0][2]),\n }\n\n def testMultiLabelWithLogits(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .89985204\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiLabelTwoClasses(self):\n n_classes = 2\n labels = ((0, 1),)\n logits = ((1., 0.),)\n head = head_lib.multi_label_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, labels=labels,\n train_op_fn=head_lib.no_op_train_fn, logits=logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 1.00320443\n _assert_metrics(self, expected_loss, {\n \"accuracy\": 0.,\n \"auc\": 0.,\n \"loss\": expected_loss,\n \"auc/class0\": 1.,\n \"auc/class1\": 0.,\n \"labels/actual_label_mean/class0\": labels[0][0],\n \"labels/actual_label_mean/class1\": labels[0][1],\n \"labels/logits_mean/class0\": logits[0][0],\n \"labels/logits_mean/class1\": logits[0][1],\n \"labels/prediction_mean/class0\": logits[0][0],\n \"labels/prediction_mean/class1\": logits[0][1],\n \"labels/probability_mean/class0\": _sigmoid(logits[0][0]),\n 
\"labels/probability_mean/class1\": _sigmoid(logits[0][1]),\n }, model_fn_ops)\n\n def testMultiLabelWithInvalidLogits(self):\n head = head_lib.multi_label_head(n_classes=len(self._labels[0]) + 1)\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(ValueError, \"Dimensions.*not compatible\"):\n head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n\n def testMultiLabelWithLogitsInput(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits_input=((0., 0.),))\n self._assert_output_alternatives(model_fn_ops)\n w = (\"multi_label_head/logits/weights:0\",\n \"multi_label_head/logits/biases:0\")\n _assert_variables(\n self, expected_global=w, expected_model=w, expected_trainable=w)\n variables.global_variables_initializer().run()\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .69314718\n _assert_metrics(self, expected_loss, {\n \"accuracy\": 2. / 3,\n \"auc\": 2. / 4,\n \"loss\": expected_loss,\n \"auc/class0\": 1.,\n \"auc/class1\": 1.,\n \"auc/class2\": 0.,\n \"labels/actual_label_mean/class0\": self._labels[0][0],\n \"labels/actual_label_mean/class1\": self._labels[0][1],\n \"labels/actual_label_mean/class2\": self._labels[0][2],\n \"labels/logits_mean/class0\": 0.,\n \"labels/logits_mean/class1\": 0.,\n \"labels/logits_mean/class2\": 0.,\n \"labels/prediction_mean/class0\": 0.,\n \"labels/prediction_mean/class1\": 0.,\n \"labels/prediction_mean/class2\": 0.,\n \"labels/probability_mean/class0\": .5,\n \"labels/probability_mean/class1\": .5,\n \"labels/probability_mean/class2\": .5,\n }, model_fn_ops)\n\n def testMultiLabelWithLogitsAndLogitsInput(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(\n ValueError, \"Both logits and logits_input supplied\"):\n head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits_input=((0., 0.),), logits=self._logits)\n\n def testMultiLabelEval(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .89985204\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiClassEvalWithLargeLogits(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n logits = ((2., 0., -1),)\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,\n logits=logits)\n self._assert_output_alternatives(model_fn_ops)\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n _assert_summary_tags(self, 
[\"loss\"])\n expected_loss = 1.377779\n expected_eval_metrics = {\n \"accuracy\": 1. / 3,\n \"auc\": 9.99999e-07,\n \"loss\": expected_loss,\n \"auc/class0\": 1.,\n \"auc/class1\": 1.,\n \"auc/class2\": 0.,\n \"labels/actual_label_mean/class0\": 0. / 1,\n \"labels/actual_label_mean/class1\": 0. / 1,\n \"labels/actual_label_mean/class2\": 1. / 1,\n \"labels/logits_mean/class0\": logits[0][0],\n \"labels/logits_mean/class1\": logits[0][1],\n \"labels/logits_mean/class2\": logits[0][2],\n \"labels/prediction_mean/class0\": 1,\n \"labels/prediction_mean/class1\": 0,\n \"labels/prediction_mean/class2\": 0,\n \"labels/probability_mean/class0\": _sigmoid(logits[0][0]),\n \"labels/probability_mean/class1\": _sigmoid(logits[0][1]),\n \"labels/probability_mean/class2\": _sigmoid(logits[0][2]),\n }\n _assert_metrics(self, expected_loss,\n expected_eval_metrics, model_fn_ops)\n\n def testMultiLabelInfer(self):\n n_classes = 3\n head = head_lib.multi_label_head(n_classes=n_classes, head_name=\"head_name\")\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.INFER, self._labels, head_lib.no_op_train_fn,\n logits=((1., 0., 0.), (0., 0., 1)))\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n with session.Session():\n self.assertListEqual(\n [1, 0, 0], model_fn_ops.predictions[\"classes\"].eval().tolist()[0])\n self.assertItemsEqual(\n [\"head_name\"], six.iterkeys(model_fn_ops.output_alternatives))\n self.assertEqual(\n constants.ProblemType.CLASSIFICATION,\n model_fn_ops.output_alternatives[\"head_name\"][0])\n\n predictions_for_serving = (\n model_fn_ops.output_alternatives[\"head_name\"][1])\n self.assertIn(\"classes\", six.iterkeys(predictions_for_serving))\n self.assertAllEqual(\n [[b\"0\", b\"1\", b\"2\"], [b\"0\", b\"1\", b\"2\"]],\n predictions_for_serving[\"classes\"].eval())\n self.assertIn(\"probabilities\", six.iterkeys(predictions_for_serving))\n self.assertAllClose(\n [[0.731059, 0.5, 0.5],\n [0.5, 0.5, 0.731059,]],\n predictions_for_serving[\"probabilities\"].eval())\n\n def testMultiLabelWithLabelName(self):\n n_classes = 3\n label_name = \"my_label\"\n head = head_lib.multi_label_head(\n n_classes=n_classes,\n label_name=label_name,\n metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, {label_name: self._labels},\n head_lib.no_op_train_fn, logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .89985204\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiLabelWithWeight(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes,\n weight_column_name=\"label_weight\",\n metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n features={\"label_weight\": .1},\n labels=self._labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n _assert_metrics(self, .089985214,\n self._expected_eval_metrics(.89985214), model_fn_ops)\n\n def testMultiLabelWithMultiDimensionalWeight(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes,\n 
weight_column_name=\"label_weight\",\n metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n features={\"label_weight\": ((.1, .1, .1),)},\n labels=self._labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n _assert_metrics(self, .089985214,\n self._expected_eval_metrics(.89985214), model_fn_ops)\n\n def testMultiLabelWithCustomLoss(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes,\n weight_column_name=\"label_weight\",\n metric_class_ids=range(n_classes),\n loss_fn=_sigmoid_cross_entropy)\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n features={\"label_weight\": .1},\n labels=self._labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .089985214\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiLabelWithCenteredBias(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes,\n enable_centered_bias=True,\n metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_variables(\n self,\n expected_global=(\n \"multi_label_head/centered_bias_weight:0\",\n (\"multi_label_head/multi_label_head/centered_bias_weight/\"\n \"Adagrad:0\"),),\n expected_trainable=(\"multi_label_head/centered_bias_weight:0\",))\n variables.global_variables_initializer().run()\n _assert_summary_tags(self, (\n \"loss\",\n \"multi_label_head/centered_bias/bias_0\",\n \"multi_label_head/centered_bias/bias_1\",\n \"multi_label_head/centered_bias/bias_2\"\n ))\n expected_loss = .89985204\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiLabelSparseTensorLabels(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n labels = sparse_tensor.SparseTensorValue(\n indices=((0, 0),),\n values=(2,),\n dense_shape=(1, 1))\n model_fn_ops = head.create_model_fn_ops(\n features={},\n mode=model_fn.ModeKeys.TRAIN,\n labels=labels,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .89985204\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiLabelSparseTensorLabelsTooFewClasses(self):\n n_classes = 3\n head = head_lib.multi_label_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n # Set _logits_dimension (n_classes) to a lower value; if it's set to 1\n # upfront, the class throws an error during initialization.\n head._logits_dimension = 1\n with ops.Graph().as_default(), session.Session():\n labels = sparse_tensor.SparseTensorValue(\n indices=((0, 0),),\n values=(2,),\n dense_shape=(1, 1))\n with self.assertRaisesRegexp(ValueError,\n \"Must set num_classes >= 2 when 
passing\"):\n head.create_model_fn_ops(\n features={},\n labels=labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=[0.])\n\n\nclass BinaryClassificationHeadTest(test.TestCase):\n\n def _assert_output_alternatives(self, model_fn_ops):\n self.assertEquals({\n None: constants.ProblemType.LOGISTIC_REGRESSION\n }, {\n k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)\n })\n\n def setUp(self):\n self._logits = ((1.,), (1.,))\n self._labels = ((1.,), (0.,))\n\n def _expected_eval_metrics(self, expected_loss):\n label_mean = np.mean(self._labels)\n return {\n \"accuracy\": 1. / 2,\n \"accuracy/baseline_label_mean\": label_mean,\n \"accuracy/threshold_0.500000_mean\": 1. / 2,\n \"auc\": 1. / 2,\n \"auc_precision_recall\": 0.749999,\n \"labels/actual_label_mean\": label_mean,\n \"labels/prediction_mean\": .731059, # softmax\n \"loss\": expected_loss,\n \"precision/positive_threshold_0.500000_mean\": 1. / 2,\n \"recall/positive_threshold_0.500000_mean\": 1. / 1,\n }\n\n def testBinaryClassificationWithLogits(self):\n n_classes = 2\n head = head_lib.multi_class_head(n_classes=n_classes)\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .81326175\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testBinaryClassificationWithInvalidLogits(self):\n head = head_lib.multi_class_head(n_classes=len(self._labels) + 1)\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(ValueError, \"Dimensions.*not compatible\"):\n head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n\n def testBinaryClassificationWithLogitsInput(self):\n n_classes = 2\n head = head_lib.multi_class_head(n_classes=n_classes)\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits_input=((0., 0.), (0., 0.)))\n self._assert_output_alternatives(model_fn_ops)\n w = (\"binary_logistic_head/logits/weights:0\",\n \"binary_logistic_head/logits/biases:0\")\n _assert_variables(\n self, expected_global=w, expected_model=w, expected_trainable=w)\n variables.global_variables_initializer().run()\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .69314718\n label_mean = np.mean(self._labels)\n _assert_metrics(self, expected_loss, {\n \"accuracy\": 1. / 2,\n \"accuracy/baseline_label_mean\": label_mean,\n \"accuracy/threshold_0.500000_mean\": 1. / 2,\n \"auc\": 1. / 2,\n \"labels/actual_label_mean\": label_mean,\n \"labels/prediction_mean\": .5, # softmax\n \"loss\": expected_loss,\n \"precision/positive_threshold_0.500000_mean\": 0. / 2,\n \"recall/positive_threshold_0.500000_mean\": 0. 
/ 1,\n }, model_fn_ops)\n\n def testBinaryClassificationWithLogitsAndLogitsInput(self):\n head = head_lib.multi_class_head(n_classes=2)\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(\n ValueError, \"Both logits and logits_input supplied\"):\n head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits_input=((0., 0.), (0., 0.)), logits=self._logits)\n\n def testBinaryClassificationEval(self):\n n_classes = 2\n head = head_lib.multi_class_head(n_classes=n_classes)\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .81326175\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testBinaryClassificationInfer(self):\n n_classes = 2\n head = head_lib.multi_class_head(n_classes=n_classes, head_name=\"head_name\")\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.INFER, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n with session.Session():\n self.assertListEqual(\n [1, 1], list(model_fn_ops.predictions[\"classes\"].eval()))\n self.assertItemsEqual(\n [\"head_name\"], six.iterkeys(model_fn_ops.output_alternatives))\n self.assertEqual(\n constants.ProblemType.LOGISTIC_REGRESSION,\n model_fn_ops.output_alternatives[\"head_name\"][0])\n predictions_for_serving = (\n model_fn_ops.output_alternatives[\"head_name\"][1])\n self.assertIn(\"classes\", six.iterkeys(predictions_for_serving))\n predicted_classes = predictions_for_serving[\"classes\"].eval().tolist()\n self.assertListEqual(\n [b\"0\", b\"1\"], predicted_classes[0])\n self.assertIn(\"probabilities\", six.iterkeys(predictions_for_serving))\n\n def testBinaryClassificationInferMode_withWeightColumn(self):\n n_classes = 2\n head = head_lib.multi_class_head(n_classes=n_classes,\n weight_column_name=\"label_weight\")\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n # This is what is being tested, features should not have weight for\n # inference.\n {}, model_fn.ModeKeys.INFER, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n\n def testErrorInSparseTensorLabels(self):\n n_classes = 2\n head = head_lib.multi_class_head(n_classes=n_classes)\n with ops.Graph().as_default():\n labels = sparse_tensor.SparseTensorValue(\n indices=((0, 0), (1, 0), (2, 0)),\n values=(0, 1, 1),\n dense_shape=(3, 1))\n with self.assertRaisesRegexp(ValueError,\n \"SparseTensor is not supported\"):\n head.create_model_fn_ops(\n {},\n model_fn.ModeKeys.TRAIN,\n labels,\n head_lib.no_op_train_fn,\n logits=((1.,), (1.,), (3.,)))\n\n def testBinaryClassificationWithLabelName(self):\n label_name = \"my_label\"\n head = 
head_lib.multi_class_head(n_classes=2, label_name=label_name)\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {},\n labels={label_name: self._labels},\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = .81326175\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testBinaryClassificationWithWeights(self):\n n_classes = 2\n head = head_lib.multi_class_head(\n n_classes=n_classes, weight_column_name=\"label_weight\")\n with ops.Graph().as_default(), session.Session():\n weights = ((1.,), (0.,))\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n features={\"label_weight\": weights},\n labels=self._labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_total_loss = .31326166\n _assert_metrics(\n self,\n expected_total_loss / len(weights),\n {\n \"accuracy\": 1. / 1,\n \"accuracy/baseline_label_mean\": 1. / 1,\n \"accuracy/threshold_0.500000_mean\": 1. / 1,\n \"auc\": 0. / 1,\n \"labels/actual_label_mean\": 1. / 1,\n \"labels/prediction_mean\": .731059, # softmax\n # eval loss is weighted loss divided by sum of weights.\n \"loss\": expected_total_loss,\n \"precision/positive_threshold_0.500000_mean\": 1. / 1,\n \"recall/positive_threshold_0.500000_mean\": 1. / 1,\n },\n model_fn_ops)\n\n def testBinaryClassificationWithCustomLoss(self):\n head = head_lib.multi_class_head(\n n_classes=2, weight_column_name=\"label_weight\",\n loss_fn=_sigmoid_cross_entropy)\n with ops.Graph().as_default(), session.Session():\n weights = ((.2,), (0.,))\n model_fn_ops = head.create_model_fn_ops(\n features={\"label_weight\": weights},\n labels=self._labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n # expected_loss is (total_weighted_loss)/1 since there is 1 nonzero\n # weight.\n expected_loss = 0.062652342\n _assert_metrics(\n self,\n expected_loss,\n {\n \"accuracy\": 1. / 1,\n \"accuracy/baseline_label_mean\": 1. / 1,\n \"accuracy/threshold_0.500000_mean\": 1. / 1,\n \"auc\": 0. / 1,\n \"labels/actual_label_mean\": 1. / 1,\n \"labels/prediction_mean\": .731059, # softmax\n \"loss\": expected_loss,\n \"precision/positive_threshold_0.500000_mean\": 1. / 1,\n \"recall/positive_threshold_0.500000_mean\": 1. 
/ 1,\n },\n model_fn_ops)\n\n def testBinaryClassificationWithCenteredBias(self):\n head = head_lib.multi_class_head(n_classes=2, enable_centered_bias=True)\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_variables(\n self,\n expected_global=(\n \"binary_logistic_head/centered_bias_weight:0\",\n (\"binary_logistic_head/binary_logistic_head/centered_bias_weight/\"\n \"Adagrad:0\"),),\n expected_trainable=(\"binary_logistic_head/centered_bias_weight:0\",))\n variables.global_variables_initializer().run()\n _assert_summary_tags(self, [\n \"loss\",\n \"binary_logistic_head/centered_bias/bias_0\"\n ])\n expected_loss = .81326175\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n\nclass MultiClassHeadTest(test.TestCase):\n\n def _assert_output_alternatives(self, model_fn_ops):\n self.assertEquals({\n None: constants.ProblemType.CLASSIFICATION\n }, {\n k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)\n })\n\n def setUp(self):\n self._logits = ((1., 0., 0.),)\n self._labels = ((2,),)\n\n def _expected_eval_metrics(self, expected_loss):\n return {\n \"accuracy\": 0.,\n \"loss\": expected_loss,\n \"labels/actual_label_mean/class0\": 0. / 1,\n \"labels/actual_label_mean/class1\": 0. / 1,\n \"labels/actual_label_mean/class2\": 1. / 1,\n \"labels/logits_mean/class0\": self._logits[0][0],\n \"labels/logits_mean/class1\": self._logits[0][1],\n \"labels/logits_mean/class2\": self._logits[0][2],\n \"labels/prediction_mean/class0\": self._logits[0][0],\n \"labels/prediction_mean/class1\": self._logits[0][1],\n \"labels/prediction_mean/class2\": self._logits[0][2],\n \"labels/probability_mean/class0\": 0.576117, # softmax\n \"labels/probability_mean/class1\": 0.211942, # softmax\n \"labels/probability_mean/class2\": 0.211942, # softmax\n }\n\n def testMultiClassWithLogits(self):\n n_classes = 3\n head = head_lib.multi_class_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 1.5514447\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiClassWithInvalidLogits(self):\n head = head_lib.multi_class_head(n_classes=len(self._logits[0]) + 1)\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(ValueError, \"Dimensions.*not compatible\"):\n head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n\n def testMultiClassWithNoneTrainOpFnInTrain(self):\n head = head_lib.multi_class_head(n_classes=3)\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(\n ValueError, \"train_op_fn can not be None in TRAIN mode\"):\n head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels,\n train_op_fn=None,\n logits=self._logits)\n\n def 
testMultiClassWithLogitsInput(self):\n n_classes = 3\n head = head_lib.multi_class_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits_input=((0., 0.),))\n self._assert_output_alternatives(model_fn_ops)\n w = (\"multi_class_head/logits/weights:0\",\n \"multi_class_head/logits/biases:0\")\n _assert_variables(\n self, expected_global=w, expected_model=w, expected_trainable=w)\n variables.global_variables_initializer().run()\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 1.0986123\n _assert_metrics(self, expected_loss, {\n \"accuracy\": 0.,\n \"loss\": expected_loss,\n \"labels/actual_label_mean/class0\": 0. / 1,\n \"labels/actual_label_mean/class1\": 0. / 1,\n \"labels/actual_label_mean/class2\": 1. / 1,\n \"labels/logits_mean/class0\": 0.,\n \"labels/logits_mean/class1\": 0.,\n \"labels/logits_mean/class2\": 0.,\n \"labels/prediction_mean/class0\": 1.,\n \"labels/prediction_mean/class1\": 0.,\n \"labels/prediction_mean/class2\": 0.,\n \"labels/probability_mean/class0\": 0.333333, # softmax\n \"labels/probability_mean/class1\": 0.333333, # softmax\n \"labels/probability_mean/class2\": 0.333333, # softmax\n }, model_fn_ops)\n\n def testMultiClassWithLogitsAndLogitsInput(self):\n n_classes = 3\n head = head_lib.multi_class_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(\n ValueError, \"Both logits and logits_input supplied\"):\n head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits_input=((0., 0.),), logits=self._logits)\n\n def testMultiClassEnableCenteredBias(self):\n n_classes = 3\n head = head_lib.multi_class_head(\n n_classes=n_classes, enable_centered_bias=True)\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_variables(\n self,\n expected_global=(\n \"multi_class_head/centered_bias_weight:0\",\n (\"multi_class_head/multi_class_head/centered_bias_weight/\"\n \"Adagrad:0\"),\n ),\n expected_trainable=(\"multi_class_head/centered_bias_weight:0\",))\n variables.global_variables_initializer().run()\n _assert_summary_tags(self,\n [\"loss\",\n \"multi_class_head/centered_bias/bias_0\",\n \"multi_class_head/centered_bias/bias_1\",\n \"multi_class_head/centered_bias/bias_2\"])\n\n def testMultiClassEval(self):\n n_classes = 3\n head = head_lib.multi_class_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 1.5514447\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def 
testMultiClassEvalModeWithLargeLogits(self):\n n_classes = 3\n head = head_lib.multi_class_head(\n n_classes=n_classes, metric_class_ids=range(n_classes))\n logits = ((2., 0., -1),)\n with ops.Graph().as_default(), session.Session():\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,\n logits=logits)\n self._assert_output_alternatives(model_fn_ops)\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 3.1698461\n expected_eval_metrics = {\n \"accuracy\": 0.,\n \"loss\": expected_loss,\n \"labels/actual_label_mean/class0\": 0. / 1,\n \"labels/actual_label_mean/class1\": 0. / 1,\n \"labels/actual_label_mean/class2\": 1. / 1,\n \"labels/logits_mean/class0\": logits[0][0],\n \"labels/logits_mean/class1\": logits[0][1],\n \"labels/logits_mean/class2\": logits[0][2],\n \"labels/prediction_mean/class0\": 1,\n \"labels/prediction_mean/class1\": 0,\n \"labels/prediction_mean/class2\": 0,\n \"labels/probability_mean/class0\": 0.843795, # softmax\n \"labels/probability_mean/class1\": 0.114195, # softmax\n \"labels/probability_mean/class2\": 0.0420101, # softmax\n }\n _assert_metrics(self, expected_loss,\n expected_eval_metrics, model_fn_ops)\n\n def testMultiClassWithScalarWeight(self):\n n_classes = 3\n head = head_lib.multi_class_head(\n n_classes=n_classes,\n weight_column_name=\"label_weight\",\n metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n weight = .1\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n features={\"label_weight\": weight},\n labels=self._labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 1.5514447\n _assert_metrics(self, expected_loss * weight,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiClassWith2DWeight(self):\n n_classes = 3\n head = head_lib.multi_class_head(\n n_classes=n_classes,\n weight_column_name=\"label_weight\",\n metric_class_ids=range(n_classes))\n with ops.Graph().as_default(), session.Session():\n weight = .1\n weights = ((weight,),)\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n features={\"label_weight\": weights},\n labels=self._labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 1.5514447\n _assert_metrics(self, expected_loss * weight,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiClassWithCustomLoss(self):\n n_classes = 3\n head = head_lib.multi_class_head(\n n_classes=n_classes,\n weight_column_name=\"label_weight\",\n metric_class_ids=range(n_classes),\n loss_fn=losses_lib.sparse_softmax_cross_entropy)\n with ops.Graph().as_default(), session.Session():\n weight = .1\n # logloss: z:label, x:logit\n # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n model_fn_ops = head.create_model_fn_ops(\n features={\"label_weight\": weight},\n labels=self._labels,\n mode=model_fn.ModeKeys.TRAIN,\n 
train_op_fn=head_lib.no_op_train_fn,\n logits=self._logits)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 1.5514447 * weight\n _assert_metrics(self, expected_loss,\n self._expected_eval_metrics(expected_loss), model_fn_ops)\n\n def testMultiClassInfer(self):\n n_classes = 3\n head = head_lib._multi_class_head(\n n_classes=n_classes,\n head_name=\"head_name\")\n with ops.Graph().as_default():\n model_fn_ops = head.create_model_fn_ops(\n features={},\n mode=model_fn.ModeKeys.INFER,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((1., 0., 0.), (0., 0., 1.),))\n with session.Session():\n lookup_ops.tables_initializer().run()\n self.assertAllEqual(\n [0, 2],\n model_fn_ops.predictions[\"classes\"].eval())\n self.assertItemsEqual(\n [\"head_name\"], six.iterkeys(model_fn_ops.output_alternatives))\n self.assertEqual(\n constants.ProblemType.CLASSIFICATION,\n model_fn_ops.output_alternatives[\"head_name\"][0])\n predictions_for_serving = (\n model_fn_ops.output_alternatives[\"head_name\"][1])\n self.assertIn(\"classes\", six.iterkeys(predictions_for_serving))\n self.assertAllEqual(\n [[b\"0\", b\"1\", b\"2\"], [b\"0\", b\"1\", b\"2\"]],\n predictions_for_serving[\"classes\"].eval())\n self.assertIn(\"probabilities\", six.iterkeys(predictions_for_serving))\n self.assertAllClose(\n [[0.576117, 0.2119416, 0.2119416],\n [0.2119416, 0.2119416, 0.576117]],\n predictions_for_serving[\"probabilities\"].eval())\n\n def testInvalidNClasses(self):\n for n_classes in (None, -1, 0, 1):\n with self.assertRaisesRegexp(ValueError, \"n_classes must be > 1\"):\n head_lib.multi_class_head(n_classes=n_classes)\n\n def testMultiClassWithLabelKeysInvalidShape(self):\n with self.assertRaisesRegexp(\n ValueError, \"Length of label_keys must equal n_classes\"):\n head_lib._multi_class_head(\n n_classes=3, label_keys=(\"key0\", \"key1\"))\n\n def testMultiClassWithLabelKeysTwoClasses(self):\n with self.assertRaisesRegexp(\n ValueError, \"label_keys is not supported for n_classes=2\"):\n head_lib._multi_class_head(\n n_classes=2, label_keys=(\"key0\", \"key1\"))\n\n def testMultiClassWithLabelKeysInfer(self):\n n_classes = 3\n label_keys = (\"key0\", \"key1\", \"key2\")\n head = head_lib._multi_class_head(\n n_classes=n_classes, label_keys=label_keys,\n metric_class_ids=range(n_classes),\n head_name=\"head_name\")\n with ops.Graph().as_default():\n model_fn_ops = head.create_model_fn_ops(\n features={},\n mode=model_fn.ModeKeys.INFER,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((1., 0., 0.), (0., 0., 1.),))\n with session.Session():\n lookup_ops.tables_initializer().run()\n self.assertAllEqual(\n [b\"key0\", b\"key2\"],\n model_fn_ops.predictions[\"classes\"].eval())\n self.assertItemsEqual(\n [\"head_name\"], six.iterkeys(model_fn_ops.output_alternatives))\n self.assertEqual(\n constants.ProblemType.CLASSIFICATION,\n model_fn_ops.output_alternatives[\"head_name\"][0])\n predictions_for_serving = (\n model_fn_ops.output_alternatives[\"head_name\"][1])\n self.assertIn(\"classes\", six.iterkeys(predictions_for_serving))\n self.assertAllEqual(\n [[b\"key0\", b\"key1\", b\"key2\"], [b\"key0\", b\"key1\", b\"key2\"]],\n predictions_for_serving[\"classes\"].eval())\n self.assertIn(\"probabilities\", six.iterkeys(predictions_for_serving))\n self.assertAllClose(\n [[0.576117, 0.2119416, 0.2119416],\n [0.2119416, 0.2119416, 0.576117]],\n predictions_for_serving[\"probabilities\"].eval())\n\n def 
testMultiClassWithLabelKeysEvalAccuracy0(self):\n n_classes = 3\n label_keys = (\"key0\", \"key1\", \"key2\")\n head = head_lib._multi_class_head(\n n_classes=n_classes,\n label_keys=label_keys)\n with ops.Graph().as_default():\n model_fn_ops = head.create_model_fn_ops(\n features={},\n mode=model_fn.ModeKeys.EVAL,\n labels=(\"key2\",),\n train_op_fn=head_lib.no_op_train_fn,\n logits=((1., 0., 0.),))\n with session.Session():\n lookup_ops.tables_initializer().run()\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 1.5514447\n expected_eval_metrics = {\n \"accuracy\": 0.,\n \"loss\": expected_loss,\n }\n _assert_metrics(self, expected_loss,\n expected_eval_metrics, model_fn_ops)\n\n def testMultiClassWithLabelKeysEvalAccuracy1(self):\n n_classes = 3\n label_keys = (\"key0\", \"key1\", \"key2\")\n head = head_lib._multi_class_head(\n n_classes=n_classes,\n label_keys=label_keys)\n with ops.Graph().as_default():\n model_fn_ops = head.create_model_fn_ops(\n features={},\n mode=model_fn.ModeKeys.EVAL,\n labels=(\"key2\",),\n train_op_fn=head_lib.no_op_train_fn,\n logits=((0., 0., 1.),))\n with session.Session():\n lookup_ops.tables_initializer().run()\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 0.5514447\n expected_eval_metrics = {\n \"accuracy\": 1.,\n \"loss\": expected_loss,\n }\n _assert_metrics(self, expected_loss,\n expected_eval_metrics, model_fn_ops)\n\n\nclass BinarySvmHeadTest(test.TestCase):\n\n def _assert_output_alternatives(self, model_fn_ops):\n self.assertEquals({\n None: constants.ProblemType.LOGISTIC_REGRESSION\n }, {\n k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)\n })\n\n def setUp(self):\n # Prediction for first example is in the right side of the hyperplane\n # (i.e., < 0) but it is within the [-1,1] margin. There is a 0.5 loss\n # incurred by this example. 
The 2nd prediction is outside the margin so it\n # incurs no loss at all.\n self._predictions = ((-.5,), (1.2,))\n self._labels = (0, 1)\n self._expected_losses = (.5, 0.)\n\n def testBinarySVMWithLogits(self):\n head = head_lib.binary_svm_head()\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n model_fn.ModeKeys.TRAIN,\n self._labels,\n head_lib.no_op_train_fn,\n logits=self._predictions)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = np.average(self._expected_losses)\n _assert_metrics(self, expected_loss, {\n \"accuracy\": 1.,\n \"loss\": expected_loss,\n }, model_fn_ops)\n\n def testBinarySVMWithInvalidLogits(self):\n head = head_lib.binary_svm_head()\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(ValueError, \"Dimensions.*not compatible\"):\n head.create_model_fn_ops(\n {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,\n logits=np.ones((2, 2)))\n\n def testBinarySVMWithLogitsInput(self):\n head = head_lib.binary_svm_head()\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n model_fn.ModeKeys.TRAIN,\n self._labels,\n head_lib.no_op_train_fn,\n logits_input=((0., 0.), (0., 0.)))\n self._assert_output_alternatives(model_fn_ops)\n w = (\"binary_svm_head/logits/weights:0\",\n \"binary_svm_head/logits/biases:0\")\n _assert_variables(\n self, expected_global=w, expected_model=w, expected_trainable=w)\n variables.global_variables_initializer().run()\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = 1.\n _assert_metrics(self, expected_loss, {\n \"accuracy\": .5,\n \"loss\": expected_loss,\n }, model_fn_ops)\n\n def testBinarySVMWithLogitsAndLogitsInput(self):\n head = head_lib.binary_svm_head()\n with ops.Graph().as_default(), session.Session():\n with self.assertRaisesRegexp(\n ValueError, \"Both logits and logits_input supplied\"):\n head.create_model_fn_ops(\n {},\n model_fn.ModeKeys.TRAIN,\n self._labels,\n head_lib.no_op_train_fn,\n logits_input=((0., 0.), (0., 0.)),\n logits=self._predictions)\n\n def testBinarySVMEvalMode(self):\n head = head_lib.binary_svm_head()\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n model_fn.ModeKeys.EVAL,\n self._labels,\n head_lib.no_op_train_fn,\n logits=self._predictions)\n self._assert_output_alternatives(model_fn_ops)\n self.assertIsNone(model_fn_ops.train_op)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = np.average(self._expected_losses)\n _assert_metrics(self, expected_loss, {\n \"accuracy\": 1.,\n \"loss\": expected_loss,\n }, model_fn_ops)\n\n def testBinarySVMWithLabelName(self):\n label_name = \"my_label\"\n head = head_lib.binary_svm_head(label_name=label_name)\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n model_fn.ModeKeys.TRAIN,\n {label_name: self._labels},\n head_lib.no_op_train_fn,\n logits=self._predictions)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_loss = np.average(self._expected_losses)\n _assert_metrics(self, expected_loss, {\n \"accuracy\": 1.,\n \"loss\": expected_loss,\n }, model_fn_ops)\n\n def testBinarySVMWithWeights(self):\n head = head_lib.binary_svm_head(weight_column_name=\"weights\")\n with ops.Graph().as_default(), session.Session():\n 
weights = (7., 11.)\n model_fn_ops = head.create_model_fn_ops(\n # We have to add an extra dim here for weights broadcasting to work.\n features={\"weights\": tuple([(w,) for w in weights])},\n mode=model_fn.ModeKeys.TRAIN,\n labels=self._labels,\n train_op_fn=head_lib.no_op_train_fn,\n logits=self._predictions)\n self._assert_output_alternatives(model_fn_ops)\n _assert_no_variables(self)\n _assert_summary_tags(self, [\"loss\"])\n expected_weighted_losses = np.multiply(weights, self._expected_losses)\n _assert_metrics(self, np.mean(expected_weighted_losses), {\n \"accuracy\": 1.,\n \"loss\": np.sum(expected_weighted_losses) / np.sum(weights),\n }, model_fn_ops)\n\n def testBinarySVMWithCenteredBias(self):\n head = head_lib.binary_svm_head(enable_centered_bias=True)\n with ops.Graph().as_default(), session.Session():\n model_fn_ops = head.create_model_fn_ops(\n {},\n model_fn.ModeKeys.TRAIN,\n self._labels,\n head_lib.no_op_train_fn,\n logits=self._predictions)\n self._assert_output_alternatives(model_fn_ops)\n _assert_variables(\n self,\n expected_global=(\n \"binary_svm_head/centered_bias_weight:0\",\n (\"binary_svm_head/binary_svm_head/centered_bias_weight/\"\n \"Adagrad:0\"),\n ),\n expected_trainable=(\"binary_svm_head/centered_bias_weight:0\",))\n variables.global_variables_initializer().run()\n _assert_summary_tags(self, [\n \"loss\",\n \"binary_svm_head/centered_bias/bias_0\"\n ])\n expected_loss = np.average(self._expected_losses)\n _assert_metrics(self, expected_loss, {\n \"accuracy\": 1.,\n \"loss\": expected_loss,\n }, model_fn_ops)\n\n\nclass MultiHeadTest(test.TestCase):\n\n def testInvalidHeads(self):\n named_head = head_lib.multi_class_head(\n n_classes=3, label_name=\"label\", head_name=\"head1\")\n unnamed_head = head_lib.multi_class_head(\n n_classes=4, label_name=\"label\")\n with self.assertRaisesRegexp(ValueError, \"must have names\"):\n head_lib.multi_head((named_head, unnamed_head))\n\n def testTrainWithNoneTrainOpFn(self):\n head1 = head_lib.multi_class_head(\n n_classes=3, label_name=\"label1\", head_name=\"head1\")\n head2 = head_lib.multi_class_head(\n n_classes=4, label_name=\"label2\", head_name=\"head2\")\n head = head_lib.multi_head((head1, head2))\n labels = {\n \"label1\": (1,),\n \"label2\": (1,)\n }\n with self.assertRaisesRegexp(\n ValueError, \"train_op_fn can not be None in TRAIN mode\"):\n head.create_model_fn_ops(\n features={\"weights\": (2.0, 10.0)},\n labels=labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=None,\n logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))\n\n def testTrain_withNoHeadWeights(self):\n head1 = head_lib.multi_class_head(\n n_classes=3, label_name=\"label1\", head_name=\"head1\")\n head2 = head_lib.multi_class_head(\n n_classes=4, label_name=\"label2\", head_name=\"head2\")\n head = head_lib.multi_head((head1, head2))\n labels = {\n \"label1\": (1,),\n \"label2\": (1,)\n }\n model_fn_ops = head.create_model_fn_ops(\n features={\"weights\": (2.0, 10.0)},\n labels=labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))\n\n self.assertIsNone(model_fn_ops.predictions)\n self.assertIsNotNone(model_fn_ops.loss)\n self.assertIsNotNone(model_fn_ops.train_op)\n self.assertFalse(model_fn_ops.eval_metric_ops)\n self.assertIsNone(model_fn_ops.output_alternatives)\n\n with session.Session() as sess:\n self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)\n\n def testTrain_withHeadWeights(self):\n head1 = head_lib.multi_class_head(\n n_classes=3, 
label_name=\"label1\", head_name=\"head1\")\n head2 = head_lib.multi_class_head(\n n_classes=4, label_name=\"label2\", head_name=\"head2\")\n head = head_lib.multi_head((head1, head2), (1, .5))\n labels = {\n \"label1\": (1,),\n \"label2\": (1,)\n }\n model_fn_ops = head.create_model_fn_ops(\n features={\"weights\": (2.0, 10.0)},\n labels=labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))\n self.assertIsNone(model_fn_ops.predictions)\n self.assertIsNotNone(model_fn_ops.loss)\n self.assertIsNotNone(model_fn_ops.train_op)\n self.assertFalse(model_fn_ops.eval_metric_ops)\n self.assertIsNone(model_fn_ops.output_alternatives)\n\n with session.Session() as sess:\n self.assertAlmostEqual(1.531, sess.run(model_fn_ops.loss), places=3)\n\n def testTrain_withDictLogits(self):\n head1 = head_lib.multi_class_head(\n n_classes=3, label_name=\"label1\", head_name=\"head1\")\n head2 = head_lib.multi_class_head(\n n_classes=4, label_name=\"label2\", head_name=\"head2\")\n head = head_lib.multi_head((head1, head2))\n labels = {\n \"label1\": (1,),\n \"label2\": (1,)\n }\n model_fn_ops = head.create_model_fn_ops(\n features={\"weights\": (2.0, 10.0)},\n labels=labels,\n mode=model_fn.ModeKeys.TRAIN,\n train_op_fn=head_lib.no_op_train_fn,\n logits={head1.head_name: ((-0.7, 0.2, .1),),\n head2.head_name: ((.1, .1, .1, .1),)})\n\n self.assertIsNone(model_fn_ops.predictions)\n self.assertIsNotNone(model_fn_ops.loss)\n self.assertIsNotNone(model_fn_ops.train_op)\n self.assertFalse(model_fn_ops.eval_metric_ops)\n self.assertIsNone(model_fn_ops.output_alternatives)\n\n with session.Session() as sess:\n self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)\n\n def testInfer(self):\n head1 = head_lib.multi_class_head(\n n_classes=3, label_name=\"label1\", head_name=\"head1\")\n head2 = head_lib.multi_class_head(\n n_classes=4, label_name=\"label2\", head_name=\"head2\")\n head = head_lib.multi_head((head1, head2), (1, .5))\n labels = {\n \"label1\": (1,),\n \"label2\": (1,)\n }\n model_fn_ops = head.create_model_fn_ops(\n features={\"weights\": (2.0, 10.0)},\n labels=labels,\n mode=model_fn.ModeKeys.INFER,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))\n\n self.assertIsNotNone(model_fn_ops.predictions)\n self.assertIsNone(model_fn_ops.loss)\n self.assertIsNone(model_fn_ops.train_op)\n self.assertFalse(model_fn_ops.eval_metric_ops)\n\n # Tests predictions keys.\n self.assertItemsEqual((\n (\"head1\", prediction_key.PredictionKey.LOGITS),\n (\"head1\", prediction_key.PredictionKey.PROBABILITIES),\n (\"head1\", prediction_key.PredictionKey.CLASSES),\n (\"head2\", prediction_key.PredictionKey.LOGITS),\n (\"head2\", prediction_key.PredictionKey.PROBABILITIES),\n (\"head2\", prediction_key.PredictionKey.CLASSES),\n ), model_fn_ops.predictions.keys())\n\n # Tests output alternative.\n self.assertEquals({\n \"head1\": constants.ProblemType.CLASSIFICATION,\n \"head2\": constants.ProblemType.CLASSIFICATION,\n }, {\n k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)\n })\n self.assertItemsEqual((\n prediction_key.PredictionKey.PROBABILITIES,\n prediction_key.PredictionKey.CLASSES,\n ), model_fn_ops.output_alternatives[\"head1\"][1].keys())\n self.assertItemsEqual((\n prediction_key.PredictionKey.PROBABILITIES,\n prediction_key.PredictionKey.CLASSES,\n ), model_fn_ops.output_alternatives[\"head2\"][1].keys())\n\n def testEval(self):\n head1 = head_lib.multi_class_head(\n n_classes=3, 
label_name=\"label1\", head_name=\"head1\")\n head2 = head_lib.multi_class_head(\n n_classes=4, label_name=\"label2\", head_name=\"head2\")\n head = head_lib.multi_head((head1, head2), (1, .5))\n labels = {\n \"label1\": (1,),\n \"label2\": (1,)\n }\n model_fn_ops = head.create_model_fn_ops(\n features={\"weights\": (2.0, 10.0)},\n labels=labels,\n mode=model_fn.ModeKeys.EVAL,\n train_op_fn=head_lib.no_op_train_fn,\n logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))\n\n self.assertIsNotNone(model_fn_ops.predictions)\n self.assertIsNotNone(model_fn_ops.loss)\n self.assertIsNone(model_fn_ops.train_op)\n self.assertIsNotNone(model_fn_ops.eval_metric_ops)\n self.assertIsNone(model_fn_ops.output_alternatives)\n\n metric_ops = model_fn_ops.eval_metric_ops\n\n # Tests eval keys.\n self.assertIn(\"accuracy/head1\", metric_ops.keys())\n self.assertIn(\"accuracy/head2\", metric_ops.keys())\n\n\ndef _sigmoid_cross_entropy(labels, logits, weights):\n return losses_lib.sigmoid_cross_entropy(labels, logits, weights)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "numpy.sum", "tensorflow.python.ops.variables.initialize_local_variables", "numpy.multiply", "numpy.ones", "numpy.log", "tensorflow.core.framework.summary_pb2.Summary", "tensorflow.contrib.learn.python.learn.estimators.head.poisson_regression_head", "numpy.ma.masked_array", "tensorflow.python.framework.ops.Graph", "tensorflow.contrib.learn.python.learn.estimators.head.multi_class_head", "tensorflow.python.client.session.Session", "numpy.average", "tensorflow.contrib.learn.python.learn.estimators.head.multi_label_head", "numpy.mean", "tensorflow.contrib.learn.python.learn.estimators.head.multi_head", "tensorflow.contrib.learn.python.learn.estimators.head.binary_svm_head", "tensorflow.python.ops.losses.losses.sigmoid_cross_entropy", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.variables.trainable_variables", "tensorflow.python.ops.lookup_ops.tables_initializer", "tensorflow.python.framework.sparse_tensor.SparseTensorValue", "tensorflow.contrib.learn.python.learn.estimators.head._multi_class_head", "tensorflow.python.ops.variables.model_variables", "tensorflow.python.framework.ops.get_collection", "numpy.exp", "tensorflow.python.platform.test.main", "tensorflow.python.ops.variables.global_variables", "tensorflow.contrib.learn.python.learn.estimators.head.regression_head", "numpy.array" ] ]
slf12/PaddleDetection
[ "de1adc8be683d21b59fdd6ecada9d3d2eff39eb9" ]
[ "ppdet/data/source/coco.py" ]
[ "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport numpy as np\n\nfrom .dataset import DataSet\nfrom ppdet.core.workspace import register, serializable\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\n@register\n@serializable\nclass COCODataSet(DataSet):\n \"\"\"\n Load COCO records with annotations in json file 'anno_path'\n\n Args:\n dataset_dir (str): root directory for dataset.\n image_dir (str): directory for images.\n anno_path (str): json file path.\n sample_num (int): number of samples to load, -1 means all.\n with_background (bool): whether load background as a class.\n if True, total class number will be 81. default True.\n \"\"\"\n\n def __init__(self,\n image_dir=None,\n anno_path=None,\n dataset_dir=None,\n sample_num=-1,\n with_background=True):\n super(COCODataSet, self).__init__(\n image_dir=image_dir,\n anno_path=anno_path,\n dataset_dir=dataset_dir,\n sample_num=sample_num,\n with_background=with_background)\n self.anno_path = anno_path\n self.sample_num = sample_num\n self.with_background = with_background\n # `roidbs` is list of dict whose structure is:\n # {\n # 'im_file': im_fname, # image file name\n # 'im_id': img_id, # image id\n # 'h': im_h, # height of image\n # 'w': im_w, # width\n # 'is_crowd': is_crowd,\n # 'gt_score': gt_score,\n # 'gt_class': gt_class,\n # 'gt_bbox': gt_bbox,\n # 'gt_poly': gt_poly,\n # }\n self.roidbs = None\n # a dict used to map category name to class id\n self.cname2cid = None\n\n def load_roidb_and_cname2cid(self):\n anno_path = os.path.join(self.dataset_dir, self.anno_path)\n image_dir = os.path.join(self.dataset_dir, self.image_dir)\n\n assert anno_path.endswith('.json'), \\\n 'invalid coco annotation file: ' + anno_path\n from pycocotools.coco import COCO\n coco = COCO(anno_path)\n img_ids = coco.getImgIds()\n cat_ids = coco.getCatIds()\n records = []\n ct = 0\n\n # when with_background = True, mapping category to classid, like:\n # background:0, first_class:1, second_class:2, ...\n catid2clsid = dict({\n catid: i + int(self.with_background)\n for i, catid in enumerate(cat_ids)\n })\n cname2cid = dict({\n coco.loadCats(catid)[0]['name']: clsid\n for catid, clsid in catid2clsid.items()\n })\n\n for img_id in img_ids:\n img_anno = coco.loadImgs(img_id)[0]\n im_fname = img_anno['file_name']\n im_w = float(img_anno['width'])\n im_h = float(img_anno['height'])\n\n ins_anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=False)\n instances = coco.loadAnns(ins_anno_ids)\n\n bboxes = []\n for inst in instances:\n x, y, box_w, box_h = inst['bbox']\n x1 = max(0, x)\n y1 = max(0, y)\n x2 = min(im_w - 1, x1 + max(0, box_w - 1))\n y2 = min(im_h - 1, y1 + max(0, box_h - 1))\n if inst['area'] > 0 and x2 >= x1 and y2 >= y1:\n inst['clean_bbox'] = [x1, y1, x2, y2]\n bboxes.append(inst)\n else:\n logger.warn(\n 'Found an invalid bbox in annotations: im_id: {}, '\n 'area: {} x1: {}, y1: {}, x2: {}, y2: {}.'.format(\n img_id, float(inst['area']), x1, y1, x2, 
y2))\n num_bbox = len(bboxes)\n\n gt_bbox = np.zeros((num_bbox, 4), dtype=np.float32)\n gt_class = np.zeros((num_bbox, 1), dtype=np.int32)\n gt_score = np.ones((num_bbox, 1), dtype=np.float32)\n is_crowd = np.zeros((num_bbox, 1), dtype=np.int32)\n difficult = np.zeros((num_bbox, 1), dtype=np.int32)\n gt_poly = [None] * num_bbox\n\n for i, box in enumerate(bboxes):\n catid = box['category_id']\n gt_class[i][0] = catid2clsid[catid]\n gt_bbox[i, :] = box['clean_bbox']\n is_crowd[i][0] = box['iscrowd']\n if 'segmentation' in box:\n gt_poly[i] = box['segmentation']\n\n im_fname = os.path.join(image_dir,\n im_fname) if image_dir else im_fname\n coco_rec = {\n 'im_file': im_fname,\n 'im_id': np.array([img_id]),\n 'h': im_h,\n 'w': im_w,\n 'is_crowd': is_crowd,\n 'gt_class': gt_class,\n 'gt_bbox': gt_bbox,\n 'gt_score': gt_score,\n 'gt_poly': gt_poly,\n }\n\n logger.debug('Load file: {}, im_id: {}, h: {}, w: {}.'.format(\n im_fname, img_id, im_h, im_w))\n records.append(coco_rec)\n ct += 1\n if self.sample_num > 0 and ct >= self.sample_num:\n break\n assert len(records) > 0, 'not found any coco record in %s' % (anno_path)\n logger.info('{} samples in file {}'.format(ct, anno_path))\n self.roidbs, self.cname2cid = records, cname2cid\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.zeros" ] ]
mvpossum/deep-learning
[ "cba1206a0b54ffbf59af4fc43435252bbee5a23e" ]
[ "tp4/entrega/train_lstm.py" ]
[ "from __future__ import print_function\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers import LSTM\nfrom keras.optimizers import RMSprop\nfrom keras.utils.data_utils import get_file\nimport numpy as np\nimport random\nimport sys\nimport h5py\nfrom utils import *\nimport os\nfrom keras.models import load_model\n\nloadM = False\n\nprint('Cargando dataset...')\npath = \"src/wikicorpus/con_dict_7500lines.h5\"\nwith h5py.File(path,'r') as hf:\n text = str(hf.get('dataset')[0]).decode(\"unicode_escape\")\nprint('corpus length:', len(text))\n\nchars = sorted(list(set(text)))\nprint('total chars:', len(chars))\nchar_indices = dict((c, i) for i, c in enumerate(chars))\nindices_char = dict((i, c) for i, c in enumerate(chars))\n\nprint('Creando oraciones...')\n# cut the text in semi-redundant sequences of maxlen characters\nmaxlen = 100\nstep = 31\nsentences = []\nnext_chars = []\nfor i in range(0, len(text) - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_chars.append(text[i + maxlen])\nprint('nb sequences:', len(sentences))\n\nprint('Vectorizando...')\nX = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)\ny = np.zeros((len(sentences), len(chars)), dtype=np.bool)\nfor i, sentence in enumerate(sentences):\n for t, char in enumerate(sentence):\n X[i, t, char_indices[char]] = 1\n y[i, char_indices[next_chars[i]]] = 1\n\n\n# build the model: a single LSTM\nprint('Creando modelo...')\nmodel = Sequential()\nmodel.add(LSTM(128, name='lstm1-128', consume_less='gpu', input_shape=(maxlen, len(chars)), return_sequences=True))\nmodel.add(Dropout(0.2))\nmodel.add(LSTM(128, name='lstm2-128', consume_less='gpu', return_sequences=True))\nmodel.add(Dropout(0.2))\nmodel.add(LSTM(256, name='lstm3-256', consume_less='gpu', return_sequences=True))\nmodel.add(Dropout(0.2))\nmodel.add(LSTM(256, name='lstm4-256', consume_less='gpu'))\nmodel.add(Dropout(0.2))\n\nmodel.add(Dense(256, name='densa_extra'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(len(chars), name='softmax', activation='softmax'))\n\nif loadM:\n sys.stdout.write('Cargando pesos desde archivo...')\n sys.stdout.flush()\n model.load_weights('models/red_alvi_labdcc_moreNeurons.py--23-Dec-2016--18-57--iter58loss[1.2178814809178105]val_loss[1.1792419333715782].h5',by_name=False)\n print('OK')\n\noptimizer = RMSprop(lr=0.01) #baje el lr de 0.01 a 0.0001\nprint('Compilando...')\nmodel.compile(loss='categorical_crossentropy', optimizer=optimizer)\n\n\ndef sample(preds, temperature=1.0):\n # helper function to sample an index from a probability array\n preds = np.asarray(preds).astype('float64')\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas)\n\nprint('Entrenando...')\nname=NameGen(os.path.basename(sys.argv[0]))\nmodelfile = name.get_model_file()\n# train the model, output generated text after each iteration\n\nbest_val_loss = None\nfor iteration in range(1, 60):\n print()\n print('-' * 50)\n print('Iteration', iteration)\n history = model.fit(X, y, batch_size=3072, nb_epoch=1, validation_split=0.25) #added validation\n if best_val_loss==None or history.history['val_loss']<best_val_loss:\n print('Guardando modelo en {}'.format(modelfile+'iter'+str(iteration)+'loss'+str(history.history['loss'])+'val_loss'+str(history.history['val_loss'])))\n best_val_loss = history.history['val_loss']\n 
model.save(modelfile+'iter'+str(iteration)+'loss'+str(history.history['loss'])+'val_loss'+str(history.history['val_loss'])+'.h5')\n\n start_index = random.randint(0, len(text) - maxlen - 1)\n\n for diversity in [0.4, 0.7, 0.9, 1.1]:\n print()\n print('----- diversity:', diversity)\n\n generated = ''\n sentence = text[start_index: start_index + maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n for i in range(400):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n\n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n\n generated += next_char\n sentence = sentence[1:] + next_char\n\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()\n\n" ]
[ [ "numpy.sum", "numpy.asarray", "numpy.argmax", "numpy.exp", "numpy.log", "numpy.random.multinomial" ] ]
Rambatino/Kruskals
[ "24d68822ce207633f878c3fc2c28679a1e746003" ]
[ "tests/test_kruskals.py" ]
[ "\"\"\"\nTesting module for Kruskals\n\"\"\"\n\nfrom setup_tests import Kruskals\nimport numpy as np\nimport pandas as pd\nimport pytest\n\ndef test_driver_score():\n \"\"\" Test driver_score is calculated correctly \"\"\"\n ndarr = np.array([\n [1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 8, 1],\n [1, 1, 9, 1, 1, 1],\n [9, 2, 2, 2, 2, 2],\n [3, 3, 3, 9, 3, 3],\n [1, 2, 2, 9, 1, 4]\n ])\n\n arr = np.array([1, 2, 3, 4, 5, 6])\n\n exp_driver_score = np.array([ 0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])\n driver_score = np.round(Kruskals.Kruskals(ndarr, arr).driver_score(), decimals=5)\n\n assert np.array_equal(driver_score, exp_driver_score)\n\ndef test_from_pandas_df():\n \"\"\" Test from pandas_df correctly slices the data \"\"\"\n ndarr = np.array([\n [1, 2, 3, 4, 5, 6, 1],\n [6, 5, 4, 3, 8, 1, 2],\n [1, 1, 9, 1, 1, 1, 3],\n [9, 2, 2, 2, 2, 2, 4],\n [3, 3, 3, 9, 3, 3, 5],\n [1, 2, 2, 9, 1, 4, 6]\n ])\n\n exp_driver_score = np.array([ 0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])\n\n df = pd.DataFrame(ndarr)\n driver_score = np.round(Kruskals.Kruskals.from_pandas_df(df, list(range(6)), 6).driver_score(), decimals=5)\n\n assert np.array_equal(driver_score, exp_driver_score)\n\ndef test_percentage():\n \"\"\" Test percentage is calculated correctly \"\"\"\n ndarr = np.array([\n [1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 8, 1],\n [1, 1, 9, 1, 1, 1],\n [9, 2, 2, 2, 2, 2],\n [3, 3, 3, 9, 3, 3],\n [1, 2, 2, 9, 1, 4]\n ])\n\n arr = np.array([1, 2, 3, 4, 5, 6])\n\n exp_driver_score = np.array([ 5.90856, 17.81959, 9.62429, 25.08222, 28.85722, 12.70813])\n driver_score = np.round(Kruskals.Kruskals(ndarr, arr).percentage(), decimals=5)\n\n assert np.array_equal(driver_score, exp_driver_score)\n\ndef test_series_output():\n \"\"\" Test percentage is calculated correctly \"\"\"\n ndarr = np.array([\n [1, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 8, 1],\n [1, 1, 9, 1, 1, 1],\n [9, 2, 2, 2, 2, 2],\n [3, 3, 3, 9, 3, 3],\n [1, 2, 2, 9, 1, 4]\n ])\n\n arr = np.array([1, 2, 3, 4, 5, 6])\n\n exp_driver_score = np.array([ 0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])\n series = Kruskals.Kruskals(ndarr, arr).driver_score_to_series()\n\n assert np.array_equal(np.round(series.values, decimals=5), exp_driver_score)\n assert series.name == 'score'\n assert series.index.name == 'driver'\n\ndef test_ivars_sub_into_series():\n \"\"\"\n Test that the column names are correctly mapped\n to the index values of the series\n \"\"\"\n ndarr = np.array([\n [1, 2, 3, 4, 5, 6, 1],\n [6, 5, 4, 3, 8, 1, 2],\n [1, 1, 9, 1, 1, 1, 3],\n [9, 2, 2, 2, 2, 2, 4],\n [3, 3, 3, 9, 3, 3, 5],\n [1, 2, 2, 9, 1, 4, 6]\n ])\n\n\n df = pd.DataFrame(ndarr)\n df.columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n ind_cols = ['a', 'b', 'c', 'd', 'e', 'f']\n\n series = Kruskals.Kruskals.from_pandas_df(df, ind_cols, 'g').driver_score_to_series()\n\n assert (series.index.values == ind_cols).all()\n\ndef test_that_direction_is_applied_on_directional_drivers_analysis():\n \"\"\" Test whether some driver scores are negative \"\"\"\n ndarr = np.array([\n [10, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 8, 1],\n [1, 1, 9, 1, 1, 1],\n [9, 2, 2, 2, 2, 2],\n [3, 3, 3, 9, 3, 3],\n [1, 2, 2, 9, 1, 4],\n [1, 2, 2, 9, 1, 4],\n [1, 2, 2, 9, 1, 4]\n ])\n\n arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n\n series = Kruskals.Kruskals(ndarr, arr).driver_score_to_series(True)\n\n assert (series.values < 0).any()\n\ndef test_ability_to_handle_all_same_type():\n \"\"\"\n Test to make sure that kruskals can handle data\n when all the values for and independent set are 0\n \"\"\"\n ndarr = 
np.array([\n [10, 0, 3, 4, 5, 6],\n [6, 0, 4, 3, 5, 1],\n [1, 0, 9, 1, 5, 1],\n [9, 0, 2, 2, 5, 2],\n [3, 0, 3, 9, 5, 3],\n [1, 0, 2, 9, 5, 4],\n [1, 0, 2, 9, 5, 4],\n [1, 0, 2, 9, 5, 4]\n ])\n\n arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n\n series = Kruskals.Kruskals(ndarr, arr).driver_score()\n\n assert series[1] == 0.0\n assert series[4] == 0.0\n\ndef test_can_handle_numpy_arrays_for_col_names():\n \"\"\" Test that df.columns can be passed into __init__ \"\"\"\n ndarr = np.array([\n [1, 2, 3, 4, 5, 6, 1],\n [6, 5, 4, 3, 8, 1, 2],\n [1, 1, 9, 1, 1, 1, 3],\n [9, 2, 2, 2, 2, 2, 4],\n [3, 3, 3, 9, 3, 3, 5],\n [1, 2, 2, 9, 1, 4, 6]\n ])\n\n exp_driver_score = np.array([0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])\n\n df = pd.DataFrame(ndarr)\n df.columns = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n driver_score = Kruskals.Kruskals(ndarr, exp_driver_score, i_vars=df.columns).driver_score_to_series()\n assert np.array_equal(driver_score.index.values, ['a', 'b', 'c', 'd', 'e', 'f', 'g'])\n\ndef test_return_error_if_i_vars_not_sufficient():\n \"\"\" Test that error raised when columns insufficient length \"\"\"\n ndarr = np.array([\n [1, 2, 3, 4, 5, 6, 1],\n [6, 5, 4, 3, 8, 1, 2],\n [1, 1, 9, 1, 1, 1, 3],\n [9, 2, 2, 2, 2, 2, 4],\n [3, 3, 3, 9, 3, 3, 5],\n [1, 2, 2, 9, 1, 4, 6]\n ])\n\n exp_driver_score = np.array([0.14721, 0.44398, 0.23979, 0.62493, 0.71898, 0.31662])\n i_vars = ['a', 'b', 'c', 'd', 'e', 'f']\n\n with pytest.raises(ValueError) as e:\n Kruskals.Kruskals(ndarr, exp_driver_score, i_vars=i_vars).driver_score_to_series()\n assert 'driver labels: {}, not sufficient for ndarray of shape {}'.format(i_vars, ndarr.shape) in str(e.value)\n\ndef test_percentage_when_non_directional():\n \"\"\" Test the percentage function behaves as expected \"\"\"\n ndarr = np.array([\n [10, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 8, 1],\n [1, 1, 9, 1, 1, 1],\n [9, 2, 2, 2, 2, 2],\n [3, 3, 3, 9, 3, 3],\n [1, 2, 2, 9, 1, 4],\n [1, 2, 2, 9, 1, 4],\n [1, 2, 2, 9, 1, 4]\n ])\n arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n percentage = Kruskals.Kruskals(ndarr, arr).driver_score(percentage=True)\n assert (np.round(percentage, decimals=4) == [18.7523, 13.8413, 15.4078, 21.5111, 23.4954, 6.9921]).all()\n\ndef test_percentage_when_directional():\n \"\"\" Test the percentage function behaves as expected \"\"\"\n ndarr = np.array([\n [10, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 8, 1],\n [1, 1, 9, 1, 1, 1],\n [9, 2, 2, 2, 2, 2],\n [3, 3, 3, 9, 3, 3],\n [1, 2, 2, 9, 1, 4],\n [1, 2, 2, 9, 1, 4],\n [1, 2, 2, 9, 1, 4]\n ])\n arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n percentage = Kruskals.Kruskals(ndarr, arr).driver_score(directional=True, percentage=True)\n assert (np.round(percentage, decimals=4) == [-18.7523, -13.8413, -15.4078, 21.5111, -23.4954, 6.9921]).all()\n\ndef test_dependent_variable_can_be_nan():\n ndarr = np.array([\n [10, 2, 3, 4, 5, 6],\n [6, 5, 4, 3, 8, 1],\n [1, 1, 9, 1, 1, 1],\n [9, 2, 2, 2, 2, 2],\n [3, 3, 3, 9, 3, 3],\n [1, 2, 2, 9, 1, 4],\n [1, 2, 2, 9, 1, 4],\n [1, 2, 2, 9, 1, 4]\n ])\n arr = np.array([1, 2, 3, 4, np.nan, 6, 7, 8])\n percentage = Kruskals.Kruskals(ndarr, arr).driver_score(directional=True, percentage=True)\n assert (np.round(percentage, decimals=4) == [-17.2805, -13.5913, -14.5028, 23.0658, -22.5377, 9.0218]).all()\n\n\ndef test_independent_1_col():\n ndarr = np.array([\n [10],\n [6],\n [1],\n [9],\n [3],\n [1],\n [1],\n [1],\n ])\n arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n percentage = Kruskals.Kruskals(ndarr, arr).driver_score(directional=True, percentage=True)\n\n assert 
(np.isnan(np.round(percentage, decimals=4))).all()\n\n\ndef test_independent_2_col():\n ndarr = np.array([\n [10, 2],\n [6, 5],\n [1, 1],\n [9, 2],\n [3, 3],\n [1, 2],\n [1, 2],\n [1, 2]\n ])\n arr = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n percentage = Kruskals.Kruskals(ndarr, arr).driver_score(directional=True, percentage=True)\n\n assert (np.isnan(np.round(percentage, decimals=4))).all()\n" ]
[ [ "numpy.array", "pandas.DataFrame", "numpy.round", "numpy.array_equal" ] ]
rokrokss/xgboost_ray
[ "15396fd96daf866512d5f4e4337cbfc4cf3f6061" ]
[ "xgboost_ray/data_sources/ml_dataset.py" ]
[ "from typing import Any, Optional, Sequence, List, Tuple, Dict\n\nfrom ray.actor import ActorHandle\n\nimport pandas as pd\nfrom xgboost_ray.data_sources.data_source import DataSource, RayFileType\n\ntry:\n import pyarrow # noqa: F401\n PYARROW_INSTALLED = True\nexcept (ImportError, AttributeError):\n PYARROW_INSTALLED = False\n\nif PYARROW_INSTALLED:\n from ray.util.data import MLDataset as MLDatasetType\nelse:\n MLDatasetType = None\n\n\ndef _assert_pyarrow_installed():\n if not PYARROW_INSTALLED:\n raise RuntimeError(\n \"Tried to use MLDataset as a data source, but pyarrow is not \"\n \"installed. This function shouldn't have been called. \"\n \"\\nFIX THIS by installing pyarrow: `pip install pyarrow`. \"\n \"\\nPlease also raise an issue on our GitHub: \"\n \"https://github.com/ray-project/xgboost_ray as this part of \"\n \"the code should not have been reached.\")\n\n\nclass MLDataset(DataSource):\n \"\"\"Read from distributed Ray MLDataset.\n\n The Ray MLDataset is a distributed dataset based on Ray's\n `parallel iterators <https://docs.ray.io/en/master/iter.html>`_.\n\n Shards of the MLDataset can be stored on different nodes, making\n it suitable for distributed loading.\n \"\"\"\n supports_central_loading = True\n supports_distributed_loading = True\n\n @staticmethod\n def is_data_type(data: Any,\n filetype: Optional[RayFileType] = None) -> bool:\n if not PYARROW_INSTALLED:\n return False\n return isinstance(data, MLDatasetType)\n\n @staticmethod\n def load_data(data: MLDatasetType,\n ignore: Optional[Sequence[str]] = None,\n indices: Optional[Sequence[int]] = None,\n **kwargs):\n _assert_pyarrow_installed()\n indices = indices or list(range(0, data.num_shards()))\n\n shards: List[pd.DataFrame] = [\n pd.concat(data.get_shard(i), copy=False) for i in indices\n ]\n\n # Concat all shards\n local_df = pd.concat(shards, copy=False)\n\n if ignore:\n local_df = local_df[local_df.columns.difference(ignore)]\n\n return local_df\n\n @staticmethod\n def get_n(data: MLDatasetType):\n return data.num_shards()\n\n @staticmethod\n def convert_to_series(data: MLDatasetType) -> pd.Series:\n _assert_pyarrow_installed()\n return DataSource.convert_to_series(data)\n\n @staticmethod\n def get_actor_shards(data: MLDatasetType, actors: Sequence[ActorHandle]\n ) -> Tuple[Any, Optional[Dict[int, Any]]]:\n _assert_pyarrow_installed()\n return DataSource.get_actor_shards(data, actors)\n" ]
[ [ "pandas.concat" ] ]
MolecularAI/route-distances
[ "7dac87578072f363c8857c85de6502b45e2c7ede" ]
[ "route_distances/ted/reactiontree.py" ]
[ "\"\"\"\nModule containing helper classes to compute the distance between to reaction trees using the APTED method\nSince APTED is based on ordered trees and the reaction trees are unordered, plenty of\nheuristics are implemented to deal with this.\n\"\"\"\nfrom __future__ import annotations\nimport itertools\nimport math\nfrom copy import deepcopy\nfrom typing import List, Union, Iterable, Tuple, Dict, Any\nfrom logging import getLogger\n\nimport numpy as np\nfrom rdkit import Chem, DataStructs\nfrom rdkit.Chem import AllChem\nfrom apted import APTED as Apted\n\nfrom route_distances.ted.utils import TreeContent, AptedConfig\nfrom route_distances.validation import validate_dict\nfrom route_distances.utils.type_utils import StrDict\n\n_FloatIterator = Iterable[float]\n\n\nclass ReactionTreeWrapper:\n \"\"\"\n Wrapper for a reaction tree that can calculate distances between\n trees.\n\n :param reaction_tree: the reaction tree to wrap\n :param content: the content of the route to consider in the distance calculation\n :param exhaustive_limit: if the number of possible ordered trees are below this limit create them all\n \"\"\"\n\n _index_permutations = {\n n: list(itertools.permutations(range(n), n)) for n in range(1, 8)\n }\n\n def __init__(\n self,\n reaction_tree: StrDict,\n content: Union[str, TreeContent] = TreeContent.MOLECULES,\n exhaustive_limit: int = 20,\n fp_radius: int = 2,\n fp_nbits: int = 2048,\n ) -> None:\n validate_dict(reaction_tree)\n single_node_tree = not bool(reaction_tree.get(\"children\", []))\n if single_node_tree and content == TreeContent.REACTIONS:\n raise ValueError(\n \"Cannot create wrapping with content = reactions for a tree without reactions\"\n )\n\n self._logger = getLogger(\"route_distances\")\n # Will convert string input automatically\n self._content = TreeContent(content)\n self._base_tree = deepcopy(reaction_tree)\n\n self._fp_params = (fp_radius, fp_nbits)\n self._add_mol_fingerprints(self._base_tree)\n\n if self._content != TreeContent.MOLECULES and not single_node_tree:\n self._add_rxn_fingerprint(self._base_tree[\"children\"][0], self._base_tree)\n\n if self._content == TreeContent.MOLECULES:\n self._base_tree = self._remove_children_nodes(self._base_tree)\n elif not single_node_tree and self._content == TreeContent.REACTIONS:\n self._base_tree = self._remove_children_nodes(\n self._base_tree[\"children\"][0]\n )\n\n self._trees = []\n self._tree_count, self._node_index_list = self._inspect_tree()\n self._enumeration = self._tree_count <= exhaustive_limit\n\n if self._enumeration:\n self._create_all_trees()\n else:\n self._trees.append(self._base_tree)\n\n @property\n def info(self) -> StrDict:\n \"\"\"Return a dictionary with internal information about the wrapper\"\"\"\n return {\n \"content\": self._content,\n \"tree count\": self._tree_count,\n \"enumeration\": self._enumeration,\n }\n\n @property\n def first_tree(self) -> StrDict:\n \"\"\"Return the first created ordered tree\"\"\"\n return self._trees[0]\n\n @property\n def trees(self) -> List[StrDict]:\n \"\"\"Return a list of all created ordered trees\"\"\"\n return self._trees\n\n def distance_iter(\n self, other: \"ReactionTreeWrapper\", exhaustive_limit: int = 20\n ) -> _FloatIterator:\n \"\"\"\n Iterate over all distances computed between this and another tree\n\n There are three possible enumeration of distances possible dependent\n on the number of possible ordered trees for the two routes that are compared\n\n * If the product of the number of possible ordered trees for both 
routes are\n below `exhaustive_limit` compute the distance between all pair of trees\n * If both self and other has been fully enumerated (i.e. all ordered trees has been created)\n compute the distances between all trees of the route with the most ordered trees and\n the first tree of the other route\n * Compute `exhaustive_limit` number of distances by shuffling the child order for\n each of the routes.\n\n The rules are applied top-to-bottom.\n\n :param other: another tree to calculate distance to\n :param exhaustive_limit: used to determine what type of enumeration to do\n :yield: the next computed distance between self and other\n \"\"\"\n if self._tree_count * other.info[\"tree count\"] < exhaustive_limit:\n yield from self._distance_iter_exhaustive(other)\n elif self._enumeration or other.info[\"enumeration\"]:\n yield from self._distance_iter_semi_exhaustive(other)\n else:\n yield from self._distance_iter_random(other, exhaustive_limit)\n\n def distance_to(\n self, other: \"ReactionTreeWrapper\", exhaustive_limit: int = 20\n ) -> float:\n \"\"\"\n Calculate the minimum distance from this route to another route\n\n Enumerate the distances using `distance_iter`.\n\n :param other: another tree to calculate distance to\n :param exhaustive_limit: used to determine what type of enumeration to do\n :return: the minimum distance\n \"\"\"\n min_dist = 1e6\n min_iter = -1\n for iteration, distance in enumerate(\n self.distance_iter(other, exhaustive_limit)\n ):\n if distance < min_dist:\n min_iter = iteration\n min_dist = distance\n self._logger.debug(f\"Found minimum after {min_iter} iterations\")\n return min_dist\n\n def distance_to_with_sorting(self, other: \"ReactionTreeWrapper\") -> float:\n \"\"\"\n Compute the distance to another tree, by simpling sorting the children\n of both trees. 
This is not guaranteed to return the minimum distance.\n\n :param other: another tree to calculate distance to\n :return: the distance\n \"\"\"\n config = AptedConfig(sort_children=True)\n return Apted(self.first_tree, other.first_tree, config).compute_edit_distance()\n\n def _add_mol_fingerprints(self, tree: Dict[str, Any]) -> None:\n mol = Chem.MolFromSmiles(tree[\"smiles\"])\n rd_fp = AllChem.GetMorganFingerprintAsBitVect(mol, *self._fp_params)\n tree[\"fingerprint\"] = np.zeros((1,), dtype=np.int8)\n DataStructs.ConvertToNumpyArray(rd_fp, tree[\"fingerprint\"])\n tree[\"sort_key\"] = \"\".join(f\"{digit}\" for digit in tree[\"fingerprint\"])\n if \"children\" not in tree:\n tree[\"children\"] = []\n\n for child in tree[\"children\"]:\n for grandchild in child[\"children\"]:\n self._add_mol_fingerprints(grandchild)\n\n def _add_rxn_fingerprint(self, node: StrDict, parent: StrDict) -> None:\n node[\"fingerprint\"] = parent[\"fingerprint\"].copy()\n for reactant in node[\"children\"]:\n node[\"fingerprint\"] -= reactant[\"fingerprint\"]\n node[\"sort_key\"] = \"\".join(f\"{digit}\" for digit in node[\"fingerprint\"])\n\n for child in node[\"children\"]:\n for grandchild in child.get(\"children\", []):\n self._add_rxn_fingerprint(grandchild, child)\n\n def _create_all_trees(self) -> None:\n self._trees = []\n # Iterate over all possible combinations of child order\n for order_list in itertools.product(*self._node_index_list):\n self._trees.append(\n self._create_tree_recursively(self._base_tree, list(order_list))\n )\n\n def _create_tree_recursively(\n self,\n node: StrDict,\n order_list: List[List[int]],\n ) -> StrDict:\n new_tree = self._make_base_copy(node)\n children = node.get(\"children\", [])\n if children:\n child_order = order_list.pop(0)\n assert len(child_order) == len(children)\n new_children = [\n self._create_tree_recursively(child, order_list) for child in children\n ]\n new_tree[\"children\"] = [new_children[idx] for idx in child_order]\n return new_tree\n\n def _distance_iter_exhaustive(self, other: \"ReactionTreeWrapper\") -> _FloatIterator:\n self._logger.debug(\n f\"APTED: Exhaustive search. {len(self.trees)} {len(other.trees)}\"\n )\n config = AptedConfig(randomize=False)\n for tree1, tree2 in itertools.product(self.trees, other.trees):\n yield Apted(tree1, tree2, config).compute_edit_distance()\n\n def _distance_iter_random(\n self, other: \"ReactionTreeWrapper\", ntimes: int\n ) -> _FloatIterator:\n self._logger.debug(\n f\"APTED: Heuristic search. {len(self.trees)} {len(other.trees)}\"\n )\n config = AptedConfig(randomize=False)\n yield Apted(self.first_tree, other.first_tree, config).compute_edit_distance()\n\n config = AptedConfig(randomize=True)\n for _ in range(ntimes):\n yield Apted(\n self.first_tree, other.first_tree, config\n ).compute_edit_distance()\n\n def _distance_iter_semi_exhaustive(\n self, other: \"ReactionTreeWrapper\"\n ) -> _FloatIterator:\n self._logger.debug(\n f\"APTED: Semi-exhaustive search. 
{len(self.trees)} {len(other.trees)}\"\n )\n if len(self.trees) < len(other.trees):\n first_wrapper = self\n second_wrapper = other\n else:\n first_wrapper = other\n second_wrapper = self\n\n config = AptedConfig(randomize=False)\n for tree1 in first_wrapper.trees:\n yield Apted(\n tree1, second_wrapper.first_tree, config\n ).compute_edit_distance()\n\n def _inspect_tree(self) -> Tuple[int, List[List[int]]]:\n \"\"\"\n Find the number of children for each node in the tree, which\n will be used to compute the number of possible combinations of child orders\n\n Also accumulate the possible child orders for the nodes.\n \"\"\"\n\n def _recurse_tree(node):\n children = node.get(\"children\", [])\n nchildren = len(children)\n permutations.append(math.factorial(nchildren))\n\n if nchildren > 0:\n node_index_list.append(list(self._index_permutations[nchildren]))\n for child in children:\n _recurse_tree(child)\n\n permutations: List[int] = []\n node_index_list: List[List[int]] = []\n _recurse_tree(self._base_tree)\n if not permutations:\n return 0, []\n return int(np.prod(permutations)), node_index_list\n\n @staticmethod\n def _make_base_copy(node: StrDict) -> StrDict:\n return {\n \"type\": node[\"type\"],\n \"smiles\": node[\"smiles\"],\n \"fingerprint\": node[\"fingerprint\"],\n \"sort_key\": node[\"sort_key\"],\n \"children\": [],\n }\n\n @staticmethod\n def _remove_children_nodes(tree: StrDict) -> StrDict:\n new_tree = ReactionTreeWrapper._make_base_copy(tree)\n\n if tree.get(\"children\"):\n new_tree[\"children\"] = []\n for child in tree[\"children\"]:\n new_tree[\"children\"].extend(\n [\n ReactionTreeWrapper._remove_children_nodes(grandchild)\n for grandchild in child.get(\"children\", [])\n ]\n )\n return new_tree\n" ]
[ [ "numpy.zeros", "numpy.prod" ] ]
jkroenke/pyunicorn
[ "0d0de845587879e303d738b62e7831058e28e596" ]
[ "pyunicorn/funcnet/coupling_analysis_pure_python.py" ]
[ "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# This file is part of pyunicorn.\n# Copyright (C) 2008--2019 Jonathan F. Donges and pyunicorn authors\n# URL: <http://www.pik-potsdam.de/members/donges/software>\n# License: BSD (3-clause)\n#\n# Please acknowledge and cite the use of this software and its authors\n# when results are used in publications or published elsewhere.\n#\n# You can use the following reference:\n# J.F. Donges, J. Heitzig, B. Beronov, M. Wiedermann, J. Runge, Q.-Y. Feng,\n# L. Tupikina, V. Stolbova, R.V. Donner, N. Marwan, H.A. Dijkstra,\n# and J. Kurths, \"Unified functional network and nonlinear time series analysis\n# for complex systems science: The pyunicorn package\"\n\n\"\"\"\nProvides classes for analyzing spatially embedded complex networks, handling\nmultivariate data and generating time series surrogates.\n\nWritten by Jakob Runge.\nCMSI Method Reference: [Pompe2011]_\n\"\"\"\n\n# array object and fast numerics\nimport numpy\n\n\n#\n# Define class CouplingAnalysisPurePython\n#\n\nclass CouplingAnalysisPurePython:\n\n \"\"\"\n Contains methods to calculate coupling matrices from large arrays\n of scalar time series.\n\n Comprises linear and information theoretic measures, lagged\n and directed (causal) couplings.\n \"\"\"\n\n #\n # Definitions of internal methods\n #\n\n def __init__(self, dataarray, only_tri=False, silence_level=0):\n \"\"\"\n Initialize an instance of CouplingAnalysisPurePython.\n\n Possible choices for only_tri:\n - \"True\" will calculate only the upper triangle of the coupling\n matrix, excluding the diagonal, assuming symmetry (not for directed\n measures)\n - \"False\" will calculate the whole matrix (asymmetry somes from\n different integration ranges)\n\n :type dataarray: 4D, 3D or 2D Numpy array [time, index, index] or\n [time, index]\n :arg dataarray: The time series array with time in first dimension\n :arg bool only_tri: Symmetric/asymmetric assumption on coupling matrix.\n :arg int silence_level: The inverse level of verbosity of the object.\n \"\"\"\n\n # only_tri will calculate the upper triangle excluding the diagonal\n # only. 
This assumes stationarity on the time series\n self.only_tri = only_tri\n\n # Set silence level\n self.silence_level = silence_level\n\n # Flatten observable anomaly array along lon/lat dimension to allow\n # for more convinient indexing and transpose the whole array as this\n # is faster in loops\n if numpy.ndim(dataarray) == 4:\n (self.total_time, n_lev, n_lat, n_lon) = dataarray.shape\n self.N = n_lev * n_lat * n_lon\n self.dataarray = numpy.\\\n fastCopyAndTranspose(dataarray.reshape(-1, self.N))\n if numpy.ndim(dataarray) == 3:\n (self.total_time, n_lat, n_lon) = dataarray.shape\n self.N = n_lat * n_lon\n self.dataarray = numpy.\\\n fastCopyAndTranspose(dataarray.reshape(-1, self.N))\n\n elif numpy.ndim(dataarray) == 2:\n (self.total_time, self.N) = dataarray.shape\n self.dataarray = numpy.fastCopyAndTranspose(dataarray)\n\n else:\n print(\"irregular array shape...\")\n self.dataarray = numpy.fastCopyAndTranspose(dataarray)\n\n # factorials below 10 in a list for permutation patterns\n self.factorial = \\\n numpy.array([1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880])\n self.patternized = False\n self.has_fft = False\n self.originalFFT = None\n\n # lag_mode dict\n self.lag_modi = {\"all\": 0, \"sum\": 1, \"max\": 2}\n\n def __str__(self):\n \"\"\"\n Return a string representation of the CouplingAnalysisPurePython\n object.\n \"\"\"\n shape = self.dataarray.shape\n return 'CouplingAnalysisPurePython: %i variables, %i timesteps.' % (\n shape[0], shape[1])\n\n #\n # Define methods to calculate correlation strength and lags\n #\n\n #\n # Routines for calculating Cross Correlation\n #\n\n def cross_correlation(self, tau_max=0, lag_mode='all'):\n \"\"\"\n Returns the normalized cross correlation from all pairs of nodes from\n a range of time lags.\n\n The calculation ranges are shown below::\n\n (-------------------------total_time--------------------------)\n (---tau_max---)(---------corr_range------------)(---tau_max---)\n\n CC is calculated about corr_range and with the other time series\n shifted by tau\n\n Possible choices for lag_mode:\n\n - \"all\" will return the full function for all lags, possible large\n memory need if only_tri is True, only the upper triangle contains the\n values, the lower one is zeros\n - \"sum\" will return the sum over positive and negative lags seperatly,\n each inclunding tau=0 corrmat[0] is the positive sum, corrmat[1] the\n negative sum\n - \"max\" will return only the maximum coupling (in corrmat[0]) and its\n lag (in corrmat[1])\n\n :arg int tau_max: maximum lag in both directions, including last lag\n :arg str lag_mode: the output mode\n :rtype: 3D numpy array (float) [index, index, index]\n :return: correlation matrix with different lag_mode choices\n \"\"\"\n # Normalize anomaly time series to zero mean and unit variance for all\n # lags, array contains normalizations for all lags\n corr_range = self.total_time - 2*tau_max\n normalized_array = numpy.empty((2*tau_max + 1, self.N, corr_range),\n dtype=\"float32\")\n\n for t in range(2*tau_max + 1):\n # Remove mean value from time series at each vertex (grid point)\n normalized_array[t] = self.dataarray[:, t:t+corr_range] - \\\n self.dataarray[:, t:t+corr_range].\\\n mean(axis=1).reshape(self.N, 1)\n\n # Normalize the variance of anomalies to one\n normalized_array[t] /= normalized_array[t].\\\n std(axis=1).reshape(self.N, 1)\n\n # Correct for grid points with zero variance in their time series\n normalized_array[t][numpy.isnan(normalized_array[t])] = 0\n\n return self._calculate_cc(normalized_array, 
corr_range=corr_range,\n tau_max=tau_max, lag_mode=lag_mode)\n\n def shuffled_surrogate_for_cc(self, fourier=False, tau_max=1,\n lag_mode='all'):\n \"\"\"\n Returns a correlation matrix calculated with an independently shuffled\n surrogate of the dataarray of length corr_range for all taus.\n\n :arg int corr_range: length of sample\n :arg int tau_max: maximum lag in both directions, including last lag\n :arg str lag_mode: output mode\n :rtype: 3D numpy array (float) [index, index, index]\n :return: correlation matrix with different lag_mode choices\n \"\"\"\n corr_range = self.total_time - 2*tau_max\n\n # Shuffle a copy of dataarray separatly for each node\n array = numpy.copy(self.dataarray)\n if fourier:\n array = self.correlatedNoiseSurrogates(array)\n else:\n for i in range(self.N):\n numpy.random.shuffle(array[i])\n\n sample_array = numpy.zeros((1, self.N, corr_range), dtype=\"float32\")\n\n sample_array[0] = array[:, :corr_range]\n sample_array[0] -= sample_array[0].mean(axis=1).reshape(self.N, 1)\n sample_array[0] /= sample_array[0].std(axis=1).reshape(self.N, 1)\n sample_array[0, numpy.isnan(sample_array[0])] = 0\n\n res = self._calculate_cc(sample_array, corr_range=corr_range,\n tau_max=0, lag_mode='all')\n\n if lag_mode == 'all':\n corrmat = numpy.repeat(res, 2*tau_max + 1, axis=0)\n elif lag_mode == 'sum':\n corrmat = numpy.array([abs(res[0]), abs(res[0])]) * (tau_max+1.)\n elif lag_mode == 'max':\n corrmat = numpy.array([abs(res[0]),\n numpy.random.randint(-tau_max, tau_max+1,\n (self.N, self.N))])\n\n return corrmat\n\n def time_surrogate_for_cc(self, sample_range=100, tau_max=1,\n lag_mode='all'):\n \"\"\"\n Returns a joint shuffled surrogate of the full dataarray of length\n sample_range for all taus.\n\n Used for time evolution analysis. 
First one initializes the\n CouplingAnalysis class with the full dataarray and then this function\n is called for every single surrogate.\n\n :arg int sample_range: length of sample\n :arg int tau_max: maximum lag in both directions, including last lag\n :arg str lag_mode: output mode\n :rtype: 3D numpy array (float) [index, index, index]\n :return: correlation matrix with different lag_mode choices\n \"\"\"\n\n perm = numpy.random.permutation(\n range(tau_max, self.total_time - tau_max))[:sample_range]\n\n sample_array = numpy.empty((2*tau_max + 1, self.N, sample_range),\n dtype=\"float32\")\n\n for t in range(2 * tau_max + 1):\n tau = t - tau_max\n sample_array[t] = self.dataarray[:, perm + tau]\n sample_array[t] -= sample_array[t].mean(axis=1).reshape(self.N, 1)\n sample_array[t] /= sample_array[t].std(axis=1).reshape(self.N, 1)\n sample_array[t][numpy.isnan(sample_array[t])] = 0\n\n return self._calculate_cc(sample_array, corr_range=sample_range,\n tau_max=tau_max, lag_mode=lag_mode)\n\n def _calculate_cc(self, array, corr_range, tau_max, lag_mode):\n \"\"\"\n Returns the CC matrix.\n\n :arg int tau_max: maximum lag in both directions, including last lag\n :arg str lag_mode: output mode\n :rtype: 3D numpy array (float) [index, index, index]\n :return: correlation matrix with different lag_mode choices\n\n ## lag_mode dict\n mode = self.lag_modi[lag_mode]\n \"\"\"\n\n # lag_mode dict\n mode = self.lag_modi[lag_mode]\n only_tri = int(self.only_tri)\n\n if lag_mode == 'all':\n corrmat = numpy.zeros((2*tau_max + 1, self.N, self.N),\n dtype='float32')\n elif lag_mode == 'sum':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n elif lag_mode == 'max':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n\n # loop over all node pairs, NOT symmetric due to time shifts!\n for i in range(self.N-only_tri):\n for j in range((i+1)*only_tri, self.N):\n\n if mode == 2:\n maxcross = 0.0\n argmax = 0\n\n # loop over taus INCLUDING the last tau value\n for t in range(2*tau_max+1):\n\n # here the actual cross correlation is calculated\n crossij = (array[tau_max, i, :] * array[t, j, :]).mean()\n\n # fill in values in matrix depending on lag_mode\n if mode == 0:\n corrmat[t, i, j] = crossij\n\n elif mode == 1:\n if t <= tau_max:\n corrmat[1, i, j] += numpy.abs(crossij)\n if t >= tau_max:\n corrmat[0, i, j] += numpy.abs(crossij)\n\n elif mode == 2:\n # calculate max and argmax by comparing to previous\n # value and storing max\n if numpy.abs(crossij) > maxcross:\n maxcross = numpy.abs(crossij)\n argmax = t\n\n if mode == 2:\n corrmat[0, i, j] = maxcross\n corrmat[1, i, j] = argmax - tau_max\n\n if self.only_tri:\n if lag_mode == 'all':\n corrmat = corrmat + corrmat.transpose(0, 2, 1)[::-1]\n elif lag_mode == 'sum':\n corrmat[0] += corrmat[1].transpose()\n corrmat[1] = corrmat[0].transpose()\n elif lag_mode == 'max':\n corrmat[0] += corrmat[0].transpose()\n corrmat[1] -= corrmat[1].transpose()\n\n return corrmat\n\n #\n # Routines for calculating Mutual Information with adaptive bins\n #\n\n def mutual_information(self, bins=16, tau_max=0, lag_mode='all'):\n \"\"\"\n Returns the normalized mutual information from all pairs of nodes from\n a range of time lags.\n\n MI = H_x + H_y - H_xy\n\n Uses adaptive bins, where each marginal bin contains the same number of\n samples. 
Then the marginal entropies have equal probable distributions\n H_x = H_y = log(bins)\n\n The calculation ranges are shown below::\n\n (-------------------------total_time--------------------------)\n (---tau_max---)(---------corr_range------------)(---tau_max---)\n\n MI is calculated about corr_range and with the other time series\n shifted by tau\n\n Possible choices for lag_mode:\n\n - \"all\" will return the full function for all lags, possible large\n memory need if only_tri is True, only the upper triangle contains the\n values, the lower one is zeros\n - \"sum\" will return the sum over positive and negative lags seperatly,\n each inclunding tau=0 corrmat[0] is the positive sum, corrmat[1] the\n negative sum\n - \"max\" will return only the maximum coupling (in corrmat[0]) and its\n lag (in corrmat[1])\n\n :arg int bins: number of bins for estimating MI\n :arg int tau_max: maximum lag in both directions, including last lag\n :arg str lag_mode: output mode\n :rtype: 3D numpy array (float) [index, index, index]\n :return: correlation matrix with different lag_mode choices\n \"\"\"\n if bins < 255:\n dtype = 'uint8'\n else:\n dtype = 'int16'\n\n # Normalize anomaly time series to zero mean and unit variance for all\n # lags, array contains normalizations for all lags\n corr_range = self.total_time - 2*tau_max\n\n # get the bin quantile steps\n bin_edge = numpy.ceil(corr_range/float(bins))\n\n symbolic_array = numpy.empty((2*tau_max + 1, self.N, corr_range),\n dtype=dtype)\n\n for t in range(2*tau_max + 1):\n\n array = self.dataarray[:, t:t+corr_range]\n\n # get the lower edges of the bins for every time series\n edges = numpy.sort(array, axis=1)[:, ::bin_edge]\n bins = edges.shape[1]\n\n # This gives the symbolic time series\n symbolic_array[t] = \\\n (array.reshape(self.N, corr_range, 1)\n >= edges.reshape(self.N, 1, bins)).sum(axis=2) - 1\n\n return self._calculate_mi(symbolic_array, corr_range=corr_range,\n bins=bins, tau_max=tau_max,\n lag_mode=lag_mode)\n\n def mutual_information_edges(self, bins=16, tau=0, lag_mode='all'):\n \"\"\"\n Returns the normalized mutual information from all pairs of nodes from\n a range of time lags.\n\n MI = H_x + H_y - H_xy\n\n Uses adaptive bins, where each marginal bin contains the same number of\n samples. 
Then the marginal entropies have equal probable distributions\n H_x = H_y = log(bins)\n\n The calculation ranges are shown below::\n\n (-------------------------total_time--------------------------)\n (---tau_max---)(---------corr_range------------)(---tau_max---)\n\n MI is calculated about corr_range and with the other time series\n shifted by tau\n\n Possible choices for lag_mode:\n\n - \"all\" will return the full function for all lags, possible large\n memory need if only_tri is True, only the upper triangle contains the\n values, the lower one is zeros\n - \"sum\" will return the sum over positive and negative lags seperatly,\n each inclunding tau=0 corrmat[0] is the positive sum, corrmat[1] the\n negative sum\n - \"max\" will return only the maximum coupling (in corrmat[0]) and its\n lag (in corrmat[1])\n\n :arg int bins: number of bins for estimating MI\n :arg int tau_max: maximum lag in both directions, including last lag\n :arg str lag_mode: output mode\n :rtype: 2D numpy array (float) [index, index]\n :return: bin edges for zero lag\n \"\"\"\n\n # get the bin quantile steps\n bin_edge = numpy.ceil(self.total_time/float(bins))\n\n array = self.dataarray[:, :]\n array[:-tau, 1] = array[tau, 1]\n\n # get the lower edges of the bins for every time series\n edges = numpy.sort(array, axis=1)[:, ::bin_edge]\n bins = edges.shape[1]\n\n return edges\n\n def shuffled_surrogate_for_mi(self, fourier=False, bins=16, tau_max=0,\n lag_mode='all'):\n \"\"\"\n Returns a shuffled surrogate of normalized mutual information from all\n pairs of nodes from a range of time lags.\n\n :arg int bins: number of bins for estimating MI\n :arg int tau_max: maximum lag in both directions, including last lag\n :arg str lag_mode: output mode\n :rtype: 3D numpy array (float) [index, index, index]\n :return: correlation matrix with different lag_mode choices\n \"\"\"\n if bins < 255:\n dtype = 'uint8'\n else:\n dtype = 'int16'\n\n # Normalize anomaly time series to zero mean and unit variance for all\n # lags, array contains normalizations for all lags\n corr_range = self.total_time - 2*tau_max\n\n # Shuffle a copy of dataarray seperatly for each node\n array = numpy.copy(self.dataarray)\n if fourier:\n array = self.correlatedNoiseSurrogates(array)\n else:\n for i in range(self.N):\n numpy.random.shuffle(array[i])\n\n # get the bin quantile steps\n bin_edge = numpy.ceil(corr_range/float(bins))\n\n symbolic_array = numpy.empty((1, self.N, corr_range), dtype=dtype)\n\n array = array[:, :corr_range]\n\n # get the lower edges of the bins for every time series\n edges = numpy.sort(array, axis=1)[:, ::bin_edge]\n bins = edges.shape[1]\n\n # This gives the symbolic time series\n symbolic_array[0] = \\\n (array.reshape(self.N, corr_range, 1)\n >= edges.reshape(self.N, 1, bins)).sum(axis=2) - 1\n\n res = self._calculate_mi(symbolic_array, corr_range=corr_range,\n bins=bins, tau_max=0, lag_mode='all')\n\n if lag_mode == 'all':\n corrmat = numpy.repeat(res, 2*tau_max + 1, axis=0)\n elif lag_mode == 'sum':\n corrmat = numpy.array([res[0], res[0]]) * (tau_max+1.)\n elif lag_mode == 'max':\n corrmat = numpy.array(\n [res[0], numpy.random.randint(-tau_max, tau_max+1,\n (self.N, self.N))])\n\n return corrmat\n\n def time_surrogate_for_mi(self, bins=16, sample_range=100, tau_max=1,\n lag_mode='all'):\n \"\"\"\n Returns a joint shuffled surrogate of the full dataarray of length\n sample_range for all taus.\n\n Used for time evolution analysis. 
First one initializes the\n CouplingAnalysis class with the full dataarray and then this function\n is called for every single surrogate.\n\n :arg int sample_range: length of sample\n :arg int bins: number of bins for estimating MI\n :arg int tau_max: maximum lag in both directions, including last lag\n :arg str lag_mode: output mode\n :rtype: 3D numpy array (float) [index, index, index]\n :return: correlation matrix with different lag_mode choices\n \"\"\"\n\n if bins < 255:\n dtype = 'uint8'\n else:\n dtype = 'int16'\n\n perm = numpy.random.permutation(\n range(tau_max, self.total_time - tau_max))[:sample_range]\n\n # get the bin quantile steps\n bin_edge = numpy.ceil(sample_range/float(bins))\n\n symbolic_array = numpy.empty((2*tau_max + 1, self.N, sample_range),\n dtype=dtype)\n\n for t in range(2*tau_max + 1):\n tau = t - tau_max\n\n array = self.dataarray[:, perm + tau]\n\n # get the lower edges of the bins for every time series\n edges = numpy.sort(array, axis=1)[:, ::bin_edge]\n bins = edges.shape[1]\n\n # This gives the symbolic time series\n symbolic_array[t] = \\\n (array.reshape(self.N, sample_range, 1)\n >= edges.reshape(self.N, 1, bins)).sum(axis=2) - 1\n\n return self._calculate_mi(symbolic_array, corr_range=sample_range,\n bins=bins, tau_max=tau_max,\n lag_mode=lag_mode)\n\n def _calculate_mi(self, array, corr_range, bins, tau_max, lag_mode):\n \"\"\"\n Returns the mi matrix.\n\n :arg int bins: number of bins for estimating MI\n :arg int tau_max: maximum lag in both directions, including last lag\n :arg str lag_mode: output mode\n :rtype: 3D numpy array (float) [index, index, index]\n :return: correlation matrix with different lag_mode choices\n \"\"\"\n\n # lag_mode dict\n mode = self.lag_modi[lag_mode]\n only_tri = int(self.only_tri)\n\n # Initialize\n hist2D = numpy.zeros((bins, bins), dtype=\"int32\")\n if lag_mode == 'all':\n corrmat = numpy.zeros((2*tau_max + 1, self.N, self.N),\n dtype='float32')\n elif lag_mode == 'sum':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n elif lag_mode == 'max':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n\n # Precalculation of the log\n gfunc = numpy.zeros(corr_range+1)\n for t in range(1, corr_range + 1):\n gfunc[t] = t*numpy.log(t)\n\n # loop over all node pairs, NOT symmetric due to time shifts!\n for i in range(self.N-only_tri):\n for j in range((i+1)*only_tri, self.N):\n\n if mode == 2:\n maxcross = 0.0\n argmax = 0\n\n # loop over taus from -tau_max to tau_max INCLUDING the last\n # tau value\n for t in range(2*tau_max + 1):\n tau = t - tau_max\n\n # here the joint probability distribution is calculated\n for k in range(corr_range):\n indexi = array[tau_max, i, k]\n indexj = array[t, j, k]\n hist2D[indexi, indexj] += 1\n\n # here the joint entropy is calculated by summing over all\n # pattern combinations\n jointent = 0.0\n for l in range(bins):\n for m in range(bins):\n jointent -= gfunc[hist2D[l, m]]\n hist2D[l, m] = 0\n\n jointent /= float(corr_range)\n jointent += numpy.log(float(corr_range))\n\n # Mutual Information is...\n mi = 0.0\n mi = 2. 
* numpy.log(bins) - jointent\n\n # norm the mi\n mi /= numpy.log(bins)\n\n # fill in values in matrix depending on lag_mode\n if mode == 0:\n corrmat[t, i, j] = mi\n\n elif mode == 1:\n if t <= tau_max:\n corrmat[1, i, j] += mi\n if t >= tau_max:\n corrmat[0, i, j] += mi\n\n elif mode == 2:\n # calculate max and argmax by comparing to previous\n # value and storing max\n if mi > maxcross:\n maxcross = mi\n argmax = tau\n\n if mode == 2:\n corrmat[0, i, j] = maxcross\n corrmat[1, i, j] = argmax\n\n if self.only_tri:\n if lag_mode == 'all':\n corrmat = corrmat + corrmat.transpose(0, 2, 1)[::-1]\n if lag_mode == 'sum':\n corrmat[0] += corrmat[1].transpose()\n corrmat[1] = corrmat[0].transpose()\n elif lag_mode == 'max':\n corrmat[0] += corrmat[0].transpose()\n corrmat[1] -= corrmat[1].transpose()\n\n return corrmat\n\n #\n # A subroutine for fourier surrogates (from J Donges)\n #\n\n def correlatedNoiseSurrogates(self, original):\n \"\"\"\n Generates surrogates by Fourier transforming the original time series,\n randomizing the phases and then applying an inverse Fourier transform.\n Correlated noise surrogates share their power spectrum and\n autocorrelation function with the original time series.\n\n :type original: 2D array\n :arg original: dim. 0 is index of time series, dim. 1 is time\n :return: surrogate time series (same dimensions as original)\n \"\"\"\n\n # Calculate FFT of original time series\n # The FFT of the original data has to be calculated only once, so it\n # is stored in self.originalFFT\n if self.has_fft:\n surrogates = self.originalFFT\n else:\n surrogates = numpy.fft.fft(original, axis=1)\n self.originalFFT = surrogates\n self.has_fft = True\n\n (nNodes, ntime) = original.shape\n\n if (ntime % 2) == 0:\n lenPhase = (ntime - 2) / 2\n else:\n lenPhase = (ntime - 1) / 2\n\n # Generate random phases uniformly distributed in the interval\n # [0, 2*Pi]. Guarantee that the phases for positive and negative\n # frquencies are the same to obtain real surrogates in the end!\n phases = numpy.random.uniform(low=0, high=2 * numpy.pi,\n size=(nNodes, lenPhase))\n\n # Add random phases uniformly distributed in the interval [0, 2*Pi]\n surrogates[:, 1:lenPhase+1] *= numpy.exp(1j * phases)\n\n # Discriminate between even and uneven number of samples\n # Note that the output of fft has the following form:\n # - Even sample number: (mean, pos. freq, nyquist freq, neg. freq)\n # - Odd sample number: (mean, pos. freq, neg. freq)\n if (ntime % 2) == 0:\n surrogates[:, lenPhase+2:ntime] = \\\n numpy.flipud(surrogates[:, 1:lenPhase+1].conjugate())\n else:\n surrogates[:, lenPhase+1:ntime] = \\\n numpy.flipud(surrogates[:, 1:lenPhase+1].conjugate())\n\n # Calculate IFFT and take the real part, the remaining imaginary part\n # is due to numerical errors\n return numpy.ascontiguousarray(numpy.real(\n numpy.fft.ifft(surrogates, axis=1)))\n" ]
[ [ "numpy.random.uniform", "numpy.fft.fft", "numpy.sort", "numpy.random.shuffle", "numpy.empty", "numpy.zeros", "numpy.abs", "numpy.repeat", "numpy.copy", "numpy.exp", "numpy.fft.ifft", "numpy.log", "numpy.ndim", "numpy.isnan", "numpy.array", "numpy.random.randint", "numpy.fastCopyAndTranspose" ] ]
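# The Fourier-surrogate construction in correlatedNoiseSurrogates can likewise be
# tested in isolation. A minimal sketch (plain NumPy; it leans on numpy.fft.rfft/irfft
# to enforce the conjugate symmetry that the method above maintains by hand),
# preserving each row's power spectrum while randomizing phases:

import numpy as np

def phase_randomize(x):
    # x: (N, T) real array -> surrogates with identical amplitude spectra
    t = x.shape[1]
    spec = np.fft.rfft(x, axis=1)
    phases = np.random.uniform(0, 2 * np.pi, size=spec.shape)
    phases[:, 0] = 0.0                        # keep the mean bin untouched
    if t % 2 == 0:
        phases[:, -1] = 0.0                   # keep the Nyquist bin real
    return np.fft.irfft(spec * np.exp(1j * phases), n=t, axis=1)

x = np.cumsum(np.random.randn(2, 256), axis=1)
s = phase_randomize(x)
# amplitude spectra match up to numerical error
assert np.allclose(np.abs(np.fft.rfft(x, axis=1)), np.abs(np.fft.rfft(s, axis=1)))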
robotanica/moveo_ros
[ "12d9e81dd393f3b540e6b9f771ce332a73c86472" ]
[ "object_detector_app/object_detection/utils/ops.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A module for helper tensorflow ops.\"\"\"\nimport math\n\nimport tensorflow as tf\n\nfrom object_detection.core import box_list\nfrom object_detection.core import box_list_ops\nfrom object_detection.core import standard_fields as fields\nfrom object_detection.utils import static_shape\n\n\ndef expanded_shape(orig_shape, start_dim, num_dims):\n \"\"\"Inserts multiple ones into a shape vector.\n\n Inserts an all-1 vector of length num_dims at position start_dim into a shape.\n Can be combined with tf.reshape to generalize tf.expand_dims.\n\n Args:\n orig_shape: the shape into which the all-1 vector is added (int32 vector)\n start_dim: insertion position (int scalar)\n num_dims: length of the inserted all-1 vector (int scalar)\n Returns:\n An int32 vector of length tf.size(orig_shape) + num_dims.\n \"\"\"\n with tf.compat.v1.name_scope('ExpandedShape'):\n start_dim = tf.expand_dims(start_dim, 0) # scalar to rank-1\n before = tf.slice(orig_shape, [0], start_dim)\n add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)\n after = tf.slice(orig_shape, start_dim, [-1])\n new_shape = tf.concat([before, add_shape, after], 0)\n return new_shape\n\n\ndef normalized_to_image_coordinates(normalized_boxes, image_shape,\n parallel_iterations=32):\n \"\"\"Converts a batch of boxes from normal to image coordinates.\n\n Args:\n normalized_boxes: a float32 tensor of shape [None, num_boxes, 4] in\n normalized coordinates.\n image_shape: a float32 tensor of shape [4] containing the image shape.\n parallel_iterations: parallelism for the map_fn op.\n\n Returns:\n absolute_boxes: a float32 tensor of shape [None, num_boxes, 4] containg the\n boxes in image coordinates.\n \"\"\"\n def _to_absolute_coordinates(normalized_boxes):\n return box_list_ops.to_absolute_coordinates(\n box_list.BoxList(normalized_boxes),\n image_shape[1], image_shape[2], check_range=False).get()\n\n absolute_boxes = tf.map_fn(\n _to_absolute_coordinates,\n elems=(normalized_boxes),\n dtype=tf.float32,\n parallel_iterations=parallel_iterations,\n back_prop=True)\n return absolute_boxes\n\n\ndef meshgrid(x, y):\n \"\"\"Tiles the contents of x and y into a pair of grids.\n\n Multidimensional analog of numpy.meshgrid, giving the same behavior if x and y\n are vectors. Generally, this will give:\n\n xgrid(i1, ..., i_m, j_1, ..., j_n) = x(j_1, ..., j_n)\n ygrid(i1, ..., i_m, j_1, ..., j_n) = y(i_1, ..., i_m)\n\n Keep in mind that the order of the arguments and outputs is reverse relative\n to the order of the indices they go into, done for compatibility with numpy.\n The output tensors have the same shapes. Specifically:\n\n xgrid.get_shape() = y.get_shape().concatenate(x.get_shape())\n ygrid.get_shape() = y.get_shape().concatenate(x.get_shape())\n\n Args:\n x: A tensor of arbitrary shape and rank. 
xgrid will contain these values\n       varying in its last dimensions.\n    y: A tensor of arbitrary shape and rank. ygrid will contain these values\n       varying in its first dimensions.\n  Returns:\n    A tuple of tensors (xgrid, ygrid).\n  \"\"\"\n  with tf.compat.v1.name_scope('Meshgrid'):\n    x = tf.convert_to_tensor(value=x)\n    y = tf.convert_to_tensor(value=y)\n    x_exp_shape = expanded_shape(tf.shape(input=x), 0, tf.rank(y))\n    y_exp_shape = expanded_shape(tf.shape(input=y), tf.rank(y), tf.rank(x))\n\n    xgrid = tf.tile(tf.reshape(x, x_exp_shape), y_exp_shape)\n    ygrid = tf.tile(tf.reshape(y, y_exp_shape), x_exp_shape)\n    new_shape = y.get_shape().concatenate(x.get_shape())\n    xgrid.set_shape(new_shape)\n    ygrid.set_shape(new_shape)\n\n    return xgrid, ygrid\n\n\ndef pad_to_multiple(tensor, multiple):\n  \"\"\"Returns the tensor zero padded to the specified multiple.\n\n  Appends 0s to the end of the first and second dimension (height and width) of\n  the tensor until both dimensions are a multiple of the input argument\n  'multiple'. E.g. given an input tensor of shape [1, 3, 5, 1] and an input\n  multiple of 4, PadToMultiple will append 0s so that the resulting tensor will\n  be of shape [1, 4, 8, 1].\n\n  Args:\n    tensor: rank 4 float32 tensor, where\n      tensor -> [batch_size, height, width, channels].\n    multiple: the multiple to pad to.\n\n  Returns:\n    padded_tensor: the tensor zero padded to the specified multiple.\n  \"\"\"\n  tensor_shape = tensor.get_shape()\n  batch_size = static_shape.get_batch_size(tensor_shape)\n  tensor_height = static_shape.get_height(tensor_shape)\n  tensor_width = static_shape.get_width(tensor_shape)\n  tensor_depth = static_shape.get_depth(tensor_shape)\n\n  if batch_size is None:\n    batch_size = tf.shape(input=tensor)[0]\n\n  if tensor_height is None:\n    tensor_height = tf.shape(input=tensor)[1]\n    padded_tensor_height = tf.cast(\n        tf.math.ceil(tf.cast(tensor_height, dtype=tf.float32) / tf.cast(multiple, dtype=tf.float32)), dtype=tf.int32) * multiple\n  else:\n    padded_tensor_height = int(\n        math.ceil(float(tensor_height) / multiple) * multiple)\n\n  if tensor_width is None:\n    tensor_width = tf.shape(input=tensor)[2]\n    padded_tensor_width = tf.cast(\n        tf.math.ceil(tf.cast(tensor_width, dtype=tf.float32) / tf.cast(multiple, dtype=tf.float32)), dtype=tf.int32) * multiple\n  else:\n    padded_tensor_width = int(\n        math.ceil(float(tensor_width) / multiple) * multiple)\n\n  if tensor_depth is None:\n    tensor_depth = tf.shape(input=tensor)[3]\n\n  # Use tf.concat instead of tf.pad to preserve static shape\n  height_pad = tf.zeros([\n      batch_size, padded_tensor_height - tensor_height, tensor_width,\n      tensor_depth\n  ])\n  padded_tensor = tf.concat([tensor, height_pad], 1)\n  width_pad = tf.zeros([\n      batch_size, padded_tensor_height, padded_tensor_width - tensor_width,\n      tensor_depth\n  ])\n  padded_tensor = tf.concat([padded_tensor, width_pad], 2)\n\n  return padded_tensor\n\n\ndef padded_one_hot_encoding(indices, depth, left_pad):\n  \"\"\"Returns a zero padded one-hot tensor.\n\n  This function converts a sparse representation of indices (e.g., [3]) to a\n  zero padded one-hot representation (e.g., [0, 0, 0, 0, 1] with depth = 4 and\n  left_pad = 1). If `indices` is empty, the result will simply be a tensor of\n  shape (0, depth + left_pad). 
If depth = 0, then this function just returns\n  `None`.\n\n  Args:\n    indices: an integer tensor of shape [num_indices].\n    depth: depth for the one-hot tensor (integer).\n    left_pad: number of zeros to left pad the one-hot tensor with (integer).\n\n  Returns:\n    padded_onehot: a tensor with shape (num_indices, depth + left_pad). Returns\n      `None` if the depth is zero.\n\n  Raises:\n    ValueError: if `indices` does not have rank 1 or if `left_pad` or `depth` are\n      either negative or non-integers.\n\n  TODO: add runtime checks for depth and indices.\n  \"\"\"\n  if depth < 0 or not isinstance(depth, int):\n    raise ValueError('`depth` must be a non-negative integer.')\n  if left_pad < 0 or not isinstance(left_pad, int):\n    raise ValueError('`left_pad` must be a non-negative integer.')\n  if depth == 0:\n    return None\n  if len(indices.get_shape().as_list()) != 1:\n    raise ValueError('`indices` must have rank 1')\n\n  def one_hot_and_pad():\n    one_hot = tf.cast(tf.one_hot(tf.cast(indices, tf.int64), depth,\n                                 on_value=1, off_value=0), tf.float32)\n    return tf.pad(tensor=one_hot, paddings=[[0, 0], [left_pad, 0]], mode='CONSTANT')\n  result = tf.cond(pred=tf.greater(tf.size(input=indices), 0), true_fn=one_hot_and_pad,\n                   false_fn=lambda: tf.zeros((depth + left_pad, 0)))\n  return tf.reshape(result, [-1, depth + left_pad])\n\n\ndef dense_to_sparse_boxes(dense_locations, dense_num_boxes, num_classes):\n  \"\"\"Converts bounding boxes from dense to sparse form.\n\n  Args:\n    dense_locations: a [max_num_boxes, 4] tensor in which only the first k rows\n      are valid bounding box location coordinates, where k is the sum of\n      elements in dense_num_boxes.\n    dense_num_boxes: a [max_num_classes] tensor indicating the counts of\n       various bounding box classes e.g. [1, 0, 0, 2] means that the first\n       bounding box is of class 0 and the second and third bounding boxes are\n       of class 3. The sum of elements in this tensor is the number of valid\n       bounding boxes.\n    num_classes: number of classes\n\n  Returns:\n    box_locations: a [num_boxes, 4] tensor containing only valid bounding\n       boxes (i.e. the first num_boxes rows of dense_locations)\n    box_classes: a [num_boxes] tensor containing the classes of each bounding\n       box (e.g. dense_num_boxes = [1, 0, 0, 2] => box_classes = [0, 3, 3])\n  \"\"\"\n\n  num_valid_boxes = tf.reduce_sum(input_tensor=dense_num_boxes)\n  box_locations = tf.slice(dense_locations,\n                           tf.constant([0, 0]), tf.stack([num_valid_boxes, 4]))\n  tiled_classes = [tf.tile([i], tf.expand_dims(dense_num_boxes[i], 0))\n                   for i in range(num_classes)]\n  box_classes = tf.concat(tiled_classes, 0)\n  box_locations.set_shape([None, 4])\n  return box_locations, box_classes\n\n\ndef indices_to_dense_vector(indices,\n                            size,\n                            indices_value=1.,\n                            default_value=0,\n                            dtype=tf.float32):\n  \"\"\"Creates dense vector with indices set to specific value and rest to zeros.\n\n  This function exists because it is unclear if it is safe to use\n    tf.sparse_to_dense(indices, [size], 1, validate_indices=False)\n  with indices which are not ordered.\n  This function accepts a dynamic size (e.g. 
tf.shape(tensor)[0])\n\n Args:\n indices: 1d Tensor with integer indices which are to be set to\n indices_values.\n size: scalar with size (integer) of output Tensor.\n indices_value: values of elements specified by indices in the output vector\n default_value: values of other elements in the output vector.\n dtype: data type.\n\n Returns:\n dense 1D Tensor of shape [size] with indices set to indices_values and the\n rest set to default_value.\n \"\"\"\n size = tf.cast(size, dtype=tf.int32)\n zeros = tf.ones([size], dtype=dtype) * default_value\n values = tf.ones_like(indices, dtype=dtype) * indices_value\n\n return tf.dynamic_stitch([tf.range(size), tf.cast(indices, dtype=tf.int32)],\n [zeros, values])\n\n\ndef retain_groundtruth(tensor_dict, valid_indices):\n \"\"\"Retains groundtruth by valid indices.\n\n Args:\n tensor_dict: a dictionary of following groundtruth tensors -\n fields.InputDataFields.groundtruth_boxes\n fields.InputDataFields.groundtruth_classes\n fields.InputDataFields.groundtruth_is_crowd\n fields.InputDataFields.groundtruth_area\n fields.InputDataFields.groundtruth_label_types\n fields.InputDataFields.groundtruth_difficult\n valid_indices: a tensor with valid indices for the box-level groundtruth.\n\n Returns:\n a dictionary of tensors containing only the groundtruth for valid_indices.\n\n Raises:\n ValueError: If the shape of valid_indices is invalid.\n ValueError: field fields.InputDataFields.groundtruth_boxes is\n not present in tensor_dict.\n \"\"\"\n input_shape = valid_indices.get_shape().as_list()\n if not (len(input_shape) == 1 or\n (len(input_shape) == 2 and input_shape[1] == 1)):\n raise ValueError('The shape of valid_indices is invalid.')\n valid_indices = tf.reshape(valid_indices, [-1])\n valid_dict = {}\n if fields.InputDataFields.groundtruth_boxes in tensor_dict:\n # Prevents reshape failure when num_boxes is 0.\n num_boxes = tf.maximum(tf.shape(\n input=tensor_dict[fields.InputDataFields.groundtruth_boxes])[0], 1)\n for key in tensor_dict:\n if key in [fields.InputDataFields.groundtruth_boxes,\n fields.InputDataFields.groundtruth_classes]:\n valid_dict[key] = tf.gather(tensor_dict[key], valid_indices)\n # Input decoder returns empty tensor when these fields are not provided.\n # Needs to reshape into [num_boxes, -1] for tf.gather() to work.\n elif key in [fields.InputDataFields.groundtruth_is_crowd,\n fields.InputDataFields.groundtruth_area,\n fields.InputDataFields.groundtruth_difficult,\n fields.InputDataFields.groundtruth_label_types]:\n valid_dict[key] = tf.reshape(\n tf.gather(tf.reshape(tensor_dict[key], [num_boxes, -1]),\n valid_indices), [-1])\n # Fields that are not associated with boxes.\n else:\n valid_dict[key] = tensor_dict[key]\n else:\n raise ValueError('%s not present in input tensor dict.' 
% (\n        fields.InputDataFields.groundtruth_boxes))\n  return valid_dict\n\n\ndef retain_groundtruth_with_positive_classes(tensor_dict):\n  \"\"\"Retains only groundtruth with positive class ids.\n\n  Args:\n    tensor_dict: a dictionary of following groundtruth tensors -\n      fields.InputDataFields.groundtruth_boxes\n      fields.InputDataFields.groundtruth_classes\n      fields.InputDataFields.groundtruth_is_crowd\n      fields.InputDataFields.groundtruth_area\n      fields.InputDataFields.groundtruth_label_types\n      fields.InputDataFields.groundtruth_difficult\n\n  Returns:\n    a dictionary of tensors containing only the groundtruth with positive\n    classes.\n\n  Raises:\n    ValueError: If groundtruth_classes tensor is not in tensor_dict.\n  \"\"\"\n  if fields.InputDataFields.groundtruth_classes not in tensor_dict:\n    raise ValueError('`groundtruth_classes` not in tensor_dict.')\n  keep_indices = tf.compat.v1.where(tf.greater(\n      tensor_dict[fields.InputDataFields.groundtruth_classes], 0))\n  return retain_groundtruth(tensor_dict, keep_indices)\n\n\ndef filter_groundtruth_with_nan_box_coordinates(tensor_dict):\n  \"\"\"Filters out groundtruth whose box coordinates contain NaNs.\n\n  Args:\n    tensor_dict: a dictionary of following groundtruth tensors -\n      fields.InputDataFields.groundtruth_boxes\n      fields.InputDataFields.groundtruth_classes\n      fields.InputDataFields.groundtruth_is_crowd\n      fields.InputDataFields.groundtruth_area\n      fields.InputDataFields.groundtruth_label_types\n\n  Returns:\n    a dictionary of tensors containing only the groundtruth that have valid\n    (non-NaN) bounding boxes.\n  \"\"\"\n  groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]\n  nan_indicator_vector = tf.greater(tf.reduce_sum(input_tensor=tf.cast(\n      tf.math.is_nan(groundtruth_boxes), dtype=tf.int32), axis=[1]), 0)\n  valid_indicator_vector = tf.logical_not(nan_indicator_vector)\n  valid_indices = tf.compat.v1.where(valid_indicator_vector)\n\n  return retain_groundtruth(tensor_dict, valid_indices)\n\n\ndef normalize_to_target(inputs,\n                        target_norm_value,\n                        dim,\n                        epsilon=1e-7,\n                        trainable=True,\n                        scope='NormalizeToTarget',\n                        summarize=True):\n  \"\"\"L2 normalizes the inputs across the specified dimension to a target norm.\n\n  This op implements the L2 Normalization layer introduced in\n  Liu, Wei, et al. \"SSD: Single Shot MultiBox Detector.\"\n  and Liu, Wei, Andrew Rabinovich, and Alexander C. 
Berg.\n  \"Parsenet: Looking wider to see better.\" and is useful for bringing\n  activations from multiple layers in a convnet to a standard scale.\n\n  Note that the rank of `inputs` must be known and the dimension to which\n  normalization is to be applied should be statically defined.\n\n  TODO: Add option to scale by L2 norm of the entire input.\n\n  Args:\n    inputs: A `Tensor` of arbitrary size.\n    target_norm_value: A float value that specifies an initial target norm or\n      a list of floats (whose length must be equal to the depth along the\n      dimension to be normalized) specifying a per-dimension multiplier\n      after normalization.\n    dim: The dimension along which the input is normalized.\n    epsilon: A small value to add to the inputs to avoid dividing by zero.\n    trainable: Whether the norm is trainable or not\n    scope: Optional scope for variable_scope.\n    summarize: Whether or not to add a tensorflow summary for the op.\n\n  Returns:\n    The input tensor normalized to the specified target norm.\n\n  Raises:\n    ValueError: If dim is negative or not smaller than the number of\n      dimensions in 'inputs'.\n    ValueError: If target_norm_value is not a float or a list of floats with\n      length equal to the depth along the dimension to be normalized.\n  \"\"\"\n  with tf.compat.v1.variable_scope(scope, 'NormalizeToTarget', [inputs]):\n    if not inputs.get_shape():\n      raise ValueError('The input rank must be known.')\n    input_shape = inputs.get_shape().as_list()\n    input_rank = len(input_shape)\n    if dim < 0 or dim >= input_rank:\n      raise ValueError(\n          'dim must be non-negative but smaller than the input rank.')\n    if not input_shape[dim]:\n      raise ValueError('input shape should be statically defined along '\n                       'the specified dimension.')\n    depth = input_shape[dim]\n    if not (isinstance(target_norm_value, float) or\n            (isinstance(target_norm_value, list) and\n             len(target_norm_value) == depth) and\n            all([isinstance(val, float) for val in target_norm_value])):\n      raise ValueError('target_norm_value must be a float or a list of floats '\n                       'with length equal to the depth along the dimension to '\n                       'be normalized.')\n    if isinstance(target_norm_value, float):\n      initial_norm = depth * [target_norm_value]\n    else:\n      initial_norm = target_norm_value\n    # tf.contrib does not exist in TF 2.x; tf.compat.v1.get_variable is the\n    # closest compatible replacement for tf.contrib.framework.model_variable\n    target_norm = tf.compat.v1.get_variable(\n        name='weights', dtype=tf.float32,\n        initializer=tf.constant(initial_norm, dtype=tf.float32),\n        trainable=trainable)\n    if summarize:\n      mean = tf.reduce_mean(input_tensor=target_norm)\n      mean = tf.compat.v1.Print(mean, ['NormalizeToTarget:', mean])\n      tf.compat.v1.summary.scalar(tf.compat.v1.get_variable_scope().name, mean)\n    lengths = epsilon + tf.sqrt(tf.reduce_sum(input_tensor=tf.square(inputs), axis=dim, keepdims=True))\n    mult_shape = input_rank*[1]\n    mult_shape[dim] = depth\n    return tf.reshape(target_norm, mult_shape) * tf.truediv(inputs, lengths)\n\n\ndef position_sensitive_crop_regions(image,\n                                    boxes,\n                                    box_ind,\n                                    crop_size,\n                                    num_spatial_bins,\n                                    global_pool,\n                                    extrapolation_value=0.0):\n  \"\"\"Position-sensitive crop and pool rectangular regions from a feature grid.\n\n  The output crops are split into `spatial_bins_y` vertical bins\n  and `spatial_bins_x` horizontal bins. For each intersection of a vertical\n  and a horizontal bin the output values are gathered by performing\n  `tf.image.crop_and_resize` (bilinear resampling) on a separate subset of\n  channels of the image. 
This reduces `depth` by a factor of\n  `(spatial_bins_y * spatial_bins_x)`.\n\n  When global_pool is True, this function implements a differentiable version\n  of position-sensitive RoI pooling used in\n  [R-FCN detection system](https://arxiv.org/abs/1605.06409).\n\n  When global_pool is False, this function implements a differentiable version\n  of position-sensitive assembling operation used in\n  [instance FCN](https://arxiv.org/abs/1603.08678).\n\n  Args:\n    image: A `Tensor`. Must be one of the following types: `uint8`, `int8`,\n      `int16`, `int32`, `int64`, `half`, `float32`, `float64`.\n      A 4-D tensor of shape `[batch, image_height, image_width, depth]`.\n      Both `image_height` and `image_width` need to be positive.\n    boxes: A `Tensor` of type `float32`.\n      A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor\n      specifies the coordinates of a box in the `box_ind[i]` image and is\n      specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized\n      coordinate value of `y` is mapped to the image coordinate at\n      `y * (image_height - 1)`, so the `[0, 1]` interval of normalized image\n      height is mapped to `[0, image_height - 1]` in image height coordinates.\n      We do allow y1 > y2, in which case the sampled crop is an up-down flipped\n      version of the original image. The width dimension is treated similarly.\n      Normalized coordinates outside the `[0, 1]` range are allowed, in which\n      case we use `extrapolation_value` to extrapolate the input image values.\n    box_ind: A `Tensor` of type `int32`.\n      A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`.\n      The value of `box_ind[i]` specifies the image that the `i`-th box refers\n      to.\n    crop_size: A list of two integers `[crop_height, crop_width]`. All\n      cropped image patches are resized to this size. The aspect ratio of the\n      image content is not preserved. Both `crop_height` and `crop_width` need\n      to be positive.\n    num_spatial_bins: A list of two integers `[spatial_bins_y, spatial_bins_x]`.\n      Represents the number of position-sensitive bins in y and x directions.\n      Both values should be >= 1. `crop_height` should be divisible by\n      `spatial_bins_y`, and similarly for width.\n      The number of image channels should be divisible by\n      (spatial_bins_y * spatial_bins_x).\n      Suggested value from R-FCN paper: [3, 3].\n    global_pool: A boolean variable.\n      If True, we perform average global pooling on the features assembled from\n        the position-sensitive score maps.\n      If False, we keep the position-pooled features without global pooling\n        over the spatial coordinates.\n      Note that using global_pool=True is equivalent to but more efficient than\n        running the function with global_pool=False and then performing global\n        average pooling.\n    extrapolation_value: An optional `float`. 
Defaults to `0`.\n      Value used for extrapolation, when applicable.\n  Returns:\n    position_sensitive_features: A 4-D tensor of shape\n      `[num_boxes, K, K, crop_channels]`,\n      where `crop_channels = depth / (spatial_bins_y * spatial_bins_x)`,\n      where K = 1 when global_pool is True (Average-pooled cropped regions),\n      and K = crop_size when global_pool is False.\n  Raises:\n    ValueError: Raised in four situations:\n      `num_spatial_bins` is not >= 1;\n      `num_spatial_bins` does not divide `crop_size`;\n      `(spatial_bins_y*spatial_bins_x)` does not divide `depth`;\n      `bin_crop_size` is not square when global_pool=False due to the\n        constraint in function space_to_depth.\n  \"\"\"\n  total_bins = 1\n  bin_crop_size = []\n\n  for (num_bins, crop_dim) in zip(num_spatial_bins, crop_size):\n    if num_bins < 1:\n      raise ValueError('num_spatial_bins should be >= 1')\n\n    if crop_dim % num_bins != 0:\n      raise ValueError('crop_size should be divisible by num_spatial_bins')\n\n    total_bins *= num_bins\n    bin_crop_size.append(crop_dim // num_bins)  # integer division: crop sizes must be ints\n\n  if not global_pool and bin_crop_size[0] != bin_crop_size[1]:\n    raise ValueError('Only support square bin crop size for now.')\n\n  ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)\n  spatial_bins_y, spatial_bins_x = num_spatial_bins\n\n  # Split each box into spatial_bins_y * spatial_bins_x bins.\n  position_sensitive_boxes = []\n  for bin_y in range(spatial_bins_y):\n    step_y = (ymax - ymin) / spatial_bins_y\n    for bin_x in range(spatial_bins_x):\n      step_x = (xmax - xmin) / spatial_bins_x\n      box_coordinates = [ymin + bin_y * step_y,\n                         xmin + bin_x * step_x,\n                         ymin + (bin_y + 1) * step_y,\n                         xmin + (bin_x + 1) * step_x,\n                        ]\n      position_sensitive_boxes.append(tf.stack(box_coordinates, axis=1))\n\n  image_splits = tf.split(value=image, num_or_size_splits=total_bins, axis=3)\n\n  image_crops = []\n  for (split, box) in zip(image_splits, position_sensitive_boxes):\n    crop = tf.image.crop_and_resize(split, box, box_ind, bin_crop_size,\n                                    extrapolation_value=extrapolation_value)\n    image_crops.append(crop)\n\n  if global_pool:\n    # Average over all bins.\n    position_sensitive_features = tf.add_n(image_crops) / len(image_crops)\n    # Then average over spatial positions within the bins.\n    position_sensitive_features = tf.reduce_mean(\n        input_tensor=position_sensitive_features, axis=[1, 2], keepdims=True)\n  else:\n    # Reorder height/width to depth channel.\n    block_size = bin_crop_size[0]\n    if block_size >= 2:\n      image_crops = [tf.compat.v1.space_to_depth(\n          input=crop, block_size=block_size) for crop in image_crops]\n\n    # Pack image_crops so that first dimension is for position-sensitive boxes.\n    position_sensitive_features = tf.stack(image_crops, axis=0)\n\n    # Unroll the position-sensitive boxes to spatial positions.\n    position_sensitive_features = tf.squeeze(\n        tf.batch_to_space(position_sensitive_features,\n                          block_shape=[1] + num_spatial_bins,\n                          crops=tf.zeros((3, 2), dtype=tf.int32)),\n        axis=[0])\n\n    # Reorder back the depth channel.\n    if block_size >= 2:\n      position_sensitive_features = tf.compat.v1.depth_to_space(\n          input=position_sensitive_features, block_size=block_size)\n\n  return position_sensitive_features\n\n\ndef reframe_box_masks_to_image_masks(box_masks, boxes, image_height,\n                                     image_width):\n  \"\"\"Transforms the box masks back to full image masks.\n\n  Embeds masks in bounding boxes of larger masks whose shapes correspond to\n  image shape.\n\n  Args:\n    box_masks: A tf.float32 tensor of size [num_masks, mask_height, mask_width].\n    boxes: A tf.float32 tensor of size [num_masks, 4] containing the box\n           
corners. Row i contains [ymin, xmin, ymax, xmax] of the box\n corresponding to mask i. Note that the box corners are in\n normalized coordinates.\n image_height: Image height. The output mask will have the same height as\n the image height.\n image_width: Image width. The output mask will have the same width as the\n image width.\n\n Returns:\n A tf.float32 tensor of size [num_masks, image_height, image_width].\n \"\"\"\n # TODO: Make this a public function.\n def transform_boxes_relative_to_boxes(boxes, reference_boxes):\n boxes = tf.reshape(boxes, [-1, 2, 2])\n min_corner = tf.expand_dims(reference_boxes[:, 0:2], 1)\n max_corner = tf.expand_dims(reference_boxes[:, 2:4], 1)\n transformed_boxes = (boxes - min_corner) / (max_corner - min_corner)\n return tf.reshape(transformed_boxes, [-1, 4])\n\n box_masks = tf.expand_dims(box_masks, axis=3)\n num_boxes = tf.shape(input=box_masks)[0]\n unit_boxes = tf.concat(\n [tf.zeros([num_boxes, 2]), tf.ones([num_boxes, 2])], axis=1)\n reverse_boxes = transform_boxes_relative_to_boxes(unit_boxes, boxes)\n image_masks = tf.image.crop_and_resize(image=box_masks,\n boxes=reverse_boxes,\n box_indices=tf.range(num_boxes),\n crop_size=[image_height, image_width],\n extrapolation_value=0.0)\n return tf.squeeze(image_masks, axis=3)\n" ]
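# The padded sizes computed by pad_to_multiple above follow the usual
# ceil-to-multiple arithmetic. A small sketch of just that calculation
# (plain Python; the helper name 'padded_size' is hypothetical), matching
# the [1, 3, 5, 1] -> [1, 4, 8, 1] example in the docstring:

import math

def padded_size(dim, multiple):
    # smallest multiple of `multiple` that is >= dim
    return int(math.ceil(float(dim) / multiple) * multiple)

assert padded_size(3, 4) == 4   # height 3 -> 4
assert padded_size(5, 4) == 8   # width 5 -> 8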
[ [ "tensorflow.reshape", "tensorflow.unstack", "tensorflow.ones", "tensorflow.squeeze", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.slice", "tensorflow.split", "tensorflow.reduce_sum", "tensorflow.math.is_nan", "tensorflow.greater", "tensorflow.compat.v1.where", "tensorflow.compat.v1.get_variable_scope", "tensorflow.constant", "tensorflow.truediv", "tensorflow.stack", "tensorflow.shape", "tensorflow.add_n", "tensorflow.ones_like", "tensorflow.expand_dims", "tensorflow.cast", "tensorflow.rank", "tensorflow.pad", "tensorflow.size", "tensorflow.zeros", "tensorflow.map_fn", "tensorflow.logical_not", "tensorflow.range", "tensorflow.reduce_mean", "tensorflow.compat.v1.space_to_depth", "tensorflow.compat.v1.variable_scope", "tensorflow.image.crop_and_resize", "tensorflow.compat.v1.depth_to_space", "tensorflow.square", "tensorflow.compat.v1.Print", "tensorflow.gather", "tensorflow.compat.v1.name_scope" ] ]
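# As a quick illustration of the dynamic_stitch trick behind
# indices_to_dense_vector, a minimal sketch (assuming TensorFlow 2.x in eager
# mode): the default-filled vector is merged index-by-index with the values,
# and entries from tensors later in the list win, which is why the selected
# indices end up holding indices_value.

import tensorflow as tf

indices = tf.constant([1, 3])
size = 5
zeros = tf.zeros([size], dtype=tf.float32)            # default-filled vector
values = tf.ones_like(tf.cast(indices, tf.float32))   # values to place at `indices`
dense = tf.dynamic_stitch([tf.range(size), indices], [zeros, values])
print(dense.numpy())  # [0. 1. 0. 1. 0.]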
gjheij/linescanning
[ "967a2ac81a162e50639b4e211f1317f6203abcf6" ]
[ "linescanning/dataset.py" ]
[ "import hedfpy\nfrom . import glm, plotting, preproc, utils\nimport matplotlib.pyplot as plt\nimport nibabel as nb\nfrom nilearn.signal import clean\nimport numpy as np\nimport os\nimport pandas as pd\nfrom scipy import io, stats\nimport warnings\n\nopj = os.path.join\npd.options.mode.chained_assignment = None # disable warning thrown by string2float\nwarnings.filterwarnings(\"ignore\")\n\ndef check_input_is_list(obj, var=None, list_element=0):\n\n if hasattr(obj, var):\n attr = getattr(obj, var)\n else:\n raise ValueError(f\"Class does not have '{var}'-attribute\")\n \n if isinstance(attr, list) or isinstance(attr, np.ndarray):\n if len(attr) != len(obj.func_file):\n raise ValueError(f\"Length of '{var}' ({len(attr)}) does not match number of func files ({len(obj.func_file)}). Either specify a list of equal lenghts or 1 integer value for all volumes\")\n\n return attr[list_element]\n else:\n return attr\n\nclass ParseEyetrackerFile():\n\n \"\"\"ParseEyetrackerFile()\n\n Class for parsing edf-files created during experiments with Exptools2. The class will read in the file, read when the experiment actually started, correct onset times for this start time and time deleted because of removing the first few volumes (to do this correctly, set the `TR` and `deleted_first_timepoints`). You can also provide a numpy array/file containing eye blinks that should be added to the onset times in real-world time (seconds). In principle, it will return a pandas DataFrame indexed by subject and run that can be easily concatenated over runs. This function relies on the naming used when programming the experiment. In the `session.py` file, you should have created `phase_names=['iti', 'stim']`; the class will use these things to parse the file.\n\n Parameters\n ----------\n edf_file: str, list\n path pointing to the output file of the experiment; can be a list of multiple \n subject: int\n subject number in the returned pandas DataFrame (should start with 1, ..., n)\n run: int\n run number you'd like to have the onset times for\n low_pass_pupil_f: float, optional\n Low-pass cutoff frequency\n high_pass_pupil_f: float, optional\n High-pass cutoff frequency\n TR: float\n repetition time to correct onset times for deleted volumes\n deleted_first_timepoints: int\n number of volumes to delete to correct onset times for deleted volumes\n\n Examples\n ----------\n >>> from linescanning.utils import ParseExpToolsFile\n >>> file = 'some/path/to/exptoolsfile.tsv'\n >>> parsed_file = ParseExpToolsFile(file, subject=1, run=1, button=True)\n >>> onsets = parsed_file.get_onset_df()\n\n >>> # If you want to get all your subjects and runs in 1 nideconv compatible dataframe, you can do something like this:\n >>> onsets = []\n >>> run_subjects = ['001','002','003']\n >>> for sub in run_subjects:\n >>> path_tsv_files = os.path.join(f'some/path/sub-{sub}')\n >>> f = os.listdir(path_tsv_files)\n >>> nr_runs = []; [nr_runs.append(os.path.join(path_tsv_files, r)) for r in f if \"events.tsv\" in r]\n >>> \n >>> for run in range(1,len(nr_runs)+1):\n >>> sub_idx = run_subjects.index(sub)+1\n >>> onsets.append(ParseExpToolsFile(df_onsets, subject=sub_idx, run=run).get_onset_df())\n >>> \n >>> onsets = pd.concat(onsets).set_index(['subject', 'run', 'event_type'])\n \"\"\"\n\n def __init__(self, \n edf_file, \n subject=1, \n low_pass_pupil_f=6.0, \n high_pass_pupil_f=0.01,\n func_file=None, \n TR1=0.105, \n TR2=None, \n verbose=False, \n use_bids=True):\n\n self.edf_file = edf_file\n self.func_file = func_file\n self.sub = subject\n 
self.TR1 = TR1\n self.TR2 = TR2\n self.low_pass_pupil_f = low_pass_pupil_f\n self.high_pass_pupil_f = high_pass_pupil_f\n self.verbose = verbose\n self.use_bids = use_bids\n self.include_blinks = False\n\n\n # add all files to h5-file\n if isinstance(self.edf_file, str) or isinstance(self.edf_file, list):\n \n if self.verbose:\n print(\"\\nEYETRACKER\")\n\n self.preprocess_edf_files()\n self.include_blinks = True\n\n def preprocess_edf_files(self):\n\n # deal with edf-files\n if isinstance(self.edf_file, str):\n edfs = [self.edf_file]\n elif isinstance(self.edf_file, list):\n edfs = self.edf_file.copy()\n else:\n raise ValueError(f\"Input must be 'str' or 'list', not '{type(self.edf_file)}'\")\n\n # deal with edf-files\n if self.func_file != None:\n if isinstance(self.func_file, str):\n self.func_file = [str(self.func_file)]\n elif isinstance(self.func_file, list):\n self.func_file = self.func_file.copy()\n else:\n raise ValueError(f\"Input must be 'str' or 'list', not '{type(self.edf_file)}'\")\n\n h5_file = opj(os.path.dirname(edfs[0]), f\"eye.h5\")\n self.ho = hedfpy.HDFEyeOperator(h5_file)\n if not os.path.exists(h5_file):\n for i, edf_file in enumerate(edfs):\n\n if self.use_bids:\n comps = utils.split_bids_components(edf_file)\n try:\n run_ID = comps['run']\n except:\n run_ID = i+1\n else:\n run_ID = i+1\n\n alias = f\"run_{run_ID}\"\n\n self.ho.add_edf_file(edf_file)\n self.ho.edf_message_data_to_hdf(alias=alias)\n self.ho.edf_gaze_data_to_hdf(alias=alias,\n pupil_hp=self.high_pass_pupil_f,\n pupil_lp=self.low_pass_pupil_f)\n else:\n self.ho.open_hdf_file()\n\n self.df_eye = []\n self.blink_events = []\n self.eye_in_func = []\n for i, edf_file in enumerate(edfs):\n\n if self.verbose:\n print(f\"Dealing with {edf_file}\")\n\n if self.use_bids:\n bids_comps = utils.split_bids_components(edf_file)\n self.sub, run_ID = bids_comps['sub'], bids_comps['run']\n else:\n run_ID = i+1\n\n # full output from 'fetch_relevant_info' > use sub as differentiator if multiple files were given\n if self.use_bids:\n self.data = self.fetch_relevant_info(sub=self.sub, run=run_ID)\n else:\n self.data = self.fetch_relevant_info(run=run_ID)\n\n # collect outputs\n self.blink_events.append(self.fetch_eyeblinks())\n self.eye_in_func.append(self.fetch_eye_func_time())\n\n self.blink_events = pd.concat(self.blink_events).set_index(['subject', 'run', 'event_type'])\n self.eye_in_func = pd.concat(self.eye_in_func).set_index(['subject', 'run', 't'])\n\n def fetch_blinks_run(self, run=1, return_type='df'):\n blink_df = utils.select_from_df(self.blink_events, expression=(f\"run = {run}\"), index=['subject', 'run', 'event_type'])\n\n if return_type == \"df\":\n return blink_df\n else:\n return blink_df.values\n\n def fetch_eyeblinks(self):\n return self.data['blink_events']\n\n def fetch_eye_func_time(self):\n return self.data['space_func']\n\n def fetch_eye_tracker_time(self):\n return self.data['space_eye']\n\n def fetch_relevant_info(self, sub=None, run=1):\n\n # set alias\n alias = f'run_{run}'\n if self.verbose:\n print(\" Alias: \", alias)\n\n # load times per session:\n trial_times = self.ho.read_session_data(alias, 'trials')\n trial_phase_times = self.ho.read_session_data(alias, 'trial_phases')\n\n # read func data file to get nr of volumes\n if sub != None:\n func = utils.get_file_from_substring([f\"sub-{sub}_\", f'run-{run}'], self.func_file)\n else:\n func = utils.get_file_from_substring(f'run-{run}', self.func_file)\n\n nr_vols = self.vols(func)\n\n if func.endswith(\"nii\") or func.endswith(\"gz\"):\n TR 
= self.TR2\n elif func.endswith('mat'):\n TR = self.TR1\n else:\n TR = 0.105\n\n # fetch duration of scan\n func_time = nr_vols*TR\n\n # get block parameters\n session_start_EL_time = trial_times.iloc[0, :][0]\n sample_rate = self.ho.sample_rate_during_period(alias)\n # add number of fMRI*samplerate as stop EL time\n session_stop_EL_time = session_start_EL_time+(func_time*sample_rate)\n\n eye = self.ho.eye_during_period(\n [session_start_EL_time, session_stop_EL_time], alias)\n\n if self.verbose:\n print(\" Sample rate: \", sample_rate)\n print(\" Start time: \", session_start_EL_time)\n print(\" Stop time: \", session_stop_EL_time)\n\n # set some stuff required for successful plotting with seconds on the x-axis\n div = False\n if sample_rate == 500:\n n_samples = int(session_stop_EL_time-session_start_EL_time)/2\n duration_sec = n_samples*(1/sample_rate)*2\n\n div = True\n elif sample_rate == 1000:\n n_samples = int(session_stop_EL_time-session_start_EL_time)\n duration_sec = n_samples*(1/sample_rate)\n else:\n raise ValueError(f\"Did not recognize sample_rate of {sample_rate}\")\n\n if self.verbose:\n print(\" Duration: {}s [{} samples]\".format(\n duration_sec, n_samples))\n\n # Fetch a bunch of data\n pupil_raw = np.squeeze(self.ho.signal_during_period(time_period=[\n session_start_EL_time, session_stop_EL_time+1], alias=alias, signal='pupil', requested_eye=eye))\n pupil_int = np.squeeze(self.ho.signal_during_period(time_period=[\n session_start_EL_time, session_stop_EL_time+1], alias=alias, signal='pupil_int', requested_eye=eye))\n pupil_bp = np.squeeze(self.ho.signal_during_period(time_period=[\n session_start_EL_time, session_stop_EL_time+1], alias=alias, signal='pupil_bp', requested_eye=eye))\n pupil_lp = np.squeeze(self.ho.signal_during_period(time_period=[\n session_start_EL_time, session_stop_EL_time+1], alias=alias, signal='pupil_lp', requested_eye=eye))\n pupil_hp = np.squeeze(self.ho.signal_during_period(time_period=[\n session_start_EL_time, session_stop_EL_time+1], alias=alias, signal='pupil_hp', requested_eye=eye))\n pupil_bp_psc = np.squeeze(self.ho.signal_during_period(time_period=[\n session_start_EL_time, session_stop_EL_time+1], alias=alias, signal='pupil_bp_psc', requested_eye=eye))\n pupil_bp_psc_c = np.squeeze(self.ho.signal_during_period(time_period=[\n session_start_EL_time, session_stop_EL_time+1], alias=alias, signal='pupil_bp_clean_psc', requested_eye=eye))\n\n # Do some plotting\n if not div:\n x = np.arange(0, duration_sec, (1/sample_rate))\n else:\n x = np.arange(0, duration_sec, (1/(sample_rate/2)))\n\n # resample to match functional data\n resamp = glm.resample_stim_vector(pupil_bp_psc_c.values, nr_vols)\n resamp1 = glm.resample_stim_vector(pupil_raw.values, nr_vols)\n\n # add start time to it\n start_exp_time = trial_times.iloc[0, :][-1]\n\n if self.verbose:\n print(\" Start time exp = \", round(start_exp_time, 2))\n\n # get onset time of blinks, cluster blinks that occur within 350 ms\n onsets = self.filter_for_eyeblinks(pupil_raw.to_numpy(),\n skip_time=10,\n filt_window=500,\n sample_rate=sample_rate,\n exp_start=start_exp_time)\n\n # normal eye blink is 1 blink every 4 seconds, throw warning if we found more than a blink per second\n # ref: https://www.sciencedirect.com/science/article/abs/pii/S0014483599906607\n blink_rate = len(onsets) / duration_sec\n\n if self.verbose:\n print(\" Found {} blinks [{} blinks per second]\".format(\n len(onsets), round(blink_rate, 2)))\n\n if blink_rate > 1 or blink_rate < 0.1:\n print(\n f\"WARNING for run-{run}: 
found {round(blink_rate,2)} blinks per second; normal blink rate is 0.25 blinks per second ({len(onsets)} in {duration_sec}s)\")\n\n        # if verbose:\n        #     print(\"Saving blink onsets times and pupil size trace\")\n        # np.save(opj(func_dir, '{}_ses-2_task-LR_{}_eyeblinks.npy'.format(subject, alias.replace('_','-'))), onsets.T)\n        # np.save(opj(func_dir, '{}_ses-2_task-LR_{}_pupilsize.npy'.format(subject, alias.replace('_','-'))), resamp)\n\n        # build dataframe with relevant information\n        df_space_eye = pd.DataFrame({\"pupil_raw\": pupil_raw,\n                                     \"pupil_int\": pupil_int,\n                                     \"pupil_bp\": pupil_bp,\n                                     \"pupil_lp\": pupil_lp,\n                                     \"pupil_hp\": pupil_hp,\n                                     \"pupil_bp_psc\": pupil_bp_psc,\n                                     \"pupil_bp_psc_c\": pupil_bp_psc_c})\n\n        # index\n        df_space_eye['subject'], df_space_eye['run'] = self.sub, run\n\n        df_space_func = pd.DataFrame({\"pupil_raw_2_func\": resamp,\n                                      \"pupil_psc_2_func\": resamp1})\n\n        # index\n        df_space_func['subject'], df_space_func['run'], df_space_func['t'] = self.sub, run, list(\n            TR*np.arange(df_space_func.shape[0]))\n\n        # index\n        df_blink_events = pd.DataFrame(onsets.T, columns=['onsets'])\n        df_blink_events['subject'], df_blink_events['run'], df_blink_events['event_type'] = self.sub, run, \"blink\"\n\n        if self.verbose:\n            print(\"Done\")\n\n        return {\"space_eye\": df_space_eye,\n                \"space_func\": df_space_func,\n                \"blink_events\": df_blink_events}\n\n    def vols(self, func_file):\n        if func_file.endswith(\"gz\") or func_file.endswith('nii'):\n            img = nb.load(func_file)\n            nr_vols = img.get_fdata().shape[-1]\n            self.TR2 = img.header['pixdim'][4]\n        elif func_file.endswith(\"mat\"):\n            raw = io.loadmat(func_file)\n            tag = list(raw.keys())[-1]\n            raw = raw[tag]\n            nr_vols = raw.shape[-1]\n        else:\n            raise ValueError(\n                f\"Could not derive number of volumes for file '{func_file}'\")\n\n        return nr_vols\n\n    @staticmethod\n    def filter_for_eyeblinks(arr, skip_time=None, filt_window=350, sample_rate=500, exp_start=None):\n        \"\"\"filter_for_eyeblinks\n\n        This function reads where a blink occurred and will filter onset times within a particular window of \n        occurrence. For instance, a blink generally takes about 100 ms, so any onsets within 100 ms of each\n        other can't be physiologically correct. The function will find the first onset time, checks for onset\n        times within the 100ms window using the sampling rate, and returns the filtered onset times.\n\n        Parameters\n        -----------\n        arr: np.ndarray\n            Array to-be-filtered. If obtained from 'signal_during_period', use 'to_numpy()' as input\n        skip_time: int\n            skip the first <skip_time> seconds from sampled data to leave out any unphysiological events (default = None)\n        filt_window: float\n            consider events within <filt_window> as one blink. Given in milliseconds; default is set to 350 ms (0.35 s). See: `https://bionumbers.hms.harvard.edu/bionumber.aspx?id=100706&ver=0`\n        sample_rate: int\n            sampling rate of data, used together with <filt_window> to get the amount of data points that need to be clustered as 1 event\n        exp_start: float\n            add the start of the experiment time to the onset times. 
Otherwise timing is relative\n            to 0, so it's not synced with the experiment.\n\n        Returns\n        ----------\n        onset times: np.ndarray\n            numpy array containing onset times in seconds\n        \"\"\"\n\n        blink_onsets = np.where(arr == 0)[0]\n\n        blink = 0\n        filter = True\n        blink_arr = []\n        while filter:\n\n            try:\n                start_blink = blink_onsets[blink]\n                end_blink = start_blink+int((filt_window/1000*sample_rate))\n\n                for ii in np.arange(start_blink+1, end_blink):\n                    if ii in blink_onsets:\n                        blink_onsets = np.delete(\n                            blink_onsets, np.where(blink_onsets == ii))\n\n                blink_arr.append(blink_onsets[blink])\n\n                blink += 1\n            except IndexError:\n                # ran past the last detected blink; stop filtering\n                filter = False\n\n        onsets = np.array(blink_arr)\n        onsets = onsets*(1/sample_rate)\n\n        if skip_time:\n            for pp in onsets:\n                if pp < skip_time:\n                    onsets = np.delete(onsets, np.where(onsets == pp))\n\n        if exp_start:\n            onsets = onsets+exp_start\n\n        return onsets\n\nclass ParseExpToolsFile(ParseEyetrackerFile):\n\n    \"\"\"ParseExpToolsFile()\n\n    Class for parsing tsv-files created during experiments with Exptools2. The class will read in the file, read when the experiment actually started, correct onset times for this start time and time deleted because of removing the first few volumes (to do this correctly, set the `TR` and `deleted_first_timepoints`). You can also provide a numpy array/file containing eye blinks that should be added to the onset times in real-world time (seconds). In principle, it will return a pandas DataFrame indexed by subject and run that can be easily concatenated over runs. This function relies on the naming used when programming the experiment. In the `session.py` file, you should have created `phase_names=['iti', 'stim']`; the class will use these things to parse the file.\n\n    Parameters\n    ----------\n    tsv_file: str\n        path pointing to the output file of the experiment\n    subject: int\n        subject number in the returned pandas DataFrame (should start with 1, ..., n)\n    run: int\n        run number you'd like to have the onset times for\n    button: bool\n        boolean whether to include onset times of button responses (default is false)\n    blinks: str, np.ndarray\n        string or array containing the onset times of eye blinks as extracted with hedfpy\n    TR: float\n        repetition time to correct onset times for deleted volumes\n    deleted_first_timepoints: int\n        number of volumes to delete to correct onset times for deleted volumes. 
Can be specified for each individual run if `tsv_file` is a list\n    use_bids: bool, optional\n        If true, we'll read BIDS-components such as 'sub', 'run', 'task', etc from the input file and use those as indexers, rather than sequential 1,2,3.\n\n    Examples\n    ----------\n    >>> from linescanning.utils import ParseExpToolsFile\n    >>> file = 'some/path/to/exptoolsfile.tsv'\n    >>> parsed_file = ParseExpToolsFile(file, subject=1, run=1, button=True)\n    >>> onsets = parsed_file.get_onset_df()\n\n    >>> # If you want to get all your subjects and runs in 1 nideconv compatible dataframe, you can do something like this:\n    >>> onsets = []\n    >>> run_subjects = ['001','002','003']\n    >>> for sub in run_subjects:\n    >>>     path_tsv_files = os.path.join(f'some/path/sub-{sub}')\n    >>>     f = os.listdir(path_tsv_files)\n    >>>     nr_runs = []; [nr_runs.append(os.path.join(path_tsv_files, r)) for r in f if \"events.tsv\" in r]\n    >>> \n    >>>     for run in range(1,len(nr_runs)+1):\n    >>>         sub_idx = run_subjects.index(sub)+1\n    >>>         onsets.append(ParseExpToolsFile(df_onsets, subject=sub_idx, run=run).get_onset_df())\n    >>> \n    >>> onsets = pd.concat(onsets).set_index(['subject', 'run', 'event_type'])\n    \"\"\"\n\n    def __init__(self, \n                 tsv_file, \n                 subject=1, \n                 run=1, \n                 button=False, \n                 blinks=None, \n                 TR=0.105, \n                 deleted_first_timepoints=0, \n                 edfs=None, \n                 funcs=None, \n                 use_bids=True,\n                 verbose=False,\n                 phase_onset=1,\n                 **kwargs):\n\n        self.tsv_file = tsv_file\n        self.sub = int(subject)\n        self.run = int(run)\n        self.TR = TR\n        self.deleted_first_timepoints = deleted_first_timepoints\n        self.button = button\n        self.blinks = blinks\n        self.funcs = funcs\n        self.edfs = edfs\n        self.use_bids = use_bids\n        self.verbose = verbose\n        self.phase_onset = phase_onset\n        self.__dict__.update(kwargs)\n\n        if self.edfs != None:\n            super().__init__(self.edfs, \n                             subject=self.sub, \n                             func_file=self.funcs, \n                             TR1=self.TR, \n                             use_bids=self.use_bids, \n                             verbose=self.verbose)\n        else:\n            self.include_blinks = False\n\n        if self.verbose:\n            print(\"\\nEXPTOOLS\")\n\n        if isinstance(self.tsv_file, str):\n            self.tsv_file = [self.tsv_file]\n\n        if isinstance(self.tsv_file, list):\n            df_onsets = []\n            for run, onset_file in enumerate(self.tsv_file):\n\n                if self.use_bids:\n                    bids_comps = utils.split_bids_components(onset_file)\n                    for el in ['sub', 'run']:\n                        setattr(self, el, bids_comps[el])\n\n                # include eyeblinks?\n                if self.include_blinks:\n                    self.blinks = self.fetch_blinks_run(run=self.run)\n\n                # check if we got different nr of vols to delete per run\n                delete_vols = check_input_is_list(self, \"deleted_first_timepoints\", list_element=run)\n\n                # read in the exptools-file\n                self.preprocess_exptools_file(onset_file, run=self.run, delete_vols=delete_vols, phase_onset=self.phase_onset)\n\n                # append to df\n                df_onsets.append(self.get_onset_df(index=False))\n\n            # concatenate df\n            self.df_onsets = pd.concat(df_onsets).set_index(['subject', 'run', 'event_type'])\n\n            # get events per run (note: this replaces the bound method with the resulting dict)\n            self.events_per_run = self.events_per_run()\n\n\n    def events_per_run(self):\n        n_runs = np.unique(self.df_onsets.reset_index()['run'].values)\n        events = {}\n        for run in n_runs:\n            df = utils.select_from_df(self.df_onsets, expression=f\"run = {run}\", index=None)\n            events[run] = np.unique(df['event_type'].values)\n\n        return events\n\n    def events_single_run(self, run=1):\n        return self.events_per_run[run]\n\n    def preprocess_exptools_file(self, tsv_file, run=1, delete_vols=0, phase_onset=1):\n        \n        data_onsets = []\n        with open(tsv_file) as f:\n            timings = pd.read_csv(f, delimiter='\t')\n            
data_onsets.append(pd.DataFrame(timings))\n\n        delete_time = delete_vols*self.TR\n        self.data = data_onsets[0]\n        self.start_time = float(timings.loc[(timings['event_type'] == \"pulse\") & (timings['response'] == \"t\")].loc[(timings['trial_nr'] == 1) & (timings['phase'] == 0)]['onset'].values)\n        # self.data_cut_start = self.data.drop([q for q in np.arange(0,self.start_times.index[0])])\n        # self.onset_times = pd.DataFrame(self.data_cut_start[(self.data_cut_start['event_type'] == 'stim') & (self.data_cut_start['condition'].notnull()) | (self.data_cut_start['response'] == 'b')][['onset', 'condition']]['onset'])\n\n        self.trimmed = timings.loc[(timings['event_type'] == \"stim\") & (timings['phase'] == phase_onset)].iloc[1:,:]\n        self.onset_times = self.trimmed['onset'].values[...,np.newaxis]\n        # self.condition = pd.DataFrame(self.data_cut_start[(self.data_cut_start['event_type'] == 'stim') & (self.data_cut_start['condition'].notnull()) | (self.data_cut_start['response'] == 'b')]['condition'])\n\n        self.condition = self.trimmed['condition'].values[..., np.newaxis]\n        if self.verbose:\n            print(f\"  1st 't' @{round(self.start_time,2)}s\")\n        \n        # add button presses (note: relies on 'data_cut_start', which is only\n        # computed in the commented-out block above)\n        if self.button:\n            self.response = self.data_cut_start[(self.data_cut_start['response'] == 'b')]\n            self.condition.loc[self.response.index] = 'response'\n\n        # self.onset = np.concatenate((self.onset_times, self.condition), axis=1)\n        self.onset = np.hstack((self.onset_times, self.condition))\n\n        # add eyeblinks\n        if isinstance(self.blinks, np.ndarray) or isinstance(self.blinks, str):\n\n            if self.verbose:\n                print(\" Including eyeblinks\")\n\n            if isinstance(self.blinks, np.ndarray):\n                self.eye_blinks = self.blinks\n            elif isinstance(self.blinks, str):\n                if self.blinks.endswith(\".npy\"):\n                    self.eye_blinks = np.load(self.blinks)\n                else:\n                    raise ValueError(f\"Could not recognize type of {self.blinks}. 
Should be numpy array or string to numpy file\")\n\n            self.eye_blinks = self.eye_blinks.astype('object').flatten()\n            tmp = self.onset[:,0].flatten()\n\n            # combine and sort timings\n            comb = np.concatenate((self.eye_blinks, tmp))\n            comb = np.sort(comb)[...,np.newaxis]\n\n            # add back event types by checking timing values in both arrays\n            event_array = []\n            for ii in comb:\n\n                if ii in self.onset:\n                    idx = np.where(self.onset == ii)[0][0]\n                    event_array.append(self.onset[idx][-1])\n                else:\n                    idx = np.where(self.eye_blinks == ii)[0]\n                    event_array.append('blink')\n\n            event_array = np.array(event_array)[...,np.newaxis]\n\n            self.onset = np.concatenate((comb, event_array), axis=1)\n\n        # correct for start time of experiment and deleted time due to removal of initial volumes\n        self.onset[:, 0] = self.onset[:, 0] - (self.start_time + delete_time)\n\n        if self.verbose:\n            print(f\"  Cutting {round(self.start_time + delete_time,2)}s from onsets\")\n\n        # make dataframe\n        self.onset_df = self.index_onset(self.onset, columns=['onset', 'event_type'], subject=self.sub, run=run)\n\n    @staticmethod\n    def index_onset(array, columns=None, subject=1, run=1, TR=0.105, set_index=False):\n        \n        if columns is None:\n            df = pd.DataFrame(array)\n        else:\n            df = pd.DataFrame(array, columns=columns)\n        \n        df['subject'], df['run'] = subject, run\n        df['event_type'] = df['event_type'].astype(str)\n        df['onset'] = df['onset'].astype(float)\n\n        if set_index:\n            return df.set_index(['subject', 'event_type'])\n        else:\n            return df \n\n    def get_onset_df(self, index=False):\n        \"\"\"Return the indexed DataFrame containing onset times\"\"\"\n\n        if index:\n            return self.onset_df.set_index(['subject', 'run', 'event_type'])\n        else:\n            return self.onset_df\n\n    def onsets_to_fsl(self, fmt='3-column', duration=1, amplitude=1, output_base=None):\n        \"\"\"onsets_to_fsl\n\n        This function creates a text file with a single column containing the onset times of a given condition. Such a file can be used for SPM or FSL modeling, but it should be noted that the onset times have been corrected for the deleted volumes at the beginning. 
So make sure you're inputting the correct functional data in these cases.\n\n        Parameters\n        ----------\n        fmt: str\n            format of the output files; '3-column' (default) writes FSL-style onset/duration/amplitude files, any other value writes a single column of onset times\n        duration: int, float\n            value for the duration (second) column of the 3-column format (default = 1)\n        amplitude: int, float\n            value for the amplitude (third) column of the 3-column format (default = 1)\n        output_base: str\n            basename for the output files; if *None*, files are named '<event>_run-<run>.txt'\n\n        Returns\n        ----------\n        None\n            one text file per event per run is written to disk\n        \"\"\"\n\n        onsets = self.df_onsets.copy()\n        subj_list = self.get_subjects(onsets)\n        for sub in subj_list:\n            df = utils.select_from_df(onsets, expression=f\"subject = {sub}\")\n\n            n_runs = self.get_runs(df)\n\n            for run in n_runs:\n                onsets_per_run = utils.select_from_df(df, expression=f\"run = {run}\")\n                events_per_run = self.get_events(onsets_per_run)\n\n                for ix, ev in enumerate(events_per_run):\n                    onsets_per_event = utils.select_from_df(onsets_per_run, expression=f\"event_type = {events_per_run[ix]}\").values.flatten()[..., np.newaxis]\n\n                    if output_base is None:\n                        fname = f\"{ev}_run-{run}.txt\"\n                    else:\n                        fname = f\"{output_base}{ix+1}_run-{run}.txt\"\n\n                    if fmt == \"3-column\":\n                        duration_arr = np.full_like(onsets_per_event, duration)\n                        amplitude_arr = np.full_like(onsets_per_event, amplitude)\n                        three_col = np.hstack((onsets_per_event, duration_arr, amplitude_arr))\n\n                        print(f\"Writing {fname}; {three_col.shape}\")\n                        np.savetxt(fname, three_col,\n                                   delimiter='\t', fmt='%1.3f')\n                    else:\n                        np.savetxt(fname, onsets_per_event,\n                                   delimiter='\t', fmt='%1.3f')\n\n    @staticmethod\n    def get_subjects(df):\n        try:\n            df = df.reset_index()\n        except:\n            pass\n\n        return np.unique(df['subject'].values)\n\n    @staticmethod\n    def get_runs(df):\n        try:\n            df = df.reset_index()\n        except:\n            pass\n\n        return np.unique(df['run'].values)\n\n    @staticmethod\n    def get_events(df):\n        try:\n            df = df.reset_index()\n        except:\n            pass\n\n        return np.unique(df['event_type'].values)\n\nclass ParsePhysioFile():\n\n    \"\"\"ParsePhysioFile\n    \n    In similar style to :class:`linescanning.utils.ParseExpToolsFile` and :class:`linescanning.utils.ParseFuncFile`, we use this class to read in physiology-files created with the PhysIO-toolbox (https://www.tnu.ethz.ch/en/software/tapas/documentations/physio-toolbox) (via `call_spmphysio` for instance). Using the *.mat*-file created with `PhysIO`, we can also attempt to extract `heart rate variability` measures. If this file cannot be found, this operation will be skipped\n\n    Parameters\n    ----------\n    physio_file: str\n        path pointing to the regressor file created with PhysIO (e.g., `call_spmphysio`)\n    physio_mat: str\n        path pointing to the *.mat*-file created with PhysIO (e.g., `call_spmphysio`)\n    subject: int\n        subject number in the returned pandas DataFrame (should start with 1, ..., n)\n    run: int\n        run number you'd like to have the onset times for\n    TR: float\n        repetition time to correct onset times for deleted volumes\n    orders: list\n        list of orders used to create the regressor files (see `call_spmphysio`; default = [3,4,1]). 
This one is necessary to create the correct column names for the dataframe\n    deleted_first_timepoints: int, optional\n        number of volumes deleted at the beginning of the timeseries\n    deleted_last_timepoints: int, optional\n        number of volumes deleted at the end of the timeseries\n\n    Example\n    ----------\n    >>> physio_file = opj(os.path.dirname(func_file), \"sub-001_ses-1_task-SR_run-1_physio.txt\")\n    >>> physio_mat = opj(os.path.dirname(func_file), \"sub-001_ses-1_task-SR_run-1_physio.mat\")\n    >>> physio = utils.ParsePhysioFile(physio_file,\n    >>>                                physio_mat=physio_mat,\n    >>>                                subject=func.subject,\n    >>>                                run=func.run,\n    >>>                                TR=func.TR,\n    >>>                                deleted_first_timepoints=func.deleted_first_timepoints,\n    >>>                                deleted_last_timepoints=func.deleted_last_timepoints)\n    >>> physio_df = physio.get_physio(index=False)\n    \"\"\"\n\n    def __init__(self, \n                 physio_file, \n                 physio_mat=None, \n                 subject=1, \n                 run=1, \n                 TR=0.105, \n                 orders=[3,4,1], \n                 deleted_first_timepoints=0, \n                 deleted_last_timepoints=0, \n                 use_bids=False, \n                 verbose=True,\n                 **kwargs):\n\n        self.physio_file = physio_file\n        self.physio_mat = physio_mat\n        self.sub = subject\n        self.run = run\n        self.TR = TR\n        self.orders = orders\n        self.deleted_first_timepoints = deleted_first_timepoints\n        self.deleted_last_timepoints = deleted_last_timepoints\n        self.use_bids = use_bids\n        self.verbose = verbose\n        self.__dict__.update(kwargs)\n\n        print(\"\\nPHYSIO\")\n        \n        self.physio_cols = [f'c_{i}' for i in range(self.orders[0])] + [f'r_{i}' for i in range(self.orders[1])] + [f'cr_{i}' for i in range(self.orders[2])]\n\n        if isinstance(self.physio_file, str):\n            self.physio_file = [self.physio_file]\n\n        if isinstance(self.physio_mat, str):\n            self.physio_mat = [self.physio_mat]\n        \n        if isinstance(self.physio_file, list):\n\n            df_physio = []\n            for run, func in enumerate(self.physio_file):\n\n                if self.verbose:\n                    print(f\"Preprocessing {func}\")\n\n                if self.use_bids:\n                    bids_comps = utils.split_bids_components(func)\n                    for el in ['sub', 'run']:\n                        setattr(self, el, bids_comps[el])\n                else:\n                    self.run = run+1\n\n                # check if deleted_first_timepoints is list or not\n                delete_first = check_input_is_list(self, var=\"deleted_first_timepoints\", list_element=run)\n\n                # check if deleted_last_timepoints is list or not\n                delete_last = check_input_is_list(self, var=\"deleted_last_timepoints\", list_element=run)\n\n                if self.physio_mat != None:\n                    if isinstance(self.physio_mat, list):\n                        if len(self.physio_mat) == len(self.physio_file):\n                            mat_file = self.physio_mat[run]\n                        else:\n                            raise ValueError(f\"Length of mat-files ({len(self.physio_mat)}) does not match length of physio-files ({len(self.physio_file)})\")\n                    else:\n                        raise ValueError(\"Please specify a list of mat-files of equal lengths to that of the list of physio files\")\n                else:\n                    mat_file = None\n\n                self.preprocess_physio_file(func, \n                                            physio_mat=mat_file,\n                                            deleted_first_timepoints=delete_first,\n                                            deleted_last_timepoints=delete_last)\n\n                df_physio.append(self.get_physio(index=False))\n\n            self.df_physio = pd.concat(df_physio).set_index(['subject', 'run', 't'])\n        \n    def preprocess_physio_file(self, \n                               physio_tsv, \n                               physio_mat=None, \n                               deleted_first_timepoints=0, \n                               deleted_last_timepoints=0):\n\n        self.physio_data = pd.read_csv(physio_tsv,\n                                        header=None,\n                                        sep=\"\t\",\n                                        engine='python',\n                                        skiprows=deleted_first_timepoints,\n                                        usecols=list(range(0, len(self.physio_cols))))\n\n        self.physio_df = pd.DataFrame(self.physio_data)\n 
self.physio_df.drop(self.physio_df.tail(deleted_last_timepoints).index, inplace=True)\n self.physio_df.columns = self.physio_cols\n\n # Try to get the heart rate\n if physio_mat != None:\n\n self.mat = io.loadmat(physio_mat)\n try:\n self.hr = self.mat['physio']['ons_secs'][0][0][0][0][12]\n except:\n print(\" WARNING: no heart rate trace found..\")\n \n try:\n self.rvt = self.mat['physio']['ons_secs'][0][0][0][0][13]\n except:\n print(\" WARNING: no respiration trace found..\")\n\n # trim beginning and end\n for trace in ['hr', 'rvt']:\n if hasattr(self, trace):\n if deleted_last_timepoints != 0:\n self.physio_df[trace] = getattr(self, trace)[deleted_first_timepoints:-deleted_last_timepoints,:]\n else:\n self.physio_df[trace] = getattr(self, trace)[deleted_first_timepoints:, :]\n\n self.physio_df['subject'], self.physio_df['run'], self.physio_df['t'] = self.sub, self.run, list(self.TR*np.arange(self.physio_df.shape[0]))\n\n def get_physio(self, index=True):\n if index:\n return self.physio_df.set_index(['subject', 'run', 't'])\n else:\n return self.physio_df\n\n\nclass ParseFuncFile(ParseExpToolsFile, ParsePhysioFile):\n\n \"\"\"ParseFuncFile\n\n Class for parsing func-files created with Luisa's reconstruction. It can do filtering, conversion to percent signal change, and creation of power spectra. It is supposed to look similar to :class:`linescanning.utils.ParseExpToolsFile` to make it easy to translate between the functional data and experimental data.\n\n Parameters\n ----------\n func_file: str, list\n path or list of paths pointing to the output file of the experiment\n subject: int, optional\n subject number in the returned pandas DataFrame (should start with 1, ..., n)\n run: int, optional\n run number you'd like to have the onset times for\n standardization: str, optional\n method of standardization (e.g., \"zscore\" or \"psc\")\n low_pass: bool, optional\n Temporally smooth the data. It's a bit of a shame if this is needed. The preferred option is to use aCompCor with `filter_pca=0.2`\n lb: float, optional\n lower bound for signal filtering\n TR: float, optional\n repetition time to correct onset times for deleted volumes\n deleted_first_timepoints: int, list, optional\n number of volumes deleted at the beginning of the timeseries. Can be specified for each individual run if `func_file` is a list\n deleted_last_timepoints: int, list, optional\n number of volumes deleted at the end of the timeseries. Can be specified for each individual run if `func_file` is a list\n window_size: int, optional\n size of window for rolling median and Savitsky-Golay filter\n poly_order: int, optional\n The order of the polynomial used to fit the samples. polyorder must be less than window_length.\n use_bids: bool, optional\n If true, we'll read BIDS-components such as 'sub', 'run', 'task', etc from the input file and use those as indexers, rather than sequential 1,2,3.\n verbose: bool, optional\n Print details to the terminal, default is False\n retroicor: bool, optional\n WIP: implementation of retroicor, requires the specification of `phys_file` and `phys_mat` containing the output from the PhysIO-toolbox\n n_pca: int, optional\n Number of components to use for WM/CSF PCA during aCompCor\n select_component: int, optional\n If `verbose=True` and `aCompcor=True`, we'll create a scree-plot of the PCA components. With this flag, you can re-run this call but regress out only this particular component. 
[Deprecated: `filter_pca` is much more effective]\n filter_pca: float, optional\n High-pass filter the components from the PCA during aCompCor. This seems to be pretty effective. Default is 0.2Hz.\n ses1_2_ls: str, optional:\n Transformation mapping `ses-1` anatomy to current linescanning-session, ideally the multi-slice image that is acquired directly before the first `1slice`-image. Default is None.\n run_2_run: str, list, optional\n (List of) Transformation(s) mapping the slices of subsequent runs to the first acquired `1slice` image. Default is None.\n save_as: str, optional\n Directory + basename for several figures that can be created during the process (mainly during aCompCor)\n\n Example\n ----------\n >>> from linescanning import utils\n >>> func_file = utils.get_file_from_substring(f\"run-1_bold.mat\", opj('sub-001', 'ses-1', 'func'))\n >>> func = utils.ParseFuncFile(func_file, subject=1, run=1, deleted_first_timepoints=100, deleted_last_timepoints=300)\n >>> raw = func.get_raw(index=True)\n >>> psc = func.get_psc(index=True)\n \"\"\"\n\n def __init__(self, \n func_file, \n subject=1, \n run=1,\n low_pass=False,\n lb=0.05, \n hb=4,\n TR=0.105, \n deleted_first_timepoints=0, \n deleted_last_timepoints=0, \n window_size=11,\n poly_order=3,\n attribute_tag=None,\n hdf_key=\"df\",\n tsv_file=None,\n edf_file=None,\n phys_file=None,\n phys_mat=None,\n use_bids=True,\n button=False,\n verbose=True,\n retroicor=False,\n acompcor=False,\n n_pca=5,\n func_tag=None,\n select_component=None,\n standardization=\"zscore\",\n filter_pca=None,\n ses1_2_ls=None,\n run_2_run=None,\n save_as=None,\n gm_range=[355, 375],\n tissue_thresholds=[0.7,0.7,0.7],\n save_ext=\"pdf\",\n **kwargs):\n\n self.sub = subject\n self.run = run\n self.TR = TR\n self.lb = lb\n self.hb = hb\n self.low_pass = low_pass\n self.deleted_first_timepoints = deleted_first_timepoints\n self.deleted_last_timepoints = deleted_last_timepoints\n self.window_size = window_size\n self.poly_order = poly_order\n self.attribute_tag = attribute_tag\n self.hdf_key = hdf_key\n self.button = button\n self.func_file = func_file\n self.tsv_file = tsv_file\n self.edf_file = edf_file\n self.phys_file = phys_file\n self.phys_mat = phys_mat\n self.use_bids = use_bids\n self.verbose = verbose\n self.retroicor = retroicor\n self.acompcor = acompcor\n self.foldover = \"FH\"\n self.func_tag = func_tag\n self.n_pca = n_pca\n self.select_component = select_component\n self.filter_pca = filter_pca\n self.standardization = standardization\n self.ses1_2_ls = ses1_2_ls\n self.run_2_run = run_2_run\n self.save_as = save_as\n self.gm_range = gm_range\n self.tissue_thresholds = tissue_thresholds\n self.save_ext = save_ext\n self.__dict__.update(kwargs)\n\n # sampling rate and nyquist freq\n self.fs = 1/self.TR\n self.fn = self.fs/2\n\n # check filtering approach\n if self.low_pass:\n self.filter_strategy = \"lp\"\n else:\n self.filter_strategy = \"hp\"\n\n if self.phys_file != None: \n \n # super(ParsePhysioFile, self).__init__(**kwargs) \n ParsePhysioFile.__init__(self, \n self.phys_file, \n physio_mat=self.phys_mat, \n use_bids=self.use_bids,\n TR=self.TR,\n deleted_first_timepoints=self.deleted_first_timepoints,\n deleted_last_timepoints=self.deleted_last_timepoints,\n **kwargs)\n \n if self.acompcor:\n if isinstance(self.ref_slice, str):\n self.ref_slice = [self.ref_slice]\n\n if self.verbose:\n print(\"\\nFUNCTIONAL\")\n\n if isinstance(self.func_file, str):\n self.func_file = [self.func_file]\n \n if isinstance(self.func_file, list):\n \n # initiate 
some dataframes\n self.df_psc = [] # psc-data (filtered or not)\n self.df_raw = [] # raw-data (filtered or not)\n self.df_retro = [] # z-score data (retroicor'ed, `if retroicor=True`)\n self.df_r2 = [] # r2 for portions of retroicor-regressors (e.g., 'all', 'cardiac', etc)\n self.df_acomp = [] # aCompCor'ed data\n self.df_zscore = [] # zscore-d data\n self.acomp_objs = [] # keep track of all aCompCor elements\n self.df_gm_only = [] # aCompCor'ed data, only GM voxels\n self.gm_per_run = [] # keep track of GM-voxel indices\n for run, func in enumerate(self.func_file):\n \n if self.verbose:\n print(f\"Preprocessing {func}\")\n if self.use_bids:\n bids_comps = utils.split_bids_components(func)\n for el in ['sub', 'run']:\n setattr(self, el, bids_comps[el])\n else:\n self.run = run+1\n\n # check if deleted_first_timepoints is list or not\n delete_first = check_input_is_list(self, var=\"deleted_first_timepoints\", list_element=run)\n\n # check if deleted_last_timepoints is list or not\n delete_last = check_input_is_list(self, var=\"deleted_last_timepoints\", list_element=run)\n\n if self.acompcor:\n ref_slice = self.ref_slice[run]\n else:\n ref_slice = None\n\n if self.verbose:\n print(f\" Filtering strategy: '{self.filter_strategy}'\")\n print(f\" Standardization strategy: '{self.standardization}'\")\n\n self.preprocess_func_file(func, \n run=self.run, \n deleted_first_timepoints=delete_first,\n deleted_last_timepoints=delete_last,\n acompcor=self.acompcor,\n reference_slice=ref_slice,\n save_as=self.save_as,\n **kwargs)\n \n if self.standardization == \"psc\":\n self.df_psc.append(self.get_data(index=False, filter_strategy=self.filter_strategy, dtype='psc'))\n elif self.standardization == \"zscore\":\n if not self.acompcor:\n self.df_zscore.append(self.get_data(index=False, filter_strategy=self. filter_strategy, dtype='zscore'))\n\n self.df_raw.append(self.get_data(index=False, filter_strategy=None, dtype='raw'))\n\n if self.retroicor:\n self.df_retro.append(self.get_retroicor(index=False))\n self.df_r2.append(self.r2_physio_df)\n\n if self.acompcor:\n acomp_data = self.get_acompcor(index=False, filter_strategy=self.filter_strategy, dtype=self.standardization)\n self.df_acomp.append(acomp_data)\n \n # append the linescanning.preproc.aCompCor object in case we have multiple runs\n self.acomp_objs.append(self.acomp)\n\n # select GM-voxels based on segmentations in case we have single run\n self.select_gm_voxels = [ii for ii in self.acomp.gm_voxels if ii in range(*self.gm_range)]\n self.gm_per_run.append(self.select_gm_voxels)\n \n # fetch the data\n self.df_gm_only.append(utils.select_from_df(acomp_data, expression='ribbon', indices=self.select_gm_voxels))\n\n # check for standardization method\n if self.standardization == \"psc\":\n self.df_func_psc = pd.concat(self.df_psc)\n elif self.standardization == \"zscore\":\n if not self.acompcor:\n self.df_func_zscore = pd.concat(self.df_zscore)\n\n # we'll always have raw data\n self.df_func_raw = pd.concat(self.df_raw)\n\n if self.retroicor:\n try:\n self.df_func_retroicor = pd.concat(self.df_retro).set_index(['subject', 'run', 't'])\n self.df_physio_r2 = pd.concat(self.df_r2)\n except:\n raise ValueError(\"RETROICOR did not complete successfully..\")\n\n if self.acompcor: \n \n # check if elements of list contain dataframes\n if all(elem is None for elem in self.df_acomp):\n print(\"WARNING: aCompCor did not execute properly. 
All runs have 'None'\")\n else:\n try:\n self.df_func_acomp = pd.concat(self.df_acomp).set_index(['subject', 'run', 't'])\n except:\n self.df_func_acomp = pd.concat(self.df_acomp)\n\n # decide on GM-voxels across runs by averaging tissue probabilities\n if len(self.acomp_objs) > 1:\n self.select_voxels_across_runs()\n self.gm_df = utils.select_from_df(self.df_func_acomp, expression='ribbon', indices=self.voxel_classification['gm'])\n self.ribbon_voxels = [ii for ii in range(*self.gm_range) if ii in self.voxel_classification['gm']]\n self.ribbon_df = utils.select_from_df(\n self.df_func_acomp, expression='ribbon', indices=self.ribbon_voxels)\n else:\n self.gm_df = self.df_gm_only[0].copy()\n\n # now that we have nicely formatted functional data, initialize the ParseExpToolsFile-class\n if self.tsv_file != None: \n ParseExpToolsFile.__init__(self,\n self.tsv_file, \n subject=self.sub, \n deleted_first_timepoints=self.deleted_first_timepoints, \n TR=self.TR, \n edfs=self.edf_file, \n funcs=self.func_file, \n use_bids=self.use_bids,\n button=self.button,\n verbose=self.verbose,\n **kwargs)\n\n def preprocess_func_file(self, \n func_file, \n run=1, \n deleted_first_timepoints=0, \n deleted_last_timepoints=0,\n acompcor=False,\n reference_slice=None,\n save_as=None,\n **kwargs):\n\n #----------------------------------------------------------------------------------------------------------------------------------------------------\n # BASIC DATA LOADING\n\n # Load in datasets with tag \"wcsmtSNR\"\n if func_file.endswith(\"mat\"):\n\n # load matlab file\n self.ts_wcsmtSNR = io.loadmat(func_file)\n\n # decide which key to read from the .mat file\n if self.func_tag == None:\n self.tag = list(self.ts_wcsmtSNR.keys())[-1]\n else:\n self.tag = self.func_tag\n\n # select data\n self.ts_wcsmtSNR = self.ts_wcsmtSNR[self.tag]\n self.ts_complex = self.ts_wcsmtSNR\n self.ts_magnitude = np.abs(self.ts_wcsmtSNR)\n\n elif func_file.endswith(\"npy\") or isinstance(func_file, np.ndarray):\n self.ts_magnitude = np.load(func_file)\n elif func_file.endswith(\"nii\") or func_file.endswith(\"gz\"):\n fdata = nb.load(func_file).get_fdata()\n xdim,ydim,zdim,time_points = fdata.shape\n self.ts_magnitude = fdata.reshape(xdim*ydim*zdim, time_points)\n else:\n raise NotImplementedError()\n\n # trim beginning and end\n if deleted_last_timepoints != 0:\n self.ts_corrected = self.ts_magnitude[:,deleted_first_timepoints:-deleted_last_timepoints]\n else:\n self.ts_corrected = self.ts_magnitude[:,deleted_first_timepoints:]\n\n if self.verbose:\n print(f\" Cutting {deleted_first_timepoints} volumes from beginning\")\n\n self.vox_cols = [f'vox {x}' for x in range(self.ts_corrected.shape[0])]\n\n #----------------------------------------------------------------------------------------------------------------------------------------------------\n # STANDARDIZATION OF UNFILTERED DATA & CREATE DATAFRAMES\n\n # dataframe of raw, unfiltered data\n self.data_raw = self.ts_corrected.copy()\n self.data_raw_df = self.index_func(self.data_raw, \n columns=self.vox_cols, \n subject=self.sub, \n run=run, \n TR=self.TR,\n set_index=True)\n\n # dataframe of unfiltered PSC-data\n self.data_psc = utils.percent_change(self.data_raw, -1)\n self.data_psc_df = self.index_func(self.data_psc,\n columns=self.vox_cols, \n subject=self.sub,\n run=run, \n TR=self.TR, \n set_index=True)\n\n # dataframe of unfiltered z-scored data\n self.data_zscore = clean(self.data_raw.T, standardize=True).T\n self.data_zscore_df = self.index_func(self.data_zscore,\n 
columns=self.vox_cols, \n subject=self.sub, \n run=run, \n TR=self.TR,\n set_index=True)\n\n #----------------------------------------------------------------------------------------------------------------------------------------------------\n # HIGH PASS FILTER\n\n if self.verbose:\n print(f\" DCT-high pass filter [removes low frequencies <{self.lb} Hz]\")\n\n self.hp_raw, self._cosine_drift = preproc.highpass_dct(self.data_raw, self.lb, TR=self.TR)\n self.hp_raw_df = self.index_func(self.hp_raw,\n columns=self.vox_cols, \n subject=self.sub, \n run=run, \n TR=self.TR,\n set_index=True)\n\n # dataframe of high-passed PSC-data\n self.hp_psc = utils.percent_change(self.hp_raw, -1)\n self.hp_psc_df = self.index_func(self.hp_psc,\n columns=self.vox_cols, \n subject=self.sub,\n run=run, \n TR=self.TR, \n set_index=True)\n\n # dataframe of high-passed z-scored data\n self.hp_zscore = clean(self.hp_raw.T, standardize=True).T\n self.hp_zscore_df = self.index_func(self.hp_zscore,\n columns=self.vox_cols, \n subject=self.sub, \n run=run, \n TR=self.TR,\n set_index=True)\n\n # save SD and Mean so we can go from zscore back to original\n self.zscore_SD = self.hp_raw.std(axis=-1, keepdims=True)\n self.zscore_M = self.hp_raw.mean(axis=-1, keepdims=True)\n\n #----------------------------------------------------------------------------------------------------------------------------------------------------\n # ACOMPCOR AFTER HIGH-PASS FILTERING\n if acompcor:\n\n # do some checks beforehand\n if reference_slice != None:\n if self.use_bids:\n bids_comps = utils.split_bids_components(reference_slice)\n setattr(self, \"target_session\", bids_comps['ses'])\n setattr(self, \"subject\", f\"sub-{bids_comps['sub']}\")\n else:\n assert hasattr(self, \"target_session\"), f\"Please specify a target_session with 'target_session=<int>'\"\n assert hasattr(self, \"subject\"), f\"Please specify a subject with 'target_session=<int>'\"\n\n # check the transformations inputs\n assert hasattr(self, \"ses1_2_ls\"), f\"Please specify a transformation matrix mapping FreeSurfer to ses-{self.target_session}\"\n\n if hasattr(self, \"run_2_run\"):\n if isinstance(self.run_2_run, list):\n run_trafo = utils.get_file_from_substring(f\"to-run{self.run}\", self.run_2_run)\n self.trafos = [self.ses1_2_ls, run_trafo]\n else:\n if self.run_2_run != None:\n self.trafos = [self.ses1_2_ls, self.run_2_run]\n else:\n self.trafos = [self.ses1_2_ls]\n else:\n self.trafos = self.ses1_2_ls \n\n # aCompCor implemented in `preproc` module\n self.acomp = preproc.aCompCor(self.hp_zscore_df,\n subject=self.subject,\n run=self.run,\n trg_session=self.target_session,\n reference_slice=reference_slice,\n trafo_list=self.trafos,\n n_pca=self.n_pca,\n filter_pca=self.filter_pca,\n save_as=self.save_as,\n select_component=self.select_component, \n summary_plot=self.verbose,\n TR=self.TR,\n foldover=self.foldover,\n verbose=self.verbose,\n save_ext=self.save_ext,\n **kwargs)\n \n self.hp_acomp_df = self.index_func(self.acomp.acomp_data,\n columns=self.vox_cols, \n subject=self.sub, \n run=run, \n TR=self.TR,\n set_index=True)\n \n # multiply by SD and add mean\n self.hp_acomp_raw = (self.acomp.acomp_data * self.zscore_SD) + self.zscore_M\n self.hp_acomp_raw_df = self.index_func(self.hp_acomp_raw,\n columns=self.vox_cols, \n subject=self.sub, \n run=run, \n TR=self.TR,\n set_index=True)\n\n # make percent signal\n self.hp_acomp_psc = utils.percent_change(self.hp_acomp_raw, -1)\n self.hp_acomp_psc_df = self.index_func(self.hp_acomp_psc,\n columns=self.vox_cols, 
\n subject=self.sub, \n run=run, \n TR=self.TR,\n set_index=True) \n \n #----------------------------------------------------------------------------------------------------------------------------------------------------\n # LOW PASS FILTER\n if self.low_pass:\n\n if acompcor:\n info = \" Using aCompCor-data for low-pass filtering\"\n data_for_filtering = self.get_acompcor(index=True, filter_strategy=\"hp\", dtype=self.standardization).T.values\n out_attr = f\"lp_acomp_{self.standardization}\"\n elif hasattr(self, f\"hp_{self.standardization}\"):\n info = \" Using high-pass filtered data for low-pass filtering\"\n data_for_filtering = getattr(self, f\"hp_{self.standardization}\")\n out_attr = f\"lp_{self.standardization}\"\n else:\n info = \" Using unfiltered/un-aCompCor'ed data for low-pass filtering\"\n data_for_filtering = getattr(self, f\"data_{self.standardization}\")\n out_attr = f\"lp_data_{self.standardization}\"\n\n if self.verbose:\n print(info)\n print(f\" Savitsky-Golay low-pass filter [removes high frequencies] (window={self.window_size}, order={self.poly_order})\")\n\n tmp_filtered = preproc.lowpass_savgol(data_for_filtering, window_length=self.window_size, polyorder=self.poly_order)\n\n tmp_filtered_df = self.index_func(tmp_filtered,\n columns=self.vox_cols,\n subject=self.sub,\n run=run,\n TR=self.TR,\n set_index=True)\n\n setattr(self, out_attr, tmp_filtered.copy())\n setattr(self, f'{out_attr}_df', tmp_filtered_df.copy())\n\n\n def select_voxels_across_runs(self):\n\n if self.verbose:\n fig = plt.figure(figsize=(20,10))\n gs = fig.add_gridspec(3,1, hspace=0.4)\n\n self.voxel_classification = {}\n self.tissue_probabilities = {}\n\n # get the nice blue from CSF-regressor\n if hasattr(self.acomp_objs[0], \"regressor_voxel_colors\"):\n use_color = self.acomp_objs[0].regressor_voxel_colors[0]\n else:\n use_color = \"#0062C7\"\n\n for ix, seg in enumerate(['wm', 'csf', 'gm']):\n \n self.tissue_probabilities[seg] = []\n for compcor in self.acomp_objs:\n compcor.segmentations_to_beam()\n self.tissue_probabilities[seg].append(\n compcor.segmentations_in_beam[compcor.subject][seg][..., np.newaxis])\n\n img = np.concatenate((self.tissue_probabilities[seg]), axis=-1)\n avg_runs = img.mean(axis=-1).mean(axis=-1)\n # avg_err = stats.sem(avg_runs, axis=-1)\n avg_err = img.mean(axis=-1).std(axis=-1)\n\n if self.verbose:\n ax = fig.add_subplot(gs[ix])\n add_hline = {'pos': self.tissue_thresholds[ix], 'color': 'k', 'ls': '--', 'lw': 1}\n\n # add indication for new classification\n self.voxel_classification[seg] = np.where(avg_runs > self.tissue_thresholds[ix])[0]\n for ii in self.voxel_classification[seg]:\n ax.axvline(ii, alpha=0.3, color=\"#cccccc\")\n\n plotting.LazyPlot(avg_runs,\n axs=ax,\n error=avg_err,\n title=f'Average probability of {seg.upper()}',\n font_size=16,\n linewidth=2,\n color=use_color,\n sns_trim=True,\n line_width=2,\n add_hline=add_hline)\n\n \n if self.save_as != None:\n fname = self.save_as+f\"_desc-tissue_classification.{self.save_ext}\"\n \n if self.verbose:\n print(f\" Saving {fname}\")\n\n fig.savefig(fname) \n \n def apply_retroicor(self, run=1, **kwargs):\n\n # we should have df_physio dataframe from ParsePhysioFile\n if hasattr(self, \"df_physio\"):\n try:\n # select subset of df_physio. 
Run IDs must correspond!\n self.confs = utils.select_from_df(self.df_physio, expression=f\"run = {self.run}\")\n except:\n raise ValueError(f\"Could not extract dataframe from 'df_physio' with expression: 'run = {self.run}'\")\n\n if hasattr(self, \"data_zscore\"):\n\n self.z_score = getattr(self, \"data_zscore\").copy()\n\n for trace in ['hr', 'rvt']:\n if trace in list(self.confs.columns):\n self.confs = self.confs.drop(columns=[trace])\n\n # regress out the confounds with clean\n if self.verbose:\n print(\" RETROICOR on 'data_zscore'\")\n\n cardiac = utils.select_from_df(self.confs, expression='ribbon', indices=(0,self.orders[0]))\n respiration = utils.select_from_df(self.confs, expression='ribbon', indices=(self.orders[0],self.orders[0]+self.orders[1]))\n interaction = utils.select_from_df(self.confs, expression='ribbon', indices=(self.orders[0]+self.orders[1],len(list(self.confs.columns))))\n\n self.clean_all = clean(self.z_score.T, standardize=False, confounds=self.confs.values).T\n self.clean_resp = clean(self.z_score.T, standardize=False, confounds=respiration.values).T\n self.clean_cardiac = clean(self.z_score.T, standardize=False, confounds=cardiac.values).T\n self.clean_interaction = clean(self.z_score.T, standardize=False, confounds=interaction.values).T\n\n # create the dataframes\n self.z_score_df = self.index_func(self.z_score, columns=self.vox_cols, subject=self.sub, run=run, TR=self.TR)\n\n self.z_score_retroicor_df = self.index_func(self.clean_all, columns=self.vox_cols, subject=self.sub, run=run, TR=self.TR)\n\n print(self.z_score.shape)\n self.r2_all = 1-(np.var(self.clean_all, -1) / np.var(self.z_score, -1))\n self.r2_resp = 1-(np.var(self.clean_resp, -1) / np.var(self.z_score, -1))\n self.r2_cardiac = 1-(np.var(self.clean_cardiac, -1) / np.var(self.z_score, -1))\n self.r2_interaction = 1-(np.var(self.clean_interaction, -1) / np.var(self.z_score, -1))\n \n # save in a subject X run X voxel manner\n self.r2_physio = {'all': self.r2_all, \n 'respiration': self.r2_resp, \n 'cardiac': self.r2_cardiac, \n 'interaction': self.r2_interaction}\n\n self.r2_physio_df = pd.DataFrame(self.r2_physio)\n self.r2_physio_df['subject'], self.r2_physio_df['run'], self.r2_physio_df['vox'] = self.sub, run, np.arange(0,self.r2_all.shape[0])\n\n setattr(self, \"data_zscore_retroicor\", self.z_score_retroicor_df)\n setattr(self, \"data_zscore_retroicor_r2\", self.r2_physio_df)\n\n def get_retroicor(self, index=False):\n if hasattr(self, 'z_score_retroicor_df'):\n if index:\n return self.z_score_retroicor_df.set_index(['subject', 'run', 't'])\n else:\n return self.z_score_retroicor_df\n\n def get_acompcor(self, index=False, filter_strategy=None, dtype=None):\n\n if dtype == None or dtype == \"zscore\":\n tag = \"_\"\n else:\n tag = f\"_{dtype}_\"\n\n if filter_strategy == None:\n attr = f\"acomp{tag}df\"\n elif filter_strategy == \"lp\":\n attr = f\"lp_acomp{tag}df\"\n elif filter_strategy == \"hp\":\n attr = f\"hp_acomp{tag}df\"\n else:\n raise ValueError(f\"Invalid filter strategy '{filter_strategy}'. Must be None, 'hp', or 'lp'\")\n\n if hasattr(self, attr):\n data = getattr(self, attr)\n if index:\n try:\n return data.set_index(['subject', 'run', 't'])\n except:\n return data\n else:\n return data\n\n def get_data(self, filter_strategy=None, index=False, dtype=\"psc\"):\n\n if dtype != \"psc\" and dtype != \"zscore\" and dtype != \"raw\":\n raise ValueError(f\"Requested data type '{dtype}' is not supported. 
Use 'psc', 'zscore', or 'raw'\")\n\n return_data = None\n allowed = [None, \"raw\", \"hp\", \"lp\"]\n\n if filter_strategy == None or filter_strategy == \"raw\":\n attr = f\"data_{dtype}_df\"\n elif filter_strategy == \"lp\":\n attr = f\"lp_{dtype}_df\"\n elif filter_strategy == \"hp\":\n attr = f\"hp_{dtype}_df\"\n else:\n raise ValueError(f\"Unknown attribute '{filter_strategy}'. Must be one of: {allowed}\")\n\n if hasattr(self, attr):\n # print(f\" Fetching attribute: {attr}\")\n return_data = getattr(self, attr)\n\n if isinstance(return_data, pd.DataFrame):\n if index:\n return return_data.set_index(['subject', 'run', 't'])\n else:\n return return_data\n else:\n raise ValueError(f\"No dataframe was found with search term: '{filter_strategy}' and standardization method '{dtype}'\")\n\n @staticmethod\n def index_func(array, columns=None, subject=1, run=1, TR=0.105, set_index=False):\n \n if columns == None:\n df = pd.DataFrame(array.T)\n else:\n df = pd.DataFrame(array.T, columns=columns)\n \n df['subject'] = subject\n df['run'] = run\n df['t'] = list(TR*np.arange(df.shape[0]))\n\n if set_index:\n return df.set_index(['subject', 'run', 't'])\n else:\n return df\n\nclass Dataset(ParseFuncFile):\n \"\"\"Dataset\n\n Main class for retrieving, formatting, and preprocessing of all datatypes including fMRI (2D), eyetracker (*.edf), physiology (*.log [WIP]), and experiment files derived from `Exptools2` (*.tsv). If you leave `subject` and `run` empty, these elements will be derived from the file names. So if you have BIDS-like files, leave them empty and the dataframe will be created for you with the correct subject/run IDs\n \n Parameters\n ----------\n func_file: str, list\n path or list of paths pointing to the output file of the experiment\n tsv_file: str\n path pointing to the output file of the experiment \n edf_file: str, list\n path pointing to the output file of the experiment; can be a list of multiple\n phys_file: str, list\n output from PhysIO-toolbox containing the regressors that we need to implement for RETROICOR\n phys_mat: str, list\n output *.mat file containing the heart rate and respiration traces\n subject: int, optional\n subject number in the returned pandas DataFrame (should start with 1, ..., n)\n run: int, optional\n run number you'd like to have the onset times for\n lb: float, optional\n lower bound for signal filtering\n TR: float, optional\n repetition time to correct onset times for deleted volumes\n button: bool\n boolean whether to include onset times of button responses (default is false) \n deleted_first_timepoints: int, list, optional\n number of volumes deleted at the beginning of the timeseries. Can be specified for each individual run if `func_file` is a list\n deleted_last_timepoints: int, list, optional\n number of volumes deleted at the end of the timeseries. Can be specified for each individual run if `func_file` is a list\n window_size: int, optional\n size of window for rolling median and Savitsky-Golay filter\n poly_order: int, optional\n The order of the polynomial used to fit the samples. 
polyorder must be less than window_length.\n use_bids: bool, optional\n If true, we'll read BIDS-components such as 'sub', 'run', 'task', etc from the input file and use those as indexers, rather than sequential 1,2,3.\n verbose: bool, optional\n Print details to the terminal, default is False\n retroicor: bool, optional\n WIP: implementation of retroicor, requires the specification of `phys_file` and `phys_mat` containing the output from the PhysIO-toolbox\n n_pca: int, optional\n Number of components to use for WM/CSF PCA during aCompCor\n select_component: int, optional\n If `verbose=True` and `aCompcor=True`, we'll create a scree-plot of the PCA components. With this flag, you can re-run this call but regress out only this particular component. [Deprecated: `filter_pca` is much more effective]\n filter_pca: float, optional\n High-pass filter the components from the PCA during aCompCor. This seems to be pretty effective. Default is 0.2Hz.\n ses1_2_ls: str, optional:\n Transformation mapping `ses-1` anatomy to current linescanning-session, ideally the multi-slice image that is acquired directly before the first `1slice`-image. Default is None.\n run_2_run: str, list, optional\n (List of) Transformation(s) mapping the slices of subsequent runs to the first acquired `1slice` image. Default is None.\n save_as: str, optional\n Directory + basename for several figures that can be created during the process (mainly during aCompCor) \n\n Example\n ----------\n >>> from linescanning import dataset, utils\n >>> func_dir = \"/some/dir\"\n >>> exp = utils.get_file_from_substring(\"tsv\", func_dir)\n >>> funcs = utils.get_file_from_substring(\"bold.mat\", func_dir)\n >>> # \n >>> # only cut from SR-runs\n >>> delete_first = 100\n >>> delete_last = 0\n >>> #\n >>> window = 19\n >>> order = 3\n >>> data = dataset.Dataset(funcs,\n >>> deleted_first_timepoints=delete_first,\n >>> deleted_last_timepoints=delete_last,\n >>> window_size=window,\n >>> high_pass=True,\n >>> low_pass=True,\n >>> poly_order=order,\n >>> tsv_file=exp,\n >>> verbose=True)\n >>> #\n >>> # retrieve data\n >>> fmri = data.fetch_fmri()\n >>> onsets = data.fetch_onsets()\n \"\"\"\n\n def __init__(self, \n func_file,\n subject=1,\n run=1,\n TR=0.105, \n tsv_file=None,\n edf_file=None,\n phys_file=None,\n phys_mat=None,\n low_pass=False,\n button=False,\n lb=0.01, \n hb=4,\n deleted_first_timepoints=0, \n deleted_last_timepoints=0, \n window_size=11,\n poly_order=3,\n attribute_tag=None,\n hdf_key=\"df\",\n use_bids=True,\n verbose=False,\n retroicor=False,\n filter=None,\n n_pca=5,\n select_component=None,\n filter_pca=0.2,\n ses1_2_ls=None,\n run_2_run=None,\n save_as=None,\n gm_range=[355,375],\n tissue_thresholds=[0.7,0.7,0.7],\n save_ext=\"pdf\",\n **kwargs):\n\n if verbose:\n print(\"DATASET\")\n \n self.read_attributes = ['df_func_psc', \n 'df_func_raw', \n 'df_retro_zscore', \n 'df_onsets', \n 'eye_in_func', \n 'blink_events']\n\n if isinstance(func_file, str) and func_file.endswith(\".h5\"):\n print(f\" Reading from {func_file}\")\n self.from_hdf(func_file)\n else:\n super().__init__(func_file,\n TR=TR,\n subject=subject,\n run=run,\n lb=lb,\n hb=hb,\n low_pass=low_pass,\n deleted_first_timepoints=deleted_first_timepoints,\n deleted_last_timepoints=deleted_last_timepoints,\n window_size=window_size,\n poly_order=poly_order,\n tsv_file=tsv_file,\n edf_file=edf_file,\n phys_file=phys_file,\n phys_mat=phys_mat,\n use_bids=use_bids,\n verbose=verbose,\n retroicor=retroicor,\n n_pca=n_pca,\n select_component=select_component,\n 
filter_pca=filter_pca,\n ses1_2_ls=ses1_2_ls,\n run_2_run=run_2_run,\n save_as=save_as,\n gm_range=gm_range,\n tissue_thresholds=tissue_thresholds,\n save_ext=save_ext,\n **kwargs)\n\n if verbose:\n print(\"\\nDATASET: created\")\n\n def fetch_fmri(self, strip_index=False, dtype=None):\n\n if dtype == None:\n if self.acompcor:\n dtype = \"acompcor\"\n else:\n dtype = self.standardization\n \n if dtype == \"psc\":\n attr = 'df_func_psc'\n elif dtype == \"retroicor\":\n attr = 'df_func_retroicor'\n elif dtype == \"raw\" or dtype == None:\n attr = 'df_func_raw'\n elif dtype == \"zscore\":\n attr = 'df_func_zscore'\n elif dtype == \"acompcor\":\n attr = 'df_func_acomp'\n else:\n raise ValueError(f\"Unknown option '{dtype}'. Must be 'psc', 'retroicor', 'acompcor', or 'zscore'\")\n\n if hasattr(self, attr):\n \n if self.verbose:\n print(f\"Fetching dataframe from attribute '{attr}'\")\n \n df = getattr(self, attr)\n if strip_index:\n return df.reset_index().drop(labels=['subject', 'run', 't'], axis=1) \n else:\n return df\n else:\n print(f\"Could not find '{attr}' attribute\")\n \n def fetch_onsets(self, strip_index=False):\n if hasattr(self, 'df_onsets'):\n if strip_index:\n return self.df_onsets.reset_index().drop(labels=list(self.df_onsets.index.names), axis=1)\n else:\n return self.df_onsets\n else:\n print(\"No event-data was provided\")\n\n def fetch_physio(self, strip_index=False):\n if hasattr(self, 'df_physio'):\n if strip_index:\n return self.df_physio.reset_index().drop(labels=list(self.df_physio.index.names), axis=1)\n else:\n return self.df_physio\n else:\n print(\"No physio-data was provided\") \n\n def fetch_trace(self, strip_index=False):\n if hasattr(self, 'eye_in_func'):\n if strip_index:\n return self.eye_in_func.reset_index().drop(labels=list(self.eye_in_func.index.names), axis=1)\n else:\n return self.eye_in_func\n else:\n print(\"No eyetracking-data was provided\")\n\n def fetch_blinks(self, strip_index=False):\n if hasattr(self, 'blink_events'):\n return self.blink_events\n else:\n print(\"No eyetracking-data was provided\")\n\n def to_hdf(self, output_file):\n \n if self.verbose:\n print(f\"Saving to {output_file}\")\n\n for attr in self.read_attributes:\n if hasattr(self, attr):\n \n if self.verbose:\n print(f\" Saving attribute: {attr}\")\n \n add_df = getattr(self, attr)\n if os.path.exists(output_file):\n add_df.to_hdf(output_file, key=attr, append=True, mode='r+', format='t')\n else:\n add_df.to_hdf(output_file, key=attr, mode='w', format='t')\n \n if self.verbose:\n print(\"Done\")\n\n def from_hdf(self, input_file):\n hdf_store = pd.HDFStore(input_file)\n hdf_keys = hdf_store.keys()\n for key in hdf_keys:\n key = key.strip(\"/\")\n \n if self.verbose:\n print(f\" Setting attribute: {key}\")\n\n setattr(self, key, hdf_store.get(key))\n\nclass DatasetCollector():\n\n def __init__(self, dataset_objects):\n\n self.datasets = dataset_objects\n if len(self.datasets) != None:\n self.data = []\n self.onsets = []\n for dataset in self.datasets:\n self.data.append(dataset.fetch_fmri())\n\n # check if we got onsets\n if hasattr(dataset, 'df_onsets'):\n onsets = True\n self.onsets.append(dataset.fetch_onsets())\n else:\n onsets = False\n\n self.data = pd.concat(self.data)\n if onsets:\n self.onsets = pd.concat(self.onsets)\n\n# this is basically a wrapper around pybest.utils.load_gifti\nclass ParseGiftiFile():\n\n def __init__(self, gifti_file, set_tr=None):\n\n self.gifti_file = gifti_file\n self.f_gif = nb.load(self.gifti_file)\n self.data = np.vstack([arr.data for arr in 
self.f_gif.darrays])\n self.set_tr = set_tr\n\n if set_tr != None:\n if len(self.f_gif.darrays[0].metadata) == 0:\n self.f_gif = self.set_metadata()\n elif int(float(self.f_gif.darrays[0].metadata['TimeStep'])) == 0:\n # int(float) construction from https://stackoverflow.com/questions/1841565/valueerror-invalid-literal-for-int-with-base-10\n self.f_gif = self.set_metadata()\n elif int(float(self.f_gif.darrays[0].metadata['TimeStep'])) == set_tr:\n pass\n else:\n raise ValueError(\"Could not update TR..\")\n \n self.meta = self.f_gif.darrays[0].metadata\n self.TR_ms = float(self.meta['TimeStep'])\n self.TR_sec = float(self.meta['TimeStep']) / 1000\n\n def set_metadata(self):\n \n # define metadata\n image_metadata = nb.gifti.GiftiMetaData().from_dict({'TimeStep': str(float(self.set_tr))})\n\n # copy old data and combine it with metadata\n darray = nb.gifti.GiftiDataArray(self.data, meta=image_metadata)\n\n # store in new gifti image object\n gifti_image = nb.GiftiImage()\n\n # add data to this object\n gifti_image.add_gifti_data_array(darray)\n \n # save in same file name\n nb.save(gifti_image, self.gifti_file)\n\n return gifti_image\n" ]
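The preprocessing in `preprocess_func_file` above standardizes each voxel's timecourse, runs aCompCor in z-space, and then inverts the standardization with the per-voxel SD and mean saved as `zscore_SD` and `zscore_M`. A minimal sketch of that round-trip (the array names here are illustrative, not attributes of the class):

import numpy as np

ts = np.random.rand(10, 200)             # voxels x timepoints
sd = ts.std(axis=-1, keepdims=True)      # what the class stores as zscore_SD
mu = ts.mean(axis=-1, keepdims=True)     # what the class stores as zscore_M
z = (ts - mu) / sd                       # roughly what clean(..., standardize=True) yields
restored = z * sd + mu                   # mirrors hp_acomp_raw = (acomp_data * zscore_SD) + zscore_M
assert np.allclose(restored, ts)         # the transform is exactly invertible

In the class, the aCompCor denoising happens between the two steps, so `hp_acomp_raw` is the cleaned signal mapped back to the original scale before the percent-signal-change conversion.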
[ [ "numpy.var", "numpy.savetxt", "numpy.vstack", "numpy.full_like", "matplotlib.pyplot.figure", "numpy.abs", "pandas.HDFStore", "numpy.where", "numpy.unique", "scipy.io.loadmat", "numpy.load", "pandas.read_csv", "numpy.arange", "numpy.hstack", "pandas.concat", "numpy.sort", "pandas.DataFrame", "numpy.array", "numpy.concatenate" ] ]
NickVeld/scikit-learn-proj
[ "9694a5641a7abbec96c93817aed88ce827dbacd3" ]
[ "sklearn/linear_model/bayes.py" ]
[ "\"\"\"\nVarious bayesian regression\n\"\"\"\nfrom __future__ import print_function\n\n# Authors: V. Michel, F. Pedregosa, A. Gramfort\n# License: BSD 3 clause\n\nfrom math import log\nimport numpy as np\nfrom scipy import linalg\n\nfrom .base import LinearModel\nfrom ..base import RegressorMixin\nfrom ..utils.extmath import fast_logdet, pinvh\nfrom ..utils import check_X_y\n\n\n###############################################################################\n# BayesianRidge regression\n\nclass BayesianRidge(LinearModel, RegressorMixin):\n \"\"\"Bayesian ridge regression\n\n Fit a Bayesian ridge model and optimize the regularization parameters\n lambda (precision of the weights) and alpha (precision of the noise).\n\n Read more in the :ref:`User Guide <bayesian_regression>`.\n\n Parameters\n ----------\n n_iter : int, optional\n Maximum number of iterations. Default is 300.\n\n tol : float, optional\n Stop the algorithm if w has converged. Default is 1.e-3.\n\n alpha_1 : float, optional\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the alpha parameter. Default is 1.e-6\n\n alpha_2 : float, optional\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the alpha parameter.\n Default is 1.e-6.\n\n lambda_1 : float, optional\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the lambda parameter. Default is 1.e-6.\n\n lambda_2 : float, optional\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the lambda parameter.\n Default is 1.e-6\n\n compute_score : boolean, optional\n If True, compute the objective function at each step of the model.\n Default is False\n\n fit_intercept : boolean, optional\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n Default is True.\n\n normalize : boolean, optional, default False\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n\n copy_X : boolean, optional, default True\n If True, X will be copied; else, it may be overwritten.\n\n verbose : boolean, optional, default False\n Verbose mode when fitting the model.\n\n\n Attributes\n ----------\n coef_ : array, shape = (n_features)\n Coefficients of the regression model (mean of distribution)\n\n alpha_ : float\n estimated precision of the noise.\n\n lambda_ : float\n estimated precision of the weights.\n\n sigma_ : array, shape = (n_features, n_features)\n estimated variance-covariance matrix of the weights\n\n scores_ : float\n if computed, value of the objective function (to be maximized)\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.BayesianRidge()\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])\n ... 
# doctest: +NORMALIZE_WHITESPACE\n BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,\n copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,\n n_iter=300, normalize=False, tol=0.001, verbose=False)\n >>> clf.predict([[1, 1]])\n array([ 1.])\n\n Notes\n -----\n See examples/linear_model/plot_bayesian_ridge.py for an example.\n\n References\n ----------\n D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems,\n Vol. 4, No. 3, 1992.\n\n R. Salakhutdinov, Lecture notes on Statistical Machine Learning,\n http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15\n Their beta is our self.alpha_\n Their alpha is our self.lambda_\n \"\"\"\n\n def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,\n lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,\n fit_intercept=True, normalize=False, copy_X=True,\n verbose=False):\n self.n_iter = n_iter\n self.tol = tol\n self.alpha_1 = alpha_1\n self.alpha_2 = alpha_2\n self.lambda_1 = lambda_1\n self.lambda_2 = lambda_2\n self.compute_score = compute_score\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.copy_X = copy_X\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"Fit the model\n\n Parameters\n ----------\n X : numpy array of shape [n_samples,n_features]\n Training data\n y : numpy array of shape [n_samples]\n Target values\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)\n X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X)\n self.X_offset_ = X_offset_\n self.X_scale_ = X_scale_\n n_samples, n_features = X.shape\n\n # Initialization of the values of the parameters\n alpha_ = 1. 
/ np.var(y)\n lambda_ = 1.\n\n verbose = self.verbose\n lambda_1 = self.lambda_1\n lambda_2 = self.lambda_2\n alpha_1 = self.alpha_1\n alpha_2 = self.alpha_2\n\n self.scores_ = list()\n coef_old_ = None\n\n XT_y = np.dot(X.T, y)\n U, S, Vh = linalg.svd(X, full_matrices=False)\n eigen_vals_ = S ** 2\n\n # Convergence loop of the bayesian ridge regression\n for iter_ in range(self.n_iter):\n\n # Compute mu and sigma\n # sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)\n # coef_ = sigma_^-1 * XT * y\n if n_samples > n_features:\n coef_ = np.dot(Vh.T,\n Vh / (eigen_vals_ +\n lambda_ / alpha_)[:, np.newaxis])\n coef_ = np.dot(coef_, XT_y)\n if self.compute_score:\n logdet_sigma_ = - np.sum(\n np.log(lambda_ + alpha_ * eigen_vals_))\n else:\n coef_ = np.dot(X.T, np.dot(\n U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))\n coef_ = np.dot(coef_, y)\n if self.compute_score:\n logdet_sigma_ = lambda_ * np.ones(n_features)\n logdet_sigma_[:n_samples] += alpha_ * eigen_vals_\n logdet_sigma_ = - np.sum(np.log(logdet_sigma_))\n\n # Update alpha and lambda\n rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)\n gamma_ = (np.sum((alpha_ * eigen_vals_) /\n (lambda_ + alpha_ * eigen_vals_)))\n lambda_ = ((gamma_ + 2 * lambda_1) /\n (np.sum(coef_ ** 2) + 2 * lambda_2))\n alpha_ = ((n_samples - gamma_ + 2 * alpha_1) /\n (rmse_ + 2 * alpha_2))\n\n # Compute the objective function\n if self.compute_score:\n s = lambda_1 * log(lambda_) - lambda_2 * lambda_\n s += alpha_1 * log(alpha_) - alpha_2 * alpha_\n s += 0.5 * (n_features * log(lambda_) +\n n_samples * log(alpha_) -\n alpha_ * rmse_ -\n (lambda_ * np.sum(coef_ ** 2)) -\n logdet_sigma_ -\n n_samples * log(2 * np.pi))\n self.scores_.append(s)\n\n # Check for convergence\n if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:\n if verbose:\n print(\"Convergence after \", str(iter_), \" iterations\")\n break\n coef_old_ = np.copy(coef_)\n\n self.alpha_ = alpha_\n self.lambda_ = lambda_\n self.coef_ = coef_\n sigma_ = np.dot(Vh.T,\n Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis])\n self.sigma_ = (1. / alpha_) * sigma_\n\n self._set_intercept(X_offset_, y_offset_, X_scale_)\n return self\n\n def predict(self, X, return_std=False):\n \"\"\"Predict using the linear model.\n\n In addition to the mean of the predictive distribution, also its\n standard deviation can be returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = (n_samples, n_features)\n Samples.\n\n return_std : boolean, optional\n Whether to return the standard deviation of posterior prediction.\n\n Returns\n -------\n y_mean : array, shape = (n_samples,)\n Mean of predictive distribution of query points.\n\n y_std : array, shape = (n_samples,)\n Standard deviation of predictive distribution of query points.\n \"\"\"\n y_mean = self._decision_function(X)\n if return_std is False:\n return y_mean\n else:\n if self.normalize:\n X = (X - self.X_offset_) / self.X_scale_\n sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)\n y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))\n return y_mean, y_std\n\n\n###############################################################################\n# ARD (Automatic Relevance Determination) regression\n\n\nclass ARDRegression(LinearModel, RegressorMixin):\n \"\"\"Bayesian ARD regression.\n\n Fit the weights of a regression model, using an ARD prior. 
The weights of\n the regression model are assumed to be in Gaussian distributions.\n Also estimate the parameters lambda (precisions of the distributions of the\n weights) and alpha (precision of the distribution of the noise).\n The estimation is done by an iterative procedure (Evidence Maximization).\n\n Read more in the :ref:`User Guide <bayesian_regression>`.\n\n Parameters\n ----------\n n_iter : int, optional\n Maximum number of iterations. Default is 300\n\n tol : float, optional\n Stop the algorithm if w has converged. Default is 1.e-3.\n\n alpha_1 : float, optional\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the alpha parameter. Default is 1.e-6.\n\n alpha_2 : float, optional\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the alpha parameter. Default is 1.e-6.\n\n lambda_1 : float, optional\n Hyper-parameter : shape parameter for the Gamma distribution prior\n over the lambda parameter. Default is 1.e-6.\n\n lambda_2 : float, optional\n Hyper-parameter : inverse scale parameter (rate parameter) for the\n Gamma distribution prior over the lambda parameter. Default is 1.e-6.\n\n compute_score : boolean, optional\n If True, compute the objective function at each step of the model.\n Default is False.\n\n threshold_lambda : float, optional\n threshold for removing (pruning) weights with high precision from\n the computation. Default is 1.e+4.\n\n fit_intercept : boolean, optional\n whether to calculate the intercept for this model. If set\n to false, no intercept will be used in calculations\n (e.g. data is expected to be already centered).\n Default is True.\n\n normalize : boolean, optional, default False\n If True, the regressors X will be normalized before regression.\n This parameter is ignored when `fit_intercept` is set to False.\n When the regressors are normalized, note that this makes the\n hyperparameters learnt more robust and almost independent of the number\n of samples. The same property is not valid for standardized data.\n However, if you wish to standardize, please use\n `preprocessing.StandardScaler` before calling `fit` on an estimator\n with `normalize=False`.\n\n copy_X : boolean, optional, default True.\n If True, X will be copied; else, it may be overwritten.\n\n verbose : boolean, optional, default False\n Verbose mode when fitting the model.\n\n Attributes\n ----------\n coef_ : array, shape = (n_features)\n Coefficients of the regression model (mean of distribution)\n\n alpha_ : float\n estimated precision of the noise.\n\n lambda_ : array, shape = (n_features)\n estimated precisions of the weights.\n\n sigma_ : array, shape = (n_features, n_features)\n estimated variance-covariance matrix of the weights\n\n scores_ : float\n if computed, value of the objective function (to be maximized)\n\n Examples\n --------\n >>> from sklearn import linear_model\n >>> clf = linear_model.ARDRegression()\n >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])\n ... # doctest: +NORMALIZE_WHITESPACE\n ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,\n copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,\n n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,\n verbose=False)\n >>> clf.predict([[1, 1]])\n array([ 1.])\n\n Notes\n --------\n See examples/linear_model/plot_ard.py for an example.\n\n References\n ----------\n D. J. C. MacKay, Bayesian nonlinear modeling for the prediction\n competition, ASHRAE Transactions, 1994.\n\n R. 
Salakhutdinov, Lecture notes on Statistical Machine Learning,\n http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15\n Their beta is our self.alpha_\n Their alpha is our self.lambda_\n ARD is a little different than the slide: only dimensions/features for\n which self.lambda_ < self.threshold_lambda are kept and the rest are\n discarded.\n \"\"\"\n\n def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,\n lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,\n threshold_lambda=1.e+4, fit_intercept=True, normalize=False,\n copy_X=True, verbose=False):\n self.n_iter = n_iter\n self.tol = tol\n self.fit_intercept = fit_intercept\n self.normalize = normalize\n self.alpha_1 = alpha_1\n self.alpha_2 = alpha_2\n self.lambda_1 = lambda_1\n self.lambda_2 = lambda_2\n self.compute_score = compute_score\n self.threshold_lambda = threshold_lambda\n self.copy_X = copy_X\n self.verbose = verbose\n\n def fit(self, X, y):\n \"\"\"Fit the ARDRegression model according to the given training data\n and parameters.\n\n Iterative procedure to maximize the evidence\n\n Parameters\n ----------\n X : array-like, shape = [n_samples, n_features]\n Training vector, where n_samples in the number of samples and\n n_features is the number of features.\n y : array, shape = [n_samples]\n Target values (integers)\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)\n\n n_samples, n_features = X.shape\n coef_ = np.zeros(n_features)\n\n X, y, X_offset_, y_offset_, X_scale_ = self._preprocess_data(\n X, y, self.fit_intercept, self.normalize, self.copy_X)\n\n # Launch the convergence loop\n keep_lambda = np.ones(n_features, dtype=bool)\n\n lambda_1 = self.lambda_1\n lambda_2 = self.lambda_2\n alpha_1 = self.alpha_1\n alpha_2 = self.alpha_2\n verbose = self.verbose\n\n # Initialization of the values of the parameters\n alpha_ = 1. / np.var(y)\n lambda_ = np.ones(n_features)\n\n self.scores_ = list()\n coef_old_ = None\n\n # Iterative procedure of ARDRegression\n for iter_ in range(self.n_iter):\n # Compute mu and sigma (using Woodbury matrix identity)\n sigma_ = pinvh(np.eye(n_samples) / alpha_ +\n np.dot(X[:, keep_lambda] *\n np.reshape(1. / lambda_[keep_lambda], [1, -1]),\n X[:, keep_lambda].T))\n sigma_ = np.dot(sigma_, X[:, keep_lambda] *\n np.reshape(1. / lambda_[keep_lambda], [1, -1]))\n sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1]) *\n X[:, keep_lambda].T, sigma_)\n sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]\n coef_[keep_lambda] = alpha_ * np.dot(\n sigma_, np.dot(X[:, keep_lambda].T, y))\n\n # Update alpha and lambda\n rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)\n gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)\n lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1) /\n ((coef_[keep_lambda]) ** 2 +\n 2. * lambda_2))\n alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1) /\n (rmse_ + 2. 
* alpha_2))\n\n # Prune the weights with a precision over a threshold\n keep_lambda = lambda_ < self.threshold_lambda\n coef_[~keep_lambda] = 0\n\n # Compute the objective function\n if self.compute_score:\n s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()\n s += alpha_1 * log(alpha_) - alpha_2 * alpha_\n s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_) +\n np.sum(np.log(lambda_)))\n s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())\n self.scores_.append(s)\n\n # Check for convergence\n if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:\n if verbose:\n print(\"Converged after %s iterations\" % iter_)\n break\n coef_old_ = np.copy(coef_)\n\n self.coef_ = coef_\n self.alpha_ = alpha_\n self.sigma_ = sigma_\n self.lambda_ = lambda_\n self._set_intercept(X_offset_, y_offset_, X_scale_)\n return self\n\n def predict(self, X, return_std=False):\n \"\"\"Predict using the linear model.\n\n In addition to the mean of the predictive distribution, also its\n standard deviation can be returned.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = (n_samples, n_features)\n Samples.\n\n return_std : boolean, optional\n Whether to return the standard deviation of posterior prediction.\n\n Returns\n -------\n y_mean : array, shape = (n_samples,)\n Mean of predictive distribution of query points.\n\n y_std : array, shape = (n_samples,)\n Standard deviation of predictive distribution of query points.\n \"\"\"\n y_mean = self._decision_function(X)\n if return_std is False:\n return y_mean\n else:\n if self.normalize:\n X = (X - self.X_offset_) / self.X_scale_\n X = X[:, self.lambda_ < self.threshold_lambda]\n sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1)\n y_std = np.sqrt(sigmas_squared_data + (1. / self.alpha_))\n return y_mean, y_std\n" ]
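The `return_std=True` branch of `predict` above computes the posterior predictive standard deviation: `(np.dot(X, self.sigma_) * X).sum(axis=1)` is the diagonal of `X @ sigma_ @ X.T`, to which `1. / alpha_` (the noise variance) is added before the square root. A short usage sketch against this version of the estimator (illustrative values, no asserted outputs):

import numpy as np
from sklearn import linear_model

X = np.array([[0., 0.], [1., 1.], [2., 2.]])
y = np.array([0., 1., 2.])

clf = linear_model.BayesianRidge(compute_score=True).fit(X, y)

# y_std**2 == diag(X_new @ clf.sigma_ @ X_new.T) + 1 / clf.alpha_
X_new = np.array([[1., 1.]])
y_mean, y_std = clf.predict(X_new, return_std=True)

Note that when `normalize=True`, `predict` first rescales `X_new` with the stored `X_offset_` and `X_scale_`, so the quadratic form is evaluated in the normalized space.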
[ [ "numpy.ones", "numpy.sum", "numpy.eye", "numpy.zeros", "numpy.diag", "numpy.var", "numpy.reshape", "numpy.abs", "numpy.copy", "numpy.log", "numpy.sqrt", "numpy.dot", "scipy.linalg.svd" ] ]
danielgordon10/tensorflow
[ "395cfc42ee3c5842f5383f4049674c012998b133" ]
[ "tensorflow/python/ops/sparse_ops.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n# pylint: disable=g-short-docstring-punctuation\n\"\"\"## Sparse Tensor Representation\n\nTensorFlow supports a `SparseTensor` representation for data that is sparse\nin multiple dimensions. Contrast this representation with `IndexedSlices`,\nwhich is efficient for representing tensors that are sparse in their first\ndimension, and dense along all other dimensions.\n\n@@SparseTensor\n@@SparseTensorValue\n\n## Conversion\n\n@@sparse_to_dense\n@@sparse_tensor_to_dense\n@@sparse_to_indicator\n@@sparse_merge\n\n## Manipulation\n\n@@sparse_concat\n@@sparse_reorder\n@@sparse_reshape\n@@sparse_split\n@@sparse_retain\n@@sparse_reset_shape\n@@sparse_fill_empty_rows\n@@sparse_transpose\n\n## Reduction\n@@sparse_reduce_sum\n@@sparse_reduce_sum_sparse\n\n## Math Operations\n@@sparse_add\n@@sparse_softmax\n@@sparse_tensor_dense_matmul\n@@sparse_maximum\n@@sparse_minimum\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_sparse_ops\nfrom tensorflow.python.ops import math_ops\n# go/tf-wildcard-import\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_sparse_ops import *\n# pylint: enable=wildcard-import\nfrom tensorflow.python.util import deprecation\n\n\ndef _convert_to_sparse_tensor(sp_input):\n \"\"\"Convert `sp_input` to `SparseTensor` and return it.\n\n Args:\n sp_input: `SparseTensor` or `SparseTensorValue`.\n\n Returns:\n `sp_input` converted to `SparseTensor`.\n\n Raises:\n ValueError: if `sp_input` is neither `SparseTensor` nor `SparseTensorValue`.\n \"\"\"\n if isinstance(sp_input, sparse_tensor.SparseTensorValue):\n return sparse_tensor.SparseTensor.from_value(sp_input)\n if not isinstance(sp_input, sparse_tensor.SparseTensor):\n raise TypeError(\"Input must be a SparseTensor.\")\n return sp_input\n\n\ndef _convert_to_sparse_tensors(sp_inputs):\n \"\"\"Convert `sp_inputs` to `SparseTensor` objects and return them.\n\n Args:\n sp_inputs: `list` or `tuple` of `SparseTensor` or `SparseTensorValue`\n objects.\n\n Returns:\n `sp_inputs` converted to `SparseTensor` objects.\n\n Raises:\n ValueError: if any item in `sp_inputs` is neither `SparseTensor` nor\n `SparseTensorValue`.\n \"\"\"\n if isinstance(sp_inputs, list):\n return [_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs]\n if isinstance(sp_inputs, tuple):\n return (_convert_to_sparse_tensor(sp_input) for sp_input in sp_inputs)\n raise TypeError(\"Inputs must be a list or tuple.\")\n\n\n# pylint: 
disable=protected-access\ndef sparse_concat(axis,\n sp_inputs,\n name=None,\n expand_nonconcat_dim=False,\n concat_dim=None):\n \"\"\"Concatenates a list of `SparseTensor` along the specified dimension.\n\n Concatenation is with respect to the dense versions of each sparse input.\n It is assumed that each input is a `SparseTensor` whose elements are ordered\n along increasing dimension number.\n\n If expand_nonconcat_dim is False, all inputs' shapes must match, except for\n the concat dimension. If expand_nonconcat_dim is True, then inputs' shapes are\n allowed to vary among all inputs.\n\n The `indices`, `values`, and `shapes` lists must have the same length.\n\n If expand_nonconcat_dim is False, then the output shape is identical to the\n inputs', except along the concat dimension, where it is the sum of the inputs'\n sizes along that dimension.\n\n If expand_nonconcat_dim is True, then the output shape along the non-concat\n dimensions will be expanded to be the largest among all inputs, and it is the\n sum of the inputs' sizes along the concat dimension.\n\n The output elements will be resorted to preserve the sort order along\n increasing dimension number.\n\n This op runs in `O(M log M)` time, where `M` is the total number of non-empty\n values across all inputs. This is due to the need for an internal sort in\n order to concatenate efficiently across an arbitrary dimension.\n\n For example, if `axis = 1` and the inputs are\n\n sp_inputs[0]: shape = [2, 3]\n [0, 2]: \"a\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n sp_inputs[1]: shape = [2, 4]\n [0, 1]: \"d\"\n [0, 2]: \"e\"\n\n then the output will be\n\n shape = [2, 7]\n [0, 2]: \"a\"\n [0, 4]: \"d\"\n [0, 5]: \"e\"\n [1, 0]: \"b\"\n [1, 1]: \"c\"\n\n Graphically this is equivalent to doing\n\n [ a] concat [ d e ] = [ a d e ]\n [b c ] [ ] [b c ]\n\n Another example, if 'axis = 1' and the inputs are\n\n sp_inputs[0]: shape = [3, 3]\n [0, 2]: \"a\"\n [1, 0]: \"b\"\n [2, 1]: \"c\"\n\n sp_inputs[1]: shape = [2, 4]\n [0, 1]: \"d\"\n [0, 2]: \"e\"\n\n if expand_nonconcat_dim = False, this will result in an error. But if\n expand_nonconcat_dim = True, this will result in:\n\n shape = [3, 7]\n [0, 2]: \"a\"\n [0, 4]: \"d\"\n [0, 5]: \"e\"\n [1, 0]: \"b\"\n [2, 1]: \"c\"\n\n Graphically this is equivalent to doing\n\n [ a] concat [ d e ] = [ a d e ]\n [b ] [ ] [b ]\n [ c ] [ c ]\n\n\n Args:\n axis: Dimension to concatenate along. Must be in range [-rank, rank),\n where rank is the number of dimensions in each input `SparseTensor`.\n sp_inputs: List of `SparseTensor` to concatenate.\n name: A name prefix for the returned tensors (optional).\n expand_nonconcat_dim: Whether to allow the expansion in the non-concat\n dimensions. 
Defaulted to False.\n concat_dim: The old (deprecated) name for axis.\n\n Returns:\n A `SparseTensor` with the concatenated output.\n\n Raises:\n TypeError: If `sp_inputs` is not a list of `SparseTensor`.\n \"\"\"\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"concat_dim\",\n concat_dim)\n sp_inputs = _convert_to_sparse_tensors(sp_inputs)\n\n if len(sp_inputs) == 1: # Degenerate case of one tensor.\n return sp_inputs[0]\n\n inds = [sp_input.indices for sp_input in sp_inputs]\n vals = [sp_input.values for sp_input in sp_inputs]\n shapes = [sp_input.dense_shape for sp_input in sp_inputs]\n\n if expand_nonconcat_dim:\n max_shape = math_ops.reduce_max(\n array_ops.concat(0, [array_ops.reshape(shape, [1, -1])\n for shape in shapes]), 0)\n shapes = [array_ops.concat(0, [\n max_shape[:axis], shape[-1:] if axis == -1 else\n shape[axis:axis + 1], [] if axis == -1 else\n max_shape[axis + 1:]\n ]) for shape in shapes]\n\n output_ind, output_val, output_shape = (gen_sparse_ops._sparse_concat(\n inds, vals, shapes, axis, name=name))\n\n return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)\n\n\ndef sparse_add(a, b, thresh=0):\n \"\"\"Adds two tensors, at least one of each is a `SparseTensor`.\n\n If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If\n both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order\n of arguments does not matter. Use vanilla `tf.add()` for adding two dense\n `Tensor`s.\n\n The indices of any input `SparseTensor` are assumed ordered in standard\n lexicographic order. If this is not the case, before this step run\n `SparseReorder` to restore index ordering.\n\n If both arguments are sparse, we perform \"clipping\" as follows. By default,\n if two values sum to zero at some index, the output `SparseTensor` would still\n include that particular location in its index, storing a zero in the\n corresponding value slot. To override this, callers can specify `thresh`,\n indicating that if the sum has a magnitude strictly smaller than `thresh`, its\n corresponding value and index would then not be included. In particular,\n `thresh == 0.0` (default) means everything is kept and actual thresholding\n happens only for a positive value.\n\n For example, suppose the logical sum of two sparse operands is (densified):\n\n [ 2]\n [.1 0]\n [ 6 -.2]\n\n Then,\n\n * `thresh == 0` (the default): all 5 index/value pairs will be returned.\n * `thresh == 0.11`: only .1 and 0 will vanish, and the remaining three\n index/value pairs will be returned.\n * `thresh == 0.21`: .1, 0, and -.2 will vanish.\n\n Args:\n a: The first operand; `SparseTensor` or `Tensor`.\n b: The second operand; `SparseTensor` or `Tensor`. At least one operand\n must be sparse.\n thresh: A 0-D `Tensor`. The magnitude threshold that determines if an\n output value/index pair takes space. Its dtype should match that of the\n values if they are real; if the latter are complex64/complex128, then the\n dtype should be float32/float64, correspondingly.\n\n Returns:\n A `SparseTensor` or a `Tensor`, representing the sum.\n\n Raises:\n TypeError: If both `a` and `b` are `Tensor`s. 
Use `tf.add()` instead.\n \"\"\"\n sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)\n if not any(isinstance(inp, sparse_classes) for inp in [a, b]):\n raise TypeError(\"At least one input should be SparseTensor; do you mean to\"\n \" use tf.add()?\")\n\n if all(isinstance(inp, sparse_classes) for inp in [a, b]):\n a = _convert_to_sparse_tensor(a)\n thresh = ops.convert_to_tensor(\n thresh, dtype=a.values.dtype.real_dtype, name=\"thresh\")\n output_ind, output_val, output_shape = (gen_sparse_ops._sparse_add(\n a.indices, a.values, a.dense_shape,\n b.indices, b.values, b.dense_shape,\n thresh))\n return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)\n else:\n # swap to make `a` the SparseTensor.\n if isinstance(b, sparse_classes):\n a, b = b, a\n return gen_sparse_ops._sparse_tensor_dense_add(\n a.indices, a.values, a.dense_shape, b)\n\n\ndef sparse_dense_cwise_add(sp_t, dense_t):\n \"\"\"Adds up a SparseTensor and a dense Tensor, using these special rules:\n\n (1) Broadcasts the dense side to have the same shape as the sparse side, if\n eligible;\n (2) Then, only the dense values pointed to by the indices of the SparseTensor\n participate in the cwise addition.\n\n By the rules, the result is a logical SparseTensor with exactly the same\n indices and shape, but possibly with different non-zero values. The output of\n this Op is the resultant non-zero values.\n\n Args:\n sp_t: the SparseTensor operand.\n dense_t: the dense Tensor operand; must have the same dtype and a\n broadcast-compatible shape as `sp_t`.\n\n Returns:\n output: the SparseTensor output.\n \"\"\"\n result = gen_sparse_ops.sparse_dense_cwise_add(sp_t.indices, sp_t.values,\n sp_t.dense_shape, dense_t)\n return sparse_tensor.SparseTensor(sp_t.indices, result, sp_t.dense_shape)\n\n\ndef sparse_reorder(sp_input, name=None):\n \"\"\"Reorders a `SparseTensor` into the canonical, row-major ordering.\n\n Note that by convention, all sparse ops preserve the canonical ordering\n along increasing dimension number. The only time ordering can be violated\n is during manual manipulation of the indices and values to add entries.\n\n Reordering does not affect the shape of the `SparseTensor`.\n\n For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:\n\n [0, 3]: b\n [0, 1]: a\n [3, 1]: d\n [2, 0]: c\n\n then the output will be a `SparseTensor` of shape `[4, 5]` and\n `indices` / `values`:\n\n [0, 1]: a\n [0, 3]: b\n [2, 0]: c\n [3, 1]: d\n\n Args:\n sp_input: The input `SparseTensor`.\n name: A name prefix for the returned tensors (optional)\n\n Returns:\n A `SparseTensor` with the same shape and non-empty values, but in\n canonical ordering.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n reordered_ind, reordered_val = (gen_sparse_ops._sparse_reorder(\n sp_input.indices, sp_input.values, sp_input.dense_shape, name=name))\n\n return sparse_tensor.SparseTensor(reordered_ind, reordered_val,\n array_ops.identity(sp_input.dense_shape))\n\n\ndef sparse_reshape(sp_input, shape, name=None):\n \"\"\"Reshapes a `SparseTensor` to represent values in a new dense shape.\n\n This operation has the same semantics as `reshape` on the represented dense\n tensor. The indices of non-empty values in `sp_input` are recomputed based\n on the new dense shape, and a new `SparseTensor` is returned containing the\n new indices and new shape. 
The order of non-empty values in `sp_input` is\n unchanged.\n\n If one component of `shape` is the special value -1, the size of that\n dimension is computed so that the total dense size remains constant. At\n most one component of `shape` can be -1. The number of dense elements\n implied by `shape` must be the same as the number of dense elements\n originally represented by `sp_input`.\n\n For example, if `sp_input` has shape `[2, 3, 6]` and `indices` / `values`:\n\n [0, 0, 0]: a\n [0, 0, 1]: b\n [0, 1, 0]: c\n [1, 0, 0]: d\n [1, 2, 3]: e\n\n and `shape` is `[9, -1]`, then the output will be a `SparseTensor` of\n shape `[9, 4]` and `indices` / `values`:\n\n [0, 0]: a\n [0, 1]: b\n [1, 2]: c\n [4, 2]: d\n [8, 1]: e\n\n Args:\n sp_input: The input `SparseTensor`.\n shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the\n represented `SparseTensor`.\n name: A name prefix for the returned tensors (optional)\n\n Returns:\n A `SparseTensor` with the same non-empty values but with indices calculated\n by the new dense shape.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n with ops.name_scope(name, \"SparseReshape\", [sp_input]) as name:\n reshaped_ind, reshaped_shape = gen_sparse_ops._sparse_reshape(\n sp_input.indices, sp_input.dense_shape, shape, name=name)\n\n return sparse_tensor.SparseTensor(\n reshaped_ind, array_ops.identity(sp_input.values),\n reshaped_shape)\n\n\n# TODO(aselle): Remove keyword required once for 1.0 final\nclass KeywordRequired(object):\n\n def __repr__(self):\n # This is needed to make documentation without fully qualified module paths\n return \"KeywordRequired()\"\n\n\ndef sparse_split(keyword_required=KeywordRequired(),\n sp_input=None, num_split=None, axis=None,\n name=None, split_dim=None):\n \"\"\"Split a `SparseTensor` into `num_split` tensors along `axis`.\n\n If the `sp_input.dense_shape[axis]` is not an integer multiple of `num_split`\n each slice starting from 0:`shape[axis] % num_split` gets extra one\n dimension. For example, if `axis = 1` and `num_split = 2` and the\n input is:\n\n input_tensor = shape = [2, 7]\n [ a d e ]\n [b c ]\n\n Graphically the output tensors are:\n\n output_tensor[0] =\n [ a ]\n [b c ]\n\n output_tensor[1] =\n [ d e ]\n [ ]\n\n Args:\n keyword_required: Python 2 standin for * (temporary for argument reorder)\n sp_input: The `SparseTensor` to split.\n num_split: A Python integer. The number of ways to split.\n axis: A 0-D `int32` `Tensor`. 
The dimension along which to split.\n name: A name for the operation (optional).\n split_dim: Deprecated old name for axis.\n\n Returns:\n `num_split` `SparseTensor` objects resulting from splitting `value`.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n ValueError: If the deprecated `split_dim` and `axis` are both non None.\n \"\"\"\n if not isinstance(keyword_required, KeywordRequired):\n raise ValueError(\"Keyword arguments are required for this function.\")\n if sp_input is None:\n raise ValueError(\"sp_input is required\")\n if num_split is None:\n raise ValueError(\"num_split is required\")\n if axis is None:\n raise ValueError(\"axis is required\")\n axis = deprecation.deprecated_argument_lookup(\"axis\", axis, \"split_dim\",\n split_dim)\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n output_inds, output_vals, output_shapes = (gen_sparse_ops._sparse_split(\n axis,\n sp_input.indices,\n sp_input.values,\n sp_input.dense_shape,\n num_split,\n name=name))\n sparse_tensors = []\n for i in range(0, num_split):\n sparse_tensors.append(\n sparse_tensor.SparseTensor(\n output_inds[i], output_vals[i], output_shapes[i]))\n return sparse_tensors\n\n\ndef sparse_to_dense(sparse_indices,\n output_shape,\n sparse_values,\n default_value=0,\n validate_indices=True,\n name=None):\n \"\"\"Converts a sparse representation into a dense tensor.\n\n Builds an array `dense` with shape `output_shape` such that\n\n ```python\n # If sparse_indices is scalar\n dense[i] = (i == sparse_indices ? sparse_values : default_value)\n\n # If sparse_indices is a vector, then for each i\n dense[sparse_indices[i]] = sparse_values[i]\n\n # If sparse_indices is an n by d matrix, then for each i in [0, n)\n dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]\n ```\n\n All other values in `dense` are set to `default_value`. If `sparse_values`\n is a scalar, all sparse indices are set to this single value.\n\n Indices should be sorted in lexicographic order, and indices must not\n contain any repeats. If `validate_indices` is True, these properties\n are checked during execution.\n\n Args:\n sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.\n `sparse_indices[i]` contains the complete index where `sparse_values[i]`\n will be placed.\n output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape\n of the dense output tensor.\n sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of\n `sparse_indices`, or a scalar value to be used for all sparse indices.\n default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value\n to set for indices not specified in `sparse_indices`. Defaults to zero.\n validate_indices: A boolean value. If True, indices are checked to make\n sure they are sorted in lexicographic order and that there are no repeats.\n name: A name for the operation (optional).\n\n Returns:\n Dense `Tensor` of shape `output_shape`. Has the same type as\n `sparse_values`.\n \"\"\"\n return gen_sparse_ops._sparse_to_dense(\n sparse_indices,\n output_shape,\n sparse_values,\n default_value=default_value,\n validate_indices=validate_indices,\n name=name)\n\n\ndef sparse_reduce_sum(sp_input, axis=None, keep_dims=False,\n reduction_axes=None):\n \"\"\"Computes the sum of elements across dimensions of a SparseTensor.\n\n This Op takes a SparseTensor and is the sparse counterpart to\n `tf.reduce_sum()`. 
In particular, this Op also returns a dense `Tensor`\n instead of a sparse one.\n\n Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\n with length 1.\n\n If `reduction_axes` has no entries, all dimensions are reduced, and a tensor\n with a single element is returned. Additionally, the axes can be negative,\n similar to the indexing rules in Python.\n\n For example:\n\n ```python\n # 'x' represents [[1, ?, 1]\n # [?, 1, ?]]\n # where ? is implicitly-zero.\n tf.sparse_reduce_sum(x) ==> 3\n tf.sparse_reduce_sum(x, 0) ==> [1, 1, 1]\n tf.sparse_reduce_sum(x, 1) ==> [2, 1] # Can also use -1 as the axis.\n tf.sparse_reduce_sum(x, 1, keep_dims=True) ==> [[2], [1]]\n tf.sparse_reduce_sum(x, [0, 1]) ==> 3\n ```\n\n Args:\n sp_input: The SparseTensor to reduce. Should have numeric type.\n axis: The dimensions to reduce; list or scalar. If `None` (the\n default), reduces all dimensions.\n keep_dims: If true, retain reduced dimensions with length 1.\n reduction_axes: Deprecated name of axis.\n\n Returns:\n The reduced Tensor.\n \"\"\"\n return gen_sparse_ops.sparse_reduce_sum(\n sp_input.indices, sp_input.values,\n sp_input.dense_shape,\n math_ops._ReductionDims(sp_input, axis, reduction_axes),\n keep_dims)\n\n\ndef sparse_reduce_sum_sparse(sp_input, axis=None, keep_dims=False,\n reduction_axes=None):\n \"\"\"Computes the sum of elements across dimensions of a SparseTensor.\n\n This Op takes a SparseTensor and is the sparse counterpart to\n `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a\n SparseTensor.\n\n Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless\n `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in\n `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained\n with length 1.\n\n If `reduction_axes` has no entries, all dimensions are reduced, and a tensor\n with a single element is returned. Additionally, the axes can be negative,\n which are interpreted according to the indexing rules in Python.\n\n Args:\n sp_input: The SparseTensor to reduce. Should have numeric type.\n axis: The dimensions to reduce; list or scalar. If `None` (the\n default), reduces all dimensions.\n keep_dims: If true, retain reduced dimensions with length 1.\n reduction_axes: Deprecated name of axis\n\n Returns:\n The reduced SparseTensor.\n \"\"\"\n output_ind, output_val, output_shape = (\n gen_sparse_ops.sparse_reduce_sum_sparse(\n sp_input.indices, sp_input.values,\n sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis,\n reduction_axes),\n keep_dims))\n\n return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)\n\n\ndef sparse_tensor_to_dense(sp_input,\n default_value=0,\n validate_indices=True,\n name=None):\n \"\"\"Converts a `SparseTensor` into a dense tensor.\n\n This op is a convenience wrapper around `sparse_to_dense` for `SparseTensor`s.\n\n For example, if `sp_input` has shape `[3, 5]` and non-empty string values:\n\n [0, 1]: a\n [0, 3]: b\n [2, 0]: c\n\n and `default_value` is `x`, then the output will be a dense `[3, 5]`\n string tensor with values:\n\n [[x a x b x]\n [x x x x x]\n [c x x x x]]\n\n Indices must be without repeats. This is only\n tested if validate_indices is True.\n\n Args:\n sp_input: The input `SparseTensor`.\n default_value: Scalar value to set for indices not specified in\n `sp_input`. 
Defaults to zero.\n validate_indices: A boolean value. If `True`, indices are checked to make\n sure they are sorted in lexicographic order and that there are no repeats.\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A dense tensor with shape `sp_input.dense_shape` and values specified by\n the non-empty values in `sp_input`. Indices not in `sp_input` are assigned\n `default_value`.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n return sparse_to_dense(\n sp_input.indices,\n sp_input.dense_shape,\n sp_input.values,\n default_value=default_value,\n validate_indices=validate_indices,\n name=name)\n\n\ndef sparse_to_indicator(sp_input, vocab_size, name=None):\n \"\"\"Converts a `SparseTensor` of ids into a dense bool indicator tensor.\n\n The last dimension of `sp_input.indices` is discarded and replaced with\n the values of `sp_input`. If `sp_input.dense_shape = [D0, D1, ..., Dn, K]`,\n then `output.shape = [D0, D1, ..., Dn, vocab_size]`, where\n\n output[d_0, d_1, ..., d_n, sp_input[d_0, d_1, ..., d_n, k]] = True\n\n and False elsewhere in `output`.\n\n For example, if `sp_input.dense_shape = [2, 3, 4]` with non-empty values:\n\n [0, 0, 0]: 0\n [0, 1, 0]: 10\n [1, 0, 3]: 103\n [1, 1, 2]: 150\n [1, 1, 3]: 149\n [1, 1, 4]: 150\n [1, 2, 1]: 121\n\n and `vocab_size = 200`, then the output will be a `[2, 3, 200]` dense bool\n tensor with False everywhere except at positions\n\n (0, 0, 0), (0, 1, 10), (1, 0, 103), (1, 1, 149), (1, 1, 150),\n (1, 2, 121).\n\n Note that repeats are allowed in the input SparseTensor.\n This op is useful for converting `SparseTensor`s into dense formats for\n compatibility with ops that expect dense tensors.\n\n The input `SparseTensor` must be in row-major order.\n\n Args:\n sp_input: A `SparseTensor` with `values` property of type `int32` or\n `int64`.\n vocab_size: A scalar int64 Tensor (or Python int) containing the new size\n of the last dimension, `all(0 <= sp_input.values < vocab_size)`.\n name: A name prefix for the returned tensors (optional)\n\n Returns:\n A dense bool indicator tensor representing the indices with specified value.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n with ops.name_scope(name, \"SparseToIndicator\", [sp_input]) as name:\n num_entries = array_ops.shape(sp_input.indices)[0]\n new_values = array_ops.fill(array_ops.expand_dims(num_entries, 0), True)\n sp_values = sparse_tensor.SparseTensor(\n sp_input.indices, new_values, sp_input.dense_shape)\n\n sp_new = sparse_merge(sp_input, sp_values, vocab_size, name)\n\n # validate_indices may be False because we allow duplicates in new_indices:\n # repeated indices are allowed when creating an indicator matrix.\n return sparse_tensor_to_dense(\n sp_new, default_value=False, validate_indices=False, name=name)\n\n\ndef sparse_merge(sp_ids, sp_values, vocab_size, name=None,\n already_sorted=False):\n \"\"\"Combines a batch of feature ids and values into a single `SparseTensor`.\n\n The most common use case for this function occurs when feature ids and\n their corresponding values are stored in `Example` protos on disk.\n `parse_example` will return a batch of ids and a batch of values, and this\n function joins them into a single logical `SparseTensor` for use in\n functions such as `sparse_tensor_dense_matmul`, `sparse_to_dense`, etc.\n\n The `SparseTensor` returned by this function has the following 
properties:\n\n - `indices` is equivalent to `sp_ids.indices` with the last\n dimension discarded and replaced with `sp_ids.values`.\n - `values` is simply `sp_values.values`.\n - If `sp_ids.dense_shape = [D0, D1, ..., Dn, K]`, then\n `output.shape = [D0, D1, ..., Dn, vocab_size]`.\n\n For example, consider the following feature vectors:\n\n ```python\n vector1 = [-3, 0, 0, 0, 0, 0]\n vector2 = [ 0, 1, 0, 4, 1, 0]\n vector3 = [ 5, 0, 0, 9, 0, 0]\n ```\n\n These might be stored sparsely in the following Example protos by storing\n only the feature ids (column number if the vectors are treated as a matrix)\n of the non-zero elements and the corresponding values:\n\n ```python\n examples = [Example(features={\n \"ids\": Feature(int64_list=Int64List(value=[0])),\n \"values\": Feature(float_list=FloatList(value=[-3]))}),\n Example(features={\n \"ids\": Feature(int64_list=Int64List(value=[1, 4, 3])),\n \"values\": Feature(float_list=FloatList(value=[1, 1, 4]))}),\n Example(features={\n \"ids\": Feature(int64_list=Int64List(value=[0, 3])),\n \"values\": Feature(float_list=FloatList(value=[5, 9]))})]\n ```\n\n The result of calling parse_example on these examples will produce a\n dictionary with entries for \"ids\" and \"values\". Passing those two objects\n to this function along with vocab_size=6, will produce a `SparseTensor` that\n sparsely represents all three instances. Namely, the `indices` property will\n contain the coordinates of the non-zero entries in the feature matrix (the\n first dimension is the row number in the matrix, i.e., the index within the\n batch, and the second dimension is the column number, i.e., the feature id);\n `values` will contain the actual values. `shape` will be the shape of the\n original matrix, i.e., (3, 6). For our example above, the output will be\n equal to:\n\n ```python\n SparseTensor(indices=[[0, 0], [1, 1], [1, 3], [1, 4], [2, 0], [2, 3]],\n values=[-3, 1, 4, 1, 5, 9],\n dense_shape=[3, 6])\n ```\n\n Args:\n sp_ids: A `SparseTensor` with `values` property of type `int32`\n or `int64`.\n sp_values: A`SparseTensor` of any type.\n vocab_size: A scalar `int64` Tensor (or Python int) containing the new size\n of the last dimension, `all(0 <= sp_ids.values < vocab_size)`.\n name: A name prefix for the returned tensors (optional)\n already_sorted: A boolean to specify whether the per-batch values in\n `sp_values` are already sorted. 
If so skip sorting, False by default\n (optional).\n\n Returns:\n A `SparseTensor` compactly representing a batch of feature ids and values,\n useful for passing to functions that expect such a `SparseTensor`.\n\n Raises:\n TypeError: If `sp_ids` or `sp_values` are not a `SparseTensor`.\n \"\"\"\n sp_ids = _convert_to_sparse_tensor(sp_ids)\n sp_values = _convert_to_sparse_tensor(sp_values)\n\n with ops.name_scope(name, \"SparseMerge\", [sp_ids, sp_values]):\n indices_shape = array_ops.shape(sp_ids.indices)\n rank = indices_shape[1]\n\n ids = sp_ids.values\n if ids.dtype != dtypes.int64:\n ids = math_ops.cast(ids, dtypes.int64)\n\n # Slice off the last dimension of indices, then tack on the ids\n indices_columns_to_preserve = array_ops.slice(\n sp_ids.indices, [0, 0], array_ops.pack([-1, rank - 1]))\n new_indices = array_ops.concat(1, [indices_columns_to_preserve,\n array_ops.reshape(ids, [-1, 1])])\n\n new_values = sp_values.values\n new_shape = array_ops.concat(\n 0,\n [array_ops.slice(\n sp_ids.dense_shape, [0], array_ops.expand_dims(rank - 1, 0)),\n math_ops.cast(array_ops.pack([vocab_size]), dtypes.int64)])\n\n result = sparse_tensor.SparseTensor(new_indices, new_values, new_shape)\n return result if already_sorted else sparse_reorder(result)\n\n\ndef sparse_retain(sp_input, to_retain):\n \"\"\"Retains specified non-empty values within a `SparseTensor`.\n\n For example, if `sp_input` has shape `[4, 5]` and 4 non-empty string values:\n\n [0, 1]: a\n [0, 3]: b\n [2, 0]: c\n [3, 1]: d\n\n and `to_retain = [True, False, False, True]`, then the output will\n be a `SparseTensor` of shape `[4, 5]` with 2 non-empty values:\n\n [0, 1]: a\n [3, 1]: d\n\n Args:\n sp_input: The input `SparseTensor` with `N` non-empty elements.\n to_retain: A bool vector of length `N` with `M` true values.\n\n Returns:\n A `SparseTensor` with the same shape as the input and `M` non-empty\n elements corresponding to the true positions in `to_retain`.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n to_retain = ops.convert_to_tensor(to_retain)\n\n # Shape checking, if shape is known at graph construction time\n retain_shape = to_retain.get_shape()\n retain_shape.assert_has_rank(1)\n sp_input.values.get_shape()[0].merge_with(retain_shape[0])\n\n where_true = array_ops.reshape(array_ops.where(to_retain), [-1])\n new_indices = array_ops.gather(sp_input.indices, where_true)\n new_values = array_ops.gather(sp_input.values, where_true)\n return sparse_tensor.SparseTensor(new_indices, new_values,\n array_ops.identity(sp_input.dense_shape))\n\n\ndef sparse_reset_shape(sp_input, new_shape=None):\n \"\"\"Resets the shape of a `SparseTensor` with indices and values unchanged.\n\n If `new_shape` is None, returns a copy of `sp_input` with its shape reset\n to the tight bounding box of `sp_input`.\n\n If `new_shape` is provided, then it must be larger or equal in all dimensions\n compared to the shape of `sp_input`. When this condition is met, the returned\n SparseTensor will have its shape reset to `new_shape` and its indices and\n values unchanged from that of `sp_input.`\n\n For example:\n\n Consider a `sp_input` with shape [2, 3, 5]:\n\n [0, 0, 1]: a\n [0, 1, 0]: b\n [0, 2, 2]: c\n [1, 0, 3]: d\n\n - It is an error to set `new_shape` as [3, 7] since this represents a\n rank-2 tensor while `sp_input` is rank-3. 
This is either a ValueError\n during graph construction (if both shapes are known) or an OpError during\n run time.\n\n - Setting `new_shape` as [2, 3, 6] will be fine as this shape is larger or\n equal in every dimension compared to the original shape [2, 3, 5].\n\n - On the other hand, setting new_shape as [2, 3, 4] is also an error: The\n third dimension is smaller than the original shape [2, 3, 5] (and an\n `InvalidArgumentError` will be raised).\n\n - If `new_shape` is None, the returned SparseTensor will have a shape\n [2, 3, 4], which is the tight bounding box of `sp_input`.\n\n Args:\n sp_input: The input `SparseTensor`.\n new_shape: None or a vector representing the new shape for the returned\n `SparseTensor`.\n\n Returns:\n A `SparseTensor` indices and values unchanged from `input_sp`. Its shape is\n `new_shape` if that is set. Otherwise it is the tight bounding box of\n `input_sp`\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n ValueError: If `new_shape` represents a tensor with a different rank from\n that of `sp_input` (if shapes are known when graph is constructed).\n OpError:\n - If `new_shape` has dimension sizes that are too small.\n - If shapes are not known during graph construction time, and during run\n time it is found out that the ranks do not match.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n in_indices = array_ops.identity(sp_input.indices)\n in_values = array_ops.identity(sp_input.values)\n in_shape = array_ops.identity(sp_input.dense_shape)\n\n if new_shape is None:\n dim_low_bound = math_ops.reduce_max(in_indices, 0)\n output_shape_tensor = math_ops.add(dim_low_bound,\n array_ops.ones_like(in_shape))\n else:\n output_shape_tensor = ops.convert_to_tensor(new_shape)\n output_shape_tensor.get_shape().assert_has_rank(1)\n output_shape_tensor = math_ops.cast(output_shape_tensor, dtypes.int64)\n # For cases when shape is known during graph construction, this catches the\n # error before the sparse_tensor.SparseTensor catches it.\n output_shape_tensor.get_shape()[0].merge_with(in_shape.get_shape()[0])\n\n # For cases where shape is not known during graph construction.\n output_shape_tensor = control_flow_ops.with_dependencies(\n [check_ops.assert_equal(\n array_ops.shape(in_shape), array_ops.shape(output_shape_tensor))],\n output_shape_tensor)\n output_shape_tensor = control_flow_ops.with_dependencies(\n [check_ops.assert_less_equal(in_shape, output_shape_tensor)],\n output_shape_tensor)\n\n return sparse_tensor.SparseTensor(in_indices, in_values, output_shape_tensor)\n\n\ndef sparse_fill_empty_rows(sp_input, default_value, name=None):\n \"\"\"Fills empty rows in the input 2-D `SparseTensor` with a default value.\n\n This op adds entries with the specified `default_value` at index\n `[row, 0]` for any row in the input that does not already have a value.\n\n For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:\n\n [0, 1]: a\n [0, 3]: b\n [2, 0]: c\n [3, 1]: d\n\n Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:\n\n [0, 1]: a\n [0, 3]: b\n [1, 0]: default_value\n [2, 0]: c\n [3, 1]: d\n [4, 0]: default_value\n\n Note that the input may have empty columns at the end, with no effect on\n this op.\n\n The output `SparseTensor` will be in row-major order and will have the\n same shape as the input.\n\n This op also returns an indicator vector such that\n\n empty_row_indicator[i] = True iff row i was an empty row.\n\n Args:\n sp_input: A `SparseTensor` with shape `[N, M]`.\n default_value: The 
value to fill for empty rows, with the same type as\n `sp_input.`\n name: A name prefix for the returned tensors (optional)\n\n Returns:\n sp_ordered_output: A `SparseTensor` with shape `[N, M]`, and with all empty\n rows filled in with `default_value`.\n empty_row_indicator: A bool vector of length `N` indicating whether each\n input row was empty.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n with ops.name_scope(name, \"SparseFillEmptyRows\", [sp_input]):\n default_value = ops.convert_to_tensor(\n default_value, dtype=sp_input.values.dtype)\n\n num_rows = math_ops.cast(sp_input.dense_shape[0], dtypes.int32)\n all_row_indices = math_ops.cast(math_ops.range(num_rows), dtypes.int64)\n empty_row_indices, _ = array_ops.setdiff1d(all_row_indices,\n sp_input.indices[:, 0])\n empty_row_indicator = sparse_to_dense(\n empty_row_indices,\n array_ops.expand_dims(sp_input.dense_shape[0], -1), True,\n False)\n\n empty_row_indices_as_column = array_ops.reshape(empty_row_indices, [-1, 1])\n additional_indices = array_ops.concat(\n 1, [empty_row_indices_as_column,\n array_ops.zeros_like(empty_row_indices_as_column)])\n additional_values = array_ops.fill(\n array_ops.shape(empty_row_indices), default_value)\n\n all_indices_unordered = array_ops.concat(0, [sp_input.indices,\n additional_indices])\n all_values_unordered = array_ops.concat(0, [sp_input.values,\n additional_values])\n sp_unordered_output = sparse_tensor.SparseTensor(\n all_indices_unordered,\n all_values_unordered, sp_input.dense_shape)\n sp_ordered_output = sparse_reorder(sp_unordered_output)\n\n return sp_ordered_output, empty_row_indicator\n\n\ndef serialize_sparse(sp_input, name=None):\n \"\"\"Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.\n\n Args:\n sp_input: The input `SparseTensor`.\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A string 3-vector (1D `Tensor`), with each column representing the\n serialized `SparseTensor`'s indices, values, and shape (respectively).\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n return gen_sparse_ops._serialize_sparse(\n sp_input.indices, sp_input.values, sp_input.dense_shape, name=name)\n\n\ndef serialize_many_sparse(sp_input, name=None):\n \"\"\"Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.\n\n The `SparseTensor` must have rank `R` greater than 1, and the first dimension\n is treated as the minibatch dimension. Elements of the `SparseTensor`\n must be sorted in increasing order of this first dimension. 
The serialized\n `SparseTensor` objects going into each row of the output `Tensor` will have\n rank `R-1`.\n\n The minibatch size `N` is extracted from `sparse_shape[0]`.\n\n Args:\n sp_input: The input rank `R` `SparseTensor`.\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A string matrix (2-D `Tensor`) with `N` rows and `3` columns.\n Each column represents serialized `SparseTensor`'s indices, values, and\n shape (respectively).\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n return gen_sparse_ops._serialize_many_sparse(\n sp_input.indices, sp_input.values, sp_input.dense_shape, name=name)\n\n\ndef deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):\n \"\"\"Deserialize and concatenate `SparseTensors` from a serialized minibatch.\n\n The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where\n `N` is the minibatch size and the rows correspond to packed outputs of\n `serialize_sparse`. The ranks of the original `SparseTensor` objects\n must all match. When the final `SparseTensor` is created, it has rank one\n higher than the ranks of the incoming `SparseTensor` objects (they have been\n concatenated along a new row dimension).\n\n The output `SparseTensor` object's shape values for all dimensions but the\n first are the max across the input `SparseTensor` objects' shape values\n for the corresponding dimensions. Its first shape value is `N`, the minibatch\n size.\n\n The input `SparseTensor` objects' indices are assumed ordered in\n standard lexicographic order. If this is not the case, after this\n step run `sparse_reorder` to restore index ordering.\n\n For example, if the serialized input is a `[2, 3]` matrix representing two\n original `SparseTensor` objects:\n\n index = [ 0]\n [10]\n [20]\n values = [1, 2, 3]\n shape = [50]\n\n and\n\n index = [ 2]\n [10]\n values = [4, 5]\n shape = [30]\n\n then the final deserialized `SparseTensor` will be:\n\n index = [0 0]\n [0 10]\n [0 20]\n [1 2]\n [1 10]\n values = [1, 2, 3, 4, 5]\n shape = [2 50]\n\n Args:\n serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.\n The serialized and packed `SparseTensor` objects.\n dtype: The `dtype` of the serialized `SparseTensor` objects.\n rank: (optional) Python int, the rank of the `SparseTensor` objects.\n name: A name prefix for the returned tensors (optional)\n\n Returns:\n A `SparseTensor` representing the deserialized `SparseTensor`s,\n concatenated along the `SparseTensor`s' first dimension.\n\n All of the serialized `SparseTensor`s must have had the same rank and type.\n \"\"\"\n output_indices, output_values, output_shape = (\n gen_sparse_ops._deserialize_many_sparse(\n serialized_sparse, dtype, name=name))\n\n # Feed rank data back in, if available\n output_indices.set_shape([None, rank])\n output_shape.set_shape([rank])\n\n return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)\n\n\ndef sparse_tensor_dense_matmul(sp_a,\n b,\n adjoint_a=False,\n adjoint_b=False,\n name=None):\n # pylint: disable=line-too-long\n \"\"\"Multiply SparseTensor (of rank 2) \"A\" by dense matrix \"B\".\n\n No validity checking is performed on the indices of A. However, the following\n input format is recommended for optimal behavior:\n\n if adjoint_a == false:\n A should be sorted in lexicographically increasing order. 
Use\n sparse_reorder if you're not sure.\n if adjoint_a == true:\n A should be sorted in order of increasing dimension 1 (i.e., \"column major\"\n order instead of \"row major\" order).\n\n Deciding when to use sparse_tensor_dense_matmul vs. matmul(sp_a=True):\n\n There are a number of questions to ask in the decision process, including:\n\n * Will the SparseTensor A fit in memory if densified?\n * Is the column count of the product large (>> 1)?\n * Is the density of A larger than approximately 15%?\n\n If the answer to several of these questions is yes, consider\n converting the `SparseTensor` to a dense one and using `tf.matmul` with\n `sp_a=True`.\n\n This operation tends to perform well when A is more sparse, if the column size\n of the product is small (e.g. matrix-vector multiplication), if\n `sp_a.dense_shape` takes on large values.\n\n Below is a rough speed comparison between sparse_tensor_dense_matmul,\n labelled 'sparse', and matmul(sp_a=True), labelled 'dense'. For purposes of\n the comparison, the time spent converting from a SparseTensor to a dense\n Tensor is not included, so it is overly conservative with respect to\n the time ratio.\n\n Benchmark system:\n CPU: Intel Ivybridge with HyperThreading (6 cores) dL1:32KB dL2:256KB dL3:12MB\n GPU: NVidia Tesla k40c\n\n Compiled with:\n -c opt --config=cuda --copt=-mavx\n\n ```tensorflow/python/sparse_tensor_dense_matmul_op_test --benchmarks\n A sparse [m, k] with % nonzero values between 1% and 80%\n B dense [k, n]\n\n % nnz n gpu m k dt(dense) dt(sparse) dt(sparse)/dt(dense)\n 0.01 1 True 100 100 0.000221166 0.00010154 0.459112\n 0.01 1 True 100 1000 0.00033858 0.000109275 0.322745\n 0.01 1 True 1000 100 0.000310557 9.85661e-05 0.317385\n 0.01 1 True 1000 1000 0.0008721 0.000100875 0.115669\n 0.01 1 False 100 100 0.000208085 0.000107603 0.51711\n 0.01 1 False 100 1000 0.000327112 9.51118e-05 0.290762\n 0.01 1 False 1000 100 0.000308222 0.00010345 0.335635\n 0.01 1 False 1000 1000 0.000865721 0.000101397 0.117124\n 0.01 10 True 100 100 0.000218522 0.000105537 0.482958\n 0.01 10 True 100 1000 0.000340882 0.000111641 0.327506\n 0.01 10 True 1000 100 0.000315472 0.000117376 0.372064\n 0.01 10 True 1000 1000 0.000905493 0.000123263 0.136128\n 0.01 10 False 100 100 0.000221529 9.82571e-05 0.44354\n 0.01 10 False 100 1000 0.000330552 0.000112615 0.340687\n 0.01 10 False 1000 100 0.000341277 0.000114097 0.334324\n 0.01 10 False 1000 1000 0.000819944 0.000120982 0.147549\n 0.01 25 True 100 100 0.000207806 0.000105977 0.509981\n 0.01 25 True 100 1000 0.000322879 0.00012921 0.400181\n 0.01 25 True 1000 100 0.00038262 0.00014158 0.370035\n 0.01 25 True 1000 1000 0.000865438 0.000202083 0.233504\n 0.01 25 False 100 100 0.000209401 0.000104696 0.499979\n 0.01 25 False 100 1000 0.000321161 0.000130737 0.407076\n 0.01 25 False 1000 100 0.000377012 0.000136801 0.362856\n 0.01 25 False 1000 1000 0.000861125 0.00020272 0.235413\n 0.2 1 True 100 100 0.000206952 9.69219e-05 0.46833\n 0.2 1 True 100 1000 0.000348674 0.000147475 0.422959\n 0.2 1 True 1000 100 0.000336908 0.00010122 0.300439\n 0.2 1 True 1000 1000 0.001022 0.000203274 0.198898\n 0.2 1 False 100 100 0.000207532 9.5412e-05 0.459746\n 0.2 1 False 100 1000 0.000356127 0.000146824 0.41228\n 0.2 1 False 1000 100 0.000322664 0.000100918 0.312764\n 0.2 1 False 1000 1000 0.000998987 0.000203442 0.203648\n 0.2 10 True 100 100 0.000211692 0.000109903 0.519165\n 0.2 10 True 100 1000 0.000372819 0.000164321 0.440753\n 0.2 10 True 1000 100 0.000338651 0.000144806 0.427596\n 0.2 10 True 1000 1000 
0.00108312 0.000758876 0.70064\n 0.2 10 False 100 100 0.000215727 0.000110502 0.512231\n 0.2 10 False 100 1000 0.000375419 0.0001613 0.429653\n 0.2 10 False 1000 100 0.000336999 0.000145628 0.432132\n 0.2 10 False 1000 1000 0.00110502 0.000762043 0.689618\n 0.2 25 True 100 100 0.000218705 0.000129913 0.594009\n 0.2 25 True 100 1000 0.000394794 0.00029428 0.745402\n 0.2 25 True 1000 100 0.000404483 0.0002693 0.665788\n 0.2 25 True 1000 1000 0.0012002 0.00194494 1.62052\n 0.2 25 False 100 100 0.000221494 0.0001306 0.589632\n 0.2 25 False 100 1000 0.000396436 0.000297204 0.74969\n 0.2 25 False 1000 100 0.000409346 0.000270068 0.659754\n 0.2 25 False 1000 1000 0.00121051 0.00193737 1.60046\n 0.5 1 True 100 100 0.000214981 9.82111e-05 0.456836\n 0.5 1 True 100 1000 0.000415328 0.000223073 0.537101\n 0.5 1 True 1000 100 0.000358324 0.00011269 0.314492\n 0.5 1 True 1000 1000 0.00137612 0.000437401 0.317851\n 0.5 1 False 100 100 0.000224196 0.000101423 0.452386\n 0.5 1 False 100 1000 0.000400987 0.000223286 0.556841\n 0.5 1 False 1000 100 0.000368825 0.00011224 0.304318\n 0.5 1 False 1000 1000 0.00136036 0.000429369 0.31563\n 0.5 10 True 100 100 0.000222125 0.000112308 0.505608\n 0.5 10 True 100 1000 0.000461088 0.00032357 0.701753\n 0.5 10 True 1000 100 0.000394624 0.000225497 0.571422\n 0.5 10 True 1000 1000 0.00158027 0.00190898 1.20801\n 0.5 10 False 100 100 0.000232083 0.000114978 0.495418\n 0.5 10 False 100 1000 0.000454574 0.000324632 0.714146\n 0.5 10 False 1000 100 0.000379097 0.000227768 0.600817\n 0.5 10 False 1000 1000 0.00160292 0.00190168 1.18638\n 0.5 25 True 100 100 0.00023429 0.000151703 0.647501\n 0.5 25 True 100 1000 0.000497462 0.000598873 1.20386\n 0.5 25 True 1000 100 0.000460778 0.000557038 1.20891\n 0.5 25 True 1000 1000 0.00170036 0.00467336 2.74845\n 0.5 25 False 100 100 0.000228981 0.000155334 0.678371\n 0.5 25 False 100 1000 0.000496139 0.000620789 1.25124\n 0.5 25 False 1000 100 0.00045473 0.000551528 1.21287\n 0.5 25 False 1000 1000 0.00171793 0.00467152 2.71927\n 0.8 1 True 100 100 0.000222037 0.000105301 0.47425\n 0.8 1 True 100 1000 0.000410804 0.000329327 0.801664\n 0.8 1 True 1000 100 0.000349735 0.000131225 0.375212\n 0.8 1 True 1000 1000 0.00139219 0.000677065 0.48633\n 0.8 1 False 100 100 0.000214079 0.000107486 0.502085\n 0.8 1 False 100 1000 0.000413746 0.000323244 0.781261\n 0.8 1 False 1000 100 0.000348983 0.000131983 0.378193\n 0.8 1 False 1000 1000 0.00136296 0.000685325 0.50282\n 0.8 10 True 100 100 0.000229159 0.00011825 0.516017\n 0.8 10 True 100 1000 0.000498845 0.000532618 1.0677\n 0.8 10 True 1000 100 0.000383126 0.00029935 0.781336\n 0.8 10 True 1000 1000 0.00162866 0.00307312 1.88689\n 0.8 10 False 100 100 0.000230783 0.000124958 0.541452\n 0.8 10 False 100 1000 0.000493393 0.000550654 1.11606\n 0.8 10 False 1000 100 0.000377167 0.000298581 0.791642\n 0.8 10 False 1000 1000 0.00165795 0.00305103 1.84024\n 0.8 25 True 100 100 0.000233496 0.000175241 0.75051\n 0.8 25 True 100 1000 0.00055654 0.00102658 1.84458\n 0.8 25 True 1000 100 0.000463814 0.000783267 1.68875\n 0.8 25 True 1000 1000 0.00186905 0.00755344 4.04132\n 0.8 25 False 100 100 0.000240243 0.000175047 0.728625\n 0.8 25 False 100 1000 0.000578102 0.00104499 1.80763\n 0.8 25 False 1000 100 0.000485113 0.000776849 1.60138\n 0.8 25 False 1000 1000 0.00211448 0.00752736 3.55992\n ```\n\n Args:\n sp_a: SparseTensor A, of rank 2.\n b: A dense Matrix with the same dtype as sp_a.\n adjoint_a: Use the adjoint of A in the matrix multiply. If A is complex,\n this is transpose(conj(A)). 
Otherwise it's transpose(A).\n adjoint_b: Use the adjoint of B in the matrix multiply. If B is complex,\n this is transpose(conj(B)). Otherwise it's transpose(B).\n name: A name prefix for the returned tensors (optional)\n\n Returns:\n A dense matrix (pseudo-code in dense np.matrix notation):\n A = A.H if adjoint_a else A\n B = B.H if adjoint_b else B\n return A*B\n \"\"\"\n # pylint: enable=line-too-long\n sp_a = _convert_to_sparse_tensor(sp_a)\n with ops.name_scope(name, \"SparseTensorDenseMatMul\",\n [sp_a.indices, sp_a.values, b]) as name:\n b = ops.convert_to_tensor(b, name=\"b\")\n return gen_sparse_ops._sparse_tensor_dense_mat_mul(\n a_indices=sp_a.indices,\n a_values=sp_a.values,\n a_shape=sp_a.dense_shape,\n b=b,\n adjoint_a=adjoint_a,\n adjoint_b=adjoint_b)\n\n\ndef sparse_softmax(sp_input, name=None):\n \"\"\"Applies softmax to a batched N-D `SparseTensor`.\n\n The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`\n (where `N >= 2`), and with indices sorted in the canonical lexicographic\n order.\n\n This op is equivalent to applying the normal `tf.nn.softmax()` to each\n innermost logical submatrix with shape `[B, C]`, but with the catch that *the\n implicitly zero elements do not participate*. Specifically, the algorithm is\n equivalent to:\n\n (1) Applies `tf.nn.softmax()` to a densified view of each innermost\n submatrix with shape `[B, C]`, along the size-C dimension;\n (2) Masks out the original implicitly-zero locations;\n (3) Renormalizes the remaining elements.\n\n Hence, the `SparseTensor` result has exactly the same non-zero indices and\n shape.\n\n Example:\n\n ```python\n # First batch:\n # [? e.]\n # [1. ? ]\n # Second batch:\n # [e ? ]\n # [e e ]\n shape = [2, 2, 2] # 3-D SparseTensor\n values = np.asarray([[[0., np.e], [1., 0.]], [[np.e, 0.], [np.e, np.e]]])\n indices = np.vstack(np.where(values)).astype(np.int64).T\n\n result = tf.sparse_softmax(tf.SparseTensor(indices, values, shape))\n # ...returning a 3-D SparseTensor, equivalent to:\n # [? 1.] [1 ?]\n # [1. ? ] and [.5 .5]\n # where ? 
means implicitly zero.\n ```\n\n Args:\n sp_input: N-D `SparseTensor`, where `N >= 2`.\n name: optional name of the operation.\n Returns:\n output: N-D `SparseTensor` representing the results.\n \"\"\"\n with ops.name_scope(name, \"SparseSoftmax\",\n [sp_input.indices, sp_input.values]) as name:\n out_vals = gen_sparse_ops.sparse_softmax(sp_input.indices, sp_input.values,\n sp_input.dense_shape)\n return sparse_tensor.SparseTensor(\n sp_input.indices, out_vals, sp_input.dense_shape)\n\n\ndef sparse_maximum(sp_a, sp_b, name=None):\n \"\"\"Returns the element-wise max of two SparseTensors.\n\n Assumes the two SparseTensors have the same shape, i.e., no broadcasting.\n Example:\n\n ```python\n sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])\n sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])\n res = tf.sparse_maximum(sp_zero, sp_one).eval()\n # \"res\" should be equal to SparseTensor([[0], [1]], [0, 1], [7]).\n ```\n\n Args:\n sp_a: a `SparseTensor` operand whose dtype is real, and indices\n lexicographically ordered.\n sp_b: the other `SparseTensor` operand with the same requirements (and the\n same shape).\n name: optional name of the operation.\n Returns:\n output: the output SparseTensor.\n \"\"\"\n with ops.name_scope(name, \"SparseSparseMaximum\", [sp_a.indices, sp_a.values,\n sp_b.indices,\n sp_b.values]) as name:\n out_indices, out_values = gen_sparse_ops.sparse_sparse_maximum(\n sp_a.indices,\n sp_a.values,\n sp_a.dense_shape,\n sp_b.indices,\n sp_b.values,\n sp_b.dense_shape,\n name=name)\n return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)\n\n\ndef sparse_minimum(sp_a, sp_b, name=None):\n \"\"\"Returns the element-wise min of two SparseTensors.\n\n Assumes the two SparseTensors have the same shape, i.e., no broadcasting.\n Example:\n\n ```python\n sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])\n sp_one = sparse_tensor.SparseTensor([[1]], [1], [7])\n res = tf.sparse_minimum(sp_zero, sp_one).eval()\n # \"res\" should be equal to SparseTensor([[0], [1]], [0, 0], [7]).\n ```\n\n Args:\n sp_a: a `SparseTensor` operand whose dtype is real, and indices\n lexicographically ordered.\n sp_b: the other `SparseTensor` operand with the same requirements (and the\n same shape).\n name: optional name of the operation.\n Returns:\n output: the output SparseTensor.\n \"\"\"\n with ops.name_scope(name, \"SparseSparseMinimum\", [sp_a.indices, sp_a.values,\n sp_b.indices,\n sp_b.values]) as name:\n out_indices, out_values = gen_sparse_ops.sparse_sparse_minimum(\n sp_a.indices,\n sp_a.values,\n sp_a.dense_shape,\n sp_b.indices,\n sp_b.values,\n sp_b.dense_shape,\n name=name)\n return sparse_tensor.SparseTensor(out_indices, out_values, sp_a.dense_shape)\n\n\ndef sparse_transpose(sp_input, perm=None, name=None):\n \"\"\"Transposes a `SparseTensor`\n\n The returned tensor's dimension i will correspond to the input dimension\n `perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is\n the rank of the input tensor. 
Hence by default, this operation performs a\n regular matrix transpose on 2-D input Tensors.\n\n For example, if `sp_input` has shape `[4, 5]` and `indices` / `values`:\n\n [0, 3]: b\n [0, 1]: a\n [3, 1]: d\n [2, 0]: c\n\n then the output will be a `SparseTensor` of shape `[5, 4]` and\n `indices` / `values`:\n\n [0, 2]: c\n [1, 0]: a\n [1, 3]: d\n [3, 0]: b\n\n Args:\n sp_input: The input `SparseTensor`.\n perm: A permutation of the dimensions of `sp_input`.\n name: A name prefix for the returned tensors (optional)\n Returns:\n A transposed `SparseTensor`.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n with ops.op_scope([sp_input], name, \"SparseTranspose\") as name:\n if perm is None:\n rank = array_ops.rank(sp_input)\n perm = (rank - 1) - math_ops.range(0, rank, 1)\n indices = sp_input.indices\n transposed_indices = array_ops.transpose(\n array_ops.gather(array_ops.transpose(indices), perm))\n dense_shape = sp_input.dense_shape\n transposed_dense_shape = array_ops.gather(dense_shape, perm)\n transposed_st = sparse_tensor.SparseTensor(\n transposed_indices, sp_input.values,\n transposed_dense_shape)\n transposed_st = sparse_reorder(transposed_st)\n return transposed_st\n\n\ndef _add_sparse_to_tensors_map(sp_input, container=None,\n shared_name=None, name=None):\n \"\"\"Add a `SparseTensor` to a `SparseTensorsMap` and return its handle.\n\n Args:\n sp_input: The input `SparseTensor`.\n container: The container for the underlying `SparseTensorsMap` (optional).\n shared_name: The shared name for the underlying `SparseTensorsMap`\n (optional, defaults to the name of the newly created op).\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A string 1-vector (1D `Tensor`), with the single element representing the\n a unique handle to a `SparseTensor` stored by the `SparseTensorMap`\n underlying this op.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n return gen_sparse_ops._add_sparse_to_tensors_map(\n sp_input.indices, sp_input.values, sp_input.dense_shape,\n container=container, shared_name=shared_name, name=name)\n\n\ndef _add_many_sparse_to_tensors_map(sp_input, container=None,\n shared_name=None, name=None):\n \"\"\"Add a minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.\n\n The `SparseTensor` must have rank `R` greater than 1, and the first dimension\n is treated as the minibatch dimension. Elements of the `SparseTensor`\n must be sorted in increasing order of this first dimension. 
The serialized\n `SparseTensor` objects going into each row of the output `Tensor` will have\n rank `R-1`.\n\n The minibatch size `N` is extracted from `sparse_shape[0]`.\n\n Args:\n sp_input: The input rank `R` `SparseTensor`.\n container: The container for the underlying `SparseTensorsMap` (optional).\n shared_name: The shared name for the underlying `SparseTensorsMap`\n (optional, defaults to the name of the newly created op).\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A string matrix (2-D `Tensor`) with `N` rows and `1` column.\n Each row represents a unique handle to a `SparseTensor` stored by\n the `SparseTensorMap` underlying this op.\n\n Raises:\n TypeError: If `sp_input` is not a `SparseTensor`.\n \"\"\"\n sp_input = _convert_to_sparse_tensor(sp_input)\n\n return gen_sparse_ops._add_many_sparse_to_tensors_map(\n sp_input.indices, sp_input.values, sp_input.dense_shape,\n container=container, shared_name=shared_name, name=name)\n\n\ndef _take_many_sparse_from_tensors_map(\n sparse_map_op, sparse_handles, rank=None, name=None):\n \"\"\"Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.\n\n The input `sparse_handles` must be a string matrix of shape `[N, 1]` where\n `N` is the minibatch size and the rows correspond to packed outputs of\n `add_sparse_to_tensors_map`. The ranks of the original `SparseTensor` objects\n must all match. When the final `SparseTensor` is created, it has rank one\n higher than the ranks of the incoming `SparseTensor` objects (they have been\n concatenated along a new row dimension).\n\n The output `SparseTensor` object's shape values for all dimensions but the\n first are the max across the input `SparseTensor` objects' shape values\n for the corresponding dimensions. Its first shape value is `N`, the minibatch\n size.\n\n The input `SparseTensor` objects' indices are assumed ordered in\n standard lexicographic order. If this is not the case, after this\n step run `sparse_reorder` to restore index ordering.\n\n For example, if the serialized input is a `[2, 3]` matrix representing two\n original `SparseTensor` objects:\n\n index = [ 0]\n [10]\n [20]\n values = [1, 2, 3]\n shape = [50]\n\n and\n\n index = [ 2]\n [10]\n values = [4, 5]\n shape = [30]\n\n then the final deserialized `SparseTensor` will be:\n\n index = [0 0]\n [0 10]\n [0 20]\n [1 2]\n [1 10]\n values = [1, 2, 3, 4, 5]\n shape = [2 50]\n\n Args:\n sparse_map_op: The `Operation` that created the original handles.\n Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.\n sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.\n The serialized and packed `SparseTensor` objects.\n rank: (optional) Python int, the rank of the `SparseTensor` objects.\n name: A name prefix for the returned tensors (optional)\n\n Returns:\n A `SparseTensor` representing the deserialized `SparseTensor`s,\n concatenated along the `SparseTensor`s' first dimension.\n\n All of the serialized `SparseTensor`s must have had the same rank and type.\n \"\"\"\n if not isinstance(sparse_map_op, ops.Operation):\n raise TypeError(\"sparse_map_op be an Operation\")\n if sparse_map_op.type not in (\"AddSparseToTensorsMap\",\n \"AddManySparseToTensorsMap\"):\n raise TypeError(\"sparse_map_op must be one of AddSparseToTensorsMap or \"\n \"AddSparseToTensorsMap. 
Instead, found `%s`.\" %\n sparse_map_op.type)\n with ops.colocate_with(sparse_map_op):\n shared_name = sparse_map_op.get_attr(\"shared_name\") or sparse_map_op.name\n output_indices, output_values, output_shape = (\n gen_sparse_ops._take_many_sparse_from_tensors_map(\n sparse_handles, dtype=sparse_map_op.get_attr(\"T\"),\n container=sparse_map_op.get_attr(\"container\"),\n shared_name=shared_name, name=name))\n\n # Feed rank data back in, if available\n output_indices.set_shape([None, rank])\n output_shape.set_shape([rank])\n\n return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)\n" ]
[ [ "tensorflow.python.util.deprecation.deprecated_argument_lookup", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.ops.gen_sparse_ops._deserialize_many_sparse", "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.gen_sparse_ops._sparse_split", "tensorflow.python.ops.check_ops.assert_less_equal", "tensorflow.python.ops.gen_sparse_ops.sparse_sparse_maximum", "tensorflow.python.ops.gen_sparse_ops._sparse_tensor_dense_mat_mul", "tensorflow.python.framework.sparse_tensor.SparseTensor.from_value", "tensorflow.python.ops.math_ops.range", "tensorflow.python.ops.array_ops.rank", "tensorflow.python.ops.gen_sparse_ops._sparse_concat", "tensorflow.python.framework.sparse_tensor.SparseTensor", "tensorflow.python.ops.math_ops._ReductionDims", "tensorflow.python.ops.gen_sparse_ops._serialize_sparse", "tensorflow.python.ops.gen_sparse_ops._sparse_reorder", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.ops.gen_sparse_ops._sparse_tensor_dense_add", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.ops.array_ops.setdiff1d", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.ops.gen_sparse_ops.sparse_dense_cwise_add", "tensorflow.python.ops.gen_sparse_ops._add_many_sparse_to_tensors_map", "tensorflow.python.framework.ops.op_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.gen_sparse_ops.sparse_softmax", "tensorflow.python.ops.gen_sparse_ops._sparse_add", "tensorflow.python.ops.gen_sparse_ops._sparse_reshape", "tensorflow.python.ops.array_ops.gather", "tensorflow.python.ops.math_ops.reduce_max", "tensorflow.python.ops.array_ops.pack", "tensorflow.python.ops.array_ops.zeros_like", "tensorflow.python.ops.gen_sparse_ops._sparse_to_dense", "tensorflow.python.ops.gen_sparse_ops._add_sparse_to_tensors_map", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.gen_sparse_ops.sparse_sparse_minimum", "tensorflow.python.ops.array_ops.ones_like", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.ops.array_ops.where", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.ops.gen_sparse_ops._serialize_many_sparse", "tensorflow.python.ops.array_ops.transpose" ] ]
HassamSheikh/jaxrl
[ "f3d924d793bd7e0ba46b24b2092cc2817c7f962f" ]
[ "jaxrl/agents/ddpg/ddpg_learner.py" ]
[ "\"\"\"Implementations of algorithms for continuous control.\"\"\"\n\nimport functools\nfrom typing import Sequence, Tuple\n\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport optax\n\nfrom jaxrl.agents.ddpg.actor import update as update_actor\nfrom jaxrl.agents.ddpg.critic import update as update_critic\nfrom jaxrl.agents.sac.critic import target_update\nfrom jaxrl.datasets import Batch\nfrom jaxrl.networks import critic_net, policies\nfrom jaxrl.networks.common import InfoDict, Model, PRNGKey\n\n\[email protected](jax.jit, static_argnames=('update_target'))\ndef _update_jit(\n actor: Model, critic: Model, target_critic: Model, batch: Batch,\n discount: float, tau: float, update_target: bool\n) -> Tuple[PRNGKey, Model, Model, Model, Model, InfoDict]:\n\n new_critic, critic_info = update_critic(actor, critic, target_critic,\n batch, discount)\n if update_target:\n new_target_critic = target_update(new_critic, target_critic, tau)\n else:\n new_target_critic = target_critic\n\n new_actor, actor_info = update_actor(actor, new_critic, batch)\n\n return new_actor, new_critic, new_target_critic, {\n **critic_info,\n **actor_info,\n }\n\n\nclass DDPGLearner(object):\n def __init__(self,\n seed: int,\n observations: jnp.ndarray,\n actions: jnp.ndarray,\n actor_lr: float = 3e-4,\n critic_lr: float = 3e-4,\n hidden_dims: Sequence[int] = (256, 256),\n discount: float = 0.99,\n tau: float = 0.005,\n target_update_period: int = 1,\n exploration_noise: float = 0.1):\n \"\"\"\n An implementation of [Deep Deterministic Policy Gradient](https://arxiv.org/abs/1509.02971)\n and Clipped Double Q-Learning (https://arxiv.org/abs/1802.09477).\n \"\"\"\n\n action_dim = actions.shape[-1]\n\n self.tau = tau\n self.target_update_period = target_update_period\n self.discount = discount\n self.exploration_noise = exploration_noise\n\n rng = jax.random.PRNGKey(seed)\n rng, actor_key, critic_key = jax.random.split(rng, 3)\n\n actor_def = policies.MSEPolicy(hidden_dims, action_dim)\n actor = Model.create(actor_def,\n inputs=[actor_key, observations],\n tx=optax.adam(learning_rate=actor_lr))\n\n critic_def = critic_net.DoubleCritic(hidden_dims)\n critic = Model.create(critic_def,\n inputs=[critic_key, observations, actions],\n tx=optax.adam(learning_rate=critic_lr))\n target_critic = Model.create(\n critic_def, inputs=[critic_key, observations, actions])\n\n self.actor = actor\n self.critic = critic\n self.target_critic = target_critic\n self.rng = rng\n\n self.step = 1\n\n def sample_actions(self,\n observations: np.ndarray,\n temperature: float = 1.0) -> jnp.ndarray:\n rng, actions = policies.sample_actions(self.rng,\n self.actor.apply_fn,\n self.actor.params,\n observations,\n temperature,\n distribution='det')\n self.rng = rng\n\n actions = np.asarray(actions)\n actions = actions + np.random.normal(\n size=actions.shape) * self.exploration_noise * temperature\n return np.clip(actions, -1, 1)\n\n def update(self, batch: Batch) -> InfoDict:\n self.step += 1\n\n new_actor, new_critic, new_target_critic, info = _update_jit(\n self.actor, self.critic, self.target_critic, batch, self.discount,\n self.tau, self.step % self.target_update_period == 0)\n\n self.actor = new_actor\n self.critic = new_critic\n self.target_critic = new_target_critic\n\n return info\n" ]
[ [ "numpy.random.normal", "numpy.clip", "numpy.asarray" ] ]
vvandriichuk/etl_spark_airflow_emr
[ "0c793ca67389a789c8eb1f487fe3a384e714ff3b" ]
[ "src/data/helper_functions.py" ]
[ "from time import strftime, sleep\nimport sys \nimport os\nimport pandas as pd\nfrom glob import glob \nimport numpy as np\n\n\n\ndef inspect_core_specifications(data, descriptives=False):\n\t\"\"\"Inspect data types, shape and descriptives\n\t\n\t:param data: pandas dataframe \n\t:param descriptives: boolean, print descriptive statistics (default=False)\n\t:return: None\n\t\"\"\"\t\n\t# check if data is list of dataframes\n\tif isinstance(data, list):\n\t\tfor d in data:\n\t\t\tprint('-'*40)\n\t\t\tprint(d.info())\n\t\t\t\n\t\t\tif descriptives:\n\t\t\t\tprint('-'*40)\n\t\t\t\tprint(round(d.describe(include='all', percentiles=[])))\n\t\t\t\n\telse:\n\t\tprint('-'*40)\n\t\tprint(data.info())\n\t\t\n\t\tif descriptives:\n\t\t\tprint('-'*40)\n\t\t\tprint(round(data.describe(include='all', percentiles=[])))\n\tprint('*'*40)\n\t\t\n\ndef inspect_missings(data, verbose=True):\n\t\"\"\"Inspect missings across rows and across columns\n\t\n\tArgs \n\t\tdata: pandas dataframe \n\t\t\n\tReturns\n\t\t:return : dataframe with info on column missings \n\t\"\"\"\n\tif verbose:\n\t\tprint(\"MISSINGS\")\n\t\tprint('-'*40)\n\t\t\n\t# check rows\n\trows_all = data.shape[0]\n\trows_nomiss = data.dropna().shape[0]\n\n\trowmiss_count = rows_all - rows_nomiss\n\trowmiss_share = rowmiss_count/rows_all*100\n\n\tif verbose:\n\t\tprint(\"Any missing in any row: {}/{} ({} %)\".format(rowmiss_count,rows_all, rowmiss_share))\n\t\tprint()\n\t\n\t# check columns\n\tcol_miss = [col for col in data.columns if data[col].isna().any()]\n\t# no missings for any column\n\tif not col_miss:\n\t\tprint(\"No missings for any column.\")\n\telse:\n\t\t# print share of missings for each column\n\t\tprint(\"Return info on column missings\")\n\t\tds_colmiss = data.loc[:,col_miss].isna().sum()\n\t\tds_colmiss_relative = data.loc[:,col_miss].isna().sum()/rows_all*100\n\t\t\n\t\tdf_colmiss = pd.concat([ds_colmiss, ds_colmiss_relative], axis=1, keys=['missing_count', 'share'])\\\n\t\t\t\t\t\t.sort_values(\"share\", ascending=False)\n\t\tif verbose:\n\t\t\tprint(df_colmiss)\n\t\t\tprint('*'*40)\n\n\t\treturn df_colmiss\n\t\t\n\t\n\n\t\ndef row_missing_count(df, top_n=None):\n\t\"\"\"Inspect absolute and relative missings across rows\n\tArgs\n\t\tdf: pandas DataFrame\n\t\ttop_n: restrict output to top_n indices with most missings across columns \n\tReturn\n\t\tpandas dataframe with indices and their absolute and relative missings across columns\n\t\n\t\"\"\"\n\t\n\tdf_colmiss_idx = df.T.isna().sum().sort_values(ascending=False)[:top_n]\n\tdf_colmiss_idx_share = df_colmiss_idx/df.shape[1]\n\t\n\treturn pd.concat([df_colmiss_idx, df_colmiss_idx_share], axis=1, keys=['missing_count', 'missing_share'])\n\n\n\n\ndef sleep_countdown(duration, print_step=1):\n \"\"\"Sleep for certain duration and print remaining time in steps of print_step\n\n\tInput\n\t\tduration: duration of timeout (int)\n\t\tprint_step: steps to print countdown (int)\n\n\tReturn \n\t\tNone\n\t\"\"\"\n sys.stdout.write(\"\\r Seconds remaining:\")\n\n for remaining in range(duration, 0, -1):\n # display only steps\n if remaining % print_step == 0:\n sys.stdout.write(\"\\r\")\n sys.stdout.write(\"{:2d}\".format(remaining))\n sys.stdout.flush()\n\n sleep(1)\n\n sys.stdout.write(\"\\r Complete!\\n\")\n\ndef timestamp_now():\n\t\"\"\"Create timestamp string in format: yyyy/mm/dd-hh/mm/ss\n\t\tprimaryliy used for file naming\n\n\tInput\n\t\tNone\n\n\tReturn\n\t\tString: Timestamp for current time\n\n\t\"\"\"\n\n\ttimestr = strftime(\"%Y%m%d-%H%M%S\")\n\ttimestamp = 
'{}'.format(timestr) \n\n\treturn timestamp\n\n\n\ndef get_files(filepath='./', name_contains=\"\", absolute_path=True, subdirectories=True):\n \"\"\"List all files of directory filepath with their absolute filepaths\n \n Args\n\tfilepath:\t string specifying folder\n\tname_containts: string with constraint on name, \n\t\t\t\t\tfor example all files ending with \"*.py\", or every file starting with '0': \"0*\"\n\tabsolute_path: return absolute paths of files or not\n\tsubdirectories: include subdirectories or not \n\n Return\n\tlist: list with string elements of filenames \n \n \"\"\"\n\n all_files = []\n\n\n for (dirpath, dirnames, filenames) in os.walk(filepath):\n\t \n\t# filenames specified\n if name_contains: \n all_files.extend(glob(os.path.join(dirpath,name_contains)))\n\n elif not name_contains:\n all_files.extend(filenames)\n\t\n\t# exclude subdirectories, break loop \n if not subdirectories:\n break\n\t# otherwise continue with loop, walking down the directory\n\n # get absolute path\n if absolute_path: \n all_files_absolute = [os.path.abspath(f) for f in all_files]\n return all_files_absolute\n\n else:\n return all_files\n\n\n\ndef make_csv(x, filename, data_dir, append=False, header=False, index=False):\n\t'''Merges features and labels and converts them into one csv file with labels in the first column\n\n\t\tInput\n\t\t\tx: Data features\n\t\t\tfile_name: Name of csv file, ex. 'train.csv'\n\t\t\tdata_dir: The directory where files will be saved\n\n\t\tReturn\n\t\t\tNone: Create csv file as specified\n\t'''\n\n\t# create dir if nonexistent\n\tif not os.path.exists(data_dir):\n\t\tos.makedirs(data_dir)\n\n\t# make sure its a df\n\tx = pd.DataFrame(x)\n\n\t# export to csv\n\tif not append:\n\t\tx.to_csv(os.path.join(data_dir, filename), \n\t\t\t\t\t\t\t\t\t header=header, \n\t\t\t\t\t\t\t\t\t index=index)\n\t# append to existing\n\telse:\n\t\tx.to_csv(os.path.join(data_dir, filename),\n\t\t\t\t\t\t\t\t\t mode = 'a',\n\t\t\t\t\t\t\t\t\t header=header, \n\t\t\t\t\t\t\t\t\t index=index)\t\t\n\n\t# nothing is returned, but a print statement indicates that the function has run\n\tprint('Path created: '+str(data_dir)+'/'+str(filename))\n\n\ndef inspect_drop_rows_retain_columns(data, max_missing=3):\n \"\"\"Dropping rows with many missings for certain columns \n to keep columns\n :param data: dataframe\n :param max_missing: defines until which column-wise missing count should be iterated\n :return list with indices to drop, tuple of numpy arrays for plotting: Columns to keep vs. 
rows dropped (%)\n \"\"\"\n # count missings per column, store in series\n count_column_missing = data.isna().sum(axis=0).sort_values(ascending=False)\n column_missing_names = count_column_missing.index\n\n # get array of missing count with unique values to loop over\n count_column_missing_unique = count_column_missing[count_column_missing >= 0].sort_values().unique()\n\n # define until which column-wise missing count should be iterated\n plot_x, plot_y = np.zeros(max_missing), np.zeros(max_missing)\n print(\"Trade off rows against columns\\nDrop rows and modifiy original dataframe to keep more columns\\n\")\n print(\"i\\tKeep Columns\\tDrop Rows [%]\\n\"+'-'*40)\n \n # for comparisons:\n # raw df drop columns with missing, no modification \n df_raw = data.dropna(axis=1)\n\n\n drop_rows = []\n\n for arr_idx, i in enumerate(count_column_missing_unique[:max_missing]):\n select_columns = count_column_missing[count_column_missing <= i].index\n\n # return indices (=ticker) where the 1 missing per column occurs\n indices_many_missings = list(data.loc[data[select_columns].isnull().any(1),:].index)\n drop_rows.append(indices_many_missings)\n \n ## compare modified vs. raw dropped missings\n # modified df \n df_fin_mod = data.drop(indices_many_missings).dropna(axis=1)\n df_fin_nomiss = df_fin_mod\n\n ### Benefits of dropping x rows\n ## comparison of dropping and keeping\n # original \n n_row_orig, n_col_orig = data.shape\n # modified & cleaned \n n_row_mod, n_col_mod = df_fin_mod.shape\n # raw & cleaned\n n_row_nomod, n_col_nomod = df_raw.shape\n\n ## STATISTICS\n # dropped rows to modify df (count and %)\n n_dropped_rows_mod = len(indices_many_missings)\n pct_dropped_rows_mod = round(n_dropped_rows_mod/n_row_orig*100)\n\n # dropped cols for modified df (count and %)\n n_dropped_cols_mod = n_col_orig - n_col_mod\n pct_dropped_cols_mod = round(n_dropped_cols_mod/n_col_orig*100)\n\n # dropped cols for non-modified df (count and %)\n n_dropped_cols_nomod = n_col_orig - n_col_nomod\n pct_dropped_cols_nomod = round(n_dropped_cols_nomod/n_col_orig*100)\n\n # plot dropped rows against retained columns \n # (x=100-pct_dropped_rows_mod, y=pct_dropped_rows_mod)\n plot_x[arr_idx], plot_y[arr_idx] = 100-pct_dropped_cols_mod, pct_dropped_rows_mod\n print(\"{}\\t{}\\t\\t{}\".format(i, plot_x[arr_idx], plot_y[arr_idx])) \n \n\n return drop_rows, plot_x, plot_y" ]
[ [ "pandas.DataFrame", "pandas.concat", "numpy.zeros" ] ]
aoyandong/horovod
[ "e94d8ea0dff8c2b45698cfe4fabb2e6553d0b9a8" ]
[ "horovod/spark/torch/estimator.py" ]
[ "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport horovod.spark.common._namedtuple_fix\n\nimport copy\nimport io\nimport numbers\nimport time\n\nfrom pyspark import keyword_only\nfrom pyspark.ml.param.shared import Param, Params\nfrom pyspark.ml.util import MLWritable, MLReadable\nfrom pyspark.sql import SparkSession\n\nfrom horovod.runner.common.util import codec\nfrom horovod.spark.common import util\nfrom horovod.spark.common.estimator import HorovodEstimator, HorovodModel\nfrom horovod.spark.common.params import EstimatorParams\nfrom horovod.spark.common.serialization import \\\n HorovodParamsWriter, HorovodParamsReader\nfrom horovod.spark.torch import remote\nfrom horovod.spark.torch.util import deserialize_fn, serialize_fn, \\\n save_into_bio\n\nimport numpy as np\nimport torch\nimport torch.utils.data\n\n\ndef _torch_param_serialize(param_name, param_val):\n if param_val is None:\n return None\n\n if param_name in [EstimatorParams.backend.name, EstimatorParams.store.name]:\n # We do not serialize backend and store. These params have to be regenerated for each\n # run of the pipeline\n return None\n elif param_name == EstimatorParams.model.name:\n serialize = serialize_fn()\n return serialize(param_val)\n\n return codec.dumps_base64(param_val)\n\n\nclass TorchEstimatorParamsWriter(HorovodParamsWriter):\n def saveImpl(self, path):\n # Write the parameters\n HorovodParamsWriter.saveMetadata(self.instance, path, self.sc,\n param_serializer_fn=_torch_param_serialize)\n\n\nclass TorchEstimatorParamsWritable(MLWritable):\n def write(self):\n return TorchEstimatorParamsWriter(self)\n\n\nclass TorchEstimatorParamsReader(HorovodParamsReader):\n def _deserialize_dict(self, dict_values):\n deserialized_dict = dict()\n for key, val in dict_values.items():\n if val is None:\n deserialized_dict[key] = None\n elif key == EstimatorParams.model.name:\n deserialize = deserialize_fn()\n deserialized_dict[key] = deserialize(val)\n else:\n deserialized_dict[key] = codec.loads_base64(val)\n return deserialized_dict\n\n\nclass TorchEstimatorParamsReadable(MLReadable):\n @classmethod\n def read(cls):\n \"\"\"Returns a DefaultParamsReader instance for this class.\"\"\"\n return TorchEstimatorParamsReader(cls)\n\n\nclass TorchEstimator(HorovodEstimator, TorchEstimatorParamsWritable,\n TorchEstimatorParamsReadable):\n \"\"\"Spark Estimator for fitting PyTorch models to a DataFrame.\n\n Args:\n num_proc: Number of Horovod processes. Defaults to `spark.default.parallelism`.\n model: PyTorch model to train.\n backend: Optional Backend object for running distributed training function. Defaults to SparkBackend with\n `num_proc` worker processes. 
Cannot be specified if `num_proc` is also provided.\n        store: Store object that abstracts reading and writing of intermediate data and run results.\n        optimizer: PyTorch optimizer to be converted into a `hvd.DistributedOptimizer` for training.\n        loss: PyTorch loss or list of losses.\n        loss_constructors: Optional functions that generate losses.\n        metrics: Optional metrics to record.\n        loss_weights: Optional list of float weight values to assign each loss.\n        sample_weight_col: Optional column indicating the weight of each sample.\n        gradient_compression: Gradient compression used by `hvd.DistributedOptimizer`.\n        feature_cols: Column names used as feature inputs to the model. Must be a list with each feature\n                      mapping to a sequential argument in the model's forward() function.\n        input_shapes: List of shapes for each input tensor to the model.\n        validation: Optional validation column name (string) where every row in the column is either 1/True or 0/False,\n                    or validation split (float) giving percent of data to be randomly selected for validation.\n        label_cols: Column names used as labels. Must be a list with one label for each output of the model.\n        batch_size: Number of rows from the DataFrame per batch.\n        val_batch_size: Number of rows from the DataFrame per batch for validation, if not set, will use batch_size.\n        epochs: Number of epochs to train.\n        verbose: Verbosity level [0, 2] (default: 1).\n        shuffle_buffer_size: Optional size of in-memory shuffle buffer in rows. Allocating a larger buffer size\n                             increases randomness of shuffling at the cost of more host memory. Defaults to estimating\n                             with an assumption of 4GB of memory per host.\n        partitions_per_process: Number of Parquet partitions to assign per worker process from `num_proc` (default: 10).\n        run_id: Optional unique ID for this run for organization in the Store. Will be automatically assigned if not\n                provided.\n        train_minibatch_fn: Optional custom function to execute within the training loop. Defaults to standard\n                            gradient descent process.\n        train_steps_per_epoch: Number of steps to train each epoch. Useful for testing that model trains successfully.\n                               Defaults to training the entire dataset each epoch.\n        validation_steps_per_epoch: Number of validation steps to perform each epoch.\n        transformation_fn: Optional function that takes a row as its parameter\n                           and returns a modified row that is then fed into the\n                           train or validation step. This transformation is\n                           applied after batching. See Petastorm [TransformSpec](https://github.com/uber/petastorm/blob/master/petastorm/transform.py)\n                           for more details. Note that this function constructs\n                           another function which should perform the\n                           transformation.\n        train_reader_num_workers: This parameter specifies the number of parallel processes that\n                                  read the training data from data store and apply data\n                                  transformations to it. Increasing this number\n                                  will generally increase the reading rate but will also\n                                  increase the memory footprint. 
More processes are\n                                  particularly useful if the bandwidth to the data store is not\n                                  high enough, or users need to apply transformation such as\n                                  decompression or data augmentation on raw data.\n        val_reader_num_workers: Similar to the train_reader_num_workers.\n    \"\"\"\n\n    input_shapes = Param(Params._dummy(), 'input_shapes', 'input layer shapes')\n    loss_constructors = Param(Params._dummy(), 'loss_constructors',\n                              'functions that construct the loss')\n    train_minibatch_fn = Param(Params._dummy(), 'train_minibatch_fn',\n                               'functions that construct the minibatch train function for torch')\n\n    @keyword_only\n    def __init__(self,\n                 num_proc=None,\n                 model=None,\n                 backend=None,\n                 store=None,\n                 optimizer=None,\n                 loss=None,\n                 loss_constructors=None,\n                 metrics=None,\n                 loss_weights=None,\n                 sample_weight_col=None,\n                 gradient_compression=None,\n                 feature_cols=None,\n                 input_shapes=None,\n                 validation=None,\n                 label_cols=None,\n                 callbacks=None,\n                 batch_size=None,\n                 val_batch_size=None,\n                 epochs=None,\n                 verbose=1,\n                 shuffle_buffer_size=None,\n                 partitions_per_process=None,\n                 run_id=None,\n                 train_minibatch_fn=None,\n                 train_steps_per_epoch=None,\n                 validation_steps_per_epoch=None,\n                 transformation_fn=None,\n                 train_reader_num_workers=None,\n                 val_reader_num_workers=None,\n                 label_shapes=None):\n\n        super(TorchEstimator, self).__init__()\n        self._setDefault(loss_constructors=None,\n                         input_shapes=None,\n                         train_minibatch_fn=None,\n                         transformation_fn=None)\n\n        kwargs = self._input_kwargs\n\n        if EstimatorParams.loss.name in kwargs and TorchEstimator.loss_constructors.name in kwargs:\n            raise ValueError(\"only one of loss_constructors and loss parameters can be specified.\")\n\n        self.setParams(**kwargs)\n\n    def setTrainMinibatchFn(self, value):\n        return self._set(train_minibatch_fn=value)\n\n    def getTrainMinibatchFn(self):\n        return self.getOrDefault(self.train_minibatch_fn)\n\n    def setInputShapes(self, value):\n        return self._set(input_shapes=value)\n\n    def getInputShapes(self):\n        return self.getOrDefault(self.input_shapes)\n\n    def setLossConstructors(self, value):\n        return self._set(loss_constructors=value)\n\n    def getLossConstructors(self):\n        return self.getOrDefault(self.loss_constructors)\n\n    def _get_optimizer(self):\n        return self.getOrDefault(self.optimizer)\n\n    # Overwrites Model's getOptimizer method\n    def getOptimizer(self):\n        model = self.getModel()\n        if model:\n            optimizer = self._get_optimizer()\n            optimizer_cls = optimizer.__class__\n            optimizer_state = optimizer.state_dict()\n            new_optimizer = optimizer_cls(model.parameters(), lr=1)\n            new_optimizer.load_state_dict(optimizer_state)\n            return new_optimizer\n        else:\n            return self._get_optimizer()\n\n    def _check_metadata_compatibility(self, metadata):\n        util.check_shape_compatibility(metadata,\n                                       self.getFeatureCols(),\n                                       self.getLabelCols(),\n                                       input_shapes=self.getInputShapes(),\n                                       label_shapes=self.getLabelShapes())\n\n    def _fit_on_prepared_data(self, backend, train_rows, val_rows, metadata, avg_row_size, dataset_idx=None):\n        self._check_params(metadata)\n\n        run_id = self.getRunId()\n        if run_id is None:\n            run_id = 'pytorch_' + str(int(time.time()))\n\n        last_checkpoint_state = None\n        if self._has_checkpoint(run_id):\n            last_checkpoint_state = self._load_checkpoint(run_id)\n\n        # Model parameters\n        model_pre_train = self.getModel()\n        model_state = model_pre_train.state_dict()\n        serialized_model = serialize_fn()(model_pre_train)\n\n        # Optimizer parameters\n        optimizer = self._get_optimizer()\n        optimizer_cls = optimizer.__class__\n        optimizer_state = optimizer.state_dict()\n\n        # Combine model and 
optimizer state\n model_opt_state = {'model': model_state, 'optimizer': optimizer_state} \\\n if last_checkpoint_state is None else last_checkpoint_state\n model_opt_state_serialized = save_into_bio(model_opt_state, torch.save)\n\n trainer = remote.RemoteTrainer(self, metadata, last_checkpoint_state, run_id, dataset_idx)\n handle = backend.run(trainer,\n args=(serialized_model, optimizer_cls, model_opt_state_serialized,\n train_rows, val_rows, avg_row_size),\n env={})\n return self._create_model(handle, run_id, metadata)\n\n def _load_checkpoint(self, run_id):\n store = self.getStore()\n last_ckpt_path = store.get_checkpoint_path(run_id)\n\n if self.getVerbose():\n print('Resuming training from last checkpoint: {}'.format(last_ckpt_path))\n\n ckpt_file = io.BytesIO(store.read(last_ckpt_path))\n return torch.load(ckpt_file)\n\n def _create_model(self, run_results, run_id, metadata):\n history, serialized_checkpoint = run_results[0]\n serialized_checkpoint.seek(0)\n best_checkpoint = torch.load(serialized_checkpoint, map_location=torch.device('cpu'))\n\n model = copy.deepcopy(self.getModel())\n optimizer = copy.deepcopy(self.getOptimizer())\n\n model.load_state_dict(best_checkpoint['model'])\n optimizer.load_state_dict(best_checkpoint['optimizer'])\n\n return self.get_model_class()(**self._get_model_kwargs(\n model, history, optimizer, run_id, metadata))\n\n def get_model_class(self):\n return TorchModel\n\n def _get_model_kwargs(self, model, history, optimizer, run_id, metadata):\n return dict(history=history,\n model=model,\n optimizer=optimizer,\n feature_columns=self.getFeatureCols(),\n input_shapes=self.getInputShapes(),\n label_columns=self.getLabelCols(),\n run_id=run_id,\n _metadata=metadata,\n loss=self.getLoss(),\n loss_constructors=self.getLossConstructors())\n\n\nclass TorchModel(HorovodModel, TorchEstimatorParamsWritable, TorchEstimatorParamsReadable):\n \"\"\"Spark Transformer wrapping a PyTorch model, used for making predictions on a DataFrame.\n\n Retrieve the underlying PyTorch model by calling `torch_model.getModel()`.\n\n Args:\n history: List of metrics, one entry per epoch during training.\n model: Trained PyTorch model.\n feature_columns: List of feature column names.\n label_columns: List of label column names.\n optimizer: PyTorch optimizer used during training, containing updated state.\n run_id: ID of the run used to train the model.\n loss: PyTorch loss(es).\n loss_constructors: PyTorch loss constructors.\n \"\"\"\n\n optimizer = Param(Params._dummy(), 'optimizer', 'optimizer')\n input_shapes = Param(Params._dummy(), 'input_shapes', 'input layer shapes')\n loss = Param(Params._dummy(), 'loss', 'loss')\n loss_constructors = Param(Params._dummy(), 'loss_constructors',\n 'functions that construct the loss')\n\n @keyword_only\n def __init__(self,\n history=None,\n model=None,\n feature_columns=None,\n input_shapes=None,\n label_columns=None,\n optimizer=None,\n run_id=None,\n _metadata=None,\n loss=None,\n loss_constructors=None):\n super(TorchModel, self).__init__()\n\n if label_columns:\n self.setOutputCols([col + '__output' for col in label_columns])\n\n self._setDefault(optimizer=None,\n loss=None,\n loss_constructors=None,\n input_shapes=None)\n\n kwargs = self._input_kwargs\n self.setParams(**kwargs)\n\n def setLoss(self, value):\n return self._set(loss=value)\n\n def getLoss(self):\n return self.getOrDefault(self.loss)\n\n def setLossConstructors(self, value):\n return self._set(loss_constructors=value)\n\n def getLossConstructors(self):\n return 
self.getOrDefault(self.loss_constructors)\n\n    def setInputShapes(self, value):\n        return self._set(input_shapes=value)\n\n    def getInputShapes(self):\n        return self.getOrDefault(self.input_shapes)\n\n    def setOptimizer(self, value):\n        return self._set(optimizer=value)\n\n    def _get_optimizer(self):\n        return self.getOrDefault(self.optimizer)\n\n    def getOptimizer(self):\n        model = self.getModel()\n        if model:\n            _optimizer = self._get_optimizer()\n            optimizer_cls = _optimizer.__class__\n            optimizer_state = _optimizer.state_dict()\n            new_optimizer = optimizer_cls(model.parameters(), lr=1)\n            new_optimizer.load_state_dict(optimizer_state)\n            return new_optimizer\n        else:\n            return self._get_optimizer()\n\n    # To run locally on OS X, need export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES\n    def _transform(self, df):\n        model_pre_predict = self.getModel()\n        model_pre_predict.eval()\n\n        deserialize = deserialize_fn()\n        serialize = serialize_fn()\n        serialized_model = serialize(model_pre_predict)\n\n        input_shapes = self.getInputShapes()\n        label_cols = self.getLabelColumns()\n        output_cols = self.getOutputCols()\n        feature_cols = self.getFeatureColumns()\n        metadata = self._get_metadata()\n\n        def predict(rows):\n            from pyspark import Row\n            from pyspark.ml.linalg import DenseVector, SparseVector\n\n            model = deserialize(serialized_model)\n            # Perform predictions.\n            for row in rows:\n                fields = row.asDict().copy()\n\n                # Note: if the col is SparseVector, torch.tensor(col) correctly converts it to a\n                # dense torch tensor.\n                data = [torch.tensor([row[col]]).reshape(shape) for\n                        col, shape in zip(feature_cols, input_shapes)]\n\n                with torch.no_grad():\n                    preds = model(*data)\n\n                if not isinstance(preds, list) and not isinstance(preds, tuple):\n                    preds = [preds]\n\n                for label_col, output_col, pred in zip(label_cols, output_cols, preds):\n                    meta = metadata[label_col]\n                    col_type = meta['spark_data_type']\n                    # dtype for dense and spark tensor is always np.float64\n                    if col_type == DenseVector:\n                        shape = np.prod(pred.shape)\n                        flattened_pred = pred.reshape(shape, )\n                        field = DenseVector(flattened_pred)\n                    elif col_type == SparseVector:\n                        shape = meta['shape']\n                        flattened_pred = pred.reshape(shape, )\n                        nonzero_indices = flattened_pred.nonzero()[0]\n                        field = SparseVector(shape, nonzero_indices,\n                                             flattened_pred[nonzero_indices])\n                    elif pred.shape.numel() == 1:\n                        # If the column is scalar type, int, float, etc.\n                        value = pred.item()\n                        python_type = util.spark_scalar_to_python_type(col_type)\n                        if issubclass(python_type, numbers.Integral):\n                            value = round(value)\n                        field = python_type(value)\n                    else:\n                        field = DenseVector(pred.reshape(-1))\n\n                    fields[output_col] = field\n\n                yield Row(**fields)\n\n        spark0 = SparkSession._instantiatedSession\n\n        # Get a limited DF and make predictions and get the schema of the final DF\n        limited_pred_rdd = df.limit(100000).rdd.mapPartitions(predict)\n        limited_pred_df = spark0.createDataFrame(limited_pred_rdd, samplingRatio=1)\n        final_output_schema = limited_pred_df.schema\n\n        # Spark has to infer whether a field is nullable or not from a limited number of samples.\n        # It does not always get it right. 
We copy the nullable boolean variable for the fields\n # from the original dataframe to the final DF schema.\n nullables = {field.name: field.nullable for field in df.schema.fields}\n for field in final_output_schema.fields:\n if field.name in nullables:\n field.nullable = nullables[field.name]\n\n pred_rdd = df.rdd.mapPartitions(predict)\n # Use the schema from previous section to construct the final DF with prediction\n return spark0.createDataFrame(pred_rdd, schema=final_output_schema)\n" ]
[ [ "torch.load", "torch.no_grad", "torch.tensor", "numpy.prod", "torch.device" ] ]
sweersr/visions
[ "1af04235cb77bec52e4923627dfbf968ed1a584d" ]
[ "tests/series.py" ]
[ "import datetime\nimport uuid\nfrom ipaddress import IPv4Address\nfrom pathlib import PureWindowsPath, PurePosixPath\nfrom urllib.parse import urlparse\nimport pandas as pd\nimport numpy as np\nfrom shapely import wkt\n\nfrom visions.core.implementations.types import *\n\n\ndef get_series():\n return [\n # Int Series\n pd.Series([1, 2, 3], name=\"int_series\"),\n pd.Series(range(10), name=\"int_range\"),\n pd.Series([1, 2, 3], name=\"Int64_int_series\", dtype=\"Int64\"),\n pd.Series([1, 2, 3, np.nan], name=\"Int64_int_nan_series\", dtype=\"Int64\"),\n pd.Series([1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0], name=\"int_series_boolean\"),\n # Count\n pd.Series(np.array([1, 2, 3, 4], dtype=np.uint32), name=\"np_uint32\"),\n # Categorical\n pd.Series([1, 2, 3], name=\"categorical_int_series\", dtype=\"category\"),\n pd.Series(\n pd.Categorical(\n [\"A\", \"B\", \"C\", \"C\", \"B\", \"A\"],\n categories=[\"A\", \"B\", \"C\"],\n ordered=False,\n ),\n name=\"categorical_char\",\n ),\n pd.Series([1.0, 2.0, 3.1], dtype=\"category\", name=\"categorical_float_series\"),\n pd.Series(\n [\"Georgia\", \"Sam\"], dtype=\"category\", name=\"categorical_string_series\"\n ),\n pd.Series(\n [np.complex(0, 0), np.complex(1, 2), np.complex(3, -1)],\n name=\"categorical_complex_series\",\n dtype=\"category\",\n ),\n # Ordinal\n pd.Series(\n pd.Categorical(\n [\"A\", \"B\", \"C\", \"C\", \"B\", \"A\"], categories=[\"A\", \"B\", \"C\"], ordered=True\n ),\n name=\"ordinal\",\n ),\n # Float Series\n pd.Series([1.0, 2.1, 3.0], name=\"float_series\"),\n pd.Series([1.0, 2.5, np.nan], name=\"float_nan_series\"),\n pd.Series([1.0, 2.0, 3.0, 4.0], name=\"float_series2\"),\n pd.Series(np.array([1.2, 2, 3, 4], dtype=np.float), name=\"float_series3\"),\n pd.Series([1, 2, 3.05, 4], dtype=float, name=\"float_series4\"),\n pd.Series([np.nan, 1.2], name=\"float_series5\"),\n pd.Series([np.nan, 1.1], dtype=np.single, name=\"float_series6\"),\n pd.Series([np.inf, np.NINF, np.PINF, 1000000.0, 5.5], name=\"float_with_inf\"),\n pd.Series([np.inf, np.NINF, np.Infinity, np.PINF], name=\"inf_series\"),\n pd.Series([1, 2, np.nan], name=\"int_nan_series\"),\n # Nan Series\n pd.Series([np.nan], name=\"nan_series\"),\n pd.Series([np.nan, np.nan, np.nan, np.nan], name=\"nan_series_2\"),\n # String Series\n pd.Series([\"Patty\", \"Valentine\"], name=\"string_series\"),\n pd.Series([\"1941-05-24\", \"13/10/2016\"], name=\"timestamp_string_series\"),\n pd.Series([\"mack\", \"the\", \"finger\"], name=\"string_unicode_series\"),\n pd.Series(\n np.array([\"upper\", \"hall\"], dtype=np.unicode_),\n name=\"string_np_unicode_series\",\n ),\n pd.Series([\"1.0\", \"2.0\", np.nan], name=\"string_num_nan\"),\n pd.Series([\"1.0\", \"2.0\", \"3.0\"], name=\"string_num\"),\n pd.Series([\"1.0\", \"45.67\", np.nan], name=\"string_flt_nan\"),\n pd.Series([\"1.0\", \"45.67\", \"3.5\"], name=\"string_flt\"),\n pd.Series(\n [\"POINT (-92 42)\", \"POINT (-92 42.1)\", \"POINT (-92 42.2)\"],\n name=\"geometry_string_series\",\n ),\n pd.Series(\n [\n \"I was only robbing the register,\",\n \"I hope you understand\",\n \"One of us had better call up the cops\",\n \"In the hot New Jersey night\",\n np.nan,\n ],\n name=\"string_str_nan\",\n ),\n pd.Series([\"True\", \"False\", None], name=\"string_bool_nan\"),\n pd.Series(range(20), name=\"int_str_range\").astype(\"str\"),\n pd.Series([\"1937-05-06\", \"20/4/2014\"], name=\"string_date\"),\n pd.Series(\n [\n \"http://www.cwi.nl:80/%7Eguido/Python.html\",\n \"https://github.com/pandas-profiling/pandas-profiling\",\n ],\n 
name=\"str_url\",\n ),\n pd.Series(\n [r\"C:\\\\home\\\\user\\\\file.txt\", r\"C:\\\\home\\\\user\\\\test2.txt\"],\n name=\"path_series_windows_str\",\n ),\n pd.Series(\n [r\"/home/user/file.txt\", r\"/home/user/test2.txt\"],\n name=\"path_series_linux_str\",\n ),\n # Bool Series\n pd.Series([True, False], name=\"bool_series\"),\n pd.Series([True, False, None], name=\"bool_nan_series\"),\n pd.Series([True, False, None], name=\"nullable_bool_series\", dtype=\"Bool\"),\n pd.Series([True, False, False, True], dtype=bool, name=\"bool_series2\"),\n pd.Series(np.array([1, 0, 0, 1], dtype=np.bool), name=\"bool_series3\"),\n # Complex Series\n pd.Series(\n [np.complex(0, 0), np.complex(1, 2), np.complex(3, -1)],\n name=\"complex_series\",\n ),\n pd.Series(\n [\n np.complex(0, 0),\n np.complex(1, 2),\n np.complex(3, -1),\n np.complex(np.nan, np.nan),\n ],\n name=\"complex_series_nan\",\n ),\n pd.Series([\"(1+1j)\", \"(2+2j)\", \"(10+100j)\"], name=\"str_complex\"),\n pd.Series(\n [np.complex(0, 0), np.complex(1, 2), np.complex(3, -1), np.nan],\n name=\"complex_series_nan_2\",\n ),\n pd.Series(\n [complex(0, 0), complex(1, 2), complex(3, -1), np.nan],\n name=\"complex_series_py_nan\",\n ),\n pd.Series(\n [complex(0, 0), complex(1, 2), complex(3, -1)], name=\"complex_series_py\"\n ),\n pd.Series(\n [np.complex(0, 0), np.complex(1, 0), np.complex(3, 0), np.complex(-1, 0)],\n name=\"complex_series_float\",\n ),\n # Datetime Series\n pd.Series(\n [pd.datetime(2017, 3, 5, 12, 2), pd.datetime(2019, 12, 4)],\n name=\"timestamp_series\",\n ),\n pd.Series(\n [pd.datetime(2017, 3, 5), pd.datetime(2019, 12, 4, 3, 2, 0), pd.NaT],\n name=\"timestamp_series_nat\",\n ),\n pd.Series(\n [pd.datetime(2017, 3, 5), pd.datetime(2019, 12, 4), pd.NaT],\n name=\"date_series_nat\",\n ),\n pd.Series(\n pd.date_range(\n start=\"2013-05-18 12:00:00\",\n periods=2,\n freq=\"H\",\n tz=\"Europe/Brussels\",\n name=\"timestamp_aware_series\",\n )\n ),\n pd.to_datetime(\n pd.Series(\n [\n datetime.date(2011, 1, 1),\n datetime.date(2012, 1, 2),\n datetime.date(2013, 1, 1),\n ],\n name=\"datetime\",\n )\n ),\n # Timedelta Series\n pd.Series([pd.Timedelta(days=i) for i in range(3)], name=\"timedelta_series\"),\n pd.Series(\n [pd.Timedelta(days=i) for i in range(3)] + [pd.NaT],\n name=\"timedelta_series_nat\",\n ),\n # Geometry Series\n pd.Series(\n [\n wkt.loads(\"POINT (-92 42)\"),\n wkt.loads(\"POINT (-92 42.1)\"),\n wkt.loads(\"POINT (-92 42.2)\"),\n ],\n name=\"geometry_series\",\n ),\n # Path Series\n pd.Series(\n [\n PurePosixPath(\"/home/user/file.txt\"),\n PurePosixPath(\"/home/user/test2.txt\"),\n ],\n name=\"path_series_linux\",\n ),\n pd.Series(\n [\n PureWindowsPath(\"C:\\\\home\\\\user\\\\file.txt\"),\n PureWindowsPath(\"C:\\\\home\\\\user\\\\test2.txt\"),\n ],\n name=\"path_series_windows\",\n ),\n # Url Series\n pd.Series(\n [\n urlparse(\"http://www.cwi.nl:80/%7Eguido/Python.html\"),\n urlparse(\"https://github.com/dylan-profiling/hurricane\"),\n ],\n name=\"url_series\",\n ),\n # UUID Series\n pd.Series(\n [\n uuid.UUID(\"0b8a22ca-80ad-4df5-85ac-fa49c44b7ede\"),\n uuid.UUID(\"aaa381d6-8442-4f63-88c8-7c900e9a23c6\"),\n uuid.UUID(\"00000000-0000-0000-0000-000000000000\"),\n ],\n name=\"uuid_series\",\n ),\n pd.Series(\n [\n \"0b8a22ca-80ad-4df5-85ac-fa49c44b7ede\",\n \"aaa381d6-8442-4f63-88c8-7c900e9a23c6\",\n \"00000000-0000-0000-0000-000000000000\",\n ],\n name=\"uuid_series_str\",\n ),\n # Object Series\n pd.Series([[1, \"\"], [2, \"Rubin\"], [3, \"Carter\"]], name=\"mixed_list[str,int]\"),\n pd.Series(\n [{\"why\": 
\"did you\"}, {\"bring him\": \"in for he\"}, {\"aint\": \"the guy\"}],\n name=\"mixed_dict\",\n ),\n pd.Series(\n [pd.to_datetime, pd.to_timedelta, pd.read_json, pd.to_pickle],\n name=\"callable\",\n ),\n pd.Series([pd, wkt, np], name=\"module\"),\n pd.Series([\"1.1\", \"2\"], name=\"textual_float\"),\n pd.Series([\"1.1\", \"2\", \"NAN\"], name=\"textual_float_nan\"),\n # Empty\n pd.Series([], name=\"empty\"),\n pd.Series([], name=\"empty_float\", dtype=float),\n pd.Series([], name=\"empty_int64\", dtype=\"Int64\"),\n pd.Series([], name=\"empty_object\", dtype=\"object\"),\n pd.Series([], name=\"empty_bool\", dtype=bool),\n # IP\n pd.Series([IPv4Address(\"127.0.0.1\"), IPv4Address(\"127.0.0.1\")], name=\"ip\"),\n pd.Series([\"127.0.0.1\", \"127.0.0.1\"], name=\"ip_str\"),\n ]\n\n\ndef get_contains_map():\n series_map = {\n visions_integer: [\n \"int_series\",\n \"Int64_int_series\",\n \"int_range\",\n \"Int64_int_nan_series\",\n \"int_series_boolean\",\n ],\n visions_count: [\"np_uint32\"],\n visions_path: [\"path_series_linux\", \"path_series_windows\"],\n visions_url: [\"url_series\"],\n visions_float: [\n \"float_series\",\n \"float_series2\",\n \"float_series3\",\n \"float_series4\",\n \"inf_series\",\n \"nan_series\",\n \"float_nan_series\",\n \"float_series5\",\n \"int_nan_series\",\n \"nan_series_2\",\n \"float_with_inf\",\n \"float_series6\",\n ],\n visions_categorical: [\n \"categorical_int_series\",\n \"categorical_float_series\",\n \"categorical_string_series\",\n \"categorical_complex_series\",\n \"categorical_char\",\n \"ordinal\",\n ],\n visions_bool: [\n \"bool_series\",\n \"bool_series2\",\n \"bool_series3\",\n \"nullable_bool_series\",\n ],\n visions_complex: [\n \"complex_series\",\n \"complex_series_py\",\n \"complex_series_nan\",\n \"complex_series_py_nan\",\n \"complex_series_nan_2\",\n \"complex_series_float\",\n ],\n visions_datetime: [\n \"timestamp_series\",\n \"timestamp_aware_series\",\n \"datetime\",\n \"timestamp_series_nat\",\n \"date_series_nat\",\n ],\n visions_date: [\"datetime\", \"date_series_nat\"],\n visions_timedelta: [\"timedelta_series\", \"timedelta_series_nat\"],\n visions_string: [\n \"timestamp_string_series\",\n \"string_series\",\n \"geometry_string_series\",\n \"string_unicode_series\",\n \"string_np_unicode_series\",\n \"path_series_linux_str\",\n \"path_series_windows_str\",\n \"int_str_range\",\n \"string_date\",\n \"textual_float\",\n \"textual_float_nan\",\n \"ip_str\",\n \"string_flt\",\n \"string_num\",\n \"str_url\",\n \"string_str_nan\",\n \"string_num_nan\",\n \"string_bool_nan\",\n \"string_flt_nan\",\n \"str_complex\",\n \"uuid_series_str\",\n ],\n visions_geometry: [\"geometry_series\"],\n visions_ip: [\"ip\"],\n visions_ordinal: [\"ordinal\"],\n visions_uuid: [\"uuid_series\"],\n }\n\n series_map[visions_object] = (\n [\"mixed_list[str,int]\", \"mixed_dict\", \"callable\", \"module\", \"bool_nan_series\"]\n + series_map[visions_string]\n + series_map[visions_geometry]\n + series_map[visions_path]\n + series_map[visions_url]\n + series_map[visions_ip]\n + series_map[visions_uuid]\n )\n\n # Empty series\n all = [\"empty\", \"empty_bool\", \"empty_float\", \"empty_int64\", \"empty_object\"]\n for key, values in series_map.items():\n all += values\n series_map[visions_generic] = list(set(all))\n\n return series_map\n\n\ndef infer_series_type_map():\n return {\n \"int_series\": visions_integer,\n \"categorical_int_series\": visions_categorical,\n \"int_nan_series\": visions_integer,\n \"Int64_int_series\": visions_integer,\n 
\"Int64_int_nan_series\": visions_integer,\n \"np_uint32\": visions_count,\n \"int_range\": visions_integer,\n \"float_series\": visions_float,\n \"float_nan_series\": visions_float,\n \"int_series_boolean\": visions_bool,\n \"float_series2\": visions_integer,\n \"float_series3\": visions_float,\n \"float_series4\": visions_float,\n \"float_series5\": visions_float,\n \"float_series6\": visions_float,\n \"complex_series_float\": visions_integer,\n \"categorical_float_series\": visions_categorical,\n \"float_with_inf\": visions_float,\n \"inf_series\": visions_float,\n \"nan_series\": visions_float,\n \"nan_series_2\": visions_float,\n \"string_series\": visions_string,\n \"categorical_string_series\": visions_categorical,\n \"timestamp_string_series\": visions_date,\n \"string_unicode_series\": visions_string,\n \"string_np_unicode_series\": visions_string,\n \"string_num_nan\": visions_integer,\n \"string_num\": visions_integer,\n \"string_flt_nan\": visions_float,\n \"string_flt\": visions_float,\n \"string_str_nan\": visions_string,\n \"string_bool_nan\": visions_bool,\n \"int_str_range\": visions_integer,\n \"string_date\": visions_date,\n \"str_url\": visions_url,\n \"bool_series\": visions_bool,\n \"bool_nan_series\": visions_bool,\n \"nullable_bool_series\": visions_bool,\n \"bool_series2\": visions_bool,\n \"bool_series3\": visions_bool,\n \"complex_series\": visions_complex,\n \"complex_series_nan\": visions_complex,\n \"complex_series_nan_2\": visions_complex,\n \"complex_series_py_nan\": visions_complex,\n \"complex_series_py\": visions_complex,\n \"categorical_complex_series\": visions_categorical,\n \"timestamp_series\": visions_datetime,\n \"timestamp_series_nat\": visions_datetime,\n \"timestamp_aware_series\": visions_datetime,\n \"datetime\": visions_date,\n \"timedelta_series\": visions_timedelta,\n \"timedelta_series_nat\": visions_timedelta,\n \"geometry_string_series\": visions_geometry,\n \"geometry_series\": visions_geometry,\n \"path_series_linux\": visions_path,\n \"path_series_linux_str\": visions_path,\n \"path_series_windows\": visions_path,\n \"path_series_windows_str\": visions_path,\n \"url_series\": visions_url,\n \"mixed_list[str,int]\": visions_object,\n \"mixed_dict\": visions_object,\n \"callable\": visions_object,\n \"module\": visions_object,\n \"textual_float\": visions_float,\n \"textual_float_nan\": visions_float,\n \"empty\": visions_generic,\n \"empty_object\": visions_generic,\n \"empty_float\": visions_generic,\n \"empty_bool\": visions_generic,\n \"empty_int64\": visions_generic,\n \"ip\": visions_ip,\n \"ip_str\": visions_ip,\n \"date_series_nat\": visions_date,\n \"categorical_char\": visions_categorical,\n \"ordinal\": visions_ordinal,\n \"str_complex\": visions_complex,\n \"uuid_series\": visions_uuid,\n \"uuid_series_str\": visions_uuid,\n }\n\n\ndef get_convert_map():\n # Conversions in one single step\n series_map = [\n # Model type, Relation type\n (visions_integer, visions_float, [\"int_nan_series\", \"float_series2\"]),\n (visions_integer, visions_string, [\"int_str_range\"]),\n (visions_complex, visions_string, [\"str_complex\"]),\n (\n visions_float,\n visions_string,\n [\n \"string_flt\",\n \"string_num_nan\",\n \"string_num\",\n \"string_flt_nan\",\n \"textual_float\",\n \"textual_float_nan\",\n \"int_str_range\",\n ],\n ),\n (visions_datetime, visions_string, [\"timestamp_string_series\", \"string_date\"]),\n (visions_geometry, visions_string, [\"geometry_string_series\"]),\n (visions_bool, visions_string, 
[\"string_bool_nan\"]),\n (visions_ip, visions_string, [\"ip_str\"]),\n (visions_url, visions_string, [\"str_url\"]),\n (\n visions_path,\n visions_string,\n [\"path_series_windows_str\", \"path_series_linux_str\"],\n ),\n (visions_float, visions_complex, [\"complex_series_float\"]),\n (visions_bool, visions_integer, [\"int_series_boolean\"]),\n (visions_bool, visions_object, [\"bool_nan_series\"]),\n (visions_uuid, visions_string, [\"uuid_series_str\"]),\n ]\n\n return series_map\n" ]
[ [ "pandas.Series", "pandas.date_range", "numpy.complex", "pandas.datetime", "pandas.Categorical", "pandas.Timedelta", "numpy.array" ] ]
zichuan-scott-xu/automl-workflow
[ "d108e55da943775953b9f1801311a86ac07e58a0" ]
[ "examples/DeepWisdom/Auto_Tabular/model_lib/xgb.py" ]
[ "import xgboost as xgb\nfrom sklearn.metrics import roc_auc_score\nimport hyperopt\nfrom hyperopt import STATUS_OK, Trials, hp, space_eval, tpe\nimport lightgbm as lgb\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split, RandomizedSearchCV\n\nfrom Auto_Tabular.utils.log_utils import log, timeit\nfrom Auto_Tabular import CONSTANT\nfrom Auto_Tabular.utils.data_utils import ohe2cat\nfrom .meta_model import MetaModel\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n\nclass XGBModel(MetaModel):\n\n def __init__(self):\n super(XGBModel, self).__init__()\n self.max_run = 2\n self.all_data_round = 1\n self.explore_params_round = 0\n\n self.not_gain_threhlod = 3\n\n self.patience = 3\n\n self.is_init = False\n\n self.name = 'xgb'\n self.type = 'tree'\n\n self._model = None\n\n self.params = {\n \"boosting_type\": \"gbdt\",\n \"objective\": \"multi:softprob\",\n \"nthread\": CONSTANT.JOBS,\n \"tree_method\": \"hist\",\n \"eval_metric\": \"mlogloss\",\n \"seed\": CONSTANT.SEED,\n }\n\n self.hyperparams = {\n \"learning_rate\": 0.02,\n \"max_depth\": 6,\n \"min_child_weight\": 0.01,\n \"min_data_in_leaf\": 100,\n \"gamma\": 0.1,\n \"lambda\": 0.1,\n \"alpha\": 0.1}\n\n self.is_multi_label = None\n\n self.num_class = None\n\n self.models = {}\n\n self.import_cols = None\n\n def init_model(self, num_class, **kwargs):\n self.is_init = True\n self.params.update({'num_class': num_class})\n self.num_class = num_class\n\n #@timeit\n def epoch_train(self, dataloader, run_num, is_multi_label=None, info=None, time_remain=None):\n self.is_multi_label = is_multi_label\n X, y, train_idxs, cat = dataloader['X'], dataloader['y'], dataloader['train_idxs'], dataloader['cat_cols']\n train_x, train_y = X.loc[train_idxs], y[train_idxs]\n\n if info['mode'] == 'bagging':\n self.hyperparams = info['xgb'].copy()\n self.hyperparams['random_seed'] = np.random.randint(0, 2020)\n run_num = self.explore_params_round\n\n if run_num == self.explore_params_round:\n print('xgb explore_params_round')\n train_x, train_y, val_x, val_y, = self.split_data(train_x, train_y)\n\n self.import_cols = info['imp_cols']\n\n if train_x.shape[1] > 300 and train_x.shape[0] > 10000:\n train_x = train_x[self.import_cols[:300]]\n val_x = val_x[self.import_cols[:300]]\n log('explore_params_round sample 300 cols')\n train_x.reset_index(drop=True, inplace=True)\n train_x = train_x.sample(n=10000)\n train_y = train_y[list(train_x.index)]\n log('explore_params_round sample 1w samples')\n\n elif train_x.shape[0] > 10000:\n train_x.reset_index(drop=True, inplace=True)\n train_x = train_x.sample(n=10000)\n train_y = train_y[list(train_x.index)]\n log('explore_params_round sample 1w samples')\n\n elif train_x.shape[1] > 300:\n train_x = train_x[self.import_cols[:300]]\n val_x = val_x[self.import_cols[:300]]\n log('explore_params_round sample 300 cols')\n\n print('shape: ', train_x.shape)\n self.bayes_opt(train_x, val_x, train_y, val_y, cat)\n self.early_stop_opt(train_x, val_x, train_y, val_y, cat)\n info['xgb'] = self.hyperparams.copy()\n\n train_x, train_y = X.loc[train_idxs], y[train_idxs]\n if run_num == self.all_data_round:\n print('xgb all data round')\n all_train_idxs = dataloader['all_train_idxs']\n train_x = X.loc[all_train_idxs]\n train_y = y[all_train_idxs]\n if not self.is_multi_label:\n xgb_train = xgb.DMatrix(train_x, ohe2cat(train_y))\n self._model = xgb.train({**self.params, **self.hyperparams}, xgb_train)\n else:\n for cls in range(self.num_class):\n cls_y = train_y[:, cls]\n xgb_train 
= xgb.DMatrix(train_x, cls_y)\n self.models[cls] = self._model = xgb.train({**self.params, **self.hyperparams}, xgb_train)\n\n\n #@timeit\n def epoch_valid(self, dataloader):\n X, y, val_idxs= dataloader['X'], dataloader['y'], dataloader['val_idxs']\n val_x, val_y = X.loc[val_idxs], y[val_idxs]\n val_x = xgb.DMatrix(val_x)\n if not self.is_multi_label:\n preds = self._model.predict(val_x)\n else:\n all_preds = []\n for cls in range(y.shape[1]):\n preds = self.models[cls].predict(val_x)\n all_preds.append(preds[:,1])\n preds = np.stack(all_preds, axis=1)\n valid_auc = roc_auc_score(val_y, preds)\n return valid_auc\n\n #@timeit\n def predict(self, dataloader):\n X, test_idxs = dataloader['X'], dataloader['test_idxs']\n test_x = X.loc[test_idxs]\n test_x = xgb.DMatrix(test_x)\n if not self.is_multi_label:\n return self._model.predict(test_x)\n else:\n all_preds = []\n for cls in range(self.num_class):\n preds = self.models[cls].predict(test_x)\n all_preds.append(preds[:, 1])\n return np.stack(all_preds, axis=1)\n\n #@timeit\n def bayes_opt(self, X_train, X_eval, y_train, y_eval, categories):\n if self.is_multi_label:\n dtrain = xgb.DMatrix(X_train, y_train[:, 1])\n dvalid = xgb.DMatrix(X_eval, y_eval[:, 1])\n else:\n dtrain = xgb.DMatrix(X_train, ohe2cat(y_train))\n dvalid = xgb.DMatrix(X_eval, ohe2cat(y_eval))\n space = {\n \"learning_rate\": hp.loguniform(\"learning_rate\", np.log(0.01), np.log(0.1)),\n \"max_depth\": hp.choice(\"max_depth\", [4, 6, 8, 10, 12]),\n \"min_child_weight\": hp.uniform('min_child_weight', 0.01, 1),\n \"min_data_in_leaf\": hp.choice(\"min_data_in_leaf\", np.linspace(10, 100, 20, dtype=int)),\n \"gamma\": hp.uniform(\"gamma\", 0.001, 0.1),\n \"lambda\": hp.uniform(\"lambda\", 0, 1),\n \"alpha\": hp.uniform(\"alpha\", 0, 1),\n \"colsample_bytree\": hp.choice(\"colsample_bytree\", [0.7, 0.9]),\n \"colsample_bylevel\": hp.choice(\"colsample_bylevel\", [0.7, 0.9]),\n \"colsample_bynode\": hp.choice(\"colsample_bynode\", [0.7, 0.9]),\n\n }\n\n def objective(hyperparams):\n model = xgb.train({**self.params, **hyperparams}, dtrain, num_boost_round=50)\n\n pred = model.predict(dvalid)\n if self.is_multi_label:\n score = roc_auc_score(y_eval[:, 1], pred[:, 1])\n else:\n score = roc_auc_score(y_eval, pred)\n\n return {'loss': -score, 'status': STATUS_OK}\n\n trials = Trials()\n best = hyperopt.fmin(fn=objective, space=space, trials=trials,\n algo=tpe.suggest, max_evals=10, verbose=1,\n rstate=np.random.RandomState(1))\n\n self.hyperparams.update(space_eval(space, best))\n log(\"auc = {}, hyperparams: {}\".format(-trials.best_trial['result']['loss'], self.hyperparams))\n\n def early_stop_opt(self, X_train, X_eval, y_train, y_eval, categories):\n if self.is_multi_label:\n dtrain = xgb.DMatrix(X_train, y_train[:, 1])\n dvalid = xgb.DMatrix(X_eval, y_eval[:, 1])\n else:\n dtrain = xgb.DMatrix(X_train, ohe2cat(y_train))\n dvalid = xgb.DMatrix(X_eval, ohe2cat(y_eval))\n\n model = xgb.train({**self.params, **self.hyperparams}, dtrain, evals=[(dvalid, 'eval')], num_boost_round=1200,\n early_stopping_rounds=10) #categorical_feature=categories)\n\n\n self.params['num_boost_round'] = model.best_iteration\n log('best boost round: {}'.format(model.best_iteration))\n\n def split_data(self, x, y):\n new_x = x.copy()\n new_x.reset_index(drop=True, inplace=True)\n sss = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=0)\n self.splits = {}\n i = 0\n for train_idxs, val_idxs in sss.split(new_x, y):\n self.splits[i] = [train_idxs, val_idxs]\n i += 1\n new_train_x = 
new_x.loc[self.splits[0][0]]\n new_train_y = y[self.splits[0][0]]\n\n new_val_x = new_x.loc[self.splits[0][1]]\n new_val_y = y[self.splits[0][1]]\n\n return new_train_x, new_train_y, new_val_x, new_val_y\n\n\n" ]
[ [ "sklearn.metrics.roc_auc_score", "numpy.random.RandomState", "numpy.log", "numpy.stack", "numpy.random.randint", "numpy.linspace", "sklearn.model_selection.StratifiedShuffleSplit" ] ]
timvink/tune-sklearn
[ "97ff39a2a9d4a6e1a87183408ebaa4d879102737" ]
[ "tune_sklearn/tune_basesearch.py" ]
[ "\"\"\"Parent class for a cross-validation interface\nbuilt with a Ray Tune back-end.\n\nImplementation derived from referencing the equivalent\nGridSearchCV interfaces from Dask and Optuna.\n\nhttps://ray.readthedocs.io/en/latest/tune.html\nhttps://dask.org\nhttps://optuna.org\n -- Anthony Yu and Michael Chau\n\"\"\"\nimport logging\nfrom collections import defaultdict\n\nfrom scipy.stats import rankdata\nfrom sklearn.model_selection._search import BaseSearchCV\nfrom sklearn.utils.validation import check_is_fitted\nfrom sklearn.model_selection import check_cv\nfrom sklearn.base import is_classifier\nfrom sklearn.base import clone\nfrom sklearn.exceptions import NotFittedError\n\nimport numpy as np\nfrom numpy.ma import MaskedArray\nimport pandas as pd\nimport warnings\nimport multiprocessing\nimport os\nimport inspect\nimport time\nimport numbers\n\nimport ray\nfrom ray.tune.trial import Trial\nfrom ray.tune.schedulers import (\n PopulationBasedTraining, AsyncHyperBandScheduler, HyperBandScheduler,\n MedianStoppingRule, TrialScheduler, ASHAScheduler, HyperBandForBOHB)\nfrom ray.tune.logger import (TBXLogger, JsonLogger, CSVLogger, MLFLowLogger,\n Logger)\n\nfrom tune_sklearn.utils import (EarlyStopping, get_early_stop_type,\n check_is_pipeline, _check_multimetric_scoring)\nfrom tune_sklearn._detect_booster import is_lightgbm_model\n\nlogger = logging.getLogger(__name__)\n\n\ndef resolve_early_stopping(early_stopping, max_iters, metric_name):\n if isinstance(early_stopping, str):\n if early_stopping in TuneBaseSearchCV.defined_schedulers:\n if early_stopping == \"PopulationBasedTraining\":\n return PopulationBasedTraining(metric=metric_name, mode=\"max\")\n elif early_stopping == \"AsyncHyperBandScheduler\":\n return AsyncHyperBandScheduler(\n metric=metric_name, mode=\"max\", max_t=max_iters)\n elif early_stopping == \"HyperBandScheduler\":\n return HyperBandScheduler(\n metric=metric_name, mode=\"max\", max_t=max_iters)\n elif early_stopping == \"MedianStoppingRule\":\n return MedianStoppingRule(metric=metric_name, mode=\"max\")\n elif early_stopping == \"ASHAScheduler\":\n return ASHAScheduler(\n metric=metric_name, mode=\"max\", max_t=max_iters)\n elif early_stopping == \"HyperBandForBOHB\":\n return HyperBandForBOHB(\n metric=metric_name, mode=\"max\", max_t=max_iters)\n raise ValueError(\"{} is not a defined scheduler. \"\n \"Check the list of available schedulers.\"\n .format(early_stopping))\n elif isinstance(early_stopping, TrialScheduler):\n early_stopping._metric = metric_name\n early_stopping._mode = \"max\"\n return early_stopping\n else:\n raise TypeError(\"`early_stopping` must be a str, boolean, \"\n f\"or tune scheduler. Got {type(early_stopping)}.\")\n\n\ndef resolve_loggers(loggers):\n init_loggers = {JsonLogger, CSVLogger}\n if loggers is None:\n return list(init_loggers)\n\n if not isinstance(loggers, list):\n raise TypeError(\"`loggers` must be a list of str or tune loggers.\")\n\n for log in loggers:\n if isinstance(log, str):\n if log == \"tensorboard\":\n init_loggers.add(TBXLogger)\n elif log == \"csv\":\n init_loggers.add(CSVLogger)\n elif log == \"mlflow\":\n init_loggers.add(MLFLowLogger)\n elif log == \"json\":\n init_loggers.add(JsonLogger)\n else:\n raise ValueError((\"{} is not one of the defined loggers. 
\" +\n str(TuneBaseSearchCV.defined_schedulers))\n .format(log))\n elif inspect.isclass(log) and issubclass(log, Logger):\n init_loggers.add(log)\n else:\n raise TypeError(\"`loggers` must be a list of str or tune \"\n \"loggers.\")\n\n return list(init_loggers)\n\n\nclass TuneBaseSearchCV(BaseSearchCV):\n \"\"\"Abstract base class for TuneGridSearchCV and TuneSearchCV\"\"\"\n\n defined_schedulers = [\n \"PopulationBasedTraining\", \"AsyncHyperBandScheduler\",\n \"HyperBandScheduler\", \"MedianStoppingRule\", \"ASHAScheduler\",\n \"HyperBandForBOHB\"\n ]\n defined_loggers = [\"tensorboard\", \"csv\", \"mlflow\", \"json\"]\n\n @property\n def _estimator_type(self):\n \"\"\"str: Returns the estimator's estimator type, such as 'classifier'\n or 'regressor'.\n\n \"\"\"\n return self.estimator._estimator_type\n\n @property\n def best_params_(self):\n \"\"\"dict: Parameter setting that gave the best results on the hold\n out data.\n\n For multi-metric evaluation, this is present only if ``refit`` is\n specified.\n\n \"\"\"\n self._check_is_fitted(\"best_params_\", check_refit=\"multimetric\")\n return self.best_params\n\n @property\n def best_index_(self):\n \"\"\"int: The index (of the ``cv_results_`` arrays)\n which corresponds to the best candidate parameter setting.\n\n The dict at ``search.cv_results_['params'][search.best_index_]`` gives\n the parameter setting for the best model, that gives the highest\n mean score (``search.best_score_``).\n\n For multi-metric evaluation, this is present only if ``refit`` is\n specified.\n\n \"\"\"\n self._check_is_fitted(\"best_index_\", check_refit=\"multimetric\")\n return self.best_index\n\n @property\n def best_score_(self):\n \"\"\"float: Mean cross-validated score of the best_estimator\n\n For multi-metric evaluation, this is present only if ``refit``\n is specified.\n\n \"\"\"\n self._check_is_fitted(\"best_score_\", check_refit=\"multimetric\")\n return self.best_score\n\n @property\n def multimetric_(self):\n \"\"\"bool: Whether evaluation performed was multi-metric.\"\"\"\n return self.is_multi\n\n @property\n def classes_(self):\n \"\"\"list: Get the list of unique classes found in the target `y`.\"\"\"\n self._check_is_fitted(\"classes_\")\n return self.best_estimator_.classes_\n\n @property\n def n_splits_(self):\n \"\"\"int: The number of cross-validation splits (folds/iterations).\"\"\"\n self._check_is_fitted(\"n_splits_\", check_refit=False)\n return self.n_splits_\n\n @property\n def best_estimator_(self):\n \"\"\"estimator: Estimator that was chosen by the search,\n i.e. estimator which gave highest score (or smallest loss if\n specified) on the left out data. 
Not available if ``refit=False``.\n\n See ``refit`` parameter for more information on allowed values.\n \"\"\"\n self._check_is_fitted(\"best_estimator_\")\n return self.best_estimator\n\n @property\n def refit_time_(self):\n \"\"\"float: Seconds used for refitting the best model on the\n whole dataset.\n\n This is present only if ``refit`` is not False.\n \"\"\"\n self._check_is_fitted(\"refit_time_\")\n return self.refit_time\n\n @property\n def scorer_(self):\n \"\"\"function or a dict: Scorer function used on the held out\n data to choose the best parameters for the model.\n\n For multi-metric evaluation, this attribute holds the validated\n ``scoring`` dict which maps the scorer key to the scorer callable.\n \"\"\"\n return self.scoring\n\n @property\n def decision_function(self):\n \"\"\"function: Get decision_function on the estimator with the best\n found parameters.\n\n Only available if ``refit=True`` and the underlying estimator supports\n ``decision_function``.\n\n \"\"\"\n self._check_is_fitted(\"decision_function\")\n return self.best_estimator_.decision_function\n\n @property\n def inverse_transform(self):\n \"\"\"function: Get inverse_transform on the estimator with the best found\n parameters.\n\n Only available if the underlying estimator implements\n ``inverse_transform`` and ``refit=True``.\n\n \"\"\"\n self._check_is_fitted(\"inverse_transform\")\n return self.best_estimator_.inverse_transform\n\n @property\n def predict(self):\n \"\"\"function: Get predict on the estimator with the best found\n parameters.\n\n Only available if ``refit=True`` and the underlying estimator supports\n ``predict``.\n\n \"\"\"\n self._check_is_fitted(\"predict\")\n return self.best_estimator_.predict\n\n @property\n def predict_log_proba(self):\n \"\"\"function: Get predict_log_proba on the estimator with the best found\n parameters.\n\n Only available if ``refit=True`` and the underlying estimator supports\n ``predict_log_proba``.\n\n \"\"\"\n self._check_is_fitted(\"predict_log_proba\")\n return self.best_estimator_.predict_log_proba\n\n @property\n def predict_proba(self):\n \"\"\"function: Get predict_proba on the estimator with the best found\n parameters.\n\n Only available if ``refit=True`` and the underlying estimator supports\n ``predict_proba``.\n\n \"\"\"\n self._check_is_fitted(\"predict_proba\")\n return self.best_estimator_.predict_proba\n\n @property\n def transform(self):\n \"\"\"function: Get transform on the estimator with the best found\n parameters.\n\n Only available if the underlying estimator supports ``transform`` and\n ``refit=True``.\n\n \"\"\"\n self._check_is_fitted(\"transform\")\n return self.best_estimator_.transform\n\n def _check_params(self):\n \"\"\"Helper method to see if parameters passed in are valid.\n\n Raises:\n ValueError: if parameters are invalid.\n\n \"\"\"\n if not hasattr(self.estimator, \"fit\"):\n raise ValueError(\"estimator must be a scikit-learn estimator.\")\n\n def _check_if_refit(self, attr):\n \"\"\"Helper method to see if the requested property is available based\n on the `refit` argument.\n\n Args:\n attr (str): Attribute requested by the user.\n\n Raises:\n AttributeError: If `self.refit` is False.\n\n \"\"\"\n if not self.refit:\n raise AttributeError(\"'{}' is not a valid attribute with \"\n \"'refit=False'.\".format(attr))\n\n def _check_is_fitted(self, method_name, check_refit=True):\n \"\"\"Helper method to see if the estimator has been fitted.\n\n Args:\n method_name (str): String of the method name called from the user.\n\n 
check_refit (bool|str): Whether to also check for `self.refit`\n param. If \"multimetric\", will only check if `self.multimetric`\n param is also True. Defaults to True.\n\n Raises:\n NotFittedError: If the estimator has not been fitted.\n TypeError: If the estimator is invalid (i.e. doesn't implement\n the sklearn estimator interface).\n\n \"\"\"\n if not self.refit:\n if check_refit == \"multimetric\":\n if self.is_multi:\n msg = (\n \"This {0} instance was initialized with refit=False. \"\n \"For multi-metric evaluation, {1} \"\n \"is available only after refitting on the best \"\n \"parameters.\").format(\n type(self).__name__, method_name)\n raise NotFittedError(msg)\n elif check_refit:\n msg = (\n \"This {0} instance was initialized with refit=False. {1} \"\n \"is available only after refitting on the best \"\n \"parameters.\").format(type(self).__name__, method_name)\n raise NotFittedError(msg)\n check_is_fitted(self)\n\n def _is_multimetric(self, scoring):\n \"\"\"Helper method to see if multimetric scoring is\n being used\n\n Args:\n scoring (str, callable, list, tuple, or dict):\n the scoring being used\n \"\"\"\n\n return isinstance(scoring, (list, tuple, dict))\n\n def __init__(self,\n estimator,\n early_stopping=None,\n scoring=None,\n n_jobs=None,\n cv=5,\n refit=True,\n verbose=0,\n error_score=\"raise\",\n return_train_score=False,\n local_dir=\"~/ray_results\",\n max_iters=1,\n use_gpu=False,\n loggers=None,\n pipeline_auto_early_stop=True,\n stopper=None,\n time_budget_s=None):\n if max_iters < 1:\n raise ValueError(\"max_iters must be greater than or equal to 1.\")\n self.estimator = estimator\n self.base_estimator = estimator\n self.pipeline_auto_early_stop = pipeline_auto_early_stop\n self.stopper = stopper\n self.time_budget_s = time_budget_s\n\n if self.pipeline_auto_early_stop and check_is_pipeline(estimator):\n _, self.base_estimator = self.base_estimator.steps[-1]\n\n self.early_stop_type = get_early_stop_type(self.base_estimator,\n bool(early_stopping))\n\n if not self._can_early_stop():\n if early_stopping:\n raise ValueError(\"Early stopping is not supported because \"\n \"the estimator does not have `partial_fit`, \"\n \"does not support warm_start, or is a \"\n \"tree classifier. Set \"\n \"`early_stopping=False`.\")\n if not early_stopping and max_iters > 1:\n warnings.warn(\n \"max_iters is set > 1 but incremental/partial training \"\n \"is not enabled. To enable partial training, \"\n \"ensure the estimator has `partial_fit` or \"\n \"`warm_start` and set `early_stopping=True`. \"\n \"Automatically setting max_iters=1.\",\n category=UserWarning)\n max_iters = 1\n\n # Get metric scoring name\n self.scoring = scoring\n self.refit = refit\n if not hasattr(self, \"is_multi\"):\n self.scoring, self.is_multi = _check_multimetric_scoring(\n self.estimator, self.scoring)\n\n if self.is_multi:\n self._base_metric_name = self.refit\n else:\n self._base_metric_name = \"score\"\n\n self._metric_name = \"average_test_%s\" % self._base_metric_name\n\n if early_stopping:\n if not self._can_early_stop() and is_lightgbm_model(\n self.base_estimator):\n warnings.warn(\"lightgbm>=3.0.0 required for early_stopping \"\n \"functionality.\")\n assert self._can_early_stop()\n if max_iters == 1:\n warnings.warn(\n \"early_stopping is enabled but max_iters = 1. 
\"\n \"To enable partial training, set max_iters > 1.\",\n category=UserWarning)\n if self.early_stop_type == EarlyStopping.XGB:\n warnings.warn(\n \"tune-sklearn implements incremental learning \"\n \"for xgboost models following this: \"\n \"https://github.com/dmlc/xgboost/issues/1686. \"\n \"This may negatively impact performance. To \"\n \"disable, set `early_stopping=False`.\",\n category=UserWarning)\n elif self.early_stop_type == EarlyStopping.LGBM:\n warnings.warn(\n \"tune-sklearn implements incremental learning \"\n \"for lightgbm models following this: \"\n \"https://lightgbm.readthedocs.io/en/latest/pythonapi/\"\n \"lightgbm.LGBMModel.html#lightgbm.LGBMModel.fit \"\n \"This may negatively impact performance. To \"\n \"disable, set `early_stopping=False`.\",\n category=UserWarning)\n elif self.early_stop_type == EarlyStopping.CATBOOST:\n warnings.warn(\n \"tune-sklearn implements incremental learning \"\n \"for Catboost models following this: \"\n \"https://catboost.ai/docs/concepts/python-usages-\"\n \"examples.html#training-continuation \"\n \"This may negatively impact performance. To \"\n \"disable, set `early_stopping=False`.\",\n category=UserWarning)\n if early_stopping is True:\n # Override the early_stopping variable so\n # that it is resolved appropriately in\n # the next block\n early_stopping = \"AsyncHyperBandScheduler\"\n # Resolve the early stopping object\n early_stopping = resolve_early_stopping(early_stopping, max_iters,\n self._metric_name)\n\n self.early_stopping = early_stopping\n self.max_iters = max_iters\n\n self.cv = cv\n self.n_jobs = int(n_jobs or -1)\n self.sk_n_jobs = 1\n if os.environ.get(\"SKLEARN_N_JOBS\") is not None:\n self.sk_n_jobs = int(os.environ.get(\"SKLEARN_N_JOBS\"))\n\n self.verbose = verbose\n self.error_score = error_score\n self.return_train_score = return_train_score\n self.local_dir = local_dir\n self.use_gpu = use_gpu\n self.loggers = resolve_loggers(loggers)\n assert isinstance(self.n_jobs, int)\n\n def _fit(self, X, y=None, groups=None, **fit_params):\n \"\"\"Helper method to run fit procedure\n\n Args:\n X (:obj:`array-like` (shape = [n_samples, n_features])):\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n y (:obj:`array-like`): Shape of array expected to be [n_samples]\n or [n_samples, n_output]). Target relative to X for\n classification or regression; None for unsupervised learning.\n groups (:obj:`array-like` (shape (n_samples,)), optional):\n Group labels for the samples used while splitting the dataset\n into train/test set. Only used in conjunction with a \"Group\"\n `cv` instance (e.g., `GroupKFold`).\n **fit_params (:obj:`dict` of str): Parameters passed to\n the ``fit`` method of the estimator.\n\n Returns:\n :obj:`TuneBaseSearchCV` child instance, after fitting.\n \"\"\"\n self._check_params()\n classifier = is_classifier(self.estimator)\n cv = check_cv(cv=self.cv, y=y, classifier=classifier)\n self.n_splits = cv.get_n_splits(X, y, groups)\n if not hasattr(self, \"is_multi\"):\n self.scoring, self.is_multi = _check_multimetric_scoring(\n self.estimator, self.scoring)\n else:\n self.scoring, _ = _check_multimetric_scoring(\n self.estimator, self.scoring)\n\n if self.is_multi:\n if self.refit and (not isinstance(self.refit, str)\n or self.refit not in self.scoring):\n raise ValueError(\"When using multimetric scoring, refit \"\n \"must be the name of the scorer used to \"\n \"pick the best parameters. 
If not needed, \"\n \"set refit to False\")\n\n assert isinstance(\n self.n_jobs,\n int), (\"Internal error: self.n_jobs must be an integer.\")\n if self.n_jobs < 0:\n resources_per_trial = {\"cpu\": 1, \"gpu\": 1 if self.use_gpu else 0}\n if self.n_jobs < -1:\n warnings.warn(\n \"`self.n_jobs` is automatically set \"\n \"-1 for any negative values.\",\n category=UserWarning)\n else:\n available_cpus = multiprocessing.cpu_count()\n gpu_fraction = 1 if self.use_gpu else 0\n if ray.is_initialized():\n available_cpus = ray.cluster_resources()[\"CPU\"]\n if self.use_gpu:\n available_gpus = ray.cluster_resources()[\"GPU\"]\n gpu_fraction = available_gpus / self.n_jobs\n cpu_fraction = available_cpus / self.n_jobs\n if cpu_fraction > 1:\n cpu_fraction = int(np.ceil(cpu_fraction))\n if gpu_fraction > 1:\n gpu_fraction = int(np.ceil(gpu_fraction))\n resources_per_trial = {\"cpu\": cpu_fraction, \"gpu\": gpu_fraction}\n\n X_id = ray.put(X)\n y_id = ray.put(y)\n\n config = {}\n config[\"early_stopping\"] = bool(self.early_stopping)\n config[\"early_stop_type\"] = self.early_stop_type\n config[\"X_id\"] = X_id\n config[\"y_id\"] = y_id\n config[\"groups\"] = groups\n config[\"cv\"] = cv\n config[\"fit_params\"] = fit_params\n config[\"scoring\"] = self.scoring\n config[\"max_iters\"] = self.max_iters\n config[\"return_train_score\"] = self.return_train_score\n config[\"n_jobs\"] = self.sk_n_jobs\n config[\"metric_name\"] = self._metric_name\n\n self._fill_config_hyperparam(config)\n analysis = self._tune_run(config, resources_per_trial)\n\n self.cv_results_ = self._format_results(self.n_splits, analysis)\n\n metric = self._metric_name\n base_metric = self._base_metric_name\n\n # For multi-metric evaluation, store the best_index, best_params and\n # best_score iff refit is one of the scorer names\n # In single metric evaluation, refit_metric is \"score\"\n if self.refit or not self.is_multi:\n # If callable, refit is expected to return the index of the best\n # parameter set.\n if callable(self.refit):\n self.best_index = self.refit(self.cv_results_)\n if not isinstance(self.best_index, numbers.Integral):\n raise TypeError(\"best_index returned is not an integer\")\n if (self.best_index < 0\n or self.best_index >= len(self.cv_results_[\"params\"])):\n raise IndexError(\"best_index index out of range\")\n else:\n self.best_index = self.cv_results_[\"rank_test_%s\" %\n base_metric].argmin()\n self.best_score = self.cv_results_[\n \"mean_test_%s\" % base_metric][self.best_index]\n best_config = analysis.get_best_config(\n metric=metric, mode=\"max\", scope=\"last\")\n self.best_params = self._clean_config_dict(best_config)\n\n if self.refit:\n base_estimator = clone(self.estimator)\n if self.early_stop_type == EarlyStopping.WARM_START_ENSEMBLE:\n logger.info(\"tune-sklearn uses `n_estimators` to warm \"\n \"start, so this parameter can't be \"\n \"set when warm start early stopping. 
\"\n \"`n_estimators` defaults to `max_iters`.\")\n if check_is_pipeline(base_estimator):\n cloned_final_estimator = base_estimator.steps[-1][1]\n cloned_final_estimator.set_params(\n **{\"n_estimators\": self.max_iters})\n else:\n self.best_params[\"n_estimators\"] = self.max_iters\n # we clone again after setting params in case some\n # of the params are estimators as well.\n self.best_estimator = clone(\n base_estimator.set_params(**self.best_params))\n refit_start_time = time.time()\n if y is not None:\n self.best_estimator.fit(X, y, **fit_params)\n else:\n self.best_estimator.fit(X, **fit_params)\n refit_end_time = time.time()\n self.refit_time = refit_end_time - refit_start_time\n\n return self\n\n def fit(self, X, y=None, groups=None, **fit_params):\n \"\"\"Run fit with all sets of parameters.\n\n ``tune.run`` is used to perform the fit procedure.\n\n Args:\n X (:obj:`array-like` (shape = [n_samples, n_features])):\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n y (:obj:`array-like`): Shape of array expected to be [n_samples]\n or [n_samples, n_output]). Target relative to X for\n classification or regression; None for unsupervised learning.\n groups (:obj:`array-like` (shape (n_samples,)), optional):\n Group labels for the samples used while splitting the dataset\n into train/test set. Only used in conjunction with a \"Group\"\n `cv` instance (e.g., `GroupKFold`).\n **fit_params (:obj:`dict` of str): Parameters passed to\n the ``fit`` method of the estimator.\n\n Returns:\n :obj:`TuneBaseSearchCV` child instance, after fitting.\n\n \"\"\"\n ray_init = ray.is_initialized()\n try:\n if not ray_init:\n if self.n_jobs == 1:\n ray.init(\n local_mode=True,\n configure_logging=False,\n ignore_reinit_error=True,\n include_dashboard=False)\n else:\n ray.init(\n ignore_reinit_error=True,\n configure_logging=False,\n include_dashboard=False\n # log_to_driver=self.verbose == 2\n )\n if self.verbose != 2:\n logger.info(\"TIP: Hiding process output by default. \"\n \"To show process output, set verbose=2.\")\n\n result = self._fit(X, y, groups, **fit_params)\n\n if not ray_init and ray.is_initialized():\n ray.shutdown()\n\n return result\n\n except Exception:\n if not ray_init and ray.is_initialized():\n ray.shutdown()\n raise\n\n def score(self, X, y=None):\n \"\"\"Compute the score(s) of an estimator on a given test set.\n\n Args:\n X (:obj:`array-like` (shape = [n_samples, n_features])): Input\n data, where n_samples is the number of samples and\n n_features is the number of features.\n y (:obj:`array-like`): Shape of array is expected to be\n [n_samples] or [n_samples, n_output]). Target relative to X\n for classification or regression. You can also pass in\n None for unsupervised learning.\n\n Returns:\n float: computed score\n\n \"\"\"\n self._check_is_fitted(self._metric_name)\n if self.is_multi:\n scorer_name = self.refit\n else:\n scorer_name = \"score\"\n return self.scoring[scorer_name](self.best_estimator_, X, y)\n\n def _can_early_stop(self):\n \"\"\"Helper method to determine if it is possible to do early stopping.\n\n Only sklearn estimators with `partial_fit` or `warm_start` can be early\n stopped. 
warm_start works by picking up training from the previous\n call to `fit`.\n\n Returns:\n bool: if the estimator can early stop\n\n \"\"\"\n return self.early_stop_type != EarlyStopping.NO_EARLY_STOP\n\n def _fill_config_hyperparam(self, config):\n \"\"\"Fill in the ``config`` dictionary with the hyperparameters.\n\n For RandomizedSearchCV, samples are pulled from the distribution\n to be saved in the ``config`` dictionary.\n For GridSearchCV, the list is directly saved in the ``config``\n dictionary.\n\n Implement this functionality in a child class.\n\n Args:\n config (:obj:`dict`): dictionary to be filled in as the\n configuration for `tune.run`.\n\n \"\"\"\n raise NotImplementedError(\"Define in child class\")\n\n def _tune_run(self, config, resources_per_trial):\n \"\"\"Wrapper to call ``tune.run``. Implement this in a child class.\n\n Args:\n config (:obj:`dict`): dictionary to be passed in as the\n configuration for `tune.run`.\n resources_per_trial (:obj:`dict` of int): dictionary specifying the\n number of CPUs and GPUs to use to train the model.\n\n \"\"\"\n raise NotImplementedError(\"Define in child class\")\n\n def _clean_config_dict(self, config):\n \"\"\"Helper to remove keys from the ``config`` dictionary returned from\n ``tune.run``.\n\n Args:\n config (:obj:`dict`): Dictionary of all hyperparameter\n configurations and extra output from ``tune.run``. Keys for\n hyperparameters are the hyperparameter variable names\n and the values are the numeric values set to those variables.\n\n Returns:\n config (:obj:`dict`): Dictionary of all hyperparameter\n configurations without the output from ``tune.run``. Keys for\n hyperparameters are the hyperparameter variable names\n and the values are the numeric values set to those variables.\n \"\"\"\n for key in [\n \"estimator_list\", \"early_stopping\", \"X_id\", \"y_id\", \"groups\",\n \"cv\", \"fit_params\", \"scoring\", \"max_iters\",\n \"return_train_score\", \"n_jobs\", \"metric_name\",\n \"early_stop_type\"\n ]:\n config.pop(key, None)\n return config\n\n def _format_results(self, n_splits, out):\n \"\"\"Helper to generate the ``cv_results_`` dictionary.\n\n Args:\n n_splits (int): integer specifying the number of folds when doing\n cross-validation.\n out (:obj:`ExperimentAnalysis`): Object returned by `tune.run`.\n\n Returns:\n results (:obj:`dict`): Dictionary of results to use for the\n interface's ``cv_results_``.\n\n \"\"\"\n trials = [\n trial for trial in out.trials if trial.status == Trial.TERMINATED\n ]\n trial_dirs = [trial.logdir for trial in trials]\n # The result dataframes are indexed by their trial logdir\n trial_dfs = out.fetch_trial_dataframes()\n\n # Try to find a template df to use for trials that did not return\n # any results. 
These trials should copy the structure and fill it\n # with NaNs so that the later reshape actions work.\n template_df = None\n fix_trial_dirs = [] # Holds trial dirs with no results\n for trial_dir in trial_dirs:\n if trial_dir in trial_dfs and template_df is None:\n template_df = trial_dfs[trial_dir]\n elif trial_dir not in trial_dfs:\n fix_trial_dirs.append(trial_dir)\n\n # Create NaN dataframes for trials without results\n if fix_trial_dirs:\n if template_df is None:\n # No trial returned any results\n return {}\n for trial_dir in fix_trial_dirs:\n trial_df = pd.DataFrame().reindex_like(template_df)\n trial_dfs[trial_dir] = trial_df\n\n # Keep right order\n dfs = [trial_dfs[trial_dir] for trial_dir in trial_dirs]\n finished = [df.iloc[[-1]] for df in dfs]\n test_scores = {}\n train_scores = {}\n for name in self.scoring:\n test_scores[name] = [\n df[[\n col for col in dfs[0].columns\n if \"split\" in col and \"test_%s\" % name in col\n ]].to_numpy() for df in finished\n ]\n if self.return_train_score:\n train_scores[name] = [\n df[[\n col for col in dfs[0].columns\n if \"split\" in col and \"train_%s\" % name in col\n ]].to_numpy() for df in finished\n ]\n else:\n train_scores = None\n\n configs = [trial.config for trial in trials]\n candidate_params = [\n self._clean_config_dict(config) for config in configs\n ]\n\n results = {\"params\": candidate_params}\n n_candidates = len(candidate_params)\n\n def _store(\n results,\n key_name,\n array,\n n_splits,\n n_candidates,\n weights=None,\n splits=False,\n rank=False,\n ):\n \"\"\"A small helper to store the scores/times to the cv_results_\"\"\"\n # When iterated first by n_splits and then by parameters\n array = np.array(\n array, dtype=np.float64).reshape((n_candidates, n_splits))\n if splits:\n for split_i in range(n_splits):\n results[\"split%d_%s\" % (split_i,\n key_name)] = array[:, split_i]\n\n array_means = np.average(array, axis=1, weights=weights)\n results[\"mean_%s\" % key_name] = array_means\n # Weighted std is not directly available in numpy\n array_stds = np.sqrt(\n np.average(\n (array - array_means[:, np.newaxis])**2,\n axis=1,\n weights=weights))\n results[\"std_%s\" % key_name] = array_stds\n\n if rank:\n results[\"rank_%s\" % key_name] = np.asarray(\n rankdata(-array_means, method=\"min\"), dtype=np.int32)\n\n for name in self.scoring:\n _store(\n results,\n \"test_%s\" % name,\n test_scores[name],\n n_splits,\n n_candidates,\n splits=True,\n rank=True,\n )\n if self.return_train_score:\n for name in self.scoring:\n _store(\n results,\n \"train_%s\" % name,\n train_scores[name],\n n_splits,\n n_candidates,\n splits=True,\n rank=True,\n )\n\n results[\"time_total_s\"] = np.array(\n [df[\"time_total_s\"].to_numpy() for df in finished]).flatten()\n\n results[\"training_iteration\"] = np.array([\n df[\"training_iteration\"].to_numpy() for df in finished\n ]).flatten()\n\n # Use one MaskedArray and mask all the places where the param is not\n # applicable for that candidate. Use defaultdict as each candidate may\n # not contain all the params\n param_results = defaultdict(\n lambda: MaskedArray(\n np.empty(n_candidates),\n mask=True,\n dtype=object,\n )\n )\n for cand_i, params in enumerate(candidate_params):\n for name, value in params.items():\n param_results[\"param_%s\" % name][cand_i] = value\n\n results.update(param_results)\n\n return results\n" ]
[ [ "sklearn.utils.validation.check_is_fitted", "sklearn.base.clone", "sklearn.exceptions.NotFittedError", "numpy.ceil", "sklearn.base.is_classifier", "numpy.array", "scipy.stats.rankdata", "numpy.empty", "pandas.DataFrame", "sklearn.model_selection.check_cv", "numpy.average" ] ]
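The record above ends with the APIs used by tune-sklearn's TuneBaseSearchCV. As a minimal usage sketch, assuming the concrete TuneGridSearchCV subclass that the tune-sklearn package ships on top of this base class (the dataset, estimator, and grid below are illustrative, not taken from the record):

# Hedged sketch: drives the search interface defined above via tune-sklearn's
# TuneGridSearchCV subclass; all data and hyperparameters here are made up.
from sklearn.datasets import make_classification
from sklearn.linear_model import SGDClassifier
from tune_sklearn import TuneGridSearchCV

X, y = make_classification(n_samples=500, n_features=20, random_state=0)

search = TuneGridSearchCV(
    SGDClassifier(),                           # has partial_fit, so early stopping is allowed
    param_grid={"alpha": [1e-4, 1e-3, 1e-2]},
    early_stopping=True,                       # resolved to a Tune scheduler in __init__
    max_iters=10,                              # partial-fit rounds per trial
    cv=5,
)
search.fit(X, y)                               # runs _fit(), which wraps tune.run
print(search.best_params_)
print(search.predict(X[:5]))                   # delegated to best_estimator_ since refit=True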
DNNTesting/fse20
[ "cf261c322f4d588b15125aa2b2f289c6a89a6928" ]
[ "submissions/reusable/CovTesting/Coverage VS. # Adv Examples/tknp_testing.py" ]
[ "import argparse\nimport os\nimport random\nimport shutil\nimport warnings\nimport sys\n\nwarnings.filterwarnings(\"ignore\")\n\nfrom keras import backend as K\nimport numpy as np\nfrom PIL import Image, ImageFilter\nfrom skimage.measure import compare_ssim as SSIM\nimport keras\n\nimport tensorflow as tf\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n#### TensorFlow session setup: allow GPU memory growth instead of grabbing all memory\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=config)\n\n\n# helper function\ndef get_layer_i_output(model, i, data):\n layer_model = K.function([model.layers[0].input], [model.layers[i].output])\n ret = layer_model([data])[0]\n num = data.shape[0]\n ret = np.reshape(ret, (num, -1))\n return ret\n\n\n# the data is in range(-.5, .5)\ndef load_data(name):\n assert (name.upper() in ['MNIST', 'CIFAR', 'SVHN'])\n name = name.lower()\n x_train = np.load('../data/' + name + '_data/' + name + '_x_train.npy')\n y_train = np.load('../data/' + name + '_data/' + name + '_y_train.npy')\n x_test = np.load('../data/' + name + '_data/' + name + '_x_test.npy')\n y_test = np.load('../data/' + name + '_data/' + name + '_y_test.npy')\n return x_train, y_train, x_test, y_test\n\n\nclass Coverage:\n def __init__(self, model, x_train, y_train, x_test, y_test, x_adv):\n self.model = model\n self.x_train = x_train\n self.y_train = y_train\n self.x_test = x_test\n self.y_test = y_test\n self.x_adv = x_adv\n\n # find scale factors and min num\n def scale(self, layers, batch=1024):\n data_num = self.x_adv.shape[0]\n factors = dict()\n for i in layers:\n begin, end = 0, batch\n max_num, min_num = np.NINF, np.inf\n while begin < data_num:\n layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])\n tmp = layer_output.max()\n max_num = tmp if tmp > max_num else max_num\n tmp = layer_output.min()\n min_num = tmp if tmp < min_num else min_num\n begin += batch\n end += batch\n factors[i] = (max_num - min_num, min_num)\n return factors\n\n # 1 Neuron Coverage\n def NC(self, layers, threshold=0., batch=1024):\n factors = self.scale(layers, batch=batch)\n neuron_num = 0\n for i in layers:\n out_shape = self.model.layers[i].output.shape\n neuron_num += np.prod(out_shape[1:])\n neuron_num = int(neuron_num)\n\n activate_num = 0\n data_num = self.x_adv.shape[0]\n for i in layers:\n neurons = np.prod(self.model.layers[i].output.shape[1:])\n buckets = np.zeros(neurons).astype('bool')\n begin, end = 0, batch\n while begin < data_num:\n layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])\n # scale the layer output to (0, 1)\n layer_output -= factors[i][1]\n layer_output /= factors[i][0]\n col_max = np.max(layer_output, axis=0)\n begin += batch\n end += batch\n buckets[col_max > threshold] = True\n activate_num += np.sum(buckets)\n # print('NC:\\t{:.3f} activate_num:\\t{} neuron_num:\\t{}'.format(activate_num / neuron_num, activate_num, neuron_num))\n return activate_num / neuron_num, activate_num, neuron_num\n\n # 2 k-multisection neuron coverage, neuron boundary coverage and strong activation neuron coverage\n def KMNC(self, layers, k=10, batch=1024):\n neuron_num = 0\n for i in layers:\n out_shape = self.model.layers[i].output.shape\n neuron_num += np.prod(out_shape[1:])\n neuron_num = int(neuron_num)\n\n covered_num = 0\n l_covered_num = 0\n u_covered_num = 0\n for i in layers:\n neurons = np.prod(self.model.layers[i].output.shape[1:])\n print(neurons)\n begin, end = 0, batch\n data_num = self.x_train.shape[0]\n\n neuron_max = 
np.full(neurons, np.NINF).astype('float')\n neuron_min = np.full(neurons, np.inf).astype('float')\n while begin < data_num:\n layer_output_train = get_layer_i_output(self.model, i, self.x_train[begin:end])\n batch_neuron_max = np.max(layer_output_train, axis=0)\n batch_neuron_min = np.min(layer_output_train, axis=0)\n neuron_max = np.maximum(batch_neuron_max, neuron_max)\n neuron_min = np.minimum(batch_neuron_min, neuron_min)\n begin += batch\n end += batch\n buckets = np.zeros((neurons, k + 2)).astype('bool')\n interval = (neuron_max - neuron_min) / k\n # print(interval[8], neuron_max[8], neuron_min[8])\n begin, end = 0, batch\n data_num = self.x_adv.shape[0]\n while begin < data_num:\n layer_output_adv = get_layer_i_output(self.model, i, self.x_adv[begin: end])\n layer_output_adv -= neuron_min\n layer_output_adv /= (interval + 10 ** (-100))\n layer_output_adv[layer_output_adv < 0.] = -1\n layer_output_adv[layer_output_adv >= k / 1.0] = k\n layer_output_adv = layer_output_adv.astype('int')\n # index 0 for lower, 1 to k for between, k + 1 for upper\n layer_output_adv = layer_output_adv + 1\n for j in range(neurons):\n uniq = np.unique(layer_output_adv[:, j])\n # print(layer_output_adv[:, j])\n buckets[j, uniq] = True\n begin += batch\n end += batch\n covered_num += np.sum(buckets[:, 1:-1])\n u_covered_num += np.sum(buckets[:, -1])\n l_covered_num += np.sum(buckets[:, 0])\n print('KMNC:\\t{:.3f} covered_num:\\t{}'.format(covered_num / (neuron_num * k), covered_num))\n print(\n 'NBC:\\t{:.3f} l_covered_num:\\t{}'.format((l_covered_num + u_covered_num) / (neuron_num * 2), l_covered_num))\n print('SNAC:\\t{:.3f} u_covered_num:\\t{}'.format(u_covered_num / neuron_num, u_covered_num))\n return covered_num / (neuron_num * k), (l_covered_num + u_covered_num) / (\n neuron_num * 2), u_covered_num / neuron_num, covered_num, l_covered_num, u_covered_num, neuron_num * k\n\n # 3 top-k neuron coverage\n def TKNC(self, layers, k=2, batch=1024):\n def top_k(x, k):\n ind = np.argpartition(x, -k)[-k:]\n return ind[np.argsort((-x)[ind])]\n\n neuron_num = 0\n for i in layers:\n out_shape = self.model.layers[i].output.shape\n neuron_num += np.prod(out_shape[1:])\n neuron_num = int(neuron_num)\n\n pattern_num = 0\n data_num = self.x_adv.shape[0]\n for i in layers:\n pattern_set = set()\n begin, end = 0, batch\n while begin < data_num:\n layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])\n topk = np.argpartition(layer_output, -k, axis=1)[:, -k:]\n topk = np.sort(topk, axis=1)\n # or in order\n # topk = np.apply_along_axis[lambda x: top_k(layer_output, k), 1, layer_output]\n for j in range(topk.shape[0]):\n pattern_set.add(tuple(topk[j]))\n begin += batch\n end += batch\n pattern_num += len(pattern_set)\n print(\n 'TKNC:\\t{:.3f} pattern_num:\\t{} neuron_num:\\t{}'.format(pattern_num / neuron_num, pattern_num, neuron_num))\n return pattern_num / neuron_num, pattern_num, neuron_num\n\n # 4 top-k neuron patterns\n def TKNP(self, layers, k=2, batch=1024):\n def top_k(x, k):\n ind = np.argpartition(x, -k)[-k:]\n return ind[np.argsort((-x)[ind])]\n\n def to_tuple(x):\n l = list()\n for row in x:\n l.append(tuple(row))\n return tuple(l)\n\n pattern_set = set()\n layer_num = len(layers)\n data_num = self.x_adv.shape[0]\n patterns = np.zeros((data_num, layer_num, k))\n layer_cnt = 0\n for i in layers:\n neurons = np.prod(self.model.layers[i].output.shape[1:])\n begin, end = 0, batch\n while begin < data_num:\n layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])\n topk = 
np.argpartition(layer_output, -k, axis=1)[:, -k:]\n topk = np.sort(topk, axis=1)\n # or in order\n # topk = np.apply_along_axis[lambda x: top_k(layer_output, k), 1, layer_output]\n patterns[begin:end, layer_cnt, :] = topk\n begin += batch\n end += batch\n layer_cnt += 1\n\n for i in range(patterns.shape[0]):\n pattern_set.add(to_tuple(patterns[i]))\n pattern_num = len(pattern_set)\n print('TKNP:\\t{:.3f}'.format(pattern_num))\n return pattern_num\n\n def all(self, layers, batch=100):\n self.NC(layers, batch=batch)\n self.KMNC(layers, batch=batch)\n self.TKNC(layers, batch=batch)\n self.TKNP(layers, batch=batch)\n\nif __name__ == '__main__':\n dataset = 'mnist'\n model_name = 'lenet1'\n l = [0, 8]\n\n x_train, y_train, x_test, y_test = load_data(dataset)\n\n # ## load mine trained model\n from keras.models import load_model\n\n model = load_model('../data/' + dataset + '_data/model/' + model_name + '.h5')\n model.summary()\n\n tknp_all = np.array([])\n\n for num in range(0, 50):\n coverage = Coverage(model, x_train, y_train, x_test, y_test, x_test[0: 200*num])\n tknp = coverage.TKNP(l)\n tknp_all = np.append(tknp_all, tknp)\n\n with open(\"testing_coverage_result.txt\", \"a\") as f:\n f.write(\"\\n------------------------------------------------------------------------------\\n\")\n f.write('x: {} \\n'.format(num*200+1))\n f.write('TKNP: {} \\n'.format(tknp))\n\n np.save('Q2_original/tknp_all.npy', tknp_all)\n" ]
[ [ "numpy.save", "numpy.sum", "numpy.argsort", "numpy.append", "numpy.argpartition", "numpy.reshape", "numpy.unique", "numpy.minimum", "numpy.load", "numpy.zeros", "numpy.max", "numpy.min", "tensorflow.Session", "numpy.prod", "numpy.sort", "numpy.maximum", "tensorflow.ConfigProto", "numpy.array", "numpy.full" ] ]
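Before the next record, a numpy-only sketch of the top-k pattern counting that the TKNP method above implements, so the bookkeeping can be sanity-checked without Keras or a trained model (the layer activations are random placeholders):

# Toy TKNP-style pattern count: per input, take the sorted indices of the
# top-k neurons in each layer and count distinct layer-wise index patterns.
import numpy as np

rng = np.random.default_rng(0)
layer_outputs = [rng.normal(size=(100, 16)), rng.normal(size=(100, 8))]  # 2 layers, 100 inputs
k = 2

patterns = set()
for sample in range(100):
    per_layer = tuple(
        tuple(np.sort(np.argpartition(out[sample], -k)[-k:]))
        for out in layer_outputs
    )
    patterns.add(per_layer)
print("TKNP:", len(patterns))  # number of distinct top-k patterns observed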
LouisFaure/scFates
[ "e925b5316c77d923514ac14572eeb738d9f5dd2c" ]
[ "scFates/plot/utils.py" ]
[ "# Extracted from scanpy, thanks!\nfrom functools import lru_cache\nfrom typing import Union, Sequence\nfrom typing_extensions import Literal\nimport collections.abc as cabc  # used by the isinstance(ax, cabc.Sequence) check below\n\nimport numpy as np\nfrom matplotlib import pyplot as pl\nfrom matplotlib import rcParams\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import SubplotParams as sppars\nfrom matplotlib.colors import to_hex\n\nfrom matplotlib.colors import LinearSegmentedColormap\nimport pandas as pd\n\n\ndef setup_axes(\n ax: Union[Axes, Sequence[Axes]] = None,\n panels=\"blue\",\n colorbars=(False,),\n right_margin=None,\n left_margin=None,\n projection: Literal[\"2d\", \"3d\"] = \"2d\",\n show_ticks=False,\n):\n \"\"\"Grid of axes for plotting, legends and colorbars.\"\"\"\n make_projection_available(projection)\n if left_margin is not None:\n raise NotImplementedError(\"We currently don’t support `left_margin`.\")\n if np.any(colorbars) and right_margin is None:\n right_margin = 1 - rcParams[\"figure.subplot.right\"] + 0.21 # 0.25\n elif right_margin is None:\n right_margin = 1 - rcParams[\"figure.subplot.right\"] + 0.06 # 0.10\n # make a list of right margins for each panel\n if not isinstance(right_margin, list):\n right_margin_list = [right_margin for i in range(len(panels))]\n else:\n right_margin_list = right_margin\n\n # make a figure with len(panels) panels in a row side by side\n top_offset = 1 - rcParams[\"figure.subplot.top\"]\n bottom_offset = 0.15 if show_ticks else 0.08\n left_offset = 1 if show_ticks else 0.3 # in units of base_height\n base_height = rcParams[\"figure.figsize\"][1]\n height = base_height\n base_width = rcParams[\"figure.figsize\"][0]\n if show_ticks:\n base_width *= 1.1\n\n draw_region_width = (\n base_width - left_offset - top_offset - 0.5\n ) # this is kept constant throughout\n\n right_margin_factor = sum([1 + right_margin for right_margin in right_margin_list])\n width_without_offsets = (\n right_margin_factor * draw_region_width\n ) # this is the total width that keeps draw_region_width\n\n right_offset = (len(panels) - 1) * left_offset\n figure_width = width_without_offsets + left_offset + right_offset\n draw_region_width_frac = draw_region_width / figure_width\n left_offset_frac = left_offset / figure_width\n right_offset_frac = 1 - (len(panels) - 1) * left_offset_frac\n\n if ax is None:\n pl.figure(\n figsize=(figure_width, height),\n subplotpars=sppars(left=0, right=1, bottom=bottom_offset),\n )\n left_positions = [left_offset_frac, left_offset_frac + draw_region_width_frac]\n for i in range(1, len(panels)):\n right_margin = right_margin_list[i - 1]\n left_positions.append(\n left_positions[-1] + right_margin * draw_region_width_frac\n )\n left_positions.append(left_positions[-1] + draw_region_width_frac)\n panel_pos = [[bottom_offset], [1 - top_offset], left_positions]\n\n axs = []\n if ax is None:\n for icolor, color in enumerate(panels):\n left = panel_pos[2][2 * icolor]\n bottom = panel_pos[0][0]\n width = draw_region_width / figure_width\n height = panel_pos[1][0] - bottom\n if projection == \"2d\":\n ax = pl.axes([left, bottom, width, height])\n elif projection == \"3d\":\n ax = pl.axes([left, bottom, width, height], projection=\"3d\")\n axs.append(ax)\n else:\n axs = ax if isinstance(ax, cabc.Sequence) else [ax]\n\n return axs, panel_pos, draw_region_width, figure_width\n\n\n@lru_cache(None)\ndef make_projection_available(projection):\n avail_projections = {\"2d\", \"3d\"}\n if projection not in avail_projections:\n raise ValueError(f\"choose projection from {avail_projections}\")\n if projection == 
\"2d\":\n return\n\n from io import BytesIO\n from matplotlib import __version__ as mpl_version\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib.figure import Figure\n from matplotlib.patches import Circle\n from matplotlib.collections import PatchCollection\n\n fig = Figure()\n ax = Axes3D(fig)\n\n circles = PatchCollection([Circle((5, 1)), Circle((2, 2))])\n ax.add_collection3d(circles, zs=[1, 2])\n\n buf = BytesIO()\n try:\n fig.savefig(buf)\n except ValueError as e:\n if not \"operands could not be broadcast together\" in str(e):\n raise e\n raise ValueError(\n \"There is a known error with matplotlib 3d plotting, \"\n f\"and your version ({mpl_version}) seems to be affected. \"\n \"Please install matplotlib==3.0.2 or wait for \"\n \"https://github.com/matplotlib/matplotlib/issues/14298\"\n )\n\n\ndef is_categorical(data, c=None):\n from pandas.api.types import is_categorical_dtype as cat\n\n if c is None:\n return cat(data) # if data is categorical/array\n if not is_view(data): # if data is anndata view\n strings_to_categoricals(data)\n return isinstance(c, str) and c in data.obs.keys() and cat(data.obs[c])\n\n\ndef is_view(adata):\n return (\n adata.is_view\n if hasattr(adata, \"is_view\")\n else adata.isview\n if hasattr(adata, \"isview\")\n else adata._isview\n if hasattr(adata, \"_isview\")\n else True\n )\n\n\ndef strings_to_categoricals(adata):\n \"\"\"Transform string annotations to categoricals.\"\"\"\n from pandas.api.types import is_string_dtype, is_integer_dtype, is_bool_dtype\n from pandas import Categorical\n\n def is_valid_dtype(values):\n return (\n is_string_dtype(values) or is_integer_dtype(values) or is_bool_dtype(values)\n )\n\n df = adata.obs\n df_keys = [key for key in df.columns if is_valid_dtype(df[key])]\n for key in df_keys:\n c = df[key]\n c = Categorical(c)\n if 1 < len(c.categories) < min(len(c), 100):\n df[key] = c\n\n df = adata.var\n df_keys = [key for key in df.columns if is_string_dtype(df[key])]\n for key in df_keys:\n c = df[key].astype(\"U\")\n c = Categorical(c)\n if 1 < len(c.categories) < min(len(c), 100):\n df[key] = c\n\n\ndef gen_milestones_gradients(adata, seg_order=None):\n\n seg_order = adata.obs.seg.unique() if seg_order is None else seg_order\n\n if \"milestones_colors\" not in adata.uns or len(adata.uns[\"milestones_colors\"]) == 1:\n from . import palette_tools\n\n palette_tools._set_default_colors_for_categorical_obs(adata, \"milestones\")\n\n def milestones_prog(s):\n cfrom = adata.obs.t[adata.obs.seg == s].idxmin()\n cto = adata.obs.t[adata.obs.seg == s].idxmax()\n mfrom = adata.obs.milestones[cfrom]\n mto = adata.obs.milestones[cto]\n mfrom_c = adata.uns[\"milestones_colors\"][\n np.argwhere(adata.obs.milestones.cat.categories == mfrom)[0][0]\n ]\n mto_c = adata.uns[\"milestones_colors\"][\n np.argwhere(adata.obs.milestones.cat.categories == mto)[0][0]\n ]\n\n cm = LinearSegmentedColormap.from_list(\"test\", [mfrom_c, mto_c], N=1000)\n pst = (\n adata.obs.t[adata.obs.seg == s] - adata.obs.t[adata.obs.seg == s].min()\n ) / (\n adata.obs.t[adata.obs.seg == s].max()\n - adata.obs.t[adata.obs.seg == s].min()\n )\n return pd.Series(list(map(to_hex, cm(pst))), index=pst.index)\n\n return pd.concat(list(map(milestones_prog, seg_order)))\n" ]
[ [ "pandas.api.types.is_categorical_dtype", "pandas.api.types.is_string_dtype", "numpy.argwhere", "numpy.any", "pandas.api.types.is_integer_dtype", "pandas.Categorical", "matplotlib.colors.LinearSegmentedColormap.from_list", "pandas.api.types.is_bool_dtype", "matplotlib.pyplot.axes", "matplotlib.figure.SubplotParams" ] ]
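The heart of gen_milestones_gradients in the record above is interpolating between two milestone colors along normalized pseudotime; a standalone sketch of just that step, with made-up colors, cell names, and pseudotime values:

# Standalone sketch of the per-segment color gradient; inputs are illustrative.
import pandas as pd
from matplotlib.colors import LinearSegmentedColormap, to_hex

t = pd.Series([0.0, 0.5, 1.2, 3.0], index=["c1", "c2", "c3", "c4"])  # pseudotime
cm = LinearSegmentedColormap.from_list("seg", ["#1f77b4", "#d62728"], N=1000)

pst = (t - t.min()) / (t.max() - t.min())                 # normalize to [0, 1]
colors = pd.Series(list(map(to_hex, cm(pst))), index=pst.index)
print(colors)                                             # one hex color per cell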
NightKirie/NCKU_NLP_2108_industry3
[ "23ac13644b140587e23cfeffb114c7c6f46f17a2" ]
[ "Packages/matplotlib-2.2.2/lib/mpl_examples/user_interfaces/embedding_in_tk_sgskip.py" ]
[ "\"\"\"\n===============\nEmbedding In Tk\n===============\n\n\"\"\"\n\nfrom six.moves import tkinter as Tk\n\nfrom matplotlib.backends.backend_tkagg import (\n FigureCanvasTkAgg, NavigationToolbar2TkAgg)\n# Implement the default Matplotlib key bindings.\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.figure import Figure\n\nimport numpy as np\n\n\nroot = Tk.Tk()\nroot.wm_title(\"Embedding in Tk\")\n\nfig = Figure(figsize=(5, 4), dpi=100)\nt = np.arange(0, 3, .01)\nfig.add_subplot(111).plot(t, 2 * np.sin(2 * np.pi * t))\n\ncanvas = FigureCanvasTkAgg(fig, master=root) # A tk.DrawingArea.\ncanvas.draw()\ncanvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n\ntoolbar = NavigationToolbar2TkAgg(canvas, root)\ntoolbar.update()\ncanvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)\n\n\ndef on_key_press(event):\n print(\"you pressed {}\".format(event.key))\n key_press_handler(event, canvas, toolbar)\n\n\ncanvas.mpl_connect(\"key_press_event\", on_key_press)\n\n\ndef _quit():\n root.quit() # stops mainloop\n root.destroy() # this is necessary on Windows to prevent\n # Fatal Python Error: PyEval_RestoreThread: NULL tstate\n\n\nbutton = Tk.Button(master=root, text=\"Quit\", command=_quit)\nbutton.pack(side=Tk.BOTTOM)\n\nTk.mainloop()\n# If you put root.destroy() here, it will cause an error if the window is\n# closed with the window manager.\n" ]
[ [ "numpy.sin", "matplotlib.figure.Figure", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg", "matplotlib.backend_bases.key_press_handler", "numpy.arange", "matplotlib.backends.backend_tkagg.NavigationToolbar2TkAgg" ] ]
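The embedding example above uses the matplotlib 2.x toolbar name NavigationToolbar2TkAgg, which was deprecated in 2.2 and dropped in later releases. A sketch of the same embedding against the newer API, assuming matplotlib >= 3 where the class is NavigationToolbar2Tk and six is no longer needed:

# Same Tk embedding with updated names; assumes matplotlib >= 3.
import tkinter as tk
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure

root = tk.Tk()
root.wm_title("Embedding in Tk")

fig = Figure(figsize=(5, 4), dpi=100)
t = np.arange(0, 3, .01)
fig.add_subplot(111).plot(t, 2 * np.sin(2 * np.pi * t))

canvas = FigureCanvasTkAgg(fig, master=root)
canvas.draw()
canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)

toolbar = NavigationToolbar2Tk(canvas, root)  # replaces NavigationToolbar2TkAgg
toolbar.update()

tk.mainloop()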
rudyn2/wine_market_temporal_prediction
[ "ab51dbdaa75dfd532eaef3f712ca21f3117c6d74" ]
[ "src/TimeSeries/TimeSeries.py" ]
[ "from collections import defaultdict\nfrom copy import deepcopy\nfrom typing import List, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler\n\n\nclass DiffOperation:\n\n def __init__(self):\n self._data_copy: pd.Series = pd.Series(dtype='float64')\n self._interval: int = 0\n\n def fit(self, data: pd.Series):\n self._data_copy = data.copy(deep=True)\n\n def fit_transform(self, data: pd.Series, interval: int = 1) -> pd.Series:\n self._data_copy = data.copy()\n self._interval = interval\n return self.transform(data)\n\n def transform(self, data: pd.Series) -> pd.Series:\n diff = []\n indexes = []\n for index in range(self._interval, len(data)):\n value = data.iloc[index] - data.iloc[index - self._interval]\n indexes.append(data.index[index])\n diff.append(value)\n return pd.Series(data=diff, index=indexes).dropna()\n\n def invert(self, external_diff: pd.Series) -> pd.Series:\n\n # assert len(external) == len(self._data_copy) - self._interval\n assert type(external_diff.index) is pd.DatetimeIndex\n\n inverted_indexes = [self._data_copy.index[i] for i in range(self._interval)]\n inverted_values = [self._data_copy.iloc[i] for i in range(self._interval)]\n\n for index in external_diff.index:\n # datetime index\n int_loc = list(self._data_copy.index).index(index)\n value = external_diff.loc[index] + self._data_copy.iloc[int_loc - self._interval]\n inverted_values.append(value)\n inverted_indexes.append(index)\n inverter_ts = pd.Series(data=np.float64(inverted_values), index=inverted_indexes)\n return inverter_ts\n\n def __copy__(self):\n new = DiffOperation()\n new._data_copy = self._data_copy.copy(deep=True)\n new._interval = self._interval\n return new\n\n def partial_invert(self, external: pd.Series) -> pd.Series:\n\n assert type(external.index) is pd.DatetimeIndex\n\n inverted_indexes = []\n inverted_values = []\n\n for index in external.index:\n # datetime index\n int_loc = list(self._data_copy.index).index(index)\n value = external.loc[index] + self._data_copy.iloc[int_loc - self._interval]\n inverted_values.append(value)\n inverted_indexes.append(index)\n\n return pd.Series(data=inverted_values, index=inverted_indexes).copy()\n\n\nclass TimeSeries:\n \"\"\"\n Class to handle generic temporal series. 
The series must be stored in a csv file where each row is a temporal\n record and each column a different temporal serie.\n \"\"\"\n\n def __init__(self):\n self._diff_operators: defaultdict[list] = defaultdict(list)\n self._scaler: MinMaxScaler = MinMaxScaler()\n self._is_scaled = False\n self._data: pd.DataFrame = pd.DataFrame()\n self._col_names: list = []\n self._index_name: str = ''\n\n def load(self, file_path: str, index_col: str):\n \"\"\"\n Loads a csv with temporal series.\n\n :param file_path: Path of file to the data.\n :param index_col: Name of the index column.\n \"\"\"\n self._data = pd.read_csv(file_path)\n self._col_names = [column_name for column_name in self._data.columns if\n column_name not in [index_col, 'Unnamed: 0']]\n self._index_name = index_col\n self._preprocess()\n\n def _preprocess(self):\n \"\"\"\n Does general pre-processing of the temporal series.\n \"\"\"\n if 'Unnamed: 0' in self._data.columns.values:\n self._data.drop(columns=['Unnamed: 0'], inplace=True)\n if self._index_name is not None:\n self._data.set_index(pd.to_datetime(self._data[self._index_name]), inplace=True)\n self._data.index.freq = self._data.index.inferred_freq\n for col_name in self._col_names:\n self._data[col_name] = pd.to_numeric(self._data[col_name], errors='coerce')\n self._data[col_name].interpolate(method='time', inplace=True)\n self._data.drop(columns=[self._index_name], inplace=True)\n\n def timeseries_to_supervised(self, name: str, lag: int = 1, width: int = 4, pred_width: int = 2) -> Tuple[\n np.array, np.array, list, list]:\n \"\"\"\n Transform the data from temporal series to (x, y) supervised data. It returns two numpy arrays with data.\n\n :param name: Name of particular serie that wants to be transformed.\n :param lag: Number of periods of lag.\n :param width: Width of the temporal window of data.\n :param pred_width: Width of prediction window.\n :return: A Tuple.\n 1) numpy array with X elements.\n 2) numpy array with y elements.\n 3) temporal indexes of X elements.\n 4) temporal indexes of y elements.\n \"\"\"\n series = self._data[name].copy()\n dataset_x = []\n dataset_y = []\n dataset_x_index = []\n dataset_y_index = []\n\n for index in range(len(series) - width - lag - pred_width + 1):\n x_element = series.iloc[index:index + width]\n y_element = series.iloc[index + width + lag:index + width + lag + pred_width]\n dataset_x.append(np.array(x_element))\n dataset_y.append(np.array(y_element))\n dataset_x_index.append(series.index[index:index + width])\n dataset_y_index.append(series.index[index + width + lag:index + width + lag + pred_width])\n\n return np.array(dataset_x), np.array(dataset_y), dataset_x_index, dataset_y_index\n\n def difference(self, interval: int = 1):\n \"\"\"\n Differences the data for an interval.\n\n :param interval: Delta difference.\n :return: A copy of the data after the operation.\n \"\"\"\n\n assert type(interval) is int, \"Just integer values for interval parameter are allowed.\"\n for name in self._col_names:\n diff_op = DiffOperation()\n self._data[name] = diff_op.fit_transform(self._data[name], interval)\n self._diff_operators[name].append(diff_op)\n self._data.dropna(inplace=True)\n return self.copy()\n\n def inv_difference(self):\n \"\"\"\n Reverse the last difference operation.\n \"\"\"\n new_data = {}\n for name in self._col_names:\n new_data[name] = self._diff_operators[name].pop().invert(self._data[name])\n self._data = pd.DataFrame(new_data)\n return self.copy()\n\n def inv_difference_serie(self, name: str, external_serie: 
pd.Series) -> pd.Series:\n \"\"\"\n Reverse the difference of external data using difference values stored in the last difference\n operation made by this object.\n \"\"\"\n if name not in self._diff_operators.keys():\n raise ValueError(\"Invalid operation\")\n return self._diff_operators[name][-1].partial_invert(external_serie)\n\n def fit_scale(self):\n \"\"\"\n fits and scales the data using a MinMaxScaler.\n \"\"\"\n self._data[:] = self._scaler.fit_transform(X=self._data)\n self._is_scaled = True\n return self.copy()\n\n def scale(self, other_time_series):\n \"\"\"\n scales the data using a MinMaxScaler.\n \"\"\"\n other_time_series.data[:] = self._scaler.transform(X=other_time_series.data)\n other_time_series._is_scaled = True\n other_time_series._scaler = self._scaler\n return other_time_series\n\n def inv_scale(self):\n \"\"\"\n Reverse the last scale operation.\n \"\"\"\n self._data[:] = self._scaler.inverse_transform(X=self._data)\n self._is_scaled = False\n return self.copy()\n\n def inv_scale_serie(self, name: str, external_serie: pd.Series) -> pd.Series:\n \"\"\"\n Reverse the scale of data using Min Max values stored in the last scaling operation made by this object.\n\n :param name: Name of the reference serie.\n :param external_serie: Serie that wants to be reversed.\n \"\"\"\n if self._is_scaled:\n loc = self._col_names.index(name)\n col_min = self._scaler.min_[loc]\n col_scale = self._scaler.scale_[loc]\n external_serie -= col_min\n external_serie /= col_scale\n return external_serie\n return external_serie\n\n def copy(self):\n \"\"\"\n Makes a copy of a TimeSeries.\n \"\"\"\n new = TimeSeries()\n new._data = self._data.copy()\n new._index_name = self._index_name\n new._col_names = self._col_names\n new._index_name = self._index_name\n new._scaler = self._scaler\n new._is_scaled = self._is_scaled\n new._diff_operators = deepcopy(self._diff_operators)\n return new\n\n def plot_with(self, name: str, external_serie: pd.Series):\n \"\"\"\n Makes a plot of the common values between the external serie and the column called 'name'.\n \"\"\"\n intersection = self._data[name].loc[external_serie.index]\n if len(intersection) == 0:\n raise ValueError(f\"There are not common values between col '{name}' and \"\n \"provided external series.\")\n fig, ax = self._get_customized_figure()\n internal: pd.Series = self[name].loc[intersection.index]\n external = external_serie.loc[intersection.index]\n internal.plot(ax=ax, label='internal')\n external.plot(ax=ax, label='external')\n ax.legend()\n plt.show()\n\n @staticmethod\n def _get_customized_figure():\n \"\"\"\n This method should work as a style template for all the plots.\n \"\"\"\n fig, ax = plt.subplots()\n return fig, ax\n\n def plot_serie(self, name: str, ax: plt.Axes, start: str = ''):\n \"\"\"\n Plots a temporal serie.\n\n :param name: Name of the serie that wants to be plotted.\n :param ax: Matplotlib axis where data will be plotted.\n :param start: String representation of a date from which predict.\n \"\"\"\n if start == '':\n start = self._data.index[0]\n self._data[name][start:].plot(ax=ax, label='Observaciones')\n ax.legend()\n\n def col_names(self) -> List[str]:\n return self._col_names\n\n def info(self):\n return self._data.info()\n\n def __len__(self) -> int:\n return len(self._data)\n\n def __getitem__(self, item):\n return self._data[item]\n\n @property\n def data(self):\n return self._data\n" ]
[ [ "pandas.Series", "pandas.read_csv", "sklearn.preprocessing.MinMaxScaler", "pandas.to_numeric", "pandas.DataFrame", "matplotlib.pyplot.subplots", "pandas.to_datetime", "matplotlib.pyplot.show", "numpy.array", "numpy.float64" ] ]
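A quick round-trip check for the DiffOperation class in the record above: fit_transform drops the first `interval` values while storing a copy of the input, and invert adds the differences back onto that stored copy, so recovery is exact. The import path is an assumption based on the record's file_path:

# Round-trip sketch for DiffOperation; the import path below is assumed.
import pandas as pd
from src.TimeSeries.TimeSeries import DiffOperation

idx = pd.date_range("2020-01-01", periods=6, freq="MS")
s = pd.Series([10.0, 12.0, 11.0, 15.0, 14.0, 18.0], index=idx)

op = DiffOperation()
diff = op.fit_transform(s, interval=1)   # s[t] - s[t-1]; the first point is dropped
restored = op.invert(diff)               # re-prepends the stored head value
assert (restored == s).all()             # exact recovery of the original series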
chyan26/ginga
[ "e00c887d8660e0a4178f9681ca7ea7784b7ca129" ]
[ "ginga/qtw/CanvasRenderQt.py" ]
[ "#\n# CanvasRenderQt.py -- for rendering into a ImageViewQt widget\n#\n# This is open-source software licensed under a BSD license.\n# Please see the file LICENSE.txt for details.\n#\nimport numpy as np\n\nfrom ginga.qtw.QtHelp import (QtCore, QPainter, QPen, QPolygon, QColor,\n QPainterPath, QImage, QPixmap, get_font)\n\nfrom ginga import colors\nfrom ginga.canvas import render\n# force registration of all canvas types\nimport ginga.canvas.types.all # noqa\n\n\nclass RenderContext(render.RenderContextBase):\n\n def __init__(self, renderer, viewer, surface):\n render.RenderContextBase.__init__(self, renderer, viewer)\n\n self.cr = QPainter(surface)\n self.cr.setRenderHint(QPainter.Antialiasing)\n self.cr.setRenderHint(QPainter.TextAntialiasing)\n\n def __get_color(self, color, alpha):\n clr = QColor()\n if isinstance(color, tuple):\n clr.setRgbF(color[0], color[1], color[2], alpha)\n else:\n r, g, b = colors.lookup_color(color)\n clr.setRgbF(r, g, b, alpha)\n return clr\n\n def set_line_from_shape(self, shape):\n pen = QPen()\n pen.setWidthF(getattr(shape, 'linewidth', 1.0))\n\n if hasattr(shape, 'linestyle'):\n if shape.linestyle == 'dash':\n pen.setDashPattern([3.0, 4.0, 6.0, 4.0])\n pen.setDashOffset(5.0)\n\n alpha = getattr(shape, 'alpha', 1.0)\n color = self.__get_color(shape.color, alpha)\n pen.setColor(color)\n self.cr.setPen(pen)\n\n def set_fill_from_shape(self, shape):\n fill = getattr(shape, 'fill', False)\n if fill:\n if hasattr(shape, 'fillcolor') and shape.fillcolor:\n color = shape.fillcolor\n else:\n color = shape.color\n\n if color is None:\n self.cr.setBrush(QtCore.Qt.NoBrush)\n else:\n alpha = getattr(shape, 'alpha', None)\n fillalpha = getattr(shape, 'fillalpha', alpha)\n color = self.__get_color(color, fillalpha)\n self.cr.setBrush(color)\n else:\n self.cr.setBrush(QtCore.Qt.NoBrush)\n\n def set_font_from_shape(self, shape):\n if hasattr(shape, 'font'):\n if hasattr(shape, 'fontsize') and shape.fontsize is not None:\n fontsize = shape.fontsize\n else:\n fontsize = shape.scale_font(self.viewer)\n fontsize = self.scale_fontsize(fontsize)\n font = get_font(shape.font, fontsize)\n self.cr.setFont(font)\n\n def initialize_from_shape(self, shape, line=True, fill=True, font=True):\n if line:\n self.set_line_from_shape(shape)\n if fill:\n self.set_fill_from_shape(shape)\n if font:\n self.set_font_from_shape(shape)\n\n def set_line(self, color, alpha=1.0, linewidth=1, style='solid'):\n clr = self.__get_color(color, alpha)\n pen = self.cr.pen()\n pen.setColor(clr)\n pen.setWidthF(float(linewidth))\n if style == 'dash':\n pen.setDashPattern([3.0, 4.0, 6.0, 4.0])\n pen.setDashOffset(5.0)\n self.cr.setPen(pen)\n\n def set_fill(self, color, alpha=1.0):\n if color is None:\n self.cr.setBrush(QtCore.Qt.NoBrush)\n else:\n color = self.__get_color(color, alpha)\n self.cr.setBrush(color)\n\n def set_font(self, fontname, fontsize, color='black', alpha=1.0):\n self.set_line(color, alpha=alpha)\n fontsize = self.scale_fontsize(fontsize)\n font = get_font(fontname, fontsize)\n self.cr.setFont(font)\n\n def text_extents(self, text):\n fm = self.cr.fontMetrics()\n width = fm.width(text)\n height = fm.height()\n return width, height\n\n ##### DRAWING OPERATIONS #####\n\n def draw_text(self, cx, cy, text, rot_deg=0.0):\n self.cr.save()\n self.cr.translate(cx, cy)\n self.cr.rotate(-rot_deg)\n\n self.cr.drawText(0, 0, text)\n\n self.cr.restore()\n\n def draw_polygon(self, cpoints):\n qpoints = [QtCore.QPoint(p[0], p[1]) for p in cpoints]\n p = cpoints[0]\n qpoints.append(QtCore.QPoint(p[0], 
p[1]))\n qpoly = QPolygon(qpoints)\n\n self.cr.drawPolygon(qpoly)\n\n def draw_circle(self, cx, cy, cradius):\n # this is necessary to work around a bug in Qt--radius of 0\n # causes a crash\n cradius = max(cradius, 0.000001)\n pt = QtCore.QPointF(cx, cy)\n self.cr.drawEllipse(pt, float(cradius), float(cradius))\n\n def draw_bezier_curve(self, cp):\n path = QPainterPath()\n path.moveTo(cp[0][0], cp[0][1])\n path.cubicTo(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])\n self.cr.drawPath(path)\n\n def draw_ellipse_bezier(self, cp):\n # draw 4 bezier curves to make the ellipse\n path = QPainterPath()\n path.moveTo(cp[0][0], cp[0][1])\n path.cubicTo(cp[1][0], cp[1][1], cp[2][0], cp[2][1], cp[3][0], cp[3][1])\n path.cubicTo(cp[4][0], cp[4][1], cp[5][0], cp[5][1], cp[6][0], cp[6][1])\n path.cubicTo(cp[7][0], cp[7][1], cp[8][0], cp[8][1], cp[9][0], cp[9][1])\n path.cubicTo(cp[10][0], cp[10][1], cp[11][0], cp[11][1], cp[12][0], cp[12][1])\n self.cr.drawPath(path)\n\n def draw_line(self, cx1, cy1, cx2, cy2):\n self.cr.pen().setCapStyle(QtCore.Qt.RoundCap)\n self.cr.drawLine(cx1, cy1, cx2, cy2)\n\n def draw_path(self, cp):\n self.cr.pen().setCapStyle(QtCore.Qt.RoundCap)\n pts = [QtCore.QLineF(QtCore.QPointF(cp[i][0], cp[i][1]),\n QtCore.QPointF(cp[i + 1][0], cp[i + 1][1]))\n for i in range(len(cp) - 1)]\n self.cr.drawLines(pts)\n\n\nclass CanvasRenderer(render.RendererBase):\n\n def __init__(self, viewer, surface_type='qimage'):\n render.RendererBase.__init__(self, viewer)\n\n self.kind = 'qt'\n # Qt needs this to be in BGRA\n self.rgb_order = 'BGRA'\n self.qimg_fmt = QImage.Format_RGB32\n self.surface_type = surface_type\n # the offscreen drawing surface\n self.surface = None\n\n def resize(self, dims):\n \"\"\"Resize our drawing area to encompass a space defined by the\n given dimensions.\n \"\"\"\n width, height = dims[:2]\n self.logger.debug(\"renderer reconfigured to %dx%d\" % (\n width, height))\n if self.surface_type == 'qpixmap':\n self.surface = QPixmap(width, height)\n else:\n self.surface = QImage(width, height, self.qimg_fmt)\n\n # fill surface with background color;\n # this reduces unwanted garbage in the resizing window\n painter = QPainter(self.surface)\n size = self.surface.size()\n sf_wd, sf_ht = size.width(), size.height()\n bg = self.viewer.img_bg\n bgclr = self._get_color(*bg)\n painter.fillRect(QtCore.QRect(0, 0, sf_wd, sf_ht), bgclr)\n\n def _get_qimage(self, rgb_data):\n ht, wd, channels = rgb_data.shape\n\n result = QImage(rgb_data.data, wd, ht, self.qimg_fmt)\n # Need to hang on to a reference to the array\n result.ndarray = rgb_data\n return result\n\n def _get_color(self, r, g, b):\n # TODO: combine with the method from the RenderContext?\n n = 255.0\n clr = QColor(int(r * n), int(g * n), int(b * n))\n return clr\n\n def render_image(self, rgbobj, dst_x, dst_y):\n \"\"\"Render the image represented by (rgbobj) at dst_x, dst_y\n in the pixel space.\n *** internal method-- do not use ***\n \"\"\"\n self.logger.debug(\"redraw surface=%s\" % (self.surface))\n if self.surface is None:\n return\n self.logger.debug(\"drawing to surface\")\n\n # Prepare array for rendering\n # TODO: what are options for high bit depth under Qt?\n data = rgbobj.get_array(self.rgb_order, dtype=np.uint8)\n (height, width) = data.shape[:2]\n\n daht, dawd, depth = data.shape\n self.logger.debug(\"data shape is %dx%dx%d\" % (dawd, daht, depth))\n\n # Get qimage for copying pixel data\n qimage = self._get_qimage(data)\n drawable = self.surface\n\n painter = QPainter(drawable)\n 
#painter.setWorldMatrixEnabled(True)\n\n # fill surface with background color\n size = drawable.size()\n sf_wd, sf_ht = size.width(), size.height()\n bg = self.viewer.img_bg\n bgclr = self._get_color(*bg)\n painter.fillRect(QtCore.QRect(0, 0, sf_wd, sf_ht), bgclr)\n\n # draw image data from buffer to offscreen pixmap\n painter.drawImage(QtCore.QRect(dst_x, dst_y, width, height),\n qimage,\n QtCore.QRect(0, 0, width, height))\n\n def get_surface_as_array(self, order=None):\n if self.surface_type == 'qpixmap':\n qimg = self.surface.toImage()\n else:\n qimg = self.surface\n #qimg = qimg.convertToFormat(QImage.Format_RGBA32)\n\n width, height = qimg.width(), qimg.height()\n\n if hasattr(qimg, 'bits'):\n # PyQt\n ptr = qimg.bits()\n ptr.setsize(qimg.byteCount())\n else:\n # PySide\n ptr = qimg.constBits()\n\n arr = np.array(ptr).reshape(height, width, 4)\n\n # adjust according to viewer's needed order\n return self.reorder(order, arr)\n\n def setup_cr(self, shape):\n cr = RenderContext(self, self.viewer, self.surface)\n cr.initialize_from_shape(shape, font=False)\n return cr\n\n def get_dimensions(self, shape):\n cr = self.setup_cr(shape)\n cr.set_font_from_shape(shape)\n return cr.text_extents(shape.text)\n\n#END\n" ]
[ [ "numpy.array" ] ]
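The renderer above hands numpy buffers to Qt in _get_qimage and pins the array onto the QImage so the underlying memory outlives Qt's view of it. A standalone sketch of that handoff, assuming PyQt5 (the buffer size and fill color are arbitrary):

# numpy -> QImage handoff as in _get_qimage above; assumes PyQt5 is installed.
import numpy as np
from PyQt5.QtGui import QImage

wd, ht = 320, 240
bgra = np.zeros((ht, wd, 4), dtype=np.uint8)  # BGRA layout, matching rgb_order above
bgra[..., 2] = 255                            # fill the red channel (BGRA order)

qimg = QImage(bgra.data, wd, ht, QImage.Format_RGB32)
qimg.ndarray = bgra                           # keep a reference so the buffer stays alive
print(qimg.width(), qimg.height())            # 320 240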
Capri2014/pyro
[ "546f9010aeb2308ae566726b1cec67a7b4fda9c2" ]
[ "pyro/distributions/mixture.py" ]
[ "import torch\nfrom torch.distributions import constraints\nfrom torch.distributions.utils import lazy_property\n\nfrom pyro.distributions.torch_distribution import TorchDistribution\nfrom pyro.distributions.util import broadcast_shape\n\n\nclass MaskedConstraint(constraints.Constraint):\n \"\"\"\n Combines two constraints interleaved elementwise by a mask.\n\n :param torch.Tensor mask: boolean mask tensor (of dtype ``torch.bool``)\n :param torch.constraints.Constraint constraint0: constraint that holds\n wherever ``mask == 0``\n :param torch.constraints.Constraint constraint1: constraint that holds\n wherever ``mask == 1``\n \"\"\"\n def __init__(self, mask, constraint0, constraint1):\n self.mask = mask\n self.constraint0 = constraint0\n self.constraint1 = constraint1\n\n def check(self, value):\n result = self.constraint0.check(value)\n mask = self.mask.expand(result.shape) if result.shape != self.mask.shape else self.mask\n result[mask] = self.constraint1.check(value)[mask]\n return result\n\n\nclass MaskedMixture(TorchDistribution):\n \"\"\"\n A masked deterministic mixture of two distributions.\n\n This is useful when the mask is sampled from another distribution,\n possibly correlated across the batch. Often the mask can be\n marginalized out via enumeration.\n\n Example::\n\n change_point = pyro.sample(\"change_point\",\n dist.Categorical(torch.ones(len(data) + 1)),\n infer={'enumerate': 'parallel'})\n mask = torch.arange(len(data), dtype=torch.long) >= changepoint\n with pyro.plate(\"data\", len(data)):\n pyro.sample(\"obs\", MaskedMixture(mask, dist1, dist2), obs=data)\n\n :param torch.Tensor mask: A byte tensor toggling between ``component0``\n and ``component1``.\n :param pyro.distributions.TorchDistribution component0: a distribution\n for batch elements ``mask == 0``.\n :param pyro.distributions.TorchDistribution component1: a distribution\n for batch elements ``mask == 1``.\n \"\"\"\n arg_constraints = {} # nothing can be constrained\n\n def __init__(self, mask, component0, component1, validate_args=None):\n if not torch.is_tensor(mask) or mask.dtype != torch.bool:\n raise ValueError('Expected mask to be a BoolTensor but got {}'.format(type(mask)))\n if component0.event_shape != component1.event_shape:\n raise ValueError('components event_shape disagree: {} vs {}'\n .format(component0.event_shape, component1.event_shape))\n batch_shape = broadcast_shape(mask.shape, component0.batch_shape, component1.batch_shape)\n if mask.shape != batch_shape:\n mask = mask.expand(batch_shape)\n if component0.batch_shape != batch_shape:\n component0 = component0.expand(batch_shape)\n if component1.batch_shape != batch_shape:\n component1 = component1.expand(batch_shape)\n\n self.mask = mask\n self.component0 = component0\n self.component1 = component1\n super(MaskedMixture, self).__init__(batch_shape, component0.event_shape, validate_args)\n\n # We need to disable _validate_sample on each component since samples are only valid on the\n # component from which they are drawn. 
Instead we perform validation using a MaskedConstraint.\n self.component0._validate_args = False\n self.component1._validate_args = False\n\n @property\n def has_rsample(self):\n return self.component0.has_rsample and self.component1.has_rsample\n\n @constraints.dependent_property\n def support(self):\n if self.component0.support is self.component1.support:\n return self.component0.support\n return MaskedConstraint(self.mask, self.component0.support, self.component1.support)\n\n def expand(self, batch_shape):\n try:\n return super(MaskedMixture, self).expand(batch_shape)\n except NotImplementedError:\n mask = self.mask.expand(batch_shape)\n component0 = self.component0.expand(batch_shape)\n component1 = self.component1.expand(batch_shape)\n return type(self)(mask, component0, component1)\n\n def sample(self, sample_shape=torch.Size()):\n mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask\n result = self.component0.sample(sample_shape)\n result[mask] = self.component1.sample(sample_shape)[mask]\n return result\n\n def rsample(self, sample_shape=torch.Size()):\n mask = self.mask.expand(sample_shape + self.batch_shape) if sample_shape else self.mask\n result = self.component0.rsample(sample_shape)\n result[mask] = self.component1.rsample(sample_shape)[mask]\n return result\n\n def log_prob(self, value):\n value_shape = broadcast_shape(value.shape, self.batch_shape + self.event_shape)\n if value.shape != value_shape:\n value = value.expand(value_shape)\n if self._validate_args:\n self._validate_sample(value)\n mask_shape = value_shape[:len(value_shape) - len(self.event_shape)]\n mask = self.mask\n if mask.shape != mask_shape:\n mask = mask.expand(mask_shape)\n result = self.component0.log_prob(value)\n result[mask] = self.component1.log_prob(value)[mask]\n return result\n\n @lazy_property\n def mean(self):\n result = self.component0.mean.clone()\n result[self.mask] = self.component1.mean[self.mask]\n return result\n\n @lazy_property\n def variance(self):\n result = self.component0.variance.clone()\n result[self.mask] = self.component1.variance[self.mask]\n return result\n" ]
[ [ "torch.is_tensor", "torch.Size" ] ]
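A minimal usage sketch for the MaskedMixture class above, assuming pyro is installed and exports it as pyro.distributions.MaskedMixture; the tensor values are illustrative.

import torch
import pyro.distributions as dist

mask = torch.tensor([False, False, True, True])
component0 = dist.Normal(torch.zeros(4), torch.ones(4))
component1 = dist.Normal(10.0 * torch.ones(4), torch.ones(4))

mixture = dist.MaskedMixture(mask, component0, component1)
x = mixture.sample()        # first two entries drawn near 0, last two near 10
print(mixture.log_prob(x))  # elementwise log density, shape (4,)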
PaulMarcelo/Python
[ "66a9fa21d2d803f5b06d285c705812251dc6d234" ]
[ "Rest Django Framework/api_image_manager/blur_image_app/blur_image.py" ]
[ "import imutils\nimport numpy as np\nimport cv2\nimport base64\n\n\ndef byte_to_image(string_byte):\n jpg_original = base64.b64decode(string_byte)\n jpg_as_np = np.frombuffer(jpg_original, dtype=np.uint8)\n img = cv2.imdecode(jpg_as_np, flags=1)\n return img\n\n\ndef file_to_image(file):\n img = np.asarray(bytearray(file.read()), dtype=\"uint8\")\n img = cv2.imdecode(img, cv2.IMREAD_COLOR)\n return img\n\n\ndef resize_gray_image(image):\n orig_resize = imutils.resize(image, width=500)\n gray = cv2.cvtColor(orig_resize, cv2.COLOR_BGR2GRAY)\n return gray\n\n\ndef detect_blur_fft(image, size, thresh):\n (h, w) = image.shape\n (cX, cY) = (int(w / 2.0), int(h / 2.0))\n fft = np.fft.fft2(image)\n fft_shift = np.fft.fftshift(fft)\n fft_shift[cY - size:cY + size, cX - size:cX + size] = 0\n fft_shift = np.fft.ifftshift(fft_shift)\n recon = np.fft.ifft2(fft_shift)\n magnitude = 20 * np.log(np.abs(recon))\n mean = np.mean(magnitude)\n return mean, mean <= thresh\n\n\ndef evaluate_image(image, file_name_param, radio_value, thresh_value):\n image = resize_gray_image(image)\n (mean, blurry) = detect_blur_fft(image, radio_value, thresh_value)\n return {\"filename\": file_name_param, \"value\": mean, \"isblur\": blurry}\n" ]
[ [ "numpy.fft.fftshift", "numpy.fft.fft2", "numpy.fft.ifft2", "numpy.abs", "numpy.fft.ifftshift", "numpy.mean", "numpy.frombuffer" ] ]
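A short sketch showing how the helpers above compose, assuming the module is importable as blur_image and that photo.jpg exists on disk; the size and thresh values are illustrative assumptions, not defaults from the file.

import cv2
from blur_image import resize_gray_image, detect_blur_fft

img = cv2.imread('photo.jpg')    # BGR image from disk
gray = resize_gray_image(img)    # width-500 grayscale copy
mean, is_blurry = detect_blur_fft(gray, size=60, thresh=10)
print('FFT magnitude mean = %.2f, blurry = %s' % (mean, is_blurry))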
BrandonSLockey/HFPN-Stochastic-Version
[ "ed333a2557d4b70d34cd3e4c0c2eb4133d8a85a3" ]
[ "HFPN-Stochastic-Version/sHFPN Plotting.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.signal import find_peaks_cwt\nfrom datetime import datetime\n\n#from scipy.signal import convolve\n#get_ipython().run_line_magic('matplotlib', 'qt')\n# Only run this cell once to avoid confusion with directories\n# Point this to the directory where HFPN.py is relative to your working directory\ncwd = os.getcwd() # Get current working directory\nroot_folder = os.sep + \"HFPN-Stochastic-Version\"\n# Move to 'utils' from current directory position\nsys.path.insert(0, cwd[:(cwd.index(root_folder)+len(root_folder))] + os.sep + \"HFPN-Stochastic-Version\" + os.sep)\nfrom visualisation import Analysis\n\n\n# In[2]:\nanalysis = {}\n##############################################################################\n################Input File Name and Plotting Steps############################\n##############################################################################\nstart_time = datetime.now()\n\n\nFile1 = '6MSD10healthy'\nFile2 = '6MSD10aged'\nFile3 = '6MSD10agedCD33'\ntest = 'robustnesstest'\ntimestep = 0.01\n\ndesired_plotting_steps = 1000\nwindow_size = 10000 #used to find rolling average \n\n# analysis[File1] = Analysis.load_from_file(File1)\n# analysis[File2] = Analysis.load_from_file(File2)\n# analysis[File3] = Analysis.load_from_file(File3)\nanalysis[test] = Analysis.load_from_file(test)\n\nexecution_time = datetime.now()-start_time\nprint('\\n\\nLoad-in Time:', execution_time)\nprint(\"\")\n##############################################################################\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~End - BSL~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n############################################################################## \n\n\n\n# In[3]:\n\n\ndef smoothen(array, filter_size):\n filt=np.ones(filter_size)/filter_size\n return np.convolve(array[:-(filter_size-1),0],filt) # np.convolve: the scipy.signal.convolve import above is commented out\n\n\ndef movingaverage(interval, window_size):\n window= np.ones(int(window_size))/float(window_size)\n return np.convolve(interval, window, 'same')\n \ndef create_plot(analysis, input_place_list, place_labels, mutation_list, mutation_labels, plot_title):\n t=np.arange(0,(desired_plotting_steps*timestep)+timestep,timestep) #\n\n fig,ax=plt.subplots()\n linestep = 0.3\n line_width = 2.5\n minima = []\n timeseries = []\n timeseries2 = []\n\n \n for i, mutation in enumerate(mutation_list):\n for place, place_label in zip(input_place_list, place_labels):\n data = analysis[mutation].mean_token_history_for_places([place])[0:desired_plotting_steps+1] #mutation is the file_name\n #print(data[200000]) #units in time_step\n y = data[:,0]\n # ylist = y.tolist()\n\n if place_label == \"\":\n ax.plot(t, data, label = mutation_labels[i]+' - '+place_label, linewidth = line_width- i*linestep, color=\"dimgrey\")\n \n # minimadata = y*-1\n # peaks = find_peaks_cwt(minimadata, np.arange(1,10000))\n # ax.plot(peaks*timestep, y[peaks], \"x\")\n \n for j in range(1, len(y)-1): # use j here: reusing i would clobber the enumerate index needed for labels below\n if y[j-1] > y[j] and y[j+1] > y[j]:\n minima.append(y[j])\n # np.append(minima, y[j])\n timeseries.append(j)\n # np.append(timeseries, j)\n # print(minima)\n for j in timeseries:\n timeseries2.append(j*timestep)\n \n \n \n # y_av = movingaverage(y, window_size)\n # ax.plot(t[window_size:desired_plotting_steps-window_size], y_av[window_size:desired_plotting_steps-window_size], label = 'rolling average', linewidth = line_width- i*linestep, color = \"r\")\n # ax.plot(timeseries2, y[timeseries], color='green', marker='o', 
linestyle='dashed')\n else:\n ax.plot(t, data, label = mutation_labels[i]+' - '+place_label, linewidth = line_width- i*linestep, color=\"black\")\n \n ax.legend()\n Analysis.standardise_plot(ax, title = plot_title, xlabel = \"Time (s)\",ylabel = \"Molecule count\")\n \n##############################################################################\n############## OTHER PLOT PARAMETERS YOU WANT#################################\n##############################################################################\n # plt.xlim([0,1000]) #x axis range in seconds\n # plt.ylim(min(y), max(y))\n \n #DASHED LINES\n # plt.axvline(x=1500, linestyle='--', color ='black')\n # plt.axvline(x=1550, linestyle='--', color ='black')\n #plt.axhline(y=80000, linestyle='--', color ='black', label = \"p_ROS_mito Threshold = 80k\")\n #plt.legend()\n##############################################################################\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~End - BSL~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n############################################################################## \n plt.show()\n\ndef plot_stacked_bars(ax, legend, all_data, xs, labels, width):\n\n cum_sum = np.zeros_like(all_data[:,0])\n for i in range(len(labels)):\n data = all_data[:,i]\n rects = ax.bar(xs, data, width, bottom=cum_sum, label=labels[i])\n cum_sum += data \n \ndef create_bar_chart(analysis, places_a, places_a_labels, places_b, places_b_labels, mutation_list, mutation_labels, plot_title):\n# for mutation in mutation_list:\n# for place in places_a:\n# print(place)\n# print(analysis[mutation].mean_token_history_for_places(place)[-1])\n# for mutation in mutation_list:\n# for place in places_b:\n# print(place)\n# print(analysis[mutation].mean_token_history_for_places(place)[-1])\n final_token_count_a = [[analysis[mutation].mean_token_history_for_places(place)[-1] for place in places_a] for mutation in mutation_list]\n final_token_count_b = [[analysis[mutation].mean_token_history_for_places(place)[-1] for place in places_b] for mutation in mutation_list]\n print(np.array(final_token_count_a).shape)\n print(np.array(final_token_count_b).shape)\n final_token_count_a = np.sum(final_token_count_a, 2) # remove dimension 3\n final_token_count_b = np.sum(final_token_count_b, 2) # remove dimension 3\n\n # normalize data\n\n final_token_count_a = final_token_count_a / np.sum(final_token_count_a[0,:])\n final_token_count_b = final_token_count_b / np.sum(final_token_count_b, 1)[:,None]\n\n final_token_count_a *= 100\n final_token_count_b *= 100\n \n width = 0.5\n \n FIGURESIZE = (14,7)\n fig, ax = plt.subplots(1, 1, figsize=FIGURESIZE)\n\n bar_positions_a = np.array(range(len(mutation_list)))\n bar_positions_b = max(bar_positions_a) + 2 + np.array(range(len(mutation_list)))\n \n plot_stacked_bars(ax,legend=mutation_list, all_data=final_token_count_a, xs=bar_positions_a, labels=places_a_labels,width=width)\n plot_stacked_bars(ax,legend=mutation_list, all_data=final_token_count_b, xs=bar_positions_b, labels = places_b_labels,width=width)\n\n # Add some text for labels, title and custom x-axis tick labels, etc.\n ax.set_ylabel('% Molecule Count', fontsize=16)\n ax.set_title(plot_title, fontsize=18)\n ax.set_xticks(np.concatenate((bar_positions_a, bar_positions_b)))\n ax.set_xticklabels(np.concatenate((mutation_labels, mutation_labels)), rotation=-25, ha='left', fontsize=12)\n\n #ax.set_ylim((0,150))\n\n plt.legend(fontsize=14, loc='upper right', bbox_to_anchor=(1.3, 1))\n plt.show()\n 
\n##############################################################################\n################Calcium Relevant Graphs - BSL#################################\n##############################################################################\n\ndef create_histogram(analysis, bins):\n plt.hist(analysis[File1].delay_list_t_B, bins=bins, edgecolor='black', linewidth=1.2)\n print(\"NOTE, these are t_B transitions only\")\n \ndef calc_mean_SD_transition(list_of_transition_delays, transition_id):\n \"\"\"\n So the delays for each transition t_A, t_B t_D were compiled into a list for analysis. We are just calculating the mean and standard deviation for those lists here.\n \"\"\"\n the_mean_t_A = np.mean(list_of_transition_delays)\n SD_t_A = np.std(list_of_transition_delays)\n print(f\"Mean of delay_list {transition_id}:\", np.round(the_mean_t_A, decimals=3), \"SD:\", np.round(100*SD_t_A/the_mean_t_A, decimals=3), \"percent\", len(list_of_transition_delays), \"Counts\") \n print(\"Max Value:\", max(list_of_transition_delays), \"Min Value:\", min(list_of_transition_delays))\n print(\"\")\n\ndef calculate_mean_of_delay(analysis, file):\n print(f\"File:{file}\")\n t_A = 't_A'\n list_of_transition_delays = analysis[file].delay_list_t_A\n calc_mean_SD_transition(list_of_transition_delays, t_A)\n \n t_B = 't_B'\n list_of_transition_delays = analysis[file].delay_list_t_B\n calc_mean_SD_transition(list_of_transition_delays, t_B)\n \n t_D = 't_D'\n list_of_transition_delays = analysis[file].delay_list_t_D\n calc_mean_SD_transition(list_of_transition_delays, t_D) \n \n \ndef create_list_counting_zero_runs(normal_list):\n \"\"\"\n so in calcium, there is an array of zeros and ones. This function counts the length of zeros the span the array, and appends it to a new list and returns the list\n \"\"\"\n list_2 = []\n\n count = 0 \n for index,number in enumerate(normal_list): \n if number == 0:\n count = count+1\n if number ==1 and normal_list[index-1]==0:\n list_2.append(int(count))\n count = 0\n if number == 0 and index == (len(normal_list)-1): #So situations where we reach the end of the list and we are stuck with a zero are still counted.\n list_2.append(int(count))\n #Cut_off_the very first and last element of the list for safety reasons, to deal with potential truncated zero-runs lowering the mean.\n list_2.pop(0)\n list_2.pop() \n\n return list_2\n\ndef calc_and_print_mean_sd_calcium(file, place_id):\n \"\"\"\n This can take a long time since the list is huge.\n data is in a two dimensional form and needs to be converted to a one dimensional list.\n Calculates the Mean number of time steps until that transition contains a one token again and the SD for the place_id over the whole run\n \"\"\"\n data = analysis[file].mean_token_history_for_places([place_id])[0:desired_plotting_steps+1] \n list_of_lists = data.tolist()\n normal_list = [item for sublist in list_of_lists for item in sublist] \n\n zero_runs_count_list = create_list_counting_zero_runs(normal_list)\n mean1 = np.mean(zero_runs_count_list)\n std1 = np.std(zero_runs_count_list)\n print(f\"Mean Delay for {place_id}:\", np.round(mean1, decimals =3), \"timesteps\", len(zero_runs_count_list), \"counts\")\n print(f\"SD for {place_id}: +/-\", np.round(std1, decimals=3), \"timesteps or\", np.round(100*std1/mean1, decimals=3), \"percent\") \n print(\"Max:\", max(zero_runs_count_list), \"Min:\", min(zero_runs_count_list))\n #print(\"The very first element was:\", zero_runs_count_list[0]) \n #print(\"The very last element was: \", 
zero_runs_count_list[len(zero_runs_count_list)-1]) \n print('')\n\n\ndef calculate_TRUE_calcium_stochasticity(file):\n print(f\"File: {file}\")\n place_id = 'p_on4'\n calc_and_print_mean_sd_calcium(file, place_id)\n place_id = 'p_Ca_extra'\n calc_and_print_mean_sd_calcium(file, place_id)\n place_id = 'p_on3'\n calc_and_print_mean_sd_calcium(file, place_id)\n \n \n##############################################################################\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~End - BSL~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n############################################################################## \n\n \n##############################################################################\n################Plotting Commands - BSL#######################################\n##############################################################################\nstart_time = datetime.now()\n\n\n# calculate_TRUE_calcium_stochasticity(File1)\n# calculate_TRUE_calcium_stochasticity(File2)\n\n# create_plot(analysis, \n# input_place_list = ['p_SNCA_olig'], \n# place_labels = [\"\"], \n# mutation_list = [File3], \n# mutation_labels = [File3],\n# plot_title = 'PD - p_SNCA_olig')\n\n\n# create_histogram(analysis, 20)\n\n# calculate_mean_of_delay(analysis, File1)\n# calculate_mean_of_delay(analysis, File2)\n\n\n##############################################################################\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~End - BSL~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#\n############################################################################## \n\n# # Bar charts\n\n# In[ ]:\n\n\n# create_bar_chart(analysis, \n# places_a = ['p_ROS_mito', 'p_Ca_cyto'], \n# places_a_labels = ['p_ROS_mito', 'p_Ca_cyto'], \n# places_b = ['p_RTN3_HMW_dys1','p_RTN3_HMW_dys2','p_RTN3_HMW_lyso'], \n# places_b_labels=['Dystrophic neurites I', 'Dystrophic_neurites II','Lyso'], \n# mutation_list = ['healthy', 'chol600'], \n# mutation_labels = ['healthy', 'chol600'],\n# plot_title = 'PD - RTN3 distribution')\n\n\n# In[ ]:\n\n\n# create_bar_chart(analysis, \n# places_a = ['p_RTN3_axon','p_RTN3_PN'], \n# places_a_labels = ['Axon', 'Perinuclear region'], \n# places_b = ['p_RTN3_HMW_dys1','p_RTN3_HMW_dys2','p_RTN3_HMW_lyso'], \n# places_b_labels=['Dystrophic neurites I', 'Dystrophic_neurites II','Lyso'], \n# mutation_list = ['all_mutations', 'lrrk2', 'DNL','NPT','LAMP2A', 'healthy'], \n# mutation_labels = ['Combined diseased state','LRRK2','LRRK2 + DNL151','Combined diseased state + NPT200','Combined diseased state + LAMP2A', 'Healthy'],\n# plot_title = 'PD - RTN3 distribution and therapeutics')\n\n\n# # Plotting\n\n# ## Energy metabolism \n\n# In[4]:\ncreate_plot(analysis, \n input_place_list = ['p_Ab'], \n place_labels = [\"\"], \n mutation_list = [test], \n mutation_labels = [test],\n plot_title = 'Ab')\n\nexecution_time = datetime.now()-start_time\nprint('\\n\\nPlotting Time:', execution_time)\n\n\n# create_plot(analysis, \n# input_place_list = ['p_27OHchol_intra'], \n# place_labels = [\"\"], \n# mutation_list = ['HFPN_DJ1_6x10e6'], \n# mutation_labels = ['HFPN_DJ1_6x10e6'],\n# plot_title = 'PD - p_27OHchol_intra')\n# create_plot(analysis, \n# input_place_list = ['p_24OHchol_intra'], \n# place_labels = [\"\"], \n# mutation_list = ['HFPN_DJ1_6x10e6'], \n# mutation_labels = ['HFPN_DJ1_6x10e6'],\n# plot_title = 'PD - p_24OHchol_intra')\n# create_plot(analysis, \n# input_place_list = ['p_cas3'], \n# place_labels = [\"\"], \n# mutation_list = ['HFPN_DJ1_6x10e6'], \n# mutation_labels = ['HFPN_DJ1_6x10e6'],\n# plot_title = 'PD - p_cas3')\n\n\n\n\n\n# 
create_plot(analysis, \n# input_place_list = ['p_chol_LE'], \n# place_labels = [\"\"], \n# mutation_list = ['HFPN_Healthy_6x10e6'], \n# mutation_labels = ['HFPN_Healthy_6x10e6'],\n# plot_title = 'PD - p_chol_LE')\n\n# create_plot(analysis, \n# input_place_list = ['p_ApoEchol_extra'], \n# place_labels = [\"\"], \n# mutation_list = ['HFPN_Healthy_6x10e6'], \n# mutation_labels = ['HFPN_Healthy_6x10e6'],\n# plot_title = 'PD - p_ApoEchol_extra')\n\n# create_plot(analysis, \n# input_place_list = ['p_ApoEchol_EE'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_ApoEchol_EE')\n\n# create_plot(analysis, \n# input_place_list = ['p_7HOCA'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_7HOCA')\n\n# create_plot(analysis, \n# input_place_list = ['p_preg'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_preg')\n\n# create_plot(analysis, \n# input_place_list = ['p_24OHchol_extra'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_24OHchol_extra')\n\n# create_plot(analysis, \n# input_place_list = ['p_24OHchol_intra'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_24OHchol_intra')\n\n# create_plot(analysis, \n# input_place_list = ['p_ROS_mito'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_ROS_mito')\n# create_plot(analysis, \n# input_place_list = ['p_H2O_mito'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_H2O_mito')\n# create_plot(analysis, \n# input_place_list = ['p_cas3'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_cas3')\n# create_plot(analysis, \n# input_place_list = ['p_Ca_cyto'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_Ca_cyto')\n# create_plot(analysis, \n# input_place_list = ['p_Ca_mito'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_Ca_mito')\n# create_plot(analysis, \n# input_place_list = ['p_Ca_ER'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_Ca_ER')\n# create_plot(analysis, \n# input_place_list = ['p_24OHchol_intra'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_24OHchol_intra')\n# create_plot(analysis, \n# input_place_list = ['p_24OHchol_intra'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_24OHchol_intra')\n# create_plot(analysis, \n# input_place_list = ['p_24OHchol_intra'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# plot_title = 'PD - p_24OHchol_intra')\n# create_plot(analysis, \n# input_place_list = ['p_24OHchol_intra'], \n# place_labels = [\"\"], \n# mutation_list = ['DJ1_500k_HFPN'], \n# mutation_labels = ['DJ1_500k_HFPN'],\n# 
plot_title = 'PD - p_24OHchol_intra')\n\n\n\n# create_plot(analysis, \n# input_place_list = ['p_cas3'], \n# place_labels = [\"\"], \n# mutation_list = ['testlol'], \n# mutation_labels = ['testlol'],\n# plot_title = 'PD - p_cas3')\n\n# create_plot(analysis, \n# input_place_list = ['p_H2O_mito'], \n# place_labels = [\"\"], \n# mutation_list = ['testlol'], \n# mutation_labels = ['testlol'],\n# plot_title = 'PD - p_H2O_mito')\n\n# create_plot(analysis, \n# input_place_list = ['p_chol_mito'], \n# place_labels = [\"\"], \n# mutation_list = ['testlol'], \n# mutation_labels = ['testlol'],\n# plot_title = 'PD - p_chol_mito')\n\n# # ## Lewy body formation\n\n# # In[ ]:\n\n\n# create_plot(analysis, \n# input_place_list = ['p_ATP'], \n# place_labels = [\"\"], \n# mutation_list = ['healthy', 'chol600'], \n# mutation_labels = ['healthy', 'chol600'],\n# plot_title = 'PD - Lewy body formation')\n\n\n# # ## Chol (LB and cas3)\n\n# # In[ ]:\n\n\n# #THE CORRECT ONE FOR CHOL\n# create_plot(analysis, \n# input_place_list = ['p_LB'], \n# place_labels = [\"\"], \n# mutation_list = ['healthy','gba1_lrrk2','27OHchol','27OH_lrrk2_gba1','ApoEchol','ApoE_lrrk2_gba1'], \n# mutation_labels = ['Healthy','GBA1 + LRRK2','2x 27OH-chol','2x 27OH-chol + LRRK2 + GBA1','2x APOE-chol','2x APOE-chol + LRRK2 + GBA1'],\n# plot_title = 'PD - Lewy body formation and high levels chol')\n# #THE CORRECT ONE FOR CHOL\n# create_plot(analysis, \n# input_place_list = ['p_cas3'], \n# place_labels = [\"\"], \n# mutation_list = ['healthy','gba1_lrrk2','27OHchol','27OH_lrrk2_gba1','ApoEchol','ApoE_lrrk2_gba1'], \n# mutation_labels = ['Healthy','GBA1 + LRRK2','2x 27OH-chol','2x 27OH-chol + LRRK2 + GBA1','2x APOE-chol','2x APOE-chol + LRRK2 + GBA1'],\n# plot_title = 'PD - Active Caspase-3 and high levels chol')\n\n\n# # ## Therapeutics\n\n# # In[ ]:\n\n\n# create_plot(analysis, \n# input_place_list = ['p_cas3'], \n# place_labels = [\"\"], \n# mutation_list = ['all_mutations', 'lrrk2', 'DNL','NPT','LAMP2A', 'healthy'], \n# mutation_labels = ['Combined diseased state','LRRK2','LRRK2 + DNL151','Combined diseased state + NPT200','Combined diseased state + LAMP2A', 'Healthy'],\n# plot_title = 'PD - Active Caspase-3 and therapeutics')\n# # create_plot(analysis, \n# # input_place_list = ['p_SNCA_olig'], \n# # place_labels = [\"\"], \n# # mutation_list = ['all_mutations', 'lrrk2', 'DNL','NPT','LAMP2A','healthy'], \n# # mutation_labels = ['Combined diseased state','LRRK2','LRRK2 + DNL151','Combined diseased state + NPT200','Combined diseased state + LAMP2A', 'Healthy'],\n# # plot_title = 'PD - SNCA oligomerisation and therapeutics')\n# create_plot(analysis, \n# input_place_list = ['p_LB'], \n# place_labels = [\"\"], \n# mutation_list = ['all_mutations', 'lrrk2', 'DNL','NPT','LAMP2A','healthy'], \n# mutation_labels = ['Combined diseased state','LRRK2','LRRK2 + DNL151','Combined diseased state + NPT200','Combined diseased state + LAMP2A', 'Healthy'],\n# plot_title = 'PD - Lewy body formation and therapeutics')\n\n\n# # # Computing the mean\n\n# # In[ ]:\n\n\n# mean_healthy = np.mean(analysis['healthy'].token_storage[:,50000:,analysis['healthy'].place_dict[\"p_ATP\"]])\n# print(\"healthy\", mean_healthy)\n# mean_lrrk2 = np.mean(analysis['lrrk2'].token_storage[:,50000:,analysis['lrrk2'].place_dict[\"p_ATP\"]])\n# print(\"lrrk2\", mean_lrrk2)\n\n\n# # In[ ]:\n\n\n# # create_plot(['p_LB'],\"Lewy body formation\")\n\n\n# # In[ ]:\n\n\n# # create_plot(['p_SNCA_olig'],\"SNCA olgiomerisation\")\n\n\n# # In[ ]:\n\n\n\n# ## Lewy body formation\n\n# In[ ]:\n\n\n\n# 
## Chol (LB and cas3)\n\n# In[ ]:\n\n\n#THE CORRECT ONE FOR CHOL\n# create_plot(analysis, \n# input_place_list = ['p_LB'], \n# place_labels = [\"\"], \n# mutation_list = ['healthy','gba1_lrrk2','27OHchol','27OH_lrrk2_gba1','ApoEchol','ApoE_lrrk2_gba1'], \n# mutation_labels = ['Healthy','GBA1 + LRRK2','2x 27OH-chol','2x 27OH-chol + LRRK2 + GBA1','2x APOE-chol','2x APOE-chol + LRRK2 + GBA1'],\n# plot_title = 'PD - Lewy body formation and high levels chol')\n# #THE CORRECT ONE FOR CHOL\n# create_plot(analysis, \n# input_place_list = ['p_cas3'], \n# place_labels = [\"\"], \n# mutation_list = ['healthy','gba1_lrrk2','27OHchol','27OH_lrrk2_gba1','ApoEchol','ApoE_lrrk2_gba1'], \n# mutation_labels = ['Healthy','GBA1 + LRRK2','2x 27OH-chol','2x 27OH-chol + LRRK2 + GBA1','2x APOE-chol','2x APOE-chol + LRRK2 + GBA1'],\n# plot_title = 'PD - Active Caspase-3 and high levels chol')\n\n\n# # ## Therapeutics\n\n# # In[ ]:\n\n\n# create_plot(analysis, \n# input_place_list = ['p_cas3'], \n# place_labels = [\"\"], \n# mutation_list = ['all_mutations', 'lrrk2', 'DNL','NPT','LAMP2A', 'healthy'], \n# mutation_labels = ['Combined diseased state','LRRK2','LRRK2 + DNL151','Combined diseased state + NPT200','Combined diseased state + LAMP2A', 'Healthy'],\n# plot_title = 'PD - Active Caspase-3 and therapeutics')\n# # create_plot(analysis, \n# # input_place_list = ['p_SNCA_olig'], \n# # place_labels = [\"\"], \n# # mutation_list = ['all_mutations', 'lrrk2', 'DNL','NPT','LAMP2A','healthy'], \n# # mutation_labels = ['Combined diseased state','LRRK2','LRRK2 + DNL151','Combined diseased state + NPT200','Combined diseased state + LAMP2A', 'Healthy'],\n# # plot_title = 'PD - SNCA oligomerisation and therapeutics')\n# create_plot(analysis, \n# input_place_list = ['p_LB'], \n# place_labels = [\"\"], \n# mutation_list = ['all_mutations', 'lrrk2', 'DNL','NPT','LAMP2A','healthy'], \n# mutation_labels = ['Combined diseased state','LRRK2','LRRK2 + DNL151','Combined diseased state + NPT200','Combined diseased state + LAMP2A', 'Healthy'],\n# plot_title = 'PD - Lewy body formation and therapeutics')\n\n\n# # # Computing the mean\n\n# # In[ ]:\n\n\n# mean_healthy = np.mean(analysis['healthy'].token_storage[:,50000:,analysis['healthy'].place_dict[\"p_ATP\"]])\n# print(\"healthy\", mean_healthy)\n# mean_lrrk2 = np.mean(analysis['lrrk2'].token_storage[:,50000:,analysis['lrrk2'].place_dict[\"p_ATP\"]])\n# print(\"lrrk2\", mean_lrrk2)\n\n\n# In[ ]:\n\n\n# create_plot(['p_LB'],\"Lewy body formation\")\n\n\n# In[ ]:\n\n\n# create_plot(['p_SNCA_olig'],\"SNCA olgiomerisation\")\n\n\n# In[ ]:\n\n\n\n\n\n\n\n# # In[ ]:\n\n\n# create_plot(['p_chol_LE'],\"Cholesterol late endosomes\")\n\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "numpy.zeros_like", "numpy.sum", "matplotlib.pyplot.legend", "numpy.ones", "numpy.concatenate", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.pyplot.hist", "numpy.round", "numpy.std", "numpy.convolve", "numpy.array", "numpy.mean" ] ]
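A worked example for create_list_counting_zero_runs from the script above: the raw interior zero-run lengths here are [2, 3, 1], and the pop(0)/pop() guards drop the first and last runs as potentially truncated.

normal_list = [0, 0, 1, 0, 0, 0, 1, 0]   # 0 = no token, 1 = token present
runs = create_list_counting_zero_runs(normal_list)
print(runs)                               # [3]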
jielyugt/calibration
[ "1b9be673fb7ff8cf481e875153b1a7649e3b6e67" ]
[ "scripts/lenet/train_cifar100.py" ]
[ "# Training procedure for LeNet-5 CIFAR-100.\n#Code base from https://github.com/BIGBALLON/cifar-10-cnn/blob/master/1_Lecun_Network/LeNet_dp_da_wd_keras.py \n\nimport keras\nimport numpy as np\nfrom keras import optimizers\nfrom keras.datasets import cifar100\nfrom keras.models import Sequential\nfrom keras.layers import Conv2D, Dense, Flatten, MaxPooling2D\nfrom keras.callbacks import LearningRateScheduler, TensorBoard\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2\nfrom sklearn.model_selection import train_test_split\nimport pickle\n\nbatch_size = 128\nepochs = 300\niterations = 45000 // batch_size\nnum_classes = 100\nweight_decay = 0.0001\nseed = 333\nN = 1\nprint(\"N:\", N)\n\n\ndef build_model(n=1, num_classes = 10):\n \"\"\"\n parameters:\n n: (int) scaling for model (n times filters in Conv2D and nodes in Dense)\n \"\"\"\n model = Sequential()\n model.add(Conv2D(n*6, (5, 5), padding='valid', activation = 'relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay), input_shape=(32,32,3)))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Conv2D(n*16, (5, 5), padding='valid', activation = 'relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay)))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Flatten())\n model.add(Dense(n*120, activation = 'relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay) ))\n model.add(Dense(n*84, activation = 'relu', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay) ))\n model.add(Dense(num_classes, activation = 'softmax', kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay) ))\n sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True)\n model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n return model\n\ndef scheduler(epoch):\n if epoch <= 60:\n return 0.05\n if epoch <= 120:\n return 0.01\n if epoch <= 160: \n return 0.002\n return 0.0004\n \n# Per channel mean and std normalization\ndef color_preprocessing(x_train, x_val, x_test):\n \n x_train = x_train.astype('float32')\n x_val = x_val.astype('float32') \n x_test = x_test.astype('float32')\n \n mean = np.mean(x_train, axis=(0,1,2)) # Per channel mean\n std = np.std(x_train, axis=(0,1,2))\n x_train = (x_train - mean) / std\n x_val = (x_val - mean) / std\n x_test = (x_test - mean) / std\n \n return x_train, x_val, x_test\n\nif __name__ == '__main__':\n\n # load data\n (x_train, y_train), (x_test, y_test) = cifar100.load_data()\n \n x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed) # random_state = seed\n x_train45, x_val, x_test = color_preprocessing(x_train45, x_val, x_test)\n \n y_train45 = keras.utils.to_categorical(y_train45, num_classes)\n y_val = keras.utils.to_categorical(y_val, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n\n # build network\n model = build_model(n=N, num_classes = num_classes)\n print(model.summary())\n\n # set callback\n change_lr = LearningRateScheduler(scheduler)\n cbks = [change_lr]\n\n # using real-time data augmentation\n print('Using real-time data augmentation.')\n datagen = ImageDataGenerator(horizontal_flip=True,\n width_shift_range=0.125,height_shift_range=0.125,fill_mode='constant',cval=0.)\n\n datagen.fit(x_train45)\n\n # start traing \n hist = model.fit_generator(datagen.flow(x_train45, y_train45,batch_size=batch_size, shuffle=True),\n steps_per_epoch=iterations,\n epochs=epochs,\n 
callbacks=cbks,\n validation_data=(x_val, y_val))\n # save model\n model.save('lenet_c100.h5')\n \n print(\"Get test accuracy:\")\n loss, accuracy = model.evaluate(x_test, y_test, verbose=0)\n print(\"Test: accuracy1 = %f ; loss1 = %f\" % (accuracy, loss))\n\n print(\"Pickle models history\")\n with open('hist_lenet_c100.p', 'wb') as f:\n pickle.dump(hist.history, f)" ]
[ [ "numpy.std", "sklearn.model_selection.train_test_split", "numpy.mean" ] ]
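A quick check of the step schedule above; Keras' LearningRateScheduler calls scheduler(epoch) once per epoch, so printing the boundary epochs makes the decay explicit.

for epoch in (0, 60, 61, 120, 121, 160, 161, 299):
    print(epoch, scheduler(epoch))
# epochs 0-60 -> 0.05, 61-120 -> 0.01, 121-160 -> 0.002, afterwards 0.0004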
uaca/deepy
[ "090fbad22a08a809b12951cd0d4984f5bd432698" ]
[ "deepy/networks/network.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport cPickle as pickle\nimport gzip\nimport logging as loggers\nimport os\nfrom threading import Thread\n\nimport filelock\nimport numpy as np\nimport theano\nimport theano.tensor as T\n\nimport deepy\nfrom deepy.utils.map_dict import MapDict\nfrom deepy.layers.block import Block\nfrom deepy.layers.layer import NeuralLayer\nfrom deepy.utils import dim_to_var\nfrom deepy.trainers.train_logger import TrainLogger\n\nlogging = loggers.getLogger(\"network\")\nsave_logger = loggers.getLogger(\"saving\")\n\nDEEPY_MESSAGE = \"deepy version = %s\" % deepy.__version__\n\n\ndef save_network_params(params, path):\n lock = filelock.FileLock(path)\n with lock:\n if path.endswith('gz'):\n opener = gzip.open if path.lower().endswith('.gz') else open\n handle = opener(path, 'wb')\n pickle.dump(params, handle)\n handle.close()\n elif path.endswith('uncompressed.npz'):\n np.savez(path, *params)\n elif path.endswith('.npz'):\n np.savez_compressed(path, *params)\n else:\n raise Exception(\"File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'\" % path)\n\nclass NeuralNetwork(object):\n \"\"\"\n The base class of neural networks.\n \"\"\"\n\n def __init__(self, input_dim, input_tensor=None):\n logging.info(DEEPY_MESSAGE)\n self.input_dim = input_dim\n self.input_tensor = input_tensor\n self.parameter_count = 0\n\n self.parameters = []\n self.free_parameters = []\n\n self.training_updates = []\n self.updates = []\n\n self.input_variables = []\n self.target_variables = []\n\n self.training_callbacks = []\n self.testing_callbacks = []\n self.epoch_callbacks = []\n\n self.layers = []\n self._test_outputs = []\n self._test_output = None\n self._output_keys = None\n\n self._hidden_outputs = []\n self.training_monitors = []\n self.testing_monitors = []\n\n self.setup_variables()\n self.train_logger = TrainLogger()\n\n def stack_layer(self, layer, no_setup=False):\n \"\"\"\n Stack a neural layer.\n :type layer: NeuralLayer\n :param no_setup: whether the layer is already initialized\n \"\"\"\n if layer.name:\n layer.name += \"%d\" % (len(self.layers) + 1)\n if not self.layers:\n layer.init(self.input_dim, no_prepare=no_setup)\n else:\n layer.init(self.layers[-1].output_dim, no_prepare=no_setup)\n self._output = layer.compute_tensor(self._output)\n self._test_output = layer.compute_tensor(self._test_output)\n self._hidden_outputs.append(self._output)\n self.register_layer(layer)\n self.layers.append(layer)\n\n def register(self, *layers):\n \"\"\"\n Register multiple layers as the components of the network.\n The parameter of those layers will be trained.\n But the output of the layer will not be stacked.\n \"\"\"\n for layer in layers:\n self.register_layer(layer)\n\n def register_layer(self, layer):\n \"\"\"\n Register the layer so that it's param will be trained.\n But the output of the layer will not be stacked.\n \"\"\"\n if type(layer) == Block:\n layer.fix()\n self.parameter_count += layer.parameter_count\n self.parameters.extend(layer.parameters)\n self.free_parameters.extend(layer.free_parameters)\n self.training_monitors.extend(layer.training_monitors)\n self.testing_monitors.extend(layer.testing_monitors)\n self.updates.extend(layer.updates)\n self.training_updates.extend(layer.training_updates)\n self.input_variables.extend(layer.external_inputs)\n self.target_variables.extend(layer.external_targets)\n\n self.training_callbacks.extend(layer.training_callbacks)\n self.testing_callbacks.extend(layer.testing_callbacks)\n 
self.epoch_callbacks.extend(layer.epoch_callbacks)\n\n def first_layer(self):\n \"\"\"\n Return first layer.\n \"\"\"\n return self.layers[0] if self.layers else None\n\n def stack(self, *layers):\n \"\"\"\n Stack layers.\n \"\"\"\n for layer in layers:\n self.stack_layer(layer)\n return self\n\n def prepare_training(self):\n \"\"\"\n This function will be called before training.\n \"\"\"\n self.report()\n\n def monitor_layer_outputs(self):\n \"\"\"\n Monitoring the outputs of each layer.\n Useful for troubleshooting convergence problems.\n \"\"\"\n for layer, hidden in zip(self.layers, self._hidden_outputs):\n self.training_monitors.append(('mean(%s)' % (layer.name), abs(hidden).mean()))\n\n @property\n def all_parameters(self):\n \"\"\"\n Return all parameters.\n \"\"\"\n params = []\n params.extend(self.parameters)\n params.extend(self.free_parameters)\n\n return params\n\n def setup_variables(self):\n \"\"\"\n Set up variables.\n \"\"\"\n if self.input_tensor:\n if type(self.input_tensor) == int:\n x = dim_to_var(self.input_tensor, name=\"x\")\n else:\n x = self.input_tensor\n else:\n x = T.matrix('x')\n self.input_variables.append(x)\n self._output = x\n self._test_output = x\n\n def _compile(self):\n if not hasattr(self, '_compute'):\n if isinstance(self._test_outputs, dict):\n self._output_keys, out_tensors = map(list, zip(*self._test_outputs.items()))\n if self._test_output:\n self._output_keys.insert(0, \"cost\")\n out_tensors.insert(0, self._test_output)\n elif isinstance(self._test_outputs, list) and self._test_outputs:\n out_tensors = self._test_outputs\n if self._test_output:\n out_tensors.insert(0, self._test_output)\n else:\n out_tensors = self._test_output\n self._compute = theano.function(\n filter(lambda x: x not in self.target_variables, self.input_variables),\n out_tensors, updates=self.updates, allow_input_downcast=True)\n\n def compute(self, *x):\n \"\"\"\n Return network output.\n \"\"\"\n self._compile()\n outs = self._compute(*x)\n if self._output_keys:\n return MapDict(dict(zip(self._output_keys, outs)))\n else:\n return outs\n\n @property\n def output(self):\n \"\"\"\n Return output variable.\n \"\"\"\n return self._output\n\n @property\n def test_output(self):\n \"\"\"\n Return output variable in test time.\n \"\"\"\n return self._test_output\n\n @property\n def cost(self):\n \"\"\"\n Return cost variable.\n \"\"\"\n return T.constant(0)\n\n @property\n def test_cost(self):\n \"\"\"\n Return cost variable in test time.\n \"\"\"\n return self.cost\n\n def save_params(self, path, new_thread=False):\n \"\"\"\n Save parameters to file.\n \"\"\"\n save_logger.info(path)\n param_variables = self.all_parameters\n params = [p.get_value().copy() for p in param_variables]\n if new_thread:\n thread = Thread(target=save_network_params, args=(params, path))\n thread.start()\n else:\n save_network_params(params, path)\n self.train_logger.save(path)\n\n def load_params(self, path, exclude_free_params=False):\n \"\"\"\n Load parameters from file.\n \"\"\"\n if not os.path.exists(path): return;\n logging.info(\"loading parameters from %s\" % path)\n # Decide which parameters to load\n if exclude_free_params:\n params_to_load = self.parameters\n else:\n params_to_load = self.all_parameters\n # Load parameters\n if path.endswith(\".gz\"):\n opener = gzip.open if path.lower().endswith('.gz') else open\n handle = opener(path, 'rb')\n saved_params = pickle.load(handle)\n handle.close()\n # Write parameters\n for target, source in zip(params_to_load, saved_params):\n logging.info('%s: 
setting value %s', target.name, source.shape)\n target.set_value(source)\n elif path.endswith(\".npz\"):\n arrs = np.load(path)\n # Write parameters\n for target, idx in zip(params_to_load, range(len(arrs.keys()))):\n source = arrs['arr_%d' % idx]\n logging.info('%s: setting value %s', target.name, source.shape)\n target.set_value(source)\n else:\n raise Exception(\"File format of %s is not supported, use '.gz' or '.npz' or '.uncompressed.gz'\" % path)\n\n self.train_logger.load(path)\n\n def report(self):\n \"\"\"\n Print network statistics.\n \"\"\"\n logging.info(\"network inputs: %s\", \" \".join(map(str, self.input_variables)))\n logging.info(\"network targets: %s\", \" \".join(map(str, self.target_variables)))\n logging.info(\"network parameters: %s\", \" \".join(map(str, self.all_parameters)))\n logging.info(\"parameter count: %d\", self.parameter_count)\n\n def epoch_callback(self):\n \"\"\"\n Callback for each epoch.\n \"\"\"\n for cb in self.epoch_callbacks:\n cb()\n\n def training_callback(self):\n \"\"\"\n Callback for each training iteration.\n \"\"\"\n for cb in self.training_callbacks:\n cb()\n\n def testing_callback(self):\n \"\"\"\n Callback for each testing iteration.\n \"\"\"\n for cb in self.testing_callbacks: # was self.training_callbacks, a copy-paste bug\n cb()\n" ]
[ [ "numpy.load", "numpy.savez", "numpy.savez_compressed" ] ]
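A small sketch of the extension-based dispatch in save_network_params above, assuming a plain list of numpy arrays; the file names are illustrative.

import numpy as np

params = [np.zeros((3, 3)), np.ones(5)]
save_network_params(params, 'model.npz')               # np.savez_compressed
save_network_params(params, 'model.uncompressed.npz')  # np.savez
save_network_params(params, 'model.gz')                # gzipped pickle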
mknz/dsr-road-roughness-prediction
[ "5f56b6ba5da70a09f2c967b7f32c740072e20ed1" ]
[ "eval_seg.py" ]
[ "'''Evaluation script'''\nimport argparse\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom tqdm import tqdm\n\nimport torch\n\nfrom torch.utils.data import DataLoader\n\nfrom PIL import Image\n\nfrom albumentations import Compose\nfrom albumentations import CenterCrop\n\nfrom torchvision.transforms.functional import to_pil_image\n\nfrom road_roughness_prediction.segmentation import datasets\nfrom road_roughness_prediction.segmentation import models\nfrom road_roughness_prediction.segmentation import logging\nfrom road_roughness_prediction.segmentation import utils\nimport road_roughness_prediction.tools.torch as torch_tools\n\n\ndef evaluate(net, loader: DataLoader, criterion, device, save_dir, category_type):\n\n net.eval()\n losses = []\n image_writer = ImageWriter(save_dir, category_type)\n with torch.no_grad():\n for batch in tqdm(loader):\n X = batch['X'].to(device)\n Y = batch['Y'].to(device)\n out = net.forward(X)\n losses.append(criterion(out, Y).item())\n image_writer.write_batch_images(batch, out.cpu())\n\n mean_loss = np.mean(losses)\n print(f'loss: {mean_loss:.4f}')\n\n\nclass ImageWriter:\n\n def __init__(self, save_dir: Path, category_type):\n self._counter = 0\n self.category_type = category_type\n self.is_binary = category_type == datasets.surface_types.BinaryCategory\n self.input_dir = save_dir / 'input'\n self.output_dir = save_dir / 'output'\n self.target_dir = save_dir / 'target'\n self.blend_output_dir = save_dir / 'blend_output'\n self.blend_target_dir = save_dir / 'blend_target'\n\n dirs = [\n self.input_dir,\n self.output_dir,\n self.target_dir,\n self.blend_output_dir,\n self.blend_target_dir,\n ]\n\n for dir_ in dirs:\n if not dir_.exists():\n dir_.mkdir()\n\n def write_batch_images(self, batch, out):\n if self.is_binary:\n self._write_batch_images_binary(batch, out)\n else:\n self._write_batch_images_multi(batch, out)\n\n def _write_batch_images_binary(self, batch, out):\n '''Write batch-wise data into images'''\n\n X = batch['X']\n Y = batch['Y']\n\n # out: [n_batch, 1, height, width]\n out_seg = (np.array(out.squeeze(dim=1)) > 0.5).astype(np.uint8)\n\n n_batches = X.shape[0]\n for i in range(n_batches):\n file_name = f'{self._counter:05d}'\n\n input_img = to_pil_image(logging.normalize(X[i, ::]))\n save_path = self.input_dir / (file_name + '.jpg')\n input_img.save(save_path)\n\n out_seg_img = out_seg[i, ::]\n out_seg_index_img = utils.create_index_image(out_seg_img)\n save_path = self.output_dir / (file_name + '.png')\n out_seg_index_img.save(save_path)\n\n target_img = np.array(Y[i, ::].squeeze()).astype(np.uint8)\n target_index_img = utils.create_index_image(target_img)\n save_path = self.target_dir / (file_name + '.png')\n target_index_img.save(save_path)\n\n blend_output_img = self._blend_image(input_img, out_seg_index_img)\n save_path = self.blend_output_dir / (file_name + '.jpg')\n blend_output_img.save(save_path)\n\n blend_target_img = self._blend_image(input_img, target_index_img)\n save_path = self.blend_target_dir / (file_name + '.jpg')\n blend_target_img.save(save_path)\n\n self._counter += 1\n\n def _write_batch_images_multi(self, batch, out):\n '''Write batch-wise data into images'''\n\n X = batch['X']\n Y = batch['Y']\n\n # out: [n_batch, n_class, height, width]\n out_seg = out.argmax(1)\n\n n_batches = X.shape[0]\n for i in range(n_batches):\n file_name = f'{self._counter:05d}'\n\n input_img = to_pil_image(logging.normalize(X[i, ::]))\n save_path = self.input_dir / (file_name + '.jpg')\n input_img.save(save_path)\n\n out_seg_img = 
np.array(out_seg[i, ::]).astype(np.uint8)\n out_seg_index_img = utils.create_index_image(out_seg_img)\n save_path = self.output_dir / (file_name + '.png')\n out_seg_index_img.save(save_path)\n\n target_img = np.array(Y[i, ::]).astype(np.uint8)\n target_index_img = utils.create_index_image(target_img)\n save_path = self.target_dir / (file_name + '.png')\n target_index_img.save(save_path)\n\n blend_output_img = self._blend_image(input_img, out_seg_index_img)\n save_path = self.blend_output_dir / (file_name + '.jpg')\n blend_output_img.save(save_path)\n\n blend_target_img = self._blend_image(input_img, target_index_img)\n save_path = self.blend_target_dir / (file_name + '.jpg')\n blend_target_img.save(save_path)\n\n self._counter += 1\n\n def _blend_image(self, original, segmented):\n blend = Image.blend(original.convert('RGB'), segmented.convert('RGB'), alpha=0.2)\n return blend\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--weight-path', required=True)\n parser.add_argument('--image-dirs', required=True, type=str, nargs='+')\n parser.add_argument('--mask-dirs', required=True, type=str, nargs='+')\n parser.add_argument('--model-name', type=str, default='unet11')\n parser.add_argument('--dataset-type', type=str, default='base')\n parser.add_argument('--save-path', default='forward')\n parser.add_argument('--category-type', default='binary', choices=['binary', 'simple'])\n parser.add_argument('--cpu', action='store_true')\n parser.add_argument('--device-id', type=int, default=0)\n parser.add_argument('--seed', type=int, default=1)\n parser.add_argument('--input-size', type=int, nargs=2, default=(640, 640))\n parser.add_argument('--jaccard-weight', type=float, default=0.3)\n\n args = parser.parse_args()\n print(args)\n\n image_dirs = [Path(p) for p in args.image_dirs]\n mask_dirs = [Path(p) for p in args.mask_dirs]\n for data_dir in (image_dirs + mask_dirs):\n assert data_dir.exists(), f'{str(data_dir)} does not exist.'\n\n device = torch_tools.get_device(args.cpu, args.device_id)\n torch_tools.set_seeds(args.seed, device)\n\n weight_path = Path(args.weight_path)\n\n category_type = datasets.surface_types.from_string(args.category_type)\n\n save_path = Path(args.save_path)\n if not save_path.exists():\n save_path.mkdir(parents=True)\n\n net = models.load_model(args.model_name, category_type).to(device)\n state_dict = torch.load(weight_path, map_location=device)\n net.load_state_dict(state_dict=state_dict)\n\n input_size = args.input_size\n\n transform = Compose([\n CenterCrop(*input_size),\n ])\n\n dataset = datasets.create_dataset(\n args.dataset_type,\n image_dirs,\n mask_dirs,\n category_type,\n transform,\n )\n\n loader = DataLoader(dataset, batch_size=1, shuffle=False)\n criterion = models.loss.get_criterion(category_type, args.jaccard_weight)\n evaluate(net, loader, criterion, device, save_path, category_type)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "torch.no_grad", "numpy.array", "numpy.mean" ] ]
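The overlay trick from ImageWriter._blend_image above in isolation, assuming two same-sized images on disk; PIL's Image.blend requires matching sizes and modes, and alpha=0.2 keeps the input image dominant.

from PIL import Image

original = Image.open('input.jpg').convert('RGB')    # illustrative paths
segmented = Image.open('mask.png').convert('RGB')
blend = Image.blend(original, segmented, alpha=0.2)  # 80% original, 20% mask
blend.save('blend.jpg')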
joaquimlyrio/feat-visualization
[ "47ead5044d1239dba1133b7ea812b2ed7f2564dc" ]
[ "lib/ConvDeconvDataSet2.py" ]
[ "\n# Imports\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport random\nimport math\nfrom scipy.misc import imsave\n\n###\n### Convolutional Neural Network (CNN) for MNIST\n###\nclass CnnData2: ##### OBS: only works if stride size = filter size of pooling layer\n \n def __init__( self, session, n_in = 128, n_out = 3,\n filterSizeConv1 = 5, nFiltersConv1 = 32, \n filterSizeConv2 = 5, nFiltersConv2 = 32,\n filterSizeConv3 = 5, nFiltersConv3 = 64, \n filterSizePool1 = 2, strideFilter1 = 2,\n filterSizePool2 = 2, strideFilter2 = 2,\n filterSizePool3 = 2, strideFilter3 = 2,\n nChannels = 3, fcUnits = 1024, mode = True ):\n\n \t# instantiate session\n self.session = session\n self.n_in = n_in # number of pixels\n self.n_out = n_out # number of classes\n self.mode = mode #True to train\n self.nChannels = nChannels # number of channels (1=grayscale;3=colored)\n\n # convolution filter sizes\n self.filterSizeConv1 = filterSizeConv1\n self.filterSizeConv2 = filterSizeConv2\n self.filterSizeConv3 = filterSizeConv3\n \n # number of filters of each convolutional layer\n self.nFiltersConv1 = nFiltersConv1\n self.nFiltersConv2 = nFiltersConv2\n self.nFiltersConv3 = nFiltersConv3\n\n # pooling layer filter sizes\n self.filterSizePool1 = filterSizePool1\n self.filterSizePool2 = filterSizePool2\n self.filterSizePool3 = filterSizePool3\n\n # pooling layer stride\n self.strideFilter1 = strideFilter1\n self.strideFilter2 = strideFilter2\n self.strideFilter3 = strideFilter3\n\n # data placeholders\n #self.x = tf.placeholder(tf.float32, [None, n_in, n_in, nChannels], name='x')\n self.x = tf.placeholder(tf.float32, [None, int(n_in * n_in * nChannels )], name='x')\n self.y = tf.placeholder(tf.float32, [None, n_out], name='y')\n #self.x_in = tf.reshape(self.x, [-1, self.n_in * self.n_in])\n self.W_c1 = tf.get_variable( 'W_c1', shape = [ filterSizeConv1, filterSizeConv1, nChannels, nFiltersConv1 ] )\n self.W_c2 = tf.get_variable( 'W_c2', shape = [ filterSizeConv2, filterSizeConv2, nFiltersConv1, nFiltersConv2 ] )\n self.W_c3 = tf.get_variable( 'W_c3', shape = [ filterSizeConv3, filterSizeConv3, nFiltersConv2, nFiltersConv3 ] )\n\n ##\n ## Network Architecture\n ##\n\n # Input Layer\n self.input_layer = tf.reshape(self.x, [-1, self.n_in, self.n_in, self.nChannels])\n\n #\n # Convolutional Layer #1\n #\n\n # filter\n self.conv1 = tf.nn.conv2d(\n input = self.input_layer,\n filter = self.W_c1,\n padding = \"SAME\",\n strides = [1,1,1,1] )\n\n # relu\n self.relu1 = tf.nn.relu( self.conv1 )\n\n #\n # Pooling Layer #1\n #\n self.pool1 = tf.layers.max_pooling2d(inputs=self.relu1, pool_size=[self.filterSizePool1, self.filterSizePool1], strides=self.strideFilter1)\n\n\n #\n # Convolutional Layer #2\n #\n\n # filter b\n self.conv2 = tf.nn.conv2d(\n input = self.pool1,\n filter = self.W_c2,\n padding = \"SAME\",\n strides = [1,1,1,1] )\n\n # relu\n self.relu2 = tf.nn.relu( self.conv2 )\n\n #\n # Pooling layer #2\n #\n self.pool2 = tf.layers.max_pooling2d(inputs=self.relu2, pool_size=[self.filterSizePool2, self.filterSizePool2], strides=self.strideFilter2)\n\n\n #\n # Convolutional Layer #3\n #\n\n # filter\n self.conv3 = tf.nn.conv2d(\n input = self.pool2,\n filter = self.W_c3,\n padding = \"SAME\",\n strides = [1,1,1,1] )\n\n # relu\n self.relu3 = tf.nn.relu( self.conv3 )\n\n #\n # Pooling layer #3\n #\n self.pool3 = tf.layers.max_pooling2d(inputs=self.relu3, pool_size=[self.filterSizePool3, self.filterSizePool3], strides=self.strideFilter3)\n\n\n #\n # Dense Layer ---> PARAMETRIZE! 
change this 7\n #\n nReshape = (self.n_in/filterSizePool1/filterSizePool2/filterSizePool3) * (self.n_in/filterSizePool1/filterSizePool2/filterSizePool3) * nFiltersConv3\n pool3_flat = tf.reshape(self.pool3, [-1, int(nReshape)])\n dense = tf.layers.dense(inputs=pool3_flat, units=fcUnits, activation=tf.nn.relu)\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.3, training = self.mode )\n\n # Logits Layer\n self.logits = tf.layers.dense(inputs=dropout, units = self.n_out)\n self.q = tf.argmax(input = self.logits, axis = 1) # leave 1?\n \n # Output Layer\n onehot_labels = tf.one_hot( indices = tf.cast(self.y, tf.int32), depth = self.n_out )\n self.loss = tf.nn.softmax_cross_entropy_with_logits(\n labels = self.y, logits = self.logits )\n\n self.train_step = tf.train.AdamOptimizer(1e-3).minimize(self.loss)\n\n \n # method to compute y given x\n def compute(self, x):\n return self.session.run(self.q, feed_dict={self.x:np.reshape(x,[-1,int(self.n_in*self.n_in*self.nChannels)])})\n #return self.session.run(self.q, feed_dict={self.x:np.reshape(x,[-1, self.n_in, self.n_in, self.nChannels])})\n \n # method to train network\n def train(self, x_batch, y_batch): \n # take a training step\n #_ = self.session.run(self.train_step, feed_dict={self.x: x_batch, self.y: y_batch})\n _ = self.session.run(self.train_step, feed_dict={self.x:np.reshape(x_batch,[-1,int(self.n_in*self.n_in*self.nChannels)]), self.y: y_batch})\n\n # accessor method for output after pooling layers\n def getPools(self):\n return ( self.pool1, self.pool2, self.pool3 )\n\n # accessor method for output after convolutional layers\n def getConvs(self):\n \treturn ( self.conv1, self.conv2, self.conv3 )\n\n # accessor method for loss\n def getLoss(self):\n return self.loss\n\n # accessor method to get filter weights of convolutional layers\n def getWeights(self):\n return (self.W_c1, self.W_c2, self.W_c3 )\n\n # method to initialize filter weights (static: uses no instance state)\n @staticmethod\n def initWeight(shape):\n weights = tf.truncated_normal(shape,stddev=0.1)\n return tf.Variable(weights)\n\n # method to instantiate deconvolutional neural net\n def createDeconvNet(self, inputImage, inputLabel):\n return CnnData2.DeconvData2( self, self.session, inputImage, inputLabel )\n\n # saver method to save trained cnn in disk \n def netSaver(self, savePath):\n saver = tf.train.Saver()\n saver.save(self.session, savePath)\n print(\"Model saved in file: %s\" % savePath)\n\n # loader method to restore weights of a pretrained cnn\n def netLoader(self, loadPath):\n loader = tf.train.Saver({\"W_c1\":self.W_c1, \"W_c2\":self.W_c2, \"W_c3\":self.W_c3})\n restoredModel= loader.restore(self.session, loadPath)\n print(\"Model restored from %s\" % loadPath)\n\n\n def activate(self, layer, image, sess):\n# \"\"\"\n# Within a tensorflow session, calls plotfilter\n# to display the activations of trained filters in a specific layer\n# after passing an image.\n#\n# Parameters\n# ----\n# layer: int\n# image: ndarray of length 49152 (= 128 * 128 * 3)\n# \"\"\"\n \n \n conv_layer = sess.run(layer, feed_dict={self.x:np.reshape(image, [ 1, 49152], order='F')})\n \n self.plotfilter(conv_layer)\n \n return conv_layer\n \n def plotfilter(self, conv_layer):\n# \"\"\"\n \n\n# Parameters\n# ----\n# conv_layer = [?, height, width, n_filters] tensor\n# \"\"\"\n# \n filters=conv_layer.shape[3]\n plt.figure(1,figsize=(25,25))\n n_columns = 6\n n_rows = math.ceil(filters / n_columns) + 1\n for i in range(filters):\n plt.subplot(n_rows, n_columns, i+1)\n plt.title('Filter ' + str(i))\n plt.imshow(conv_layer[0,:,:,i], interpolation=\"nearest\")\n\n\n\n\n 
###\n ### Nested Class: Deconvolutional Neural Network (CNN) for MNIST\n ###\n class DeconvData2:\n \n def __init__( self, outer, session, inputImage, inputLabel ):\n\n \t# data placeholders\n \t#self.inputImage = tf.placeholder(tf.float32, [None, inDim], name='x')\n \t#self.inputLabel = tf.placeholder(tf.float32, [None, outDim], name='y')\n\n \t# instantiate outer class in inner class\n self.cnn = outer\n self.sess = session\n\n activations1 = self.calculateActivations( inputImage, inputLabel, 1 )\n self.deconv1 = self.deconvLayer1( inputImage, inputLabel, activations1 )\n activations2 = self.calculateActivations(inputImage, inputLabel, 2)\n self.deconv2 = self.deconvLayer2( inputImage, inputLabel, activations2 )\n activations3 = self.calculateActivations( inputImage, inputLabel, 3 ) \n self.deconv3 = self.deconvLayer3( inputImage, inputLabel, activations3 )\n\n\n def deconvLayer1( self, inputImage, inputLabel, activations1 ):\n\n \t#\n \t## Deconvoluting 1st layer\n \t##\n \n # get activations for layer 1\n #activations1 = self.calculateActivations( inputImage, inputLabel, 1 )\n\n # convert from array to tensor\n act1_tf = tf.convert_to_tensor( activations1, np.float32 )\n\n # unpool\n unPool1 = self.unpool( act1_tf )\n\n # unrelu\n unRelu1 = tf.nn.relu( unPool1 )\n\n \t# deconvolute (filter)\n unConv1 = tf.nn.conv2d_transpose( # check dimensions\n \t #activations1,\n unRelu1,\n self.cnn.W_c1,\n output_shape = [ inputImage.shape[0], self.cnn.n_in, self.cnn.n_in, self.cnn.nChannels ],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n return unConv1\n\n\n def deconvLayer2( self, inputImage, inputLabel, activations2 ):\n\n\n ##\n ## Deconvoluting 2nd layer\n ##\n\n # get activations for layer 2\n #activations2 = self.calculateActivations(inputImage, inputLabel, 2)\n\n # convert from array to tensor\n act1_tf = tf.convert_to_tensor( activations2, np.float32 )\n\n # 1st unpool\n unPool1 = self.unpool( act1_tf )\n\n # 1st unrelu\n unRelu1 = tf.nn.relu( unPool1 )\n\n # 1st deconvolute (filter)\n outputShape1 = int(self.cnn.n_in/self.cnn.filterSizePool1)\n unConv1 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu1,\n self.cnn.W_c2,\n output_shape = [ inputImage.shape[0], outputShape1, outputShape1, self.cnn.nFiltersConv1],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n # 2nd unpool\n unPool2 = self.unpool( unConv1 )\n\n # 2nd relu\n unRelu2 = tf.nn.relu( unPool2 )\n\n # 2nd deconvolute (filter)\n unConv2 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu2,\n self.cnn.W_c1,\n output_shape = [ inputImage.shape[0], self.cnn.n_in, self.cnn.n_in, self.cnn.nChannels],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n return unConv2\n\n\n def deconvLayer3( self, inputImage, inputLabel, activations3 ):\n\n\n ##\n ## Deconvoluting 3rd layer\n ##\n\n # get activations for layer 3\n #activations3 = self.calculateActivations(inputImage, inputLabel, 3)\n\n # convert from array to tensor\n act1_tf = tf.convert_to_tensor( activations3, np.float32 )\n\n # 1st unpool\n unPool1 = self.unpool( act1_tf )\n\n # 1st unrelu\n unRelu1 = tf.nn.relu( unPool1 )\n\n # 1st deconvolute (filter)\n outputShape1 = int((self.cnn.n_in/self.cnn.filterSizePool2)/self.cnn.filterSizePool1)\n unConv1 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu1,\n self.cnn.W_c3,\n #output_shape = [ inputImage.shape[0], outputShape1, outputShape1, self.cnn.nFiltersConv1],\n output_shape = [ inputImage.shape[0], outputShape1, outputShape1, self.cnn.nFiltersConv2],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n\n # 2nd 
unpool\n unPool2 = self.unpool( unConv1 )\n\n # 2nd relu\n unRelu2 = tf.nn.relu( unPool2 )\n\n # 2nd deconvolute (filter)\n outputShape2 = int(self.cnn.n_in/self.cnn.filterSizePool1)\n unConv2 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu2,\n self.cnn.W_c2,\n #output_shape = [ inputImage.shape[0], outputShape2, outputShape2, self.cnn.nChannels],\n output_shape = [ inputImage.shape[0], outputShape2, outputShape2, self.cnn.nFiltersConv1],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n\n # 3rd unpool\n unPool3 = self.unpool( unConv2 )\n\n # 3rd relu\n unRelu3 = tf.nn.relu( unPool3 )\n\n # 3rd deconvolute (filter)\n unConv3 = tf.nn.conv2d_transpose( \n #activations1,\n unRelu3,\n self.cnn.W_c1,\n output_shape = [ inputImage.shape[0], self.cnn.n_in, self.cnn.n_in, self.cnn.nChannels],\n strides = [1, 1, 1, 1],\n padding = \"SAME\" )\n\n return unConv3\n\n #Returns the random filter activation for layer 1\n def bestActivation1( self, inputImage, inputLabel, n_best=3, k=3):\n activations_layer1=self.calculateActivations( inputImage, inputLabel, 1)\n \n random.seed(3)\n filters_layer1=random.sample(range(activations_layer1.shape[-1]),k)\n j=0\n best_index=np.zeros([k,n_best])\n \n all_isolations = np.zeros([k, n_best, 64, 64, 1])\n \n \n \n for i in filters_layer1:\n isolated=activations_layer1.copy()[:,:,:,i]\n \n Norm1 = np.linalg.norm(isolated,axis=(1,2))\n #Norm2 = np.linalg.norm(Norm1, axis=1)\n \n best = np.argsort(Norm1)[-n_best:]\n best_index[j,:]=best\n \n \n all_isolations[j,:,:,:,] = np.reshape(isolated[best,],(n_best,64,64,1))\n j=j+1\n \n \n \n return all_isolations,best_index, filters_layer1\n \n #Returns the random filter activation for layer 2\n def bestActivation2( self, inputImage, inputLabel, n_best=3, k=3):\n activations_layer2=self.calculateActivations( inputImage, inputLabel, 2)\n random.seed(3)\n filters_layer2=random.sample(range(activations_layer2.shape[-1]),k)\n j=0\n best_index=np.zeros([k,n_best])\n \n all_isolations = np.zeros([k, n_best, 32, 32, 1])\n #print(activations_layer2.shape)\n \n \n for i in filters_layer2:\n isolated=activations_layer2.copy()[:,:,:,i]\n \n Norm1 = np.linalg.norm(isolated,axis=(1,2))\n #Norm2 = np.linalg.norm(Norm1, axis=1)\n \n best = np.argsort(Norm1)[-n_best:]\n best_index[j,:]=best\n \n isolated=np.reshape(isolated[best,],(n_best,32,32,1))\n \n all_isolations[j,:,:,:,] = isolated\n j=j+1\n \n \n \n return all_isolations,best_index, filters_layer2\n \n #Returns the random filter activation for layer 3\n def bestActivation3( self, inputImage, inputLabel, n_best=3, k=3):\n activations_layer3=self.calculateActivations( inputImage, inputLabel, 3)\n random.seed(3)\n filters_layer3=random.sample(range(activations_layer3.shape[-1]),k)\n j=0\n best_index=np.zeros([k,n_best])\n \n all_isolations = np.zeros([k, n_best, 16, 16, 1])\n #print(activations_layer2.shape)\n \n \n for i in filters_layer3:\n isolated=activations_layer3.copy()[:,:,:,i]\n \n Norm1 = np.linalg.norm(isolated,axis=(1,2))\n #Norm2 = np.linalg.norm(Norm1, axis=1)\n \n best = np.argsort(Norm1)[-n_best:]\n best_index[j,:]=best\n \n isolated=np.reshape(isolated[best,],(n_best,16,16,1))\n \n all_isolations[j,:,:,:,] = isolated\n j=j+1\n \n \n \n return all_isolations,best_index, filters_layer3\n \n #Returns de deconvoluted layer1 as numpy array, with isolated nodes,\n #and save the images on the \"img\" folder\n def displayFeatures1( self, inputImage, inputLabel, n_best = 3, k = 3):\n\n #\n ## Deconvoluting 1st layer\n ##\n \n # get activations for layer 1\n activations1 = 
self.calculateActivations( inputImage, inputLabel, 1 )\n random.seed(3)\n filters = random.sample(range(activations1.shape[-1]), k)\n aux = activations1.shape[0] - n_best\n \n all_isolations = np.zeros([k, n_best, 128, 128, 3])\n j = 0\n best_index = np.zeros([k, n_best])\n \n for i in filters:\n # Isolate filters\n print(\"Deconvoluting Layer 1 Filter: {}\".format(i))\n isolated = activations1.copy()\n isolated[:,:,:,:i] = 0\n isolated[:,:,:,i+1:] = 0\n\n Norm1 = np.linalg.norm(isolated[:,:,:,i], axis = (1, 2))\n #Norm2 = np.linalg.norm(Norm1, axis = 1)\n\n best = np.argsort(Norm1)[-n_best:]\n\n # devonvolute\n unConv1 = self.deconvLayer1( inputImage, inputLabel, isolated )\n \n u = unConv1.eval()\n \n u = u[best,]\n best_index[j,:] = best\n \n #imsave(\"img/Deconv1_Node_{}_of_Image1.jpg\".format(i), u[0,:,:,:])\n \n all_isolations[j,:,:,:,:] = u\n j = j + 1\n \n return all_isolations, best_index, filters\n\n\n def displayFeatures2( self, inputImage, inputLabel, n_best = 3, k = 3):\n\n ##\n ## Deconvoluting 2nd layer\n ##\n\n # get activations for layer 2\n activations2 = self.calculateActivations(inputImage, inputLabel, 2)\n random.seed(3)\n filters = random.sample(range(activations2.shape[-1]), k)\n aux = activations2.shape[0] - n_best\n \n all_isolations = np.zeros([k, n_best, 128, 128, 3])\n j = 0\n best_index = np.zeros([k, n_best])\n \n for i in filters:\n # Isolate filters\n print(\"Deconvoluting Layer 2 Filter: {}\".format(i))\n isolated = activations2.copy()\n isolated[:,:,:,:i] = 0\n isolated[:,:,:,i+1:] = 0\n \n Norm1 = np.linalg.norm(isolated[:,:,:,i], axis = (1, 2))\n #Norm2 = np.linalg.norm(Norm1, axis = 1)\n \n best = np.argsort(Norm1)[-n_best:]\n \n # deconvolute\n unConv2 = self.deconvLayer2( inputImage, inputLabel, isolated )\n \n u = unConv2.eval()\n \n u = u[best,]\n best_index[j,:] = best\n \n #imsave(\"img/Deconv2_Node_{}_of_Image1.jpg\".format(i), u[0,:,:,:])\n \n all_isolations[j,:,:,:,:] = u\n j = j + 1\n \n \n return all_isolations, best_index, filters\n\n\n def displayFeatures3( self, inputImage, inputLabel, n_best = 3, k = 3):\n\n ##\n ## Deconvoluting 2nd layer\n ##\n\n # get activations for layer 2\n activations3 = self.calculateActivations(inputImage, inputLabel, 3)\n random.seed(3)\n filters = random.sample(range(activations3.shape[-1]), k)\n aux = activations3.shape[0] - n_best\n \n all_isolations = np.zeros([k, n_best, 128, 128, 3])\n j = 0\n best_index = np.zeros([k, n_best])\n \n for i in range(filters):\n # Isolate filters\n if i % 5 == 0:\n print(\"Deconvoluting Layer 3 activation number: {}\".format(i))\n isolated = activations3.copy()\n isolated[:,:,:,:i] = 0\n isolated[:,:,:,i+1:] = 0\n \n Norm1 = np.linalg.norm(isolated[:,:,:,i], axis = (1, 2))\n #Norm2 = np.linalg.norm(Norm1, axis = 1)\n \n best = np.argsort(Norm1)[-n_best:]\n \n # deconvolute\n unConv3 = self.deconvLayer3( inputImage, inputLabel, isolated )\n \n u = unConv3.eval()\n \n u = u[best,]\n best_index[j,:] = best\n \n #imsave(\"img/Deconv3_Node_{}_of_Image1.jpg\".format(i), u[0,:,:,:])\n \n all_isolations[j,:,:,:,:] = u\n j = j + 1\n \n \n return all_isolations, best_index, filters \n\n\n # calculate activations for layer (1 or 2)\n def calculateActivations( self, inputImage, inputLabel, layer ):\n\n if( layer == 1 ):\n return self.cnn.pool1.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n elif( layer == 2 ):\n return self.cnn.pool2.eval(feed_dict={self.cnn.x: 
np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n else:\n return self.cnn.pool3.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n \n def calculateActivationsFeature( self, inputImage, inputLabel, layer ):\n\n if( layer == 1 ):\n return self.cnn.relu1.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n elif( layer == 2 ):\n return self.cnn.relu2.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n else:\n return self.cnn.relu3.eval(feed_dict={self.cnn.x: np.reshape(inputImage,[-1,int(self.cnn.n_in*self.cnn.n_in*self.cnn.nChannels)])})\n\n\n def getDeconv( self ):\n return self.deconv1, self.deconv2, self.deconv3\n\n # method to unpool (taken from kvfrans - put link!)\n def unpool( self, value ):\n \"\"\"N-dimensional version of the unpooling operation from\n https://www.robots.ox.ac.uk/~vgg/rg/papers/Dosovitskiy_Learning_to_Generate_2015_CVPR_paper.pdf\n\n :param value: A Tensor of shape [b, d0, d1, ..., dn, ch]\n :return: A Tensor of shape [b, 2*d0, 2*d1, ..., 2*dn, ch]\n \"\"\"\n #with tf.name_scope(name) as scope:\n sh = value.get_shape().as_list()\n dim = len(sh[1:-1])\n out = (tf.reshape(value, [-1] + sh[-dim:]))\n for i in range(dim, 0, -1):\n out = tf.concat( [out, out], i)\n out_size = [-1] + [s * 2 for s in sh[1:-1]] + [sh[-1]]\n out = tf.reshape(out, out_size)#, name=scope)\n return out\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.reshape", "numpy.argsort", "matplotlib.pyplot.imshow", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.Variable", "matplotlib.pyplot.figure", "numpy.reshape", "numpy.zeros", "tensorflow.nn.conv2d_transpose", "tensorflow.cast", "tensorflow.layers.max_pooling2d", "tensorflow.train.Saver", "tensorflow.layers.dense", "numpy.linalg.norm", "tensorflow.layers.dropout", "tensorflow.placeholder", "tensorflow.truncated_normal", "tensorflow.train.AdamOptimizer", "tensorflow.nn.conv2d", "matplotlib.pyplot.subplot", "tensorflow.argmax", "tensorflow.nn.relu", "tensorflow.get_variable" ] ]
happy-jihye/Cartoon-StyleGAN
[ "d1c0304f633e272135b1a07eb36fa26f387dddf9" ]
[ "projector.py" ]
[ "import argparse\nimport math\nimport os\n\nimport torch\nfrom torch import optim\nfrom torch.nn import functional as F\nfrom torchvision import transforms\nfrom PIL import Image\nfrom utils import tensor2image, save_image\nfrom tqdm import tqdm\n\nimport lpips\nfrom model import Generator, Encoder\n\n\ndef noise_regularize(noises):\n loss = 0\n\n for noise in noises:\n size = noise.shape[2]\n\n while True:\n loss = (\n loss\n + (noise * torch.roll(noise, shifts=1, dims=3)).mean().pow(2)\n + (noise * torch.roll(noise, shifts=1, dims=2)).mean().pow(2)\n )\n\n if size <= 8:\n break\n\n noise = noise.reshape([-1, 1, size // 2, 2, size // 2, 2])\n noise = noise.mean([3, 5])\n size //= 2\n\n return loss\n\n\ndef noise_normalize_(noises):\n for noise in noises:\n mean = noise.mean()\n std = noise.std()\n\n noise.data.add_(-mean).div_(std)\n\n\ndef get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):\n lr_ramp = min(1, (1 - t) / rampdown)\n lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)\n lr_ramp = lr_ramp * min(1, t / rampup)\n\n return initial_lr * lr_ramp\n\n\ndef latent_noise(latent, strength):\n noise = torch.randn_like(latent) * strength\n\n return latent + noise\n\n\ndef make_image(tensor):\n return (\n tensor.detach()\n .clamp_(min=-1, max=1)\n .add(1)\n .div_(2)\n .mul(255)\n .type(torch.uint8)\n .permute(0, 2, 3, 1)\n .to(\"cpu\")\n .numpy()\n )\n\n\nif __name__ == \"__main__\":\n device = \"cuda\"\n\n # -----------------------------------\n # Parser\n # -----------------------------------\n\n parser = argparse.ArgumentParser(\n description=\"Image projector to the generator latent spaces\"\n )\n parser.add_argument(\n \"--ckpt\", type=str, required=True, help=\"path to the model checkpoint\"\n )\n parser.add_argument(\n \"--e_ckpt\", type=str, default=None, help=\"path to the encoder checkpoint\"\n )\n parser.add_argument(\n \"--size\", type=int, default=256, help=\"output image sizes of the generator\"\n )\n parser.add_argument(\n \"--truncation\", type=float, default=0.7, help=\"truncation\"\n )\n parser.add_argument(\n \"--lr_rampup\",\n type=float,\n default=0.05,\n help=\"duration of the learning rate warmup\",\n )\n parser.add_argument(\n \"--lr_rampdown\",\n type=float,\n default=0.25,\n help=\"duration of the learning rate decay\",\n )\n parser.add_argument(\"--lr\", type=float, default=0.01, help=\"learning rate\")\n parser.add_argument(\n \"--noise\", type=float, default=0.05, help=\"strength of the noise level\"\n )\n parser.add_argument(\n \"--noise_ramp\",\n type=float,\n default=0.75,\n help=\"duration of the noise level decay\",\n )\n parser.add_argument(\"--step\", type=int, default=1000, help=\"optimize iterations\")\n parser.add_argument(\n \"--noise_regularize\",\n type=float,\n default=1e5,\n help=\"weight of the noise regularization\",\n )\n parser.add_argument(\"--mse\", type=float, default=0, help=\"weight of the mse loss\")\n parser.add_argument(\"--vgg\", type=float, default=1.0, help=\"weight of the vgg loss\")\n parser.add_argument(\n \"--w_plus\",\n action=\"store_true\",\n help=\"allow to use distinct latent codes to each layers\",\n )\n parser.add_argument(\n \"--project_name\", type=str, default=\"project\", help=\"name of the result project file\"\n )\n parser.add_argument(\n \"--factor_name\", type=str, default=\"factor\", help=\"name of the result factor file\"\n )\n parser.add_argument(\n \"--files\", nargs=\"+\", help=\"path to image files to be projected\"\n )\n\n args = parser.parse_args()\n\n # =============================================\n\n 
# -----------------------------------\n # Project Images to Latent spaces\n # -----------------------------------\n \n if args.files is None:\n exit() \n\n n_mean_latent = 10000\n\n # Load Real Images\n resize = min(args.size, 256)\n\n transform = transforms.Compose(\n [\n transforms.Resize(resize),\n transforms.CenterCrop(resize),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),\n ]\n )\n\n imgs = []\n\n for imgfile in args.files:\n img = transform(Image.open(imgfile).convert(\"RGB\"))\n imgs.append(img)\n\n imgs = torch.stack(imgs, 0).to(device)\n\n\n\n # -------------\n # Generator\n # -------------\n\n g_ema = Generator(args.size, 512, 8).to(device)\n g_ema.load_state_dict(torch.load(args.ckpt)[\"g_ema\"], strict=False)\n g_ema.eval()\n \n trunc = g_ema.mean_latent(4096).detach().clone()\n\n # -------------\n # Encoder\n # -------------\n\n if args.e_ckpt is not None :\n e_ckpt = torch.load(args.e_ckpt, map_location=device)\n\n encoder = Encoder(args.size, 512).to(device)\n encoder.load_state_dict(e_ckpt['e'])\n encoder.eval()\n\n\n # -------------\n # Latent vector\n # -------------\n\n if args.e_ckpt is not None :\n with torch.no_grad(): \n latent_init = encoder(imgs)\n latent_in = latent_init.detach().clone() \n else :\n with torch.no_grad():\n noise_sample = torch.randn(n_mean_latent, 512, device=device)\n latent_out = g_ema.style(noise_sample)\n\n latent_mean = latent_out.mean(0)\n latent_std = ((latent_out - latent_mean).pow(2).sum() / n_mean_latent) ** 0.5\n\n latent_in = latent_mean.detach().clone().unsqueeze(0).repeat(imgs.shape[0], 1)\n\n if args.w_plus:\n latent_in = latent_in.unsqueeze(1).repeat(1, g_ema.n_latent, 1)\n\n latent_in.requires_grad = True\n\n # -------------\n # Noise\n # -------------\n\n noises_single = g_ema.make_noise()\n noises = []\n for noise in noises_single:\n noises.append(noise.repeat(imgs.shape[0], 1, 1, 1).normal_())\n\n for noise in noises:\n noise.requires_grad = True\n\n\n # -------------\n # Loss\n # -------------\n\n # PerceptualLoss\n percept = lpips.PerceptualLoss(\n model=\"net-lin\", net=\"vgg\", use_gpu=device.startswith(\"cuda\")\n )\n\n\n # Optimizer\n if args.e_ckpt is not None :\n optimizer = optim.Adam([latent_in], lr=args.lr)\n else:\n optimizer = optim.Adam([latent_in] + noises, lr=args.lr)\n\n pbar = tqdm(range(args.step))\n latent_path = []\n proj_images = []\n\n # Training !\n\n for i in pbar:\n\n t = i / args.step\n lr = get_lr(t, args.lr)\n\n optimizer.param_groups[0][\"lr\"] = lr\n\n # fake image\n if args.e_ckpt is not None :\n img_gen, _ = g_ema([latent_in], input_is_latent=True,\n truncation=args.truncation, truncation_latent = trunc,\n randomize_noise=False)\n else:\n noise_strength = latent_std * args.noise * max(0, 1 - t / args.noise_ramp) ** 2\n latent_n = latent_noise(latent_in, noise_strength.item())\n\n img_gen, _ = g_ema([latent_n], input_is_latent=True, noise=noises)\n \n #\n batch, channel, height, width = img_gen.shape\n if height > 256:\n factor = height // 256\n\n img_gen = img_gen.reshape(\n batch, channel, height // factor, factor, width // factor, factor\n )\n img_gen = img_gen.mean([3, 5])\n \n\n # latent\n if args.e_ckpt is not None :\n latent_hat = encoder(img_gen)\n\n\n # Loss\n p_loss = percept(img_gen, imgs).sum() \n r_loss = torch.mean((img_gen - imgs) ** 2) \n mse_loss = F.mse_loss(img_gen, imgs)\n \n n_loss = noise_regularize(noises)\n\n if args.e_ckpt is not None :\n style_loss = F.mse_loss(latent_hat, latent_init)\n loss = args.vgg * p_loss + r_loss + style_loss 
+ args.mse * mse_loss\n else :\n style_loss = 0.0\n loss = args.vgg * p_loss + r_loss + args.mse * mse_loss + args.noise_regularize * n_loss \n\n\n # update\n optimizer.zero_grad()\n loss.backward(retain_graph=True)\n optimizer.step()\n\n noise_normalize_(noises)\n\n if (i + 1) % 100 == 0:\n latent_path.append(latent_in.detach().clone())\n proj_images.append(img_gen)\n\n pbar.set_description(\n (\n f\"perceptual: {p_loss.item():.4f}; noise regularize: {n_loss.item():.4f}; \"\n f\"reconstruction: {r_loss:.4f}; \"\n f\"mse_img: {mse_loss.item():.4f}; mse_latent: {style_loss:.4f}; lr: {lr:.4f} |\"\n )\n )\n\n # =============================================\n\n # -----------------------------------\n # Save image, latent, noise\n # -----------------------------------\n\n\n # final generated image\n if args.e_ckpt is not None :\n img_gen, _ = g_ema([latent_path[-1]], input_is_latent=True,\n truncation=args.truncation, truncation_latent = trunc,\n randomize_noise=None)\n else:\n img_gen, _ = g_ema([latent_path[-1]], input_is_latent=True, noise=noises)\n\n\n filename = f\"{args.project_name}.pt\"\n img_ar = make_image(img_gen)\n\n\n images = []\n for i in range(len(proj_images)):\n img = proj_images[i][0]\n for k in range(1, len(proj_images[0])): \n # img : torch.Size([3, 256*num_img, 256])\n img = torch.cat([img, proj_images[i][k]], dim =1) \n images.append(img) \n\n\n result_file = {}\n for i, input_name in enumerate(args.files):\n noise_single = []\n for noise in noises:\n noise_single.append(noise)\n\n name = os.path.splitext(os.path.basename(input_name))[0]\n result_file[name] = {\n \"r_img\": tensor2image(imgs[i]),\n \"f_img\": tensor2image(img_gen[i]),\n \"p_img\" : tensor2image(torch.cat(images, dim=2)),\n \"latent\": latent_in[i].unsqueeze(0),\n \"noise\": noise_single,\n \"args\" : args,\n }\n\n img_name = os.path.splitext(os.path.basename(input_name))[0] + \"-project.png\"\n pil_img = Image.fromarray(img_ar[i])\n pil_img.save(img_name)\n\n img_name = os.path.splitext(os.path.basename(input_name))[0] + \"-project-interpolation.png\"\n save_image(tensor2image(torch.cat(images, dim=2)), size = 20, out=img_name)\n\n torch.save(result_file, filename)\n" ]
[ [ "torch.nn.functional.mse_loss", "torch.stack", "torch.randn_like", "torch.load", "torch.randn", "torch.roll", "torch.save", "torch.no_grad", "torch.optim.Adam", "torch.cat", "torch.mean" ] ]
brando90/pytorch-meta
[ "389e35ef9aa812f07ce50a3f3bd253c4efb9765c" ]
[ "torchmeta/datasets/tieredimagenet.py" ]
[ "import numpy as np\nfrom PIL import Image\nimport h5py\nimport json\nimport os\nimport io\nimport pickle\n\nfrom torchmeta.utils.data import Dataset, ClassDataset, CombinationMetaDataset\n# QKFIX: See torchmeta.datasets.utils for more informations\nfrom torchmeta.datasets.utils import download_file_from_google_drive\n\n\nclass TieredImagenet(CombinationMetaDataset):\n \"\"\"\n The Tiered-Imagenet dataset, introduced in [1]. This dataset contains images \n of 608 different classes from the ILSVRC-12 dataset (Imagenet challenge).\n\n Parameters\n ----------\n root : string\n Root directory where the dataset folder `tieredimagenet` exists.\n\n num_classes_per_task : int\n Number of classes per tasks. This corresponds to \"N\" in \"N-way\" \n classification.\n\n meta_train : bool (default: `False`)\n Use the meta-train split of the dataset. If set to `True`, then the\n arguments `meta_val` and `meta_test` must be set to `False`. Exactly one \n of these three arguments must be set to `True`.\n\n meta_val : bool (default: `False`)\n Use the meta-validation split of the dataset. If set to `True`, then the \n arguments `meta_train` and `meta_test` must be set to `False`. Exactly one \n of these three arguments must be set to `True`.\n\n meta_test : bool (default: `False`)\n Use the meta-test split of the dataset. If set to `True`, then the \n arguments `meta_train` and `meta_val` must be set to `False`. Exactly one \n of these three arguments must be set to `True`.\n\n meta_split : string in {'train', 'val', 'test'}, optional\n Name of the split to use. This overrides the arguments `meta_train`, \n `meta_val` and `meta_test` if all three are set to `False`.\n\n transform : callable, optional\n A function/transform that takes a `PIL` image, and returns a transformed \n version. See also `torchvision.transforms`.\n\n target_transform : callable, optional\n A function/transform that takes a target, and returns a transformed \n version. See also `torchvision.transforms`.\n\n dataset_transform : callable, optional\n A function/transform that takes a dataset (ie. a task), and returns a \n transformed version of it. E.g. `torchmeta.transforms.ClassSplitter()`.\n\n class_augmentations : list of callable, optional\n A list of functions that augment the dataset with new classes. These classes \n are transformations of existing classes. E.g.\n `torchmeta.transforms.HorizontalFlip()`.\n\n download : bool (default: `False`)\n If `True`, downloads the pickle files and processes the dataset in the root \n directory (under the `tieredimagenet` folder). If the dataset is already \n available, this does not download/process the dataset again.\n\n Notes\n -----\n The dataset is downloaded from [this repository]\n (https://github.com/renmengye/few-shot-ssl-public/). The dataset contains \n images from 34 categories. The meta train/validation/test splits are over \n 20/6/8 categories. Each category contains between 10 and 30 classes. The \n splits over categories (instead of over classes) ensures that all the training \n classes are sufficiently distinct from the test classes (unlike Mini-Imagenet).\n\n References\n ----------\n .. [1] Ren, M., Triantafillou, E., Ravi, S., Snell, J., Swersky, K., \n Tenenbaum, J.B., Larochelle, H. and Zemel, R.S. (2018). Meta-learning \n for semi-supervised few-shot classification. International Conference \n on Learning Representations. 
(https://arxiv.org/abs/1803.00676)\n \"\"\"\n def __init__(self, root, num_classes_per_task=None, meta_train=False,\n meta_val=False, meta_test=False, meta_split=None,\n transform=None, target_transform=None, dataset_transform=None,\n class_augmentations=None, download=False):\n dataset = TieredImagenetClassDataset(root, meta_train=meta_train,\n meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,\n transform=transform, class_augmentations=class_augmentations,\n download=download)\n super(TieredImagenet, self).__init__(dataset, num_classes_per_task,\n target_transform=target_transform, dataset_transform=dataset_transform)\n\n\nclass TieredImagenetClassDataset(ClassDataset):\n folder = 'tieredimagenet'\n # Google Drive ID from https://github.com/renmengye/few-shot-ssl-public\n gdrive_id = '1g1aIDy2Ar_MViF2gDXFYDBTR-HYecV07'\n tar_filename = 'tiered-imagenet.tar'\n tar_md5 = 'e07e811b9f29362d159a9edd0d838c62'\n tar_folder = 'tiered-imagenet'\n\n filename = '{0}_data.hdf5'\n filename_labels = '{0}_labels.json'\n\n def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,\n meta_split=None, transform=None, class_augmentations=None,\n download=False):\n super(TieredImagenetClassDataset, self).__init__(meta_train=meta_train,\n meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,\n class_augmentations=class_augmentations)\n\n self.root = os.path.join(os.path.expanduser(root), self.folder)\n self.transform = transform\n\n self._data_file = None\n self._data = None\n self._labels = None\n\n self.split_filename = os.path.join(self.root,\n self.filename.format(self.meta_split))\n self.split_filename_labels = os.path.join(self.root,\n self.filename_labels.format(self.meta_split))\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError('TieredImagenet integrity check failed')\n self._num_classes = len(self.labels)\n\n @property\n def data(self):\n if self._data is None:\n self._data_file = h5py.File(self.split_filename, 'r')\n self._data = self._data_file['datasets']\n return self._data\n\n @property\n def labels(self):\n if self._labels is None:\n with open(self.split_filename_labels, 'r') as f:\n self._labels = json.load(f)\n return self._labels\n\n def __getitem__(self, index):\n specific_class_name = self.labels[index % self.num_classes]\n data = self.data[specific_class_name]\n general_class_name = data.attrs['label_general']\n transform = self.get_transform(index, self.transform)\n target_transform = self.get_target_transform(index)\n\n return TieredImagenetDataset(index, data,\n general_class_name, specific_class_name,\n transform=transform, target_transform=target_transform)\n\n @property\n def num_classes(self):\n return self._num_classes\n\n def close(self):\n if self._data_file is not None:\n self._data_file.close()\n self._data_file = None\n self._data = None\n\n def _check_integrity(self):\n return (os.path.isfile(self.split_filename)\n and os.path.isfile(self.split_filename_labels))\n\n def download(self):\n import tarfile\n import shutil\n from tqdm import tqdm\n\n if self._check_integrity():\n return\n\n download_file_from_google_drive(self.gdrive_id, self.root,\n self.tar_filename, md5=self.tar_md5)\n\n filename = os.path.join(self.root, self.tar_filename)\n with tarfile.open(filename, 'r') as f:\n f.extractall(self.root)\n tar_folder = os.path.join(self.root, self.tar_folder)\n\n for split in ['train', 'val', 'test']:\n filename = os.path.join(self.root, self.filename.format(split))\n if 
os.path.isfile(filename):\n continue\n\n images_filename = os.path.join(tar_folder, '{0}_images_png.pkl'.format(split))\n if not os.path.isfile(images_filename):\n raise IOError(images_filename)\n with open(images_filename, 'rb') as f:\n images = pickle.load(f, encoding='bytes')\n\n labels_filename = os.path.join(tar_folder, '{0}_labels.pkl'.format(split))\n if not os.path.isfile(labels_filename):\n raise IOError()\n with open(labels_filename, 'rb') as f:\n labels = pickle.load(f, encoding='latin1')\n\n labels_str = labels['label_specific_str']\n general_labels_str = labels['label_general_str']\n general_labels = labels['label_general']\n with open(os.path.join(self.root, self.filename_labels.format(split)), 'w') as f:\n json.dump(labels_str, f)\n\n with h5py.File(filename, 'w') as f:\n group = f.create_group('datasets')\n dtype = h5py.special_dtype(vlen=np.uint8)\n for i, label in enumerate(tqdm(labels_str, desc=filename)):\n indices, = np.where(labels['label_specific'] == i)\n dataset = group.create_dataset(label, (len(indices),), dtype=dtype)\n general_idx = general_labels[indices[0]]\n dataset.attrs['label_general'] = (general_labels_str[general_idx]\n if general_idx < len(general_labels_str) else '')\n dataset.attrs['label_specific'] = label\n for j, k in enumerate(indices):\n dataset[j] = np.squeeze(images[k])\n\n if os.path.isdir(tar_folder):\n shutil.rmtree(tar_folder)\n\n\nclass TieredImagenetDataset(Dataset):\n def __init__(self, index, data, general_class_name, specific_class_name,\n transform=None, target_transform=None):\n super(TieredImagenetDataset, self).__init__(index, transform=transform,\n target_transform=target_transform)\n self.data = data\n self.general_class_name = general_class_name\n self.specific_class_name = specific_class_name\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n image = Image.open(io.BytesIO(self.data[index]))\n target = (self.general_class_name, self.specific_class_name)\n\n if self.transform is not None:\n image = self.transform(image)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return (image, target)\n" ]
[ [ "numpy.where", "numpy.squeeze" ] ]
waelterm/Udacity_Project9-SystemIntegration
[ "e652b7d72415cd33706bcd398f2fbe7d17e1fdb4" ]
[ "ros/src/tl_detector/light_classification/tl_classifier.py" ]
[ "from styx_msgs.msg import TrafficLight\nimport tensorflow as tf\nimport numpy as np\nimport datetime\n\nclass TLClassifier(object):\n def __init__(self):\n TrainedModelPath = r'light_classification/model/faster_rcnn_frozen_inference_graph.pb'\n self.graph = tf.Graph()\n self.threshold = .5\n\n with self.graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(TrainedModelPath, 'rb') as fid:\n od_graph_def.ParseFromString(fid.read())\n tf.import_graph_def(od_graph_def, name='')\n\n self.image_tensor = self.graph.get_tensor_by_name('image_tensor:0')\n self.boxes = self.graph.get_tensor_by_name('detection_boxes:0')\n self.scores = self.graph.get_tensor_by_name('detection_scores:0')\n self.classes = self.graph.get_tensor_by_name('detection_classes:0')\n self.num_detections = self.graph.get_tensor_by_name(\n 'num_detections:0')\n\n self.sess = tf.Session(graph=self.graph)\n\n def get_classification(self, image):\n \"\"\"Classifies the Traffic light as red, green, yellow, or unknown\n Args:\n image taken from egovehicle\n Returns:\n int: ID of traffic light color\n \"\"\"\n with self.graph.as_default():\n img_expand = np.expand_dims(image, axis=0)\n start = datetime.datetime.now()\n (boxes, scores, classes, num_detections) = self.sess.run(\n [self.boxes, self.scores, self.classes, self.num_detections],\n feed_dict={self.image_tensor: img_expand})\n end = datetime.datetime.now()\n c = end - start\n print(c.total_seconds())\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n\n print('SCORES: ', scores[0])\n print('CLASSES: ', classes[0])\n\n if scores[0] > self.threshold:\n if classes[0] == 1:\n print('GREEN')\n return TrafficLight.GREEN\n elif classes[0] == 2:\n print('RED')\n return TrafficLight.RED\n elif classes[0] == 3:\n print('YELLOW')\n return TrafficLight.YELLOW\n\n return TrafficLight.UNKNOWN\n" ]
[ [ "numpy.squeeze", "tensorflow.gfile.GFile", "tensorflow.Graph", "numpy.expand_dims", "tensorflow.Session", "tensorflow.import_graph_def", "tensorflow.GraphDef" ] ]
brownbaerchen/pySDC
[ "31293859d731646aa09cef4345669eac65501550" ]
[ "pySDC/playgrounds/PETSc/playground_data.py" ]
[ "\nimport numpy as np\n\nfrom petsc4py import PETSc\n\n\ndef main():\n # import petsc4py\n\n\n n = 4\n dx = 1.0/(n - 1)\n dy = dx\n comm= PETSc.COMM_WORLD\n da = PETSc.DMDA().create([n, n], dof=1, stencil_width=1, comm=comm)\n dar = da.refine()\n print(dar.getSizes())\n exit()\n\n rank = PETSc.COMM_WORLD.getRank()\n # comm=\n\n x = da.createGlobalVec()\n xa = da.getVecArray(x)\n (xs, xe), (ys, ye) = da.getRanges()\n print(xs,xe,ys,ye, xa.shape)\n for i in range(xs, xe):\n for j in range(ys, ye):\n xa[i, j, 0] = np.sin(2 * np.pi * (i ) * dx) * np.sin(2 * np.pi * (j ) * dy)\n xa[i, j, 1] = 0.1 * np.sin(2 * np.pi * (i ) * dx) * np.sin(2 * np.pi * (j ) * dy)\n print('x=', rank, x.getArray())\n # print('x:', x.getSizes(), da.getRanges())\n # print()\n\n y = da.createGlobalVec()\n ya = da.getVecArray(y)\n (xs, xe), (ys, ye) = da.getRanges()\n for i in range(xs, xe):\n for j in range(ys, ye):\n ya[i, j, 0] = -2 * (2.0 * np.pi) ** 2 * np.sin(2 * np.pi * (i ) * dx) * np.sin(2 * np.pi * (j ) * dy)\n ya[i, j, 1] = -0.2 * (2.0 * np.pi) ** 2 * np.sin(2 * np.pi * (i) * dx) * np.sin(2 * np.pi * (j) * dy)\n #\n # z = da.createGlobalVec()\n # za = da.getVecArray(z)\n # (xs, xe), (ys, ye) = da.getRanges()\n # for i in range(xs, xe):\n # for j in range(ys, ye):\n # za[i, j] = 4 * (2.0 * np.pi) ** 4 * np.sin(2 * np.pi * (i + 1) * dx) * np.sin(2 * np.pi * (j + 1) * dy)\n\n\n # z = y.copy()\n # print('z=', z.getArray())\n # ya = da.getVecArray(y)\n # ya[0,0] = 10.0\n # print(y.getArray()[0], z.getArray()[0])\n\n A = da.createMatrix()\n A.setType('aij') # sparse\n A.setFromOptions()\n A.setPreallocationNNZ((5,5))\n A.setUp()\n\n A.zeroEntries()\n row = PETSc.Mat.Stencil()\n col = PETSc.Mat.Stencil()\n mx, my = da.getSizes()\n (xs, xe), (ys, ye) = da.getRanges()\n for j in range(ys, ye):\n for i in range(xs, xe):\n if (i == 0 or j == 0 or i == mx - 1 or j == my - 1):\n row.index = (i, j)\n row.field = 0\n A.setValueStencil(row, row, 1.0)\n row.field = 1\n A.setValueStencil(row, row, 1.0)\n # pass\n else:\n # u = x[i, j] # center\n diag = -2.0 / dx ** 2 - 2.0 / dy ** 2\n for index, value in [\n ((i, j - 1), 1.0 / dy ** 2),\n ((i - 1, j), 1.0 / dx ** 2),\n ((i, j), diag),\n ((i + 1, j), 1.0 / dx ** 2),\n ((i, j + 1), 1.0 / dy ** 2),\n ]:\n row.index = (i, j)\n row.field = 0\n col.index = index\n col.field = 0\n A.setValueStencil(row, col, value)\n row.field = 1\n col.field = 1\n A.setValueStencil(row, col, value)\n\n A.assemble()\n A.view()\n exit()\n\n Id = da.createMatrix()\n Id.setType('aij') # sparse\n Id.setFromOptions()\n Id.setPreallocationNNZ((5, 5))\n Id.setUp()\n\n Id.zeroEntries()\n row = PETSc.Mat.Stencil()\n col = PETSc.Mat.Stencil()\n mx, my = da.getSizes()\n (xs, xe), (ys, ye) = da.getRanges()\n for j in range(ys, ye):\n for i in range(xs, xe):\n row.index = (i, j)\n row.field = 0\n col.index = (i, j)\n col.field = 0\n Id.setValueStencil(row, row, 1.0)\n row.field = 1\n col.field = 1\n Id.setValueStencil(row, col, 1.0)\n Id.assemble()\n\n # (xs, xe), (ys, ye) = da.getRanges()\n # print(A.getValues(range(n*n), range(n*n)))\n\n res = da.createGlobalVec()\n A.mult(x, res)\n print('1st turn', rank, res.getArray())\n print((res-y).norm(PETSc.NormType.NORM_INFINITY))\n\n ksp = PETSc.KSP().create()\n ksp.setOperators(A)\n ksp.setType('cg')\n pc = ksp.getPC()\n pc.setType('mg')\n ksp.setFromOptions()\n\n x1 = da.createGlobalVec()\n ksp.solve(res, x1)\n print((x1 - x).norm(PETSc.NormType.NORM_INFINITY))\n\n x2 = da.createGlobalVec()\n Id.mult(x1, x2)\n print((x2 - 
x1).norm(PETSc.NormType.NORM_INFINITY))\n\n\n # # A.view()\n # res1 = da.createNaturalVec()\n # A.mult(res, res1)\n # # print('2nd turn', rank, res1.getArray())\n # da.globalToNatural(res, res1)\n # print(res1.getArray())\n # print((res1 - y).norm(PETSc.NormType.NORM_INFINITY))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.sin" ] ]
jefflai108/espnet
[ "a51f21cb94a4dead2300a8a13adb92ffdfbafbe8" ]
[ "test/espnet2/tts/test_fastspeech2.py" ]
[ "import pytest\nimport torch\n\nfrom espnet2.tts.fastspeech2 import FastSpeech2\n\n\[email protected](\"postnet_layers\", [0, 1])\[email protected](\"reduction_factor\", [1, 2, 3])\[email protected](\n \"spk_embed_dim, spk_embed_integration_type\",\n [(None, \"add\"), (2, \"add\"), (2, \"concat\")],\n)\[email protected](\"use_gst\", [True, False])\[email protected](\n \"use_masking, use_weighted_masking\", [[True, False], [False, True]]\n)\ndef test_fastspeech2(\n postnet_layers,\n reduction_factor,\n spk_embed_dim,\n spk_embed_integration_type,\n use_gst,\n use_masking,\n use_weighted_masking,\n):\n model = FastSpeech2(\n idim=10,\n odim=5,\n adim=4,\n aheads=2,\n elayers=1,\n eunits=4,\n dlayers=1,\n dunits=4,\n postnet_layers=postnet_layers,\n postnet_chans=4,\n postnet_filts=5,\n reduction_factor=reduction_factor,\n duration_predictor_layers=2,\n duration_predictor_chans=4,\n duration_predictor_kernel_size=3,\n energy_predictor_layers=2,\n energy_predictor_chans=4,\n energy_predictor_kernel_size=3,\n energy_predictor_dropout=0.5,\n energy_embed_kernel_size=9,\n energy_embed_dropout=0.5,\n pitch_predictor_layers=2,\n pitch_predictor_chans=4,\n pitch_predictor_kernel_size=3,\n pitch_predictor_dropout=0.5,\n pitch_embed_kernel_size=9,\n pitch_embed_dropout=0.5,\n spk_embed_dim=spk_embed_dim,\n spk_embed_integration_type=spk_embed_integration_type,\n use_gst=use_gst,\n gst_tokens=2,\n gst_heads=4,\n gst_conv_layers=2,\n gst_conv_chans_list=[2, 4],\n gst_conv_kernel_size=3,\n gst_conv_stride=2,\n gst_gru_layers=1,\n gst_gru_units=4,\n use_masking=use_masking,\n use_weighted_masking=use_weighted_masking,\n )\n\n inputs = dict(\n text=torch.randint(1, 10, (2, 2)),\n text_lengths=torch.tensor([2, 1], dtype=torch.long),\n speech=torch.randn(2, 4 * reduction_factor, 5),\n speech_lengths=torch.tensor([4, 2], dtype=torch.long) * reduction_factor,\n durations=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.long),\n pitch=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.float).unsqueeze(-1),\n energy=torch.tensor([[2, 2, 0], [2, 0, 0]], dtype=torch.float).unsqueeze(-1),\n # NOTE(kan-bayashi): +1 for eos\n durations_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),\n pitch_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),\n energy_lengths=torch.tensor([2 + 1, 1 + 1], dtype=torch.long),\n )\n if spk_embed_dim is not None:\n inputs.update(spembs=torch.randn(2, spk_embed_dim))\n loss, *_ = model(**inputs)\n loss.backward()\n\n with torch.no_grad():\n model.eval()\n\n inputs = dict(\n text=torch.randint(0, 10, (2,)),\n )\n if use_gst:\n inputs.update(speech=torch.randn(5, 5))\n if spk_embed_dim is not None:\n inputs.update(spembs=torch.randn(spk_embed_dim))\n model.inference(**inputs)\n\n # teacher forcing\n inputs.update(durations=torch.tensor([2, 2, 0], dtype=torch.long))\n inputs.update(pitch=torch.tensor([2, 2, 0], dtype=torch.float).unsqueeze(-1))\n inputs.update(energy=torch.tensor([2, 2, 0], dtype=torch.float).unsqueeze(-1))\n model.inference(**inputs, use_teacher_forcing=True)\n" ]
[ [ "torch.randn", "torch.no_grad", "torch.tensor", "torch.randint" ] ]
dailai/quick-nlp
[ "d27ae90c0788d0899af7f45b323ca64ed0c33868" ]
[ "src/quicknlp/modules/attention_decoder.py" ]
[ "import warnings\nfrom typing import Optional\n\nimport torch\n\nfrom quicknlp.utils import assert_dims\nfrom .basic_decoder import EmbeddingRNNDecoder\n\n\nclass RNNAttentionDecoder(EmbeddingRNNDecoder):\n\n def __init__(self, ntoken: int, emb_sz: int, nhid: int, nlayers: int, pad_token: int, eos_token: int,\n max_tokens=10, embedding_layer: Optional[torch.nn.Module] = None, dropouth=0.3, dropouti=0.65,\n dropoute=0.1, wdrop=0.5, cell_type=\"lstm\", **kwargs):\n\n super(RNNAttentionDecoder, self).__init__(ntoken=ntoken, emb_sz=emb_sz, nhid=nhid, nlayers=nlayers,\n pad_token=pad_token, eos_token=eos_token, max_tokens=max_tokens,\n embedding_layer=embedding_layer, dropouth=dropouth, dropouti=dropouti,\n dropoute=dropoute, wdrop=wdrop, cell_type=cell_type,\n in_dim=emb_sz * 2,\n out_dim=emb_sz\n )\n\n def _train_forward(self, inputs):\n sl, bs = inputs.size()\n emb = self.encoder_with_dropout(inputs, dropout=self.dropoute if self.training else 0)\n emb = self.dropouti(emb)\n\n layer_outputs = [[] for _ in range(self.nlayers)]\n raw_layer_outputs = [[] for _ in range(self.nlayers)]\n for raw_output in emb:\n raw_output = torch.cat(\n [raw_output, self.projection_layer.get_attention_output(raw_output)],\n dim=-1).unsqueeze_(0)\n raw_output = assert_dims(raw_output, [1, bs, self.emb_sz * 2])\n raw_outputs, outputs, new_hidden = self._rnn_step(raw_output)\n for layer_index in range(self.nlayers):\n layer_outputs[layer_index].append(outputs[layer_index])\n raw_layer_outputs[layer_index].append(raw_outputs[layer_index])\n rnn_out = assert_dims(raw_outputs[-1], [1, bs, self.emb_sz])\n layer_outputs[-1][-1] = self.projection_layer(rnn_out[0])\n raw_outputs = [torch.cat(i, dim=0) for i in raw_layer_outputs]\n outputs = [torch.cat(i, dim=0) for i in layer_outputs]\n return raw_outputs, outputs\n\n def _beam_forward(self, inputs, num_beams):\n # ensure keys exist for all beams\n if self.projection_layer.keys is not None and num_beams > 0:\n self.projection_layer.keys = self.projection_layer.keys.repeat(1, num_beams, 1)\n return super(RNNAttentionDecoder, self)._beam_forward(inputs, num_beams=num_beams)\n\n def _rnn_step(self, raw_output):\n new_hidden, raw_outputs, outputs = [], [], []\n for layer_index, (rnn, drop) in enumerate(zip(self.rnns, self.dropouths)):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n raw_output, new_h = rnn(raw_output, self.hidden[layer_index])\n new_hidden.append(new_h)\n raw_outputs.append(raw_output)\n if layer_index != self.nlayers - 1:\n raw_output = drop(raw_output)\n outputs.append(raw_output)\n self.hidden = new_hidden\n return raw_outputs, outputs, new_hidden\n" ]
[ [ "torch.cat" ] ]
alvarovm/cclib
[ "18a87de7fcb15c4133e1fd21939401672438ebb7" ]
[ "src/cclib/method/opa.py" ]
[ "# -*- coding: utf-8 -*-\r\n#\r\n# Copyright (c) 2016, the cclib development team\r\n#\r\n# This file is part of cclib (http://cclib.github.io) and is distributed under\r\n# the terms of the BSD 3-Clause License.\r\n\r\n\"\"\"Calculation of overlap population analysis based on cclib data.\"\"\"\r\n\r\nimport random\r\n\r\nimport numpy\r\n\r\nfrom .calculationmethod import Method\r\n\r\n\r\ndef func(x):\r\n if x==1:\r\n return 1\r\n else:\r\n return x+func(x-1)\r\n\r\n\r\nclass OPA(Method):\r\n \"\"\"Overlap population analysis.\"\"\"\r\n \r\n def __init__(self, *args):\r\n\r\n # Call the __init__ method of the superclass.\r\n super(OPA, self).__init__(logname=\"OPA\", *args)\r\n \r\n def __str__(self):\r\n \"\"\"Return a string representation of the object.\"\"\"\r\n return \"OPA of\" % (self.data)\r\n\r\n def __repr__(self):\r\n \"\"\"Return a representation of the object.\"\"\"\r\n return 'OPA(\"%s\")' % (self.data)\r\n \r\n def calculate(self, indices=None, fupdate=0.05):\r\n \"\"\"Perform an overlap population analysis given the results of a parser\"\"\"\r\n \r\n # Do we have the needed info in the ccData object?\r\n if not hasattr(self.data, \"mocoeffs\") \\\r\n and not ( hasattr(self.data, \"aooverlaps\") \\\r\n or hasattr(self.data, \"fooverlaps\") ) \\\r\n and not hasattr(self.data, \"nbasis\"):\r\n self.logger.error(\"Missing mocoeffs, aooverlaps/fooverlaps or nbasis\")\r\n return False #let the caller of function know we didn't finish\r\n\r\n if not indices:\r\n\r\n # Build list of groups of orbitals in each atom for atomresults.\r\n if hasattr(self.data, \"aonames\"):\r\n names = self.data.aonames\r\n elif hasattr(self.data, \"foonames\"):\r\n names = self.data.fonames\r\n\r\n atoms = []\r\n indices = []\r\n\r\n name = names[0].split('_')[0]\r\n atoms.append(name)\r\n indices.append([0])\r\n\r\n for i in range(1, len(names)):\r\n name = names[i].split('_')[0]\r\n try:\r\n index = atoms.index(name)\r\n except ValueError: #not found in atom list\r\n atoms.append(name)\r\n indices.append([i])\r\n else:\r\n indices[index].append(i)\r\n\r\n # Determine number of steps, and whether process involves beta orbitals.\r\n nfrag = len(indices) #nfrag\r\n nstep = func(nfrag - 1)\r\n unrestricted = (len(self.data.mocoeffs) == 2)\r\n alpha = len(self.data.mocoeffs[0])\r\n nbasis = self.data.nbasis\r\n\r\n self.logger.info(\"Creating attribute results: array[4]\")\r\n results= [ numpy.zeros([nfrag, nfrag, alpha], \"d\") ]\r\n if unrestricted:\r\n beta = len(self.data.mocoeffs[1])\r\n results.append(numpy.zeros([nfrag, nfrag, beta], \"d\"))\r\n nstep *= 2\r\n \r\n if hasattr(self.data, \"aooverlaps\"):\r\n overlap = self.data.aooverlaps\r\n elif hasattr(self.data,\"fooverlaps\"):\r\n overlap = self.data.fooverlaps\r\n\r\n #intialize progress if available\r\n if self.progress:\r\n self.progress.initialize(nstep)\r\n\r\n size = len(self.data.mocoeffs[0])\r\n step = 0\r\n\r\n preresults = []\r\n for spin in range(len(self.data.mocoeffs)):\r\n two = numpy.array([2.0]*len(self.data.mocoeffs[spin]),\"d\")\r\n\r\n\r\n # OP_{AB,i} = \\sum_{a in A} \\sum_{b in B} 2 c_{ai} c_{bi} S_{ab}\r\n\r\n for A in range(len(indices)-1):\r\n\r\n for B in range(A+1, len(indices)):\r\n\r\n if self.progress: #usually only a handful of updates, so remove random part\r\n self.progress.update(step, \"Overlap Population Analysis\")\r\n\r\n for a in indices[A]:\r\n\r\n ca = self.data.mocoeffs[spin][:,a]\r\n\r\n for b in indices[B]:\r\n \r\n cb = self.data.mocoeffs[spin][:,b]\r\n temp = ca * cb * two *overlap[a,b]\r\n 
results[spin][A,B] = numpy.add(results[spin][A,B],temp)\r\n results[spin][B,A] = numpy.add(results[spin][B,A],temp)\r\n\r\n step += 1\r\n\r\n temparray2 = numpy.swapaxes(results[0],1,2)\r\n self.results = [ numpy.swapaxes(temparray2,0,1) ]\r\n if unrestricted:\r\n temparray2 = numpy.swapaxes(results[1],1,2)\r\n self.results.append(numpy.swapaxes(temparray2, 0, 1))\r\n\r\n if self.progress:\r\n self.progress.update(nstep, \"Done\")\r\n\r\n return True\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import doctest, opa\r\n doctest.testmod(opa, verbose=False)\r\n" ]
[ [ "numpy.swapaxes", "numpy.add", "numpy.zeros" ] ]
simphide/Kaggle-2020-Alaska2
[ "3c1f5e8e564c9f04423beef69244fc74168f88ca" ]
[ "blends/blend6/blend6.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom scipy.stats import spearmanr\nfrom sklearn.metrics import matthews_corrcoef, ConfusionMatrixDisplay\nfrom alaska2.submissions import blend_predictions_ranked, blend_predictions_mean\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import plot_confusion_matrix\n\nv25_xl_NR_moreTTA = pd.read_csv(\"submission_v25_xl_NR_moreTTA.csv\").sort_values(by=\"Id\").reset_index()\nv25_xl_NR_moreTTA_b4mish = pd.read_csv(\"submission_v25_xl_NR_moreTTA_b4mish.csv\").sort_values(by=\"Id\").reset_index()\n\nmean_09406 = pd.read_csv(\"mean_0.9406_cls_Cf2cauc_Df1cauc_Df2cauc_Ff0cauc_Gf0cauc_Gf1cauc_Gf2cauc_Gf3cauc.csv\")\nxgb_cls_gs_09445 = pd.read_csv(\n \"xgb_cls_gs_0.9445_Cf2cauc_Df1cauc_Df2cauc_Ff0cauc_Gf0cauc_Gf1cauc_Gf2cauc_Gf3cauc_.csv\"\n)\n\n# Force 1.01 value of OOR values in my submission\noor_mask = v25_xl_NR_moreTTA.Label > 1.0\n\nmean_09406.loc[oor_mask, \"Label\"] = 1.01\nxgb_cls_gs_09445.loc[oor_mask, \"Label\"] = 1.01\n\nsubmissions = [v25_xl_NR_moreTTA, v25_xl_NR_moreTTA_b4mish, mean_09406, xgb_cls_gs_09445]\n\ncm = np.zeros((len(submissions), len(submissions)))\nfor i in range(len(submissions)):\n for j in range(len(submissions)):\n cm[i, j] = spearmanr(submissions[i].Label, submissions[j].Label).correlation\n\nprint(cm)\n\ndisp = ConfusionMatrixDisplay(\n confusion_matrix=cm,\n display_labels=[\"v25_xl_NR_moreTTA\", \"v25_xl_NR_moreTTA_b4mish\", \"mean_09406\", \"xgb_cls_gs_09445\"],\n)\nplt.figure(figsize=(8, 8))\ndisp.plot(include_values=True, cmap=\"Blues\", ax=plt.gca(), xticks_rotation=45)\nplt.show()\n\nblend_6_ranked = blend_predictions_ranked([v25_xl_NR_moreTTA_b4mish, xgb_cls_gs_09445])\nblend_6_ranked.to_csv(\"blend_6_ranked_v25_xl_NR_moreTTA_b4mish_with_xgb_cls_gs_09445.csv\", index=False)\n" ]
[ [ "sklearn.metrics.ConfusionMatrixDisplay", "pandas.read_csv", "matplotlib.pyplot.figure", "matplotlib.pyplot.gca", "matplotlib.pyplot.show", "scipy.stats.spearmanr" ] ]
ejbkdb/HackAuto
[ "5724ed5aa7e9e5fba4427dd95328f10e4a18710e" ]
[ "agents/blue_simple_agent.py" ]
[ "import numpy as np\nimport rospy\nfrom std_msgs.msg import Bool\nfrom geometry_msgs.msg import Point, Twist, Vector3\n\n# Global variables\nblue_center = Point()\nblue_flag = False\nblue_base = Point()\nred_base = Point()\nblue_twist = Twist()\ngame_over = False\naccumulated_error = 0.\nneutral_zone = False\n\n# Helper functions\ndef set_center(sphere_center):\n global blue_center\n blue_center = sphere_center\n return\n\ndef set_flag(flag_status):\n global blue_flag, neutral_zone\n # Logic for needing to go back through neutral zone\n if blue_flag != flag_status.data:\n neutral_zone = False\n blue_flag = flag_status.data\n return\n\ndef set_game_over(game_state):\n global game_over\n game_over = game_state.data\n return\n\ndef set_blue_base(base):\n global blue_base\n blue_base = base\n return\n\ndef set_red_base(base):\n global red_base\n red_base = base\n return\n\ndef yaw_vel_to_twist(yaw, vel): \n twist_msg = Twist() \n twist_msg.linear = Vector3(0, 0, 0) \n twist_msg.angular.x = np.cos(yaw) * vel \n twist_msg.angular.y = np.sin(yaw) * vel \n twist_msg.angular.z = 0 \n return twist_msg\n\ndef get_heading_and_distance():\n global blue_center, blue_flag, blue_base, red_base, neutral_zone\n if neutral_zone and blue_flag:\n # Have flag, go home\n target_x = blue_base.x\n target_y = blue_base.y\n elif not blue_flag and (neutral_zone != False):\n # Don't have flag, go to opponent's base\n target_x = red_base.x\n target_y = red_base.y\n else:\n # Haven't passed through neutral zone, go there\n target_x = (0.25 * (max(blue_base.x, red_base.x) \n - min(blue_base.x, red_base.x)) \n + min(blue_base.x, red_base.x))\n target_y = (0.25 * (max(blue_base.y, red_base.y) \n - min(blue_base.y, red_base.y)) \n + min(blue_base.y, red_base.y))\n delta_x = target_x - blue_center.x\n delta_y = target_y - blue_center.y\n print(\"[{}, {}]\".format(delta_x, delta_y))\n distance = np.sqrt(delta_x ** 2 + delta_y ** 2)\n if not neutral_zone and distance < 50:\n neutral_zone = True\n heading = np.arctan2(delta_y, delta_x)\n return heading, distance\n\n# Agent function\ndef proportional_control():\n global blue_twist, accumulated_error\n\n if blue_center != Point():\n heading, distance = get_heading_and_distance()\n heading = -heading # Switch from camera to world coordinates\n if distance < 100:\n accumulated_error = 0\n else:\n accumulated_error += distance\n speed = distance / 100. + accumulated_error / 10000.\n else:\n speed = 0\n heading = 0\n blue_twist = yaw_vel_to_twist(heading, speed)\n return\n\n# Init function\ndef simple_agent():\n global game_over\n # Setup ROS message handling\n rospy.init_node('blue_agent', anonymous=True)\n\n pub_blue_cmd = rospy.Publisher('/blue_sphero/twist_cmd', Twist, queue_size=1)\n sub_blue_center = rospy.Subscriber('/blue_sphero/center', Point, set_center, queue_size=1)\n sub_blue_flag = rospy.Subscriber('/blue_sphero/flag', Bool, set_flag, queue_size=1)\n sub_blue_base = rospy.Subscriber('/blue_sphero/base', Point, set_blue_base, queue_size=1)\n sub_red_base = rospy.Subscriber('/red_sphero/base', Point, set_red_base, queue_size=1)\n sub_game_over = rospy.Subscriber('/game_over', Bool, set_game_over, queue_size=1)\n\n # Agent control loop\n rate = rospy.Rate(2) # Hz\n while not rospy.is_shutdown():\n proportional_control()\n pub_blue_cmd.publish(blue_twist)\n if game_over != False:\n break\n rate.sleep()\n print(\"Game ended. No agent to save.\")\n return\n\nif __name__ == '__main__':\n try:\n simple_agent()\n except rospy.ROSInterruptException:\n pass\n\n" ]
[ [ "numpy.sqrt", "numpy.arctan2", "numpy.cos", "numpy.sin" ] ]
entelecheia/eKorpKit
[ "9521ae4c4749419fa2b088d1b9e518e5927b7cb8" ]
[ "ekorpkit/io/fetch/loader/esgreport.py" ]
[ "import os\nimport codecs\nimport pandas as pd\nfrom pathlib import Path\nfrom glob import glob\nfrom ekorpkit.utils.func import ordinal\n\n\nclass ESGReport:\n def __init__(self, name, output_dir, output_file, input_path, txt_info, **kwargs):\n\n self.name = name\n self.input_path = input_path\n self.txt_info = txt_info\n self.output_path = f\"{output_dir}/{output_file}\"\n os.makedirs(output_dir, exist_ok=True)\n\n self.parse_text_files()\n\n def parse_text_files(self):\n filepaths = glob(self.input_path, recursive=True)\n filepaths = [fp for fp in filepaths if Path(fp).is_file()]\n txt_info = self.txt_info\n\n initial_file_num = txt_info.get(\"initial_file_num\", 0)\n segment_separator = txt_info.get(\"segment_separator\", None)\n segment_separator = codecs.decode(segment_separator, \"unicode_escape\")\n doc_id_format = txt_info.get(\"doc_id_format\", None)\n file_num_increment = txt_info.get(\"file_num_increment\", 1)\n\n file_num = initial_file_num\n reports = []\n for i, file in enumerate(filepaths):\n file = Path(file)\n print(\" >> processing {} file: {}\".format(ordinal(i + 1), file.name))\n\n texts = file.open().read()\n for seg_num, doc in enumerate(texts.split(segment_separator)):\n doc = doc.strip()\n if len(doc) > 0:\n doc_id = doc_id_format.format(file_num=file_num, seg_num=seg_num)\n rpt = {\"doc_id\": doc_id, \"filename\": file.stem, \"text\": doc}\n reports.append(rpt)\n file_num += file_num_increment\n\n df = pd.DataFrame(reports)\n print(df.tail())\n df.to_csv(self.output_path, header=True, index=False)\n print(\n f\"Corpus [{self.name}] is built to [{self.output_path}] from [{self.input_path}]\"\n )\n" ]
[ [ "pandas.DataFrame" ] ]
RenatoTorres/Exode
[ "fd7f6f51a04a88d404dcbed34acd5b8c2f54e54a" ]
[ "Exode/UI/gardenGraph.py" ]
[ "'''\nGraph\n======\nThe :class:`Graph` widget is a widget for displaying plots. It supports\ndrawing multiple plot with different colors on the Graph. It also supports\naxes titles, ticks, labeled ticks, grids and a log or linear representation on\nboth the x and y axis, independently.\nTo display a plot. First create a graph which will function as a \"canvas\" for\nthe plots. Then create plot objects e.g. MeshLinePlot and add them to the\ngraph.\nTo create a graph with x-axis between 0-100, y-axis between -1 to 1, x and y\nlabels of and X and Y, respectively, x major and minor ticks every 25, 5 units,\nrespectively, y major ticks every 1 units, full x and y grids and with\na red line plot containing a sin wave on this range::\n from kivy.garden.graph import Graph, MeshLinePlot\n graph = Graph(xlabel='X', ylabel='Y', x_ticks_minor=5,\n x_ticks_major=25, y_ticks_major=1,\n y_grid_label=True, x_grid_label=True, padding=5,\n x_grid=True, y_grid=True, xmin=-0, xmax=100, ymin=-1, ymax=1)\n plot = MeshLinePlot(color=[1, 0, 0, 1])\n plot.points = [(x, sin(x / 10.)) for x in range(0, 101)]\n graph.add_plot(plot)\nThe MeshLinePlot plot is a particular plot which draws a set of points using\na mesh object. The points are given as a list of tuples, with each tuple\nbeing a (x, y) coordinate in the graph's units.\nYou can create different types of plots other than MeshLinePlot by inheriting\nfrom the Plot class and implementing the required functions. The Graph object\nprovides a \"canvas\" to which a Plot's instructions are added. The plot object\nis responsible for updating these instructions to show within the bounding\nbox of the graph the proper plot. The Graph notifies the Plot when it needs\nto be redrawn due to changes. See the MeshLinePlot class for how it is done.\nThe current availables plots are:\n * `MeshStemPlot`\n * `MeshLinePlot`\n * `SmoothLinePlot` - require Kivy 1.8.1\n.. 
note::\n The graph uses a stencil view to clip the plots to the graph display area.\n As with the stencil graphics instructions, you cannot stack more than 8\n stencil-aware widgets.\n'''\n\n__all__ = ('Graph', 'Plot', 'MeshLinePlot', 'MeshStemPlot', 'LinePlot', 'SmoothLinePlot', 'ContourPlot')\n__version__ = '0.4-dev'\n\nfrom math import radians\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.label import Label\nfrom kivy.uix.stencilview import StencilView\nfrom kivy.properties import NumericProperty, BooleanProperty,\\\n BoundedNumericProperty, StringProperty, ListProperty, ObjectProperty,\\\n DictProperty, AliasProperty\nfrom kivy.clock import Clock\nfrom kivy.graphics import Mesh, Color, Rectangle\nfrom kivy.graphics import Fbo\nfrom kivy.graphics.transformation import Matrix\nfrom kivy.graphics.texture import Texture\nfrom kivy.event import EventDispatcher\nfrom kivy.lang import Builder\nfrom kivy import metrics\nfrom math import log10, floor, ceil\nfrom decimal import Decimal\ntry:\n import numpy as np\nexcept ImportError as e:\n np = None\n\n\ndef identity(x):\n return x\n\ndef exp10(x):\n return 10 ** x\n\nBuilder.load_string('''\n#:kivy 1.1.0\n<RotateLabel>:\n canvas.before:\n PushMatrix\n MatrixInstruction:\n matrix: self.transform\n canvas.after:\n PopMatrix\n''')\n\n\nclass RotateLabel(Label):\n\n transform = ObjectProperty(Matrix())\n\n\nclass Graph(Widget):\n '''Graph class, see module documentation for more information.\n '''\n\n # triggers a full reload of graphics\n _trigger = ObjectProperty(None)\n # triggers only a repositioning of objects due to size/pos updates\n _trigger_size = ObjectProperty(None)\n # triggers only a update of colors, e.g. tick_color\n _trigger_color = ObjectProperty(None)\n # holds widget with the x-axis label\n _xlabel = ObjectProperty(None)\n # holds widget with the y-axis label\n _ylabel = ObjectProperty(None)\n # holds all the x-axis tick mark labels\n _x_grid_label = ListProperty([])\n # holds all the y-axis tick mark labels\n _y_grid_label = ListProperty([])\n # the mesh drawing all the ticks/grids\n _mesh_ticks = ObjectProperty(None)\n # the mesh which draws the surrounding rectangle\n _mesh_rect = ObjectProperty(None)\n # a list of locations of major and minor ticks. The values are not\n # but is in the axis min - max range\n _ticks_majorx = ListProperty([])\n _ticks_minorx = ListProperty([])\n _ticks_majory = ListProperty([])\n _ticks_minory = ListProperty([])\n\n tick_color = ListProperty([.25, .25, .25, 1])\n '''Color of the grid/ticks, default to 1/4. grey.\n '''\n\n background_color = ListProperty([0, 0, 0, 0])\n '''Color of the background, defaults to transparent\n '''\n\n border_color = ListProperty([1, 1, 1, 1])\n '''Color of the border, defaults to white\n '''\n\n label_options = DictProperty()\n '''Label options that will be passed to `:class:`kivy.uix.Label`.\n '''\n\n _with_stencilbuffer = BooleanProperty(True)\n '''Whether :class:`Graph`'s FBO should use FrameBuffer (True) or not (False).\n\n .. warning:: This property is internal and so should be used with care. 
It can break\n some other graphic instructions used by the :class:`Graph`, for example you can have\n problems when drawing :class:`SmoothLinePlot` plots, so use it only when you know\n what exactly you are doing.\n\n :data:`_with_stencilbuffer` is a :class:`~kivy.properties.BooleanProperty`, defaults\n to True.'''\n\n def __init__(self, **kwargs):\n super(Graph, self).__init__(**kwargs)\n\n with self.canvas:\n self._fbo = Fbo(size=self.size, with_stencilbuffer=self._with_stencilbuffer)\n\n with self._fbo:\n self._background_color = Color(*self.background_color)\n self._background_rect = Rectangle(size=self.size)\n self._mesh_ticks_color = Color(*self.tick_color)\n self._mesh_ticks = Mesh(mode='lines')\n self._mesh_rect_color = Color(*self.border_color)\n self._mesh_rect = Mesh(mode='line_strip')\n\n with self.canvas:\n Color(1, 1, 1)\n self._fbo_rect = Rectangle(size=self.size, texture=self._fbo.texture)\n\n mesh = self._mesh_rect\n mesh.vertices = [0] * (5 * 4)\n mesh.indices = range(5)\n\n self._plot_area = StencilView()\n self.add_widget(self._plot_area)\n\n t = self._trigger = Clock.create_trigger(self._redraw_all)\n ts = self._trigger_size = Clock.create_trigger(self._redraw_size)\n tc = self._trigger_color = Clock.create_trigger(self._update_colors)\n\n self.bind(center=ts, padding=ts, precision=ts, plots=ts, x_grid=ts,\n y_grid=ts, draw_border=ts)\n self.bind(xmin=t, xmax=t, xlog=t, x_ticks_major=t, x_ticks_minor=t,\n xlabel=t, x_grid_label=t, ymin=t, ymax=t, ylog=t,\n y_ticks_major=t, y_ticks_minor=t, ylabel=t, y_grid_label=t,\n font_size=t, label_options=t)\n self.bind(tick_color=tc, background_color=tc, border_color=tc)\n self._trigger()\n\n def add_widget(self, widget):\n if widget is self._plot_area:\n canvas = self.canvas\n self.canvas = self._fbo\n super(Graph, self).add_widget(widget)\n if widget is self._plot_area:\n self.canvas = canvas\n\n def remove_widget(self, widget):\n if widget is self._plot_area:\n canvas = self.canvas\n self.canvas = self._fbo\n super(Graph, self).remove_widget(widget)\n if widget is self._plot_area:\n self.canvas = canvas\n\n def _get_ticks(self, major, minor, log, s_min, s_max):\n if major and s_max > s_min:\n if log:\n s_min = log10(s_min)\n s_max = log10(s_max)\n # count the decades in min - max. This is in actual decades,\n # not logs.\n n_decades = floor(s_max - s_min)\n # for the fractional part of the last decade, we need to\n # convert the log value, x, to 10**x but need to handle\n # differently if the last incomplete decade has a decade\n # boundary in it\n if floor(s_min + n_decades) != floor(s_max):\n n_decades += 1 - (10 ** (s_min + n_decades + 1) - 10 **\n s_max) / 10 ** floor(s_max + 1)\n else:\n n_decades += ((10 ** s_max - 10 ** (s_min + n_decades)) /\n 10 ** floor(s_max + 1))\n # this might be larger than what is needed, but we delete\n # excess later\n n_ticks_major = n_decades / float(major)\n n_ticks = int(floor(n_ticks_major * (minor if minor >=\n 1. else 1.0))) + 2\n # in decade multiples, e.g. 0.1 of the decade, the distance\n # between ticks\n decade_dist = major / float(minor if minor else 1.0)\n\n points_minor = [0] * n_ticks\n points_major = [0] * n_ticks\n k = 0 # position in points major\n k2 = 0 # position in points minor\n # because each decade is missing 0.1 of the decade, if a tick\n # falls in < min_pos skip it\n min_pos = 0.1 - 0.00001 * decade_dist\n s_min_low = floor(s_min)\n # first real tick location. 
value is in fractions of decades\n # from the start we have to use decimals here, otherwise\n # floating point inaccuracies results in bad values\n start_dec = ceil((10 ** Decimal(s_min - s_min_low - 1)) /\n Decimal(decade_dist)) * decade_dist\n count_min = (0 if not minor else\n floor(start_dec / decade_dist) % minor)\n start_dec += s_min_low\n count = 0 # number of ticks we currently have passed start\n while True:\n # this is the current position in decade that we are.\n # e.g. -0.9 means that we're at 0.1 of the 10**ceil(-0.9)\n # decade\n pos_dec = start_dec + decade_dist * count\n pos_dec_low = floor(pos_dec)\n diff = pos_dec - pos_dec_low\n zero = abs(diff) < 0.001 * decade_dist\n if zero:\n # the same value as pos_dec but in log scale\n pos_log = pos_dec_low\n else:\n pos_log = log10((pos_dec - pos_dec_low\n ) * 10 ** ceil(pos_dec))\n if pos_log > s_max:\n break\n count += 1\n if zero or diff >= min_pos:\n if minor and not count_min % minor:\n points_major[k] = pos_log\n k += 1\n else:\n points_minor[k2] = pos_log\n k2 += 1\n count_min += 1\n #n_ticks = len(points)\n else:\n # distance between each tick\n tick_dist = major / float(minor if minor else 1.0)\n n_ticks = int(floor((s_max - s_min) / tick_dist) + 1)\n points_major = [0] * int(floor((s_max - s_min) / float(major))\n + 1)\n points_minor = [0] * (n_ticks - len(points_major) + 1)\n k = 0 # position in points major\n k2 = 0 # position in points minor\n for m in range(0, n_ticks):\n if minor and m % minor:\n points_minor[k2] = m * tick_dist + s_min\n k2 += 1\n else:\n points_major[k] = m * tick_dist + s_min\n k += 1\n del points_major[k:]\n del points_minor[k2:]\n else:\n points_major = []\n points_minor = []\n return points_major, points_minor\n\n def _update_labels(self):\n xlabel = self._xlabel\n ylabel = self._ylabel\n x = self.x\n y = self.y\n width = self.width\n height = self.height\n padding = self.padding\n x_next = padding + x\n y_next = padding + y\n xextent = width + x\n yextent = height + y\n ymin = self.ymin\n ymax = self.ymax\n xmin = self.xmin\n precision = self.precision\n x_overlap = False\n y_overlap = False\n # set up x and y axis labels\n if xlabel:\n xlabel.text = self.xlabel\n xlabel.texture_update()\n xlabel.size = xlabel.texture_size\n xlabel.pos = int(x + width / 2. - xlabel.width / 2.), int(padding + y)\n y_next += padding + xlabel.height\n if ylabel:\n ylabel.text = self.ylabel\n ylabel.texture_update()\n ylabel.size = ylabel.texture_size\n ylabel.x = padding + x - (ylabel.width / 2. 
- ylabel.height / 2.)\n x_next += padding + ylabel.height\n xpoints = self._ticks_majorx\n xlabels = self._x_grid_label\n xlabel_grid = self.x_grid_label\n ylabel_grid = self.y_grid_label\n ypoints = self._ticks_majory\n ylabels = self._y_grid_label\n # now x and y tick mark labels\n if len(ylabels) and ylabel_grid:\n # horizontal size of the largest tick label, to have enough room\n funcexp = exp10 if self.ylog else identity\n funclog = log10 if self.ylog else identity\n ylabels[0].text = precision % funcexp(ypoints[0])\n ylabels[0].texture_update()\n y1 = ylabels[0].texture_size\n y_start = y_next + (padding + y1[1] if len(xlabels) and xlabel_grid\n else 0) + \\\n (padding + y1[1] if not y_next else 0)\n yextent = y + height - padding - y1[1] / 2.\n\n ymin = funclog(ymin)\n ratio = (yextent - y_start) / float(funclog(ymax) - ymin)\n y_start -= y1[1] / 2.\n y1 = y1[0]\n for k in range(len(ylabels)):\n ylabels[k].text = precision % funcexp(ypoints[k])\n ylabels[k].texture_update()\n ylabels[k].size = ylabels[k].texture_size\n y1 = max(y1, ylabels[k].texture_size[0])\n ylabels[k].pos = tuple(map(int, (x_next, y_start +\n (ypoints[k] - ymin) * ratio)))\n if len(ylabels) > 1 and ylabels[0].top > ylabels[1].y:\n y_overlap = True\n else:\n x_next += y1 + padding\n if len(xlabels) and xlabel_grid:\n funcexp = exp10 if self.xlog else identity\n funclog = log10 if self.xlog else identity\n # find the distance from the end that'll fit the last tick label\n xlabels[0].text = precision % funcexp(xpoints[-1])\n xlabels[0].texture_update()\n xextent = x + width - xlabels[0].texture_size[0] / 2. - padding\n # find the distance from the start that'll fit the first tick label\n if not x_next:\n xlabels[0].text = precision % funcexp(xpoints[0])\n xlabels[0].texture_update()\n x_next = padding + xlabels[0].texture_size[0] / 2.\n xmin = funclog(xmin)\n ratio = (xextent - x_next) / float(funclog(self.xmax) - xmin)\n right = -1\n for k in range(len(xlabels)):\n xlabels[k].text = precision % funcexp(xpoints[k])\n # update the size so we can center the labels on ticks\n xlabels[k].texture_update()\n xlabels[k].size = xlabels[k].texture_size\n xlabels[k].pos = tuple(map(int, (x_next + (xpoints[k] - xmin)\n * ratio - xlabels[k].texture_size[0] / 2., y_next)))\n if xlabels[k].x < right:\n x_overlap = True\n break\n right = xlabels[k].right\n if not x_overlap:\n y_next += padding + xlabels[0].texture_size[1]\n # now re-center the x and y axis labels\n if xlabel:\n xlabel.x = int(x_next + (xextent - x_next) / 2. - xlabel.width / 2.)\n if ylabel:\n ylabel.y = int(y_next + (yextent - y_next) / 2. 
- ylabel.height / 2.)\n t = Matrix().translate(ylabel.center[0], ylabel.center[1], 0)\n t = t.multiply(Matrix().rotate(-radians(270), 0, 0, 1))\n ylabel.transform = t.multiply(\n Matrix().translate(\n -int(ylabel.center_x),\n -int(ylabel.center_y),\n 0))\n if x_overlap:\n for k in range(len(xlabels)):\n xlabels[k].text = ''\n if y_overlap:\n for k in range(len(ylabels)):\n ylabels[k].text = ''\n return x_next - x, y_next - y, xextent - x, yextent - y\n\n def _update_ticks(self, size):\n # re-compute the positions of the bounding rectangle\n mesh = self._mesh_rect\n vert = mesh.vertices\n if self.draw_border:\n s0, s1, s2, s3 = size\n vert[0] = s0\n vert[1] = s1\n vert[4] = s2\n vert[5] = s1\n vert[8] = s2\n vert[9] = s3\n vert[12] = s0\n vert[13] = s3\n vert[16] = s0\n vert[17] = s1\n else:\n vert[0:18] = [0 for k in range(18)]\n mesh.vertices = vert\n # re-compute the positions of the x/y axis ticks\n mesh = self._mesh_ticks\n vert = mesh.vertices\n start = 0\n xpoints = self._ticks_majorx\n ypoints = self._ticks_majory\n xpoints2 = self._ticks_minorx\n ypoints2 = self._ticks_minory\n ylog = self.ylog\n xlog = self.xlog\n xmin = self.xmin\n xmax = self.xmax\n if xlog:\n xmin = log10(xmin)\n xmax = log10(xmax)\n ymin = self.ymin\n ymax = self.ymax\n if ylog:\n ymin = log10(ymin)\n ymax = log10(ymax)\n if len(xpoints):\n top = size[3] if self.x_grid else metrics.dp(12) + size[1]\n ratio = (size[2] - size[0]) / float(xmax - xmin)\n for k in range(start, len(xpoints) + start):\n vert[k * 8] = size[0] + (xpoints[k - start] - xmin) * ratio\n vert[k * 8 + 1] = size[1]\n vert[k * 8 + 4] = vert[k * 8]\n vert[k * 8 + 5] = top\n start += len(xpoints)\n if len(xpoints2):\n top = metrics.dp(8) + size[1]\n ratio = (size[2] - size[0]) / float(xmax - xmin)\n for k in range(start, len(xpoints2) + start):\n vert[k * 8] = size[0] + (xpoints2[k - start] - xmin) * ratio\n vert[k * 8 + 1] = size[1]\n vert[k * 8 + 4] = vert[k * 8]\n vert[k * 8 + 5] = top\n start += len(xpoints2)\n if len(ypoints):\n top = size[2] if self.y_grid else metrics.dp(12) + size[0]\n ratio = (size[3] - size[1]) / float(ymax - ymin)\n for k in range(start, len(ypoints) + start):\n vert[k * 8 + 1] = size[1] + (ypoints[k - start] - ymin) * ratio\n vert[k * 8 + 5] = vert[k * 8 + 1]\n vert[k * 8] = size[0]\n vert[k * 8 + 4] = top\n start += len(ypoints)\n if len(ypoints2):\n top = metrics.dp(8) + size[0]\n ratio = (size[3] - size[1]) / float(ymax - ymin)\n for k in range(start, len(ypoints2) + start):\n vert[k * 8 + 1] = size[1] + (ypoints2[k - start] - ymin) * ratio\n vert[k * 8 + 5] = vert[k * 8 + 1]\n vert[k * 8] = size[0]\n vert[k * 8 + 4] = top\n mesh.vertices = vert\n\n def _update_plots(self, size):\n ylog = self.ylog\n xlog = self.xlog\n xmin = self.xmin\n xmax = self.xmax\n ymin = self.ymin\n ymax = self.ymax\n for plot in self.plots:\n plot._update(xlog, xmin, xmax, ylog, ymin, ymax, size)\n\n def _update_colors(self, *args):\n self._mesh_ticks_color.rgba = tuple(self.tick_color)\n self._background_color.rgba = tuple(self.background_color)\n self._mesh_rect_color.rgba = tuple(self.border_color)\n\n def _redraw_all(self, *args):\n # add/remove all the required labels\n font_size = self.font_size\n if self.xlabel:\n if not self._xlabel:\n xlabel = Label(font_size=font_size, **self.label_options)\n self.add_widget(xlabel)\n self._xlabel = xlabel\n else:\n xlabel = self._xlabel\n if xlabel:\n self.remove_widget(xlabel)\n self._xlabel = None\n grids = self._x_grid_label\n xpoints_major, xpoints_minor = 
self._get_ticks(self.x_ticks_major,\n self.x_ticks_minor,\n self.xlog, self.xmin,\n self.xmax)\n self._ticks_majorx = xpoints_major\n self._ticks_minorx = xpoints_minor\n if not self.x_grid_label:\n n_labels = 0\n else:\n n_labels = len(xpoints_major)\n for k in range(n_labels, len(grids)):\n self.remove_widget(grids[k])\n del grids[n_labels:]\n grid_len = len(grids)\n grids.extend([None] * (n_labels - len(grids)))\n for k in range(grid_len, n_labels):\n grids[k] = Label(font_size=font_size, **self.label_options)\n self.add_widget(grids[k])\n\n if self.ylabel:\n if not self._ylabel:\n ylabel = RotateLabel(font_size=font_size, **self.label_options)\n self.add_widget(ylabel)\n self._ylabel = ylabel\n else:\n ylabel = self._ylabel\n if ylabel:\n self.remove_widget(ylabel)\n self._ylabel = None\n grids = self._y_grid_label\n ypoints_major, ypoints_minor = self._get_ticks(self.y_ticks_major,\n self.y_ticks_minor,\n self.ylog, self.ymin,\n self.ymax)\n self._ticks_majory = ypoints_major\n self._ticks_minory = ypoints_minor\n if not self.y_grid_label:\n n_labels = 0\n else:\n n_labels = len(ypoints_major)\n for k in range(n_labels, len(grids)):\n self.remove_widget(grids[k])\n del grids[n_labels:]\n grid_len = len(grids)\n grids.extend([None] * (n_labels - len(grids)))\n for k in range(grid_len, n_labels):\n grids[k] = Label(font_size=font_size, **self.label_options)\n self.add_widget(grids[k])\n\n mesh = self._mesh_ticks\n n_points = (len(xpoints_major) + len(xpoints_minor) +\n len(ypoints_major) + len(ypoints_minor))\n mesh.vertices = [0] * (n_points * 8)\n mesh.indices = [k for k in range(n_points * 2)]\n self._redraw_size()\n\n def _redraw_size(self, *args):\n # size a 4-tuple describing the bounding box in which we can draw\n # graphs, it's (x0, y0, x1, y1), which correspond with the bottom left\n # and top right corner locations, respectively\n size = self._update_labels()\n self._plot_area.pos = (size[0], size[1])\n self._plot_area.size = (size[2] - size[0], size[3] - size[1])\n self._fbo.size = self.size\n self._fbo_rect.texture = self._fbo.texture\n self._fbo_rect.size = self.size\n self._fbo_rect.pos = self.pos\n self._background_rect.size = self.size\n self._update_ticks(size)\n self._update_plots(size)\n\n def _clear_buffer(self, *largs):\n fbo = self._fbo\n fbo.bind()\n fbo.clear_buffer()\n fbo.release()\n\n def add_plot(self, plot):\n '''Add a new plot to this graph.\n :Parameters:\n `plot`:\n Plot to add to this graph.\n >>> graph = Graph()\n >>> plot = MeshLinePlot(mode='line_strip', color=[1, 0, 0, 1])\n >>> plot.points = [(x / 10., sin(x / 50.)) for x in range(-0, 101)]\n >>> graph.add_plot(plot)\n '''\n if plot in self.plots:\n return\n add = self._plot_area.canvas.add\n for instr in plot.get_drawings():\n add(instr)\n plot.bind(on_clear_plot=self._clear_buffer)\n self.plots.append(plot)\n\n def remove_plot(self, plot):\n '''Remove a plot from this graph.\n :Parameters:\n `plot`:\n Plot to remove from this graph.\n >>> graph = Graph()\n >>> plot = MeshLinePlot(mode='line_strip', color=[1, 0, 0, 1])\n >>> plot.points = [(x / 10., sin(x / 50.)) for x in range(-0, 101)]\n >>> graph.add_plot(plot)\n >>> graph.remove_plot(plot)\n '''\n if plot not in self.plots:\n return\n remove = self._plot_area.canvas.remove\n for instr in plot.get_drawings():\n remove(instr)\n plot.unbind(on_clear_plot=self._clear_buffer)\n self.plots.remove(plot)\n\n def collide_plot(self, x, y):\n '''Determine if the given coordinates fall inside the plot area.\n :Parameters:\n `x, y`:\n The coordinates to 
test (in window coords).\n '''\n adj_x, adj_y = x - self._plot_area.pos[0], y - self._plot_area.pos[1]\n return 0 <= adj_x <= self._plot_area.size[0] \\\n and 0 <= adj_y <= self._plot_area.size[1]\n\n def to_data(self, x, y):\n '''Convert window coords to data coords.\n\n :Parameters:\n `x, y`:\n The coordinates to convert (in window coords).\n '''\n adj_x = float(x - self._plot_area.pos[0])\n adj_y = float(y - self._plot_area.pos[1])\n norm_x = adj_x / self._plot_area.size[0]\n norm_y = adj_y / self._plot_area.size[1]\n if self.xlog:\n xmin, xmax = log10(self.xmin), log10(self.xmax)\n conv_x = 10.**(norm_x * (xmax - xmin) + xmin)\n else:\n conv_x = norm_x * (self.xmax - self.xmin) + self.xmin\n if self.ylog:\n ymin, ymax = log10(self.ymin), log10(self.ymax)\n conv_y = 10.**(norm_y * (ymax - ymin) + ymin)\n else:\n conv_y = norm_y * (self.ymax - self.ymin) + self.ymin\n return [conv_x, conv_y]\n\n xmin = NumericProperty(0.)\n '''The x-axis minimum value.\n If :data:`xlog` is True, xmin must be larger than zero.\n :data:`xmin` is a :class:`~kivy.properties.NumericProperty`, defaults to 0.\n '''\n\n xmax = NumericProperty(100.)\n '''The x-axis maximum value, larger than xmin.\n :data:`xmax` is a :class:`~kivy.properties.NumericProperty`, defaults to 100.\n '''\n\n xlog = BooleanProperty(False)\n '''Determines whether the x-axis should be displayed logarithmically (True)\n or linearly (False).\n :data:`xlog` is a :class:`~kivy.properties.BooleanProperty`, defaults\n to False.\n '''\n\n x_ticks_major = BoundedNumericProperty(0, min=0)\n '''Distance between major tick marks on the x-axis.\n Determines the distance between the major tick marks. Major tick marks\n start from min and re-occur at every ticks_major until :data:`xmax`.\n If :data:`xmax` doesn't overlap with an integer multiple of ticks_major,\n no tick will occur at :data:`xmax`. Zero indicates no tick marks.\n If :data:`xlog` is true, then this indicates the distance between ticks\n in multiples of current decade. E.g. if :data:`xmin` is 0.1 and\n ticks_major is 0.1, it means there will be a tick at every 10th of the\n decade, i.e. 0.1 ... 0.9, 1, 2... If it is 0.3, the ticks will occur at\n 0.1, 0.3, 0.6, 0.9, 2, 5, 8, 10. You'll notice that it went from 8 to 10\n instead of to 20, that's so that we can say 0.5 and have ticks at every\n half decade, e.g. 0.1, 0.5, 1, 5, 10, 50... Similarly, if ticks_major is\n 1.5, there will be ticks at 0.1, 5, 100, 5,000... Also notice, that there's\n always a major tick at the start. Finally, if e.g. :data:`xmin` is 0.6\n and this is 0.5, there will be ticks at 0.6, 1, 5...\n :data:`x_ticks_major` is a\n :class:`~kivy.properties.BoundedNumericProperty`, defaults to 0.\n '''\n\n x_ticks_minor = BoundedNumericProperty(0, min=0)\n '''The number of sub-intervals that divide x_ticks_major.\n Determines the number of sub-intervals into which ticks_major is divided,\n if non-zero. The actual number of minor ticks between the major ticks is\n ticks_minor - 1. Only used if ticks_major is non-zero. If there's no major\n tick at xmax then the number of minor ticks after the last major\n tick will be however many ticks fit until xmax.\n If self.xlog is true, then this indicates the number of intervals the\n distance between major ticks is divided. The result is the number of\n multiples of decades between ticks. I.e. if ticks_minor is 10, then if\n ticks_major is 1, there will be ticks at 0.1, 0.2...0.9, 1, 2, 3... If\n ticks_major is 0.3, ticks will occur at 0.1, 0.12, 0.15, 0.18... 
Finally,\n as is common, if ticks major is 1, and ticks minor is 5, there will be\n ticks at 0.1, 0.2, 0.4... 0.8, 1, 2...\n :data:`x_ticks_minor` is a\n :class:`~kivy.properties.BoundedNumericProperty`, defaults to 0.\n '''\n\n x_grid = BooleanProperty(False)\n '''Determines whether the x-axis has tick marks or a full grid.\n If :data:`x_ticks_major` is non-zero, then if x_grid is False tick marks\n will be displayed at every major tick. If x_grid is True, instead of ticks,\n a vertical line will be displayed at every major tick.\n :data:`x_grid` is a :class:`~kivy.properties.BooleanProperty`, defaults\n to False.\n '''\n\n x_grid_label = BooleanProperty(False)\n '''Whether labels should be displayed beneath each major tick. If true,\n each major tick will have a label containing the axis value.\n :data:`x_grid_label` is a :class:`~kivy.properties.BooleanProperty`,\n defaults to False.\n '''\n\n xlabel = StringProperty('')\n '''The label for the x-axis. If not empty it is displayed in the center of\n the axis.\n :data:`xlabel` is a :class:`~kivy.properties.StringProperty`,\n defaults to ''.\n '''\n\n ymin = NumericProperty(0.)\n '''The y-axis minimum value.\n If :data:`ylog` is True, ymin must be larger than zero.\n :data:`ymin` is a :class:`~kivy.properties.NumericProperty`, defaults to 0.\n '''\n\n ymax = NumericProperty(100.)\n '''The y-axis maximum value, larger than ymin.\n :data:`ymax` is a :class:`~kivy.properties.NumericProperty`, defaults to 100.\n '''\n\n ylog = BooleanProperty(False)\n '''Determines whether the y-axis should be displayed logarithmically (True)\n or linearly (False).\n :data:`ylog` is a :class:`~kivy.properties.BooleanProperty`, defaults\n to False.\n '''\n\n y_ticks_major = BoundedNumericProperty(0, min=0)\n '''Distance between major tick marks. See :data:`x_ticks_major`.\n :data:`y_ticks_major` is a\n :class:`~kivy.properties.BoundedNumericProperty`, defaults to 0.\n '''\n\n y_ticks_minor = BoundedNumericProperty(0, min=0)\n '''The number of sub-intervals that divide ticks_major.\n See :data:`x_ticks_minor`.\n :data:`y_ticks_minor` is a\n :class:`~kivy.properties.BoundedNumericProperty`, defaults to 0.\n '''\n\n y_grid = BooleanProperty(False)\n '''Determines whether the y-axis has tick marks or a full grid. See\n :data:`x_grid`.\n :data:`y_grid` is a :class:`~kivy.properties.BooleanProperty`, defaults\n to False.\n '''\n\n y_grid_label = BooleanProperty(False)\n '''Whether labels should be displayed beneath each major tick. If true,\n each major tick will have a label containing the axis value.\n :data:`y_grid_label` is a :class:`~kivy.properties.BooleanProperty`,\n defaults to False.\n '''\n\n ylabel = StringProperty('')\n '''The label for the y-axis. If not empty it is displayed in the center of\n the axis.\n :data:`ylabel` is a :class:`~kivy.properties.StringProperty`,\n defaults to ''.\n '''\n\n padding = NumericProperty('5dp')\n '''Padding distances between the labels, axes titles and graph, as\n well as between the widget and the objects near the boundaries.\n :data:`padding` is a :class:`~kivy.properties.NumericProperty`, defaults\n to 5dp.\n '''\n\n font_size = NumericProperty('15sp')\n '''Font size of the labels.\n :data:`font_size` is a :class:`~kivy.properties.NumericProperty`, defaults\n to 15sp.\n '''\n\n precision = StringProperty('%g')\n '''Determines the numerical precision of the tick mark labels. This value\n governs how the numbers are converted into string representation. 
Accepted\n values are those listed in Python's manual in the\n \"String Formatting Operations\" section.\n :data:`precision` is a :class:`~kivy.properties.StringProperty`, defaults\n to '%g'.\n '''\n\n draw_border = BooleanProperty(True)\n '''Whether a border is drawn around the canvas of the graph where the\n plots are displayed.\n :data:`draw_border` is a :class:`~kivy.properties.BooleanProperty`,\n defaults to True.\n '''\n\n plots = ListProperty([])\n '''Holds a list of all the plots in the graph. To add and remove plots\n from the graph use :data:`add_plot` and :data:`remove_plot`. Do not\n directly edit this list.\n :data:`plots` is a :class:`~kivy.properties.ListProperty`,\n defaults to [].\n '''\n\n\nclass Plot(EventDispatcher):\n '''Plot class, see module documentation for more information.\n :Events:\n `on_clear_plot`\n Fired before a plot updates the display and lets the fbo know that\n it should clear the old drawings.\n .. versionadded:: 0.4\n '''\n\n __events__ = ('on_clear_plot', )\n\n # most recent values of the params used to draw the plot\n params = DictProperty({'xlog': False, 'xmin': 0, 'xmax': 100,\n 'ylog': False, 'ymin': 0, 'ymax': 100,\n 'size': (0, 0, 0, 0)})\n\n color = ListProperty([1, 1, 1, 1])\n '''Color of the plot.\n '''\n\n points = ListProperty([])\n '''List of (x, y) points to be displayed in the plot.\n The elements of points are 2-tuples, (x, y). The points are displayed\n based on the mode setting.\n :data:`points` is a :class:`~kivy.properties.ListProperty`, defaults to\n [].\n '''\n\n def __init__(self, **kwargs):\n super(Plot, self).__init__(**kwargs)\n self.ask_draw = Clock.create_trigger(self.draw)\n self.bind(params=self.ask_draw, points=self.ask_draw)\n self._drawings = self.create_drawings()\n\n # this function is called by graph whenever any of the parameters\n # change. The plot should be recalculated then.\n # log, min, max indicate the axis settings.\n # size a 4-tuple describing the bounding box in which we can draw\n # graphs, it's (x0, y0, x1, y1), which correspond with the bottom left\n # and top right corner locations, respectively.\n def update(self, xlog, xmin, xmax, ylog, ymin, ymax, size):\n self.params.update({\n 'xlog': xlog, 'xmin': xmin, 'xmax': xmax, 'ylog': ylog,\n 'ymin': ymin, 'ymax': ymax, 'size': size})\n\n # returns a string which is unique and is the group name given to all the\n # instructions returned by _get_drawings. Graph uses this to remove\n # these instructions when needed.\n def get_group(self):\n return ''\n\n # returns a list of canvas instructions that will be added to the graph's\n # canvas.\n def get_drawings(self):\n if isinstance(self._drawings, (tuple, list)):\n return self._drawings\n return []\n\n # method called once to create all the canvas instructions needed for the\n # plot\n def create_drawings(self):\n pass\n\n # draw the plot according to the params. 
It dispatches on_clear_plot\n # so derived classes should call super before updating.\n def draw(self, *largs):\n self.dispatch('on_clear_plot')\n\n def iterate_points(self):\n '''Iterate on all the points adjusted to the graph settings\n '''\n params = self._params\n funcx = log10 if params['xlog'] else lambda x: x\n funcy = log10 if params['ylog'] else lambda x: x\n xmin = funcx(params['xmin'])\n ymin = funcy(params['ymin'])\n size = params['size']\n ratiox = (size[2] - size[0]) / float(funcx(params['xmax']) - xmin)\n ratioy = (size[3] - size[1]) / float(funcy(params['ymax']) - ymin)\n for x, y in self.points:\n yield (\n (funcx(x) - xmin) * ratiox + size[0],\n (funcy(y) - ymin) * ratioy + size[1])\n\n def on_clear_plot(self, *largs):\n pass\n\n # compatibility layer\n _update = update\n _get_drawings = get_drawings\n _params = params\n\n\nclass MeshLinePlot(Plot):\n '''MeshLinePlot class which displays a set of points similar to a mesh.\n '''\n\n def create_drawings(self):\n self._color = Color(*self.color)\n self._mesh = Mesh(mode='line_strip')\n self.bind(color=lambda instr, value: setattr(self._color, \"rgba\", value))\n return [self._color, self._mesh]\n\n def draw(self, *args):\n super(MeshLinePlot, self).draw(*args)\n points = self.points\n mesh = self._mesh\n vert = mesh.vertices\n ind = mesh.indices\n params = self._params\n funcx = log10 if params['xlog'] else lambda x: x\n funcy = log10 if params['ylog'] else lambda x: x\n xmin = funcx(params['xmin'])\n ymin = funcy(params['ymin'])\n diff = len(points) - len(vert) // 4\n size = params['size']\n ratiox = (size[2] - size[0]) / float(funcx(params['xmax']) - xmin)\n ratioy = (size[3] - size[1]) / float(funcy(params['ymax']) - ymin)\n if diff < 0:\n del vert[4 * len(points):]\n del ind[len(points):]\n elif diff > 0:\n ind.extend(range(len(ind), len(ind) + diff))\n vert.extend([0] * (diff * 4))\n for k in range(len(points)):\n vert[k * 4] = (funcx(points[k][0]) - xmin) * ratiox + size[0]\n vert[k * 4 + 1] = (funcy(points[k][1]) - ymin) * ratioy + size[1]\n mesh.vertices = vert\n\n def _set_mode(self, value):\n if hasattr(self, '_mesh'):\n self._mesh.mode = value\n mode = AliasProperty(lambda self: self._mesh.mode, _set_mode)\n '''VBO Mode used for drawing the points. Can be one of: 'points',\n 'line_strip', 'line_loop', 'lines', 'triangle_strip', 'triangle_fan'.\n See :class:`~kivy.graphics.Mesh` for more details.\n Defaults to 'line_strip'.\n '''\n\n\nclass MeshStemPlot(MeshLinePlot):\n '''MeshStemPlot uses the MeshLinePlot class to draw a stem plot. 
The data\n provided is graphed from origin to the data point.\n '''\n\n def draw(self, *args):\n super(MeshStemPlot, self).draw(*args)\n points = self.points\n mesh = self._mesh\n self._mesh.mode = 'lines'\n vert = mesh.vertices\n ind = mesh.indices\n params = self._params\n funcx = log10 if params['xlog'] else lambda x: x\n funcy = log10 if params['ylog'] else lambda x: x\n xmin = funcx(params['xmin'])\n ymin = funcy(params['ymin'])\n diff = len(points) * 2 - len(vert) // 4\n size = params['size']\n ratiox = (size[2] - size[0]) / float(funcx(params['xmax']) - xmin)\n ratioy = (size[3] - size[1]) / float(funcy(params['ymax']) - ymin)\n if diff < 0:\n # a stem plot uses two vertices (8 floats) per point\n del vert[8 * len(points):]\n del ind[2 * len(points):]\n elif diff > 0:\n ind.extend(range(len(ind), len(ind) + diff))\n vert.extend([0] * (diff * 4))\n for k in range(len(points)):\n vert[k * 8] = (funcx(points[k][0]) - xmin) * ratiox + size[0]\n vert[k * 8 + 1] = (0 - ymin) * ratioy + size[1]\n vert[k * 8 + 4] = (funcx(points[k][0]) - xmin) * ratiox + size[0]\n vert[k * 8 + 5] = (funcy(points[k][1]) - ymin) * ratioy + size[1]\n mesh.vertices = vert\n\n\nclass LinePlot(Plot):\n '''LinePlot draws using a standard Line object.\n '''\n\n '''Args:\n line_width (float) - the width of the graph line\n '''\n def __init__(self, **kwargs):\n self._line_width = kwargs.get('line_width', 1)\n super(LinePlot, self).__init__(**kwargs)\n\n def create_drawings(self):\n from kivy.graphics import Line, RenderContext\n\n self._grc = RenderContext(\n use_parent_modelview=True,\n use_parent_projection=True)\n with self._grc:\n self._gcolor = Color(*self.color)\n self._gline = Line(points=[], cap='none', width=self._line_width, joint='round')\n\n return [self._grc]\n\n def draw(self, *args):\n super(LinePlot, self).draw(*args)\n # flatten the list\n points = []\n for x, y in self.iterate_points():\n points += [x, y]\n self._gline.points = points\n\nclass SmoothLinePlot(Plot):\n '''Smooth Plot class, see module documentation for more information.\n This plot uses a specific fragment shader for a custom anti aliasing.\n '''\n\n SMOOTH_FS = '''\n $HEADER$\n void main(void) {\n float edgewidth = 0.015625 * 64.;\n float t = texture2D(texture0, tex_coord0).r;\n float e = smoothstep(0., edgewidth, t);\n gl_FragColor = frag_color * vec4(1, 1, 1, e);\n }\n '''\n\n # XXX This gradient data is a 64x1 RGB image, and\n # values go from 0 -> 255 -> 0.\n GRADIENT_DATA = (\n b\"\\x00\\x00\\x00\\x07\\x07\\x07\\x0f\\x0f\\x0f\\x17\\x17\\x17\\x1f\\x1f\\x1f\"\n b\"'''///777???GGGOOOWWW___gggooowww\\x7f\\x7f\\x7f\\x87\\x87\\x87\"\n b\"\\x8f\\x8f\\x8f\\x97\\x97\\x97\\x9f\\x9f\\x9f\\xa7\\xa7\\xa7\\xaf\\xaf\\xaf\"\n b\"\\xb7\\xb7\\xb7\\xbf\\xbf\\xbf\\xc7\\xc7\\xc7\\xcf\\xcf\\xcf\\xd7\\xd7\\xd7\"\n b\"\\xdf\\xdf\\xdf\\xe7\\xe7\\xe7\\xef\\xef\\xef\\xf7\\xf7\\xf7\\xff\\xff\\xff\"\n b\"\\xf6\\xf6\\xf6\\xee\\xee\\xee\\xe6\\xe6\\xe6\\xde\\xde\\xde\\xd5\\xd5\\xd5\"\n b\"\\xcd\\xcd\\xcd\\xc5\\xc5\\xc5\\xbd\\xbd\\xbd\\xb4\\xb4\\xb4\\xac\\xac\\xac\"\n b\"\\xa4\\xa4\\xa4\\x9c\\x9c\\x9c\\x94\\x94\\x94\\x8b\\x8b\\x8b\\x83\\x83\\x83\"\n b\"{{{sssjjjbbbZZZRRRJJJAAA999111))) \\x18\\x18\\x18\\x10\\x10\\x10\"\n b\"\\x08\\x08\\x08\\x00\\x00\\x00\")\n\n def create_drawings(self):\n from kivy.graphics import Line, RenderContext\n\n # very first time, create a texture for the shader\n if not hasattr(SmoothLinePlot, '_texture'):\n tex = Texture.create(size=(1, 64), colorfmt='rgb')\n tex.add_reload_observer(SmoothLinePlot._smooth_reload_observer)\n SmoothLinePlot._texture = tex\n 
SmoothLinePlot._smooth_reload_observer(tex)\n\n self._grc = RenderContext(fs=SmoothLinePlot.SMOOTH_FS,\n use_parent_modelview=True,\n use_parent_projection=True)\n with self._grc:\n self._gcolor = Color(*self.color)\n self._gline = Line(points=[], cap='none', width=2.,\n texture=SmoothLinePlot._texture)\n\n return [self._grc]\n\n @staticmethod\n def _smooth_reload_observer(texture):\n texture.blit_buffer(SmoothLinePlot.GRADIENT_DATA, colorfmt=\"rgb\")\n\n def draw(self, *args):\n super(SmoothLinePlot, self).draw(*args)\n # flatten the list\n points = []\n for x, y in self.iterate_points():\n points += [x, y]\n self._gline.points = points\n\n\nclass ContourPlot(Plot):\n \"\"\"\n ContourPlot visualizes 3 dimensional data as an intensity map image.\n The user must first specify 'xrange' and 'yrange' (tuples of min,max) and then 'data', the intensity values.\n X and Y values are assumed to be linearly spaced values from xrange/yrange and the dimensions of 'data'.\n The color values are automatically scaled to the min and max z range of the data set.\n \"\"\"\n _image = ObjectProperty(None)\n data = ObjectProperty(None)\n xrange = ListProperty([0, 100])\n yrange = ListProperty([0, 100])\n\n def __init__(self, **kwargs):\n super(ContourPlot, self).__init__(**kwargs)\n self.bind(data=self.ask_draw, xrange=self.ask_draw, yrange=self.ask_draw)\n\n def create_drawings(self):\n self._image = Rectangle()\n self._color = Color(1, 1, 1, 1)\n self.bind(color=lambda instr, value: setattr(self._color, 'rgba', value))\n return [self._color, self._image]\n\n def draw(self, *args):\n super(ContourPlot, self).draw(*args)\n data = self.data\n xdim, ydim = data.shape\n\n # Find the minimum and maximum z values\n zmax = data.max()\n zmin = data.min()\n rgb_scale_factor = 1.0 / (zmax - zmin) * 255\n # Scale the z values into RGB data\n buf = np.array(data, dtype=float, copy=True)\n np.subtract(buf, zmin, out=buf)\n np.multiply(buf, rgb_scale_factor, out=buf)\n # Duplicate into 3 dimensions (RGB) and convert to byte array\n buf = np.asarray(buf, dtype=np.uint8)\n buf = np.expand_dims(buf, axis=2)\n buf = np.concatenate((buf, buf, buf), axis=2)\n buf = np.reshape(buf, (xdim, ydim, 3))\n\n charbuf = bytearray(np.reshape(buf, (buf.size)))\n self._texture = Texture.create(size=(xdim, ydim), colorfmt='rgb')\n self._texture.blit_buffer(charbuf, colorfmt='rgb', bufferfmt='ubyte')\n image = self._image\n image.texture = self._texture\n\n params = self._params\n funcx = log10 if params['xlog'] else lambda x: x\n funcy = log10 if params['ylog'] else lambda x: x\n xmin = funcx(params['xmin'])\n ymin = funcy(params['ymin'])\n size = params['size']\n ratiox = (size[2] - size[0]) / float(funcx(params['xmax']) - xmin)\n ratioy = (size[3] - size[1]) / float(funcy(params['ymax']) - ymin)\n\n bl = (funcx(self.xrange[0]) - xmin) * ratiox + size[0], (funcy(self.yrange[0]) - ymin) * ratioy + size[1]\n tr = (funcx(self.xrange[1]) - xmin) * ratiox + size[0], (funcy(self.yrange[1]) - ymin) * ratioy + size[1]\n image.pos = bl\n w = tr[0] - bl[0]\n h = tr[1] - bl[1]\n image.size = (w, h)\n\n\nif __name__ == '__main__':\n import itertools\n from math import sin, cos, pi\n from kivy.utils import get_color_from_hex as rgb\n from kivy.uix.boxlayout import BoxLayout\n from kivy.app import App\n\n class TestApp(App):\n\n def build(self):\n b = BoxLayout(orientation='vertical')\n # example of a custom theme\n colors = itertools.cycle([\n rgb('7dac9f'), rgb('dc7062'), rgb('66a8d4'), rgb('e5b060')])\n graph_theme = {\n 'label_options': {\n 
'color': rgb('444444'), # color of tick labels and titles\n 'bold': True},\n 'background_color': rgb('f8f8f2'), # background color of canvas\n 'tick_color': rgb('808080'), # ticks and grid\n 'border_color': rgb('808080')} # border drawn around each graph\n\n graph = Graph(\n xlabel='Cheese',\n ylabel='Apples',\n x_ticks_minor=5,\n x_ticks_major=25,\n y_ticks_major=1,\n y_grid_label=True,\n x_grid_label=True,\n padding=5,\n xlog=False,\n ylog=False,\n x_grid=True,\n y_grid=True,\n xmin=-50,\n xmax=50,\n ymin=-1,\n ymax=1,\n **graph_theme)\n\n plot = SmoothLinePlot(color=next(colors))\n plot.points = [(x / 10., sin(x / 50.)) for x in range(-500, 501)]\n # for efficiency, the x range matches xmin, xmax\n graph.add_plot(plot)\n\n plot = MeshLinePlot(color=next(colors))\n plot.points = [(x / 10., cos(x / 50.)) for x in range(-500, 501)]\n graph.add_plot(plot)\n self.plot = plot # this is the moving graph, so keep a reference\n\n plot = MeshStemPlot(color=next(colors))\n graph.add_plot(plot)\n plot.points = [(x, x / 50.) for x in range(-50, 51)]\n\n Clock.schedule_interval(self.update_points, 1 / 60.)\n\n graph2 = Graph(\n xlabel='Position (m)',\n ylabel='Time (s)',\n x_ticks_minor=0,\n x_ticks_major=1,\n y_ticks_major=10,\n y_grid_label=True,\n x_grid_label=True,\n padding=5,\n xlog=False,\n ylog=False,\n xmin=0,\n ymin=0,\n **graph_theme)\n b.add_widget(graph)\n\n if np is not None:\n (xbounds, ybounds, data) = self.make_contour_data()\n # This is required to fit the graph to the data extents\n graph2.xmin, graph2.xmax = xbounds\n graph2.ymin, graph2.ymax = ybounds\n\n plot = ContourPlot()\n plot.data = data\n plot.xrange = xbounds\n plot.yrange = ybounds\n plot.color = [1, 0.7, 0.2, 1]\n graph2.add_plot(plot)\n\n b.add_widget(graph2)\n self.contourplot = plot\n\n Clock.schedule_interval(self.update_contour, 1 / 60.)\n\n return b\n\n def make_contour_data(self, ts=0):\n omega = 2 * pi / 30\n k = (2 * pi) / 2.0\n\n ts = sin(ts * 2) + 1.5 # empirically determined 'pretty' values\n npoints = 100\n data = np.ones((npoints, npoints))\n\n position = [ii * 0.1 for ii in range(npoints)]\n time = [(ii % 100) * 0.6 for ii in range(npoints)]\n\n for ii, t in enumerate(time):\n for jj, x in enumerate(position):\n data[ii, jj] = sin(k * x + omega * t) + sin(-k * x + omega * t) / ts\n return ((0, max(position)), (0, max(time)), data)\n\n def update_points(self, *args):\n self.plot.points = [(x / 10., cos(Clock.get_time() + x / 50.)) for x in range(-500, 501)]\n\n def update_contour(self, *args):\n _, _, self.contourplot.data[:] = self.make_contour_data(Clock.get_time())\n # this does not trigger an update, because we replace the\n # values of the array and do not change the object.\n # However, we cannot do \"...data = make_contour_data()\" as\n # kivy will try to check for the identity of the new and\n # old values. In numpy, 'nd1 == nd2' leads to an error\n # (you have to use np.all). Ideally, the property should be patched\n # for this.\n self.contourplot.ask_draw()\n\n TestApp().run()\n" ]
[ [ "numpy.ones", "numpy.multiply", "numpy.subtract", "numpy.reshape", "numpy.asarray", "numpy.expand_dims", "numpy.array", "numpy.concatenate" ] ]
lishuangshuang0616/DNBelab_C4_scRNA_snakemake
[ "a705f537c7f2981b42ff6ebeb26a867ba41ea985" ]
[ "scripts/generate_cellline.py" ]
[ "import pandas as pd\nimport os\nimport argparse\nimport re\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n try:\n import unicodedata\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n\n return False\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--outPath', type=str, help=\n\t'''input the outpath''',)\n parser.add_argument('--htmlTemplate', type=str, help=\n\t'''input the html template''',)\n parser.add_argument('--ID', type=str, help=\n '''input the ID''',)\n args = parser.parse_args()\n return [args.outPath, args.htmlTemplate, args.ID,]\n \ndef get_args_from_file():\n path=get_args()[0]\n csv = [path+'/07.report/1.cell_report.csv',\\\n path+'/07.report/3.cDNA_sequencing_report.csv',\\\n path+'/07.report/3.Index_sequencing_report_T1.csv',\\\n path+'/07.report/4.alignment_report.csv',\\\n path+'/07.report/5.anno_report.csv',\\\n ]\n \n stat = dict()\n for i in range(len(csv)):\n if i==0:\n df = pd.read_csv(open(csv[i]),encoding=\"utf_8\",dtype=str,header=None,sep=\",\")\n stat['samplename'] = df[1][0]\n stat['species'] = df[1][1]\n stat['estm_Num_cell'] = df[1][2]\n stat['mean_r_per_c'] = df[1][3]\n stat['mean_UMI_per_c'] = df[1][4]\n stat['median_UMI_per_c'] = df[1][5]\n stat['mean_genes_per_c'] = df[1][6]\n stat['median_genes_per_c'] = df[1][7]\n stat['Human_mean_genes'] = df[1][8]\n stat['Mouse_mean_genes'] = df[1][9] \n stat['Human_mean_UMIs'] = df[1][10] \n stat['Mouse_mean_UMIs'] = df[1][11]\n stat['Human_ratio'] = df[1][12]\n stat['Mouse_ratio'] = df[1][13]\n stat['Multi_ratio'] = df[1][14]\n if i==1:\n df = pd.read_csv(open(csv[i]),encoding=\"utf_8\",dtype=str,header=None,sep=\",\")\n stat['cDNA_num_frag'] = df[1][0]\n stat['cDNA_frag_pass_QC'] = df[1][1]\n stat['cDNA_frag_exact_bar'] = df[1][2]\n stat['cDNA_frag_fail_bar'] = df[1][3]\n stat['cDNA_frag_low_qual'] = df[1][4]\n stat['cDNA_frag_unknow_bar'] = df[1][5]\n stat['cDNA_Q30_c_bar'] = df[1][6]\n stat['cDNA_Q30_s_bar'] = df[1][7]\n stat['cDNA_Q30_UMI'] = df[1][8]\n stat['cDNA_Q30_r'] = df[1][9]\n if i==2:\n df = pd.read_csv(open(csv[i]),encoding=\"utf_8\",dtype=str,header=None,sep=\",\")\n stat['index_num_frag'] = df[1][0]\n stat['index_frag_pass_QC'] = df[1][1]\n stat['index_frag_exact_bar'] = df[1][2]\n stat['index_frag_fail_bar'] = df[1][3]\n stat['index_frag_low_qual'] = df[1][4]\n stat['index_frag_unknow_bar'] = df[1][5]\n stat['index_Q30_c_bar'] = df[1][6]\n stat['index_Q30_s_bar'] = df[1][7]\n stat['index_Q30_UMI'] = df[1][8]\n stat['index_Q30_r'] = df[1][9]\n if i==3:\n df = pd.read_csv(open(csv[i]),encoding=\"utf_8\",dtype=str,header=None,sep=\",\") \n stat['raw_r'] = df[1][0]\n stat['map_r'] = df[1][1]\n stat['plus_strd'] = df[1][2]\n stat['minus_strd'] = df[1][3]\n stat['mito_ratio'] = df[1][4]\n stat['map_qual_corrt_r'] = df[1][5] \n if i==4:\n df = pd.read_csv(open(csv[i]),encoding=\"utf_8\",dtype=str,header=None,sep=\",\")\n stat['r_m_geno'] = df[1][0]\n stat['r_m_ex'] = df[1][1]\n stat['r_m_intro'] = df[1][2]\n stat['r_m_ex_intro'] = df[1][3]\n stat['r_m_anti'] = df[1][4]\n stat['r_m_inter'] = df[1][5]\n stat['r_m_gene_fail'] = df[1][6]\n plot_file = [\n path+'/07.report/div/barcode_rank.div',\\\n path+'/07.report/div/cluster_chsize.div',\\\n path+'/07.report/base64/6.base64',\\\n path+'/07.report/base64/7.base64',\\\n #path+'/07.report/base64/8.base64',\\\n \n]\n plot_base64 = []\n plot_base64.append(open(path+'/07.report/div/barcode_rank.div',\"r\").read())\n# 
plot_base64.append(open(path+'/07.report/div/cluster_chsize.div',\"r\").read())\n plot_base64.append(open(path+'/07.report/base64/6.base64',\"r\").read())\n plot_base64.append(open(path+'/07.report/base64/7.base64',\"r\").read())\n #plot_base64.append(open(path+'/07.report/base64/8.base64',\"r\").read())\n# plot_base64.append(open(path+'/07.report/div/nUMI_chsize.div',\"r\").read())\n plot_order = ['plot1','plot3','plot4']\n plot_dict = dict(zip(plot_order, plot_base64))\n \n import locale\n locale.setlocale(locale.LC_ALL, 'en_US')\n for k,v in stat.items():\n if is_number(v):\n stat[k] =locale.format_string(\"%d\", int(v), grouping=True)\n else:\n continue\n return stat, plot_dict\n \ndef write_param_to_template():\n stat, plot_dict= get_args_from_file()\n template = open(get_args()[1]).read()\n ID = get_args()[2]\n from string import Template\n\n html=Template(template)\n report=html.safe_substitute(sample_info=ID, samplename=stat['samplename'],\\\n species=stat['species'], median_UMI_per_c=stat['median_UMI_per_c'],\\\n estm_Num_cell=stat['estm_Num_cell'],sample_id=stat['estm_Num_cell'],\\\n # total_gene=stat['total_gene'],\\\n # cluster_cell=stat['cluster_cell'],\\\n cDNA_num_frag=stat['cDNA_num_frag'],\\\n cDNA_frag_pass_QC=stat['cDNA_frag_pass_QC'],cDNA_frag_exact_bar=stat['cDNA_frag_exact_bar'],\\\n cDNA_frag_fail_bar=stat['cDNA_frag_fail_bar'],\\\n cDNA_frag_low_qual=stat['cDNA_frag_low_qual'],\\\n cDNA_frag_unknow_bar = stat['cDNA_frag_unknow_bar'],\\\n cDNA_Q30_c_bar=stat['cDNA_Q30_c_bar'],cDNA_Q30_s_bar=stat['cDNA_Q30_s_bar'],\\\n cDNA_Q30_UMI=stat['cDNA_Q30_UMI'],\\\n cDNA_Q30_r=stat['cDNA_Q30_r'],\\\n index_num_frag=stat['index_num_frag'],\\\n index_frag_pass_QC=stat['index_frag_pass_QC'],index_frag_exact_bar=stat['index_frag_exact_bar'],\\\n index_frag_fail_bar=stat['index_frag_fail_bar'],\\\n index_frag_low_qual=stat['index_frag_low_qual'],\\\n index_frag_unknow_bar = stat['index_frag_unknow_bar'],\\\n index_Q30_c_bar=stat['index_Q30_c_bar'],index_Q30_s_bar=stat['index_Q30_s_bar'],\\\n index_Q30_UMI=stat['index_Q30_UMI'],\\\n index_Q30_r=stat['index_Q30_r'],\\\n raw_r=stat['raw_r'],\\\n map_r=stat['map_r'],plus_strd=stat['plus_strd'],\\\n minus_strd=stat['minus_strd'],\\\n mito_ratio = stat['mito_ratio'],\\\n map_qual_corrt_r=stat['map_qual_corrt_r'],plot1=plot_dict['plot1'],\\\n plot3=plot_dict['plot3'],\\\n plot4=plot_dict['plot4'],r_m_geno=stat['r_m_geno'], \n r_m_ex=stat['r_m_ex'], \n r_m_intro=stat['r_m_intro'],\n r_m_ex_intro=stat['r_m_ex_intro'],\n r_m_anti=stat['r_m_anti'],\n r_m_inter=stat['r_m_inter'],\n r_m_gene_fail=stat['r_m_gene_fail'],ID=ID,\n mean_r_per_c=stat['mean_r_per_c'],\n mean_UMI_per_c=stat['mean_UMI_per_c'],\n mean_genes_per_c=stat['mean_genes_per_c'],\n median_genes_per_c=stat['median_genes_per_c'],\n #plot6=plot_dict['plot6']\n\t \n #mean_genes_per_c=stat['mean_genes_per_c'],\n #median_genes_per_c=stat['median_genes_per_c'],\n Human_mean_genes=stat['Human_mean_genes'],\n Mouse_mean_genes=stat['Mouse_mean_genes'], \n Human_mean_UMIs=stat['Human_mean_UMIs'], \n Mouse_mean_UMIs=stat['Mouse_mean_UMIs'],\n )\n metrics_df = pd.DataFrame([stat])\n cols = [\"samplename\",\"species\",\"estm_Num_cell\",\"mean_r_per_c\",\"mean_UMI_per_c\",\"mean_genes_per_c\",\\\n \"median_UMI_per_c\",\"median_genes_per_c\",\"Human_mean_genes\",\"Mouse_mean_genes\",\"Human_mean_UMIs\",\"Mouse_mean_UMIs\",\\\n \"Human_ratio\",\"Mouse_ratio\",\"Multi_ratio\",\"cDNA_num_frag\",\"cDNA_frag_pass_QC\",\"cDNA_Q30_r\",\"index_num_frag\",\\\n 
\"index_frag_pass_QC\",\"index_Q30_r\",\"map_r\",\"mito_ratio\",\"r_m_geno\",\"r_m_ex\",\"r_m_intro\",\\\n \"r_m_ex_intro\",\"r_m_anti\",\"r_m_inter\",\"r_m_gene_fail\"]\n metrics_summary_df = metrics_df[cols]\n metrics_summary_df.columns =[\"SampleName\",\"species\",\"Estimated number of cell\",\"Mean reads per cell\",\"Mean UMI count per cell\",\\\n \"Mean genes per cell\",\"Median UMI counts per cell\",\"Median genes per cell\",\"Human_mean_genes\",\"Mouse_mean_genes\",\\\n \"Human_mean_UMIs\",\"Mouse_mean_UMIs\",\"Human_ratio\",\"Mouse_ratio\",\"Multi_ratio\",\\\n \"cDNA Number of reads\",\"cDNA Reads pass QC\",\"cDNA Q30 bases in reads\",\"index Number of reads\",\"index Reads pass QC\",\\\n \"index Q30 bases in reads\",\"Mapped reads\",\"Mitochondria ratio\",\"Reads mapped to genome (Map Quality >= 0)\",\"Reads mapped to exonic regions\",\\\n \"Reads mapped to intronic regions\",\"Reads mapped to both exonic and intronic regions\",\"Reads mapped antisense to gene\",\\\n \"Reads mapped to intergenic regions\",\"Reads mapped to gene but failed to interpret type\"]\n return report,metrics_summary_df\n \nif __name__ == '__main__':\n outpath=get_args()[0]\n ID = get_args()[2]\n #get_args_from_file()\n report,metrics_summary_df=write_param_to_template()\n fw = open(outpath+'/07.report/html/'+ID+'_CDCPv2_scRNA_report.html','w')\n fw.write(report)\n file_df = outpath+'/07.report/8.metrics_summary.xls'\n metrics_summary_df.to_csv(file_df,sep='\\t',index=0)\n" ]
[ [ "pandas.DataFrame" ] ]
JihoChoi/dynamic-gcn-deprecated-TBU
[ "ff315206811b757f2f61f0776917e6a7d43c9379" ]
[ "baselines/bigcn/model/Twitter/BiGCN_Twitter-Attn-0325.py" ]
[ "import sys,os\nsys.path.append(os.getcwd())\n# from Process.process import *\nfrom Process.process import *\nimport torch as th\nfrom torch_scatter import scatter_mean\nimport torch.nn.functional as F\nimport numpy as np\nfrom tools.earlystopping import EarlyStopping\nfrom torch_geometric.data import DataLoader\nfrom tqdm import tqdm\nfrom Process.rand5fold import *\nfrom tools.evaluate import *\nfrom torch_geometric.nn import GCNConv\nimport copy\n\n\nclass TDRumorGCN(th.nn.Module):\n def __init__(self, in_feats, hid_feats, out_feats):\n super(TDRumorGCN, self).__init__()\n self.conv1 = GCNConv(in_feats, hid_feats)\n # self.conv2 = GCNConv(hid_feats+in_feats, out_feats)\n self.conv2 = GCNConv(hid_feats, out_feats)\n\n def forward(self, data):\n x, edge_index = data.x, data.edge_index\n\n # print(\"==========================\")\n # print(x.shape, edge_index.shape)\n # print(\"==========================\")\n \"\"\"\n x1 = copy.copy(x.float())\n x = self.conv1(x, edge_index)\n x2 = copy.copy(x)\n # TODO: root feature concat\n rootindex = data.rootindex\n root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)\n batch_size = max(data.batch) + 1\n for num_batch in range(batch_size):\n index = (th.eq(data.batch, num_batch))\n root_extend[index] = x1[rootindex[num_batch]]\n x = th.cat((x, root_extend), 1)\n\n x = F.relu(x)\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n x = F.relu(x)\n root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)\n for num_batch in range(batch_size):\n index = (th.eq(data.batch, num_batch))\n root_extend[index] = x2[rootindex[num_batch]]\n x = th.cat((x, root_extend), 1)\n x = scatter_mean(x, data.batch, dim=0)\n \"\"\"\n\n x, edge_index = data.x, data.BU_edge_index\n\n # print(\"TEST\")\n # print(x.shape, edge_index.shape)\n\n x = self.conv1(x, edge_index)\n # print(\"after conv1:\", x.shape)\n\n x = F.relu(x)\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n # print(\"after conv2:\", x.shape)\n\n # exit()\n\n x = F.relu(x)\n x = scatter_mean(x, data.batch, dim=0)\n\n return x\n\n\nclass BURumorGCN(th.nn.Module):\n def __init__(self, in_feats, hid_feats, out_feats):\n super(BURumorGCN, self).__init__()\n self.conv1 = GCNConv(in_feats, hid_feats)\n # self.conv2 = GCNConv(hid_feats+in_feats, out_feats)\n self.conv2 = GCNConv(hid_feats, out_feats)\n\n def forward(self, data):\n\n # print(\"---------------------------------------\")\n # print(data.x.shape, data.BU_edge_index.shape)\n # print(\"---------------------------------------\")\n\n\n \"\"\"\n x, edge_index = data.x, data.BU_edge_index\n x1 = copy.copy(x.float())\n x = self.conv1(x, edge_index)\n x2 = copy.copy(x)\n\n rootindex = data.rootindex\n root_extend = th.zeros(len(data.batch), x1.size(1)).to(device)\n batch_size = max(data.batch) + 1\n for num_batch in range(batch_size):\n index = (th.eq(data.batch, num_batch))\n root_extend[index] = x1[rootindex[num_batch]]\n x = th.cat((x, root_extend), 1)\n\n x = F.relu(x)\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n x = F.relu(x)\n root_extend = th.zeros(len(data.batch), x2.size(1)).to(device)\n for num_batch in range(batch_size):\n index = (th.eq(data.batch, num_batch))\n root_extend[index] = x2[rootindex[num_batch]]\n x = th.cat((x, root_extend), 1)\n\n x = scatter_mean(x, data.batch, dim=0)\n \"\"\"\n\n x, edge_index = data.x, data.BU_edge_index\n\n x1 = copy.copy(x.float())\n rootindex = data.rootindex\n root_extend = th.zeros(len(data.batch), 
x1.size(1)).to(device)\n batch_size = max(data.batch) + 1\n for num_batch in range(batch_size):\n index = (th.eq(data.batch, num_batch))\n root_extend[index] = x1[rootindex[num_batch]]\n # x = th.cat((x, root_extend), 1)\n\n\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n x = F.relu(x)\n # x = scatter_mean(x, data.batch, dim=0)\n # print(x.shape, root_extend.shape)\n\n # HERE\n # x = th.cat((x, root_extend), 1)\n x = scatter_mean(x, data.batch, dim=0) # GCN - mean\n\n\n root_extend = scatter_mean(root_extend, data.batch, dim=0)\n # print(x.shape, root_extend.shape)\n\n return x, root_extend\n\n\nclass Net(th.nn.Module):\n def __init__(self, in_feats, hid_feats, out_feats): # 5000, 64, 64\n super(Net, self).__init__()\n self.TDRumorGCN = TDRumorGCN(in_feats, hid_feats, out_feats)\n self.BURumorGCN = BURumorGCN(in_feats, hid_feats, out_feats)\n\n\n print(\"Out / hidden:\", out_feats, hid_feats)\n\n # self.multihead_attn = th.nn.MultiheadAttention(hid_feats, 6)\n\n\n # TODO: Attention layer\n # self.fc1 = th.nn.Linear((out_feats+hid_feats)*2 * 5, 64)\n\n self.W_s1 = th.nn.Linear(out_feats * 2, 128)\n self.W_s2 = th.nn.Linear(128, 128)\n\n\n # self.multihead_attn = th.nn.MultiheadAttention(65, 5)\n\n # self.fc1 = th.nn.Linear((out_feats + hid_feats)*2 * 5, 64)\n self.fc1 = th.nn.Linear((out_feats) *2 * 5 + in_feats, 64)\n # self.fc1 = th.nn.Linear((out_feats) *2 * 10 + in_feats, 64)\n\n\n # self.relu = th.nn.ReLU()\n self.relu = th.nn.LeakyReLU()\n self.fc2 = th.nn.Linear(64, 4)\n\n\n\n def attention_net(self, snapshot_output):\n # B * D\n \"\"\"\n https://github.com/prakashpandey9/Text-Classification-Pytorch/blob/master/models/selfAttention.py\n \"\"\"\n attn_weight_matrix = self.W_s2(th.tanh(self.W_s1(snapshot_output)))\n print(\"here1\", attn_weight_matrix.shape)\n\n # attn_weight_matrix = attn_weight_matrix.unsqueeze(1) # 128 1 64 --> 128 1 64\n print(\"here2\", attn_weight_matrix.shape)\n\n # ttn_weight_matrix = attn_weight_matrix.permute(0, 2, 1)\n # ttn_weight_matrix = attn_weight_matrix.permute(1, 0, 2)\n print(\"here3\", attn_weight_matrix.shape)\n\n attn_weight_matrix = F.softmax(attn_weight_matrix, dim=0) # TODO\n\n attn_weight_matrix = attn_weight_matrix.unsqueeze(1) # 128 64*2 --> 128 64 1\n print(\"there4\", attn_weight_matrix.shape)\n\n return attn_weight_matrix\n\n\n # def forward(self, data):\n def forward(self, s0, s1, s2, s3, s4):\n # def forward(self, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9):\n\n # TODO: data to temporal adjacency network, fix\n\n TD_x0 = self.TDRumorGCN(s0)\n BU_x0, _ = self.BURumorGCN(s0)\n TD_x1 = self.TDRumorGCN(s1)\n BU_x1, _ = self.BURumorGCN(s1)\n TD_x2 = self.TDRumorGCN(s2)\n BU_x2, _ = self.BURumorGCN(s2)\n TD_x3 = self.TDRumorGCN(s3)\n BU_x3, _ = self.BURumorGCN(s3)\n TD_x4 = self.TDRumorGCN(s4)\n BU_x4, root_extend = self.BURumorGCN(s4)\n\n # TD_x5 = self.TDRumorGCN(s5)\n # BU_x5, _ = self.BURumorGCN(s5)\n # TD_x6 = self.TDRumorGCN(s6)\n # BU_x6, _ = self.BURumorGCN(s6)\n # TD_x7 = self.TDRumorGCN(s7)\n # BU_x7, _ = self.BURumorGCN(s7)\n # TD_x8 = self.TDRumorGCN(s8)\n # BU_x8, _ = self.BURumorGCN(s8)\n # TD_x9 = self.TDRumorGCN(s9)\n # BU_x9, _ = self.BURumorGCN(s9)\n\n\n # x = th.cat((BU_x, TD_x), 1)\n # x = th.cat((BU_x0, TD_x0, BU_x1, TD_x1, BU_x2, TD_x2, BU_x3, TD_x3, BU_x4, TD_x4), 1)\n\n \"\"\"\n # TD_x0 = TD_x0.permute(1, 0)\n print(TD_x0.shape)\n attn_weight_matrix = self.attention_net(TD_x0)\n print(\"attn\", attn_weight_matrix.shape)\n print(\"TD\", 
TD_x0.shape)\n \"\"\"\n\n x0 = th.cat((BU_x0, TD_x0), 1)\n x1 = th.cat((BU_x1, TD_x1), 1)\n x2 = th.cat((BU_x2, TD_x2), 1)\n x3 = th.cat((BU_x3, TD_x3), 1)\n x4 = th.cat((BU_x4, TD_x4), 1)\n\n # scalar self-attention gate per snapshot, after\n # https://github.com/prakashpandey9/Text-Classification-Pytorch/blob/master/models/selfAttention.py:\n # attention_net scores each concatenated BU+TD feature and the score rescales it\n gated = []\n for xi in (x0, x1, x2, x3, x4):\n attn_weight_matrix = self.attention_net(xi)\n coeff = th.matmul(attn_weight_matrix, xi.unsqueeze(2)) # (B x 1 x 128) * (B x 128 x 1) -> B x 1 x 1\n gated.append(th.bmm(coeff, xi.unsqueeze(1)).view(-1, 128))\n x0, x1, x2, x3, x4 = gated\n\n x = th.cat((x0, x1, x2, x3, x4, root_extend), 1)\n x = self.fc1(x)\n x = self.relu(x)\n x = self.fc2(x)\n x = F.log_softmax(x, dim=1)\n return x\n\n\ndef train_GCN(treeDic, x_test, x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, dataname, iter):\n model = Net(5000, 64, 64).to(device)\n # fine-tune the BU-GCN conv layers at a fifth of the base learning rate\n BU_params = list(map(id, model.BURumorGCN.conv1.parameters()))\n BU_params += list(map(id, model.BURumorGCN.conv2.parameters()))\n base_params = filter(lambda p: id(p) not in BU_params, model.parameters())\n optimizer = th.optim.Adam([\n {'params': base_params},\n {'params': model.BURumorGCN.conv1.parameters(), 'lr': lr/5},\n {'params': model.BURumorGCN.conv2.parameters(), 'lr': lr/5}\n ], lr=lr, weight_decay=weight_decay)\n train_losses = []\n val_losses = []\n train_accs = []\n val_accs = []\n early_stopping = EarlyStopping(patience=patience, verbose=True)\n for epoch in range(n_epochs):\n traindata_list, testdata_list = loadSnapshotData(dataname, treeDic, x_train, x_test, TDdroprate, BUdroprate)\n train_loader = DataLoader(traindata_list, batch_size=batchsize, shuffle=True, num_workers=5)\n test_loader = DataLoader(testdata_list, batch_size=batchsize, shuffle=True, num_workers=5)\n\n # ---------\n # TRAIN\n # ---------\n model.train() # reset after model.eval() in the previous epoch's validation\n avg_loss = []\n avg_acc = []\n batch_idx = 0\n for Batch_data in train_loader:\n # each batch is a list of five graph snapshots sharing the same labels\n s0 = Batch_data[0].to(device)\n s1 = Batch_data[1].to(device)\n s2 = Batch_data[2].to(device)\n s3 = Batch_data[3].to(device)\n s4 = Batch_data[4].to(device)\n\n out_labels = model(s0, s1, s2, s3, s4)\n loss = F.nll_loss(out_labels, Batch_data[0].y)\n optimizer.zero_grad()\n loss.backward()\n avg_loss.append(loss.item())\n optimizer.step()\n _, pred = out_labels.max(dim=-1)\n correct = pred.eq(Batch_data[0].y).sum().item()\n train_acc = correct / len(Batch_data[0].y)\n avg_acc.append(train_acc)\n print(\"Iter {:03d} | Epoch {:05d} | Batch{:02d} | Train_Loss {:.4f}| Train_Accuracy {:.4f}\".format(\n iter, epoch, batch_idx, loss.item(), train_acc))\n batch_idx += 1\n\n train_losses.append(np.mean(avg_loss))\n train_accs.append(np.mean(avg_acc))\n\n # ---------\n # VALIDATION\n # ---------\n temp_val_losses = []\n temp_val_accs = []\n # accuracy, precision, recall, and F1 for each of the four rumor classes\n (temp_val_Acc_all, temp_val_Acc1, temp_val_Prec1, temp_val_Recll1, temp_val_F1,\n temp_val_Acc2, temp_val_Prec2, temp_val_Recll2, temp_val_F2,\n temp_val_Acc3, temp_val_Prec3, temp_val_Recll3, temp_val_F3,\n temp_val_Acc4, temp_val_Prec4, temp_val_Recll4, temp_val_F4) = ([] for _ in range(17))\n model.eval()\n for Batch_data in test_loader:\n s0 = Batch_data[0].to(device)\n s1 = Batch_data[1].to(device)\n s2 = Batch_data[2].to(device)\n s3 = Batch_data[3].to(device)\n s4 = Batch_data[4].to(device)\n\n val_out = model(s0, s1, s2, s3, s4)\n val_loss = F.nll_loss(val_out, Batch_data[0].y)\n temp_val_losses.append(val_loss.item())\n _, val_pred = val_out.max(dim=1)\n correct = val_pred.eq(Batch_data[0].y).sum().item()\n val_acc = correct / len(Batch_data[0].y)\n Acc_all, Acc1, Prec1, Recll1, F1, Acc2, Prec2, Recll2, F2, Acc3, Prec3, Recll3, F3, Acc4, Prec4, Recll4, F4 = evaluation4class(\n val_pred, Batch_data[0].y)\n for lst, val in zip(\n (temp_val_Acc_all, temp_val_Acc1, temp_val_Prec1, temp_val_Recll1, temp_val_F1,\n temp_val_Acc2, temp_val_Prec2, temp_val_Recll2, temp_val_F2,\n temp_val_Acc3, temp_val_Prec3, temp_val_Recll3, temp_val_F3,\n temp_val_Acc4, temp_val_Prec4, temp_val_Recll4, temp_val_F4),\n (Acc_all, Acc1, Prec1, Recll1, F1, Acc2, Prec2, Recll2, F2,\n Acc3, Prec3, Recll3, F3, Acc4, Prec4, Recll4, F4)):\n lst.append(val)\n temp_val_accs.append(val_acc)\n val_losses.append(np.mean(temp_val_losses))\n val_accs.append(np.mean(temp_val_accs))\n print(\"Epoch {:05d} | Val_Loss {:.4f}| Val_Accuracy {:.4f}\".format(epoch, np.mean(temp_val_losses),\n np.mean(temp_val_accs)))\n\n res = ['acc:{:.4f}'.format(np.mean(temp_val_Acc_all)),\n 'C1:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc1), np.mean(temp_val_Prec1), np.mean(temp_val_Recll1), np.mean(temp_val_F1)),\n 'C2:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc2), np.mean(temp_val_Prec2), np.mean(temp_val_Recll2), np.mean(temp_val_F2)),\n 'C3:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc3), np.mean(temp_val_Prec3), np.mean(temp_val_Recll3), np.mean(temp_val_F3)),\n 'C4:{:.4f},{:.4f},{:.4f},{:.4f}'.format(np.mean(temp_val_Acc4), np.mean(temp_val_Prec4), np.mean(temp_val_Recll4), np.mean(temp_val_F4))]\n print('results:', res)\n early_stopping(np.mean(temp_val_losses), np.mean(temp_val_accs), np.mean(temp_val_F1), np.mean(\n temp_val_F2), np.mean(temp_val_F3), np.mean(temp_val_F4), model, 'BiGCN', dataname)\n accs = np.mean(temp_val_accs)\n F1 = np.mean(temp_val_F1)\n F2 = np.mean(temp_val_F2)\n F3 = np.mean(temp_val_F3)\n F4 = np.mean(temp_val_F4)\n if early_stopping.early_stop:\n print(\"Early stopping\")\n accs = early_stopping.accs\n F1 = early_stopping.F1\n F2 = early_stopping.F2\n F3 = early_stopping.F3\n F4 = early_stopping.F4\n break\n return train_losses, val_losses, train_accs, val_accs, accs, F1, F2, F3, F4\n\n\n# ====================\n# MAIN\n# ====================\n\nlr = 0.0005\nweight_decay = 1e-4\npatience = 10\nn_epochs = 100\nbatchsize = 128\nTDdroprate = 0.2\nBUdroprate = 0.2\ndatasetname = sys.argv[1] # \"Twitter15\" or \"Twitter16\"\niterations = int(sys.argv[2])\nmodel = \"GCN\"\ndevice = th.device('cuda:2' if th.cuda.is_available() else 'cpu')\ntest_accs = []\nNR_F1 = []\nFR_F1 = []\nTR_F1 = []\nUR_F1 = []\nfor iter in range(iterations):\n fold0_x_test, fold0_x_train, fold1_x_test, fold1_x_train, fold2_x_test, \\\n fold2_x_train, fold3_x_test, fold3_x_train, fold4_x_test, fold4_x_train = load5foldData(\n datasetname)\n\n treeDic = loadTree(datasetname)\n\n # five-fold cross-validation: train per fold, then average accuracy and the\n # per-class F1 scores (NR, FR, TR, UR) over the five folds\n train_losses, val_losses, train_accs, val_accs0, accs0, F1_0, F2_0, F3_0, F4_0 = train_GCN(\n treeDic, fold0_x_test, fold0_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)\n train_losses, val_losses, train_accs, val_accs1, accs1, F1_1, F2_1, F3_1, F4_1 = train_GCN(\n treeDic, fold1_x_test, fold1_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)\n train_losses, val_losses, train_accs, val_accs2, accs2, F1_2, F2_2, F3_2, F4_2 = train_GCN(\n treeDic, fold2_x_test, fold2_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)\n train_losses, val_losses, train_accs, val_accs3, accs3, F1_3, F2_3, F3_3, F4_3 = train_GCN(\n treeDic, fold3_x_test, fold3_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)\n train_losses, val_losses, train_accs, val_accs4, accs4, F1_4, F2_4, F3_4, F4_4 = train_GCN(\n treeDic, fold4_x_test, fold4_x_train, TDdroprate, BUdroprate, lr, weight_decay, patience, n_epochs, batchsize, datasetname, iter)\n test_accs.append((accs0 + accs1 + accs2 + accs3 + accs4) / 5)\n NR_F1.append((F1_0 + F1_1 + F1_2 + F1_3 + F1_4) / 5)\n FR_F1.append((F2_0 + F2_1 + F2_2 + F2_3 + F2_4) / 5)\n TR_F1.append((F3_0 + F3_1 + F3_2 + F3_3 + F3_4) / 5)\n UR_F1.append((F4_0 + F4_1 + F4_2 + F4_3 + F4_4) / 5)\nprint(\"Total_Test_Accuracy: {:.4f}|NR F1: {:.4f}|FR F1: {:.4f}|TR F1: {:.4f}|UR F1: {:.4f}\".format(\n sum(test_accs) / iterations, sum(NR_F1) / iterations, sum(FR_F1) / iterations, sum(TR_F1) / iterations, sum(UR_F1) / iterations))\n" ]
[ [ "torch.nn.functional.log_softmax", "torch.nn.Linear", "torch.nn.functional.dropout", "torch.nn.functional.softmax", "torch.nn.functional.nll_loss", "torch.eq", "torch.nn.functional.relu", "torch.cuda.is_available", "torch.cat", "numpy.mean", "torch.nn.LeakyReLU" ] ]
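
A note on the optimizer pattern in the record above: train_GCN builds its Adam optimizer
with per-parameter groups so the bottom-up GCN convolutions learn at a fifth of the base
rate. Below is a minimal, self-contained sketch of that pattern; TinyNet and its layers
are hypothetical stand-ins for the BiGCN model and its BURumorGCN.conv1/conv2.

    import torch as th
    import torch.nn as nn

    class TinyNet(nn.Module):  # hypothetical stand-in for Net(5000, 64, 64)
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Linear(8, 8)  # plays the role of BURumorGCN.conv1
            self.conv2 = nn.Linear(8, 8)  # plays the role of BURumorGCN.conv2
            self.head = nn.Linear(8, 4)

    model = TinyNet()
    lr = 0.0005
    # ids of the parameters that should use the slower learning rate
    slow_ids = set(map(id, list(model.conv1.parameters())
                       + list(model.conv2.parameters())))
    base_params = [p for p in model.parameters() if id(p) not in slow_ids]
    optimizer = th.optim.Adam([
        {"params": base_params},                             # default lr
        {"params": model.conv1.parameters(), "lr": lr / 5},  # slow group
        {"params": model.conv2.parameters(), "lr": lr / 5},
    ], lr=lr, weight_decay=1e-4)

Keeping structurally sensitive layers in a slower group is a common fine-tuning
heuristic; every group inherits weight_decay from the top-level defaults.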
clifduhn/magellanmapper
[ "2d9230b7374719d6cba9b626145f9e81f09c42bf" ]
[ "magmap/plot/colormaps.py" ]
[ "# Colormaps for MagellanMapper\n# Author: David Young, 2018, 2020\n\"\"\"Custom colormaps for MagellanMapper.\n\"\"\"\n\nfrom enum import Enum, auto\n\nimport numpy as np\nfrom matplotlib import cm\nfrom matplotlib import colors\n\nfrom magmap.settings import config\nfrom magmap.io import libmag\n\n#: Dict[:class:`config.Cmaps`, :obj:`colors.LinearSegmentedColormap`]:\n# Default colormaps.\nCMAPS = {}\n\n\nclass DiscreteModes(Enum):\n \"\"\"Discrete colormap generation modes.\"\"\"\n RANDOMN = auto()\n GRID = auto()\n\n\ndef make_dark_linear_cmap(name, color):\n \"\"\"Make a linear colormap starting with black and ranging to \n ``color``.\n \n Args:\n name: Name to give to colormap.\n color: Colors will range from black to this color.\n \n Returns:\n A `LinearSegmentedColormap` object.\n \"\"\"\n return colors.LinearSegmentedColormap.from_list(name, (\"black\", color))\n\n\ndef setup_cmaps():\n \"\"\"Set up default colormaps, storing them in :const:``CMAPS``.\"\"\"\n CMAPS[config.Cmaps.CMAP_GRBK_NAME] = make_dark_linear_cmap(\n config.Cmaps.CMAP_GRBK_NAME.value, \"green\")\n CMAPS[config.Cmaps.CMAP_RDBK_NAME] = make_dark_linear_cmap(\n config.Cmaps.CMAP_RDBK_NAME.value, \"red\")\n\n\nclass DiscreteColormap(colors.ListedColormap):\n \"\"\"Extends :class:``matplotlib.colors.ListedColormap`` to generate a \n discrete colormap and associated normalization object.\n \n Extend ``ListedColormap`` rather than a linear colormap since the \n number of colors should equal the number of possible values, without \n requiring interpolation.\n \n Attributes:\n cmap_labels: Tuple of N lists of RGBA values, where N is equal \n to the number of colors, with a discrete color for each \n unique value in ``labels``.\n norm: Normalization object, which is of type \n :class:``matplotlib.colors.NoNorm`` if indexing directly or \n :class:``matplotlib.colors.BoundaryNorm`` if otherwise.\n img_labels (List[int]): Sorted sequence of unique labels. May have\n more values than in ``labels`` such as mirrored negative values.\n None if ``index_direct`` is False.\n \"\"\"\n def __init__(self, labels=None, seed=None, alpha=150, index_direct=True, \n min_val=0, max_val=255, min_any=0, background=None,\n dup_for_neg=False, symmetric_colors=False, cmap_labels=None):\n \"\"\"Generate discrete colormap for labels using \n :func:``discrete_colormap``.\n \n Args:\n labels: Labels of integers for which a distinct color should be \n mapped to each unique label. Defaults to None, in which case \n no colormap will be generated.\n seed: Seed for randomizer to allow consistent colormap between \n runs; defaults to None.\n alpha: Transparency level; defaults to 150 for semi-transparent.\n index_direct: True if the colormap will be indexed directly, which \n assumes that the labels will serve as indexes to the colormap \n and should span sequentially from 0, 1, 2, ...; defaults to \n True. If False, a colormap will be generated for the full \n range of integers between the lowest and highest label values, \n inclusive, with a :obj:`colors.BoundaryNorm`, which may\n incur performance cost.\n min_val (int): Minimum value for random numbers; defaults to 0.\n max_val (int): Maximum value for random numbers; defaults to 255.\n min_any (int, float): Minimum value above which at least one value\n must be in each set of RGB values; defaults to 0.\n background: Tuple of (background_label, (R, G, B, A)), where \n background_label is the label value specifying the background, \n and the RGBA value will replace the color corresponding to that \n label. 
Defaults to None.\n dup_for_neg: True to duplicate positive labels as negative \n labels to recreate the same set of labels as for a \n mirrored labels map. Defaults to False.\n symmetric_colors (bool): True to make symmetric colors, assuming\n symmetric labels centered on 0; defaults to False.\n cmap_labels (List[str]): Sequence of colors as Matplotlib color\n strings or RGB(A) hex (e.g. \"#0fab24ff\") strings.\n \"\"\"\n self.norm = None\n self.cmap_labels = None\n self.img_labels = None\n\n if labels is None: return\n labels_unique = np.unique(labels)\n if dup_for_neg and np.sum(labels_unique < 0) == 0:\n # for labels that are only >= 0, duplicate the positive portion \n # as negative so that images with or without negatives use the\n # same colors\n labels_unique = np.append(\n -1 * labels_unique[labels_unique > 0][::-1], labels_unique)\n num_colors = len(labels_unique)\n\n labels_offset = 0\n if index_direct:\n # assume label values increase by 1 from 0 until num_colors; store\n # sorted labels sequence to translate labels based on index\n self.norm = colors.NoNorm()\n self.img_labels = labels_unique\n else:\n # use labels as bounds for each color, including wide bounds\n # for large gaps between successive labels; offset bounds to\n # encompass each label and avoid off-by-one errors that appear\n # when viewing images with additional extreme labels; float32\n # gives asymmetric colors for large values in mirrored atlases\n # despite remaining within range for unclear reasons, fixed by\n # using float64 instead\n labels_offset = 0.5\n bounds = labels_unique.astype(np.float64)\n bounds -= labels_offset\n # number of boundaries should be one more than number of labels to\n # avoid need for interpolation of boundary bin numbers and\n # potential merging of 2 extreme labels\n bounds = np.append(bounds, [bounds[-1] + 1])\n # TODO: may have occasional colormap inaccuracies from this bug:\n # https://github.com/matplotlib/matplotlib/issues/9937;\n self.norm = colors.BoundaryNorm(bounds, num_colors)\n if cmap_labels is None:\n # auto-generate colors for the number of labels\n self.cmap_labels = discrete_colormap(\n num_colors, alpha, False, seed, min_val, max_val, min_any,\n symmetric_colors, jitter=20, mode=DiscreteModes.RANDOMN)\n else:\n # generate RGBA colors from supplied color strings\n self.cmap_labels = colors.to_rgba_array(cmap_labels) * max_val\n if background is not None:\n # replace background label color with given color\n bkgdi = np.where(labels_unique == background[0] - labels_offset)\n if len(bkgdi) > 0 and bkgdi[0].size > 0:\n self.cmap_labels[bkgdi[0][0]] = background[1]\n self.make_cmap()\n \n def make_cmap(self):\n \"\"\"Initialize ``ListedColormap`` with stored labels rescaled to 0-1.\"\"\"\n super(DiscreteColormap, self).__init__(\n self.cmap_labels / 255.0, \"discrete_cmap\")\n \n def modified_cmap(self, adjust):\n \"\"\"Make a modified discrete colormap from itself.\n \n Args:\n adjust: Value by which to adjust RGB (not A) values.\n \n Returns:\n New ``DiscreteColormap`` instance with ``norm`` pointing to first \n instance and ``cmap_labels`` incremented by the given value.\n \"\"\"\n cmap = DiscreteColormap()\n # TODO: consider whether to copy instead\n cmap.norm = self.norm\n cmap.cmap_labels = np.copy(self.cmap_labels)\n # labels are uint8 so should already fit within RGB bounds; colors \n # that exceed these bounds will likely have slightly different tones \n # since RGB vals will not change uniformly\n cmap.cmap_labels[:, :3] += adjust\n cmap.make_cmap()\n 
return cmap\n\n def convert_img_labels(self, img):\n \"\"\"Convert an image to the indices in :attr:`img_labels` to give\n a linearly scaled image.\n\n This image can be displayed using a colormap with :obj:`colors.NoNorm`\n to index directly into the colormap.\n\n Args:\n img (:obj:`np.ndarray`): Image to convert.\n\n Returns:\n :obj:`np.ndarray`: Array of same shape as ``img`` with values\n translated to their corresponding indices within :attr:`img_labels`,\n or ``img`` unchanged if :attr:`img_labels` is None.\n\n \"\"\"\n conv = img\n if self.img_labels is not None:\n conv = np.searchsorted(self.img_labels, img)\n return conv\n\n\ndef discrete_colormap(num_colors, alpha=255, prioritize_default=True,\n seed=None, min_val=0, max_val=255, min_any=0,\n symmetric_colors=False, dup_offset=0, jitter=0,\n mode=DiscreteModes.RANDOMN):\n \"\"\"Make a discrete colormap using :attr:``config.colors`` as the \n starting colors and filling in the rest with randomly generated RGB values.\n \n Args:\n num_colors (int): Number of discrete colors to generate.\n alpha (int): Transparency level, from 0-255; defaults to 255.\n prioritize_default (bool, str): If True, the default colors from \n :attr:``config.colors`` will replace the initial colormap elements; \n defaults to True. Alternatively, `cn` can be given to use \n the \"CN\" color spec instead.\n seed (int): Random number seed; defaults to None, in which case no seed \n will be set.\n min_val (int, float): Minimum value for random numbers; defaults to 0.\n max_val (int, float): Maximum value for random numbers; defaults to 255.\n For floating point ranges such as 0.0-1.0, set as a float.\n min_any (int, float): Minimum value above which at least one value\n must be in each set of RGB values. If all values in an RGB set\n are below this value, the lowest RGB value will be scaled up by\n the ratio ``max_val:min_any``. Assumes a range of\n ``min_val < min_any < max_val``; defaults to 0 to ignore.\n symmetric_colors (bool): True to create a symmetric set of colors,\n assuming the first half of ``num_colors`` mirror those of\n the second half; defaults to False.\n dup_offset (int): Amount by which to offset duplicate color values\n if ``dup_for_neg`` is enabled; defaults to 0.\n jitter (int): In :obj:`DiscreteModes.GRID` mode, coordinates are\n randomly shifted by half this value above or below their original\n value; defaults to 0.\n mode (:obj:`DiscreteModes`): Mode given as an enumeration; defaults\n to :obj:`DiscreteModes.RANDOMN` mode.\n \n Returns:\n :obj:`np.ndarray`: 2D Numpy array in the format \n ``[[R, G, B, alpha], ...]`` on a \n scale of 0-255. 
This colormap will need to be converted into a \n Matplotlib colormap using ``LinearSegmentedColormap.from_list`` \n to generate a map that can be used directly in functions such \n as ``imshow``.\n \"\"\"\n if symmetric_colors:\n # make room for offset when duplicating colors\n max_val -= dup_offset\n\n # generate a random combination of RGB values for each of the colors, \n # where each value ranges from min to max\n if mode is DiscreteModes.GRID:\n # discrete colors taken from an evenly spaced grid for min separation\n # between color values\n jitters = None\n if jitter > 0:\n if seed is not None: np.random.seed(seed)\n # center the jitter so coords shift by up to jitter / 2 in\n # either direction, per the docstring\n jitters = (np.random.random((num_colors, 3))\n * jitter - jitter / 2).astype(int)\n max_val -= np.amax(jitters)\n min_val -= np.amin(jitters)\n # TODO: weight channels or scale non-linearly for better visual\n # distinction\n space = (max_val - min_val) // np.cbrt(num_colors)\n sl = slice(min_val, max_val, space)\n grid = np.mgrid[sl, sl, sl]\n coords = np.c_[grid[0].ravel(), grid[1].ravel(), grid[2].ravel()]\n if min_any > 0:\n # remove all coords where all vals are below threshold\n # TODO: account for lost coords in initial space size determination\n coords = coords[~np.all(np.less(coords, min_any), axis=1)]\n if seed is not None: np.random.seed(seed)\n rand = np.random.choice(len(coords), num_colors, replace=False)\n rand_coords = coords[rand]\n if jitters is not None:\n rand_coords = np.add(rand_coords, jitters)\n rand_coords_shape = list(rand_coords.shape)\n rand_coords_shape[-1] += 1\n cmap = np.zeros(\n rand_coords_shape,\n dtype=libmag.dtype_within_range(min_val, max_val))\n cmap[:, :-1] = rand_coords\n else:\n # randomly generate each color value; 4th values only for simplicity\n # in generating array with shape for alpha channel\n if seed is not None: np.random.seed(seed)\n cmap = (np.random.random((num_colors, 4)) \n * (max_val - min_val) + min_val).astype(\n libmag.dtype_within_range(min_val, max_val))\n if min_any > 0:\n # if all vals below threshold, scale up lowest value\n below_offset = np.all(np.less(cmap[:, :3], min_any), axis=1)\n axes = np.argmin(cmap[below_offset, :3], axis=1)\n cmap[below_offset, axes] = np.multiply(\n cmap[below_offset, axes], max_val / min_any)\n \n if symmetric_colors:\n # invert latter half onto former half, assuming that corresponding\n # labels are mirrored (e.g. -5, -3, 0, 3, 5), with background\n # centered as 0\n cmap_len = len(cmap)\n mid = cmap_len // 2\n cmap[:mid] = cmap[:cmap_len-mid-1:-1] + dup_offset\n cmap[:, -1] = alpha # set transparency\n if prioritize_default is not False:\n # prioritize default colors by replacing first colors with default ones\n colors_default = config.colors\n if prioritize_default == \"cn\":\n # \"CN\" color spec\n colors_default = np.multiply(\n [colors.to_rgb(\"C{}\".format(i)) for i in range(10)], 255)\n end = min((num_colors, len(colors_default)))\n cmap[:end, :3] = colors_default[:end]\n return cmap\n\n\ndef get_labels_discrete_colormap(labels_img, alpha_bkgd=255, dup_for_neg=False, \n use_orig_labels=False, symmetric_colors=False):\n \"\"\"Get a default discrete colormap for a labels image, assuming that \n background is 0, and the seed is determined by :attr:``config.seed``.\n \n Args:\n labels_img: Labels image as a Numpy array.\n alpha_bkgd: Background alpha level from 0 to 255; defaults to 255 \n to turn on background fully.\n dup_for_neg: True to duplicate positive labels as negative \n labels; defaults to False.\n use_orig_labels (bool): True to use original labels from \n 
:attr:`config.labels_img_orig` if available, falling back to \n ``labels_img``. Defaults to False.\n symmetric_colors (bool): True to create a symmetric set of colors;\n defaults to False.\n \n Returns:\n :class:``DiscreteColormap`` object with a separate color for \n each unique value in ``labels_img``.\n \"\"\"\n lbls = labels_img\n if use_orig_labels and config.labels_img_orig is not None:\n # use original labels if available for mapping consistency\n lbls = config.labels_img_orig\n return DiscreteColormap(\n lbls, config.seed, 255, min_any=160, min_val=10,\n background=(0, (0, 0, 0, alpha_bkgd)), dup_for_neg=dup_for_neg,\n symmetric_colors=symmetric_colors)\n\n\ndef get_borders_colormap(borders_img, labels_img, cmap_labels):\n \"\"\"Get a colormap for borders, using corresponding labels with \n intensity change to distinguish the borders.\n \n If the number of labels differs from that of the original colormap, \n a new colormap will be generated instead.\n \n Args:\n borders_img: Borders image as a Numpy array, used to determine \n the number of labels required. If this image has multiple \n channels, a similar colormap with distinct intensity will \n be made for each channel.\n labels_img: Labels image as a Numpy array, used to compare \n the number of labels for each channel in ``borders_img``.\n cmap_labels: The original colormap on which the new colormaps \n will be based.\n \n Returns:\n List of borders colormaps corresponding to the number of channels, \n or None if ``borders_img`` is None\n \"\"\"\n cmap_borders = None\n if borders_img is not None:\n if np.unique(labels_img).size == np.unique(borders_img).size:\n # get matching colors by using labels colormap as template, \n # with brightest colormap for original (channel 0) borders\n channels = 1\n if borders_img.ndim >= 4:\n channels = borders_img.shape[-1]\n cmap_borders = [\n cmap_labels.modified_cmap(int(40 / (channel + 1)))\n for channel in range(channels)]\n else:\n # get a new colormap if borders image has different number \n # of labels while still ensuring a transparent background\n cmap_borders = [get_labels_discrete_colormap(borders_img, 0)]\n return cmap_borders\n\n\ndef make_binary_cmap(binary_colors):\n \"\"\"Make a binary discrete colormap.\n \n Args:\n binary_colors (List[str]): Sequence of colors as\n ``[background, foreground]``.\n\n Returns:\n :obj:`DiscreteColormap`: Discrete colormap with labels of ``[0, 1]``\n mapped to ``binary_colors``.\n\n \"\"\"\n return DiscreteColormap([0, 1], cmap_labels=binary_colors)\n\n\ndef setup_labels_cmap(labels_img):\n \"\"\"Set up a colormap for a labels image.\n \n If :attr:`config.atlas_labels[config.AtlasLabels.BINARY]` is set,\n its value will be used to construct a binary colormap, where 0 is assumed\n to be background, and 1 is foreground.\n \n Args:\n labels_img (:obj:`np.ndarray`): Labels image.\n\n Returns:\n :obj:`DiscreteColormap`: Discrete colormap for the given labels.\n\n \"\"\"\n binary_colors = config.atlas_labels[config.AtlasLabels.BINARY]\n if binary_colors:\n cmap_labels = make_binary_cmap(binary_colors)\n else:\n cmap_labels = get_labels_discrete_colormap(\n labels_img, 0, dup_for_neg=True, use_orig_labels=True,\n symmetric_colors=config.atlas_labels[\n config.AtlasLabels.SYMMETRIC_COLORS])\n return cmap_labels\n\n\ndef get_cmap(cmap, n=None):\n \"\"\"Get colormap from a list of colormaps, string, or enum.\n \n If ``n`` is given, ``cmap`` is assumed to be a list from which a colormap \n will be retrieved. 
Colormaps that are strings will be converted to \n the associated standard `Colormap` object, while enums in \n :class:``config.Cmaps`` will be used to retrieve a `Colormap` object \n from :const:``CMAPS``, which is assumed to have been initialized.\n \n Args:\n cmap: Colormap given as a string, an enum in :class:``config.Cmaps``, \n or a list of colormaps.\n n: Index of `cmap` to retrieve a colormap, assuming that `cmap` \n is a sequence. Defaults to None to use `cmap` directly.\n \n Returns:\n The ``Colormap`` object, or None if no corresponding colormap \n is found.\n \"\"\"\n if n is not None:\n # assume that cmap is a list\n cmap = config.cmaps[n] if n < len(cmap) else None\n if isinstance(cmap, str):\n # cmap given as a standard Matplotlib colormap name\n cmap = cm.get_cmap(cmap)\n elif cmap in config.Cmaps:\n # assume default colormaps have been initialized\n cmap = CMAPS[cmap]\n return cmap\n\n\ndef setup_colormaps(num_channels):\n \"\"\"Set up colormaps based on the currently loaded main ROI profile.\n\n Args:\n num_channels (int): Number of channels in the main image; if the\n main ROI profile does not define this many colormaps, new\n colormaps will be randomly generated.\n\n \"\"\"\n config.cmaps = list(config.roi_profile[\"channel_colors\"])\n num_cmaps = len(config.cmaps)\n if num_cmaps < num_channels:\n # add colormap for each remaining channel, purposely inducing\n # int wraparound for greater color contrast\n chls_diff = num_channels - num_cmaps\n cmaps = discrete_colormap(\n chls_diff, alpha=255, prioritize_default=False, seed=config.seed,\n min_val=150) / 255.0\n print(\"generating colormaps from RGBA colors:\\n\", cmaps)\n for cmap in cmaps:\n config.cmaps.append(make_dark_linear_cmap(\"\", cmap))\n" ]
[ [ "numpy.sum", "numpy.multiply", "matplotlib.colors.BoundaryNorm", "numpy.less", "numpy.random.seed", "numpy.copy", "matplotlib.colors.LinearSegmentedColormap.from_list", "matplotlib.cm.get_cmap", "numpy.add", "numpy.amax", "matplotlib.colors.to_rgba_array", "numpy.append", "numpy.argmin", "matplotlib.colors.NoNorm", "numpy.where", "numpy.unique", "numpy.cbrt", "numpy.searchsorted", "numpy.random.random", "numpy.amin" ] ]
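
The DiscreteColormap in the record above assigns one color per unique label and, on its
non-index_direct path, wraps them in a BoundaryNorm whose bins are offset by 0.5 around
each label value, with one more boundary than colors. A standalone sketch of the same
idea using only NumPy and Matplotlib; the labels array, seed, and alpha here are
illustrative:

    import numpy as np
    from matplotlib import colors

    labels = np.array([[0, 3, 3], [7, 0, 12]])      # toy labels image
    uniq = np.unique(labels)                        # [0, 3, 7, 12]
    rng = np.random.default_rng(42)                 # fixed seed -> stable colors
    rgba = rng.random((len(uniq), 4))
    rgba[:, 3] = 150 / 255                          # semi-transparent, like alpha=150
    rgba[uniq == 0] = (0, 0, 0, 1)                  # opaque black background label
    cmap = colors.ListedColormap(rgba)
    bounds = np.append(uniq - 0.5, uniq[-1] + 0.5)  # one more bound than colors
    norm = colors.BoundaryNorm(bounds, len(uniq))
    # ax.imshow(labels, cmap=cmap, norm=norm) now colors each label consistently,
    # however unevenly the label values are spaced.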
bionicles/neuromax
[ "a53a17a1c033c11ac607a9e28f43b1f906e58aad" ]
[ "nature/bricks/experiments/conv_set/conv_set_class.py" ]
[ "# conv-kernel.py\n# why?: build a resnet with kernel and attention set convolutions\nimport tensorflow as tf\n\nfrom .kernel import get_kernel\n\nfrom tools import concat_1D_coords, log, make_uuid\n\nB, L, K = tf.keras.backend, tf.keras.layers, tf.keras\n\n# todo ... fix one_for_all and all_for_one inside graph_model\nSET_OPTIONS = [-1, 1, 2, 3]\n\n\nclass KConvSet1D(L.Layer):\n \"\"\"Convolve a learned kernel over sets of elements from a 1D tensor\"\"\"\n\n def __init__(self, agent, brick_id, in_spec, out_spec, set_size):\n self.out_spec = out_spec\n self.in_spec = in_spec\n d_out = out_spec if isinstance(out_spec, int) else out_spec.shape[-1]\n d_in = in_spec if isinstance(in_spec, int) else in_spec.shape[-1]\n d_in2 = None\n if set_size is None:\n set_size = agent.pull_choices(f\"{brick_id}_KConvSet_set_size\",\n SET_OPTIONS)\n self.brick_id = brick_id\n self.agent = agent\n if set_size == 1:\n self.call = self.call_for_one\n elif set_size == 2:\n self.call = self.call_for_two\n elif set_size == 3:\n self.call = self.call_for_three\n elif set_size == \"one_for_all\": # ragged sensors\n # d_in is size of 1 element of shape\n # d_in2 is code_spec.size\n d_out = out_spec.size\n set_size = 1\n self.reshape = L.Reshape(out_spec.shape)\n self.call = self.call_one_for_all\n self.flatten = L.Flatten()\n elif set_size == \"all_for_one\": # ragged actuators\n d_in = in_spec.size\n # d_in2 is 1 (placeholder range)\n # d_out is size of 1 element of the output shape\n self.call = self.call_all_for_one\n self.kernel = get_kernel(agent, brick_id, d_in, d_out, set_size,\n d_in2=d_in2)\n self.id = make_uuid([id, \"KConvSet1D\", set_size])\n super(KConvSet1D, self).__init__(name=self.id)\n\n def call_one_for_all(self, input): # input unknown = ragged sensor\n \"\"\"Each input element innervates all output elements\"\"\"\n # output = tf.foldl(lambda a, item: a + self.kernel(item), input)\n output = tf.map_fn(lambda item: self.kernel(item), input)\n log(\"call_one_for_all\", output.shape, color=\"blue\")\n output = tf.math.reduce_sum(output, axis=1)\n log(\"call_one_for_all\", output.shape, color=\"blue\")\n return output\n\n def call_all_for_one(self, inputs): # output unknown = ragged actuator\n \"\"\"All input elements innervate each output element\"\"\"\n # log(\"\")\n # log(\"we map all inputs onto one output\", color=\"green\")\n # log(f\"[{self.in_spec.shape}] in_spec\", color=\"white\")\n code, placeholder = inputs\n placeholder_with_coords = concat_1D_coords(placeholder)\n placeholder_with_coords = tf.squeeze(placeholder_with_coords, 0)\n coords = tf.slice(placeholder_with_coords, [0, 1], [-1, 1])\n code = tf.squeeze(code, 0)\n log(list(code.shape), \"code\", color=\"white\")\n log(list(coords.shape), \"coords\", color=\"white\")\n output = tf.map_fn(\n lambda coord: self.kernel(\n tf.concat([code, coord], 0)\n ), tf.expand_dims(coords, 1))\n # log(f\"[{self.out_spec.shape}] out_spec\", color=\"blue\")\n output = tf.reduce_sum(output, 1)\n # log(list(output.shape), \"output\", color=\"yellow\")\n # log(\"\")\n return output\n\n # TODO: find a nice recursive approach to N-ary set convolutions\n def call_for_one(self, atoms):\n return tf.map_fn(lambda a1: self.kernel(a1), atoms)\n\n def call_for_two(self, atoms):\n return tf.map_fn(lambda a1: tf.reduce_sum(tf.map_fn(lambda a2: self.kernel([a1, a2]), atoms), axis=0), atoms)\n\n def call_for_three(self, atoms):\n return tf.map_fn(lambda a1: tf.reduce_sum(tf.map_fn(lambda a2: tf.reduce_sum(tf.map_fn(lambda a3: self.kernel([a1, a2, a3]), atoms), 
axis=0), atoms), axis=0), atoms)\n\n # TOO COMPLICATED:\n # def call_autoregressive(self, code, coords):\n # return tf.foldl(lambda done, coord: tf.concat(done,\n # tf.reduce_mean(\n # tf.map_fn(lambda done_item:\n # self.kernel([coord, done_item code]), coords), axis=0)), coords)\n" ]
[ [ "tensorflow.math.reduce_sum", "tensorflow.expand_dims", "tensorflow.squeeze", "tensorflow.slice", "tensorflow.concat", "tensorflow.reduce_sum" ] ]
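
KConvSet1D.call_for_two in the record above applies the learned kernel to every ordered
pair of set elements, then sums over the partner axis. A toy eager-mode sketch of that
nesting; the Dense layer is a stand-in for get_kernel, and the pair is concatenated
rather than passed as a list (both assumptions made only so the snippet is
self-contained):

    import tensorflow as tf

    atoms = tf.random.normal((5, 4))       # a set of 5 elements, d_in = 4
    kernel = tf.keras.layers.Dense(4)      # hypothetical stand-in for get_kernel

    def pairwise_set_conv(atoms):
        # for each a1, score every (a1, a2) pair, then reduce over partners
        return tf.map_fn(
            lambda a1: tf.reduce_sum(
                tf.map_fn(lambda a2: kernel(tf.concat([a1, a2], 0)[None])[0],
                          atoms),
                axis=0),
            atoms)

    out = pairwise_set_conv(atoms)         # shape (5, 4): one feature per element

The triple-nested call_for_three follows the same shape, one map_fn/reduce_sum level
deeper, at O(n^3) kernel evaluations versus O(n^2) here.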