repo_name
stringlengths 6
130
| hexsha
sequence | file_path
sequence | code
sequence | apis
sequence | possible_versions
list |
---|---|---|---|---|---|
lizhaokun/Table-Extraction-and-Chinese-OCR | [
"5636e7140c07e41d8bdbc0a0f8c27a6eb33df706"
] | [
"dnn/ocr.py"
] | [
"import cv2\nimport os\nimport time\nimport json\nimport numpy as np\nfrom PIL import Image\nfrom config import ocrPath,GPU\ndef read_characters():\n p= ocrPath.replace('.weights','.json')\n if os.path.exists(p):\n with open(p, encoding='utf-8') as f:\n characters = json.loads(f.read())\n return characters\n else:\n return ''\n\n\ncharactersPred = ' '+read_characters()+'| '\nif 1:\n from dnn.darknet import load_net,predict_image,array_to_image\n ocrNet = load_net(ocrPath.replace('.weights','.cfg').encode(),ocrPath.encode(), 0)\nelse:\n ocrNet = cv2.dnn.readNetFromDarknet(ocrPath.replace('.weights','.cfg'),ocrPath)\n\ndef predict_cpu(image):\n \"\"\"\n cnn ctc model \n same errors, fix opencv dnn to use\n \"\"\"\n scale = image.size[1]*1.0 / 32\n w = image.size[0] / scale\n w = int(w)\n image = image.resize((w,32),Image.BILINEAR)\n image = (np.array(image.convert('L'))/255.0-0.5)/0.5\n image = np.array([[image]])\n ocrNet.setInput(image)\n y_pred = ocrNet.forward(ocrNet.getUnconnectedOutLayersNames())\n y_pred = y_pred[0][0,:,-1,:]\n out = decode(y_pred)##\n return out\n \ndef predict_darknet(image):\n scale = image.size[1]*1.0 / 32\n w = image.size[0] / scale\n w = int(w)\n image = image.resize((w,32),Image.BILINEAR)\n image = (np.array(image.convert('L'))/255.0-0.5)/0.5\n h,w = image.shape\n if w<8:\n return ''\n tmp = np.zeros((h,w,1))\n tmp[:,:,0] = image\n \n im = array_to_image(image)\n res=predict_image(ocrNet,im)\n outW = int(np.ceil(w/4)-3)\n nchars = len(charactersPred)\n out = [ res[i] for i in range(outW*nchars)] \n out = np.array(out).reshape((nchars,outW))\n out = out.transpose((1,0))\n return decode(out)\n \n \ndef decode(pred):\n t = pred.argmax(axis=1)\n length = len(t)\n char_list = []\n n = len(charactersPred)\n for i in range(length):\n if t[i] not in [n-1,n-1] and (not (i > 0 and t[i - 1] == t[i])):\n char_list.append(charactersPred[t[i]])\n return ''.join(char_list)\n \n\n\nif __name__=='__main__':\n t =time.time()\n img=Image.open('./test/test.png')\n res = predict_darknet(img)\n print(time.time()-t,res)\n"
] | [
[
"numpy.ceil",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
msamribeiro/ultrasound-speech-error-detection | [
"c9661676c8f993e383f07d08fd8091811bf9286a"
] | [
"model.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nModel architecture for PyTorch\n\nDate: 2020\nAuthor: M. Sam Ribeiro\n\"\"\"\n\nimport numpy as np\nnp.random.seed(42)\n\nimport random\nrandom.seed(42)\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n\nclass Model(nn.Module):\n\n def __init__(self, num_channels, audio_dim, num_classes):\n super(Model, self).__init__()\n\n # Encoder size is the flattened features from the ultrasound encoder.\n # we normally estimate this based on input dimensions, number of\n # channels, or kernel size. Since this is a pre-trained model with\n # fixed-sized inputs, we hard-code it here for simplicity.\n self.encoder_size = 16896\n\n # Audio Encoder\n self.audio_fc1 = nn.Linear(audio_dim, 256, bias=True)\n\n # Ultrasound Encoder\n self.conv1 = nn.Conv2d(in_channels=num_channels, out_channels=32, kernel_size=5)\n self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5)\n self.batch_norm = nn.BatchNorm1d(self.encoder_size+256)\n\n # phone classifier\n self.fc1 = nn.Linear(self.encoder_size+256, 256, bias=True)\n self.fc2 = nn.Linear(256, 128, bias=True)\n self.fc3 = nn.Linear(128, num_classes, bias=True)\n self.softmax = nn.LogSoftmax(dim=1)\n\n\n\n def forward(self, ultra, audio):\n ''' forward pass '''\n u = ultra\n a = audio\n\n # encode audio\n a = F.relu( self.audio_fc1(a) )\n\n # encode ultrasound\n u = F.max_pool2d(F.relu(self.conv1(u)), kernel_size=(2, 2))\n u = F.max_pool2d(F.relu(self.conv2(u)), kernel_size=(2, 2))\n u = u.view(-1, self.encoder_size) # flatten\n\n # join features and normalise\n x = torch.cat([u, a], dim=1)\n x = self.batch_norm(x)\n\n # phone classifier\n x = F.relu( self.fc1(x) )\n x = F.relu( self.fc2(x) )\n x = self.fc3(x)\n x = self.softmax(x)\n\n return x\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.LogSoftmax",
"numpy.random.seed",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ziniBRC/RNASSR-Net | [
"7e43f62bb00c75c913b7f53fd085151517eeca98"
] | [
"nets/RNA_graph_classification/graphsage_net.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport dgl\n\n\"\"\"\n GraphSAGE: \n William L. Hamilton, Rex Ying, Jure Leskovec, Inductive Representation Learning on Large Graphs (NeurIPS 2017)\n https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf\n\"\"\"\n\nfrom layers.graphsage_layer import GraphSageLayer\nfrom layers.mlp_readout_layer import MLPReadout\n\n\nclass GraphSageNet(nn.Module):\n \"\"\"\n Grahpsage network with multiple GraphSageLayer layers\n \"\"\"\n def __init__(self, net_params):\n super().__init__()\n in_dim = net_params['in_dim']\n hidden_dim = net_params['hidden_dim']\n out_dim = net_params['out_dim']\n n_classes = net_params['n_classes']\n in_feat_dropout = net_params['in_feat_dropout']\n dropout = net_params['dropout']\n aggregator_type = net_params['sage_aggregator']\n n_layers = net_params['L'] \n batch_norm = net_params['batch_norm']\n residual = net_params['residual']\n self.readout = net_params['readout']\n \n self.embedding_h = nn.Linear(in_dim, hidden_dim)\n self.in_feat_dropout = nn.Dropout(in_feat_dropout)\n \n self.layers = nn.ModuleList([GraphSageLayer(hidden_dim, hidden_dim, F.relu,\n dropout, aggregator_type, batch_norm, residual) for _ in range(n_layers-1)])\n self.layers.append(GraphSageLayer(hidden_dim, out_dim, F.relu, dropout, aggregator_type, batch_norm, residual))\n self.MLP_layer = MLPReadout(out_dim, n_classes)\n \n def forward(self, g, h, e):\n h = self.embedding_h(h)\n h = self.in_feat_dropout(h)\n for conv in self.layers:\n h = conv(g, h)\n g.ndata['h'] = h\n \n if self.readout == \"sum\":\n hg = dgl.sum_nodes(g, 'h')\n elif self.readout == \"max\":\n hg = dgl.max_nodes(g, 'h')\n elif self.readout == \"mean\":\n hg = dgl.mean_nodes(g, 'h')\n else:\n hg = dgl.mean_nodes(g, 'h') # default readout is mean nodes\n \n return self.MLP_layer(hg)\n \n def loss(self, pred, label):\n criterion = nn.CrossEntropyLoss()\n loss = criterion(pred, label)\n return loss"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.nn.Linear",
"torch.nn.Dropout"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
korintje/pygjf | [
"fb30ad84fe7da1f08c4a7a76f4a02d796d88081f"
] | [
"pygjf.py"
] | [
"import math\r\nimport numpy as np\r\n\r\ndef cart2sph(cart):\r\n XsqPlusYsq = cart[0]**2 + cart[1]**2\r\n r = math.sqrt(XsqPlusYsq + cart[2]**2)\r\n elev = math.atan2(cart[2], math.sqrt(XsqPlusYsq))\r\n az = math.atan2(cart[1], cart[2])\r\n return np.array([r, elev, az])\r\n\r\ndef sph2cart(sph):\r\n radius = sph[0]\r\n elevation = sph[1]\r\n azimuth = sph[2]\r\n ax = np.cos(azimuth) * np.sin(np.pi/2 - elevation) * radius\r\n ay = np.sin(azimuth) * np.sin(np.pi/2 - elevation) * radius\r\n az = np.cos(np.pi/2 - elevation) * radius\r\n return np.array([ax, ay, az])\r\n\r\ndef get_hexagonal_cart(distance, layer_num, dimension=2):\r\n if dimension == 2:\r\n z = []\r\n elif dimension == 3:\r\n z = [0.0]\r\n else:\r\n raise(\"dimension must be 2 or 3\")\r\n coords = []\r\n d = distance * layer_num\r\n sp = math.sin(math.pi / 3.0)\r\n cp = math.cos(math.pi / 3.0)\r\n sm = -1 * math.sin(math.pi / 3.0)\r\n cm = -1 * math.cos(math.pi / 3.0)\r\n vertexes = [\r\n np.array([d * 1.0, 0.0] + z), \r\n np.array([d * cp, d * sp] + z), \r\n np.array([d * cm, d * sp] + z), \r\n np.array([d * -1.0, 0.0] + z), \r\n np.array([d * cm, d * sm] + z), \r\n np.array([d * cp, d * sm] + z), \r\n ]\r\n for i, vertex in enumerate(vertexes):\r\n for m in range(layer_num):\r\n next_idx = (i + 1) % 6\r\n edge = vertexes[next_idx] - vertex\r\n coords.append(vertex + m / layer_num * edge)\r\n return coords\r\n\r\n\r\nclass GaussianInput():\r\n\r\n def __init__(self, headers, title, total_charge, multiplicity, atoms):\r\n self.headers = headers\r\n self.title = title\r\n self.total_charge = total_charge\r\n self.multiplicity = multiplicity\r\n self.atoms = atoms\r\n \r\n @classmethod\r\n def load_from_file(cls, filepath):\r\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\r\n string = f.read()\r\n return cls.load_from_string(string)\r\n\r\n @classmethod\r\n def load_from_string(cls, string):\r\n lines = string.split(\"\\n\")\r\n seps = [i for i, n in enumerate(lines) if not n]\r\n sep_1st = seps[0]\r\n sep_2nd = seps[1]\r\n headers = lines[0:sep_1st]\r\n title = lines[sep_1st + 1]\r\n charge_multiplicity = lines[sep_2nd + 1].split()\r\n total_charge = int(charge_multiplicity[0])\r\n multiplicity = int(charge_multiplicity[1])\r\n _atoms = [line.split() for line in lines[sep_2nd + 2:] if line]\r\n atoms = []\r\n for _atom in _atoms:\r\n atom = {}\r\n atom[\"element\"] = _atom[0]\r\n atom[\"coord\"] = np.array([float(coord) for coord in _atom[1:4]])\r\n atoms.append(atom)\r\n return cls(headers, title, total_charge, multiplicity, atoms)\r\n\r\n @classmethod\r\n def is_valid(cls):\r\n pass\r\n\r\n def to_string(self):\r\n atom_lines = []\r\n for atom in self.atoms:\r\n x, y, z = atom[\"coord\"][0], atom[\"coord\"][1], atom[\"coord\"][2]\r\n element = atom[\"element\"]\r\n atom_line = \" {} {: .8f} {: .8f} {: .8f}\".format(element, x, y, z)\r\n atom_lines.append(atom_line)\r\n return \"\\n\".join(self.headers) \\\r\n + \"\\n\\n\" + self.title \\\r\n + \"\\n\\n\" + str(self.total_charge) + \" \" + str(self.multiplicity) \\\r\n + \"\\n\" + \"\\n\".join(atom_lines) \\\r\n + \"\\n\"\r\n\r\n def to_file(self, filepath):\r\n string = self.to_string()\r\n with open(filepath, \"w\") as f:\r\n f.write(string)\r\n\r\n def rotate(self, origin, azimuth=0.0, elevation=0.0):\r\n for atom in self.atoms:\r\n cart = atom[\"coord\"]\r\n rel_cart = cart - origin\r\n rel_sph = cart2sph(rel_cart)\r\n rot_sph = rel_sph + np.array([0.0, elevation, azimuth])\r\n rot_cart = sph2cart(rot_sph)\r\n atom[\"coord\"] = rot_cart\r\n \r\n def clone(self, 
atom_idx, cart):\r\n center_atom = self.atoms[atom_idx]\r\n delta = cart - center_atom[\"coord\"]\r\n atoms = []\r\n for atom in self.atoms[:]:\r\n atom[\"coord\"] = atom[\"coord\"] + delta\r\n atoms.append(atom)\r\n self.atoms += atoms\r\n\r\n def move(self, atom_idx, cart):\r\n delta = cart - self.atoms[atom_idx][\"coord\"]\r\n new_atoms = []\r\n for atom in self.atoms:\r\n new_atoms.append({\"element\": atom[\"element\"], \"coord\": (atom[\"coord\"] + delta)})\r\n self.atoms = new_atoms\r\n \r\n def clean_atoms(self):\r\n self.atoms = []\r\n\r\n def hex_repeat(self, distance, layer, origin_idx, perp_origin_idx):\r\n oc = self.atoms[origin_idx][\"coord\"]\r\n pc = self.atoms[perp_origin_idx][\"coord\"]\r\n origins = get_hexagonal_cart(distance, layer, dimension=3)\r\n rot_v = cart2sph(pc - oc)\r\n # rot_el = math.pi / 2 - rot_v[1]\r\n _atoms = self.atoms[:]\r\n self.clean_atoms()\r\n for i, origin_cart in enumerate(origins):\r\n _tmp_atoms = _atoms\r\n _tmp_mol = GaussianInput([], \"\", 0, 1, _tmp_atoms)\r\n _tmp_mol.move(origin_idx, origin_cart)\r\n self.atoms.extend(_tmp_mol.atoms)\r\n "
] | [
[
"numpy.array",
"numpy.cos",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
menajosep/recipe-summarization | [
"3eb595582962b38daadca04b93f3e6dcf2dd0146"
] | [
"src/utils.py"
] | [
"\"\"\"Utility methods.\"\"\"\nimport os\nfrom sklearn.model_selection import train_test_split\nimport _pickle as pickle\n\nimport config\nfrom constants import empty, eos, FN0\n\n\ndef join_ingredients(ingredients_listlist):\n \"\"\"Join multiple lists of ingredients with ' , '.\"\"\"\n return [' , '.join(i) for i in ingredients_listlist]\n\n\ndef get_flat_ingredients_list(ingredients_joined_train):\n \"\"\"Flatten lists of ingredients encoded as a string into a single list.\"\"\"\n return ' , '.join(ingredients_joined_train).split(' , ')\n\n\ndef section_print():\n \"\"\"Memorized function keeping track of section number.\"\"\"\n section_number = 0\n\n def inner(message):\n \"\"\"Print section number.\"\"\"\n global section_number\n section_number += 1\n print('Section {}: {}'.format(section_number, message))\n print('Section {}: initializing section function'.format(section_number))\n return inner\n\n\ndef is_filename_char(x):\n \"\"\"Return True if x is an acceptable filename character.\"\"\"\n if x.isalnum():\n return True\n if x in ['-', '_']:\n return True\n return False\n\n\ndef url_to_filename(filename):\n \"\"\"Map a URL string to filename by removing unacceptable characters.\"\"\"\n return \"\".join(x for x in filename if is_filename_char(x))\n\n\ndef prt(label, word_idx, idx2word):\n \"\"\"Map `word_idx` list to words and print it with its associated `label`.\"\"\"\n words = [idx2word[word] for word in word_idx]\n print('{}: {}\\n'.format(label, ' '.join(words)))\n\n\ndef str_shape(x):\n \"\"\"Format the dimension of numpy array `x` as a string.\"\"\"\n return 'x'.join([str(element) for element in x.shape])\n\n\ndef load_embedding(nb_unknown_words, emb_type):\n \"\"\"Read word embeddings and vocabulary from disk.\"\"\"\n with open(os.path.join(config.path_data, '{}-{}.pkl'.format(emb_type,FN0)), 'rb') as fp:\n embedding, idx2word, word2idx, embedding_idx2idx = pickle.load(fp)\n vocab_size, embedding_size = embedding.shape\n print('dimension of embedding space for words: {:,}'.format(embedding_size))\n print('vocabulary size: {:,} the last {:,} words can be used as place holders for unknown/oov words'.\n format(vocab_size, nb_unknown_words))\n print('total number of different words: {:,}'.format(len(idx2word)))\n print('number of words outside vocabulary which we can substitue using embedding similarity: {:,}'.\n format(len(embedding_idx2idx)))\n print('number of words that will be regarded as unknonw(unk)/out-of-vocabulary(oov): {:,}'.\n format(len(idx2word) - vocab_size - len(embedding_idx2idx)))\n return embedding, idx2word, word2idx, embedding_idx2idx\n\n\ndef load_data():\n \"\"\"Read recipe data from disk.\"\"\"\n with open(os.path.join(config.path_data, '{}.data.pkl'.format(FN0)), 'rb') as fp:\n X, Y = pickle.load(fp)\n print('number of examples', len(X), len(Y))\n return X, Y\n\n\ndef process_vocab(idx2word, vocab_size, oov0, nb_unknown_words):\n \"\"\"Update vocabulary to account for unknown words.\"\"\"\n # reserve vocabulary space for unkown words\n for i in range(nb_unknown_words):\n idx2word[vocab_size - 1 - i] = '<{}>'.format(i)\n\n # mark words outside vocabulary with ^ at their end\n for i in range(oov0, len(idx2word)):\n idx2word[i] = idx2word[i] + '^'\n\n # add empty word and end-of-sentence to vocab\n idx2word[empty] = '_'\n idx2word[eos] = '~'\n\n return idx2word\n\n\ndef load_split_data(nb_val_samples, seed):\n \"\"\"Create train-test split.\"\"\"\n # load data and create train test split\n X, Y = load_data()\n X_train, X_test, Y_train, Y_test = 
train_test_split(X, Y, test_size=nb_val_samples, random_state=seed)\n del X, Y # free up memory by removing X and Y\n return X_train, X_test, Y_train, Y_test\n\n\nif __name__ == '__main__':\n print(url_to_filename('http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename'))\n"
] | [
[
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JiekaiJia/pettingzoo_comunication | [
"1e85d5edb87ac867385649616e030284c0b6910f"
] | [
"learning.py"
] | [
"from gym import spaces\nimport numpy as np\nfrom scipy.special import softmax\nfrom pettingzoo.mpe import simple_spread_v2, simple_speaker_listener_v3, simple_tag_v2, simple_reference_v2, simple_world_comm_v2,simple_crypto_v2\nfrom pettingzoo.sisl import multiwalker_v7, pursuit_v3, waterworld_v3\nfrom pettingzoo.magent import adversarial_pursuit_v2\nfrom pettingzoo.utils.conversions import to_parallel_wrapper\nfrom pettingzoo.utils.wrappers import AssertOutOfBoundsWrapper, OrderEnforcingWrapper\nfrom comm_channel import ParallelCommWrapper, CommWrapper\nfrom ray.rllib.env import PettingZooEnv\nimport random\n\nfrom utils import init_comm_dict\n\n# env = adversarial_pursuit_v2.env()\n# comm_dict = init_comm_dict(env)\n# comm_dict['comm_bits'] = 4\n# comm_dict['receivers'][env.possible_agents[2]] = [env.possible_agents[0], env.possible_agents[1]]\n# env = CommWrapper(env, comm_dict)\n# env.reset()\n# for agent in env.agent_iter():\n# env.render()\n# observations, rewards, dones, infos = env.last()\n# print(observations.shape)\n# if dones:\n# action = None\n# else:\n# # action = random.choice([0, 5, 10, 15])\n# action = np.random.randint(0, 20)\n# # action = np.tanh(np.random.randn(1, env.action_spaces[agent].shape[0]).reshape(env.action_spaces[agent].shape[0],))\n# env.step(action)\n\n\npar_env = adversarial_pursuit_v2.parallel_env()\ncomm_dict = init_comm_dict(par_env)\ncomm_dict['comm_bits'] = 4\ncomm_dict['receivers'][par_env.possible_agents[2]] = [par_env.possible_agents[0], par_env.possible_agents[1]]\npar_env = ParallelCommWrapper(par_env, comm_dict)\nobs = par_env.reset()\nfor step in range(25):\n par_env.render()\n print({k: v.shape for k,v in obs.items()})\n # actions = {agent: random.choice([0, 5, 10, 15]) for agent in par_env.agents}\n actions = {agent: np.random.randint(0, 20) for agent in par_env.agents}\n # actions = {agent: np.tanh(np.random.randn(1, par_env.action_spaces[agent].shape[0]).reshape(par_env.action_spaces[agent].shape[0],)) for agent in par_env.agents}\n obs, _, _, _ = par_env.step(actions)\n"
] | [
[
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jchanxtarov/vbpr | [
"972d682b41ff715deb1fe56f98e96299a0afb670"
] | [
"src/models/base.py"
] | [
"# Copyright (c) latataro (jchanxtarov). All rights reserved.\n# Licensed under the MIT License.\n\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Any, Generic, TypeVar\n\nimport torch as th\nimport torch.nn as nn\nfrom utils.loaders.base import BaseDataset\nfrom utils.types import UserItems\n\nDataset = TypeVar('Dataset', bound=BaseDataset)\n\n\nclass BasePredictor(Generic[Dataset], metaclass=ABCMeta):\n\n @abstractmethod\n def load(self, dataset: Dataset) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def train(self, dataloader: th.utils.data.DataLoader) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def save(self, name_data: str, name_model: str, uniqid: str) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def predict(self, pretrain_path: str) -> UserItems:\n raise NotImplementedError\n\n def load_pretrained_model(self, model: nn.Module, path: str) -> Any:\n model.load_state_dict(th.load(path, map_location=th.device('cpu')))\n return model\n"
] | [
[
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gyan42/mozhi | [
"ee54692b1913141e5fdfda486b7dcd2a37e9f39f"
] | [
"mozhi/serve/torch/handler/ner_handler.py"
] | [
"import importlib\nimport inspect\nimport os\nimport logging\nimport torch\nimport pickle\nfrom abc import ABC\n\nimport ts\nfrom ts.torch_handler.base_handler import BaseHandler\nfrom ts.torch_handler.text_handler import TextHandler\n\nfrom mozhi.preprocessor.ipreprocessor import IPreprocessor\nfrom mozhi.bin.urn.models_urn import TF_MODEL_OBJECT_MAP, PYTORCH_MODEL_OBJECT_MAP\nfrom mozhi.bin.urn.preprocessor_urn import PREPROCESSOR_OBJ_MAP\n\nlogging.getLogger().setLevel(logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n\ndef load(file_path):\n with open(file_path, 'rb') as f:\n return pickle.load(f)\n\ndef list_classes_from_module(module, parent_class=None):\n \"\"\"\n Parse user defined module to get all model service classes in it.\n\n :param module:\n :param parent_class:\n :return: List of model service class definitions\n \"\"\"\n\n # Parsing the module to get all defined classes\n classes = [cls[1] for cls in inspect.getmembers(module, lambda member: inspect.isclass(member) and\n member.__module__ == module.__name__)]\n # filter classes that is subclass of parent_class\n if parent_class is not None:\n return [c for c in classes if issubclass(c, parent_class)]\n\n return classes\n\n\nclass NERHandler(BaseHandler):\n \"\"\"\n A custom model handler implementation.\n \"\"\"\n\n def __init__(self):\n self._context = None\n self.initialized = False\n self.explain = False\n self.input_text = None\n self._preprocessor = None\n\n def _load_pickled_model(self, model_dir, model_file, model_pt_path):\n \"\"\"\n Loads the pickle file from the given model path.\n\n Args:\n model_dir (str): Points to the location of the model artefacts.\n model_file (.py): the file which contains the model class.\n model_pt_path (str): points to the location of the model pickle file.\n\n Raises:\n RuntimeError: It raises this error when the model.py file is missing.\n ValueError: Raises value error when there is more than one class in the label,\n since the mapping supports only one label per class.\n\n Returns:\n serialized model file: Returns the pickled pytorch model file\n \"\"\"\n model_def_path = os.path.join(model_dir, model_file)\n logger.error(\"===============model_def_path \" + model_def_path)\n\n if not os.path.isfile(model_def_path):\n raise RuntimeError(\"Missing the model.py file\")\n\n logger.error(\"===============model_class_definitions\" + importlib.import_module(model_file.split(\".\")[0]))\n module = importlib.import_module(model_file.split(\".\")[0])\n model_class_definitions = list_classes_from_module(module)\n logger.error(\"===============model_class_definitions\" + model_class_definitions)\n\n if len(model_class_definitions) != 1:\n raise ValueError(\n \"Expected only one class as model definition. 
{}\".format(\n model_class_definitions\n )\n )\n\n model_class = model_class_definitions[0]\n logger.error(\"===============model_class\" + model_class)\n model = model_class()\n if model_pt_path:\n state_dict = torch.load(model_pt_path, map_location=self.device)\n model.load_state_dict(state_dict)\n return model\n\n def _load_torchscript_model(self, model_pt_path):\n \"\"\"Loads the PyTorch model and returns the NN model object.\n\n Args:\n model_pt_path (str): denotes the path of the model file.\n\n Returns:\n (NN Model Object) : Loads the model object.\n \"\"\"\n logger.error(\"self.device===============>\" + str(self.device))\n\n # model = torch.jit.load(model_pt_path, map_location=self.device)\n model = torch.jit.load(model_pt_path)\n logger.error(\"_load_torchscript_model===============>\" + model_pt_path)\n\n return model\n\n def initialize(self, context: ts.context.Context):\n \"\"\"\n Initialize model. This will be called during model loading time\n :param context: Initial context contains model server system properties.\n :return:\n \"\"\"\n\n logger.error(\"===================>context\" + str(context))\n properties = context.system_properties\n logger.error(\"===================>properties\" + str(properties))\n model_dir = properties.get(\"model_dir\")\n\n logger.info(\"*\" * 100)\n\n self._preprocessor = load(os.path.join(model_dir, \"NaiveSentencePreprocessor\"))\n\n logger.info(\"=\" * 100)\n logger.error(self._preprocessor.id2label(3))\n logger.info(\"*\" * 100)\n logger.error(model_dir)\n\n\n self.map_location = \"cuda\" if torch.cuda.is_available() and properties.get(\"gpu_id\") is not None else \"cpu\"\n self.device = torch.device(\n self.map_location + \":\" + str(properties.get(\"gpu_id\"))\n if torch.cuda.is_available() and properties.get(\"gpu_id\") is not None\n else self.map_location\n )\n self.manifest = context.manifest\n\n logger.error(\"===================>context.manifest\" + str(context.manifest))\n\n \"\"\"\n .pt\n {'createdOn': '11/06/2021 21:27:42', 'runtime': 'python', 'model': {'modelName': 'bilstmcrf', \n 'serializedFile': 'bilstmcrftorch.pt', 'handler': 'ner_handler.py', 'modelFile': 'bilstm_crf_torch.py', \n 'modelVersion': '1.0'}, 'archiverVersion': '0.4.0'}\n \"\"\"\n\n\n\n model_pt_path = None\n if \"serializedFile\" in self.manifest[\"model\"]:\n serialized_file = self.manifest[\"model\"][\"serializedFile\"]\n model_pt_path = os.path.join(model_dir, serialized_file)\n\n logger.error(\"model_pt_path===============>\" + model_pt_path)\n # model def file\n model_file = self.manifest[\"model\"].get(\"modelFile\", \"\")\n logger.error(\"model_file===============>\" + model_pt_path)\n\n if model_file:\n logger.debug(\"Loading eager model\")\n self.model = self._load_pickled_model(model_dir, model_file, model_pt_path)\n self.model.to(self.device)\n else:\n logger.debug(\"Loading torchscript model\")\n if not os.path.isfile(model_pt_path):\n raise RuntimeError(\"Missing the model.pt file\")\n\n self.model = self._load_torchscript_model(model_pt_path)\n\n self.model.eval()\n\n logger.debug('Model file %s loaded successfully', model_pt_path)\n\n # # Load class mapping for classifiers\n # mapping_file_path = os.path.join(model_dir, \"index_to_name.json\")\n # self.mapping = load_label_mapping(mapping_file_path)\n\n self.initialized = True\n # load the model, refer 'custom handler class' above for details\n\n def preprocess(self, data):\n \"\"\"\n Transform raw input into model input data.\n :param batch: list of raw requests, should match batch size\n 
:return: list of preprocessed model input data\n \"\"\"\n # Take the input data and make it inference ready\n preprocessed_data = data[0].get(\"data\")\n if preprocessed_data is None:\n preprocessed_data = data[0].get(\"body\")\n logger.info(\"=\" * 100 + \"preprocessed_data \" + str(preprocessed_data))\n\n preprocessed_data = preprocessed_data.decode('UTF-8')\n preprocessed_data = self._preprocessor.tokenize(sentence=preprocessed_data)\n print(\"Preprocessed data: \\n\" + str(preprocessed_data))\n\n return preprocessed_data\n\n\n def inference(self, model_input):\n \"\"\"\n Internal inference methods\n :param model_input: transformed model input data\n :return: list of inference output in NDArray\n \"\"\"\n # Do some inference call to engine here and return output\n logger.info(\"=\" * 100 + \"inference \" + str(model_input))\n model_input = torch.tensor(model_input)\n model_output = self.model(model_input)\n logger.info(\"=\" * 100 + \"model_output \" + str(model_output))\n model_output = torch.argmax(model_output, dim=-1)\n logger.info(\"=\" * 100 + \"model_output \" + str(model_output[0]))\n return model_output\n\n def postprocess(self, inference_output):\n \"\"\"\n Return inference result.\n :param inference_output: list of inference output\n :return: list of predict results\n \"\"\"\n # Take output from network and post-process to desired format\n # postprocess_output = inference_output\n # return torch.argmax(inference_output, dim=-1)\n return [self._preprocessor.ids2labels(p) for p in inference_output]\n\n\n def handle(self, data, context):\n \"\"\"\n Invoke by TorchServe for prediction request.\n Do pre-processing of data, prediction using model and postprocessing of prediciton output\n :param data: [{body: bytearray()}]Input data for prediction\n :param context: Initial context contains model server system properties.\n :return: prediction output\n \"\"\"\n\n logger.info(\"=\" * 100 + \"handle \" + str(data))\n model_input = self.preprocess(data)\n\n model_output = self.inference(model_input)\n res = self.postprocess(model_output)\n logger.info(\"*\" * 100 + str(res))\n return res"
] | [
[
"torch.jit.load",
"torch.load",
"torch.tensor",
"torch.cuda.is_available",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dasepli/fastNLP | [
"97883fcabfb88e3878cb9b73d878a26746615e8b"
] | [
"fastNLP/io/pipe/coreference.py"
] | [
"r\"\"\"undocumented\"\"\"\n\n__all__ = [\n \"CoReferencePipe\"\n]\n\nimport collections\n\nimport numpy as np\n\nfrom fastNLP.core.vocabulary import Vocabulary\nfrom .pipe import Pipe\nfrom ..data_bundle import DataBundle\nfrom ..loader.coreference import CoReferenceLoader\nfrom ...core.const import Const\n\n\nclass CoReferencePipe(Pipe):\n r\"\"\"\n 对Coreference resolution问题进行处理,得到文章种类/说话者/字符级信息/序列长度。\n\n 处理完成后数据包含文章类别、speaker信息、句子信息、句子对应的index、char、句子长度、target:\n\n .. csv-table::\n :header: \"words1\", \"words2\",\"words3\",\"words4\",\"chars\",\"seq_len\",\"target\"\n\n \"bc\", \"[[0,0],[1,1]]\",\"[['I','am'],[]]\",\"[[1,2],[]]\",\"[[[1],[2,3]],[]]\",\"[2,3]\",\"[[[2,3],[6,7]],[[10,12],[20,22]]]\"\n \"[...]\", \"[...]\",\"[...]\",\"[...]\",\"[...]\",\"[...]\",\"[...]\"\n\n dataset的print_field_meta()函数输出的各个field的被设置成input和target的情况为::\n\n +-------------+-----------+--------+-------+---------+\n | field_names | raw_chars | target | chars | seq_len |\n +-------------+-----------+--------+-------+---------+\n | is_input | False | True | True | True |\n | is_target | False | True | False | True |\n | ignore_type | | False | False | False |\n | pad_value | | 0 | 0 | 0 |\n +-------------+-----------+--------+-------+---------+\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n def process(self, data_bundle: DataBundle):\n r\"\"\"\n 对load进来的数据进一步处理原始数据包含:raw_key,raw_speaker,raw_words,raw_clusters\n \n .. csv-table::\n :header: \"raw_key\", \"raw_speaker\",\"raw_words\",\"raw_clusters\"\n\n \"bc/cctv/00/cctv_0000_0\", \"[[Speaker#1, Speaker#1],[]]\",\"[['I','am'],[]]\",\"[[[2,3],[6,7]],[[10,12],[20,22]]]\"\n \"bc/cctv/00/cctv_0000_1\", \"[['Speaker#1', 'peaker#1'],[]]\",\"[['He','is'],[]]\",\"[[[2,3],[6,7]],[[10,12],[20,22]]]\"\n \"[...]\", \"[...]\",\"[...]\",\"[...]\"\n\n\n :param data_bundle:\n :return:\n \"\"\"\n genres = {g: i for i, g in enumerate([\"bc\", \"bn\", \"mz\", \"nw\", \"pt\", \"tc\", \"wb\"])}\n vocab = Vocabulary().from_dataset(*data_bundle.datasets.values(), field_name= Const.RAW_WORDS(3))\n vocab.build_vocab()\n word2id = vocab.word2idx\n data_bundle.set_vocab(vocab, Const.INPUTS(0))\n if self.config.char_path:\n char_dict = get_char_dict(self.config.char_path)\n else:\n char_set = set()\n for i,w in enumerate(word2id):\n if i < 2:\n continue\n for c in w:\n char_set.add(c)\n\n char_dict = collections.defaultdict(int)\n char_dict.update({c: i for i, c in enumerate(char_set)})\n\n for name, ds in data_bundle.datasets.items():\n # genre\n ds.apply(lambda x: genres[x[Const.RAW_WORDS(0)][:2]], new_field_name=Const.INPUTS(0))\n\n # speaker_ids_np\n ds.apply(lambda x: speaker2numpy(x[Const.RAW_WORDS(1)], self.config.max_sentences, is_train=name == 'train'),\n new_field_name=Const.INPUTS(1))\n\n # sentences\n ds.rename_field(Const.RAW_WORDS(3),Const.INPUTS(2))\n\n # doc_np\n ds.apply(lambda x: doc2numpy(x[Const.INPUTS(2)], word2id, char_dict, max(self.config.filter),\n self.config.max_sentences, is_train=name == 'train')[0],\n new_field_name=Const.INPUTS(3))\n # char_index\n ds.apply(lambda x: doc2numpy(x[Const.INPUTS(2)], word2id, char_dict, max(self.config.filter),\n self.config.max_sentences, is_train=name == 'train')[1],\n new_field_name=Const.CHAR_INPUT)\n # seq len\n ds.apply(lambda x: doc2numpy(x[Const.INPUTS(2)], word2id, char_dict, max(self.config.filter),\n self.config.max_sentences, is_train=name == 'train')[2],\n new_field_name=Const.INPUT_LEN)\n\n # clusters\n ds.rename_field(Const.RAW_WORDS(2), Const.TARGET)\n\n 
ds.set_ignore_type(Const.TARGET)\n ds.set_padder(Const.TARGET, None)\n ds.set_input(Const.INPUTS(0), Const.INPUTS(1), Const.INPUTS(2), Const.INPUTS(3), Const.CHAR_INPUT, Const.INPUT_LEN)\n ds.set_target(Const.TARGET)\n\n return data_bundle\n\n def process_from_file(self, paths):\n '''\n example: \n bundle = CoReferencePipe(config).process_from_file({'train': config.train_path, 'dev': config.dev_path,\n 'test': config.test_path})\n '''\n bundle = CoReferenceLoader().load(paths)\n return self.process(bundle)\n\n\n# helper\n\ndef doc2numpy(doc, word2id, chardict, max_filter, max_sentences, is_train):\n docvec, char_index, length, max_len = _doc2vec(doc, word2id, chardict, max_filter, max_sentences, is_train)\n assert max(length) == max_len\n assert char_index.shape[0] == len(length)\n assert char_index.shape[1] == max_len\n doc_np = np.zeros((len(docvec), max_len), int)\n for i in range(len(docvec)):\n for j in range(len(docvec[i])):\n doc_np[i][j] = docvec[i][j]\n return doc_np, char_index, length\n\ndef _doc2vec(doc,word2id,char_dict,max_filter,max_sentences,is_train):\n max_len = 0\n max_word_length = 0\n docvex = []\n length = []\n if is_train:\n sent_num = min(max_sentences,len(doc))\n else:\n sent_num = len(doc)\n\n for i in range(sent_num):\n sent = doc[i]\n length.append(len(sent))\n if (len(sent) > max_len):\n max_len = len(sent)\n sent_vec =[]\n for j,word in enumerate(sent):\n if len(word)>max_word_length:\n max_word_length = len(word)\n if word in word2id:\n sent_vec.append(word2id[word])\n else:\n sent_vec.append(word2id[\"UNK\"])\n docvex.append(sent_vec)\n\n char_index = np.zeros((sent_num, max_len, max_word_length),dtype=int)\n for i in range(sent_num):\n sent = doc[i]\n for j,word in enumerate(sent):\n char_index[i, j, :len(word)] = [char_dict[c] for c in word]\n\n return docvex,char_index,length,max_len\n\ndef speaker2numpy(speakers_raw,max_sentences,is_train):\n if is_train and len(speakers_raw)> max_sentences:\n speakers_raw = speakers_raw[0:max_sentences]\n speakers = flatten(speakers_raw)\n speaker_dict = {s: i for i, s in enumerate(set(speakers))}\n speaker_ids = np.array([speaker_dict[s] for s in speakers])\n return speaker_ids\n\n# 展平\ndef flatten(l):\n return [item for sublist in l for item in sublist]\n\ndef get_char_dict(path):\n vocab = [\"<UNK>\"]\n with open(path) as f:\n vocab.extend(c.strip() for c in f.readlines())\n char_dict = collections.defaultdict(int)\n char_dict.update({c: i for i, c in enumerate(vocab)})\n return char_dict\n"
] | [
[
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hossein-haeri/MADDPG | [
"4d3abf907b890000374303769bf22db02a2b56d8"
] | [
"experiments/train.py"
] | [
"import argparse\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport pickle\nimport matplotlib.pyplot as plt\nimport maddpg.common.tf_util as U\nfrom maddpg.trainer.maddpg import MADDPGAgentTrainer\nimport tensorflow.contrib.layers as layers\nimport csv\n\n# np.random.seed(101)\n\ndef parse_args():\n parser = argparse.ArgumentParser(\"Reinforcement Learning experiments for multiagent environments\")\n # Environment\n parser.add_argument(\"--scenario\", type=str, default=\"simple\", help=\"name of the scenario script\")\n parser.add_argument(\"--max-episode-len\", type=int, default=25, help=\"maximum episode length\")\n parser.add_argument(\"--num-episodes\", type=int, default=60000, help=\"number of episodes\")\n parser.add_argument(\"--num-adversaries\", type=int, default=0, help=\"number of adversaries\")\n parser.add_argument(\"--good-policy\", type=str, default=\"maddpg\", help=\"policy for good agents\")\n parser.add_argument(\"--adv-policy\", type=str, default=\"maddpg\", help=\"policy of adversaries\")\n # Core training parameters\n parser.add_argument(\"--lr\", type=float, default=1e-2, help=\"learning rate for Adam optimizer\")\n parser.add_argument(\"--gamma\", type=float, default=0.95, help=\"discount factor\")\n parser.add_argument(\"--batch-size\", type=int, default=1024, help=\"number of episodes to optimize at the same time\")\n parser.add_argument(\"--num-units\", type=int, default=64, help=\"number of units in the mlp\")\n # Checkpointing\n parser.add_argument(\"--exp-name\", type=str, default=\"\", help=\"name of the experiment\")\n parser.add_argument(\"--save-dir\", type=str, default=\"./saved_policy/\", help=\"directory in which training state and model should be saved\")\n parser.add_argument(\"--save-rate\", type=int, default=100, help=\"save model once every time this many episodes are completed\")\n parser.add_argument(\"--load-dir\", type=str, default=\"\", help=\"directory in which training state and model are loaded\")\n # Evaluation\n parser.add_argument(\"--restore\", action=\"store_true\", default=False)\n parser.add_argument(\"--display\", action=\"store_true\", default=False)\n parser.add_argument(\"--benchmark\", action=\"store_true\", default=False)\n parser.add_argument(\"--benchmark-iters\", type=int, default=100000, help=\"number of iterations run for benchmarking\")\n parser.add_argument(\"--benchmark-dir\", type=str, default=\"./benchmark_files/\", help=\"directory where benchmark data is saved\")\n parser.add_argument(\"--plots-dir\", type=str, default=\"./learning_curves/\", help=\"directory where plot data is saved\")\n return parser.parse_args()\n\ndef mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):\n # This model takes as input an observation and returns values of all actions\n with tf.variable_scope(scope, reuse=reuse):\n out = input\n out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)\n out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)\n return out\n\ndef make_env(scenario_name, arglist, benchmark=False):\n from multiagent.environment import MultiAgentEnv\n import multiagent.scenarios as scenarios\n\n # load scenario from script\n scenario = scenarios.load(scenario_name + \".py\").Scenario()\n # create world\n world = scenario.make_world()\n # create multiagent environment\n if benchmark:\n env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, 
scenario.observation, scenario.benchmark_data)\n else:\n env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)\n return env\n\ndef get_trainers(env, num_adversaries, obs_shape_n, arglist):\n trainers = []\n model = mlp_model\n trainer = MADDPGAgentTrainer\n for i in range(num_adversaries):\n trainers.append(trainer(\n \"agent_%d\" % i, model, obs_shape_n, env.action_space, i, arglist,\n local_q_func=(arglist.adv_policy=='ddpg')))\n\n for i in range(num_adversaries, env.n):\n trainers.append(trainer(\n \"agent_%d\" % i, model, obs_shape_n, env.action_space, i, arglist,\n local_q_func=(arglist.good_policy=='ddpg')))\n return trainers\n\n\ndef get_means(ls, chunk_length):\n groups = [ls[x:x+chunk_length] for x in range(0, len(ls), chunk_length)]\n means_list = [sum(group)/len(group) for group in groups]\n return np.asarray(means_list)\n\n\ndef get_STDs(ls, chunk_length):\n groups = [ls[x:x+chunk_length] for x in range(0, len(ls), chunk_length)]\n std_list = [np.std(group) for group in groups]\n return np.asarray(std_list)\n\n\ndef plot_mean_with_std(ls, chunk_length, **kwargs):\n\n scale = 1\n for key, value in kwargs.items():\n if key=='scale':\n scale = value\n if key=='color':\n c = value\n means = get_means(ls, chunk_length)\n stds = get_STDs(ls, chunk_length)\n # plt.plot(means - scale*stds, lw=0.5, c='#396AB1')\n # plt.plot(means + scale*stds, lw=0.5, c='#396AB1')\n plt.fill_between(range(len(means)),means - scale*stds, means + scale*stds, color=c, alpha=0.2)\n plt.plot(means, lw=1, color=c)\n\n\ndef plot_rewards(agent_rewards, average_window):\n plt.cla()\n colors = [\n '#396AB1',\n '#DA7C30',\n '#3E9651',\n '#CC2529',\n '#535154',\n '#6B4C9A',\n '#922428',\n '#948B3D'\n ]\n for i, reward_list in enumerate(agent_rewards):\n plot_mean_with_std(agent_rewards[i][:], int(average_window), scale=0.5, color=colors[i])\n # plt.plot(get_means(agent_rewards[i][:], int(average_window/4)))\n ax = plt.gca()\n ax.set_xlabel('Episodes (x' + str(arglist.save_rate) + ')')\n ax.set_ylabel('Reward')\n plt.grid()\n plt.legend(['agent 1 (Team A)', 'agent 2 (Team A)', 'agent 3 (Team B)', 'agent 4 (Team B)'])\n plt.pause(0.0000001)\n\ndef train(arglist):\n csvfile = open('test.csv', 'w', newline='')\n filewriter = csv.writer(csvfile, delimiter=' ')\n\n with U.single_threaded_session():\n # Create environment\n env = make_env(arglist.scenario, arglist, arglist.benchmark)\n # Create agent trainers\n obs_shape_n = [env.observation_space[i].shape for i in range(env.n)]\n num_adversaries = min(env.n, arglist.num_adversaries)\n trainers = get_trainers(env, num_adversaries, obs_shape_n, arglist)\n print('Using good policy {} and adv policy {}'.format(arglist.good_policy, arglist.adv_policy))\n\n # Initialize\n U.initialize()\n\n # Load previous results, if necessary\n if arglist.load_dir == \"\":\n arglist.load_dir = arglist.save_dir\n if arglist.restore or arglist.benchmark:\n print('Loading previous state...')\n U.load_state(arglist.load_dir)\n\n episode_rewards = [0.0] # sum of rewards for all agents\n agent_rewards = [[0.0] for _ in range(env.n)] # individual agent reward\n final_ep_rewards = [] # sum of rewards for training curve\n final_ep_ag_rewards = [] # agent rewards for training curve\n agent_info = [[[]]] # placeholder for benchmarking info\n saver = tf.train.Saver()\n obs_n = env.reset()\n episode_step = 0\n train_step = 0\n t_start = time.time()\n\n print('Starting iterations...')\n while True:\n # get action\n action_n = [agent.action(obs) for agent, obs in 
zip(trainers,obs_n)]\n # environment step\n new_obs_n, rew_n, done_n, info_n = env.step(action_n)\n\n episode_step += 1\n done = all(done_n)\n terminal = (episode_step >= arglist.max_episode_len)\n # collect experience\n for i, agent in enumerate(trainers):\n agent.experience(obs_n[i], action_n[i], rew_n[i], new_obs_n[i], done_n[i], terminal)\n obs_n = new_obs_n\n\n for i, rew in enumerate(rew_n):\n episode_rewards[-1] += rew\n agent_rewards[i][-1] += rew\n\n\n\n if done or terminal:\n obs_n = env.reset()\n episode_step = 0\n episode_rewards.append(0)\n for a in agent_rewards:\n a.append(0)\n agent_info.append([[]])\n\n # increment global step counter\n train_step += 1\n\n # for benchmarking learned policies\n if arglist.benchmark:\n for i, info in enumerate(info_n):\n agent_info[-1][i].append(info_n['n'])\n if train_step > arglist.benchmark_iters and (done or terminal):\n file_name = arglist.benchmark_dir + arglist.exp_name + '.pkl'\n print('Finished benchmarking, now saving...')\n with open(file_name, 'wb') as fp:\n pickle.dump(agent_info[:-1], fp)\n break\n continue\n\n # for displaying learned policies\n if arglist.display:\n time.sleep(0.1)\n env.render()\n continue\n\n # update all trainers, if not in display or benchmark mode\n loss = None\n for agent in trainers:\n agent.preupdate()\n for agent in trainers:\n loss = agent.update(trainers, train_step)\n\n # save model, display training output\n if terminal and (len(episode_rewards) % arglist.save_rate == 0):\n U.save_state(arglist.save_dir, saver=saver)\n # print statement depends on whether or not there are adversaries\n if num_adversaries == 0 and False:\n print(\"steps: {}, episodes: {}, mean episode reward: {}, time: {}\".format(\n train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]), round(time.time()-t_start, 3)))\n\n else:\n print(\"steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, time: {}\".format(\n train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]),\n [np.mean(rew[-arglist.save_rate:]) for rew in agent_rewards], round(time.time()-t_start, 3)))\n plot_rewards(agent_rewards, arglist.save_rate)\n np.savetxt('rewards.csv', agent_rewards, delimiter=', ')\n\n # print(agent_rewards)\n t_start = time.time()\n # Keep track of final episode reward\n final_ep_rewards.append(np.mean(episode_rewards[-arglist.save_rate:]))\n for rew in agent_rewards:\n final_ep_ag_rewards.append(np.mean(rew[-arglist.save_rate:]))\n\n # saves final episode reward for plotting training curve later\n if len(episode_rewards) > arglist.num_episodes:\n rew_file_name = arglist.plots_dir + arglist.exp_name + '_rewards.pkl'\n with open(rew_file_name, 'wb') as fp:\n pickle.dump(final_ep_rewards, fp)\n agrew_file_name = arglist.plots_dir + arglist.exp_name + '_agrewards.pkl'\n with open(agrew_file_name, 'wb') as fp:\n pickle.dump(final_ep_ag_rewards, fp)\n print('...Finished total of {} episodes.'.format(len(episode_rewards)))\n # filewriter.writerow(agent_rewards)\n break\n\nif __name__ == '__main__':\n arglist = parse_args()\n train(arglist)\n plt.show()\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.legend",
"numpy.asarray",
"matplotlib.pyplot.cla",
"tensorflow.contrib.layers.fully_connected",
"matplotlib.pyplot.plot",
"numpy.std",
"numpy.mean",
"matplotlib.pyplot.grid",
"tensorflow.variable_scope",
"tensorflow.train.Saver",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"numpy.savetxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
meijun/cs231n-assignment | [
"6180f3081530db9afca17d15ed6169057d063669"
] | [
"assignment2/cs231n/layers.py"
] | [
"import numpy as np\n\n\ndef affine_forward(x, w, b):\n \"\"\"\n Computes the forward pass for an affine (fully-connected) layer.\n\n The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N\n examples, where each example x[i] has shape (d_1, ..., d_k). We will\n reshape each input into a vector of dimension D = d_1 * ... * d_k, and\n then transform it to an output vector of dimension M.\n\n Inputs:\n - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)\n - w: A numpy array of weights, of shape (D, M)\n - b: A numpy array of biases, of shape (M,)\n \n Returns a tuple of:\n - out: output, of shape (N, M)\n - cache: (x, w, b)\n \"\"\"\n out = None\n #############################################################################\n # TODO: Implement the affine forward pass. Store the result in out. You #\n # will need to reshape the input into rows. #\n #############################################################################\n assert isinstance(x, np.ndarray)\n assert isinstance(w, np.ndarray)\n assert isinstance(b, np.ndarray)\n\n N = x.shape[0]\n D, M = w.shape\n x1 = x.reshape((N, -1))\n out = x1.dot(w) + b\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, w, b)\n return out, cache\n\n\ndef affine_backward(dout, cache):\n \"\"\"\n Computes the backward pass for an affine layer.\n\n Inputs:\n - dout: Upstream derivative, of shape (N, M)\n - cache: Tuple of:\n - x: Input data, of shape (N, d_1, ... d_k)\n - w: Weights, of shape (D, M)\n\n Returns a tuple of:\n - dx: Gradient with respect to x, of shape (N, d1, ..., d_k)\n - dw: Gradient with respect to w, of shape (D, M)\n - db: Gradient with respect to b, of shape (M,)\n \"\"\"\n x, w, b = cache\n dx, dw, db = None, None, None\n #############################################################################\n # TODO: Implement the affine backward pass. #\n #############################################################################\n N = x.shape[0]\n dx = dout.dot(w.T).reshape(x.shape)\n dw = x.reshape((N, -1)).T.dot(dout)\n db = dout.sum(axis=0)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx, dw, db\n\n\ndef relu_forward(x):\n \"\"\"\n Computes the forward pass for a layer of rectified linear units (ReLUs).\n\n Input:\n - x: Inputs, of any shape\n\n Returns a tuple of:\n - out: Output, of the same shape as x\n - cache: x\n \"\"\"\n out = None\n #############################################################################\n # TODO: Implement the ReLU forward pass. #\n #############################################################################\n out = np.maximum(x, 0)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = x\n return out, cache\n\n\ndef relu_backward(dout, cache):\n \"\"\"\n Computes the backward pass for a layer of rectified linear units (ReLUs).\n\n Input:\n - dout: Upstream derivatives, of any shape\n - cache: Input x, of same shape as dout\n\n Returns:\n - dx: Gradient with respect to x\n \"\"\"\n dx, x = None, cache\n #############################################################################\n # TODO: Implement the ReLU backward pass. 
#\n #############################################################################\n dx = dout * (x > 0)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx\n\n\ndef batchnorm_forward(x, gamma, beta, bn_param):\n \"\"\"\n Forward pass for batch normalization.\n \n During training the sample mean and (uncorrected) sample variance are\n computed from minibatch statistics and used to normalize the incoming data.\n During training we also keep an exponentially decaying running mean of the mean\n and variance of each feature, and these averages are used to normalize data\n at test-time.\n\n At each timestep we update the running averages for mean and variance using\n an exponential decay based on the momentum parameter:\n\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n\n Note that the batch normalization paper suggests a different test-time\n behavior: they compute sample mean and variance for each feature using a\n large number of training images rather than using a running average. For\n this implementation we have chosen to use running averages instead since\n they do not require an additional estimation step; the torch7 implementation\n of batch normalization also uses running averages.\n\n Input:\n - x: Data of shape (N, D)\n - gamma: Scale parameter of shape (D,)\n - beta: Shift paremeter of shape (D,)\n - bn_param: Dictionary with the following keys:\n - mode: 'train' or 'test'; required\n - eps: Constant for numeric stability\n - momentum: Constant for running mean / variance.\n - running_mean: Array of shape (D,) giving running mean of features\n - running_var Array of shape (D,) giving running variance of features\n\n Returns a tuple of:\n - out: of shape (N, D)\n - cache: A tuple of values needed in the backward pass\n \"\"\"\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #############################################################################\n # TODO: Implement the training-time forward pass for batch normalization. #\n # Use minibatch statistics to compute the mean and variance, use these #\n # statistics to normalize the incoming data, and scale and shift the #\n # normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates that #\n # you need for the backward pass should be stored in the cache variable. #\n # #\n # You should also use your computed sample mean and variance together with #\n # the momentum variable to update the running mean and running variance, #\n # storing your result in the running_mean and running_var variables. 
#\n #############################################################################\n mean = np.sum(x, axis=0) / N\n var = np.sum((x - mean) ** 2, axis=0) / N\n x_hat = (x - mean) / np.sqrt(var + eps)\n y = gamma * x_hat + beta\n\n out = y\n cache = (x, mean, var, eps, x_hat, y, gamma, beta)\n\n running_mean = momentum * running_mean + (1 - momentum) * mean\n running_var = momentum * running_var + (1 - momentum) * var\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n elif mode == 'test':\n #############################################################################\n # TODO: Implement the test-time forward pass for batch normalization. Use #\n # the running mean and variance to normalize the incoming data, then scale #\n # and shift the normalized data using gamma and beta. Store the result in #\n # the out variable. #\n #############################################################################\n x_hat = (x - running_mean) / np.sqrt(running_var + eps)\n y = gamma * x_hat + beta\n out = y\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache\n\n\ndef batchnorm_backward(dout, cache):\n \"\"\"\n Backward pass for batch normalization.\n \n For this implementation, you should write out a computation graph for\n batch normalization on paper and propagate gradients backward through\n intermediate nodes.\n \n Inputs:\n - dout: Upstream derivatives, of shape (N, D)\n - cache: Variable of intermediates from batchnorm_forward.\n \n Returns a tuple of:\n - dx: Gradient with respect to inputs x, of shape (N, D)\n - dgamma: Gradient with respect to scale parameter gamma, of shape (D,)\n - dbeta: Gradient with respect to shift parameter beta, of shape (D,)\n \"\"\"\n dx, dgamma, dbeta = None, None, None\n #############################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n #############################################################################\n x, mean, var, eps, x_hat, y, gamma, beta = cache\n N = x.shape[0]\n d_x_hat = dout * gamma\n d_var = np.sum(d_x_hat * (x - mean) * (-0.5) * ((var + eps) ** (-1.5)), axis=0)\n d_mean = np.sum(d_x_hat * (-1 / np.sqrt(var + eps)), axis=0)\n dx = d_x_hat * (1 / np.sqrt(var + eps)) + d_var * (2 * (x - mean) / N) + d_mean * (1.0 / N)\n dgamma = np.sum(dout * x_hat, axis=0)\n dbeta = np.sum(dout, axis=0)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx, dgamma, dbeta\n\n\ndef batchnorm_backward_alt(dout, cache):\n \"\"\"\n Alternative backward pass for batch normalization.\n \n For this implementation you should work out the derivatives for the batch\n normalizaton backward pass on paper and simplify as much as possible. 
You\n should be able to derive a simple expression for the backward pass.\n \n Note: This implementation should expect to receive the same cache variable\n as batchnorm_backward, but might not use all of the values in the cache.\n \n Inputs / outputs: Same as batchnorm_backward\n \"\"\"\n dx, dgamma, dbeta = None, None, None\n #############################################################################\n # TODO: Implement the backward pass for batch normalization. Store the #\n # results in the dx, dgamma, and dbeta variables. #\n # #\n # After computing the gradient with respect to the centered inputs, you #\n # should be able to compute gradients with respect to the inputs in a #\n # single statement; our implementation fits on a single 80-character line. #\n #############################################################################\n x, mean, var, eps, x_hat, y, gamma, beta = cache\n N = x.shape[0]\n # d_x_hat = dout * gamma\n # d_var = np.sum(d_x_hat * (x - mean) * (-0.5) * ((var + eps) ** (-1.5)), axis=0)\n # d_mean = np.sum(d_x_hat * (-1 / np.sqrt(var + eps)), axis=0)\n dx = gamma / np.sqrt(var + eps) * (dout - dout.mean(axis=0) - (x - mean) / (var + eps) * (np.mean(dout * (x - mean), axis=0))) # d_x_hat * (1 / np.sqrt(var + eps)) + d_var * (2 * (x - mean) / N) + d_mean * (1.0 / N)\n dgamma = np.sum(dout * x_hat, axis=0)\n dbeta = np.sum(dout, axis=0)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n \n return dx, dgamma, dbeta\n\n\ndef dropout_forward(x, dropout_param):\n \"\"\"\n Performs the forward pass for (inverted) dropout.\n\n Inputs:\n - x: Input data, of any shape\n - dropout_param: A dictionary with the following keys:\n - p: Dropout parameter. We drop each neuron output with probability p.\n - mode: 'test' or 'train'. If the mode is train, then perform dropout;\n if the mode is test, then just return the input.\n - seed: Seed for the random number generator. Passing seed makes this\n function deterministic, which is needed for gradient checking but not in\n real networks.\n\n Outputs:\n - out: Array of the same shape as x.\n - cache: A tuple (dropout_param, mask). In training mode, mask is the dropout\n mask that was used to multiply the input; in test mode, mask is None.\n \"\"\"\n p, mode = dropout_param['p'], dropout_param['mode']\n if 'seed' in dropout_param:\n np.random.seed(dropout_param['seed'])\n\n mask = None\n out = None\n\n if mode == 'train':\n ###########################################################################\n # TODO: Implement the training phase forward pass for inverted dropout. #\n # Store the dropout mask in the mask variable. #\n ###########################################################################\n mask = np.random.rand(*x.shape) > p\n out = x * mask / (1-p)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n elif mode == 'test':\n ###########################################################################\n # TODO: Implement the test phase forward pass for inverted dropout. 
#\n ###########################################################################\n out = x\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n cache = (dropout_param, mask)\n out = out.astype(x.dtype, copy=False)\n\n return out, cache\n\n\ndef dropout_backward(dout, cache):\n \"\"\"\n Perform the backward pass for (inverted) dropout.\n\n Inputs:\n - dout: Upstream derivatives, of any shape\n - cache: (dropout_param, mask) from dropout_forward.\n \"\"\"\n dropout_param, mask = cache\n mode = dropout_param['mode']\n \n dx = None\n if mode == 'train':\n ###########################################################################\n # TODO: Implement the training phase backward pass for inverted dropout. #\n ###########################################################################\n p = dropout_param['p']\n dx = dout * mask / (1-p)\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n elif mode == 'test':\n dx = dout\n return dx\n\n\ndef conv_forward_naive(x, w, b, conv_param):\n \"\"\"\n A naive implementation of the forward pass for a convolutional layer.\n\n The input consists of N data points, each with C channels, height H and width\n W. We convolve each input with F different filters, where each filter spans\n all C channels and has height HH and width HH.\n\n Input:\n - x: Input data of shape (N, C, H, W)\n - w: Filter weights of shape (F, C, HH, WW)\n - b: Biases, of shape (F,)\n - conv_param: A dictionary with the following keys:\n - 'stride': The number of pixels between adjacent receptive fields in the\n horizontal and vertical directions.\n - 'pad': The number of pixels that will be used to zero-pad the input.\n\n Returns a tuple of:\n - out: Output data, of shape (N, F, H', W') where H' and W' are given by\n H' = 1 + (H + 2 * pad - HH) / stride\n W' = 1 + (W + 2 * pad - WW) / stride\n - cache: (x, w, b, conv_param)\n \"\"\"\n out = None\n #############################################################################\n # TODO: Implement the convolutional forward pass. #\n # Hint: you can use the function np.pad for padding. 
#\n #############################################################################\n pad = conv_param['pad']\n stride = conv_param['stride']\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n H_ = 1 + (H + 2 * pad - HH) / stride\n W_ = 1 + (W + 2 * pad - WW) / stride\n out = np.zeros((N, F, H_, W_))\n x_pad = np.pad(x, ((0,), (0,), (pad,), (pad,)), 'constant')\n for i in range(N):\n for f in range(F):\n for h_ in range(H_):\n for w_ in range(W_):\n out[i, f, h_, w_] = np.sum(x_pad[i, :, h_*stride:h_*stride+HH, w_*stride:w_*stride+WW] * w[f]) + b[f]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n cache = (x, w, b, conv_param)\n return out, cache\n\n\ndef conv_backward_naive(dout, cache):\n \"\"\"\n A naive implementation of the backward pass for a convolutional layer.\n\n Inputs:\n - dout: Upstream derivatives.\n - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive\n\n Returns a tuple of:\n - dx: Gradient with respect to x\n - dw: Gradient with respect to w\n - db: Gradient with respect to b\n \"\"\"\n dx, dw, db = None, None, None\n #############################################################################\n # TODO: Implement the convolutional backward pass. #\n #############################################################################\n x, w, b, conv_param = cache\n pad = conv_param['pad']\n stride = conv_param['stride']\n N, C, H, W = x.shape\n F, C, HH, WW = w.shape\n H_ = 1 + (H + 2 * pad - HH) / stride\n W_ = 1 + (W + 2 * pad - WW) / stride\n x_pad = np.pad(x, ((0,), (0,), (pad,), (pad,)), 'constant')\n dx_pad = np.zeros_like(x_pad)\n dw = np.zeros_like(w)\n db = np.zeros_like(b)\n for i in range(N):\n for f in range(F):\n for h_ in range(H_):\n for w_ in range(W_):\n v = dout[i, f, h_, w_]\n db[f] += v\n dx_pad[i, :, h_*stride:h_*stride+HH, w_*stride:w_*stride+WW] += v * w[f]\n dw[f] += v * x_pad[i, :, h_*stride:h_*stride+HH, w_*stride:w_*stride+WW]\n dx = dx_pad[:,:,pad:H+pad, pad:W+pad]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx, dw, db\n\n\ndef max_pool_forward_naive(x, pool_param):\n \"\"\"\n A naive implementation of the forward pass for a max pooling layer.\n\n Inputs:\n - x: Input data, of shape (N, C, H, W)\n - pool_param: dictionary with the following keys:\n - 'pool_height': The height of each pooling region\n - 'pool_width': The width of each pooling region\n - 'stride': The distance between adjacent pooling regions\n\n Returns a tuple of:\n - out: Output data\n - cache: (x, pool_param)\n \"\"\"\n out = None\n #############################################################################\n # TODO: Implement the max pooling forward pass #\n #############################################################################\n N, C, H, W = x.shape\n H_, W_, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']\n HH, WW = (H - H_) / stride + 1, (W - W_) / stride + 1\n out = np.zeros((N, C, HH, WW))\n for i in range(N):\n for c in range(C):\n for h_ in range(HH):\n for w_ in range(WW):\n out[i, c, h_, w_] = np.max(x[i, c, h_*stride:h_*stride+H_, w_*stride:w_*stride+W_])\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n 
cache = (x, pool_param)\n return out, cache\n\n\ndef max_pool_backward_naive(dout, cache):\n \"\"\"\n A naive implementation of the backward pass for a max pooling layer.\n\n Inputs:\n - dout: Upstream derivatives\n - cache: A tuple of (x, pool_param) as in the forward pass.\n\n Returns:\n - dx: Gradient with respect to x\n \"\"\"\n dx = None\n #############################################################################\n # TODO: Implement the max pooling backward pass #\n #############################################################################\n x, pool_param = cache\n N, C, H, W = x.shape\n H_, W_, stride = pool_param['pool_height'], pool_param['pool_width'], pool_param['stride']\n HH, WW = (H - H_) / stride + 1, (W - W_) / stride + 1\n dx = np.zeros_like(x)\n for i in range(N):\n for c in range(C):\n for h_ in range(HH):\n for w_ in range(WW):\n v = dout[i, c, h_, w_]\n max = np.max(x[i, c, h_ * stride:h_ * stride + H_, w_ * stride:w_ * stride + W_])\n dx[i, c, h_ * stride:h_ * stride + H_, w_ * stride:w_ * stride + W_] += v * (x[i, c, h_ * stride:h_ * stride + H_, w_ * stride:w_ * stride + W_] == max)\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n return dx\n\n\ndef spatial_batchnorm_forward(x, gamma, beta, bn_param):\n \"\"\"\n Computes the forward pass for spatial batch normalization.\n \n Inputs:\n - x: Input data of shape (N, C, H, W)\n - gamma: Scale parameter, of shape (C,)\n - beta: Shift parameter, of shape (C,)\n - bn_param: Dictionary with the following keys:\n - mode: 'train' or 'test'; required\n - eps: Constant for numeric stability\n - momentum: Constant for running mean / variance. momentum=0 means that\n old information is discarded completely at every time step, while\n momentum=1 means that new information is never incorporated. The\n default of momentum=0.9 should work well in most situations.\n - running_mean: Array of shape (D,) giving running mean of features\n - running_var Array of shape (D,) giving running variance of features\n \n Returns a tuple of:\n - out: Output data, of shape (N, C, H, W)\n - cache: Values needed for the backward pass\n \"\"\"\n out, cache = None, None\n\n #############################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. 
#\n #############################################################################\n N, C, H, W = x.shape\n X = np.transpose(x, (0, 2, 3, 1)).reshape((-1, C)) # np.zeros((N * H * W, C))\n # for n in range(N):\n # for h in range(H):\n # for w in range(W):\n # X[n * H * W + h * W + w] = x[n, :, h, w]\n OUT, cache = batchnorm_forward(X, gamma, beta, bn_param)\n out = np.transpose(OUT.reshape((N, H, W, C)), (0, 3, 1, 2)) # np.zeros((N, C, H, W))\n # for n in range(N):\n # for h in range(H):\n # for w in range(W):\n # out[n, :, h, w] = OUT[n * H * W + h * W + w]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return out, cache\n\n\ndef spatial_batchnorm_backward(dout, cache):\n \"\"\"\n Computes the backward pass for spatial batch normalization.\n \n Inputs:\n - dout: Upstream derivatives, of shape (N, C, H, W)\n - cache: Values from the forward pass\n \n Returns a tuple of:\n - dx: Gradient with respect to inputs, of shape (N, C, H, W)\n - dgamma: Gradient with respect to scale parameter, of shape (C,)\n - dbeta: Gradient with respect to shift parameter, of shape (C,)\n \"\"\"\n dx, dgamma, dbeta = None, None, None\n\n #############################################################################\n # TODO: Implement the backward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n N, C, H, W = dout.shape\n dOUT = np.transpose(dout, (0, 2, 3, 1)).reshape((-1, C)) # np.zeros((N * H * W, C))\n # for n in range(N):\n # for h in range(H):\n # for w in range(W):\n # dOUT[n * H * W + h * W + w] = dout[n, :, h, w]\n dX, dgamma, dbeta = batchnorm_backward(dOUT, cache)\n dx = np.transpose(dX.reshape((N, H, W, C)), (0, 3, 1, 2)) # np.zeros((N, C, H, W))\n # for n in range(N):\n # for h in range(H):\n # for w in range(W):\n # dx[n, :, h, w] = dX[n * H * W + h * W + w]\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return dx, dgamma, dbeta\n \n\ndef svm_loss(x, y):\n \"\"\"\n Computes the loss and gradient using for multiclass SVM classification.\n\n Inputs:\n - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class\n for the ith input.\n - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and\n 0 <= y[i] < C\n\n Returns a tuple of:\n - loss: Scalar giving the loss\n - dx: Gradient of the loss with respect to x\n \"\"\"\n N = x.shape[0]\n correct_class_scores = x[np.arange(N), y]\n margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)\n margins[np.arange(N), y] = 0\n loss = np.sum(margins) / N\n num_pos = np.sum(margins > 0, axis=1)\n dx = np.zeros_like(x)\n dx[margins > 0] = 1\n dx[np.arange(N), y] -= num_pos\n dx /= N\n return loss, dx\n\n\ndef softmax_loss(x, y):\n \"\"\"\n Computes the loss and gradient for softmax classification.\n\n Inputs:\n - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class\n for the ith input.\n - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and\n 0 <= y[i] < C\n\n Returns a tuple of:\n - loss: Scalar giving the loss\n - dx: 
Gradient of the loss with respect to x\n \"\"\"\n probs = np.exp(x - np.max(x, axis=1, keepdims=True))\n probs /= np.sum(probs, axis=1, keepdims=True)\n N = x.shape[0]\n loss = -np.sum(np.log(probs[np.arange(N), y])) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx\n"
] | [
[
"numpy.maximum",
"numpy.pad",
"numpy.random.seed",
"numpy.sqrt",
"numpy.arange",
"numpy.max",
"numpy.zeros_like",
"numpy.random.rand",
"numpy.mean",
"numpy.transpose",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
markvrma/PlagiarismChecker | [
"e656829de05186baf3fd413643f98033ce4cd4d7"
] | [
"backend/OCR/word.py"
] | [
"import cv2\nimport numpy as np\nfrom OCR.utils import resize, ratio\n\n\ndef bb_to_img(img, lines):\n result = []\n for line in lines:\n for word in line:\n result.append(img[word[1] : word[3], word[0] : word[2]])\n return result\n\n\ndef wordDetection(image, join=False):\n \"\"\"Detecting the words bounding boxes.\n Return: numpy array of bounding boxes [x, y, x+w, y+h]\n \"\"\"\n # Preprocess image for word detection\n blurred = cv2.GaussianBlur(image, (5, 5), 18)\n edge_img = edgeDetection(blurred)\n ret, edge_img = cv2.threshold(edge_img, 50, 255, cv2.THRESH_BINARY)\n preprocessed_image = cv2.morphologyEx(\n edge_img, cv2.MORPH_CLOSE, np.ones((15, 15), np.uint8)\n )\n\n return textDetection(preprocessed_image, image, join)\n\n\ndef sort_words(boxes):\n \"\"\"Sort boxes - (x, y, x+w, y+h) from left to right, top to bottom.\"\"\"\n mean_height = sum([y2 - y1 for _, y1, _, y2 in boxes]) / len(boxes)\n boxes.view(\"i8,i8,i8,i8\").sort(order=[\"f1\"], axis=0)\n current_line = boxes[0][1]\n lines = []\n tmp_line = []\n for box in boxes:\n if box[1] > current_line + mean_height:\n lines.append(tmp_line)\n tmp_line = [box]\n current_line = box[1]\n continue\n tmp_line.append(box)\n current_line = (current_line * (len(tmp_line) - 1) + box[1]) / len(tmp_line)\n lines.append(tmp_line)\n\n for line in lines:\n line.sort(key=lambda box: box[0])\n\n return lines\n\n\ndef edgeDetection(im):\n \"\"\"\n Edge detection using sobel operator on each layer individually.\n Sobel operator is applied for each image layer (RGB)\n \"\"\"\n return np.max(\n np.array(\n [\n sobelFilter(im[:, :, 0]),\n sobelFilter(im[:, :, 1]),\n sobelFilter(im[:, :, 2]),\n ]\n ),\n axis=0,\n )\n\n\ndef sobelFilter(channel):\n \"\"\"Sobel operator.\"\"\"\n sobelX = cv2.Sobel(channel, cv2.CV_16S, 1, 0)\n sobelY = cv2.Sobel(channel, cv2.CV_16S, 0, 1)\n sobel = np.hypot(sobelX, sobelY)\n sobel[sobel > 255] = 255\n return np.uint8(sobel)\n\n\ndef textDetection(img, image, join=True):\n \"\"\"Text detection using contours.\"\"\"\n small = resize(img, 2000)\n\n # Finding contours\n mask = np.zeros(small.shape, np.uint8)\n cnt, hierarchy = cv2.findContours(\n np.copy(small), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE\n )\n\n index = 0\n boxes = []\n if cnt:\n # Go through all contours in top level\n while index >= 0:\n x, y, w, h = cv2.boundingRect(cnt[index])\n cv2.drawContours(mask, cnt, index, (255, 255, 255), cv2.FILLED)\n maskROI = mask[y : y + h, x : x + w]\n # Ratio of white pixels to area of bounding rectangle\n r = cv2.countNonZero(maskROI) / (w * h)\n\n # Limits for text\n if (\n r > 0.1\n and 1600 > w > 10\n and 1600 > h > 10\n and h / w < 3\n and w / h < 10\n and (60 // h) * w < 1000\n ):\n boxes += [[x, y, w, h]]\n\n index = hierarchy[0][index][0]\n\n if join:\n # Need more work\n boxes = group_rectangles(boxes)\n\n # image for drawing bounding boxes\n small = cv2.cvtColor(small, cv2.COLOR_GRAY2RGB)\n bounding_boxes = np.array([0, 0, 0, 0])\n for (x, y, w, h) in boxes:\n cv2.rectangle(small, (x, y), (x + w, y + h), (0, 255, 0), 2)\n bounding_boxes = np.vstack((bounding_boxes, np.array([x, y, x + w, y + h])))\n\n boxes = bounding_boxes.dot(ratio(image, small.shape[0])).astype(np.int64)\n return boxes[1:]\n else:\n return boxes\n\n\ndef union(a, b):\n x = min(a[0], b[0])\n y = min(a[1], b[1])\n w = max(a[0] + a[2], b[0] + b[2]) - x\n h = max(a[1] + a[3], b[1] + b[3]) - y\n return [x, y, w, h]\n\n\ndef _intersect(a, b, thresh=50):\n x = max(a[0] - thresh, b[0] - thresh)\n y = max(a[1] - thresh, b[1] - thresh)\n w = min(a[0] + 
a[2], b[0] + b[2]) - x\n    h = min(a[1] + a[3], b[1] + b[3]) - y\n    if w < 0 or h < 0:\n        return False\n    return True\n\n\ndef group_rectangles(rec):\n    \"\"\"\n    Union intersecting rectangles.\n    Args:\n        rec - list of rectangles in form [x, y, w, h]\n    Return:\n        list of grouped rectangles\n    \"\"\"\n    tested = [False for i in range(len(rec))]\n    final = []\n    i = 0\n    while i < len(rec):\n        if not tested[i]:\n            j = i + 1\n            while j < len(rec):\n                if not tested[j] and _intersect(rec[i], rec[j]):\n                    rec[i] = union(rec[i], rec[j])\n                    tested[j] = True\n                    j = i\n                    j += 1\n            final += [rec[i]]\n        i += 1\n\n    return final\n"
] | [
[
"numpy.uint8",
"numpy.ones",
"numpy.copy",
"numpy.array",
"numpy.zeros",
"numpy.hypot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lozovskii-Aleksandr/modin | [
"ed2a7a4adf973660eb15b9ea674cd5fb802f2424"
] | [
"modin/pandas/test/dataframe/test_indexing.py"
] | [
"# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport pytest\nimport numpy as np\nimport pandas\nfrom pandas.testing import assert_index_equal\nimport matplotlib\nimport modin.pandas as pd\nimport sys\n\nfrom modin.pandas.test.utils import (\n NROWS,\n RAND_LOW,\n RAND_HIGH,\n df_equals,\n arg_keys,\n name_contains,\n test_data,\n test_data_values,\n test_data_keys,\n axis_keys,\n axis_values,\n int_arg_keys,\n int_arg_values,\n create_test_dfs,\n eval_general,\n generate_multiindex,\n extra_test_parameters,\n default_to_pandas_ignore_string,\n)\nfrom modin.config import NPartitions\nfrom modin.utils import get_current_execution\nfrom modin.test.test_utils import warns_that_defaulting_to_pandas\n\nNPartitions.put(4)\n\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use(\"Agg\")\n\n# Our configuration in pytest.ini requires that we explicitly catch all\n# instances of defaulting to pandas, but some test modules, like this one,\n# have too many such instances.\n# TODO(https://github.com/modin-project/modin/issues/3655): catch all instances\n# of defaulting to pandas.\npytestmark = pytest.mark.filterwarnings(default_to_pandas_ignore_string)\n\n\ndef eval_setitem(md_df, pd_df, value, col=None, loc=None):\n if loc is not None:\n col = pd_df.columns[loc]\n\n value_getter = value if callable(value) else (lambda *args, **kwargs: value)\n\n eval_general(\n md_df, pd_df, lambda df: df.__setitem__(col, value_getter(df)), __inplace__=True\n )\n\n\[email protected](\n \"dates\",\n [\n [\"2018-02-27 09:03:30\", \"2018-02-27 09:04:30\"],\n [\"2018-02-27 09:03:00\", \"2018-02-27 09:05:00\"],\n ],\n)\[email protected](\"subset\", [\"a\", \"b\", [\"a\", \"b\"], None])\ndef test_asof_with_nan(dates, subset):\n data = {\"a\": [10, 20, 30, 40, 50], \"b\": [None, None, None, None, 500]}\n index = pd.DatetimeIndex(\n [\n \"2018-02-27 09:01:00\",\n \"2018-02-27 09:02:00\",\n \"2018-02-27 09:03:00\",\n \"2018-02-27 09:04:00\",\n \"2018-02-27 09:05:00\",\n ]\n )\n modin_where = pd.DatetimeIndex(dates)\n pandas_where = pandas.DatetimeIndex(dates)\n compare_asof(data, index, modin_where, pandas_where, subset)\n\n\[email protected](\n \"dates\",\n [\n [\"2018-02-27 09:03:30\", \"2018-02-27 09:04:30\"],\n [\"2018-02-27 09:03:00\", \"2018-02-27 09:05:00\"],\n ],\n)\[email protected](\"subset\", [\"a\", \"b\", [\"a\", \"b\"], None])\ndef test_asof_without_nan(dates, subset):\n data = {\"a\": [10, 20, 30, 40, 50], \"b\": [70, 600, 30, -200, 500]}\n index = pd.DatetimeIndex(\n [\n \"2018-02-27 09:01:00\",\n \"2018-02-27 09:02:00\",\n \"2018-02-27 09:03:00\",\n \"2018-02-27 09:04:00\",\n \"2018-02-27 09:05:00\",\n ]\n )\n modin_where = pd.DatetimeIndex(dates)\n pandas_where = pandas.DatetimeIndex(dates)\n compare_asof(data, index, modin_where, pandas_where, 
subset)\n\n\[email protected](\n \"lookup\",\n [[60, 70, 90], [60.5, 70.5, 100]],\n)\[email protected](\"subset\", [\"col2\", \"col1\", [\"col1\", \"col2\"], None])\ndef test_asof_large(lookup, subset):\n data = test_data[\"float_nan_data\"]\n index = list(range(NROWS))\n modin_where = pd.Index(lookup)\n pandas_where = pandas.Index(lookup)\n compare_asof(data, index, modin_where, pandas_where, subset)\n\n\ndef compare_asof(\n data, index, modin_where: pd.Index, pandas_where: pandas.Index, subset\n):\n modin_df = pd.DataFrame(data, index=index)\n pandas_df = pandas.DataFrame(data, index=index)\n df_equals(\n modin_df.asof(modin_where, subset=subset),\n pandas_df.asof(pandas_where, subset=subset),\n )\n df_equals(\n modin_df.asof(modin_where.values, subset=subset),\n pandas_df.asof(pandas_where.values, subset=subset),\n )\n df_equals(\n modin_df.asof(list(modin_where.values), subset=subset),\n pandas_df.asof(list(pandas_where.values), subset=subset),\n )\n df_equals(\n modin_df.asof(modin_where.values[0], subset=subset),\n pandas_df.asof(pandas_where.values[0], subset=subset),\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_first_valid_index(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n assert modin_df.first_valid_index() == (pandas_df.first_valid_index())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"n\", int_arg_values, ids=arg_keys(\"n\", int_arg_keys))\ndef test_head(data, n):\n # Test normal dataframe head\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n df_equals(modin_df.head(n), pandas_df.head(n))\n df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))\n\n # Test head when we call it from a QueryCompilerView\n modin_result = modin_df.loc[:, [\"col1\", \"col3\", \"col3\"]].head(n)\n pandas_result = pandas_df.loc[:, [\"col1\", \"col3\", \"col3\"]].head(n)\n df_equals(modin_result, pandas_result)\n\n\[email protected](reason=\"Defaulting to Pandas\")\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_iat(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data) # noqa F841\n\n with pytest.raises(NotImplementedError):\n modin_df.iat()\n\n\[email protected]\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_iloc(request, data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n if not name_contains(request.node.name, [\"empty_data\"]):\n # Scalar\n np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])\n\n # Series\n df_equals(modin_df.iloc[0], pandas_df.iloc[0])\n df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])\n df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])\n\n # DataFrame\n df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])\n # See issue #80\n # df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])\n df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])\n\n # Issue #43\n modin_df.iloc[0:3, :]\n\n # Write Item\n modin_df.iloc[[1, 2]] = 42\n pandas_df.iloc[[1, 2]] = 42\n df_equals(modin_df, pandas_df)\n\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n modin_df.iloc[0] = modin_df.iloc[1]\n pandas_df.iloc[0] = pandas_df.iloc[1]\n df_equals(modin_df, pandas_df)\n\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n modin_df.iloc[:, 0] = modin_df.iloc[:, 1]\n pandas_df.iloc[:, 0] = pandas_df.iloc[:, 1]\n df_equals(modin_df, pandas_df)\n\n # 
From issue #1775\n df_equals(\n modin_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5])],\n pandas_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5])],\n )\n\n # Read values, selecting rows with callable and a column with a scalar.\n df_equals(\n pandas_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5]), 0],\n modin_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5]), 0],\n )\n else:\n with pytest.raises(IndexError):\n modin_df.iloc[0, 1]\n\n\[email protected]\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_index(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n df_equals(modin_df.index, pandas_df.index)\n modin_df_cp = modin_df.copy()\n pandas_df_cp = pandas_df.copy()\n\n modin_df_cp.index = [str(i) for i in modin_df_cp.index]\n pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]\n df_equals(modin_df_cp.index, pandas_df_cp.index)\n\n\[email protected]\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_indexing_duplicate_axis(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]\n assert any(modin_df.index.duplicated())\n assert any(pandas_df.index.duplicated())\n\n df_equals(modin_df.iloc[0], pandas_df.iloc[0])\n df_equals(modin_df.loc[0], pandas_df.loc[0])\n df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])\n df_equals(\n modin_df.loc[0, modin_df.columns[0:4]],\n pandas_df.loc[0, pandas_df.columns[0:4]],\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_set_index(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n modin_result = modin_df.set_index([modin_df.index, modin_df.columns[0]])\n pandas_result = pandas_df.set_index([pandas_df.index, pandas_df.columns[0]])\n df_equals(modin_result, pandas_result)\n\n\[email protected]\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_keys(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n df_equals(modin_df.keys(), pandas_df.keys())\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_loc(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n key1 = modin_df.columns[0]\n key2 = modin_df.columns[1]\n # Scalar\n df_equals(modin_df.loc[0, key1], pandas_df.loc[0, key1])\n\n # Series\n df_equals(modin_df.loc[0], pandas_df.loc[0])\n df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])\n df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])\n df_equals(modin_df.loc[:, key1], pandas_df.loc[:, key1])\n\n # DataFrame\n df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])\n\n # List-like of booleans\n indices = [i % 3 == 0 for i in range(len(modin_df.index))]\n columns = [i % 5 == 0 for i in range(len(modin_df.columns))]\n modin_result = modin_df.loc[indices, columns]\n pandas_result = pandas_df.loc[indices, columns]\n df_equals(modin_result, pandas_result)\n\n modin_result = modin_df.loc[:, columns]\n pandas_result = pandas_df.loc[:, columns]\n df_equals(modin_result, pandas_result)\n\n modin_result = modin_df.loc[indices]\n pandas_result = pandas_df.loc[indices]\n df_equals(modin_result, pandas_result)\n\n # See issue #80\n # df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])\n df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])\n\n # From issue #421\n df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, 
key1]])\n df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])\n\n # From issue #1023\n key1 = modin_df.columns[0]\n key2 = modin_df.columns[-2]\n df_equals(modin_df.loc[:, key1:key2], pandas_df.loc[:, key1:key2])\n\n # Write Item\n modin_df_copy = modin_df.copy()\n pandas_df_copy = pandas_df.copy()\n modin_df_copy.loc[[1, 2]] = 42\n pandas_df_copy.loc[[1, 2]] = 42\n df_equals(modin_df_copy, pandas_df_copy)\n\n # Write an item, selecting rows with a callable.\n modin_df_copy2 = modin_df.copy()\n pandas_df_copy2 = pandas_df.copy()\n modin_df_copy2.loc[lambda df: df[key1].isin(list(range(1000)))] = 42\n pandas_df_copy2.loc[lambda df: df[key1].isin(list(range(1000)))] = 42\n df_equals(modin_df_copy2, pandas_df_copy2)\n\n # Write an item, selecting rows with a callable and a column with a scalar.\n modin_df_copy3 = modin_df.copy()\n pandas_df_copy3 = pandas_df.copy()\n modin_df_copy3.loc[lambda df: df[key1].isin(list(range(1000))), key1] = 42\n pandas_df_copy3.loc[lambda df: df[key1].isin(list(range(1000))), key1] = 42\n df_equals(modin_df_copy3, pandas_df_copy3)\n\n # Disabled for `BaseOnPython` because of the issue with `getitem_array`:\n # https://github.com/modin-project/modin/issues/3701\n if get_current_execution() != \"BaseOnPython\":\n # From issue #1775\n df_equals(\n modin_df.loc[lambda df: df.iloc[:, 0].isin(list(range(1000)))],\n pandas_df.loc[lambda df: df.iloc[:, 0].isin(list(range(1000)))],\n )\n\n # Read values, selecting rows with a callable and a column with a scalar.\n df_equals(\n pandas_df.loc[lambda df: df[key1].isin(list(range(1000))), key1],\n modin_df.loc[lambda df: df[key1].isin(list(range(1000))), key1],\n )\n\n # From issue #1374\n with pytest.raises(KeyError):\n modin_df.loc[\"NO_EXIST\"]\n\n\n# This tests the bug from https://github.com/modin-project/modin/issues/3736\ndef test_loc_setting_single_categorical_column():\n modin_df = pd.DataFrame({\"status\": [\"a\", \"b\", \"c\"]}, dtype=\"category\")\n pandas_df = pandas.DataFrame({\"status\": [\"a\", \"b\", \"c\"]}, dtype=\"category\")\n modin_df.loc[1:3, \"status\"] = \"a\"\n pandas_df.loc[1:3, \"status\"] = \"a\"\n df_equals(modin_df, pandas_df)\n\n\ndef test_loc_multi_index():\n modin_df = pd.read_csv(\n \"modin/pandas/test/data/blah.csv\", header=[0, 1, 2, 3], index_col=0\n )\n pandas_df = pandas.read_csv(\n \"modin/pandas/test/data/blah.csv\", header=[0, 1, 2, 3], index_col=0\n )\n\n df_equals(modin_df.loc[1], pandas_df.loc[1])\n df_equals(modin_df.loc[1, \"Presidents\"], pandas_df.loc[1, \"Presidents\"])\n df_equals(\n modin_df.loc[1, (\"Presidents\", \"Pure mentions\")],\n pandas_df.loc[1, (\"Presidents\", \"Pure mentions\")],\n )\n assert (\n modin_df.loc[1, (\"Presidents\", \"Pure mentions\", \"IND\", \"all\")]\n == pandas_df.loc[1, (\"Presidents\", \"Pure mentions\", \"IND\", \"all\")]\n )\n df_equals(modin_df.loc[(1, 2), \"Presidents\"], pandas_df.loc[(1, 2), \"Presidents\"])\n\n tuples = [\n (\"bar\", \"one\"),\n (\"bar\", \"two\"),\n (\"bar\", \"three\"),\n (\"bar\", \"four\"),\n (\"baz\", \"one\"),\n (\"baz\", \"two\"),\n (\"baz\", \"three\"),\n (\"baz\", \"four\"),\n (\"foo\", \"one\"),\n (\"foo\", \"two\"),\n (\"foo\", \"three\"),\n (\"foo\", \"four\"),\n (\"qux\", \"one\"),\n (\"qux\", \"two\"),\n (\"qux\", \"three\"),\n (\"qux\", \"four\"),\n ]\n\n modin_index = pd.MultiIndex.from_tuples(tuples, names=[\"first\", \"second\"])\n pandas_index = pandas.MultiIndex.from_tuples(tuples, names=[\"first\", \"second\"])\n frame_data = np.random.randint(0, 100, size=(16, 100))\n modin_df = 
pd.DataFrame(\n frame_data,\n index=modin_index,\n columns=[\"col{}\".format(i) for i in range(100)],\n )\n pandas_df = pandas.DataFrame(\n frame_data,\n index=pandas_index,\n columns=[\"col{}\".format(i) for i in range(100)],\n )\n df_equals(modin_df.loc[\"bar\", \"col1\"], pandas_df.loc[\"bar\", \"col1\"])\n assert modin_df.loc[(\"bar\", \"one\"), \"col1\"] == pandas_df.loc[(\"bar\", \"one\"), \"col1\"]\n df_equals(\n modin_df.loc[\"bar\", (\"col1\", \"col2\")],\n pandas_df.loc[\"bar\", (\"col1\", \"col2\")],\n )\n\n # From issue #1456\n transposed_modin = modin_df.T\n transposed_pandas = pandas_df.T\n df_equals(\n transposed_modin.loc[transposed_modin.index[:-2], :],\n transposed_pandas.loc[transposed_pandas.index[:-2], :],\n )\n\n # From issue #1610\n df_equals(modin_df.loc[modin_df.index], pandas_df.loc[pandas_df.index])\n df_equals(modin_df.loc[modin_df.index[:7]], pandas_df.loc[pandas_df.index[:7]])\n\n\ndef test_loc_empty():\n pandas_df = pandas.DataFrame(index=range(5))\n modin_df = pd.DataFrame(index=range(5))\n\n df_equals(pandas_df.loc[1], modin_df.loc[1])\n pandas_df.loc[1] = 3\n modin_df.loc[1] = 3\n df_equals(pandas_df, modin_df)\n\n\[email protected](\"index\", [[\"row1\", \"row2\", \"row3\"]])\[email protected](\"columns\", [[\"col1\", \"col2\"]])\ndef test_loc_assignment(index, columns):\n md_df, pd_df = create_test_dfs(index=index, columns=columns)\n for i, ind in enumerate(index):\n for j, col in enumerate(columns):\n value_to_assign = int(str(i) + str(j))\n md_df.loc[ind][col] = value_to_assign\n pd_df.loc[ind][col] = value_to_assign\n df_equals(md_df, pd_df)\n\n\[email protected]\ndef loc_iter_dfs():\n columns = [\"col1\", \"col2\", \"col3\"]\n index = [\"row1\", \"row2\", \"row3\"]\n return create_test_dfs(\n {col: ([idx] * len(index)) for idx, col in enumerate(columns)},\n columns=columns,\n index=index,\n )\n\n\[email protected](\"reverse_order\", [False, True])\[email protected](\"axis\", [0, 1])\ndef test_loc_iter_assignment(loc_iter_dfs, reverse_order, axis):\n if reverse_order and axis:\n pytest.xfail(\n \"Due to internal sorting of lookup values assignment order is lost, see GH-#2552\"\n )\n\n md_df, pd_df = loc_iter_dfs\n\n select = [slice(None), slice(None)]\n select[axis] = sorted(pd_df.axes[axis][:-1], reverse=reverse_order)\n select = tuple(select)\n\n pd_df.loc[select] = pd_df.loc[select] + pd_df.loc[select]\n md_df.loc[select] = md_df.loc[select] + md_df.loc[select]\n df_equals(md_df, pd_df)\n\n\[email protected](\"reverse_order\", [False, True])\[email protected](\"axis\", [0, 1])\ndef test_loc_order(loc_iter_dfs, reverse_order, axis):\n md_df, pd_df = loc_iter_dfs\n\n select = [slice(None), slice(None)]\n select[axis] = sorted(pd_df.axes[axis][:-1], reverse=reverse_order)\n select = tuple(select)\n\n df_equals(pd_df.loc[select], md_df.loc[select])\n\n\[email protected]\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_loc_nested_assignment(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n key1 = modin_df.columns[0]\n key2 = modin_df.columns[1]\n\n modin_df[key1].loc[0] = 500\n pandas_df[key1].loc[0] = 500\n df_equals(modin_df, pandas_df)\n\n modin_df[key2].loc[0] = None\n pandas_df[key2].loc[0] = None\n df_equals(modin_df, pandas_df)\n\n\ndef test_iloc_assignment():\n modin_df = pd.DataFrame(index=[\"row1\", \"row2\", \"row3\"], columns=[\"col1\", \"col2\"])\n pandas_df = pandas.DataFrame(\n index=[\"row1\", \"row2\", \"row3\"], columns=[\"col1\", \"col2\"]\n )\n modin_df.iloc[0][\"col1\"] = 11\n 
modin_df.iloc[1][\"col1\"] = 21\n modin_df.iloc[2][\"col1\"] = 31\n modin_df.iloc[lambda df: 0][\"col2\"] = 12\n modin_df.iloc[1][lambda df: [\"col2\"]] = 22\n modin_df.iloc[lambda df: 2][lambda df: [\"col2\"]] = 32\n pandas_df.iloc[0][\"col1\"] = 11\n pandas_df.iloc[1][\"col1\"] = 21\n pandas_df.iloc[2][\"col1\"] = 31\n pandas_df.iloc[lambda df: 0][\"col2\"] = 12\n pandas_df.iloc[1][lambda df: [\"col2\"]] = 22\n pandas_df.iloc[lambda df: 2][lambda df: [\"col2\"]] = 32\n\n df_equals(modin_df, pandas_df)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_iloc_nested_assignment(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n key1 = modin_df.columns[0]\n key2 = modin_df.columns[1]\n\n modin_df[key1].iloc[0] = 500\n pandas_df[key1].iloc[0] = 500\n df_equals(modin_df, pandas_df)\n\n modin_df[key2].iloc[0] = None\n pandas_df[key2].iloc[0] = None\n df_equals(modin_df, pandas_df)\n\n\ndef test_iloc_empty():\n pandas_df = pandas.DataFrame(index=range(5))\n modin_df = pd.DataFrame(index=range(5))\n\n df_equals(pandas_df.iloc[1], modin_df.iloc[1])\n pandas_df.iloc[1] = 3\n modin_df.iloc[1] = 3\n df_equals(pandas_df, modin_df)\n\n\ndef test_loc_series():\n md_df, pd_df = create_test_dfs({\"a\": [1, 2], \"b\": [3, 4]})\n\n pd_df.loc[pd_df[\"a\"] > 1, \"b\"] = np.log(pd_df[\"b\"])\n md_df.loc[md_df[\"a\"] > 1, \"b\"] = np.log(md_df[\"b\"])\n\n df_equals(pd_df, md_df)\n\n\[email protected](\"locator_name\", [\"loc\", \"iloc\"])\[email protected](\n \"slice_indexer\",\n [\n slice(None, None, -2),\n slice(1, 10, None),\n slice(None, 10, None),\n slice(10, None, None),\n slice(10, None, -2),\n slice(-10, None, -2),\n slice(None, 1_000_000_000, None),\n ],\n)\ndef test_loc_iloc_slice_indexer(locator_name, slice_indexer):\n md_df, pd_df = create_test_dfs(test_data_values[0])\n # Shifting the index, so labels won't match its position\n shifted_index = pandas.RangeIndex(1, len(md_df) + 1)\n md_df.index = shifted_index\n pd_df.index = shifted_index\n\n eval_general(md_df, pd_df, lambda df: getattr(df, locator_name)[slice_indexer])\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_pop(request, data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n if \"empty_data\" not in request.node.name:\n key = modin_df.columns[0]\n temp_modin_df = modin_df.copy()\n temp_pandas_df = pandas_df.copy()\n modin_popped = temp_modin_df.pop(key)\n pandas_popped = temp_pandas_df.pop(key)\n df_equals(modin_popped, pandas_popped)\n df_equals(temp_modin_df, temp_pandas_df)\n\n\ndef test_reindex():\n frame_data = {\n \"col1\": [0, 1, 2, 3],\n \"col2\": [4, 5, 6, 7],\n \"col3\": [8, 9, 10, 11],\n \"col4\": [12, 13, 14, 15],\n \"col5\": [0, 0, 0, 0],\n }\n pandas_df = pandas.DataFrame(frame_data)\n modin_df = pd.DataFrame(frame_data)\n\n df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))\n df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))\n df_equals(\n modin_df.reindex([\"col1\", \"col3\", \"col4\", \"col2\"], axis=1),\n pandas_df.reindex([\"col1\", \"col3\", \"col4\", \"col2\"], axis=1),\n )\n df_equals(\n modin_df.reindex([\"col1\", \"col7\", \"col4\", \"col8\"], axis=1),\n pandas_df.reindex([\"col1\", \"col7\", \"col4\", \"col8\"], axis=1),\n )\n df_equals(\n modin_df.reindex(index=[0, 1, 5], columns=[\"col1\", \"col7\", \"col4\", \"col8\"]),\n pandas_df.reindex(index=[0, 1, 5], columns=[\"col1\", \"col7\", \"col4\", \"col8\"]),\n )\n df_equals(\n modin_df.T.reindex([\"col1\", 
\"col7\", \"col4\", \"col8\"], axis=0),\n pandas_df.T.reindex([\"col1\", \"col7\", \"col4\", \"col8\"], axis=0),\n )\n\n\ndef test_reindex_like():\n df1 = pd.DataFrame(\n [\n [24.3, 75.7, \"high\"],\n [31, 87.8, \"high\"],\n [22, 71.6, \"medium\"],\n [35, 95, \"medium\"],\n ],\n columns=[\"temp_celsius\", \"temp_fahrenheit\", \"windspeed\"],\n index=pd.date_range(start=\"2014-02-12\", end=\"2014-02-15\", freq=\"D\"),\n )\n df2 = pd.DataFrame(\n [[28, \"low\"], [30, \"low\"], [35.1, \"medium\"]],\n columns=[\"temp_celsius\", \"windspeed\"],\n index=pd.DatetimeIndex([\"2014-02-12\", \"2014-02-13\", \"2014-02-15\"]),\n )\n with warns_that_defaulting_to_pandas():\n df2.reindex_like(df1)\n\n\ndef test_rename_sanity():\n source_df = pandas.DataFrame(test_data[\"int_data\"])[\n [\"col1\", \"index\", \"col3\", \"col4\"]\n ]\n mapping = {\"col1\": \"a\", \"index\": \"b\", \"col3\": \"c\", \"col4\": \"d\"}\n\n modin_df = pd.DataFrame(source_df)\n df_equals(modin_df.rename(columns=mapping), source_df.rename(columns=mapping))\n\n renamed2 = source_df.rename(columns=str.lower)\n df_equals(modin_df.rename(columns=str.lower), renamed2)\n\n modin_df = pd.DataFrame(renamed2)\n df_equals(modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper))\n\n # index\n data = {\"A\": {\"foo\": 0, \"bar\": 1}}\n\n # gets sorted alphabetical\n df = pandas.DataFrame(data)\n modin_df = pd.DataFrame(data)\n assert_index_equal(\n modin_df.rename(index={\"foo\": \"bar\", \"bar\": \"foo\"}).index,\n df.rename(index={\"foo\": \"bar\", \"bar\": \"foo\"}).index,\n )\n\n assert_index_equal(\n modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index\n )\n\n # Using the `mapper` functionality with `axis`\n assert_index_equal(\n modin_df.rename(str.upper, axis=0).index, df.rename(str.upper, axis=0).index\n )\n assert_index_equal(\n modin_df.rename(str.upper, axis=1).columns,\n df.rename(str.upper, axis=1).columns,\n )\n\n # have to pass something\n with pytest.raises(TypeError):\n modin_df.rename()\n\n # partial columns\n renamed = source_df.rename(columns={\"col3\": \"foo\", \"col4\": \"bar\"})\n modin_df = pd.DataFrame(source_df)\n assert_index_equal(\n modin_df.rename(columns={\"col3\": \"foo\", \"col4\": \"bar\"}).index,\n source_df.rename(columns={\"col3\": \"foo\", \"col4\": \"bar\"}).index,\n )\n\n # other axis\n renamed = source_df.T.rename(index={\"col3\": \"foo\", \"col4\": \"bar\"})\n assert_index_equal(\n source_df.T.rename(index={\"col3\": \"foo\", \"col4\": \"bar\"}).index,\n modin_df.T.rename(index={\"col3\": \"foo\", \"col4\": \"bar\"}).index,\n )\n\n # index with name\n index = pandas.Index([\"foo\", \"bar\"], name=\"name\")\n renamer = pandas.DataFrame(data, index=index)\n modin_df = pd.DataFrame(data, index=index)\n\n renamed = renamer.rename(index={\"foo\": \"bar\", \"bar\": \"foo\"})\n modin_renamed = modin_df.rename(index={\"foo\": \"bar\", \"bar\": \"foo\"})\n assert_index_equal(renamed.index, modin_renamed.index)\n\n assert renamed.index.name == modin_renamed.index.name\n\n\ndef test_rename_multiindex():\n tuples_index = [(\"foo1\", \"bar1\"), (\"foo2\", \"bar2\")]\n tuples_columns = [(\"fizz1\", \"buzz1\"), (\"fizz2\", \"buzz2\")]\n index = pandas.MultiIndex.from_tuples(tuples_index, names=[\"foo\", \"bar\"])\n columns = pandas.MultiIndex.from_tuples(tuples_columns, names=[\"fizz\", \"buzz\"])\n\n frame_data = [(0, 0), (1, 1)]\n df = pandas.DataFrame(frame_data, index=index, columns=columns)\n modin_df = pd.DataFrame(frame_data, index=index, columns=columns)\n\n #\n # without 
specifying level -> accross all levels\n renamed = df.rename(\n index={\"foo1\": \"foo3\", \"bar2\": \"bar3\"},\n columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"},\n )\n modin_renamed = modin_df.rename(\n index={\"foo1\": \"foo3\", \"bar2\": \"bar3\"},\n columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"},\n )\n assert_index_equal(renamed.index, modin_renamed.index)\n\n renamed = df.rename(\n index={\"foo1\": \"foo3\", \"bar2\": \"bar3\"},\n columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"},\n )\n assert_index_equal(renamed.columns, modin_renamed.columns)\n assert renamed.index.names == modin_renamed.index.names\n assert renamed.columns.names == modin_renamed.columns.names\n\n #\n # with specifying a level\n\n # dict\n renamed = df.rename(columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=0)\n modin_renamed = modin_df.rename(\n columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=0\n )\n assert_index_equal(renamed.columns, modin_renamed.columns)\n renamed = df.rename(columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=\"fizz\")\n modin_renamed = modin_df.rename(\n columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=\"fizz\"\n )\n assert_index_equal(renamed.columns, modin_renamed.columns)\n\n renamed = df.rename(columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=1)\n modin_renamed = modin_df.rename(\n columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=1\n )\n assert_index_equal(renamed.columns, modin_renamed.columns)\n renamed = df.rename(columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=\"buzz\")\n modin_renamed = modin_df.rename(\n columns={\"fizz1\": \"fizz3\", \"buzz2\": \"buzz3\"}, level=\"buzz\"\n )\n assert_index_equal(renamed.columns, modin_renamed.columns)\n\n # function\n func = str.upper\n renamed = df.rename(columns=func, level=0)\n modin_renamed = modin_df.rename(columns=func, level=0)\n assert_index_equal(renamed.columns, modin_renamed.columns)\n renamed = df.rename(columns=func, level=\"fizz\")\n modin_renamed = modin_df.rename(columns=func, level=\"fizz\")\n assert_index_equal(renamed.columns, modin_renamed.columns)\n\n renamed = df.rename(columns=func, level=1)\n modin_renamed = modin_df.rename(columns=func, level=1)\n assert_index_equal(renamed.columns, modin_renamed.columns)\n renamed = df.rename(columns=func, level=\"buzz\")\n modin_renamed = modin_df.rename(columns=func, level=\"buzz\")\n assert_index_equal(renamed.columns, modin_renamed.columns)\n\n # index\n renamed = df.rename(index={\"foo1\": \"foo3\", \"bar2\": \"bar3\"}, level=0)\n modin_renamed = modin_df.rename(index={\"foo1\": \"foo3\", \"bar2\": \"bar3\"}, level=0)\n assert_index_equal(modin_renamed.index, renamed.index)\n\n\[email protected](reason=\"Pandas does not pass this test\")\ndef test_rename_nocopy():\n source_df = pandas.DataFrame(test_data[\"int_data\"])[\n [\"col1\", \"index\", \"col3\", \"col4\"]\n ]\n modin_df = pd.DataFrame(source_df)\n modin_renamed = modin_df.rename(columns={\"col3\": \"foo\"}, copy=False)\n modin_renamed[\"foo\"] = 1\n assert (modin_df[\"col3\"] == 1).all()\n\n\ndef test_rename_inplace():\n source_df = pandas.DataFrame(test_data[\"int_data\"])[\n [\"col1\", \"index\", \"col3\", \"col4\"]\n ]\n modin_df = pd.DataFrame(source_df)\n\n df_equals(\n modin_df.rename(columns={\"col3\": \"foo\"}),\n source_df.rename(columns={\"col3\": \"foo\"}),\n )\n\n frame = source_df.copy()\n modin_frame = modin_df.copy()\n frame.rename(columns={\"col3\": \"foo\"}, inplace=True)\n modin_frame.rename(columns={\"col3\": \"foo\"}, 
inplace=True)\n\n df_equals(modin_frame, frame)\n\n\ndef test_rename_bug():\n # rename set ref_locs, and set_index was not resetting\n frame_data = {0: [\"foo\", \"bar\"], 1: [\"bah\", \"bas\"], 2: [1, 2]}\n df = pandas.DataFrame(frame_data)\n modin_df = pd.DataFrame(frame_data)\n df = df.rename(columns={0: \"a\"})\n df = df.rename(columns={1: \"b\"})\n df = df.set_index([\"a\", \"b\"])\n df.columns = [\"2001-01-01\"]\n\n modin_df = modin_df.rename(columns={0: \"a\"})\n modin_df = modin_df.rename(columns={1: \"b\"})\n modin_df = modin_df.set_index([\"a\", \"b\"])\n modin_df.columns = [\"2001-01-01\"]\n\n df_equals(modin_df, df)\n\n\ndef test_rename_axis():\n data = {\"num_legs\": [4, 4, 2], \"num_arms\": [0, 0, 2]}\n index = [\"dog\", \"cat\", \"monkey\"]\n modin_df = pd.DataFrame(data, index)\n pandas_df = pandas.DataFrame(data, index)\n df_equals(modin_df.rename_axis(\"animal\"), pandas_df.rename_axis(\"animal\"))\n df_equals(\n modin_df.rename_axis(\"limbs\", axis=\"columns\"),\n pandas_df.rename_axis(\"limbs\", axis=\"columns\"),\n )\n\n modin_df.rename_axis(\"limbs\", axis=\"columns\", inplace=True)\n pandas_df.rename_axis(\"limbs\", axis=\"columns\", inplace=True)\n df_equals(modin_df, pandas_df)\n\n new_index = pd.MultiIndex.from_product(\n [[\"mammal\"], [\"dog\", \"cat\", \"monkey\"]], names=[\"type\", \"name\"]\n )\n modin_df.index = new_index\n pandas_df.index = new_index\n\n df_equals(\n modin_df.rename_axis(index={\"type\": \"class\"}),\n pandas_df.rename_axis(index={\"type\": \"class\"}),\n )\n df_equals(\n modin_df.rename_axis(columns=str.upper),\n pandas_df.rename_axis(columns=str.upper),\n )\n df_equals(\n modin_df.rename_axis(columns=[str.upper(o) for o in modin_df.columns.names]),\n pandas_df.rename_axis(columns=[str.upper(o) for o in pandas_df.columns.names]),\n )\n\n with pytest.raises(ValueError):\n df_equals(\n modin_df.rename_axis(str.upper, axis=1),\n pandas_df.rename_axis(str.upper, axis=1),\n )\n\n\ndef test_rename_axis_inplace():\n test_frame = pandas.DataFrame(test_data[\"int_data\"])\n modin_df = pd.DataFrame(test_frame)\n\n result = test_frame.copy()\n modin_result = modin_df.copy()\n no_return = result.rename_axis(\"foo\", inplace=True)\n modin_no_return = modin_result.rename_axis(\"foo\", inplace=True)\n\n assert no_return is modin_no_return\n df_equals(modin_result, result)\n\n result = test_frame.copy()\n modin_result = modin_df.copy()\n no_return = result.rename_axis(\"bar\", axis=1, inplace=True)\n modin_no_return = modin_result.rename_axis(\"bar\", axis=1, inplace=True)\n\n assert no_return is modin_no_return\n df_equals(modin_result, result)\n\n\ndef test_reorder_levels():\n data = np.random.randint(1, 100, 12)\n modin_df = pd.DataFrame(\n data,\n index=pd.MultiIndex.from_tuples(\n [\n (num, letter, color)\n for num in range(1, 3)\n for letter in [\"a\", \"b\", \"c\"]\n for color in [\"Red\", \"Green\"]\n ],\n names=[\"Number\", \"Letter\", \"Color\"],\n ),\n )\n pandas_df = pandas.DataFrame(\n data,\n index=pandas.MultiIndex.from_tuples(\n [\n (num, letter, color)\n for num in range(1, 3)\n for letter in [\"a\", \"b\", \"c\"]\n for color in [\"Red\", \"Green\"]\n ],\n names=[\"Number\", \"Letter\", \"Color\"],\n ),\n )\n df_equals(\n modin_df.reorder_levels([\"Letter\", \"Color\", \"Number\"]),\n pandas_df.reorder_levels([\"Letter\", \"Color\", \"Number\"]),\n )\n\n\ndef test_reindex_multiindex():\n data1, data2 = np.random.randint(1, 20, (5, 5)), np.random.randint(10, 25, 6)\n index = np.array([\"AUD\", \"BRL\", \"CAD\", \"EUR\", \"INR\"])\n 
modin_midx = pd.MultiIndex.from_product(\n [[\"Bank_1\", \"Bank_2\"], [\"AUD\", \"CAD\", \"EUR\"]], names=[\"Bank\", \"Curency\"]\n )\n pandas_midx = pandas.MultiIndex.from_product(\n [[\"Bank_1\", \"Bank_2\"], [\"AUD\", \"CAD\", \"EUR\"]], names=[\"Bank\", \"Curency\"]\n )\n modin_df1, modin_df2 = (\n pd.DataFrame(data=data1, index=index, columns=index),\n pd.DataFrame(data2, modin_midx),\n )\n pandas_df1, pandas_df2 = (\n pandas.DataFrame(data=data1, index=index, columns=index),\n pandas.DataFrame(data2, pandas_midx),\n )\n modin_df2.columns, pandas_df2.columns = [\"Notional\"], [\"Notional\"]\n md_midx = pd.MultiIndex.from_product([modin_df2.index.levels[0], modin_df1.index])\n pd_midx = pandas.MultiIndex.from_product(\n [pandas_df2.index.levels[0], pandas_df1.index]\n )\n # reindex without axis, index, or columns\n modin_result = modin_df1.reindex(md_midx, fill_value=0)\n pandas_result = pandas_df1.reindex(pd_midx, fill_value=0)\n df_equals(modin_result, pandas_result)\n # reindex with only axis\n modin_result = modin_df1.reindex(md_midx, fill_value=0, axis=0)\n pandas_result = pandas_df1.reindex(pd_midx, fill_value=0, axis=0)\n df_equals(modin_result, pandas_result)\n # reindex with axis and level\n modin_result = modin_df1.reindex(md_midx, fill_value=0, axis=0, level=0)\n pandas_result = pandas_df1.reindex(pd_midx, fill_value=0, axis=0, level=0)\n df_equals(modin_result, pandas_result)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test_reset_index(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n modin_result = modin_df.reset_index(inplace=False)\n pandas_result = pandas_df.reset_index(inplace=False)\n df_equals(modin_result, pandas_result)\n\n modin_df_cp = modin_df.copy()\n pd_df_cp = pandas_df.copy()\n modin_df_cp.reset_index(inplace=True)\n pd_df_cp.reset_index(inplace=True)\n df_equals(modin_df_cp, pd_df_cp)\n\n\[email protected](\n \"data\",\n [\n pytest.param(\n test_data[\"int_data\"],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n test_data[\"float_nan_data\"],\n ],\n ids=[\"int_data\", \"float_nan_data\"],\n)\[email protected](\"nlevels\", [3])\[email protected](\"columns_multiindex\", [True, False])\[email protected](\n \"level\",\n [\n \"no_level\",\n None,\n 0,\n 1,\n 2,\n [2, 0],\n [2, 1],\n [1, 0],\n pytest.param(\n [2, 1, 2],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n pytest.param(\n [0, 0, 0, 0],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n pytest.param(\n [\"level_name_1\"],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n pytest.param(\n [\"level_name_2\", \"level_name_1\"],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n pytest.param(\n [2, \"level_name_0\"],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n ],\n)\[email protected](\"col_level\", [\"no_col_level\", 0, 1, 2])\[email protected](\"col_fill\", [\"no_col_fill\", None, 0, \"new\"])\[email protected](\"drop\", [False])\[email protected](\n \"multiindex_levels_names_max_levels\",\n [\n 0,\n 1,\n 2,\n pytest.param(\n 3, marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\")\n ),\n pytest.param(\n 4, marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\")\n ),\n ],\n)\[email protected](\n \"none_in_index_names\",\n [\n pytest.param(\n False,\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n 
True,\n \"mixed_1st_None\",\n pytest.param(\n \"mixed_2nd_None\",\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n ],\n)\ndef test_reset_index_with_multi_index_no_drop(\n data,\n nlevels,\n columns_multiindex,\n level,\n col_level,\n col_fill,\n drop,\n multiindex_levels_names_max_levels,\n none_in_index_names,\n):\n data_rows = len(data[list(data.keys())[0]])\n index = generate_multiindex(data_rows, nlevels=nlevels)\n data_columns = len(data.keys())\n columns = (\n generate_multiindex(data_columns, nlevels=nlevels)\n if columns_multiindex\n else pandas.RangeIndex(0, data_columns)\n )\n # Replace original data columns with generated\n data = {columns[ind]: data[key] for ind, key in enumerate(data)}\n index.names = (\n [f\"level_{i}\" for i in range(index.nlevels)]\n if multiindex_levels_names_max_levels == 0\n else [\n tuple(\n [\n f\"level_{i}_name_{j}\"\n for j in range(\n 0,\n max(multiindex_levels_names_max_levels + 1 - index.nlevels, 0)\n + i,\n )\n ]\n )\n if max(multiindex_levels_names_max_levels + 1 - index.nlevels, 0) + i > 0\n else f\"level_{i}\"\n for i in range(index.nlevels)\n ]\n )\n\n if none_in_index_names is True:\n index.names = [None] * len(index.names)\n elif none_in_index_names:\n names_list = list(index.names)\n start_index = 0 if none_in_index_names == \"mixed_1st_None\" else 1\n names_list[start_index::2] = [None] * len(names_list[start_index::2])\n index.names = names_list\n\n modin_df = pd.DataFrame(data, index=index, columns=columns)\n pandas_df = pandas.DataFrame(data, index=index, columns=columns)\n\n if isinstance(level, list):\n level = [\n index.names[int(x[len(\"level_name_\") :])]\n if isinstance(x, str) and x.startswith(\"level_name_\")\n else x\n for x in level\n ]\n\n kwargs = {\"drop\": drop}\n if level != \"no_level\":\n kwargs[\"level\"] = level\n if col_level != \"no_col_level\":\n kwargs[\"col_level\"] = col_level\n if col_fill != \"no_col_fill\":\n kwargs[\"col_fill\"] = col_fill\n eval_general(modin_df, pandas_df, lambda df: df.reset_index(**kwargs))\n\n\[email protected](\n \"data\",\n [\n pytest.param(\n test_data[\"int_data\"],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n test_data[\"float_nan_data\"],\n ],\n ids=[\"int_data\", \"float_nan_data\"],\n)\[email protected](\"nlevels\", [3])\[email protected](\n \"level\",\n [\n \"no_level\",\n None,\n 0,\n 1,\n 2,\n [2, 0],\n [2, 1],\n [1, 0],\n pytest.param(\n [2, 1, 2],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n pytest.param(\n [0, 0, 0, 0],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n pytest.param(\n [\"level_name_1\"],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n pytest.param(\n [\"level_name_2\", \"level_name_1\"],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n pytest.param(\n [2, \"level_name_0\"],\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n ],\n)\[email protected](\n \"multiindex_levels_names_max_levels\",\n [\n 0,\n 1,\n 2,\n pytest.param(\n 3, marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\")\n ),\n pytest.param(\n 4, marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\")\n ),\n ],\n)\[email protected](\n \"none_in_index_names\",\n [\n pytest.param(\n False,\n marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n True,\n \"mixed_1st_None\",\n pytest.param(\n \"mixed_2nd_None\",\n 
marks=pytest.mark.skipif(not extra_test_parameters, reason=\"extra\"),\n ),\n ],\n)\ndef test_reset_index_with_multi_index_drop(\n data, nlevels, level, multiindex_levels_names_max_levels, none_in_index_names\n):\n test_reset_index_with_multi_index_no_drop(\n data,\n nlevels,\n True,\n level,\n \"no_col_level\",\n \"no_col_fill\",\n True,\n multiindex_levels_names_max_levels,\n none_in_index_names,\n )\n\n\[email protected](\"index_levels_names_max_levels\", [0, 1, 2])\ndef test_reset_index_with_named_index(index_levels_names_max_levels):\n modin_df = pd.DataFrame(test_data_values[0])\n pandas_df = pandas.DataFrame(test_data_values[0])\n\n index_name = (\n tuple([f\"name_{j}\" for j in range(0, index_levels_names_max_levels)])\n if index_levels_names_max_levels > 0\n else \"NAME_OF_INDEX\"\n )\n modin_df.index.name = pandas_df.index.name = index_name\n df_equals(modin_df, pandas_df)\n df_equals(modin_df.reset_index(drop=False), pandas_df.reset_index(drop=False))\n\n modin_df.reset_index(drop=True, inplace=True)\n pandas_df.reset_index(drop=True, inplace=True)\n df_equals(modin_df, pandas_df)\n\n modin_df = pd.DataFrame(test_data_values[0])\n pandas_df = pandas.DataFrame(test_data_values[0])\n modin_df.index.name = pandas_df.index.name = index_name\n df_equals(modin_df.reset_index(drop=False), pandas_df.reset_index(drop=False))\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"axis\", axis_values, ids=axis_keys)\ndef test_sample(data, axis):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n with pytest.raises(ValueError):\n modin_df.sample(n=3, frac=0.4, axis=axis)\n\n with pytest.raises(KeyError):\n modin_df.sample(frac=0.5, weights=\"CoLuMn_No_ExIsT\", axis=0)\n\n with pytest.raises(ValueError):\n modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)\n\n with pytest.raises(ValueError):\n modin_df.sample(\n frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0\n )\n\n with pytest.raises(ValueError):\n modin_df.sample(\n frac=0.5,\n weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],\n axis=1,\n )\n\n with pytest.raises(ValueError):\n modin_df.sample(n=-3, axis=axis)\n\n with pytest.raises(ValueError):\n modin_df.sample(frac=0.2, weights=pandas.Series(), axis=axis)\n\n if isinstance(axis, str):\n num_axis = pandas.DataFrame()._get_axis_number(axis)\n else:\n num_axis = axis\n\n # weights that sum to 1\n sums = sum(i % 2 for i in range(len(modin_df.axes[num_axis])))\n weights = [i % 2 / sums for i in range(len(modin_df.axes[num_axis]))]\n\n modin_result = modin_df.sample(\n frac=0.5, random_state=42, weights=weights, axis=axis\n )\n pandas_result = pandas_df.sample(\n frac=0.5, random_state=42, weights=weights, axis=axis\n )\n df_equals(modin_result, pandas_result)\n\n # weights that don't sum to 1\n weights = [i % 2 for i in range(len(modin_df.axes[num_axis]))]\n modin_result = modin_df.sample(\n frac=0.5, random_state=42, weights=weights, axis=axis\n )\n pandas_result = pandas_df.sample(\n frac=0.5, random_state=42, weights=weights, axis=axis\n )\n df_equals(modin_result, pandas_result)\n\n modin_result = modin_df.sample(n=0, axis=axis)\n pandas_result = pandas_df.sample(n=0, axis=axis)\n df_equals(modin_result, pandas_result)\n\n modin_result = modin_df.sample(frac=0.5, random_state=42, axis=axis)\n pandas_result = pandas_df.sample(frac=0.5, random_state=42, axis=axis)\n df_equals(modin_result, pandas_result)\n\n modin_result = modin_df.sample(n=2, random_state=42, axis=axis)\n pandas_result = 
pandas_df.sample(n=2, random_state=42, axis=axis)\n df_equals(modin_result, pandas_result)\n\n # issue #1692, numpy RandomState object\n # We must create a new random state for each iteration because the values that\n # are selected will be impacted if the object has already been used.\n random_state = np.random.RandomState(42)\n modin_result = modin_df.sample(frac=0.5, random_state=random_state, axis=axis)\n\n random_state = np.random.RandomState(42)\n pandas_result = pandas_df.sample(frac=0.5, random_state=random_state, axis=axis)\n df_equals(modin_result, pandas_result)\n\n\ndef test_select_dtypes():\n frame_data = {\n \"test1\": list(\"abc\"),\n \"test2\": np.arange(3, 6).astype(\"u1\"),\n \"test3\": np.arange(8.0, 11.0, dtype=\"float64\"),\n \"test4\": [True, False, True],\n \"test5\": pandas.date_range(\"now\", periods=3).values,\n \"test6\": list(range(5, 8)),\n }\n df = pandas.DataFrame(frame_data)\n rd = pd.DataFrame(frame_data)\n\n include = np.float, \"integer\"\n exclude = (np.bool_,)\n r = rd.select_dtypes(include=include, exclude=exclude)\n\n e = df[[\"test2\", \"test3\", \"test6\"]]\n df_equals(r, e)\n\n r = rd.select_dtypes(include=np.bool_)\n e = df[[\"test4\"]]\n df_equals(r, e)\n\n r = rd.select_dtypes(exclude=np.bool_)\n e = df[[\"test1\", \"test2\", \"test3\", \"test5\", \"test6\"]]\n df_equals(r, e)\n\n try:\n pd.DataFrame().select_dtypes()\n assert False\n except ValueError:\n assert True\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"n\", int_arg_values, ids=arg_keys(\"n\", int_arg_keys))\ndef test_tail(data, n):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n df_equals(modin_df.tail(n), pandas_df.tail(n))\n df_equals(modin_df.tail(len(modin_df)), pandas_df.tail(len(pandas_df)))\n\n\ndef test_xs():\n d = {\n \"num_legs\": [4, 4, 2, 2],\n \"num_wings\": [0, 0, 2, 2],\n \"class\": [\"mammal\", \"mammal\", \"mammal\", \"bird\"],\n \"animal\": [\"cat\", \"dog\", \"bat\", \"penguin\"],\n \"locomotion\": [\"walks\", \"walks\", \"flies\", \"walks\"],\n }\n df = pd.DataFrame(data=d)\n df = df.set_index([\"class\", \"animal\", \"locomotion\"])\n with warns_that_defaulting_to_pandas():\n df.xs(\"mammal\")\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___getitem__(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n key = modin_df.columns[0]\n modin_col = modin_df.__getitem__(key)\n assert isinstance(modin_col, pd.Series)\n\n pd_col = pandas_df[key]\n df_equals(pd_col, modin_col)\n\n slices = [\n (None, -1),\n (-1, None),\n (1, 2),\n (1, None),\n (None, 1),\n (1, -1),\n (-3, -1),\n (1, -1, 2),\n (-1, 1, -1),\n (None, None, 2),\n ]\n\n # slice test\n for slice_param in slices:\n s = slice(*slice_param)\n df_equals(modin_df[s], pandas_df[s])\n\n # Test empty\n df_equals(pd.DataFrame([])[:10], pandas.DataFrame([])[:10])\n\n\ndef test_getitem_empty_mask():\n # modin-project/modin#517\n modin_frames = []\n pandas_frames = []\n data1 = np.random.randint(0, 100, size=(100, 4))\n mdf1 = pd.DataFrame(data1, columns=list(\"ABCD\"))\n pdf1 = pandas.DataFrame(data1, columns=list(\"ABCD\"))\n modin_frames.append(mdf1)\n pandas_frames.append(pdf1)\n\n data2 = np.random.randint(0, 100, size=(100, 4))\n mdf2 = pd.DataFrame(data2, columns=list(\"ABCD\"))\n pdf2 = pandas.DataFrame(data2, columns=list(\"ABCD\"))\n modin_frames.append(mdf2)\n pandas_frames.append(pdf2)\n\n data3 = np.random.randint(0, 100, size=(100, 4))\n mdf3 = pd.DataFrame(data3, 
columns=list(\"ABCD\"))\n pdf3 = pandas.DataFrame(data3, columns=list(\"ABCD\"))\n modin_frames.append(mdf3)\n pandas_frames.append(pdf3)\n\n modin_data = pd.concat(modin_frames)\n pandas_data = pandas.concat(pandas_frames)\n df_equals(\n modin_data[[False for _ in modin_data.index]],\n pandas_data[[False for _ in modin_data.index]],\n )\n\n\ndef test_getitem_datetime_slice():\n data = {\"data\": range(1000)}\n index = pd.date_range(\"2017/1/4\", periods=1000)\n modin_df = pd.DataFrame(data=data, index=index)\n pandas_df = pandas.DataFrame(data=data, index=index)\n\n s = slice(\"2017-01-06\", \"2017-01-09\")\n df_equals(modin_df[s], pandas_df[s])\n\n\ndef test_getitem_same_name():\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16],\n [17, 18, 19, 20],\n ]\n columns = [\"c1\", \"c2\", \"c1\", \"c3\"]\n modin_df = pd.DataFrame(data, columns=columns)\n pandas_df = pandas.DataFrame(data, columns=columns)\n df_equals(modin_df[\"c1\"], pandas_df[\"c1\"])\n df_equals(modin_df[\"c2\"], pandas_df[\"c2\"])\n df_equals(modin_df[[\"c1\", \"c2\"]], pandas_df[[\"c1\", \"c2\"]])\n df_equals(modin_df[\"c3\"], pandas_df[\"c3\"])\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___getattr__(request, data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data) # noqa F841\n\n if \"empty_data\" not in request.node.name:\n key = modin_df.columns[0]\n col = modin_df.__getattr__(key)\n\n col = modin_df.__getattr__(\"col1\")\n assert isinstance(col, pd.Series)\n\n col = getattr(modin_df, \"col1\")\n assert isinstance(col, pd.Series)\n\n # Check that lookup in column doesn't override other attributes\n df2 = modin_df.rename(index=str, columns={key: \"columns\"})\n assert isinstance(df2.columns, pandas.Index)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___setitem__(data):\n eval_setitem(*create_test_dfs(data), loc=-1, value=1)\n eval_setitem(\n *create_test_dfs(data), loc=-1, value=lambda df: type(df)(df[df.columns[0]])\n )\n\n nrows = len(data[list(data.keys())[0]])\n arr = np.arange(nrows * 2).reshape(-1, 2)\n\n eval_setitem(*create_test_dfs(data), loc=-1, value=arr)\n eval_setitem(*create_test_dfs(data), col=\"___NON EXISTENT COLUMN\", value=arr)\n eval_setitem(*create_test_dfs(data), loc=0, value=np.arange(nrows))\n\n modin_df = pd.DataFrame(columns=data.keys())\n pandas_df = pandas.DataFrame(columns=data.keys())\n\n for col in modin_df.columns:\n modin_df[col] = np.arange(1000)\n\n for col in pandas_df.columns:\n pandas_df[col] = np.arange(1000)\n\n df_equals(modin_df, pandas_df)\n\n # Test series assignment to column\n modin_df = pd.DataFrame(columns=modin_df.columns)\n pandas_df = pandas.DataFrame(columns=pandas_df.columns)\n modin_df[modin_df.columns[-1]] = modin_df[modin_df.columns[0]]\n pandas_df[pandas_df.columns[-1]] = pandas_df[pandas_df.columns[0]]\n df_equals(modin_df, pandas_df)\n\n if not sys.version_info.major == 3 and sys.version_info.minor > 6:\n # This test doesn't work correctly on Python 3.6\n # Test 2d ndarray assignment to column\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n modin_df[\"new_col\"] = modin_df[[modin_df.columns[0]]].values\n pandas_df[\"new_col\"] = pandas_df[[pandas_df.columns[0]]].values\n df_equals(modin_df, pandas_df)\n assert isinstance(modin_df[\"new_col\"][0], type(pandas_df[\"new_col\"][0]))\n\n modin_df[1:5] = 10\n pandas_df[1:5] = 10\n df_equals(modin_df, pandas_df)\n\n # Transpose test\n modin_df = pd.DataFrame(data).T\n pandas_df = 
pandas.DataFrame(data).T\n\n modin_df[modin_df.columns[0]] = 0\n pandas_df[pandas_df.columns[0]] = 0\n df_equals(modin_df, pandas_df)\n\n modin_df.columns = [str(i) for i in modin_df.columns]\n pandas_df.columns = [str(i) for i in pandas_df.columns]\n\n modin_df[modin_df.columns[0]] = 0\n pandas_df[pandas_df.columns[0]] = 0\n\n df_equals(modin_df, pandas_df)\n\n modin_df[modin_df.columns[0]][modin_df.index[0]] = 12345\n pandas_df[pandas_df.columns[0]][pandas_df.index[0]] = 12345\n df_equals(modin_df, pandas_df)\n\n modin_df[1:5] = 10\n pandas_df[1:5] = 10\n df_equals(modin_df, pandas_df)\n\n\ndef test___setitem__partitions_aligning():\n # from issue #2390\n modin_df = pd.DataFrame({\"a\": [1, 2, 3]})\n pandas_df = pandas.DataFrame({\"a\": [1, 2, 3]})\n modin_df[\"b\"] = pd.Series([4, 5, 6, 7, 8])\n pandas_df[\"b\"] = pandas.Series([4, 5, 6, 7, 8])\n df_equals(modin_df, pandas_df)\n\n # from issue #2442\n data = {\"a\": [1, 2, 3, 4]}\n # Index with duplicated timestamp\n index = pandas.to_datetime([\"2020-02-06\", \"2020-02-06\", \"2020-02-22\", \"2020-03-26\"])\n\n md_df, pd_df = create_test_dfs(data, index=index)\n # Setting new column\n pd_df[\"b\"] = pandas.Series(np.arange(4))\n md_df[\"b\"] = pd.Series(np.arange(4))\n df_equals(md_df, pd_df)\n\n # Setting existing column\n pd_df[\"b\"] = pandas.Series(np.arange(4))\n md_df[\"b\"] = pd.Series(np.arange(4))\n df_equals(md_df, pd_df)\n\n pd_df[\"a\"] = pandas.Series(np.arange(4))\n md_df[\"a\"] = pd.Series(np.arange(4))\n df_equals(md_df, pd_df)\n\n\ndef test___setitem__with_mismatched_partitions():\n fname = \"200kx99.csv\"\n np.savetxt(fname, np.random.randint(0, 100, size=(200_000, 99)), delimiter=\",\")\n modin_df = pd.read_csv(fname)\n pandas_df = pandas.read_csv(fname)\n modin_df[\"new\"] = pd.Series(list(range(len(modin_df))))\n pandas_df[\"new\"] = pandas.Series(list(range(len(pandas_df))))\n df_equals(modin_df, pandas_df)\n\n\ndef test___setitem__mask():\n # DataFrame mask:\n data = test_data[\"int_data\"]\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n mean = int((RAND_HIGH + RAND_LOW) / 2)\n pandas_df[pandas_df > mean] = -50\n modin_df[modin_df > mean] = -50\n\n df_equals(modin_df, pandas_df)\n\n # Array mask:\n pandas_df = pandas.DataFrame(data)\n modin_df = pd.DataFrame(data)\n array = (pandas_df > mean).to_numpy()\n\n modin_df[array] = -50\n pandas_df[array] = -50\n\n df_equals(modin_df, pandas_df)\n\n # Array mask of wrong size:\n with pytest.raises(ValueError):\n array = np.array([[1, 2], [3, 4]])\n modin_df[array] = 20\n\n\[email protected](\n \"data\",\n [\n {},\n {\"id\": [], \"max_speed\": [], \"health\": []},\n {\"id\": [1], \"max_speed\": [2], \"health\": [3]},\n {\"id\": [4, 40, 400], \"max_speed\": [111, 222, 333], \"health\": [33, 22, 11]},\n ],\n ids=[\"empty_frame\", \"empty_cols\", \"1_length_cols\", \"2_length_cols\"],\n)\[email protected](\n \"value\",\n [[11, 22], [11, 22, 33]],\n ids=[\"2_length_val\", \"3_length_val\"],\n)\[email protected](\"convert_to_series\", [False, True])\[email protected](\"new_col_id\", [123, \"new_col\"], ids=[\"integer\", \"string\"])\ndef test_setitem_on_empty_df(data, value, convert_to_series, new_col_id):\n pandas_df = pandas.DataFrame(data)\n modin_df = pd.DataFrame(data)\n\n def applyier(df):\n if convert_to_series:\n converted_value = (\n pandas.Series(value)\n if isinstance(df, pandas.DataFrame)\n else pd.Series(value)\n )\n else:\n converted_value = value\n df[new_col_id] = converted_value\n return df\n\n eval_general(modin_df, pandas_df, 
applyier)\n\n\ndef test___setitem__unhashable_list():\n # from #3258 and #3291\n cols = [\"a\", \"b\"]\n modin_df = pd.DataFrame([[0, 0]], columns=cols)\n modin_df[cols] = modin_df[cols]\n pandas_df = pandas.DataFrame([[0, 0]], columns=cols)\n pandas_df[cols] = pandas_df[cols]\n df_equals(modin_df, pandas_df)\n\n\ndef test___setitem__single_item_in_series():\n # Test assigning a single item in a Series for issue\n # https://github.com/modin-project/modin/issues/3860\n modin_series = pd.Series(99)\n pandas_series = pandas.Series(99)\n modin_series[:1] = pd.Series(100)\n pandas_series[:1] = pandas.Series(100)\n df_equals(modin_series, pandas_series)\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\ndef test___len__(data):\n modin_df = pd.DataFrame(data)\n pandas_df = pandas.DataFrame(data)\n\n assert len(modin_df) == len(pandas_df)\n\n\ndef test_index_order():\n # see #1708 and #1869 for details\n df_modin, df_pandas = (\n pd.DataFrame(test_data[\"float_nan_data\"]),\n pandas.DataFrame(test_data[\"float_nan_data\"]),\n )\n rows_number = len(df_modin.index)\n level_0 = np.random.choice([x for x in range(10)], rows_number)\n level_1 = np.random.choice([x for x in range(10)], rows_number)\n index = pandas.MultiIndex.from_arrays([level_0, level_1])\n\n df_modin.index = index\n df_pandas.index = index\n\n for func in [\"all\", \"any\", \"mad\", \"count\"]:\n df_equals(\n getattr(df_modin, func)(level=0).index,\n getattr(df_pandas, func)(level=0).index,\n )\n\n\[email protected](\"data\", test_data_values, ids=test_data_keys)\[email protected](\"sortorder\", [0, 3, 5])\ndef test_multiindex_from_frame(data, sortorder):\n modin_df, pandas_df = create_test_dfs(data)\n\n def call_from_frame(df):\n if type(df).__module__.startswith(\"pandas\"):\n return pandas.MultiIndex.from_frame(df, sortorder)\n else:\n return pd.MultiIndex.from_frame(df, sortorder)\n\n eval_general(modin_df, pandas_df, call_from_frame, comparator=assert_index_equal)\n"
] | [
[
"pandas.to_datetime",
"pandas.Series",
"pandas.RangeIndex",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.random.randint",
"numpy.testing.assert_equal",
"pandas.read_csv",
"pandas.MultiIndex.from_frame",
"numpy.arange",
"pandas.Index",
"pandas.DatetimeIndex",
"pandas.testing.assert_index_equal",
"numpy.log",
"pandas.concat",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"numpy.array",
"numpy.random.RandomState",
"matplotlib.use",
"pandas.MultiIndex.from_arrays"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
JoannaWangBK/Attention-Based-Aspect-Extraction | [
"2c4b64f5dafef0a645b35c06b1d1ed53d186e63d"
] | [
"code/preprocess.py"
] | [
"from sklearn.feature_extraction.text import CountVectorizer\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nimport codecs\nimport json\nfrom tqdm import tqdm\nimport argparse\n\n\ndef parseSentence(line):\n lmtzr = WordNetLemmatizer()\n stop = stopwords.words('english')\n text_token = CountVectorizer().build_tokenizer()(line.lower())\n text_rmstop = [i for i in text_token if i not in stop]\n text_stem = [lmtzr.lemmatize(w) for w in text_rmstop]\n return text_stem\n\n\ndef preprocess_train(domain):\n f = codecs.open('../datasets/' + domain + '/train.txt', 'r', 'utf-8')\n out = codecs.open('../preprocessed_data/' + domain + '/train.txt', 'w', 'utf-8')\n\n for line in f:\n tokens = parseSentence(line)\n if len(tokens) > 0:\n out.write(' '.join(tokens) + '\\n')\n\n\ndef preprocess_test(domain):\n # For restaurant domain, only keep sentences with single \n # aspect label that in {Food, Staff, Ambience}\n\n f1 = codecs.open('../datasets/' + domain + '/test.txt', 'r', 'utf-8')\n f2 = codecs.open('../datasets/' + domain + '/test_label.txt', 'r', 'utf-8')\n out1 = codecs.open('../preprocessed_data/' + domain + '/test.txt', 'w', 'utf-8')\n out2 = codecs.open('../preprocessed_data/' + domain + '/test_label.txt', 'w', 'utf-8')\n\n for text, label in zip(f1, f2):\n label = label.strip()\n if domain == 'speaker' and label not in ['sound', 'durability', 'design', 'price']:\n continue \n if domain == 'restaurant' and label not in ['Food', 'Staff', 'Ambience']:\n continue\n tokens = parseSentence(text)\n if len(tokens) > 0:\n out1.write(' '.join(tokens) + '\\n')\n out2.write(label + '\\n')\n\n\ndef preprocess_line(line):\n return \" \".join([morph.parse(w)[0].normal_form for w in word_tokenize(line.lower())])\n\n\ndef preprocess_reviews_train():\n with open(\"../preprocessed_data/app_reviews/appstore.json\", \"rt\") as f:\n reviews = json.load(f)\n with open(\"../preprocessed_data/app_reviews/train.txt\", \"wt\") as f:\n for rev in tqdm(reviews):\n if isinstance(rev, dict):\n f.write(preprocess_line(rev[\"Title\"] + \" \" + rev[\"Review\"]) + \"\\n\")\n\n\ndef preprocess(domain):\n print('\\t' + domain + ' train set ...')\n preprocess_train(domain)\n print('\\t' + domain + ' test set ...')\n preprocess_test(domain)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--domain\", dest=\"domain\", type=str, metavar='<str>', default='speaker',\n help=\"domain of the corpus\")\n args = parser.parse_args()\n\n if args.domain == \"app_reviews\":\n import pymorphy2\n from nltk.tokenize import word_tokenize\n\n morph = pymorphy2.MorphAnalyzer()\n\n print('Preprocessing raw review sentences ...')\n preprocess_reviews_train()\n else:\n preprocess(args.domain)\n"
] | [
[
"sklearn.feature_extraction.text.CountVectorizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lelilia/advent_of_code_2021 | [
"cee570be9865e089216c3fdad11418b5eee8a07b"
] | [
"day20.py"
] | [
"\"\"\" Advent of Codee 2021 day 20 \"\"\"\n\nimport numpy as np\n\nINPUT_FILE = \"input20.txt\"\n\nMASK = np.array([[256, 128, 64], [32, 16, 8], [4, 2, 1]])\n\n\ndef get_input(input_file):\n with open(input_file) as f:\n algo, data = f.read().strip().split(\"\\n\\n\")\n\n algo = get_algo(algo)\n matrix = get_matrix(data.split(\"\\n\"))\n return algo, matrix\n\n\ndef get_matrix(matrix_data):\n matrix = np.array([[a for a in b] for b in matrix_data])\n matrix = np.where(matrix == \".\", 0, matrix)\n matrix = np.where(matrix == \"#\", 1, matrix).astype(int)\n return matrix\n\n\ndef get_algo(algo_data):\n return algo_data.replace(\"#\", \"1\").replace(\".\", \"0\")\n\n\ndef run_n_steps(n, matrix, algo):\n for step in range(n):\n padding_value = int(algo[0]) if step % 2 == 1 else int(algo[511])\n matrix = np.pad(matrix, 2, constant_values=padding_value)\n new_matrix = np.zeros(matrix.shape, dtype=int)\n\n for i in range(1, len(matrix) - 1):\n for j in range(1, len(matrix[0]) - 1):\n index = np.sum(MASK * matrix[i - 1 : i + 2, j - 1 : j + 2])\n new_matrix[i, j] = int(algo[index])\n matrix = new_matrix[1:-1, 1:-1].copy()\n return np.sum(matrix)\n\n\nif __name__ == \"__main__\":\n algo, matrix = get_input(INPUT_FILE)\n print(\"Part 1:\\t\", run_n_steps(2, matrix, algo))\n print(\"Part 2:\\t\", run_n_steps(50, matrix, algo))\n"
] | [
[
"numpy.pad",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
damirazo/pandas-profiling | [
"e436694befc25463073652b4abddc9b9537a555d"
] | [
"tests/unit/test_example.py"
] | [
"import sys\n\nimport pandas as pd\nimport numpy as np\n\nfrom pandas_profiling import ProfileReport\n\n\ndef test_example(get_data_file, test_output_dir):\n file_name = get_data_file(\n \"meteorites.csv\",\n \"https://data.nasa.gov/api/views/gh4g-9sfh/rows.csv?accessType=DOWNLOAD\",\n )\n\n df = pd.read_csv(file_name)\n # Note: Pandas does not support dates before 1880, so we ignore these for this analysis\n df[\"year\"] = pd.to_datetime(df[\"year\"], errors=\"coerce\")\n\n # Example: Constant variable\n df[\"source\"] = \"NASA\"\n\n # Example: Boolean variable\n df[\"boolean\"] = np.random.choice([True, False], df.shape[0])\n\n # Example: Mixed with base types\n df[\"mixed\"] = np.random.choice([1, \"A\"], df.shape[0])\n\n # Example: Highly correlated variables\n df[\"reclat_city\"] = df[\"reclat\"] + np.random.normal(scale=5, size=(len(df)))\n\n # Example: Duplicate observations\n duplicates_to_add = pd.DataFrame(df.iloc[0:10])\n duplicates_to_add[\"name\"] += \" copy\"\n\n df = df.append(duplicates_to_add, ignore_index=True)\n\n output_file = test_output_dir / \"profile.html\"\n profile = ProfileReport(\n df, title=\"NASA Meteorites\", samples={\"head\": 5, \"tail\": 5}, sort=\"ascending\"\n )\n profile.to_file(output_file=output_file)\n assert (test_output_dir / \"profile.html\").exists(), \"Output file does not exist\"\n assert (\n type(profile.get_description()) == dict\n and len(profile.get_description().items()) == 7\n ), \"Unexpected result\"\n if sys.version_info[1] >= 6:\n assert list(profile.get_description()[\"variables\"].keys()) == [\n \"boolean\",\n \"fall\",\n \"GeoLocation\",\n \"id\",\n \"mass (g)\",\n \"mixed\",\n \"name\",\n \"nametype\",\n \"recclass\",\n \"reclat\",\n \"reclat_city\",\n \"reclong\",\n \"source\",\n \"year\",\n ], \"Ascending sort did not work\"\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
squio/Photo_Mosaic | [
"3340324d33c9e94ca780ca1164c843c645a175d7"
] | [
"scripts/Mosaic_Creator.py"
] | [
"#!/usr/bin/env python\nimport os, random, argparse\nfrom PIL import Image\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='Creates a photomosaic from input images')\nparser.add_argument('--target', dest='target', required=True, help=\"Image to create mosaic from\")\nparser.add_argument('--images', dest='images', required=True, help=\"Diectory of images\")\nparser.add_argument('--grid', nargs=2, dest='grid', required=True, help=\"Size of photo mosaic\")\nparser.add_argument('--output', dest='output', required=False)\n\nargs = parser.parse_args()\n\n\ndef getImages(images_directory):\n files = os.listdir(images_directory)\n images = []\n for file in files:\n filePath = os.path.abspath(os.path.join(images_directory, file))\n try:\n fp = open(filePath, \"rb\")\n im = Image.open(fp)\n images.append(im)\n im.load()\n fp.close()\n except:\n print(\"Invalid image: %s\" % (filePath,))\n return (images)\n\n\ndef getAverageRGB(image):\n im = np.array(image)\n w, h, d = im.shape\n return (tuple(np.average(im.reshape(w * h, d), axis=0)))\n\n\ndef splitImage(image, size):\n W, H = image.size[0], image.size[1]\n m, n = size\n w, h = int(W / n), int(H / m)\n imgs = []\n for j in range(m):\n for i in range(n):\n imgs.append(image.crop((i * w, j * h, (i + 1) * w, (j + 1) * h)))\n return (imgs)\n\n\ndef getBestMatchIndex(input_avg, avgs):\n avg = input_avg\n index = 0\n min_index = 0\n min_dist = float(\"inf\")\n for val in avgs:\n dist = ((val[0] - avg[0]) * (val[0] - avg[0]) +\n (val[1] - avg[1]) * (val[1] - avg[1]) +\n (val[2] - avg[2]) * (val[2] - avg[2]))\n if dist < min_dist:\n min_dist = dist\n min_index = index\n index += 1\n return (min_index)\n\n\ndef createImageGrid(images, dims):\n m, n = dims\n width = max([img.size[0] for img in images])\n height = max([img.size[1] for img in images])\n grid_img = Image.new('RGB', (n * width, m * height))\n for index in range(len(images)):\n row = int(index / n)\n col = index - n * row\n grid_img.paste(images[index], (col * width, row * height))\n return (grid_img)\n\n\ndef createPhotomosaic(target_image, input_images, grid_size,\n reuse_images=True):\n target_images = splitImage(target_image, grid_size)\n\n output_images = []\n count = 0\n batch_size = int(len(target_images) / 10)\n avgs = []\n for img in input_images:\n avgs.append(getAverageRGB(img))\n\n for img in target_images:\n avg = getAverageRGB(img)\n match_index = getBestMatchIndex(avg, avgs)\n output_images.append(input_images[match_index])\n if count > 0 and batch_size > 10 and count % batch_size is 0:\n print('processed %d of %d...' % (count, len(target_images)))\n count += 1\n # remove selected image from input if flag set\n if not reuse_images:\n input_images.remove(match_index)\n\n mosaic_image = createImageGrid(output_images, grid_size)\n return (mosaic_image)\n\n\n### ---------------------------------------------\n\n\ntarget_image = Image.open(args.target)\n\n# input images\nprint('reading input folder...')\ninput_images = getImages(args.images)\n\n# check if any valid input images found\nif input_images == []:\n print('No input images found in %s. Exiting.' 
% (args.images,))\n exit()\n\n# shuffle list - to get a more varied output?\nrandom.shuffle(input_images)\n\n# size of grid\ngrid_size = (int(args.grid[0]), int(args.grid[1]))\n\n# output\noutput_filename = 'mosaic.jpeg'\nif args.output:\n output_filename = args.output\n\n# re-use any image in input\nreuse_images = True\n\n# resize the input to fit original image size?\nresize_input = True\n\nprint('starting photomosaic creation...')\n\n# if images can't be reused, ensure m*n <= num_of_images\nif not reuse_images:\n if grid_size[0] * grid_size[1] > len(input_images):\n print('grid size less than number of images')\n exit()\n\n# resizing input\nif resize_input:\n print('resizing images...')\n # for given grid size, compute max dims w,h of tiles\n dims = (int(target_image.size[0] / grid_size[1]),\n int(target_image.size[1] / grid_size[0]))\n print(\"max tile dims: %s\" % (dims,))\n # resize\n for img in input_images:\n img.thumbnail(dims)\n\n# create photomosaic\nmosaic_image = createPhotomosaic(target_image, input_images, grid_size, reuse_images)\n\n# write out mosaic\nmosaic_image.save(output_filename, 'jpeg')\n\nprint(\"saved output to %s\" % (output_filename,))\nprint('done.')\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Caro-app/cvat | [
"604be638419221a35c55fa9f0d565c6adaf0ad71"
] | [
"datumaro/tests/test_cvat_format.py"
] | [
"import numpy as np\nimport os\nimport os.path as osp\nfrom xml.etree import ElementTree as ET\n\nfrom unittest import TestCase\n\nfrom datumaro.components.extractor import (Extractor, DatasetItem,\n AnnotationType, PointsObject, PolygonObject, PolyLineObject, BboxObject,\n LabelCategories,\n)\nfrom datumaro.components.importers.cvat import CvatImporter\nfrom datumaro.components.converters.cvat import CvatConverter\nfrom datumaro.components.project import Project\nimport datumaro.components.formats.cvat as Cvat\nfrom datumaro.util.image import save_image\nfrom datumaro.util.test_utils import TestDir\n\n\nclass CvatExtractorTest(TestCase):\n @staticmethod\n def generate_dummy_cvat(path):\n images_dir = osp.join(path, Cvat.CvatPath.IMAGES_DIR)\n anno_dir = osp.join(path, Cvat.CvatPath.ANNOTATIONS_DIR)\n\n os.makedirs(images_dir)\n os.makedirs(anno_dir)\n\n root_elem = ET.Element('annotations')\n ET.SubElement(root_elem, 'version').text = '1.1'\n\n meta_elem = ET.SubElement(root_elem, 'meta')\n task_elem = ET.SubElement(meta_elem, 'task')\n ET.SubElement(task_elem, 'z_order').text = 'True'\n ET.SubElement(task_elem, 'mode').text = 'interpolation'\n\n labels_elem = ET.SubElement(task_elem, 'labels')\n\n label1_elem = ET.SubElement(labels_elem, 'label')\n ET.SubElement(label1_elem, 'name').text = 'label1'\n label1_attrs_elem = ET.SubElement(label1_elem, 'attributes')\n\n label1_a1_elem = ET.SubElement(label1_attrs_elem, 'attribute')\n ET.SubElement(label1_a1_elem, 'name').text = 'a1'\n ET.SubElement(label1_a1_elem, 'input_type').text = 'checkbox'\n ET.SubElement(label1_a1_elem, 'default_value').text = 'false'\n ET.SubElement(label1_a1_elem, 'values').text = 'false\\ntrue'\n\n label1_a2_elem = ET.SubElement(label1_attrs_elem, 'attribute')\n ET.SubElement(label1_a2_elem, 'name').text = 'a2'\n ET.SubElement(label1_a2_elem, 'input_type').text = 'radio'\n ET.SubElement(label1_a2_elem, 'default_value').text = 'v1'\n ET.SubElement(label1_a2_elem, 'values').text = 'v1\\nv2\\nv3'\n\n label2_elem = ET.SubElement(labels_elem, 'label')\n ET.SubElement(label2_elem, 'name').text = 'label2'\n\n # item 1\n save_image(osp.join(images_dir, 'img0.jpg'), np.ones((8, 8, 3)))\n item1_elem = ET.SubElement(root_elem, 'image')\n item1_elem.attrib.update({\n 'id': '0', 'name': 'img0', 'width': '8', 'height': '8'\n })\n\n item1_ann1_elem = ET.SubElement(item1_elem, 'box')\n item1_ann1_elem.attrib.update({\n 'label': 'label1', 'occluded': '1', 'z_order': '1',\n 'xtl': '0', 'ytl': '2', 'xbr': '4', 'ybr': '4'\n })\n item1_ann1_a1_elem = ET.SubElement(item1_ann1_elem, 'attribute')\n item1_ann1_a1_elem.attrib['name'] = 'a1'\n item1_ann1_a1_elem.text = 'true'\n item1_ann1_a2_elem = ET.SubElement(item1_ann1_elem, 'attribute')\n item1_ann1_a2_elem.attrib['name'] = 'a2'\n item1_ann1_a2_elem.text = 'v3'\n\n item1_ann2_elem = ET.SubElement(item1_elem, 'polyline')\n item1_ann2_elem.attrib.update({\n 'label': '', 'points': '1.0,2;3,4;5,6;7,8'\n })\n\n # item 2\n save_image(osp.join(images_dir, 'img1.jpg'), np.ones((10, 10, 3)))\n item2_elem = ET.SubElement(root_elem, 'image')\n item2_elem.attrib.update({\n 'id': '1', 'name': 'img1', 'width': '8', 'height': '8'\n })\n\n item2_ann1_elem = ET.SubElement(item2_elem, 'polygon')\n item2_ann1_elem.attrib.update({\n 'label': '', 'points': '1,2;3,4;6,5', 'z_order': '1',\n })\n\n item2_ann2_elem = ET.SubElement(item2_elem, 'points')\n item2_ann2_elem.attrib.update({\n 'label': 'label2', 'points': '1,2;3,4;5,6', 'z_order': '2',\n })\n\n with open(osp.join(anno_dir, 'train.xml'), 'w') as f:\n 
f.write(ET.tostring(root_elem, encoding='unicode'))\n\n def test_can_load(self):\n class TestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=1, subset='train', image=np.ones((8, 8, 3)),\n annotations=[\n BboxObject(0, 2, 4, 2, label=0,\n attributes={\n 'occluded': True, 'z_order': 1,\n 'a1': 'true', 'a2': 'v3'\n }),\n PolyLineObject([1, 2, 3, 4, 5, 6, 7, 8],\n attributes={'occluded': False, 'z_order': 0}),\n ]),\n DatasetItem(id=2, subset='train', image=np.ones((10, 10, 3)),\n annotations=[\n PolygonObject([1, 2, 3, 4, 6, 5],\n attributes={'occluded': False, 'z_order': 1}),\n PointsObject([1, 2, 3, 4, 5, 6], label=1,\n attributes={'occluded': False, 'z_order': 2}),\n ]),\n ])\n\n def categories(self):\n label_categories = LabelCategories()\n for i in range(10):\n label_categories.add('label_' + str(i))\n return {\n AnnotationType.label: label_categories,\n }\n\n with TestDir() as test_dir:\n self.generate_dummy_cvat(test_dir.path)\n source_dataset = TestExtractor()\n\n parsed_dataset = CvatImporter()(test_dir.path).make_dataset()\n\n self.assertListEqual(\n sorted(source_dataset.subsets()),\n sorted(parsed_dataset.subsets()),\n )\n self.assertEqual(len(source_dataset), len(parsed_dataset))\n for subset_name in source_dataset.subsets():\n source_subset = source_dataset.get_subset(subset_name)\n parsed_subset = parsed_dataset.get_subset(subset_name)\n for item_a, item_b in zip(source_subset, parsed_subset):\n self.assertEqual(len(item_a.annotations), len(item_b.annotations))\n for ann_a, ann_b in zip(item_a.annotations, item_b.annotations):\n self.assertEqual(ann_a, ann_b)\n\n\nclass CvatConverterTest(TestCase):\n def _test_save_and_load(self, source_dataset, converter, test_dir,\n importer_params=None, target_dataset=None):\n converter(source_dataset, test_dir.path)\n\n if not importer_params:\n importer_params = {}\n project = Project.import_from(test_dir.path, 'cvat', **importer_params)\n parsed_dataset = project.make_dataset()\n\n if target_dataset is not None:\n source_dataset = target_dataset\n self.assertListEqual(\n sorted(source_dataset.subsets()),\n sorted(parsed_dataset.subsets()),\n )\n\n self.assertEqual(len(source_dataset), len(parsed_dataset))\n\n for subset_name in source_dataset.subsets():\n source_subset = source_dataset.get_subset(subset_name)\n parsed_subset = parsed_dataset.get_subset(subset_name)\n self.assertEqual(len(source_subset), len(parsed_subset))\n for idx, (item_a, item_b) in enumerate(\n zip(source_subset, parsed_subset)):\n self.assertEqual(item_a, item_b, str(idx))\n\n def test_can_save_and_load(self):\n label_categories = LabelCategories()\n for i in range(10):\n label_categories.add(str(i))\n label_categories.items[2].attributes.update(['a1', 'a2'])\n label_categories.attributes.update(['z_order', 'occluded'])\n\n class SrcTestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=0, subset='s1', image=np.zeros((5, 10, 3)),\n annotations=[\n PolygonObject([0, 0, 4, 0, 4, 4],\n label=1, group=4,\n attributes={ 'occluded': True }),\n PolygonObject([5, 0, 9, 0, 5, 5],\n label=2, group=4,\n attributes={ 'unknown': 'bar' }),\n PointsObject([1, 1, 3, 2, 2, 3],\n label=2,\n attributes={ 'a1': 'x', 'a2': 42 }),\n ]\n ),\n DatasetItem(id=1, subset='s1',\n annotations=[\n PolyLineObject([0, 0, 4, 0, 4, 4],\n label=3, id=4, group=4),\n BboxObject(5, 0, 1, 9,\n label=3, id=4, group=4),\n ]\n ),\n\n DatasetItem(id=0, subset='s2', image=np.zeros((5, 10, 3)),\n annotations=[\n PolygonObject([0, 0, 4, 0, 4, 4],\n label=3, 
group=4,\n attributes={ 'z_order': 1, 'occluded': False }),\n PolyLineObject([5, 0, 9, 0, 5, 5]), # will be skipped\n ]\n ),\n ])\n\n def categories(self):\n return { AnnotationType.label: label_categories }\n\n class DstTestExtractor(Extractor):\n def __iter__(self):\n return iter([\n DatasetItem(id=0, subset='s1', image=np.zeros((5, 10, 3)),\n annotations=[\n PolygonObject([0, 0, 4, 0, 4, 4],\n label=1, group=4,\n attributes={ 'z_order': 0, 'occluded': True }),\n PolygonObject([5, 0, 9, 0, 5, 5],\n label=2, group=4,\n attributes={ 'z_order': 0, 'occluded': False }),\n PointsObject([1, 1, 3, 2, 2, 3],\n label=2,\n attributes={ 'z_order': 0, 'occluded': False,\n 'a1': 'x', 'a2': '42' }),\n ]\n ),\n DatasetItem(id=1, subset='s1',\n annotations=[\n PolyLineObject([0, 0, 4, 0, 4, 4],\n label=3, group=4,\n attributes={ 'z_order': 0, 'occluded': False }),\n BboxObject(5, 0, 1, 9,\n label=3, group=4,\n attributes={ 'z_order': 0, 'occluded': False }),\n ]\n ),\n\n DatasetItem(id=0, subset='s2', image=np.zeros((5, 10, 3)),\n annotations=[\n PolygonObject([0, 0, 4, 0, 4, 4],\n label=3, group=4,\n attributes={ 'z_order': 1, 'occluded': False }),\n ]\n ),\n ])\n\n def categories(self):\n return { AnnotationType.label: label_categories }\n\n with TestDir() as test_dir:\n self._test_save_and_load(SrcTestExtractor(),\n CvatConverter(save_images=True), test_dir,\n target_dataset=DstTestExtractor())\n"
] | [
[
"numpy.zeros",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GXYM/DRRG | [
"ce63248ce44aac59821ccc1b405a64953aa91b51"
] | [
"eval.py"
] | [
"import os\nimport cv2\nimport numpy as np\nimport subprocess\nfrom util.config import config as cfg\nfrom util.misc import mkdirs\n\n\ndef osmkdir(out_dir):\n import shutil\n if os.path.exists(out_dir):\n shutil.rmtree(out_dir)\n os.makedirs(out_dir)\n\n\ndef analysize_result(source_dir, fid_path, outpt_dir, name):\n\n bad_txt = open(\"{}/eval.txt\".format(outpt_dir), 'w')\n all_eval = open(\"{}/{}/{}_eval.txt\".format(cfg.output_dir, \"Analysis\", name), 'a+')\n sel_list = list()\n with open(fid_path) as f:\n lines = f.read().split(\"\\n\")\n for line in lines:\n line_items = line.split(\" \")\n id = line_items[0]\n precision = float(line_items[2].split('=')[-1])\n recall = float(line_items[4].split('=')[-1])\n if id != \"ALL\" and (precision < 0.5 or recall < 0.5):\n img_path = os.path.join(source_dir, line_items[0].replace(\".txt\", \".jpg\"))\n os.system('cp {} {}'.format(img_path, outpt_dir))\n sel_list.append((int(id.replace(\".txt\", \"\").replace(\"img\", \"\").replace(\"_\", \"\")), line))\n if id == \"ALL\":\n all_eval.write(\"{} {} {}\\n\".format(\n outpt_dir.split('/')[-1],\n \"{}/{}/{}\".format(cfg.tr_thresh, cfg.tcl_thresh, cfg.expend),\n line))\n sel_list = sorted(sel_list, key=lambda its: its[0])\n bad_txt.write('\\n'.join([its[1] for its in sel_list]))\n all_eval.close()\n bad_txt.close()\n\n\ndef deal_eval_total_text(debug=False):\n # compute DetEval\n eval_dir = os.path.join(cfg.output_dir, \"Analysis\", \"output_eval\")\n if not os.path.exists(eval_dir):\n os.makedirs(eval_dir)\n\n print('Computing DetEval in {}/{}'.format(cfg.output_dir, cfg.exp_name))\n subprocess.call(\n ['python', 'dataset/total_text/Evaluation_Protocol/Python_scripts/Deteval.py', cfg.exp_name, '--tr', '0.7',\n '--tp', '0.6'])\n subprocess.call(\n ['python', 'dataset/total_text/Evaluation_Protocol/Python_scripts/Deteval.py', cfg.exp_name, '--tr', '0.8',\n '--tp', '0.4'])\n\n if debug:\n source_dir = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name))\n outpt_dir_base = os.path.join(cfg.output_dir, \"Analysis\", \"eval_view\", \"total_text\")\n if not os.path.exists(outpt_dir_base):\n mkdirs(outpt_dir_base)\n\n outpt_dir1 = os.path.join(outpt_dir_base, \"{}_{}_{}_{}_{}\"\n .format(cfg.test_size[0], cfg.test_size[1], cfg.checkepoch, 0.7, 0.6))\n osmkdir(outpt_dir1)\n fid_path1 = '{}/Eval_TotalText_{}_{}.txt'.format(eval_dir, 0.7, 0.6)\n\n analysize_result(source_dir, fid_path1, outpt_dir1, \"totalText\")\n\n outpt_dir2 = os.path.join(outpt_dir_base, \"{}_{}_{}_{}_{}\"\n .format(cfg.test_size[0], cfg.test_size[1], cfg.checkepoch, 0.8, 0.4))\n osmkdir(outpt_dir2)\n fid_path2 = '{}/Eval_TotalText_{}_{}.txt'.format(eval_dir, 0.8, 0.4)\n\n analysize_result(source_dir, fid_path2, outpt_dir2, \"totalText\")\n\n print('End.')\n\n\ndef deal_eval_ctw1500(debug=False):\n # compute DetEval\n eval_dir = os.path.join(cfg.output_dir, \"Analysis\", \"output_eval\")\n if not os.path.exists(eval_dir):\n os.makedirs(eval_dir)\n\n print('Computing DetEval in {}/{}'.format(cfg.output_dir, cfg.exp_name))\n subprocess.call(['python', 'dataset/ctw1500/Evaluation_Protocol/ctw1500_eval.py', cfg.exp_name])\n\n if debug:\n source_dir = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name))\n outpt_dir_base = os.path.join(cfg.output_dir, \"Analysis\", \"eval_view\", \"ctw1500\")\n if not os.path.exists(outpt_dir_base):\n mkdirs(outpt_dir_base)\n\n outpt_dir = os.path.join(outpt_dir_base, \"{}_{}_{}\".format(cfg.test_size[0], cfg.test_size[1], cfg.checkepoch))\n osmkdir(outpt_dir)\n fid_path1 = 
'{}/Eval_ctw1500_{}.txt'.format(eval_dir, 0.5)\n\n analysize_result(source_dir, fid_path1, outpt_dir, \"ctw1500\")\n\n print('End.')\n\n\ndef deal_eval_icdar15(debug=False):\n # compute DetEval\n eval_dir = os.path.join(cfg.output_dir, \"Analysis\", \"output_eval\")\n if not os.path.exists(eval_dir):\n os.makedirs(eval_dir)\n\n input_dir = 'output/{}'.format(cfg.exp_name)\n father_path = os.path.abspath(input_dir)\n print(father_path)\n print('Computing DetEval in {}/{}'.format(cfg.output_dir, cfg.exp_name))\n subprocess.call(['sh', 'dataset/icdar15/eval.sh', father_path])\n\n if debug:\n source_dir = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name))\n outpt_dir_base = os.path.join(cfg.output_dir, \"Analysis\", \"eval_view\", \"icdar15\")\n if not os.path.exists(outpt_dir_base):\n mkdirs(outpt_dir_base)\n\n outpt_dir = os.path.join(outpt_dir_base, \"{}_{}_{}\".format(cfg.test_size[0], cfg.test_size[1], cfg.checkepoch))\n osmkdir(outpt_dir)\n fid_path1 = '{}/Eval_icdar15.txt'.format(eval_dir)\n\n analysize_result(source_dir, fid_path1, outpt_dir, \"icdar15\")\n\n print('End.')\n\n pass\n\n\ndef deal_eval_TD500(debug=False):\n # compute DetEval\n eval_dir = os.path.join(cfg.output_dir, \"Analysis\", \"output_eval\")\n if not os.path.exists(eval_dir):\n os.makedirs(eval_dir)\n\n input_dir = 'output/{}'.format(cfg.exp_name)\n father_path = os.path.abspath(input_dir)\n print(father_path)\n print('Computing DetEval in {}/{}'.format(cfg.output_dir, cfg.exp_name))\n subprocess.call(['sh', 'dataset/TD500/eval.sh', father_path])\n\n if debug:\n source_dir = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name))\n outpt_dir_base = os.path.join(cfg.output_dir, \"Analysis\", \"eval_view\", \"TD500\")\n if not os.path.exists(outpt_dir_base):\n mkdirs(outpt_dir_base)\n\n outpt_dir = os.path.join(outpt_dir_base, \"{}_{}_{}\".format(cfg.test_size[0], cfg.test_size[1], cfg.checkepoch))\n osmkdir(outpt_dir)\n fid_path1 = '{}/Eval_TD500.txt'.format(eval_dir)\n\n analysize_result(source_dir, fid_path1, outpt_dir, \"TD500\")\n\n print('End.')\n\n\ndef data_transfer_ICDAR(contours):\n cnts = list()\n for cont in contours:\n rect = cv2.minAreaRect(cont)\n points = cv2.boxPoints(rect)\n points = np.int0(points)\n # print(points.shape)\n # points = np.reshape(points, (4, 2))\n cnts.append(points)\n return cnts\n\n\ndef data_transfer_TD500(contours, res_file, img=None):\n with open(res_file, 'w') as f:\n for cont in contours:\n rect = cv2.minAreaRect(cont)\n points = cv2.boxPoints(rect)\n box = np.int0(points)\n\n cv2.drawContours(img, [box], 0, (0, 255, 0), 3)\n # cv2.imshow(\"lllll\", img)\n # cv2.waitKey(0)\n\n cx, cy = rect[0]\n w_, h_ = rect[1]\n angle = rect[2]\n mid_ = 0\n if angle > 45:\n angle = 90 - angle\n mid_ = w_;\n w_ = h_;\n h_ = mid_\n elif angle < -45:\n angle = 90 + angle\n mid_ = w_;\n w_ = h_;\n h_ = mid_\n angle = angle / 180 * 3.141592653589\n\n x_min = int(cx - w_ / 2)\n x_max = int(cx + w_ / 2)\n y_min = int(cy - h_ / 2)\n y_max = int(cy + h_ / 2)\n f.write('{},{},{},{},{}\\r\\n'.format(x_min, y_min, x_max, y_max, angle))\n\n return img\n\n\ndef data_transfer_MLT2017(contours, res_file):\n with open(res_file, 'w') as f:\n for cont in contours:\n rect = cv2.minAreaRect(cont)\n points = cv2.boxPoints(rect)\n points = np.int0(points)\n p = np.reshape(points, -1)\n f.write('{},{},{},{},{},{},{},{},{}\\r\\n'\n .format(p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], 1))\n\n\n\n"
] | [
[
"numpy.int0",
"numpy.reshape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
befelix/GPyOpt | [
"333023becdffbc35591a8a4a2d0c770659c6cbb7"
] | [
"GPyOpt/util/general.py"
] | [
"# Copyright (c) 2016, the GPyOpt Authors\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\nimport numpy as np\nfrom scipy.special import erfc\nimport time\nfrom ..core.errors import InvalidConfigError\n\ndef compute_integrated_acquisition(acquisition,x):\n '''\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n '''\n\n acqu_x = 0\n\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]\n acqu_x += acquisition.acquisition_function(x)\n\n acqu_x = acqu_x/acquisition.model.num_hmc_samples\n return acqu_x\n\ndef compute_integrated_acquisition_withGradients(acquisition,x):\n '''\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n '''\n\n acqu_x = 0\n d_acqu_x = 0\n\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]\n acqu_x_sample, d_acqu_x_sample = acquisition.acquisition_function_withGradients(x)\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n\n acqu_x = acqu_x/acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x/acquisition.model.num_hmc_samples\n\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f,X):\n '''\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n '''\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:(i+1)])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds,num_data):\n '''\n Generates a multidimensional grid uniformly distributed.\n :param bounds: tuple defining the box constraints.\n :num_data: number of data points to generate.\n\n '''\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data,dim))\n for k in range(0,dim): Z_rand[:,k] = np.random.uniform(low=bounds[k][0],high=bounds[k][1],size=num_data)\n return Z_rand\n\n\ndef reshape(x,input_dim):\n '''\n Reshapes x into a matrix with input_dim columns\n\n '''\n x = np.array(x)\n if x.size ==input_dim:\n x = x.reshape((1,input_dim))\n return x\n\ndef get_moments(model,x):\n '''\n Moments (mean and sdev.) of a GP model at x\n\n '''\n input_dim = model.X.shape[1]\n x = reshape(x,input_dim)\n fmin = min(model.predict(model.X)[0])\n m, v = model.predict(x)\n s = np.sqrt(np.clip(v, 0, np.inf))\n return (m,s, fmin)\n\ndef get_d_moments(model,x):\n '''\n Gradients with respect to x of the moments (mean and sdev.) 
of the GP\n :param model: GPy model.\n :param x: location where the gradients are evaluated.\n '''\n input_dim = model.input_dim\n x = reshape(x,input_dim)\n _, v = model.predict(x)\n dmdx, dvdx = model.predictive_gradients(x)\n dmdx = dmdx[:,:,0]\n dsdx = dvdx / (2*np.sqrt(v))\n return (dmdx, dsdx)\n\n\ndef get_quantiles(acquisition_par, fmin, m, s):\n '''\n Quantiles of the Gaussian distribution useful to determine the acquisition function values\n :param acquisition_par: parameter of the acquisition function\n :param fmin: current minimum.\n :param m: vector of means.\n :param s: vector of standard deviations.\n '''\n if isinstance(s, np.ndarray):\n s[s<1e-10] = 1e-10\n elif s< 1e-10:\n s = 1e-10\n u = (fmin-m-acquisition_par)/s\n phi = np.exp(-0.5 * u**2) / np.sqrt(2*np.pi)\n Phi = 0.5 * erfc(-u / np.sqrt(2))\n return (phi, Phi, u)\n\n\ndef best_value(Y,sign=1):\n '''\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n '''\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i]=Y[:(i+1)].min()\n else:\n Y_best[i]=Y[:(i+1)].max()\n return Y_best\n\ndef spawn(f):\n '''\n Function for parallel evaluation of the acquisition function\n '''\n def fun(pipe,x):\n pipe.send(f(x))\n pipe.close()\n return fun\n\n\ndef evaluate_function(f,X):\n '''\n Returns the evaluation of a function *f* and the time per evaluation\n '''\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i,:] = f(X[i,:])\n Y_time[i,:] = time.time() - time_zero\n return Y_eval, Y_time\n\n\ndef values_to_array(input_values):\n '''\n Transforms a values of int, float and tuples to a column vector numpy array\n '''\n if type(input_values)==tuple:\n values = np.array(input_values).reshape(-1,1)\n elif type(input_values) == np.ndarray:\n values = np.atleast_2d(input_values)\n elif type(input_values)==int or type(input_values)==float or type(np.int64):\n values = np.atleast_2d(np.array(input_values))\n else:\n print('Type to transform not recognized')\n return values\n\n\ndef merge_values(values1,values2):\n '''\n Merges two numpy arrays by calculating all possible combinations of rows\n '''\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1,row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\n\ndef normalize(Y, normalization_type='stats'):\n \"\"\"Normalize the vector Y using statistics or its range.\n\n :param Y: Row or column vector that you want to normalize.\n :param normalization_type: String specifying the kind of normalization\n to use. Options are 'stats' to use mean and standard deviation,\n or 'maxmin' to use the range of function values.\n :return Y_normalized: The normalized vector.\n \"\"\"\n Y = np.asarray(Y, dtype=float)\n\n if np.max(Y.shape) != Y.size:\n raise NotImplementedError('Only 1-dimensional arrays are supported.')\n\n # Only normalize with non null sdev (divide by zero). 
For only one\n # data point both std and ptp return 0.\n if normalization_type == 'stats':\n Y_norm = Y - Y.mean()\n std = Y.std()\n if std > 0:\n Y_norm /= std\n elif normalization_type == 'maxmin':\n Y_norm = Y - Y.min()\n y_range = np.ptp(Y)\n if y_range > 0:\n Y_norm /= y_range\n else:\n raise ValueError('Unknown normalization type: {}'.format(normalization_type))\n\n return Y_norm\n"
] | [
[
"numpy.hstack",
"numpy.sqrt",
"numpy.clip",
"numpy.asarray",
"numpy.ptp",
"numpy.ones",
"numpy.atleast_2d",
"numpy.max",
"numpy.argmin",
"numpy.exp",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WiIIiamTang/SummaRuNNer | [
"ea0c38c960f85a405848dd43355776e4b99d4d12"
] | [
"utils/Dataset.py"
] | [
"import csv\r\nimport torch\r\nimport torch.utils.data as data\r\nfrom torch.autograd import Variable\r\nfrom .Vocab import Vocab\r\nimport numpy as np\r\n\r\nclass Dataset(data.Dataset):\r\n def __init__(self, examples):\r\n super(Dataset,self).__init__()\r\n # data: {'sents':xxxx,'labels':'xxxx', 'summaries':[1,0]}\r\n self.examples = examples \r\n self.training = False\r\n def train(self):\r\n self.training = True\r\n return self\r\n def test(self):\r\n self.training = False\r\n return self\r\n def shuffle(self,words):\r\n np.random.shuffle(words)\r\n return ' '.join(words)\r\n def dropout(self,words,p=0.3):\r\n l = len(words)\r\n drop_index = np.random.choice(l,int(l*p))\r\n keep_words = [words[i] for i in range(l) if i not in drop_index]\r\n return ' '.join(keep_words)\r\n def __getitem__(self, idx):\r\n ex = self.examples[idx]\r\n return ex\r\n #words = ex['sents'].split()\r\n #guess = np.random.random()\r\n\r\n #if self.training:\r\n # if guess > 0.5:\r\n # sents = self.dropout(words,p=0.3)\r\n # else:\r\n # sents = self.shuffle(words)\r\n #else:\r\n # sents = ex['sents']\r\n #return {'id':ex['id'],'sents':sents,'labels':ex['labels']}\r\n \r\n def __len__(self):\r\n return len(self.examples)\r\n"
] | [
[
"numpy.random.shuffle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
srijankr/hawkeye | [
"674c729b1aa2825b84c7703063e1addbb9950c7a"
] | [
"src/code/birdwatch-metric-process-tweets.py"
] | [
"'''This script is used to collect the results for the Birdwatch system.'''\n\nimport pandas as pd\nfrom tqdm import tqdm\nimport random\nimport math\nimport pickle\n\nnotes = pd.read_csv(\"..//data//notes-00000-13-04-21.tsv\", sep='\\t')\nratings = pd.read_csv(\"..//data//ratings-00000-13-04-21.tsv\", sep='\\t')\n\n'''This function is used to get the top-ranked notes for the tweet that satisfy the criteria'''\ndef getCurrentlyRatedHelpfulNotesForTweet(ratingsWithNotesForTweet,maxCurrentlyRatedHelpfulNotes = None,minRatingsNeeded = None,minHelpfulnessRatioNeeded = None):\n \n scoredNotes = ratingsWithNotesForTweet.groupby('noteId').sum()\n scoredNotes['helpfulnessRatio'] = scoredNotes['helpful']/scoredNotes['numRatings']\n filteredNotes = scoredNotes[(scoredNotes['numRatings'] >= minRatingsNeeded) & (scoredNotes['helpfulnessRatio'] >= minHelpfulnessRatioNeeded)]\n return filteredNotes.sort_values(by='helpfulnessRatio', ascending=False)[:maxCurrentlyRatedHelpfulNotes]\n\n'''This function is used to find the minimum number of fake accounts needed \nfor the note to become the top ranked note and meet all the criteria.'''\n\ndef findNumberOfAccountsNeeded(ratingsWithNotesForTweet,candidateNotes,currentlyRatedHelpfulNotesIds,insertion=None,replacement=None):\n \n scoredNotes = ratingsWithNotesForTweet.groupby('noteId').sum()\n scoredNotes['helpfulnessRatio'] = scoredNotes['helpful']/scoredNotes['numRatings']\n\n #For a random note, do a run through of all the possible number of accounts one can use \n #to rate this note helpful and bring it to \"Currently Rated Helpful\" \n #i.e. top-ranked note in Birdwatrch system(if it is currently not) \n \n randomNoteId = random.choice(list(candidateNotes))\n numberOfAccounts = 1\n while(True):\n\n scoredNotesDummy = scoredNotes.copy(deep=True)\n\n '''We iteratively add fake accounts, which give fake 'helpful' \n ratings to the note and 'not helpful' \n ratings to notes currently ranked at the top.''' \n if insertion:\n #add helpful ratings to random note id\n scoredNotesDummy.loc[randomNoteId, 'helpful'] += numberOfAccounts\n scoredNotesDummy.loc[randomNoteId, 'numRatings'] += numberOfAccounts\n\n if replacement:\n for top3NoteId in currentlyRatedHelpfulNotesIds:\n scoredNotesDummy.loc[top3NoteId, 'numRatings'] += numberOfAccounts\n scoredNotesDummy.loc[top3NoteId, 'notHelpful'] += numberOfAccounts\n\n scoredNotesDummy['helpfulnessRatio'] = scoredNotesDummy['helpful']/scoredNotesDummy['numRatings']\n filteredNotesAboveThreshold = scoredNotesDummy[(scoredNotesDummy['numRatings'] >= minRatingsNeeded) & (scoredNotesDummy['helpfulnessRatio'] >= minHelpfulnessRatioNeeded)]\n currentlyRatedHelpfulNotesIdsNew = set(filteredNotesAboveThreshold.sort_values(by='helpfulnessRatio', ascending=False)[:maxCurrentlyRatedHelpfulNotes].index)\n\n #Does our (current note) occur in the top ranked notes?\n if randomNoteId in currentlyRatedHelpfulNotesIdsNew:\n return numberOfAccounts\n\n numberOfAccounts += 1 \n #allow a maximum of 10 fake accounts to be used by the attacker (for computational reasons) \n if numberOfAccounts > 10:\n return 10\n\n'''The variables can be changed according to requirements of your experiments'''\n\nmaxCurrentlyRatedHelpfulNotes = 1 #NUMBER OF TOP-RANKED NOTES\nminRatingsNeeded = 5 \nminHelpfulnessRatioNeeded = 0.84\n\nnumberOfAccountsTakenToMakeRandomNoteCurrrentlyRatedHelpful = {}\ninsertion_bw,replacement_bw = {},{}\ndefault_bw = set()\nrem = set()\n\ntotalTweets = list(set(notes['tweetId']))\nfor tweetId in tqdm(totalTweets):\n \n #Get all 
notes for this tweet\n notesForTweet = notes[notes['tweetId']==tweetId]\n allNotesSet = set(notesForTweet['noteId'])\n\n #Currently Helpful Notes\n ratingsWithNotesForTweet = notesForTweet.set_index('noteId').join(ratings.set_index('noteId'), lsuffix=\"_note\", rsuffix=\"_rating\", how='left')\n ratingsWithNotesForTweet['numRatings'] = ratingsWithNotesForTweet.apply(lambda x: 0 if math.isnan(x['helpful']) else 1, axis=1)\n currentlyRatedHelpfulNotes = getCurrentlyRatedHelpfulNotesForTweet(ratingsWithNotesForTweet,maxCurrentlyRatedHelpfulNotes = maxCurrentlyRatedHelpfulNotes,minRatingsNeeded = minRatingsNeeded,minHelpfulnessRatioNeeded = minHelpfulnessRatioNeeded)\n currentlyRatedHelpfulNotesIds = set(currentlyRatedHelpfulNotes.index)\n \n #Candidate Notes\n candidateNotes = allNotesSet - currentlyRatedHelpfulNotesIds\n\n limit = maxCurrentlyRatedHelpfulNotes\n #CASE 1:\n if len(candidateNotes)==0:\n default_bw.add(tweetId)\n continue\n \n #CASE 2:\n elif len(candidateNotes)>0 and len(currentlyRatedHelpfulNotesIds)<limit:\n insertion_bw[tweetId] = findNumberOfAccountsNeeded(ratingsWithNotesForTweet,candidateNotes,currentlyRatedHelpfulNotesIds,insertion=True,replacement=False) \n \n #CASE 3: \n elif len(allNotesSet)>limit and len(currentlyRatedHelpfulNotesIds)==limit:\n replacement_bw[tweetId] = findNumberOfAccountsNeeded(ratingsWithNotesForTweet,candidateNotes,currentlyRatedHelpfulNotesIds,insertion=True,replacement=True)\n \n else:\n rem.add(tweetId)\n\n'''The results are stored in the results folder. \nYou will need to create an empty results folder if it does not exist.'''\n\nwith open('results/bw-insertion.pickle', 'rb') as handle:\n pickle.dump(insertion_bw, handle, protocol=pickle.HIGHEST_PROTOCOL)\nwith open('results/bw-replacement.pickle', 'rb') as handle:\n pickle.dump(replacement_bw, handle, protocol=pickle.HIGHEST_PROTOCOL)\nwith open('results/bw-default.pickle', 'rb') as handle:\n pickle.dump(replacement_bw, handle, protocol=pickle.HIGHEST_PROTOCOL)"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
IL2HorusTeam/il2-heightmap-creator | [
"a7964106de125456e10dd91bd28b1ba4d109ed42"
] | [
"il2fb/maps/heightmaps/rendering.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport argparse\nimport logging\n\nfrom array import array\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom matplotlib.colors import LinearSegmentedColormap\nfrom pylab import contour, contourf\n\n\nfrom il2fb.maps.heightmaps.constants import HEIGHT_PACK_FORMAT\nfrom il2fb.maps.heightmaps.constants import MAP_SCALE\nfrom il2fb.maps.heightmaps.logging import setup_logging\n\n\nLOG = logging.getLogger(__name__)\n\nISOHYPSE_COLOR = \"#303030\"\nISOHYPSE_WIDTH = 0.2\n\nCMAP_DATA = (\n (0.27058823529411763, 0.45882352941176469, 0.70588235294117652),\n (0.45490196078431372, 0.67843137254901964, 0.81960784313725488),\n (0.6705882352941176 , 0.85098039215686272, 0.9137254901960784 ),\n (0.8784313725490196 , 0.95294117647058818, 0.97254901960784312),\n (1.0 , 1.0 , 0.74901960784313726),\n (0.99607843137254903, 0.8784313725490196 , 0.56470588235294117),\n (0.99215686274509807, 0.68235294117647061, 0.38039215686274508),\n (0.95686274509803926, 0.42745098039215684, 0.2627450980392157 ),\n (0.84313725490196079, 0.18823529411764706, 0.15294117647058825),\n (0.6470588235294118 , 0.0 , 0.14901960784313725),\n)\nCMAP = LinearSegmentedColormap.from_list('il2fb-heights', CMAP_DATA, 256)\n\n\ndef load_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser(\n description=(\n \"Render heightmap of a given location of \"\n \"«IL-2 Sturmovik: Forgotten Battles»\"\n ),\n )\n parser.add_argument(\n '--height',\n dest='height',\n type=int,\n required=True,\n help=f\"Map height in meters. Must be proportional to {MAP_SCALE}\",\n )\n parser.add_argument(\n '--width',\n dest='width',\n type=int,\n required=True,\n help=f\"Map width in meters. Must be proportional to {MAP_SCALE}\",\n )\n parser.add_argument(\n '-i', '--in',\n dest='input_file_path',\n type=lambda x: Path(x).resolve(),\n default=\"heightmap.raw\",\n help=\"Input file path. Default: 'heightmap.raw'\",\n )\n parser.add_argument(\n '-o', '--out',\n dest='output_file_path',\n type=lambda x: Path(x).resolve(),\n default=\"heightmap.png\",\n help=\"Output file path. Default: 'heightmap.png'\",\n )\n parser.add_argument(\n '--isostep',\n dest='isostep',\n type=int,\n default=200,\n help=\"Step in meters between isohypses. Default: 200\",\n )\n parser.add_argument(\n '-r', '--dpi',\n dest='dpi',\n type=int,\n default=48,\n help=\"Output resolution in DPI. 
Default: 48\",\n )\n args = parser.parse_args()\n\n if args.height % MAP_SCALE != 0:\n parser.error(f\"Map height must be proportional to {MAP_SCALE}\")\n\n if args.width % MAP_SCALE != 0:\n parser.error(f\"Map width must be proportional to {MAP_SCALE}\")\n\n return args\n\n\ndef render(\n src: array,\n height: int,\n width: int,\n isostep: int,\n dpi: int,\n output_file_path: Path,\n) -> None:\n\n height = height // MAP_SCALE\n width = width // MAP_SCALE\n\n image_size = (\n (width / dpi),\n (height / dpi),\n )\n\n data = np.array(src).reshape((height, width))\n isohypses = list(range(0, data.max(), isostep))\n\n plt.clf()\n plt.axis('off')\n plt.figure(dpi=dpi)\n\n fig = plt.figure(figsize=image_size, frameon=False)\n fig.add_axes([0, 0, 1, 1])\n\n contourf(data, 256, cmap=CMAP)\n contour(data, isohypses, colors=ISOHYPSE_COLOR, linewidths=ISOHYPSE_WIDTH)\n\n output_file_path.parent.parent.mkdir(parents=True, exist_ok=True)\n plt.savefig(str(output_file_path), bbox_inches=0, dpi=dpi)\n\n\ndef main() -> None:\n setup_logging()\n\n args = load_args()\n src = array(HEIGHT_PACK_FORMAT)\n\n with args.input_file_path.open('rb') as f:\n src.frombytes(f.read())\n\n render(\n src=src,\n height=args.height,\n width=args.width,\n isostep=args.isostep,\n dpi=args.dpi,\n output_file_path=args.output_file_path,\n )\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axis",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tacticsiege/TacticToolkit | [
"74f86180776c77507f096adb0aacf5d23840b341"
] | [
"ttk/sandbox/udemy/SimpleRNNClassifier.py"
] | [
"import theano\nimport theano.tensor as T\n\nimport numpy as np\n\nfrom ttk.sandbox.udemy import init_weight\n\nimport matplotlib.pyplot as plt\n\nfrom sklearn.utils import shuffle\n\nimport time\n\nclass SimpleRNNClassifier(object):\n def __init__(self, M):\n self.M = M\n \n def fit(self, X, Y, learning_rate=10e-1, mu=0.99, reg=1.0, activation=T.tanh, epochs=100, show_fig=False):\n D = X[0].shape[1] # X is of size N x T(n) x D\n K = len(set(Y.flatten()))\n N = len(Y)\n M = self.M\n self.f = activation\n\n print ('D, K, N, M:', D, K, N, M)\n\n # initial weights\n Wx = init_weight(D, M)\n Wh = init_weight(M, M)\n bh = np.zeros(M)\n h0 = np.zeros(M)\n Wo = init_weight(M, K)\n bo = np.zeros(K)\n\n # make them theano shared\n self.Wx = theano.shared(Wx)\n self.Wh = theano.shared(Wh)\n self.bh = theano.shared(bh)\n self.h0 = theano.shared(h0)\n self.Wo = theano.shared(Wo)\n self.bo = theano.shared(bo)\n self.params = [self.Wx, self.Wh, self.bh, self.h0, self.Wo, self.bo]\n \n # theano inputs/outputs\n thX = T.fmatrix('X')\n thY = T.ivector('Y')\n #thY = T.fmatrix('Y')\n \n def recurrence(x_t, h_t1):\n # returns h(t), y(t)\n h_t = self.f(x_t.dot(self.Wx) + h_t1.dot(self.Wh) + self.bh)\n y_t = T.nnet.softmax(h_t.dot(self.Wo) + self.bo)\n return h_t, y_t\n \n [h, y], _ = theano.scan(\n fn=recurrence,\n outputs_info=[self.h0, None],\n sequences=thX,\n n_steps=thX.shape[0]\n )\n \n #print ('Shape y:', y.shape)\n #print ('y[0, 0, 0]:', y[0,0,0])\n\n py_x = y[:, 0, :]\n prediction = T.argmax(py_x, axis=1)\n \n cost = -T.mean(T.log(py_x[T.arange(thY.shape[0]), thY]))\n grads = T.grad(cost, self.params)\n # easy way to get shared var in shape of p...\n dparams = [theano.shared(p.get_value()*0) for p in self.params]\n \n updates = [\n (p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads)\n ] + [\n (dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads)\n ]\n \n self.predict_op = theano.function(\n inputs=[thX],\n outputs=prediction\n )\n self.train_op = theano.function(\n inputs=[thX, thY],\n outputs=[cost, prediction, y],\n updates=updates,\n )\n \n # training loop\n costs = []\n for i in range(epochs):\n start_time = time.time()\n print ('iteration:', i)\n X, Y = shuffle(X, Y)\n n_correct = 0\n cost = 0\n for j in range(N):\n #print ('X[j]:', X[j], 'Y[j]:', Y[j])\n \n c, p, rout = self.train_op(X[j], Y[j])\n #print ('c:', c)\n cost += c\n #print ('p[-1]:', p[-1], 'Y[j,-1]:', Y[j,-1])\n if p[-1] == Y[j, -1]:\n n_correct += 1\n print ('shape y:', rout.shape)\n print ('i:', i, 'cost:', cost, 'classification rate:', (float(n_correct) / N))\n costs.append(cost)\n end_time = time.time()\n duration = end_time - start_time\n print ('duration:', duration)\n \n if show_fig:\n plt.plot(costs)\n plt.show()\n \n\n\n"
] | [
[
"matplotlib.pyplot.plot",
"sklearn.utils.shuffle",
"matplotlib.pyplot.show",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hiroharu-kato/nmr | [
"b27780f7f574b70d338d2e947063504379ad1920"
] | [
"nmr/rasterization.py"
] | [
"import functools\nimport string\n\nimport cupy as cp\nimport torch\n\nfrom . import utils\n\n\ndef distribute(data, indices, foreground_maps, is_batch_data=False, is_batch_indices=False, default_value=-1):\n if not is_batch_data:\n data = data[indices]\n else:\n if not is_batch_indices:\n data = data[:, indices]\n else:\n if data.shape[0] == indices.shape[0] != 1:\n data = torch.stack([d[i] for d, i in zip(data, indices)])\n else:\n if data.shape[0] == 1:\n return data[0, indices]\n else:\n raise NotImplementedError\n data = mask(data, foreground_maps, default_value)\n return data\n\n\nclass Mask(torch.autograd.Function):\n @staticmethod\n def forward(ctx, data, masks, default_value):\n # PyTorch to CuPy\n device = data.device\n data_in = cp.asarray(data)\n masks = cp.asarray(masks)\n data_out = data_in.copy()\n dim = data_in.size / masks.size\n\n # distribute\n masks = cp.ascontiguousarray(masks)\n data_out = cp.ascontiguousarray(data_out)\n kernel = cp.ElementwiseKernel(\n 'raw S data_out, int64 mask',\n '',\n string.Template('''\n if (mask == 0) {\n ${dtype}* p = (${dtype}*)&data_out[i * ${dim}];\n for (int j = 0; j < ${dim}; j++) *p++ = ${default_value};\n }\n ''').substitute(\n dim=dim,\n dtype=utils.get_dtype_in_cuda(data_out.dtype),\n default_value=default_value,\n ),\n 'function',\n )\n kernel(data_out, masks)\n\n # CuPy to PyTorch\n data_out = torch.as_tensor(data_out, device=device)\n\n return data_out\n\n def backward(self, data, face):\n raise NotImplementedError\n\n\ndef downsample(data, foreground_maps):\n if foreground_maps is None:\n return (data[:, 0::2, 0::2] + data[:, 1::2, 0::2] + data[:, 0::2, 1::2] + data[:, 1::2, 1::2]) / 4.\n else:\n weights = foreground_maps\n while data.ndim != weights.ndim:\n weights = weights.unsqueeze(-1)\n d00 = data[:, 0::2, 0::2]\n d01 = data[:, 0::2, 1::2]\n d10 = data[:, 1::2, 0::2]\n d11 = data[:, 1::2, 1::2]\n w00 = weights[:, 0::2, 0::2]\n w01 = weights[:, 0::2, 1::2]\n w10 = weights[:, 1::2, 0::2]\n w11 = weights[:, 1::2, 1::2]\n data = d00 * w00 + d01 * w01 + d10 * w10 + d11 * w11\n weights = w00 + w01 + w10 + w11\n data = data / weights\n f00 = foreground_maps[:, 0::2, 0::2]\n f01 = foreground_maps[:, 0::2, 1::2]\n f10 = foreground_maps[:, 1::2, 0::2]\n f11 = foreground_maps[:, 1::2, 1::2]\n foreground_maps = functools.reduce(torch.max, (f00, f01, f10, f11))\n data = mask(data, foreground_maps)\n return data\n\n\ndef mask(data, masks, default_value=0):\n return Mask.apply(data, masks, default_value)\n\n\ndef compute_face_index_maps(vertices, faces, image_h, image_w, near, far):\n # vertices:\n # - [batch_size, num_vertices, 3] if vertices are shared in batch\n # - [1, num_vertices, 3] if vertices are not shared in batch\n # faces:\n # - [num_faces, 3]\n\n # PyTorch to CuPy\n device = vertices.device\n vertices = cp.asarray(vertices)\n faces = cp.asarray(faces)\n num_faces = faces.shape[0]\n\n # face indices to face coordinates\n faces = vertices[:, faces]\n\n #\n faces = cp.ascontiguousarray(faces)\n batch_size = vertices.shape[0]\n loop = cp.arange(batch_size * image_h * image_w).astype('int64')\n kernel = cp.ElementwiseKernel(\n 'int64 _, raw float32 faces',\n 'int64 face_index, int64 is_foreground',\n string.Template('''\n const int ih = ${image_h};\n const int iw = ${image_w};\n const int nf = ${num_faces};\n int bn = i / (ih * iw); // batch number\n const int pn = i % (ih * iw); // pixel number\n const float yp = ih - (pn / iw + 0.5);\n const float xp = pn % iw + 0.5;\n \n float* face = (float*)&faces[bn * nf * 9]; // pointer 
to current face\n float depth_min = ${far};\n int face_index_min = -1;\n is_foreground = 0;\n for (int fn = 0; fn < nf; fn++) {\n /* go to next face */\n const float x0 = *face++;\n const float y0 = *face++;\n const float z0 = *face++;\n const float x1 = *face++;\n const float y1 = *face++;\n const float z1 = *face++;\n const float x2 = *face++;\n const float y2 = *face++;\n const float z2 = *face++;\n \n /* continue if (xp, yp) is outside of the rectangle */\n if (xp < x0 && xp < x1 && xp < x2) continue;\n if (x0 < xp && x1 < xp && x2 < xp) continue;\n if (yp < y0 && yp < y1 && yp < y2) continue;\n if (y0 < yp && y1 < yp && y2 < yp) continue;\n \n /* check in or out. w0, w1, w2 should have the same sign. */\n float w0 = (yp - y1) * (x2 - x1) - (y2 - y1) * (xp - x1); \n float w1 = (yp - y2) * (x0 - x2) - (y0 - y2) * (xp - x2);\n float w2 = (yp - y0) * (x1 - x0) - (y1 - y0) * (xp - x0);\n if (w0 * w1 <= 0) continue;\n if (w1 * w2 <= 0) continue;\n\n /* normalize w */\n const float w_sum = w0 + w1 + w2;\n w0 /= w_sum;\n w1 /= w_sum;\n w2 /= w_sum;\n \n /* compute 1 / zp = sum(w / z) */\n const float zp = 1. / (w0 / z0 + w1 / z1 + w2 / z2);\n if (zp <= ${near} || ${far} <= zp) continue;\n \n /* check z-buffer */\n if (zp <= depth_min - ${depth_min_delta}) {\n depth_min = zp;\n face_index_min = fn;\n is_foreground = 1;\n }\n }\n /* set to global memory */\n face_index = face_index_min;\n ''').substitute(\n num_faces=num_faces,\n image_h=image_h,\n image_w=image_w,\n near=near,\n far=far,\n depth_min_delta=1e-4,\n ),\n 'function',\n )\n face_index_maps, foreground_maps = kernel(loop, faces)\n face_index_maps = face_index_maps.reshape((-1, image_h, image_w))\n foreground_maps = foreground_maps.reshape((-1, image_h, image_w))\n\n # CuPy to PyTorch\n face_index_maps = torch.as_tensor(face_index_maps, device=device)\n foreground_maps = torch.as_tensor(foreground_maps, device=device)\n\n return face_index_maps, foreground_maps\n\n\ndef compute_weight_map(vertex_maps, foreground_maps):\n x0 = vertex_maps[:, :, :, 0, 0]\n x1 = vertex_maps[:, :, :, 1, 0]\n x2 = vertex_maps[:, :, :, 2, 0]\n y0 = vertex_maps[:, :, :, 0, 1]\n y1 = vertex_maps[:, :, :, 1, 1]\n y2 = vertex_maps[:, :, :, 2, 1]\n\n image_h, image_w = vertex_maps.shape[-4:-2]\n yp = image_h - (torch.arange(image_h, dtype=torch.float32, device=vertex_maps.device) + 0.5)\n xp = torch.arange(image_w, dtype=torch.float32, device=vertex_maps.device) + 0.5\n yp, xp = torch.broadcast_tensors(yp[:, None], xp[None, :])\n yp = yp.unsqueeze(0)\n xp = xp.unsqueeze(0)\n\n w0 = (yp - y1) * (x2 - x1) - (y2 - y1) * (xp - x1)\n w1 = (yp - y2) * (x0 - x2) - (y0 - y2) * (xp - x2)\n w2 = (yp - y0) * (x1 - x0) - (y1 - y0) * (xp - x0)\n w = torch.stack((w0, w1, w2), dim=-1)\n w = w / w.sum(-1, keepdim=True)\n w = mask(w, foreground_maps)\n return w\n\n\ndef compute_depth_maps(vertex_maps, weight_maps, foreground_maps):\n z_maps = vertex_maps[:, :, :, :, 2]\n z_maps = 1. 
/ (weight_maps / z_maps).sum(3)\n z_maps = mask(z_maps, foreground_maps)\n return z_maps\n\n\ndef compute_normal_maps(vertex_n_w_maps, vertex_n_c_maps, vertex_maps, weight_maps, foreground_maps):\n normal_w_maps = interpolate(vertex_n_w_maps, vertex_maps, weight_maps)\n normal_c_maps = interpolate(vertex_n_c_maps, vertex_maps, weight_maps)\n return compute_normal_maps_no_weight(normal_w_maps, normal_c_maps, foreground_maps)\n\n\ndef compute_normal_maps_no_weight(normal_w_maps, normal_c_maps, foreground_maps):\n normal_sign = (normal_c_maps[:, :, :, 2] <= 0)\n normal_sign = normal_sign.unsqueeze(-1)\n normal_w_maps = normal_w_maps * normal_sign - normal_w_maps * torch.logical_not(normal_sign)\n normal_c_maps = normal_c_maps * normal_sign - normal_c_maps * torch.logical_not(normal_sign)\n normal_w_maps = normal_w_maps * torch.rsqrt(torch.sum(normal_w_maps ** 2, dim=3, keepdim=True))\n normal_c_maps = normal_c_maps * torch.rsqrt(torch.sum(normal_c_maps ** 2, dim=3, keepdim=True))\n normal_w_maps = mask(normal_w_maps, foreground_maps)\n normal_c_maps = mask(normal_c_maps, foreground_maps)\n return normal_w_maps, normal_c_maps\n\n\ndef compute_color_maps(vertex_t_maps, textures, texture_params_maps, foreground_maps):\n texture_height, texture_width = textures.shape[-3:-1]\n y_max = texture_params_maps.select(-1, 0)\n x_max = texture_params_maps.select(-1, 1)\n y_offset = texture_params_maps.select(-1, 2)\n ty_f = (1 - vertex_t_maps.select(-1, 1)) * y_max + y_offset\n tx_f = vertex_t_maps.select(-1, 0) * x_max\n ty_i_f = torch.floor(ty_f).type(torch.int64).clamp(0, texture_height - 1)\n ty_i_c = torch.ceil(ty_f).type(torch.int64).clamp(0, texture_height - 1)\n tx_i_f = torch.floor(tx_f).type(torch.int64).clamp(0, texture_width - 1)\n tx_i_c = torch.ceil(tx_f).type(torch.int64).clamp(0, texture_width - 1)\n t_i_ff = ty_i_f * texture_width + tx_i_f\n t_i_fc = ty_i_f * texture_width + tx_i_c\n t_i_cf = ty_i_c * texture_width + tx_i_f\n t_i_cc = ty_i_c * texture_width + tx_i_c\n w_ff = (1 - (ty_f - ty_i_f)) * (1 - (tx_f - tx_i_f))\n w_fc = (1 - (ty_f - ty_i_f)) * (1 - (tx_i_c - tx_f))\n w_cf = (1 - (ty_i_c - ty_f)) * (1 - (tx_f - tx_i_f))\n w_cc = (1 - (ty_i_c - ty_f)) * (1 - (tx_i_c - tx_f))\n w_sum = w_ff + w_fc + w_cf + w_cc\n w_ff = w_ff / w_sum\n w_fc = w_fc / w_sum\n w_cf = w_cf / w_sum\n w_cc = w_cc / w_sum\n t2 = textures.reshape((textures.shape[0], -1, 3))\n t_ff = distribute(t2, t_i_ff, foreground_maps, True, True, default_value=0)\n t_fc = distribute(t2, t_i_fc, foreground_maps, True, True, default_value=0)\n t_cf = distribute(t2, t_i_cf, foreground_maps, True, True, default_value=0)\n t_cc = distribute(t2, t_i_cc, foreground_maps, True, True, default_value=0)\n color_maps = (\n t_ff * w_ff.unsqueeze(-1) + t_fc * w_fc.unsqueeze(-1) +\n t_cf * w_cf.unsqueeze(-1) + t_cc * w_cc.unsqueeze(-1))\n color_maps = mask(color_maps, foreground_maps)\n return color_maps\n\n\ndef reflectance_maps(normal_w_maps, normal_c_maps):\n return torch.relu(normal_w_maps.select(-1, 1)) * 0.3 + 0.7\n # return torch.relu(-normal_c_maps[:, :, 2]) * 0.5 + 0.5\n\n\ndef compute_normals(vertices, faces):\n vs = vertices[:, faces]\n v0 = vs[:, :, 0]\n v1 = vs[:, :, 1]\n v2 = vs[:, :, 2]\n e01 = v1 - v0\n e02 = v2 - v0\n normals = torch.cross(e01, e02)\n normals = normals * torch.rsqrt((normals ** 2).sum(axis=2, keepdims=True))\n return normals\n\n\ndef interpolate(data, vertex_maps, weight_maps):\n a = ((data / vertex_maps[:, :, :, :, 2:]) * weight_maps[:, :, :, :, None]).sum(3)\n b = ((1 / vertex_maps[:, :, :, :, 2:]) * 
weight_maps[:, :, :, :, None]).sum(3)\n return a / b\n"
] | [
[
"torch.ceil",
"torch.floor",
"torch.sum",
"torch.stack",
"torch.as_tensor",
"torch.arange",
"torch.broadcast_tensors",
"torch.logical_not",
"torch.cross"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
emirhansilsupur/K132_data_science_bootcamp_homework | [
"7a39c76672cbe7dd6bf45f5e06b03305f94b9f53"
] | [
"week2/web_scraping_kdnuggets/KDnuggets_scraping.py"
] | [
"from requests_html import HTMLSession\nimport pandas as pd\n\n\n# Creating a session object that will allow us to send requests to the website.\nsession = HTMLSession()\n# Extracting the title, description and date of the news article.\ndef scraping(topic):\n \"\"\"\n The function is scraping the website and extracting the data.\n :param topic: The topic of the blog\n :return: A list of dictionaries. Each dictionary contains the title, description, date, topic and\n link of the news article.\n \"\"\"\n\n # Sending a GET request to the website and storing the response in `r`.\n r = session.get(f\"https://www.kdnuggets.com/tag/{topic}\")\n\n # Rendering the page and scrolling down the page to load all the data.\n r.html.render(sleep=1, scrolldown=1)\n\n # Finding all the `ul` tags in the page.\n blogs = r.html.find(\"ul\")\n\n # Creating an empty list and storing the data in it.\n data = list()\n\n for i in blogs:\n for j in r.html.find(\"li\"):\n try:\n blog_title = j.find(\"a\", first=True)\n blog_desc = j.find(\"div\", first=True)\n blog_date = j.find(\"font\", first=True)\n blog_tag = j.find(\"p\", first=True)\n blog_link = blog_title.absolute_links\n\n blog_data = {\n \"title\": blog_title.text.strip(),\n \"description\": blog_desc.text.strip(),\n \"topic\": blog_tag.text.strip().split(\",\")[0],\n \"date\": blog_date.text.lstrip(\"-\").rstrip(\".\"),\n \"link\": list(blog_link)[0],\n }\n data.append(blog_data)\n except:\n pass\n return data\n\n\ntopics = [\n \"artificial-intelligence\",\n \"career-advice\",\n \"computer-vision\",\n \"data-science\",\n \"machine-learning\",\n \"natural-language-processing\",\n]\ndataset = list()\n# This is a for loop that is iterating through the list of topics and calling the function\n# topic_name() for each topic.\nfor i in topics:\n dataset += scraping(i)\n# Creating a dataframe from the list of dictionaries and saving it as a csv file.\ndf = pd.DataFrame(dataset)\ndf.drop_duplicates(subset=\"title\", inplace=True)\ndf.to_csv(\"KDnuggets_data.csv\", index=False)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
pspeter/pythovolve | [
"7d5b1f7c7ab44438979ea5bd93dc872eb2b44960"
] | [
"pythovolve/plotting.py"
] | [
"from multiprocessing import Queue\nfrom typing import Sequence\n\nfrom matplotlib.axes import Axes\nfrom matplotlib.figure import Figure\n\n\nclass ProgressPlot:\n def __init__(self, max_generations: int, data_queue: Queue, fig: Figure = None,\n axes: Sequence[Axes] = None):\n\n # these imports can't be at the top of the file as they should only be\n # imported by the process running the plots. If both processes import\n # matplotlib, the child process will crash on MacOS.\n # see https://stackoverflow.com/questions/9879371\n import matplotlib.pyplot as plt\n import seaborn as sns\n from matplotlib.animation import FuncAnimation\n\n self.max_generations = max_generations\n # set to True if there's new data to plot, set to false after it has been plotted\n self.stale = False\n\n if fig is not None and axes is not None:\n self.fig = fig\n self.axes = axes\n else:\n self.fig, axes = plt.subplots()\n self.axes = [axes]\n\n self.data_queue = data_queue\n self.current_best_scores = []\n self.best_scores = []\n self.generation = 0\n\n self.total_line, = self.axes[0].plot([], [], 'r-', animated=True, label=\"Total best\")\n self.current_line, = self.axes[0].plot([], [], 'g.', animated=True, label=\"Generation best\")\n\n # setup the animation\n self.animation = FuncAnimation(self.fig, self._update, init_func=self._init,\n blit=True, interval=1000 // 20)\n\n self.legend = self.axes[0].legend()\n\n sns.set()\n plt.show()\n\n def _get_newest_data(self):\n while not self.data_queue.empty():\n current_best_score, best = self.data_queue.get()\n self.current_best_scores.append((self.generation, current_best_score))\n self.best_scores.append((self.generation, best.score))\n self.best = best\n self.generation += 1\n self.stale = True\n\n def _init(self):\n self._get_newest_data()\n\n if len(self.best_scores) > 0:\n x_max = min(self.max_generations - 1, int(len(self.best_scores) * 2 + 10))\n max_score = max(score[1] for score in self.best_scores)\n min_score = self.best.score\n y_min = min_score - (max_score - min_score) * 0.3\n y_max = max_score + (max_score - min_score) * 0.1\n y_max = y_max if not y_min == y_max else y_min + 1\n\n else:\n x_max = 100\n y_min = 0\n y_max = 1e-5\n\n ax = self.axes[0]\n ax.set_title(\"Progress\")\n ax.set_xlim(0, x_max)\n ax.set_ylim(y_min, y_max)\n ax.set_xlabel(\"Generation\")\n ax.set_ylabel(\"Score\")\n\n self.legend = ax.legend()\n\n return self.current_line, self.total_line, self.legend\n\n def _update(self, _):\n self._get_newest_data()\n\n if len(self.best_scores) > 0 and self.stale:\n ax = self.axes[0]\n\n # update range of x-axis\n _, x_max = ax.get_xlim()\n if len(self.best_scores) + 1 > x_max * 0.95 and not x_max == self.max_generations - 1:\n ax.set_xlim(0, min(self.max_generations - 1, int(x_max * 2 + 10)))\n ax.figure.canvas.draw()\n\n # update range of y-axis\n y_min, y_max = ax.get_ylim()\n max_score = max(score[1] for score in self.best_scores)\n min_score = self.best.score\n if max_score > y_max or min_score < y_min:\n new_min = min_score - (max_score - min_score) * 0.3\n new_max = max_score + (max_score - min_score) * 0.1\n new_max = new_max if not new_min == new_max else new_min + 1\n ax.set_ylim(new_min, new_max)\n ax.figure.canvas.draw()\n\n self.current_line.set_data(zip(*sorted(self.current_best_scores)))\n self.total_line.set_data(zip(*sorted(self.best_scores)))\n\n return self.current_line, self.total_line, self.legend\n\n\nclass TSPPlot(ProgressPlot):\n def __init__(self, max_generations: int, problem, data_queue: Queue):\n\n import 
matplotlib.pyplot as plt\n\n self.problem = problem\n self.path_lines = []\n self.city_points = []\n\n fig, axes = plt.subplots(1, 2, figsize=(14, 6))\n super().__init__(max_generations, data_queue, fig, axes)\n\n def _init(self):\n super()._init()\n ax = self.axes[1]\n area = self.problem.defined_area\n\n ax.set_title(\"Best Solution\")\n ax.set_xlim(area.min.x, area.max.x)\n ax.set_ylim(area.min.y, area.max.y)\n\n for point in self.city_points:\n del point\n\n x_cities = [city.x for city in self.problem.cities]\n y_cities = [city.y for city in self.problem.cities]\n self.city_points = ax.plot(x_cities, y_cities, ls=\"\", marker=\"*\", label=\"Cities\")\n\n self._plot_paths()\n\n return (self.current_line, self.total_line, self.legend, *self.path_lines)\n\n def _update(self, frame):\n super()._update(frame)\n self._plot_paths()\n return (self.current_line, self.total_line, self.legend, *self.path_lines)\n\n def _plot_paths(self):\n if not self.best_scores or not self.stale:\n return\n\n ax = self.axes[1]\n\n while self.path_lines:\n # to completely get rid of the lines, this is necessary\n # see https://stackoverflow.com/questions/4981815\n self.path_lines.pop(0).remove()\n\n path = [self.problem.cities[idx] for idx in self.best.phenotype]\n self.path_lines = ax.plot([path[-1].x, path[0].x], [path[-1].y, path[0].y], \"k-\")\n\n for start, dest in zip(path, path[1:]):\n self.path_lines += ax.plot([dest.x, start.x], [dest.y, start.y], \"k-\")\n"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.animation.FuncAnimation"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MOJTABAFA/toxoplasma-isbi2018 | [
"cd5a86e49fd560d27963dc2898104280996ad396"
] | [
"ar/ar.py"
] | [
"import numpy as np\nimport numpy.linalg as linalg\n\ndef state_space(raw_data, q):\n \"\"\"\n Performs the state-space projection of the original data using principal\n component analysis (eigen-decomposition).\n\n Parameters\n ----------\n raw_data : array, shape (N, M)\n Row-vector data points with M features.\n q : integer\n Number of principal components to keep.\n\n Returns\n -------\n X : array, shape (q, M)\n State-space projection of the original data.\n C : array, shape (N, q) the PCA matrix (useful for returning to the data space)\n Projection matrix.\n \"\"\"\n if q <= 0:\n raise Exception('Parameter \"q\" restricted to positive integer values.')\n\n # Perform the SVD on the data.\n # For full documentation on this aspect, see page 15 of Midori Hyndman's\n # master's thesis on Autoregressive modeling.\n #\n # Y = U * S * Vt,\n #\n # Y = C * X,\n #\n # So:\n # C = first q columns of U\n # S_hat = first q singular values of S\n # Vt_hat = first q rows of Vt\n #\n # X = S_hat * Vt_hat\n #\n # For the full documentation of SVD, see:\n # http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.svd.html#numpy.linalg.svd\n U, S, Vt = linalg.svd(raw_data, full_matrices = False)\n C = U[:, :q]\n Sh = np.diag(S)[:q, :q]\n Vth = Vt[:q, :]\n X = np.dot(Sh, Vth)\n return [X, C]\n\ndef appearance_space(state_data, C):\n \"\"\"\n Converts data projected into the state space back into the appearance space\n according to the projection matrix C. Effectively, this undoes the operations\n of \"state_space()\":\n\n X, C = state_space(original_data)\n original_data = appearance_space(X, C)\n\n Parameters\n ----------\n state_data : array, shape (q, M)\n The projected data, or the output \"X\" from \"state_space\".\n C : array, shape (N, q)\n The projection matrix, or the output \"C\" from \"state_space\".\n\n Returns\n -------\n X : array, shape (N, M)\n The original form of the data, or the input to \"state_space\".\n \"\"\"\n return np.dot(C, state_data)\n\ndef train(X, order = 2):\n \"\"\"\n Estimates the transition matrices A (and eventually the error parameters as\n well) for this AR model, given the order of the markov process.\n\n (in this notation, the parameter to this method \"order\" has the same value as \"q\")\n\n Parameters\n ----------\n X : array, shape (q, M) or (M,)\n Matrix of column vectors of the data (either original or state-space).\n order : integer\n Positive, non-zero integer order value for the order of the Markov process.\n\n Returns\n -------\n A : array, shape (q, q)\n Transition coefficients for the system\n \"\"\"\n if order <= 0:\n raise Exception('Parameter \"order\" restricted to positive integer values')\n W = None\n\n # A particular special case first.\n if len(X.shape) == 1:\n Xtemp = np.zeros(shape = (1, np.size(X)))\n Xtemp[0, :] = X\n X = Xtemp\n\n # What happens in this loop is so obscenely complicated that I'm pretty\n # sure I couldn't replicate if I had to, much less explain it. Nevertheless,\n # this loop allows for the calculation of n-th order transition matrices\n # of a high-dimensional system.\n #\n # I know this could be done much more simply with some np.reshape() voodoo\n # magic, but for the time being I'm entirely too lazy to do so. Plus, this\n # works. Which is good.\n for i in range(1, order + 1):\n Xt = X[:, order - i: -i]\n if W is None:\n W = np.zeros((np.size(Xt, axis = 0) * order, np.size(Xt, axis = 1)))\n W[(i - 1) * np.size(Xt, axis = 0):((i - 1) * np.size(Xt, axis = 0)) + np.size(Xt, axis = 0), ...] 
= Xt\n Xt = X[:, order:]\n A = np.dot(Xt, linalg.pinv(W))\n \n # The data structure \"A\" is actually all the transition matrices appended\n # horizontally into a single NumPy array. We need to extract them.\n matrices = []\n for i in range(0, order):\n matrices.append(A[..., i * np.size(A, axis = 0):(i * np.size(A, axis = 0)) + np.size(A, axis = 0)])\n return matrices\n\ndef test(X, A, guided = True):\n \"\"\"\n Parameters\n ----------\n X : array, shape (N, M)\n The original data (lots of column vectors).\n A : array, shape (q, q)\n List of transition matrices or coefficients (output from estimate_parameters).\n guided : boolean\n Indicates whether or not this is a \"guided\" reconstruction. If True \n (default), each \"order\" number of points is used to predict a new point, \n but the predicted point is not in turn used to predict the next point.\n If False, predicted points are used to predict new points.\n\n Returns\n -------\n Xrecon : array, shape (N, M)\n The reconstructed data. Same dimensions as X. Hopefully similar\n quantities as well.\n \"\"\"\n # The order of the markov process is, not all coincidentally, the\n # number of transition matrices we have in this list.\n order = np.size(A, axis = 0)\n\n # This is somewhat tricky. For abitrary order, we need to \n # come up with an expression for:\n #\n # Xrecon = SUM_OF_PREVIOUS_TERMS\n #\n # where SUM_OF_PREVIOUS_TERMS is constructed in a loop over \"order\" \n # previous elements in the data, multiplying each element by the \n # corresponding transition matrix/coefficient. Then that sum needs to be\n # but a single element in a larger array that has a correspondence to\n # the original X.\n Xrecon = np.zeros(X.shape)\n Xrecon[:, :order] = X[:, :order]\n for i in range(order, np.size(X, axis = 1)):\n for j in range(1, order + 1):\n\n # The second argument to np.dot() is a ternary statement, conditioning\n # on the \"guided\" boolean passed into this method: do we use the actual\n # data in estimating the next point, or previously-esimated data?\n Xrecon[:, i] += np.dot(A[j - 1], X[:, i - j] if guided else Xrecon[:, i - j])\n return Xrecon - np.mean(Xrecon, axis = 0)\n\ndef error(Y, X):\n \"\"\"\n Calculates mean squared error (MSE) of the reconstructed data (X) relative\n to the original data (Y). In theory, this number should decrease as the\n order of the markov process increases and/or the number of components\n involved in the original projection (if used) increases.\n\n Parameters\n ----------\n Y : array, shape (N, M)\n The original data.\n X : array, shape (N, M)\n The reconstructed data (output from \"test\").\n\n Returns\n -------\n MSE : array, shape (N, M)\n Same dimensions as X and Y, where each element is the MSE as if the\n corresponding element in the reconstructed data Y operated as the estimator \n for the corresponding element in the original data X.\n \"\"\"\n return (Y - X) ** 2\n"
] | [
[
"numpy.diag",
"numpy.dot",
"numpy.linalg.svd",
"numpy.linalg.pinv",
"numpy.size",
"numpy.mean",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
trummelbummel/MSfingerprinter | [
"8a7c275d4368754327ff30e38c3fd9df01ccad75",
"8a7c275d4368754327ff30e38c3fd9df01ccad75"
] | [
"MSfingerprinter/MSfingerprinter/detect_peaks.py",
"MSfingerprinter/MSfingerprinter/maxsubpatternhitset.py"
] | [
"\"\"\"Detect peaks in data based on their amplitude and other features.\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\n\n__author__ = \"Marcos Duarte, https://github.com/demotu/BMC\"\n__version__ = \"1.0.4\"\n__license__ = \"MIT\"\n\n\ndef detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',\n kpsh=False, valley=False, show=False, ax=None):\n\n \"\"\"Detect peaks in data based on their amplitude and other features.\n\n Parameters\n ----------\n x : 1D array_like\n data.\n mph : {None, number}, optional (default = None)\n detect peaks that are greater than minimum peak height.\n mpd : positive integer, optional (default = 1)\n detect peaks that are at least separated by minimum peak distance (in\n number of data).\n threshold : positive number, optional (default = 0)\n detect peaks (valleys) that are greater (smaller) than `threshold`\n in relation to their immediate neighbors.\n edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')\n for a flat peak, keep only the rising edge ('rising'), only the\n falling edge ('falling'), both edges ('both'), or don't detect a\n flat peak (None).\n kpsh : bool, optional (default = False)\n keep peaks with same height even if they are closer than `mpd`.\n valley : bool, optional (default = False)\n if True (1), detect valleys (local minima) instead of peaks.\n show : bool, optional (default = False)\n if True (1), plot data in matplotlib figure.\n ax : a matplotlib.axes.Axes instance, optional (default = None).\n\n Returns\n -------\n ind : 1D array_like\n indeces of the peaks in `x`.\n\n Notes\n -----\n The detection of valleys instead of peaks is performed internally by simply\n negating the data: `ind_valleys = detect_peaks(-x)`\n\n The function can handle NaN's\n\n See this IPython Notebook [1]_.\n\n References\n ----------\n .. 
[1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb\n\n Examples\n --------\n >>> from detect_peaks import detect_peaks\n >>> x = np.random.randn(100)\n >>> x[60:81] = np.nan\n >>> # detect all peaks and plot data\n >>> ind = detect_peaks(x, show=True)\n >>> print(ind)\n\n >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5\n >>> # set minimum peak height = 0 and minimum peak distance = 20\n >>> detect_peaks(x, mph=0, mpd=20, show=True)\n\n >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]\n >>> # set minimum peak distance = 2\n >>> detect_peaks(x, mpd=2, show=True)\n\n >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5\n >>> # detection of valleys instead of peaks\n >>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True)\n\n >>> x = [0, 1, 1, 0, 1, 1, 0]\n >>> # detect both edges\n >>> detect_peaks(x, edge='both', show=True)\n\n >>> x = [-2, 1, -2, 2, 1, 1, 3, 0]\n >>> # set threshold = 2\n >>> detect_peaks(x, threshold = 2, show=True)\n \"\"\"\n\n x = np.atleast_1d(x).astype('float64')\n if x.size < 3:\n return np.array([], dtype=int)\n if valley:\n x = -x\n # find indices of all peaks\n dx = x[1:] - x[:-1]\n # handle NaN's\n indnan = np.where(np.isnan(x))[0]\n if indnan.size:\n x[indnan] = np.inf\n dx[np.where(np.isnan(dx))[0]] = np.inf\n ine, ire, ife = np.array([[], [], []], dtype=int)\n if not edge:\n ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]\n else:\n if edge.lower() in ['rising', 'both']:\n ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]\n if edge.lower() in ['falling', 'both']:\n ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]\n ind = np.unique(np.hstack((ine, ire, ife)))\n # handle NaN's\n if ind.size and indnan.size:\n # NaN's and values close to NaN's cannot be peaks\n ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]\n # first and last values of x cannot be peaks\n if ind.size and ind[0] == 0:\n ind = ind[1:]\n if ind.size and ind[-1] == x.size-1:\n ind = ind[:-1]\n # remove peaks < minimum peak height\n if ind.size and mph is not None:\n ind = ind[x[ind] >= mph]\n # remove peaks - neighbors < threshold\n if ind.size and threshold > 0:\n dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)\n ind = np.delete(ind, np.where(dx < threshold)[0])\n # detect small peaks closer than minimum peak distance\n if ind.size and mpd > 1:\n ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height\n idel = np.zeros(ind.size, dtype=bool)\n for i in range(ind.size):\n if not idel[i]:\n # keep peaks with the same height if kpsh is True\n idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \\\n & (x[ind[i]] > x[ind] if kpsh else True)\n idel[i] = 0 # Keep current peak\n # remove the small peaks and sort back the indices by their occurrence\n ind = np.sort(ind[~idel])\n\n if show:\n if indnan.size:\n x[indnan] = np.nan\n if valley:\n x = -x\n _plot(x, mph, mpd, threshold, edge, valley, ax, ind)\n\n return ind\n\n\ndef _plot(x, mph, mpd, threshold, edge, valley, ax, ind):\n \"\"\"Plot results of the detect_peaks function, see its help.\"\"\"\n try:\n import matplotlib.pyplot as plt\n except ImportError:\n print('matplotlib is not available.')\n else:\n if ax is None:\n _, ax = plt.subplots(1, 1, figsize=(8, 4))\n\n ax.plot(x, 'b', lw=1)\n if ind.size:\n label = 'valley' if valley else 'peak'\n label = label + 's' if ind.size > 1 else label\n ax.plot(ind, x[ind], '+', mfc=None, 
mec='r', mew=2, ms=8,\n label='%d %s' % (ind.size, label))\n ax.legend(loc='best', framealpha=.5, numpoints=1)\n ax.set_xlim(-.02*x.size, x.size*1.02-1)\n ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()\n yrange = ymax - ymin if ymax > ymin else 1\n ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)\n ax.set_xlabel('Data #', fontsize=14)\n ax.set_ylabel('Amplitude', fontsize=14)\n mode = 'Valley detection' if valley else 'Peak detection'\n ax.set_title(\"%s (mph=%s, mpd=%d, threshold=%s, edge='%s')\"\n % (mode, str(mph), mpd, str(threshold), edge))\n # plt.grid()\n plt.show()\n",
"import numpy as np\nfrom treelib import Node, Tree\nimport MSfingerprinter.maxsubpatterntree as maxsubpatterntree\nimport miningperiodicpatterns\nimport operator\nimport itertools as it\nimport re\nfrom collections import defaultdict, OrderedDict\nimport itertools\n\n'''algorithm 3.2 in Efficient mining of partial periodic patterns in time series database Han et al.\nform candidate frequent maxpattern maximal pattern generated from F1 uses regex to handle optional events\nregex implementation of python can only handle 100 capturing group such that the maximum periodicity that can\nbe investigated with this implementation is 100 '''\n\ndef formCmax(F1, period_j):\n Cmax = []\n previous = 0\n # sort such that second element of (position, valuebin) tuple is ascending\n # so sort according to binvalues\n F1 = sorted(list(F1), key=lambda x: x[2][1])\n for i in F1:\n listcandidatepattern = list(i)\n # position in the period\n position = listcandidatepattern[2][0]\n # bin the value is in\n value = listcandidatepattern[2][1]\n Cmax.append((position, value))\n lengthCmax = len(Cmax)\n # Cmaxdict contains set of values at certain positions in the periodic pattern\n Cmaxdict = {}\n for key, value in Cmax:\n Cmaxdict.setdefault(key, set()).add(value)\n\n return Cmaxdict, lengthCmax\n\n\n#test for exact equality in hitset\ndef arreq_in_list(maskedsubpattern, hitset):\n return next((True for elem in hitset if np.ma.allequal(elem, maskedsubpattern)), False)\n\n# forms the 1 cycle patterns for tree base case fill the leaf level of the tree with 1-cycle patterns\ndef form1cyclepatterns(F1, periods, period_j):\n F1 = list(F1)\n periodlen = periods[period_j]\n onecyclepatterns = []\n periodlength = F1[0][0]\n for i in F1:\n pattern = np.zeros((periodlen,))\n np.put(pattern, [i[3][0]], [i[3][1]])\n onecyclepatterns.append((i[0], pattern))\n return onecyclepatterns\n\n\ndef createRegexfromCmax(Cmax):\n\n noncapturinggroups = []\n # one has to build regex as a string\n pattern = ''\n insert = []\n counter = 0\n for k,v in Cmax.items():\n subinsert = []\n if v == set([0]):\n counter += 1\n ins = '(?:' + '[0-9]' + '{0,1}\\,' + ')'\n insert.append(ins)\n\n noncapturinggroups.append(counter)\n elif v != set([0]):\n counter += 1\n for i in range(len(v)):\n\n subinsert.append(str(i) + '{0,1}')\n groupinsert = '|'.join(subinsert)\n ins = '(' + groupinsert + ')\\,'\n insert.append(ins)\n insert.append('?')\n regexpattern = ''.join(insert)\n\n return regexpattern, noncapturinggroups\n\ndef findhitregex(regexpattern, Segment_i):\n # replace anything that is not matched by pattern with 0\n subpattern = re.sub(regexpattern, '0', Segment_i)\n return subpattern\n\n\n\n\ndef findCiHitRegex(treedepth, previousnodes, Tslice, maxsubpattern, regexpattern, noncapturinggroups, maxsubpatterntreeinstance, periods, period_j, counterindexpassed, nodelist):\n # leaves are last frequent patterns from which to generate new frequent subpatterns\n # occurences gets emptied out each pattern iteration...problem!\n occurences = []\n equalitybool = False\n previoustimes = np.zeros((1, period_j))\n hitset = []\n timepointsperiodicities = []\n globcounter = 0\n boolenarray = False\n onset = None\n periodlength = periods[period_j]\n testtimeseries = Tslice['symbolicTS']\n masspoints = Tslice['m/zPeriods']\n freqpoints = Tslice['freqPeriods']\n timepoints = Tslice['timepointsPeriods']\n\n # creates a regular expression object from regexpattern that represents Cmax\n regularexpressionobject = re.compile(regexpattern)\n counter = 0\n\n # compare 
each subpattern with all periodsegments\n if regularexpressionobject:\n if counterindexpassed == 0:\n for k,v in testtimeseries.items():\n masses = masspoints[k]\n times = timepoints[k]\n freqs = freqpoints[k]\n discreteTSperiod = testtimeseries[k]\n vint = v.astype('int')\n v = vint.astype('str')\n listarray = v.tolist()\n # TODO: save the discrete TS already as list and not as np.array\n stringpattern = ','.join(listarray)\n allpatterns = regularexpressionobject.findall(stringpattern)\n if len(allpatterns) != 0:\n newnodepattern = list(allpatterns[0])\n # insert star at all noncapturinggroups (-1 cause counter is 1 greater)\n for i in noncapturinggroups:\n index = i-1\n newnodepattern.insert(index,'*')\n # if hitset contains elements then check if it is already there, if not create node\n if len(hitset) != 0:\n for i in hitset:\n name = ''.join(newnodepattern)\n # if not in hitset then create node\n if not maxsubpatterntreeinstance.contains(name):\n newnodepattern = np.array(newnodepattern)\n hitset.append([newnodepattern, 1])\n nonstarindizes = np.where(newnodepattern != '*')\n counter = 1\n parent = 'root'\n timepointsperiod = [np.take(times, nonstarindizes).tolist()]\n occuringat = [np.take(masses, nonstarindizes).tolist()]\n # this takes the freq from periodic pattern indizes\n frequencyoccuringat = np.take(freqs,nonstarindizes).tolist()\n print('creating node')\n maxsubpatterntreeinstance = maxsubpatterntree.createChildnode(frequencyoccuringat, name, maxsubpatterntreeinstance, parent, counter, onset, occuringat, timepointsperiod)\n # else increment count at node\n else:\n index = -1\n for j in hitset:\n index += 1\n\n name = ''.join(newnodepattern)\n oldpatternname = ''.join(j[0])\n\n if name == oldpatternname:\n hitset[index][1] == hitset[index][1] + 1\n node = maxsubpatterntreeinstance.get_node(name)\n node.data.count = hitset[index][1]\n\n else:\n\n newnodepattern = np.array(newnodepattern)\n hitset.append([newnodepattern, 1])\n nonstarindizes = np.where(newnodepattern != '*')\n counter = 1\n parent = 'root'\n name = ''.join(newnodepattern)\n timepointsperiod = [np.take(times, nonstarindizes).tolist()]\n occuringat = [np.take(masses, nonstarindizes).tolist()]\n # this takes the freq from periodic pattern indizes\n frequencyoccuringat = np.take(freqs,nonstarindizes).tolist()\n print('creating node')\n maxsubpatterntreeinstance = maxsubpatterntree.createChildnode(frequencyoccuringat, name, maxsubpatterntreeinstance, parent, counter, onset, occuringat, timepointsperiod)\n\n else:\n return maxsubpatterntreeinstance, hitset\n # maxsubpatterntreeinstance.show()\n return maxsubpatterntreeinstance, hitset\n\n\n\n# now set approach intersect Cmax with each period segment\ndef getmaxPatternsset(treedepth, Tslice, maxsubpatterntreeinstance, counterindexpassed, Cmax, periods, period_j, nonstarcount, nodelist):\n hitsetcomplete = []\n parent = None\n transactions = []\n counter = 0\n subpatterndict = OrderedDict()\n # function call to generate all possible patterns from Ci\n subpatterns = []\n subpatternstrings = []\n # keys are running number of index\n possiblekeys = range(periods[period_j])\n possiblekeys = set([float(i) for i in possiblekeys])\n # check in dictionary if one of the running number keys is missing and set the corresponding position to 0 always\n presentkeys = set(Cmax.keys())\n # setdifference to find nonmatching elements\n unmatchedkeys = possiblekeys.symmetric_difference(presentkeys)\n unmatchedkeyslist = list(unmatchedkeys)\n # insert set([0]) wherever key is missing 
(will always be don't care character)\n if unmatchedkeyslist:\n for k,v in Cmax.items():\n for i in unmatchedkeys:\n Cmax[i] = set([0])\n\n regexpattern, noncapturinggroups = createRegexfromCmax(Cmax)\n previousnodes = []\n maxsubpatterntreeinstance, hitset = findCiHitRegex(treedepth, previousnodes, Tslice, Cmax, regexpattern, noncapturinggroups, maxsubpatterntreeinstance, periods, period_j, counterindexpassed, nodelist)\n\n return maxsubpatterntreeinstance, hitset\n"
] | [
[
"numpy.hstack",
"numpy.isfinite",
"numpy.isnan",
"numpy.vstack",
"matplotlib.pyplot.subplots",
"numpy.sort",
"numpy.atleast_1d",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.where",
"matplotlib.pyplot.show"
],
[
"numpy.ma.allequal",
"numpy.take",
"numpy.put",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yx14/ising | [
"675349021696857ed70c8231adadf33d4aa3f2c8"
] | [
"python_with_c/main-multiprocessing_c3.py"
] | [
"import sys\r\nimport os\r\nimport time\r\nimport csv\r\nimport click\r\nimport numpy as np\r\nimport logging\r\nimport matplotlib.pyplot as plt\r\nfrom ising_c import run_ising #import run_ising function from ising.py\r\nimport multiprocessing as mp\r\nfrom IsingLattice import IsingLattice\r\nimport scipy as scipy\r\n\r\ndef run_simulation(index,temp,n,num_steps,num_burnin,num_analysis,flip_prop,j,b,data_filename,corr_filename,data_listener,corr_listener):\r\n print(\"Working on Temp {0}\".format(round(temp,4)))\r\n try:\r\n #run the Ising model\r\n lattice = IsingLattice(n, flip_prop)\r\n time_start = time.time()\r\n Msamp, Esamp = run_ising(lattice,temp,num_steps,num_burnin,j,b,disable_tqdm=True)\r\n\r\n try:\r\n #calculate statistical values\r\n #calculate statistical values\r\n M_mean = np.average(Msamp[-num_analysis:])\r\n E_mean = np.average(Esamp[-num_analysis:])\r\n M_std = np.std(Msamp[-num_analysis:])\r\n E_std = np.std(Esamp[-num_analysis:])\r\n \r\n Chi_mean = np.average(M_std**2/temp)\r\n Cv_mean = 1/temp**2*E_std**2\r\n \r\n M_analysis = Msamp[-num_analysis:]\r\n E_analysis = Esamp[-num_analysis:]\r\n Chi_var = 1/temp**2*(scipy.stats.moment(M_analysis, 4) - scipy.stats.moment(M_analysis, 2)**2)/len(M_analysis)\r\n Chi_std = (Chi_var)**0.5\r\n\r\n Cv_var = 1/temp**4*(scipy.stats.moment(E_analysis, 4) - scipy.stats.moment(E_analysis, 2)**2)/len(E_analysis)\r\n Cv_std = (Cv_var)**0.5 \r\n\r\n \r\n data_array = [np.abs(M_mean),M_std,E_mean,E_std, Chi_mean, Chi_std, Cv_mean, Cv_std]\r\n \r\n data_listener.put([temp]+data_array)\r\n\r\n corr = lattice.calc_auto_correlation()\r\n lattice.free_memory()\r\n [corr_listener.put([temp]+corr_value) for corr_value in corr]\r\n\r\n print(\"Done with Temp {0} in {1} seconds\".format(round(temp,4), round(time.time()-time_start,2)))\r\n return True\r\n\r\n except:\r\n logging.error(\"Temp=\"+str(temp)+\": Statistical Calculation Failed. No Data Written.\")\r\n return False\r\n\r\n except KeyboardInterrupt:\r\n print(\"\\n\\nProgram Terminated. Good Bye!\")\r\n data_listener.put('kill')\r\n corr_listener.put('kill')\r\n sys.exit()\r\n\r\n except:\r\n logging.error(\"Temp=\"+str(temp)+\": Simulation Failed. 
No Data Written\")\r\n\r\n#simulation options (enter python main.py --help for details)\r\[email protected]()\r\[email protected]('--t_min', default=2.0, prompt='Minimum Temp', help='Minimum Temperature (inclusive)', type=float)\r\[email protected]('--t_max', default=2.6, prompt='Maximum Temp', help='Maximum Temperature (inclusive)', type=float)\r\[email protected]('--t_step', default=0.1, prompt='Temp Step Size', help='Temperature Step Size', type=float)\r\n\r\[email protected]('--n', prompt='Lattice Size', help='Lattice Size (NxN)',type=int)\r\[email protected]('--num_steps', default=100000, help='Total Number of Steps',type=int)\r\[email protected]('--num_analysis', default=50000, help='Number of Steps used in Analysis',type=int)\r\[email protected]('--num_burnin', default=25000, help='Total Number of Burnin Steps',type=int)\r\n\r\[email protected]('--j', default=1.0, help='Interaction Strength',type=float)\r\[email protected]('--b', default=0.0, help='Applied Magnetic Field',type=float)\r\[email protected]('--flip_prop', default=0.1, help='Proportion of Spins to Consider Flipping per Step',type=float)\r\n\r\[email protected]('--output', default='data', help='Directory Name for Data Output',type=str)\r\n\r\[email protected]('--processes', default=1, help='',type=int)\r\n\r\ndef main(t_min,t_max,t_step,n,num_steps,num_analysis,num_burnin,j,b,flip_prop,output,processes):\r\n simulation_start_time = time.time()\r\n data_filename, corr_filename = initialize_simulation(n,num_steps,num_analysis,num_burnin,output,j,b,flip_prop)\r\n run_processes(processes,t_min,t_max,t_step,n,num_steps,num_burnin,num_analysis,flip_prop,j,b,data_filename,corr_filename)\r\n simulation_duration = round((time.time() - simulation_start_time)/60.0,2)\r\n print('\\n\\nSimulation finished in {0} minutes. Data written to {1}.'.format(simulation_duration,data_filename))\r\n\r\ndef initialize_simulation(n,num_steps,num_analysis,num_burnin,output,j,b,flip_prop):\r\n check_step_values(num_steps, num_analysis, num_burnin)\r\n data_filename, corr_filename = get_filenames(output)\r\n write_sim_parameters(data_filename,corr_filename,n,num_steps,num_analysis,num_burnin,j,b,flip_prop)\r\n print('\\nSimulation Started! Data will be written to ' + data_filename + '\\n')\r\n return data_filename, corr_filename\r\n\r\ndef check_step_values(num_steps,num_analysis,num_burnin): #simulation size checks and exceptions\r\n if (num_burnin > num_steps):\r\n raise ValueError('num_burning cannot be greater than available num_steps. Exiting simulation.')\r\n\r\n if (num_analysis > num_steps - num_burnin):\r\n raise ValueError('num_analysis cannot be greater than available num_steps after burnin. Exiting simulation.')\r\n\r\ndef get_filenames(dirname): #make data folder if doesn't exist, then specify filename\r\n try:\r\n if not os.path.exists(dirname):\r\n os.makedirs(dirname)\r\n data_filename = os.path.join(dirname,'data_'+str(time.strftime(\"%Y%m%d-%H%M%S\"))+\".csv\")\r\n corr_filename = os.path.join(dirname,'corr_'+str(time.strftime(\"%Y%m%d-%H%M%S\"))+\".csv\")\r\n #Write simulation parameters to file\r\n return data_filename, corr_filename\r\n except:\r\n raise ValueError('Directory name not valid. Exiting simulation.')\r\n sys.exit()\r\n\r\ndef get_temp_array(t_min,t_max,t_step):\r\n if (t_min > t_max):\r\n raise ValueError('T_min cannot be greater than T_max. Exiting Simulation')\r\n sys.exit()\r\n try:\r\n T = np.arange(t_min,t_max,t_step).tolist()\r\n return T\r\n except:\r\n raise ValueError('Error creating temperature array. 
Exiting simulation.')\r\n sys.exit()\r\n\r\ndef write_sim_parameters(data_filename,corr_filename,n,num_steps,num_analysis,num_burnin,j,b,flip_prop):\r\n try:\r\n with open(data_filename,'w') as csv_file:\r\n writer = csv.writer(csv_file, delimiter=',', lineterminator='\\n')\r\n #Write simulations parameters to CSV file\r\n writer.writerow(['Lattice Size (NxN)','Total Steps','Steps Used in Analysis','Burnin Steps','Interaction Strength','Applied Mag Field','Spin Prop'])\r\n writer.writerow([n,num_steps,num_analysis,num_burnin,j,b,flip_prop])\r\n writer.writerow([])\r\n with open(corr_filename,'w') as csv_file:\r\n writer = csv.writer(csv_file, delimiter=',', lineterminator='\\n')\r\n #Write simulations parameters to CSV file\r\n writer.writerow(['Lattice Size (NxN)','Total Steps','Steps Used in Analysis','Burnin Steps','Interaction Strength','Applied Mag Field','Spin Prop'])\r\n writer.writerow([n,num_steps,num_analysis,num_burnin,j,b,flip_prop])\r\n writer.writerow([])\r\n except:\r\n logging.error('Could not save simulation parameters. Exiting simulation')\r\n sys.exit()\r\n\r\ndef compute_autocorrelation(spin):\r\n n = len(spin)\r\n corr_array = []\r\n for k in range(1,int(n/2)):\r\n col_mean, row_mean = spin.mean(axis=0),spin.mean(axis=1)\r\n #compute r values for rows and cols\r\n r_col = [np.multiply(spin[j,:]-col_mean,spin[(j+k)%n,:]-col_mean) for j in range(1,n)]\r\n r_row = [np.multiply(spin[:,j]-row_mean,spin[:,(j+k)%n]-row_mean) for j in range(1,n)]\r\n #normalize r values\r\n r_col = np.divide(r_col,float(n))\r\n r_row = np.divide(r_row,float(n))\r\n #calculate corr for k and add it to array\r\n corr = (r_col.mean() + r_row.mean())/2.0\r\n corr_array.append([k,corr])\r\n return corr_array\r\n\r\ndef listener(q, fn):\r\n '''listens for messages on the q, writes to file. '''\r\n f = open(fn, 'a') \r\n writer = csv.writer(f, delimiter=',', lineterminator='\\n')\r\n while 1:\r\n m = q.get()\r\n if m == 'kill':\r\n break\r\n writer.writerow(m)\r\n f.flush()\r\n f.close()\r\n\r\ndef run_processes(processes,t_min,t_max,t_step,n,num_steps,num_burnin,num_analysis,flip_prop,j,b,data_filename,corr_filename):\r\n \r\n T = get_temp_array(t_min, t_max, t_step)\r\n \r\n #must use Manager queue here, or will not work\r\n manager = mp.Manager()\r\n data_listener = manager.Queue()\r\n corr_listener = manager.Queue() \r\n pool = mp.Pool(mp.cpu_count() + 2)\r\n\r\n #put listener to work first\r\n data_watcher = pool.apply_async(listener, args=(data_listener, data_filename,))\r\n corr_watcher = pool.apply_async(listener, args=(corr_listener, corr_filename,))\r\n\r\n #fire off workers \r\n jobs = [pool.apply_async(run_simulation, args=(index,temp,n,num_steps,num_burnin,num_analysis,flip_prop,j,b,data_filename,corr_filename,data_listener,corr_listener,)) for index,temp in enumerate(T)]\r\n\r\n # collect results from the workers through the pool result queue \r\n [job.get() for job in jobs]\r\n\r\n #now we are done, kill the listener\r\n data_listener.put('kill')\r\n corr_listener.put('kill')\r\n pool.close()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"
] | [
[
"scipy.stats.moment",
"numpy.abs",
"numpy.multiply",
"numpy.arange",
"numpy.std",
"numpy.average"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
yselivonchyk/DCIGN_tensorflow | [
"ff8d85f3a7b7ca1e5c3f50ff003a1c09a70067cd"
] | [
"autoencoder.py"
] | [
"\"\"\"MNIST Autoencoder. \"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport numpy as np\nimport tensorflow as tf\nimport utils as ut\nimport input as inp\nimport visualization as vis\nimport matplotlib.pyplot as plt\nimport time\nimport sys\nimport getch\nimport model_interpreter as interpreter\nimport network_utils as nut\nimport math\nfrom tensorflow.contrib.tensorboard.plugins import projector\nfrom Bunch import Bunch\n\n\ntf.app.flags.DEFINE_string('input_path', '../data/tmp/grid03.14.c.tar.gz', 'input folder')\ntf.app.flags.DEFINE_string('input_name', '', 'input folder')\ntf.app.flags.DEFINE_string('test_path', '', 'test set folder')\ntf.app.flags.DEFINE_string('net', 'f100-f3', 'model configuration')\ntf.app.flags.DEFINE_string('model', 'noise', 'Type of the model to use: Autoencoder (ae)'\n 'WhatWhereAe (ww) U-netAe (u)')\ntf.app.flags.DEFINE_string('postfix', '', 'Postfix for the training folder')\n\ntf.app.flags.DEFINE_float('alpha', 10, 'Predictive reconstruction loss weight')\ntf.app.flags.DEFINE_float('beta', 0.0005, 'Reconstruction from noisy data loss weight')\ntf.app.flags.DEFINE_float('epsilon', 0.000001,\n 'Diameter of epsilon sphere comparing to distance to a neighbour. <= 0.5')\ntf.app.flags.DEFINE_float('gamma', 50., 'Loss weight for large distances')\ntf.app.flags.DEFINE_float('distance', 0.01, 'Maximum allowed interpoint distance')\ntf.app.flags.DEFINE_float('delta', 1., 'Loss weight for stacked objective')\n\ntf.app.flags.DEFINE_string('comment', '', 'Comment to leave by the model')\n\ntf.app.flags.DEFINE_float('test_max', 10000, 'max number of examples in the test set')\n\ntf.app.flags.DEFINE_integer('max_epochs', 0, 'Train for at most this number of epochs')\ntf.app.flags.DEFINE_integer('save_every', 250, 'Save model state every INT epochs')\ntf.app.flags.DEFINE_integer('eval_every', 25, 'Save encoding and visualizations every')\ntf.app.flags.DEFINE_integer('visualiza_max', 10, 'Max pairs to show on visualization')\ntf.app.flags.DEFINE_boolean('load_state', True, 'Load state if possible ')\ntf.app.flags.DEFINE_boolean('kill_depth', False, 'Ignore depth information')\ntf.app.flags.DEFINE_boolean('dev', False, 'Indicate development mode')\ntf.app.flags.DEFINE_integer('batch_size', 128, 'Batch size')\ntf.app.flags.DEFINE_float('learning_rate', 0.0001, 'Create visualization of ')\n\ntf.app.flags.DEFINE_float('blur', 5.0, 'Max sigma value for Gaussian blur applied to training set')\ntf.app.flags.DEFINE_boolean('new_blur', False, 'Use data augmentation as blur info')\ntf.app.flags.DEFINE_integer('blur_decrease', 10000, 'Decrease image blur every X steps')\n\nFLAGS = tf.app.flags.FLAGS\nslim = tf.contrib.slim\n\n\nAUTOENCODER = 'ae'\nPREDICTIVE = 'pred'\nDENOISING = 'noise'\n\nCHECKPOINT_NAME = '-9999.chpt'\nEMB_SUFFIX = '_embedding'\n\n\ndef is_stopping_point(current_epoch, epochs_to_train, stop_every=None, stop_x_times=None,\n stop_on_last=True):\n if stop_on_last and current_epoch + 1 == epochs_to_train:\n return True\n if stop_x_times is not None:\n return current_epoch % np.ceil(epochs_to_train / float(stop_x_times)) == 0\n if stop_every is not None:\n return (current_epoch + 1) % stop_every == 0\n\n\ndef _fetch_dataset(path, take=None):\n dataset = inp.read_ds_zip(path) # read\n take = len(dataset) if take is None else take\n dataset = dataset[:take]\n # print(dataset.dtype, dataset.shape, np.min(dataset), np.max(dataset))\n # 
dataset = inp.rescale_ds(dataset, 0, 1)\n if FLAGS.kill_depth:\n dataset[..., -1] = 0\n ut.print_info('DS fetch: %8d (%s)' % (len(dataset), path))\n return dataset\n\n\ndef l2(x):\n l = x.get_shape().as_list()[0]\n return tf.reshape(tf.sqrt(tf.reduce_sum(x ** 2, axis=1)), (l, 1))\n\n\ndef get_stats_template():\n return Bunch(\n batch=[],\n input=[],\n encoding=[],\n reconstruction=[],\n total_loss=0.,\n start=time.time())\n\n\ndef guard_nan(x):\n return x if not math.isnan(x) else -1.\n\n\ndef _blur_expand(input):\n k_size = 9\n kernels = [2, 4, 6]\n channels = [input] + [nut.blur_gaussian(input, k, k_size)[0] for k in kernels]\n res = tf.concat(channels, axis=3)\n return res\n\n\nclass Autoencoder:\n train_set, test_set = None, None\n permutation = None\n batch_shape = None\n epoch_size = None\n\n input, target = None, None # AE placeholders\n encode, decode = None, None # AE operations\n model = None # interpreted model\n\n encoding = None # AE predictive evaluation placeholder\n eval_decode, eval_loss = None, None # AE evaluation\n\n inputs, targets = None, None # Noise/Predictive placeholders\n raw_inputs, raw_targets = None, None # inputs in network-friendly representation\n models = None # Noise/Predictive interpreted models\n\n optimizer, _train = None, None\n loss_ae, loss_reco, loss_pred, loss_dn = None, None, None, None # Objectives\n loss_total = None\n losses = []\n\n step = None # operation\n step_var = None # variable\n\n vis_summary, vis_placeholder = None, None\n image_summaries = None\n visualization_batch_perm = None\n\n\n def __init__(self, optimizer=tf.train.AdamOptimizer, need_forlders=True):\n self.optimizer_constructor = optimizer\n FLAGS.input_name = inp.get_input_name(FLAGS.input_path)\n if need_forlders:\n ut.configure_folders(FLAGS)\n ut.print_flags(FLAGS)\n\n # MISC\n\n\n def get_past_epochs(self):\n return int(self.step.eval() / self.epoch_size)\n\n @staticmethod\n def get_checkpoint_path():\n # print(os.path.join(FLAGS.save_path, CHECKPOINT_NAME), len(CHECKPOINT_NAME))\n return os.path.join(FLAGS.save_path, CHECKPOINT_NAME)\n\n def get_latest_checkpoint(self):\n return tf.train.latest_checkpoint(\n self.get_checkpoint_path()[:-len(EMB_SUFFIX)],\n latest_filename='checkpoint'\n )\n\n\n # DATA\n\n\n def fetch_datasets(self):\n if FLAGS.max_epochs == 0:\n FLAGS.input_path = FLAGS.test_path\n self.train_set = _fetch_dataset(FLAGS.input_path)\n self.epoch_size = int(self.train_set.shape[0] / FLAGS.batch_size)\n self.batch_shape = [FLAGS.batch_size] + list(self.train_set.shape[1:])\n\n reuse_train = FLAGS.test_path == FLAGS.input_path or FLAGS.test_path == ''\n self.test_set = self.train_set.copy() if reuse_train else _fetch_dataset(FLAGS.test_path)\n take_test = int(FLAGS.test_max) if FLAGS.test_max > 1 else int(FLAGS.test_max * len(self.test_set))\n ut.print_info('take %d from test' % take_test)\n self.test_set = self.test_set[:take_test]\n\n def _batch_generator(self, x=None, y=None, shuffle=True, batches=None):\n \"\"\"Returns BATCH_SIZE of couples of subsequent images\"\"\"\n x = x if x is not None else self._get_blurred_dataset()\n y = y if y is not None else x\n batches = batches if batches is not None else int(np.floor(len(x) / FLAGS.batch_size))\n self.permutation = np.arange(len(x))\n self.permutation = self.permutation if not shuffle else np.random.permutation(self.permutation)\n\n for i in range(batches):\n batch_indexes = self.permutation[i * FLAGS.batch_size:(i + 1) * FLAGS.batch_size]\n # batch = np.stack((dataset[batch_indexes], dataset[batch_indexes 
+ 1], dataset[batch_indexes + 2]), axis=1)\n yield x[batch_indexes], y[batch_indexes]\n\n def _batch_permutation_generator(self, length, start=0, shuffle=True, batches=None):\n self.permutation = np.arange(length) + start\n self.permutation = self.permutation if not shuffle else np.random.permutation(self.permutation)\n for i in range(int(length/FLAGS.batch_size)):\n if batches is not None and i >= batches:\n break\n yield self.permutation[i * FLAGS.batch_size:(i + 1) * FLAGS.batch_size]\n\n _blurred_dataset, _last_blur = None, 0\n\n def _get_blur_sigma(self):\n calculated_sigma = FLAGS.blur - int(10 * self.step.eval() / FLAGS.blur_decrease) / 10.0\n return max(0, calculated_sigma)\n\n # @ut.timeit\n def _get_blurred_dataset(self):\n if FLAGS.blur != 0:\n current_sigma = self._get_blur_sigma()\n if current_sigma != self._last_blur:\n # print(self._last_blur, current_sigma)\n self._last_blur = current_sigma\n self._blurred_dataset = inp.apply_gaussian(self.train_set, sigma=current_sigma)\n ut.print_info('blur s:%.1f[%.1f>%.1f]' % (current_sigma, self.train_set[2, 10, 10, 0], self._blurred_dataset[2, 10, 10, 0]))\n return self._blurred_dataset if self._blurred_dataset is not None else self.train_set\n return self.train_set\n\n\n# TRAIN\n\n\n def build_ae_model(self):\n self.input = tf.placeholder(tf.uint8, self.batch_shape, name='input')\n self.target = tf.placeholder(tf.uint8, self.batch_shape, name='target')\n self.step = tf.Variable(0, trainable=False, name='global_step')\n root = self._image_to_tensor(self.input)\n target = self._image_to_tensor(self.target)\n\n model = interpreter.build_autoencoder(root, FLAGS.net)\n\n self.encode = model.encode\n\n self.model = model\n self.encoding = tf.placeholder(self.encode.dtype, self.encode.get_shape(), name='encoding')\n eval_decode = interpreter.build_decoder(self.encoding, model.config, reuse=True)\n print(target, eval_decode)\n self.eval_loss = interpreter.l2_loss(target, eval_decode, name='predictive_reconstruction')\n self.eval_decode = self._tensor_to_image(eval_decode)\n\n self.loss_ae = interpreter.l2_loss(target, model.decode, name='reconstruction')\n self.decode = self._tensor_to_image(model.decode)\n self.losses = [self.loss_ae]\n\n def build_predictive_model(self):\n self.build_ae_model() # builds on top of AE model. 
Due to auxilary operations init\n self.inputs = tf.placeholder(tf.uint8, [3] + self.batch_shape, name='inputs')\n self.targets = tf.placeholder(tf.uint8, [3] + self.batch_shape, name='targets')\n\n # transform inputs\n self.raw_inputs = [self._image_to_tensor(self.inputs[i]) for i in range(3)]\n self.raw_targets = [self._image_to_tensor(self.targets[i]) for i in range(3)]\n\n # build AE objective for triplet\n config = self.model.config\n models = [interpreter.build_autoencoder(x, config) for x in self.raw_inputs]\n reco_losses = [1./3 * interpreter.l2_loss(models[i].decode, self.raw_targets[i]) for i in range(3)] # business as usual\n self.models = models\n\n # build predictive objective\n pred_loss_2 = self._prediction_decode(models[1].encode*2 - models[0].encode, self.raw_targets[2], models[2])\n pred_loss_0 = self._prediction_decode(models[1].encode*2 - models[2].encode, self.raw_targets[0], models[0])\n\n # build regularized distance objective\n dist_loss1 = self._distance_loss(models[1].encode - models[0].encode)\n dist_loss2 = self._distance_loss(models[1].encode - models[2].encode)\n\n # Stitch it all together and train\n self.loss_reco = tf.add_n(reco_losses)\n self.loss_pred = pred_loss_0 + pred_loss_2\n self.loss_dist = dist_loss1 + dist_loss2\n self.losses = [self.loss_reco, self.loss_pred]\n\n def _distance_loss(self, distances):\n error = tf.nn.relu(l2(distances) - FLAGS.distance ** 2)\n return tf.reduce_sum(error)\n\n def _prediction_decode(self, prediction, target, model):\n \"\"\"Predict encoding t3 by encoding (t2 and t1) and expect a good reconstruction\"\"\"\n predict_decode = interpreter.build_decoder(prediction, self.model.config, reuse=True, masks=model.mask_list)\n predict_loss = 1./2 * interpreter.l2_loss(predict_decode, target, alpha=FLAGS.alpha)\n self.models += [predict_decode]\n return predict_loss * FLAGS.gamma\n\n\n def build_denoising_model(self):\n self.build_predictive_model() # builds on top of predictive model. 
Reuses triplet encoding\n\n # build denoising objective\n models = self.models\n self.loss_dn = self._noisy_decode(models[1])\n self.losses = [self.loss_reco, self.loss_pred, self.loss_dist, self.loss_dn]\n\n def _noisy_decode(self, model):\n \"\"\"Distort middle encoding with [<= 1/3*dist(neigbour)] and demand good reconstruction\"\"\"\n # dist = l2(x1 - x2)\n # noise = dist * self.epsilon_sphere_noise()\n # tf.stop_gradient(noise)\n noise = tf.random_normal(self.model.encode.get_shape().as_list()) * FLAGS.epsilon\n noisy_encoding = noise + self.models[1].encode\n tf.stop_gradient(noisy_encoding) # or maybe here, who knows\n noisy_decode = interpreter.build_decoder(noisy_encoding, model.config, reuse=True, masks=model.mask_list)\n loss = interpreter.l2_loss(noisy_decode, self.raw_targets[1], alpha=FLAGS.beta)\n self.models += [noisy_decode]\n return loss\n\n def _tensor_to_image(self, net):\n with tf.name_scope('to_image'):\n if FLAGS.new_blur:\n net = net[..., :self.batch_shape[-1]]\n net = tf.nn.relu(net)\n net = tf.cast(net <= 1, net.dtype) * net * 255\n net = tf.cast(net, tf.uint8)\n return net\n\n def _image_to_tensor(self, image):\n with tf.name_scope('args_transform'):\n net = tf.cast(image, tf.float32) / 255.\n if FLAGS.new_blur:\n net = _blur_expand(net)\n FLAGS.blur = 0.\n return net\n\n def _init_optimizer(self):\n self.loss_total = tf.add_n(self.losses, 'loss_total')\n self.optimizer = self.optimizer_constructor(learning_rate=FLAGS.learning_rate)\n self._train = self.optimizer.minimize(self.loss_total, global_step=self.step)\n\n\n# MAIN\n\n\n def train(self):\n self.fetch_datasets()\n if FLAGS.model == AUTOENCODER:\n self.build_ae_model()\n elif FLAGS.model == PREDICTIVE:\n self.build_predictive_model()\n else:\n self.build_denoising_model()\n self._init_optimizer()\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n self._on_training_start(sess)\n\n try:\n for current_epoch in range(FLAGS.max_epochs):\n start = time.time()\n full_set_blur = len(self.train_set) < 50000\n ds = self._get_blurred_dataset() if full_set_blur else self.train_set\n if FLAGS.model == AUTOENCODER:\n\n # Autoencoder Training\n for batch in self._batch_generator():\n summs, encoding, reconstruction, loss, _, step = sess.run(\n [self.summs_train, self.encode, self.decode, self.loss_ae, self.train_ae, self.step],\n feed_dict={self.input: batch[0], self.target: batch[1]}\n )\n self._on_batch_finish(summs, loss, batch, encoding, reconstruction)\n\n else:\n\n # Predictive and Denoising training\n for batch_indexes in self._batch_permutation_generator(len(ds)-2):\n batch = np.stack((ds[batch_indexes], ds[batch_indexes + 1], ds[batch_indexes + 2]))\n if not full_set_blur:\n batch = np.stack((\n inp.apply_gaussian(ds[batch_indexes], sigma=self._get_blur_sigma()),\n inp.apply_gaussian(ds[batch_indexes+1], sigma=self._get_blur_sigma()),\n inp.apply_gaussian(ds[batch_indexes+2], sigma=self._get_blur_sigma())\n ))\n\n summs, loss, _ = sess.run(\n [self.summs_train, self.loss_total, self._train],\n feed_dict={self.inputs: batch, self.targets: batch})\n self._on_batch_finish(summs, loss)\n\n self._on_epoch_finish(current_epoch, start, sess)\n self._on_training_finish(sess)\n except KeyboardInterrupt:\n self._on_training_abort(sess)\n\n def inference(self, max=10^6):\n self.fetch_datasets()\n self.build_ae_model()\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n # nut.print_model_info()\n # nut.list_checkpoint_vars(self.get_latest_checkpoint().replace(EMB_SUFFIX, 
''))\n\n self.saver = tf.train.Saver()\n self._restore_model(sess)\n # nut.print_model_info()\n\n encoding, decoding = None, None\n for i in range(len(self.train_set)):\n batch = np.expand_dims(self.train_set[i], axis=0)\n enc, dec = sess.run(\n [self.encode, self.decode],\n feed_dict={self.input: batch}\n )\n\n # print(enc.shape, dec.shape)\n encoding = enc if i == 0 else np.vstack((encoding, enc))\n decoding = dec if i == 0 else np.vstack((decoding, dec))\n print('\\r%5d/%d' % (i, len(self.train_set)), end='')\n if i >= max:\n break\n return encoding, decoding\n\n # @ut.timeit\n def evaluate(self, sess, take):\n digest = Bunch(encoded=None, reconstructed=None, source=None,\n loss=.0, eval_loss=.0, dumb_loss=.0)\n blurred = inp.apply_gaussian(self.test_set, self._get_blur_sigma())\n # Encode\n for i, batch in enumerate(self._batch_generator(blurred, shuffle=False)):\n encoding = self.encode.eval(feed_dict={self.input: batch[0]})\n digest.encoded = ut.concatenate(digest.encoded, encoding)\n # Save encoding for visualization\n encoded_no_nan = np.nan_to_num(digest.encoded)\n self.embedding_assign.eval(feed_dict={self.embedding_test_ph: encoded_no_nan})\n try:\n self.embedding_saver.save(sess, self.get_checkpoint_path() + EMB_SUFFIX)\n except:\n ut.print_info(\"Unexpected error: %s\" % str(sys.exc_info()[0]), color=33)\n\n # Calculate expected evaluation\n expected = digest.encoded[1:-1]*2 - digest.encoded[:-2]\n average = 0.5 * (digest.encoded[1:-1] + digest.encoded[:-2])\n digest.size = len(expected)\n # evaluation summaries\n self.summary_writer.add_summary(self.eval_summs.eval(\n feed_dict={self.blur_ph: self._get_blur_sigma()}),\n global_step=self.get_past_epochs())\n # evaluation losses\n for p in self._batch_permutation_generator(digest.size, shuffle=False):\n digest.loss += self.eval_loss.eval(feed_dict={self.encoding: digest.encoded[p + 2], self.target: blurred[p + 2]})\n digest.eval_loss += self.eval_loss.eval(feed_dict={self.encoding: expected[p], self.target: blurred[p + 2]})\n digest.dumb_loss += self.loss_ae.eval( feed_dict={self.input: blurred[p], self.target: blurred[p + 2]})\n\n # for batch in self._batch_generator(blurred, batches=1):\n # digest.source = batch[1][:take]\n # digest.reconstructed = self.decode.eval(feed_dict={self.input: batch[0]})[:take]\n\n # Reconstruction visualizations\n for p in self._batch_permutation_generator(digest.size, shuffle=True, batches=1):\n self.visualization_batch_perm = self.visualization_batch_perm if self.visualization_batch_perm is not None else p\n p = self.visualization_batch_perm\n digest.source = self.eval_decode.eval(feed_dict={self.encoding: expected[p]})[:take]\n digest.source = blurred[(p+2)[:take]]\n digest.reconstructed = self.eval_decode.eval(feed_dict={self.encoding: average[p]})[:take]\n self._eval_image_summaries(blurred[p], digest.encoded[p], average[p], expected[p])\n\n digest.dumb_loss = guard_nan(digest.dumb_loss)\n digest.eval_loss = guard_nan(digest.eval_loss)\n digest.loss = guard_nan(digest.loss)\n return digest\n\n def _eval_image_summaries(self, blurred_batch, actual, average, expected):\n \"\"\"Create Tensorboard summaries with image reconstructions\"\"\"\n noisy = expected + np.random.randn(*expected.shape) * FLAGS.epsilon\n\n summary = self.image_summaries['orig'].eval(feed_dict={self.input: blurred_batch})\n self.summary_writer.add_summary(summary, global_step=self.get_past_epochs())\n\n self._eval_image_summary('midd', average)\n # self._eval_image_summary('reco', actual)\n self._eval_image_summary('pred', 
expected)\n self._eval_image_summary('nois', noisy)\n\n def _eval_image_summary(self, name, encdoding_batch):\n summary = self.image_summaries[name].eval(feed_dict={self.encoding: encdoding_batch})\n self.summary_writer.add_summary(summary, global_step=self.get_past_epochs())\n\n def _add_decoding_summary(self, name, var, collection='train'):\n var = var[:FLAGS.visualiza_max]\n var = tf.concat(tf.unstack(var), axis=0)\n var = tf.expand_dims(var, dim=0)\n color_s = tf.summary.image(name, var[..., :3], max_outputs=FLAGS.visualiza_max)\n var = tf.expand_dims(var[..., 3], dim=3)\n bw_s = tf.summary.image('depth_' + name, var, max_outputs=FLAGS.visualiza_max)\n return tf.summary.merge([color_s, bw_s])\n\n\n# TRAINING PROGRESS EVENTS\n\n\n def _on_training_start(self, sess):\n # Writers and savers\n self.summary_writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph)\n self.saver = tf.train.Saver()\n self._build_embedding_saver(sess)\n self._restore_model(sess)\n # Loss summaries\n self._build_summaries()\n\n self.epoch_stats = get_stats_template()\n self.stats = Bunch(\n epoch_accuracy=[],\n epoch_reconstructions=[],\n permutation=None\n )\n # if FLAGS.dev:\n # plt.ion()\n # plt.show()\n\n def _build_summaries(self):\n # losses\n with tf.name_scope('losses'):\n loss_names = ['loss_autoencoder', 'loss_predictive', 'loss_distance', 'loss_denoising']\n for i, loss in enumerate(self.losses):\n self._add_loss_summary(loss_names[i], loss)\n self._add_loss_summary('loss_total', self.loss_total)\n self.summs_train = tf.summary.merge_all('train')\n # reconstructions\n with tf.name_scope('decodings'):\n self.image_summaries = {\n 'orig': self._add_decoding_summary('0_original_input', self.input),\n 'reco': self._add_decoding_summary('1_reconstruction', self.eval_decode),\n 'pred': self._add_decoding_summary('2_prediction', self.eval_decode),\n 'midd': self._add_decoding_summary('3_averaged', self.eval_decode),\n 'nois': self._add_decoding_summary('4_noisy', self.eval_decode)\n }\n # visualization\n fig = vis.get_figure()\n fig.canvas.draw()\n self.vis_placeholder = tf.placeholder(tf.uint8, ut.fig2rgb_array(fig).shape)\n self.vis_summary = tf.summary.image('visualization', self.vis_placeholder)\n # embedding\n dists = l2(self.embedding_test[:-1] - self.embedding_test[1:])\n self.dist = dists\n metrics = []\n\n metrics.append(tf.summary.histogram('point_distance', dists))\n metrics.append(tf.summary.scalar('training/trajectory_length', tf.reduce_sum(dists)))\n self.blur_ph = tf.placeholder(dtype=tf.float32)\n metrics.append(tf.summary.scalar('training/blur_sigma', self.blur_ph))\n\n pred = self.embedding_test[1:-1]*2 - self.embedding_test[0:-2]\n pred_error = l2(pred - self.embedding_test[2:])\n\n mean_dist, mean_pred_error = tf.reduce_mean(dists), tf.reduce_mean(pred_error)\n improvement = (mean_dist-mean_pred_error)/mean_dist\n\n pairwise_improvement = tf.nn.relu(dists[1:] - pred_error)\n pairwise_improvement_bool = tf.cast(pairwise_improvement > 0, pairwise_improvement.dtype)\n self.pairwise_improvement_bool = pairwise_improvement_bool\n\n metrics.append(tf.summary.scalar('training/avg_dist', mean_dist))\n metrics.append(tf.summary.scalar('training/pred_dist', mean_pred_error))\n metrics.append(tf.summary.scalar('training/improvement', improvement))\n metrics.append(tf.summary.scalar('training/improvement_abs', tf.nn.relu(improvement)))\n metrics.append(tf.summary.histogram('training/improvement_abs_hist', nut.nan_to_zero(improvement)))\n 
metrics.append(tf.summary.scalar('training/improvement_pairwise', tf.reduce_mean(pairwise_improvement_bool)))\n metrics.append(tf.summary.histogram('training/improvement_pairwise_hist', pairwise_improvement_bool))\n self.eval_summs = tf.summary.merge(metrics)\n\n\n def _build_embedding_saver(self, sess):\n \"\"\"To use embedding visualizer data has to be stored in variable\n since we would like to visualize TEST_SET, this variable should not affect\n common checkpoint of the model.\n Hence, we build a separate variable with a separate saver.\"\"\"\n embedding_shape = [int(len(self.test_set) / FLAGS.batch_size) * FLAGS.batch_size,\n self.encode.get_shape().as_list()[1]]\n tsv_path = os.path.join(FLAGS.logdir, 'metadata.tsv')\n\n self.embedding_test_ph = tf.placeholder(tf.float32, embedding_shape, name='embedding')\n self.embedding_test = tf.Variable(tf.random_normal(embedding_shape), name='test_embedding', trainable=False)\n self.embedding_assign = self.embedding_test.assign(self.embedding_test_ph)\n self.embedding_saver = tf.train.Saver(var_list=[self.embedding_test])\n\n config = projector.ProjectorConfig()\n embedding = config.embeddings.add()\n embedding.tensor_name = self.embedding_test.name\n embedding.sprite.image_path = './sprite.png'\n embedding.sprite.single_image_dim.extend([80, 80])\n embedding.metadata_path = './metadata.tsv'\n projector.visualize_embeddings(self.summary_writer, config)\n sess.run(tf.variables_initializer([self.embedding_test], name='init_embeddings'))\n\n # build sprite image\n ut.images_to_sprite(self.test_set, path=os.path.join(FLAGS.logdir, 'sprite.png'))\n ut.generate_tsv(len(self.test_set), tsv_path)\n\n def _add_loss_summary(self, name, var, collection='train'):\n if var is not None:\n tf.summary.scalar(name, var, [collection])\n tf.summary.scalar('log_' + name, tf.log(var), [collection])\n\n def _restore_model(self, session):\n latest_checkpoint = self.get_latest_checkpoint()\n print(latest_checkpoint)\n if latest_checkpoint is not None:\n latest_checkpoint = latest_checkpoint.replace(EMB_SUFFIX, '')\n ut.print_info(\"latest checkpoint: %s\" % latest_checkpoint)\n if FLAGS.load_state and latest_checkpoint is not None:\n self.saver.restore(session, latest_checkpoint)\n ut.print_info('Restored requested. 
Previous epoch: %d' % self.get_past_epochs(), color=31)\n\n def _on_batch_finish(self, summs, loss, batch=None, encoding=None, reconstruction=None):\n self.summary_writer.add_summary(summs, global_step=self.step.eval())\n self.epoch_stats.total_loss += loss\n\n if False:\n assert batch is not None and reconstruction is not None\n original = batch[0]\n vis.plot_reconstruction(original, reconstruction, interactive=True)\n\n # @ut.timeit\n def _on_epoch_finish(self, epoch, start_time, sess):\n elapsed = time.time() - start_time\n self.epoch_stats.total_loss = guard_nan(self.epoch_stats.total_loss)\n accuracy = np.nan_to_num(100000 * np.sqrt(self.epoch_stats.total_loss / np.prod(self.batch_shape) / self.epoch_size))\n # SAVE\n if is_stopping_point(epoch, FLAGS.max_epochs, FLAGS.save_every):\n self.saver.save(sess, self.get_checkpoint_path())\n # VISUALIZE\n if is_stopping_point(epoch, FLAGS.max_epochs, FLAGS.eval_every):\n evaluation = self.evaluate(sess, take=FLAGS.visualiza_max)\n data = {\n 'enc': np.asarray(evaluation.encoded),\n 'rec': np.asarray(evaluation.reconstructed),\n 'blu': np.asarray(evaluation.source)\n }\n error_info = '%d(%d.%d.%d)' % (np.nan_to_num(accuracy),\n np.nan_to_num(evaluation.loss)/evaluation.size,\n np.nan_to_num(evaluation.eval_loss)/evaluation.size,\n np.nan_to_num(evaluation.dumb_loss)/evaluation.size)\n meta = Bunch(suf='encodings', e='%06d' % int(self.get_past_epochs()), er=error_info)\n # print(data, meta.to_file_name(folder=FLAGS.save_path))\n np.save(meta.to_file_name(folder=FLAGS.save_path), data)\n vis.plot_encoding_crosssection(\n evaluation.encoded,\n meta.to_file_name(FLAGS.save_path, 'jpg'),\n evaluation.source,\n evaluation.reconstructed,\n interactive=FLAGS.dev)\n self._save_visualization_to_summary()\n self.stats.epoch_accuracy.append(accuracy)\n self._print_epoch_info(accuracy, epoch, FLAGS.max_epochs, elapsed)\n if epoch + 1 != FLAGS.max_epochs:\n self.epoch_stats = get_stats_template()\n\n def _save_visualization_to_summary(self):\n image = ut.fig2rgb_array(plt.figure(num=0))\n self.summary_writer.add_summary(self.vis_summary.eval(feed_dict={self.vis_placeholder: image}))\n\n def _print_epoch_info(self, accuracy, current_epoch, epochs, elapsed):\n epochs_past = self.get_past_epochs() - current_epoch\n accuracy_info = '' if accuracy is None else '| accuracy %d' % int(accuracy)\n epoch_past_info = '' if epochs_past is None else '+%d' % (epochs_past - 1)\n epoch_count = 'Epochs %2d/%d%s' % (current_epoch + 1, epochs, epoch_past_info)\n time_info = '%2dms/bt' % (elapsed / self.epoch_size * 1000)\n\n examples = int(np.floor(len(self.train_set) / FLAGS.batch_size))\n loss_info = 't.loss:%d' % (self.epoch_stats.total_loss * 100 / (examples * np.prod(self.batch_shape[1:])))\n\n info_string = ' '.join([epoch_count, accuracy_info, time_info, loss_info])\n ut.print_time(info_string, same_line=True)\n\n def _on_training_finish(self, sess):\n if FLAGS.max_epochs == 0:\n self._on_epoch_finish(self.get_past_epochs(), time.time(), sess)\n best_acc = np.min(self.stats.epoch_accuracy)\n ut.print_time('Best Quality: %f for %s' % (best_acc, FLAGS.net))\n self.summary_writer.close()\n\n def _on_training_abort(self, sess):\n print('Press ENTER to save the model')\n if getch.getch() == '\\n':\n print('saving')\n self.saver.save(sess, self.get_checkpoint_path())\n\n\nif __name__ == '__main__':\n args = dict([arg.split('=', maxsplit=1) for arg in sys.argv[1:]])\n if len(args) <= 1:\n FLAGS.input_path = '../data/tmp/romb8.5.6.tar.gz'\n FLAGS.test_path = 
'../data/tmp/romb8.5.6.tar.gz'\n FLAGS.test_max = 2178\n FLAGS.max_epochs = 5\n FLAGS.eval_every = 1\n FLAGS.save_every = 1\n FLAGS.batch_size = 32\n FLAGS.blur = 0.0\n\n # FLAGS.model = 'noise'\n # FLAGS.beta = 1.0\n # FLAGS.epsilon = .000001\n\n model = Autoencoder()\n if FLAGS.model == 'ae':\n FLAGS.model = AUTOENCODER\n elif 'pred' in FLAGS.model:\n print('PREDICTIVE')\n FLAGS.model = PREDICTIVE\n elif 'noi' in FLAGS.model:\n print('DENOISING')\n FLAGS.model = DENOISING\n else:\n print('Do-di-li-doo doo-di-li-don')\n model.train()\n"
] | [
[
"numpy.expand_dims",
"tensorflow.concat",
"numpy.asarray",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.variables_initializer",
"numpy.nan_to_num",
"tensorflow.app.flags.DEFINE_string",
"numpy.random.randn",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.add_n",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"tensorflow.summary.image",
"numpy.arange",
"tensorflow.app.flags.DEFINE_integer",
"numpy.stack",
"tensorflow.stop_gradient",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.train.Saver",
"matplotlib.pyplot.figure",
"tensorflow.unstack",
"numpy.min",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig",
"tensorflow.summary.merge",
"tensorflow.summary.histogram",
"tensorflow.nn.relu",
"tensorflow.summary.FileWriter",
"tensorflow.reduce_mean",
"tensorflow.expand_dims",
"tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings",
"numpy.random.permutation",
"tensorflow.log",
"tensorflow.app.flags.DEFINE_float",
"numpy.prod",
"numpy.vstack",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
skurscheid/snakemake | [
"8d17cef39905cab4fb8a6ef068dacb015c449316"
] | [
"snakemake/utils.py"
] | [
"__author__ = \"Johannes Köster\"\n__contributors__ = [\"Per Unneberg\"]\n__copyright__ = \"Copyright 2015, Johannes Köster\"\n__email__ = \"[email protected]\"\n__license__ = \"MIT\"\n\nimport os\nimport json\nimport re\nimport inspect\nimport textwrap\nimport platform\nfrom itertools import chain\nimport collections\nimport multiprocessing\nimport string\nimport shlex\nimport sys\nfrom urllib.parse import urljoin\n\nfrom snakemake.io import regex, Namedlist, Wildcards, _load_configfile\nfrom snakemake.logging import logger\nfrom snakemake.exceptions import WorkflowError\nimport snakemake\n\n\ndef validate(data, schema, set_default=True):\n \"\"\"Validate data with JSON schema at given path.\n\n Args:\n data (object): data to validate. Can be a config dict or a pandas data frame.\n schema (str): Path to JSON schema used for validation. The schema can also be\n in YAML format. If validating a pandas data frame, the schema has to\n describe a row record (i.e., a dict with column names as keys pointing\n to row values). See http://json-schema.org. The path is interpreted\n relative to the Snakefile when this function is called.\n set_default (bool): set default values defined in schema. See\n http://python-jsonschema.readthedocs.io/en/latest/faq/ for more\n information\n \"\"\"\n try:\n import jsonschema\n from jsonschema import validators, RefResolver\n except ImportError:\n raise WorkflowError(\n \"The Python 3 package jsonschema must be installed \"\n \"in order to use the validate directive.\"\n )\n\n if not os.path.isabs(schema):\n frame = inspect.currentframe().f_back\n # if workflow object is not available this has not been started from a workflow\n if \"workflow\" in frame.f_globals:\n workflow = frame.f_globals[\"workflow\"]\n schema = os.path.join(workflow.current_basedir, schema)\n\n schemafile = schema\n schema = _load_configfile(schema, filetype=\"Schema\")\n resolver = RefResolver(\n urljoin(\"file:\", schemafile),\n schema,\n handlers={\"file\": lambda uri: _load_configfile(re.sub(\"^file://\", \"\", uri))},\n )\n\n # Taken from http://python-jsonschema.readthedocs.io/en/latest/faq/\n def extend_with_default(validator_class):\n validate_properties = validator_class.VALIDATORS[\"properties\"]\n\n def set_defaults(validator, properties, instance, schema):\n for property, subschema in properties.items():\n if \"default\" in subschema:\n instance.setdefault(property, subschema[\"default\"])\n\n for error in validate_properties(validator, properties, instance, schema):\n yield error\n\n return validators.extend(validator_class, {\"properties\": set_defaults})\n\n Validator = validators.validator_for(schema)\n if Validator.META_SCHEMA[\"$schema\"] != schema[\"$schema\"]:\n logger.warning(\n \"No validator found for JSON Schema version identifier '{}'\".format(\n schema[\"$schema\"]\n )\n )\n logger.warning(\n \"Defaulting to validator for JSON Schema version '{}'\".format(\n Validator.META_SCHEMA[\"$schema\"]\n )\n )\n logger.warning(\"Note that schema file may not be validated correctly.\")\n DefaultValidator = extend_with_default(Validator)\n\n if not isinstance(data, dict):\n try:\n import pandas as pd\n\n recordlist = []\n if isinstance(data, pd.DataFrame):\n for i, record in enumerate(data.to_dict(\"records\")):\n record = {k: v for k, v in record.items() if not pd.isnull(v)}\n try:\n if set_default:\n DefaultValidator(schema, resolver=resolver).validate(record)\n recordlist.append(record)\n else:\n jsonschema.validate(record, schema, resolver=resolver)\n except 
jsonschema.exceptions.ValidationError as e:\n raise WorkflowError(\n \"Error validating row {} of data frame.\".format(i), e\n )\n if set_default:\n newdata = pd.DataFrame(recordlist, data.index)\n newcol = ~newdata.columns.isin(data.columns)\n n = len(data.columns)\n for col in newdata.loc[:, newcol].columns:\n data.insert(n, col, newdata.loc[:, col])\n n = n + 1\n return\n except ImportError:\n pass\n raise WorkflowError(\"Unsupported data type for validation.\")\n else:\n try:\n if set_default:\n DefaultValidator(schema, resolver=resolver).validate(data)\n else:\n jsonschema.validate(data, schema, resolver=resolver)\n except jsonschema.exceptions.ValidationError as e:\n raise WorkflowError(\"Error validating config file.\", e)\n\n\ndef simplify_path(path):\n \"\"\"Return a simplified version of the given path.\"\"\"\n relpath = os.path.relpath(path)\n if relpath.startswith(\"../../\"):\n return path\n else:\n return relpath\n\n\ndef linecount(filename):\n \"\"\"Return the number of lines of given file.\n\n Args:\n filename (str): the path to the file\n \"\"\"\n with open(filename) as f:\n return sum(1 for l in f)\n\n\ndef listfiles(pattern, restriction=None, omit_value=None):\n \"\"\"Yield a tuple of existing filepaths for the given pattern.\n\n Wildcard values are yielded as the second tuple item.\n\n Args:\n pattern (str): a filepattern. Wildcards are specified in snakemake syntax, e.g. \"{id}.txt\"\n restriction (dict): restrict to wildcard values given in this dictionary\n omit_value (str): wildcard value to omit\n\n Yields:\n tuple: The next file matching the pattern, and the corresponding wildcards object\n \"\"\"\n pattern = os.path.normpath(pattern)\n first_wildcard = re.search(\"{[^{]\", pattern)\n if first_wildcard:\n dirname = os.path.dirname(pattern[: first_wildcard.start()])\n if not dirname:\n dirname = \".\"\n else:\n dirname = os.path.dirname(pattern)\n pattern = re.compile(regex(pattern))\n for dirpath, dirnames, filenames in os.walk(dirname):\n for f in chain(filenames, dirnames):\n if dirpath != \".\":\n f = os.path.normpath(os.path.join(dirpath, f))\n match = re.match(pattern, f)\n if match:\n wildcards = Namedlist(fromdict=match.groupdict())\n if restriction is not None:\n invalid = any(\n omit_value not in v and v != wildcards[k]\n for k, v in restriction.items()\n )\n if not invalid:\n yield f, wildcards\n else:\n yield f, wildcards\n\n\ndef makedirs(dirnames):\n \"\"\"Recursively create the given directory or directories without\n reporting errors if they are present.\n \"\"\"\n if isinstance(dirnames, str):\n dirnames = [dirnames]\n for dirname in dirnames:\n os.makedirs(dirname, exist_ok=True)\n\n\ndef report(\n text,\n path,\n stylesheet=os.path.join(os.path.dirname(__file__), \"report.css\"),\n defaultenc=\"utf8\",\n template=None,\n metadata=None,\n **files\n):\n \"\"\"Create an HTML report using python docutils.\n\n This is deprecated in favor of the --report flag.\n\n Attention: This function needs Python docutils to be installed for the\n python installation you use with Snakemake.\n\n All keywords not listed below are intepreted as paths to files that shall\n be embedded into the document. They keywords will be available as link\n targets in the text. E.g. append a file as keyword arg via F1=input[0]\n and put a download link in the text like this:\n\n .. code:: python\n\n report('''\n ==============\n Report for ...\n ==============\n\n Some text. 
A link to an embedded file: F1_.\n\n Further text.\n ''', outputpath, F1=input[0])\n\n Instead of specifying each file as a keyword arg, you can also expand\n the input of your rule if it is completely named, e.g.:\n\n report('''\n Some text...\n ''', outputpath, **input)\n\n Args:\n text (str): The \"restructured text\" as it is expected by python docutils.\n path (str): The path to the desired output file\n stylesheet (str): An optional path to a css file that defines the style of the document. This defaults to <your snakemake install>/report.css. Use the default to get a hint how to create your own.\n defaultenc (str): The encoding that is reported to the browser for embedded text files, defaults to utf8.\n template (str): An optional path to a docutils HTML template.\n metadata (str): E.g. an optional author name or email address.\n\n \"\"\"\n try:\n import snakemake.report\n except ImportError:\n raise WorkflowError(\n \"Python 3 package docutils needs to be installed to use the report function.\"\n )\n snakemake.report.report(\n text,\n path,\n stylesheet=stylesheet,\n defaultenc=defaultenc,\n template=template,\n metadata=metadata,\n **files\n )\n\n\ndef R(code):\n \"\"\"Execute R code.\n\n This is deprecated in favor of the ``script`` directive.\n This function executes the R code given as a string.\n The function requires rpy2 to be installed.\n\n Args:\n code (str): R code to be executed\n \"\"\"\n try:\n import rpy2.robjects as robjects\n except ImportError:\n raise ValueError(\n \"Python 3 package rpy2 needs to be installed to use the R function.\"\n )\n robjects.r(format(textwrap.dedent(code), stepout=2))\n\n\nclass SequenceFormatter(string.Formatter):\n \"\"\"string.Formatter subclass with special behavior for sequences.\n\n This class delegates formatting of individual elements to another\n formatter object. Non-list objects are formatted by calling the\n delegate formatter's \"format_field\" method. List-like objects\n (list, tuple, set, frozenset) are formatted by formatting each\n element of the list according to the specified format spec using\n the delegate formatter and then joining the resulting strings with\n a separator (space by default).\n\n \"\"\"\n\n def __init__(\n self, separator=\" \", element_formatter=string.Formatter(), *args, **kwargs\n ):\n self.separator = separator\n self.element_formatter = element_formatter\n\n def format_element(self, elem, format_spec):\n \"\"\"Format a single element\n\n For sequences, this is called once for each element in a\n sequence. For anything else, it is called on the entire\n object. It is intended to be overridden in subclases.\n\n \"\"\"\n return self.element_formatter.format_field(elem, format_spec)\n\n def format_field(self, value, format_spec):\n if isinstance(value, Wildcards):\n return \",\".join(\n \"{}={}\".format(name, value)\n for name, value in sorted(value.items(), key=lambda item: item[0])\n )\n if isinstance(value, (list, tuple, set, frozenset)):\n return self.separator.join(\n self.format_element(v, format_spec) for v in value\n )\n else:\n return self.format_element(value, format_spec)\n\n\nclass QuotedFormatter(string.Formatter):\n \"\"\"Subclass of string.Formatter that supports quoting.\n\n Using this formatter, any field can be quoted after formatting by\n appending \"q\" to its format string. By default, shell quoting is\n performed using \"shlex.quote\", but you can pass a different\n quote_func to the constructor. 
The quote_func simply has to take a\n string argument and return a new string representing the quoted\n form of the input string.\n\n Note that if an element after formatting is the empty string, it\n will not be quoted.\n\n \"\"\"\n\n def __init__(self, quote_func=None, *args, **kwargs):\n if quote_func is None:\n quote_func = shlex.quote if not ON_WINDOWS else argvquote\n self.quote_func = quote_func\n super().__init__(*args, **kwargs)\n\n def format_field(self, value, format_spec):\n do_quote = format_spec.endswith(\"q\")\n if do_quote:\n format_spec = format_spec[:-1]\n formatted = super().format_field(value, format_spec)\n if do_quote and formatted != \"\":\n formatted = self.quote_func(formatted)\n return formatted\n\n\nclass AlwaysQuotedFormatter(QuotedFormatter):\n \"\"\"Subclass of QuotedFormatter that always quotes.\n\n Usage is identical to QuotedFormatter, except that it *always*\n acts like \"q\" was appended to the format spec.\n\n \"\"\"\n\n def format_field(self, value, format_spec):\n if not format_spec.endswith(\"q\"):\n format_spec += \"q\"\n return super().format_field(value, format_spec)\n\n\ndef format(_pattern, *args, stepout=1, _quote_all=False, **kwargs):\n \"\"\"Format a pattern in Snakemake style.\n\n This means that keywords embedded in braces are replaced by any variable\n values that are available in the current namespace.\n \"\"\"\n\n frame = inspect.currentframe().f_back\n while stepout > 1:\n if not frame.f_back:\n break\n frame = frame.f_back\n stepout -= 1\n\n variables = dict(frame.f_globals)\n # add local variables from calling rule/function\n variables.update(frame.f_locals)\n if \"self\" in variables and sys.version_info < (3, 5):\n # self is the first arg of fmt.format as well. Not removing it would\n # cause a multiple values error on Python <=3.4.2.\n del variables[\"self\"]\n variables.update(kwargs)\n fmt = SequenceFormatter(separator=\" \")\n if _quote_all:\n fmt.element_formatter = AlwaysQuotedFormatter()\n else:\n fmt.element_formatter = QuotedFormatter()\n try:\n return fmt.format(_pattern, *args, **variables)\n except KeyError as ex:\n raise NameError(\n \"The name {} is unknown in this context. Please \"\n \"make sure that you defined that variable. \"\n \"Also note that braces not used for variable access \"\n \"have to be escaped by repeating them, \"\n \"i.e. {{{{print $1}}}}\".format(str(ex))\n )\n\n\nclass Unformattable:\n def __init__(self, errormsg=\"This cannot be used for formatting\"):\n self.errormsg = errormsg\n\n def __str__(self):\n raise ValueError(self.errormsg)\n\n\ndef read_job_properties(\n jobscript, prefix=\"# properties\", pattern=re.compile(\"# properties = (.*)\")\n):\n \"\"\"Read the job properties defined in a snakemake jobscript.\n\n This function is a helper for writing custom wrappers for the\n snakemake --cluster functionality. 
Applying this function to a\n jobscript will return a dict containing information about the job.\n \"\"\"\n with open(jobscript) as jobscript:\n for m in map(pattern.match, jobscript):\n if m:\n return json.loads(m.group(1))\n\n\ndef min_version(version):\n \"\"\"Require minimum snakemake version, raise workflow error if not met.\"\"\"\n import pkg_resources\n\n if pkg_resources.parse_version(snakemake.__version__) < pkg_resources.parse_version(\n version\n ):\n raise WorkflowError(\"Expecting Snakemake version {} or higher.\".format(version))\n\n\ndef update_config(config, overwrite_config):\n \"\"\"Recursively update dictionary config with overwrite_config.\n\n See\n http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth\n for details.\n\n Args:\n config (dict): dictionary to update\n overwrite_config (dict): dictionary whose items will overwrite those in config\n\n \"\"\"\n\n def _update(d, u):\n for (key, value) in u.items():\n if isinstance(value, collections.Mapping):\n d[key] = _update(d.get(key, {}), value)\n else:\n d[key] = value\n return d\n\n _update(config, overwrite_config)\n\n\ndef available_cpu_count():\n \"\"\"\n Return the number of available virtual or physical CPUs on this system.\n The number of available CPUs can be smaller than the total number of CPUs\n when the cpuset(7) mechanism is in use, as is the case on some cluster\n systems.\n\n Adapted from http://stackoverflow.com/a/1006301/715090\n \"\"\"\n try:\n with open(\"/proc/self/status\") as f:\n status = f.read()\n m = re.search(r\"(?m)^Cpus_allowed:\\s*(.*)$\", status)\n if m:\n res = bin(int(m.group(1).replace(\",\", \"\"), 16)).count(\"1\")\n if res > 0:\n return min(res, multiprocessing.cpu_count())\n except IOError:\n pass\n\n return multiprocessing.cpu_count()\n\n\ndef argvquote(arg, force=True):\n \"\"\" Returns an argument quoted in such a way that that CommandLineToArgvW\n on Windows will return the argument string unchanged.\n This is the same thing Popen does when supplied with an list of arguments.\n Arguments in a command line should be separated by spaces; this\n function does not add these spaces. This implementation follows the\n suggestions outlined here:\n https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/\n \"\"\"\n if not force and len(arg) != 0 and not any([c in arg for c in ' \\t\\n\\v\"']):\n return arg\n else:\n n_backslashes = 0\n cmdline = '\"'\n for c in arg:\n if c == \"\\\\\":\n # first count the number of current backslashes\n n_backslashes += 1\n continue\n if c == '\"':\n # Escape all backslashes and the following double quotation mark\n cmdline += (n_backslashes * 2 + 1) * \"\\\\\"\n else:\n # backslashes are not special here\n cmdline += n_backslashes * \"\\\\\"\n n_backslashes = 0\n cmdline += c\n # Escape all backslashes, but let the terminating\n # double quotation mark we add below be interpreted\n # as a metacharacter\n cmdline += +n_backslashes * 2 * \"\\\\\" + '\"'\n return cmdline\n\n\nON_WINDOWS = platform.system() == \"Windows\"\n"
] | [
[
"pandas.isnull",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
kifumi/aqua | [
"30bb8dc865d1f3e1a670369b1f72f04ff14196a1"
] | [
"qiskit_aqua/algorithms/many_sample/qsvm/svm_qkernel_binary.py"
] | [
"# -*- coding: utf-8 -*-\n\n# Copyright 2018 IBM.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport numpy as np\n\nimport logging\n\nfrom qiskit_aqua.algorithms.many_sample.qsvm import SVM_QKernel_ABC\nfrom qiskit_aqua.algorithms.many_sample.qsvm import (get_points_and_labels, optimize_SVM, kernel_join)\n\nlogger = logging.getLogger(__name__)\n\nclass SVM_QKernel_Binary(SVM_QKernel_ABC):\n \"\"\"\n the binary classifier\n \"\"\"\n\n def __init__(self):\n self.ret = {}\n\n def train(self, training_input, class_labels):\n \"\"\"\n train the svm\n Args:\n training_input (dict): dictionary which maps each class to the points in the class\n class_labels (list): list of classes. For example: ['A', 'B']\n \"\"\"\n training_points, training_points_labels, label_to_class = get_points_and_labels(training_input, class_labels)\n\n kernel_matrix = kernel_join(training_points, training_points, self.entangler_map,\n self.coupling_map, self.initial_layout, self.shots,\n self._random_seed, self.num_of_qubits, self._backend)\n\n self.ret['kernel_matrix_training'] = kernel_matrix\n\n [alpha, b, support] = optimize_SVM(kernel_matrix, training_points_labels)\n alphas = np.array([])\n SVMs = np.array([])\n yin = np.array([])\n for alphindex in range(len(support)):\n if support[alphindex]:\n alphas = np.vstack([alphas, alpha[alphindex]]) if alphas.size else alpha[alphindex]\n SVMs = np.vstack([SVMs, training_points[alphindex]]) if SVMs.size else training_points[alphindex]\n yin = np.vstack([yin, training_points_labels[alphindex]]\n ) if yin.size else training_points_labels[alphindex]\n\n self.ret['svm'] = {}\n self.ret['svm']['alphas'] = alphas\n self.ret['svm']['bias'] = b\n self.ret['svm']['support_vectors'] = SVMs\n self.ret['svm']['yin'] = yin\n\n def test(self, test_input, class_labels):\n \"\"\"\n test the svm\n Args:\n test_input (dict): dictionary which maps each class to the points in the class\n class_labels (list): list of classes. 
For example: ['A', 'B']\n \"\"\"\n\n test_points, test_points_labels, label_to_labelclass = get_points_and_labels(test_input, class_labels)\n\n alphas = self.ret['svm']['alphas']\n bias = self.ret['svm']['bias']\n SVMs = self.ret['svm']['support_vectors']\n yin = self.ret['svm']['yin']\n\n kernel_matrix = kernel_join(test_points, SVMs, self.entangler_map, self.coupling_map,\n self.initial_layout, self.shots, self._random_seed,\n self.num_of_qubits, self._backend)\n\n self.ret['kernel_matrix_testing'] = kernel_matrix\n\n success_ratio = 0\n L = 0\n total_num_points = len(test_points)\n Lsign = np.zeros(total_num_points)\n for tin in range(total_num_points):\n Ltot = 0\n for sin in range(len(SVMs)):\n L = yin[sin] * alphas[sin] * kernel_matrix[tin][sin]\n Ltot += L\n\n Lsign[tin] = np.sign(Ltot + bias)\n\n\n\n logger.debug(\"\\n=============================================\")\n logger.debug('classifying' + str(test_points[tin]))\n logger.debug('Label should be ' + str(label_to_labelclass[np.int(test_points_labels[tin])]))\n logger.debug('Predicted label is ' + str(label_to_labelclass[np.int(Lsign[tin])]))\n if np.int(test_points_labels[tin]) == np.int(Lsign[tin]):\n logger.debug('CORRECT')\n else:\n logger.debug('INCORRECT')\n\n if Lsign[tin] == test_points_labels[tin]:\n success_ratio += 1\n final_success_ratio = success_ratio / total_num_points\n\n logger.debug('Classification success for this set is %s %% \\n' % (100 * final_success_ratio))\n return final_success_ratio\n\n def predict(self, test_points):\n \"\"\"\n predict using the svm\n Args:\n test_points (numpy.ndarray): the points\n \"\"\"\n alphas = self.ret['svm']['alphas']\n bias = self.ret['svm']['bias']\n SVMs = self.ret['svm']['support_vectors']\n yin = self.ret['svm']['yin']\n\n kernel_matrix = kernel_join(test_points, SVMs, self.entangler_map, self.coupling_map,\n self.initial_layout, self.shots, self._random_seed,\n self.num_of_qubits, self._backend)\n\n self.ret['kernel_matrix_prediction'] = kernel_matrix\n\n total_num_points = len(test_points)\n Lsign = np.zeros(total_num_points)\n for tin in range(total_num_points):\n Ltot = 0\n for sin in range(len(SVMs)):\n L = yin[sin] * alphas[sin] * kernel_matrix[tin][sin]\n Ltot += L\n Lsign[tin] = np.int(np.sign(Ltot + bias))\n return Lsign\n\n def run(self):\n \"\"\"\n put the train, test, predict together\n \"\"\"\n if self.training_dataset is None:\n self.ret['error'] = 'training dataset is missing! please provide it'\n return self.ret\n\n num_of_qubits = self.auto_detect_qubitnum(self.training_dataset) # auto-detect mode\n if num_of_qubits == -1:\n self.ret['error'] = 'Something wrong with the auto-detection of num_of_qubits'\n return self.ret\n if num_of_qubits != 2 and num_of_qubits != 3:\n self.ret['error'] = 'You should lower the feature size to 2 or 3 using PCA first!'\n return self.ret\n self.train(self.training_dataset, self.class_labels)\n if self.test_dataset is not None:\n success_ratio = self.test(self.test_dataset, self.class_labels)\n self.ret['test_success_ratio'] = success_ratio\n if self.datapoints is not None:\n predicted_labels = self.predict(self.datapoints)\n _, _, label_to_class = get_points_and_labels(self.training_dataset, self.class_labels)\n predicted_labelclasses = [label_to_class[x] for x in predicted_labels]\n self.ret['predicted_labels'] = predicted_labelclasses\n\n return self.ret\n"
] | [
[
"numpy.sign",
"numpy.int",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
anokun7/AlexNet | [
"9fd64fc6d52e6e75231a8b069506da261dee071e"
] | [
"examples/imagenet/main.py"
] | [
"# Copyright 2020 Lorna Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"\nEvaluate on ImageNet. Note that at the moment, training is not implemented\nthat being said, evaluation is working.\n\"\"\"\nimport argparse\nimport hashlib\nimport os\nimport random\nimport shutil\nimport time\nimport warnings\n\nimport PIL.Image\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport torchvision.transforms as transforms\n\nfrom alexnet_pytorch import AlexNet\n\nmixed_precision = True\ntry:\n from apex import amp\nexcept:\n mixed_precision = False\n warnings.warn(\"Warning: Apex tool not install.\")\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR', default='data',\n help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='alexnet',\n help='model architecture (default: alexnet)')\nparser.add_argument('-j', '--workers', default=4, type=int, metavar='N',\n help='number of data loading workers (default: 4)')\nparser.add_argument('--epochs', default=90, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=128, type=int,\n metavar='N',\n help='mini-batch size (default: 128), this is the total '\n 'batch size of all GPUs on the current node when '\n 'using Data Parallel or Distributed Data Parallel')\nparser.add_argument('--lr', '--learning-rate', default=0.01, type=float,\n metavar='LR', help='initial learning rate', dest='lr')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--wd', '--weight-decay', default=5e-4, type=float,\n metavar='W', help='weight decay (default: 1e-4)',\n dest='weight_decay')\nparser.add_argument('-p', '--print-freq', default=10, type=int,\n metavar='N', help='print frequency (default: 10)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--opt_level', default=\"O1\", type=str,\n help=\"Choose which accuracy to train. 
(default: 'O1')\")\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('--world-size', default=-1, type=int,\n help='number of nodes for distributed training')\nparser.add_argument('--rank', default=-1, type=int,\n help='node rank for distributed training')\nparser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,\n help='url used to set up distributed training')\nparser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\nparser.add_argument('--seed', default=None, type=int,\n help='seed for initializing training. ')\nparser.add_argument('--gpu', default=0, type=int,\n help='GPU id to use.')\nparser.add_argument('--image_size', default=224, type=int,\n help='image size')\nparser.add_argument('--num_classes', type=int, default=1000,\n help=\"number of dataset category.\")\nparser.add_argument('--multiprocessing-distributed', action='store_true',\n help='Use multi-processing distributed training to launch '\n 'N processes per node, which has N GPUs. This is the '\n 'fastest way to use PyTorch for either single node or '\n 'multi node data parallel training')\nbest_acc1 = 0\n\n\ndef main():\n args = parser.parse_args()\n\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n cudnn.deterministic = True\n warnings.warn('You have chosen to seed training. '\n 'This will turn on the CUDNN deterministic setting, '\n 'which can slow down your training considerably! '\n 'You may see unexpected behavior when restarting '\n 'from checkpoints.')\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. 
This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or args.multiprocessing_distributed\n\n ngpus_per_node = torch.cuda.device_count()\n if args.multiprocessing_distributed:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))\n else:\n # Simply call main_worker function\n main_worker(args.gpu, ngpus_per_node, args)\n\n\ndef main_worker(gpu, ngpus_per_node, args):\n global best_acc1\n args.gpu = gpu\n\n if args.gpu is not None:\n print(f\"Use GPU: {args.gpu} for training!\")\n\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if args.multiprocessing_distributed:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n # create model\n if 'alexnet' in args.arch:\n if args.pretrained:\n model = AlexNet.from_pretrained(args.arch, args.num_classes)\n print(f\"=> using pre-trained model '{args.arch}'\")\n else:\n print(f\"=> creating model '{args.arch}'\")\n model = AlexNet.from_name(args.arch)\n else:\n if args.pretrained:\n print(f\"=> using pre-trained model '{args.arch}'\")\n model = models.__dict__[args.arch](pretrained=True)\n else:\n print(f\"=> creating model '{args.arch}'\")\n model = models.__dict__[args.arch]()\n\n if args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs we have\n args.batch_size = int(args.batch_size / ngpus_per_node)\n args.workers = int(args.workers / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.gpu])\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model = model.cuda(args.gpu)\n else:\n # DataParallel will divide and allocate batch_size to all available\n # GPUs\n if args.arch.startswith('alexnet'):\n model.features = torch.nn.DataParallel(model.features)\n model.cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n criterion = nn.CrossEntropyLoss().cuda(args.gpu)\n\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n if mixed_precision:\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level)\n\n # optionally resume from a checkpoint\n if args.resume:\n if os.path.isfile(args.resume):\n print(f\"=> loading checkpoint 
'{args.resume}'\")\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch']\n best_acc1 = checkpoint['best_acc1']\n if args.gpu is not None:\n # best_acc1 may be from a checkpoint from a different GPU\n best_acc1 = best_acc1.to(args.gpu)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n amp.load_state_dict(checkpoint['amp'])\n print(f\"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})\")\n else:\n print(f\"=> no checkpoint found at '{args.resume}'\")\n\n cudnn.benchmark = True\n\n # Data loading code\n traindir = os.path.join(args.data, 'train')\n valdir = os.path.join(args.data, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transform=transforms.Compose([\n transforms.RandomResizedCrop(args.image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n\n if args.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n else:\n train_sampler = None\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),\n num_workers=args.workers, pin_memory=True, sampler=train_sampler)\n\n if 'alexnet' in args.arch:\n val_transforms = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n normalize,\n ])\n print('Using image size', 224)\n else:\n image_size = AlexNet.get_image_size(args.arch)\n val_transforms = transforms.Compose([\n transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n normalize,\n ])\n print('Using image size', image_size)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, val_transforms),\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n if args.evaluate:\n top1 = validate(val_loader, model, criterion, args)\n with open('res.txt', 'w') as f:\n print(f\"Acc@1: {top1}\", file=f)\n return\n\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n train_sampler.set_epoch(epoch)\n adjust_learning_rate(optimizer, epoch, args)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch, args)\n\n # evaluate on validation set\n acc1 = validate(val_loader, model, criterion, args)\n\n # remember best acc@1 and save checkpoint\n is_best = acc1 > best_acc1\n best_acc1 = max(acc1, best_acc1)\n\n if not args.multiprocessing_distributed or (args.multiprocessing_distributed\n and args.rank % ngpus_per_node == 0):\n save_checkpoint({\n 'epoch': epoch + 1,\n 'arch': args.arch,\n 'state_dict': model.state_dict(),\n 'best_acc1': best_acc1,\n 'optimizer': optimizer.state_dict(),\n 'amp': amp.state_dict(),\n }, is_best)\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n batch_time = AverageMeter('Time', ':6.3f')\n data_time = AverageMeter('Data', ':6.3f')\n losses = AverageMeter('Loss', ':4.4f')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1,\n top5, prefix=\"Epoch: [{}]\".format(epoch))\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (images, target) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if args.gpu is not None:\n 
images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # compute gradient and do Adam step\n optimizer.zero_grad()\n if mixed_precision:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.print(i)\n\n\ndef validate(val_loader, model, criterion, args):\n batch_time = AverageMeter('Time', ':6.3f')\n losses = AverageMeter('Loss', ':4.4f')\n top1 = AverageMeter('Acc@1', ':6.2f')\n top5 = AverageMeter('Acc@5', ':6.2f')\n progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,\n prefix='Test: ')\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, (images, target) in enumerate(val_loader):\n if args.gpu is not None:\n images = images.cuda(args.gpu, non_blocking=True)\n target = target.cuda(args.gpu, non_blocking=True)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n progress.print(i)\n\n print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'\n .format(top1=top1, top5=top5))\n\n return top1.avg\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth'):\n torch.save(state, filename)\n if is_best:\n shutil.copyfile(filename, \"model_best.pth\")\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name, fmt=':f'):\n self.count = 0\n self.sum = 0\n self.avg = 0\n self.val = 0\n self.name = name\n self.fmt = fmt\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self):\n fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n return fmtstr.format(**self.__dict__)\n\n\nclass ProgressMeter(object):\n def __init__(self, num_batches, *meters, prefix=\"\"):\n self.batch_fmtstr = self._get_batch_fmtstr(num_batches)\n self.meters = meters\n self.prefix = prefix\n\n def print(self, batch):\n entries = [self.prefix + self.batch_fmtstr.format(batch)]\n entries += [str(meter) for meter in self.meters]\n print('\\t'.join(entries))\n\n @staticmethod\n def _get_batch_fmtstr(num_batches):\n num_digits = len(str(num_batches // 1))\n fmt = '{:' + str(num_digits) + 'd}'\n return '[' + fmt + '/' + fmt.format(num_batches) + ']'\n\n\ndef adjust_learning_rate(optimizer, epoch, args):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the accuracy over the k top predictions for the specified values of k\"\"\"\n with torch.no_grad():\n maxk = max(topk)\n batch_size = target.size(0)\n\n 
_, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n result = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n result.append(correct_k.mul_(100.0 / batch_size))\n return result\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.multiprocessing.spawn",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.set_device",
"torch.manual_seed",
"torch.load",
"torch.utils.data.DataLoader",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.device_count",
"torch.nn.parallel.DistributedDataParallel",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
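Note on the ImageNet-style training script in the row above: its accuracy() helper computes top-k accuracy from raw logits via output.topk. A minimal standalone sketch of that computation follows; the tensor shapes and random inputs are illustrative only, not part of the original script.

import torch

def topk_accuracy(output, target, topk=(1, 5)):
    # output: [batch, num_classes] logits; target: [batch] integer class labels
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)  # [batch, maxk]
        pred = pred.t()                                                # [maxk, batch]
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        # percentage of samples whose true label appears among the top-k predictions
        return [correct[:k].reshape(-1).float().sum(0) * (100.0 / batch_size) for k in topk]

# usage sketch with random logits
logits = torch.randn(8, 1000)
labels = torch.randint(0, 1000, (8,))
top1, top5 = topk_accuracy(logits, labels)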
JamesDarcy616/wsidicomizer | [
"c38c062a2b1c31ce30be00ef71c69e34826c8f96"
] | [
"wsidicomizer/openslide.py"
] | [
"# Copyright 2021 SECTRA AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport os\nfrom abc import ABCMeta\nfrom ctypes import c_uint32\nfrom pathlib import Path\nfrom typing import List, Optional, Sequence, Union\n\nimport numpy as np\nfrom PIL import Image\nfrom pydicom import Dataset\nfrom pydicom.uid import UID as Uid\nfrom wsidicom import (WsiDicom, WsiDicomLabels, WsiDicomLevels,\n WsiDicomOverviews)\nfrom wsidicom.errors import WsiDicomNotFoundError\nfrom wsidicom.geometry import Point, Region, Size, SizeMm\nfrom wsidicom.wsidicom import WsiDicom\n\nfrom wsidicomizer.common import MetaDicomizer, MetaImageData\nfrom wsidicomizer.dataset import create_base_dataset\nfrom wsidicomizer.encoding import Encoder, create_encoder\n\nif os.name == 'nt': # On windows, add path to openslide to dll path\n try:\n openslide_dir = os.environ['OPENSLIDE']\n except KeyError:\n raise ValueError(\n \"Enviroment variable 'OPENSLIDE'\"\n \"needs to be set to OpenSlide bin path\"\n )\n try:\n os.add_dll_directory(openslide_dir)\n except AttributeError:\n os.environ['PATH'] = (\n openslide_dir + os.pathsep + os.environ['PATH']\n )\n\"\"\"\nOpenSlideImageData uses proteted functions from OpenSlide to get image data as\nnumpy arrays instead of pillow images. The proteted function _read_region is\nused to get raw data from the OpenSlide C API. 
We consider this safe, as these\ndirectly map to the Openslide C API and are thus not likely to change.\n\"\"\"\n\nfrom openslide import OpenSlide\nfrom openslide._convert import argb2rgba as convert_argb_to_rgba\nfrom openslide.lowlevel import (ArgumentError, _read_region,\n get_associated_image_names)\n\n\nclass OpenSlideImageData(MetaImageData, metaclass=ABCMeta):\n def __init__(\n self,\n open_slide: OpenSlide,\n encoder: Encoder\n ):\n \"\"\"Wraps a OpenSlide image to ImageData.\n\n Parameters\n ----------\n open_slide: OpenSlide\n OpenSlide object to wrap.\n encoded: Encoder\n Encoder to use.\n \"\"\"\n super().__init__(encoder)\n self._slide = open_slide\n\n @property\n def files(self) -> List[Path]:\n return [Path(self._slide._filename)]\n\n @property\n def transfer_syntax(self) -> Uid:\n \"\"\"The uid of the transfer syntax of the image.\"\"\"\n return self._encoder.transfer_syntax\n\n @property\n def focal_planes(self) -> List[float]:\n return [0.0]\n\n @property\n def optical_paths(self) -> List[str]:\n return ['0']\n\n def close(self) -> None:\n \"\"\"Close the open slide object, if not already closed.\"\"\"\n try:\n self._slide.close()\n except ArgumentError:\n # Slide already closed\n pass\n\n\nclass OpenSlideAssociatedImageData(OpenSlideImageData):\n def __init__(\n self,\n open_slide: OpenSlide,\n image_type: str,\n encoder: Encoder\n ):\n \"\"\"Wraps a OpenSlide associated image (label or overview) to ImageData.\n\n Parameters\n ----------\n open_slide: OpenSlide\n OpenSlide object to wrap.\n image_type: str\n Type of image to wrap.\n encoded: Encoder\n Encoder to use.\n \"\"\"\n super().__init__(open_slide, encoder)\n self._image_type = image_type\n if image_type not in get_associated_image_names(self._slide._osr):\n raise ValueError(f\"{image_type} not in {self._slide}\")\n\n image = self._slide.associated_images[image_type]\n no_alpha = Image.new('RGB', image.size, self.blank_color)\n no_alpha.paste(image, mask=image.split()[3])\n self._image_size = Size.from_tuple(no_alpha.size)\n self._decoded_image = no_alpha\n self._encoded_image = self._encode(np.asarray(no_alpha))\n\n @property\n def image_size(self) -> Size:\n \"\"\"The pixel size of the image.\"\"\"\n return self._image_size\n\n @property\n def tile_size(self) -> Size:\n \"\"\"The pixel tile size of the image.\"\"\"\n return self.image_size\n\n @property\n def pixel_spacing(self) -> SizeMm:\n \"\"\"Size of the pixels in mm/pixel.\"\"\"\n # TODO figure out pixel spacing for label and overview in openslide.\n return SizeMm(1, 1)\n\n @property\n def pyramid_index(self) -> int:\n \"\"\"The pyramidal index in relation to the base layer.\"\"\"\n return 0\n\n def _get_encoded_tile(\n self,\n tile: Point,\n z: float,\n path: str\n ) -> bytes:\n if tile != Point(0, 0):\n raise ValueError(\"Point(0, 0) only valid tile for non-tiled image\")\n return self._encoded_image\n\n def _get_decoded_tile(\n self,\n tile: Point,\n z: float,\n path: str\n ) -> Image.Image:\n if tile != Point(0, 0):\n raise ValueError(\"Point(0, 0) only valid tile for non-tiled image\")\n return self._decoded_image\n\n\nclass OpenSlideLevelImageData(OpenSlideImageData):\n def __init__(\n self,\n open_slide: OpenSlide,\n level_index: int,\n tile_size: int,\n encoder: Encoder\n ):\n super().__init__(open_slide, encoder)\n \"\"\"Wraps a OpenSlide level to ImageData.\n\n Parameters\n ----------\n open_slide: OpenSlide\n OpenSlide object to wrap.\n level_index: int\n Level in OpenSlide object to wrap\n tile_size: int\n Output tile size.\n encoded: 
Encoder\n Encoder to use.\n \"\"\"\n self._tile_size = Size(tile_size, tile_size)\n self._slide = open_slide\n self._level_index = level_index\n self._image_size = Size.from_tuple(\n self._slide.level_dimensions[self._level_index]\n )\n self._downsample = int(\n self._slide.level_downsamples[self._level_index]\n )\n self._pyramid_index = int(math.log2(self.downsample))\n\n base_mpp_x = float(self._slide.properties['openslide.mpp-x'])\n base_mpp_y = float(self._slide.properties['openslide.mpp-y'])\n self._pixel_spacing = SizeMm(\n base_mpp_x * self.downsample / 1000.0,\n base_mpp_y * self.downsample / 1000.0\n )\n\n # Get set image origin and size to bounds if available\n bounds_x = self._slide.properties.get('openslide.bounds-x', 0)\n bounds_y = self._slide.properties.get('openslide.bounds-y', 0)\n bounds_w = self._slide.properties.get('openslide.bounds-width', None)\n bounds_h = self._slide.properties.get('openslide.bounds-height', None)\n self._offset = Point(int(bounds_x), int(bounds_y))\n if None not in [bounds_w, bounds_h]:\n self._image_size = (\n Size(int(bounds_w), int(bounds_h)) // self.downsample\n )\n else:\n self._image_size = Size.from_tuple(\n self._slide.level_dimensions[self._level_index]\n )\n\n self._blank_encoded_frame = bytes()\n self._blank_encoded_frame_size = None\n self._blank_decoded_frame = None\n self._blank_decoded_frame_size = None\n\n @property\n def image_size(self) -> Size:\n \"\"\"The pixel size of the image.\"\"\"\n return self._image_size\n\n @property\n def tile_size(self) -> Size:\n \"\"\"The pixel tile size of the image.\"\"\"\n return self._tile_size\n\n @property\n def pixel_spacing(self) -> SizeMm:\n \"\"\"Size of the pixels in mm/pixel.\"\"\"\n return self._pixel_spacing\n\n @property\n def downsample(self) -> int:\n \"\"\"Downsample facator for level.\"\"\"\n return self._downsample\n\n @property\n def pyramid_index(self) -> int:\n \"\"\"The pyramidal index in relation to the base layer.\"\"\"\n return self._pyramid_index\n\n def stitch_tiles(\n self,\n region: Region,\n path: str,\n z: float\n ) -> Image.Image:\n \"\"\"Overrides ImageData stitch_tiles() to read reagion directly from\n openslide object.\n\n Parameters\n ----------\n region: Region\n Pixel region to stitch to image\n path: str\n Optical path\n z: float\n Z coordinate\n\n Returns\n ----------\n Image.Image\n Stitched image\n \"\"\"\n if z not in self.focal_planes:\n raise WsiDicomNotFoundError(f'focal plane {z}', str(self))\n if path not in self.optical_paths:\n raise WsiDicomNotFoundError(f'optical path {path}', str(self))\n image_data = self._get_region(region)\n if image_data is None:\n image_data = self._get_blank_decoded_frame(region.size)\n return image_data\n\n def _detect_blank_tile(self, data: np.ndarray) -> bool:\n \"\"\"Detect if tile data is a blank tile, i.e. either has full\n transparency or is filled with background color. 
First checks if the\n corners are transparent or has background color before checking whole\n data.\n\n Parameters\n ----------\n data: np.ndarray\n Data to check if blank.\n\n Returns\n ----------\n bool\n True if tile is blank.\n \"\"\"\n TOP = RIGHT = -1\n BOTTOM = LEFT = 0\n CORNERS_Y = (BOTTOM, BOTTOM, TOP, TOP)\n CORNERS_X = (LEFT, RIGHT, LEFT, RIGHT)\n TRANSPARENCY = 3\n background = np.array(self.blank_color)\n transparency = data[:, :, TRANSPARENCY]\n if np.all(transparency[CORNERS_Y, CORNERS_X] == 0):\n if np.all(transparency == 0):\n return True\n if np.all(data[CORNERS_Y, CORNERS_X, 0:TRANSPARENCY] == background):\n if np.all(data[:, :, 0:TRANSPARENCY] == background):\n return True\n return False\n\n def _get_blank_encoded_frame(self, size: Size) -> bytes:\n \"\"\"Return cached blank encoded frame for size, or create frame if\n cached frame not available or of wrong size.\n\n Parameters\n ----------\n size: Size\n Size of frame to get.\n\n Returns\n ----------\n bytes\n Encoded blank frame.\n \"\"\"\n if self._blank_encoded_frame_size != size:\n frame = np.full(\n size.to_tuple() + (3,),\n self.blank_color,\n dtype=np.dtype(np.uint8)\n )\n self._blank_encoded_frame = self._encode(frame)\n self._blank_encoded_frame_size = size\n return self._blank_encoded_frame\n\n def _get_blank_decoded_frame(self, size: Size) -> Image.Image:\n \"\"\"Return cached blank decoded frame for size, or create frame if\n cached frame not available or of wrong size.\n\n Parameters\n ----------\n size: Size\n Size of frame to get.\n\n Returns\n ----------\n bytes\n Decoded blank frame.\n \"\"\"\n if (\n self._blank_decoded_frame is None\n or self._blank_decoded_frame_size != size\n ):\n frame = Image.new('RGB', size.to_tuple(), self.blank_color)\n self._blank_decoded_frame = frame\n return self._blank_decoded_frame\n\n def _get_region(\n self,\n region: Region\n ) -> Optional[Image.Image]:\n \"\"\"Return Image read from region in openslide image. If image data for\n region is blank, None is returned. Transparent pixels are made into\n background color\n\n Parameters\n ----------\n region: Region\n Region to get image for.\n\n Returns\n ----------\n Optional[Image.Image]\n Image of region, or None if region is blank.\n \"\"\"\n if region.size.width < 0 or region.size.height < 0:\n raise ValueError('Negative size not allowed')\n\n location_in_base_level = region.start * self.downsample + self._offset\n buffer = (region.size.width * region.size.height * c_uint32)()\n _read_region(\n self._slide._osr,\n buffer,\n location_in_base_level.x,\n location_in_base_level.y,\n self._level_index,\n region.size.width,\n region.size.height\n )\n tile_data: np.ndarray = np.frombuffer(buffer, dtype=np.dtype(np.uint8))\n tile_data.shape = (region.size.height, region.size.width, 4)\n if self._detect_blank_tile(tile_data):\n return None\n convert_argb_to_rgba(tile_data)\n image = Image.fromarray(tile_data)\n no_alpha = Image.new('RGB', image.size, self.blank_color)\n no_alpha.paste(image, mask=image.split()[3])\n return no_alpha\n\n def _get_encoded_tile(\n self,\n tile_point: Point,\n z: float,\n path: str\n ) -> bytes:\n \"\"\"Return image bytes for tile. 
Transparency is removed and tile is\n encoded as jpeg.\n\n Parameters\n ----------\n tile_point: Point\n Tile position to get.\n z: float\n Focal plane of tile to get.\n path: str\n Optical path of tile to get.\n\n Returns\n ----------\n bytes\n Tile bytes.\n \"\"\"\n if z not in self.focal_planes:\n raise WsiDicomNotFoundError(f'focal plane {z}', str(self))\n if path not in self.optical_paths:\n raise WsiDicomNotFoundError(f'optical path {path}', str(self))\n tile = self._get_region(\n Region(tile_point*self.tile_size, self.tile_size)\n )\n if tile is None:\n return self._get_blank_encoded_frame(self.tile_size)\n return self._encode(np.asarray(tile))\n\n def _get_decoded_tile(\n self,\n tile_point: Point,\n z: float,\n path: str\n ) -> Image.Image:\n \"\"\"Return Image for tile. Image mode is RGB.\n\n Parameters\n ----------\n tile_point: Point\n Tile position to get.\n z: float\n Focal plane of tile to get.\n path: str\n Optical path of tile to get.\n\n Returns\n ----------\n Image.Image\n Tile as Image.\n \"\"\"\n if z not in self.focal_planes:\n raise WsiDicomNotFoundError(f'focal plane {z}', str(self))\n if path not in self.optical_paths:\n raise WsiDicomNotFoundError(f'optical path {path}', str(self))\n tile = self._get_region(\n Region(tile_point*self.tile_size, self.tile_size)\n )\n if tile is None:\n return self._get_blank_decoded_frame(self.tile_size)\n return tile\n\n\nclass OpenSlideDicomizer(MetaDicomizer):\n @classmethod\n def open(\n cls,\n filepath: str,\n modules: Optional[Union[Dataset, Sequence[Dataset]]] = None,\n tile_size: Optional[int] = None,\n include_levels: Optional[Sequence[int]] = None,\n include_label: bool = True,\n include_overview: bool = True,\n include_confidential: bool = True,\n encoding_format: str = 'jpeg',\n encoding_quality: int = 90,\n jpeg_subsampling: str = '422'\n ) -> WsiDicom:\n \"\"\"Open openslide file in filepath as WsiDicom object. Note that\n created instances always has a random UID.\n\n Parameters\n ----------\n filepath: str\n Path to tiff file\n modules: Optional[Union[Dataset, Sequence[Dataset]]] = None\n Module datasets to use in files. If none, use default modules.\n tile_size: Optional[int]\n Tile size to use if not defined by file.\n include_levels: Sequence[int] = None\n Levels to include. If None, include all levels.\n include_label: bool = True\n Inclube label.\n include_overview: bool = True\n Include overview.\n include_confidential: bool = True\n Include confidential metadata. Not implemented.\n encoding_format: str = 'jpeg'\n Encoding format to use if re-encoding. 'jpeg' or 'jpeg2000'.\n encoding_quality: int = 90\n Quality to use if re-encoding. Do not use > 95 for jpeg. Use 100\n for lossless jpeg2000.\n jpeg_subsampling: str = '422'\n Subsampling option if using jpeg for re-encoding. 
Use '444' for\n no subsampling, '422' for 2x2 subsampling.\n\n Returns\n ----------\n WsiDicom\n WsiDicom object of openslide file in filepath.\n \"\"\"\n if tile_size is None:\n raise ValueError(\"Tile size required for open slide\")\n encoder = create_encoder(\n encoding_format,\n encoding_quality,\n subsampling=jpeg_subsampling\n )\n base_dataset = create_base_dataset(modules)\n slide = OpenSlide(filepath)\n instance_number = 0\n level_instances = [\n cls._create_instance(\n OpenSlideLevelImageData(\n slide,\n level_index,\n tile_size,\n encoder\n ),\n base_dataset,\n 'VOLUME',\n instance_number+level_index\n )\n for level_index in range(slide.level_count)\n if include_levels is None or level_index in include_levels\n ]\n instance_number += len(level_instances)\n if include_label and 'label' in slide.associated_images:\n label_instances = [cls._create_instance(\n OpenSlideAssociatedImageData(slide, 'label', encoder),\n base_dataset,\n 'LABEL',\n instance_number\n )]\n else:\n label_instances = []\n instance_number += len(label_instances)\n if include_overview and 'macro' in slide.associated_images:\n overview_instances = [cls._create_instance(\n OpenSlideAssociatedImageData(slide, 'macro', encoder),\n base_dataset,\n 'OVERVIEW',\n instance_number\n )]\n else:\n overview_instances = []\n levels = WsiDicomLevels.open(level_instances)\n labels = WsiDicomLabels.open(label_instances)\n overviews = WsiDicomOverviews.open(overview_instances)\n return cls(levels, labels, overviews)\n\n @staticmethod\n def is_supported(filepath: str) -> bool:\n \"\"\"Return True if file in filepath is supported by OpenSlide.\"\"\"\n return OpenSlide.detect_format(str(filepath)) is not None\n"
] | [
[
"numpy.all",
"numpy.array",
"numpy.asarray",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
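A minimal usage sketch for the OpenSlideDicomizer defined in the row above (the slide path is hypothetical; tile_size must be supplied because open() raises ValueError without it, per the code):

from wsidicomizer.openslide import OpenSlideDicomizer

slide_path = '/path/to/slide.svs'  # hypothetical file path
if OpenSlideDicomizer.is_supported(slide_path):
    # per the docstring above, open() returns a WsiDicom object wrapping the
    # OpenSlide pyramid levels plus any label and overview images
    wsi = OpenSlideDicomizer.open(slide_path, tile_size=512)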
disruptive-technologies/simple-forecast-an | [
"785c4b08857af1b501531bdd1997f3582327b238"
] | [
"forecast/sensor.py"
] | [
"# packages\nimport numpy as np\n\n# project\nimport forecast.helpers as hlp\nimport config.parameters as prm\n\n\nclass Sensor():\n \"\"\"\n One Sensor object for each sensor in project.\n It keeps track of the algorithm state between events.\n When new event_data json is received, iterate algorithm one sample.\n \"\"\"\n\n def __init__(self, device, device_id, args):\n # give to self\n self.device = device\n self.device_id = device_id\n self.args = args\n\n # contains level, trend and season for modelled data\n self.model = {\n 'unixtime': [], # shared unixtime timeaxis\n 'temperature': [], # temperature values\n 'level': [], # modeled level\n 'trend': [], # modeled trend\n 'season': [], # modeled season\n }\n\n # contains all previous forecasts in history\n self.forecast = {\n 'unixtime': [], # shared unixtime timeaxis\n 'temperature': [], # temperature values\n 'residual': [], # forecast residual\n }\n\n # variables\n self.n_samples = 0 # number of event samples received\n self.initialised = False\n self.residual_std = 0\n\n\n def new_event_data(self, event_data):\n \"\"\"\n Receive new event from Director and iterate algorithm.\n\n Parameters\n ----------\n event_data : dict\n Event json containing temperature data.\n\n \"\"\"\n\n # convert timestamp to unixtime\n _, unixtime = hlp.convert_event_data_timestamp(event_data['data']['temperature']['updateTime'])\n\n # append new temperature value\n self.model['unixtime'].append(unixtime)\n self.model['temperature'].append(event_data['data']['temperature']['value'])\n self.n_samples += 1\n\n # initialise holt winters\n if self.n_samples < prm.season_length * prm.n_seasons_init:\n return\n elif not self.initialised:\n self.__initialise_holt_winters()\n else:\n # iterate Holt-Winters\n self.__iterate_holt_winters()\n\n # forecast\n self.__model_forecast()\n\n\n def __initialise_holt_winters(self):\n \"\"\"\n Calculate initial level, trend and seasonal component.\n Based on: https://robjhyndman.com/hyndsight/hw-initialization/\n\n \"\"\"\n\n # convert to numpy array for indexing\n temperature = np.array(self.model['temperature'])\n\n # fit a 3xseason moving average to temperature\n ma = np.zeros(self.n_samples)\n for i in range(self.n_samples):\n # define ma interval\n xl = max(0, i - int(1.5*prm.season_length))\n xr = min(self.n_samples, i + int(1.5*prm.season_length+1))\n\n # mean\n ma[i] = np.mean(temperature[xl:xr])\n\n # subtract moving average\n df = temperature - ma\n\n # generate average seasonal component\n avs = []\n for i in range(prm.season_length):\n avs.append(np.mean([df[i+j*prm.season_length] for j in range(prm.n_seasons_init)]))\n\n # expand average season into own seasonal component\n for i in range(self.n_samples):\n self.model['season'].append(avs[i%len(avs)])\n\n # subtract initial season from original temperature to get adjusted temperature\n adjusted = temperature - np.array(self.model['season'])\n\n # fit a linear trend to adjusted temperature\n xax = np.arange(self.n_samples)\n a, b = hlp.algebraic_linreg(xax, adjusted)\n linreg = a + xax*b\n\n # set initial level, slope, and brutlag deviation\n for i in range(self.n_samples):\n self.model['level'].append(linreg[i])\n self.model['trend'].append(b)\n\n # flip flag\n self.initialised = True\n\n\n def __iterate_holt_winters(self):\n \"\"\"\n Update level, trend and seasonal component of Holt-Winters model.\n\n \"\"\"\n\n # calculate level (l), trend (b), and season (s) components\n l = prm.alpha*(self.model['temperature'][-1] - self.model['season'][-prm.season_length]) + 
(1 - prm.alpha)*(self.model['level'][-1] + self.model['trend'][-1])\n b = prm.beta*(l - self.model['level'][-1]) + (1 - prm.beta)*self.model['trend'][-1]\n s = prm.gamma*(self.model['temperature'][-1] - self.model['level'][-1] - self.model['trend'][-1]) + (1 - prm.gamma)*self.model['season'][-prm.season_length]\n\n # append components\n self.model['level'].append(l)\n self.model['trend'].append(b)\n self.model['season'].append(s)\n\n\n def __model_forecast(self):\n \"\"\"\n Holt-Winters n-step ahead forecasting and prediction interval calculation.\n Forecast based on: https://otexts.com/fpp2/prediction-intervals.html\n Prediction intervals based on: https://otexts.com/fpp2/prediction-intervals.html\n\n \"\"\"\n\n # use average step length the last 24h\n tax = np.array(self.model['unixtime'])[np.array(self.model['unixtime']) > int(self.model['unixtime'][-1])-60*60*24]\n ux_step = np.mean(tax[1:] - tax[:-1])\n\n # forecast value\n fux = (prm.n_step_ahead+1)*ux_step\n fvv = self.model['level'][-1] + prm.n_step_ahead*self.model['trend'][-1] + self.model['season'][-prm.season_length + (prm.n_step_ahead-1)%prm.season_length]\n self.forecast['unixtime'].append(fux)\n self.forecast['temperature'].append(fvv)\n\n # calculate residual\n if len(self.forecast['temperature']) > prm.n_step_ahead:\n res = abs(self.model['temperature'][-1] - self.forecast['temperature'][-prm.n_step_ahead-1])\n else:\n res = 0\n self.forecast['residual'].append(res)\n\n # update residual standard deviation\n self.residual_std = np.std(np.array(self.forecast['residual'])[max(0, len(self.forecast['residual'])-prm.n_forecast):])\n\n\n def get_forecast(self, n):\n \"\"\"\n Forecast n samples into the future using current HW state.\n\n Parameters\n ----------\n n : int\n Number of samples to forecast.\n\n \"\"\"\n\n # initialise empty\n timestamp = np.zeros(n)*np.nan\n temperature = np.zeros(n)*np.nan\n upper_bound = np.zeros(n)*np.nan\n lower_bound = np.zeros(n)*np.nan\n\n if len(self.model['season']) > prm.season_length:\n # use average step length the last 24h\n tax = np.array(self.model['unixtime'])[np.array(self.model['unixtime']) > int(self.model['unixtime'][-1])-60*60*24]\n ux_step = np.mean(tax[1:] - tax[:-1])\n for t in range(n):\n # holt winters forecast\n timestamp[t] = self.model['unixtime'][-1] + (t+1)*ux_step\n temperature[t] = self.model['level'][-1] + t*self.model['trend'][-1] + self.model['season'][-prm.season_length + (t-1)%prm.season_length]\n\n # prediction interval\n k = ((t-1)/prm.season_length)\n upper_bound[t] = temperature[t] + self.residual_std*np.sqrt(k+1)*prm.bound_modifier\n lower_bound[t] = temperature[t] - self.residual_std*np.sqrt(k+1)*prm.bound_modifier\n\n return timestamp, temperature, upper_bound, lower_bound\n\n\n"
] | [
[
"numpy.sqrt",
"numpy.arange",
"numpy.mean",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
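The additive Holt-Winters update performed by __iterate_holt_winters in the row above, restated as a standalone sketch; the smoothing constants here are placeholder values, not the project's config.parameters:

import numpy as np

def holt_winters_step(y_t, level, trend, season, season_length,
                      alpha=0.3, beta=0.05, gamma=0.2):
    # one additive update, mirroring __iterate_holt_winters: the seasonal term
    # is computed from the previous level/trend, exactly as in the code above
    l_new = alpha * (y_t - season[-season_length]) + (1 - alpha) * (level + trend)
    b_new = beta * (l_new - level) + (1 - beta) * trend
    s_new = gamma * (y_t - level - trend) + (1 - gamma) * season[-season_length]
    return l_new, b_new, s_new

# usage sketch on a synthetic seasonal signal
season_length = 24
y = 20.0 + 2.0 * np.sin(np.linspace(0.0, 8.0 * np.pi, 4 * season_length))
level, trend = y[0], 0.0
season = list(y[:season_length] - y[:season_length].mean())
for y_t in y[season_length:]:
    level, trend, s_new = holt_winters_step(y_t, level, trend, season, season_length)
    season.append(s_new)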
KlemenBr/darkgreybox | [
"83848ead4488f15cbf456b967be503e8bd3ae25b"
] | [
"docs/tutorials/util/plot.py"
] | [
"import matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom statsmodels.graphics.tsaplots import plot_pacf\n\n\ndef plot(y, model_result, suptitle=''):\n\n rmse = mean_squared_error(y, model_result.Z) ** 0.5\n r2 = r2_score(y, model_result.Z)\n\n fig, ax = plt.subplots(2, 2, figsize=(12, 8))\n fig.suptitle(f'{suptitle}', fontsize=12, fontweight='bold')\n\n ax[0, 0].plot(y.index, model_result.Z, label='Ti Modelled', alpha=0.75)\n ax[0, 0].plot(y, label='Ti Measured', alpha=0.75)\n if hasattr(model_result, 'Te'):\n ax[0, 0].plot(y.index, model_result.Te, label='Te Modelled', alpha=0.75)\n ax[0, 0].legend()\n ax[0, 0].set_ylabel('[˚C]')\n if abs(rmse) < 100:\n ax[0, 0].set_title(f'RMSE: {rmse:.4f}')\n else:\n ax[0, 0].set_title(f'RMSE: {rmse:.4e}')\n plt.setp(ax[0, 0].get_xticklabels(), rotation=30, horizontalalignment='right')\n\n ax[1, 0].scatter(model_result.Z, y, alpha=0.2)\n ax[1, 0].set_xlabel('Ti Modelled [˚C]')\n ax[1, 0].set_ylabel('Ti Measured [˚C]')\n if abs(r2) < 100:\n ax[1, 0].set_title(f'$R^2$: {r2:.4f}')\n else:\n ax[1, 0].set_title(f'$R^2$: {r2:.4e}')\n plt.setp(ax[0, 1].get_xticklabels(), rotation=30, horizontalalignment='right')\n\n ax[0, 1].plot(y.index, y - model_result.Z, label='Ti Residual', color='black', alpha=0.75)\n ax[0, 1].legend()\n ax[0, 1].set_ylabel('[˚C]')\n ax[0, 1].set_title('Residuals')\n\n plot_pacf(y - model_result.Z, ax=ax[1, 1], lags=50)\n\n fig.tight_layout()\n\n\ndef plot_input_data(df):\n\n # keep this out of pandas plotting capabilities for compatibility\n # with the rest of the plots\n fig, ax = plt.subplots(2, 1, figsize=(12, 8))\n\n ax[0].plot(df.index, df['Ti'], label='Ti', alpha=0.75)\n ax[0].plot(df.index, df['Ta'], label='Ta', alpha=0.75)\n ax[0].legend()\n ax[0].set_ylabel('[˚C]')\n ax[0].set_xticks([], minor=[])\n ax[0].set_xlim(df.index[0], df.index[-1])\n\n ax[1].plot(df.index, df['Ph'], label='Ph', alpha=0.75, color='black')\n ax[1].legend()\n ax[1].set_ylabel('[kWh]')\n ax[1].set_xlim(df.index[0], df.index[-1])\n\n fig.tight_layout()\n"
] | [
[
"sklearn.metrics.r2_score",
"matplotlib.pyplot.subplots",
"sklearn.metrics.mean_squared_error"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
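The fit statistics reported by plot() in the row above, as a self-contained sketch on synthetic numbers (not project data); note that plot() takes the square root of mean_squared_error to report RMSE:

import numpy as np
from sklearn.metrics import mean_squared_error, r2_score

ti_measured = np.array([20.1, 20.4, 20.9, 21.3, 21.0])
ti_modelled = np.array([20.0, 20.5, 21.0, 21.2, 21.1])

rmse = mean_squared_error(ti_measured, ti_modelled) ** 0.5  # same convention as plot()
r2 = r2_score(ti_measured, ti_modelled)
print(f'RMSE: {rmse:.4f}  R^2: {r2:.4f}')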
hebrewsnabla/pyscf_addon | [
"7988d2b3964d75282a817062c1679d792000b5c6"
] | [
"guess.py"
] | [
"from pyscf import gto, scf, dft\nimport numpy as np\ntry:\n from fch2py import fch2py\n import gaussian\nexcept:\n print('fch2py not found. Interface with fch is disabled. Install MOKIT if you need that.')\nimport stability\nimport time\nimport copy\n\ndef gen(xyz, bas, charge, spin, conv='tight', level_shift=0):\n '''for states other than singlets'''\n mol = gto.Mole()\n mol.atom = xyz\n mol.basis = bas\n mol.charge = charge\n mol.spin = spin\n mol.verbose = 4\n mol.build()\n \n mf = scf.UHF(mol)\n if conv == 'loose':\n mf.conv_tol = 1e-6\n mf.max_cycle = 10\n mf.level_shift = level_shift\n mf.kernel()\n\n return mf\n\ndef from_fch_simp(fch, cycle=2):\n mol = gaussian.load_mol_from_fch(fch)\n \n mf = scf.UHF(mol)\n #mf.init_guess = '1e'\n #mf.init_guess_breaksym = True\n mf.max_cycle = 1\n mf.kernel()\n \n # read MOs from .fch(k) file\n nbf = mf.mo_coeff[0].shape[0]\n nif = mf.mo_coeff[0].shape[1]\n S = mol.intor_symmetric('int1e_ovlp')\n Sdiag = S.diagonal()\n alpha_coeff = fch2py(fch, nbf, nif, Sdiag, 'a')\n beta_coeff = fch2py(fch, nbf, nif, Sdiag, 'b')\n mf.mo_coeff = (alpha_coeff, beta_coeff)\n # read done\n dm = mf.make_rdm1()\n mf.max_cycle = cycle\n mf.kernel(dm)\n return mf\n\ndef mix(xyz, bas, charge=0, level_shift=0.0, skipstb=False):\n mol = gto.Mole()\n mol.atom = xyz\n #with open(xyz, 'r') as f:\n # mol.atom = f.read()\n #print(mol.atom)\n mol.basis = bas\n mol.charge = charge\n #mol.output = 'test.pylog'\n mol.verbose = 4\n mol.build()\n\n t1 = time.time() \n mf = scf.RHF(mol)\n mf.conv_tol = 1e-5\n mf.kernel() # Guess by 1e is poor,\n #dm, mo_coeff, mo_energy, mo_occ = init_guess_by_1e(mf)\n #mf.init_guess_breaksym = True\n #mo = (mf.mo_coeff, mf.mo_coeff)\n #occ = (mf.mo_occ, mf.mo_occ)\n dm_mix = init_guess_mixed(mf.mo_coeff, mf.mo_occ)\n mf_mix = scf.UHF(mol)\n mf_mix.level_shift = level_shift\n mf_mix.max_cycle = 100\n mf_mix.kernel(dm0=dm_mix)\n if not mf_mix.converged:\n raise RuntimeError('UHF not converged')\n ss, s = mf_mix.spin_square()\n if s < 0.1:\n print('Warning: S too small, symmetry breaking may be failed')\n \n if not skipstb:\n check_stab(mf_mix)\n\n t2 = time.time()\n print('time for guess: %.3f' % (t2-t1))\n return mf_mix\n\ndef check_stab(mf_mix):\n mf_mix.verbose = 9\n mo, stable = stability.uhf_internal(mf_mix)\n cyc = 0\n while(not stable and cyc < 10):\n mf_mix.verbose = 4\n dm_new = scf.uhf.make_rdm1(mo, mf_mix.mo_occ)\n mf_mix.kernel(dm0=dm_new)\n mf_mix.verbose = 9\n mo, stable = stability.uhf_internal(mf_mix)\n cyc += 1\n if not stable:\n raise RuntimeError('Stablility Opt failed after %d attempts.' % cyc)\n\ndef from_frag(xyz, bas, frags, chgs, spins, cycle=2, xc=None):\n mol = gto.Mole()\n mol.atom = xyz\n mol.basis = bas\n mol.build()\n \n dm, mo, occ = guess_frag(mol, frags, chgs, spins)\n if xc is None:\n mf = scf.UHF(mol)\n else:\n mf = dft.UKS(mol)\n mf.xc = xc\n mf.verbose = 4\n #mf.conv_tol = 1e-2\n mf.max_cycle = cycle\n mf.kernel(dm0 = dm)\n ss, s = mf.spin_square()\n if s < 0.1:\n print('Warning: S too small, symmetry breaking may be failed')\n return mf\n\n\ndef guess_frag(mol, frags, chgs, spins):\n '''\n frags: e.g. 
[[0], [1]] for N2\n '''\n #mol.build()\n print('generating fragment guess')\n atom = mol.format_atom(mol.atom, unit=1)\n #print(atom)\n fraga, fragb = frags\n chga, chgb = chgs\n spina, spinb = spins\n atoma = [atom[i] for i in fraga]\n atomb = [atom[i] for i in fragb]\n print('fragments:', atoma, atomb)\n ca_a, cb_a, na_a, nb_a = do_uhf(atoma, mol.basis, chga, spina)\n ca_b, cb_b, na_b, nb_b = do_uhf(atomb, mol.basis, chgb, spinb)\n print(' na nb')\n print('atom1 %2d %2d' % (na_a, nb_a))\n print('atom2 %2d %2d' % (na_b, nb_b))\n #print(mo_a)\n #print(mo_b)\n nbasa = ca_a.shape[0]\n nbasb = ca_b.shape[0]\n ca = np.vstack((\n np.hstack((ca_a[:,:na_a], np.zeros((nbasa,na_b)), ca_a[:,na_a:], np.zeros((nbasa, ca_b.shape[1]-na_b)) )),\n np.hstack((np.zeros((nbasb, na_a)), ca_b[:,:na_b], np.zeros((nbasb, ca_a.shape[1]-na_a)), ca_b[:,na_b:]))\n ))\n cb = np.vstack((\n np.hstack((cb_a[:,:nb_a], np.zeros((nbasa,nb_b)), cb_a[:,nb_a:], np.zeros((nbasa, cb_b.shape[1]-nb_b)) )),\n np.hstack((np.zeros((nbasb, nb_a)), cb_b[:,:nb_b], np.zeros((nbasb, cb_a.shape[1]-nb_a)), cb_b[:,nb_b:]))\n ))\n mo = np.array([ca, cb])\n na = na_a + na_b\n nb = nb_a + nb_b\n #print(ca.shape, cb.shape)\n occa = np.hstack((np.ones(na), np.zeros(ca.shape[1]-na))) \n occb = np.hstack((np.ones(nb), np.zeros(cb.shape[1]-nb)))\n occ = np.array([occa, occb]) \n #print(occ)\n dm = scf.uhf.make_rdm1(mo, occ)\n #print(dm.shape)\n return dm, mo, occ\n \ndef do_uhf(atoma, basisa, chga, spina):\n mola = gto.Mole()\n mola.atom = atoma\n mola.basis = basisa\n mola.charge = chga\n mola.spin = spina\n mola.build()\n mfa = scf.UHF(mola)\n mfa.kernel()\n #print(mfa.nelec)\n ca, cb = mfa.mo_coeff\n na, nb = mfa.nelec\n return ca, cb, na, nb\n\n'''\nmodified from pyscf/examples/scf/56-h2_symm_breaking.py, by James D Whitfield\nThe initial guess is obtained by mixing the HOMO and LUMO and is implemented\nas a function that can be used in other applications.\nSee also 16-h2_scan.py, 30-scan_pes.py, 32-break_spin_symm.py\n'''\n\ndef init_guess_by_1e(rhf, mol=None):\n h1e = rhf.get_hcore(mol)\n s1e = rhf.get_ovlp(mol)\n mo_energy, mo_coeff = scf.hf.eig(h1e, s1e)\n mo_occ = rhf.get_occ(mo_energy, mo_coeff)\n return rhf.make_rdm1(mo_coeff, mo_occ), mo_coeff, mo_energy, mo_occ\n\ndef init_guess_mixed(mo_coeff, mo_occ, mixing_parameter=np.pi/4):\n ''' Generate density matrix with broken spatial and spin symmetry by mixing\n HOMO and LUMO orbitals following ansatz in Szabo and Ostlund, Sec 3.8.7.\n \n psi_1a = numpy.cos(q)*psi_homo + numpy.sin(q)*psi_lumo\n psi_1b = numpy.cos(q)*psi_homo - numpy.sin(q)*psi_lumo\n \n psi_2a = -numpy.sin(q)*psi_homo + numpy.cos(q)*psi_lumo\n psi_2b = numpy.sin(q)*psi_homo + numpy.cos(q)*psi_lumo\n Returns: \n Density matrices, a list of 2D ndarrays for alpha and beta spins\n '''\n # opt: q, mixing parameter 0 < q < 2 pi\n\n homo_idx=0\n lumo_idx=1\n\n for i in range(len(mo_occ)-1):\n if mo_occ[i]>0 and mo_occ[i+1]<0.1:\n homo_idx=i\n lumo_idx=i+1\n\n psi_homo=mo_coeff[:, homo_idx]\n psi_lumo=mo_coeff[:, lumo_idx]\n \n Ca=copy.deepcopy(mo_coeff)\n Cb=copy.deepcopy(mo_coeff)\n\n #mix homo and lumo of alpha and beta coefficients\n q=mixing_parameter\n\n Ca[:,homo_idx] = np.cos(q)*psi_homo + np.sin(q)*psi_lumo\n Cb[:,homo_idx] = np.cos(q)*psi_homo - np.sin(q)*psi_lumo\n\n Ca[:,lumo_idx] = -np.sin(q)*psi_homo + np.cos(q)*psi_lumo\n Cb[:,lumo_idx] = np.sin(q)*psi_homo + np.cos(q)*psi_lumo\n\n dm = scf.uhf.make_rdm1( (Ca,Cb), (mo_occ,mo_occ) )\n return dm\n\n"
] | [
[
"numpy.cos",
"numpy.sin",
"numpy.ones",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
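The broken-symmetry guess built by init_guess_mixed in the row above rotates the HOMO and LUMO by a mixing angle q, with opposite signs for the alpha and beta spins. A numpy-only sketch with toy two-component orbital vectors (not a real SCF calculation) illustrates the rotation:

import numpy as np

q = np.pi / 4  # mixing angle, the default used by init_guess_mixed
psi_homo = np.array([1.0, 0.0])  # toy orbital coefficient vectors
psi_lumo = np.array([0.0, 1.0])

# alpha and beta frontier orbitals rotated in opposite senses -> spin symmetry breaking
phi_1a = np.cos(q) * psi_homo + np.sin(q) * psi_lumo
phi_1b = np.cos(q) * psi_homo - np.sin(q) * psi_lumo
phi_2a = -np.sin(q) * psi_homo + np.cos(q) * psi_lumo
phi_2b = np.sin(q) * psi_homo + np.cos(q) * psi_lumo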
eric-erki/biopython | [
"2b4f7474766427c4ff4bf01e66b436f23456208e"
] | [
"Tests/test_pairwise_aligner.py"
] | [
"# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\n\"\"\"Tests for pairwise aligner module.\"\"\"\n\nimport array\nimport os\nimport unittest\n\nfrom Bio import Align, SeqIO\n\n\nclass TestAlignerProperties(unittest.TestCase):\n def test_aligner_property_epsilon(self):\n aligner = Align.PairwiseAligner()\n self.assertAlmostEqual(aligner.epsilon, 1.0e-6)\n aligner.epsilon = 1.0e-4\n self.assertAlmostEqual(aligner.epsilon, 1.0e-4)\n aligner.epsilon = 1.0e-8\n self.assertAlmostEqual(aligner.epsilon, 1.0e-8)\n with self.assertRaises(TypeError):\n aligner.epsilon = \"not a number\"\n with self.assertRaises(TypeError):\n aligner.epsilon = None\n\n def test_aligner_property_mode(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n self.assertEqual(aligner.mode, \"global\")\n aligner.mode = \"local\"\n self.assertEqual(aligner.mode, \"local\")\n with self.assertRaises(ValueError):\n aligner.mode = \"wrong\"\n\n def test_aligner_property_match_mismatch(self):\n aligner = Align.PairwiseAligner()\n aligner.match_score = 3.0\n self.assertAlmostEqual(aligner.match_score, 3.0)\n aligner.mismatch_score = -2.0\n self.assertAlmostEqual(aligner.mismatch_score, -2.0)\n with self.assertRaises(ValueError):\n aligner.match_score = \"not a number\"\n with self.assertRaises(ValueError):\n aligner.mismatch_score = \"not a number\"\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 3.000000\n mismatch_score: -2.000000\n target_internal_open_gap_score: 0.000000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: 0.000000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: 0.000000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: 0.000000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: 0.000000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: 0.000000\n query_right_extend_gap_score: 0.000000\n mode: global\n\"\"\",\n )\n\n def test_aligner_property_gapscores(self):\n aligner = Align.PairwiseAligner()\n open_score, extend_score = (-5, -1)\n aligner.target_open_gap_score = open_score\n aligner.target_extend_gap_score = extend_score\n self.assertAlmostEqual(aligner.target_open_gap_score, open_score)\n self.assertAlmostEqual(aligner.target_extend_gap_score, extend_score)\n open_score, extend_score = (-6, -7)\n aligner.query_open_gap_score = open_score\n aligner.query_extend_gap_score = extend_score\n self.assertAlmostEqual(aligner.query_open_gap_score, open_score)\n self.assertAlmostEqual(aligner.query_extend_gap_score, extend_score)\n open_score, extend_score = (-3, -9)\n aligner.target_end_open_gap_score = open_score\n aligner.target_end_extend_gap_score = extend_score\n self.assertAlmostEqual(aligner.target_end_open_gap_score, open_score)\n self.assertAlmostEqual(aligner.target_end_extend_gap_score, extend_score)\n open_score, extend_score = (-1, -2)\n aligner.query_end_open_gap_score = open_score\n aligner.query_end_extend_gap_score = extend_score\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -5.000000\n target_internal_extend_gap_score: -1.000000\n target_left_open_gap_score: -3.000000\n target_left_extend_gap_score: -9.000000\n 
target_right_open_gap_score: -3.000000\n target_right_extend_gap_score: -9.000000\n query_internal_open_gap_score: -6.000000\n query_internal_extend_gap_score: -7.000000\n query_left_open_gap_score: -1.000000\n query_left_extend_gap_score: -2.000000\n query_right_open_gap_score: -1.000000\n query_right_extend_gap_score: -2.000000\n mode: global\n\"\"\",\n )\n self.assertAlmostEqual(aligner.query_end_open_gap_score, open_score)\n self.assertAlmostEqual(aligner.query_end_extend_gap_score, extend_score)\n score = -3\n aligner.target_gap_score = score\n self.assertAlmostEqual(aligner.target_gap_score, score)\n self.assertAlmostEqual(aligner.target_open_gap_score, score)\n self.assertAlmostEqual(aligner.target_extend_gap_score, score)\n score = -2\n aligner.query_gap_score = score\n self.assertAlmostEqual(aligner.query_gap_score, score)\n self.assertAlmostEqual(aligner.query_open_gap_score, score)\n self.assertAlmostEqual(aligner.query_extend_gap_score, score)\n score = -4\n aligner.target_end_gap_score = score\n self.assertAlmostEqual(aligner.target_end_gap_score, score)\n self.assertAlmostEqual(aligner.target_end_open_gap_score, score)\n self.assertAlmostEqual(aligner.target_end_extend_gap_score, score)\n self.assertAlmostEqual(aligner.target_left_gap_score, score)\n self.assertAlmostEqual(aligner.target_left_open_gap_score, score)\n self.assertAlmostEqual(aligner.target_left_extend_gap_score, score)\n self.assertAlmostEqual(aligner.target_right_gap_score, score)\n self.assertAlmostEqual(aligner.target_right_open_gap_score, score)\n self.assertAlmostEqual(aligner.target_right_extend_gap_score, score)\n score = -5\n aligner.query_end_gap_score = score\n self.assertAlmostEqual(aligner.query_end_gap_score, score)\n self.assertAlmostEqual(aligner.query_end_open_gap_score, score)\n self.assertAlmostEqual(aligner.query_end_extend_gap_score, score)\n self.assertAlmostEqual(aligner.query_left_gap_score, score)\n self.assertAlmostEqual(aligner.query_left_open_gap_score, score)\n self.assertAlmostEqual(aligner.query_left_extend_gap_score, score)\n self.assertAlmostEqual(aligner.query_right_gap_score, score)\n self.assertAlmostEqual(aligner.query_right_open_gap_score, score)\n self.assertAlmostEqual(aligner.query_right_extend_gap_score, score)\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -3.000000\n target_internal_extend_gap_score: -3.000000\n target_left_open_gap_score: -4.000000\n target_left_extend_gap_score: -4.000000\n target_right_open_gap_score: -4.000000\n target_right_extend_gap_score: -4.000000\n query_internal_open_gap_score: -2.000000\n query_internal_extend_gap_score: -2.000000\n query_left_open_gap_score: -5.000000\n query_left_extend_gap_score: -5.000000\n query_right_open_gap_score: -5.000000\n query_right_extend_gap_score: -5.000000\n mode: global\n\"\"\",\n )\n with self.assertRaises(ValueError):\n aligner.target_gap_score = \"wrong\"\n with self.assertRaises(ValueError):\n aligner.query_gap_score = \"wrong\"\n with self.assertRaises(TypeError):\n aligner.target_end_gap_score = \"wrong\"\n with self.assertRaises(TypeError):\n aligner.query_end_gap_score = \"wrong\"\n\n def test_aligner_nonexisting_property(self):\n aligner = Align.PairwiseAligner()\n with self.assertRaises(AttributeError):\n aligner.no_such_property\n with self.assertRaises(AttributeError):\n aligner.no_such_property = 1\n\n\nclass TestPairwiseGlobal(unittest.TestCase):\n 
def test_needlemanwunsch_simple1(self):\n seq1 = \"GAACT\"\n seq2 = \"GAT\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: 0.000000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: 0.000000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: 0.000000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: 0.000000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: 0.000000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: 0.000000\n query_right_extend_gap_score: 0.000000\n mode: global\n\"\"\",\n )\n self.assertEqual(aligner.algorithm, \"Needleman-Wunsch\")\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGAACT\n||--|\nGA--T\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (4, 5)), ((0, 2), (2, 3))))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGAACT\n|-|-|\nG-A-T\n\"\"\",\n )\n self.assertEqual(\n alignment.aligned, (((0, 1), (2, 3), (4, 5)), ((0, 1), (1, 2), (2, 3)))\n )\n\n def test_align_affine1_score(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.match_score = 0\n aligner.mismatch_score = -1\n aligner.open_gap_score = -5\n aligner.extend_gap_score = -1\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 0.000000\n mismatch_score: -1.000000\n target_internal_open_gap_score: -5.000000\n target_internal_extend_gap_score: -1.000000\n target_left_open_gap_score: -5.000000\n target_left_extend_gap_score: -1.000000\n target_right_open_gap_score: -5.000000\n target_right_extend_gap_score: -1.000000\n query_internal_open_gap_score: -5.000000\n query_internal_extend_gap_score: -1.000000\n query_left_open_gap_score: -5.000000\n query_left_extend_gap_score: -1.000000\n query_right_open_gap_score: -5.000000\n query_right_extend_gap_score: -1.000000\n mode: global\n\"\"\",\n )\n score = aligner.score(\"CC\", \"ACCT\")\n self.assertAlmostEqual(score, -7.0)\n\n\nclass TestPairwiseLocal(unittest.TestCase):\n def test_smithwaterman(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.gap_score = -0.1\n self.assertEqual(aligner.algorithm, \"Smith-Waterman\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.100000\n target_internal_extend_gap_score: -0.100000\n target_left_open_gap_score: -0.100000\n target_left_extend_gap_score: -0.100000\n target_right_open_gap_score: -0.100000\n target_right_extend_gap_score: -0.100000\n query_internal_open_gap_score: -0.100000\n query_internal_extend_gap_score: -0.100000\n query_left_open_gap_score: -0.100000\n query_left_extend_gap_score: -0.100000\n query_right_open_gap_score: -0.100000\n query_right_extend_gap_score: -0.100000\n mode: local\n\"\"\",\n )\n score = aligner.score(\"AwBw\", 
\"zABz\")\n self.assertAlmostEqual(score, 1.9)\n alignments = aligner.align(\"AwBw\", \"zABz\")\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\n AwBw\n |-| \nzA-Bz\n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 1), (2, 3)), ((1, 2), (2, 3))))\n\n def test_gotoh_local(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.open_gap_score = -0.1\n aligner.extend_gap_score = 0.0\n self.assertEqual(aligner.algorithm, \"Gotoh local alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.100000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: -0.100000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: -0.100000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: -0.100000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: -0.100000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: -0.100000\n query_right_extend_gap_score: 0.000000\n mode: local\n\"\"\",\n )\n score = aligner.score(\"AwBw\", \"zABz\")\n self.assertAlmostEqual(score, 1.9)\n alignments = aligner.align(\"AwBw\", \"zABz\")\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\n AwBw\n |-| \nzA-Bz\n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 1), (2, 3)), ((1, 2), (2, 3))))\n\n\nclass TestUnknownCharacter(unittest.TestCase):\n def test_needlemanwunsch_simple1(self):\n seq1 = \"GACT\"\n seq2 = \"GA?T\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.gap_score = -1.0\n aligner.mismatch_score = -1.0\n aligner.wildcard = \"?\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n||.|\nGA?T\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 4),), ((0, 4),)))\n seq2 = \"GAXT\"\n aligner.wildcard = \"X\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n||.|\nGAXT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 4),), ((0, 4),)))\n aligner.wildcard = None\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 2.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 2.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n||.|\nGAXT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 4),), ((0, 4),)))\n\n def test_needlemanwunsch_simple2(self):\n seq1 = \"GA?AT\"\n seq2 = \"GAA?T\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.wildcard = \"?\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 4.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n 
self.assertAlmostEqual(alignment.score, 4.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGA?A-T\n||-|-|\nGA-A?T\n\"\"\",\n )\n self.assertEqual(\n alignment.aligned, (((0, 2), (3, 4), (4, 5)), ((0, 2), (2, 3), (4, 5)))\n )\n seq1 = \"GAXAT\"\n seq2 = \"GAAXT\"\n aligner.wildcard = \"X\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 4.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 4.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGAXA-T\n||-|-|\nGA-AXT\n\"\"\",\n )\n self.assertEqual(\n alignment.aligned, (((0, 2), (3, 4), (4, 5)), ((0, 2), (2, 3), (4, 5)))\n )\n\n\nclass TestPairwiseOpenPenalty(unittest.TestCase):\n def test_match_score_open_penalty1(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.match_score = 2\n aligner.mismatch_score = -1\n aligner.open_gap_score = -0.1\n aligner.extend_gap_score = 0.0\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 2.000000\n mismatch_score: -1.000000\n target_internal_open_gap_score: -0.100000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: -0.100000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: -0.100000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: -0.100000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: -0.100000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: -0.100000\n query_right_extend_gap_score: 0.000000\n mode: global\n\"\"\",\n )\n seq1 = \"AA\"\n seq2 = \"A\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 1.9)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nAA\n-|\n-A\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((1, 2),), ((0, 1),)))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nAA\n|-\nA-\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1),), ((0, 1),)))\n\n def test_match_score_open_penalty2(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.match_score = 1.5\n aligner.mismatch_score = 0.0\n aligner.open_gap_score = -0.1\n aligner.extend_gap_score = 0.0\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.500000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.100000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: -0.100000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: -0.100000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: -0.100000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: -0.100000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: -0.100000\n query_right_extend_gap_score: 0.000000\n mode: global\n\"\"\",\n )\n seq1 = \"GAA\"\n seq2 = \"GA\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 2.9)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = 
alignments[0]\n self.assertAlmostEqual(alignment.score, 2.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGAA\n|-|\nG-A\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (2, 3)), ((0, 1), (1, 2))))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 2.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGAA\n||-\nGA-\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2),), ((0, 2),)))\n\n def test_match_score_open_penalty3(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.query_open_gap_score = -0.1\n aligner.query_extend_gap_score = 0.0\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: 0.000000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: 0.000000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: 0.000000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: -0.100000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: -0.100000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: -0.100000\n query_right_extend_gap_score: 0.000000\n mode: global\n\"\"\",\n )\n seq1 = \"GAACT\"\n seq2 = \"GAT\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 2.9)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 2.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGAACT\n||--|\nGA--T\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (4, 5)), ((0, 2), (2, 3))))\n\n def test_match_score_open_penalty4(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.mismatch_score = -2.0\n aligner.open_gap_score = -0.1\n aligner.extend_gap_score = 0.0\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: -2.000000\n target_internal_open_gap_score: -0.100000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: -0.100000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: -0.100000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: -0.100000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: -0.100000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: -0.100000\n query_right_extend_gap_score: 0.000000\n mode: global\n\"\"\",\n )\n seq1 = \"GCT\"\n seq2 = \"GATA\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 1.7)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.7)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nG-CT-\n|--|-\nGA-TA\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (2, 3)), ((0, 1), (2, 3))))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 1.7)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGC-T-\n|--|-\nG-ATA\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (2, 3)), ((0, 1), (2, 3))))\n\n\nclass TestPairwiseExtendPenalty(unittest.TestCase):\n def test_extend_penalty1(self):\n aligner = Align.PairwiseAligner()\n 
aligner.mode = \"global\"\n aligner.open_gap_score = -0.2\n aligner.extend_gap_score = -0.5\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.200000\n target_internal_extend_gap_score: -0.500000\n target_left_open_gap_score: -0.200000\n target_left_extend_gap_score: -0.500000\n target_right_open_gap_score: -0.200000\n target_right_extend_gap_score: -0.500000\n query_internal_open_gap_score: -0.200000\n query_internal_extend_gap_score: -0.500000\n query_left_open_gap_score: -0.200000\n query_left_extend_gap_score: -0.500000\n query_right_open_gap_score: -0.200000\n query_right_extend_gap_score: -0.500000\n mode: global\n\"\"\",\n )\n seq1 = \"GACT\"\n seq2 = \"GT\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 1.3)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.3)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n|--|\nG--T\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (3, 4)), ((0, 1), (1, 2))))\n\n def test_extend_penalty2(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.open_gap_score = -0.2\n aligner.extend_gap_score = -1.5\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.200000\n target_internal_extend_gap_score: -1.500000\n target_left_open_gap_score: -0.200000\n target_left_extend_gap_score: -1.500000\n target_right_open_gap_score: -0.200000\n target_right_extend_gap_score: -1.500000\n query_internal_open_gap_score: -0.200000\n query_internal_extend_gap_score: -1.500000\n query_left_open_gap_score: -0.200000\n query_left_extend_gap_score: -1.500000\n query_right_open_gap_score: -0.200000\n query_right_extend_gap_score: -1.500000\n mode: global\n\"\"\",\n )\n seq1 = \"GACT\"\n seq2 = \"GT\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 0.6)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 0.6)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n-.-|\n-G-T\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((1, 2), (3, 4)), ((0, 1), (1, 2))))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 0.6)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n|-.-\nG-T-\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (2, 3)), ((0, 1), (1, 2))))\n\n\nclass TestPairwisePenalizeExtendWhenOpening(unittest.TestCase):\n def test_penalize_extend_when_opening(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.open_gap_score = -1.7\n aligner.extend_gap_score = -1.5\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -1.700000\n target_internal_extend_gap_score: -1.500000\n target_left_open_gap_score: -1.700000\n target_left_extend_gap_score: -1.500000\n target_right_open_gap_score: 
-1.700000\n target_right_extend_gap_score: -1.500000\n query_internal_open_gap_score: -1.700000\n query_internal_extend_gap_score: -1.500000\n query_left_open_gap_score: -1.700000\n query_left_extend_gap_score: -1.500000\n query_right_open_gap_score: -1.700000\n query_right_extend_gap_score: -1.500000\n mode: global\n\"\"\",\n )\n seq1 = \"GACT\"\n seq2 = \"GT\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, -1.2)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, -1.2)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n|--|\nG--T\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (3, 4)), ((0, 1), (1, 2))))\n\n\nclass TestPairwisePenalizeEndgaps(unittest.TestCase):\n def test_penalize_end_gaps(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.open_gap_score = -0.2\n aligner.extend_gap_score = -0.8\n end_score = 0.0\n aligner.target_end_gap_score = end_score\n aligner.query_end_gap_score = end_score\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.200000\n target_internal_extend_gap_score: -0.800000\n target_left_open_gap_score: 0.000000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: 0.000000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: -0.200000\n query_internal_extend_gap_score: -0.800000\n query_left_open_gap_score: 0.000000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: 0.000000\n query_right_extend_gap_score: 0.000000\n mode: global\n\"\"\",\n )\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n seq1 = \"GACT\"\n seq2 = \"GT\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 1.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 3)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n--.|\n--GT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((2, 4),), ((0, 2),)))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 1.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n|--|\nG--T\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (3, 4)), ((0, 1), (1, 2))))\n alignment = alignments[2]\n self.assertAlmostEqual(alignment.score, 1.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGACT\n|.--\nGT--\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2),), ((0, 2),)))\n\n\nclass TestPairwiseSeparateGapPenalties(unittest.TestCase):\n def test_separate_gap_penalties1(self):\n seq1 = \"GAT\"\n seq2 = \"GTCT\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n open_score, extend_score = (-0.3, 0)\n aligner.target_open_gap_score = open_score\n aligner.target_extend_gap_score = extend_score\n aligner.target_end_open_gap_score = open_score\n aligner.target_end_extend_gap_score = extend_score\n open_score, extend_score = (-0.8, 0)\n aligner.query_open_gap_score = open_score\n aligner.query_extend_gap_score = extend_score\n aligner.query_end_open_gap_score = open_score\n aligner.query_end_extend_gap_score = extend_score\n self.assertEqual(aligner.algorithm, \"Gotoh local alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n 
match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.300000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: -0.300000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: -0.300000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: -0.800000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: -0.800000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: -0.800000\n query_right_extend_gap_score: 0.000000\n mode: local\n\"\"\",\n )\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 1.7)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.7)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nG-AT\n|-.|\nGTCT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (1, 3)), ((0, 1), (2, 4))))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 1.7)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGA-T\n|.-|\nGTCT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (2, 3)), ((0, 2), (3, 4))))\n\n def test_separate_gap_penalties2(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.target_open_gap_score = -0.3\n aligner.target_extend_gap_score = 0.0\n aligner.query_open_gap_score = -0.2\n aligner.query_extend_gap_score = 0.0\n self.assertEqual(aligner.algorithm, \"Gotoh local alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.300000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: -0.300000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: -0.300000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: -0.200000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: -0.200000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: -0.200000\n query_right_extend_gap_score: 0.000000\n mode: local\n\"\"\",\n )\n seq1 = \"GAT\"\n seq2 = \"GTCT\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 1.8)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.8)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGAT \n|-| \nG-TCT\n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 1), (2, 3)), ((0, 1), (1, 2))))\n\n\nclass TestPairwiseSeparateGapPenaltiesWithExtension(unittest.TestCase):\n def test_separate_gap_penalties_with_extension(self):\n seq1 = \"GAAT\"\n seq2 = \"GTCCT\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n open_score, extend_score = (-0.1, 0)\n aligner.target_open_gap_score = open_score\n aligner.target_extend_gap_score = extend_score\n aligner.target_end_open_gap_score = open_score\n aligner.target_end_extend_gap_score = extend_score\n score = -0.1\n aligner.query_gap_score = score\n aligner.query_end_gap_score = score\n self.assertEqual(aligner.algorithm, \"Gotoh local alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.100000\n target_internal_extend_gap_score: 0.000000\n target_left_open_gap_score: 
-0.100000\n target_left_extend_gap_score: 0.000000\n target_right_open_gap_score: -0.100000\n target_right_extend_gap_score: 0.000000\n query_internal_open_gap_score: -0.100000\n query_internal_extend_gap_score: -0.100000\n query_left_open_gap_score: -0.100000\n query_left_extend_gap_score: -0.100000\n query_right_open_gap_score: -0.100000\n query_right_extend_gap_score: -0.100000\n mode: local\n\"\"\",\n )\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 1.9)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 3)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nG-AAT\n|-..|\nGTCCT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (1, 4)), ((0, 1), (2, 5))))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGA-AT\n|.-.|\nGTCCT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (2, 4)), ((0, 2), (3, 5))))\n alignment = alignments[2]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGAA-T\n|..-|\nGTCCT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 3), (3, 4)), ((0, 3), (4, 5))))\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 3)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nG-AAT\n|-..|\nGTCCT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 1), (1, 4)), ((0, 1), (2, 5))))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGA-AT\n|.-.|\nGTCCT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (2, 4)), ((0, 2), (3, 5))))\n alignment = alignments[2]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nGAA-T\n|..-|\nGTCCT\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 3), (3, 4)), ((0, 3), (4, 5))))\n\n\nclass TestPairwiseMatchDictionary(unittest.TestCase):\n\n match_dict = {(\"A\", \"A\"): 1.5, (\"A\", \"T\"): 0.5, (\"T\", \"A\"): 0.5, (\"T\", \"T\"): 1.0}\n\n def test_match_dictionary1(self):\n try:\n from Bio.Align import substitution_matrices\n except ImportError:\n return\n substitution_matrix = substitution_matrices.Array(data=self.match_dict)\n seq1 = \"ATAT\"\n seq2 = \"ATT\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.substitution_matrix = substitution_matrix\n aligner.open_gap_score = -0.5\n aligner.extend_gap_score = 0.0\n self.assertEqual(aligner.algorithm, \"Gotoh local alignment algorithm\")\n lines = str(aligner).splitlines()\n self.assertEqual(len(lines), 15)\n self.assertEqual(lines[0], \"Pairwise sequence aligner with parameters\")\n line = lines[1]\n prefix = \" substitution_matrix: <Array object at \"\n suffix = \">\"\n self.assertTrue(line.startswith(prefix))\n self.assertTrue(line.endswith(suffix))\n address = int(line[len(prefix) : -len(suffix)], 16)\n self.assertEqual(lines[2], \" target_internal_open_gap_score: -0.500000\")\n self.assertEqual(lines[3], \" target_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[4], \" target_left_open_gap_score: -0.500000\")\n self.assertEqual(lines[5], \" target_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[6], \" target_right_open_gap_score: -0.500000\")\n self.assertEqual(lines[7], \" target_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[8], \" 
query_internal_open_gap_score: -0.500000\")\n self.assertEqual(lines[9], \" query_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[10], \" query_left_open_gap_score: -0.500000\")\n self.assertEqual(lines[11], \" query_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[12], \" query_right_open_gap_score: -0.500000\")\n self.assertEqual(lines[13], \" query_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[14], \" mode: local\")\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nATAT\n||. \nATT \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 3),), ((0, 3),)))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nATAT\n||-|\nAT-T\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (3, 4)), ((0, 2), (2, 3))))\n\n def test_match_dictionary2(self):\n try:\n from Bio.Align import substitution_matrices\n except ImportError:\n return\n substitution_matrix = substitution_matrices.Array(data=self.match_dict)\n seq1 = \"ATAT\"\n seq2 = \"ATT\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.substitution_matrix = substitution_matrix\n aligner.open_gap_score = -1.0\n aligner.extend_gap_score = 0.0\n lines = str(aligner).splitlines()\n self.assertEqual(len(lines), 15)\n self.assertEqual(lines[0], \"Pairwise sequence aligner with parameters\")\n line = lines[1]\n prefix = \" substitution_matrix: <Array object at \"\n suffix = \">\"\n self.assertTrue(line.startswith(prefix))\n self.assertTrue(line.endswith(suffix))\n address = int(line[len(prefix) : -len(suffix)], 16)\n self.assertEqual(lines[2], \" target_internal_open_gap_score: -1.000000\")\n self.assertEqual(lines[3], \" target_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[4], \" target_left_open_gap_score: -1.000000\")\n self.assertEqual(lines[5], \" target_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[6], \" target_right_open_gap_score: -1.000000\")\n self.assertEqual(lines[7], \" target_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[8], \" query_internal_open_gap_score: -1.000000\")\n self.assertEqual(lines[9], \" query_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[10], \" query_left_open_gap_score: -1.000000\")\n self.assertEqual(lines[11], \" query_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[12], \" query_right_open_gap_score: -1.000000\")\n self.assertEqual(lines[13], \" query_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[14], \" mode: local\")\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nATAT\n||. 
\nATT \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 3),), ((0, 3),)))\n\n def test_match_dictionary3(self):\n try:\n from Bio.Align import substitution_matrices\n except ImportError:\n return\n substitution_matrix = substitution_matrices.Array(data=self.match_dict)\n seq1 = \"ATT\"\n seq2 = \"ATAT\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.substitution_matrix = substitution_matrix\n aligner.open_gap_score = -1.0\n aligner.extend_gap_score = 0.0\n lines = str(aligner).splitlines()\n self.assertEqual(len(lines), 15)\n self.assertEqual(lines[0], \"Pairwise sequence aligner with parameters\")\n line = lines[1]\n prefix = \" substitution_matrix: <Array object at \"\n suffix = \">\"\n self.assertTrue(line.startswith(prefix))\n self.assertTrue(line.endswith(suffix))\n address = int(line[len(prefix) : -len(suffix)], 16)\n self.assertEqual(lines[2], \" target_internal_open_gap_score: -1.000000\")\n self.assertEqual(lines[3], \" target_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[4], \" target_left_open_gap_score: -1.000000\")\n self.assertEqual(lines[5], \" target_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[6], \" target_right_open_gap_score: -1.000000\")\n self.assertEqual(lines[7], \" target_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[8], \" query_internal_open_gap_score: -1.000000\")\n self.assertEqual(lines[9], \" query_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[10], \" query_left_open_gap_score: -1.000000\")\n self.assertEqual(lines[11], \" query_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[12], \" query_right_open_gap_score: -1.000000\")\n self.assertEqual(lines[13], \" query_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[14], \" mode: local\")\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nATT \n||. 
\nATAT\n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 3),), ((0, 3),)))\n\n def test_match_dictionary4(self):\n try:\n from Bio.Align import substitution_matrices\n except ImportError:\n return\n substitution_matrix = substitution_matrices.Array(alphabet=\"AT\", dims=2)\n self.assertEqual(substitution_matrix.shape, (2, 2))\n substitution_matrix.update(self.match_dict)\n seq1 = \"ATAT\"\n seq2 = \"ATT\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.substitution_matrix = substitution_matrix\n aligner.open_gap_score = -0.5\n aligner.extend_gap_score = 0.0\n self.assertEqual(aligner.algorithm, \"Gotoh local alignment algorithm\")\n lines = str(aligner).splitlines()\n self.assertEqual(len(lines), 15)\n self.assertEqual(lines[0], \"Pairwise sequence aligner with parameters\")\n line = lines[1]\n prefix = \" substitution_matrix: <Array object at \"\n suffix = \">\"\n self.assertTrue(line.startswith(prefix))\n self.assertTrue(line.endswith(suffix))\n address = int(line[len(prefix) : -len(suffix)], 16)\n self.assertEqual(lines[2], \" target_internal_open_gap_score: -0.500000\")\n self.assertEqual(lines[3], \" target_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[4], \" target_left_open_gap_score: -0.500000\")\n self.assertEqual(lines[5], \" target_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[6], \" target_right_open_gap_score: -0.500000\")\n self.assertEqual(lines[7], \" target_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[8], \" query_internal_open_gap_score: -0.500000\")\n self.assertEqual(lines[9], \" query_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[10], \" query_left_open_gap_score: -0.500000\")\n self.assertEqual(lines[11], \" query_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[12], \" query_right_open_gap_score: -0.500000\")\n self.assertEqual(lines[13], \" query_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[14], \" mode: local\")\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nATAT\n||. 
\nATT \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 3),), ((0, 3),)))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nATAT\n||-|\nAT-T\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (3, 4)), ((0, 2), (2, 3))))\n\n def test_match_dictionary5(self):\n try:\n from Bio.Align import substitution_matrices\n except ImportError:\n return\n substitution_matrix = substitution_matrices.Array(alphabet=\"AT\", dims=2)\n self.assertEqual(substitution_matrix.shape, (2, 2))\n substitution_matrix.update(self.match_dict)\n seq1 = \"ATAT\"\n seq2 = \"ATT\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.substitution_matrix = substitution_matrix\n aligner.open_gap_score = -1.0\n aligner.extend_gap_score = 0.0\n lines = str(aligner).splitlines()\n self.assertEqual(len(lines), 15)\n self.assertEqual(lines[0], \"Pairwise sequence aligner with parameters\")\n line = lines[1]\n prefix = \" substitution_matrix: <Array object at \"\n suffix = \">\"\n self.assertTrue(line.startswith(prefix))\n self.assertTrue(line.endswith(suffix))\n address = int(line[len(prefix) : -len(suffix)], 16)\n self.assertEqual(lines[2], \" target_internal_open_gap_score: -1.000000\")\n self.assertEqual(lines[3], \" target_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[4], \" target_left_open_gap_score: -1.000000\")\n self.assertEqual(lines[5], \" target_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[6], \" target_right_open_gap_score: -1.000000\")\n self.assertEqual(lines[7], \" target_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[8], \" query_internal_open_gap_score: -1.000000\")\n self.assertEqual(lines[9], \" query_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[10], \" query_left_open_gap_score: -1.000000\")\n self.assertEqual(lines[11], \" query_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[12], \" query_right_open_gap_score: -1.000000\")\n self.assertEqual(lines[13], \" query_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[14], \" mode: local\")\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nATAT\n||. 
\nATT \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 3),), ((0, 3),)))\n\n def test_match_dictionary6(self):\n try:\n from Bio.Align import substitution_matrices\n except ImportError:\n return\n substitution_matrix = substitution_matrices.Array(alphabet=\"AT\", dims=2)\n self.assertEqual(substitution_matrix.shape, (2, 2))\n substitution_matrix.update(self.match_dict)\n seq1 = \"ATT\"\n seq2 = \"ATAT\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.substitution_matrix = substitution_matrix\n aligner.open_gap_score = -1.0\n aligner.extend_gap_score = 0.0\n lines = str(aligner).splitlines()\n self.assertEqual(len(lines), 15)\n self.assertEqual(lines[0], \"Pairwise sequence aligner with parameters\")\n line = lines[1]\n prefix = \" substitution_matrix: <Array object at \"\n suffix = \">\"\n self.assertTrue(line.startswith(prefix))\n self.assertTrue(line.endswith(suffix))\n address = int(line[len(prefix) : -len(suffix)], 16)\n self.assertEqual(lines[2], \" target_internal_open_gap_score: -1.000000\")\n self.assertEqual(lines[3], \" target_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[4], \" target_left_open_gap_score: -1.000000\")\n self.assertEqual(lines[5], \" target_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[6], \" target_right_open_gap_score: -1.000000\")\n self.assertEqual(lines[7], \" target_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[8], \" query_internal_open_gap_score: -1.000000\")\n self.assertEqual(lines[9], \" query_internal_extend_gap_score: 0.000000\")\n self.assertEqual(lines[10], \" query_left_open_gap_score: -1.000000\")\n self.assertEqual(lines[11], \" query_left_extend_gap_score: 0.000000\")\n self.assertEqual(lines[12], \" query_right_open_gap_score: -1.000000\")\n self.assertEqual(lines[13], \" query_right_extend_gap_score: 0.000000\")\n self.assertEqual(lines[14], \" mode: local\")\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nATT \n||. 
\nATAT\n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 3),), ((0, 3),)))\n\n\nclass TestPairwiseOneCharacter(unittest.TestCase):\n def test_align_one_char1(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.open_gap_score = -0.3\n aligner.extend_gap_score = -0.1\n self.assertEqual(aligner.algorithm, \"Gotoh local alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.300000\n target_internal_extend_gap_score: -0.100000\n target_left_open_gap_score: -0.300000\n target_left_extend_gap_score: -0.100000\n target_right_open_gap_score: -0.300000\n target_right_extend_gap_score: -0.100000\n query_internal_open_gap_score: -0.300000\n query_internal_extend_gap_score: -0.100000\n query_left_open_gap_score: -0.300000\n query_left_extend_gap_score: -0.100000\n query_right_open_gap_score: -0.300000\n query_right_extend_gap_score: -0.100000\n mode: local\n\"\"\",\n )\n score = aligner.score(\"abcde\", \"c\")\n self.assertAlmostEqual(score, 1)\n alignments = aligner.align(\"abcde\", \"c\")\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nabcde\n | \n c \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((2, 3),), ((0, 1),)))\n\n def test_align_one_char2(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.open_gap_score = -0.3\n aligner.extend_gap_score = -0.1\n self.assertEqual(aligner.algorithm, \"Gotoh local alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.300000\n target_internal_extend_gap_score: -0.100000\n target_left_open_gap_score: -0.300000\n target_left_extend_gap_score: -0.100000\n target_right_open_gap_score: -0.300000\n target_right_extend_gap_score: -0.100000\n query_internal_open_gap_score: -0.300000\n query_internal_extend_gap_score: -0.100000\n query_left_open_gap_score: -0.300000\n query_left_extend_gap_score: -0.100000\n query_right_open_gap_score: -0.300000\n query_right_extend_gap_score: -0.100000\n mode: local\n\"\"\",\n )\n score = aligner.score(\"abcce\", \"c\")\n self.assertAlmostEqual(score, 1)\n alignments = aligner.align(\"abcce\", \"c\")\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nabcce\n | \n c \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((2, 3),), ((0, 1),)))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 1)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nabcce\n | \n c \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((3, 4),), ((0, 1),)))\n\n def test_align_one_char3(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.open_gap_score = -0.3\n aligner.extend_gap_score = -0.1\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.300000\n target_internal_extend_gap_score: -0.100000\n target_left_open_gap_score: -0.300000\n 
target_left_extend_gap_score: -0.100000\n target_right_open_gap_score: -0.300000\n target_right_extend_gap_score: -0.100000\n query_internal_open_gap_score: -0.300000\n query_internal_extend_gap_score: -0.100000\n query_left_open_gap_score: -0.300000\n query_left_extend_gap_score: -0.100000\n query_right_open_gap_score: -0.300000\n query_right_extend_gap_score: -0.100000\n mode: global\n\"\"\",\n )\n seq1 = \"abcde\"\n seq2 = \"c\"\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 0.2)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 0.2)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nabcde\n--|--\n--c--\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((2, 3),), ((0, 1),)))\n\n def test_align_one_char_score3(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.open_gap_score = -0.3\n aligner.extend_gap_score = -0.1\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.300000\n target_internal_extend_gap_score: -0.100000\n target_left_open_gap_score: -0.300000\n target_left_extend_gap_score: -0.100000\n target_right_open_gap_score: -0.300000\n target_right_extend_gap_score: -0.100000\n query_internal_open_gap_score: -0.300000\n query_internal_extend_gap_score: -0.100000\n query_left_open_gap_score: -0.300000\n query_left_extend_gap_score: -0.100000\n query_right_open_gap_score: -0.300000\n query_right_extend_gap_score: -0.100000\n mode: global\n\"\"\",\n )\n score = aligner.score(\"abcde\", \"c\")\n self.assertAlmostEqual(score, 0.2)\n\n\nclass TestPerSiteGapPenalties(unittest.TestCase):\n \"\"\"Check gap penalty callbacks use correct gap opening position.\n\n This tests that the gap penalty callbacks are really being used\n with the correct gap opening position.\n \"\"\"\n\n def test_gap_here_only_1(self):\n seq1 = \"AAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\"\n seq2 = \"AABBBAAAACCCCAAAABBBAA\"\n breaks = [0, 11, len(seq2)]\n # Very expensive to open a gap in seq1:\n nogaps = lambda x, y: -2000 - y # noqa: E731\n # Very expensive to open a gap in seq2 unless it is in one of the allowed positions\n specificgaps = (\n lambda x, y: (-2 - y) if x in breaks else (-2000 - y) # noqa: E731\n )\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.match_score = 1\n aligner.mismatch_score = -1\n aligner.target_gap_score = nogaps\n aligner.query_gap_score = specificgaps\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: -1.000000\n target_gap_function: %s\n query_gap_function: %s\n mode: global\n\"\"\"\n % (nogaps, specificgaps),\n )\n self.assertEqual(\n aligner.algorithm, \"Waterman-Smith-Beyer global alignment algorithm\"\n )\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 2)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 2)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nAAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\n--|||||||||||----------|||||||||||--\n--AABBBAAAACC----------CCAAAABBBAA--\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((2, 13), (23, 34)), ((0, 11), (11, 22))))\n\n def 
test_gap_here_only_2(self):\n # Force a bad alignment.\n #\n # Forces a bad alignment by having a very expensive gap penalty\n # where one would normally expect a gap, and a cheap gap penalty\n # in another place.\n seq1 = \"AAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\"\n seq2 = \"AABBBAAAACCCCAAAABBBAA\"\n breaks = [0, 3, len(seq2)]\n # Very expensive to open a gap in seq1:\n nogaps = lambda x, y: -2000 - y # noqa: E731\n # Very expensive to open a gap in seq2 unless it is in one of the allowed positions:\n specificgaps = (\n lambda x, y: (-2 - y) if x in breaks else (-2000 - y)\n ) # noqa: E731\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.match_score = 1\n aligner.mismatch_score = -1\n aligner.target_gap_score = nogaps\n aligner.query_gap_score = specificgaps\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: -1.000000\n target_gap_function: %s\n query_gap_function: %s\n mode: global\n\"\"\"\n % (nogaps, specificgaps),\n )\n self.assertEqual(\n aligner.algorithm, \"Waterman-Smith-Beyer global alignment algorithm\"\n )\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, -10)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, -10)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nAAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\n--|||----------......|||||||||||||--\n--AAB----------BBAAAACCCCAAAABBBAA--\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((2, 5), (15, 34)), ((0, 3), (3, 22))))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, -10)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nAAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\n||.------------......|||||||||||||--\nAAB------------BBAAAACCCCAAAABBBAA--\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 3), (15, 34)), ((0, 3), (3, 22))))\n\n def test_gap_here_only_3(self):\n # Check if gap open and gap extend penalties are handled correctly.\n seq1 = \"TTCCAA\"\n seq2 = \"TTGGAA\"\n\n def gap_score(i, n):\n if i == 3:\n return -10\n if n == 1:\n return -1\n return -10\n\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.match_score = 1\n aligner.mismatch_score = -10\n aligner.target_gap_score = gap_score\n self.assertEqual(\n aligner.algorithm, \"Waterman-Smith-Beyer global alignment algorithm\"\n )\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: -10.000000\n target_gap_function: %s\n query_internal_open_gap_score: 0.000000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: 0.000000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: 0.000000\n query_right_extend_gap_score: 0.000000\n mode: global\n\"\"\"\n % gap_score,\n )\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 2.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 2.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nTT-CC-AA\n||----||\nTTG--GAA\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (4, 6)), ((0, 2), (4, 6))))\n aligner.query_gap_score = gap_score\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: -10.000000\n target_gap_function: 
%s\n query_gap_function: %s\n mode: global\n\"\"\"\n % (gap_score, gap_score),\n )\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, -8.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 4)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, -8.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nTT-CCAA\n||-.-||\nTTGG-AA\n\"\"\",\n )\n self.assertEqual(\n alignment.aligned, (((0, 2), (2, 3), (4, 6)), ((0, 2), (3, 4), (4, 6)))\n )\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, -8.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nTTC--CAA\n||----||\nTT-GG-AA\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (4, 6)), ((0, 2), (4, 6))))\n alignment = alignments[2]\n self.assertAlmostEqual(alignment.score, -8.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nTTCC-AA\n||-.-||\nTT-GGAA\n\"\"\",\n )\n self.assertEqual(\n alignment.aligned, (((0, 2), (3, 4), (4, 6)), ((0, 2), (2, 3), (4, 6)))\n )\n alignment = alignments[3]\n self.assertAlmostEqual(alignment.score, -8.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nTT-CC-AA\n||----||\nTTG--GAA\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (4, 6)), ((0, 2), (4, 6))))\n\n def test_gap_here_only_local_1(self):\n seq1 = \"AAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\"\n seq2 = \"AABBBAAAACCCCAAAABBBAA\"\n breaks = [0, 11, len(seq2)]\n # Very expensive to open a gap in seq1:\n nogaps = lambda x, y: -2000 - y # noqa: E731\n # Very expensive to open a gap in seq2 unless it is in one of the allowed positions\n specificgaps = (\n lambda x, y: (-2 - y) if x in breaks else (-2000 - y)\n ) # noqa: E731\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.match_score = 1\n aligner.mismatch_score = -1\n aligner.target_gap_score = nogaps\n aligner.query_gap_score = specificgaps\n self.assertEqual(\n aligner.algorithm, \"Waterman-Smith-Beyer local alignment algorithm\"\n )\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: -1.000000\n target_gap_function: %s\n query_gap_function: %s\n mode: local\n\"\"\"\n % (nogaps, specificgaps),\n )\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 13)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 13)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nAAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\n ||||||||||||| \n AABBBAAAACCCCAAAABBBAA \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((2, 15),), ((0, 13),)))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 13)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nAAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\n ||||||||||||| \n AABBBAAAACCCCAAAABBBAA \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((21, 34),), ((9, 22),)))\n\n def test_gap_here_only_local_2(self):\n # Force a bad alignment.\n #\n # Forces a bad alignment by having a very expensive gap penalty\n # where one would normally expect a gap, and a cheap gap penalty\n # in another place.\n seq1 = \"AAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\"\n seq2 = \"AABBBAAAACCCCAAAABBBAA\"\n breaks = [0, 3, len(seq2)]\n # Very expensive to open a gap in seq1:\n nogaps = lambda x, y: -2000 - y # noqa: E731\n # Very expensive to open a gap in seq2 unless it is in one of the allowed positions:\n specificgaps = (\n lambda x, y: (-2 - y) if x in breaks else 
(-2000 - y)\n ) # noqa: E731\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.match_score = 1\n aligner.mismatch_score = -1\n aligner.target_gap_score = nogaps\n aligner.query_gap_score = specificgaps\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: -1.000000\n target_gap_function: %s\n query_gap_function: %s\n mode: local\n\"\"\"\n % (nogaps, specificgaps),\n )\n self.assertEqual(\n aligner.algorithm, \"Waterman-Smith-Beyer local alignment algorithm\"\n )\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 13)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 13)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nAAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\n ||||||||||||| \n AABBBAAAACCCCAAAABBBAA \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((2, 15),), ((0, 13),)))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 13)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nAAAABBBAAAACCCCCCCCCCCCCCAAAABBBAAAA\n ||||||||||||| \n AABBBAAAACCCCAAAABBBAA \n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((21, 34),), ((9, 22),)))\n\n def test_gap_here_only_local_3(self):\n # Check if gap open and gap extend penalties are handled correctly.\n seq1 = \"TTCCAA\"\n seq2 = \"TTGGAA\"\n\n def gap_score(i, n):\n if i == 3:\n return -10\n if n == 1:\n return -1\n return -10\n\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.match_score = 1\n aligner.mismatch_score = -10\n aligner.target_gap_score = gap_score\n self.assertEqual(\n aligner.algorithm, \"Waterman-Smith-Beyer local alignment algorithm\"\n )\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: -10.000000\n target_gap_function: %s\n query_internal_open_gap_score: 0.000000\n query_internal_extend_gap_score: 0.000000\n query_left_open_gap_score: 0.000000\n query_left_extend_gap_score: 0.000000\n query_right_open_gap_score: 0.000000\n query_right_extend_gap_score: 0.000000\n mode: local\n\"\"\"\n % gap_score,\n )\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 2.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 2.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nTTCCAA\n|| \nTTGGAA\n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 2),), ((0, 2),)))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 2.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nTTCCAA\n ||\nTTGGAA\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((4, 6),), ((4, 6),)))\n aligner.query_gap_score = gap_score\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: -10.000000\n target_gap_function: %s\n query_gap_function: %s\n mode: local\n\"\"\"\n % (gap_score, gap_score),\n )\n alignments = aligner.align(seq1, seq2)\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 2.0)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 2.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nTTCCAA\n|| \nTTGGAA\n\"\"\", # noqa: W291\n )\n 
self.assertEqual(alignment.aligned, (((0, 2),), ((0, 2),)))\n alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 2.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nTTCCAA\n ||\nTTGGAA\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((4, 6),), ((4, 6),)))\n\n def test_broken_gap_function(self):\n # Check if an Exception is propagated if the gap function raises one\n seq1 = \"TTCCAA\"\n seq2 = \"TTGGAA\"\n\n def gap_score(i, n):\n raise RuntimeError(\"broken gap function\")\n\n aligner = Align.PairwiseAligner()\n aligner.target_gap_score = gap_score\n aligner.query_gap_score = -1\n aligner.mode = \"global\"\n with self.assertRaises(RuntimeError):\n aligner.score(seq1, seq2)\n with self.assertRaises(RuntimeError):\n alignments = aligner.align(seq1, seq2)\n alignments = list(alignments)\n aligner.mode = \"local\"\n with self.assertRaises(RuntimeError):\n aligner.score(seq1, seq2)\n with self.assertRaises(RuntimeError):\n alignments = aligner.align(seq1, seq2)\n alignments = list(alignments)\n aligner.target_gap_score = -1\n aligner.query_gap_score = gap_score\n aligner.mode = \"global\"\n with self.assertRaises(RuntimeError):\n aligner.score(seq1, seq2)\n with self.assertRaises(RuntimeError):\n alignments = aligner.align(seq1, seq2)\n alignments = list(alignments)\n aligner.mode = \"local\"\n with self.assertRaises(RuntimeError):\n aligner.score(seq1, seq2)\n with self.assertRaises(RuntimeError):\n alignments = aligner.align(seq1, seq2)\n alignments = list(alignments)\n\n\nclass TestSequencesAsLists(unittest.TestCase):\n \"\"\"Check aligning sequences provided as lists.\n\n This tests whether we can align sequences that are provided as lists\n consisting of three-letter codons or three-letter amino acids.\n \"\"\"\n\n def test_three_letter_amino_acids_global(self):\n seq1 = [\"Gly\", \"Ala\", \"Thr\"]\n seq2 = [\"Gly\", \"Ala\", \"Ala\", \"Cys\", \"Thr\"]\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n # fmt: off\n aligner.alphabet = [\n \"Ala\", \"Arg\", \"Asn\", \"Asp\", \"Cys\", \"Gln\", \"Glu\", \"Gly\", \"His\", \"Ile\",\n \"Leu\", \"Lys\", \"Met\", \"Phe\", \"Pro\", \"Ser\", \"Thr\", \"Trp\", \"Tyr\", \"Val\",\n ]\n # fmt: on\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n self.assertEqual(\n str(alignments[0]),\n \"\"\"\\\nGly Ala --- --- Thr\n||| ||| --- --- |||\nGly Ala Ala Cys Thr\n\"\"\",\n )\n self.assertEqual(\n str(alignments[1]),\n \"\"\"\\\nGly --- Ala --- Thr\n||| --- ||| --- |||\nGly Ala Ala Cys Thr\n\"\"\",\n )\n self.assertAlmostEqual(alignments[0].score, 3.0)\n self.assertAlmostEqual(alignments[1].score, 3.0)\n\n seq1 = [\"Pro\", \"Pro\", \"Gly\", \"Ala\", \"Thr\"]\n seq2 = [\"Gly\", \"Ala\", \"Ala\", \"Cys\", \"Thr\", \"Asn\", \"Asn\"]\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n self.assertEqual(\n str(alignments[0]),\n \"\"\"\\\nPro Pro Gly Ala --- --- Thr --- ---\n--- --- ||| ||| --- --- ||| --- ---\n--- --- Gly Ala Ala Cys Thr Asn Asn\n\"\"\",\n )\n self.assertEqual(\n str(alignments[1]),\n \"\"\"\\\nPro Pro Gly --- Ala --- Thr --- ---\n--- --- ||| --- ||| --- ||| --- ---\n--- --- Gly Ala Ala Cys Thr Asn Asn\n\"\"\",\n )\n self.assertAlmostEqual(alignments[0].score, 3.0)\n self.assertAlmostEqual(alignments[1].score, 3.0)\n\n def test_three_letter_amino_acids_local(self):\n seq1 = [\"Asn\", \"Asn\", \"Gly\", 
\"Ala\", \"Thr\", \"Glu\", \"Glu\"]\n seq2 = [\"Pro\", \"Pro\", \"Gly\", \"Ala\", \"Ala\", \"Cys\", \"Thr\", \"Leu\"]\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n # fmt: off\n aligner.alphabet = [\n \"Ala\", \"Arg\", \"Asn\", \"Asp\", \"Cys\", \"Gln\", \"Glu\", \"Gly\", \"His\", \"Ile\",\n \"Leu\", \"Lys\", \"Met\", \"Phe\", \"Pro\", \"Ser\", \"Thr\", \"Trp\", \"Tyr\", \"Val\",\n ]\n # fmt: on\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n self.assertEqual(\n str(alignments[0]),\n \"\"\"\\\nGly Ala --- --- Thr\n||| ||| --- --- |||\nGly Ala Ala Cys Thr\n\"\"\",\n )\n self.assertEqual(\n str(alignments[1]),\n \"\"\"\\\nGly --- Ala --- Thr\n||| --- ||| --- |||\nGly Ala Ala Cys Thr\n\"\"\",\n )\n self.assertAlmostEqual(alignments[0].score, 3.0)\n self.assertAlmostEqual(alignments[1].score, 3.0)\n\n\nclass TestArgumentErrors(unittest.TestCase):\n def test_aligner_string_errors(self):\n aligner = Align.PairwiseAligner()\n message = \"^argument should support the sequence protocol$\"\n with self.assertRaisesRegex(TypeError, message):\n aligner.score(\"AAA\", 3)\n message = \"^sequence has zero length$\"\n with self.assertRaisesRegex(ValueError, message):\n aligner.score(\"AAA\", \"\")\n message = \"^sequence contains letters not in the alphabet$\"\n aligner.alphabet = \"ABCD\"\n with self.assertRaisesRegex(ValueError, message):\n aligner.score(\"AAA\", \"AAE\")\n\n def test_aligner_array_errors(self):\n aligner = Align.PairwiseAligner()\n s1 = \"GGG\"\n s2 = array.array(\"i\", [ord(\"G\"), ord(\"A\"), ord(\"G\")])\n score = aligner.score(s1, s2)\n self.assertAlmostEqual(score, 2.0)\n s2 = array.array(\"f\", [1.0, 0.0, 1.0])\n message = \"^sequence has incorrect data type 'f'$\"\n with self.assertRaisesRegex(ValueError, message):\n aligner.score(s1, s2)\n aligner.wildcard = chr(99)\n s1 = array.array(\"i\", [1, 5, 6])\n s2 = array.array(\"i\", [1, 8, 6])\n s2a = array.array(\"i\", [1, 8, 99])\n s2b = array.array(\"i\", [1, 28, 6])\n aligner.match = 3.0\n aligner.mismatch = -2.0\n aligner.gap_score = -10.0\n score = aligner.score(s1, s2)\n self.assertAlmostEqual(score, 4.0)\n # the following two are valid as we are using match/mismatch scores\n # instead of a substitution matrix:\n score = aligner.score(s1, s2a)\n # since we set the wildcard character to chr(99), the number 99\n # is interpreted as an unknown character, and gets a zero score:\n self.assertAlmostEqual(score, 1.0)\n score = aligner.score(s1, s2b)\n self.assertAlmostEqual(score, 4.0)\n try:\n import numpy\n except ImportError:\n return\n aligner = Align.PairwiseAligner()\n aligner.wildcard = chr(99)\n s1 = \"GGG\"\n s2 = numpy.array([ord(\"G\"), ord(\"A\"), ord(\"G\")], numpy.int32)\n score = aligner.score(s1, s2)\n self.assertAlmostEqual(score, 2.0)\n s2 = numpy.array([1.0, 0.0, 1.0])\n message = \"^sequence has incorrect data type 'd'$\"\n with self.assertRaisesRegex(ValueError, message):\n aligner.score(s1, s2)\n s2 = numpy.zeros((3, 2), numpy.int32)\n message = \"^sequence has incorrect rank \\\\(2 expected 1\\\\)$\"\n with self.assertRaisesRegex(ValueError, message):\n aligner.score(s1, s2)\n s1 = numpy.array([1, 5, 6], numpy.int32)\n s2 = numpy.array([1, 8, 6], numpy.int32)\n s2a = numpy.array([1, 8, 99], numpy.int32)\n s2b = numpy.array([1, 28, 6], numpy.int32)\n s2c = numpy.array([1, 8, -6], numpy.int32)\n aligner.match = 3.0\n aligner.mismatch = -2.0\n aligner.gap_score = -10.0\n score = 
aligner.score(s1, s2)\n self.assertAlmostEqual(score, 4.0)\n # the following two are valid as we are using match/mismatch scores\n # instead of a substitution matrix:\n score = aligner.score(s1, s2a)\n self.assertAlmostEqual(score, 1.0)\n score = aligner.score(s1, s2b)\n self.assertAlmostEqual(score, 4.0)\n # when using a substitution matrix, all indices should be between 0\n # and the size of the substitution matrix:\n m = 5 * numpy.eye(10)\n aligner.substitution_matrix = m\n score = aligner.score(s1, s2) # no ValueError\n self.assertAlmostEqual(score, 10.0)\n message = \"^sequence item 2 is negative \\\\(-6\\\\)$\"\n with self.assertRaisesRegex(ValueError, message):\n aligner.score(s1, s2c)\n message = \"^sequence item 1 is out of bound \\\\(28, should be < 10\\\\)$\"\n with self.assertRaisesRegex(ValueError, message):\n aligner.score(s1, s2b)\n # note that the wildcard character is ignored when using a substitution\n # matrix, so 99 is interpreted as an index here:\n message = \"^sequence item 2 is out of bound \\\\(99, should be < 10\\\\)$\"\n with self.assertRaisesRegex(ValueError, message):\n aligner.score(s1, s2a)\n\n\nclass TestOverflowError(unittest.TestCase):\n def test_align_overflow_error(self):\n aligner = Align.PairwiseAligner()\n path = os.path.join(\"Align\", \"bsubtilis.fa\")\n record = SeqIO.read(path, \"fasta\")\n seq1 = str(record.seq)\n path = os.path.join(\"Align\", \"ecoli.fa\")\n record = SeqIO.read(path, \"fasta\")\n seq2 = str(record.seq)\n alignments = aligner.align(seq1, seq2)\n self.assertAlmostEqual(alignments.score, 1286.0)\n message = \"^number of optimal alignments is larger than (%d|%d)$\" % (\n 2147483647, # on 32-bit systems\n 9223372036854775807,\n ) # on 64-bit systems\n with self.assertRaisesRegex(OverflowError, message):\n n = len(alignments)\n # confirm that we can still pull out individual alignments\n alignment = alignments[0]\n self.assertEqual(\n str(alignment),\n 
\"\"\"\\\nATTTA-TC-GGA-GAGTTTGATCC-TGGCTCAGGAC--GAACGCTGGCGGC-GTGCCTAAT-ACATGCAAGTCGAG-CGG-A-CAG-AT-GGGA-GCTTGCT-C----CCTGAT-GTTAGC-GGCGGACGGGTGAGTAACAC-GT--GGGTAA-CCTGCCTGTAA-G-ACTGGG--ATAACT-CC-GGGAAACCGG--GGCTAATACCGG-ATGGTTGTTTGAACCGCAT-GGTTCAA-AC-ATAA-AAGGTGG--C-TTCGG-C-TACCACTTA-C-A--G-ATG-GACCC-GC--GGCGCATTAGCTAGTT-GGTGAGG-TAACGGCTCACC-AAGGCGACGATGCG--TAGCC-GA--CCTGAGAGGG-TGATC--GGCCACACTGGGA-CTGAGACACGG-CCCAGACTCCTACGGGAGGCAGCAGTAGGG-AATC-TTCCGCA-A-TGGA-CG-AAAGTC-TGAC-GG-AGCAAC--GCCGCGTG-AGTGAT-GAAGG--TTTTCGGA-TC-GTAAAGCT-CTGTTGTT-AG-GG--A--A-G--A--ACAAGTGCCGTTCGAATAGGGC----GG-TACC-TTGACGGT-ACCTAAC-CAGAA-A-GCCAC-GGCTAACTAC-GTGCCAGCAGCCGCGGTAATACGT-AGG-TGGCAAGCGTTG--TCCGGAATTA-TTGGGCGTAAAG-GGCT-CGCAGGCGGTTTC-TTAAGTCT-GATGTGAAAG-CCCCCGG-CTCAACC-GGGGAGGG--T-CAT-TGGA-AACTGGGG-AA-CTTGAGTGCA--G-AAGAGGAGAGTGG-A-A-TTCCACG-TGTAGCGGTGAAATGCGTAGAGATG-TGGAGGAAC-ACCAG-TGGCGAAGGCGA-CTCTC--TGGT-CTGTAA--CTGACGCTG-AGGA-GCGAAAGCGTGGGGAGCGAA-CAGGATTAGATACCCTGGTAGTCCACGCCGTAAACGATGAGT-G-CTAAGTGTT-AGGGGGTT-TCCGCCCCTT-AGTGC-TG-C------AGCTAACGCA-TTAAG-C-ACTCCGCCTGGGGAGTACGGTC-GCAAGACTG--AAA-CTCAAA-GGAATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGAA-GCAACGCGAAGAACCTTACCA-GGTCTTGACATCCTCTGACA-A--T--CCTAGAGATAGGAC--G-T-CCCCTTCGGGGGCAGA--GTGA--CAGGTGG-TGCATGG-TTGTCGTCAGCTCGTGTC-GTGAGA-TGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTTGATCTTA--GTTGCCAGCA--TTCA-GTTG--GGC-A-CTCTAA-GGT-GACTGCC-GGTGAC-AAACC-GGAGGAAGGTGGGGATGACGTCAAA-TCATCATG-CCCCTTAT-GACCT-GGGCTACACACGTGCTACAATGGACAG-A-ACAAAG-GGCA-GCGAAACC--GCGAG-GTT-AAGCC--AATCC-CAC-AAA-T-CTGTTC-TCAGTTC-GGATC-GC-AGTCTGCAACTCGACTGCG--TGAAGCT-GGAATCGCTAGTAATCGC-GGATCAGCA-TGCCG-CGGTGAATACGTTCCCGGGCCTTGTACACACCGCCCGTCACACCAC-GAG-AGT---TTGT-AACACCC-GAAGTC-GGTGAGG-T-AACCTTTTA-GG-AG--C-C--AGCCG-CC---GAAGGTGGGA--CAGATGA-TTGGGGTGAAGTCGTAACAAGGTAG-CCGTATCGGAAGG----TGCGGCT-GGATCACCTCCTTTCTA\n|---|-|--|-|-||||||||||--||||||||-|---|||||||||||||-|-||||||--|||||||||||||--|||-|-|||-|--|--|-|||||||-|----|-|||--|--||--|||||||||||||||||----||--|||-||-|-||||||-|--|-|--|||--||||||-|--||-||||-||--|-|||||||||--||---------|||-|--|-|---|||-||-|-||-|-||-||--|-|||||-|-|-|---||--|-|--|-|||-|-|||-|---||-|-||||||||||--||||-||-||||||||||||-|-|||||||||-|---||||--|---|-|||||||--|||-|--|-|||||||||-|-|||||||||||-||-|||||||||||||||||||||||-|||-|||--||--|||-|-|||--||-||-|-|-|||--|--|||--|--||||||||-|-|||--|||||--||--|||--|--||||||-|-||-||----||-||--|--|-|--|--|-||||----|---||||---|----|--|-|--||||||-|-|||---|-|||||-|-||-||-||||||||-|-|||||||||||||||||||||||--|||-||-||||||||---||-|||||||-|-||||||||||-|-|--||||||||||||--|||||||--|||||||||--||||-||-|||||||-|||-|-----|-|||-||-|-|-||||---||-|||||||-|---|-|-||||-|-|-||-|-|-|||||-|-||||||||||||||||||||||||--||||||||--|||-|-|||||||||||--|-|-|--|||--|-|-||--||||||||--|||--|||||||||||||||||-||-|||||||||||||||||||||||||||||||||||||||--|-|-||---||---|||---||-|--||||-||-||-||-||-|------|||||||||--|||||-|-||-|-|||||||||||||||-|-|||||---|--|||-||||||-|-|||||||||||||||||||||||||||||||||||||||||||||||--||||||||||||||||||||--|||||||||||||----|||-|--|--||-||||||-|||---|-|-||--||||||---|-|--||||--||||||--|||||||-|-|||||||||||||||--||||-|-||||||||||||||||||||||||||||||||||-|||||---|||||||||---|-|--|--|--||--|-|||-||-||--|||||||-|-|||--||||--||||||||||||||||||||||||--||||||||-|||-|||--||||--|||||||||||||||||||||||-|-|-|-||||||-|--|-||||--||--|||||-|---||||---|--||-||--|||-|-|-||-|-|-|||-|-||||--|--||||||||||||||||-|---|||||-|-|||||||||||||||||--|||||||-|-||||--|||||||||||||||||||||||||||||||||||||||||||||--|-|-|||---|||--||-|----|||||--|||-||--|-||||||----||-||--|-|--|-||--|----|----||--
|--||--|||-|-||||||||||||||||||||||--|||||--||--||----|||||-|-|||||||||||||---|\nA---AAT-TG-AAGAGTTTGATC-ATGGCTCAG-A-TTGAACGCTGGCGGCAG-GCCTAA-CACATGCAAGTCGA-ACGGTAACAGGA-AG--AAGCTTGCTTCTTTGC-TGA-CG--AG-TGGCGGACGGGTGAGTAA---TGTCTGGG-AAAC-TGCCTG-A-TGGA--GGGGGATAACTAC-TGG-AAAC-GGTAG-CTAATACCG-CAT---------AAC-G--TCG---CAAGACCA-AAGA-GG-GGGACCTTCGGGCCT-C---TT-GCCATCGGATGTG-CCCAG-ATGG-G-ATTAGCTAGT-AGGTG-GGGTAACGGCTCACCTA-GGCGACGAT-C-CCTAGC-TG-GTC-TGAGAGG-ATGA-CCAG-CCACACTGG-AACTGAGACACGGTCC-AGACTCCTACGGGAGGCAGCAGT-GGGGAAT-ATT--GCACAATGG-GCGCAA-G-CCTGA-TG-CAGC--CATGCCGCGTGTA-TGA-AGAAGGCCTT--CGG-GT-TGTAAAG-TACT-TT---CAGCGGGGAGGAAGGGAGTA-AAGT----T---AATA---CCTTTG-CT-C-ATTGACG-TTACC---CGCAGAAGAAGC-ACCGGCTAACT-CCGTGCCAGCAGCCGCGGTAATACG-GAGGGTG-CAAGCGTT-AATC-GGAATTACT-GGGCGTAAAGCG-C-ACGCAGGCGGTTT-GTTAAGTC-AGATGTGAAA-TCCCC-GGGCTCAACCTGGG-A---ACTGCATCTG-ATA-CTGG--CAAGCTTGAGT-C-TCGTA-GAGG-G-G-GGTAGAATTCCA-GGTGTAGCGGTGAAATGCGTAGAGAT-CTGGAGGAA-TACC-GGTGGCGAAGGCG-GC-C-CCCTGG-AC-G-AAGACTGACGCT-CAGG-TGCGAAAGCGTGGGGAGC-AAACAGGATTAGATACCCTGGTAGTCCACGCCGTAAACGATG--TCGACT---TG--GAGG---TTGT--GCCC-TTGAG-GCGTGGCTTCCGGAGCTAACGC-GTTAAGTCGAC-C-GCCTGGGGAGTACGG-CCGCAAG---GTTAAAACTCAAATG-AATTGACGGGGGCCCGCACAAGCGGTGGAGCATGTGGTTTAATTCGA-TGCAACGCGAAGAACCTTACC-TGGTCTTGACATCC----ACAGAACTTTCC-AGAGAT-GGA-TTGGTGCC--TTCGGG---A-ACTGTGAGACAGGTG-CTGCATGGCT-GTCGTCAGCTCGTGT-TGTGA-AATGTTGGGTTAAGTCCCGCAACGAGCGCAACCCTT-ATCTT-TTGTTGCCAGC-GGT-C-CG--GCCGG-GAACTC-AAAGG-AGACTGCCAG-TGA-TAAAC-TGGAGGAAGGTGGGGATGACGTCAA-GTCATCATGGCCC-TTA-CGACC-AGGGCTACACACGTGCTACAATGG-C-GCATACAAAGAG--AAGCGA--CCTCGCGAGAG--CAAGC-GGA--CCTCA-TAAAGTGC-GT-CGT-AGT-CCGGAT-TG-GAGTCTGCAACTCGACT-C-CATGAAG-TCGGAATCGCTAGTAATCG-TGGATCAG-AATGCC-ACGGTGAATACGTTCCCGGGCCTTGTACACACCGCCCGTCACACCA-TG-GGAGTGGGTTG-CAA-A---AGAAGT-AGGT-AG-CTTAACCTT---CGGGAGGGCGCTTA-CC-AC-TTTG----TG--ATTCA--TGACT-GGGGTGAAGTCGTAACAAGGTA-ACCGTA--GG--GGAACCTGCGG-TTGGATCACCTCCTT---A\n\"\"\",\n )\n self.assertAlmostEqual(alignment.score, 1286.0)\n\n\nclass TestKeywordArgumentsConstructor(unittest.TestCase):\n def test_confusing_arguments(self):\n aligner = Align.PairwiseAligner(\n mode=\"local\",\n open_gap_score=-0.3,\n extend_gap_score=-0.1,\n target_open_gap_score=-0.2,\n )\n self.assertEqual(\n str(aligner),\n \"\"\"\\\nPairwise sequence aligner with parameters\n wildcard: None\n match_score: 1.000000\n mismatch_score: 0.000000\n target_internal_open_gap_score: -0.200000\n target_internal_extend_gap_score: -0.100000\n target_left_open_gap_score: -0.200000\n target_left_extend_gap_score: -0.100000\n target_right_open_gap_score: -0.200000\n target_right_extend_gap_score: -0.100000\n query_internal_open_gap_score: -0.300000\n query_internal_extend_gap_score: -0.100000\n query_left_open_gap_score: -0.300000\n query_left_extend_gap_score: -0.100000\n query_right_open_gap_score: -0.300000\n query_right_extend_gap_score: -0.100000\n mode: local\n\"\"\",\n )\n\n\nclass TestUnicodeStrings(unittest.TestCase):\n def test_needlemanwunsch_simple1(self):\n seq1 = \"ĞĀĀČŦ\"\n seq2 = \"ĞĀŦ\"\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.alphabet = None\n self.assertEqual(aligner.algorithm, \"Needleman-Wunsch\")\n score = aligner.score(seq1, seq2)\n self.assertAlmostEqual(score, 3.0)\n alignments = aligner.align(seq1, seq2)\n self.assertEqual(len(alignments), 2)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nĞĀĀČŦ\n||--|\nĞĀ--Ŧ\n\"\"\",\n )\n self.assertEqual(alignment.aligned, (((0, 2), (4, 5)), ((0, 2), (2, 3))))\n 
alignment = alignments[1]\n self.assertAlmostEqual(alignment.score, 3.0)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\nĞĀĀČŦ\n|-|-|\nĞ-Ā-Ŧ\n\"\"\",\n )\n self.assertEqual(\n alignment.aligned, (((0, 1), (2, 3), (4, 5)), ((0, 1), (1, 2), (2, 3)))\n )\n\n def test_align_affine1_score(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"global\"\n aligner.alphabet = None\n aligner.match_score = 0\n aligner.mismatch_score = -1\n aligner.open_gap_score = -5\n aligner.extend_gap_score = -1\n self.assertEqual(aligner.algorithm, \"Gotoh global alignment algorithm\")\n score = aligner.score(\"いい\", \"あいいう\")\n self.assertAlmostEqual(score, -7.0)\n\n def test_smithwaterman(self):\n aligner = Align.PairwiseAligner()\n aligner.mode = \"local\"\n aligner.alphabet = None\n aligner.gap_score = -0.1\n self.assertEqual(aligner.algorithm, \"Smith-Waterman\")\n score = aligner.score(\"ℵℷℶℷ\", \"ℸℵℶℸ\")\n self.assertAlmostEqual(score, 1.9)\n alignments = aligner.align(\"ℵℷℶℷ\", \"ℸℵℶℸ\")\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\n ℵℷℶℷ\n |-| \nℸℵ-ℶℸ\n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 1), (2, 3)), ((1, 2), (2, 3))))\n\n def test_gotoh_local(self):\n aligner = Align.PairwiseAligner()\n aligner.alphabet = None\n aligner.mode = \"local\"\n aligner.open_gap_score = -0.1\n aligner.extend_gap_score = 0.0\n self.assertEqual(aligner.algorithm, \"Gotoh local alignment algorithm\")\n score = aligner.score(\"生物科物\", \"学生科学\")\n self.assertAlmostEqual(score, 1.9)\n alignments = aligner.align(\"生物科物\", \"学生科学\")\n self.assertEqual(len(alignments), 1)\n alignment = alignments[0]\n self.assertAlmostEqual(alignment.score, 1.9)\n self.assertEqual(\n str(alignment),\n \"\"\"\\\n 生物科物\n |-| \n学生-科学\n\"\"\", # noqa: W291\n )\n self.assertEqual(alignment.aligned, (((0, 1), (2, 3)), ((1, 2), (2, 3))))\n\n\nif __name__ == \"__main__\":\n runner = unittest.TextTestRunner(verbosity=2)\n unittest.main(testRunner=runner)\n"
] | [
[
"numpy.eye",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
guanjue/imputed_cistrome_2022 | [
"223efd1e76ce547b480c13f36d0df913ee1f0e5f"
] | [
"merge_train_test.qta.py"
] | [
"#!/usr/bin/env python3\nimport gc\nimport os\nimport pickle\n\nimport fire\nimport h5py\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom hyperopt.fmin import generate_trials_to_calculate\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import precision_recall_curve\nfrom numpy import linalg as LA\nimport sklearn.metrics as metrics\nimport json\nimport lightgbm as lgb\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.metrics import average_precision_score\nfrom sklearn.preprocessing import QuantileTransformer\n\n\n#python3 scripts/merge_train_test.py merge_ave \\\n#--input_h5file='hdf5s/DNase/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_chr19_all_cell_types.SQTKNNN.h5' \\\n#--output_h5file='hdf5s/DNase/DNASE_bam_5_mer_variable_bp_all_samples_lightGBM_chr19_all_cell_types.SQTKNNN.ave.h5' \\\n#--chrom='chr19' \\\n#--train_id_file='HepG2_Epithelium_Liver.train.id.list.txt'\n#--test_id_file='MCF-7_Epithelium_Breast.test.id.list.txt'\n\n\ndef merge_ave(input_h5file, output_h5file, chrom, train_id_file, test_id_file):\n\n\twith h5py.File(input_h5file, \"r\") as infile:\n\t\tscore100 = infile[chrom][...]\n\t\t#samples = np.array(infile['samples'][...]).astype(np.int64)\n\t\tsamples = np.array(infile['samples'][...]).astype(str)\n\t\tprint(samples)\n\t\tfeatures = infile['feature_names'][...]\n\t\tstarts = infile['%s_starts' % chrom][...]\n\n\tinfile.close()\n\n\tprint(samples)\n\n\t#qt = QuantileTransformer(n_quantiles=1000, random_state=6, output_distribution='uniform', ignore_implicit_zeros=False, subsample=100000, copy=False)\n\t#all_score = score100[0,:,:]\n\t#for i in range(1,score100.shape[0]):\n\t#\tall_score = np.concatenate((all_score, score100[i,:,:]), axis=1)\n\t#qt.fit(all_score)\n\t#new_data = qt.fit_transform(all_score)\n\n\t\t\n\n\t#print(all_score.shape)\n\t#score100_new = score100\n\t#qt = QuantileTransformer(n_quantiles=1000, random_state=6, output_distribution='uniform', ignore_implicit_zeros=False, subsample=100000, copy=False)\n\t#qt.fit(score100[0,:,:])\n\t#for i in range(0,score100.shape[0]):\n\t#\tprint(i)\n\t#\tscore100_i = score100[i,:,:]\n\t#\tprint(score100_i.shape)\n\t#\tqt = QuantileTransformer(n_quantiles=1000, random_state=6, output_distribution='uniform', ignore_implicit_zeros=False, subsample=100000, copy=False)\n\t#\tqt.fit(score100_i)\n\t#\tscore100_i_new = qt.fit_transform(score100_i)\n\t#\tscore100_new[i,:,:] = score100_i_new\n\t#\t#score100_new[i,:,:] = all_score[:,0:48]\n\t#\toutput_name_i = str(i)+'check.txt'\n\t#\tnp.savetxt(output_name_i, score100_i_new[:,11], fmt='%10.2f', delimiter='\\t')\n\n\t#score100 = score100_new\n\n\t### read samples & train & test ids \n\ttrain_id = np.array(pd.read_csv(train_id_file, sep=\"\\t\", header=None))\n\ttest_id = np.array(pd.read_csv(test_id_file, sep=\"\\t\", header=None))\n\n\t### get where are the train & test ids \n\ttrain_id_pos = np.intersect1d(samples, train_id, return_indices=True)[1]\n\ttest_id_pos = np.intersect1d(samples, test_id, return_indices=True)[1]\n\n\t### get average of train & test\n\tscore100_train = np.mean(score100[0:4,:,:], axis=0)#.reshape(1,score100.shape[1],score100.shape[2])\n\tscore100_test = np.mean(score100[4:10,:,:], axis=0)#.reshape(1,score100.shape[1],score100.shape[2])\n\tprint('score100_train.shape')\n\tprint(score100_train.shape)\n\n\tqt = QuantileTransformer(n_quantiles=1000, random_state=6, output_distribution='uniform', ignore_implicit_zeros=False, subsample=100000, copy=False)\n\tqt.fit(score100_train)\n\tscore100_train_QT = 
qt.fit_transform(score100_train).reshape(1,score100.shape[1],score100.shape[2])\n\tprint('score100_train_QT.shape')\n\tprint(score100_train_QT.shape)\n\n\t#print('check 0 number')\n\t#print(score100_train.shape)\n\t#print(np.sum(score100_train[:,11]==0))\n\t#print(np.sum(score100_test[:,11]==0))\n\n\tqt = QuantileTransformer(n_quantiles=1000, random_state=6, output_distribution='uniform', ignore_implicit_zeros=False, subsample=100000, copy=False)\n\tqt.fit(score100_test)\n\tscore100_test_QT = qt.fit_transform(score100_test).reshape(1,score100.shape[1],score100.shape[2])\n\n\tscore100_train_test_ave = np.concatenate((score100_train_QT, score100_test_QT), axis=0)\n\n\n\t#print('check 0 number after QT')\n\t#print(score100_train_QT.shape)\n\t#print(np.sum(score100_train_QT[0,:,11]==0))\n\t#print(np.sum(score100_test_QT[0,:,11]==0))\n\n\n\tsamples_new = np.array(['train', 'test']).astype('bytes_')\n\n\t### write output\n\twith h5py.File(output_h5file, \"w\") as outfile:\n\t\toutfile.create_dataset(chrom, data=np.array(score100_train_test_ave), compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)\n\t\toutfile.create_dataset('%s_starts' % chrom, data=starts, compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)\n\t\toutfile.create_dataset('feature_names', data=features, compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)\n\t\toutfile.create_dataset('samples', data=samples_new, compression='gzip', shuffle=True, fletcher32=True, compression_opts=9)\n\n\toutfile.close()\n\n\n\nif __name__ == '__main__':\n\tfire.Fire(merge_ave)\n\n\n"
] | [
[
"pandas.read_csv",
"sklearn.preprocessing.QuantileTransformer",
"numpy.concatenate",
"numpy.intersect1d",
"numpy.mean",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
MeisQuietude/Simple-Neural-Network | [
"66ca182837f38af9acf636a44809517e99fdc57b"
] | [
"app/data/__init__.py"
] | [
"from numpy import array\n\nfrom ._parse import raw_data_in, raw_data_out, raw_data_test\n\n# Append 1 as additional parameter to avoid extra multiplication by 0\nraw_data_in = [set_ + [1] for set_ in raw_data_in]\nraw_data_test = [set_ + [1] for set_ in raw_data_test]\n\n# Create ndarray of each data set for export it\ndata_in = array(raw_data_in)\ndata_out = array([raw_data_out]).T\ndata_test = array(raw_data_test)\n\nprint(f\"Train cases: {len(data_in)}\")\nif len(data_test):\n print(f\"Test cases: {len(data_test)}\")\nprint()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ElsevierSoftwareX/SOFTX-D-21-00117 | [
"8ce9f9b5c1f1bb07733f248763759b05454690c8"
] | [
"Examples/12_tacticities/polycarboxybetaine/scripts/polymerization.py"
] | [
"from pysimm import lmps, system, forcefield\nfrom pysimm.apps.random_walk import random_walk, random_walk_tacticity, check_tacticity\n\nimport matplotlib.pyplot as mplp\nimport numpy \nimport os\n\n# ----------> Here starts the main body of polymerisation code <------------\ndef cap_with_methyls(input_sst, ff):\n '''\n An utility method that implements capping of free ends of polymer chains with methyl \n groups in all-atom forcefield representation\n '''\n # Let's cap the oligomer with the methyl (-CH3) group\n captypes = []\n for cpn in ['CG331', 'HGA3']:\n tmp = input_sst.particle_types.get(cpn)\n if tmp:\n cpt = tmp[0]\n else:\n cpt = ff.particle_types.get(cpn)[0].copy()\n input_sst.particle_types.add(cpt)\n captypes.append(cpt)\n\n for p in input_sst.particles:\n if p.linker is not None:\n if len(p.bonded_to) < 4:\n\n # assuming that the linker atom is sp3 hybridised C let's define the last non-occupied direction\n # of the tetrahedron\n dir = numpy.zeros(3)\n for p_ in p.bonded_to:\n dir += numpy.array([p.x, p.y, p.z]) - numpy.array([p_.x, p_.y, p_.z])\n\n dir = dir / numpy.linalg.norm(dir)\n cap_c = system.Particle(x=p.x + 1.53 * dir[0], y=p.y + 1.53 * dir[1], z=p.z + 1.53 * dir[2],\n type=captypes[0])\n input_sst.add_particle_bonded_to(cap_c, p, f=ff)\n\n dir_h = numpy.array([1.0, 1.0, 1.0])\n dir_h[0] = -(dir_h[1] * dir[1] + dir_h[2] * dir[2]) / dir[0]\n dir_h = dir_h / numpy.linalg.norm(dir_h)\n\n dir_h2 = numpy.array([1.0, 1.0, -1.0])\n dir_h2[1] = (dir[2] / dir[0] - dir_h[2] / dir_h[0]) / (dir[1] / dir[0] - dir_h[1] / dir_h[0])\n dir_h2[0] = dir[2] / dir[0] - dir[1] * dir_h2[1] / dir[0]\n dir_h2 = dir_h2 / numpy.linalg.norm(dir_h2)\n\n stretch = 0.78\n input_sst.add_particle_bonded_to(system.Particle(x=cap_c.x + stretch * dir[0] + stretch * dir_h[0],\n y=cap_c.y + stretch * dir[1] + stretch * dir_h[1],\n z=cap_c.z + stretch * dir[2] + stretch * dir_h[2],\n type=captypes[1]), cap_c, f=ff)\n input_sst.add_particle_bonded_to(system.Particle(x=cap_c.x + stretch * dir[0] + stretch * dir_h2[0],\n y=cap_c.y + stretch * dir[1] + stretch * dir_h2[1],\n z=cap_c.z + stretch * dir[2] + stretch * dir_h2[2],\n type=captypes[1]), cap_c, f=ff)\n input_sst.add_particle_bonded_to(system.Particle(x=cap_c.x + stretch * dir[0] - stretch * dir_h2[0],\n y=cap_c.y + stretch * dir[1] - stretch * dir_h2[1],\n z=cap_c.z + stretch * dir[2] - stretch * dir_h2[2],\n type=captypes[1]), cap_c, f=ff)\n input_sst.objectify()\n input_sst.center(what='particles', at=[0.0, 0.0, 0.0], move_both=False)\n\n sim = lmps.Simulation(input_sst, log='capping_opt.log')\n sim.add_min(min_style='cg', name='min_cg', etol=1.0e-6, ftol=1.0e-6, maxiter=int(1e+6), maxeval=int(1e+7))\n sim.run()\n\n\n# ----------> Here is the declaratin section -- majority of parameters <------------\n# ----------> to adjust for your monomer to polymerize it correctly <------------\n\n# defines whether free ends of oligomer chains will be capped with methyls or not\nis_cap = True\n\n# length of the polymer chain built in random_walk() polymerisation\nchain_len = 10\n\n# model of repititive unit for the polymer to built -- code assumes that the monomer is uncapped -- two atoms in\n# the molecule which are called linkers are undercoordinated\ndata_path = '../../../../pysimm/models/monomers/topologies/'\nmonomer = system.read_pdb(data_path + 'cbma.pdb', str_file=data_path + 'cbma.str')\n\n# mapping defines 'geometrically important' atoms in a monomer you use: 'head' and 'tail' are connection points for\n# polymer chain growth; 'mirror' together with 
head and tail define plane which will mirrors the monomer for\n# syndiotactic insertion. Best way to define the mirror atom is to assign it to the capping atom of the head, though\n# the capping atom does not exist yet, it will be created before the random_walk_tacticity() run and will have next\n# availible index (e.g. if there were 35 atoms in uncapped system it will have index #36)\nlnkr_atoms = {'head': 1, 'tail': 2}\n\n# list defines indexes of atoms for the tacticity analysis and marks 1st backbone atom, 2nd backbone atom, atom in\n# the first side chain and atom in the second side chain (like methyl group)\ntacticity_order = [1, 2, 3, 5]\n\n\n# ----------> Here starts the main body of polymerisation code <------------\nff = forcefield.Charmm()\n\n# we assign linkers by absolute indexes, so for your monomer they likely will be different\n# thus please assign them correct values\nfor nm in lnkr_atoms.keys():\n monomer.particles[lnkr_atoms[nm]].linker = nm\n\n# Type forcefield particles types in the system automatically using Charmm-FF typer of PySIMM\nmonomer.apply_forcefield(ff, charges=None)\n\n# In this example partial charges of the system are already set in the .str file and read into the system\n# let's check whether they add up to 0, and add counterion if not, because some of the monomers in the library are charged.\nmonomer.set_charge()\nprint('\\tRead monomer has charge of {:}q'.format(round(monomer.charge, 8)))\nif abs(monomer.charge + 1) < 0.1:\n print('\\tAdding SODIUM counterion to equilibrate the system chargewise')\n cntrion_tp = ff.particle_types.get('SOD')[0].copy()\n chrg = 1\nelif abs(monomer.charge - 1) < 0.1:\n print('\\tAdding CLORINE counterion to equilibrate the system chargewise')\n cntrion_tp = ff.particle_types.get('CLA')[0].copy()\n chrg = -1\nelse:\n cntrion_tp = None\n\nif cntrion_tp:\n monomer.particle_types.add(cntrion_tp)\n monomer.particles.add(system.Particle(x=monomer.cog[0], y=monomer.cog[1], z=monomer.cog[2] + 5.0, type=cntrion_tp,\n charge=chrg, molecule=monomer.molecules[1]))\n\n# -------------> Polymer construction and tacticity check <--------------\nsngl_chain = random_walk(monomer, chain_len, forcefield=ff, density=0.01, print_to_screen='true', traj=False, unwrap=True)\n\nif is_cap:\n cap_with_methyls(sngl_chain, ff)\n\n# After polymerisation and possibly capping is done let's cleanup: remove all counterions and reshape the simulation\n# box so that the chain is in the center of a cube with padding of 3 nm\n\nsngl_chain.center(what='particles', at=[0.0, 0.0, 0.0], move_both=False)\n\nif cntrion_tp:\n for p in sngl_chain.particles:\n if p.type.name == cntrion_tp.name:\n sngl_chain.particles.remove(p.tag, update=False)\n sngl_chain.molecules[p.molecule.tag].particles.remove(p.tag, update=False)\n sngl_chain.objectify()\n\nbxSize = 30.0\nfor param in ['dx', 'dy', 'dz']:\n setattr(sngl_chain, param, bxSize)\n\nsngl_chain.write_pdb('1.polymer.random_walk' + '.pdb')\n\n# check tacticity of the simple polymer chain \ntacticity_stat = check_tacticity(sngl_chain, tacticity_order, len(monomer.particles))\nprint('\\t Simple random_walk chain contains {:}/{:} meso- and {:}/{:} racemo- diads'.format(\n tacticity_stat[1].count(True), chain_len, tacticity_stat[1].count(False), chain_len))\n\n# >>>>>> Controlled tactisity <<<<<<<<<<<<\nnew_monomer = monomer.copy()\n\n# random_walk_tacticity requires capped molecule, however, cap atoms will be deleted \n# those are baiscally dummy atoms, an can be of any type. 
Let's define them as a carbon\n# backbone atoms, which will allow us not to change types of other carbon atoms in the backbone\ncaptype = (new_monomer.particle_types.get('CG331') +\n new_monomer.particle_types.get('CG321') +\n new_monomer.particle_types.get('CG311'))[0]\n# loop through the particles to add caps to linkers\nfor p in new_monomer.particles:\n if p.linker:\n # define and normalize directional vector for the capping atom\n tmp = numpy.array([sum([p.x - p_.x for p_ in p.bonded_to]), \n sum([p.y - p_.y for p_ in p.bonded_to]), \n sum([p.z - p_.z for p_ in p.bonded_to])])\n tmp = 1.54 * tmp / numpy.linalg.norm(tmp)\n # add new capping particle along the defined direction\n new_p = new_monomer.add_particle_bonded_to(system.Particle(x=p.x + tmp[0], y=p.y + tmp[1],\n z=p.z + tmp[2], type=captype), p, f=ff)\n # decorate particle with '****_cap' tag\n new_p.rnd_wlk_tag = p.linker + '_cap'\n if p.linker == 'head':\n setattr(new_p, 'linker', 'mirror')\n\nnew_monomer.objectified = False\nnew_monomer.objectify()\n\nnew_monomer.write_pdb('2.monomer_to_rndwlk-tacticity.pdb')\n\n# first random walk **without** simulations\npolymer = random_walk_tacticity(new_monomer, chain_len + 1, forcefield=ff, tacticity='syndiotactic', sim='no')\npolymer.write_pdb('3.polymer_rndwlk-tacticity.no-sim.pdb')\n\ntacticity_stat = check_tacticity(polymer, tacticity_order, len(monomer.particles))\nprint('\\t Polymer chain built without MD contains {:}/{:} meso- and {:}/{:} racemo- diads'.format(\n tacticity_stat[1].count(True), chain_len, tacticity_stat[1].count(False), chain_len))\n\n\n# second, random walk **with** simulations\npolymer = random_walk_tacticity(new_monomer, chain_len + 1, forcefield=ff, tacticity='syndiotactic')\npolymer.write_pdb('4.polymer_rndwlk-tacticity.pdb')\n\ntacticity_stat = check_tacticity(polymer, tacticity_order, len(monomer.particles))\nprint('\\t Polymer chain built using MD contains {:}/{:} meso- and {:}/{:} racemo- diads'.format(\n tacticity_stat[1].count(True), chain_len, tacticity_stat[1].count(False), chain_len))\n\n"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AutuanLiu/PyCon | [
"ba0e2005d1e0301d77bb8111ff67b663dc234784"
] | [
"src/plot4.py"
] | [
"import numpy as np\n\nfrom bokeh.plotting import figure, output_file, show\n\n# prepare some data\nN = 4000\nx = np.random.random(size=N) * 100\ny = np.random.random(size=N) * 100\nradii = np.random.random(size=N) * 1.5\ncolors = [\n \"#%02x%02x%02x\" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)\n]\n\n# output to static HTML file (with CDN resources)\noutput_file(\"color_scatter.html\", title=\"color_scatter.py example\", mode=\"cdn\")\n\nTOOLS=\"resize,crosshair,pan,wheel_zoom,box_zoom,reset,box_select,lasso_select\"\n\n# create a new plot with the tools above, and explicit ranges\np = figure(tools=TOOLS, x_range=(0,100), y_range=(0,100))\n\n# add a circle renderer with vectorized colors and sizes\np.circle(x,y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)\n\n# show the results\nshow(p)\n"
] | [
[
"numpy.random.random"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mahmoudyusof/trax | [
"20ee40ff9c7c55ba8507a71967806a4438f587e7"
] | [
"trax/math/tf.py"
] | [
"# coding=utf-8\n# Copyright 2020 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Trax math: TF backend.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom trax.shapes import ShapeDtype\nfrom trax.tf_numpy import extensions as tf_np_extensions\nfrom trax.tf_numpy import numpy as tf_np\n\n\ndef tf_abstract_eval(f):\n \"\"\"Returns a function that evaluates `f` given input shapes and dtypes.\n\n It transforms function `f` to a function that performs the same computation as\n `f` but only on shapes and dtypes (a.k.a. shape inference).\n\n Args:\n f: the function to be transformed.\n\n Returns:\n A function whose input arguments can be either the same as `f`'s or only\n their shapes/dtypes represented by `ShapeDtype`, and whose return values are\n `ShapeDtype`s with the same nested structure as `f`'s return values.\n \"\"\"\n f_shape = tf_np_extensions.eval_on_shapes(f)\n def from_shape_type(x):\n if isinstance(x, ShapeDtype):\n return tf.TensorSpec(x.shape, x.dtype)\n else:\n return x\n def to_shape_type(x): # pylint: disable=missing-docstring\n # TODO(wangpeng): handle partial output shapes using `tf.shape`.\n def to_numpy_shape(s):\n if s.is_fully_defined():\n return tuple(s.as_list())\n else:\n raise ValueError(\"The output shapes (%s) of the dry-run'ed function are\"\n ' not fully defined.' 
% s)\n def to_numpy_dtype(t):\n return np.dtype(t.as_numpy_dtype)\n if isinstance(x, tf.TensorSpec):\n return ShapeDtype(to_numpy_shape(x.shape), to_numpy_dtype(x.dtype))\n else:\n return x\n def f_return(*args):\n args = tf.nest.map_structure(from_shape_type, args)\n res = f_shape(*args)\n return tf.nest.map_structure(to_shape_type, res)\n return f_return\n\n\n# The arguments order is different from tf_np_extensions.uniform\ndef tf_randint(key, shape, minval, maxval, dtype=np.int32):\n \"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n\n Args:\n key: a PRNGKey used as the random key.\n shape: a tuple of nonnegative integers representing the shape.\n minval: int or array of ints broadcast-compatible with ``shape``, a minimum\n (inclusive) value for the range.\n maxval: int or array of ints broadcast-compatible with ``shape``, a maximum\n (exclusive) value for the range.\n dtype: optional, an int dtype for the returned values (default int32).\n\n Returns:\n A random array with the specified shape and dtype.\n \"\"\"\n return tf_np_extensions.uniform(key, shape, minval=minval, maxval=maxval,\n dtype=dtype)\n\n\n_tf_xla_forced_compile_enabled = False\n\n\ndef tf_xla_forced_compile_enabled():\n return _tf_xla_forced_compile_enabled\n\n\ndef set_tf_xla_forced_compile(b):\n global _tf_xla_forced_compile_enabled\n _tf_xla_forced_compile_enabled = b\n\n\ndef _tf_jit(*args, **kwargs):\n kwargs['xla_forced_compile'] = tf_xla_forced_compile_enabled()\n kwargs.pop('donate_argnums', None) # donate_argnums not used in TF\n return tf_np_extensions.jit(*args, **kwargs)\n\n\ndef _tf_pmap(*args, **kwargs):\n kwargs.pop('donate_argnums', None) # donate_argnums not used in TF\n return tf_np_extensions.pmap(*args, **kwargs)\n\n\nTF_BACKEND = {\n 'name': 'tf',\n 'np': tf_np,\n 'jit': _tf_jit,\n 'stop_gradient': tf_np_extensions.stop_gradient,\n 'grad': tf_np_extensions.grad,\n 'vjp': tf_np_extensions.vjp,\n 'custom_grad': tf_np_extensions.custom_grad,\n 'abstract_eval': tf_abstract_eval,\n 'expit': tf_np_extensions.expit,\n 'erf': tf_np_extensions.erf,\n 'logsumexp': tf_np_extensions.logsumexp,\n 'conv': tf_np_extensions.conv,\n 'lt': lambda x, y: x < y,\n 'avg_pool': tf_np_extensions.avg_pool,\n 'max_pool': tf_np_extensions.max_pool,\n 'sort_key_val': tf_np_extensions.sort_key_val,\n 'random_uniform': tf_np_extensions.uniform,\n 'random_randint': tf_randint,\n 'random_normal': tf_np_extensions.normal,\n 'random_bernoulli': tf_np_extensions.bernoulli,\n 'random_get_prng': tf_np_extensions.prng,\n 'random_split': tf_np_extensions.split,\n 'dataset_as_numpy': tf_np_extensions.dataset_as_numpy,\n 'device_count': lambda: max(len(tf_np_extensions.accelerators()), 1),\n 'pmap': _tf_pmap,\n 'psum': tf_np_extensions.psum,\n}\n"
] | [
[
"tensorflow.compat.v2.nest.map_structure",
"tensorflow.compat.v2.TensorSpec",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sadin911/projectTM | [
"e434022157b30dbbbd7e052404f1e24c0f27712b"
] | [
"src/train.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Tensorflow 2.0\n\"\"\"\nCreated on Wed Jan 8 11:08:18 2020\n\n@author: chonlatid\n\"\"\"\n\nimport os\nimport shutil\nimport tensorflow as tf\n\nimport datetime\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nif tf.test.gpu_device_name():\n print('GPU found')\nelse:\n print(\"No GPU found\")\nfrom PIL import Image\nfrom tensorflow.python.keras.layers.convolutional import UpSampling2D, Conv2D\nfrom tensorflow.python.keras.layers.advanced_activations import LeakyReLU\nfrom tensorflow.python.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.python.keras.layers import BatchNormalization,Dropout, MaxPooling2D , GlobalMaxPool1D,Reshape,Dropout\nfrom tensorflow.python.keras.layers import Input,Activation, Dense, Flatten, Concatenate, LSTM, Embedding\nimport numpy as np\nimport gendata\nimport io\nimport sys\nimport glob2\nfrom os.path import join\nimport tensorflow.keras.backend as K\nimport random\nimport tensorflow.python.keras\nfrom tensorflow.keras.callbacks import TensorBoard\ntrain_log_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\n\npath_input = r'D:/datasets/LSLOGO/train_and_test/train/**/'\npath_test = r'/home/keng/Python/Dataset/DocData/testset/**/'\nbg_path = r'/home/keng/Python/Dataset/background/**/'\nsave_path = r'/home/keng/docsegmentation/4connerwithsegment/'\ntypes = ('*.bmp', '*.jpg' ,'.*gif' ,'*.png' , '*.tif') # the tuple of file types\nos.makedirs('logs',exist_ok=True)\nshutil.rmtree('logs')\ntrain_log_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\n\nclass DocScanner():\n def __init__(self):\n # Input shape\n self.img_rows = 128\n self.img_cols = 128\n self.channels = 3\n self.gf = 16\n self.img_shape = (self.img_rows, self.img_cols, self.channels)\n self.input_shape = (self.img_rows, self.img_cols, self.channels)\n self.model = self.build_autoencoder()\n op = Adam(0.00001)\n self.model.load_weights(r'./cnnATECSmallDense.h5')\n # self.model.save('./tfModels/corner')\n self.model.compile(loss=['mse'],\n optimizer=op,\n metrics=[tf.keras.metrics.MeanSquaredError()])\n self.model.summary()\n \n if tf.test.gpu_device_name():\n print('GPU found')\n else:\n print(\"No GPU found\")\n self.gen_data = gendata.gendata()\n \n self.pathlist = []\n self.testlist = []\n self.pathbglist = []\n\n #self.pathbglist = glob2.glob(bg_path)\n\n for files in types:\n self.pathlist.extend(glob2.glob(join(path_input, files)))\n \n print(len(self.pathlist))\n \n def build_autoencoder(self):\n input_img = Input(shape=(128, 128, 3))\n x = Conv2D(8, (3, 3), activation='relu', padding='same')(input_img)\n x = MaxPooling2D((2, 2), padding='same')(x)\n x = BatchNormalization(momentum = 0.8)(x)\n x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)\n x = MaxPooling2D((2, 2), padding='same')(x)\n x = Dropout(0.2)(x)\n x = BatchNormalization(momentum = 0.8)(x)\n x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2), padding='same')(x)\n x = BatchNormalization(momentum = 0.8)(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2), padding='same')(x)\n x = BatchNormalization(momentum = 0.8)(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = MaxPooling2D((2, 2), padding='same')(x)\n x = BatchNormalization(momentum = 0.8)(x)\n x = 
Dropout(0.2)(x)\n x = Flatten()(x)\n encoded = Dense(8192)(x)\n \n # at this point the representation is (4, 4, 8) i.e. 128-dimensional\n x = Dense(8192)(encoded)\n x = Reshape((8,8,128))(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same')(x)\n x = UpSampling2D((2, 2))(x)\n x = BatchNormalization(momentum = 0.8)(x)\n x = Conv2D(64, (3, 3), activation='relu', padding='same')(x)\n x = UpSampling2D((2, 2))(x)\n x = BatchNormalization(momentum = 0.8)(x)\n x = Dropout(0.2)(x)\n x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)\n x = UpSampling2D((2, 2))(x)\n x = BatchNormalization(momentum = 0.8)(x)\n x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)\n x = UpSampling2D((2, 2))(x)\n x = BatchNormalization(momentum = 0.8)(x)\n x = Dropout(0.2)(x)\n decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same')(x)\n autoencoder = Model(input_img, decoded)\n return autoencoder\n \n def train(self,start_epoch, max_epoch, batch_size, viz_interval):\n log_dir=\"logs/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n max_step = len(self.pathlist) // batch_size\n permu_ind = list(range(len(self.pathlist)))\n step = 0\n random.shuffle(self.pathlist)\n for epoch in range(start_epoch,max_epoch):\n permu_ind = np.random.permutation(permu_ind)\n epoch_loss = []\n \n for step_index in range(max_step):\n batch_img = np.zeros((batch_size,self.input_shape[0],self.input_shape[1],self.input_shape[2] ))\n batch_target = np.zeros((batch_size,self.input_shape[0],self.input_shape[1],self.input_shape[2] ))\n batch_mask = np.zeros((batch_size,self.input_shape[0],self.input_shape[1]))\n \n for batch_index in range(batch_size):\n img,target,mask = self.gen_data.gen_data((self.pathlist[ step_index * batch_size + batch_index ]))\n batch_img[batch_index] = img\n batch_target[batch_index] = target\n \n train_loss = self.model.train_on_batch(batch_img,batch_target)\n # with train_summary_writer.as_default():\n tf.summary.scalar('loss', train_loss[0], step=step)\n # tf.summary.scalar('accuracy', train_loss[1], step=step)\n step = step + 1 \n \n \n # Reset metrics every epoch\n \n sys.stdout.write('\\r epoch ' + str(epoch) + ' / ' + str(max_epoch) + ' ' + 'step ' + str(step_index) + ' / ' + str(max_step) + ' loss = ' + str(train_loss))\n\n # epoch_loss.append(loss)\n \n if(step_index % viz_interval == 0): \n img_viz,target,mask = self.gen_data.gen_data((self.pathlist[np.random.randint(0,len(self.pathlist))]))\n indput_data = np.expand_dims(img_viz, axis = 0)\n predict_mask = self.model.predict(indput_data)[0]\n target = (target+1)*127.5\n img_viz = (img_viz+1)*127.5\n predict_mask = (predict_mask+1)*127.5\n test_img = Image.fromarray(predict_mask.astype('uint8'))\n input_img = Image.fromarray(img_viz.astype('uint8'))\n target_img = Image.fromarray(target.astype('uint8'))\n self.model.save_weights('cnnATECSmallDense.h5')\n \n try:\n input_img.save(\"viz.jpg\")\n test_img.save(\"test_img.jpg\")\n target_img.save(\"target_img.jpg\")\n print(\"tested\")\n \n except IOError as e:\n print(\"I/O error({0}): {1}\".format(e.errno, e.strerror))\n \n \n \nif __name__ == '__main__':\n \n doc = DocScanner()\n \n doc.train(1,10000000,4,100)"
] | [
[
"numpy.expand_dims",
"tensorflow.python.keras.layers.Flatten",
"tensorflow.python.keras.layers.MaxPooling2D",
"tensorflow.test.gpu_device_name",
"tensorflow.python.keras.layers.Dense",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.summary.scalar",
"tensorflow.python.keras.layers.Reshape",
"tensorflow.python.keras.layers.BatchNormalization",
"tensorflow.keras.metrics.MeanSquaredError",
"tensorflow.python.keras.layers.convolutional.Conv2D",
"numpy.zeros",
"tensorflow.python.keras.layers.convolutional.UpSampling2D",
"tensorflow.python.keras.layers.Dropout",
"tensorflow.python.keras.layers.Input",
"tensorflow.keras.optimizers.Adam",
"tensorflow.python.keras.models.Model",
"numpy.random.permutation",
"tensorflow.summary.create_file_writer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
charliewhyman/santander-cycles | [
"63d410775ea4921dd143e38f5ec8ad3e0ef39343"
] | [
"get_bikepoints.py"
] | [
"import urllib.request\nimport json\nimport pandas as pd\n\n# Define bikepoint URL (from TfL Unified API)\nbikepoint_url = \"https://api.tfl.gov.uk/BikePoint\"\n\n# Request and open bikepoint url\nresponse = urllib.request.urlopen(bikepoint_url)\nbikepoints = json.loads(response.read())\n\n# Load json file into a data frame\ndf = pd.read_json(bikepoint_url, orient='columns')\n\n# Keep id, name and lat/long fields\ndf.drop(df.columns.difference(['id','commonName','lat','lon']), 1, inplace=True)\ndf.to_csv(\"bikepoints.csv\",index=False)\n"
] | [
[
"pandas.read_json"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
asherif844/diarization | [
"ee9c9a7d88def5110bed2a3696cc484d29def646"
] | [
"pyAudioAnalysis/audioSegmentation.py"
] | [
"from __future__ import print_function\nimport numpy as np\nimport sklearn.cluster\nimport scipy\nimport os\nfrom pyAudioAnalysis import ShortTermFeatures as sF\nfrom pyAudioAnalysis import MidTermFeatures as aF\nfrom pyAudioAnalysis import audioTrainTest as aT\nfrom pyAudioAnalysis import audioBasicIO\nfrom scipy.spatial import distance\nimport matplotlib.pyplot as plt\nimport sklearn.discriminant_analysis\nimport csv\nimport os.path\nimport sklearn\nimport sklearn.cluster\nimport hmmlearn.hmm\nimport pickle as cPickle\nimport glob\n\n\"\"\" General utility functions \"\"\"\n\n\ndef smoothMovingAvg(inputSignal, windowLen=11):\n windowLen = int(windowLen)\n if inputSignal.ndim != 1:\n raise ValueError(\"\")\n if inputSignal.size < windowLen:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n if windowLen < 3:\n return inputSignal\n s = np.r_[2*inputSignal[0] - inputSignal[windowLen-1::-1],\n inputSignal, 2*inputSignal[-1]-inputSignal[-1:-windowLen:-1]]\n w = np.ones(windowLen, 'd')\n y = np.convolve(w/w.sum(), s, mode='same')\n return y[windowLen:-windowLen+1]\n\n\ndef selfSimilarityMatrix(featureVectors):\n \"\"\"\n This function computes the self-similarity matrix for a sequence\n of feature vectors.\n ARGUMENTS:\n - featureVectors: a np matrix (nDims x nVectors) whose i-th column\n corresponds to the i-th feature vector\n\n RETURNS:\n - S: the self-similarity matrix (nVectors x nVectors)\n \"\"\"\n\n [nDims, nVectors] = featureVectors.shape\n [featureVectors2, MEAN, STD] = aT.normalizeFeatures([featureVectors.T])\n featureVectors2 = featureVectors2[0].T\n S = 1.0 - distance.squareform(distance.pdist(featureVectors2.T, 'cosine'))\n return S\n\n\ndef flags2segs(flags, window):\n \"\"\"\n ARGUMENTS:\n - flags: a sequence of class flags (per time window)\n - window: window duration (in seconds)\n\n RETURNS:\n - segs: a sequence of segment's limits: segs[i,0] is start and\n segs[i,1] are start and end point of segment i\n - classes: a sequence of class flags: class[i] is the class ID of\n the i-th segment\n \"\"\"\n\n if len(flags)==1:\n segs = [0, window]\n classes = flags\n return segs, classes\n\n preFlag = 0\n cur_flag = 0\n n_segs = 0\n\n cur_val = flags[cur_flag]\n segsList = []\n classes = []\n while (cur_flag < len(flags) - 1):\n stop = 0\n preFlag = cur_flag\n preVal = cur_val\n while (stop == 0):\n cur_flag = cur_flag + 1\n tempVal = flags[cur_flag]\n if ((tempVal != cur_val) | (cur_flag == len(flags) - 1)): # stop\n n_segs = n_segs + 1\n stop = 1\n cur_seg = cur_val\n cur_val = flags[cur_flag]\n segsList.append((cur_flag * window))\n classes.append(preVal)\n segs = np.zeros((len(segsList), 2))\n\n for i in range(len(segsList)):\n if i > 0:\n segs[i, 0] = segsList[i-1]\n segs[i, 1] = segsList[i]\n return (segs, classes)\n\n\ndef segs2flags(seg_start, seg_end, seg_label, win_size):\n \"\"\"\n This function converts segment endpoints and respective segment\n labels to fix-sized class labels.\n ARGUMENTS:\n - seg_start: segment start points (in seconds)\n - seg_end: segment endpoints (in seconds)\n - seg_label: segment labels\n - win_size: fix-sized window (in seconds)\n RETURNS:\n - flags: np array of class indices\n - class_names: list of classnames (strings)\n \"\"\"\n flags = []\n class_names = list(set(seg_label))\n curPos = win_size / 2.0\n while curPos < seg_end[-1]:\n for i in range(len(seg_start)):\n if curPos > seg_start[i] and curPos <= seg_end[i]:\n break\n flags.append(class_names.index(seg_label[i]))\n curPos += win_size\n return np.array(flags), 
class_names\n\ndef computePreRec(cm, class_names):\n \"\"\"\n This function computes the precision, recall and f1 measures,\n given a confusion matrix\n \"\"\"\n n_classes = cm.shape[0]\n if len(class_names) != n_classes:\n print(\"Error in computePreRec! Confusion matrix and class_names \"\n \"list must be of the same size!\")\n return\n precision = []\n recall = []\n f1 = [] \n for i, c in enumerate(class_names):\n precision.append(cm[i,i] / np.sum(cm[:,i]))\n recall.append(cm[i,i] / np.sum(cm[i,:]))\n f1.append( 2 * precision[-1] * recall[-1] / (precision[-1] + recall[-1]))\n return recall, precision, f1\n\n\ndef readSegmentGT(gt_file):\n \"\"\"\n This function reads a segmentation ground truth file,\n following a simple CSV format with the following columns:\n <segment start>,<segment end>,<class label>\n\n ARGUMENTS:\n - gt_file: the path of the CSV segment file\n RETURNS:\n - seg_start: a np array of segments' start positions\n - seg_end: a np array of segments' ending positions\n - seg_label: a list of respective class labels (strings)\n \"\"\"\n f = open(gt_file, 'rt')\n reader = csv.reader(f, delimiter=',')\n seg_start = []\n seg_end = []\n seg_label = []\n for row in reader:\n if len(row) == 3:\n seg_start.append(float(row[0]))\n seg_end.append(float(row[1]))\n #if row[2]!=\"other\":\n # seg_label.append((row[2]))\n #else:\n # seg_label.append(\"silence\")\n seg_label.append((row[2]))\n return np.array(seg_start), np.array(seg_end), seg_label\n\n\ndef plotSegmentationResults(flags_ind, flags_ind_gt, class_names, mt_step, ONLY_EVALUATE=False):\n \"\"\"\n This function plots statistics on the classification-segmentation results \n produced either by the fix-sized supervised method or the HMM method.\n It also computes the overall accuracy achieved by the respective method \n if ground-truth is available.\n \"\"\"\n \n flags = [class_names[int(f)] for f in flags_ind]\n (segs, classes) = flags2segs(flags, mt_step) \n min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0]) \n if min_len > 0:\n accuracy = np.sum(flags_ind[0:min_len] == \n flags_ind_gt[0:min_len]) / float(min_len)\n else:\n accuracy = -1\n\n if not ONLY_EVALUATE:\n duration = segs[-1, 1]\n s_percentages = np.zeros((len(class_names), ))\n percentages = np.zeros((len(class_names), ))\n av_durations = np.zeros((len(class_names), ))\n\n for iSeg in range(segs.shape[0]):\n s_percentages[class_names.index(classes[iSeg])] += \\\n (segs[iSeg, 1]-segs[iSeg, 0])\n\n for i in range(s_percentages.shape[0]):\n percentages[i] = 100.0 * s_percentages[i] / duration\n S = sum(1 for c in classes if c == class_names[i])\n if S > 0:\n av_durations[i] = s_percentages[i] / S\n else:\n av_durations[i] = 0.0\n\n for i in range(percentages.shape[0]):\n print(class_names[i], percentages[i], av_durations[i])\n\n font = {'size': 10}\n plt.rc('font', **font)\n\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n ax1.set_yticks(np.array(range(len(class_names))))\n ax1.axis((0, duration, -1, len(class_names)))\n ax1.set_yticklabels(class_names)\n ax1.plot(np.array(range(len(flags_ind))) * mt_step +\n mt_step / 2.0, flags_ind)\n if flags_ind_gt.shape[0] > 0:\n ax1.plot(np.array(range(len(flags_ind_gt))) * mt_step +\n mt_step / 2.0, flags_ind_gt + 0.05, '--r')\n plt.xlabel(\"time (seconds)\")\n if accuracy >= 0:\n plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))\n\n ax2 = fig.add_subplot(223)\n plt.title(\"Classes percentage durations\")\n ax2.axis((0, len(class_names) + 1, 0, 100))\n ax2.set_xticks(np.array(range(len(class_names) + 1)))\n 
ax2.set_xticklabels([\" \"] + class_names)\n print(np.array(range(len(class_names))), percentages)\n ax2.bar(np.array(range(len(class_names))) + 0.5, percentages)\n\n ax3 = fig.add_subplot(224)\n plt.title(\"Segment average duration per class\")\n ax3.axis((0, len(class_names)+1, 0, av_durations.max()))\n ax3.set_xticks(np.array(range(len(class_names) + 1)))\n ax3.set_xticklabels([\" \"] + class_names)\n ax3.bar(np.array(range(len(class_names))) + 0.5, av_durations)\n fig.tight_layout()\n plt.show()\n return accuracy\n\n\ndef evaluateSpeakerDiarization(flags, flags_gt):\n\n min_len = min(flags.shape[0], flags_gt.shape[0])\n flags = flags[0:min_len]\n flags_gt = flags_gt[0:min_len]\n\n u_flags = np.unique(flags)\n u_flags_gt = np.unique(flags_gt)\n\n # compute contigency table:\n c_matrix = np.zeros((u_flags.shape[0], u_flags_gt.shape[0]))\n for i in range(min_len):\n c_matrix[int(np.nonzero(u_flags == flags[i])[0]),\n int(np.nonzero(u_flags_gt == flags_gt[i])[0])] += 1.0\n\n Nc, Ns = c_matrix.shape\n N_s = np.sum(c_matrix, axis=0)\n N_c = np.sum(c_matrix, axis=1)\n N = np.sum(c_matrix)\n\n purity_clust = np.zeros((Nc, ))\n purity_speak = np.zeros((Ns, ))\n # compute cluster purity:\n for i in range(Nc):\n purity_clust[i] = np.max((c_matrix[i, :])) / (N_c[i])\n\n for j in range(Ns):\n purity_speak[j] = np.max((c_matrix[:, j])) / (N_s[j])\n\n purity_cluster_m = np.sum(purity_clust * N_c) / N\n purity_speaker_m = np.sum(purity_speak * N_s) / N\n\n return purity_cluster_m, purity_speaker_m\n\n\ndef trainHMM_computeStatistics(features, labels):\n \"\"\"\n This function computes the statistics used to train\n an HMM joint segmentation-classification model\n using a sequence of sequential features and respective labels\n\n ARGUMENTS:\n - features: a np matrix of feature vectors (numOfDimensions x n_wins)\n - labels: a np array of class indices (n_wins x 1)\n RETURNS:\n - start_prob: matrix of prior class probabilities (n_classes x 1)\n - transmat: transition matrix (n_classes x n_classes)\n - means: means matrix (numOfDimensions x 1)\n - cov: deviation matrix (numOfDimensions x 1)\n \"\"\"\n u_labels = np.unique(labels)\n n_comps = len(u_labels)\n\n n_feats = features.shape[0]\n\n if features.shape[1] < labels.shape[0]:\n print(\"trainHMM warning: number of short-term feature vectors \"\n \"must be greater or equal to the labels length!\")\n labels = labels[0:features.shape[1]]\n\n # compute prior probabilities:\n start_prob = np.zeros((n_comps,))\n for i, u in enumerate(u_labels):\n start_prob[i] = np.count_nonzero(labels == u)\n # normalize prior probabilities\n start_prob = start_prob / start_prob.sum()\n\n # compute transition matrix:\n transmat = np.zeros((n_comps, n_comps))\n for i in range(labels.shape[0]-1):\n transmat[int(labels[i]), int(labels[i + 1])] += 1\n # normalize rows of transition matrix:\n for i in range(n_comps):\n transmat[i, :] /= transmat[i, :].sum()\n\n means = np.zeros((n_comps, n_feats))\n for i in range(n_comps):\n means[i, :] = np.matrix(features[:, \n np.nonzero(labels == \n u_labels[i])[0]].mean(axis=1))\n\n cov = np.zeros((n_comps, n_feats))\n for i in range(n_comps):\n #cov[i,:,:] = np.cov(features[:,np.nonzero(labels==u_labels[i])[0]])\n # use line above if HMM using full gaussian distributions are to be used\n cov[i, :] = np.std(features[:, np.nonzero(labels == \n u_labels[i])[0]], \n axis=1)\n\n return start_prob, transmat, means, cov\n\n\ndef trainHMM_fromFile(wav_file, gt_file, hmm_model_name, mt_win, mt_step):\n \"\"\"\n This function trains a HMM model for 
segmentation-classification\n using a single annotated audio file\n ARGUMENTS:\n - wav_file: the path of the audio filename\n - gt_file: the path of the ground truth filename\n (a csv file of the form <segment start in seconds>,\n <segment end in seconds>,<segment label> in each row\n - hmm_model_name: the name of the HMM model to be stored\n - mt_win: mid-term window size\n - mt_step: mid-term window step\n RETURNS:\n - hmm: an object to the resulting HMM\n - class_names: a list of class_names\n\n After training, hmm, class_names, along with the mt_win and mt_step\n values are stored in the hmm_model_name file\n \"\"\"\n\n [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)\n flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step)\n [fs, x] = audioBasicIO.read_audio_file(wav_file)\n [F, _, _] = aF.mid_feature_extraction(x, fs, mt_win * fs, mt_step * fs,\n round(fs * 0.050), round(fs * 0.050))\n start_prob, transmat, means, cov = trainHMM_computeStatistics(F, flags)\n hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], \"diag\")\n\n hmm.startprob_ = start_prob\n hmm.transmat_ = transmat \n hmm.means_ = means\n hmm.covars_ = cov\n \n fo = open(hmm_model_name, \"wb\")\n cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)\n cPickle.dump(class_names, fo, protocol=cPickle.HIGHEST_PROTOCOL)\n cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL)\n cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL)\n fo.close()\n\n return hmm, class_names\n\n\ndef trainHMM_fromDir(dirPath, hmm_model_name, mt_win, mt_step):\n \"\"\"\n This function trains a HMM model for segmentation-classification using\n a where WAV files and .segment (ground-truth files) are stored\n ARGUMENTS:\n - dirPath: the path of the data diretory\n - hmm_model_name: the name of the HMM model to be stored\n - mt_win: mid-term window size\n - mt_step: mid-term window step\n RETURNS:\n - hmm: an object to the resulting HMM\n - class_names: a list of class_names\n\n After training, hmm, class_names, along with the mt_win\n and mt_step values are stored in the hmm_model_name file\n \"\"\"\n\n flags_all = np.array([])\n classes_all = []\n for i, f in enumerate(glob.glob(dirPath + os.sep + '*.wav')):\n # for each WAV file\n wav_file = f\n gt_file = f.replace('.wav', '.segments')\n if not os.path.isfile(gt_file):\n continue\n [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)\n flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step)\n for c in class_names:\n # update class names:\n if c not in classes_all:\n classes_all.append(c)\n [fs, x] = audioBasicIO.read_audio_file(wav_file)\n [F, _, _] = aF.mid_feature_extraction(x, fs, mt_win * fs,\n mt_step * fs, round(fs * 0.050),\n round(fs * 0.050))\n\n lenF = F.shape[1]\n lenL = len(flags)\n min_sm = min(lenF, lenL)\n F = F[:, 0:min_sm]\n flags = flags[0:min_sm]\n\n flagsNew = []\n for j, fl in enumerate(flags): # append features and labels\n flagsNew.append(classes_all.index(class_names[flags[j]]))\n\n flags_all = np.append(flags_all, np.array(flagsNew))\n\n if i == 0:\n f_all = F\n else:\n f_all = np.concatenate((f_all, F), axis=1)\n\n # compute HMM statistics\n start_prob, transmat, means, cov = trainHMM_computeStatistics(f_all,\n flags_all)\n # train the HMM\n hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], \"diag\")\n hmm.startprob_ = start_prob\n hmm.transmat_ = transmat \n hmm.means_ = means\n hmm.covars_ = cov\n\n fo = open(hmm_model_name, \"wb\") # save HMM model\n cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL)\n 
cPickle.dump(classes_all, fo, protocol=cPickle.HIGHEST_PROTOCOL)\n cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL)\n cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL)\n fo.close()\n\n return hmm, classes_all\n\n\ndef hmmSegmentation(wav_file_name, hmm_model_name, plot_res=False,\n gt_file_name=\"\"):\n [fs, x] = audioBasicIO.read_audio_file(wav_file_name)\n try:\n fo = open(hmm_model_name, \"rb\")\n except IOError:\n print(\"didn't find file\")\n return\n\n try:\n hmm = cPickle.load(fo)\n classes_all = cPickle.load(fo)\n mt_win = cPickle.load(fo)\n mt_step = cPickle.load(fo)\n except:\n fo.close()\n fo.close()\n\n [Features, _, _] = aF.mid_feature_extraction(x, fs, mt_win * fs,\n mt_step * fs,\n round(fs * 0.050),\n round(fs * 0.050))\n flags_ind = hmm.predict(Features.T) # apply model\n if os.path.isfile(gt_file_name):\n [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file_name)\n flags_gt, class_names_gt = segs2flags(seg_start, seg_end, seg_labs,\n mt_step)\n flagsGTNew = []\n for j, fl in enumerate(flags_gt):\n # \"align\" labels with GT\n if class_names_gt[flags_gt[j]] in classes_all:\n flagsGTNew.append(classes_all.index(class_names_gt[\n flags_gt[j]]))\n else:\n flagsGTNew.append(-1)\n cm = np.zeros((len(classes_all), len(classes_all)))\n flags_ind_gt = np.array(flagsGTNew)\n for i in range(min(flags_ind.shape[0], flags_ind_gt.shape[0])):\n cm[int(flags_ind_gt[i]),int(flags_ind[i])] += 1\n else:\n flags_ind_gt = np.array([]) \n acc = plotSegmentationResults(flags_ind, flags_ind_gt, classes_all,\n mt_step, not plot_res)\n if acc >= 0:\n print(\"Overall Accuracy: {0:.2f}\".format(acc))\n return (flags_ind, class_names_gt, acc, cm)\n else:\n return (flags_ind, classes_all, -1, -1)\n\n\ndef mtFileClassification(input_file, model_name, model_type,\n plot_results=False, gt_file=\"\"):\n \"\"\"\n This function performs mid-term classification of an audio stream.\n Towards this end, supervised knowledge is used,\n i.e. a pre-trained classifier.\n ARGUMENTS:\n - input_file: path of the input WAV file\n - model_name: name of the classification model\n - model_type: svm or knn depending on the classifier type\n - plot_results: True if results are to be plotted using\n matplotlib along with a set of statistics\n\n RETURNS:\n - segs: a sequence of segment's endpoints: segs[i] is the\n endpoint of the i-th segment (in seconds)\n - classes: a sequence of class flags: class[i] is the\n class ID of the i-th segment\n \"\"\"\n\n if not os.path.isfile(model_name):\n print(\"mtFileClassificationError: input model_type not found!\")\n return (-1, -1, -1, -1)\n # Load classifier:\n if model_type == \"knn\":\n [classifier, MEAN, STD, class_names, mt_win, mt_step, st_win, st_step,\n compute_beat] = aT.load_model_knn(model_name)\n else:\n [classifier, MEAN, STD, class_names, mt_win, mt_step, st_win, st_step,\n compute_beat] = aT.load_model(model_name)\n\n if compute_beat:\n print(\"Model \" + model_name + \" contains long-term music features \"\n \"(beat etc) and cannot be used in \"\n \"segmentation\")\n return (-1, -1, -1, -1)\n [fs, x] = audioBasicIO.read_audio_file(input_file) # load input file\n if fs == -1: # could not read file\n return (-1, -1, -1, -1)\n x = audioBasicIO.stereo_to_mono(x) # convert stereo (if) to mono\n # mid-term feature extraction:\n [mt_feats, _, _] = aF.mid_feature_extraction(x, fs, mt_win * fs,\n mt_step * fs,\n round(fs * st_win),\n round(fs * st_step))\n flags = []\n Ps = []\n flags_ind = []\n # for each feature vector (i.e. 
for each fix-sized segment):\n for i in range(mt_feats.shape[1]):\n cur_fv = (mt_feats[:, i] - MEAN) / STD # normalize current feature v\n # classify vector:\n [res, P] = aT.classifierWrapper(classifier, model_type, cur_fv)\n flags_ind.append(res)\n flags.append(class_names[int(res)]) # update class label matrix\n Ps.append(np.max(P)) # update probability matrix\n flags_ind = np.array(flags_ind)\n # 1-window smoothing\n for i in range(1, len(flags_ind) - 1):\n if flags_ind[i-1] == flags_ind[i + 1]:\n flags_ind[i] = flags_ind[i + 1]\n # convert fix-sized flags to segments and classes\n (segs, classes) = flags2segs(flags, mt_step)\n segs[-1] = len(x) / float(fs)\n\n # Load grount-truth: \n if os.path.isfile(gt_file):\n [seg_start_gt, seg_end_gt, seg_l_gt] = readSegmentGT(gt_file)\n flags_gt, class_names_gt = segs2flags(seg_start_gt, seg_end_gt,\n seg_l_gt, mt_step)\n flags_ind_gt = []\n for j, fl in enumerate(flags_gt):\n # \"align\" labels with GT\n if class_names_gt[flags_gt[j]] in class_names:\n flags_ind_gt.append(class_names.index(class_names_gt[\n flags_gt[j]]))\n else:\n flags_ind_gt.append(-1)\n flags_ind_gt = np.array(flags_ind_gt) \n cm = np.zeros((len(class_names_gt), len(class_names_gt)))\n for i in range(min(flags_ind.shape[0], flags_ind_gt.shape[0])):\n cm[int(flags_ind_gt[i]),int(flags_ind[i])] += 1 \n else:\n cm = []\n flags_ind_gt = np.array([])\n acc = plotSegmentationResults(flags_ind, flags_ind_gt,\n class_names, mt_step, not plot_results)\n if acc >= 0:\n print(\"Overall Accuracy: {0:.3f}\".format(acc) )\n return (flags_ind, class_names_gt, acc, cm)\n else:\n return (flags_ind, class_names, acc, cm)\n\n\ndef evaluateSegmentationClassificationDir(dir_name, model_name, method_name):\n accuracies = []\n\n # for each WAV file\n for i, f in enumerate(glob.glob(dir_name + os.sep + '*.wav')):\n wav_file = f\n print(wav_file)\n gt_file = f.replace('.wav', '.segments') # open for annotated file\n\n if method_name.lower() in [\"svm\", \"svm_rbf\", \"knn\",\n \"randomforest\",\"gradientboosting\",\n \"extratrees\"]:\n flags_ind, class_names, acc, cm_t = \\\n mtFileClassification(wav_file, model_name, method_name,\n False, gt_file)\n else:\n flags_ind, class_names, acc, cm_t = hmmSegmentation(wav_file,\n model_name,\n False, gt_file)\n if acc > -1:\n if i==0:\n cm = np.copy(cm_t)\n else: \n cm = cm + cm_t\n accuracies.append(acc)\n print(cm_t, class_names)\n print(cm)\n [rec, pre, f1] = computePreRec(cm_t, class_names)\n\n cm = cm / np.sum(cm)\n [rec, pre, f1] = computePreRec(cm, class_names)\n\n print(\" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \")\n print(\"Average Accuracy: {0:.1f}\".format(100.0*np.array(accuracies).mean()))\n print(\"Average recall: {0:.1f}\".format(100.0*np.array(rec).mean()))\n print(\"Average precision: {0:.1f}\".format(100.0*np.array(pre).mean()))\n print(\"Average f1: {0:.1f}\".format(100.0*np.array(f1).mean()))\n print(\"Median Accuracy: {0:.1f}\".format(100.0*np.median(np.array(accuracies))))\n print(\"Min Accuracy: {0:.1f}\".format(100.0*np.array(accuracies).min()))\n print(\"Max Accuracy: {0:.1f}\".format(100.0*np.array(accuracies).max()))\n\n\ndef silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False):\n \"\"\"\n Event Detection (silence removal)\n ARGUMENTS:\n - x: the input audio signal\n - fs: sampling freq\n - st_win, st_step: window size and step in seconds\n - smoothWindow: (optinal) smooth window (in seconds)\n - weight: (optinal) weight factor (0 < weight < 1)\n the higher, the more strict\n - plot: 
(optinal) True if results are to be plotted\n RETURNS:\n - seg_limits: list of segment limits in seconds (e.g [[0.1, 0.9],\n [1.4, 3.0]] means that\n the resulting segments are (0.1 - 0.9) seconds\n and (1.4, 3.0) seconds\n \"\"\"\n\n if weight >= 1:\n weight = 0.99\n if weight <= 0:\n weight = 0.01\n\n # Step 1: feature extraction\n x = audioBasicIO.stereo_to_mono(x)\n st_feats, _ = sF.feature_extraction(x, fs, st_win * fs, st_step * fs)\n\n # Step 2: train binary svm classifier of low vs high energy frames\n # keep only the energy short-term sequence (2nd feature)\n st_energy = st_feats[1, :]\n en = np.sort(st_energy)\n # number of 10% of the total short-term windows\n l1 = int(len(en) / 10)\n # compute \"lower\" 10% energy threshold\n t1 = np.mean(en[0:l1]) + 0.000000000000001\n # compute \"higher\" 10% energy threshold\n t2 = np.mean(en[-l1:-1]) + 0.000000000000001\n # get all features that correspond to low energy\n class1 = st_feats[:, np.where(st_energy <= t1)[0]]\n # get all features that correspond to high energy\n class2 = st_feats[:, np.where(st_energy >= t2)[0]]\n # form the binary classification task and ...\n faets_s = [class1.T, class2.T]\n # normalize and train the respective svm probabilistic model\n # (ONSET vs SILENCE)\n [faets_s_norm, means_s, stds_s] = aT.normalizeFeatures(faets_s)\n svm = aT.trainSVM(faets_s_norm, 1.0)\n\n # Step 3: compute onset probability based on the trained svm\n prob_on_set = []\n for i in range(st_feats.shape[1]):\n # for each frame\n cur_fv = (st_feats[:, i] - means_s) / stds_s\n # get svm probability (that it belongs to the ONSET class)\n prob_on_set.append(svm.predict_proba(cur_fv.reshape(1,-1))[0][1])\n prob_on_set = np.array(prob_on_set)\n # smooth probability:\n prob_on_set = smoothMovingAvg(prob_on_set, smoothWindow / st_step)\n\n # Step 4A: detect onset frame indices:\n prog_on_set_sort = np.sort(prob_on_set)\n # find probability Threshold as a weighted average\n # of top 10% and lower 10% of the values\n Nt = int(prog_on_set_sort.shape[0] / 10)\n T = (np.mean((1 - weight) * prog_on_set_sort[0:Nt]) +\n weight * np.mean(prog_on_set_sort[-Nt::]))\n\n max_idx = np.where(prob_on_set > T)[0]\n # get the indices of the frames that satisfy the thresholding\n i = 0\n time_clusters = []\n seg_limits = []\n\n # Step 4B: group frame indices to onset segments\n while i < len(max_idx):\n # for each of the detected onset indices\n cur_cluster = [max_idx[i]]\n if i == len(max_idx)-1:\n break\n while max_idx[i+1] - cur_cluster[-1] <= 2:\n cur_cluster.append(max_idx[i+1])\n i += 1\n if i == len(max_idx)-1:\n break\n i += 1\n time_clusters.append(cur_cluster)\n seg_limits.append([cur_cluster[0] * st_step,\n cur_cluster[-1] * st_step])\n\n # Step 5: Post process: remove very small segments:\n min_dur = 0.2\n seg_limits_2 = []\n for s in seg_limits:\n if s[1] - s[0] > min_dur:\n seg_limits_2.append(s)\n seg_limits = seg_limits_2\n\n if plot:\n timeX = np.arange(0, x.shape[0] / float(fs), 1.0 / fs)\n\n plt.subplot(2, 1, 1)\n plt.plot(timeX, x)\n for s in seg_limits:\n plt.axvline(x=s[0], color='red')\n plt.axvline(x=s[1], color='red')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step), \n prob_on_set)\n plt.title('Signal')\n for s in seg_limits:\n plt.axvline(x=s[0], color='red')\n plt.axvline(x=s[1], color='red')\n plt.title('svm Probability')\n plt.show()\n\n return seg_limits\n\n\ndef speakerDiarization(filename, n_speakers, mt_size=2.0, mt_step=0.2, \n st_win=0.05, lda_dim=35, plot_res=False):\n \"\"\"\n ARGUMENTS:\n 
- filename: the name of the WAV file to be analyzed\n - n_speakers the number of speakers (clusters) in\n the recording (<=0 for unknown)\n - mt_size (opt) mid-term window size\n - mt_step (opt) mid-term window step\n - st_win (opt) short-term window size\n - lda_dim (opt LDA dimension (0 for no LDA)\n - plot_res (opt) 0 for not plotting the results 1 for plotting\n \"\"\"\n [fs, x] = audioBasicIO.read_audio_file(filename)\n x = audioBasicIO.stereo_to_mono(x)\n duration = len(x) / fs\n\n [classifier_1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data/models\", \"knn_speaker_10\"))\n [classifier_2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"data/models\", \"knn_speaker_male_female\"))\n\n [mt_feats, st_feats, _] = aF.mid_feature_extraction(x, fs, mt_size * fs,\n mt_step * fs,\n round(fs * st_win),\n round(fs*st_win * 0.5))\n\n MidTermFeatures2 = np.zeros((mt_feats.shape[0] + len(classNames1) +\n len(classNames2), mt_feats.shape[1]))\n\n for i in range(mt_feats.shape[1]):\n cur_f1 = (mt_feats[:, i] - MEAN1) / STD1\n cur_f2 = (mt_feats[:, i] - MEAN2) / STD2\n [res, P1] = aT.classifierWrapper(classifier_1, \"knn\", cur_f1)\n [res, P2] = aT.classifierWrapper(classifier_2, \"knn\", cur_f2)\n MidTermFeatures2[0:mt_feats.shape[0], i] = mt_feats[:, i]\n MidTermFeatures2[mt_feats.shape[0]:mt_feats.shape[0]+len(classNames1), i] = P1 + 0.0001\n MidTermFeatures2[mt_feats.shape[0] + len(classNames1)::, i] = P2 + 0.0001\n\n mt_feats = MidTermFeatures2 # TODO\n iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,\n 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]\n\n mt_feats = mt_feats[iFeaturesSelect, :]\n\n (mt_feats_norm, MEAN, STD) = aT.normalizeFeatures([mt_feats.T])\n mt_feats_norm = mt_feats_norm[0].T\n n_wins = mt_feats.shape[1]\n\n # remove outliers:\n dist_all = np.sum(distance.squareform(distance.pdist(mt_feats_norm.T)),\n axis=0)\n m_dist_all = np.mean(dist_all)\n i_non_outliers = np.nonzero(dist_all < 1.2 * m_dist_all)[0]\n\n # TODO: Combine energy threshold for outlier removal:\n #EnergyMin = np.min(mt_feats[1,:])\n #EnergyMean = np.mean(mt_feats[1,:])\n #Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0\n #i_non_outliers = np.nonzero(mt_feats[1,:] > Thres)[0]\n #print i_non_outliers\n\n perOutLier = (100.0 * (n_wins - i_non_outliers.shape[0])) / n_wins\n mt_feats_norm_or = mt_feats_norm\n mt_feats_norm = mt_feats_norm[:, i_non_outliers]\n\n # LDA dimensionality reduction:\n if lda_dim > 0:\n #[mt_feats_to_red, _, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs,\n # st_win * fs, round(fs*st_win), round(fs*st_win));\n # extract mid-term features with minimum step:\n mt_win_ratio = int(round(mt_size / st_win))\n mt_step_ratio = int(round(st_win / st_win))\n mt_feats_to_red = []\n num_of_features = len(st_feats)\n num_of_stats = 2\n #for i in range(num_of_stats * num_of_features + 1):\n for i in range(num_of_stats * num_of_features):\n mt_feats_to_red.append([])\n\n for i in range(num_of_features): # for each of the short-term features:\n curPos = 0\n N = len(st_feats[i])\n while (curPos < N):\n N1 = curPos\n N2 = curPos + mt_win_ratio\n if N2 > N:\n N2 = N\n curStFeatures = st_feats[i][N1:N2]\n mt_feats_to_red[i].append(np.mean(curStFeatures))\n mt_feats_to_red[i+num_of_features].append(np.std(curStFeatures))\n curPos += mt_step_ratio\n mt_feats_to_red = 
np.array(mt_feats_to_red)\n mt_feats_to_red_2 = np.zeros((mt_feats_to_red.shape[0] +\n len(classNames1) + len(classNames2),\n mt_feats_to_red.shape[1]))\n for i in range(mt_feats_to_red.shape[1]):\n cur_f1 = (mt_feats_to_red[:, i] - MEAN1) / STD1\n cur_f2 = (mt_feats_to_red[:, i] - MEAN2) / STD2\n [res, P1] = aT.classifierWrapper(classifier_1, \"knn\", cur_f1)\n [res, P2] = aT.classifierWrapper(classifier_2, \"knn\", cur_f2)\n mt_feats_to_red_2[0:mt_feats_to_red.shape[0], i] = mt_feats_to_red[:, i]\n mt_feats_to_red_2[mt_feats_to_red.shape[0]:mt_feats_to_red.shape[0] + len(classNames1), i] = P1 + 0.0001\n mt_feats_to_red_2[mt_feats_to_red.shape[0]+len(classNames1)::, i] = P2 + 0.0001\n mt_feats_to_red = mt_feats_to_red_2\n mt_feats_to_red = mt_feats_to_red[iFeaturesSelect, :]\n #mt_feats_to_red += np.random.rand(mt_feats_to_red.shape[0], mt_feats_to_red.shape[1]) * 0.0000010\n (mt_feats_to_red, MEAN, STD) = aT.normalizeFeatures([mt_feats_to_red.T])\n mt_feats_to_red = mt_feats_to_red[0].T\n #dist_all = np.sum(distance.squareform(distance.pdist(mt_feats_to_red.T)), axis=0)\n #m_dist_all = np.mean(dist_all)\n #iNonOutLiers2 = np.nonzero(dist_all < 3.0*m_dist_all)[0]\n #mt_feats_to_red = mt_feats_to_red[:, iNonOutLiers2]\n Labels = np.zeros((mt_feats_to_red.shape[1], ));\n LDAstep = 1.0\n LDAstepRatio = LDAstep / st_win\n #print LDAstep, LDAstepRatio\n for i in range(Labels.shape[0]):\n Labels[i] = int(i*st_win/LDAstepRatio); \n clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=lda_dim)\n clf.fit(mt_feats_to_red.T, Labels)\n mt_feats_norm = (clf.transform(mt_feats_norm.T)).T\n\n if n_speakers <= 0:\n s_range = range(2, 10)\n else:\n s_range = [n_speakers]\n clsAll = []\n sil_all = []\n centersAll = []\n \n for iSpeakers in s_range: \n k_means = sklearn.cluster.KMeans(n_clusters=iSpeakers)\n k_means.fit(mt_feats_norm.T)\n cls = k_means.labels_ \n means = k_means.cluster_centers_\n\n # Y = distance.squareform(distance.pdist(mt_feats_norm.T))\n clsAll.append(cls)\n centersAll.append(means)\n sil_1 = []; sil_2 = []\n for c in range(iSpeakers):\n # for each speaker (i.e. for each extracted cluster)\n clust_per_cent = np.nonzero(cls == c)[0].shape[0] / \\\n float(len(cls))\n if clust_per_cent < 0.020:\n sil_1.append(0.0)\n sil_2.append(0.0)\n else:\n # get subset of feature vectors\n mt_feats_norm_temp = mt_feats_norm[:, cls==c]\n # compute average distance between samples\n # that belong to the cluster (a values)\n Yt = distance.pdist(mt_feats_norm_temp.T)\n sil_1.append(np.mean(Yt)*clust_per_cent)\n silBs = []\n for c2 in range(iSpeakers):\n # compute distances from samples of other clusters\n if c2 != c:\n clust_per_cent_2 = np.nonzero(cls == c2)[0].shape[0] /\\\n float(len(cls))\n MidTermFeaturesNormTemp2 = mt_feats_norm[:, cls == c2]\n Yt = distance.cdist(mt_feats_norm_temp.T, \n MidTermFeaturesNormTemp2.T)\n silBs.append(np.mean(Yt)*(clust_per_cent\n + clust_per_cent_2)/2.0)\n silBs = np.array(silBs)\n # ... 
and keep the minimum value (i.e.\n # the distance from the \"nearest\" cluster)\n sil_2.append(min(silBs))\n sil_1 = np.array(sil_1); \n sil_2 = np.array(sil_2); \n sil = []\n for c in range(iSpeakers):\n # for each cluster (speaker) compute silhouette\n sil.append( ( sil_2[c] - sil_1[c]) / (max(sil_2[c],\n sil_1[c]) + 0.00001))\n # keep the AVERAGE SILLOUETTE\n sil_all.append(np.mean(sil))\n\n imax = np.argmax(sil_all)\n # optimal number of clusters\n nSpeakersFinal = s_range[imax]\n\n # generate the final set of cluster labels\n # (important: need to retrieve the outlier windows:\n # this is achieved by giving them the value of their\n # nearest non-outlier window)\n cls = np.zeros((n_wins,))\n for i in range(n_wins):\n j = np.argmin(np.abs(i-i_non_outliers)) \n cls[i] = clsAll[imax][j]\n \n # Post-process method 1: hmm smoothing\n for i in range(1):\n # hmm training\n start_prob, transmat, means, cov = \\\n trainHMM_computeStatistics(mt_feats_norm_or, cls)\n hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], \"diag\")\n hmm.startprob_ = start_prob\n hmm.transmat_ = transmat \n hmm.means_ = means; hmm.covars_ = cov\n cls = hmm.predict(mt_feats_norm_or.T) \n \n # Post-process method 2: median filtering:\n cls = scipy.signal.medfilt(cls, 13)\n cls = scipy.signal.medfilt(cls, 11)\n\n sil = sil_all[imax]\n class_names = [\"speaker{0:d}\".format(c) for c in range(nSpeakersFinal)];\n\n\n # load ground-truth if available\n gt_file = filename.replace('.wav', '.segments')\n # if groundturh exists\n if os.path.isfile(gt_file):\n [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)\n flags_gt, class_names_gt = segs2flags(seg_start, seg_end, seg_labs, mt_step)\n\n if plot_res:\n fig = plt.figure() \n if n_speakers > 0:\n ax1 = fig.add_subplot(111)\n else:\n ax1 = fig.add_subplot(211)\n ax1.set_yticks(np.array(range(len(class_names))))\n ax1.axis((0, duration, -1, len(class_names)))\n ax1.set_yticklabels(class_names)\n ax1.plot(np.array(range(len(cls)))*mt_step+mt_step/2.0, cls)\n\n if os.path.isfile(gt_file):\n if plot_res:\n ax1.plot(np.array(range(len(flags_gt))) *\n mt_step + mt_step / 2.0, flags_gt, 'r')\n purity_cluster_m, purity_speaker_m = \\\n evaluateSpeakerDiarization(cls, flags_gt)\n print(\"{0:.1f}\\t{1:.1f}\".format(100 * purity_cluster_m,\n 100 * purity_speaker_m))\n if plot_res:\n plt.title(\"Cluster purity: {0:.1f}% - \"\n \"Speaker purity: {1:.1f}%\".format(100 * purity_cluster_m,\n 100 * purity_speaker_m))\n if plot_res:\n plt.xlabel(\"time (seconds)\")\n #print s_range, sil_all \n if n_speakers<=0:\n plt.subplot(212)\n plt.plot(s_range, sil_all)\n plt.xlabel(\"number of clusters\");\n plt.ylabel(\"average clustering's sillouette\");\n plt.show()\n return cls\n\n\ndef speakerDiarizationEvaluateScript(folder_name, ldas):\n \"\"\"\n This function prints the cluster purity and speaker purity for\n each WAV file stored in a provided directory (.SEGMENT files\n are needed as ground-truth)\n ARGUMENTS:\n - folder_name: the full path of the folder where the WAV and\n SEGMENT (ground-truth) files are stored\n - ldas: a list of LDA dimensions (0 for no LDA)\n \"\"\"\n types = ('*.wav', )\n wavFilesList = []\n for files in types:\n wavFilesList.extend(glob.glob(os.path.join(folder_name, files))) \n \n wavFilesList = sorted(wavFilesList)\n\n # get number of unique speakers per file (from ground-truth) \n N = []\n for wav_file in wavFilesList: \n gt_file = wav_file.replace('.wav', '.segments');\n if os.path.isfile(gt_file):\n [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)\n 
N.append(len(list(set(seg_labs))))\n else:\n N.append(-1)\n \n for l in ldas:\n print(\"LDA = {0:d}\".format(l))\n for i, wav_file in enumerate(wavFilesList):\n speakerDiarization(wav_file, N[i], 2.0, 0.2, 0.05, l, plot_res=False)\n print\n \ndef musicThumbnailing(x, fs, short_term_size=1.0, short_term_step=0.5, \n thumb_size=10.0, limit_1 = 0, limit_2 = 1):\n \"\"\"\n This function detects instances of the most representative part of a\n music recording, also called \"music thumbnails\".\n A technique similar to the one proposed in [1], however a wider set of\n audio features is used instead of chroma features.\n In particular the following steps are followed:\n - Extract short-term audio features. Typical short-term window size: 1 second\n - Compute the self-similarity matrix, i.e. all pairwise similarities\n between feature vectors\n - Apply a diagonal mask is as a moving average filter on the values of the\n self-similarty matrix.\n The size of the mask is equal to the desirable thumbnail length.\n - Find the position of the maximum value of the new (filtered)\n self-similarity matrix. The audio segments that correspond to the\n diagonial around that position are the selected thumbnails\n \n\n ARGUMENTS:\n - x: input signal\n - fs: sampling frequency\n - short_term_size: window size (in seconds)\n - short_term_step: window step (in seconds)\n - thumb_size: desider thumbnail size (in seconds)\n \n RETURNS:\n - A1: beginning of 1st thumbnail (in seconds)\n - A2: ending of 1st thumbnail (in seconds)\n - B1: beginning of 2nd thumbnail (in seconds)\n - B2: ending of 2nd thumbnail (in seconds)\n\n USAGE EXAMPLE:\n import audioFeatureExtraction as aF\n [fs, x] = basicIO.readAudioFile(input_file)\n [A1, A2, B1, B2] = musicThumbnailing(x, fs)\n\n [1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing\n of popular music using chroma-based representations.\n Multimedia, IEEE Transactions on, 7(1), 96-104.\n \"\"\"\n x = audioBasicIO.stereo_to_mono(x);\n # feature extraction:\n st_feats, _ = sF.feature_extraction(x, fs, fs * short_term_size,\n fs * short_term_step)\n\n # self-similarity matrix\n S = selfSimilarityMatrix(st_feats)\n\n # moving filter:\n M = int(round(thumb_size / short_term_step))\n B = np.eye(M,M)\n S = scipy.signal.convolve2d(S, B, 'valid')\n\n\n # post-processing (remove main diagonal elements)\n min_sm = np.min(S)\n for i in range(S.shape[0]):\n for j in range(S.shape[1]):\n if abs(i-j) < 5.0 / short_term_step or i > j:\n S[i,j] = min_sm;\n\n # find max position:\n S[0:int(limit_1 * S.shape[0]), :] = min_sm\n S[:, 0:int(limit_1 * S.shape[0])] = min_sm\n S[int(limit_2 * S.shape[0])::, :] = min_sm\n S[:, int(limit_2 * S.shape[0])::] = min_sm\n\n maxVal = np.max(S) \n [I, J] = np.unravel_index(S.argmax(), S.shape)\n #plt.imshow(S)\n #plt.show()\n # expand:\n i1 = I\n i2 = I\n j1 = J\n j2 = J\n\n while i2-i1<M: \n if i1 <=0 or j1<=0 or i2 >= S.shape[0]-2 or j2 >= S.shape[1]-2:\n break\n if S[i1-1, j1-1] > S[i2 + 1, j2 + 1]:\n i1 -= 1\n j1 -= 1 \n else: \n i2 += 1\n j2 += 1 \n\n return short_term_step * i1, short_term_step * i2, \\\n short_term_step * j1, short_term_step * j2, S\n\n\n"
] | [
[
"sklearn.cluster.KMeans",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.concatenate",
"numpy.mean",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"numpy.where",
"numpy.unique",
"scipy.signal.medfilt",
"numpy.arange",
"numpy.eye",
"numpy.copy",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"numpy.std",
"numpy.count_nonzero",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.nonzero",
"scipy.spatial.distance.cdist",
"scipy.signal.convolve2d",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"numpy.abs",
"numpy.sort",
"numpy.ones",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
brisvag/pygfx | [
"02e1f5ff92f50899ca990b712016e10d2914fcaf"
] | [
"examples/line_thin.py"
] | [
"\"\"\"\nSome thin line drawing.\n\"\"\"\n\nimport numpy as np\n\nimport pygfx as gfx\n\nfrom PySide6 import QtWidgets\nfrom wgpu.gui.qt import WgpuCanvas\n\napp = QtWidgets.QApplication([])\n\ncanvas = WgpuCanvas()\nrenderer = gfx.WgpuRenderer(canvas)\n\nscene = gfx.Scene()\n\npositions = [[200 + np.sin(i) * i * 6, 200 + np.cos(i) * i * 6, 0] for i in range(20)]\npositions += [[400 - np.sin(i) * i * 6, 200 + np.cos(i) * i * 6, 0] for i in range(20)]\npositions += [\n [450, 400, 0],\n [375, 400, 0],\n [300, 400, 0],\n [400, 370, 0],\n [300, 340, 0],\n]\n\n# Spiral away in z (to make the depth buffer less boring)\nfor i in range(len(positions)):\n positions[i][2] = i\n\ncolors = np.array(positions.copy())\ncolors /= colors.max()\ncolors = np.hstack([colors, np.ones((colors.shape[0], 1))])\ncolors = colors.astype(\"f4\")\n\ngeometry = gfx.Geometry(positions=positions, colors=colors)\nmaterial = gfx.LineThinMaterial(thickness=12.0, vertex_colors=True)\nline = gfx.Line(geometry, material)\nscene.add(line)\n\ncamera = gfx.OrthographicCamera(600, 500)\ncamera.position.set(300, 250, 0)\n\n\nif __name__ == \"__main__\":\n canvas.request_draw(lambda: renderer.render(scene, camera))\n app.exec()\n"
] | [
[
"numpy.cos",
"numpy.sin",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lnna/OpenNRE-PyTorch | [
"907026a8bece7a867558087131cd1e97d41eb3f2"
] | [
"config/Config.py"
] | [
"#coding:utf-8\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport numpy as np\nimport os\nimport time\nimport datetime\nimport json\nimport sys\nimport sklearn.metrics\nfrom tqdm import tqdm\nimport pickle as pc\n# sys.path.append('/home/nana/Documents/pycharmforlinux/mParser')\n# sys.path.append('/home/lnn/Documents/private/mParser')\n# from src.gen_mediate_para import hs_parse\n# print(hs_parse([('NN','人们'),('VV','爱'),('NN','小明')]))\n\ndevice = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n# device=\"cpu\"\n# torch.cuda.set_device(0)\ndef to_var(x):\n return Variable(torch.from_numpy(x).to(device))\n\nclass Accuracy(object):\n def __init__(self):\n self.correct = 0\n self.total = 0\n def add(self, is_correct):\n self.total += 1\n if is_correct:\n self.correct += 1\n def get(self):\n if self.total == 0:\n return 0.0\n else:\n return float(self.correct) / self.total\n def clear(self):\n self.correct = 0\n self.total = 0 \n\nclass Config(object):\n def __init__(self):\n self.acc_NA = Accuracy()\n self.acc_not_NA = Accuracy()\n self.acc_total = Accuracy()\n self.data_path = './mnre_data/176rels_data/need_data/'\n # self.data_path = '/home/lnn/Documents/OpenNRE-Ina/OpenNRE-PyTorch/data'\n self.use_bag = True\n self.use_gpu = True\n self.is_training = True\n self.max_length = 120\n self.pos_num = 2 * self.max_length\n #NYT\n # self.num_classes = 53\n # mnre\n self.num_classes = 176\n self.hidden_size = 230\n self.pos_size = 5\n self.max_epoch = 15\n self.opt_method = 'SGD'\n self.optimizer = None\n self.learning_rate = 0.1\n self.weight_decay = 1e-5\n self.drop_prob = 0.5\n self.checkpoint_dir = './mnre_data/176rels_data/f187_checkpoint'\n self.test_result_dir = './mnre_data/176rels_data/f187_test_result'\n self.save_epoch = 1\n self.test_epoch = 1\n self.pretrain_model = None\n self.trainModel = None\n self.testModel = None\n self.batch_size = 60\n self.word_size = 50\n self.window_size = 3\n self.epoch_range = None\n def set_data_path(self, data_path):\n self.data_path = data_path\n def set_max_length(self, max_length):\n self.max_length = max_length\n self.pos_num = 2 * self.max_length\n def set_num_classes(self, num_classes):\n self.num_classes = num_classes\n def set_hidden_size(self, hidden_size):\n self.hidden_size = hidden_size\n def set_window_size(self, window_size):\n self.window_size = window_size\n def set_pos_size(self, pos_size):\n self.pos_size = pos_size\n def set_word_size(self, word_size):\n self.word_size = word_size\n def set_max_epoch(self, max_epoch):\n self.max_epoch = max_epoch\n def set_batch_size(self, batch_size):\n self.batch_size = batch_size\n def set_opt_method(self, opt_method):\n self.opt_method = opt_method\n def set_learning_rate(self, learning_rate):\n self.learning_rate = learning_rate\n def set_weight_decay(self, weight_decay):\n self.weight_decay = weight_decay\n def set_drop_prob(self, drop_prob):\n self.drop_prob = drop_prob\n def set_checkpoint_dir(self, checkpoint_dir):\n self.checkpoint_dir = checkpoint_dir\n def set_test_epoch(self, test_epoch):\n self.test_epoch = test_epoch\n def set_save_epoch(self, save_epoch):\n self.save_epoch = save_epoch\n def set_pretrain_model(self, pretrain_model):\n self.pretrain_model = pretrain_model\n def set_is_training(self, is_training):\n self.is_training = is_training\n def set_use_bag(self, use_bag):\n self.use_bag = use_bag\n def set_use_gpu(self, use_gpu):\n self.use_gpu = use_gpu\n def set_epoch_range(self, epoch_range):\n 
self.epoch_range = epoch_range\n\n\n def load_train_data(self):\n print(\"Reading training data...\")\n self.data_word_vec = np.load(os.path.join(self.data_path, 'vec.npy'))\n self.data_train_word = np.load(os.path.join(self.data_path, 'train_word.npy'))\n self.data_train_pos1 = np.load(os.path.join(self.data_path, 'train_pos1.npy'))\n self.data_train_pos2 = np.load(os.path.join(self.data_path, 'train_pos2.npy'))\n self.data_train_mask = np.load(os.path.join(self.data_path, 'train_mask.npy'))\n if self.use_bag:\n self.data_query_label = np.load(os.path.join(self.data_path, 'train_ins_label.npy'))\n self.data_train_label = np.load(os.path.join(self.data_path, 'train_bag_label.npy'))\n self.data_train_scope = np.load(os.path.join(self.data_path, 'train_bag_scope.npy'))\n else:\n self.data_train_label = np.load(os.path.join(self.data_path, 'train_ins_label.npy'))\n self.data_train_scope = np.load(os.path.join(self.data_path, 'train_ins_scope.npy'))\n\n # add by Ina Liu 20180117\n # self.train_lstm_out=np.load(os.path.join(self.data_path,'big_stanford_train_lstm_out.npy'))\n # add by Ina Liu 20190211\n # self.train_posseg=np.load(os.path.join(self.data_path, 'train_posseg.npy'))\n\n print(\"Finish reading\")\n self.train_order = list(range(len(self.data_train_label)))\n self.train_batches = len(self.data_train_label) // self.batch_size\n if len(self.data_train_label) % self.batch_size != 0:\n self.train_batches += 1\n\n def load_test_data(self):\n print(\"Reading testing data...\")\n self.data_word_vec = np.load(os.path.join(self.data_path, 'vec.npy'))\n self.data_test_word = np.load(os.path.join(self.data_path, 'test_word.npy'))\n self.data_test_pos1 = np.load(os.path.join(self.data_path, 'test_pos1.npy'))\n self.data_test_pos2 = np.load(os.path.join(self.data_path, 'test_pos2.npy'))\n self.data_test_mask = np.load(os.path.join(self.data_path, 'test_mask.npy'))\n if self.use_bag:\n self.data_test_label = np.load(os.path.join(self.data_path, 'test_bag_label.npy'))\n self.data_test_scope = np.load(os.path.join(self.data_path, 'test_bag_scope.npy'))\n else:\n self.data_test_label = np.load(os.path.join(self.data_path, 'test_ins_label.npy'))\n self.data_test_scope = np.load(os.path.join(self.data_path, 'test_ins_scope.npy'))\n\n # add by Ina Liu 20180117\n # self.test_lstm_out=np.load(os.path.join(self.data_path,'big_stanford_test_lstm_out.npy'))\n # add by Ina Liu 20190211\n # self.test_posseg = np.load(os.path.join(self.data_path, 'test_posseg.npy'))\n print(\"Finish reading\")\n self.test_batches = len(self.data_test_label) // self.batch_size\n if len(self.data_test_label) % self.batch_size != 0:\n self.test_batches += 1\n\n self.total_recall = self.data_test_label[:, 1:].sum()\n\n def set_train_model(self, model):\n print(\"Initializing training model...\")\n self.model = model\n self.trainModel = self.model(config = self)\n if self.pretrain_model != None:\n self.trainModel.load_state_dict(torch.load(self.pretrain_model))\n self.trainModel.to(device)\n if self.optimizer != None:\n pass\n elif self.opt_method == \"Adagrad\" or self.opt_method == \"adagrad\":\n self.optimizer = optim.Adagrad(self.trainModel.parameters(), lr = self.learning_rate, lr_decay = self.lr_decay, weight_decay = self.weight_decay)\n elif self.opt_method == \"Adadelta\" or self.opt_method == \"adadelta\":\n self.optimizer = optim.Adadelta(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)\n elif self.opt_method == \"Adam\" or self.opt_method == \"adam\":\n self.optimizer = 
optim.Adam(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)\n else:\n self.optimizer = optim.SGD(self.trainModel.parameters(), lr = self.learning_rate, weight_decay = self.weight_decay)\n print(\"Finish initializing\")\n \n def set_test_model(self, model):\n print(\"Initializing test model...\")\n self.model = model\n self.testModel = self.model(config = self)\n self.testModel.to(device)\n self.testModel.eval()\n print(\"Finish initializing\")\n\n def get_train_batch(self, batch):\n input_scope = np.take(self.data_train_scope, self.train_order[batch * self.batch_size : (batch + 1) * self.batch_size], axis = 0)\n index = []\n scope = [0]\n for num in input_scope:\n index = index + list(range(num[0], num[1] + 1))\n scope.append(scope[len(scope) - 1] + num[1] - num[0] + 1)\n self.batch_word = self.data_train_word[index, :]\n self.batch_pos1 = self.data_train_pos1[index, :]\n self.batch_pos2 = self.data_train_pos2[index, :]\n self.batch_mask = self.data_train_mask[index, :] \n self.batch_label = np.take(self.data_train_label, self.train_order[batch * self.batch_size : (batch + 1) * self.batch_size], axis = 0)\n # print('batch label shape {}'.format(self.batch_label.shape))\n self.batch_attention_query = self.data_query_label[index]\n self.batch_scope = scope\n\n # add by Ina Liu 20180117\n # self.batch_lstm_out=Variable(torch.from_numpy(self.train_lstm_out[index,:]).float().cuda())\n # print('batch lstm out shape:{}'.format(self.batch_lstm_out.shape))\n # add by Ina Liu 20190313\n\n self.batch_lstm_hs=[]\n # Ina Liu 20190508\n # now = time.time()\n lstm_mod=1000\n lstm_dir='/media/sda1/nana/opennre-pytorch/mnre_data/176rels_data/need_data/f187_lstm_parse'\n tmp=dict()\n # lstm_dic=pc.load(open(os.path.join(lstm_dir,'train_{}.pc'.format(1)),mode='rb'))\n\n for i in index:\n tlist=tmp.get(i//lstm_mod+1,[])\n tlist.append(i)\n tmp[i//lstm_mod+1]=tlist\n\n batch_dict=dict()\n for k,v in tmp.items():\n lstm_dic = pc.load(open(os.path.join(lstm_dir, 'train_{}.pc'.format(k)), mode='rb'))\n for i in v:\n batch_dict[i]=lstm_dic[i]\n for i in index:\n self.batch_lstm_hs.append(batch_dict[i])\n\n\n\n #Ina Liu 20190507\n # for i in index:\n # lstm_dic=pc.load(open(os.path.join(lstm_dir,'train_{}.pc'.format(i//lstm_mod+1)),mode='rb'))\n # self.batch_lstm_hs.append(lstm_dic[i])\n # print(\"one batch lstm spend time {}\".format(time.time() - now))\n # lstm_path='{}train_batch{}.npy'.format('/home/nana/Documents/pycharmforlinux/opennre-pytorch/mnre_data/thesis_data/lstm_parse/',str(batch))\n #\n # if os.path.exists(lstm_path):\n # self.batch_lstm_hs=np.load(lstm_path)\n # else:\n # now=time.time()\n # for i in index:\n # line=self.train_posseg[i]\n # line = [tuple(i) for i in line]\n # res=hs_parse(line)\n # if len(res)<self.max_length:\n # res=np.vstack((res,np.zeros((self.max_length-len(res),100))))\n # else:\n # res=res[:self.max_length]\n # self.batch_lstm_hs.append(res)\n # print(\"one batch lstm spend time {}\".format(time.time()-now))\n # np.save(lstm_path,self.batch_lstm_hs)\n # print('batch {}'.format(batch))\n self.batch_lstm_hs=Variable(torch.from_numpy(np.array(self.batch_lstm_hs)).float().to(device))\n\n\n def get_test_batch(self, batch):\n input_scope = self.data_test_scope[batch * self.batch_size : (batch + 1) * self.batch_size]\n index = []\n scope = [0]\n for num in input_scope:\n index = index + list(range(num[0], num[1] + 1))\n scope.append(scope[len(scope) - 1] + num[1] - num[0] + 1)\n self.batch_word = self.data_test_word[index, :]\n self.batch_pos1 
= self.data_test_pos1[index, :]\n self.batch_pos2 = self.data_test_pos2[index, :]\n self.batch_mask = self.data_test_mask[index, :]\n self.batch_scope = scope\n\n # add by Ina Liu 20180117\n # self.batch_lstm_out=Variable(torch.from_numpy(self.test_lstm_out[index,:]).float().cuda())\n # add by Ina Liu 20190313\n self.batch_lstm_hs = []\n lstm_mod = 1000\n lstm_dir = '/media/sda1/nana/opennre-pytorch/mnre_data/176rels_data/need_data/f187_lstm_parse'\n tmp = dict()\n for i in index:\n tlist = tmp.get(i // lstm_mod + 1, [])\n tlist.append(i)\n tmp[i // lstm_mod + 1] = tlist\n\n batch_dict = dict()\n for k, v in tmp.items():\n lstm_dic = pc.load(open(os.path.join(lstm_dir, 'test_{}.pc'.format(k)), mode='rb'))\n for i in v:\n batch_dict[i] = lstm_dic[i]\n for i in index:\n self.batch_lstm_hs.append(batch_dict[i])\n\n\n # lstm_path='{}test_batch{}.npy'.format('/home/nana/Documents/pycharmforlinux/opennre-pytorch/mnre_data/thesis_data/lstm_parse/',str(batch))\n #\n # if os.path.exists(lstm_path):\n # self.batch_lstm_hs = np.load(lstm_path)\n # else:\n # for i in index:\n # line = self.train_posseg[i]\n # line = [tuple(i) for i in line]\n # res = hs_parse(line)\n # if len(res) < self.max_length:\n # res = np.vstack((res, np.zeros((self.max_length - len(res), 100))))\n # else:\n # res = res[:self.max_length]\n # self.batch_lstm_hs.append(res)\n # np.save(lstm_path, self.batch_lstm_hs)\n # self.batch_lstm_hs = Variable(torch.from_numpy(self.batch_lstm_hs).float().cuda())\n self.batch_lstm_hs = Variable(torch.from_numpy(np.array(self.batch_lstm_hs)).float().to(device))\n\n def train_one_step(self):\n self.trainModel.embedding.word = to_var(self.batch_word)\n self.trainModel.embedding.pos1 = to_var(self.batch_pos1)\n self.trainModel.embedding.pos2 = to_var(self.batch_pos2)\n self.trainModel.encoder.mask = to_var(self.batch_mask)\n self.trainModel.selector.scope = self.batch_scope\n self.trainModel.selector.attention_query = to_var(self.batch_attention_query)\n # print('attention_query shape {}'.format(self.trainModel.selector.attention_query.shape))\n self.trainModel.selector.label = to_var(self.batch_label)\n self.trainModel.classifier.label = to_var(self.batch_label)\n # print(self.trainModel.classifier.label)\n self.optimizer.zero_grad()\n loss, _output = self.trainModel()\n loss.backward()\n self.optimizer.step()\n for i, prediction in enumerate(_output):\n if self.batch_label[i] == 0:\n self.acc_NA.add(prediction.cpu().numpy() == self.batch_label[i])\n else:\n self.acc_not_NA.add(prediction.cpu().numpy() == self.batch_label[i])\n self.acc_total.add(prediction.cpu().numpy() == self.batch_label[i])\n # return loss.data[0]\n return loss.item()\n\n def test_one_step(self):\n self.testModel.embedding.word = to_var(self.batch_word)\n self.testModel.embedding.pos1 = to_var(self.batch_pos1)\n self.testModel.embedding.pos2 = to_var(self.batch_pos2)\n self.testModel.encoder.mask = to_var(self.batch_mask)\n self.testModel.selector.scope = self.batch_scope\n return self.testModel.test()\n\n def train(self):\n self.cur_epoch = 0\n if not os.path.exists(self.checkpoint_dir):\n os.mkdir(self.checkpoint_dir)\n else:\n self.cur_epoch=15\n model_path=os.path.join(self.checkpoint_dir, self.model.__name__ + '-' + str(self.cur_epoch-1))\n self.trainModel.load_state_dict(torch.load(model_path))\n best_auc = 0.0\n best_p = None\n best_r = None\n best_epoch = 0\n for epoch in range(self.cur_epoch,self.max_epoch):\n print('Epoch ' + str(epoch) + ' starts...')\n if epoch%3==0 and epoch!=0:\n 
self.set_learning_rate(self.learning_rate/2.6)\n self.acc_NA.clear()\n self.acc_not_NA.clear()\n self.acc_total.clear()\n np.random.shuffle(self.train_order)\n for batch in range(self.train_batches):\n # print('total batches:{} now batch:{}'.format(self.train_batches,batch))\n self.get_train_batch(batch)\n loss = self.train_one_step()\n time_str = datetime.datetime.now().isoformat()\n sys.stdout.write(\"epoch %d step %d time %s | loss: %f, NA accuracy: %f, not NA accuracy: %f, total accuracy: %f\\r\" % (epoch, batch, time_str, loss, self.acc_NA.get(), self.acc_not_NA.get(), self.acc_total.get())) \n sys.stdout.flush()\n if (epoch + 1) % self.save_epoch == 0:\n print('Epoch ' + str(epoch) + ' has finished')\n print('Saving model...')\n path = os.path.join(self.checkpoint_dir, self.model.__name__ + '-' + str(epoch))\n torch.save(self.trainModel.state_dict(), path)\n print('Have saved model to ' + path)\n if (epoch + 1) % self.test_epoch == 0:\n self.testModel = self.trainModel\n auc, pr_x, pr_y = self.test_one_epoch()\n if auc > best_auc:\n best_auc = auc\n best_p = pr_x\n best_r = pr_y\n best_epoch = epoch\n print(\"Finish training\")\n print(\"Best epoch = %d | auc = %f\" % (best_epoch, best_auc))\n print(\"Storing best result...\")\n if not os.path.isdir(self.test_result_dir):\n os.mkdir(self.test_result_dir)\n np.save(os.path.join(self.test_result_dir, self.model.__name__ + '_x.npy'), best_p)\n np.save(os.path.join(self.test_result_dir, self.model.__name__ + '_y.npy'), best_r)\n print(\"Finish storing\")\n def test_one_epoch(self):\n test_score = []\n for batch in tqdm(range(self.test_batches)):\n self.get_test_batch(batch)\n batch_score = self.test_one_step()\n test_score = test_score + batch_score\n test_result = []\n for i in range(len(test_score)):\n for j in range(1, len(test_score[i])):\n test_result.append([self.data_test_label[i][j], test_score[i][j]])\n test_result = sorted(test_result, key = lambda x: x[1])\n test_result = test_result[::-1]\n pr_x = []\n pr_y = []\n correct = 0\n for i, item in enumerate(test_result):\n correct += item[0]\n pr_y.append(float(correct) / (i + 1))\n pr_x.append(float(correct) / self.total_recall)\n # if pr_x[-1] > 0.60:\n # print(item[1])\n # print(pr_y[-1])\n auc = sklearn.metrics.auc(x = pr_x, y = pr_y)\n print(\"auc: \", auc)\n return auc, pr_x, pr_y\n def test(self):\n best_epoch = None\n best_auc = 0.0\n best_p = None\n best_r = None\n for epoch in self.epoch_range:\n path = os.path.join(self.checkpoint_dir, self.model.__name__ + '-' + str(epoch))\n if not os.path.exists(path):\n continue\n print(\"Start testing epoch %d\" % (epoch))\n self.testModel.load_state_dict(torch.load(path))\n auc, p, r = self.test_one_epoch()\n if auc > best_auc:\n best_auc = auc\n best_epoch = epoch\n best_p = p\n best_r = r\n print(\"Finish testing epoch %d\" % (epoch))\n print(\"Best epoch = %d | auc = %f\" % (best_epoch, best_auc))\n print(\"Storing best result...\")\n if not os.path.isdir(self.test_result_dir):\n os.mkdir(self.test_result_dir)\n np.save(os.path.join(self.test_result_dir, self.model.__name__ + '_x.npy'), best_p)\n np.save(os.path.join(self.test_result_dir, self.model.__name__ + '_y.npy'), best_r)\n print(\"Finish storing\")\n\n def predict(self,epoch,store_path='ori_predict'):\n path = os.path.join(self.checkpoint_dir, self.model.__name__ + '-' + str(epoch))\n if not os.path.exists(path):\n return\n self.testModel.load_state_dict(torch.load(path))\n test_score = []\n for batch in tqdm(range(self.test_batches)):\n self.get_test_batch(batch)\n 
batch_score = self.test_one_step()\n test_score = test_score + batch_score\n np.save(os.path.join(self.test_result_dir, store_path+'_res.npy'),test_score)\n if not os.path.exists(os.path.join(self.test_result_dir, 'true_label_res.npy')):\n np.save(os.path.join(self.test_result_dir, 'true_label_res.npy'),self.data_test_label)\n if not os.path.exists(os.path.join(self.test_result_dir, 'data_test_scope.npy')):\n np.save(os.path.join(self.test_result_dir, 'data_test_scope.npy'),self.data_test_scope)\n\n"
] | [
[
"numpy.take",
"torch.load",
"torch.from_numpy",
"numpy.random.shuffle",
"torch.cuda.is_available",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kazemiakk/wss_mri_calculator | [
"8e4808d66a739bf7d7ab75be628d7d74a32d1905"
] | [
"src/wss_utils_test.py"
] | [
"import logging\nimport wss_utils\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef test_vector_magnitude():\n v1 = np.asarray([[0.370491361,\t-0.167037462,\t-0.225748788],\n [0.585235848,\t-0.099175084,\t-0.116312987]])\n v1_mag = wss_utils.get_vector_magnitude(v1)\n \n true_mag = [0.464895555, 0.60486809]\n print('calculated mag', v1_mag)\n print('actual mag', v1_mag)\n np.testing.assert_almost_equal(v1_mag, true_mag)\n\n print('test_vector_magnitude PASS')\n\ndef test_orthogonal_vectors():\n v1 = np.asarray([[0.64604717, 0.0196206, 0.08837089],\n [0.64604717, 0.0196206, 0.08837089]])\n normals = np.asarray([[0.6021105,\t0.40786213,\t0.6863755],\n [0.24887183, 0.48617426,\t0.8376738]])\n\n normal_vectors, tangent_vectors = wss_utils.get_orthogonal_vectors(v1, normals)\n\n # Actual values\n true_normal = np.asarray([\n [0.275555809, 0.186658062, 0.314119678],\n [0.060811322, 0.118795684, 0.204683877]\n ])\n true_tangent = np.asarray([\n [0.370491361, -0.167037462, -0.225748788],\n [0.585235848, -0.099175084, -0.116312987]\n ])\n\n # Assert\n np.testing.assert_almost_equal(normal_vectors, true_normal)\n np.testing.assert_almost_equal(tangent_vectors, true_tangent)\n \n # Plot\n\n print('test_orthogonal_vectors PASS')\n\ndef test_calculate_gradient(inward_distance = 1, parabolic=True):\n pc0_tangent_mag = np.asarray([0,0,0,0,0,0])\n pc1_tangent_mag = np.asarray([1,4,3,1,2, 0.46489556])\n pc2_tangent_mag = np.asarray([2,5,5,4,5, 0.60486809])\n\n g, xx, yy = wss_utils._calculate_gradient_with_values(pc0_tangent_mag, pc1_tangent_mag, pc2_tangent_mag, inward_distance, use_parabolic=parabolic)\n\n # Prepare to plot\n x = np.array([0,1,2]) # We have 3 points to evaluate\n x = x * inward_distance # Get the correct distance scaling\n y = np.stack((pc0_tangent_mag, pc1_tangent_mag, pc2_tangent_mag), axis=1)\n \n # Plot the figures\n fig = plt.figure(1)\n main_title = \"Parabolic\" if parabolic else \"Linear\"\n main_title = \"{} x={}\".format(main_title, inward_distance)\n fig.suptitle(main_title)\n\n for i in range(0, len(y)):\n y_new = yy[i]\n g = np.gradient(y_new, xx)\n # print('wall_gradient', g[0])\n \n ax = fig.add_subplot(2,3,i+1)\n ax.plot(x,y[i],'o', xx, y_new)\n ax.title.set_text(\"Wall gradient {:.2f}\".format(g[0]))\n \n plt.show()\n plt.clf()\n\nif __name__ == \"__main__\":\n logging.getLogger(\"wss_utils\").setLevel(logging.DEBUG)\n \n test_vector_magnitude()\n \n test_orthogonal_vectors()\n\n test_calculate_gradient(inward_distance=1, parabolic=True)\n test_calculate_gradient(inward_distance=0.7, parabolic=True)\n\n test_calculate_gradient(inward_distance=1, parabolic=False)\n test_calculate_gradient(inward_distance=0.7, parabolic=False)\n\n "
] | [
[
"numpy.gradient",
"numpy.asarray",
"numpy.stack",
"numpy.testing.assert_almost_equal",
"matplotlib.pyplot.clf",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sevmardi/ml-projects | [
"0eb218c77cda61285cfcf599599ff28a8a8deba7"
] | [
"predicting-airline-delay/tools.py"
] | [
"import numpy as np\nimport sys\n\nfrom pprint import pprint\n\n\ndef get_vpo2(values, goback=1):\n \"\"\"\n Supporting rolling windows with goback.\n\n Just make sure that goback parameter here\n will match the one in network configuration\n in input parameter.\n\n if goback=2 inputs=2 in train.py:\n 'new_model_conf': dict(model=get_mlp, inputs=2)\n \"\"\"\n yy = [np.nan] * len(values)\n for i, v in enumerate(values):\n if i + goback > len(values) - 1:\n break\n fdata = []\n ii = 0\n while ii < goback:\n fdata.append(values[i + ii])\n ii += 1\n if(len(fdata) == 1):\n fdata = fdata[0]\n yy[i + goback] = fdata\n return yy\n\n\ndef get_params(script='train.py'):\n xa = ''\n if script == 'train.py':\n xa = '[plot|ploth]'\n try:\n name, epochs, batches = sys.argv[1:4]\n except ValueError:\n print('Usage: %s model_name epochs batch_size %s' % (script, xa))\n exit(1)\n try:\n plot = sys.argv[4]\n except IndexError:\n plot = False\n\n return name, int(epochs), int(batches), plot\n\n\ndef train_test_split(rawx, xpo):\n train_size = int(len(rawx) * 0.80)\n test_size = int(len(rawx) * 0.20)\n #print(train_size, test_size, len(rawx))\n train_x, train_y = np.array(rawx[:train_size]), np.array(xpo[:train_size])\n test_x, test_y = np.array(rawx[train_size:]), np.array(xpo[train_size:])\n return train_x, train_y, test_x, test_y\n\n\ndef logme(msg):\n pprint(msg)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
m3hdi-i/graph-total-spanning-trees | [
"2fa7b0ae300c05e6eb27604a03b8b047f4db7940"
] | [
"main.py"
] | [
"# -----------------------------\r\n# Author : Mehdi I.\r\n# -----------------------------\r\n\r\nimport numpy as np\r\n\r\ndef get_total_number_of_spanning_trees(matrix,nodes_count):\r\n # STEP 1: Replace all the diagonal elements with the degree of nodes.\r\n for i in range(nodes_count):\r\n sum_of_col =0\r\n for j in range(nodes_count):\r\n sum_of_col+=matrix[j][i]\r\n matrix[i][i]=sum_of_col\r\n\r\n # STEP 2: Replace all non-diagonal 1’s with -1.\r\n for i in range(nodes_count):\r\n for j in range(nodes_count):\r\n if i!=j and matrix[i][j]==1:\r\n matrix[i][j]=-1\r\n\r\n # STEP 3: final result is co-factor of any element ( like 0,0 )\r\n matrix=np.delete(matrix, 0, 0)\r\n matrix=np.delete(matrix, 0, 1)\r\n\r\n return abs(round(np.linalg.det(matrix)))\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n adjacency_matrix=[]\r\n\r\n # Specifying adjacency matrix of graph\r\n nodes_count=int(input(\"Enter your graph's nodes count : \"))\r\n for i in range(nodes_count):\r\n row=[]\r\n print('Enter values for row '+str(i+1)+' of adjacency matrix : ')\r\n for j in range(nodes_count):\r\n row.append(int(input()))\r\n\r\n adjacency_matrix.append(row)\r\n\r\n print('\\n Your Adjacency Matrix : \\n')\r\n for row in adjacency_matrix:\r\n print(\" \".join(map(str,row)))\r\n \r\n print('\\n Total number of spanning trees for given graph is:', get_total_number_of_spanning_trees(adjacency_matrix,nodes_count))\r\n"
] | [
[
"numpy.linalg.det",
"numpy.delete"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
clarkalvarez/Yuro-Tennis-Project | [
"32f106293200ba84ea6641f8fce70e217fd9a812"
] | [
"gitclark/distance_to_camera.py"
] | [
"# import the necessary packages\nfrom imutils import paths\nimport numpy as np\nimport imutils\nimport cv2\n \ndef find_marker(image):\n\t# convert the image to grayscale, blur it, and detect edges\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\tgray = cv2.GaussianBlur(gray, (5, 5), 0)\n\tedged = cv2.Canny(gray, 35, 125)\n \n\t# find the contours in the edged image and keep the largest one;\n\t# we'll assume that this is our piece of paper in the image\n\tcnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\tcnts = imutils.grab_contours(cnts)\n\tc = max(cnts, key = cv2.contourArea)\n \n\t# compute the bounding box of the of the paper region and return it\n\treturn cv2.minAreaRect(c)\n \ndef distance_to_camera(knownWidth, focalLength, perWidth):\n\t# compute and return the distance from the maker to the camera\n\treturn (knownWidth * focalLength) / perWidth\n \n \n# initialize the known distance from the camera to the object, which\n# in this case is 24 inches\nKNOWN_DISTANCE = 8.0\n \n# initialize the known object width, which in this case, the piece of\n# paper is 12 inches wide\nKNOWN_WIDTH = 4.0\n \n# load the furst image that contains an object that is KNOWN TO BE 2 feet\n# from our camera, then find the paper marker in the image, and initialize\n# the focal length\nimage = cv2.imread(\"images/2ft.png\")\nmarker = find_marker(image)\nfocalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH\n \ncap=cv2.VideoCapture(1)\nwhile True:\n _, image = cap.read()\n \n# loop over the images\n\n\t# load the image, find the marker in the image, then compute the\n\t# distance to the marker from the camera\n\n marker = find_marker(image)\n inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])\n \n\t# draw a bounding box around the image and display it\n box = cv2.cv.BoxPoints(marker) if imutils.is_cv2() else cv2.boxPoints(marker)\n box = np.int0(box)\n cv2.drawContours(image, [box], -1, (0, 255, 0), 2)\n cv2.putText(image, \"%.2fft\" % (inches / 12),\n (image.shape[1] - 200, image.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,\n 2.0, (0, 255, 0), 3)\n cv2.imshow(\"image\", image)\n cv2.waitKey(0)\n key=cv2.waitKey(1)\n if key==ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()"
] | [
[
"numpy.int0"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ttthomaschan/KylinModelLib | [
"9c812aadbf5d9155a6c77e37950e2a7919e45475",
"9c812aadbf5d9155a6c77e37950e2a7919e45475"
] | [
"models/detection/FCOS/dataset/TableBank_dataset.py",
"models/OCR/DBNet/networks/necks/det_fpn.py"
] | [
"from torchvision.datasets import CocoDetection\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nfrom torchvision import transforms\nimport os\nimport cv2\nimport random\nimport json\nfrom sklearn.model_selection import train_test_split\n\n\nclass TablebankDataset(Dataset):\n CLASSES_NAME = ('__back_ground__', 'table')\n def __init__(self,imgs_path, anno_path, resize_size=[800,1333], is_train = True, transform=None):\n super().__init__()\n print(\"INFO====>check annos, filtering invalid data......\")\n\n self.imgs_path = imgs_path\n ## 读入annotations文件,并分别取出label部分和image部分\n with open(anno_path, 'r') as ann_f:\n ann_dict = json.load(ann_f)\n self.anno_dict = ann_dict['annotations']\n self.img_dict = ann_dict['images']\n\n self.imgid2annoidx = {}\n for i in range(len(self.anno_dict)):\n if str(self.anno_dict[i]['image_id']) in self.imgid2annoidx.keys():\n self.imgid2annoidx[str(self.anno_dict[i]['image_id'])].append(i)\n else:\n self.imgid2annoidx[str(self.anno_dict[i]['image_id'])] = [i]\n\n self.boxCountMax = 0\n for _, v in self.imgid2annoidx.items():\n self.boxCountMax = max(self.boxCountMax, len(v))\n\n self.imgdict_train, self.imgdict_val = train_test_split(self.img_dict, test_size=0.1, random_state=22)\n\n self.category2id = {'table': 1}\n self.id2category = {'1': \"table\"}\n\n self.transform = transform\n self.resize_size = resize_size\n\n self.mean = [0.40789654, 0.44719302, 0.47026115]\n self.std = [0.28863828, 0.27408164, 0.27809835]\n self.train = is_train\n print(\"call TableBank_dataset.__init__().\")\n\n def __getitem__(self, index):\n\n if self.train:\n img_id = self.imgdict_train[index]['id']\n img_file = self.imgdict_train[index]['file_name']\n else:\n img_id = self.imgdict_val[index]['id']\n img_file = self.imgdict_val[index]['file_name']\n\n anno_index_list = self.imgid2annoidx[str(img_id)]\n img = cv2.imread(os.path.join(self.imgs_path, img_file))\n boxes = []\n classes = []\n for i in range(len(anno_index_list)):\n boxes.append(self.anno_dict[anno_index_list[i]]['bbox'])\n classes.append(self.anno_dict[anno_index_list[i]]['category_id'])\n # list --> ndarray\n boxes = np.array(boxes, dtype = np.float32)\n # xywh --> xyxy\n boxes[..., 2:] = boxes[..., 2:] + boxes[..., :2]\n\n # if self.train:\n # if random.random() < 0.5 :\n # img, boxes = flip(img, boxes)\n # if self.transform is not None:\n # img, boxes = self.transform(img, boxes)\n # img = np.array(img)\n\n img, boxes = self.preprocess_img_boxes(img, boxes, self.resize_size)\n # img = draw_bboxes(img,boxes)\n\n\n img = transforms.ToTensor()(img)\n # img = transforms.Normalize(self.mean, self.std,inplace=True)(img)\n boxes = torch.from_numpy(boxes)\n classes = torch.LongTensor(classes)\n\n return img, boxes, classes\n\n def __len__(self):\n if self.train:\n return len(self.imgdict_train)\n else:\n return len(self.imgdict_val)\n\n def preprocess_img_boxes(self, image, boxes, input_ksize):\n '''\n resize image and bboxes\n Returns\n image_paded: input_ksize\n bboxes: [None,4]\n '''\n min_side, max_side = input_ksize\n h, w, _ = image.shape\n\n smallest_side = min(w, h)\n largest_side = max(w, h)\n scale = min_side/smallest_side\n if largest_side*scale > max_side:\n scale = max_side/largest_side\n nw, nh = int(scale * w), int(scale * h)\n image_resized = cv2.resize(image, (nw, nh))\n\n pad_w = 32 - nw%32\n pad_h = 32 - nh%32\n\n image_paded = np.zeros(shape=[nh+pad_h, nw+pad_w, 3], dtype=np.uint8)\n image_paded[:nh, :nw, :] = image_resized\n\n if boxes is None:\n return image_paded\n 
else:\n boxes[:, [0, 2]] = boxes[:, [0, 2]] * scale\n boxes[:, [1, 3]] = boxes[:, [1, 3]] * scale\n return image_paded, boxes\n\n\n\n def _has_only_empty_bbox(self,annot):\n return all(any(o <= 1 for o in obj['bbox'][2:]) for obj in annot)\n\n\n def _has_valid_annotation(self,annot):\n if len(annot) == 0:\n return False\n\n if self._has_only_empty_bbox(annot):\n return False\n\n return True\n\n def collate_fn(self, data):\n imgs_list, boxes_list, classes_list = zip(*data)\n assert len(imgs_list) == len(boxes_list) == len(classes_list)\n batch_size = len(boxes_list)\n pad_imgs_list = []\n pad_boxes_list = []\n pad_classes_list = []\n\n h_list = [int(s.shape[1]) for s in imgs_list]\n w_list = [int(s.shape[2]) for s in imgs_list]\n max_h = np.array(h_list).max()\n max_w = np.array(w_list).max()\n for i in range(batch_size):\n img = imgs_list[i]\n pad_imgs_list.append(transforms.Normalize(self.mean, self.std, inplace=True)(torch.nn.functional.pad(img, (0, int(max_w-img.shape[2]),0,int(max_h-img.shape[1])),value=0.)))\n\n max_num=0\n for i in range(batch_size):\n n = boxes_list[i].shape[0]\n if n > max_num : max_num = n\n for i in range(batch_size):\n pad_boxes_list.append(torch.nn.functional.pad(boxes_list[i], (0,0,0,max_num-boxes_list[i].shape[0]), value=-1))\n pad_classes_list.append(torch.nn.functional.pad(classes_list[i], (0,max_num-classes_list[i].shape[0]), value=-1))\n\n\n batch_boxes = torch.stack(pad_boxes_list)\n batch_classes = torch.stack(pad_classes_list)\n batch_imgs = torch.stack(pad_imgs_list)\n\n return batch_imgs, batch_boxes, batch_classes\n\n\n\nif __name__==\"__main__\":\n\n dataset = TablebankDataset(\"/home/elimen/Data/OCR_dataset/Table/TableBank/Detection/images\", \"/home/elimen/Data/OCR_dataset/Table/TableBank/Detection/annotations/tablebank_word_train.json\")\n # img, boxes, classes = dataset[0] ## 直接给索引,调用的是forward()\n # cv2.imwrite(\"./123.jpg\",img) ## error,此时img格式为torch.Tensor\n img, boxes, classes = dataset.collate_fn([dataset[0], dataset[1], dataset[2]]) ## 此处直接调用的collate_fn()\n\n print(boxes, \"\\n\", classes, \"\\n\", img.shape, boxes.shape, classes.shape, \"\\n\", boxes.dtype, classes.dtype, img.dtype)\n print(\"Done.\")",
"\"\"\"\n# @file name : det_fpn.py\n# @author : JLChen\n# @date : 2021-07\n# @brief : DBNet neck -- FPN\n\"\"\"\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nimport numpy as np\n\nclass DB_fpn(nn.Module):\n def __init__(self, in_channels, out_channels=256, **kwargs):\n super().__init__()\n inplace = True\n self.out_channels = out_channels\n # reduce layers using 1*1 conv-filter\n self.in2_conv = nn.Conv2d(in_channels[0], self.out_channels, kernel_size=1, bias=False)\n self.in3_conv = nn.Conv2d(in_channels[1], self.out_channels, kernel_size=1, bias=False)\n self.in4_conv = nn.Conv2d(in_channels[2], self.out_channels, kernel_size=1, bias=False)\n self.in5_conv = nn.Conv2d(in_channels[3], self.out_channels, kernel_size=1, bias=False)\n # Smooth layers\n self.p5_conv = nn.Conv2d(self.out_channels, self.out_channels//4, kernel_size=3, padding=1, bias=False)\n self.p4_conv = nn.Conv2d(self.out_channels, self.out_channels//4, kernel_size=3, padding=1, bias=False)\n self.p3_conv = nn.Conv2d(self.out_channels, self.out_channels//4, kernel_size=3, padding=1, bias=False)\n self.p2_conv = nn.Conv2d(self.out_channels, self.out_channels//4, kernel_size=3, padding=1, bias=False)\n\n def _upsample_add(self, x, y):\n return F.interpolate(x, scale_factor=2) + y\n\n def _upsample_cat(self, p2, p3, p4, p5):\n p3 = F.interpolate(p3, scale_factor=2)\n p4 = F.interpolate(p4, scale_factor=4)\n p5 = F.interpolate(p5, scale_factor=8)\n return torch.cat([p5, p4, p3, p2], dim=1)\n\n def forward(self, x):\n # 从backbone传入不同尺寸特征图\n c2, c3, c4, c5 = x\n # 减少通道数,使每个尺寸通道数一致\n in5 = self.in5_conv(c5)\n in4 = self.in4_conv(c4)\n in3 = self.in3_conv(c3)\n in2 = self.in2_conv(c2)\n # 先上采样再相加,融合相邻两层特征层\n out4 = self._upsample_add(in5, in4)\n out3 = self._upsample_add(out4, in3)\n out2 = self._upsample_add(out3, in2)\n # 平滑特征层,调整输出层数\n p5 = self.p5_conv(in5)\n p4 = self.p4_conv(out4)\n p3 = self.p3_conv(out3)\n p2 = self.p2_conv(out2)\n # 拼接4层\n x = self._upsample_cat(p2, p3, p4, p5)\n return x\n"
] | [
[
"torch.LongTensor",
"torch.from_numpy",
"sklearn.model_selection.train_test_split",
"torch.stack",
"numpy.array",
"numpy.zeros",
"torch.nn.functional.pad"
],
[
"torch.nn.Conv2d",
"torch.nn.functional.interpolate",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hmc-cs-rkretsch/Secondary-Protein-Structure | [
"f1436976d453fbf6f5dbc4716bef3c5ad35b8455"
] | [
"Program/log_regression.py"
] | [
"'''\nAuthor: Rachael Kretsch\nBig Data Final Project\nSecondary Protein Structure\n\nfinally perform logistic regression on our data!\n'''\n\n\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\n\ndef log_reg(filename, penalty='l2', reg=1.0):\n '''grabs the data file and performs a 3 group logistic\n regression!''' \n \n data_file = filename + \"_data.pik\"\n with open(data_file, 'rb') as f:\n data = pickle.load(f)\n \n np.random.shuffle(data)\n \n data_test, data_train = data[:len(data)/4,:], data[len(data)/4:,:]\n \n x_train = data_train[:,1:]\n y_train = data_train[:,:1]\n x_test = data_test[:,1:]\n y_test = data_test[:,:1]\n \n #trying out logistic regression... \n#==============================================================================\n# \n# accuracy=[] \n# for regl in np.linspace(1e-4,5,20):\n# logreg = linear_model.LogisticRegression(C=regl)\n# \n# # we create an instance of Neighbours Classifier and fit the data.\n# logreg.fit(x_train, y_train)\n# \n# result = logreg.predict(x_test)\n# accuracy += [(result==y_test.flatten()).sum()/len(result)]\n# \n# plt.plot(np.linspace(1e-4,5,20), accuracy, label='l2 regression')\n# \n# #best reg is like 3.4 but any where in this range was fine\n# \n# accuracy=[] \n# for regl in np.linspace(1e-4,5,20):\n# logreg = linear_model.LogisticRegression(C=regl,penalty='l1')\n# \n# # we create an instance of Neighbours Classifier and fit the data.\n# logreg.fit(x_train, y_train)\n# \n# result = logreg.predict(x_test)\n# accuracy += [(result==y_test.flatten()).sum()/len(result)]\n# \n# plt.plot(np.linspace(1e-4,5,20), accuracy, label='l1 reggression', linestyle='--')\n# plt.title(\"Accuracy of prediction\")\n# plt.xlabel(\"regression coefficient\")\n# plt.ylabel(\"Accuracy\")\n# plt.legend(loc=4)\n# \n# #L2 performs better than L1!\n# #get graph for this...\n# \n#==============================================================================\n \n \n#==============================================================================\n# # accuracy=[] \n# dict_acc ={}\n# for i in range(100):\n# np.random.shuffle(data)\n# \n# data_test, data_train = data[:len(data)/4,:], data[len(data)/4:,:]\n# \n# x_train = data_train[:,1:]\n# y_train = data_train[:,:1]\n# x_test = data_test[:,1:]\n# y_test = data_test[:,:1]\n# logreg = linear_model.LogisticRegression(C=3.4)\n# \n# # we create an instance of Neighbours Classifier and fit the data.\n# logreg.fit(x_train, y_train)\n# \n# result = logreg.predict(x_test)\n# \n# # accuracy += [(result==y_test.flatten()).sum()/len(result)]\n# \n# \n# # plt.hist(accuracy, np.linspace(min(accuracy),max(accuracy),20), alpha = 0.5)\n# # plt.title(\"Range of accuracies with different training set sampling\")\n# # plt.xlabel(\"Average accuracy\")\n# # plt.ylabel(\"Frequency\")\n#==============================================================================\n\n \n logreg = linear_model.LogisticRegression(C=3.4)\n logreg.fit(x_train,y_train)\n data_file = filename + \"_data.pik\"\n with open(data_file, 'rb') as f:\n data_full = pickle.load(f)\n \n struct_file = filename + \"_struct_data.pik\"\n with open(struct_file, 'rb') as f:\n structures, sequences = pickle.load(f)\n \n x_full = data_full[:,1:]\n y_full = data_full[:,:1]\n prob_full = logreg.predict_proba(x_full)\n result_full = logreg.predict(x_full)\n \n \n new_file = filename + \"_seqs_data.pik\"\n with open(new_file, 'rb') as f:\n sequences_2,seqids,names,descriptions = pickle.load(f)\n \n dict_data = {}\n i = 0\n 
#smallest_errors= []\n #largest_errors=[]\n j = -1\n for seqid in seqids:\n j+=1\n length_sequence = len(sequences[j])\n result_temp = result_full[i:length_sequence+i]\n y_temp = y_full[i:length_sequence+i]\n accuracy_temp = (result_temp==y_temp.flatten()).sum()/len(result_temp)\n #probs = [prob_full[i:length_sequence+i]]\n dict_data[seqid] = accuracy_temp\n #dict_data[seqid] = (accuracy_temp,result_temp,y_temp,sequences[j],probs)\n #if (len(smallest_errors)<10 or accuracy_temp<smallest_errors[0][1]) and seqid != '1unj':\n # smallest_errors += [[seqid,accuracy_temp,j]]\n # smallest_errors = sorted(smallest_errors,key=lambda l:l[1], reverse=True)\n # if len(smallest_errors)>10:\n # smallest_errors=smallest_errors[:10]\n #if (len(largest_errors)<10 or accuracy_temp>largest_errors[0][1]) and seqid != '1unj':\n # largest_errors += [[seqid,accuracy_temp,j]]\n # largest_errors = sorted(largest_errors,key=lambda l:l[1])\n # if len(largest_errors)>10:\n # largest_errors=largest_errors[:10]\n #i+=length_sequence \n \n return dict_data\n\n#==============================================================================\n# coef_0 = [coef[0][3],coef[0][2],coef[0][1],coef[0][0],coef[0][4],coef[0][5],coef[0][6]]\n# coef_1 = [coef[1][3],coef[1][2],coef[1][1],coef[1][0],coef[1][4],coef[1][5],coef[1][6]]\n# coef_2 = [coef[2][3],coef[2][2],coef[2][1],coef[2][0],coef[2][4],coef[2][5],coef[2][6]]\n# \n# position = [-3,-2,-1,0,1,2,3]\n# \n# plt.plot(position,coef_0,label=\"beta strand (-1)\")\n# plt.plot(position,coef_1,label=\"coil (0)\")\n# plt.plot(position,coef_2,label='alpha helix (1)')\n# plt.title(\"Regression coefficients\")\n# plt.xlabel(\"Distance from amino acid being predicted\")\n# plt.legend(bbox_to_anchor=(1.05, 1), loc=2,)\n#==============================================================================\n \n#==============================================================================\n# print(prob)\n# print(result)\n# \n# \n# data_file = filename + \"_data.pik\"\n# with open(data_file, 'rb') as f:\n# data_full = pickle.load(f)\n# \n# struct_file = filename + \"_struct_data.pik\"\n# with open(struct_file, 'rb') as f:\n# structures, sequences = pickle.load(f)\n# \n# x_full = data_full[:,1:]\n# y_full = data_full[:,:1]\n# prob_full = logreg.predict_proba(x_full)\n# result_full = logreg.predict(x_full)\n# \n# \n# new_file = filename + \"_seqs_data.pik\"\n# with open(new_file, 'rb') as f:\n# sequences_2,seqids,names,descriptions = pickle.load(f)\n# \n# i = 0\n# j = -1\n# largest_errors = [] #ORDERED\n# smallest_errors = []\n# accurcacies = []\n# probs = []\n# for seqid in seqids:\n# j+=1\n# length_sequence = len(sequences[j])\n# result_temp = result_full[i:length_sequence+i]\n# y_temp = y_full[i:length_sequence+i]\n# accuracy_temp = (result_temp==y_temp.flatten()).sum()/len(result_temp)\n# accurcacies += [accuracy_temp]\n# probs += [prob_full[i:length_sequence+i]]\n# if (len(smallest_errors)<10 or accuracy_temp<smallest_errors[0][1]) and seqid != '1unj':\n# smallest_errors += [[seqid,accuracy_temp,j]]\n# smallest_errors = sorted(smallest_errors,key=lambda l:l[1], reverse=True)\n# if len(smallest_errors)>10:\n# smallest_errors=smallest_errors[:10]\n# if (len(largest_errors)<10 or accuracy_temp>largest_errors[0][1]) and seqid != '1unj':\n# largest_errors += [[seqid,accuracy_temp,j]]\n# largest_errors = sorted(largest_errors,key=lambda l:l[1])\n# if len(largest_errors)>10:\n# largest_errors=largest_errors[:10]\n# 
i+=length_sequence\n#==============================================================================\n \n#==============================================================================\n# plt.hist(np.array(accurcacies)[np.isfinite(accurcacies)], np.linspace(0.0,0.8,20), alpha = 0.5)\n# plt.title(\"Range of accuracies for individual proteins\")\n# plt.xlabel(\"Accuracy\")\n# plt.ylabel(\"Frequency\")\n# \n# \n#==============================================================================\n#==============================================================================\n# print(largest_errors)\n# print(smallest_errors)\n# \n# \n# print('small') \n# for res in smallest_errors:\n# num = res[2]\n# print(seqids[num])\n# print(''.join(sequences[num]))\n# graph_protein_prob(probs[num],sequences[num],res[0],res[1])\n# \n# print('large') \n# for res in largest_errors:\n# num = res[2]\n# print(seqids[num])\n# print(''.join(sequences[num]))\n# graph_protein_prob(probs[num],sequences[num],res[0],res[1])\n# \n# \n# \n# def graph_protein_prob(prob,seq,seqid,accuracy):\n# '''graphs the prob of the three structures against\n# the sequence'''\n# x = range(len(seq))\n# prob = np.array(prob)\n# beta = prob[:,:1].flatten()\n# coil = prob[:,1:2].flatten()\n# alpha = prob[:,2:].flatten()\n# plt.plot(x,beta,label='beta',linestyle='--')\n# plt.plot(x,coil,label='coil')\n# plt.plot(x,alpha,label='alpha',linestyle=':')\n# plt.title('Secondary structure prediction '+seqid)\n# plt.xlabel('Amino acid position')\n# plt.ylabel('Probability')\n# lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)\n# ax=plt.gca()\n# ax.set_xlim([0,len(seq)])\n# ax.set_ylim([0,1])\n# accuracy=round(accuracy,4)\n# plt.text(len(seq)*1.02,0.5,\"accuracy = \"+str(accuracy))\n# plt.savefig('../Data/'+seqid+'.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')\n# plt.close()\n#==============================================================================\n \n \ndef get_real_data(filename):\n '''essentially just to graph the accurate data of the ones we want'''\n seqidnums=[3,614,619,571,728,\n 423,616,416,729,727,\n 2,357,395,554,318,\n 107,270,634,671,341]\n data_file = filename + \"_data.pik\"\n with open(data_file, 'rb') as f:\n data_full = pickle.load(f)\n \n struct_file = filename + \"_struct_data.pik\"\n with open(struct_file, 'rb') as f:\n structures, sequences = pickle.load(f)\n\n y_full = data_full[:,:1]\n\n \n \n new_file = filename + \"_seqs_data.pik\"\n with open(new_file, 'rb') as f:\n sequences_2,seqids,names,descriptions = pickle.load(f)\n for i in seqidnums:\n seqid = seqids[i]\n x = range(len(sequences[i]))\n struct = structures[i]\n beta = []\n alpha = []\n coil = []\n for amino in range(len(sequences[i])):\n if struct[amino] == -1:\n beta += [1]\n alpha += [0]\n coil += [0]\n elif struct[amino] == 1:\n beta += [0]\n alpha += [1]\n coil += [0]\n else:\n beta += [0]\n alpha += [0]\n coil += [1]\n plt.scatter(x,beta,label='beta',marker = 'o',color='blue')\n plt.scatter(x,coil,label='coil',marker='x', color='green')\n plt.scatter(x,alpha,label='alpha',color='red')\n plt.title('Secondary structure '+seqid)\n plt.xlabel('Amino acid position')\n plt.ylabel('Probability')\n lgd = plt.legend(bbox_to_anchor=(1.05, 1), loc=2)\n ax=plt.gca()\n fig = plt.gcf()\n fig.set_size_inches\n ax.set_xlim([0,len(sequences[i])])\n ax.set_ylim([0.9,1.1])\n plt.savefig('../Data/'+seqid+'_actual.png',bbox_extra_artists=(lgd,),dpi=600,bbox_inches='tight')\n plt.close()\n \n\n \n \n "
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"numpy.random.shuffle",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nuka137/DeepLearningFramework | [
"613881e46b48c2206b9424a49106455cb2336d2e"
] | [
"dl/layers/normalization.py"
] | [
"import numpy as np\nfrom collections import OrderedDict\n\nfrom .layer_base import LayerBase\n\n\nclass BatchNormalizationLayer(LayerBase):\n def __init__(self, gamma=1.0, beta=0.0):\n super().__init__()\n\n self.cache = {}\n\n self.params = OrderedDict()\n self.params[\"gamma\"] = gamma\n self.params[\"beta\"] = beta\n\n self.grads = OrderedDict()\n self.grads[\"gamma\"] = 0\n self.grads[\"beta\"] = 0\n\n def id(self):\n return \"BatchNormalizationLayer\"\n\n def parameters(self):\n return self.params\n\n def gradients(self):\n return self.grads\n\n def forward(self, x):\n gamma = self.params[\"gamma\"]\n beta = self.params[\"beta\"]\n\n mu = np.mean(x, axis=0)\n xmu = x - mu\n variance = np.mean(xmu**2, axis=0)\n sqrt_variance = np.sqrt(variance + 1e-7)\n inv_sqrt_variance = 1 / sqrt_variance\n xhat = xmu * inv_sqrt_variance\n y = gamma * xhat + beta\n\n self.cache[\"variance\"] = variance\n self.cache[\"sqrt_variance\"] = sqrt_variance\n self.cache[\"inv_sqrt_variance\"] = inv_sqrt_variance\n self.cache[\"xhat\"] = xhat\n self.cache[\"xmu\"] = xmu\n self.cache[\"batch_size\"] = x.shape[0]\n\n return y\n\n def backward(self, dy):\n xhat = self.cache[\"xhat\"]\n xmu = self.cache[\"xmu\"]\n inv_sqrt_variance = self.cache[\"inv_sqrt_variance\"]\n sqrt_variance = self.cache[\"sqrt_variance\"]\n variance = self.cache[\"variance\"]\n batch_size = self.cache[\"batch_size\"]\n gamma = self.params[\"gamma\"]\n\n dbeta = dy * np.sum(dy, axis=0)\n dgamma = np.sum(dy * xhat, axis=0)\n dxhat = dy * gamma\n dxmu1 = dxhat * inv_sqrt_variance\n dinv_sqrt_variance = np.sum(dxhat * xmu, axis=0)\n dsqrt_variance = -dinv_sqrt_variance / sqrt_variance**2\n dvariance = 0.5 * dsqrt_variance / np.sqrt(variance + 1e-7)\n dmean = dvariance * np.ones(xmu.shape) / batch_size\n dxmu2 = 2.0 * xmu * dmean\n dx1 = dxmu1 + dxmu2\n dmu = -1 * np.sum(dxmu1 + dxmu2, axis=0)\n dx2 = dmu * np.ones(xmu.shape) / batch_size\n dx = dx1 + dx2\n\n return dx\n"
] | [
[
"numpy.sum",
"numpy.mean",
"numpy.sqrt",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
akashsunilgaikwad/Pytorch-Squeeznet | [
"e7b9f4d44d9e91c8d23ad623d956c2f2410deb1c"
] | [
"main.py"
] | [
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 13 19:32:33 2018\n\n@author: akash\n\"\"\"\n\n\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nimport torchvision\nimport numpy as np\nfrom torch.autograd import Variable\nimport model_squeeznet\n\n\nseed = 42\nnp.random.seed(seed)\ntorch.manual_seed(seed)\n\n#The compose function allows for multiple transforms\n#transforms.ToTensor() converts our PILImage to a tensor of shape (C x H x W) in the range [0,1]\n#transforms.Normalize(mean,std) normalizes a tensor to a (mean, std) for (R, G, B)\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.491399689874, 0.482158419622, 0.446530924224), (0.247032237587, 0.243485133253, 0.261587846975))])\n\ntrain_set = torchvision.datasets.CIFAR10(root='./cifardata', train=True, download=True, transform=transform)\n\ntest_set = torchvision.datasets.CIFAR10(root='./cifardata', train=False, download=True, transform=transform)\n\n\nclasses = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\n#Training\nn_training_samples = 50000\ntrain_sampler = SubsetRandomSampler(np.arange(n_training_samples, dtype=np.int64))\nprint(train_sampler)\n\n#Validation\nn_val_samples = 5000\nval_sampler = SubsetRandomSampler(np.arange(n_training_samples, n_training_samples + n_val_samples, dtype=np.int64))\n\n#Test\nn_test_samples = 10000\ntest_sampler = SubsetRandomSampler(np.arange(n_test_samples, dtype=np.int64))\n\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n output = output.view(len(target),-1)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n\n\n\ndef test(args, model, device, test_loader):\n# model.val()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n output = output.view(len(target),-1)\n test_loss += F.nll_loss(output, target, size_average=False).item() # sum up batch loss\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n \n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=1, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n \n \n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n print(device)\n\n# kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, num_workers=2)\n print(len(train_loader.dataset))\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=128, num_workers=2)\n\n\n# model = Net().to(device)\n model = model_squeeznet.SqueezeNet().to(device)\n print(model)\n# model = model.SqueezeNet()\n \n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch)\n torch.save(model, \"akash_squeezenet\")\n test(args, model, device, test_loader)\n \n \n \n import matplotlib.pyplot as plt\n \n def imshow(img):\n img = img / 2 + 0.5 # unnormalize\n npimg = img.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n\n\n\n dataiter = iter(test_loader)\n print(test_loader)\n images, labels = dataiter.next()\n \n images, labels = Variable(images), Variable(labels)\n images, labels = images.to(device), labels.to(device)\n \n print(labels)\n \n ## print images\n #imshow(torchvision.utils.make_grid(images))\n print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(12)))\n \n \n outputs = model(images)\n \n _, predicted = torch.max(outputs, 1)\n \n print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(12)))\n\n\nif __name__ == '__main__':\n main()\n \n \n\n\n"
] | [
[
"torch.max",
"numpy.random.seed",
"torch.nn.functional.nll_loss",
"torch.manual_seed",
"numpy.arange",
"torch.utils.data.DataLoader",
"torch.autograd.Variable",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.transpose",
"torch.device",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
afeldman/TorchMoon | [
"04c6ad75b0202542a2a885cb2a900085e43c5884"
] | [
"torchmoon/model/cratervnet.py"
] | [
"from typing import (Any, List)\n\nfrom torch.nn import (Module, Conv2d, Dropout2d, ConvTranspose2d)\nfrom torch import (cat, add)\nfrom torch.nn import BCEWithLogitsLoss\nfrom torch.optim import Adam\nimport pytorch_lightning as pl\n\nfrom torchmetrics import (MaxMetric, StructuralSimilarityIndexMeasure)\n\nfrom torchmoon.torch.activations import Activation\nfrom torchmoon.torch.util import (passthrough, ContBatchNorm2d)\nfrom torchmoon.torch.conv import make_Conv\n\n\nclass InputTransition(Module):\n\n def __init__(self, relu):\n super().__init__()\n\n self.conv1 = Conv2d(1, 16, kernel_size=5, padding=2)\n self.bn1 = ContBatchNorm2d(16)\n self.relu1 = Activation(relu, 16)\n\n def forward(self, idata):\n outdata = self.relu1(self.bn1(self.conv1(idata)))\n\n return outdata\n\n\n#start downpath\nclass DownTransition(Module):\n\n def __init__(self, inchan, nConvs, relu, dropout=0):\n super().__init__()\n\n outchan = 2 * inchan\n self.down_conv = Conv2d(inchan, outchan, kernel_size=2, stride=2)\n self.bn1 = ContBatchNorm2d(outchan)\n self.do1 = passthrough\n self.relu1 = Activation(relu, outchan)\n self.relu2 = Activation(relu, outchan)\n\n if dropout > 0:\n self.do1 = Dropout2d(dropout)\n\n self.ops = make_Conv(outchan, nConvs, relu)\n\n def forward(self, idata):\n down = self.relu1(self.bn1(self.down_conv(idata)))\n out = self.do1(down)\n out = self.ops(out)\n out = self.relu2(add(out, down))\n return out\n\n\nclass UpTransition(Module):\n\n def __init__(self, inchan, outchan, nConvs, relu, dropout=0):\n super().__init__()\n\n self.up_conv = ConvTranspose2d(inchan,\n outchan // 2,\n kernel_size=2,\n stride=2)\n self.bn1 = ContBatchNorm2d(outchan // 2)\n self.do1 = passthrough\n self.do2 = Dropout2d(dropout)\n self.relu1 = Activation(relu, outchan // 2)\n self.relu2 = Activation(relu, outchan)\n\n if dropout > 0:\n self.do1 = Dropout2d(dropout)\n\n self.ops = make_Conv(outchan, nConvs, relu)\n\n def forward(self, idata, skipx):\n up = self.do1(idata)\n skipxdo = self.do2(skipx)\n out = self.relu1(self.bn1(self.up_conv(up)))\n xcat = cat((out, skipxdo), 1)\n out = self.ops(xcat)\n out = self.relu2(add(out, xcat))\n return out\n\n\nclass OutTransition(Module):\n\n def __init__(self, inchan, relu):\n super().__init__()\n\n self.conv1 = Conv2d(inchan, 2, kernel_size=5, padding=2)\n self.bn1 = ContBatchNorm2d(2)\n self.conv2 = Conv2d(2, 2, kernel_size=1)\n self.relu1 = Activation(relu, 2)\n self.conv3 = Conv2d(2, 1, kernel_size=1)\n\n def forward(self, idata):\n out = self.relu1(self.bn1(self.conv1(idata)))\n out = self.relu1(self.conv2(out))\n out = self.conv3(out)\n return out\n\n\nclass Crater_VNet(pl.LightningModule):\n\n def __init__(self, activation=\"relu\", dropout=.15, lr=0.02):\n super().__init__()\n self.save_hyperparameters(logger=False)\n\n self.criterion = BCEWithLogitsLoss()\n\n # use separate metric instance for train, val and test step\n # to ensure a proper reduction over the epoch\n self.train_acc = StructuralSimilarityIndexMeasure()\n self.val_acc = StructuralSimilarityIndexMeasure()\n self.test_acc = StructuralSimilarityIndexMeasure()\n\n # for logging best so far validation accuracy\n self.val_acc_best = MaxMetric()\n\n self.in_tr = InputTransition(self.hparams.activation)\n self.down_32 = DownTransition(16, 1, self.hparams.activation)\n self.down_64 = DownTransition(32, 1, self.hparams.activation)\n self.down_128 = DownTransition(64, 2, self.hparams.activation,\n self.hparams.dropout)\n self.down_256 = DownTransition(128, 2, self.hparams.activation,\n 
self.hparams.dropout)\n self.up_256 = UpTransition(256, 256, 2, self.hparams.activation,\n self.hparams.dropout)\n self.up_128 = UpTransition(256, 128, 2, self.hparams.activation,\n self.hparams.dropout)\n self.up_64 = UpTransition(128, 64, 1, self.hparams.activation)\n self.up_32 = UpTransition(64, 32, 1, self.hparams.activation)\n self.out_tr = OutTransition(32, self.hparams.activation)\n\n def forward(self, idata):\n out16 = self.in_tr(idata)\n out32 = self.down_32(out16)\n out64 = self.down_64(out32)\n out128 = self.down_128(out64)\n out256 = self.down_256(out128)\n\n out = self.up_256(out256, out128)\n out = self.up_128(out, out64)\n out = self.up_64(out, out32)\n out = self.up_32(out, out16)\n out = self.out_tr(out)\n\n return out\n\n def configure_optimizers(self):\n optimizer = Adam(self.parameters(), lr=self.hparams.lr)\n return optimizer\n\n def step(self, batch: Any):\n x, y, _ = batch\n\n y_hat = self.forward(x)\n loss = self.criterion(y_hat, y)\n\n return loss, y_hat, y\n\n def training_step(self, train_batch: Any, batch_idx: int):\n # data to device\n loss, preds, targets = self.step(train_batch)\n\n acc = self.train_acc(preds, targets)\n self.log(\"train/loss\",\n loss,\n on_step=False,\n on_epoch=True,\n prog_bar=False)\n self.log(\"train/acc\", acc, on_step=False, on_epoch=True, prog_bar=True)\n\n return {\"loss\": loss, \"preds\": preds, \"targets\": targets}\n\n def validation_step(self, val_batch: Any, batch_idx: int):\n loss, preds, targets = self.step(val_batch)\n\n # log val metrics\n acc = self.val_acc(preds, targets)\n self.log(\"val/loss\",\n loss,\n on_step=False,\n on_epoch=True,\n prog_bar=False)\n self.log(\"val/acc\", acc, on_step=False, on_epoch=True, prog_bar=True)\n\n return {\"loss\": loss, \"preds\": preds, \"targets\": targets}\n\n def validation_epoch_end(self, outputs: List[Any]):\n acc = self.val_acc.compute() # get val accuracy from current epoch\n self.val_acc_best.update(acc)\n self.log(\"val/acc_best\",\n self.val_acc_best.compute(),\n on_epoch=True,\n prog_bar=True)\n\n def test_step(self, batch: Any, batch_idx: int):\n loss, preds, targets = self.step(batch)\n\n # log test metrics\n acc = self.test_acc(preds, targets)\n self.log(\"test/loss\", loss, on_step=False, on_epoch=True)\n self.log(\"test/acc\", acc, on_step=False, on_epoch=True)\n\n return {\"loss\": loss, \"preds\": preds, \"targets\": targets}\n\n def on_epoch_end(self):\n # reset metrics at the end of every epoch\n self.train_acc.reset()\n self.test_acc.reset()\n self.val_acc.reset()\n"
] | [
[
"torch.nn.Dropout2d",
"torch.add",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.BCEWithLogitsLoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tanwj14/examples | [
"08e59e6ccf45e8fb0e15951c752921d70e0b548c"
] | [
"word_language_model/model.py"
] | [
"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass FNNModel(nn.Module):\n\n def __init__(self, vocab_size, embedding_dim, context_size, nhid, tie_weights=False):\n super(FNNModel, self).__init__()\n self.context_size = context_size\n self.embedding_dim = embedding_dim\n self.nhid = nhid\n self.vocab_size = vocab_size\n self.vocab_size1 = vocab_size + 1\n\n self.embeddings = nn.Embedding(vocab_size+1, embedding_dim)\n self.linear1 = nn.Linear(context_size * embedding_dim, nhid)\n # nn.init.xavier_normal_(self.linear1.weight)\n # self.tanh = nn.Tanh()\n self.linear2 = nn.Linear(nhid, vocab_size+1)\n # self.decoder = nn.Linear(nhid, vocab_size)\n\n # Tie weights: weight sharing between input and output layer\n if tie_weights:\n if nhid != embedding_dim:\n raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.linear2.weight = self.embeddings.weight\n\n def wrap_input(self, input):\n '''\n Preprocess the input to fit the computation graph of FNNModel\n e.g. input = [[1, 3], \n [2, 4]]\n wrapped_input = [\n [[<PAD>, 1], [<PAD>, 3], \n [[1, 2]], [3, 4]]\n ]\n Arguments:\n input: torch tensor with shape [seq_len, batch_size]\n Returns:\n wrapped_input: torch tensor with shape [seq_len, batch_size, model_seq_len]\n '''\n wrapped_input = []\n batch_size = input.shape[1] # Num of col (dimensions)\n context_size = input.shape[0] # Num of rows\n # print(\"input size:\", input.shape)\n\n for idx in range(0, context_size):\n\n if idx == self.context_size-1:\n # The last time step needs no padding\n wrapped_input.append(input)\n continue\n\n valid_tokens = input[0:idx+1, :]\n padding = self.vocab_size * torch.ones([self.context_size - 1 - idx, batch_size], dtype=torch.int32).to(valid_tokens.device)\n # print(\"padding shape\", padding.shape)\n # print(\"valid tokens\", valid_tokens.shape)\n padded_tokens = torch.cat([padding, valid_tokens], dim=0)\n wrapped_input.append(padded_tokens)\n\n wrapped_input = torch.stack(wrapped_input, dim=0)\n wrapped_input = torch.transpose(wrapped_input, 1, 2)\n return wrapped_input\n \n def forward(self, inputs):\n # print(\"batch size:\", inputs.shape[1])\n # print(\"context_size:\", inputs.shape[0])\n\n wrapped_input = self.wrap_input(inputs)\n # print(\"valid? 
\", wrapped_input.shape[2] == inputs.shape[0])\n\n embeds = self.embeddings(wrapped_input)\n # print(\"embeds\", embeds.shape)\n embeds = embeds.view(embeds.shape[0], embeds.shape[1], -1)\n # print(\"embeds\", embeds.shape)\n # embeds = self.embeddings(inputs).view((1, -1))\n # embeds = self.embeddings(inputs).view((-1, self.context_size * self.embedding_dim))\n out = F.tanh(self.linear1(embeds))\n out = self.linear2(out)\n out = out.view(-1, self.vocab_size1)\n log_probs = F.log_softmax(out, dim=1)\n return log_probs\n\n # def init_hidden(self, bsz):\n # weight = next(self.parameters())\n # return weight.new_zeros(2, bsz, self.nhid)\n\nclass RNNModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):\n super(RNNModel, self).__init__()\n self.ntoken = ntoken\n self.drop = nn.Dropout(dropout)\n self.encoder = nn.Embedding(ntoken, ninp)\n if rnn_type in ['LSTM', 'GRU']:\n self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)\n else:\n try:\n nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]\n except KeyError:\n raise ValueError( \"\"\"An invalid option for `--model` was supplied,\n options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']\"\"\")\n self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)\n self.decoder = nn.Linear(nhid, ntoken)\n\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n if nhid != ninp:\n raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.nhid = nhid\n self.nlayers = nlayers\n\n def init_weights(self):\n initrange = 0.1\n nn.init.uniform_(self.encoder.weight, -initrange, initrange)\n nn.init.zeros_(self.decoder.weight)\n nn.init.uniform_(self.decoder.weight, -initrange, initrange)\n\n def forward(self, input, hidden):\n emb = self.drop(self.encoder(input))\n output, hidden = self.rnn(emb, hidden)\n output = self.drop(output)\n decoded = self.decoder(output)\n decoded = decoded.view(-1, self.ntoken)\n return F.log_softmax(decoded, dim=1), hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return (weight.new_zeros(self.nlayers, bsz, self.nhid),\n weight.new_zeros(self.nlayers, bsz, self.nhid))\n else:\n return weight.new_zeros(self.nlayers, bsz, self.nhid)\n\n# Temporarily leave PositionalEncoding module here. Will be moved somewhere else.\nclass PositionalEncoding(nn.Module):\n r\"\"\"Inject some information about the relative or absolute position of the tokens\n in the sequence. The positional encodings have the same dimension as\n the embeddings, so that the two can be summed. Here, we use sine and cosine\n functions of different frequencies.\n .. math::\n \\text{PosEncoder}(pos, 2i) = sin(pos/10000^(2i/d_model))\n \\text{PosEncoder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))\n \\text{where pos is the word position and i is the embed idx)\n Args:\n d_model: the embed dim (required).\n dropout: the dropout value (default=0.1).\n max_len: the max. 
length of the incoming sequence (default=5000).\n Examples:\n >>> pos_encoder = PositionalEncoding(d_model)\n \"\"\"\n\n def __init__(self, d_model, dropout=0.1, max_len=5000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n r\"\"\"Inputs of forward function\n Args:\n x: the sequence fed to the positional encoder model (required).\n Shape:\n x: [sequence length, batch size, embed dim]\n output: [sequence length, batch size, embed dim]\n Examples:\n >>> output = pos_encoder(x)\n \"\"\"\n\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\nclass TransformerModel(nn.Module):\n \"\"\"Container module with an encoder, a recurrent or transformer module, and a decoder.\"\"\"\n\n def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):\n super(TransformerModel, self).__init__()\n try:\n from torch.nn import TransformerEncoder, TransformerEncoderLayer\n except:\n raise ImportError('TransformerEncoder module does not exist in PyTorch 1.1 or lower.')\n self.model_type = 'Transformer'\n self.src_mask = None\n self.pos_encoder = PositionalEncoding(ninp, dropout)\n encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)\n self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)\n self.encoder = nn.Embedding(ntoken, ninp)\n self.ninp = ninp\n self.decoder = nn.Linear(ninp, ntoken)\n\n self.init_weights()\n\n def _generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def init_weights(self):\n initrange = 0.1\n nn.init.uniform_(self.encoder.weight, -initrange, initrange)\n nn.init.zeros_(self.decoder.weight)\n nn.init.uniform_(self.decoder.weight, -initrange, initrange)\n\n def forward(self, src, has_mask=True):\n if has_mask:\n device = src.device\n if self.src_mask is None or self.src_mask.size(0) != len(src):\n mask = self._generate_square_subsequent_mask(len(src)).to(device)\n self.src_mask = mask\n else:\n self.src_mask = None\n\n src = self.encoder(src) * math.sqrt(self.ninp)\n src = self.pos_encoder(src)\n output = self.transformer_encoder(src, self.src_mask)\n output = self.decoder(output)\n return F.log_softmax(output, dim=-1)\n"
] | [
[
"torch.nn.Dropout",
"torch.transpose",
"torch.nn.init.uniform_",
"torch.ones",
"torch.nn.functional.log_softmax",
"torch.zeros",
"torch.sin",
"torch.cat",
"torch.nn.RNN",
"torch.arange",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.nn.TransformerEncoderLayer",
"torch.nn.TransformerEncoder",
"torch.nn.init.zeros_",
"torch.stack",
"torch.cos"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dviewai/Face-Liveness-Detection | [
"eab9a90e38b1fbea4dc921fb1a68cbe9fd29f7c4"
] | [
"training.py"
] | [
"\nimport matplotlib\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport random\nimport cv2\nimport os\nmatplotlib.use(\"Agg\")\nfrom architecture import MiniVGG\nimport tensorflow as tf\nfrom keras.preprocessing.image import ImageDataGenerator,img_to_array\nfrom keras.optimizers import Adam\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import to_categorical\nfrom sklearn.metrics import classification_report,confusion_matrix\n\nimg_height=128 # height of the training image\nimg_width=128 #width of the training image\nEPOCHS = 10 #number of epochs to be trained for\nnum_classes=2 #number of labels\nINIT_LR = 1e-3 #Initial Learning rate\nBS = 32 # Bach size to feed\n\n# initialize the data and labels\nprint(\"Processing images in form of NPY file\")\ndata = []\nlabels = []\n# grab the image paths and randomly shuffle them\nimagePaths = sorted(list(paths.list_images(r\"{path of the image dataset directory}\")))\nrandom.seed(42)\nrandom.shuffle(imagePaths)\n\n# loop over the input images\nfor imagePath in imagePaths:\n\t# load the image, pre-process it, and store it in the data list\n\timage = cv2.imread(imagePath)\n\timage = cv2.resize(image, (img_height, img_width))\n\timage = img_to_array(image)\n\tdata.append(image)\n\n\t# extract the class label from the image path and update the\n\t# labels list\n\tlabel = imagePath.split(os.path.sep)[-2]\n\tlabel = 1 if label == \"fake\" else 0\n\tlabels.append(label)\n\n# scale the raw pixel intensities to the range [0, 1]\ndata = np.array(data, dtype=\"float\") / 255.0\nnp.save('data.npy',data)\nlabels = np.array(labels)\nnp.save('labels.npy',labels)\ndata=np.load('data.npy')\nlabels=np.load('labels.npy')\n# partition the data into training and testing splits using 75% of\n# the data for training and the remaining 25% for testing\n(trainX, testX, trainY, testY) = train_test_split(data,\n\tlabels, test_size=0.25, random_state=42)\nchannels=trainX.shape[3]\n# convert the labels from integers to vectors\ntrainY = to_categorical(trainY, num_classes)\ntestY = to_categorical(testY, num_classes)\n\n# construct the image generator for data augmentation\naug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,horizontal_flip=True, fill_mode=\"nearest\")\n\n# initialize the model\nprint(\"Compiling model...\")\nmodel = MiniVGG.build(width=img_width, height=img_height, depth=channel, classes=num_classes)\nopt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS) #Optimise uisng Adam \nmodel.compile(loss=\"binary_crossentropy\", optimizer=opt,metrics=[\"accuracy\"])\n\n# train the network\nprint(\"Training network\")\nH = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),\n\tvalidation_data=(testX, testY), steps_per_epoch=len(trainX),\n\tepochs=EPOCHS, verbose=1)\nlabel_name=[\"real\",\"fake\"]\nprint(\"[INFO] evaluating network...\")\npredictions = model.predict(testX, batch_size=BS) \nprint(classification_report(testY.argmax(axis=1),\npredictions.argmax(axis=1)))\n\ncm = confusion_matrix(testY.argmax(axis=1), predictions.argmax(axis=1))\ntotal = sum(sum(cm))\nacc = (cm[0, 0] + cm[1, 1]) / total\nprint(cm)\n\n"
] | [
[
"matplotlib.use",
"sklearn.model_selection.train_test_split",
"numpy.save",
"numpy.load",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nan-dre/FFTNR | [
"a66569fa11b0ee81345f5bffe8167cc5ae41a7fa"
] | [
"main.py"
] | [
"import scipy\nimport numpy as np\nimport librosa\nfrom librosa import display\nfrom matplotlib import pyplot as plt\nimport pprint\n\nfile_path = \"sounds/a_tired_ghost.wav\"\nsamples, sampling_rate = librosa.load(file_path, sr = None, mono = True,\n offset = 0.0, duration = None)\n\ndef plot_wave():\n plt.figure()\n librosa.display.waveplot( y = samples, sr = sampling_rate )\n plt.xlabel(\"Time\")\n plt.ylabel(\"Amplitutde\")\n plt.show()\n\ndef fft_plot(sampling_rate, samples):\n n = len(samples)\n T = 1/sampling_rate\n yf = scipy.fft(samples)\n xf = np.linspace(0.0, 1.0/(2.0*T), n//2)\n fig, ax = plt.subplots()\n ax.plot(xf, 2.0/n * np.abs(yf[:n//2]))\n plt.grid()\n return plt.show()\n\nif __name__ == \"__main__\":\n\n duration = len(samples) / sampling_rate\n print(duration)\n print(len(samples))\n # for i in range(0,100):\n # print(samples[i])\n\n plot_wave()\n # fft_plot(sampling_rate, samples)\n"
] | [
[
"numpy.abs",
"scipy.fft",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mosco/graphknn | [
"0bbee471eae7ca909a8b4870c011d658a36f8088"
] | [
"test_graphknn.py"
] | [
"'''Use py.test to run the testing functions here, or just run them manually.'''\n\nimport numpy as np\nimport scipy.sparse\nimport heapdict\n\nimport graphknn\n\n\ndef graphknn_using_dijkstra(W, mask, k):\n n = graphknn._check_sparse_edge_weights_matrix(W)\n\n assert mask.dtype == np.bool\n assert mask.shape == (n,)\n terminal_indices = mask.nonzero()[0]\n\n assert k <= len(terminal_indices)\n\n distances_from_terminals = np.vstack(scipy.sparse.csgraph.dijkstra(W, indices = [i])[0] for i in terminal_indices)\n assert distances_from_terminals.shape == (len(terminal_indices), n)\n\n knn = []\n for i in range(n):\n k_closest_terminals_to_i = np.argpartition(distances_from_terminals[:,i], k-1)[:k]\n knn.append(list(zip(distances_from_terminals[k_closest_terminals_to_i, i], terminal_indices[k_closest_terminals_to_i])))\n\n return knn\n\n\ndef build_sparse_undirected_nonnegative_csr_matrix(n):\n W = np.random.random((n,n))\n W = W + W.transpose()\n W[W < 1.5] = np.inf\n return scipy.sparse.csr_matrix(W)\n\n\ndef test_graphknn():\n N = 100\n p = 0.2 \n k = 5\n \n W = build_sparse_undirected_nonnegative_csr_matrix(N)\n mask = np.random.random(N) < p\n print('terminal indices:')\n print(mask.nonzero()[0])\n\n result0 = graphknn_using_dijkstra(W, mask, k)\n result1 = graphknn.algorithm1(W, mask, k)\n result2 = graphknn.algorithm2(W, mask, k)\n\n for i in range(len(result0)):\n print('result0[{0}]:\\n{1}'.format(i, sorted(result0[i])))\n print('result1[{0}]:\\n{1}'.format(i, sorted(result1[i])))\n print('result2[{0}]:\\n{1}'.format(i, sorted(result2[i])))\n\n assert sorted(result0[i]) == sorted(result1[i])\n assert sorted(result0[i]) == sorted(result2[i])\n\n"
] | [
[
"numpy.random.random",
"numpy.argpartition"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Phusun/kaggle-and-public-datasets | [
"4deeb8406deec7eddb3a3217f0dce55df09dfe0d"
] | [
"A Machine Learning Based Approach for Forecasting Housing Prices/Data_Code/databin/metro_names.py"
] | [
"import pandas as pd\n\nmetros = pd.read_excel('Metro names.xlsx', header=0)\nmetros_list = list(metros.iloc[:,0])\nstates_list = list(metros.iloc[:,1])\n\npersonal_income = pd.read_excel('Per Capita Personal Income 2015-2017.xlsx', header=None, skiprows=8, usecols=[0,1,2,3], names=['Metropolitan Areas','2015','2016','2017'])\npersonal_income.dropna(inplace=True)\n\n\n# Split the metro areas and their states\npersonal_income['metro_name'] = personal_income['Metropolitan Areas'].apply(lambda x: x.split(',')[0])\npersonal_income['state_name'] = personal_income['Metropolitan Areas'].apply(lambda x: x.split(',')[1].lstrip()) # remove the leading whitespace from state abbreviations\n\n# Function to match metro and state names\ndef metro_matching(row):\n exists = 0\n\n for i, metro in enumerate(metros_list):\n if metro in row['metro_name']:\n if row['state_name'] == states_list[i]:\n exists = 1\n\n return exists\n\npersonal_income['in50'] = personal_income.apply(metro_matching, axis=1)\n\n# Only retain rows for 50 metro areas in our list\npersonal_income = personal_income[personal_income['in50']==1]\n\n# Function to replace metro area names to corresponding ones from the standard list of names\ndef std_name(row):\n for metro in metros_list:\n if metro in row:\n return metro\n\npersonal_income_metros = personal_income.copy()\npersonal_income_metros['Metro name'] = personal_income['metro_name'].apply(std_name)\n\n# drop columns, reset index and rearrange the columns\npersonal_income_metros.drop(columns=['Metropolitan Areas', 'metro_name', 'state_name', 'in50'], inplace=True)\npersonal_income_metros.reset_index(drop=True, inplace=True)\npersonal_income_metros = personal_income_metros[['Metro name', '2015', '2016', '2017']]\n"
] | [
[
"pandas.read_excel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
filipeamorais/cs463cloudsecurityuitwitterbotdetection | [
"1edc4c8040a8666a4f4e0377561ae3477f74cf93"
] | [
"deployment/getcsv.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 16 12:32:01 2019\n\n@author: Dhiral\n\"\"\"\n\n#%%\nimport warnings\nimport requests\nimport numpy as np\nfrom requests_oauthlib import OAuth1\nimport tweepy as tw\nimport pandas as pd\nimport os\nfrom sklearn import tree\n\n#%% Your Account info\nconsumer_key = 'HYE8UXTCKR0B6pUFANnlMPsi5'\nconsumer_secret = '5Z3hn5SWhfChFPekmJDMOyjfdGTLjjOA4HbHpsJVBzwBZQi1Ef'\naccess_token = '918726623662301184-HZLSkCtTVU0EHm6P1ZVmkmjKl8aZvbT'\naccess_token_secret = 'WwN4JwSYtUH4bttxoFPHNq19m40FlSPjSOr4jUwRW0otI'\n\n\nauth = tw.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tw.API(auth, wait_on_rate_limit=True)\n\noauth = OAuth1(consumer_key,\n client_secret=consumer_secret,\n resource_owner_key=access_token,\n resource_owner_secret=access_token_secret)\n\n#%%\n\nREQUIRED_FIELDS = ['id','id_str','screen_name','location','description','url',\n 'followers_count','friends_count', 'listed_count', 'created_at',\n 'favourites_count', 'verified','statuses_count','lang','status',\n 'default_profile','default_profile_image','has_extended_profile',\n 'name','bot']\n\ndef write_to_csv(responses):\n user_l = []\n for response in responses:\n final_fields = {}\n\n try:\n response_dict = response.json()[0]\n except AttributeError:\n response_dict = response\n final_fields['bot'] = 0\n final_fields['status'] = ''\n \n for key, value in response_dict.items():\n if key in REQUIRED_FIELDS:\n final_fields[key] = value\n #print(key)\n \n user_l.append(final_fields)\n\n user_df = pd.DataFrame(user_l) \n #print(user_df)\n\n twitter_user_data = open(os.getcwd() + '/test.csv', 'w',encoding=\"utf-8\")\n user_df[REQUIRED_FIELDS].to_csv(twitter_user_data,index=False)\n #print(twitter_user_data)\n twitter_user_data.close()\n#%% Get all the details\ndef bottell(twitterid):\n uname = twitterid\n print(uname)\n #uname = \"filipe_a_morais\"\n user = api.get_user(uname)\n \"\"\"print(user.name)\n print(user.friends_count)\n print(user.description)\"\"\"\n responses = []\n response = requests.get(url=\"https://api.twitter.com/1.1/users/lookup.json?screen_name=\"+str(uname), auth=oauth)\n if response.status_code == 200:\n responses.append(response)\n write_to_csv(responses)\n \n #%%\n \n \n train_data = pd.read_csv(os.getcwd() + '/kaggle_train.csv')\n \n bot_data = pd.read_csv(os.getcwd() + '/bots_data.csv',engine='python')\n nonbot_data = pd.read_csv(os.getcwd() + '/nonbots_data.csv',engine='python')\n test_data = pd.read_csv(os.getcwd() + '/test.csv')\n \n #%%\n \n train_attr = train_data[\n ['followers_count', 'friends_count', 'listedcount', 'favourites_count', 'statuses_count', 'verified']]\n train_label = train_data[['bot']]\n \n #%%\n \n bot_attr = bot_data[\n ['followers_count', 'friends_count', 'listedcount', 'favourites_count', 'statuses_count', 'verified']]\n bot_label = bot_data[['bot']]\n \n nonbot_attr = nonbot_data[\n ['followers_count', 'friends_count', 'listedcount', 'favourites_count', 'statuses_count', 'verified']]\n nonbot_label = nonbot_data[['bot']]\n \n test_attr = test_data[\n ['followers_count', 'friends_count', 'listed_count', 'favourites_count', 'statuses_count', 'verified']]\n test_label = test_data[['bot']]\n \n #%%\n \n clf = tree.DecisionTreeClassifier()\n \n X = train_attr.as_matrix()\n Y = train_label.as_matrix()\n clf = clf.fit(X, Y)\n \n #%%\n \n actual = np.array(test_label)\n predicted = clf.predict(test_attr)\n pred = np.array(predicted)\n \n if pred==1:\n print(\"Its a bot\")\n else:\n 
print(\"Its not a bot\")\n \n #%%\n \n warnings.filterwarnings('ignore')\n print(pred)\n return(pred)"
] | [
[
"sklearn.tree.DecisionTreeClassifier",
"numpy.array",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
handal95/Timeband | [
"301ebec628dd6543670f57d1ed74ad40a857f99b"
] | [
"source/cleaner.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\n\nfrom tqdm import tqdm\nfrom torch.utils.data import DataLoader\n\nfrom .loss import TIMEBANDLoss\nfrom .model import TIMEBANDModel\nfrom .metric import TIMEBANDMetric\nfrom .dataset import TIMEBANDDataset\nfrom .dashboard import TIMEBANDDashboard\n\n\n# Anomaly Labels\nUPPER_ANOMALY = -1\nMISSING_VALUE = 0\nLOWER_ANOMALY = 1\n\n\nclass TIMEBANDCleaner:\n def __init__(\n self,\n config: dict,\n dataset: TIMEBANDDataset,\n models: TIMEBANDModel,\n metric: TIMEBANDMetric,\n losses: TIMEBANDLoss,\n dashboard: TIMEBANDDashboard,\n ) -> None:\n self.logger = config[\"logger\"]\n\n self.dataset = dataset\n self.models = models\n self.metric = metric\n self.losses = losses\n self.dashboard = dashboard\n\n # Set Config\n config = self.set_config(config=config)\n self.forecast_len = self.dataset.forecast_len\n\n def set_config(self, config: dict = None) -> dict:\n \"\"\"\n Configure settings related to the data set.\n\n params:\n config: Trainer configuration dict\n `config['trainer']`\n \"\"\"\n\n # Train option\n self.__dict__ = {**config, **self.__dict__}\n\n def clean(self, dataset: DataLoader) -> None:\n self.logger.info(\"RUN the model\")\n\n # Prediction\n self.idx = 0\n # self.data_labeling()\n self.pred_initate()\n\n # Dashboard\n self.dashboard.init_figure()\n\n # Process step\n def generate(x):\n return self.models.netG(x)[:, : self.forecast_len].to(self.device)\n\n tqdm_ = tqdm(dataset)\n outputs = self.dataset.observed\n lower_bands = self.dataset.observed\n upper_bands = self.dataset.observed\n\n for i, data in enumerate(tqdm_):\n true_x = data[\"encoded\"].to(self.device)\n true_y = data[\"decoded\"].to(self.device)\n (batchs, forecast_len, target_dims) = true_y.shape\n\n # #######################\n # Generate\n # #######################\n fake_y = generate(true_x)\n\n # #######################\n # Process\n # #######################\n pred_y = self.dataset.denormalize(fake_y.cpu())\n preds, lower, upper = self.predicts(pred_y)\n\n pred_len = preds.shape[0]\n reals = self.dataset.forecast[self.idx : self.idx + pred_len].numpy()\n masks = self.dataset.missing[self.idx : self.idx + pred_len]\n\n output = np.concatenate([outputs[-1:], reals])\n target = self.adjust(output, preds, masks, lower, upper)\n lower_bands = np.concatenate([lower_bands[: 1 - forecast_len], lower])\n upper_bands = np.concatenate([upper_bands[: 1 - forecast_len], upper])\n outputs = np.concatenate([outputs[: 1 - forecast_len], target])\n\n # #######################\n # Visualize\n # #######################\n if i > 8:\n self.dashboard.vis(batchs, reals, preds, lower, upper, target)\n self.idx += batchs\n\n # Dashboard\n self.dashboard.clear_figure()\n\n # OUTPUTS\n lower_cols = [f\"{x}_lower\" for x in self.dataset.targets]\n upper_cols = [f\"{x}_upper\" for x in self.dataset.targets]\n\n index = self.dataset.times\n outputs_df = pd.DataFrame(outputs, columns=self.dataset.targets, index=index)\n\n bands_df = pd.concat(\n [\n pd.DataFrame(lower_bands, columns=lower_cols, index=index),\n pd.DataFrame(upper_bands, columns=upper_cols, index=index),\n ],\n axis=1,\n )\n bands_df.index.name = self.dataset.time_index\n outputs_df.index.name = self.dataset.time_index\n\n return outputs_df, bands_df\n\n def adjust(self, output, preds, masks, lower, upper):\n len = preds.shape[0]\n a = self.missing_gamma\n b = self.anomaly_gamma\n\n for p in range(len):\n value = output[p + 1]\n\n mmask = masks[p]\n lmask = value < lower[p]\n umask = value > upper[p]\n\n value = (1 - 
mmask) * value + mmask * (a * preds[p] + (1 - a) * output[p])\n value = (1 - lmask) * value + lmask * (b * preds[p] + (1 - b) * value)\n value = (1 - umask) * value + umask * (b * preds[p] + (1 - b) * value)\n\n output[p + 1] = value\n\n target = output[1:]\n return target\n\n def pred_initate(self):\n forecast_len = self.dataset.decode_shape[1]\n target_dims = self.dataset.decode_shape[2]\n\n self.preds = np.empty((forecast_len - 1, forecast_len, target_dims))\n self.preds[:] = np.nan\n\n def predicts(self, pred):\n (batch_size, forecast_len, target_dims) = pred.shape\n pred = pred.detach().numpy()\n\n nan_shape = np.empty((batch_size, forecast_len, target_dims))\n nan_shape[:] = np.nan\n\n self.preds = np.concatenate([self.preds[1 - forecast_len :], nan_shape])\n for f in range(forecast_len):\n self.preds[f : batch_size + f, f] = pred[:, f]\n\n preds = np.nanmedian(self.preds, axis=1)\n std = np.nanstd(self.preds, axis=1)\n\n for f in range(forecast_len - 1, 0, -1):\n gamma = (forecast_len - f) / (forecast_len - 1)\n std[-f] += std[-f - 1] * gamma\n\n lower = preds - self.band_width * std\n upper = preds + self.band_width * std\n\n return preds, lower, upper\n\n def data_labeling(self):\n if not self.labeling:\n return\n\n self.label_data = np.empty(self.target_data.shape)\n self.label_data[:] = np.nan\n self.outputs = self.target_data.to_numpy()\n self.labels = pd.DataFrame(\n self.label_data,\n columns=self.target_col,\n index=self.dataset.times,\n )\n\n if self.zero_is_missing:\n self.labels[self.target_data == 0] = MISSING_VALUE\n self.logger.info(f\"A value of 0 is recognized as a missing value.\")\n\n labels_path = os.path.join(self.directory, f\"{self.data_name}_label.csv\")\n self.labels.to_csv(labels_path)\n\n self.logger.info(f\"CSV saved at {labels_path}\")\n"
] | [
[
"numpy.nanmedian",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.nanstd",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
generall/hnswlib | [
"e54e865b246fb97a9a1b865308124e0b547a102c"
] | [
"python_bindings/tests/conditional_search.py"
] | [
"import unittest\nfrom typing import List\nimport tqdm\nimport numpy as np\nfrom statsmodels.stats.proportion import proportion_confint\nfrom collections import defaultdict\n\n\ndef calc_precision_at(found_pos: List[int], limit):\n hits = np.array(found_pos) < limit\n return np.mean(hits), proportion_confint(sum(hits), len(found_pos))\n\ndef cosine_similarity(vector, matrix):\n return (np.sum(vector * matrix, axis=1) / (\n np.sqrt(np.sum(matrix ** 2, axis=1)) * np.sqrt(np.sum(vector ** 2))))\n\ndef get_closest_brut(target, data, mask):\n true_distance = 1 - cosine_similarity(target, data)\n np.putmask(true_distance, ~mask, 1_000_000)\n closest = list(np.argsort(true_distance))\n return closest\n\ndef get_constraints(num_elements, parts_count, part_ids):\n\n mask = np.zeros(num_elements)\n for part_id in part_ids:\n mask = np.logical_or(mask, np.arange(0, num_elements) % parts_count == part_id)\n\n tags = defaultdict(list)\n for i in range(num_elements):\n tags[i % parts_count].append(i)\n\n condition = [[(False, part_id) for part_id in part_ids]]\n\n return tags, mask, condition\n\ndef get_random_vector(dim):\n return np.float32(np.random.random((1, dim)))\n\ndef get_top_fount(true_labels, found_labels):\n found_top = []\n for found_label in found_labels:\n found_top.append(true_labels.index(found_label))\n return found_top\n\n\nclass ConditionalSeachTestCase(unittest.TestCase):\n\n def test_complex_condition(self):\n import hnswlib \n dim = 50\n elements = 10_000\n\n hnsw = hnswlib.Index(space='cosine', dim=dim)\n hnsw.init_index(max_elements = elements, ef_construction = 10, M = 16, random_seed=45)\n hnsw.set_num_threads(1)\n\n tags, mask, condition = get_constraints(elements, 100, [66, 45, 21, 55, 12, 99, 0, 4, 83])\n points = np.random.rand(elements, dim)\n hnsw.add_items(points)\n for tag, ids in tqdm.tqdm(tags.items()):\n hnsw.add_tags(ids, tag)\n hnsw.index_tagged(tag)\n \n hnsw.save_index('tmp_index.hnsw')\n\n hnsw2 = hnswlib.Index(space='cosine', dim=dim)\n hnsw2.load_index('tmp_index.hnsw')\n \n target = get_random_vector(dim)\n\n found, _ = hnsw2.knn_query(target, k=10, conditions=condition)\n\n\n def test_random_subsample(self):\n import hnswlib \n dim = 50\n elements = 10_000\n attempts = 100\n\n points = np.random.rand(elements, dim)\n\n tags, mask, condition = get_constraints(elements, 100, [66])\n\n hnsw = hnswlib.Index(space='cosine', dim=dim)\n hnsw.init_index(max_elements = elements, ef_construction = 10, M = 16, random_seed=45)\n hnsw.set_num_threads(1)\n\n hnsw.add_items(points)\n\n for tag, ids in tqdm.tqdm(tags.items()):\n hnsw.add_tags(ids, tag)\n hnsw.index_tagged(tag)\n\n top_hits = []\n for _ in range(attempts):\n target = get_random_vector(dim)\n true_closest = get_closest_brut(target, points, mask)\n found, _ = hnsw.knn_query(target, k=10, conditions=condition)\n top_found = get_top_fount(true_closest, found[0])\n top_hit = top_found[0]\n top_hits.append(top_hit)\n \n precision, conf_interval = calc_precision_at(top_hits, 1)\n print(\"precision:\", precision)\n\n self.assertGreater(precision, 0.9)\n\n\nif __name__ == \"__main__\":\n unittest.main()"
] | [
[
"numpy.putmask",
"numpy.random.random",
"numpy.arange",
"numpy.mean",
"numpy.random.rand",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
njuaplusplus/mirror | [
"133969741b87daafd367cb4fd3612076e7e4ff05",
"133969741b87daafd367cb4fd3612076e7e4ff05"
] | [
"genforce/my_get_GD.py",
"my_test_confidence.py"
] | [
"# python3.7\n\"\"\"This file is modifed from synthesize.py. The goal is to return a generator which output an image in range [0., 1.]\"\"\"\n\nimport os\nimport argparse\nimport subprocess\nfrom tqdm import tqdm\nimport numpy as np\n\nimport torch\nfrom torchvision.utils import save_image\n\nfrom .models import MODEL_ZOO\nfrom .models import build_generator, build_discriminator\nfrom .utils.misc import bool_parser\nfrom .utils.visualizer import HtmlPageVisualizer\n\n\ndef postprocess(images):\n \"\"\"change the range from [-1, 1] to [0., 1.]\"\"\"\n images = torch.clamp((images + 1.) / 2., 0., 1.)\n return images\n\n\ndef parse_args(model_name, num, batch_size, trunc_psi=0.7, trunc_layers=8):\n \"\"\"Parses arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description='Synthesize images with pre-trained models.')\n parser.add_argument('model_name', type=str,\n help='Name to the pre-trained model.')\n parser.add_argument('--save_dir', type=str, default=None,\n help='Directory to save the results. If not specified, '\n 'the results will be saved to '\n '`work_dirs/synthesis/` by default. '\n '(default: %(default)s)')\n parser.add_argument('--num', type=int, default=num,\n help='Number of samples to synthesize. '\n '(default: %(default)s)')\n parser.add_argument('--batch_size', type=int, default=batch_size,\n help='Batch size. (default: %(default)s)')\n parser.add_argument('--seed', type=int, default=0,\n help='Seed for sampling. (default: %(default)s)')\n parser.add_argument('--trunc_psi', type=float, default=trunc_psi,\n help='Psi factor used for truncation. This is '\n 'particularly applicable to StyleGAN (v1/v2). '\n '(default: %(default)s)')\n parser.add_argument('--trunc_layers', type=int, default=trunc_layers,\n help='Number of layers to perform truncation. This is '\n 'particularly applicable to StyleGAN (v1/v2). '\n '(default: %(default)s)')\n parser.add_argument('--randomize_noise', type=bool_parser, default=False,\n help='Whether to randomize the layer-wise noise. This '\n 'is particularly applicable to StyleGAN (v1/v2). 
'\n '(default: %(default)s)')\n # return parser.parse_args([model_name, f'--num={num}', f'--batch_size={batch_size}', ])\n return parser.parse_args([model_name, ])\n\n\ndef main(device, model_name, num, batch_size, use_w_space=True, use_discri=True, repeat_w=True, use_z_plus_space=False, trunc_psi=0.7, trunc_layers=8):\n \"\"\"Main function.\"\"\"\n args = parse_args(model_name, num, batch_size, trunc_psi, trunc_layers)\n print(args)\n if args.num <= 0:\n return\n\n # Parse model configuration.\n if args.model_name not in MODEL_ZOO:\n raise SystemExit(f'Model `{args.model_name}` is not registered in '\n f'`models/model_zoo.py`!')\n model_config = MODEL_ZOO[args.model_name].copy()\n url = model_config.pop('url') # URL to download model if needed.\n\n # Get work directory and job name.\n if args.save_dir:\n work_dir = args.save_dir\n else:\n work_dir = os.path.join('work_dirs', 'synthesis')\n os.makedirs(work_dir, exist_ok=True)\n\n # Build generation and get synthesis kwargs.\n print(f'Building generator for model `{args.model_name}` ...')\n if model_name.startswith('stylegan'):\n generator = build_generator(**model_config, repeat_w=repeat_w)\n else:\n generator = build_generator(**model_config)\n synthesis_kwargs = dict(trunc_psi=args.trunc_psi,\n trunc_layers=args.trunc_layers,\n randomize_noise=args.randomize_noise)\n print('Finish building generator.')\n\n # Build discriminator\n if use_discri:\n print(f'Building discriminator for model `{args.model_name}` ...')\n discriminator = build_discriminator(**model_config)\n print('Finish building discriminator.')\n else:\n discriminator = None\n\n # Load pre-trained weights.\n os.makedirs('checkpoints', exist_ok=True)\n checkpoint_path = os.path.join('checkpoints', args.model_name + '.pth')\n print(f'Loading checkpoint from `{checkpoint_path}` ...')\n if not os.path.exists(checkpoint_path):\n print(f' Downloading checkpoint from `{url}` ...')\n subprocess.call(['wget', '--quiet', '-O', checkpoint_path, url])\n print(' Finish downloading checkpoint.')\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\n\n if 'generator_smooth' in checkpoint:\n generator.load_state_dict(checkpoint['generator_smooth'])\n else:\n generator.load_state_dict(checkpoint['generator'])\n generator = generator.to(device)\n generator.eval()\n if use_discri:\n discriminator.load_state_dict(checkpoint['discriminator'])\n discriminator = discriminator.to(device)\n discriminator.eval()\n print('Finish loading checkpoint.')\n\n # Set random seed.\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n def fake_generator(code):\n # Sample and synthesize.\n # print(f'Synthesizing {args.num} samples ...')\n # code = torch.randn(args.batch_size, generator.z_space_dim).cuda()\n if use_z_plus_space:\n code = generator.mapping(code)['w']\n code = code.view(args.batch_size, generator.num_layers, generator.w_space_dim)\n images = generator(code, **synthesis_kwargs, use_w_space=use_w_space)['image']\n images = postprocess(images)\n # save_image(images, os.path.join(work_dir, 'tmp.png'), nrow=5)\n # print(f'Finish synthesizing {args.num} samples.')\n return images\n\n return Fake_G(generator, fake_generator), discriminator\n\n\nclass Fake_G:\n\n def __init__(self, G, g_function):\n self.G = G\n self.g_function = g_function\n\n def __call__(self, code):\n # print(f'code.shape {code.shape}')\n return self.g_function(code)\n\n def zero_grad(self):\n self.G.zero_grad()\n\n\nif __name__ == '__main__':\n # main('stylegan_ffhq1024', 7, 7)\n # main('stylegan_ffhq256', 35, 
35)\n main('stylegan_celeba_partial256', 35, 35)\n",
"#!/usr/bin/env python3\n# coding=utf-8\nimport argparse\nimport glob\nimport os\nimport sys\n\nfrom PIL import Image\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms.functional as F\nfrom torchvision.utils import save_image\n\nfrom facenet_pytorch import InceptionResnetV1\nimport vgg_m_face_bn_dag\nimport resnet50_scratch_dag\nimport vgg_face_dag\nimport net_sphere\nimport ccs19_model_inversion\nfrom my_utils import crop_img, resize_img, normalize, my_select_ind\nfrom my_target_models import get_model\n\n\ndef myprint(*args, **kwargs):\n if False:\n print(*args, **kwargs)\n\n\ndef normalize_tensor(inputs, arch_name, image_resolution, crop_face=False):\n if crop_face:\n inputs = resize_img(inputs, 256)\n inputs = inputs[..., 34:214, 40:220]\n else:\n inputs = crop_img(inputs, arch_name)\n inputs = resize_img(inputs, image_resolution)\n inputs = normalize(inputs*255., arch_name)\n myprint(f'loaded inputs shape: {inputs.shape}')\n return inputs\n\n\[email protected]_grad()\ndef test_final_result(arch_name=None, dirs=None, external_args=None):\n\n if arch_name is None or dirs is None:\n parser = argparse.ArgumentParser()\n parser.add_argument('arch_name', help='network architecture')\n parser.add_argument('root_dir', help='the dir of the final images')\n parser.add_argument('--latent_space', choices=['w', 'z', 'z+', 'w+'], help='evaluate batch with another model')\n parser.add_argument('genforce_model', choices=['pggan_celebahq1024', 'stylegan_celeba_partial256', 'stylegan_ffhq256', 'stylegan2_ffhq1024', 'stylegan_cat256', 'stylegan_car512', ], help='genforce gan model')\n parser.add_argument('--bs', default=8, type=int, help='batch size')\n\n args = parser.parse_args()\n external_args = args\n\n arch_name = args.arch_name\n batch_size = args.bs\n dirs = [args.root_dir, ]\n else:\n assert arch_name is not None and dirs is not None\n if isinstance(dirs, str):\n dirs = [dirs, ]\n batch_size = external_args.bs\n\n assert external_args is not None\n\n device = 'cuda'\n\n if arch_name == 'sphere20a':\n sphere20_theta_net = getattr(net_sphere, 'sphere20a')(use_theta=True)\n sphere20_theta_net.load_state_dict(torch.load('./sphere20a_20171020.pth'))\n sphere20_theta_net.to(device)\n\n # net = get_model(arch_name, device, use_dropout=external_args.use_dropout)\n net = get_model(arch_name, device) # we test the results on the original network\n\n try:\n label_logits_dict = torch.load(os.path.join('./centroid_data', arch_name, 'test/centroid_logits.pt'))\n except FileNotFoundError:\n print('Note: centroid_logits.pt is not found')\n label_logits_dict = None\n\n image_resolution = 224\n if arch_name.startswith('inception_resnetv1'):\n image_resolution = 160\n elif arch_name == 'sphere20a':\n image_resolution = (112, 96)\n elif arch_name == 'car_resnet34':\n image_resolution = 400\n\n # fake\n my_selected_inds = []\n\n use_w_space = 'w' in external_args.latent_space\n repeat_w = '+' not in external_args.latent_space # if False, opt w+ space\n # num_layers = 14 # 14 for stylegan w+ space\n # num_layers = 18 # 14 for stylegan w+ space with stylegan_celebahq1024\n\n genforce_model = external_args.genforce_model\n if not genforce_model.startswith('stylegan'):\n use_w_space = False\n\n if external_args.latent_space == 'z+':\n use_z_plus_space = True # to use z+ space, set this and use_w_space to be true and repeat_w to be false\n use_w_space = True\n else:\n use_z_plus_space = False\n\n def get_generator(batch_size, device):\n from genforce import my_get_GD\n use_discri 
= False\n generator, discri = my_get_GD.main(device, genforce_model, batch_size, batch_size, use_w_space=use_w_space, use_discri=use_discri, repeat_w=repeat_w, use_z_plus_space=use_z_plus_space)\n return generator\n\n generator = get_generator(batch_size, device)\n\n all_confs = []\n correct_cnt = 0\n topk_correct_cnt = 0\n total_cnt = 0\n l2_dist = []\n conf_diff_scores = []\n my_selected_inds = []\n all_images = []\n for root_dir in dirs:\n tensor_files = sorted(glob.glob(os.path.join(root_dir, 'img_*.pt')))\n all_tensor_files = {}\n for f in tensor_files:\n f_basename = os.path.basename(f).split('_')\n id_in_batch = int(f_basename[2][2:])\n target = int(f_basename[1][5:])\n all_tensor_files[id_in_batch] = (target, f)\n\n all_targets = [all_tensor_files[k][0] for k in sorted(all_tensor_files.keys())]\n\n latent_in = torch.load(os.path.join(root_dir, 'latent_inputs.pt')).to(device)\n\n assert batch_size == len(all_targets)\n\n images = generator(latent_in)\n all_images.append(images)\n save_image(images, './tmp/my_test_confidence.png')\n images = normalize_tensor(images, arch_name, image_resolution)\n\n outputs = net(images.to(device))\n if arch_name == 'sphere20a':\n outputs = outputs[0]\n logits = sphere20_theta_net(images.to(device)).cpu()\n else:\n logits = outputs.detach().cpu()\n\n if external_args.my_select:\n # NOTE: here we assume all_targets have the same target element\n if external_args.my_ce_select:\n t_ce = nn.functional.softmax(outputs, dim=1)[:, all_targets[0]]\n my_selected_ind = torch.argmax(t_ce).item()\n else:\n my_selected_ind = my_select_ind(outputs, all_targets[0])\n myprint('my_selected_ind:', my_selected_ind)\n my_selected_inds.append(my_selected_ind)\n outputs = outputs[my_selected_ind:my_selected_ind+1]\n logits = logits[my_selected_ind:my_selected_ind+1]\n all_targets = all_targets[my_selected_ind:my_selected_ind+1]\n\n conf_diff_scores.extend(compute_confidence_score(outputs, all_targets))\n outputs = nn.functional.softmax(outputs, dim=1)\n conf_res = []\n for i, t in enumerate(all_targets):\n conf_res.append(f'{outputs[i][t].item():.4f}')\n if arch_name == 'sphere20a':\n label_logits_dict and l2_dist.append(torch.dist(logits[i], label_logits_dict[t]).item())\n else:\n label_logits_dict and l2_dist.append(torch.dist(logits[i], label_logits_dict[t]).item())\n all_confs.append([outputs[i][t].item() for i, t in enumerate(all_targets)])\n myprint('confidence:', ' '.join(conf_res))\n k = 5\n myprint(f'top-{k} labels')\n topk_conf, topk_class = torch.topk(outputs, k, dim=1)\n myprint(topk_conf)\n myprint(topk_class)\n total_cnt += len(all_targets)\n for i, t in enumerate(all_targets):\n if topk_class[i][0] == t:\n correct_cnt += 1\n if t in topk_class[i]:\n topk_correct_cnt += 1\n l2_dist = len(l2_dist) and sum(l2_dist)/len(l2_dist)\n myprint('l2 dist:', l2_dist)\n conf_diff_score = sum(conf_diff_scores)/len(conf_diff_scores)\n myprint(f'conf_diff_scores {len(conf_diff_scores)}: {conf_diff_score}')\n\n return all_confs, correct_cnt, topk_correct_cnt, total_cnt, l2_dist, conf_diff_score, my_selected_inds\n\n\ndef compute_confidence_score(outputs, all_targets):\n outputs = outputs.clone()\n target_conf_scores = []\n for i, t in enumerate(all_targets):\n output = outputs[i]\n conf_score = output[t].item()\n output[t] = 0.\n other_max_score = output.max().item()\n target_conf_scores.append(conf_score-other_max_score)\n return target_conf_scores\n\n\nif __name__ == '__main__':\n test_final_result()\n"
] | [
[
"torch.manual_seed",
"torch.clamp",
"numpy.random.seed",
"torch.load"
],
[
"torch.nn.functional.softmax",
"torch.load",
"torch.no_grad",
"torch.topk",
"torch.dist",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ndangtt/DACBench | [
"ddb94bfd30f885901562b3cad3320857da2c768d"
] | [
"tests/envs/test_onell.py"
] | [
"import unittest\nfrom unittest import mock\n\nimport numpy as np\nfrom dacbench import AbstractEnv\nfrom dacbench.envs import OneLLEnv\nfrom dacbench.envs.onell_env import OneMax, LeadingOne\nfrom dacbench.additional_configs.onell.configs import onell_lbd_theory, onell_lbd_onefifth, onell_lbd_p_c, onell_lbd1_lbd2_p_c\nfrom dacbench.abstract_benchmark import objdict\n\nonell_configs = [onell_lbd_theory, onell_lbd_onefifth, onell_lbd_p_c, onell_lbd1_lbd2_p_c]\nclass TestOneLLEnv(unittest.TestCase):\n def make_env(self, config): \n config['instance_set'] = {0: objdict({'size': 2000, 'max_evals': 30000})}\n env = OneLLEnv(config) \n return env\n\n def test_setup(self):\n for config in onell_configs:\n env = self.make_env(config)\n self.assertTrue(issubclass(type(env), AbstractEnv))\n self.assertFalse(env.rng is None)\n\n for var_name in ['include_xprime', 'count_different_inds_only']: \n self.assertTrue(vars(env)[var_name] == config[var_name]) \n self.assertTrue(env.problem == globals()[config.problem])\n\n self.assertTrue(len(env.state_var_names) == len(env.state_functions))\n\n def test_reset(self):\n for config in onell_configs:\n env = self.make_env(config)\n env.reset()\n self.assertFalse(env.n is None)\n self.assertFalse(env.max_evals is None)\n self.assertFalse(env.x is None)\n self.assertTrue(env.total_evals==1)\n\n def test_get_state(self): \n # basic tests\n for config in onell_configs: \n env = self.make_env(config)\n state = env.reset()\n self.assertTrue(issubclass(type(state), np.ndarray))\n self.assertTrue(len(env.state_var_names) == len(state))\n\n # test if histories are updated and retrieved correctly\n env = self.make_env(onell_lbd_onefifth)\n state = env.reset()\n for i in range(10):\n state, reward, done, _ = env.step(np.random.choice(np.arange(10,20),size=1))\n self.assertTrue((env.history_fx[-1] - env.history_fx[-2]) == state[1])\n \n def test_step(self): \n for config in onell_configs: \n env = self.make_env(config)\n state = env.reset()\n action = env.action_space.sample() \n self.assertTrue(action.shape[0] == len(env.action_var_names)) \n state, reward, done, _ = env.step(action) \n self.assertTrue(issubclass(type(state), np.ndarray))\n self.assertTrue(len(env.state_var_names) == len(state))\n \n#TestOneLLEnv().test_setup()\n#TestOneLLEnv().test_reset()\n#TestOneLLEnv().test_get_state()\n#TestOneLLEnv().test_step()\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
akshay-jaggi/basic_neural_processing_modules | [
"96dd6b0a507b730aa1883109a87e7e22636dd50d"
] | [
"indexing.py"
] | [
"import numpy as np\nfrom numba import jit, njit, prange\nimport copy\nimport scipy.signal\nimport matplotlib.pyplot as plt\n\n\ndef widen_boolean(arr, n_before, n_after, axis=None):\n '''\n Widens boolean events by n_before and n_after. \n RH 2021 \n\n Args:\n arr (np.ndarray):\n Input array. Widening will be applied\n to the last dimension.\n n_before (int):\n Number of samples before 'True' values\n that will also be set to 'True'.\n n_after (int):\n Number of samples after 'True' values\n that will also be set to 'True'.\n axis (int):\n Axis to apply the event widening.\n If None then arr should be a 1-D array.\n \n Returns:\n widened arr (np.ndarray, dtype=bool):\n Output array. Same as input arr, but\n with additional 'True' values before\n and after initial 'True' values.\n '''\n \n kernel = np.zeros(np.max(np.array([n_before, n_after])) * 2 + 1)\n kernel_center = int(np.ceil(len(kernel) / 2))\n kernel[kernel_center - (n_before+1): kernel_center] = 1\n kernel[kernel_center: kernel_center + n_after] = 1\n kernel = kernel / np.mean(kernel)\n \n if axis is None:\n return np.bool8(scipy.signal.convolve(arr, kernel, mode='same'))\n else:\n return np.bool8(np.apply_along_axis(lambda m: scipy.signal.convolve(m, kernel, mode='same'),\n axis=axis, arr=arr))\n\n\n# @njit\ndef idx2bool(idx, length=None):\n '''\n Converts a vector of indices to a boolean vector.\n RH 2021\n\n Args:\n idx (np.ndarray):\n 1-D array of indices.\n length (int):\n Length of boolean vector.\n If None then length will be set to\n the maximum index in idx + 1.\n \n Returns:\n bool_vec (np.ndarray):\n 1-D boolean array.\n '''\n if length is None:\n length = np.uint64(np.max(idx) + 1)\n out = np.zeros(length, dtype=np.bool8)\n out[idx] = True\n return out\n\ndef bool2idx(bool_vec):\n '''\n Converts a boolean vector to indices.\n RH 2021\n\n Args:\n bool_vec (np.ndarray):\n 1-D boolean array.\n \n Returns:\n idx (np.ndarray):\n 1-D array of indices.\n '''\n return np.where(bool_vec)[0]\n\ndef moduloCounter_to_linearCounter(trace, modulus, modulus_value, diff_thresh=None, plot_pref=False):\n '''\n Converts a (sawtooth) trace of modulo counter\n values to a linear counter.\n Useful for converting a pixel clock with a modulus\n to total times. Use this for FLIR camera top pixel\n stuff.\n The function basically just finds where the modulus\n events occur in the trace and adds 'modulus_value'\n to the next element in the trace.\n RH 2021\n\n Args:\n trace (np.ndarray):\n 1-D array of modulo counter values.\n modulus (scalar):\n Modulus of the counter. Values in trace\n should range from 0 to modulus-1.\n modulus_value (scalar):\n Multiplier for the modulus counter. 
The\n value of a modulus event.\n diff_thresh (scalar):\n Threshold for defining a modulus event.\n Should typically be a negative value\n smaller than 'modulus', but larger\n than the difference between consecutive\n trace values.\n plot_pref (bool):\n Whether or not to plot the trace.\n\n Returns:\n linearCounter (np.ndarray):\n 1-D array of linearized counter values.\n '''\n\n if diff_thresh is None:\n diff_thresh = -modulus/2\n\n diff_trace = np.diff(np.double(trace))\n mod_times = np.where(diff_trace<diff_thresh)[0]\n\n\n mod_times_bool = np.zeros(len(trace))\n mod_times_bool[mod_times+1] = modulus_value\n mod_times_steps = np.cumsum(mod_times_bool)\n trace_times = (trace/modulus)*modulus_value + mod_times_steps\n\n if plot_pref:\n plt.figure()\n plt.plot(trace)\n plt.plot(mod_times , trace[mod_times] , 'o')\n\n plt.figure()\n plt.plot(mod_times_steps)\n plt.plot(trace_times)\n \n return trace_times\n\n\n@njit\ndef binary_search(arr, lb, ub, val):\n '''\n Recursive binary search\n adapted from https://www.geeksforgeeks.org/python-program-for-binary-search/\n RH 2021\n \n Args:\n arr (sorted list):\n 1-D array of numbers that are already sorted.\n To use numba's jit features, arr MUST be a\n typed list. These include:\n - numpy.ndarray (ideally np.ascontiguousarray)\n - numba.typed.List\n lb (int):\n lower bound index.\n ub (int):\n upper bound index.\n val (scalar):\n value being searched for\n \n Returns:\n output (int):\n index of val in arr.\n returns -1 if value is not present\n \n Demo:\n # Test array\n arr = np.array([ 2, 3, 4, 10, 40 ])\n x = 100\n\n # Function call\n result = binary_search(arr, 0, len(arr)-1, x)\n\n if result != -1:\n print(\"Element is present at index\", str(result))\n else:\n print(\"Element is not present in array\")\n '''\n # Check base case\n if ub >= lb:\n \n mid = (ub + lb) // 2\n \n # If element is present at the middle itself\n if arr[mid] == val:\n return mid\n \n # If element is smaller than mid, then it can only\n # be present in left subarray\n elif arr[mid] > val:\n return binary_search(arr, lb, mid - 1, val)\n \n # Else the element can only be present in right subarray\n else:\n return binary_search(arr, mid + 1, ub, val)\n \n else:\n # Element is not present in the array\n return -1\n\n\ndef get_last_True_idx(input_array):\n '''\n for 1-d arrays only. 
gets idx of last entry\n that == True\n RH 2021\n '''\n nz = np.nonzero(input_array)[0]\n print(nz.size)\n if nz.size==0:\n output = len(input_array)-1\n else:\n output = np.max(nz)\n# print(output)\n return output\n\n\ndef make_batches(iterable, batch_size=None, num_batches=5, min_batch_size=0):\n \"\"\"\n Make batches of data or any other iterable.\n RH 2021\n\n Args:\n iterable (iterable):\n iterable to be batched\n batch_size (int):\n size of each batch\n if None, then batch_size based on num_batches\n num_batches (int):\n number of batches to make\n min_batch_size (int):\n minimum size of each batch\n \n Returns:\n output (iterable):\n batches of iterable\n \"\"\"\n l = len(iterable)\n \n if batch_size is None:\n batch_size = np.int64(np.ceil(l / num_batches))\n \n for start in range(0, l, batch_size):\n end = min(start + batch_size, l)\n if (end-start) < min_batch_size:\n break\n else:\n yield iterable[start:end]\n\n\n\n@njit\ndef find_nearest(array, value):\n '''\n Finds the value and index of the nearest\n value in an array.\n RH 2021\n \n Args:\n array (np.ndarray):\n Array of values to search through.\n value (scalar):\n Value to search for.\n\n Returns:\n array_idx (int):\n Index of the nearest value in array.\n array_val (scalar):\n Value of the nearest value in array.\n '''\n array = np.asarray(array)\n idx = (np.abs(array - value)).argmin()\n return array[idx] , idx\n@njit(parallel=True)\ndef find_nearest_array(array, values):\n '''\n Finds the values and indices of the nearest\n values in an array.\n RH 2021\n\n Args:\n array (np.ndarray):\n Array of values to search through.\n values (np.ndarray):\n Values to search for.\n\n Returns:\n array_idx (np.ndarray):\n Indices of the nearest values in array.\n array_val (np.ndarray):\n Values of the nearest values in array.\n '''\n vals_nearest = np.zeros_like(values)\n idx_nearest = np.zeros_like(values)\n for ii in prange(len(values)):\n vals_nearest[ii] , idx_nearest[ii] = find_nearest(array , values[ii])\n return vals_nearest, idx_nearest\n\n\ndef pad_with_singleton_dims(array, n_dims):\n arr_out = copy.copy(array)\n while arr_out.ndim < n_dims:\n arr_out = np.expand_dims(arr_out, -1)\n return arr_out\n\n\nclass lazy_repeat_item():\n \"\"\"\n Makes a lazy iterator that repeats an item.\n RH 2021\n \"\"\"\n def __init__(self, item, pseudo_length=None):\n \"\"\"\n Args:\n item (any object):\n item to repeat\n pseudo_length (int):\n length of the iterator.\n \"\"\"\n self.item = item\n self.pseudo_length = pseudo_length\n\n def __getitem__(self, i):\n \"\"\"\n Args:\n i (int):\n index of item to return.\n Ignored if pseudo_length is None.\n \"\"\"\n if self.pseudo_length is None:\n return self.item\n elif i < self.pseudo_length:\n return self.item\n else:\n raise IndexError('Index out of bounds')\n\n\n def __len__(self):\n return self.pseudo_length\n\n def __repr__(self):\n return repr(self.item)\n\n\ndef index_with_nans(values, indices):\n \"\"\"\n Creates an array of values with the same shape\n as indices, but with nans where indices are NaN.\n RH 2022\n\n Args:\n values (np.ndarray):\n values to index from\n indices (np.ndarray, dtype=float):\n indices to index into values\n\n Returns:\n output (np.ndarray):\n array of values indexed by indices\n \"\"\"\n values = np.concatenate((np.array([np.nan]), values))\n indices += 1\n indices[np.isnan(indices)] = 0\n \n return values[indices.astype(np.int64)]\n "
] | [
[
"numpy.expand_dims",
"numpy.abs",
"numpy.nonzero",
"numpy.asarray",
"numpy.isnan",
"numpy.double",
"numpy.cumsum",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.ceil",
"numpy.mean",
"numpy.zeros_like",
"numpy.array",
"numpy.zeros",
"numpy.where",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
faizanahemad/Hybrid-Weighted-Embedding-Recommender | [
"457c4f13521aefa70476947c5849e85482abc3d4"
] | [
"hwer/ncf.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .gcn import *\n\n\nclass NCF(nn.Module):\n def __init__(self, feature_size, depth, gaussian_noise):\n super(NCF, self).__init__()\n noise = GaussianNoise(gaussian_noise)\n layers = [noise]\n for layer_idx in range(1, depth + 1):\n iw = 4 if layer_idx == 2 else 2\n ow = 1 if layer_idx == depth else (4 if layer_idx == 1 else 2)\n wx = nn.Linear(feature_size * iw, feature_size * ow)\n init_fc(wx, 'xavier_uniform_', 'leaky_relu', 0.01)\n layers.extend([wx, nn.LeakyReLU(negative_slope=0.01)])\n\n w_out = nn.Linear(feature_size, 1)\n init_fc(w_out, 'xavier_uniform_', 'sigmoid')\n layers.extend([w_out, nn.Sigmoid()])\n self.W = nn.Sequential(*layers)\n\n def forward(self, src, dst, g_src, g_dst):\n vec = torch.cat([g_src, g_dst], 1)\n ncf = self.W(vec).flatten()\n return ncf\n\n\nclass RecImplicit(nn.Module):\n def __init__(self, gcn: GraphConvModule, ncf: NCF):\n super(RecImplicit, self).__init__()\n self.gcn = gcn\n self.ncf = ncf\n\n def forward(self, nf, src, dst):\n h_output = self.gcn(nf)\n h_src = h_output[nf.map_from_parent_nid(-1, src, True)]\n h_dst = h_output[nf.map_from_parent_nid(-1, dst, True)]\n return self.ncf(src, dst, h_src, h_dst)\n\n\n"
] | [
[
"torch.nn.Sequential",
"torch.cat",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.LeakyReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TheCleric/surgeo | [
"018ff69a817ea115d6d42dc0d7d5b7b09db331e4"
] | [
"surgeo/app/surgeo_gui.py"
] | [
"\"\"\"Script containing a basic GUI program.\"\"\"\n\nimport pathlib\nimport sys\nimport traceback\n\nimport tkinter as tk\nimport tkinter.ttk as ttk\nimport tkinter.filedialog as filedialog\nimport tkinter.messagebox as messagebox\n\nimport pandas as pd\n\nimport surgeo\n\nfrom surgeo.utility.surgeo_exception import SurgeoException\nfrom surgeo.models.geocode_model import GeocodeModel\nfrom surgeo.models.surgeo_model import SurgeoModel\nfrom surgeo.models.surname_model import SurnameModel\n\n\nclass SurgeoGUI(object):\n \"\"\"A GUI application class to function as an executable\n\n This script creates a single window tkinter application. This\n application allows a user to specify inputs/outputs, specify which\n model that they want to run, and then define the column headers for the\n ZCTA and surname fields in their data. It then runs the model and\n stores the results in a file.\n\n It also has various helper functions to integrate the surgeo logic\n within the program.\n\n It currently supports .xlsx, .xls, and .csv inputs; it currently\n supports .xlsx and .csv outputs.\n\n \"\"\"\n\n def __init__(self):\n # Create dictionary to track all objects and populate with root\n self._objects = {'root': tk.Tk()}\n # https://cx-freeze.readthedocs.io/en/latest/faq.html#using-data-files\n # If it's frozen, we can't use __file__\n if getattr(sys, 'frozen', False):\n # The application is frozen\n freeze_package = pathlib.Path(sys.executable).parents[0]\n self._package_root = freeze_package / 'Lib' / 'surgeo'\n else:\n # The application is not frozen\n self._package_root = pathlib.Path(__file__).parents[1]\n self._app_static = self._package_root / 'static'\n\n def main(self):\n \"\"\"This is the entry point for the GUI program.\n\n It calls a function to create a root window, then adds widgets to\n that root window, and then finally triggers the tkinter main loop.\n\n \"\"\"\n # Application setup\n self._window_setup()\n self._add_widgets()\n # Run mainloop\n self._objects['root'].mainloop()\n\n def _window_setup(self):\n \"\"\"This sets up the main window.\"\"\"\n # Create a frame object for layouts.\n self._objects['frame'] = tk.Frame(master=self._objects['root'])\n # Set title and window size\n self._objects['root'].title(f\"Surgeo v.{surgeo.VERSION}\")\n self._objects['root'].minsize(700, 150)\n # Bind enter to a function that starts the analysis\n self._objects['root'].bind('<Return>', self._execute)\n # Add icon\n self._objects['root'].tk.call(\n 'wm', \n 'iconphoto', \n self._objects['root']._w, \n tk.PhotoImage(\n file=str(self._app_static / 'logo.gif')\n )\n )\n\n def _select_input(self):\n \"\"\"File selection window for input path (button triggered)\"\"\"\n # Get filename from dialog\n input_filename = filedialog.askopenfilename(\n title='Select Input Path',\n filetypes=(\n ('CSV files' , '*.csv' ),\n ('Excel XLSX', '*.xlsx'),\n ('Excel XLS' , '*.xls' )\n )\n )\n # Populate variable (in turn, updates screen)\n self._objects['input_var'].set(input_filename)\n\n def _select_output(self):\n \"\"\"File selection window for output path (button triggered)\"\"\"\n # This has to be used twice (filetypes and defaultextention)\n files = (\n ('CSV files' , '*.csv' ),\n ('Excel XLSX', '*.xlsx'),\n )\n # Get filename from dialog\n output_filename = filedialog.asksaveasfilename(\n title='Select Output Path',\n filetypes=files,\n defaultextension=files,\n )\n # Populate variable (in turn updates screen)\n self._objects['output_var'].set(output_filename)\n\n def _add_widgets(self):\n \"\"\"This huge 
function sets up the Surgeo interface row by row.\n\n For a visual, see:\n https://github.com/theonaunheim/surgeo/static/surgeo_example.gif\n\n Generally speaking, this goes row by row to create each and every\n widget used. The widget is created, assigned to a spot in the\n window using its .grid() method, and then it is stored in the\n self._objects to allow for later reference.\n \"\"\"\n # Root alias to save typing\n root = self._objects['root']\n #######################################################################\n # Row 1\n #######################################################################\n # File selection label\n select_label = ttk.Label(root, text='Input File')\n select_label.grid(row=0, column=0, padx=10, sticky='w')\n self._objects['select_label'] = select_label\n # File Selection Path Label and variable to store its data\n input_var = tk.StringVar()\n self._objects['input_var'] = input_var\n select_path_text = ttk.Label(\n root,\n borderwidth=1,\n relief='solid',\n width=80,\n textvariable=input_var,\n )\n self._objects['select_path_text'] = select_path_text\n select_path_text.grid(row=0, column=1, padx=10, stick='w')\n # File selection button\n select_button = ttk.Button(\n root, \n text='Select',\n command=self._select_input,\n )\n select_button.grid(row=0, column=2, padx=10, sticky='w')\n self._objects['select_button'] = select_button\n #######################################################################\n # Row 2\n #######################################################################\n # Output file label\n output_label = ttk.Label(root, text='Output File')\n output_label.grid(row=1, column=0, padx=10, sticky='w')\n self._objects['output_label'] = output_label\n # File Output path and associated variable\n output_var = tk.StringVar()\n self._objects['output_var'] = output_var\n output_path_text = ttk.Label(\n root,\n borderwidth=1,\n relief='solid',\n width=80,\n textvariable=output_var,\n )\n self._objects['output_path_text'] = output_path_text\n output_path_text.grid(row=1, column=1, padx=10, sticky='w')\n # Output selection button\n output_button = ttk.Button(\n root, \n text='Select',\n command=self._select_output,\n )\n output_button.grid(row=1, column=2, padx=10, sticky='w')\n self._objects['output_button'] = output_button\n #######################################################################\n # Row 3\n #######################################################################\n # Surname label\n surname_label = ttk.Label(\n root, \n text='Surname Column Header', \n )\n surname_label.grid(row=2, column=0, padx=10, sticky='w')\n self._objects['surname_label'] = surname_label\n # Text entry box and associated variable\n name_var = tk.StringVar()\n self._objects['name_var'] = name_var\n surname_entry = ttk.Entry(\n root,\n text='Enter Name Column Header',\n textvariable=name_var,\n )\n surname_entry.grid(row=2, column=1, padx=10, sticky='w')\n self._objects['surname_entry'] = surname_entry\n #######################################################################\n # Row 4\n #######################################################################\n # ZCTA label\n zcta_label = ttk.Label(\n root, \n text='ZIP/ZCTA Column Header', \n )\n zcta_label.grid(row=3, column=0, padx=10, sticky='w')\n self._objects['zcta_label'] = zcta_label\n # String variable and text entry box\n zip_var = tk.StringVar()\n self._objects['zip_var'] = zip_var\n zcta_entry = ttk.Entry(\n root,\n text='Enter ZIP/ZCTA Column Header',\n textvariable=zip_var,\n )\n zcta_entry.grid(row=3, 
column=1, padx=10, pady=3, sticky='w')\n self._objects['zcta_entry'] = zcta_entry\n #######################################################################\n # Row 5\n #######################################################################\n # Model selector label \n model_label = ttk.Label(root, text='Model Type')\n model_label.grid(row=4, column=0, padx=10, sticky='w')\n self._objects['model_label'] = model_label\n # Dropdown to select which model to use. Also create variable\n model_var = tk.StringVar()\n self._objects['model_var'] = model_var\n model_selector = ttk.OptionMenu(\n root,\n model_var,\n 'Surgeo (Surname + Geocode)',\n 'Surname',\n 'Geocode',\n )\n model_selector.grid(row=4, column=1, padx=10, sticky='w')\n self._objects['model_selector'] = model_selector\n #######################################################################\n # Row 6\n #######################################################################\n # Proces inputs button (this runs self._execute)\n # Note: this is also bound to <Enter> in the window setup func.\n execute_button = ttk.Button(\n root, \n text='Execute',\n command=self._execute,\n )\n execute_button.grid(row=5, column=2, padx=10, sticky='w')\n self._objects['execute_button'] = execute_button\n\n def _check_inputs(self, df):\n \"\"\"Take DF and raise error if improper column names given\"\"\"\n # Create shortnames for variables\n name_var = self._objects['name_var'].get()\n zip_var = self._objects['zip_var'].get()\n model_var = self._objects['model_var'].get()\n # If it's geocode, make sure column is there. Otherwise error.\n if model_var == 'Geocode':\n if zip_var not in df.columns:\n # Otherwise raise error\n raise SurgeoException(f'{name_var} not in input data.')\n # If Surname, make sure the user-specified surname column is there.\n elif model_var == 'Surname':\n # Otherwise raise error\n if name_var not in df.columns:\n raise SurgeoException(f'{name_var} not in input data.')\n # If Surgeo, make sure both user-specified columns are present.\n else:\n if zip_var not in df.columns:\n raise SurgeoException(f'{name_var} not in input data.')\n if name_var not in df.columns:\n raise SurgeoException(f'{name_var} not in input data.')\n\n def _load_df(self, input_path):\n \"\"\"This creates a dataframe based on self._input_path\"\"\"\n path = pathlib.Path(input_path)\n suffix = path.suffix\n # If it's excel, read_excel()\n if suffix == '.xlsx' or suffix == 'xls':\n df = pd.read_excel(path)\n # If CSV, read read_csv()\n elif suffix == '.csv':\n df = pd.read_csv(path, skip_blank_lines=False)\n # If path is unrecognized, throw error\n else:\n raise SurgeoException(\n f'File ending for \"{path}\" not '\n 'recognized. 
Please use .csv or .xlsx.'\n )\n return df\n\n def _execute(self, event=None, show_msgbox=True):\n \"\"\"This takes all the user inputs and runs the analysis.\n \n It can be triggered by the enter key (in which case it supplied an\n event), or it can be triggered by clicking the \"Execute\" button.\n The outcome in either event is identical.\n\n \"\"\"\n\n # Get tkinter variables and stuff strings in short names.\n # Input path from file selection\n input_var = self._objects['input_var'].get()\n # Output path from file selection\n output_var = self._objects['output_var'].get()\n # Surname column header from text field\n name_var = self._objects['name_var'].get()\n # ZCTA column header from text field\n zip_var = self._objects['zip_var'].get()\n # Model being run from drop down window\n model_var = self._objects['model_var'].get()\n # Output path suffix (to determine if .csv or .xlsx)\n suffix = pathlib.Path(output_var).suffix\n # This large try block captures any errors for error window\n try:\n # Load the dataframe\n input_df = self._load_df(input_var)\n # Ensure the inputs are OK\n self._check_inputs(input_df)\n # If geo, run the geo model assign result to df\n if model_var == 'Geocode':\n geo = GeocodeModel()\n output_df = geo.get_probabilities(input_df[zip_var])\n # If sur, run the sur model and assign result to df\n if model_var == 'Surname':\n sur = SurnameModel()\n output_df = sur.get_probabilities(input_df[name_var])\n # If surgeo, run the surgeo model and assign to df\n if model_var == 'Surgeo (Surname + Geocode)':\n surgeo = SurgeoModel()\n # Note that surgeo takes two input columns unlike others\n output_df = surgeo.get_probabilities(\n input_df[name_var], \n input_df[zip_var]\n )\n # If output is .xlsx, write to Excel\n if suffix == '.xlsx':\n output_df.to_excel(output_var, index=False)\n # Otherwise write to CSV\n else:\n output_df.to_csv(output_var, index=False)\n # Show message on success\n if show_msgbox:\n messagebox.showinfo(\n 'Success',\n f'{len(output_df)} items successfully written.'\n )\n except Exception:\n # Show error box on fail\n err = traceback.format_exc()\n if show_msgbox:\n messagebox.showerror('Error', err)\n\n\nif __name__ == '__main__':\n # Error handling within application.\n gui = SurgeoGUI()\n gui.main()\n"
] | [
[
"pandas.read_excel",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
vitaldb/pyvital | [
"b7b045d92e8db3a9d7f45ea6593e57c73ae8ec11"
] | [
"pyvital/filters/pleth_ptt.py"
] | [
"from .. import arr\nimport numpy as np\n\ncfg = {\n 'name': 'Pleth - Pulse Transit Time',\n 'group': 'Medical algorithms',\n 'desc': 'Calculate pulse transit time.',\n 'reference': '',\n 'overlap': 5,\n 'interval': 30,\n 'inputs': [{'name': 'ecg', 'type': 'wav'}, {'name': 'pleth', 'type': 'wav'}],\n 'outputs': [\n {'name': 'PTT_min', 'type': 'num', 'unit': 'ms', 'min': 100, 'max': 500},\n {'name': 'PTT_dmax', 'type': 'num', 'unit': 'ms', 'min': 100, 'max': 500},\n {'name': 'PTT_max', 'type': 'num', 'unit': 'ms', 'min': 100, 'max': 500},\n {'name': 'R_peak', 'type': 'num', 'min': 0, 'max': 2}\n ]\n }\n\n\ndef run(inp, opt, cfg):\n ecg_data = arr.interp_undefined(inp['ecg']['vals'])\n ecg_srate = inp['ecg']['srate']\n\n pleth_data = arr.interp_undefined(inp['pleth']['vals'])\n pleth_srate = inp['pleth']['srate']\n pleth_data = arr.band_pass(pleth_data, pleth_srate, 0.5, 15)\n\n ecg_rlist = arr.detect_qrs(ecg_data, ecg_srate)\n pleth_minlist, pleth_maxlist = arr.detect_peaks(pleth_data, pleth_srate)\n\n dpleth = np.diff(pleth_data)\n pleth_dmaxlist = [] # index of the maximum slope between peak and nadir in pleth\n for i in range(len(pleth_minlist)): # maxlist is one less than minlist\n dmax_idx = arr.max_idx(dpleth, pleth_minlist[i], pleth_maxlist[i+1])\n pleth_dmaxlist.append(dmax_idx)\n\n pttmax_list = []\n pttmin_list = []\n pttdmax_list = []\n for i in range(len(ecg_rlist) - 1):\n if len(pleth_minlist) == 0:\n continue\n if len(pleth_maxlist) == 0:\n continue\n\n rpeak_dt = ecg_rlist[i] / ecg_srate\n rpeak_dt_next = ecg_rlist[i+1] / ecg_srate\n if rpeak_dt < cfg['overlap']:\n continue\n\n # find first min in pleth after rpeak_dt in ecg\n found_minidx = 0\n for minidx in pleth_minlist:\n if minidx > rpeak_dt * pleth_srate:\n found_minidx = minidx\n break\n elif minidx > rpeak_dt_next * pleth_srate:\n break\n if found_minidx == 0:\n continue\n\n # find first dmax in pleth after rpeak_dt in ecg\n found_dmaxidx = 0\n for dmaxidx in pleth_dmaxlist:\n if dmaxidx > rpeak_dt * pleth_srate:\n found_dmaxidx = dmaxidx\n break\n elif dmaxidx > rpeak_dt_next * pleth_srate:\n break\n if found_dmaxidx == 0:\n continue\n\n # find first dmax in pleth after rpeak_dt in ecg\n found_maxidx = 0\n for maxidx in pleth_maxlist:\n if maxidx > rpeak_dt * pleth_srate:\n found_maxidx = maxidx\n break\n elif maxidx > rpeak_dt_next * pleth_srate:\n break\n if found_maxidx == 0:\n continue\n\n max_dt = found_maxidx / pleth_srate\n if max_dt > cfg['interval']:\n continue\n min_dt = found_minidx / pleth_srate\n dmax_dt = found_dmaxidx / pleth_srate\n\n pttmax_list.append({'dt': max_dt, 'val': (max_dt - rpeak_dt) * 1000})\n pttdmax_list.append({'dt': dmax_dt, 'val': (dmax_dt - rpeak_dt) * 1000})\n pttmin_list.append({'dt': min_dt, 'val': (min_dt - rpeak_dt) * 1000})\n\n return [\n pttmin_list, \n pttdmax_list, \n arr.get_samples(ecg_data, ecg_srate, ecg_rlist), \n pttmax_list]\n"
] | [
[
"numpy.diff"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
terrencetec/sigflow | [
"e46e3b1e732b6b95250bc21e49ce3a5d180593a2"
] | [
"sigflow/system/system.py"
] | [
"import numpy as np\n\nfrom sigflow.blocks import Block\nfrom sigflow.core.utils import to_array\n\nclass System(Block):\n \"\"\"A generic system class that connect blocks.\n\n Attributes\n ----------\n blocks : list of Block objects\n System's blocks which connect to each other.\n \"\"\"\n def __init__(self, blocks=None, nin=0, nout=0):\n \"\"\"Constructor.\n\n Parameters\n ----------\n blocks : Block or iterable of Block objects.\n The block to be included in the system.\n \"\"\"\n if blocks is None:\n blocks = []\n\n ## node table\n blocks = to_array(blocks, Block)\n ids = range(len(blocks))\n self.blocks = dict(enumerate(blocks))\n self._ids = dict(zip(blocks, ids))\n\n ## adjacency list\n out_ports = [[{}]*block.noutput for block in blocks]\n self._succ = dict(zip(ids, out_ports))\n\n self._set = False # indicate if starting block is set.\n in_ports = [[{}]*block.ninput for block in blocks]\n self._pred = dict(zip(ids, in_ports))\n pending = [[0.]*block.ninput for block in blocks]\n self._pending = dict(zip(ids, pending))\n self.set_ninout(nin, nout)\n\n def set_ninout(self, ninput, noutput=0):\n \"\"\"Set the input and output blocks of the system.\n System then behaves like a block, with definite input and output.\n\n Parameters\n ----------\n ninput : int\n number of input port of the system.\n output_blocks : int, optional\n number of output port of the system.\n Defaults to 0.\n \"\"\"\n self.ninput = ninput\n self.noutput = noutput\n if ninput > 0:\n self._succ = {**self._succ, **{\"input\": [{}]*ninput}}\n if noutput > 0:\n self._pred = {**self._pred, **{\"output\": [{}]*noutput}}\n self._pending = {**self._pending, **{\"output\": [None]*noutput}}\n self._set = True\n\n def _i2o(self):\n \"\"\"Method to convert the input signal to an output signal.\n\n Returns\n -------\n list\n \"\"\"\n if not self._set:\n raise ValueError(\"self.input_blocks is not set.\"\n \"Set it by using self.set_blocks method.\")\n ## for short hand\n inputs = self.inputs\n pending = self._pending\n visited = {}.fromkeys(self.blocks.keys(), False)\n ## block waiting to process in breadth first search method,\n ## may have duplicates\n queue = []\n ## reset to block input=list of zero if block mutated\n for ids, data in pending.items():\n if ids != \"input\" and ids != \"output\":\n length = self.blocks[ids].ninput\n if len(data) != length:\n pending[ids] = [0.]*length\n ## setting input to the system to blocks' inputs\n for from_port in range(self.ninput):\n for target_id, to_port in self._succ[\"input\"][from_port].items():\n queue.append(target_id) # add blocks to be run\n pending[target_id][to_port] = self.inputs[from_port]\n\n\n while len(queue):\n current_id = queue.pop(0)\n if visited[current_id]:\n continue\n\n visited[current_id] = True\n current_block = self.blocks[current_id]\n\n ## setting predessors output as successor's input\n if current_block.ninput > 1:\n ## setting each element of the input as the same size\n tmp = np.broadcast(*pending[current_id])\n pending[current_id] = np.column_stack(tuple(tmp))\n current_block.inputs = pending[current_id]\n else:\n current_block.inputs = pending[current_id][0]\n ## process input to output\n tmp_output = current_block.output\n\n ports = self._succ[current_id] # nested dict of target blocks\n for from_port in range(len(ports)):\n ## caching data\n for target_id, to_port in ports[from_port].items():\n if target_id != \"output\":\n queue.append(target_id) # add blocks to be run\n pending[target_id][to_port] = tmp_output[from_port]\n if self.noutput > 
0:\n res = pending[\"output\"].copy()\n else:\n res = None\n return res\n\n def add_blocks(self, blocks):\n \"\"\"Add blocks to the system\n\n Parameters\n ----------\n blocks : Block or iterable of Block objects.\n The block to add in the system.\n \"\"\"\n if len(self.blocks) == 0:\n id_start = 0\n else:\n last_id = max(self.blocks)\n id_start = last_id + 1\n blocks = to_array(blocks, types=Block)\n new_ids = range(id_start, id_start+len(blocks))\n out_ports = [[{}]*block.noutput for block in blocks]\n self.blocks = {**self.blocks,\n **dict(zip(new_ids, blocks))}\n self._ids = {**self._ids,\n **dict(zip(blocks, new_ids))}\n self._succ = {**self._succ,\n **dict(zip(new_ids, out_ports))}\n in_ports = [[{}]*block.ninput for block in blocks]\n self._pred = {**self._pred,\n **dict(zip(new_ids, in_ports))}\n pending = [[0.]*block.ninput for block in blocks]\n self._pending = {**self._pending,\n **dict(zip(new_ids, pending))}\n\n def add_edge(self, edge_from, edge_to, from_port=0, to_port=0):\n \"\"\"Add a directed connection from block out_edge to in_edge.\n\n Parameters\n ----------\n edge_from : Block object, block_id, or 'input'\n The block to connect from.\n 'input' indicates system's input.\n edge_to : Block object, block_id, or 'output'\n The block to connect to.\n 'output' indicates system's output\n from_port : int, optional\n The output port to connect from.\n out_port must be smaller than noutput of the block.\n Defaults to 0.\n to_port : int, optional\n The input port to connect from.\n in_port must be smaller than ninput of the block.\n Defaults to 0.\n \"\"\"\n self._check_block_exists(edge_from)\n self._check_block_exists(edge_to)\n if isinstance(edge_from, Block):\n from_id = self._ids[edge_from]\n else:\n from_id = edge_from\n if isinstance(edge_to, Block):\n to_id = self._ids[edge_to]\n else:\n to_id = edge_to\n\n ## check valid port\n if from_id == \"input\":\n nport = self.ninput\n else:\n nport = self.blocks[from_id].ninput\n if from_port >= nport:\n raise ValueError(\"invalid from port {} for id:{}\"\n \"\".format(from_port, from_id))\n if to_id == \"output\":\n nport = self.noutput\n else:\n nport = self.blocks[to_id].ninput\n if to_port >= nport:\n raise ValueError(\"invalid to port {} for id:{}\"\n \"\".format(from_port, to_id))\n\n ## check if to_port is already connected to others\n if len(self._pred[to_id][to_port]) > 0:\n raise ValueError(\"node {} port {} already connected to another\"\n \" node {} port {}, please remove the connection\"\n \" before connecting to it.\"\n \"\".format(to_id, to_port, from_id, from_port))\n\n ## add edge\n target_dict = self._succ[from_id][from_port]\n self._succ[from_id][from_port] = {**target_dict, **{to_id: to_port}}\n\n source_dict = self._pred[to_id][to_port]\n self._pred[to_id][to_port] = {**source_dict, **{from_id: to_port}}\n\n def remove_edge(self, edge_from, edge_to, from_port=0, to_port=0):\n \"\"\"Remove the given edge from the system.\n\n Parameters\n ----------\n edge_from : Block or block_id\n The block to connect from.\n edge_to : Block or block_id\n The block to connect to.\n from_port : int, optional\n The output port to connect from.\n out_port must be smaller tha noutput of the block.\n Defaults to 0.\n to_port : int, optional\n The input port to connect from.\n in_port must be smaller tha ninput of the block.\n Defaults to 0.\n \"\"\"\n if isinstance(edge_from, Block):\n from_id = self._ids[edge_from]\n else:\n from_id = edge_from\n if isinstance(edge_to, Block):\n to_id = self._ids[edge_to]\n else:\n to_id = 
edge_to\n del self._succ[from_id][from_port][to_id]\n del self._pred[to_id][to_port][from_id]\n\n def clear_edges(self):\n \"\"\"Clear all the connections in the system.\"\"\"\n succ = [(i, [{}]*block.noutput) for i, block in self.blocks.items()]\n self._succ = dict(succ)\n pred = [(i, [{}]*block.noutput) for i, block in self.blocks.items()]\n self._pred = dict(pred)\n\n def remove_blocks(self, blocks):\n \"\"\"Remove blocks from the system.\n\n Parameters\n ----------\n blocks : Block or list of Block\n Blocks to remove from the system.\n \"\"\"\n blocks = to_array(blocks, types=Block)\n for delete in blocks:\n self._check_block_exists(delete)\n del_id = self._ids[delete]\n self._remove_block(del_id)\n\n def remove_by_id(self, block_id):\n \"\"\"Remove blocks from the system.\n\n Parameters\n ----------\n block_id : int or list of int\n ID of the blocks to remove from the system.\n \"\"\"\n blocks_id = to_array(block_id, np.integer)\n for del_id in blocks_id:\n self._check_block_exists(del_id)\n self._remove_block(del_id)\n\n def _remove_block(self, del_id):\n delete = self.blocks.pop(del_id)\n del self._ids[delete]\n del self._pending[del_id]\n\n for dictionary in [self._succ, self._pred]:\n del dictionary[del_id]\n for key in dictionary:\n for port in range(len(dictionary[key])):\n if del_id in dictionary[key][port]:\n del dictionary[key][port][del_id]\n\n def _check_block_exists(self, block):\n \"\"\"An internal method to check if block is in the system.\n\n Parameters\n ----------\n block : Block objects, or block_id\n \"\"\"\n if isinstance(block, Block):\n if block not in self._ids:\n raise LookupError(\"{} doesn't exist in the system\"\n \"\".format(block))\n elif isinstance(block, (int, np.integer)):\n if block not in self.blocks:\n raise LookupError(\"ID {:d} doesn't exist in the system\"\n \"\".format(block))\n elif block != \"input\" and block != \"output\":\n print(block)\n raise TypeError(\"wrong type to call block\")\n\n def connections(self):\n \"\"\"Connections of blocks in the system.\"\"\"\n seq = [\"Connections:\"]\n seq.append(\"from_id\\tfrom_port\\tto_id\\tto_port\")\n seq.append(str(self._succ))\n return \"\\n\".join(seq)\n\n def __str__(self):\n \"\"\"Description of the system in string.\"\"\"\n seq = [\"{:<6s} {:<14s} {:s}\".format(\"ID\", \"type\", \"label\")]\n for i, block in self.blocks.items():\n tmp = \"{:<6d} {:<14s} {:s}\".format(i,\n block.__class__.__name__,\n str(block.label))\n seq.append(tmp)\n seq = \"\\n\".join(seq)\n seq += \"\\n\" + self.connections()\n return seq\n\n @property\n def blocks(self):\n \"\"\"blocks in the system and their corresponding id.\"\"\"\n if self._blocks is None:\n raise ValueError(\"self.blocks is not set, please add blocks\"\n \" before using the system\")\n return self._blocks\n\n @blocks.setter\n def blocks(self, blocks):\n \"\"\"block setter.\"\"\"\n if isinstance(blocks, dict):\n for block in blocks.values():\n if not isinstance(block, Block):\n raise TypeError(\"System.blocks dict's value must be Block\\\n objects, not %s\"%type(block).__name__)\n self._blocks = blocks\n else:\n raise TypeError(\"blocks must be of type dict, not %s\"\\\n % type(blocks).__name__)\n @property\n def inputs(self):\n return self._inputs\n\n @inputs.setter\n def inputs(self, values):\n \"\"\"inputs setter\"\"\"\n values = to_array(values, (np.integer, np.floating, np.ndarray))\n # print(\"inputs :\", values)\n if len(values) != self.ninput:\n raise ValueError(\"expected input size of {} in axis 0, \"\n \"got {} instead\".format(self.ninput, 
len(values)))\n self._inputs = values\n\n"
] | [
[
"numpy.broadcast"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
esteng/miso_uds | [
"66cc382e0dd11247c445e1ccbdb057a3a9b3dcc2"
] | [
"miso/losses/mixing.py"
] | [
"from overrides import overrides\nimport logging \n\nimport torch\nimport torch.nn.functional as F\nfrom allennlp.common.registrable import Registrable\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\nclass LossMixer(torch.nn.Module, Registrable):\n def __init__(self):\n super().__init__()\n # convention: semantics loss, syntax loss\n self.loss_weights = [1,1]\n\n def forward(self, sem_loss, syn_loss): \n # convention: semantics loss, syntax loss\n return self.loss_weights[0] * sem_loss +\\\n self.loss_weights[1] * syn_loss\n\n def update_weights(self, curr_epoch, total_epochs):\n raise NotImplementedError\n\[email protected](\"alternating\") \nclass AlternatingLossMixer(LossMixer):\n \"\"\"\n Alternate between all syntax or all semantics loss \n \"\"\"\n def __init__(self):\n super().__init__() \n self.syn_loss_weights = [0,1]\n self.sem_loss_weights = [1,0]\n self.loss_weights = self.syn_loss_weights\n\n def update_weights(self, curr_epoch, total_epochs): \n print(f\"updating loss weights with curr_epoch {curr_epoch} and total_epochs {total_epochs}\" )\n if curr_epoch % 2 == 0:\n self.loss_weights = self.syn_loss_weights\n else:\n self.loss_weights = self.sem_loss_weights\n\[email protected](\"fixed\") \nclass FixedLossMixer(LossMixer):\n \"\"\"\n fixed 50-50 loss \n \"\"\"\n def __init__(self):\n super().__init__() \n self.loss_weights = [0.5, 0.5]\n\n def update_weights(self, curr_epoch, total_epochs): \n pass \n\[email protected](\"syntax->semantics\") \nclass SyntaxSemanticsLossMixer(LossMixer):\n \"\"\"\n Start with all syntax loss, move to all semantics loss \n \"\"\"\n def __init__(self):\n super().__init__() \n self.loss_weights = [0,1]\n\n def update_weights(self, curr_epoch, total_epochs): \n # take steps towards all semantics loss s.t. by the end of training, \n # semantics loss weight is 1\n step_size = 1/total_epochs\n syn_weight = 1 - step_size * curr_epoch\n self.loss_weights[1] = syn_weight\n self.loss_weights[0] = 1 - syn_weight\n\[email protected](\"semantics->syntax\") \nclass SemanticsSyntaxLossMixer(LossMixer):\n \"\"\"\n Start with all semantics loss, move to all syntax loss \n \"\"\"\n def __init__(self):\n super().__init__() \n self.loss_weights = [1,0]\n\n def update_weights(self, curr_epoch, total_epochs): \n # take steps towards all syntax loss s.t. 
by the end of training, \n # syntax loss weight is 1\n step_size = 1/total_epochs\n sem_weight = 1 - step_size * curr_epoch\n self.loss_weights[0] = sem_weight\n self.loss_weights[1] = 1 - sem_weight\n\[email protected](\"semantics-only\") \nclass SemanticsOnlyLossMixer(LossMixer):\n \"\"\"\n Start with all semantics loss\n \"\"\"\n def __init__(self):\n super().__init__() \n self.loss_weights = [1,0]\n\n def update_weights(self, curr_epoch, total_epochs): \n pass\n\[email protected](\"syntax-only\") \nclass SyntaxOnlyLossMixer(LossMixer):\n \"\"\"\n Start with all syntax loss\n \"\"\"\n def __init__(self):\n super().__init__() \n self.loss_weights = [0,1]\n\n def update_weights(self, curr_epoch, total_epochs): \n pass\n\[email protected](\"static-semantics-heavy\") \nclass SemanticsHeavyLossMixer(LossMixer):\n \"\"\"\n Downweight syntactic loss so that it's roughly the same magnitude as semantic loss \n based on observed ratio of losses \n \"\"\"\n def __init__(self):\n super().__init__() \n self.loss_weights = [1, 0.003]\n\n def update_weights(self, curr_epoch, total_epochs): \n pass\n\[email protected](\"static-syntax-heavy\") \nclass SemanticsHeavyLossMixer(LossMixer):\n \"\"\"\n upweight syntactic loss \n \"\"\"\n def __init__(self, weight=5):\n super().__init__() \n self.loss_weights = [1, weight]\n\n def update_weights(self, curr_epoch, total_epochs): \n pass\n\[email protected](\"learned\") \nclass LearnedLossMixer(LossMixer):\n \"\"\"\n Downweight syntactic loss so that it's roughly the same magnitude as semantic loss \n based on observed ratio of losses \n \"\"\"\n def __init__(self):\n super().__init__() \n # placeholder \n self.loss_weights = [0.5, 0.5]\n # start at 50-50\n self.semantics_raw_weight = torch.nn.Parameter(torch.zeros((1), dtype=torch.float))\n\n @overrides\n def forward(self, sem_loss, syn_loss): \n sem_weight = F.sigmoid(self.semantics_raw_weight)\n syn_weight = 1 - sem_weight\n\n # convention: semantics loss, syntax loss\n return sem_weight * sem_loss +\\\n syn_weight * syn_loss\n\n def update_weights(self, curr_epoch, total_epochs): \n # log the weights \n sem_weight = F.sigmoid(self.semantics_raw_weight)\n syn_weight = 1 - sem_weight\n logger.info(f\"learned weights are: semantics: {sem_weight}, syntax: {syn_weight}\") \n\n\n"
] | [
[
"torch.nn.functional.sigmoid",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
IsakFalk/IKML | [
"9b9c7ee2eccd8021295c47190f55f0c0616ea826"
] | [
"scripts/gas_sensor/linear.py"
] | [
"import argparse\nimport pickle as pkl\nimport warnings\nfrom collections import OrderedDict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom implicit_kernel_meta_learning.data_utils import GasSensorDataLoader\nfrom implicit_kernel_meta_learning.experiment_utils import set_seed\nfrom implicit_kernel_meta_learning.kernels import LinearKernel\n\nwarnings.filterwarnings(\"ignore\")\n\n\ndef visualise_run(result):\n t_val = result[\"meta_val_every\"] * np.arange(len(result[\"meta_valid_error\"]))\n t = np.arange(len(result[\"meta_train_error\"]))\n fig, ax = plt.subplots()\n ax.plot(t, result[\"meta_train_error\"], label=\"Meta train MSE\")\n ax.plot(t_val, result[\"meta_valid_error\"], label=\"Meta val MSE\")\n ax.legend()\n ax.set_title(\n \"meta-(val, test) holdout MSE: ({:.4f}, {:.4f})\".format(\n result[\"holdout_meta_valid_error\"][0], result[\"holdout_meta_test_error\"][0]\n )\n )\n return fig, ax\n\n\nclass FeatureMapRidgeRegression(nn.Module):\n \"\"\"Like RidgeRegression but with an additional feature map phi: X \\to Phi\n\n feature_map is a torch module which is learned together with the rest of the parameters\"\"\"\n\n def __init__(self, log_lam, kernel, feature_map, device=None):\n super(FeatureMapRidgeRegression, self).__init__()\n self.log_lam = nn.Parameter(torch.tensor(log_lam))\n self.kernel = kernel\n self.feature_map = feature_map\n self.alphas = None\n self.Phi_tr = None\n if device is None:\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else:\n self.device = device\n\n def fit(self, X, Y):\n n = X.size()[0]\n\n Phi = self.feature_map(X)\n\n self.K = self.kernel(Phi, Phi)\n K_nl = self.K + torch.exp(self.log_lam) * n * torch.eye(n).to(self.device)\n # To use solve we need to make sure Y is a float\n # and not an int\n self.alphas, _ = torch.solve(Y.float(), K_nl)\n self.Phi_tr = Phi\n\n def predict(self, X):\n return torch.matmul(self.kernel(self.feature_map(X), self.Phi_tr), self.alphas)\n\n\ndef fast_adapt_ker(batch, model, loss, device):\n # Unpack data\n X_tr, y_tr = batch[\"train\"]\n X_tr = X_tr.to(device).float()\n y_tr = y_tr.to(device).float()\n X_val, y_val = batch[\"valid\"]\n X_val = X_val.to(device).float()\n y_val = y_val.to(device).float()\n\n # adapt algorithm\n model.fit(X_tr, y_tr)\n\n # Predict\n y_hat = model.predict(X_val)\n return loss(y_val, y_hat)\n\n\ndef get_nonlinearity(nonlinearity):\n nonlinearity = nonlinearity.lower()\n if nonlinearity == \"relu\":\n return nn.ReLU\n elif nonlinearity == \"sigmoid\":\n return nn.Sigmoid\n elif nonlinearity == \"tanh\":\n return nn.Tanh\n\n\ndef mlp_layer(in_dim, out_dim, nonlinearity):\n layer = nn.Sequential(nn.Linear(in_dim, out_dim), nonlinearity())\n return layer\n\n\ndef create_mlp(num_layers, hidden_dim, in_dim, out_dim, nonlinearity):\n if num_layers == 0:\n mlp = nn.Linear(in_dim, out_dim)\n else:\n mlp = nn.Sequential(\n nn.Linear(in_dim, hidden_dim),\n nonlinearity(),\n *(\n mlp_layer(hidden_dim, hidden_dim, nonlinearity)\n for _ in range(num_layers)\n ),\n nn.Linear(hidden_dim, out_dim)\n )\n return mlp\n\n\ndef main(\n seed,\n k_support,\n k_query,\n num_iterations,\n meta_batch_size,\n meta_val_batch_size,\n meta_val_every,\n holdout_size,\n num_layers,\n hidden_dim,\n nonlinearity,\n lam,\n meta_lr,\n):\n nonlinearity = get_nonlinearity(nonlinearity)\n result = OrderedDict(\n meta_train_error=[],\n meta_valid_error=[],\n holdout_meta_test_error=[],\n holdout_meta_valid_error=[],\n 
meta_val_every=meta_val_every,\n num_iterations=num_iterations,\n name=\"R2D2\",\n )\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n set_seed(seed, False)\n\n # Load train/validation/test data\n traindata = GasSensorDataLoader(k_support, k_query, split=\"train\", t=True)\n valdata = GasSensorDataLoader(k_support, k_query, split=\"valid\", t=True)\n testdata = GasSensorDataLoader(k_support, k_query, split=\"test\", t=True)\n\n # Holdout errors\n valid_batches = [valdata.sample() for _ in range(holdout_size)]\n test_batches = [testdata.sample() for _ in range(holdout_size)]\n\n # Define model\n in_dim = 14\n out_dim = 1\n kernel = LinearKernel()\n model = FeatureMapRidgeRegression(\n np.log(lam),\n kernel,\n create_mlp(num_layers, hidden_dim, in_dim, out_dim, nonlinearity),\n ).to(device)\n opt = optim.Adam(model.parameters(), meta_lr)\n\n loss = nn.MSELoss(\"mean\")\n\n # Keep best model around\n best_val_iteration = 0\n best_val_mse = np.inf\n\n for iteration in range(num_iterations):\n validate = True if iteration % meta_val_every == 0 else False\n\n train_batches = [traindata.sample() for _ in range(meta_batch_size)]\n opt.zero_grad()\n meta_train_error = 0.0\n meta_valid_error = 0.0\n for train_batch in train_batches:\n evaluation_error = fast_adapt_ker(\n batch=train_batch,\n model=model,\n loss=loss,\n device=device,\n )\n evaluation_error.backward()\n meta_train_error += evaluation_error.item()\n if validate:\n val_batches = [valdata.sample() for _ in range(meta_val_batch_size)]\n for val_batch in val_batches:\n evaluation_error = fast_adapt_ker(\n batch=val_batch,\n model=model,\n loss=loss,\n device=device,\n )\n meta_valid_error += evaluation_error.item()\n meta_valid_error /= meta_val_batch_size\n result[\"meta_valid_error\"].append(meta_valid_error)\n print(\"Iteration {}\".format(iteration))\n print(\"meta_valid_error: {}\".format(meta_valid_error))\n if meta_valid_error < best_val_mse:\n best_val_iteration = iteration\n best_val_mse = meta_valid_error\n best_state_dict = model.state_dict()\n\n meta_train_error /= meta_batch_size\n result[\"meta_train_error\"].append(meta_train_error)\n # Average the accumulated gradients and optimize\n for p in model.parameters():\n p.grad.data.mul_(1.0 / meta_batch_size)\n opt.step()\n\n # Load best model\n print(\"best_valid_iteration: {}\".format(best_val_iteration))\n print(\"best_valid_mse: {}\".format(best_val_mse))\n model.load_state_dict(best_state_dict)\n\n meta_valid_error = 0.0\n meta_test_error = 0.0\n for (valid_batch, test_batch) in zip(valid_batches, test_batches):\n evaluation_error = fast_adapt_ker(\n batch=valid_batch,\n model=model,\n loss=loss,\n device=device,\n )\n meta_valid_error += evaluation_error.item()\n evaluation_error = fast_adapt_ker(\n batch=test_batch,\n model=model,\n loss=loss,\n device=device,\n )\n meta_test_error += evaluation_error.item()\n\n meta_valid_error /= holdout_size\n meta_test_error /= holdout_size\n print(\"holdout_meta_valid_error: {}\".format(meta_valid_error))\n print(\"holdout_meta_test_error: {}\".format(meta_test_error))\n result[\"holdout_meta_valid_error\"].append(meta_valid_error)\n result[\"holdout_meta_test_error\"].append(meta_test_error)\n\n with open(\"result.pkl\", \"wb\") as f:\n pkl.dump(result, f)\n\n # Visualise\n fig, ax = visualise_run(result)\n plt.tight_layout()\n fig.savefig(\"learning_curves.pdf\", bbox_inches=\"tight\")\n fig.savefig(\"learning_curves.png\", bbox_inches=\"tight\")\n\n\nif __name__ == \"__main__\":\n parser = 
argparse.ArgumentParser()\n parser.add_argument(\"--seed\", type=int, default=42)\n parser.add_argument(\"--k_support\", type=int, default=10)\n parser.add_argument(\"--k_query\", type=int, default=10)\n parser.add_argument(\"--num_iterations\", type=int, default=10000)\n parser.add_argument(\"--meta_batch_size\", type=int, default=4)\n parser.add_argument(\"--meta_val_batch_size\", type=int, default=100)\n parser.add_argument(\"--meta_val_every\", type=int, default=100)\n parser.add_argument(\"--holdout_size\", type=int, default=3000)\n parser.add_argument(\"--num_layers\", type=int, default=1)\n parser.add_argument(\"--hidden_dim\", type=int, default=64)\n parser.add_argument(\"--nonlinearity\", type=str, default=\"relu\")\n parser.add_argument(\"--lam\", type=float, default=0.001)\n parser.add_argument(\"--meta_lr\", type=float, default=0.001)\n args = parser.parse_args()\n main(**vars(args))\n"
] | [
[
"numpy.log",
"matplotlib.pyplot.tight_layout",
"torch.eye",
"matplotlib.pyplot.subplots",
"torch.tensor",
"torch.exp",
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
numberonewastefellow/projects | [
"b33f826930ef3e98165c267f34c7ea550853f2df"
] | [
"small_ds/counts_merger_pickle_files.py"
] | [
"import argparse\nfrom concurrent.futures.process import ProcessPoolExecutor\nimport shutil\n\nfrom pickle_reading_writing import PickleReadWrite\nimport os\nimport pandas as pd\n\n\ndef read_pickles_from_dir(input_folder):\n files = os.listdir(input_folder)\n pkl = PickleReadWrite()\n output = pkl.read_from_dir(files)\n return output\n\n\ndef merge_dict_to_data_frame(lst_of_dict):\n return pd.DataFrame(lst_of_dict)\n\n\ndef data_frame_to_csv(data_frame, file_path):\n data_frame.to_csv(file_path)\n\n\ndef data_frame_to_pickle(data_frame, file_path):\n pkl = PickleReadWrite()\n pkl.write_pickle_to_data_frame(file_path, data_frame)\n\n\ndef merge_text_files_to_another_file(input_folder, output_file_name):\n if not os.path.isdir(input_folder):\n return True\n\n print(\"processing folder \" + input_folder)\n files = os.listdir(input_folder)\n with open(output_file_name, 'w') as outfile:\n for fname in files:\n with open(os.path.join(input_folder, fname)) as infile:\n for line in infile:\n outfile.write(line)\n return True\n\n\ndef merge_pickles_to_another_pickle(input_folder, output_folder, output_file_name):\n print(\"input folder {}\".format(input_folder))\n print(\"outupt folder {}\".format(output_folder))\n print(\"outupt folder {}\".format(output_file_name))\n files = os.listdir(input_folder)\n pkl = PickleReadWrite()\n pcikles = pkl.read_list_of_pickles(files, input_folder)\n\n path = os.path.join(output_folder, \"pickle\", \"{}\".format(output_file_name))\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n pkl.write_list_dict_to_dict(\"{}.dict.pickle\".format(path), pcikles)\n\n df = merge_dict_to_data_frame(pcikles)\n path = os.path.join(output_folder, \"dataframe\", \"{}\".format(output_file_name))\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n pkl.write_pickle_to_data_frame(\"{}.dataframe.pickle\".format(path), df)\n\n path = os.path.join(output_folder, \"csv\", \"{}\".format(output_file_name))\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path), exist_ok=True)\n pkl.write_pickle_to_data_frame_to_csv(\"{}.dataframe.csv\".format(path), df)\n\n\ndef merge_pickle_files_in_each_dir(input_folder, output_folder, num_workers):\n print(\"running merge pickle files\")\n list_subfolders_with_paths = [f.path for f in os.scandir(input_folder) if f.is_dir()]\n if not os.path.exists(output_folder):\n os.makedirs(output_folder, exist_ok=True)\n\n file_futures = []\n with ProcessPoolExecutor(max_workers=num_workers) as fe:\n for folder in list_subfolders_with_paths:\n if os.path.isdir(folder):\n file_future = fe.submit(merge_pickles_to_another_pickle, folder, output_folder,\n os.path.basename(folder))\n file_futures.append(file_future)\n file_results = [f.result() for f in file_futures]\n\n\ndef merge_pickle_files_in_each_dir_no_parallel(input_folder, output_folder, num_workers):\n print(\"running merge pickle files no parallel\")\n list_subfolders_with_paths = [f.path for f in os.scandir(input_folder) if f.is_dir()]\n if not os.path.exists(output_folder):\n os.makedirs(output_folder, exist_ok=True)\n\n for folder in list_subfolders_with_paths:\n if os.path.isdir(folder):\n print(\"reading pkls from {}\".format(folder))\n merge_pickles_to_another_pickle(folder, output_folder, os.path.basename(folder))\n return True\n\n\ndef merge_text_files_in_each_dir_no_parallel(input_folder, output_folder, num_workers):\n print(\"running merge text files\")\n 
list_subfolders_with_paths = [f.path for f in os.scandir(input_folder) if f.is_dir()]\n if not os.path.exists(output_folder):\n os.makedirs(output_folder, exist_ok=True)\n\n for folder in list_subfolders_with_paths:\n if os.path.isdir(folder):\n merge_text_files_to_another_file(folder, os.path.join(output_folder,\n \"merged_{}\".format(os.path.basename(folder))))\n\n\ndef merge_text_files_in_each_dir(input_folder, output_folder, num_workers):\n print(\"running merge text files\")\n list_subfolders_with_paths = [f.path for f in os.scandir(input_folder) if f.is_dir()]\n if not os.path.exists(output_folder):\n os.makedirs(output_folder, exist_ok=True)\n\n file_futures = []\n with ProcessPoolExecutor(max_workers=num_workers) as fe:\n for folder in list_subfolders_with_paths:\n if os.path.isdir(folder):\n file_future = fe.submit(merge_text_files_to_another_file, folder,\n os.path.join(output_folder, \"merged_{}\".format(os.path.basename(folder))))\n file_futures.append(file_future)\n file_results = [f.result() for f in file_futures]\n\n\ndef merge_text_files(input_dir, output_dir, num_workers):\n merge_text_files_in_each_dir(input_dir, output_dir, num_workers)\n pass\n\n\ndef merge_picke_files(input_dir, output_dir, num_workers):\n merge_pickle_files_in_each_dir_no_parallel(input_dir, output_dir, num_workers)\n pkl = PickleReadWrite()\n # merge_pickles_to_another_pickle(os.path.join(output_dir,\"pickle\"), output_dir,\"all\")\n in_dir = os.path.join(output_dir, \"pickle\")\n ouput_file = os.path.join(output_dir, \"all.pkl\")\n print(\"Creating full picke file in this dir {} with file name {} \".format(in_dir, ouput_file))\n pkl.merge_and_update_counts_dict_from_pickle(in_dir, ouput_file)\n # pkl.merge_and_update_counts_dict_from_pickle(os.path.join(output_dir, \"dataframe\", \"all.dataframe.pickle\"), os.path.join(output_dir, \"all.dataframe.pickle\"))\n # pkl.merge_and_update_counts_dict_from_pickle(os.path.join(output_dir, \"pickle\", \"all.dict.pickle\"), os.path.join(output_dir, \"all.dict.pickle\"))\n\n\ndef moveFiles(source, dest):\n shutil.move(source, dest)\n\n\ndef main(num_workers, input_dir, output_dir, mode):\n print(\"{} workers\".format(num_workers))\n # output_dir = os.path.join(output_dir, \"1995_2005_ds\")\n if not os.path.exists(output_dir):\n print(\"output will be here {}\", output_dir)\n os.makedirs(output_dir)\n\n if \"text\" in mode:\n with ProcessPoolExecutor(max_workers=num_workers) as px:\n px.submit(merge_text_files, input_dir, output_dir, num_workers)\n if \"pickle\" in mode:\n with ProcessPoolExecutor(max_workers=num_workers) as pp:\n pp.submit(merge_picke_files, input_dir, output_dir, num_workers)\n\n\ndef main_no_parallel(num_workers, input_dir, output_dir, mode):\n print(\"running in single thred\")\n if not os.path.exists(output_dir):\n print(\"output will be here {}\", output_dir)\n os.makedirs(output_dir)\n\n if \"text\" in mode:\n merge_text_files(input_dir, output_dir, num_workers)\n if \"pickle\" in mode:\n merge_picke_files(input_dir, output_dir, num_workers)\n\n\ndef start_process():\n print(\"--mode pickle or text\")\n global parser, args\n parser = argparse.ArgumentParser(description='Parallel text processor')\n parser.add_argument('--num_workers', '-n', default=8, type=int)\n parser.add_argument('--output_dir', type=str, required=True)\n parser.add_argument('--input_dir', type=str, required=True)\n parser.add_argument('--mode', type=str, required=True)\n args = parser.parse_args()\n main_no_parallel(args.num_workers, args.input_dir, args.output_dir, 
args.mode)\n\n\n# merge_text_files_in_each_dir(\"E:\\\\DataScience\\\\projects\\\\nlp\\\\5grm\\\\data\\\\ngrm_txt\", \"E:\\\\DataScience\\\\projects\\\\nlp\\\\5grm\\\\data\\\\merged_txt_files\")\n\n# merge_pickle_files_in_each_dir_test(\"E:\\\\DataScience\\\\projects\\\\nlp\\\\5grm\\\\data\\\\ngrm_pickle\", \"E:\\\\DataScience\\\\projects\\\\nlp\\\\5grm\\\\data\\\\merged_pkl_files\")\n\n# merge_picke_files(\"E:\\\\download\\\\counts\\\\the_year_wise_count\", \"E:\\\\download\\\\counts\\\\the_year_wise_merged_2\", 0)\n\n# merge_pickles_to_another_pickle()\n\n#merge_picke_files(\"\", \"E:\\\\download\\\\counts\\\\words_year_wise_merged\\\\\", 0)\n\n# merge_pickles_to_another_pickle()\n\n#merge_picke_files(\"\", \"E:\\\\download\\\\counts\\\\words_year_wise_merged\\\\\", 0)\n\nif __name__ == '__main__':\n start_process()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
huylb314/mmpose | [
"c5cb46fbea58340cb89bc92a0884343a825d03a0"
] | [
"mmpose/models/detectors/top_down.py"
] | [
"import math\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch.nn as nn\nfrom mmcv.image import imwrite\nfrom mmcv.visualization.image import imshow\n\nfrom mmpose.core.evaluation import pose_pck_accuracy\nfrom mmpose.core.evaluation.top_down_eval import keypoints_from_heatmaps\nfrom mmpose.core.post_processing import flip_back\nfrom .. import builder\nfrom ..registry import POSENETS\nfrom .base import BasePose\n\n\[email protected]_module()\nclass TopDown(BasePose):\n \"\"\"Top-down pose detectors.\n\n Args:\n backbone (dict): Backbone modules to extract feature.\n keypoint_head (dict): Keypoint head to process feature.\n train_cfg (dict): Config for training. Default: None.\n test_cfg (dict): Config for testing. Default: None.\n pretrained (str): Path to the pretrained models.\n loss_pose (dict): Config for loss. Default: None.\n \"\"\"\n\n def __init__(self,\n backbone,\n keypoint_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n loss_pose=None):\n super().__init__()\n\n self.backbone = builder.build_backbone(backbone)\n\n if keypoint_head is not None:\n self.keypoint_head = builder.build_head(keypoint_head)\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n self.loss = builder.build_loss(loss_pose)\n self.init_weights(pretrained=pretrained)\n\n @property\n def with_keypoint(self):\n \"\"\"Check if has keypoint_head.\"\"\"\n return hasattr(self, 'keypoint_head')\n\n def init_weights(self, pretrained=None):\n \"\"\"Weight initialization for model.\"\"\"\n self.backbone.init_weights(pretrained)\n if self.with_keypoint:\n self.keypoint_head.init_weights()\n\n def forward(self,\n img,\n target=None,\n target_weight=None,\n img_metas=None,\n return_loss=True,\n return_heatmap=False,\n **kwargs):\n \"\"\"Calls either forward_train or forward_test depending on whether\n return_loss=True. Note this setting will change the expected inputs.\n When `return_loss=True`, img and img_meta are single-nested (i.e.\n Tensor and List[dict]), and when `resturn_loss=False`, img and img_meta\n should be double nested (i.e. List[Tensor], List[List[dict]]), with\n the outer list indicating test time augmentations.\n\n Note:\n batch_size: N\n num_keypoints: K\n num_img_channel: C (Default: 3)\n img height: imgH\n img weight: imgW\n heatmaps height: H\n heatmaps weight: W\n\n Args:\n img (torch.Tensor[NxCximgHximgW]): Input images.\n target (torch.Tensor[NxKxHxW]): Target heatmaps.\n target_weight (torch.Tensor[NxKx1]): Weights across\n different joint types.\n img_metas (list(dict)): Information about data augmentation\n By default this includes:\n - \"image_file: path to the image file\n - \"center\": center of the bbox\n - \"scale\": scale of the bbox\n - \"rotation\": rotation of the bbox\n - \"bbox_score\": score of bbox\n return_loss (bool): Option to `return loss`. 
`return loss=True`\n for training, `return loss=False` for validation & test.\n return_heatmap (bool) : Option to return heatmap.\n\n Returns:\n dict|tuple: if `return loss` is true, then return losses.\n Otherwise, return predicted poses, boxes, image paths\n and heatmaps.\n \"\"\"\n if return_loss:\n return self.forward_train(img, target, target_weight, img_metas,\n **kwargs)\n return self.forward_test(\n img, img_metas, return_heatmap=return_heatmap, **kwargs)\n\n def forward_train(self, img, target, target_weight, img_metas, **kwargs):\n \"\"\"Defines the computation performed at every call when training.\"\"\"\n output = self.backbone(img)\n if self.with_keypoint:\n output = self.keypoint_head(output)\n\n # if return loss\n losses = dict()\n if isinstance(output, list):\n if target.dim() == 5 and target_weight.dim() == 4:\n # target: [batch_size, num_outputs, num_joints, h, w]\n # target_weight: [batch_size, num_outputs, num_joints, 1]\n assert target.size(1) == len(output)\n if isinstance(self.loss, nn.Sequential):\n assert len(self.loss) == len(output)\n if 'loss_weights' in self.train_cfg and self.train_cfg[\n 'loss_weights'] is not None:\n assert len(self.train_cfg['loss_weights']) == len(output)\n for i in range(len(output)):\n if target.dim() == 5 and target_weight.dim() == 4:\n target_i = target[:, i, :, :, :]\n target_weight_i = target_weight[:, i, :, :]\n else:\n target_i = target\n target_weight_i = target_weight\n if isinstance(self.loss, nn.Sequential):\n loss_func = self.loss[i]\n else:\n loss_func = self.loss\n\n loss_i = loss_func(output[i], target_i, target_weight_i)\n if 'loss_weights' in self.train_cfg and self.train_cfg[\n 'loss_weights']:\n loss_i = loss_i * self.train_cfg['loss_weights'][i]\n if 'mse_loss' not in losses:\n losses['mse_loss'] = loss_i\n else:\n losses['mse_loss'] += loss_i\n else:\n assert not isinstance(self.loss, nn.Sequential)\n assert target.dim() == 4 and target_weight.dim() == 3\n # target: [batch_size, num_joints, h, w]\n # target_weight: [batch_size, num_joints, 1]\n losses['mse_loss'] = self.loss(output, target, target_weight)\n\n if isinstance(output, list):\n if target.dim() == 5 and target_weight.dim() == 4:\n _, avg_acc, _ = pose_pck_accuracy(\n output[-1].detach().cpu().numpy(),\n target[:, -1, ...].detach().cpu().numpy(),\n target_weight[:, -1,\n ...].detach().cpu().numpy().squeeze(-1) > 0)\n # Only use the last output for prediction\n else:\n _, avg_acc, _ = pose_pck_accuracy(\n output[-1].detach().cpu().numpy(),\n target.detach().cpu().numpy(),\n target_weight.detach().cpu().numpy().squeeze(-1) > 0)\n else:\n _, avg_acc, _ = pose_pck_accuracy(\n output.detach().cpu().numpy(),\n target.detach().cpu().numpy(),\n target_weight.detach().cpu().numpy().squeeze(-1) > 0)\n\n losses['acc_pose'] = float(avg_acc)\n\n return losses\n\n def forward_test(self, img, img_metas, return_heatmap=False, **kwargs):\n \"\"\"Defines the computation performed at every call when testing.\"\"\"\n assert img.size(0) == 1\n assert len(img_metas) == 1\n img_metas = img_metas[0]\n\n # compute backbone features\n output = self.backbone(img)\n\n # process head\n all_preds, all_boxes, image_path, heatmap = self.process_head(\n output, img, img_metas, return_heatmap=return_heatmap)\n\n return all_preds, all_boxes, image_path, heatmap\n\n def forward_dummy(self, img):\n \"\"\"Used for computing network FLOPs.\n\n See ``tools/get_flops.py``.\n\n Args:\n img (torch.Tensor): Input image.\n\n Returns:\n Tensor: Output heatmaps.\n \"\"\"\n output = self.backbone(img)\n 
if self.with_keypoint:\n output = self.keypoint_head(output)\n return output\n\n def process_head(self, output, img, img_metas, return_heatmap=False):\n \"\"\"Process heatmap and keypoints from backbone features.\"\"\"\n flip_pairs = img_metas['flip_pairs']\n\n if self.with_keypoint:\n output = self.keypoint_head(output)\n\n if isinstance(output, list):\n output = output[-1]\n\n output_heatmap = output.detach().cpu().numpy()\n if self.test_cfg['flip_test']:\n img_flipped = img.flip(3)\n\n output_flipped = self.backbone(img_flipped)\n if self.with_keypoint:\n output_flipped = self.keypoint_head(output_flipped)\n if isinstance(output_flipped, list):\n output_flipped = output_flipped[-1]\n output_flipped = flip_back(output_flipped.detach().cpu().numpy(),\n flip_pairs)\n\n # feature is not aligned, shift flipped heatmap for higher accuracy\n if self.test_cfg['shift_heatmap']:\n output_flipped[:, :, :, 1:] = output_flipped[:, :, :, :-1]\n output_heatmap = (output_heatmap + output_flipped) * 0.5\n\n c = img_metas['center'].reshape(1, -1)\n s = img_metas['scale'].reshape(1, -1)\n\n score = 1.0\n if 'bbox_score' in img_metas:\n score = np.array(img_metas['bbox_score']).reshape(-1)\n\n preds, maxvals = keypoints_from_heatmaps(\n output_heatmap,\n c,\n s,\n post_process=self.test_cfg['post_process'],\n unbiased=self.test_cfg.get('unbiased_decoding', False),\n kernel=self.test_cfg['modulate_kernel'])\n\n all_preds = np.zeros((1, output_heatmap.shape[1], 3), dtype=np.float32)\n all_boxes = np.zeros((1, 6), dtype=np.float32)\n image_path = []\n\n all_preds[0, :, 0:2] = preds[:, :, 0:2]\n all_preds[0, :, 2:3] = maxvals\n all_boxes[0, 0:2] = c[:, 0:2]\n all_boxes[0, 2:4] = s[:, 0:2]\n all_boxes[0, 4] = np.prod(s * 200.0, axis=1)\n all_boxes[0, 5] = score\n image_path.extend(img_metas['image_file'])\n\n if not return_heatmap:\n output_heatmap = None\n\n return all_preds, all_boxes, image_path, output_heatmap\n\n def show_result(self,\n img,\n result,\n skeleton=None,\n kpt_score_thr=0.3,\n bbox_color='green',\n pose_kpt_color=None,\n pose_limb_color=None,\n radius=4,\n text_color=(255, 0, 0),\n thickness=1,\n font_scale=0.5,\n win_name='',\n show=False,\n wait_time=0,\n out_file=None):\n \"\"\"Draw `result` over `img`.\n\n Args:\n img (str or Tensor): The image to be displayed.\n result (list[dict]): The results to draw over `img`\n (bbox_result, pose_result).\n kpt_score_thr (float, optional): Minimum score of keypoints\n to be shown. 
Default: 0.3.\n bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.\n pose_kpt_color (np.array[Nx3]`): Color of N keypoints.\n If None, do not draw keypoints.\n pose_limb_color (np.array[Mx3]): Color of M limbs.\n If None, do not draw limbs.\n text_color (str or tuple or :obj:`Color`): Color of texts.\n thickness (int): Thickness of lines.\n font_scale (float): Font scales of texts.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n Default: 0.\n out_file (str or None): The filename to write the image.\n Default: None.\n\n Returns:\n Tensor: Visualized img, only if not `show` or `out_file`.\n \"\"\"\n\n img = mmcv.imread(img)\n img = img.copy()\n img_h, img_w, _ = img.shape\n\n bbox_result = []\n pose_result = []\n for res in result:\n bbox_result.append(res['bbox'])\n pose_result.append(res['keypoints'])\n\n if len(bbox_result) > 0:\n bboxes = np.vstack(bbox_result)\n # draw bounding boxes\n mmcv.imshow_bboxes(\n img,\n bboxes,\n colors=bbox_color,\n top_k=-1,\n thickness=thickness,\n show=False,\n win_name=win_name,\n wait_time=wait_time,\n out_file=None)\n\n for _, kpts in enumerate(pose_result):\n # draw each point on image\n if pose_kpt_color is not None:\n assert len(pose_kpt_color) == len(kpts)\n for kid, kpt in enumerate(kpts):\n x_coord, y_coord, kpt_score = int(kpt[0]), int(\n kpt[1]), kpt[2]\n if kpt_score > kpt_score_thr:\n img_copy = img.copy()\n r, g, b = pose_kpt_color[kid]\n cv2.circle(img_copy, (int(x_coord), int(y_coord)),\n radius, (int(r), int(g), int(b)), -1)\n transparency = max(0, min(1, kpt_score))\n cv2.addWeighted(\n img_copy,\n transparency,\n img,\n 1 - transparency,\n 0,\n dst=img)\n\n # draw limbs\n if skeleton is not None and pose_limb_color is not None:\n assert len(pose_limb_color) == len(skeleton)\n for sk_id, sk in enumerate(skeleton):\n pos1 = (int(kpts[sk[0] - 1, 0]), int(kpts[sk[0] - 1,\n 1]))\n pos2 = (int(kpts[sk[1] - 1, 0]), int(kpts[sk[1] - 1,\n 1]))\n if (pos1[0] > 0 and pos1[0] < img_w and pos1[1] > 0\n and pos1[1] < img_h and pos2[0] > 0\n and pos2[0] < img_w and pos2[1] > 0\n and pos2[1] < img_h\n and kpts[sk[0] - 1, 2] > kpt_score_thr\n and kpts[sk[1] - 1, 2] > kpt_score_thr):\n img_copy = img.copy()\n X = (pos1[0], pos2[0])\n Y = (pos1[1], pos2[1])\n mX = np.mean(X)\n mY = np.mean(Y)\n length = ((Y[0] - Y[1])**2 + (X[0] - X[1])**2)**0.5\n angle = math.degrees(\n math.atan2(Y[0] - Y[1], X[0] - X[1]))\n stickwidth = 2\n polygon = cv2.ellipse2Poly(\n (int(mX), int(mY)),\n (int(length / 2), int(stickwidth)), int(angle),\n 0, 360, 1)\n\n r, g, b = pose_limb_color[sk_id]\n cv2.fillConvexPoly(img_copy, polygon,\n (int(r), int(g), int(b)))\n transparency = max(\n 0,\n min(\n 1, 0.5 *\n (kpts[sk[0] - 1, 2] + kpts[sk[1] - 1, 2])))\n cv2.addWeighted(\n img_copy,\n transparency,\n img,\n 1 - transparency,\n 0,\n dst=img)\n\n if show:\n imshow(img, win_name, wait_time)\n\n if out_file is not None:\n imwrite(img, out_file)\n\n return img\n"
] | [
[
"numpy.mean",
"numpy.prod",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Blank-Wang/dcase2017_task4_cvssp | [
"97ab1e29709bc87c9c1f9459929fb3853e7fa664"
] | [
"prepare_data.py"
] | [
"from __future__ import print_function\nimport numpy as np\nimport sys\nimport soundfile\nimport os\nimport librosa\nfrom scipy import signal\nimport pickle\nimport cPickle\nimport scipy\nimport time\nimport csv\nimport gzip\nimport h5py\nimport matplotlib.ticker as ticker\nimport matplotlib.pyplot as plt\nfrom sklearn import preprocessing\nfrom sklearn import metrics\nimport argparse\n\nimport config as cfg\n\n# Read wav\ndef read_audio(path, target_fs=None):\n (audio, fs) = soundfile.read(path)\n if audio.ndim > 1:\n audio = np.mean(audio, axis=1)\n if target_fs is not None and fs != target_fs:\n audio = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)\n fs = target_fs\n return audio, fs\n \n# Write wav\ndef write_audio(path, audio, sample_rate):\n soundfile.write(file=path, data=audio, samplerate=sample_rate)\n\n# Create an empty folder\ndef create_folder(fd):\n if not os.path.exists(fd):\n os.makedirs(fd)\n\n### Feature extraction. \ndef extract_features(wav_dir, out_dir, recompute):\n \"\"\"Extract log mel spectrogram features. \n \n Args:\n wav_dir: string, directory of wavs. \n out_dir: string, directory to write out features. \n recompute: bool, if True recompute all features, if False skip existed\n extracted features. \n \n Returns:\n None\n \"\"\"\n fs = cfg.sample_rate\n n_window = cfg.n_window\n n_overlap = cfg.n_overlap\n \n create_folder(out_dir)\n names = [na for na in os.listdir(wav_dir) if na.endswith(\".wav\")]\n names = sorted(names)\n print(\"Total file number: %d\" % len(names))\n\n # Mel filter bank\n melW = librosa.filters.mel(sr=fs, \n n_fft=n_window, \n n_mels=64, \n fmin=0., \n fmax=8000.)\n \n cnt = 0\n t1 = time.time()\n for na in names:\n wav_path = wav_dir + '/' + na\n out_path = out_dir + '/' + os.path.splitext(na)[0] + '.p'\n \n # Skip features already computed\n if recompute or (not os.path.isfile(out_path)):\n print(cnt, out_path)\n (audio, _) = read_audio(wav_path, fs)\n \n # Skip corrupted wavs\n if audio.shape[0] == 0:\n print(\"File %s is corrupted!\" % wav_path)\n else:\n # Compute spectrogram\n ham_win = np.hamming(n_window)\n [f, t, x] = signal.spectral.spectrogram(\n x=audio, \n window=ham_win,\n nperseg=n_window, \n noverlap=n_overlap, \n detrend=False, \n return_onesided=True, \n mode='magnitude') \n x = x.T\n x = np.dot(x, melW.T)\n x = np.log(x + 1e-8)\n x = x.astype(np.float32)\n \n # Dump to pickle\n cPickle.dump(x, open(out_path, 'wb'), \n protocol=cPickle.HIGHEST_PROTOCOL)\n cnt += 1\n print(\"Extracting feature time: %s\" % (time.time() - t1,))\n\n### Pack features of hdf5 file\ndef pack_features_to_hdf5(fe_dir, csv_path, out_path):\n \"\"\"Pack extracted features to a single hdf5 file. \n \n This hdf5 file can speed up loading the features. This hdf5 file has \n structure:\n na_list: list of names\n x: bool array, (n_clips)\n y: float32 array, (n_clips, n_time, n_freq)\n \n Args: \n fe_dir: string, directory of features. \n csv_path: string | \"\", path of csv file. E.g. \"testing_set.csv\". If the \n string is empty, then pack features with all labels False. \n out_path: string, path to write out the created hdf5 file. \n \n Returns:\n None\n \"\"\"\n max_len = cfg.max_len\n create_folder(os.path.dirname(out_path))\n \n t1 = time.time()\n x_all, y_all, na_all = [], [], []\n \n if csv_path != \"\": # Pack from csv file (training & testing from dev. 
data)\n with open(csv_path, 'rb') as f:\n reader = csv.reader(f)\n lis = list(reader)\n cnt = 0\n for li in lis:\n [na, bgn, fin, lbs, ids] = li\n if cnt % 100 == 0: print(cnt)\n na = os.path.splitext(na)[0]\n bare_na = 'Y' + na + '_' + bgn + '_' + fin # Correspond to the wav name. \n fe_na = bare_na + \".p\"\n fe_path = os.path.join(fe_dir, fe_na)\n \n if not os.path.isfile(fe_path):\n print(\"File %s is in the csv file but the feature is not extracted!\" % fe_path)\n else:\n na_all.append(bare_na[1:] + \".wav\") # Remove 'Y' in the begining. \n x = cPickle.load(open(fe_path, 'rb'))\n x = pad_trunc_seq(x, max_len)\n x_all.append(x)\n ids = ids.split(',')\n y = ids_to_multinomial(ids)\n y_all.append(y)\n cnt += 1\n else: # Pack from features without ground truth label (dev. data)\n names = os.listdir(fe_dir)\n names = sorted(names)\n for fe_na in names:\n bare_na = os.path.splitext(fe_na)[0]\n fe_path = os.path.join(fe_dir, fe_na)\n na_all.append(bare_na + \".wav\")\n x = cPickle.load(open(fe_path, 'rb'))\n x = pad_trunc_seq(x, max_len)\n x_all.append(x)\n y_all.append(None)\n \n x_all = np.array(x_all, dtype=np.float32)\n y_all = np.array(y_all, dtype=np.bool)\n print(\"len(na_all): %d\", len(na_all))\n print(\"x_all.shape: %s, %s\" % (x_all.shape, x_all.dtype))\n print(\"y_all.shape: %s, %s\" % (y_all.shape, y_all.dtype))\n \n with h5py.File(out_path, 'w') as hf:\n hf.create_dataset('na_list', data=na_all)\n hf.create_dataset('x', data=x_all)\n hf.create_dataset('y', data=y_all)\n \n print(\"Save hdf5 to %s\" % out_path)\n print(\"Pack features time: %s\" % (time.time() - t1,))\n \ndef ids_to_multinomial(ids):\n \"\"\"Ids of wav to multinomial representation. \n \n Args:\n ids: list of id, e.g. ['/m/0284vy3', '/m/02mfyn']\n \n Returns:\n 1d array, multimonial representation, e.g. [1,0,1,0,0,...]\n \"\"\"\n y = np.zeros(len(cfg.lbs))\n for id in ids:\n index = cfg.id_to_idx[id]\n y[index] = 1\n return y\n \ndef pad_trunc_seq(x, max_len):\n \"\"\"Pad or truncate a sequence data to a fixed length. \n \n Args:\n x: ndarray, input sequence data. \n max_len: integer, length of sequence to be padded or truncated. \n \n Returns:\n ndarray, Padded or truncated input sequence data. \n \"\"\"\n L = len(x)\n shape = x.shape\n if L < max_len:\n pad_shape = (max_len - L,) + shape[1:]\n pad = np.zeros(pad_shape)\n x_new = np.concatenate((x, pad), axis=0)\n else:\n x_new = x[0:max_len]\n return x_new\n \n### Load data & scale data\ndef load_hdf5_data(hdf5_path, verbose=1):\n \"\"\"Load hdf5 data. \n \n Args:\n hdf5_path: string, path of hdf5 file. \n verbose: integar, print flag. \n \n Returns:\n x: ndarray (np.float32), shape: (n_clips, n_time, n_freq)\n y: ndarray (np.bool), shape: (n_clips, n_classes)\n na_list: list, containing wav names. \n \"\"\"\n t1 = time.time()\n with h5py.File(hdf5_path, 'r') as hf:\n x = np.array(hf.get('x'))\n y = np.array(hf.get('y'))\n na_list = list(hf.get('na_list'))\n \n if verbose == 1:\n print(\"--- %s ---\" % hdf5_path)\n print(\"x.shape: %s %s\" % (x.shape, x.dtype))\n print(\"y.shape: %s %s\" % (y.shape, y.dtype))\n print(\"len(na_list): %d\" % len(na_list))\n print(\"Loading time: %s\" % (time.time() - t1,))\n \n return x, y, na_list\n\ndef calculate_scaler(hdf5_path, out_path):\n \"\"\"Calculate scaler of input data on each frequency bin. \n \n Args:\n hdf5_path: string, path of packed hdf5 features file. \n out_path: string, path to write out the calculated scaler. \n \n Returns:\n None. 
\n \"\"\"\n create_folder(os.path.dirname(out_path))\n t1 = time.time()\n (x, y, na_list) = load_hdf5_data(hdf5_path, verbose=1)\n (n_clips, n_time, n_freq) = x.shape\n x2d = x.reshape((n_clips * n_time, n_freq))\n scaler = preprocessing.StandardScaler().fit(x2d)\n print(\"Mean: %s\" % (scaler.mean_,))\n print(\"Std: %s\" % (scaler.scale_,))\n print(\"Calculating scaler time: %s\" % (time.time() - t1,))\n pickle.dump(scaler, open(out_path, 'wb'))\n \ndef do_scale(x3d, scaler_path, verbose=1):\n \"\"\"Do scale on the input sequence data. \n \n Args:\n x3d: ndarray, input sequence data, shape: (n_clips, n_time, n_freq)\n scaler_path: string, path of pre-calculated scaler. \n verbose: integar, print flag. \n \n Returns:\n Scaled input sequence data. \n \"\"\"\n t1 = time.time()\n scaler = pickle.load(open(scaler_path, 'rb'))\n (n_clips, n_time, n_freq) = x3d.shape\n x2d = x3d.reshape((n_clips * n_time, n_freq))\n x2d_scaled = scaler.transform(x2d)\n x3d_scaled = x2d_scaled.reshape((n_clips, n_time, n_freq))\n if verbose == 1:\n print(\"Scaling time: %s\" % (time.time() - t1,))\n return x3d_scaled\n\n### Main function\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"\")\n subparsers = parser.add_subparsers(dest='mode')\n \n parser_ef = subparsers.add_parser('extract_features')\n parser_ef.add_argument('--wav_dir', type=str)\n parser_ef.add_argument('--out_dir', type=str)\n parser_ef.add_argument('--recompute', type=bool)\n \n parser_pf = subparsers.add_parser('pack_features')\n parser_pf.add_argument('--fe_dir', type=str)\n parser_pf.add_argument('--csv_path', type=str)\n parser_pf.add_argument('--out_path', type=str)\n \n parser_cs = subparsers.add_parser('calculate_scaler')\n parser_cs.add_argument('--hdf5_path', type=str)\n parser_cs.add_argument('--out_path', type=str)\n\n args = parser.parse_args()\n \n if args.mode == 'extract_features':\n extract_features(wav_dir=args.wav_dir, \n out_dir=args.out_dir, \n recompute=args.recompute)\n elif args.mode == 'pack_features':\n pack_features_to_hdf5(fe_dir=args.fe_dir, \n csv_path=args.csv_path, \n out_path=args.out_path)\n elif args.mode == 'calculate_scaler':\n calculate_scaler(hdf5_path=args.hdf5_path, \n out_path=args.out_path)\n else:\n raise Exception(\"Incorrect argument!\")\n"
] | [
[
"numpy.dot",
"numpy.log",
"scipy.signal.spectral.spectrogram",
"numpy.concatenate",
"numpy.mean",
"numpy.hamming",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
btrr/covid19-epicenters | [
"4134967f6dbbdeb5ad91a435dc72d905e9886fd6"
] | [
"deprecated/italy/it-nc.py"
] | [
"import datetime as dt\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport matplotlib.ticker as ticker\nfrom matplotlib.dates import MO, TU, WE, TH, FR, SA, SU\n\ndates = ['1/31/2020', '2/1/2020', '2/2/2020', '2/3/2020', '2/4/2020', '2/5/2020', '2/6/2020', '2/7/2020', '2/8/2020', '2/9/2020', '2/10/2020', '2/11/2020', '2/12/2020', '2/13/2020', '2/14/2020', '2/15/2020', '2/16/2020', '2/17/2020', '2/18/2020', '2/19/2020', '2/20/2020', '2/21/2020', '2/22/2020', '2/23/2020', '2/24/2020', '2/25/2020', '2/26/2020', '2/27/2020', '2/28/2020', '2/29/2020', '3/1/2020', '3/2/2020', '3/3/2020', '3/4/2020', '3/5/2020', '3/6/2020', '3/7/2020', '3/8/2020', '3/9/2020', '3/10/2020', '3/11/2020', '3/12/2020', '3/13/2020', '3/14/2020', '3/15/2020', '3/16/2020', '3/17/2020', '3/18/2020', '3/19/2020', '3/20/2020', '3/21/2020', '3/22/2020', '3/23/2020', '3/24/2020', '3/25/2020', '3/26/2020', '3/27/2020', '3/28/2020', '3/29/2020', '3/30/2020', '3/31/2020', '4/1/2020', '4/2/2020', '4/3/2020', '4/4/2020', '4/5/2020', '4/6/2020', '4/7/2020', '4/8/2020', '4/9/2020', '4/10/2020', '4/11/2020', '4/12/2020', '4/13/2020', '4/14/2020', '4/15/2020', '4/16/2020', '4/17/2020', '4/18/2020', '4/19/2020', '4/20/2020', '4/21/2020', '4/22/2020', '4/23/2020', '4/24/2020', '4/25/2020', '4/26/2020', '4/27/2020', '4/28/2020', '4/29/2020', '4/30/2020', '5/1/2020', '5/2/2020', '5/3/2020', '5/4/2020', '5/5/2020', '5/6/2020', '5/7/2020', '5/8/2020', '5/9/2020', '5/10/2020', '5/11/2020', '5/12/2020', '5/13/2020', '5/14/2020', '5/15/2020']\n\n# format dates\nx_values = [dt.datetime.strptime(d, \"%m/%d/%Y\").date() for d in dates]\nax = plt.gca()\nformatter = mdates.DateFormatter(\"%m/%d\")\nax.xaxis.set_major_formatter(formatter)\n\n# create x-axis\nax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=(MO, TU, WE, TH, FR, SA, SU), interval=6))\n# minor tick = daily\nax.xaxis.set_minor_locator(mdates.WeekdayLocator(byweekday=(MO, TU, WE, TH, FR, SA, SU)))\n\n# quarantine of red zone\nplt.axvline(dt.datetime(2020, 3, 1), linestyle='--', color='red', linewidth=2, label='red zone')\n# schools closed\nplt.axvline(dt.datetime(2020, 3, 4), linestyle='--', color='orange', linewidth=2, label='schools')\n# stay-at-home\nplt.axvline(dt.datetime(2020, 3, 21), color='#0b28f7', linewidth=2, label='nonessential')\n\n# format y-axis\nax.get_yaxis().set_major_formatter(ticker.FuncFormatter(lambda x, pos: format(int(x/1000), ',')))\n\n# new cases by day\nnew_cases = [2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 58, 78, 72, 94, 147, 185, 234, 239, 573, 335, 466, 587, 769, 778, 1247, 1492, 1797, 977, 2313, 2651, 2547, 3497, 3590, 3233, 3526, 4207, 5322, 5986, 6557, 5560, 4789, 5249, 5210, 6203, 5909, 5974, 5217, 4050, 4053, 4782, 4668, 4585, 4805, 4316, 3599, 3039, 3836, 4204, 3951, 4694, 4092, 3153, 2972, 2667, 3786, 3493, 3491, 3047, 2256, 2729, 3370, 2646, 3021, 2357, 2324, 1739, 2091, 2086, 1872, 1965, 1900, 1389, 1221, 1075, 1444, 1401, 1327, 1083, 802, 744, 1402, 888, 992, 789]\n\n# text labels\nplt.title('Covid-19 in Italy: New Confirmed Diagnoses')\nplt.xlabel('Date')\nplt.ylabel('Number of New Cases (in thousands)')\nplt.legend(['Quarantine of Red Zone', 'Nationwide Schools Closure', 'Non-Essential Services Closure'], loc='upper left')\n\n# create the graph\nplt.plot(x_values, new_cases, color='#730af2', linewidth=2)\n\nplt.show()"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.legend",
"matplotlib.dates.WeekdayLocator",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bycycle-tools/bycycle | [
"51425920236cbcab6eaff80cfb84b464dd2848fe"
] | [
"bycycle/group/utils.py"
] | [
"\"\"\"Functions to compute features across epoched data.\"\"\"\n\nfrom importlib import import_module\nimport numpy as np\n\n###################################################################################################\n###################################################################################################\n\ndef progress_bar(iterable, progress, n_to_run, pbar_desc='Computing Bycycle Features'):\n \"\"\"Add a progress bar to an iterable to be processed.\n\n Parameters\n ----------\n iterable : list or iterable\n Iterable object to potentially apply progress tracking to.\n progress : {None, 'tqdm', 'tqdm.notebook'}\n Which kind of progress bar to use. If None, no progress bar is used.\n n_to_run : int\n Number of jobs to complete.\n pbar_desc: str, optional\n Display text for the progress bar.\n\n Returns\n -------\n pbar : iterable or tqdm object\n Iterable object, with tqdm progress functionality, if requested.\n\n Raises\n ------\n ValueError\n If the input for `progress` is not understood.\n\n Notes\n -----\n\n - ``tqdm`` must be installed separately from bycycle.\n - The explicit `n_to_run` input is required as tqdm requires this in the parallel case.\n The `tqdm` object that is potentially returned acts the same as the underlying iterable,\n with the addition of printing out progress every time items are requested.\n\n\n Examples\n --------\n Use a ``tqdm`` progress bar, which must me installed separately from ``bycycle``,\n when computing the features for 10 signals:\n\n >>> from multiprocessing import Pool\n >>> from functools import partial\n >>> from bycycle.features import compute_features\n >>> from neurodsp.sim import sim_bursty_oscillation\n >>> sigs = [sim_bursty_oscillation(10, fs=500, freq=10)] * 10\n >>> mapping = Pool(1).imap(partial(compute_features, fs=500, f_range=(8, 12),\n ... return_samples=False), sigs)\n >>> df_features = list(progress_bar(mapping, progress='tqdm', n_to_run=len(sigs)))\n \"\"\"\n\n # Check progress specifier is okay\n tqdm_options = ['tqdm', 'tqdm.notebook']\n if progress is not None and progress not in tqdm_options:\n raise ValueError(\"Progress bar option not understood.\")\n\n # Use a tqdm, progress bar, if requested\n if progress:\n\n # Try loading the tqdm module\n try:\n tqdm = import_module(progress)\n\n # If tqdm loaded, apply the progress bar to the iterable\n pbar = tqdm.tqdm(iterable, desc=pbar_desc, total=n_to_run, dynamic_ncols=True)\n\n except ImportError:\n\n # If tqdm isn't available, proceed without a progress bar\n print((\"A progress bar requiring the 'tqdm' module was requested, \"\n \"but 'tqdm' is not installed. 
\\nProceeding without using a progress bar.\"))\n pbar = iterable\n\n # If progress is None, return the original iterable without a progress bar applied\n else:\n pbar = iterable\n\n return pbar\n\n\ndef check_kwargs_shape(sigs, compute_features_kwargs, axis):\n \"\"\"Raise an error when compute_features_kwargs and the shape of sigs mismatch.\n\n Parameters\n ----------\n sigs : 2d or 3d array\n Voltage time series.\n compute_features_kwargs : dict or 1d list of dict or 2d list of dict\n Keyword arguments used in :func:`~.compute_features`.\n axis : {None, 0, 1, (0, 1)}\n Which axes to calculate features across.\n\n Raises\n ------\n ValueError\n If the shape compute_features_kwargs and sigs are not compatible.\n \"\"\"\n\n kwargs = compute_features_kwargs\n\n # Don't raise error when kwargs is None or a dict\n if isinstance(kwargs, dict) or kwargs is None:\n return\n\n # Ensure kwargs match to sigs\n kwargs_dim0 = np.shape(kwargs)[0]\n kwargs_dim1 = np.shape(kwargs)[1] if kwargs.ndim == 2 else None\n if kwargs.ndim == 3:\n raise ValueError(\"compute_features_kwargs must be 1D or 2D.\")\n\n # Sig checks\n sigs_dim0 = np.shape(sigs)[0]\n sigs_dim1 = np.shape(sigs)[1] if sigs.ndim == 3 else None\n\n # 2D checks\n if sigs_dim1 == None and axis in [0, None] and kwargs_dim0 != sigs_dim0:\n kwargs_shape = (sigs_dim0,)\n elif sigs_dim1 == None and axis in [0, None] and kwargs_dim1 is not None:\n kwargs_shape = (sigs_dim0,)\n\n # 3D checks\n elif sigs_dim1 != None and axis == 0 and kwargs_dim0 != sigs_dim0:\n kwargs_shape = (sigs_dim0,)\n elif sigs_dim1 != None and axis == 1 and kwargs_dim0 != sigs_dim1:\n kwargs_shape = (sigs_dim1,)\n elif sigs_dim1 != None and axis == (0,1) and (kwargs_dim0!=sigs_dim0 or kwargs_dim1!=sigs_dim1):\n kwargs_shape = (sigs_dim0, sigs_dim1)\n\n # Axis checks\n elif sigs_dim1 == None and axis not in [0, None]:\n raise ValueError(\"When sigs is 2D, axis must be either {0, None}.\")\n elif sigs_dim1 != None and axis not in [0, 1, (0, 1)]:\n raise ValueError(\"When sigs is 3D, axis must be either {0, 1, (0, 1)}\")\n else:\n return\n\n error_str = \"\"\"\n When sigs is {sigs_str}D and axis is {axis_str}, compute_features_kwargs must be {kwargs_dim}D\n with a shape equal to {kwargs_shape}.\n \"\"\".format(sigs_str=str(sigs.ndim), axis_str=str(axis),\n kwargs_dim=str(kwargs.ndim), kwargs_shape=str(kwargs_shape))\n\n raise ValueError(error_str)\n"
] | [
[
"numpy.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lcchu/closed-form-matting | [
"622a6b74f333cb6cf9d5458a16302ab8b605c69a"
] | [
"closed_form_matting.py"
] | [
"#!/usr/bin/env python \n\"\"\"Implementation of Closed-Form Matting.\n\nThis module implements natural image matting method described in:\n Levin, Anat, Dani Lischinski, and Yair Weiss. \"A closed-form solution to natural image matting.\"\n IEEE Transactions on Pattern Analysis and Machine Intelligence 30.2 (2008): 228-242.\n\nThe code can be used in two ways:\n 1. By importing solve_foregound_background in your code:\n ```\n import closed_form_matting\n ...\n # For scribles input\n alpha = closed_form_matting.closed_form_matting_with_scribbles(image, scribbles)\n\n # For trimap input\n alpha = closed_form_matting.closed_form_matting_with_trimap(image, trimap)\n\n # For prior with confidence\n alpha = closed_form_matting.closed_form_matting_with_prior(\n image, prior, prior_confidence, optional_const_mask)\n\n # To get Matting Laplacian for image\n laplacian = compute_laplacian(image, optional_const_mask)\n ```\n 2. From command line:\n ```\n # Scribbles input\n ./closed_form_matting.py input_image.png -s scribbles_image.png -o output_alpha.png\n\n # Trimap input\n ./closed_form_matting.py input_image.png -t scribbles_image.png -o output_alpha.png\n\n # Add flag --solve-fg to compute foreground color and output RGBA image instead\n # of alpha.\n ```\n\"\"\"\n\nfrom __future__ import division\n\nimport logging\n\nimport cv2\nimport numpy as np\nfrom numpy.lib.stride_tricks import as_strided\nimport scipy.sparse\nimport scipy.sparse.linalg\n\n\ndef _rolling_block(A, block=(3, 3)):\n \"\"\"Applies sliding window to given matrix.\"\"\"\n shape = (A.shape[0] - block[0] + 1, A.shape[1] - block[1] + 1) + block\n strides = (A.strides[0], A.strides[1]) + A.strides\n return as_strided(A, shape=shape, strides=strides)\n\n\ndef compute_laplacian(img, mask=None, eps=10**(-7), win_rad=1):\n \"\"\"Computes Matting Laplacian for a given image.\n\n Args:\n img: 3-dim numpy matrix with input image\n mask: mask of pixels for which Laplacian will be computed.\n If not set Laplacian will be computed for all pixels.\n eps: regularization parameter controlling alpha smoothness\n from Eq. 12 of the original paper. Defaults to 1e-7.\n win_rad: radius of window used to build Matting Laplacian (i.e.\n radius of omega_k in Eq. 
12).\n Returns: sparse matrix holding Matting Laplacian.\n \"\"\"\n\n win_size = (win_rad * 2 + 1) ** 2\n h, w, d = img.shape\n # Number of window centre indices in h, w axes\n c_h, c_w = h - 2 * win_rad, w - 2 * win_rad\n win_diam = win_rad * 2 + 1\n\n indsM = np.arange(h * w).reshape((h, w))\n ravelImg = img.reshape(h * w, d)\n win_inds = _rolling_block(indsM, block=(win_diam, win_diam))\n\n win_inds = win_inds.reshape(c_h, c_w, win_size)\n if mask is not None:\n mask = cv2.dilate(\n mask.astype(np.uint8),\n np.ones((win_diam, win_diam), np.uint8)\n ).astype(np.bool)\n win_mask = np.sum(mask.ravel()[win_inds], axis=2)\n win_inds = win_inds[win_mask > 0, :]\n else:\n win_inds = win_inds.reshape(-1, win_size)\n\n \n winI = ravelImg[win_inds]\n\n win_mu = np.mean(winI, axis=1, keepdims=True)\n win_var = np.einsum('...ji,...jk ->...ik', winI, winI) / win_size - np.einsum('...ji,...jk ->...ik', win_mu, win_mu)\n\n inv = np.linalg.inv(win_var + (eps/win_size)*np.eye(3))\n\n X = np.einsum('...ij,...jk->...ik', winI - win_mu, inv)\n vals = np.eye(win_size) - (1.0/win_size)*(1 + np.einsum('...ij,...kj->...ik', X, winI - win_mu))\n\n nz_indsCol = np.tile(win_inds, win_size).ravel()\n nz_indsRow = np.repeat(win_inds, win_size).ravel()\n nz_indsVal = vals.ravel()\n L = scipy.sparse.coo_matrix((nz_indsVal, (nz_indsRow, nz_indsCol)), shape=(h*w, h*w))\n return L\n\n\ndef closed_form_matting_with_prior(image, prior, prior_confidence, consts_map=None):\n \"\"\"Applies closed form matting with prior alpha map to image.\n\n Args:\n image: 3-dim numpy matrix with input image.\n prior: matrix of same width and height as input image holding apriori alpha map.\n prior_confidence: matrix of the same shape as prior hodling confidence of prior alpha.\n consts_map: binary mask of pixels that aren't expected to change due to high\n prior confidence.\n\n Returns: 2-dim matrix holding computed alpha map.\n \"\"\"\n\n assert image.shape[:2] == prior.shape, ('prior must be 2D matrix with height and width equal '\n 'to image.')\n assert image.shape[:2] == prior_confidence.shape, ('prior_confidence must be 2D matrix with '\n 'height and width equal to image.')\n assert (consts_map is None) or image.shape[:2] == consts_map.shape, (\n 'consts_map must be 2D matrix with height and width equal to image.')\n\n logging.info('Computing Matting Laplacian.')\n laplacian = compute_laplacian(image, ~consts_map if consts_map is not None else None)\n\n confidence = scipy.sparse.diags(prior_confidence.ravel())\n logging.info('Solving for alpha.')\n solution = scipy.sparse.linalg.spsolve(\n laplacian + confidence,\n prior.ravel() * prior_confidence.ravel()\n )\n alpha = np.minimum(np.maximum(solution.reshape(prior.shape), 0), 1)\n return alpha\n\n\ndef closed_form_matting_with_trimap(image, trimap, trimap_confidence=100.0):\n \"\"\"Apply Closed-Form matting to given image using trimap.\"\"\"\n\n assert image.shape[:2] == trimap.shape, ('trimap must be 2D matrix with height and width equal '\n 'to image.')\n consts_map = (trimap < 0.1) | (trimap > 0.9)\n return closed_form_matting_with_prior(image, trimap, trimap_confidence * consts_map, consts_map)\n\n\ndef closed_form_matting_with_scribbles(image, scribbles, scribbles_confidence=100.0):\n \"\"\"Apply Closed-Form matting to given image using scribbles image.\"\"\"\n\n assert image.shape == scribbles.shape, 'scribbles must have exactly same shape as image.'\n prior = np.sign(np.sum(scribbles - image, axis=2)) / 2 + 0.5\n consts_map = prior != 0.5\n return 
closed_form_matting_with_prior(\n image,\n prior,\n scribbles_confidence * consts_map,\n consts_map\n )\n\n\nclosed_form_matting = closed_form_matting_with_trimap\n\ndef main():\n import argparse\n\n logging.basicConfig(level=logging.INFO)\n arg_parser = argparse.ArgumentParser(description=__doc__)\n arg_parser.add_argument('image', type=str, help='input image')\n\n arg_parser.add_argument('-t', '--trimap', type=str, help='input trimap')\n arg_parser.add_argument('-s', '--scribbles', type=str, help='input scribbles')\n arg_parser.add_argument('-o', '--output', type=str, required=True, help='output image')\n arg_parser.add_argument(\n '--solve-fg', dest='solve_fg', action='store_true',\n help='compute foreground color and output RGBA image'\n )\n args = arg_parser.parse_args()\n\n image = cv2.imread(args.image, cv2.IMREAD_COLOR) / 255.0\n\n if args.scribbles:\n scribbles = cv2.imread(args.scribbles, cv2.IMREAD_COLOR) / 255.0\n alpha = closed_form_matting_with_scribbles(image, scribbles)\n elif args.trimap:\n trimap = cv2.imread(args.trimap, cv2.IMREAD_GRAYSCALE) / 255.0\n alpha = closed_form_matting_with_trimap(image, trimap)\n else:\n logging.error('Either trimap or scribbles must be specified.')\n arg_parser.print_help()\n exit(-1)\n\n if args.solve_fg:\n from solve_foreground_background import solve_foreground_background\n foreground, _ = solve_foreground_background(image, alpha)\n output = np.concatenate((foreground, alpha[:, :, np.newaxis]), axis=2)\n else:\n output = alpha\n\n cv2.imwrite(args.output, output * 255.0)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.einsum",
"numpy.arange",
"numpy.eye",
"numpy.tile",
"numpy.ones",
"numpy.lib.stride_tricks.as_strided",
"numpy.concatenate",
"numpy.mean",
"numpy.repeat",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dirkroorda/fusus | [
"ee83067e1fb4fb4e4a7389554df4ab200c6f2092"
] | [
"fusus/lines.py"
] | [
"\"\"\"Line detection\n\nWe detect lines in page blocks based on ink distribution.\n\nOur proxy to ink distribution are histograms, but there is no easy correspondence\nbetween the peaks in the histograms and the lines on the page.\n\nWe will need some signal processing tools from\n[SciPy](https://docs.scipy.org/doc/scipy/reference/),\nin particular\n[find_peaks](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html#scipy.signal.find_peaks)\nand\n[medfilt](https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.medfilt.html#scipy.signal.medfilt),\nto filter the peaks into significant peaks.\n\nWe also need to massage the ink histograms in order to correct for short lines.\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom scipy.signal import find_peaks, medfilt\n\nfrom .lib import (\n applyBandOffset,\n getMargins,\n overlay,\n pureAverage,\n)\n\n\ndef getInkDistribution(C, info, stages, pageH, blocks, batch, boxed):\n \"\"\"Add line band data to all blocks based on histograms.\n\n By means of histograms we can discern where the lines are.\n We define several bands with respect to lines, such as main, inter, broad,\n high, mid, low.\n We also define a band for the space between lines.\n\n We mark the main bands on the `layout layer` by a starting green line\n and an ending red line and the space between them will be overlaid with gray.\n\n Parameters\n ----------\n C: object\n Configuration settings\n stages: dict\n We need access to several intermediate results.\n pageH: int\n size of a full page in pixels\n blocks: dict\n The blocks as delivered by `getBlocks`.\n The blocks dict will be updated: each block value gets a new key `bands`\n with the band data.\n batch: boolean\n Whether we run in batch mode.\n boxed: boolean\n Whether we run in boxed mode (generate boxes around wiped marks).\n\n Returns\n -------\n list\n A list of keys in the blocks dict that correspond to blocks\n that turn out to be devoid of written material.\n \"\"\"\n\n mColor = C.marginRGB\n whit = C.marginGRS\n white = C.whiteRGB\n thresholdX = C.marginThresholdX\n colorBand = C.colorBand\n if not batch:\n layout = stages[\"layout\"]\n histogram = layout.copy()\n stages[\"histogram\"] = histogram\n\n blurred = stages[\"blurred\"]\n demargined = stages[\"demargined\"]\n\n emptyBlocks = []\n\n for ((stripe, block), data) in blocks.items():\n (left, top, right, bottom) = data[\"inner\"]\n\n hasRegion = bottom > top and right > left\n\n if not hasRegion:\n emptyBlocks.append((stripe, block))\n continue\n\n imgOut = histogram if not batch else None\n histX = getInkX(blurred, left, top, right, bottom, imgOut=imgOut)\n lines = getInkY(\n C, info, blurred, pageH, left, top, right, bottom, True, imgOut=imgOut\n )\n\n # chop off the left and right margins of a region\n\n (normH, normW) = (bottom - top, right - left)\n roiOut = demargined[top:bottom, left:right]\n if not batch:\n roiOutC = layout[top:bottom, left:right]\n margins = getMargins(histX, normW, thresholdX)\n\n for (x1, x2) in margins:\n cv2.rectangle(roiOut, (x1, 0), (x2, normH), whit, -1)\n if not batch:\n overlay(roiOutC, x1 + 2, 2, x2 - 2, normH - 2, white, mColor)\n\n if len(margins) != 2:\n emptyBlocks.append((stripe, block))\n continue\n\n data[\"inner\"] = (margins[0][1] + left, top, margins[1][0] + left, bottom)\n\n # define bands\n\n bands = {}\n data[\"bands\"] = bands\n\n for (band, bandColor) in colorBand.items():\n inter = band in {\"inter\", \"low\", \"high\"}\n theLines = applyBandOffset(C, normH, band, lines, 
inter=inter)\n bands[band] = dict(lines=theLines, color=bandColor)\n\n bandInfo = bands[\"main\"]\n lines = bandInfo[\"lines\"]\n\n # remove top white space\n\n topWhite = lines[0][0] if lines else normH\n cv2.rectangle(roiOut, (0, 0), (normW, topWhite), whit, -1)\n if not batch:\n overlay(roiOutC, 0, 0, normW, topWhite, white, mColor)\n\n # remove bottom white space\n\n bottomWhite = lines[-1][1] if lines else 0\n cv2.rectangle(roiOut, (0, bottomWhite), (normW, normH), whit, -1)\n if not batch:\n overlay(roiOutC, 0, bottomWhite, normW, normH, white, mColor)\n\n if not lines:\n emptyBlocks.append((stripe, block))\n\n return emptyBlocks\n\n\ndef getInkX(imgIn, left, top, right, bottom, imgOut=None):\n \"\"\"Make a horizontal histogram of an input region of interest.\n\n Optionally draw the histograms on the corresponding roi of an output image.\n\n Parameters\n ----------\n imgIn: np array\n Input image.\n top, bottom, left, right: int\n Region of interest on input and output image.\n imgOut: np array, optional `None`\n Output image.\n\n Returns\n -------\n histX: list\n The X histogram\n \"\"\"\n\n roiIn = imgIn[top:bottom, left:right]\n histX = cv2.reduce(roiIn, 0, cv2.REDUCE_AVG).reshape(-1)\n if imgOut is not None:\n roiOut = imgOut[top:bottom, left:right]\n for (i, val) in enumerate(histX):\n color = (int(val), int(2 * val), int(val))\n index = (i, 0)\n value = (i, val)\n cv2.line(roiOut, index, value, color, 1)\n\n return histX\n\n\ndef firstNonzero(arr, axis=None):\n return (arr != 0).argmax(axis=axis or 0)\n\n\ndef lastNonzero(arr, axis=None):\n ax = axis or 0\n mask = arr != 0\n ln = arr.shape[ax]\n val = ln - np.flip(mask, axis=ax or 0).argmax(axis=ax) - 1\n return val if axis is None else np.where(mask.any(axis=ax), val, -1)\n\n\ndef getHist(C, imgIn, lineHeight):\n if lineHeight is None:\n return cv2.reduce(imgIn, 1, cv2.REDUCE_AVG).reshape(-1)\n\n contourFactor = C.contourFactor\n contourOffset = C.contourOffset\n\n (h, w) = imgIn.shape[0:2]\n increase = int(round(w * contourOffset))\n\n left = firstNonzero(imgIn, axis=1)\n right = lastNonzero(imgIn, axis=1)\n\n left[left > increase] -= increase\n right[(0 < right) & (right < w - increase)] += increase\n\n # smooth the left and right contours by taking the median value\n # of a range around each value.\n # the range stretches a fraction of the peak distance to each side\n # we use a median filter from scipy for it\n\n windowSize = int(round(lineHeight * contourFactor))\n if not windowSize % 2:\n windowSize += 1\n if windowSize > h:\n windowSize = h - (0 if h % 2 else 1)\n if windowSize > 1:\n left = np.rint(medfilt(left, kernel_size=windowSize)).astype(int)\n right = np.rint(medfilt(right, kernel_size=windowSize)).astype(int)\n\n lengths = np.transpose(right - left + 1)\n\n histY = np.sum(imgIn, axis=1).astype(float)\n histY[lengths > 0] = histY[lengths > 0] / lengths[lengths > 0]\n histY[histY > 200] = 200\n return (np.rint(histY).astype(np.uint8), left, right)\n\n\ndef getInkY(C, info, imgIn, pageH, left, top, right, bottom, final, imgOut=None):\n \"\"\"Determine the line distribution in a block of text.\n\n Optionally draw the histogram and the peaks and valleys\n on the corresponding roi of an output image.\n\n In this operation, we determine the regular line height by analysing the peaks\n and the distances between them.\n\n But if we have just one peak, we do not have distances.\n In those cases, we take the last line height that has been calculated.\n\n Parameters\n ----------\n C: object\n The configuration object of 
the book engine.\n info: function\n To write messages to the console\n imgIn: np array\n Input image.\n pageH: int\n size of a full page in pixels\n top, bottom, left, right: int\n Region of interest on input and output image.\n final: boolean\n When computing the layout of a page, we call this function\n to adjust the vertical sizes of blocks. This is a non-final call to this\n function. Later, we determine the lines per block, that is the final call.\n When debugging, it is handy to be able to distinguish the debug information\n generated by these calls.\n imgOut: np array, optional `None`\n Output image.\n\n Returns\n -------\n lines: list\n The detected lines, given as a list of tuples of upper and lower y coordinates\n \"\"\"\n\n debug = C.debug\n show = debug > 1 or debug == 1 and final\n\n white = C.whiteRGB\n black = C.blackRGB\n green = C.greenRGB\n orange = C.orangeRGB\n purple = C.purpleRGB\n mColor = C.marginRGB\n upperColor = C.upperRGB\n lowerColor = C.lowerRGB\n peakSignificant = C.peakSignificant\n peakTargetWidthFraction = C.peakTargetWidthFraction\n peakProminence = C.peakProminenceY\n valleyProminence = C.valleyProminenceY\n outerValleyShiftFraction = C.outerValleyShiftFraction\n defaultLineHeight = C.defaultLineHeight\n\n peakDistance = int(round(pageH / 45))\n\n # little squares that indicate the significant peaks and valleys in the histogram\n sqHWidth = 10\n sqWidth = 2 * sqHWidth\n sqDWidth = 4 * sqHWidth\n\n (normH, normW) = (bottom - top, right - left)\n\n roiIn = imgIn[top:bottom, left:right]\n\n # the raw histogram\n histY = getHist(C, roiIn, None)\n\n # estimate the lineheight based on the raw histogram\n\n def getLineHeight(histY, show=False):\n # rough collection of peaks (we'll find too many)\n (peaks, peakData) = find_peaks(\n histY, prominence=peakProminence, distance=peakDistance\n )\n\n # if there are no peaks: no lines\n\n if not len(peaks):\n return None\n\n # filter out the significant peaks\n maxPeak = max(histY[peak] for peak in peaks)\n peakThreshold = peakSignificant * maxPeak\n sigPeaks = [peak for peak in peaks if histY[peak] > peakThreshold]\n\n # get the distances between the significant peaks\n diffPeaks = [sigPeaks[i] - sigPeaks[i - 1] for i in range(1, len(sigPeaks))]\n if show:\n info(\"\\nPeaks:\", tm=False)\n info(\n f\"maxPeak={maxPeak};\"\n f\" {len(peaks)} peaks of which {len(sigPeaks)} > {peakThreshold}\",\n tm=False,\n )\n info(\"Peaks:\")\n for peak in peaks:\n info(f\"{histY[peak]:>3} @ {peak:>4}\", tm=False)\n info(f\"sigPeaks={sigPeaks}\", tm=False)\n info(f\"diffPeaks={diffPeaks}\", tm=False)\n\n # remove the outliers from the distances and determine the average of the\n # remaining distances: that is the line height\n return pureAverage(np.array(diffPeaks), defaultLineHeight)\n\n lineHeight = getLineHeight(histY, show=False)\n if lineHeight is None:\n # no lines\n return []\n\n # compute a better histogram, based on smooth contour lines\n # Crucial: the contour computation is based on the estimated line height\n (histY, leftContour, rightContour) = getHist(C, roiIn, lineHeight)\n lineHeight = getLineHeight(histY, show=show)\n if lineHeight is None:\n # no lines\n return []\n\n # precise calculation of peaks, based on the calculated line height\n distance = int(round(peakTargetWidthFraction * lineHeight))\n plateauThreshold = int(distance // 4)\n (peaks, peakData) = find_peaks(histY, prominence=2, distance=distance)\n\n # invert the histogram data to detect valleys\n histV = 255 - histY\n\n # let the inverted histogram start 
and end with zeroes,\n # otherwise the first and last peaks in the inverted histogram are not detected.\n # These are the first and last valleys of the original histogram.\n # The find_peaks() algorithm in SciPy does not detect one-sided peaks, i.e.\n # peaks right at the start or the end of a sequence.\n histV[0] = 0\n histV[-1] = 0\n\n # Rough way to find the valleys.\n # It turns out that we find too many valleys, and when we increase the prominence,\n # we miss important valleys.\n (protoValleys, valleyData) = find_peaks(\n histV,\n prominence=valleyProminence,\n distance=distance,\n plateau_size=0,\n height=0,\n width=0,\n )\n if show:\n info(\"\\nLines:\", tm=False)\n\n # We need to filter by a rather subtle criterion, involving the plateau size,\n # height, and width of a peak.\n # for valleys with a big plateau, we split them into two ones, closer to\n # the ink, one higher, one lower than the plateau.\n\n valleys = []\n remove = None\n\n def showValley(v):\n removeRep = \"xxx\" if remove else f\"{len(valleys):>3}\"\n info(\n f\"valley {removeRep} @ {v:>4}\"\n f\" prom={int(round(prominence)):>3}\"\n f\" ps={plateauSize:>3}\"\n f\" w={int(round(width)):>3} h={int(round(height)):>3}\",\n tm=False,\n )\n\n for (i, (v, prominence, plateauSize, width, height),) in enumerate(\n zip(\n protoValleys,\n valleyData[\"prominences\"],\n valleyData[\"plateau_sizes\"],\n valleyData[\"width_heights\"],\n valleyData[\"peak_heights\"],\n )\n ):\n # a valley with a smallish prominence and small plateau combined\n # with a lack of depth is not convincing\n\n # the valleys at the start and at the end might be too far removed\n # from the actual ink.\n # We recognize that by means of the size of the plateau.\n # Therefore we shift the valley towards the ink over a length\n # proportional to the plateau size.\n\n remove = prominence < 50 or (prominence < 100 and (plateauSize + height < 230))\n lastProtoValley = len(protoValleys) - 1\n if i == 0 or i == lastProtoValley:\n shiftCorrection = int(plateauSize * outerValleyShiftFraction)\n vc = v + shiftCorrection if i == 0 else v - shiftCorrection\n if not remove:\n valleys.append(vc)\n if show:\n showValley(vc)\n else:\n if plateauSize > 2 * plateauThreshold:\n thisShift = int(plateauSize // 2) - plateauThreshold\n if not remove:\n valleys.append(v - thisShift)\n if show:\n showValley(v - thisShift)\n if not remove:\n valleys.append(v + thisShift)\n if show:\n showValley(v + thisShift)\n else:\n if not remove:\n valleys.append(v)\n if show:\n showValley(v)\n\n # from the peaks and valleys found above, compute the lines\n # as a list of (top, bottom) coordinates.\n\n # For each peak we determine its nearest surrounding valleys before and after.\n # Those are the top and bottom of a line.\n # It is possible that there are multiple peaks between valleys.\n # We take care to not produce duplicate lines in these cases.\n\n # We walk through the peaks and maintain the last relevant valley.\n\n lines = []\n lastV = 0\n lastLine = None\n for peak in peaks:\n # move forward the last valley until it passes the peak\n while lastV < len(valleys) and valleys[lastV] <= peak:\n lastV += 1\n # then the valley before lastV is the last valley before the peak,\n # and lastV itself is the first valley after the peak\n thisLine = (\n valleys[lastV - 1] if lastV > 0 else 0,\n valleys[lastV] if lastV < len(valleys) else normH,\n )\n # we found a line.\n # Check that it is not the same line that we found before.\n # If all is well, add it to the result.\n if thisLine != 
lastLine:\n lines.append(thisLine)\n lastLine = thisLine\n\n if imgOut is not None:\n roiOut = imgOut[top:bottom, left:right]\n faze = 5\n for (i, val) in enumerate(leftContour):\n tl = (max((val - faze, 0)), max((i - faze, 0)))\n br = (min((val + faze, right)), min((i + faze, bottom)))\n cv2.rectangle(roiOut, tl, br, orange, -1)\n for (i, val) in enumerate(rightContour):\n tl = (max((val - faze, 0)), max((i - faze, 0)))\n br = (min((val + faze, right)), min((i + faze, bottom)))\n cv2.rectangle(roiOut, tl, br, purple, -1)\n for (i, val) in enumerate(histY):\n color = (int(val), int(2 * val), int(val))\n index = (sqDWidth + 10, i)\n value = (sqDWidth + 10 + val, i)\n cv2.line(roiOut, index, value, color, 1)\n for e in valleys:\n index = (0, max((e - sqHWidth, 0)))\n value = (sqWidth, min((e + sqHWidth, len(histY) - 1)))\n cv2.rectangle(roiOut, index, value, black, -1)\n for e in peaks:\n index = (sqWidth, max((e - sqHWidth, 0)))\n value = (sqDWidth, min((e + sqHWidth, len(histY) - 1)))\n cv2.rectangle(roiOut, index, value, green, -1)\n for (up, lo) in lines:\n overlay(roiOut, 14, up, normW - 14, up + 3, white, upperColor)\n overlay(roiOut, 14, lo - 3, normW - 14, lo, white, lowerColor)\n for (lo, up) in zip(\n (0, *(x[1] for x in lines)), (*(x[0] for x in lines), normH)\n ):\n overlay(roiOut, 14, lo, normW - 14, up + 1, white, mColor)\n\n return lines\n"
] | [
[
"scipy.signal.find_peaks",
"scipy.signal.medfilt",
"numpy.rint",
"numpy.transpose",
"numpy.array",
"numpy.flip",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"1.5",
"1.7",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
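The record above segments text lines by treating a block as a vertical ink histogram: significant peaks are line cores, and peaks of the inverted histogram are the valleys that bound each line. A minimal, self-contained sketch of that idea follows; the synthetic data, variable names, and threshold values are illustrative assumptions, not taken from the record.

```python
# Sketch only: find text-line (top, bottom) bounds from a per-row ink histogram
# using scipy.signal.find_peaks, as in the record above. Values are synthetic.
import numpy as np
from scipy.signal import find_peaks, medfilt

# synthetic per-row ink density for a block with three text lines
rows = np.arange(300)
hist_y = (
    120 * np.exp(-0.5 * ((rows - 60) / 12) ** 2)
    + 120 * np.exp(-0.5 * ((rows - 150) / 12) ** 2)
    + 120 * np.exp(-0.5 * ((rows - 240) / 12) ** 2)
)
hist_y = medfilt(hist_y, kernel_size=5)          # smooth out small noise

# peaks = line centres; valleys = peaks of the inverted histogram
peaks, _ = find_peaks(hist_y, prominence=20, distance=40)
inverted = hist_y.max() - hist_y
inverted[0] = inverted[-1] = 0                   # allow valleys at the block edges
valleys, _ = find_peaks(inverted, prominence=20, distance=40)

# pair each peak with its nearest surrounding valleys to get line bounds
lines = []
for p in peaks:
    top = max((v for v in valleys if v < p), default=0)
    bottom = min((v for v in valleys if v > p), default=len(rows) - 1)
    lines.append((int(top), int(bottom)))
print(lines)
```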
perfectbullet/albumy | [
"6e0fa1bef31f470c19fd6bcf6751d0be6510d864"
] | [
"opencv_demos/svm_train/train_number_svm.py"
] | [
"#!/usr/bin/python\n# coding=utf-8\n\nimport cv2, os\nimport numpy as np\n\n\n# 获取数据集\n# 参数: datadirs:数据目录, labels:数据目录对应的标签, descriptor:特征描述器, size:图片归一化尺寸(通常是2的n次方, 比如(64,64)), kwargs:描述器计算特征的附加参数\n# 返回值, descs:特征数据, labels:标签数据\ndef getDataset(datadirs, labels, descriptor, size, **kwargs):\n # 获取训练数据\n # 参数: path:图片目录, label:图片标签, descriptor:特征描述器, size:图片归一化尺寸(通常是2的n次方, 比如(64,64)), kwargs:描述器计算特征的附加参数\n # 返回值: 图像数据, 标签数据\n def getDatas(path, label):\n datas = []\n for root, dirs, files in os.walk(path):\n for fname in files:\n lowname = fname.lower()\n if not lowname.endswith('.jpg') and not lowname.endswith('.png') and not lowname.endswith('.bmp'): continue\n imgpath = os.path.join(root, fname)\n gray = cv2.imread(imgpath, 0)\n if gray is None or len(gray) < 10: continue\n desc = descriptor.compute(cv2.resize(gray, size,interpolation=cv2.INTER_AREA), **kwargs).reshape((-1))\n datas.append(desc)\n return np.array(datas), np.full((len(datas)), label, dtype=np.int32)\n\n descs, dlabels = None, None\n for path, label in zip(datadirs,labels):\n if descs is None:\n descs, dlabels = getDatas(path, label)\n else:\n ds, ls = getDatas(path, label)\n descs, dlabels = np.vstack((descs, ds)), np.hstack((dlabels, ls))\n return descs, dlabels\n\n\nif __name__ == '__main__':\n from os.path import join, basename\n from os import walk\n # 正样本的标签为1, 负样本的标签为0\n # base_train_dir = '/disk_workspace/train_data_for_svm/0-9_train/'\n base_train_dir = '/disk_workspace/train_data_for_svm/dzx_number'\n\n dir_ls = [dr for dr in os.listdir(base_train_dir) if not dr.endswith('.dat')]\n # train_dirs = [join(base_train_dir, d) for d in dir_ls if not d.endswith('_test')]\n # train_labels = [int(basename(d)) for d in train_dirs]\n test_dirs = [join(base_train_dir, d) for d in dir_ls if d.endswith('_test')]\n test_labels = [int(basename(d).split('_')[0]) for d in test_dirs]\n\n outpath = join(base_train_dir, 'digits-20191114-ten.dat') # 模型输出目录\n\n # hog特征描述器\n # 参数图解: https://blog.csdn.net/qq_26898461/article/details/46786285\n # 参数说明: winSize:窗口大小, blockSize:块大小, blockStride:块滑动增量, cellSize:胞元大小, nbins:梯度方向数目\n descriptor = cv2.HOGDescriptor(_winSize=(64, 64), _blockSize=(16, 16), _blockStride=(8, 8), _cellSize=(8, 8), _nbins=9)\n\n # # # 拟合\n # train_datas, train_labels = getDataset(train_dirs, train_labels, descriptor, size=(64, 64), winStride=(8, 8), padding=(0, 0))\n # print('train_datas.shape={}, train_labels.shape={}'.format(train_datas.shape, train_labels.shape))\n # svm = cv2.ml.SVM_create()\n # svm.setKernel(cv2.ml.SVM_LINEAR)\n # svm.setType(cv2.ml.SVM_C_SVC)\n # svm.setC(2.67)\n # svm.setGamma(5.383)\n # svm.train(train_datas, cv2.ml.ROW_SAMPLE, train_labels)\n # print('outpath={}'.format(outpath))\n # svm.save(outpath)\n\n # 开始测试, 测试数据和拟合的数据不能有重复, 有重复的测试结果不能说明问题\n svmer = cv2.ml.SVM_load(outpath)\n # test_dirs = [join(base_train_dir, pa) for pa in ['套管双耳上部_包含多个_test', '套管双耳下部_包含多个_test']]\n # test_lables = [1, -1]\n test_des_data, test_labels = getDataset(test_dirs, test_labels, descriptor, size=(64, 64), winStride=(8, 8), padding=(0, 0))\n test_query_data = np.array(test_des_data) #\n ret, responses = svmer.predict(test_query_data) # ret\n\n # Check Accuracy\n mask = test_labels == responses.reshape(responses.shape[0])\n correct = np.count_nonzero(mask)\n acc = correct / float(mask.size)\n print('test_labels={}, responses.shape={}, mask.shape={}, acc={}'\n .format(test_labels.shape, responses.shape, mask.shape, acc))\n\n"
] | [
[
"numpy.hstack",
"numpy.vstack",
"numpy.array",
"numpy.count_nonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
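The record above describes 64x64 digit crops with an OpenCV HOG descriptor and classifies them with a cv2.ml SVM; the training calls are commented out in the stored file. A minimal sketch of the complete fit/predict round trip with the same OpenCV calls is shown below; the random stand-in images and the class split are assumptions used only to make the snippet runnable.

```python
# Sketch only: HOG features + cv2.ml SVM train/predict, mirroring the record above.
import cv2
import numpy as np

hog = cv2.HOGDescriptor(_winSize=(64, 64), _blockSize=(16, 16),
                        _blockStride=(8, 8), _cellSize=(8, 8), _nbins=9)

def describe(img):
    # resize to the HOG window size and flatten the descriptor
    resized = cv2.resize(img, (64, 64), interpolation=cv2.INTER_AREA)
    return hog.compute(resized, winStride=(8, 8), padding=(0, 0)).reshape(-1)

# stand-in training set: 20 random grayscale "images", two classes
rng = np.random.default_rng(0)
images = [rng.integers(0, 256, (80, 40), dtype=np.uint8) for _ in range(20)]
train_data = np.array([describe(im) for im in images], dtype=np.float32)
train_labels = np.array([i % 2 for i in range(20)], dtype=np.int32)

svm = cv2.ml.SVM_create()
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(2.67)
svm.train(train_data, cv2.ml.ROW_SAMPLE, train_labels)

_, responses = svm.predict(train_data)
accuracy = np.count_nonzero(responses.reshape(-1) == train_labels) / len(train_labels)
print(accuracy)
```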
Flunzmas/vp-suite | [
"479bd48185b9a93a6bc6bff2dfe226e9c65800d8"
] | [
"vp_suite/datasets/mmnist_on_the_fly.py"
] | [
"import numpy as np\nimport torch\nfrom torchvision.datasets import MNIST\n\nfrom vp_suite.base import VPDataset, VPData\nfrom vp_suite.defaults import SETTINGS\n\n\nclass MovingMNISTOnTheFly(VPDataset):\n r\"\"\"\n Dataset class for the dataset \"Moving MNIST\", as firstly encountered in\n \"Unsupervised Learning of Video Representations using LSTMs\" by Srivastava et al.\n (https://arxiv.org/pdf/1502.04681v3.pdf).\n\n Each sequence depicts two digits from the MNIST dataset moving linearly in front of a black background,\n occasionally bouncing off the wall and overlapping each other.\n\n As opposed to the other Moving MNIST dataset, this one generates the digit sequences on-the-fly,\n randomly sampling digits and velocities. Besides the digit templates, no data is downloaded.\n \"\"\"\n NAME = \"Moving MNIST - On the fly\"\n IS_DOWNLOADABLE = \"Yes (MNIST digits)\"\n ON_THE_FLY = True\n DEFAULT_DATA_DIR = SETTINGS.DATA_PATH / \"moving_mnist_on_the_fly\"\n VALID_SPLITS = [\"train\", \"val\", \"test\"]\n MIN_SEQ_LEN = 1e8 #: Sequence length unbounded, depends on input sequence length\n ACTION_SIZE = 0\n DATASET_FRAME_SHAPE = (64, 64, 3)\n DEFAULT_N_SEQS = {\"train\": 9600, \"val\": 400, \"test\": 1000} #: Default values for the dataset split sizes.\n SPLIT_SEED_OFFSETS = {\"train\": lambda x: 3*x+2, \"val\": lambda x: 3*x+1, \"test\": lambda x: 3*x} #: passing the seed value to these functions guarantees unique RNG for all splits\n\n min_speed = 2\n max_speed = 5\n min_acc = 0\n max_acc = 0\n num_channels = 3\n num_digits = 2\n rng_seed = 4115 # with this default value, the test default becomes 3*x=12345.\n n_seqs = None\n\n def __init__(self, split, **dataset_kwargs):\n super(MovingMNISTOnTheFly, self).__init__(split, **dataset_kwargs)\n self.NON_CONFIG_VARS.extend([\"data\", \"rng\", \"digit_id_rng\", \"speed_rng\", \"acc_rng\", \"pos_rng\",\n \"get_digit_id\", \"get_speed\", \"get_acc\", \"get_init_pos\"])\n\n if self.num_channels not in [1, 3]:\n raise ValueError(\"num_channels for dataset needs to be in [1, 3].\")\n img_c, img_h, img_w = self.img_shape\n if img_h != img_w:\n raise ValueError(\"MMNIST only permits square images\")\n self.DATASET_FRAME_SHAPE = (img_h, img_w, img_c) # TODO dirty hack\n\n # loading data and rng\n self.data = MNIST(root=self.data_dir, train=(self.split == \"train\"), download=False)\n self.n_seqs = self.n_seqs or self.DEFAULT_N_SEQS[self.split]\n self.digit_id_rng, self.speed_rng, self.acc_rng, self.pos_rng = None, None, None, None\n self.reset_rng()\n\n self.get_digit_id = lambda: self.digit_id_rng.integers(len(self.data))\n self.get_speed = lambda: self.speed_rng.integers(-1*self.max_speed, self.max_speed+1)\n self.get_acc = lambda: self.acc_rng.integers(-1*self.max_acc, self.max_acc+1)\n self.get_init_pos = lambda digit_size: (self.pos_rng.integers(0, self.img_shape[1]-digit_size),\n self.pos_rng.integers(0, self.img_shape[2]-digit_size))\n\n def __len__(self):\n return self.n_seqs\n\n def reset_rng(self):\n r\"\"\"\n Creates RNG-based generation helpers for the on-the-fly generation, re-setting the RNG.\n \"\"\"\n split_rng_seed = self.SPLIT_SEED_OFFSETS[self.split](self.rng_seed)\n self.digit_id_rng = np.random.default_rng(split_rng_seed)\n self.speed_rng = np.random.default_rng(split_rng_seed)\n self.acc_rng = np.random.default_rng(split_rng_seed)\n self.pos_rng = np.random.default_rng(split_rng_seed)\n\n def __getitem__(self, i) -> VPData:\n if not self.ready_for_usage:\n raise RuntimeError(\"Dataset is not yet ready for usage (maybe you forgot to 
call set_seq_len()).\")\n\n digits, next_poses, speeds, digit_size = [], [], [], None\n for i in range(self.num_digits):\n digit, pos, speed, digit_size = self._sample_digit()\n digits.append(digit)\n next_poses.append(pos)\n speeds.append(speed)\n\n # generating sequence by moving the digit given velocity\n frames = np.zeros((self.seq_len, *self.DATASET_FRAME_SHAPE))\n for i, frame in enumerate(frames):\n for j, (digit, cur_pos, speed) in enumerate(zip(digits, next_poses, speeds)):\n speed, cur_pos = self._move_digit(speed=speed, cur_pos=cur_pos,\n img_size=self.img_shape[1], digit_size=digit_size)\n speeds[j] = speed\n next_poses[j] = cur_pos\n cur_h, cur_w = cur_pos\n frame[cur_h:cur_h+digit_size, cur_w:cur_w+digit_size] += digit\n frames[i] = np.clip(frame, 0, 1)\n frames = self.preprocess(frames * 255)\n\n actions = torch.zeros((self.total_frames, 1)) # [t, a], actions should be disregarded in training logic\n data = {\"frames\": frames, \"actions\": actions, \"origin\": \"generated on-the-fly\"}\n return data\n\n def _sample_digit(self):\n \"\"\"\n Samples digit, initial position and speed.\n \"\"\"\n digit_id = self.get_digit_id()\n cur_digit = np.array(self.data[digit_id][0]) / 255 # sample IDX, digit\n digit_size = cur_digit.shape[-1]\n cur_digit = cur_digit[..., np.newaxis]\n if self.num_channels == 3:\n cur_digit = np.repeat(cur_digit, 3, axis=-1)\n\n # obtaining position in original frame\n x_coord, y_coord = self.get_init_pos(digit_size)\n cur_pos = np.array([y_coord, x_coord])\n\n # generating sequence\n speed_x, speed_y, acc = None, None, None\n while speed_x is None or np.abs(speed_x) < self.min_speed:\n speed_x = self.get_speed()\n while speed_y is None or np.abs(speed_y) < self.min_speed:\n speed_y = self.get_speed()\n while acc is None or np.abs(acc) < self.min_acc:\n acc = self.get_acc()\n speed = np.array([speed_y, speed_x])\n\n return cur_digit, cur_pos, speed, digit_size\n\n def _move_digit(self, speed, cur_pos, img_size, digit_size):\n \"\"\"\n Performs digit movement. Also produces bounces and makes appropriate changes.\n \"\"\"\n next_pos = cur_pos + speed\n for i, p in enumerate(next_pos):\n # left/down bounce\n if p + digit_size > img_size:\n offset = p + digit_size - img_size\n next_pos[i] = p - offset\n speed[i] = -1 * speed[i]\n elif (p < 0):\n next_pos[i] = -1 * p\n speed[i] = -1 * speed[i]\n return speed, next_pos\n\n def download_and_prepare_dataset(self):\n r\"\"\"\n Downloads the MNIST digit data so that on-the-fly generation can take place.\n \"\"\"\n _ = MNIST(root=self.DEFAULT_DATA_DIR, train=True, download=True)\n _ = MNIST(root=self.DEFAULT_DATA_DIR, train=False, download=True)\n"
] | [
[
"numpy.abs",
"numpy.clip",
"torch.zeros",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.random.default_rng"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
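The record above generates Moving MNIST sequences on the fly by advancing each digit's top-left position by its speed and reversing the speed when the digit reaches a frame edge. A small standalone sketch of that update rule follows; the positions, speeds, and sizes below are arbitrary stand-ins, not values from the dataset class.

```python
# Sketch only: move a digit and bounce it off the frame edges,
# following the update logic of the record above. Inputs are illustrative.
import numpy as np

def move_digit(speed, cur_pos, img_size, digit_size):
    # advance by the current speed, then pull any out-of-frame axis back
    # inside the frame and reverse its speed component
    next_pos = cur_pos + speed
    for i, p in enumerate(next_pos):
        if p + digit_size > img_size:          # hit the far edge
            next_pos[i] = p - (p + digit_size - img_size)
            speed[i] = -speed[i]
        elif p < 0:                            # hit the near edge
            next_pos[i] = -p
            speed[i] = -speed[i]
    return speed, next_pos

pos = np.array([30, 58])     # (y, x) top-left corner of a 28-pixel digit
vel = np.array([3, 4])
for _ in range(5):
    vel, pos = move_digit(vel, pos, img_size=64, digit_size=28)
    print(pos, vel)
```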
ahodges9/Systematic-LEDs | [
"b3b997aba6cf9d08f0935e3b858fac3904d0261b"
] | [
"python/main.py"
] | [
"from __future__ import print_function\nfrom __future__ import division\nfrom scipy.ndimage.filters import gaussian_filter1d\nfrom collections import deque\nimport time\nimport sys\nimport numpy as np\nimport lib.config as config\nimport lib.microphone as microphone\nimport lib.dsp as dsp\n#import lib.led as led\nimport lib.melbank as melbank\nimport lib.devices as devices\nimport random\nfrom PyQt5.QtCore import QSettings\nif config.settings[\"configuration\"][\"USE_GUI\"]:\n from lib.qrangeslider import QRangeSlider\n from lib.qfloatslider import QFloatSlider\n import pyqtgraph as pg\n from PyQt5.QtCore import *\n from PyQt5.QtWidgets import *\n\nclass Visualizer():\n def __init__(self, board):\n # Name of board this for which this visualizer instance is visualising\n self.board = board\n # Dictionary linking names of effects to their respective functions\n self.effects = {\"Scroll\":self.visualize_scroll,\n \"Energy\":self.visualize_energy,\n \"Spectrum\":self.visualize_spectrum,\n \"Power\":self.visualize_power,\n \"Wavelength\":self.visualize_wavelength,\n \"Beat\":self.visualize_beat,\n \"Wave\":self.visualize_wave,\n \"Bars\":self.visualize_bars,\n #\"Pulse\":self.visualize_pulse,\n #\"Auto\":self.visualize_auto,\n \"Single\":self.visualize_single,\n \"Fade\":self.visualize_fade,\n \"Gradient\":self.visualize_gradient,\n \"Calibration\": self.visualize_calibration}\n # List of all the visualisation effects that aren't audio reactive.\n # These will still display when no music is playing.\n self.non_reactive_effects = [\"Single\", \"Gradient\", \"Fade\", \"Calibration\"]\n # Setup for frequency detection algorithm\n self.freq_channel_history = 40\n self.beat_count = 0\n self.freq_channels = [deque(maxlen=self.freq_channel_history) for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"])]\n self.prev_output = np.array([[0 for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"])] for i in range(3)])\n self.prev_spectrum = [0 for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2)]\n self.current_freq_detects = {\"beat\":False,\n \"low\":False,\n \"mid\":False,\n \"high\":False}\n self.prev_freq_detects = {\"beat\":0,\n \"low\":0,\n \"mid\":0,\n \"high\":0}\n self.detection_ranges = {\"beat\":(0,int(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]*0.13)),\n \"low\":(int(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]*0.15),\n int(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]*0.4)),\n \"mid\":(int(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]*0.4),\n int(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]*0.7)),\n \"high\":(int(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]*0.8),\n int(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]))}\n self.min_detect_amplitude = {\"beat\":0.7,\n \"low\":0.5,\n \"mid\":0.3,\n \"high\":0.3}\n self.min_percent_diff = {\"beat\":70,\n \"low\":100,\n \"mid\":50,\n \"high\":30}\n # Configurations for dynamic ui generation. Effect options can be changed by widgets created at runtime,\n # meaning that you don't need to worry about the user interface - it's all done for you. 
All you need to\n # do is add items to this dict below.\n #\n # First line of code below explained (as an example):\n # \"Energy\" is the visualization we're doing options for\n # \"blur\" is the key in the options dict (config.settings[\"devices\"][self.board][\"effect_opts\"][\"Energy\"][\"blur\"])\n # \"Blur\" is the string we show on the GUI next to the slider\n # \"float_slider\" is the GUI element we want to use\n # (0.1,4.0,0.1) is a tuple containing all the details for setting up the slider (see above)\n #\n # Each effect key points to a list. Each list contains lists giving config for each option.\n # Syntax: effect:[key, label_text, ui_element, opts]\n # effect - the effect which you want to change options for. MUST have a key in config.settings[\"devices\"][self.board][\"effect_opts\"]\n # key - the key of thing you want to be changed. MUST be in config.settings[\"devices\"][self.board][\"effect_opts\"][effect], otherwise it won't work.\n # label - the text displayed on the ui\n # ui_element - how you want the variable to be changed\n # opts - options for the ui element. Must be a tuple.\n # UI Elements + opts:\n # slider, (min, max, interval) (for integer values in a given range)\n # float_slider, (min, max, interval) (for floating point values in a given range)\n # checkbox, () (for True/False values)\n # dropdown, (dict or list) (dict/list, example see below. Keys will be displayed in the dropdown if dict, otherwise just list items)\n #\n # Hope this clears things up a bit for you! GUI has never been easier..? The reason for doing this is\n # 1 - To make it easy to add options to your effects for the user\n # 2 - To give a consistent GUI for the user. If every options page was set out differently it would all be a mess\n self.dynamic_effects_config = {\"Energy\":[[\"blur\", \"Blur\", \"float_slider\", (0.1,4.0,0.1)],\n [\"scale\", \"Scale\", \"float_slider\", (0.4,1.0,0.05)],\n [\"r_multiplier\", \"Red\", \"float_slider\", (0.05,1.0,0.05)],\n [\"g_multiplier\", \"Green\", \"float_slider\", (0.05,1.0,0.05)],\n [\"b_multiplier\", \"Blue\", \"float_slider\", (0.05,1.0,0.05)]],\n \"Wave\":[[\"color_flash\", \"Flash Color\", \"dropdown\", config.settings[\"colors\"]],\n [\"color_wave\", \"Wave Color\", \"dropdown\", config.settings[\"colors\"]],\n [\"wipe_len\", \"Wave Start Length\", \"slider\", (0,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//4,1)],\n [\"wipe_speed\", \"Wave Speed\", \"slider\", (1,10,1)],\n [\"decay\", \"Flash Decay\", \"float_slider\", (0.1,1.0,0.05)]],\n \"Spectrum\":[[\"r_multiplier\", \"Red\", \"float_slider\", (0.05,1.0,0.05)],\n [\"g_multiplier\", \"Green\", \"float_slider\", (0.05,1.0,0.05)],\n [\"b_multiplier\", \"Blue\", \"float_slider\", (0.05,1.0,0.05)]],\n \"Wavelength\":[[\"color_mode\", \"Color Mode\", \"dropdown\", config.settings[\"gradients\"]],\n [\"roll_speed\", \"Roll Speed\", \"slider\", (0,8,1)],\n [\"blur\", \"Blur\", \"float_slider\", (0.1,4.0,0.1)],\n [\"mirror\", \"Mirror\", \"checkbox\"],\n [\"reverse_grad\", \"Reverse Gradient\", \"checkbox\"],\n [\"reverse_roll\", \"Reverse Roll\", \"checkbox\"],\n [\"flip_lr\", \"Flip LR\", \"checkbox\"]],\n \"Scroll\":[[\"blur\", \"Blur\", \"float_slider\", (0.05,4.0,0.05)],\n [\"decay\", \"Decay\", \"float_slider\", (0.97,1.0,0.0005)],\n [\"speed\", \"Speed\", \"slider\", (1,5,1)],\n [\"r_multiplier\", \"Red\", \"float_slider\", (0.05,1.0,0.05)],\n [\"g_multiplier\", \"Green\", \"float_slider\", (0.05,1.0,0.05)],\n [\"b_multiplier\", \"Blue\", \"float_slider\", 
(0.05,1.0,0.05)]],\n \"Power\":[[\"color_mode\", \"Color Mode\", \"dropdown\", config.settings[\"gradients\"]],\n [\"s_color\", \"Spark Color \", \"dropdown\", config.settings[\"colors\"]],\n [\"s_count\", \"Spark Amount\", \"slider\", (0,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//6,1)],\n [\"mirror\", \"Mirror\", \"checkbox\"],\n [\"flip_lr\", \"Flip LR\", \"checkbox\"]],\n \"Single\":[[\"color\", \"Color\", \"dropdown\", config.settings[\"colors\"]]],\n \"Beat\":[[\"color\", \"Color\", \"dropdown\", config.settings[\"colors\"]],\n [\"decay\", \"Flash Decay\", \"float_slider\", (0.3,0.98,0.005)]],\n \"Bars\":[[\"color_mode\", \"Color Mode\", \"dropdown\", config.settings[\"gradients\"]],\n [\"resolution\", \"Resolution\", \"slider\", (1, config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"], 1)],\n [\"roll_speed\", \"Roll Speed\", \"slider\", (0,8,1)],\n [\"flip_lr\", \"Flip LR\", \"checkbox\"],\n [\"mirror\", \"Mirror\", \"checkbox\"],\n [\"reverse_roll\", \"Reverse Roll\", \"checkbox\"]],\n \"Gradient\":[[\"color_mode\", \"Color Mode\", \"dropdown\", config.settings[\"gradients\"]],\n [\"roll_speed\", \"Roll Speed\", \"slider\", (0,8,1)],\n [\"mirror\", \"Mirror\", \"checkbox\"],\n [\"reverse\", \"Reverse\", \"checkbox\"]],\n \"Fade\":[[\"color_mode\", \"Color Mode\", \"dropdown\", config.settings[\"gradients\"]],\n [\"roll_speed\", \"Fade Speed\", \"slider\", (0,8,1)],\n [\"reverse\", \"Reverse\", \"checkbox\"]],\n \"Calibration\":[[\"r\", \"Red value\", \"slider\", (0,255,1)],\n [\"g\", \"Green value\", \"slider\", (0,255,1)],\n [\"b\", \"Blue value\", \"slider\", (0,255,1)]]\n }\n # Setup for latency timer\n self.latency_deque = deque(maxlen=1000)\n # Setup for \"Wave\" (don't change these)\n self.wave_wipe_count = 0\n # Setup for \"Power\" (don't change these)\n self.power_indexes = []\n self.power_brightness = 0\n # Setup for multicolour modes (don't mess with this either unless you want to add in your own multicolour modes)\n # If there's a multicolour mode you would like to see, let me know on GitHub! \n\n #def _vect_easing_func_gen(slope=2.5, length=1):\n # return np.vectorize(_easing_func)\n\n def _easing_func(x, length, slope=2.5):\n # returns a nice eased curve with defined length and curve\n xa = (x/length)**slope\n return xa / (xa + (1 - (x/length))**slope)\n\n\n def _easing_gradient_generator(colors, length):\n \"\"\"\n returns np.array of given length that eases between specified colours\n\n parameters:\n colors - list, colours must be in config.settings[\"colors\"]\n eg. [\"Red\", \"Orange\", \"Blue\", \"Purple\"]\n length - int, length of array to return. should be from config.settings\n eg. 
config.settings[\"devices\"][\"my strip\"][\"configuration\"][\"N_PIXELS\"]\n \"\"\"\n colors = colors[::-1] # needs to be reversed, makes it easier to deal with\n n_transitions = len(colors) - 1\n ease_length = length // n_transitions\n pad = length - (n_transitions * ease_length)\n output = np.zeros((3, length))\n ease = np.array([_easing_func(i, ease_length, slope=2.5) for i in range(ease_length)])\n # for r,g,b\n for i in range(3):\n # for each transition\n for j in range(n_transitions):\n # Starting ease value\n start_value = config.settings[\"colors\"][colors[j]][i]\n # Ending ease value\n end_value = config.settings[\"colors\"][colors[j+1]][i]\n # Difference between start and end\n diff = end_value - start_value\n # Make array of all start value\n base = np.empty(ease_length)\n base.fill(start_value)\n # Make array of the difference between start and end\n diffs = np.empty(ease_length)\n diffs.fill(diff)\n # run diffs through easing function to make smooth curve\n eased_diffs = diffs * ease\n # add transition to base values to produce curve from start to end value\n base += eased_diffs\n # append this to the output array\n output[i, j*ease_length:(j+1)*ease_length] = base\n # cast to int\n output = np.asarray(output, dtype=int)\n # pad out the ends (bit messy but it works and looks good)\n if pad:\n for i in range(3):\n output[i, -pad:] = output[i, -pad-1]\n return output\n\n self.multicolor_modes = {}\n for gradient in config.settings[\"gradients\"]:\n self.multicolor_modes[gradient] = _easing_gradient_generator(config.settings[\"gradients\"][gradient],\n config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"])\n\n # # chunks of colour gradients\n # _blank_overlay = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n # # used to construct rgb overlay. [0-255,255...] whole length of strip\n \n # _gradient_whole = [int(i*config.settings[\"configuration\"][\"MAX_BRIGHTNESS\"]/(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2))\\\n # for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2)] +\\\n # [config.settings[\"configuration\"][\"MAX_BRIGHTNESS\"] for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2)]\n # # also used to make bits and pieces. [0-255], 1/2 length of strip\n # _alt_gradient_half = [int(i*config.settings[\"configuration\"][\"MAX_BRIGHTNESS\"]/(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2))\\\n # for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2)]\n # # used to construct rgb overlay. [0-255,255...] 
1/2 length of strip\n # _gradient_half = _gradient_whole[::2]\n # # Spectral colour mode\n # self.multicolor_modes[\"Spectral\"] = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n # self.multicolor_modes[\"Spectral\"][2, :config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2] = _gradient_half[::-1]\n # self.multicolor_modes[\"Spectral\"][1, :] = _gradient_half + _gradient_half[::-1]\n # self.multicolor_modes[\"Spectral\"][0, :] = np.flipud(self.multicolor_modes[\"Spectral\"][2])\n # # Dancefloor colour mode\n # self.multicolor_modes[\"Dancefloor\"] = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n # self.multicolor_modes[\"Dancefloor\"][2, :] = _gradient_whole[::-1]\n # self.multicolor_modes[\"Dancefloor\"][0, :] = _gradient_whole\n # # Brilliance colour mode\n # self.multicolor_modes[\"Brilliance\"] = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n # self.multicolor_modes[\"Brilliance\"][2, :] = _gradient_whole[::-1]\n # self.multicolor_modes[\"Brilliance\"][1, :] = 255\n # self.multicolor_modes[\"Brilliance\"][0, :] = _gradient_whole\n # # Jungle colour mode\n # self.multicolor_modes[\"Jungle\"] = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n # self.multicolor_modes[\"Jungle\"][1, :] = _gradient_whole[::-1]\n # self.multicolor_modes[\"Jungle\"][0, :] = _gradient_whole\n # # Sky colour mode\n # self.multicolor_modes[\"Sky\"] = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n # self.multicolor_modes[\"Sky\"][1, :config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2] = _alt_gradient_half[::-1]\n # self.multicolor_modes[\"Sky\"][0, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2:] = _alt_gradient_half\n # self.multicolor_modes[\"Sky\"][2, :config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]] = 255\n # # Acid colour mode\n # self.multicolor_modes[\"Acid\"] = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n # self.multicolor_modes[\"Acid\"][2, :config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2] = _alt_gradient_half[::-1]\n # self.multicolor_modes[\"Acid\"][1, :] = 255\n # self.multicolor_modes[\"Acid\"][0, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2:] = _alt_gradient_half\n # # Ocean colour mode\n # self.multicolor_modes[\"Ocean\"] = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n # self.multicolor_modes[\"Ocean\"][1, :] = _gradient_whole\n # self.multicolor_modes[\"Ocean\"][2, :] = _gradient_whole[::-1]\n for i in self.multicolor_modes:\n self.multicolor_modes[i] = np.concatenate((self.multicolor_modes[i][:, ::-1],\n self.multicolor_modes[i]), axis=1)\n\n def get_vis(self, y, audio_input):\n self.update_freq_channels(y)\n self.detect_freqs()\n time1 = time.time()\n if config.settings[\"devices\"][self.board][\"configuration\"][\"current_effect\"] in self.non_reactive_effects:\n self.prev_output = self.effects[config.settings[\"devices\"][self.board][\"configuration\"][\"current_effect\"]]()\n elif audio_input:\n self.prev_output = self.effects[config.settings[\"devices\"][self.board][\"configuration\"][\"current_effect\"]](y)\n else:\n self.prev_output = np.multiply(self.prev_output, 0.95)\n time2 = time.time()\n 
self.latency_deque.append(1000*(time2-time1))\n if config.settings[\"configuration\"][\"USE_GUI\"]:\n gui.label_latency.setText(\"{} ms Processing Latency \".format(int(sum(self.latency_deque)/len(self.latency_deque))))\n return self.prev_output\n\n def _split_equal(self, value, parts):\n value = float(value)\n return [int(round(i*value/parts)) for i in range(1,parts+1)]\n\n def update_freq_channels(self, y):\n for i in range(len(y)):\n self.freq_channels[i].appendleft(y[i])\n\n def detect_freqs(self):\n \"\"\"\n Function that updates current_freq_detects. Any visualisation algorithm can check if\n there is currently a beat, low, mid, or high by querying the self.current_freq_detects dict.\n \"\"\"\n channel_avgs = []\n differences = []\n for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]):\n channel_avgs.append(sum(self.freq_channels[i])/len(self.freq_channels[i]))\n differences.append(((self.freq_channels[i][0]-channel_avgs[i])*100)//channel_avgs[i])\n for i in [\"beat\", \"low\", \"mid\", \"high\"]:\n if any(differences[j] >= self.min_percent_diff[i]\\\n and self.freq_channels[j][0] >= self.min_detect_amplitude[i]\\\n for j in range(*self.detection_ranges[i]))\\\n and (time.time() - self.prev_freq_detects[i] > 0.1)\\\n and len(self.freq_channels[0]) == self.freq_channel_history:\n self.prev_freq_detects[i] = time.time()\n self.current_freq_detects[i] = True\n #print(i)\n else:\n self.current_freq_detects[i] = False \n\n def visualize_scroll(self, y):\n \"\"\"Effect that originates in the center and scrolls outwards\"\"\"\n global p\n y = y**4.0\n signal_processers[self.board].gain.update(y)\n y /= signal_processers[self.board].gain.value\n y *= 255.0\n r = int(np.max(y[:len(y) // 3])*config.settings[\"devices\"][self.board][\"effect_opts\"][\"Scroll\"][\"r_multiplier\"])\n g = int(np.max(y[len(y) // 3: 2 * len(y) // 3])*config.settings[\"devices\"][self.board][\"effect_opts\"][\"Scroll\"][\"g_multiplier\"])\n b = int(np.max(y[2 * len(y) // 3:])*config.settings[\"devices\"][self.board][\"effect_opts\"][\"Scroll\"][\"b_multiplier\"])\n # Scrolling effect window\n speed = config.settings[\"devices\"][self.board][\"effect_opts\"][\"Scroll\"][\"speed\"]\n p[:, speed:] = p[:, :-speed]\n p *= config.settings[\"devices\"][self.board][\"effect_opts\"][\"Scroll\"][\"decay\"]\n p = gaussian_filter1d(p, sigma=config.settings[\"devices\"][self.board][\"effect_opts\"][\"Scroll\"][\"blur\"])\n # Create new color originating at the center\n p[0, :speed] = r\n p[1, :speed] = g\n p[2, :speed] = b\n # Update the LED strip\n return np.concatenate((p[:, ::-1], p), axis=1)\n\n def visualize_energy(self, y):\n \"\"\"Effect that expands from the center with increasing sound energy\"\"\"\n global p\n y = np.copy(y)\n signal_processers[self.board].gain.update(y)\n y /= signal_processers[self.board].gain.value\n scale = config.settings[\"devices\"][self.board][\"effect_opts\"][\"Energy\"][\"scale\"]\n # Scale by the width of the LED strip\n y *= float((config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] * scale) - 1)\n # Map color channels according to energy in the different freq bands\n r = int(np.mean(y[:len(y) // 3]**scale)*config.settings[\"devices\"][self.board][\"effect_opts\"][\"Energy\"][\"r_multiplier\"])\n g = int(np.mean(y[len(y) // 3: 2 * len(y) // 3]**scale)*config.settings[\"devices\"][self.board][\"effect_opts\"][\"Energy\"][\"g_multiplier\"])\n b = int(np.mean(y[2 * len(y) // 
3:]**scale)*config.settings[\"devices\"][self.board][\"effect_opts\"][\"Energy\"][\"b_multiplier\"])\n # Assign color to different frequency regions\n p[0, :r] = 255.0\n p[0, r:] = 0.0\n p[1, :g] = 255.0\n p[1, g:] = 0.0\n p[2, :b] = 255.0\n p[2, b:] = 0.0\n signal_processers[self.board].p_filt.update(p)\n p = np.round(signal_processers[self.board].p_filt.value)\n # Apply blur to smooth the edges\n p[0, :] = gaussian_filter1d(p[0, :], sigma=config.settings[\"devices\"][self.board][\"effect_opts\"][\"Energy\"][\"blur\"])\n p[1, :] = gaussian_filter1d(p[1, :], sigma=config.settings[\"devices\"][self.board][\"effect_opts\"][\"Energy\"][\"blur\"])\n p[2, :] = gaussian_filter1d(p[2, :], sigma=config.settings[\"devices\"][self.board][\"effect_opts\"][\"Energy\"][\"blur\"])\n # Set the new pixel value\n return np.concatenate((p[:, ::-1], p), axis=1)\n\n def visualize_wavelength(self, y):\n y = np.copy(interpolate(y, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2))\n signal_processers[self.board].common_mode.update(y)\n diff = y - self.prev_spectrum\n self.prev_spectrum = np.copy(y)\n # Color channel mappings\n r = signal_processers[self.board].r_filt.update(y - signal_processers[self.board].common_mode.value)\n #g = np.abs(diff)\n b = signal_processers[self.board].b_filt.update(np.copy(y))\n r = np.array([j for i in zip(r,r) for j in i])\n output = np.array([self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"color_mode\"]][0][\n (config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"reverse_grad\"] else 0):\n (None if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"reverse_grad\"] else config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]):]*r,\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"color_mode\"]][1][\n (config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"reverse_grad\"] else 0):\n (None if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"reverse_grad\"] else config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]):]*r,\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"color_mode\"]][2][\n (config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"reverse_grad\"] else 0):\n (None if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"reverse_grad\"] else config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]):]*r])\n #self.prev_spectrum = y\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"color_mode\"]] = np.roll(\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"color_mode\"]],\n config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"roll_speed\"]*(-1 if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"reverse_roll\"] else 1),\n axis=1)\n output[0] = gaussian_filter1d(output[0], sigma=config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"blur\"])\n output[1] = gaussian_filter1d(output[1], 
sigma=config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"blur\"])\n output[2] = gaussian_filter1d(output[2], sigma=config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"blur\"])\n if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"flip_lr\"]:\n output = np.fliplr(output)\n if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wavelength\"][\"mirror\"]:\n output = np.concatenate((output[:, ::-2], output[:, ::2]), axis=1)\n return output\n \n def visualize_spectrum(self, y):\n \"\"\"Effect that maps the Mel filterbank frequencies onto the LED strip\"\"\"\n global p\n #print(len(y))\n #print(y)\n y = np.copy(interpolate(y, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2))\n signal_processers[self.board].common_mode.update(y)\n diff = y - self.prev_spectrum\n self.prev_spectrum = np.copy(y)\n # Color channel mappings\n r = signal_processers[self.board].r_filt.update(y - signal_processers[self.board].common_mode.value)\n g = np.abs(diff)\n b = signal_processers[self.board].b_filt.update(np.copy(y))\n r *= config.settings[\"devices\"][self.board][\"effect_opts\"][\"Spectrum\"][\"r_multiplier\"]\n g *= config.settings[\"devices\"][self.board][\"effect_opts\"][\"Spectrum\"][\"g_multiplier\"]\n b *= config.settings[\"devices\"][self.board][\"effect_opts\"][\"Spectrum\"][\"b_multiplier\"]\n # Mirror the color channels for symmetric output\n r = np.concatenate((r[::-1], r))\n g = np.concatenate((g[::-1], g))\n b = np.concatenate((b[::-1], b))\n output = np.array([r, g,b]) * 255\n self.prev_spectrum = y\n return output\n\n def visualize_auto(self,y):\n \"\"\"Automatically (intelligently?) cycle through effects\"\"\"\n return self.visualize_beat(y) # real intelligent\n\n def visualize_wave(self, y):\n \"\"\"Effect that flashes to the beat with scrolling coloured bits\"\"\"\n if self.current_freq_detects[\"beat\"]:\n output = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n output[0][:]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"color_flash\"]][0]\n output[1][:]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"color_flash\"]][1]\n output[2][:]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"color_flash\"]][2]\n self.wave_wipe_count = config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"wipe_len\"]\n else:\n output = np.copy(self.prev_output)\n #for i in range(len(self.prev_output)):\n # output[i] = np.hsplit(self.prev_output[i],2)[0]\n output = np.multiply(self.prev_output,config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"decay\"])\n for i in range(self.wave_wipe_count):\n output[0][i]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"color_wave\"]][0]\n output[0][-i]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"color_wave\"]][0]\n output[1][i]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"color_wave\"]][1]\n output[1][-i]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"color_wave\"]][1]\n output[2][i]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"color_wave\"]][2]\n 
output[2][-i]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"color_wave\"]][2]\n #output = np.concatenate([output,np.fliplr(output)], axis=1)\n if self.wave_wipe_count > config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2:\n self.wave_wipe_count = config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//2\n self.wave_wipe_count += config.settings[\"devices\"][self.board][\"effect_opts\"][\"Wave\"][\"wipe_speed\"]\n return output\n\n def visualize_beat(self, y):\n \"\"\"Effect that flashes to the beat\"\"\"\n if self.current_freq_detects[\"beat\"]:\n output = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n output[0][:]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Beat\"][\"color\"]][0]\n output[1][:]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Beat\"][\"color\"]][1]\n output[2][:]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Beat\"][\"color\"]][2]\n else:\n output = np.copy(self.prev_output)\n output = np.multiply(self.prev_output,config.settings[\"devices\"][self.board][\"effect_opts\"][\"Beat\"][\"decay\"])\n return output\n\n def visualize_bars(self, y):\n # Bit of fiddling with the y values\n y = np.copy(interpolate(y, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2))\n signal_processers[self.board].common_mode.update(y)\n self.prev_spectrum = np.copy(y)\n # Color channel mappings\n r = signal_processers[self.board].r_filt.update(y - signal_processers[self.board].common_mode.value)\n r = np.array([j for i in zip(r,r) for j in i])\n # Split y into [resulution] chunks and calculate the average of each\n max_values = np.array([max(i) for i in np.array_split(r, config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"resolution\"])])\n max_values = np.clip(max_values, 0, 1)\n color_sets = []\n for i in range(config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"resolution\"]):\n # [r,g,b] values from a multicolour gradient array at [resulution] equally spaced intervals\n color_sets.append([self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"color_mode\"]]\\\n [j][i*(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"resolution\"])] for j in range(3)])\n output = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n chunks = np.array_split(output[0], config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"resolution\"])\n n = 0\n # Assign blocks with heights corresponding to max_values and colours from color_sets\n for i in range(len(chunks)):\n m = len(chunks[i])\n for j in range(3):\n output[j][n:n+m] = color_sets[i][j]*max_values[i]\n n += m\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"color_mode\"]] = np.roll(\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"color_mode\"]],\n config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"roll_speed\"]*(-1 if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"reverse_roll\"] else 1),\n axis=1)\n if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"flip_lr\"]:\n output = np.fliplr(output)\n if 
config.settings[\"devices\"][self.board][\"effect_opts\"][\"Bars\"][\"mirror\"]:\n output = np.concatenate((output[:, ::-2], output[:, ::2]), axis=1)\n return output\n\n def visualize_power(self, y):\n #config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"color_mode\"]\n # Bit of fiddling with the y values\n y = np.copy(interpolate(y, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2))\n signal_processers[self.board].common_mode.update(y)\n self.prev_spectrum = np.copy(y)\n # Color channel mappings\n r = signal_processers[self.board].r_filt.update(y - signal_processers[self.board].common_mode.value)\n r = np.array([j for i in zip(r,r) for j in i])\n output = np.array([self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"color_mode\"]][0, :config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]]*r,\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"color_mode\"]][1, :config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]]*r,\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"color_mode\"]][2, :config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]]*r])\n # if there's a high (eg clap):\n if self.current_freq_detects[\"high\"]:\n self.power_brightness = 1.0\n # Generate random indexes\n self.power_indexes = random.sample(range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]), config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"s_count\"])\n #print(\"ye\")\n # Assign colour to the random indexes\n for index in self.power_indexes:\n output[0, index] = int(config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"s_color\"]][0]*self.power_brightness)\n output[1, index] = int(config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"s_color\"]][1]*self.power_brightness)\n output[2, index] = int(config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"s_color\"]][2]*self.power_brightness)\n # Remove some of the indexes for next time\n self.power_indexes = [i for i in self.power_indexes if i not in random.sample(self.power_indexes, len(self.power_indexes)//4)]\n if len(self.power_indexes) <= 4:\n self.power_indexes = []\n # Fade the colour of the sparks out a bit for next time\n if self.power_brightness > 0:\n self.power_brightness -= 0.05\n # Calculate length of bass bar based on max bass frequency volume and length of strip\n strip_len = int((config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]//3)*max(y[:int(config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]*0.2)]))\n # Add the bass bars into the output. 
Colour proportional to length\n output[0][:strip_len] = self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"color_mode\"]][0][strip_len]\n output[1][:strip_len] = self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"color_mode\"]][1][strip_len]\n output[2][:strip_len] = self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"color_mode\"]][2][strip_len]\n if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"flip_lr\"]:\n output = np.fliplr(output)\n if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Power\"][\"mirror\"]:\n output = np.concatenate((output[:, ::-2], output[:, ::2]), axis=1)\n return output\n\n def visualize_pulse(self, y):\n \"\"\"fckin dope ass visuals that's what\"\"\"\n config.settings[\"devices\"][self.board][\"effect_opts\"][\"Pulse\"][\"bar_color\"]\n config.settings[\"devices\"][self.board][\"effect_opts\"][\"Pulse\"][\"bar_speed\"]\n config.settings[\"devices\"][self.board][\"effect_opts\"][\"Pulse\"][\"bar_length\"]\n config.settings[\"devices\"][self.board][\"effect_opts\"][\"Pulse\"][\"color_mode\"]\n y = np.copy(interpolate(y, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2))\n common_mode.update(y) # i honestly have no idea what this is but i just work with it rather than trying to figure it out\n self.prev_spectrum = np.copy(y)\n # Color channel mappings\n r = r_filt.update(y - common_mode.value) # same with this, no flippin clue\n r = np.array([j for i in zip(r,r) for j in i])\n output = np.array([self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Pulse\"][\"color_mode\"]][0][:config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]],\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Pulse\"][\"color_mode\"]][1][:config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]],\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Pulse\"][\"color_mode\"]][2][:config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]]])\n \n def visualize_single(self):\n \"Displays a single colour, non audio reactive\"\n output = np.zeros((3,config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]))\n output[0][:]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Single\"][\"color\"]][0]\n output[1][:]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Single\"][\"color\"]][1]\n output[2][:]=config.settings[\"colors\"][config.settings[\"devices\"][self.board][\"effect_opts\"][\"Single\"][\"color\"]][2]\n return output\n\n def visualize_gradient(self):\n \"Displays a multicolour gradient, non audio reactive\"\n output = np.array([self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Gradient\"][\"color_mode\"]][0][:config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]],\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Gradient\"][\"color_mode\"]][1][:config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]],\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Gradient\"][\"color_mode\"]][2][:config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"]]])\n 
self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Gradient\"][\"color_mode\"]] = np.roll(\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Gradient\"][\"color_mode\"]],\n config.settings[\"devices\"][self.board][\"effect_opts\"][\"Gradient\"][\"roll_speed\"]*(-1 if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Gradient\"][\"reverse\"] else 1),\n axis=1)\n if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Gradient\"][\"mirror\"]:\n output = np.concatenate((output[:, ::-2], output[:, ::2]), axis=1)\n return output\n\n def visualize_fade(self):\n \"Fades through a multicolour gradient, non audio reactive\"\n output = np.array([[self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Fade\"][\"color_mode\"]][0][0] for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"])],\n [self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Fade\"][\"color_mode\"]][1][0] for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"])],\n [self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Fade\"][\"color_mode\"]][2][0] for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"])]])\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Fade\"][\"color_mode\"]] = np.roll(\n self.multicolor_modes[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Fade\"][\"color_mode\"]],\n config.settings[\"devices\"][self.board][\"effect_opts\"][\"Fade\"][\"roll_speed\"]*(-1 if config.settings[\"devices\"][self.board][\"effect_opts\"][\"Fade\"][\"reverse\"] else 1),\n axis=1)\n return output\n\n def visualize_calibration(self):\n \"Custom values for RGB\"\n output = np.array([[config.settings[\"devices\"][self.board][\"effect_opts\"][\"Calibration\"][\"r\"] for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"])],\n [config.settings[\"devices\"][self.board][\"effect_opts\"][\"Calibration\"][\"g\"] for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"])],\n [config.settings[\"devices\"][self.board][\"effect_opts\"][\"Calibration\"][\"b\"] for i in range(config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"])]])\n return output\n \nclass GUI(QMainWindow):\n def __init__(self):\n super().__init__()\n self.initMainWindow()\n self.updateUIVisibleItems()\n\n def initMainWindow(self):\n # Set up window and wrapping layout\n self.setWindowTitle(\"Visualization\")\n # Initial window size/pos last saved if available\n settings.beginGroup(\"MainWindow\")\n if not settings.value(\"geometry\") == None:\n self.restoreGeometry(settings.value(\"geometry\"))\n if not settings.value(\"state\") == None:\n self.restoreState(settings.value(\"state\"))\n settings.endGroup()\n self.main_wrapper = QVBoxLayout()\n\n # Set up toolbar\n #toolbar_guiDialogue.setShortcut('Ctrl+H')\n toolbar_deviceDialogue = QAction('LED Strip Manager', self)\n toolbar_deviceDialogue.triggered.connect(self.deviceDialogue)\n toolbar_guiDialogue = QAction('GUI Properties', self)\n toolbar_guiDialogue.triggered.connect(self.guiDialogue)\n toolbar_saveDialogue = QAction('Save Settings', self)\n toolbar_saveDialogue.triggered.connect(self.saveDialogue)\n \n self.toolbar = self.addToolBar('top_toolbar')\n self.toolbar.setObjectName('top_toolbar')\n 
self.toolbar.addAction(toolbar_guiDialogue)\n self.toolbar.addAction(toolbar_saveDialogue)\n self.toolbar.addAction(toolbar_deviceDialogue)\n\n # Set up FPS and error labels\n self.statusbar = QStatusBar()\n self.setStatusBar(self.statusbar)\n self.label_error = QLabel(\"\")\n self.label_fps = QLabel(\"\")\n self.label_latency = QLabel(\"\")\n self.label_fps.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.label_latency.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.statusbar.addPermanentWidget(self.label_error, stretch=1)\n self.statusbar.addPermanentWidget(self.label_latency)\n self.statusbar.addPermanentWidget(self.label_fps)\n\n # Set up board tabs\n self.label_boards = QLabel(\"Boards\")\n self.boardsTabWidget = QTabWidget()\n # Dynamically set up boards tabs\n self.board_tabs = {} # contains all the tabs for each board\n self.board_tabs_widgets = {} # contains all the widgets for each tab\n for board in config.settings[\"devices\"]:\n # Make the tab\n self.addBoard(board)\n self.main_wrapper.addWidget(self.label_boards)\n self.main_wrapper.addWidget(self.boardsTabWidget)\n #self.setLayout(self.main_wrapper)\n\n # Set wrapper as main widget\n self.setCentralWidget(QWidget(self))\n self.centralWidget().setLayout(self.main_wrapper)\n self.show()\n\n def addBoard(self, board):\n self.board_tabs_widgets[board] = {}\n self.board_tabs[board] = QWidget()\n\n self.initBoardUI(board)\n self.boardsTabWidget.addTab(self.board_tabs[board],board)\n self.board_tabs[board].setLayout(self.board_tabs_widgets[board][\"wrapper\"])\n pass\n\n def closeEvent(self, event):\n # executed when the window is being closed\n quit_msg = \"Are you sure you want to exit?\"\n reply = QMessageBox.question(self, 'Message', \n quit_msg, QMessageBox.Yes, QMessageBox.No)\n if reply == QMessageBox.Yes:\n # Save window state\n settings.beginGroup(\"MainWindow\")\n settings.setValue(\"geometry\", self.saveGeometry())\n settings.setValue('state', self.saveState())\n settings.endGroup()\n # save all settings\n settings.setValue(\"settings_dict\", config.settings)\n # save and close\n settings.sync()\n event.accept()\n sys.exit(0)\n \n else:\n event.ignore()\n\n def updateUIVisibleItems(self):\n for section in self.gui_widgets:\n for widget in self.gui_widgets[section]:\n widget.setVisible(config.settings[\"GUI_opts\"][section])\n\n def deviceDialogue(self):\n def update_visibilty_dict():\n for checkbox in self.gui_vis_checkboxes:\n config.settings[\"GUI_opts\"][checkbox] = self.gui_vis_checkboxes[checkbox].isChecked()\n self.updateUIVisibleItems()\n\n def show_hide_addBoard_interface():\n current_device = device_type_cbox.currentText()\n for device in config.device_req_config:\n for req_config_setting in widgets[device]:\n if req_config_setting != \"no_config\":\n for widget in widgets[device][req_config_setting]:\n widget.setVisible(device == current_device)\n else:\n # doesn't make sense i know i know\n widgets[device][req_config_setting].setVisible(device == current_device)\n\n def validate_input():\n import re\n current_device = device_type_cbox.currentText()\n tests = []\n print(\"testing\")\n if current_device == \"ESP8266\":\n for req_config_setting in config.device_req_config[current_device]:\n test = widgets[current_device][req_config_setting][1].text()\n if req_config_setting == \"MAC_ADDR\":\n # Validate MAC\n tests.append(True if re.match(\"[0-9a-f]{2}([-:])[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$\", test.lower()) else False)\n elif req_config_setting == \"UDP_IP\":\n # Validate IP\n try:\n pieces = 
test.split('.')\n if len(pieces) != 4: raise ValueError\n tests.append(all(0<=int(p)<256 for p in pieces))\n except:\n tests.append(False)\n elif req_config_setting == \"UDP_PORT\":\n # Validate port\n print(test)\n try:\n if int(test) > 0:\n tests.append(True)\n except:\n tests.append(False)\n \n\n\n\n\n #pass\n \n \n # Validate port\n elif current_device == \"RaspberryPi\":\n pass\n # Validate LED Pin\n # Validate LED Freq\n # Validate LED DMA\n elif current_device == \"Fadecandy\":\n pass\n # Validate server\n elif not config.req_config_setting[current_device]:\n pass\n print(tests)\n\n # def lineEdit(labelText, defaultText):\n # wrapper = QWidget()\n # hLayout = QHBoxLayout()\n # wrapper.setLayout(hLayout)\n # label = QLabel(labelText)\n # lEdit = QLineEdit()\n # lEdit.setPlaceholderText(defaultText)\n # hLayout.addWidget(label)\n # hLayout.addWidget(lEdit)\n # return wrapper\n\n # Set up window and layout\n self.device_dialogue = QDialog(None, Qt.WindowSystemMenuHint | Qt.WindowCloseButtonHint)\n self.device_dialogue.setWindowTitle(\"LED Strip Manager\")\n self.device_dialogue.setWindowModality(Qt.ApplicationModal)\n layout = QVBoxLayout()\n self.device_dialogue.setLayout(layout)\n\n # Set up tab layouts\n tabs = QTabWidget()\n layout.addWidget(tabs)\n addDeviceTab = QWidget()\n remDeviceTab = QWidget()\n addDeviceTabLayout = QVBoxLayout()\n remDeviceTabLayout = QVBoxLayout()\n addDeviceTabButtonLayout = QGridLayout()\n remDeviceTabButtonLayout = QGridLayout()\n addDeviceTab.setLayout(addDeviceTabLayout)\n remDeviceTab.setLayout(remDeviceTabLayout)\n tabs.addTab(addDeviceTab, \"Add Device\")\n tabs.addTab(remDeviceTab, \"Remove Device\")\n\n # Set up \"Add Device\" tab\n device_type_cbox = QComboBox()\n device_type_cbox.addItems(config.device_req_config.keys())\n device_type_cbox.currentIndexChanged.connect(show_hide_addBoard_interface)\n addDeviceTabLayout.addWidget(device_type_cbox)\n\n # Set up \"Add Device\" widgets\n widgets = {}\n addDeviceTabLayout.addLayout(addDeviceTabButtonLayout)\n remDeviceTabLayout.addLayout(remDeviceTabButtonLayout)\n # if the new board has required config\n for device in config.device_req_config:\n # Make the widgets\n widgets[device] = {}\n if config.device_req_config[device]:\n for req_config_setting in config.device_req_config[device]:\n label = config.device_req_config[device][req_config_setting][0]\n guide = config.device_req_config[device][req_config_setting][1]\n wType = config.device_req_config[device][req_config_setting][2]\n deflt = config.device_req_config[device][req_config_setting][3]\n wLabel = QLabel(label)\n #wGuide = QLabel(guide)\n if wType == \"textbox\":\n wEdit = QLineEdit()\n wEdit.setPlaceholderText(deflt)\n wEdit.textChanged.connect(validate_input)\n elif wType == \"checkbox\":\n wEdit = QCheckBox()\n wEdit.setCheckState(Qt.Checked if deflt else Qt.Unchecked)\n widgets[device][req_config_setting] = [wLabel, wEdit]\n # Add widgets to layout\n i = 0\n for req_config in widgets[device]:\n addDeviceTabButtonLayout.addWidget(widgets[device][req_config][0], i, 0)\n addDeviceTabButtonLayout.addWidget(widgets[device][req_config][1], i, 1)\n #addDeviceTabButtonLayout.addWidget(widget_set[2], i+1, 0, 1, 2)\n i += 1\n else:\n no_setup = QLabel(\"Device requires no additional setup here! 
:)\")\n widgets[device][\"no_config\"] = no_setup\n addDeviceTabButtonLayout.addWidget(no_setup, 0, 0)\n\n # Show appropriate widgets\n show_hide_addBoard_interface()\n\n\n\n # self.gui_vis_checkboxes = {}\n # for section in self.gui_widgets:\n # self.gui_vis_checkboxes[section] = QCheckBox(section)\n # self.gui_vis_checkboxes[section].setCheckState(\n # Qt.Checked if config.settings[\"GUI_opts\"][section] else Qt.Unchecked)\n # self.gui_vis_checkboxes[section].stateChanged.connect(update_visibilty_dict)\n # addDeviceTabLayout.addWidget(self.gui_vis_checkboxes[section])\n self.add_device_button = QPushButton(\"Add Device\")\n addDeviceTabLayout.addWidget(self.add_device_button)\n\n # Set up \"Remove Device\" tab\n\n # Set up ok/cancel buttons\n self.buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self)\n self.buttons.accepted.connect(self.device_dialogue.accept)\n self.buttons.rejected.connect(self.device_dialogue.reject)\n layout.addWidget(self.buttons)\n \n self.device_dialogue.show()\n\n def saveDialogue(self):\n # Save window state\n settings.beginGroup(\"MainWindow\")\n settings.setValue(\"geometry\", self.saveGeometry())\n settings.setValue('state', self.saveState())\n settings.endGroup()\n # save all settings\n settings.setValue(\"settings_dict\", config.settings)\n # save and close\n settings.sync()\n # Confirmation message\n self.conf_dialogue = QMessageBox()\n self.conf_dialogue.setText(\"Settings saved.\\nSettings are also automatically saved when program closes.\")\n self.conf_dialogue.show()\n\n def guiDialogue(self):\n def update_visibilty_dict():\n for checkbox in self.gui_vis_checkboxes:\n config.settings[\"GUI_opts\"][checkbox] = self.gui_vis_checkboxes[checkbox].isChecked()\n self.updateUIVisibleItems()\n\n self.gui_dialogue = QDialog(None, Qt.WindowSystemMenuHint | Qt.WindowCloseButtonHint)\n self.gui_dialogue.setWindowTitle(\"GUI Properties\")\n self.gui_dialogue.setWindowModality(Qt.ApplicationModal)\n layout = QGridLayout()\n self.gui_dialogue.setLayout(layout)\n # OK button\n self.buttons = QDialogButtonBox(QDialogButtonBox.Ok, Qt.Horizontal, self)\n self.buttons.accepted.connect(self.gui_dialogue.accept)\n\n self.gui_vis_checkboxes = {}\n for section in self.gui_widgets:\n self.gui_vis_checkboxes[section] = QCheckBox(section)\n self.gui_vis_checkboxes[section].setCheckState(\n Qt.Checked if config.settings[\"GUI_opts\"][section] else Qt.Unchecked)\n self.gui_vis_checkboxes[section].stateChanged.connect(update_visibilty_dict)\n layout.addWidget(self.gui_vis_checkboxes[section])\n layout.addWidget(self.buttons)\n self.gui_dialogue.show()\n \n def initBoardUI(self, board):\n self.board = board\n # Set up wrapping layout\n self.board_tabs_widgets[board][\"wrapper\"] = QVBoxLayout()\n \n # Set up graph layout\n self.board_tabs_widgets[board][\"graph_view\"] = pg.GraphicsView()\n graph_layout = pg.GraphicsLayout(border=(100,100,100))\n self.board_tabs_widgets[board][\"graph_view\"].setCentralItem(graph_layout)\n # Mel filterbank plot\n fft_plot = graph_layout.addPlot(title='Filterbank Output', colspan=3)\n fft_plot.setRange(yRange=[-0.1, 1.2])\n fft_plot.disableAutoRange(axis=pg.ViewBox.YAxis)\n x_data = np.array(range(1, config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"] + 1))\n self.board_tabs_widgets[board][\"mel_curve\"] = pg.PlotCurveItem()\n self.board_tabs_widgets[board][\"mel_curve\"].setData(x=x_data, y=x_data*0)\n fft_plot.addItem(self.board_tabs_widgets[board][\"mel_curve\"])\n # Visualization 
plot\n graph_layout.nextRow()\n led_plot = graph_layout.addPlot(title='Visualization Output', colspan=3)\n led_plot.setRange(yRange=[-5, 260])\n led_plot.disableAutoRange(axis=pg.ViewBox.YAxis)\n # Pen for each of the color channel curves\n r_pen = pg.mkPen((255, 30, 30, 200), width=4)\n g_pen = pg.mkPen((30, 255, 30, 200), width=4)\n b_pen = pg.mkPen((30, 30, 255, 200), width=4)\n # Color channel curves\n self.board_tabs_widgets[board][\"r_curve\"] = pg.PlotCurveItem(pen=r_pen)\n self.board_tabs_widgets[board][\"g_curve\"] = pg.PlotCurveItem(pen=g_pen)\n self.board_tabs_widgets[board][\"b_curve\"] = pg.PlotCurveItem(pen=b_pen)\n # Define x data\n x_data = np.array(range(1, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] + 1))\n self.board_tabs_widgets[board][\"r_curve\"].setData(x=x_data, y=x_data*0)\n self.board_tabs_widgets[board][\"g_curve\"].setData(x=x_data, y=x_data*0)\n self.board_tabs_widgets[board][\"b_curve\"].setData(x=x_data, y=x_data*0)\n # Add curves to plot\n led_plot.addItem(self.board_tabs_widgets[board][\"r_curve\"])\n led_plot.addItem(self.board_tabs_widgets[board][\"g_curve\"])\n led_plot.addItem(self.board_tabs_widgets[board][\"b_curve\"])\n\n # Set up button layout\n self.board_tabs_widgets[board][\"label_reactive\"] = QLabel(\"Audio Reactive Effects\")\n self.board_tabs_widgets[board][\"label_non_reactive\"] = QLabel(\"Non Reactive Effects\")\n self.board_tabs_widgets[board][\"reactive_button_grid_wrap\"] = QWidget()\n self.board_tabs_widgets[board][\"non_reactive_button_grid_wrap\"] = QWidget()\n self.board_tabs_widgets[board][\"reactive_button_grid\"] = QGridLayout()\n self.board_tabs_widgets[board][\"non_reactive_button_grid\"] = QGridLayout()\n self.board_tabs_widgets[board][\"reactive_button_grid_wrap\"].setLayout(self.board_tabs_widgets[board][\"reactive_button_grid\"]) \n self.board_tabs_widgets[board][\"non_reactive_button_grid_wrap\"].setLayout(self.board_tabs_widgets[board][\"non_reactive_button_grid\"]) \n buttons = {}\n connecting_funcs = {}\n grid_width = 4\n i = 0\n j = 0\n k = 0\n l = 0\n # Dynamically layout reactive_buttons and connect them to the visualisation effects\n def connect_generator(effect):\n def func():\n config.settings[\"devices\"][board][\"configuration\"][\"current_effect\"] = effect\n buttons[effect].setDown(True)\n func.__name__ = effect\n return func\n # Where the magic happens\n for effect in visualizers[board].effects:\n if not effect in visualizers[board].non_reactive_effects:\n connecting_funcs[effect] = connect_generator(effect)\n buttons[effect] = QPushButton(effect)\n buttons[effect].clicked.connect(connecting_funcs[effect])\n self.board_tabs_widgets[board][\"reactive_button_grid\"].addWidget(buttons[effect], j, i)\n i += 1\n if i % grid_width == 0:\n i = 0\n j += 1\n else:\n connecting_funcs[effect] = connect_generator(effect)\n buttons[effect] = QPushButton(effect)\n buttons[effect].clicked.connect(connecting_funcs[effect])\n self.board_tabs_widgets[board][\"non_reactive_button_grid\"].addWidget(buttons[effect], l, k)\n k += 1\n if k % grid_width == 0:\n k = 0\n l += 1\n \n # Set up frequency slider\n # Frequency range label\n self.board_tabs_widgets[board][\"label_slider\"] = QLabel(\"Frequency Range\")\n # Frequency slider\n def freq_slider_change(tick):\n minf = self.board_tabs_widgets[board][\"freq_slider\"].tickValue(0)**2.0 * (config.settings[\"configuration\"][\"MIC_RATE\"] / 2.0)\n maxf = self.board_tabs_widgets[board][\"freq_slider\"].tickValue(1)**2.0 * 
(config.settings[\"configuration\"][\"MIC_RATE\"] / 2.0)\n t = 'Frequency range: {:.0f} - {:.0f} Hz'.format(minf, maxf)\n freq_label.setText(t)\n config.settings[\"configuration\"][\"MIN_FREQUENCY\"] = minf\n config.settings[\"configuration\"][\"MAX_FREQUENCY\"] = maxf\n dsp.create_mel_bank()\n def set_freq_min():\n config.settings[\"configuration\"][\"MIN_FREQUENCY\"] = self.board_tabs_widgets[board][\"freq_slider\"].start()\n dsp.create_mel_bank()\n def set_freq_max():\n config.settings[\"configuration\"][\"MAX_FREQUENCY\"] = self.board_tabs_widgets[board][\"freq_slider\"].end()\n dsp.create_mel_bank()\n self.board_tabs_widgets[board][\"freq_slider\"] = QRangeSlider()\n self.board_tabs_widgets[board][\"freq_slider\"].show()\n self.board_tabs_widgets[board][\"freq_slider\"].setMin(0)\n self.board_tabs_widgets[board][\"freq_slider\"].setMax(20000)\n self.board_tabs_widgets[board][\"freq_slider\"].setRange(config.settings[\"configuration\"][\"MIN_FREQUENCY\"], config.settings[\"configuration\"][\"MAX_FREQUENCY\"])\n self.board_tabs_widgets[board][\"freq_slider\"].setBackgroundStyle('background: qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #222, stop:1 #333);')\n self.board_tabs_widgets[board][\"freq_slider\"].setSpanStyle('background: qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #282, stop:1 #393);')\n self.board_tabs_widgets[board][\"freq_slider\"].setDrawValues(True)\n self.board_tabs_widgets[board][\"freq_slider\"].endValueChanged.connect(set_freq_max)\n self.board_tabs_widgets[board][\"freq_slider\"].startValueChanged.connect(set_freq_min)\n self.board_tabs_widgets[board][\"freq_slider\"].setStyleSheet(\"\"\"\n QRangeSlider * {\n border: 0px;\n padding: 0px;\n }\n QRangeSlider > QSplitter::handle {\n background: #fff;\n }\n QRangeSlider > QSplitter::handle:vertical {\n height: 3px;\n }\n QRangeSlider > QSplitter::handle:pressed {\n background: #ca5;\n }\n \"\"\")\n\n # Set up option tabs layout\n self.board_tabs_widgets[board][\"label_options\"] = QLabel(\"Effect Options\")\n self.board_tabs_widgets[board][\"opts_tabs\"] = QTabWidget()\n # Dynamically set up tabs\n tabs = {}\n grid_layouts = {}\n self.board_tabs_widgets[board][\"grid_layout_widgets\"] = {}\n options = config.settings[\"devices\"][board][\"effect_opts\"].keys()\n for effect in visualizers[self.board].effects:\n # Make the tab\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect] = {}\n tabs[effect] = QWidget()\n grid_layouts[effect] = QGridLayout()\n tabs[effect].setLayout(grid_layouts[effect])\n self.board_tabs_widgets[board][\"opts_tabs\"].addTab(tabs[effect],effect)\n # These functions make functions for the dynamic ui generation\n # YOU WANT-A DYNAMIC I GIVE-A YOU DYNAMIC!\n def gen_slider_valuechanger(effect, key):\n def func():\n config.settings[\"devices\"][board][\"effect_opts\"][effect][key] = self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].value()\n return func\n def gen_float_slider_valuechanger(effect, key):\n def func():\n config.settings[\"devices\"][board][\"effect_opts\"][effect][key] = self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].slider_value\n return func\n def gen_combobox_valuechanger(effect, key):\n def func():\n config.settings[\"devices\"][board][\"effect_opts\"][effect][key] = self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].currentText()\n return func\n def gen_checkbox_valuechanger(effect, key):\n def func():\n config.settings[\"devices\"][board][\"effect_opts\"][effect][key] = 
self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].isChecked()\n return func\n # Dynamically generate ui for settings\n if effect in visualizers[self.board].dynamic_effects_config:\n i = 0\n connecting_funcs[effect] = {}\n for key, label, ui_element, *opts in visualizers[self.board].dynamic_effects_config[effect]:\n if opts: # neatest way ^^^^^ i could think of to unpack and handle an unknown number of opts (if any) NOTE only works with py >=3.6\n opts = list(opts[0])\n if ui_element == \"slider\":\n connecting_funcs[effect][key] = gen_slider_valuechanger(effect, key)\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key] = QSlider(Qt.Horizontal)\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].setMinimum(opts[0])\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].setMaximum(opts[1])\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].setValue(config.settings[\"devices\"][board][\"effect_opts\"][effect][key])\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].valueChanged.connect(\n connecting_funcs[effect][key])\n elif ui_element == \"float_slider\":\n connecting_funcs[effect][key] = gen_float_slider_valuechanger(effect, key)\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key] = QFloatSlider(*opts, config.settings[\"devices\"][board][\"effect_opts\"][effect][key])\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].setValue(config.settings[\"devices\"][board][\"effect_opts\"][effect][key])\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].valueChanged.connect(\n connecting_funcs[effect][key])\n elif ui_element == \"dropdown\":\n connecting_funcs[effect][key] = gen_combobox_valuechanger(effect, key)\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key] = QComboBox()\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].addItems(opts)\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].setCurrentIndex(opts.index(config.settings[\"devices\"][board][\"effect_opts\"][effect][key]))\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].currentIndexChanged.connect(\n connecting_funcs[effect][key])\n elif ui_element == \"checkbox\":\n connecting_funcs[effect][key] = gen_checkbox_valuechanger(effect, key)\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key] = QCheckBox()\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].stateChanged.connect(\n connecting_funcs[effect][key])\n self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key].setCheckState(\n Qt.Checked if config.settings[\"devices\"][board][\"effect_opts\"][effect][key] else Qt.Unchecked)\n grid_layouts[effect].addWidget(QLabel(label),i,0)\n grid_layouts[effect].addWidget(self.board_tabs_widgets[board][\"grid_layout_widgets\"][effect][key],i,1)\n i += 1 \n else:\n grid_layouts[effect].addWidget(QLabel(\"No customisable options for this effect :(\"),0,0)\n \n \n \n # Add layouts into self.board_tabs_widgets[board][\"wrapper\"]\n self.board_tabs_widgets[board][\"wrapper\"].addWidget(self.board_tabs_widgets[board][\"graph_view\"])\n self.board_tabs_widgets[board][\"wrapper\"].addWidget(self.board_tabs_widgets[board][\"label_reactive\"])\n self.board_tabs_widgets[board][\"wrapper\"].addWidget(self.board_tabs_widgets[board][\"reactive_button_grid_wrap\"])\n 
self.board_tabs_widgets[board][\"wrapper\"].addWidget(self.board_tabs_widgets[board][\"label_non_reactive\"])\n self.board_tabs_widgets[board][\"wrapper\"].addWidget(self.board_tabs_widgets[board][\"non_reactive_button_grid_wrap\"])\n self.board_tabs_widgets[board][\"wrapper\"].addWidget(self.board_tabs_widgets[board][\"label_slider\"])\n self.board_tabs_widgets[board][\"wrapper\"].addWidget(self.board_tabs_widgets[board][\"freq_slider\"])\n self.board_tabs_widgets[board][\"wrapper\"].addWidget(self.board_tabs_widgets[board][\"label_options\"])\n self.board_tabs_widgets[board][\"wrapper\"].addWidget(self.board_tabs_widgets[board][\"opts_tabs\"])\n self.gui_widgets = {\"Graphs\": [self.board_tabs_widgets[board][\"graph_view\"]],\n \"Reactive Effect Buttons\": [self.board_tabs_widgets[board][\"label_reactive\"], self.board_tabs_widgets[board][\"reactive_button_grid_wrap\"]],\n \"Non Reactive Effect Buttons\": [self.board_tabs_widgets[board][\"label_non_reactive\"], self.board_tabs_widgets[board][\"non_reactive_button_grid_wrap\"]],\n \"Frequency Range\": [self.board_tabs_widgets[board][\"label_slider\"], self.board_tabs_widgets[board][\"freq_slider\"]],\n \"Effect Options\": [self.board_tabs_widgets[board][\"label_options\"], self.board_tabs_widgets[board][\"opts_tabs\"]]} \n\nclass DSP():\n def __init__(self, board):\n # Name of board for which this dsp instance is processing audio\n self.board = board\n\n # Initialise filters etc. I've no idea what most of these are for but i imagine i'll be removing them eventually. \n self.fft_plot_filter = dsp.ExpFilter(np.tile(1e-1, config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]), alpha_decay=0.5, alpha_rise=0.99)\n self.mel_gain = dsp.ExpFilter(np.tile(1e-1, config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]), alpha_decay=0.01, alpha_rise=0.99)\n self.mel_smoothing = dsp.ExpFilter(np.tile(1e-1, config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]), alpha_decay=0.5, alpha_rise=0.99)\n self.gain = dsp.ExpFilter(np.tile(0.01, config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"]), alpha_decay=0.001, alpha_rise=0.99)\n self.r_filt = dsp.ExpFilter(np.tile(0.01, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2), alpha_decay=0.2, alpha_rise=0.99)\n self.g_filt = dsp.ExpFilter(np.tile(0.01, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2), alpha_decay=0.05, alpha_rise=0.3)\n self.b_filt = dsp.ExpFilter(np.tile(0.01, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2), alpha_decay=0.1, alpha_rise=0.5)\n self.common_mode = dsp.ExpFilter(np.tile(0.01, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2), alpha_decay=0.99, alpha_rise=0.01)\n self.p_filt = dsp.ExpFilter(np.tile(1, (3, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2)), alpha_decay=0.1, alpha_rise=0.99)\n self.volume = dsp.ExpFilter(config.settings[\"configuration\"][\"MIN_VOLUME_THRESHOLD\"], alpha_decay=0.02, alpha_rise=0.02)\n self.p = np.tile(1.0, (3, config.settings[\"devices\"][self.board][\"configuration\"][\"N_PIXELS\"] // 2))\n # Number of audio samples to read every time frame\n self.samples_per_frame = int(config.settings[\"configuration\"][\"MIC_RATE\"] / config.settings[\"configuration\"][\"FPS\"])\n # Array containing the rolling audio sample window\n self.y_roll = np.random.rand(config.settings[\"configuration\"][\"N_ROLLING_HISTORY\"], 
self.samples_per_frame) / 1e16\n self.fft_window = np.hamming(int(config.settings[\"configuration\"][\"MIC_RATE\"] / config.settings[\"configuration\"][\"FPS\"])\\\n * config.settings[\"configuration\"][\"N_ROLLING_HISTORY\"])\n\n self.samples = None\n self.mel_y = None\n self.mel_x = None\n self.create_mel_bank()\n\n def update(self, audio_samples):\n \"\"\" Return processed audio data\n\n Returns mel curve, x/y data\n\n This is called every time there is a microphone update\n\n Returns\n -------\n audio_data : dict\n Dict containinng \"mel\", \"x\", and \"y\"\n \"\"\"\n\n audio_data = {}\n # Normalize samples between 0 and 1\n y = audio_samples / 2.0**15\n # Construct a rolling window of audio samples\n self.y_roll[:-1] = self.y_roll[1:]\n self.y_roll[-1, :] = np.copy(y)\n y_data = np.concatenate(self.y_roll, axis=0).astype(np.float32)\n vol = np.max(np.abs(y_data))\n # Transform audio input into the frequency domain\n N = len(y_data)\n N_zeros = 2**int(np.ceil(np.log2(N))) - N\n # Pad with zeros until the next power of two\n y_data *= self.fft_window\n y_padded = np.pad(y_data, (0, N_zeros), mode='constant')\n YS = np.abs(np.fft.rfft(y_padded)[:N // 2])\n # Construct a Mel filterbank from the FFT data\n mel = np.atleast_2d(YS).T * self.mel_y.T\n # Scale data to values more suitable for visualization\n mel = np.sum(mel, axis=0)\n mel = mel**2.0\n # Gain normalization\n self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))\n mel /= self.mel_gain.value\n mel = self.mel_smoothing.update(mel)\n x = np.linspace(config.settings[\"configuration\"][\"MIN_FREQUENCY\"], config.settings[\"configuration\"][\"MAX_FREQUENCY\"], len(mel))\n y = self.fft_plot_filter.update(mel)\n\n audio_data[\"mel\"] = mel\n audio_data[\"vol\"] = vol\n audio_data[\"x\"] = x\n audio_data[\"y\"] = y\n return audio_data\n\n def rfft(self, data, window=None):\n window = 1.0 if window is None else window(len(data))\n ys = np.abs(np.fft.rfft(data * window))\n xs = np.fft.rfftfreq(len(data), 1.0 / config.settings[\"configuration\"][\"MIC_RATE\"])\n return xs, ys\n\n\n def fft(self, data, window=None):\n window = 1.0 if window is None else window(len(data))\n ys = np.fft.fft(data * window)\n xs = np.fft.fftfreq(len(data), 1.0 / config.settings[\"configuration\"][\"MIC_RATE\"])\n return xs, ys\n\n\n def create_mel_bank(self):\n samples = int(config.settings[\"configuration\"][\"MIC_RATE\"] * config.settings[\"configuration\"][\"N_ROLLING_HISTORY\"]\\\n / (2.0 * config.settings[\"configuration\"][\"FPS\"]))\n self.mel_y, (_, self.mel_x) = melbank.compute_melmat(num_mel_bands=config.settings[\"devices\"][self.board][\"configuration\"][\"N_FFT_BINS\"],\n freq_min=config.settings[\"configuration\"][\"MIN_FREQUENCY\"],\n freq_max=config.settings[\"configuration\"][\"MAX_FREQUENCY\"],\n num_fft_bands=samples,\n sample_rate=config.settings[\"configuration\"][\"MIC_RATE\"])\n\n\ndef update_config_dicts():\n # Updates config.settings with any values stored in settings.ini\n if settings.value(\"settings_dict\"):\n for settings_dict in settings.value(\"settings_dict\"):\n if not config.use_defaults[settings_dict]:\n try:\n config.settings[settings_dict] = {**config.settings[settings_dict], **settings.value(\"settings_dict\")[settings_dict]}\n except TypeError:\n pass\n\ndef frames_per_second():\n \"\"\" Return the estimated frames per second\n\n Returns the current estimate for frames-per-second (FPS).\n FPS is estimated by measured the amount of time that has elapsed since\n this function was previously called. 
The FPS estimate is low-pass filtered\n to reduce noise.\n\n This function is intended to be called one time for every iteration of\n the program's main loop.\n\n Returns\n -------\n fps : float\n Estimated frames-per-second. This value is low-pass filtered\n to reduce noise.\n \"\"\"\n global _time_prev, _fps\n time_now = time.time() * 1000.0\n dt = time_now - _time_prev\n _time_prev = time_now\n if dt == 0.0:\n return _fps.value\n return _fps.update(1000.0 / dt)\n\ndef memoize(function):\n \"\"\"Provides a decorator for memoizing functions\"\"\"\n from functools import wraps\n memo = {}\n\n @wraps(function)\n def wrapper(*args):\n if args in memo:\n return memo[args]\n else:\n rv = function(*args)\n memo[args] = rv\n return rv\n return wrapper\n\n@memoize\ndef _normalized_linspace(size):\n return np.linspace(0, 1, size)\n\ndef interpolate(y, new_length):\n \"\"\"Intelligently resizes the array by linearly interpolating the values\n\n Parameters\n ----------\n y : np.array\n Array that should be resized\n\n new_length : int\n The length of the new interpolated array\n\n Returns\n -------\n z : np.array\n New array with length of new_length that contains the interpolated\n values of y.\n \"\"\"\n if len(y) == new_length:\n return y\n x_old = _normalized_linspace(len(y))\n x_new = _normalized_linspace(new_length)\n z = np.interp(x_new, x_old, y)\n return z\n\ndef microphone_update(audio_samples):\n global y_roll, prev_rms, prev_exp, prev_fps_update\n\n # Get processed audio data for each device\n audio_datas = {}\n for board in boards:\n audio_datas[board] = signal_processers[board].update(audio_samples)\n \n outputs = {}\n \n # Visualization for each board\n for board in boards:\n # Get visualization output for each board\n audio_input = audio_datas[board][\"vol\"] > config.settings[\"configuration\"][\"MIN_VOLUME_THRESHOLD\"]\n outputs[board] = visualizers[board].get_vis(audio_datas[board][\"mel\"], audio_input)\n # Map filterbank output onto LED strip(s)\n boards[board].show(outputs[board])\n if config.settings[\"configuration\"][\"USE_GUI\"]:\n # Plot filterbank output\n gui.board_tabs_widgets[board][\"mel_curve\"].setData(x=audio_datas[board][\"x\"], y=audio_datas[board][\"y\"])\n # Plot visualizer output\n gui.board_tabs_widgets[board][\"r_curve\"].setData(y=outputs[board][0])\n gui.board_tabs_widgets[board][\"g_curve\"].setData(y=outputs[board][1])\n gui.board_tabs_widgets[board][\"b_curve\"].setData(y=outputs[board][2])\n\n # FPS update\n fps = frames_per_second()\n if time.time() - 0.5 > prev_fps_update:\n prev_fps_update = time.time()\n\n # Various GUI updates\n if config.settings[\"configuration\"][\"USE_GUI\"]:\n # Update error label\n if audio_input:\n gui.label_error.setText(\"\")\n else:\n gui.label_error.setText(\"No audio input. Volume below threshold.\")\n # Update fps counter\n gui.label_fps.setText('{:.0f} / {:.0f} FPS'.format(fps, config.settings[\"configuration\"][\"FPS\"]))\n app.processEvents()\n\n # Left in just in case prople dont use the gui\n elif vol < config.settings[\"configuration\"][\"MIN_VOLUME_THRESHOLD\"]:\n print(\"No audio input. Volume below threshold. 
Volume: {}\".format(vol))\n if config.settings[\"configuration\"][\"DISPLAY_FPS\"]:\n print('FPS {:.0f} / {:.0f}'.format(fps, config.settings[\"configuration\"][\"FPS\"]))\n\n# Load and update configuration from settings.ini\nsettings = QSettings('./lib/settings.ini', QSettings.IniFormat)\nsettings.setFallbacksEnabled(False) # File only, no fallback to registry\nupdate_config_dicts()\n\n# Initialise board(s)\nvisualizers = {}\nboards = {}\nfor board in config.settings[\"devices\"]:\n visualizers[board] = Visualizer(board)\n if config.settings[\"devices\"][board][\"configuration\"][\"TYPE\"] == 'ESP8266':\n boards[board] = devices.ESP8266(\n auto_detect=config.settings[\"devices\"][board][\"configuration\"][\"AUTO_DETECT\"],\n mac_addr=config.settings[\"devices\"][board][\"configuration\"][\"MAC_ADDR\"],\n ip=config.settings[\"devices\"][board][\"configuration\"][\"UDP_IP\"],\n port=config.settings[\"devices\"][board][\"configuration\"][\"UDP_PORT\"])\n elif config.settings[\"devices\"][board][\"configuration\"][\"TYPE\"] == 'RaspberryPi':\n boards[board] = devices.RaspberryPi(\n n_pixels=config.settings[\"devices\"][board][\"configuration\"][\"N_PIXELS\"],\n pin=config.settings[\"devices\"][board][\"configuration\"][\"LED_PIN\"],\n invert_logic=config.settings[\"devices\"][board][\"configuration\"][\"LED_INVERT\"],\n freq=config.settings[\"devices\"][board][\"configuration\"][\"LED_FREQ_HZ\"],\n dma=config.settings[\"devices\"][board][\"configuration\"][\"LED_DMA\"])\n elif config.settings[\"devices\"][board][\"configuration\"][\"TYPE\"] == 'Fadecandy':\n boards[board] = devices.FadeCandy(\n server=config.settings[\"devices\"][board][\"configuration\"][\"SERVER\"])\n elif config.settings[\"devices\"][board][\"configuration\"][\"TYPE\"] == 'BlinkStick':\n boards[board] = devices.BlinkStick()\n elif config.settings[\"devices\"][board][\"configuration\"][\"TYPE\"] == 'DotStar':\n boards[board] = devices.DotStar()\n elif config.settings[\"devices\"][board][\"configuration\"][\"TYPE\"] == 'Stripless':\n pass\n\n# Initialise DSP\nsignal_processers = {}\nfor board in config.settings[\"devices\"]:\n signal_processers[board] = DSP(board)\n\n# Initialise GUI \nif config.settings[\"configuration\"][\"USE_GUI\"]:\n # Create GUI window\n app = QApplication([])\n app.setApplicationName('Visualization')\n gui = GUI()\n app.processEvents()\n\nprev_fps_update = time.time()\n# The previous time that the frames_per_second() function was called\n_time_prev = time.time() * 1000.0\n# The low-pass filter used to estimate frames-per-second\n_fps = dsp.ExpFilter(val=config.settings[\"configuration\"][\"FPS\"], alpha_decay=0.2, alpha_rise=0.2)\n\n# Initialize LEDs\n# led.update()\n# Start listening to live audio stream\nmicrophone.start_stream(microphone_update)\n"
] | [
[
"numpy.linspace",
"numpy.asarray",
"numpy.concatenate",
"numpy.round",
"numpy.roll",
"numpy.pad",
"numpy.clip",
"numpy.fliplr",
"numpy.copy",
"numpy.interp",
"numpy.zeros",
"numpy.multiply",
"numpy.atleast_2d",
"numpy.random.rand",
"scipy.ndimage.filters.gaussian_filter1d",
"numpy.array",
"numpy.sum",
"numpy.log2",
"numpy.abs",
"numpy.fft.fft",
"numpy.fft.rfft",
"numpy.tile",
"numpy.array_split",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
davidstone/tensorflow | [
"6044759779a564b3ecffe4cb60f28f20b8034add"
] | [
"tensorflow/python/compat/compat.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities for API compatibility between TensorFlow release versions.\n\nSee [Version\nCompatibility](https://tensorflow.org/guide/version_compat#backward_forward)\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport datetime\nimport os\n\nfrom tensorflow.python.util import tf_contextlib\nfrom tensorflow.python.util.tf_export import tf_export\n\n# This value changes every day with an automatic CL. It can be modified in code\n# via `forward_compatibility_horizon()` or with the environment variable\n# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.\n_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 4, 23)\n_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = \"TF_FORWARD_COMPATIBILITY_DELTA_DAYS\"\n_FORWARD_COMPATIBILITY_DATE_NUMBER = None\n\n\ndef _date_to_date_number(year, month, day):\n return (year << 9) | (month << 5) | day\n\n\ndef _update_forward_compatibility_date_number(date_to_override=None):\n \"\"\"Update the base date to compare in forward_compatible function.\"\"\"\n\n global _FORWARD_COMPATIBILITY_DATE_NUMBER\n\n if date_to_override:\n date = date_to_override\n else:\n date = _FORWARD_COMPATIBILITY_HORIZON\n delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)\n if delta_days:\n date += datetime.timedelta(days=int(delta_days))\n\n _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(\n date.year, date.month, date.day)\n\n\n_update_forward_compatibility_date_number()\n\n\n@tf_export(\"compat.forward_compatible\")\ndef forward_compatible(year, month, day):\n \"\"\"Return true if the forward compatibility window has expired.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n Forward-compatibility refers to scenarios where the producer of a TensorFlow\n model (a GraphDef or SavedModel) is compiled against a version of the\n TensorFlow library newer than what the consumer was compiled against. The\n \"producer\" is typically a Python program that constructs and trains a model\n while the \"consumer\" is typically another program that loads and serves the\n model.\n\n TensorFlow has been supporting a 3 week forward-compatibility window for\n programs compiled from source at HEAD.\n\n For example, consider the case where a new operation `MyNewAwesomeAdd` is\n created with the intent of replacing the implementation of an existing Python\n wrapper - `tf.add`. 
The Python wrapper implementation should change from\n something like:\n\n ```python\n def add(inputs, name=None):\n return gen_math_ops.add(inputs, name)\n ```\n\n to:\n\n ```python\n from tensorflow.python.compat import compat\n\n def add(inputs, name=None):\n if compat.forward_compatible(year, month, day):\n # Can use the awesome new implementation.\n return gen_math_ops.my_new_awesome_add(inputs, name)\n # To maintain forward compatibility, use the old implementation.\n return gen_math_ops.add(inputs, name)\n ```\n\n Where `year`, `month`, and `day` specify the date beyond which binaries\n that consume a model are expected to have been updated to include the\n new operations. This date is typically at least 3 weeks beyond the date\n the code that adds the new operation is committed.\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Returns:\n True if the caller can expect that serialized TensorFlow graphs produced\n can be consumed by programs that are compiled with the TensorFlow library\n source code after (year, month, day).\n \"\"\"\n return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(\n year, month, day)\n\n\n@tf_export(\"compat.forward_compatibility_horizon\")\n@tf_contextlib.contextmanager\ndef forward_compatibility_horizon(year, month, day):\n \"\"\"Context manager for testing forward compatibility of generated graphs.\n\n See [Version\n compatibility](https://tensorflow.org/guide/version_compat#backward_forward).\n\n To ensure forward compatibility of generated graphs (see `forward_compatible`)\n with older binaries, new features can be gated with:\n\n ```python\n if compat.forward_compatible(year=2018, month=08, date=01):\n generate_graph_with_new_features()\n else:\n generate_graph_so_older_binaries_can_consume_it()\n ```\n\n However, when adding new features, one may want to unittest it before\n the forward compatibility window expires. This context manager enables\n such tests. For example:\n\n ```python\n from tensorflow.python.compat import compat\n\n def testMyNewFeature(self):\n with compat.forward_compatibility_horizon(2018, 08, 02):\n # Test that generate_graph_with_new_features() has an effect\n ```\n\n Args:\n year: A year (e.g., 2018). Must be an `int`.\n month: A month (1 <= month <= 12) in year. Must be an `int`.\n day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an\n `int`.\n\n Yields:\n Nothing.\n \"\"\"\n try:\n _update_forward_compatibility_date_number(datetime.date(year, month, day))\n yield\n finally:\n _update_forward_compatibility_date_number()\n"
] | [
[
"tensorflow.python.util.tf_export.tf_export"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.5",
"1.7",
"1.4"
]
}
] |
intflow/YOLOX_AUDIO | [
"c962bad922bdc7ae0ab6b6ed0ed4d13d18511fad"
] | [
"tools/demo_audio.py"
] | [
"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport argparse\nimport os\nimport time\nfrom loguru import logger\n\nimport cv2\nimport numpy as np\nfrom scipy.io import wavfile\nimport scipy.io\nimport librosa\nfrom PIL import Image\n\nimport torch\n\nfrom yolox_audio.data.data_augment import ValTransform\nfrom yolox_audio.data.datasets import COCO_CLASSES, INTFLOW_CLASSES, CROWDHUMAN_CLASSES\nfrom yolox_audio.exp import get_exp\nfrom yolox_audio.utils import fuse_model, get_model_info, postprocess, vis_bbox\n\nWAV_EXT = [\".wav\"]\n\ndef make_parser():\n parser = argparse.ArgumentParser(\"YOLOX_AUDIO Demo!\")\n parser.add_argument(\n \"--demo\", default=\"wav\", help=\"demo type, eg. wav\"\n )\n parser.add_argument(\"-expn\", \"--experiment-name\", type=str, default=\"yolox_audio\")\n parser.add_argument(\"-n\", \"--name\", type=str, default=\"yolox_audio_x\", help=\"model name\")\n\n parser.add_argument(\n \"--path\", default=\"./assets/00000.wav\", help=\"path to images or video\"\n )\n parser.add_argument(\n \"--save_folder\", default=None, help=\"path to images or video output\"\n )\n parser.add_argument(\"--camid\", type=int, default=0, help=\"webcam demo camera id\")\n parser.add_argument(\n \"--save_result\",\n default=True,\n action=\"store_true\",\n help=\"whether to save the inference result of image/video\",\n )\n\n # exp file\n parser.add_argument(\n \"-f\",\n \"--exp_file\",\n default=\"exps/yolox_audio/yolox_x.py\",\n type=str,\n help=\"pls input your expriment description file\",\n )\n parser.add_argument(\"-c\", \"--ckpt\", default=\"YOLOX_outputs/yolox_audio/best_ckpt.pth\", type=str, help=\"ckpt for eval\")\n #parser.add_argument(\"-m\", \"--model\", default=None, type=str, help=\"model reference for eval\")\n #parser.add_argument(\"-c\", \"--ckpt\", default=\"/data/pretrained/hcow/yolox_s_oad_lm3__intflow_total_1K_p0.pth\", type=str, help=\"ckpt for eval\")\n parser.add_argument(\n \"--device\",\n default=\"gpu\",\n type=str,\n help=\"device to run our model, can either be cpu or gpu\",\n )\n parser.add_argument(\"--conf\", default=0.25, type=float, help=\"test conf\")\n parser.add_argument(\"--nms\", default=0.65, type=float, help=\"test nms threshold\")\n parser.add_argument(\"--tsize_h\", default=256, type=int, help=\"test img size(h)\")\n parser.add_argument(\"--tsize_w\", default=512, type=int, help=\"test img size(w)\")\n parser.add_argument(\n \"--fp16\",\n dest=\"fp16\",\n default=True,\n action=\"store_true\",\n help=\"Adopting mix precision evaluating.\",\n )\n parser.add_argument(\n \"--legacy\",\n dest=\"legacy\",\n default=False,\n action=\"store_true\",\n help=\"To be compatible with older versions\",\n )\n parser.add_argument(\n \"--fuse\",\n dest=\"fuse\",\n default=False,\n action=\"store_true\",\n help=\"Fuse conv and bn for testing.\",\n )\n parser.add_argument(\n \"--pruning\",\n dest=\"pruning\",\n default=False,\n action=\"store_true\",\n help=\"Set pretrained model is whether pruned or not\",\n )\n parser.add_argument(\n \"--trt\",\n dest=\"trt\",\n default=False,\n action=\"store_true\",\n help=\"Using TensorRT model for testing.\",\n )\n return parser\n\n\ndef get_wav_list(path):\n wav_names = []\n for maindir, subdir, file_name_list in os.walk(path):\n for filename in file_name_list:\n apath = os.path.join(maindir, filename)\n ext = os.path.splitext(apath)[1]\n if ext in WAV_EXT:\n wav_names.append(apath)\n return wav_names\n\n\nclass Predictor(object):\n def __init__(\n self,\n model,\n 
exp,\n cls_names=COCO_CLASSES,\n trt_file=None,\n decoder=None,\n device=\"cpu\",\n fp16=False,\n legacy=False,\n ):\n self.model = model\n self.cls_names = cls_names\n self.decoder = decoder\n self.num_classes = exp.num_classes\n self.confthre = exp.test_conf\n self.nmsthre = exp.nmsthre\n self.test_size = exp.test_size\n self.device = device\n self.fp16 = fp16\n self.preproc = ValTransform(legacy=legacy)\n if trt_file is not None:\n from torch2trt import TRTModule\n\n model_trt = TRTModule()\n model_trt.load_state_dict(torch.load(trt_file))\n\n x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()\n self.model(x)\n self.model = model_trt\n\n def inference(self, img):\n img_info = {\"id\": 0}\n #if isinstance(img, str):\n # img_info[\"file_name\"] = os.path.basename(img)\n # img = cv2.imread(img)\n #else:\n img_info[\"file_name\"] = None\n\n height, width = img.shape[:2]\n img_info[\"height\"] = height\n img_info[\"width\"] = width\n img_info[\"raw_img\"] = img\n\n ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])\n img_info[\"ratio\"] = ratio\n\n img, _ = self.preproc(img, None, self.test_size)\n img = torch.from_numpy(img).unsqueeze(0)\n img = img.float()\n if self.device == \"gpu\":\n img = img.cuda()\n if self.fp16:\n img = img.half() # to FP16\n\n with torch.no_grad():\n t0 = time.time()\n outputs = self.model(img)\n if self.decoder is not None:\n outputs = self.decoder(outputs, dtype=outputs.type())\n outputs = postprocess(\n outputs, self.num_classes, self.confthre,\n self.nmsthre, class_agnostic=False\n )\n logger.info(\"Infer time: {:.4f}s\".format(time.time() - t0))\n return outputs, img_info\n\n def visual(self, output, img_info, cls_conf=0.35):\n ratio = img_info[\"ratio\"]\n img = img_info[\"raw_img\"]\n if output is None:\n return img\n output = output.cpu()\n\n bboxes = output[:, 0:4]\n\n # preprocessing: resize\n bboxes /= ratio\n ###landmarks /= ratio\n\n cls = output[:, 6]\n scores = output[:, 4] * output[:, 5]\n## rads = output[:,7]\n\n vis_res = vis_bbox(img, bboxes, scores, cls, cls_conf, self.cls_names)\n\n return vis_res\n\n\ndef wav_to_img(wav_path):\n \n try:\n sr, wavs = wavfile.read(wav_path)\n except:\n print('Can not load wav file')\n\n # 7 to 1 sum > Resample to 8K > Get spectrogram > chop to multiple images\n step = int(sr / 6000)\n wav = np.mean(wavs,1)\n wav_6k = wav[::step] / 32768.0\n sr = 6000\n\n # STFT -> spectrogram\n hop_length = 128 # 전체 frame 수 (21.33ms)\n n_fft = 512 # frame 하나당 sample 수 (85.33ms)\n hop_length_s = hop_length / sr\n\n # calculate duration hop length and window in seconds\n hop_length_duration = float(hop_length)/sr\n n_fft_duration = float(n_fft)/sr\n\n # STFT\n stft = librosa.stft(wav_6k, n_fft=n_fft, hop_length=hop_length)\n stft = stft[1:,:]\n f_len = stft.shape[0]\n t_len = stft.shape[-1]\n t_step = 512\n c_len = 3\n\n # 복소공간 값 절댓값 취하기\n mel = librosa.feature.melspectrogram(S=np.abs(stft), sr=sr, n_mels=f_len).reshape(f_len, -1, 1)\n mag = np.abs(stft).reshape(f_len, -1, 1)\n mfcc = librosa.feature.mfcc(S=mel, sr=sr, n_mfcc=f_len)\n\n feat_cat = np.concatenate((mel, mag, mfcc), axis = 2)\n feat_cat = feat_cat ** 2.0\n\n # magnitude > Decibels\n for i in range(0,3):\n feat = feat_cat[:,:,i]\n log_spectrogram = librosa.amplitude_to_db(feat)\n log_spectrogram += np.abs(log_spectrogram.min()) + 1e-5\n log_spectrogram /= log_spectrogram.max()\n log_spectrogram *= 255.0\n log_spectrogram = np.flip(log_spectrogram, axis=0)\n feat_cat[:,:,i] = log_spectrogram\n\n sub_cnt = 0\n img_set = []\n for 
t_sub in range(0, t_len, t_step):\n t_end = t_sub + t_step\n \n if t_end > t_len:\n feat_cat_sub = np.concatenate((feat_cat[:,t_sub:,:], np.zeros((f_len, t_end - t_len, c_len))),axis=1)\n else:\n feat_cat_sub = feat_cat[:,t_sub:t_end,:]\n\n feat_cat_sub = feat_cat_sub.astype('uint8')\n img = feat_cat_sub[...,::-1].copy()\n img_set.append(img)\n #img_path = os.path.join(, each_file)[:-4]+'_'+str(sub_cnt)+'.jpg'\n #img.save(img_path)\n return img_set, t_step, hop_length, sr\n\n\ndef wav_demo(predictor, vis_folder, path, current_time, save_result, save_folder=None):\n\n if os.path.isdir(path):\n files = get_wav_list(path)\n else:\n files = [path]\n files.sort()\n\n\n for _file in files:\n ## Load wav file then convert into image sets\n img_set, t_step, hop_length, sr = wav_to_img(_file)\n filename = _file.split('/')[-1]\n filename = filename.split('.')[0]\n\n ##set_name = filename.split('_')[0].replace('0','_')\n ##drone_name = filename.split('_')[1].replace('0','_')\n\n img_idx = 0\n outputs_pixel_set = []\n for img in img_set:\n outputs, img_info = predictor.inference(img)\n outputs_pixel_set.append(outputs)\n # Image save for visual debug\n result_image = predictor.visual(outputs[0], img_info, predictor.confthre)\n if save_result:\n if save_folder == None:\n save_folder = os.path.join(\n vis_folder, time.strftime(\"%Y_%m_%d_%H_%M_%S\", current_time)\n )\n os.makedirs(save_folder, exist_ok=True)\n image_name = filename+'_'+str(img_idx)+'.jpg'\n save_file_name = os.path.join(save_folder, os.path.basename(image_name))\n ##logger.info(\"Saving detection result in {}\".format(save_file_name))\n cv2.imwrite(save_file_name, result_image)\n img_idx += 1\n ch = cv2.waitKey(0)\n if ch == 27 or ch == ord(\"q\") or ch == ord(\"Q\"):\n break\n\n # Convert image-wise pixel vad into time values with json\n set_id = 0\n vad_set = []\n for outputs_pixel in outputs_pixel_set: \n if outputs_pixel[0] != None:\n for vad_chunk in outputs_pixel[0]: # x0, y0, x1, y1, obj_score, cls_scre, cls_id\n vad_pixel = [vad_chunk[0].item()+set_id, vad_chunk[2].item()+set_id, vad_chunk[5].item(), vad_chunk[6].item()] #we only use [x0, x1, cls_id]\n vad_set.append(vad_pixel)\n set_id += t_step\n\n vad_set.sort(key = lambda x: x[0]) #Sort by time series\n\n #Merge adjacent VADs by distance and classes\n init = True\n del_list = []\n list_idx = 0\n for vad in vad_set:\n if init == True:\n init = False\n vad_1d = vad\n else:\n if vad[0] - vad_1d[1] < 15.0:\n if vad[2] > vad_1d[2]:\n vad_prob = vad[2]\n vad_cls = vad[3]\n else:\n vad_prob = vad_1d[2]\n vad_cls = vad_1d[3]\n\n vad_tmp = [vad_1d[0], vad[1], vad_prob, vad_cls]\n vad_set[list_idx] = vad_tmp\n del_list.append(list_idx-1)\n vad_1d = vad_tmp\n else:\n vad_1d = vad\n list_idx += 1\n\n #Delete duplicated list\n for del_id in sorted(del_list, reverse=True):\n del vad_set[del_id]\n\n #Convert frame to time\n time_unit = hop_length / sr #21.333333ms\n vad_set = np.asarray(vad_set)\n if len(vad_set) > 0:\n vad_set[:,0:2] *= time_unit\n print(vad_set)\n \n\ndef main(exp, args):\n if not args.experiment_name:\n args.experiment_name = exp.exp_name\n\n file_name = os.path.join(exp.output_dir, args.experiment_name)\n os.makedirs(file_name, exist_ok=True)\n\n vis_folder = None\n if args.save_result:\n vis_folder = os.path.join(file_name, \"vis_res\")\n os.makedirs(vis_folder, exist_ok=True)\n\n if args.trt:\n args.device = \"gpu\"\n\n logger.info(\"Args: {}\".format(args))\n\n if args.conf is not None:\n exp.test_conf = args.conf\n if args.nms is not None:\n exp.nmsthre = 
args.nms\n if args.tsize_h is not None:\n exp.test_size = (args.tsize_h, args.tsize_w)\n\n\n if args.pruning:\n model = exp.get_model_pruning(args.model)\n logger.info(\"Model Summary: {}\".format(get_model_info(model, exp.test_size)))\n else:\n model = exp.get_model()\n logger.info(\"Model Summary: {}\".format(get_model_info(model, exp.test_size)))\n\n if args.device == \"gpu\":\n model.cuda()\n if args.fp16:\n model.half() # to FP16\n model.eval()\n\n if not args.trt:\n if args.ckpt is None:\n ckpt_file = os.path.join(file_name, \"best_ckpt.pth\")\n else:\n ckpt_file = args.ckpt\n logger.info(\"loading checkpoint\")\n ckpt = torch.load(ckpt_file, map_location=\"cpu\")\n # load the model state dict\n model.load_state_dict(ckpt[\"model\"])\n logger.info(\"loaded checkpoint done.\")\n\n if args.fuse:\n logger.info(\"\\tFusing model...\")\n model = fuse_model(model)\n\n if args.trt:\n assert not args.fuse, \"TensorRT model is not support model fusing!\"\n trt_file = os.path.join(file_name, \"model_trt.pth\")\n assert os.path.exists(\n trt_file\n ), \"TensorRT model is not found!\\n Run python3 tools/trt.py first!\"\n model.head.decode_in_inference = False\n decoder = model.head.decode_outputs\n logger.info(\"Using TensorRT to inference\")\n else:\n trt_file = None\n decoder = None\n\n classes_list = INTFLOW_CLASSES\n predictor = Predictor(model, exp, classes_list, trt_file, decoder, args.device, args.fp16, args.legacy)\n current_time = time.localtime()\n if args.demo == \"wav\":\n wav_demo(predictor, vis_folder, args.path, current_time, args.save_result, args.save_folder)\n\n\nif __name__ == \"__main__\":\n args = make_parser().parse_args()\n exp = get_exp(args.exp_file, args.name)\n\n main(exp, args)\n"
] | [
[
"torch.ones",
"numpy.abs",
"torch.load",
"numpy.asarray",
"torch.from_numpy",
"numpy.concatenate",
"numpy.mean",
"torch.no_grad",
"numpy.flip",
"scipy.io.wavfile.read",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
Abhishek2304/Cerebro-System-Ray | [
"1e2f2ae291cd449573f87bb83fb2bda12e606b3a"
] | [
"cerebro/tune/grid.py"
] | [
"# Copyright 2020 Supun Nakandala, Yuhao Zhang, and Arun Kumar. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport os\nimport itertools\nimport datetime\nimport tensorflow as tf\nfrom sqlalchemy import and_\nimport numpy as np\nimport logging\nimport traceback\nfrom ..commons.constants import *\n\nfrom .base import ModelSelection, is_larger_better, ModelSelectionResult, _HP, _HPChoice, update_model_results\nfrom ..db.dao import Model, Metric, ParamVal, ParamDef, Experiment\nfrom ..commons.constants import CREATED_STATUS, RUNNING_STATUS, COMPLETED_STATUS\n\n\nclass GridSearch(ModelSelection):\n \"\"\"Performs grid search using the given param grid\n\n :param backend: Cerebro backend object (e.g., SparkBackend).\n :param store: Cerebro store object (e.g., LocalStore, HDFSStore).\n :param estimator_gen_fn: A function which takes\n in a dictionary of parameters and returns a Cerebro Estimator (e.g., cerebro.SparkEstimator).\n :param search_space: A dictionary object defining the parameter search space.\n :param num_epochs: Number of maximum epochs each model should be trained for.\n :param evaluation_metric: Evaluation metric used to pick the best model (default \"loss\").\n :param validation: (Optional) The ratio of the validation set (default: 0.25) or a string defining the column name\n defining the validation set. In the latter case the column value can be bool or int.\n :param label_columns: (Optional) A list containing the names of the label/output columns (default ['label']).\n :param feature_columns: (Optional) A list containing the names of the feature columns (default ['features']).\n :param verbose: Debug output verbosity (0-2). Defaults to 1.\n\n :return: :class:`cerebro.tune.ModelSelectionResult`\n \"\"\"\n\n def __init__(self, backend, store, estimator_gen_fn, search_space, num_epochs,\n evaluation_metric='loss', validation=0.25, label_columns=['label'], feature_columns=['features'],\n verbose=1):\n super(GridSearch, self).__init__(backend, store, validation, estimator_gen_fn, evaluation_metric,\n label_columns, feature_columns, verbose)\n\n self.search_space = search_space\n # validate the search space\n self._validate_search_space()\n\n self.estimator_param_maps = self._generate_all_param_maps()\n self.num_epochs = num_epochs\n\n def _validate_search_space(self):\n search_space = self.search_space\n if not type(search_space) == dict:\n raise Exception('Search space has to be type dict. 
Provided: {}'.format(type(search_space)))\n\n if not all([isinstance(k, str) for k in search_space.keys()]):\n raise Exception('Only string values are allowed for hyperparameter space keys.')\n\n if not all([isinstance(k, _HPChoice) for k in search_space.values()]):\n raise Exception('All hyperparameter space values has to be of type cerebro.tune.base._HPChoice.'\n ' Nested search spaces are not supported yet')\n\n def _generate_all_param_maps(self):\n keys = self.search_space.keys()\n grid_values = [v.options for v in self.search_space.values()]\n\n def _to_key_value_pairs(keys, values):\n # values = [v if isinstance(v, list) else v() for v in values]\n return [(key, value) for key, value in zip(keys, values)]\n\n return [dict(_to_key_value_pairs(keys, prod)) for prod in itertools.product(*[v if isinstance(v, list) else \\\n v() for v in grid_values])]\n\n def _fit_on_prepared_data(self, metadata):\n return _fit_on_prepared_data(self, metadata)\n\n\nclass HILGridSearch(GridSearch):\n \"\"\"Performs intermittent HIL grid search using the given param grid\n :param exp_id: Experiment ID.\n :param backend: Cerebro backend object (e.g., SparkBackend).\n :param store: Cerebro store object (e.g., LocalStore, HDFSStore).\n :param estimator_gen_fn: A function which takes \n in a dictionary of parameters and returns a Cerebro Estimator (e.g., cerebro.SparkEstimator).\n :param search_space: A dictionary object defining the parameter search space.\n :param num_epochs: Number of maximum epochs each model should be trained for.\n :param db: SQLAlchemy DB object.\n :param label_columns: (Optional) A list containing the names of the label/output columns (default ['label']).\n :param feature_columns: (Optional) A list containing the names of the feature columns (default ['features']).\n :param verbose: Debug output verbosity (0-2). Defaults to 1.\n \"\"\"\n\n def __init__(self, exp_id, backend, store, estimator_gen_fn, search_space, num_epochs, db,\n label_columns=['label'], feature_columns=['features'], verbose=1):\n super(HILGridSearch, self).__init__(backend=backend, store=store, estimator_gen_fn=estimator_gen_fn, search_space=search_space,\n num_epochs=num_epochs, label_columns=label_columns, feature_columns=feature_columns, verbose=verbose)\n self.exp_id = exp_id\n self.db = db\n\n def fit(self, df):\n raise NotImplementedError('method not implemented')\n\n def fit_on_prepared_data(self):\n \"\"\"\n Execute the model selection/AutoML workload on already prepared data.\n \"\"\"\n _hil_fit_on_prepared_data(self)\n\n\nclass RandomSearch(ModelSelection):\n \"\"\" Performs Random Search over the param grid\n\n :param backend: Cerebro backend object (e.g., SparkBackend).\n :param store: Cerebro store object (e.g., LocalStore, HDFSStore).\n :param estimator_gen_fn: A function which takes\n in a dictionary of parameters and returns a Cerebro Estimator (e.g., cerebro.SparkEstimator).\n :param search_space: A dictionary object defining the parameter search space.\n :param num_models: Maximum number of models to be explored.\n :param num_epochs: Number of maximum epochs each model should be trained for.\n :param evaluation_metric: Evaluation metric used to pick the best model (default: \"loss\").\n :param validation: (Optional) The ratio of the validation set (default: 0.25) or a string defining the column name\n defining the validation set. 
In the latter case the column value can be bool or int.\n :param label_columns: (Optional) A list containing the names of the label/output columns (default ['label']).\n :param feature_columns: (Optional) A list containing the names of the feature columns (default ['features']).\n :param verbose: Debug output verbosity (0-2). Defaults to 1.\n\n :return: :class:`cerebro.tune.ModelSelectionResult`\n \"\"\"\n\n def __init__(self, backend, store, estimator_gen_fn, search_space, num_models, num_epochs, evaluation_metric='loss',\n validation=0.25, label_columns=['label'], feature_columns=['features'], verbose=1):\n super(RandomSearch, self).__init__(backend, store, validation, estimator_gen_fn, evaluation_metric,\n label_columns, feature_columns, verbose)\n\n self.search_space = search_space\n # validate the search space\n self._validate_search_space()\n\n self.num_params = num_models\n self.estimator_param_maps = self._generate_all_param_maps()\n self.num_epochs = num_epochs\n\n def _validate_search_space(self):\n search_space = self.search_space\n if not type(search_space) == dict:\n raise Exception('Search space has to be type dict. Provided: {}'.format(type(search_space)))\n\n if not all([isinstance(k, str) for k in search_space.keys()]):\n raise Exception('Only string values are allowed for hyperparameter space keys.')\n\n if not all([isinstance(k, _HP) for k in search_space.values()]):\n raise Exception('All hyperparameter space values has to be of type cerebro.tune.base._HP.'\n ' Nested search spaces are not supported yet')\n\n def _generate_all_param_maps(self):\n params = []\n keys = self.search_space.keys()\n for _ in range(self.num_params):\n param_dict = {}\n for k in keys:\n param_dict[k] = self.search_space[k].sample_value()\n params.append(param_dict)\n return params\n\n def _fit_on_prepared_data(self, metadata):\n return _fit_on_prepared_data(self, metadata)\n\n\nclass HILRandomSearch(RandomSearch):\n \"\"\"Performs intermittent HIL random search using the given param grid\n :param exp_id: Experiment ID.\n :param backend: Cerebro backend object (e.g., SparkBackend).\n :param store: Cerebro store object (e.g., LocalStore, HDFSStore).\n :param estimator_gen_fn: A function which takes \n in a dictionary of parameters and returns a Cerebro Estimator (e.g., cerebro.SparkEstimator).\n :param search_space: A dictionary object defining the parameter search space.\n :param num_models: Maximum number of models to be explored.\n :param num_epochs: Number of maximum epochs each model should be trained for.\n :param db: SQLAlchemy DB object.\n :param label_columns: (Optional) A list containing the names of the label/output columns (default ['label']).\n :param feature_columns: (Optional) A list containing the names of the feature columns (default ['features']).\n :param verbose: Debug output verbosity (0-2). 
Defaults to 1.\n\n \"\"\"\n\n def __init__(self, exp_id, backend, store, estimator_gen_fn, search_space, num_models, num_epochs, db,\n label_columns=['label'], feature_columns=['features'], verbose=1):\n super(HILRandomSearch, self).__init__(backend=backend, store=store, estimator_gen_fn=estimator_gen_fn, search_space=search_space,\n num_models=num_models, num_epochs=num_epochs, label_columns=label_columns, feature_columns=feature_columns, verbose=verbose)\n self.exp_id = exp_id\n self.db = db\n\n def fit(self, df):\n raise NotImplementedError('method not implemented')\n\n def fit_on_prepared_data(self):\n \"\"\"\n Execute the model selection/AutoML workload on already prepared data.\n \"\"\"\n _hil_fit_on_prepared_data(self)\n\n\n# Batch implementation (i.e., without any user interaction) of model selection.\ndef _fit_on_prepared_data(self, metadata = None):\n # create estimators\n estimators = [self._estimator_gen_fn_wrapper(param) for param in self.estimator_param_maps]\n estimator_results = {model.getRunId(): {} for model in estimators}\n\n # log hyperparameters to TensorBoard\n # self._log_hp_to_tensorboard(estimators, self.estimator_param_maps)\n\n # Trains the models up to the number of epochs specified. For each iteration also performs validation\n for epoch in range(self.num_epochs):\n epoch_results = self.backend.train_for_one_epoch(estimators, self.store, self.feature_cols,\n self.label_cols)\n update_model_results(estimator_results, epoch_results)\n\n epoch_results = self.backend.train_for_one_epoch(estimators, self.store, self.feature_cols,\n self.label_cols, is_train=False)\n update_model_results(estimator_results, epoch_results)\n\n # self._log_epoch_metrics_to_tensorboard(estimators, estimator_results)\n\n # find the best model and crate ModelSearchModel\n models = [est.create_model(estimator_results[est.getRunId()], est.getRunId(), metadata) for est in estimators]\n val_metrics = [estimator_results[est.getRunId()]['val_' + self.evaluation_metric][-1] for est in estimators]\n best_model_idx = np.argmax(val_metrics) if is_larger_better(self.evaluation_metric) else np.argmin(val_metrics)\n best_model = models[best_model_idx]\n\n return ModelSelectionResult(best_model, estimator_results, models, [x+'__output' for x in self.label_cols])\n\n\n# Human-in-the-loop implementation\ndef _hil_fit_on_prepared_data(self):\n _, _, metadata, _ = self.backend.get_metadata_from_parquet(self.store, self.label_cols, self.feature_cols)\n\n if self.verbose >= 1: print(\n 'CEREBRO => Time: {}, Initializing Data Loaders'.format(\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n self.backend.initialize_data_loaders(self.store, self.feature_cols + self.label_cols)\n\n if self.verbose >= 1: print('CEREBRO => Time: {}, Launching Model Selection Workload'.format(\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n exp_id = self.exp_id\n exp_obj = Experiment.query.filter(Experiment.id == exp_id).one()\n db = self.db\n\n warm_start_model_id = None\n if exp_obj.warm_start_from_cloned_model:\n warm_start_model_id = exp_obj.clone_model_id\n\n # Creating the intial model specs.\n param_maps = self.estimator_param_maps\n for param_map in param_maps:\n model_id = next_user_friendly_model_id()\n model_dao = Model(model_id, exp_obj.id, 0, int(exp_obj.max_train_epochs), warm_start_model_id=warm_start_model_id)\n db.session.add(model_dao)\n\n for k in param_map:\n dtype = ParamDef.query.filter(and_(ParamDef.exp_id == exp_id, ParamDef.name == k)).one().dtype\n pval_dao = ParamVal(model_id, 
k, param_map[k], dtype)\n db.session.add(pval_dao)\n db.session.add(model_dao)\n db.session.commit()\n exp_obj.status = RUNNING_STATUS\n db.session.commit()\n"
] | [
[
"numpy.argmax",
"numpy.argmin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
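The record above defines Cerebro's `GridSearch`/`RandomSearch` model-selection classes and their human-in-the-loop (HIL) variants, each constructed from a backend, a store, an `estimator_gen_fn`, and a search space whose values must be `_HPChoice`/`_HP` objects. The following is a minimal usage sketch, not taken from the record: the import paths, the `hp_choice` helper, and the public `fit_on_prepared_data()` wrapper on the base `ModelSelection` class are assumptions about the surrounding cerebro package.

```python
# Hypothetical wiring of the GridSearch class shown above (import paths assumed).
from cerebro.backend import SparkBackend        # assumed to exist in the cerebro package
from cerebro.storage import LocalStore          # assumed to exist in the cerebro package
from cerebro.tune import GridSearch, hp_choice  # hp_choice assumed to build _HPChoice values

def estimator_gen_fn(params):
    # Return a Cerebro estimator (e.g. a SparkEstimator wrapping a Keras model)
    # built from one hyperparameter combination; body omitted in this sketch.
    ...

search_space = {
    'lr': hp_choice([1e-3, 1e-4]),      # keys must be strings, values _HPChoice objects
    'batch_size': hp_choice([32, 64]),
}

gs = GridSearch(backend=SparkBackend(num_workers=2),
                store=LocalStore('/tmp/cerebro'),
                estimator_gen_fn=estimator_gen_fn,
                search_space=search_space,
                num_epochs=5)
result = gs.fit_on_prepared_data()  # assumed public wrapper; returns a ModelSelectionResult
```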
grezesf/Research | [
"10b7de860f9caf9275ac23c4b4fce51150af5f7f"
] | [
"Reservoirs/Task1_Toy_Examples/lib_task1.py"
] | [
"#!/usr/bin/python\nimport random\nimport math\nimport numpy\n\n\n\ndef gen_toy_data(data_dim=500, set_size=100, freq_range=[20,40], phase_range=[20,40], amplitude_range=[10,50], delay=5, input_noise=0.0, target_noise=0.0):\n # generates toy wavy data\n # data_dim is the number of points per wave \n # set_size is the number of waves\n # the target is delayed by delay\n\n # sets starts empty\n input_set = []\n target_set = []\n\n # set ranges extrema\n [min_freq, max_freq] = freq_range\n [min_phase, max_phase] = phase_range\n [min_amplitude,max_amplitude] = amplitude_range\n\n\n # generate set_size signals\n for nb in range(set_size):\n\n input_wave = []\n target_wave = []\n\n # pick random freq, phase and amplitude\n freq1 = random.randint(min_freq, max_freq)\n phase1 = random.randint(min_phase, max_phase)\n amplitude1 = random.randint(min_amplitude,max_amplitude)\n # amplitude1 = 1.0\n freq2 = random.randint(min_freq, max_freq)\n phase2 = random.randint(0, freq2)\n amplitude2 = random.random()\n\n # test: remove randomness\n # freq1 = 25\n # phase1 = 0\n # amplitude1 = 10\n\n\n # generate a signal of data_dim points\n for i in range(data_dim):\n # generate data point\n # generate input noise\n noise = input_noise * (2.0*random.random()-1.0)\n \n point1 = noise + amplitude1 * math.sin(2.0*math.pi*(i+phase1)/freq1)\n point2 = noise + amplitude2 * math.sin(2.0*math.pi*(i+phase2)/freq2)\n \n # add to input_wave\n input_wave.append(numpy.array([point1]))\n\n # input_wave.append([point1, point2])\n # test: make inputs different sizes\n # if random.random()<0.5:\n # input_wave.append(numpy.array([point1]))\n # else:\n # input_wave.append(numpy.array([point1, point1]))\n\n # generate target point delayed\n if i<delay:\n target1 = 0\n target2 = 0\n else:\n # generate target noise\n noise = target_noise * (2.0*random.random()-1.0)\n target1 = noise + amplitude1 * math.sin(2.0*math.pi*(i+phase1-delay)/freq1)\n target2 = noise + amplitude2 * math.sin(2.0*math.pi*(i+phase2-delay)/freq2)\n \n # add to target_wave\n # target_wave.append([target1, target2])\n target_wave.append(numpy.array([target1]))\n\n # add signals to data sets\n input_set.append(numpy.array(input_wave))\n target_set.append(numpy.array(target_wave))\n\n return numpy.array([input_set, target_set])\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
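The `gen_toy_data` helper in the record above returns a numpy array packing `[input_set, target_set]`: `set_size` noisy sine waves of `data_dim` points each, with the target being the same wave delayed by `delay` samples and zero-padded at the start. A small usage sketch follows, assuming the file is importable as `lib_task1` per its path; the concrete numbers are illustrative only.

```python
# Hedged usage sketch for gen_toy_data from the record above.
import numpy as np
from lib_task1 import gen_toy_data  # assumed module name, taken from the file path

inputs, targets = gen_toy_data(data_dim=200, set_size=10, delay=5,
                               input_noise=0.1, target_noise=0.0)

print(len(inputs), inputs[0].shape)    # 10 (200, 1): one column vector per time step
print(len(targets), targets[0].shape)  # 10 (200, 1)

# The first `delay` target samples are zero by construction.
assert np.allclose(targets[0][:5], 0.0)
```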
IT-BillDeng/Garbage-Classification | [
"f68f4821e63dac67992ba5ea9cd7b77fae1cf4e5"
] | [
"data/util.py"
] | [
"import numpy as np\nfrom PIL import Image\nimport random\n# from skimage import io, color\n\n\ndef read_image(path, dtype=np.float32, color=True):\n \"\"\"Read an image from a file.\n\n This function reads an image from given file. The image is CHW format and\n the range of its value is :math:`[0, 255]`. If :obj:`color = True`, the\n order of the channels is RGB.\n\n Args:\n path (str): A path of image file.\n dtype: The type of array. The default value is :obj:`~numpy.float32`.\n color (bool): This option determines the number of channels.\n If :obj:`True`, the number of channels is three. In this case,\n the order of the channels is RGB. This is the default behaviour.\n If :obj:`False`, this function returns a grayscale image.\n\n Returns:\n ~numpy.ndarray: An image.\n \"\"\"\n # img_gray = io.imread(path, as_gray=True)\n # print(img_gray)\n # img = color.gray2rgb(img_gray)\n # img = np.asarray(img, dtype=dtype)\n # img = img.transpose((2, 0, 1))\n # return img\n\n # f = Image.open(path)\n # try:\n # if color:\n # img = f.convert('RGB')\n # else:\n # img = f.convert('P')\n # img = np.asarray(img, dtype=dtype)\n # finally:\n # if hasattr(f, 'close'):\n # f.close()\n\n # if img.ndim == 2:\n # # reshape (H, W) -> (1, H, W)\n # img = color.gray2rgb(img)\n # img = img.transpose((2, 0, 1))\n # # img = img[np.newaxis]\n # return img\n # else:\n # # transpose (H, W, C) -> (C, H, W)\n # return img.transpose((2, 0, 1))\n\n f = Image.open(path)\n try:\n if color:\n img = f.convert('RGB')\n else:\n img = f.convert('P')\n img = np.asarray(img, dtype=dtype)\n finally:\n if hasattr(f, 'close'):\n f.close()\n\n if img.ndim == 2:\n # reshape (H, W) -> (1, H, W)\n return img[np.newaxis]\n else:\n # transpose (H, W, C) -> (C, H, W)\n return img.transpose((2, 0, 1))\n\ndef resize_bbox(bbox, in_size, out_size):\n \"\"\"Resize bounding boxes according to image resize.\n\n The bounding boxes are expected to be packed into a two dimensional\n tensor of shape :math:`(R, 4)`, where :math:`R` is the number of\n bounding boxes in the image. The second axis represents attributes of\n the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,\n where the four attributes are coordinates of the top left and the\n bottom right vertices.\n\n Args:\n bbox (~numpy.ndarray): An array whose shape is :math:`(R, 4)`.\n :math:`R` is the number of bounding boxes.\n in_size (tuple): A tuple of length 2. The height and the width\n of the image before resized.\n out_size (tuple): A tuple of length 2. The height and the width\n of the image after resized.\n\n Returns:\n ~numpy.ndarray:\n Bounding boxes rescaled according to the given image shapes.\n\n \"\"\"\n bbox = bbox.copy()\n y_scale = float(out_size[0]) / in_size[0]\n x_scale = float(out_size[1]) / in_size[1]\n bbox[:, 0] = y_scale * bbox[:, 0]\n bbox[:, 2] = y_scale * bbox[:, 2]\n bbox[:, 1] = x_scale * bbox[:, 1]\n bbox[:, 3] = x_scale * bbox[:, 3]\n return bbox\n\n\ndef flip_bbox(bbox, size, y_flip=False, x_flip=False):\n \"\"\"Flip bounding boxes accordingly.\n\n The bounding boxes are expected to be packed into a two dimensional\n tensor of shape :math:`(R, 4)`, where :math:`R` is the number of\n bounding boxes in the image. The second axis represents attributes of\n the bounding box. 
They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,\n where the four attributes are coordinates of the top left and the\n bottom right vertices.\n\n Args:\n bbox (~numpy.ndarray): An array whose shape is :math:`(R, 4)`.\n :math:`R` is the number of bounding boxes.\n size (tuple): A tuple of length 2. The height and the width\n of the image before resized.\n y_flip (bool): Flip bounding box according to a vertical flip of\n an image.\n x_flip (bool): Flip bounding box according to a horizontal flip of\n an image.\n\n Returns:\n ~numpy.ndarray:\n Bounding boxes flipped according to the given flips.\n\n \"\"\"\n H, W = size\n bbox = bbox.copy()\n if y_flip:\n y_max = H - bbox[:, 0]\n y_min = H - bbox[:, 2]\n bbox[:, 0] = y_min\n bbox[:, 2] = y_max\n if x_flip:\n x_max = W - bbox[:, 1]\n x_min = W - bbox[:, 3]\n bbox[:, 1] = x_min\n bbox[:, 3] = x_max\n return bbox\n\n\ndef crop_bbox(\n bbox, y_slice=None, x_slice=None,\n allow_outside_center=True, return_param=False):\n \"\"\"Translate bounding boxes to fit within the cropped area of an image.\n\n This method is mainly used together with image cropping.\n This method translates the coordinates of bounding boxes like\n :func:`data.util.translate_bbox`. In addition,\n this function truncates the bounding boxes to fit within the cropped area.\n If a bounding box does not overlap with the cropped area,\n this bounding box will be removed.\n\n The bounding boxes are expected to be packed into a two dimensional\n tensor of shape :math:`(R, 4)`, where :math:`R` is the number of\n bounding boxes in the image. The second axis represents attributes of\n the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,\n where the four attributes are coordinates of the top left and the\n bottom right vertices.\n\n Args:\n bbox (~numpy.ndarray): Bounding boxes to be transformed. The shape is\n :math:`(R, 4)`. :math:`R` is the number of bounding boxes.\n y_slice (slice): The slice of y axis.\n x_slice (slice): The slice of x axis.\n allow_outside_center (bool): If this argument is :obj:`False`,\n bounding boxes whose centers are outside of the cropped area\n are removed. 
The default value is :obj:`True`.\n return_param (bool): If :obj:`True`, this function returns\n indices of kept bounding boxes.\n\n Returns:\n ~numpy.ndarray or (~numpy.ndarray, dict):\n\n If :obj:`return_param = False`, returns an array :obj:`bbox`.\n\n If :obj:`return_param = True`,\n returns a tuple whose elements are :obj:`bbox, param`.\n :obj:`param` is a dictionary of intermediate parameters whose\n contents are listed below with key, value-type and the description\n of the value.\n\n * **index** (*numpy.ndarray*): An array holding indices of used \\\n bounding boxes.\n\n \"\"\"\n\n t, b = _slice_to_bounds(y_slice)\n l, r = _slice_to_bounds(x_slice)\n crop_bb = np.array((t, l, b, r))\n\n if allow_outside_center:\n mask = np.ones(bbox.shape[0], dtype=bool)\n else:\n center = (bbox[:, :2] + bbox[:, 2:]) / 2.0\n mask = np.logical_and(crop_bb[:2] <= center, center < crop_bb[2:]) \\\n .all(axis=1)\n\n bbox = bbox.copy()\n bbox[:, :2] = np.maximum(bbox[:, :2], crop_bb[:2])\n bbox[:, 2:] = np.minimum(bbox[:, 2:], crop_bb[2:])\n bbox[:, :2] -= crop_bb[:2]\n bbox[:, 2:] -= crop_bb[:2]\n\n mask = np.logical_and(mask, (bbox[:, :2] < bbox[:, 2:]).all(axis=1))\n bbox = bbox[mask]\n\n if return_param:\n return bbox, {'index': np.flatnonzero(mask)}\n else:\n return bbox\n\n\ndef _slice_to_bounds(slice_):\n if slice_ is None:\n return 0, np.inf\n\n if slice_.start is None:\n l = 0\n else:\n l = slice_.start\n\n if slice_.stop is None:\n u = np.inf\n else:\n u = slice_.stop\n\n return l, u\n\n\ndef translate_bbox(bbox, y_offset=0, x_offset=0):\n \"\"\"Translate bounding boxes.\n\n This method is mainly used together with image transforms, such as padding\n and cropping, which translates the left top point of the image from\n coordinate :math:`(0, 0)` to coordinate\n :math:`(y, x) = (y_{offset}, x_{offset})`.\n\n The bounding boxes are expected to be packed into a two dimensional\n tensor of shape :math:`(R, 4)`, where :math:`R` is the number of\n bounding boxes in the image. The second axis represents attributes of\n the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,\n where the four attributes are coordinates of the top left and the\n bottom right vertices.\n\n Args:\n bbox (~numpy.ndarray): Bounding boxes to be transformed. The shape is\n :math:`(R, 4)`. :math:`R` is the number of bounding boxes.\n y_offset (int or float): The offset along y axis.\n x_offset (int or float): The offset along x axis.\n\n Returns:\n ~numpy.ndarray:\n Bounding boxes translated according to the given offsets.\n\n \"\"\"\n\n out_bbox = bbox.copy()\n out_bbox[:, :2] += (y_offset, x_offset)\n out_bbox[:, 2:] += (y_offset, x_offset)\n\n return out_bbox\n\n\ndef random_flip(img, y_random=False, x_random=False,\n return_param=False, copy=False):\n \"\"\"Randomly flip an image in vertical or horizontal direction.\n\n Args:\n img (~numpy.ndarray): An array that gets flipped. 
This is in\n CHW format.\n y_random (bool): Randomly flip in vertical direction.\n x_random (bool): Randomly flip in horizontal direction.\n return_param (bool): Returns information of flip.\n copy (bool): If False, a view of :obj:`img` will be returned.\n\n Returns:\n ~numpy.ndarray or (~numpy.ndarray, dict):\n\n If :obj:`return_param = False`,\n returns an array :obj:`out_img` that is the result of flipping.\n\n If :obj:`return_param = True`,\n returns a tuple whose elements are :obj:`out_img, param`.\n :obj:`param` is a dictionary of intermediate parameters whose\n contents are listed below with key, value-type and the description\n of the value.\n\n * **y_flip** (*bool*): Whether the image was flipped in the\\\n vertical direction or not.\n * **x_flip** (*bool*): Whether the image was flipped in the\\\n horizontal direction or not.\n\n \"\"\"\n y_flip, x_flip = False, False\n if y_random:\n y_flip = random.choice([True, False])\n if x_random:\n x_flip = random.choice([True, False])\n\n if y_flip:\n img = img[:, ::-1, :]\n if x_flip:\n img = img[:, :, ::-1]\n\n if copy:\n img = img.copy()\n\n if return_param:\n return img, {'y_flip': y_flip, 'x_flip': x_flip}\n else:\n return img\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.logical_and",
"numpy.asarray",
"numpy.ones",
"numpy.flatnonzero",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
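The record above collects image/bounding-box transforms (`read_image`, `resize_bbox`, `flip_bbox`, `crop_bbox`, `translate_bbox`, `random_flip`) that all expect boxes as `(R, 4)` arrays in `(y_min, x_min, y_max, x_max)` order. A short sketch of chaining a random flip and a resize, assuming the file is importable as `data.util` per its path; the image path and box coordinates are placeholders.

```python
# Hedged usage sketch for the bbox utilities in the record above.
import numpy as np
from data.util import read_image, resize_bbox, flip_bbox, random_flip  # assumed import path

img = read_image('example.jpg')             # (C, H, W) float32, values in [0, 255]
bbox = np.array([[10., 20., 100., 200.]])   # (R, 4) as (y_min, x_min, y_max, x_max)
H, W = img.shape[1:]

# Randomly flip the image and apply the same flip to the boxes.
img, params = random_flip(img, x_random=True, return_param=True)
bbox = flip_bbox(bbox, (H, W), x_flip=params['x_flip'])

# Rescale the boxes for a new image size; resizing the pixels themselves is
# done elsewhere (this module only transforms the box coordinates).
bbox = resize_bbox(bbox, (H, W), (300, 400))
```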
szymonmaszke/UniFirstKaggle | [
"c1718e5ad9006251e8280c65fd8651b4d4efa31e"
] | [
"src/utilities/analysis.py"
] | [
"import pathlib\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import skew, skewtest\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import f_classif, mutual_info_classif\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nfrom statsmodels.tools.tools import add_constant\n\nfrom .decorators import logger, sorter\n\n\n@sorter(by=\"skew\")\n@logger(\"csv\")\ndef skewness(_: pathlib.Path, data: pd.DataFrame):\n statistic, pvalue = skewtest(data)\n return pd.DataFrame({\"skew\": skew(data), \"statistic\": statistic, \"pvalue\": pvalue})\n\n\n@sorter(by=\"VIF\")\n@logger(\"csv\")\ndef vifs(_: pathlib.Path, data: pd.DataFrame) -> pd.DataFrame:\n vif_data = add_constant(data)\n return pd.DataFrame(\n [\n variance_inflation_factor(vif_data.values, i)\n for i in range(vif_data.shape[1])\n ],\n index=[\"constant\"] + list(data),\n columns=[\"VIF\"],\n )\n\n\n@sorter(by=\"mean\", ascending=True)\n@logger(\"csv\")\ndef mean(_: pathlib.Path, data: pd.DataFrame) -> pd.DataFrame:\n return pd.DataFrame(data.mean(axis=0), columns=[\"mean\"])\n\n\n@sorter(by=\"variance\", ascending=True)\n@logger(\"csv\")\ndef variance(_: pathlib.Path, data: pd.DataFrame) -> pd.DataFrame:\n return pd.DataFrame(data.var(axis=0), columns=[\"variance\"])\n\n\n@sorter(by=\"anova\")\n@logger(\"csv\")\ndef feature_importance(_: pathlib.Path, X: pd.DataFrame, y) -> pd.DataFrame:\n anova, anova_p_vals = f_classif(X, y)\n mutual_info = mutual_info_classif(X, y)\n return pd.DataFrame(\n {\n \"anova\": anova,\n \"pvalue\": anova_p_vals,\n \"mutual\": mutual_info,\n \"mix\": mutual_info * anova,\n }\n )\n\n\n@sorter(by=\"importance\")\n@logger(\"csv\")\ndef rf_feature_importance(\n _: pathlib.Path, X: pd.DataFrame, y, random_state\n) -> pd.DataFrame:\n classifier = RandomForestClassifier(\n n_estimators=100, n_jobs=-1, random_state=random_state\n )\n classifier.fit(X, y)\n return pd.DataFrame({\"importance\": classifier.feature_importances_})\n"
] | [
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.feature_selection.f_classif",
"scipy.stats.skewtest",
"pandas.DataFrame",
"sklearn.feature_selection.mutual_info_classif",
"scipy.stats.skew"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
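The analysis helpers in the record above are wrapped in `@sorter(...)` and `@logger("csv")` decorators from the sibling `decorators` module, so each takes an output path first and then the feature DataFrame (plus labels for the importance functions). A hedged call sketch follows; the import path, the synthetic data, and the assumption that the decorators write the returned DataFrame to CSV at the given path are illustrative rather than confirmed by the record.

```python
# Hedged usage sketch for the analysis helpers in the record above.
import pathlib
import numpy as np
import pandas as pd
from src.utilities.analysis import skewness, vifs, feature_importance  # assumed import path

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(200, 4)), columns=list('abcd'))
y = (X['a'] + rng.normal(scale=0.1, size=200) > 0).astype(int)

out = pathlib.Path('reports')
skewness(out / 'skew.csv', X)                     # per-column skew, skewtest statistic, p-value
vifs(out / 'vif.csv', X)                          # variance inflation factors (with a constant term)
feature_importance(out / 'importance.csv', X, y)  # ANOVA F, p-value, mutual information
```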
Junction4Nako/mvp_pytorch | [
"66db043d7beb1481c7a4c62908a4a46c7bfbc57a"
] | [
"oscar/run_retrieval.py"
] | [
"# Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license. \nfrom __future__ import absolute_import, division, print_function\nimport argparse\nimport os\nimport glob\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\nimport base64\nimport os.path as op\nimport random, json\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler\nfrom tqdm import tqdm\n\nfrom oscar.utils.tsv_file import TSVFile\nfrom oscar.utils.logger import setup_logger\nfrom oscar.utils.misc import mkdir, set_seed, weighted_sample\nfrom oscar.modeling.modeling_vlbert import BiImageBertForRetrieval\nfrom transformers.pytorch_transformers import BertTokenizer, BertConfig, WEIGHTS_NAME \nfrom transformers.pytorch_transformers import AdamW, WarmupLinearSchedule, WarmupConstantSchedule\n\n\nclass RetrievalDataset(Dataset):\n \"\"\" Image/Text Retrieval Dataset\"\"\"\n def __init__(self, tokenizer, args, split='train', is_train=True, coarse_cap_index=None, coarse_img_index=None):\n \"\"\"\n tokenizer: tokenizer to process caption text.\n args: configureation parameters including max_seq_length, etc.\n split: used to infer the data used for training or testing. \n All files are in .pt format of a dictionary with image keys and \n image features (pytorch tensors), captions (list of str, support multiple\n captions per image), labels (list of dictionary or str of all labels),\n\n \"\"\"\n super(RetrievalDataset, self).__init__()\n self.img_file = args.img_feat_file\n caption_file = op.join(args.data_dir, '{}_captions.pt'.format(split))\n self.img_tsv = TSVFile(self.img_file)\n self.captions = torch.load(caption_file)\n self.img_keys = list(self.captions.keys()) # img_id as int\n if not type(self.captions[self.img_keys[0]]) == list:\n self.captions = {k: json.loads(self.captions[k]) for k in self.img_keys}\n\n self.num_of_total_captions = args.num_captions_per_img_train*len(self.img_keys)\n print('number of total captions:',self.num_of_total_captions)\n print('number of images', len(self.captions))\n\n # get the image image_id to index map\n # imgid2idx_file = op.join(op.dirname(self.img_file), 'imageid2idx.json')\n # self.image_id2idx = json.load(open(imgid2idx_file)) # img_id as string\n \n # get the image features and labels\n if args.dataset_name == 'flickr':\n img_feat_file = op.join(args.data_dir, '{}_img_frcnn_feats.pt'.format(split))\n self.img_feats = torch.load(img_feat_file)\n if args.add_od_labels:\n labels_file = op.join(args.data_dir, '{}_{}_labels.pt'.format(split, args.od_label_type))\n self.labels = torch.load(labels_file)\n else:\n self.img_feats = None\n # get the image image_id to index map\n imgid2idx_file = op.join(op.dirname(self.img_file), 'imageid2idx.json')\n self.image_id2idx = json.load(open(imgid2idx_file)) # img_id as string\n \n if args.add_od_labels:\n label_data_dir = op.dirname(self.img_file)\n label_file = os.path.join(label_data_dir, \"predictions.tsv\")\n self.label_tsv = TSVFile(label_file)\n self.labels = {}\n for line_no in tqdm(range(self.label_tsv.num_rows())):\n row = self.label_tsv.seek(line_no)\n image_id = row[0]\n if int(image_id) in self.img_keys:\n results = json.loads(row[1])\n objects = results['objects'] if type(\n results) == dict else results\n self.labels[int(image_id)] = {\n \"image_h\": results[\"image_h\"] if type(\n results) == dict else 600,\n \"image_w\": results[\"image_w\"] if type(\n results) == dict else 800,\n \"class\": [cur_d['class'] for cur_d in objects],\n 
\"boxes\": np.array([cur_d['rect'] for cur_d in objects],\n dtype=np.float32)\n }\n self.label_tsv._fp.close()\n self.label_tsv._fp = None\n\n # self.img2theme = json.load(open(args.img2theme, 'r'))\n self.sent_sgs = torch.load(args.sent_sg_json)\n self.id2sg = json.load(open(args.id2node, 'r'))\n self.sg2id = {tuple(v):int(k) for k,v in self.id2sg.items()}\n self.phrase_vocab_size = len(self.sg2id)\n self.ds_name = args.dataset_name\n # self.img2theme = {k:v for k,v in self.img2theme.items() if k.startswith(self.ds_name)}\n\n\n # get extra concepts\n if args.extra_concept:\n add_concept_file = op.join(args.data_dir, '{}_extra_concepts.pt'.format(split))\n self.extra_concep = torch.load(add_concept_file)\n \n\n if args.clip_neg_sampling and is_train:\n neg_scpres_file = op.join(args.data_dir, '{}_clip_ft_scores.pt'.format(split))\n self.neg_scores = torch.load(neg_scpres_file)\n\n if is_train:\n self.num_captions_per_img = args.num_captions_per_img_train\n else:\n self.num_captions_per_img = args.num_captions_per_img_val\n self.num_images_per_cap = args.num_images_per_cap_val\n if args.eval_img_keys_file:\n # select a subset of image keys for evaluation. eg. COCO 1k and 5k\n # eval_img_keys_file is a list of image keys saved in tsv file\n with open(op.join(args.data_dir, args.eval_img_keys_file), 'r') as f:\n img_keys = f.readlines()\n self.img_keys = [int(k.strip()) for k in img_keys]\n self.num_of_total_captions = args.num_captions_per_img_train*len(self.img_keys)\n self.captions = {k: self.captions[k] for k in self.img_keys}\n if args.add_od_labels:\n self.labels = {k: self.labels[k] for k in self.img_keys}\n\n if args.eval_caption_index_file:\n # hard negative image/caption indexs for retrieval re-rank setting.\n # useful for mini val set to monitor the performance during training.\n # However, it cannot be used together with cross image evaluation.\n self.has_caption_indexs = True\n assert not args.cross_image_eval \n caption_index_file = op.join(args.data_dir, args.eval_caption_index_file)\n self.caption_indexs = torch.load(caption_index_file)\n if not type(self.caption_indexs[self.img_keys[0]]) == list:\n self.caption_indexs = {k: json.loads(self.caption_indexs[k]) for k in self.img_keys}\n else:\n self.has_caption_indexs = False\n\n if coarse_cap_index:\n self.has_caption_indexs = True\n self.caption_indexs = coarse_cap_index\n else:\n self.has_caption_indexs = False\n\n if coarse_img_index:\n self.has_image_indexs = True\n self.image_indexs = coarse_img_index\n else:\n self.has_image_indexs = False\n\n self.is_train = is_train\n self.output_mode = args.output_mode\n self.tokenizer = tokenizer\n self.max_seq_len = args.max_seq_length\n self.max_img_seq_len = args.max_img_seq_length\n self.args = args\n\n def set_caption_index(self, caption_index):\n self.num_captions_per_img = self.args.num_captions_per_img_val\n self.has_caption_indexs = True\n self.has_image_indexs = False\n self.caption_indexs = caption_index\n\n def set_image_index(self, image_index):\n self.num_images_per_cap = self.args.num_images_per_cap_val\n self.has_image_indexs = True\n self.has_caption_indexs = False\n self.image_indexs = image_index\n\n def unset_index(self):\n self.num_captions_per_img = self.args.num_captions_per_img_train\n self.num_images_per_cap = 1\n self.has_image_indexs = False\n self.has_caption_indexs = False\n \n def get_image_caption_index(self, index):\n # return img_idx to access features and [img_key, cap_idx] to access caption\n if not self.is_train and self.args.cross_image_eval:\n 
img_idx = index // (self.num_captions_per_img * len(self.img_keys))\n cap_idx = index % (self.num_captions_per_img * len(self.img_keys))\n img_idx1 = cap_idx // self.num_captions_per_img\n cap_idx1 = cap_idx % self.num_captions_per_img\n return img_idx, [self.img_keys[img_idx1], cap_idx1]\n if not self.is_train and self.has_caption_indexs:\n img_idx = index // self.num_captions_per_img\n cap_idx = index % self.num_captions_per_img\n img_key1, cap_idx1 = self.caption_indexs[self.img_keys[img_idx]][cap_idx]\n return img_idx, [img_key1, cap_idx1]\n if not self.is_train and self.has_image_indexs:\n cap_idx = index // self.num_images_per_cap\n cap_img_idx = cap_idx // self.args.num_captions_per_img_train\n cap_cap_idx = cap_idx % self.args.num_captions_per_img_train\n img_idx = index % self.num_images_per_cap\n img_key1 = self.image_indexs[(self.img_keys[cap_img_idx],cap_cap_idx)][img_idx]\n return img_key1, [self.img_keys[cap_img_idx], cap_cap_idx]\n img_idx = index // self.num_captions_per_img\n cap_idx = index % self.num_captions_per_img\n return img_idx, [self.img_keys[img_idx], cap_idx]\n\n def get_label(self, index):\n img_idx, cap_idx = self.get_image_caption_index(index)\n return 1 if self.img_keys[img_idx] == cap_idx[0] else 0\n\n def get_od_labels(self, img_key, cap_index=None):\n if self.args.add_od_labels:\n if self.ds_name != 'flickr':\n if type(self.labels[img_key]) == str:\n od_labels = self.labels[img_key]\n else:\n od_labels = ' '.join(self.labels[img_key]['class'])\n return od_labels\n else:\n if type(self.labels[img_key]) == str:\n od_labels = self.labels[img_key]\n else:\n od_labels = ' '.join(list(set(self.labels[img_key]['class'])))\n # od_labels = ' '.join(self.labels[img_key]['class'])\n \n if cap_index is not None:\n extra_concepts = self.extra_concep[str(img_key)][cap_index]\n if self.args.num_extra_concept < len(extra_concepts):\n extra_concepts = random.sample(extra_concepts, self.args.num_extra_concept)\n od_labels += ' '.join(od_labels)\n return od_labels\n\n def tensorize_example(self, text_a, img_feat, text_b=None, phrase_nodes=None,\n cls_token_segment_id=0, pad_token_segment_id=0,\n sequence_a_segment_id=0, sequence_b_segment_id=1):\n tokens_a = self.tokenizer.tokenize(text_a)\n num_extra_tokens = 2\n num_phrases = self.args.max_phrases\n if len(tokens_a) > self.args.max_seq_length - num_extra_tokens: # edited here to make it for sequence length == 68\n tokens_a = tokens_a[:(self.args.max_seq_length - num_extra_tokens)]\n\n if len(phrase_nodes) >= num_phrases+self.args.max_seq_length-2-len(tokens_a):\n phrase_nodes = phrase_nodes[:(num_phrases+self.args.max_seq_length-2-len(tokens_a))]\n\n seq_tokens_a = [self.tokenizer.cls_token] + tokens_a # + [self.tokenizer.sep_token]\n phrase_index = [len(seq_tokens_a), len(seq_tokens_a)+len(phrase_nodes)]\n input_ids_a = self.tokenizer.convert_tokens_to_ids(seq_tokens_a) + phrase_nodes + [self.tokenizer.vocab[self.tokenizer.sep_token]]\n segment_ids_a = [cls_token_segment_id] + [sequence_a_segment_id] * (len(tokens_a) + len(phrase_nodes) + 1)\n seq_a_len = len(input_ids_a)\n input_mask_a = [1] * len(input_ids_a)\n\n tokens_b = self.tokenizer.tokenize(text_b)\n if len(tokens_b) > self.args.max_tag_length - 2:\n # num_left_tokens = max(0, self.max_seq_len - len(tokens_b) - 2) # to avoid -1\n # assert(num_left_tokens >= 0)\n tokens_b = tokens_b[: (self.args.max_tag_length - 2)]\n seq_tokens_b = ['[CLS]'] + tokens_b + [self.tokenizer.sep_token]\n input_ids_b = self.tokenizer.convert_tokens_to_ids(seq_tokens_b)\n segment_ids_b 
= [sequence_b_segment_id] * len(seq_tokens_b)\n input_mask_b = [1] * len(input_ids_b)\n seq_b_len = len(input_ids_b)\n\n seq_len_a = len(input_ids_a)\n tmp_max_seq_len = self.max_seq_len + self.args.max_phrases\n seq_padding_len_a = tmp_max_seq_len - seq_len_a\n input_ids_a += seq_padding_len_a * [0,]\n input_mask_a += seq_padding_len_a * [0,]\n segment_ids_a += seq_padding_len_a * [pad_token_segment_id,]\n\n seq_padding_len_b = self.args.max_tag_length - seq_b_len\n input_ids_b += seq_padding_len_b * [0, ]\n input_mask_b += seq_padding_len_b * [0, ]\n segment_ids_b += seq_padding_len_b * [pad_token_segment_id, ]\n\n # image features\n img_len = img_feat.shape[0]\n if img_len > self.max_img_seq_len:\n img_feat = img_feat[0 : self.max_img_seq_len, :]\n img_len = img_feat.shape[0]\n img_padding_len = 0\n input_mask_b += [1]*self.max_img_seq_len\n else:\n img_padding_len = self.max_img_seq_len - img_len\n padding_matrix = torch.zeros((img_padding_len, img_feat.shape[1]))\n img_feat = torch.cat((img_feat, padding_matrix), 0)\n input_mask_b += [1]*img_len + [0]*img_padding_len\n image_start_index = len(input_ids_a) # input_ids_a here for the concated sequence\n image_end_index = image_start_index + img_len\n img_index = [image_start_index, image_end_index]\n\n input_ids_a = torch.tensor(input_ids_a, dtype=torch.long)\n input_mask_a = torch.tensor(input_mask_a, dtype=torch.long)\n segment_ids_a = torch.tensor(segment_ids_a, dtype=torch.long)\n input_ids_b = torch.tensor(input_ids_b, dtype=torch.long)\n input_mask_b = torch.tensor(input_mask_b, dtype=torch.long)\n segment_ids_b = torch.tensor(segment_ids_b, dtype=torch.long)\n phrase_index = torch.tensor(phrase_index, dtype=torch.long)\n image_index = torch.tensor(img_index, dtype=torch.long)\n if self.is_train:\n return (input_ids_a, input_mask_a, segment_ids_a, input_ids_b,\n input_mask_b, segment_ids_b, img_feat)\n else:\n return (input_ids_a, input_mask_a, segment_ids_a, input_ids_b,\n input_mask_b, segment_ids_b, img_feat)\n\n\n def get_neg_txt(self, img_idx):\n img_scores = self.neg_scores['img2htxt_logit'][img_idx, :]\n sample_idx = weighted_sample(img_scores)\n neg_txt = self.neg_scores['img2htxt_index'][img_idx, sample_idx]\n img_idx_neg = neg_txt // self.num_captions_per_img\n cap_idx_neg = neg_txt % self.num_captions_per_img\n caption_neg = self.captions[self.img_keys[img_idx_neg]][cap_idx_neg]\n phrases_neg = self.get_caption_phrase(self.img_keys[img_idx_neg], cap_idx_neg)\n return caption_neg, phrases_neg\n\n\n def get_neg_img(self, img_idx, cap_idx):\n cap_scores = self.neg_scores['txt2himg_logit'][img_idx*5+cap_idx, :]\n sample_idx = weighted_sample(cap_scores)\n neg_img = self.neg_scores['txt2himg_index'][img_idx*5+cap_idx, sample_idx]\n feature_neg, v_c_neg = self.get_image(self.img_keys[neg_img])\n od_labels_neg = self.get_od_labels(self.img_keys[neg_img])\n return feature_neg, od_labels_neg, v_c_neg\n\n\n def __getitem__(self, index):\n img_idx, cap_idxs = self.get_image_caption_index(index)\n img_key = self.img_keys[img_idx]\n feature = self.get_image(img_key)\n caption = self.captions[cap_idxs[0]][cap_idxs[1]]\n phrases_node = self.get_caption_phrase(cap_idxs[0], cap_idxs[1])\n # print(phrases_node)\n od_labels = self.get_od_labels(img_key)\n example = self.tensorize_example(caption, feature, text_b=od_labels, phrase_nodes=phrases_node)\n label = 1 if img_key == cap_idxs[0] else 0\n # print([i.shape for i in example])\n return index, tuple(list(example)+[label])\n if self.is_train:\n img_idx, cap_idxs = 
self.get_image_caption_index(index)\n img_key = self.img_keys[img_idx]\n feature, v_c = self.get_image(img_key)\n caption = self.captions[cap_idxs[0]][cap_idxs[1]]\n phrases = self.get_caption_phrase(cap_idxs[0], cap_idxs[1])\n if self.args.extra_concept:\n od_labels = self.get_od_labels(img_key, cap_idxs[1])\n else:\n od_labels = self.get_od_labels(img_key)\n example = self.tensorize_example(caption, feature, text_b=od_labels, visual_theme=v_c, phrase_nodes=phrases)\n\n # select a negative pair\n if self.args.clip_neg_sampling and random.random() <= self.args.clip_neg_prob:\n if random.random() <= 0.5:\n caption_neg, phrases_neg = self.get_neg_txt(img_idx)\n example_neg = self.tensorize_example(caption_neg, feature, text_b=od_labels, visual_theme=v_c, phrase_nodes=phrases_neg)\n else:\n feature_neg, od_labels_neg, v_c_neg = self.get_neg_img(img_idx, cap_idxs[1])\n example_neg = self.tensorize_example(caption, feature_neg, text_b=od_labels_neg, visual_theme=v_c_neg, phrase_nodes=phrases)\n else:\n neg_img_indexs = list(range(0, img_idx)) + list(range(img_idx + 1, len(self.img_keys)))\n img_idx_neg = random.choice(neg_img_indexs)\n if random.random() <= 0.5:\n # randomly select a negative caption from a different image.\n cap_idx_neg = random.randint(0, self.num_captions_per_img - 1)\n caption_neg = self.captions[self.img_keys[img_idx_neg]][cap_idx_neg]\n phrases_neg = self.get_caption_phrase(self.img_keys[img_idx_neg], cap_idx_neg)\n example_neg = self.tensorize_example(caption_neg, feature, text_b=od_labels, visual_theme=v_c, phrase_nodes=phrases_neg)\n else:\n # randomly select a negative image \n feature_neg, v_c_neg = self.get_image(self.img_keys[img_idx_neg])\n od_labels_neg = self.get_od_labels(self.img_keys[img_idx_neg])\n example_neg = self.tensorize_example(caption, feature_neg, text_b=od_labels_neg, visual_theme=v_c_neg, phrase_nodes=phrases)\n\n example_pair = tuple(list(example) + [1] + list(example_neg) + [0])\n return index, example_pair\n else:\n img_idx, cap_idxs = self.get_image_caption_index(index)\n img_key = self.img_keys[img_idx]\n feature, v_c = self.get_image(img_key)\n caption = self.captions[cap_idxs[0]][cap_idxs[1]]\n phrases_node = self.get_caption_phrase(cap_idxs[0], cap_idxs[1])\n od_labels = self.get_od_labels(img_key)\n example = self.tensorize_example(caption, feature, text_b=od_labels, visual_theme=v_c, phrase_nodes=phrases_node)\n label = 1 if img_key == cap_idxs[0] else 0\n return index, tuple(list(example) + [label])\n\n def get_image(self, image_id):\n if self.ds_name != 'flickr':\n image_idx = self.image_id2idx[str(image_id)]\n row = self.img_tsv.seek(image_idx)\n num_boxes = int(row[1])\n features = np.frombuffer(base64.b64decode(row[-1]),\n dtype=np.float32).reshape((num_boxes, -1))\n if not features.flags['WRITEABLE']:\n features = np.copy(features)\n t_features = torch.from_numpy(features)\n else:\n t_features = self.img_feats[image_id]\n return t_features\n # theme_nodes = self.img2theme[self.ds_name+'_'+str(image_id)]\n # if len(theme_nodes) > self.args.max_visual_themes:\n # theme_nodes = theme_nodes[:self.args.max_visual_themes]\n # theme_nodes = [t[0]+self.tokenizer.vocab_size+self.phrase_vocab_size for t in theme_nodes]\n # return t_features, theme_nodes\n\n def get_caption_phrase(self, image_id, cap_id):\n if self.ds_name == 'flickr':\n phrase_nodes = [tuple(t) for t in self.sent_sgs[image_id][cap_id]]\n else:\n phrase_nodes = [tuple(t.split('_')) for t in self.sent_sgs[image_id][cap_id]]\n phrase_nodes = [self.sg2id[t] for t in phrase_nodes 
if t in self.sg2id]\n # if len(phrase_nodes) > self.args.max_phrases:\n # phrase_nodes = phrase_nodes[:self.args.max_phrases]\n return phrase_nodes\n\n def __len__(self):\n if not self.is_train and self.args.cross_image_eval:\n return len(self.img_keys) ** 2 * self.num_captions_per_img\n if not self.is_train and self.has_image_indexs:\n return self.num_images_per_cap * self.num_of_total_captions\n return len(self.img_keys) * self.num_captions_per_img\n\n\ndef compute_score_with_logits(logits, labels):\n if logits.shape[1] > 1:\n logits = torch.max(logits, 1)[1].data # argmax\n scores = logits == labels \n else:\n scores = torch.zeros_like(labels).cuda()\n for i, (logit, label) in enumerate(zip(logits, labels)):\n logit_ = torch.sigmoid(logit)\n if (logit_ >= 0.5 and label == 1) or (logit_ < 0.5 and label == 0):\n scores[i] = 1\n return scores\n\n\ndef compute_ranks(dataset, results):\n labels = np.array([dataset.get_label(i) for i in range(len(dataset))])\n similarities = np.array([results[i] for i in range(len(dataset))])\n if dataset.has_caption_indexs:\n num_captions_per_img = dataset.num_captions_per_img\n else:\n num_captions_per_img = len(dataset.img_keys) * dataset.num_captions_per_img\n labels = np.reshape(labels, [-1, num_captions_per_img])\n similarities = np.reshape(similarities, [-1, num_captions_per_img])\n\n i2t_ranks, t2i_ranks = [], []\n for lab, sim in zip(labels, similarities):\n inds = np.argsort(sim)[::-1]\n rank = num_captions_per_img\n for r, ind in enumerate(inds):\n if lab[ind] == 1:\n rank = r\n break\n i2t_ranks.append(rank)\n if not dataset.has_caption_indexs:\n labels = np.swapaxes(labels, 0, 1)\n similarities = np.swapaxes(similarities, 0, 1)\n for lab, sim in zip(labels, similarities):\n inds = np.argsort(sim)[::-1]\n rank = num_captions_per_img\n for r, ind in enumerate(inds):\n if lab[ind] == 1:\n rank = r\n break\n t2i_ranks.append(rank)\n return i2t_ranks, t2i_ranks\n\n\ndef compute_ranks_t2i(dataset, results):\n labels = np.array([dataset.get_label(i) for i in range(len(dataset))])\n similarities = np.array([results[i] for i in range(len(dataset))])\n assert dataset.has_image_indexs\n num_images_per_cap = dataset.num_images_per_cap\n labels = np.reshape(labels, [-1, num_images_per_cap])\n similarities = np.reshape(similarities, [-1, num_images_per_cap])\n t2i_ranks = []\n for lab, sim in zip(labels, similarities):\n inds = np.argsort(sim)[::-1]\n rank = num_images_per_cap\n for r, ind in enumerate(inds):\n if lab[ind] == 1:\n rank = r\n break\n t2i_ranks.append(rank)\n return t2i_ranks\n\n\ndef compute_ranks_coarse(dataset, similarities):\n i2t_ranks, t2i_ranks = [], []\n i2t_index = {}\n t2i_index = {}\n # i2t\n for i in range(similarities.shape[0]):\n tmp_index = []\n inds = np.argsort(similarities[i,:])[::-1]\n rank = similarities.shape[1]\n for r, ind in enumerate(inds):\n if ind >= i*dataset.args.num_captions_per_img_train and ind < (i+1)*dataset.args.num_captions_per_img_train:\n rank = r\n break\n i2t_ranks.append(rank)\n for r, ind in enumerate(inds):\n if r >= dataset.args.num_captions_per_img_val:\n break\n cap_img_index = ind // dataset.args.num_captions_per_img_train\n cap_cap_index = ind % dataset.args.num_captions_per_img_train\n tmp_index.append((dataset.img_keys[cap_img_index], cap_cap_index))\n\n i2t_index[dataset.img_keys[i]] = tmp_index\n\n # t2i\n for i in range(similarities.shape[1]):\n tmp_index = []\n inds = np.argsort(similarities[:,i])[::-1]\n rank = similarities.shape[0]\n cap_img_index = i // 
dataset.args.num_captions_per_img_train\n cap_cap_index = i % dataset.args.num_captions_per_img_train\n for r, ind in enumerate(inds):\n if ind == i//dataset.args.num_captions_per_img_train:\n rank = r\n break\n t2i_ranks.append(rank)\n for r, ind in enumerate(inds):\n if r >= dataset.args.num_images_per_cap_val:\n break\n tmp_index.append(ind)\n\n t2i_index[(dataset.img_keys[cap_img_index], cap_cap_index)] = tmp_index\n return i2t_ranks, t2i_ranks, i2t_index, t2i_index\n\n\ndef save_checkpoint(model, tokenizer, args, epoch, global_step):\n checkpoint_dir = op.join(args.output_dir, 'checkpoint-{}-{}'.format(\n epoch, global_step))\n mkdir(checkpoint_dir)\n model_to_save = model.module if hasattr(model, 'module') else model\n save_num = 0\n while (save_num < 10):\n try:\n model_to_save.save_pretrained(checkpoint_dir)\n torch.save(args, op.join(checkpoint_dir, 'training_args.bin'))\n tokenizer.save_pretrained(checkpoint_dir)\n logger.info(\"Save checkpoint to {}\".format(checkpoint_dir))\n break\n except:\n save_num += 1\n if save_num == 10:\n logger.info(\"Failed to save checkpoint after 10 trails.\")\n return\n\n\ndef train(args, train_dataset, val_dataset, model, tokenizer):\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) \n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, \n batch_size=args.train_batch_size, num_workers=args.num_workers)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // \\\n args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps \\\n * args.num_train_epochs\n\n # Prepare optimizer and scheduler\n no_decay = ['bias', 'LayerNorm.weight']\n grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not \\\n any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if \\\n any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = AdamW(grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n if args.scheduler == \"constant\":\n scheduler = WarmupConstantSchedule(\n optimizer, warmup_steps=args.warmup_steps)\n elif args.scheduler == \"linear\":\n scheduler = WarmupLinearSchedule(\n optimizer, warmup_steps=args.warmup_steps, t_total=t_total)\n else:\n raise ValueError(\"Unknown scheduler type: {}\".format(args.scheduler))\n\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\" Total train batch size (w. 
parallel, & accumulation) = %d\",\n args.train_batch_size * args.gradient_accumulation_steps)\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step, global_loss, global_acc =0, 0.0, 0.0\n global_r_loss, global_f_loss = 0.0, 0.0\n global_w_loss = 0.0\n model.zero_grad()\n log_json = []\n best_score = 0\n for epoch in range(int(args.num_train_epochs)):\n for step, (_, batch) in enumerate(train_dataloader):\n model.train()\n if hasattr(model, 'module'):\n model.module.forward_mod = 'train'\n else:\n model.forward_mod = 'train'\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\n 'input_ids_a': batch[0],\n 'attention_mask_a': batch[1],\n 'token_type_ids_a': batch[2],\n 'input_ids_b': batch[3],\n 'attention_mask_b': batch[4],\n 'token_type_ids_b': batch[5],\n 'img_feats': batch[6],\n 'max_tag_length': args.max_tag_length\n }\n if args.use_phrase:\n inputs.update({\n 'phrase_index': batch[7],\n 'img_index': batch[8],\n 'phrase_layer': args.phrase_layer\n })\n bs = batch[0].shape[0]\n outputs = model(**inputs)\n if args.use_phrase:\n loss, logits, r_loss, f_loss, pseudo_labels, wra_loss = outputs\n if args.n_gpu > 1:\n wra_loss = wra_loss.mean()\n wra_loss = wra_loss.item()\n else:\n loss, logits, r_loss, f_loss, pseudo_labels = outputs\n wra_loss = 0\n if args.n_gpu > 1: \n loss = loss.mean() # mean() to average on multi-gpu parallel training\n r_loss = r_loss.mean()\n f_loss = f_loss.mean()\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n # pseudo_labels = torch.cat([torch.ones(sim_mat.shape[0]), torch.zeros(bs)], dim=0).to(dtype=torch.long, device=logits.device)\n batch_score = compute_score_with_logits(logits, pseudo_labels).sum()\n batch_acc = batch_score.item() / (args.train_batch_size * 2)\n global_loss += loss.item()\n global_r_loss += r_loss.item()\n global_f_loss += f_loss.item()\n global_w_loss += wra_loss\n global_acc += batch_acc\n if (step + 1) % args.gradient_accumulation_steps == 0:\n global_step += 1\n scheduler.step()\n optimizer.step()\n model.zero_grad()\n if global_step % args.logging_steps == 0:\n logger.info(\"Epoch: {}, global_step: {}, lr: {:.6f}, loss: {:.4f} ({:.4f}), \" \\\n \"CLIP_loss: {:.4f} ({:.4f}), HN_loss: {:.4f} ({:.4f}), wra_loss: {:.4f} ({:.4f}), score: {:.4f} ({:.4f})\".format(epoch, global_step, \n optimizer.param_groups[0][\"lr\"], loss, global_loss / global_step, \n r_loss.item(), global_r_loss/global_step, f_loss.item(), global_f_loss/global_step, wra_loss, global_w_loss/global_step ,batch_acc, global_acc / global_step)\n )\n\n if (args.save_steps > 0 and global_step % args.save_steps == 0) or \\\n global_step == t_total:\n save_checkpoint(model, tokenizer, args, epoch, global_step) \n # evaluation\n if args.evaluate_during_training: \n logger.info(\"Perform evaluation at step: %d\" % (global_step))\n # only VSE retrieval\n coarse_sim = test_coarse(args, model, val_dataset)\n eval_result, caption_index, image_index = evaluate_coarse(val_dataset, coarse_sim)\n # caption index and image index\n eval_i2t_result, _ = test_fine_i2t(args, model, val_dataset, caption_index=caption_index)\n eval_t2i_result = test_fine_t2i(args, model, val_dataset, image_index=image_index)\n print('fine inference:')\n # print(eval_i2t_result, eval_t2i_result)\n eval_result = evaluate_fine(eval_i2t_result, eval_t2i_result)\n\n 
rank_accs = eval_result['i2t_retrieval']\n if rank_accs['R@1'] > best_score:\n best_score = rank_accs['R@1']\n epoch_log = {'epoch': epoch, 'global_step': global_step, \n 'R1': rank_accs['R@1'], 'R5': rank_accs['R@5'], \n 'R10': rank_accs['R@10'], 'best_R1':best_score}\n log_json.append(epoch_log)\n with open(args.output_dir + '/eval_logs.json', 'w') as fp:\n json.dump(log_json, fp) \n return global_step, global_loss / global_step\n\ndef prepare_inputs(inputs, args):\n for k, v in inputs.items():\n if isinstance(v, torch.Tensor):\n if inputs[k].dtype != torch.int64:\n # NLP models inputs are int64 and those get adjusted to the right dtype of the\n # embedding. Other models such as wav2vec2's inputs are already float and thus\n # may need special handling to match the dtypes of the model\n inputs[k]=v.to(dtype=args.dtype)\n return inputs\n\ndef test_coarse(args, model, eval_dataset):\n # 2 stage evaluation\n if hasattr(model, 'module'):\n model.module.forward_mod = 'coarse'\n else:\n model.forward_mod = 'coarse'\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n eval_dataset.unset_index()\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,\n batch_size=args.eval_batch_size, num_workers=args.num_workers)\n \n logger.info(\"Num examples = {}\".format(len(eval_dataset)))\n logger.info(\"Evaluation batch size = {}\".format(args.eval_batch_size))\n model.eval()\n results = {}\n softmax = nn.Softmax(dim=1)\n full_txt_emb = []\n full_img_emb = []\n for indexs, batch in tqdm(eval_dataloader):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n inputs = {\n 'input_ids_a': batch[0],\n 'attention_mask_a': batch[1],\n 'token_type_ids_a': batch[2],\n 'input_ids_b': batch[3],\n 'attention_mask_b': batch[4],\n 'token_type_ids_b': batch[5],\n 'img_feats': batch[6],\n 'max_tag_length': args.max_tag_length\n }\n inputs = prepare_inputs(inputs, args)\n global_txt, global_img = model(**inputs)[:2]\n full_txt_emb.append(global_txt)\n full_img_emb.append(global_img)\n with torch.no_grad():\n full_txt_emb = torch.cat(full_txt_emb, dim=0)\n full_img_emb = torch.cat(full_img_emb, dim=0)\n torch.save(full_txt_emb, '/opt/tiger/tmp_dir/txt_emb.pt')\n torch.save(full_img_emb, '/opt/tiger/tmp_dir/img_emb.pt')\n num_imgs = int(full_img_emb.shape[0] / args.num_captions_per_img_train)\n assert(full_img_emb.shape[0] % args.num_captions_per_img_train == 0)\n select_index = [i*args.num_captions_per_img_train for i in range(num_imgs)]\n full_img_emb = full_img_emb[select_index]\n full_sims = full_img_emb @ full_txt_emb.t()\n print(full_sims.shape)\n return full_sims.detach().cpu().numpy()\n\ndef test_fine_t2i(args, model, eval_dataset, image_index):\n # 2 stage evaluation\n if hasattr(model, 'module'):\n model.module.forward_mod = 'fine'\n else:\n model.forward_mod = 'fine'\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n eval_dataset.set_image_index(image_index)\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,\n batch_size=args.eval_batch_size, num_workers=args.num_workers)\n \n logger.info(\"Num examples = {}\".format(len(eval_dataset)))\n logger.info(\"Evaluation batch size = {}\".format(args.eval_batch_size))\n model.eval()\n results = {}\n softmax = nn.Softmax(dim=1)\n for indexs, batch in tqdm(eval_dataloader):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n inputs = {\n 'input_ids_a': 
batch[0],\n 'attention_mask_a': batch[1],\n 'token_type_ids_a': batch[2],\n 'input_ids_b': batch[3],\n 'attention_mask_b': batch[4],\n 'token_type_ids_b': batch[5],\n 'img_feats': batch[6],\n 'max_tag_length': args.max_tag_length\n }\n inputs = prepare_inputs(inputs, args)\n logits = model(**inputs)\n if args.num_labels == 2:\n probs = softmax(logits)\n result = probs[:, 1] # the confidence to be a matched pair\n else:\n result = logits\n result = [_.to(torch.device(\"cpu\")) for _ in result]\n results.update({idx.item(): res.item() for idx, res in zip(indexs, result)})\n return compute_ranks_t2i(eval_dataset, results)\n\n\ndef test_fine_i2t(args, model, eval_dataset, caption_index):\n # 2 stage evaluation\n if hasattr(model, 'module'):\n model.module.forward_mod = 'fine'\n else:\n model.forward_mod = 'fine'\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n eval_dataset.set_caption_index(caption_index)\n eval_sampler = SequentialSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler,\n batch_size=args.eval_batch_size, num_workers=args.num_workers)\n \n logger.info(\"Num examples = {}\".format(len(eval_dataset)))\n logger.info(\"Evaluation batch size = {}\".format(args.eval_batch_size))\n model.eval()\n results = {}\n softmax = nn.Softmax(dim=1)\n for indexs, batch in tqdm(eval_dataloader):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n inputs = {\n 'input_ids_a': batch[0],\n 'attention_mask_a': batch[1],\n 'token_type_ids_a': batch[2],\n 'input_ids_b': batch[3],\n 'attention_mask_b': batch[4],\n 'token_type_ids_b': batch[5],\n 'img_feats': batch[6],\n 'max_tag_length': args.max_tag_length\n }\n inputs = prepare_inputs(inputs, args)\n logits = model(**inputs)\n # print(logits.shape)\n if args.num_labels == 2:\n probs = softmax(logits)\n result = probs[:, 1] # the confidence to be a matched pair\n else:\n result = logits\n result = [_.to(torch.device(\"cpu\")) for _ in result]\n # print(indexs)\n results.update({idx.item(): res.item() for idx, res in zip(indexs, result)})\n return compute_ranks(eval_dataset,results)\n\n\n\ndef evaluate(eval_dataset, test_results):\n i2t_ranks, t2i_ranks = compute_ranks(eval_dataset, test_results)\n rank = [1, 5, 10]\n i2t_accs = [sum([_ < r for _ in i2t_ranks]) / len(i2t_ranks) for r in rank]\n logger.info(\"I2T Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10\".format(\n i2t_accs[0], i2t_accs[1], i2t_accs[2]))\n eval_result = {\"i2t_retrieval\": {\"R@1\": i2t_accs[0], \"R@5\": i2t_accs[1], \"R@10\": i2t_accs[2]}}\n if t2i_ranks:\n t2i_accs = [sum([_ < r for _ in t2i_ranks]) / len(t2i_ranks) for r in rank]\n logger.info(\"T2I Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10\".format(\n t2i_accs[0], t2i_accs[1], t2i_accs[2]))\n eval_result[\"t2i_retrieval\"] = {\"R@1\": t2i_accs[0], \"R@5\": t2i_accs[1], \"R@10\": t2i_accs[2]}\n return eval_result\n\ndef evaluate_fine(i2t_ranks, t2i_ranks):\n # i2t_ranks, t2i_ranks = compute_ranks(eval_dataset, test_results)\n rank = [1, 5, 10]\n i2t_accs = [sum([_ < r for _ in i2t_ranks]) / len(i2t_ranks) for r in rank]\n logger.info(\"I2T Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10\".format(\n i2t_accs[0], i2t_accs[1], i2t_accs[2]))\n eval_result = {\"i2t_retrieval\": {\"R@1\": i2t_accs[0], \"R@5\": i2t_accs[1], \"R@10\": i2t_accs[2]}}\n if t2i_ranks:\n t2i_accs = [sum([_ < r for _ in t2i_ranks]) / len(t2i_ranks) for r in rank]\n logger.info(\"T2I Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10\".format(\n t2i_accs[0], 
t2i_accs[1], t2i_accs[2]))\n eval_result[\"t2i_retrieval\"] = {\"R@1\": t2i_accs[0], \"R@5\": t2i_accs[1], \"R@10\": t2i_accs[2]}\n return eval_result\n\n\ndef evaluate_coarse(eval_dataset, test_results):\n i2t_ranks, t2i_ranks, caption_index, image_index = compute_ranks_coarse(eval_dataset, test_results)\n rank = [1, 5, 10]\n i2t_accs = [sum([_ < r for _ in i2t_ranks]) / len(i2t_ranks) for r in rank]\n logger.info(\"I2T Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10\".format(\n i2t_accs[0], i2t_accs[1], i2t_accs[2]))\n eval_result = {\"i2t_retrieval\": {\"R@1\": i2t_accs[0], \"R@5\": i2t_accs[1], \"R@10\": i2t_accs[2]}}\n if t2i_ranks:\n t2i_accs = [sum([_ < r for _ in t2i_ranks]) / len(t2i_ranks) for r in rank]\n logger.info(\"T2I Retrieval: {:.4f} @ R1, {:.4f} @ R5, {:.4f} @ R10\".format(\n t2i_accs[0], t2i_accs[1], t2i_accs[2]))\n eval_result[\"t2i_retrieval\"] = {\"R@1\": t2i_accs[0], \"R@5\": t2i_accs[1], \"R@10\": t2i_accs[2]}\n return eval_result, caption_index, image_index\n\n\ndef get_predict_file(args):\n cc = []\n data = op.basename(op.join(args.data_dir, '')[:-1])\n if data != 'coco_ir':\n cc.append(data)\n cc.append(args.test_split)\n if args.add_od_labels:\n cc.append('wlabels{}'.format(args.od_label_type))\n return op.join(args.eval_model_dir, '{}.results.pt'.format('.'.join(cc))) \n\n\ndef restore_training_settings(args):\n assert not args.do_train and (args.do_test or args.do_eval)\n train_args = torch.load(op.join(args.eval_model_dir, 'training_args.bin'))\n override_params = ['do_lower_case', 'img_feature_type', 'max_seq_length', \n 'max_img_seq_length', 'add_od_labels', 'od_label_type',\n 'use_img_layernorm', 'img_layer_norm_eps']\n for param in override_params:\n if hasattr(train_args, param):\n train_v = getattr(train_args, param)\n test_v = getattr(args, param)\n if train_v != test_v:\n logger.warning('Override {} with train args: {} -> {}'.format(param,\n test_v, train_v))\n setattr(args, param, train_v)\n return args\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data_dir\", default='datasets/coco_ir', type=str, required=False,\n help=\"The input data dir with all required files.\")\n parser.add_argument(\"--img_feat_file\", default='datasets/coco_ir/features.tsv', type=str, required=False,\n help=\"The absolute address of the image feature file.\")\n parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=False,\n help=\"Path to pre-trained model or model type. required for training.\")\n parser.add_argument(\"--output_dir\", default='output/', type=str, required=False,\n help=\"The output directory to save checkpoint and test results.\")\n parser.add_argument(\"--loss_type\", default='sfmx', type=str, \n help=\"Loss function types: support kl, sfmx\")\n parser.add_argument(\"--config_name\", default=\"\", type=str, \n help=\"Pretrained config name or path if not the same as model_name.\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str, \n help=\"Pretrained tokenizer name or path if not the same as model_name.\")\n parser.add_argument(\"--max_seq_length\", default=70, type=int,\n help=\"The maximum total input sequence length after tokenization. 
\"\n \"Sequences longer than this will be truncated, \"\n \"sequences shorter will be padded.\"\n \"This number is calculated on COCO dataset\" \n \"If add object detection labels, the suggested length should be 70.\")\n parser.add_argument(\"--do_train\", action='store_true', help=\"Whether to run training.\")\n parser.add_argument(\"--do_test\", action='store_true', help=\"Whether to run inference.\")\n parser.add_argument(\"--do_eval\", action='store_true', help=\"Whether to run performance valuation.\"\n \"do not activate if we want to inference on dataset without gt labels.\")\n parser.add_argument(\"--test_split\", default='test', type=str, help='data split name.')\n parser.add_argument(\"--eval_img_keys_file\", default='', type=str, \n help=\"image key tsv to select a subset of images for evaluation. \"\n \"This is useful in 5-folds evaluation. The topn index file is not \" \n \"needed in this case.\")\n parser.add_argument(\"--eval_caption_index_file\", default='', type=str, \n help=\"index of a list of (img_key, cap_idx) for each image.\"\n \"this is used to perform re-rank using hard negative samples.\"\n \"useful for validation set to monitor the performance during training.\")\n parser.add_argument(\"--cross_image_eval\", action='store_true', \n help=\"perform cross image inference, ie. each image with all texts from other images.\")\n parser.add_argument(\"--add_od_labels\", default=False, action='store_true', \n help=\"Whether to add object detection labels or not.\")\n parser.add_argument(\"--od_label_type\", default='vg', type=str, \n help=\"label type, support vg, gt, oid\")\n parser.add_argument(\"--att_mask_type\", default='CLR', type=str, \n help=\"attention mask type, support ['CL', 'CR', 'LR', 'CLR']\"\n \"C: caption, L: labels, R: image regions; CLR is full attention by default.\"\n \"CL means attention between caption and labels.\"\n \"please pay attention to the order CLR, which is the default concat order.\")\n parser.add_argument(\"--do_lower_case\", action='store_true', \n help=\"Set this flag if you are using an uncased model.\")\n parser.add_argument(\"--drop_out\", default=0.1, type=float, help=\"Drop out in BERT.\")\n parser.add_argument(\"--max_img_seq_length\", default=50, type=int, \n help=\"The maximum total input image sequence length.\")\n parser.add_argument(\"--img_feature_dim\", default=2054, type=int, \n help=\"The Image Feature Dimension.\")\n parser.add_argument(\"--img_feature_type\", default='frcnn', type=str,\n help=\"Image feature type.\")\n parser.add_argument(\"--use_img_layernorm\", type=int, default=1,\n help=\"Normalize image features with bertlayernorm\")\n parser.add_argument(\"--img_layer_norm_eps\", default=1e-12, type=float,\n help=\"The eps in image feature laynorm layer\")\n parser.add_argument(\"--per_gpu_train_batch_size\", default=32, type=int, \n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--per_gpu_eval_batch_size\", default=64, type=int, \n help=\"Batch size per GPU/CPU for evaluation.\")\n parser.add_argument(\"--output_mode\", default='classification', type=str,\n help=\"output mode, support classification or regression.\")\n parser.add_argument(\"--num_labels\", default=2, type=int, \n help=\"num_labels is 2 for classification and 1 for regression.\")\n parser.add_argument(\"--num_captions_per_img_train\", default=5, type=int,\n help=\"number of positive matched captions for each training image.\")\n parser.add_argument(\"--num_captions_per_img_val\", default=5, type=int,\n help=\"number of 
captions for each testing image.\")\n parser.add_argument('--num_images_per_cap_val', type=int, default=128)\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before backward.\")\n parser.add_argument(\"--learning_rate\", default=2e-5, type=float, help=\"The initial lr.\")\n parser.add_argument(\"--weight_decay\", default=0.05, type=float, help=\"Weight deay.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup.\")\n parser.add_argument(\"--scheduler\", default='linear', type=str, help=\"constant or linear.\")\n parser.add_argument(\"--num_workers\", default=4, type=int, help=\"Workers in dataloader.\")\n parser.add_argument(\"--num_train_epochs\", default=20, type=int, \n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--max_steps\", default=-1, type=int, \n help=\"Total number of training steps. Override num_train_epochs.\")\n parser.add_argument('--logging_steps', type=int, default=20, help=\"Log every X steps.\")\n parser.add_argument('--save_steps', type=int, default=-1, \n help=\"Save checkpoint every X steps. Will also perform evaluatin.\")\n parser.add_argument(\"--evaluate_during_training\", action='store_true', \n help=\"Run evaluation during training at each save_steps.\")\n parser.add_argument(\"--eval_model_dir\", type=str, default='', \n help=\"Model directory for evaluation.\")\n parser.add_argument(\"--no_cuda\", action='store_true', help=\"Avoid using CUDA.\")\n parser.add_argument('--seed', type=int, default=88, help=\"random seed for initialization.\")\n parser.add_argument('--extra_concept', action='store_true', help=\"Whether to add more related concepts from the concept graph.\")\n parser.add_argument('--num_extra_concept', type=int, default=5, help=\"Number of extra concapts added\")\n parser.add_argument('--devices', type=str, default='0,1,2,3,4,5,6,7', help=\"Which GPUs to use\")\n parser.add_argument('--half_evaluation', action='store_true', help='Whther to use half precision for evaluation')\n parser.add_argument('--dataset_name', type=str, default='flickr', help='which dataset is using')\n parser.add_argument('--max_phrases', type=int, default=5)\n parser.add_argument('--sent_sg_json', type=str, default=None)\n parser.add_argument('--id2node', type=str, default=None)\n parser.add_argument('--clip_neg_sampling', action='store_true')\n parser.add_argument('--clip_neg_prob', type=float, default=0.4)\n parser.add_argument('--max_tag_length', type=int, default=20)\n parser.add_argument('--phrase_layer', type=int, default=2)\n parser.add_argument('--use_phrase', action='store_true')\n parser.add_argument('--no_itm', action='store_true')\n parser.add_argument('--eval_all_checkpoints', action='store_true')\n args = parser.parse_args()\n\n global logger\n mkdir(args.output_dir)\n logger = setup_logger(\"vlpretrain\", args.output_dir, 0)\n\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0,1'\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n set_seed(args.seed, args.n_gpu)\n logger.warning(\"Device: %s, n_gpu: %s\", args.device, args.n_gpu)\n logger.info('output_mode: {}, #Labels: {}'.format(args.output_mode, args.num_labels))\n \n config_class, 
tokenizer_class = BertConfig, BertTokenizer\n model_class = BiImageBertForRetrieval\n if args.do_train:\n config = config_class.from_pretrained(args.config_name if args.config_name else \\\n args.model_name_or_path, num_labels=args.num_labels, finetuning_task='ir')\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name \\\n else args.model_name_or_path, do_lower_case=args.do_lower_case)\n config.img_feature_dim = args.img_feature_dim\n config.img_feature_type = args.img_feature_type\n config.hidden_dropout_prob = args.drop_out\n config.loss_type = args.loss_type\n config.img_layer_norm_eps = args.img_layer_norm_eps\n config.use_img_layernorm = args.use_img_layernorm\n model = model_class.from_pretrained(args.model_name_or_path, \n from_tf=bool('.ckpt' in args.model_name_or_path), config=config)\n args.dtype = torch.float32\n else:\n checkpoint = args.eval_model_dir\n assert op.isdir(checkpoint)\n config = config_class.from_pretrained(checkpoint)\n tokenizer = tokenizer_class.from_pretrained(checkpoint)\n logger.info(\"Evaluate the following checkpoint: %s\", checkpoint)\n model = model_class.from_pretrained(checkpoint, config=config)\n if args.half_evaluation:\n model = model.half()\n args.dtype = torch.float16\n else:\n args.dtype = torch.float32\n\n model.to(args.device)\n logger.info(\"Training/evaluation parameters %s\", args)\n if args.do_train:\n train_dataset = RetrievalDataset(tokenizer, args, 'train', is_train=True)\n if args.evaluate_during_training:\n if 'coco_ir' not in args.data_dir:\n val_split = 'val'\n else:\n val_split = 'minival'\n val_dataset = RetrievalDataset(tokenizer, args, val_split, is_train=False)\n else:\n val_dataset = None\n global_step, avg_loss = train(args, train_dataset, val_dataset, model, tokenizer)\n logger.info(\"Training done: total_step = %s, avg loss = %s\", global_step, avg_loss)\n\n # inference and evaluation\n if args.do_test or args.do_eval:\n if args.eval_all_checkpoints:\n checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))\n args = restore_training_settings(args)\n test_dataset = RetrievalDataset(tokenizer, args, args.test_split, is_train=False)\n for checkpoint in checkpoints:\n assert op.isdir(checkpoint)\n logger.info(\"Evaluate the following checkpoint: %s\", checkpoint)\n model = model_class.from_pretrained(checkpoint, config=config)\n if args.half_evaluation:\n model = model.half()\n args.dtype = torch.float16\n else:\n args.dtype = torch.float32\n model.to(args.device)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n #pred_file = get_predict_file(args)\n # if op.isfile(pred_file):\n # logger.info(\"Prediction file exist, skip inference.\")\n # if args.do_eval:\n # test_result = torch.load(pred_file)\n # else:\n # test_result = test(args, model, test_dataset)\n # torch.save(test_result, pred_file)\n # logger.info(\"Prediction results saved to {}.\".format(pred_file))\n\n coarse_sim = test_coarse(args, model, test_dataset)\n eval_result, caption_index, image_index = evaluate_coarse(test_dataset, coarse_sim)\n # caption index and image index\n eval_i2t_result, _ = test_fine_i2t(args, model, test_dataset, caption_index=caption_index)\n eval_t2i_result = test_fine_t2i(args, model, test_dataset, image_index=image_index)\n print('fine inference:')\n # print(eval_i2t_result, eval_t2i_result)\n if args.do_eval:\n eval_result = evaluate_fine(eval_i2t_result, eval_t2i_result)\n # result_file = op.splitext(pred_file)[0] + 
'.eval.json'\n result_file = op.join(checkpoint, 'test_eval.json')\n with open(result_file, 'w') as f:\n json.dump(eval_result, f)\n logger.info(\"Evaluation results saved to {}.\".format(result_file))\n else:\n args = restore_training_settings(args)\n test_dataset = RetrievalDataset(tokenizer, args, args.test_split, is_train=False)\n checkpoint = args.eval_model_dir\n assert op.isdir(checkpoint)\n logger.info(\"Evaluate the following checkpoint: %s\", checkpoint)\n model = model_class.from_pretrained(checkpoint, config=config)\n if args.half_evaluation:\n model = model.half()\n args.dtype = torch.float16\n else:\n args.dtype = torch.float32\n model.to(args.device)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n pred_file = get_predict_file(args)\n # if op.isfile(pred_file):\n # logger.info(\"Prediction file exist, skip inference.\")\n # if args.do_eval:\n # test_result = torch.load(pred_file)\n # else:\n # test_result = test(args, model, test_dataset)\n # torch.save(test_result, pred_file)\n # logger.info(\"Prediction results saved to {}.\".format(pred_file))\n\n coarse_sim = test_coarse(args, model, test_dataset)\n eval_result, caption_index, image_index = evaluate_coarse(test_dataset, coarse_sim)\n # caption index and image index\n eval_i2t_result, _ = test_fine_i2t(args, model, test_dataset, caption_index=caption_index)\n eval_t2i_result = test_fine_t2i(args, model, test_dataset, image_index=image_index)\n print('fine inference:')\n # print(eval_i2t_result, eval_t2i_result)\n if args.do_eval:\n eval_result = evaluate_fine(eval_i2t_result, eval_t2i_result)\n result_file = op.splitext(pred_file)[0] + '.eval.json'\n with open(result_file, 'w') as f:\n json.dump(eval_result, f)\n logger.info(\"Evaluation results saved to {}.\".format(result_file))\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"torch.nn.Softmax",
"torch.max",
"torch.load",
"torch.cat",
"torch.zeros",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.save",
"numpy.swapaxes",
"numpy.reshape",
"torch.from_numpy",
"torch.tensor",
"numpy.copy",
"torch.sigmoid",
"torch.zeros_like",
"numpy.argsort",
"torch.cuda.device_count",
"numpy.array",
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.nn.DataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Danish-VSL/deep-person-reid | [
"2e3a4b6706b84c77203f9905683b917ab0871b93"
] | [
"torchreid/losses/sa_loss.py"
] | [
"import torch\nimport os\n\n\ndef sa_loss(features_dict):\n\n if os.environ.get('sa'):\n layer3, layer4_1, layer4_2 = features_dict['layers']\n\n layer3 = torch.norm(layer3, dim=1, p=2) ** 2 / 1024\n layer3 = layer3.view(layer3.size(0), -1)\n layer4_1 = torch.norm(layer4_1, dim=1, p=2) ** 2 / 2048\n layer4_1 = layer4_1.view(layer4_1.size(0), -1)\n # layer4_2 = torch.norm(layer4_2, dim=1, p=2) ** 2 / 2048\n # layer4_2 = layer4_2.view(layer4_2.size(0), -1)\n\n as_loss = (((layer3 - layer4_1) ** 2).sum()) * .1\n print(as_loss)\n else:\n as_loss = 0.\n\n return as_loss\n"
] | [
[
"torch.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tsyploff/tsyplov_stats | [
"29126d494bd846a9d40357c59c5c1751d006bc65"
] | [
"tsyplov_stats/validation.py"
] | [
"\r\nimport numpy as np\r\nfrom tsyplov_stats.wolfram_functions import *\r\n\r\n\r\ndef mean_square_error(x, y):\r\n\treturn np.mean((x - y)**2)\r\n\r\ndef root_mean_square_error(x, y):\r\n\treturn np.sqrt(np.mean((x - y)**2))\r\n\r\ndef mean_absolute_error(x, y):\r\n\treturn np.mean(np.abs(x - y))\r\n\r\ndef time_series_split(ts, n, k):\r\n '''Gives the list of k pairs (train, test), where \r\n len(test) == n; len(train) >= len(test)'''\r\n return [take_drop(ts[:n + i], i) for i in range(len(ts) - n, len(ts) - (k + 1)*n, -n)][::-1]\r\n \r\ndef cross_val_score(model, metric, ts_split):\r\n '''Gives the self.metric score on each test in time seiries split\r\n using self.model fitted on train\r\n '''\r\n h = len(ts_split[0][1])\r\n result = list()\r\n model_ = model.reset_to_default()\r\n\r\n for train, test in ts_split:\r\n forecast = model_.fit(train).predict(h)\r\n result.append(metric(forecast, test))\r\n\r\n return np.array(result)\r\n"
] | [
[
"numpy.array",
"numpy.mean",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
juliaprocess/ml_libs | [
"52cac5d64b55a12dfbdad1c768cdd8d79d5789f5"
] | [
"kernel_herding_subsampling/lib/sort_samples_into_classes.py"
] | [
"#!/usr/bin/env python\n#\tWritten by Chieh Wu\n\nimport numpy as np\nimport sklearn.metrics\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import OneHotEncoder\n\n\nnp.set_printoptions(precision=4)\nnp.set_printoptions(linewidth=300)\nnp.set_printoptions(suppress=True)\n\nclass sort_samples_into_class:\n\t# sample_num, the larger the better approximation\n\tdef __init__(self, X, Y):\n\t\tself.X = X\n\t\tself.Y = Y\n\t\tself.d = X.shape[1]\n\t\tself.n = X.shape[0]\n\n\t\tif type(Y) == type([]): self.Y = np.array(Y)\n\t\tself.Y = np.reshape(self.Y,(len(Y),1))\n\t\tself.Yₒ = OneHotEncoder(categories='auto', sparse=False).fit_transform(self.Y)\n\t\tself.c = self.Yₒ.shape[1]\n\n\t\tself.X_list = {}\n\t\tself.Y_list = {}\n\t\tself.Yₒ_list = {}\n\n\t\tself.l = np.unique(Y)\n\t\tfor i in self.l:\n\t\t\tindices = np.where(Y == i)[0]\n\t\t\tself.X_list[i] = X[indices, :]\n\t\t\tself.Y_list[i] = self.Y[indices]\n\t\t\tself.Yₒ_list[i] = self.Yₒ[indices, :]\n\n\n\n\n\nif __name__ == \"__main__\":\n\tX = np.array([[1,1],[2,2],[3,3],[4,4],[5,5],[6,6],[7,7],[8,8],[9,9],[10,10]])\n\tY = np.array([0,0,0,0,0,1,1,1,1,1])\n\n\n\tsortS = sort_samples_into_class(X,Y)\n\tprint(sortS.X_list.keys())\n\n"
] | [
[
"numpy.unique",
"numpy.set_printoptions",
"sklearn.preprocessing.OneHotEncoder",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
arijit-s/CarND-Semantic-Segmentation | [
"dbcb2a3b9aae03dd74eb567e834a5c9eef3456d4"
] | [
"main.py"
] | [
"import os.path\nimport tensorflow as tf\nimport helper\nimport warnings\nfrom distutils.version import LooseVersion\nimport project_tests as tests\n\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. Please use a GPU to train your neural network.')\nelse:\n print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))\n\n\ndef load_vgg(sess, vgg_path):\n \"\"\"\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing \"variables/\" and \"saved_model.pb\"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n \"\"\"\n # TODO: Implement function\n # Use tf.saved_model.loader.load to load the model and weights\n vgg_tag = 'vgg16'\n vgg_input_tensor_name = 'image_input:0'\n vgg_keep_prob_tensor_name = 'keep_prob:0'\n vgg_layer3_out_tensor_name = 'layer3_out:0'\n vgg_layer4_out_tensor_name = 'layer4_out:0'\n vgg_layer7_out_tensor_name = 'layer7_out:0'\n \n \n tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)\n graph = tf.get_default_graph()\n w1 = graph.get_tensor_by_name(vgg_input_tensor_name)\n keep = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)\n l3 = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)\n l4 = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)\n l7 = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)\n \n \n return w1, keep, l3, l4, l7\ntests.test_load_vgg(load_vgg, tf)\n\n\ndef layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n \"\"\"\n Create the layers for a fully convolutional network. 
Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n \"\"\"\n # TODO: Implement function\n conv_1x1_layer7 = tf.layers.conv2d(vgg_layer7_out,num_classes, 1, padding='same',kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n output_layer7 = tf.layers.conv2d_transpose(conv_1x1_layer7,num_classes, 4, 2, padding='same',kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n conv_1x1_layer4 = tf.layers.conv2d(vgg_layer4_out,num_classes, 1, padding='same',kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n skip_layer4 = tf.add(output_layer7, conv_1x1_layer4)\n output_layer4 = tf.layers.conv2d_transpose(skip_layer4,num_classes, 4, 2, padding='same',kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n\n conv_1x1_layer3 = tf.layers.conv2d(vgg_layer3_out,num_classes, 1, padding='same',kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n skip_layer3 = tf.add(output_layer4, conv_1x1_layer3)\n output_layer4 = tf.layers.conv2d_transpose(skip_layer3,num_classes, 16, 8, padding='same',kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))\n \n \n #Add other VGG layers\n \n return output_layer4\ntests.test_layers(layers)\n\n\ndef optimize(nn_last_layer, correct_label, learning_rate, num_classes):\n \"\"\"\n Build the TensorFLow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n \"\"\"\n # TODO: Implement function\n logits = tf.reshape(nn_last_layer,(-1,num_classes))\n correct_label = tf.reshape(correct_label, (-1,num_classes))\n\n cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits,labels = correct_label))\n \n train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)\n \n return logits, train_op, cross_entropy_loss\ntests.test_optimize(optimize)\n\n\ndef train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate):\n \"\"\"\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. 
Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n \"\"\"\n print(\"Initializing Variables\")\n sess.run(tf.global_variables_initializer())\n \n print(\"Starting training\");\n # TODO: Implement function\n for epoch in range(epochs):\n print(\"Runnning epoch:\",epoch)\n for image,label in get_batches_fn(batch_size):\n _, loss = sess.run([train_op, cross_entropy_loss], \n feed_dict={input_image: image, correct_label: label, keep_prob: 0.5, learning_rate: 0.0001})\n print(\"Loss: = {:.3f}\".format(loss))\n\n \n \ntests.test_train_nn(train_nn)\n\n\ndef run():\n num_classes = 2\n image_shape = (160, 576)\n data_dir = './data'\n runs_dir = './runs'\n tests.test_for_kitti_dataset(data_dir)\n\n # Download pretrained vgg model\n helper.maybe_download_pretrained_vgg(data_dir)\n\n # OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.\n # You'll need a GPU with at least 10 teraFLOPS to train on.\n # https://www.cityscapes-dataset.com/\n\n with tf.Session() as sess:\n # Path to vgg model\n vgg_path = os.path.join(data_dir, 'vgg')\n # Create function to get batches\n get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)\n\n # OPTIONAL: Augment Images for better results\n # https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network\n \n correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n \n epochs = 2\n batch_size = 2\n\n # TODO: Build NN using load_vgg, layers, and optimize function\n input_image,keep_prob,layer3_out,layer4_out,layer7_out = load_vgg(sess,vgg_path)\n layer_output = layers(layer3_out,layer4_out,layer7_out,num_classes)\n\n # TODO: Train NN using the train_nn function\n logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)\n \n train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,\n correct_label, keep_prob, learning_rate)\n\n\n # TODO: Save inference data using helper.save_inference_samples\n # helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)\n\n\n # OPTIONAL: Apply the trained model to a video\n\n\nif __name__ == '__main__':\n run()\n"
] | [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.test.gpu_device_name",
"tensorflow.reshape",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.add",
"tensorflow.Session",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.train.AdamOptimizer",
"tensorflow.get_default_graph",
"tensorflow.saved_model.loader.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
XanTiuM-Dev/kiwi-dev | [
"1daf51aaf289d37392a0e1da0d86c020f7f3676a"
] | [
"AI/Python/AI-Object-Recognition/test.py"
] | [
"from imutils.video import VideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--prototxt\", required=True,\n\thelp=\"path to Caffe 'deploy' prototxt file\")\nap.add_argument(\"-m\", \"--model\", required=True,\n\thelp=\"path to Caffe pre-trained model\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.2,\n\thelp=\"minimum probability to filter weak predictions\")\nargs = vars(ap.parse_args())\n\nCLASSES = [\"aeroplane\", \"background\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n \"sofa\", \"train\", \"tvmonitor\"]\n\nCOLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))\n\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\nprint(\"[INFO] starting video stream...\")\nvs = VideoStream(src=0).start()\nfps = FPS().start()\n\nwhile True:\n\n\tframe = vs.read()\n\tframe = imutils.resize(frame, width=1080)\n\tprint(frame.shape) \n\t(h, w) = frame.shape[:2]\n\tresized_image = cv2.resize(frame, (300, 300))\n\tblob = cv2.dnn.blobFromImage(resized_image, (1/127.5), (300, 300), 127.5, swapRB=True)\n\tnet.setInput(blob)\n\tpredictions = net.forward()\n\n\tfor i in np.arange(0, predictions.shape[2]):\n\n\t\tconfidence = predictions[0, 0, i, 2]\n\t\tif confidence > args[\"confidence\"]:\n\t\t\tidx = int(predictions[0, 0, i, 1])\n\t\t\tbox = predictions[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\tlabel = \"{}: {:.2f}%\".format(CLASSES[idx], confidence * 100)\n\t\t\tprint(\"Object detected: \", label)\n\t\t\tcv2.rectangle(frame, (startX, startY), (endX, endY),\n\t\t\t\tCOLORS[idx], 2)\n\t\t\ty = startY - 15 if startY - 15 > 15 else startY + 15\n\t\t\tcv2.putText(frame, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)\n\n\tcv2.imshow(\"Frame\", frame)\n\n\tkey = cv2.waitKey(1) & 0xFF\n\n\tif key == ord(\"q\"):\n\t\tbreak\n\n\tfps.update()\n\nfps.stop()\n\nprint(\"[INFO] Elapsed Time: {:.2f}\".format(fps.elapsed()))\nprint(\"[INFO] Approximate FPS: {:.2f}\".format(fps.fps()))\n\ncv2.destroyAllWindows()\nvs.stop()\n"
] | [
[
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
spectre-team/DiviK-standalone-client | [
"bda3de8e1781216d6e249dfb34a63f3fbea99cd7"
] | [
"src/Python/divik2spectre.py"
] | [
"\"\"\"Convert MATLAB data file into Spectre txt format\n\ndivik2spectre.py\nConverts MATLAB data file into Spectre txt format\n\nCopyright 2017 Spectre Team\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n================================================================================\n\nExpected content of input data file:\n\na. m x n matrix data - contains m observations (measurement spots) described by\n n features (mass channels or convolutions of subsequent components\nb. m x 2 matrix xy - for m observations contains X coordinate in first column\n and Y coordinate in second column\nc. m x 1 matrix region - describes distinct biological structures; contains all\n numbers from 1 to p, where p is the number of distinct biological structures\nd. p x 1 cell array region_names - content of i-th cell contains biological\n interpretation of region with label i in vector region\ne. 1 x n matrix mz - contains m/z properties of mass channels or component means\n\n\"\"\"\n\nimport argparse as agp\nimport h5py\nimport numpy as np\nimport scipy.io as scio\nfrom tqdm import tqdm\nfrom typing import List\n\n\ndef parse_args():\n \"\"\"Parses command line arguments, requiring source and destination paths\"\"\"\n parser = agp.ArgumentParser(description=\"Convert DiviK data source mat-file\"\n \" into Spectre's data format\")\n parser.add_argument(dest=\"source\", help=\"Input MATLAB file\")\n parser.add_argument(dest=\"destination\", help=\"Output txt file\")\n args = parser.parse_args()\n if not args.destination.endswith('.txt'):\n args.destination = args.destination + '.txt'\n return args\n\n\nCONST_Z_COORDINATE = 0\n\n\ndef _serialize_entry(spectrum, location, region):\n \"\"\"Serializes single spectrum with metadata\n\n :param spectrum: single spectrum values\n :param location: 2D coordinates of the spectrum\n :param region: assignment of spectrum to a Region-of-Interest\n :return: text file entry\n \"\"\"\n return '\\n'.join([\n ' '.join(map(str, [*location, CONST_Z_COORDINATE, region])),\n ' '.join(map(str, spectrum))\n ])\n\n\nclass DivikDataSerializer(object):\n \"\"\"Serializes DiviK data into Spectre txt format\"\"\"\n def __init__(self, data, xy, mz, region, region_names):\n self.data = data\n self.xy = xy\n self.mz = mz.reshape((-1,))\n self.region = region.reshape((-1))\n self.region_names = region_names\n self._serialized = None\n\n def __str__(self):\n if self._serialized is None:\n header = '\\n'.join([\n 'region names: ' + ', '.join(self.region_names),\n ' '.join(map(str, self.mz))\n ])\n self._serialized = '\\n'.join([\n header,\n *[_serialize_entry(spectrum, location, region)\n for spectrum, location, region\n in zip(tqdm(self.data, desc='Spectrum'), self.xy, self.region)]\n ])\n return self._serialized\n\n\ndef _parse_hdf5_cell_array(object_reference, matfile) -> List[str]:\n \"\"\"Parses MATLAB cell array of character arrays from hdf5 file\n\n :param object_reference: cell array in the file\n :param matfile: mat-file parsed\n :return: list of strings\n \"\"\"\n data = []\n for column in 
object_reference:\n row_data = []\n for row_number in range(len(column)):\n row_data.append(''.join(map(chr, matfile[column[row_number]][:])))\n data.append(row_data)\n data = list(np.transpose(data)[0])\n return data\n\n\ndef read_hdf5(path: str) -> DivikDataSerializer:\n \"\"\"Parses hdf5 mat-file with DiviK data into serializer\"\"\"\n matfile = h5py.File(path, 'r')\n region_names = _parse_hdf5_cell_array(matfile['region_names'], matfile)\n data = np.transpose(matfile['data'])\n xy = np.transpose(matfile['xy']).astype(int)\n assert data.shape[0] == xy.shape[0]\n assert xy.shape[1] == 2\n region = np.transpose(matfile['region']).astype(int)\n assert region.size == data.shape[0]\n if 'mz' not in matfile:\n print('M/Z array not found, creating fake...')\n mz = np.arange(data.shape[1])\n else:\n mz = np.transpose(matfile['mz'])\n assert mz.size == data.shape[1]\n return DivikDataSerializer(data, xy, mz, region, region_names)\n\n\ndef read_matfile(path: str) -> DivikDataSerializer:\n \"\"\"Parses v7.0 >= mat-file with DiviK data into serializer\"\"\"\n matfile = scio.loadmat(path)\n region_names = [name_table[0] for name_table in matfile['region_names'][0]]\n data = matfile['data']\n xy = matfile['xy']\n assert data.shape[0] == xy.shape[0]\n assert xy.shape[1] == 2\n region = matfile['region']\n assert region.size == data.shape[0]\n if not 'mz' in matfile:\n print('M/Z array not found, creating fake...')\n mz = np.arange(data.shape[1])\n else:\n mz = matfile['mz']\n assert mz.size == data.shape[1]\n return DivikDataSerializer(data, xy, mz, region, region_names)\n\n\ndef main():\n args = parse_args()\n try:\n data = read_matfile(args.source)\n except NotImplementedError:\n data = read_hdf5(args.source)\n with open(args.destination, 'w') as output_file:\n output_file.write(str(data))\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.arange",
"scipy.io.loadmat",
"numpy.transpose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
AleksUDKM/udkm1Dsim-1 | [
"ea6806a005f14cc34cea813cbfb3e3beaf01719e"
] | [
"udkm1Dsim/structure.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of the udkm1Dsimpy module.\n#\n# udkm1Dsimpy is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n#\n# Copyright (C) 2017 Daniel Schick\n\n\"\"\"A :mod:`Structure` module \"\"\"\n\n__all__ = [\"Structure\"]\n\n__docformat__ = \"restructuredtext\"\n\nimport itertools\nimport numpy as np\nfrom .unitCell import UnitCell\nfrom . import u, Q_\nfrom .helpers import make_hash_md5\n\n\nclass Structure:\n \"\"\"Structure\n\n The structure class can hold various sub_structures. Each\n sub_structure can be either a layer of N unitCell objects or a\n structure by itself.\n Thus it is possible to recursively build up 1D structures.\n\n Args:\n name (str): name of the sample\n\n Attributes:\n name (str): name of sample\n sub_structures (list): list of structures in sample\n substrate (object): structure of the substrate\n num_sub_systems (int): number of subsystems for heat and phonons\n (electronic, lattice, spins, ...)\n\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n self.num_sub_systems = 1\n self.sub_structures = []\n self.substrate = []\n\n def __str__(self, tabs=0):\n \"\"\"String representation of this class\"\"\"\n tab_str = ''\n for i in range(tabs):\n tab_str += '\\t'\n\n class_str = tab_str + 'Structure properties:\\n\\n'\n class_str += tab_str + 'Name : {:s}\\n'.format(self.name)\n class_str += tab_str + 'Length : {:0.2f} nm\\n'.format(self.get_length()/1e-9)\n class_str += tab_str + '----\\n'\n # traverse all substructures\n for i, sub_structure in enumerate(self.sub_structures):\n if isinstance(sub_structure[0], UnitCell):\n # the substructure is an unitCell\n class_str += tab_str + '{:d} times {:s}: {:0.2f}\\n'.format(\n sub_structure[1],\n sub_structure[0].name,\n sub_structure[1]*sub_structure[0].c_axis.to('nm'))\n else:\n # the substructure is a structure instance by itself\n # call the display() method recursively\n class_str += tab_str + 'sub-structure {:d} times:\\n'.format(\n sub_structure[1])\n sub_structure[0].__str__(tabs+1)\n class_str += tab_str + '----\\n'\n # check for a substrate\n if isinstance(self.substrate, Structure):\n class_str += tab_str + 'Substrate:\\n'\n class_str += tab_str + '----\\n'\n class_str += tab_str + '{:d} times {:s}: {:0.2f}\\n'.format(\n self.substrate.sub_structures[0][1],\n self.substrate.sub_structures[0][0].name,\n self.substrate.sub_structures[0][1]\n * self.substrate.sub_structures[0][0].c_axis.to('nm'))\n else:\n class_str += 'no substrate\\n'\n return class_str\n\n def visualize(self):\n \"\"\"visualize\"\"\"\n# % initialize input parser and define defaults and validators\n# p = inputParser;\n# p.addRequired('obj' , @(x)isa(x,'structure'));\n# p.addParamValue('handle', '', @ishandle);\n# % parse the input\n# p.parse(obj,varargin{:});\n# % assign parser results to object properties\n# if isempty(p.Results.handle)\n# h = figure;\n# else\n# h = p.Results.handle;\n# end%if\n# a = 
obj.getUniqueUnitCells();\n# N = size(a,1);\n# figure(h);\n# distances = obj.getDistancesOfUnitCells/units.nm;\n# stairs(distances,obj.getUnitCellVectors, 'LineWidth', 2);\n# axis([min(distances) max(distances) 0.9 length(obj.getUniqueUnitCells)+0.1]);\n# xlabel('Distance [nm]');\n# title('Structure Visualization');\n# set(gca,'YTick',1:N,'YTickLabel', a(:,1));\n pass\n\n def get_hash(self, **kwargs):\n \"\"\"hash\n\n Returns a unique hash from all unitCell IDs in the correct order\n in the structure.\n\n \"\"\"\n param = []\n ucs = self.get_unique_unit_cells()\n for uc in ucs[1]:\n param.append(uc.get_property_dict(**kwargs))\n\n _, IDs, _ = self.get_unit_cell_vectors()\n param.append(IDs)\n return make_hash_md5(param)\n\n def add_sub_structure(self, sub_structure, N):\n \"\"\"add_sub_structure\n\n Add a sub_structure of N unitCells or N structures to the\n structure.\n\n Args:\n sub_structure (UnitCell, Structure): unit cell or structure\n to add as sub structure\n N (int): number or repetitions\n\n \"\"\"\n # check of the sub_structure is an instance of the unitCell of\n # structure class\n if not isinstance(sub_structure, (UnitCell, Structure)):\n raise ValueError('Class '\n + type(sub_structure).__name__\n + ' is no possible sub structure. '\n + 'Only UnitCell and '\n + 'Structure classes are allowed!')\n\n # if a structure is added as a sub_structure, the sub_structure\n # can not have a substrate\n if isinstance(sub_structure, Structure):\n if sub_structure.substrate:\n raise ValueError('No substrate in sub_structure allowed!')\n\n # check the number of subsystems of the sub_structure\n if ((self.num_sub_systems > 1)\n and not (sub_structure.num_sub_systems == self.num_sub_systems)):\n\n raise ValueError('The number of subsystems in each sub_structure'\n 'must be the same!')\n else:\n self.num_sub_systems = sub_structure.num_sub_systems\n\n # add a sub_structure of N repetitions to the structure with\n self.sub_structures.append([sub_structure, N])\n\n def add_substrate(self, sub_structure):\n \"\"\"add_substrate\n\n Add a structure as static substrate to the structure\n\n Args:\n sub_structure (Structure): substrate structure\n\n \"\"\"\n if not isinstance(sub_structure, Structure):\n raise ValueError('Class '\n + type(sub_structure).__name__\n + ' is no possible substrate. 
'\n + 'Only structure class is allowed!')\n\n self.substrate = sub_structure\n\n def get_number_of_sub_structures(self):\n \"\"\"get_number_of_sub_structures\n\n Returns the number of all sub structures.\n This methods does not return the number of all unitCells in the\n structure, see get_number_of_unit_cells().\n\n \"\"\"\n N = 0\n for i in range(len(self.sub_structures)):\n if isinstance(self.sub_structures[i][0], UnitCell):\n N = N + 1\n else:\n N = N + self.sub_structures[i][0].getNumberOfsub_structures()\n return N\n\n def get_number_of_unit_cells(self):\n \"\"\"get_number_of_unit_cells\n\n Returns the number of all unitCells in the structure.\n\n \"\"\"\n N = 0\n # traverse the substructres\n for i in range(len(self.sub_structures)):\n if isinstance(self.sub_structures[i][0], UnitCell):\n N = N + self.sub_structures[i][1]\n else:\n # its a sturcture, so call the method recursively\n N = N + self.sub_structures[i][0].get_number_of_unit_cells() \\\n * self.sub_structures[i][1]\n\n return N\n\n def get_number_of_unique_unit_cells(self):\n \"\"\"get_number_of_unique_unit_cells\n\n Returns the number of unique unitCells in the structure.\n\n \"\"\"\n N = len(self.get_unique_unit_cells()[0])\n return N\n\n def get_length(self):\n \"\"\"get_length\n\n Returns the length from surface to bottom of the structure\n\n \"\"\"\n _, d_end, _ = self.get_distances_of_unit_cells()\n return d_end[-1]\n\n def get_unique_unit_cells(self):\n \"\"\"get_unique_unit_cells\n\n Returns a list of ids and handles of all unique UnitCell\n instances in the structure.\n The uniqueness is determined by the handle of each unitCell\n instance.\n\n \"\"\"\n uc_ids = []\n uc_handles = []\n # traverse the sub_structures\n for i in range(len(self.sub_structures)):\n if isinstance(self.sub_structures[i][0], UnitCell):\n # its a UnitCell\n uc_id = self.sub_structures[i][0].id\n if not uc_ids:\n # the cell array is empty at the beginning so add\n # the first unitCell\n uc_ids = uc_ids + [uc_id]\n uc_handles = uc_handles + [self.sub_structures[i][0]]\n else:\n # the cell array is not empty so check if the id is\n # already in the ucs id vector\n if uc_id not in uc_ids:\n # if id not in list, so add it\n uc_ids = uc_ids + [uc_id]\n uc_handles = uc_handles + [self.sub_structures[i][0]]\n else:\n # its a sub_structure\n if not uc_ids:\n # the cell array is empty at the beginning so call\n # the method recursively and add the result to the\n # ucs array\n uc_ids = self.sub_structures[i][0].get_unique_unit_cells()[0]\n uc_handles = self.sub_structures[i][0].get_unique_unit_cells()[1]\n else:\n # the cell array is not empty so check if the ids\n # from the recursive call are already in the ucs id\n # vector.\n temp1 = self.sub_structures[i][0].get_unique_unit_cells()[0]\n temp2 = self.sub_structures[i][0].get_unique_unit_cells()[1]\n for j, temp in enumerate(temp1):\n # check all ids from recursive call\n if temp1[j] not in uc_ids:\n # ids not in list, so add them\n uc_ids = uc_ids + [temp1[j]]\n uc_handles = uc_handles + [temp2[j]]\n\n return uc_ids, uc_handles\n\n def get_unit_cell_vectors(self, *args):\n \"\"\"get_unit_cell_vectors\n\n Returns three lists with the numeric index of all unit cells\n in a structure given by the get_unique_unit_cells() method and\n addidionally vectors with the ids and Handles of the\n corresponding unitCell instances.\n The list and order of the unique unitCells can be either handed\n as an input parameter or is requested at the beginning.\n\n Args:\n ucs (Optional[list]): list of unique unit 
cells including\n ids and handles\n\n \"\"\"\n indices = []\n uc_ids = []\n uc_handles = []\n # if no ucs (UniqueUnitCells) are given, we have to get them\n if len(args) < 1:\n ucs = self.get_unique_unit_cells()\n else:\n ucs = args[0]\n # traverse the substructres\n for i in range(len(self.sub_structures)):\n if isinstance(self.sub_structures[i][0], UnitCell):\n # its a UnitCell\n # find the index of the current UC id in the unique\n # unitCell vector\n Index = ucs[0].index(self.sub_structures[i][0].id)\n # add the index N times to the indices vector\n indices = np.append(indices, Index*np.ones(self.sub_structures[i][1]))\n # create a cell array of N unitCell ids and add them to\n # the ids cell array\n temp1 = list(itertools.repeat(self.sub_structures[i][0].id,\n self.sub_structures[i][1]))\n uc_ids = uc_ids + list(temp1)\n # create a cell array of N unitCell handles and add them to\n # the Handles cell array\n temp2 = list(itertools.repeat(self.sub_structures[i][0],\n self.sub_structures[i][1]))\n uc_handles = uc_handles + list(temp2)\n else:\n # its a structure\n # make a recursive call and hand in the same unique\n # unit cell vector as we used before\n [temp1, temp2, temp3] = self.sub_structures[i][0].get_unit_cell_vectors(ucs)\n temp11 = []\n temp22 = []\n temp33 = []\n # concat the temporary arrays N times\n for j in range(self.sub_structures[i][1]):\n temp11 = temp11 + list(temp1)\n temp22 = temp22 + list(temp2)\n temp33 = temp33 + list(temp3)\n # add the temporary arrays to the outputs\n indices = np.append(indices, temp11)\n uc_ids = uc_ids + list(temp22)\n uc_handles = uc_handles + list(temp33)\n return list(map(int, indices)), uc_ids, uc_handles\n\n def get_all_positions_per_unique_unit_cell(self):\n \"\"\"get_all_positions_per_unique_unit_cell\n\n Returns a list with one vector of position indices for\n each unique unitCell in the structure.\n\n \"\"\"\n ucs = self.get_unique_unit_cells()\n indices = self.get_unit_cell_vectors()[0]\n pos = {} # Dictionary used instead of array\n for i, uc in enumerate(ucs[0]):\n pos[ucs[0][i]] = list(np.where(indices == i))\n # Each element accessible through Unit cell id\n return pos\n\n def get_distances_of_unit_cells(self):\n \"\"\"get_distances_of_unit_cells\n\n Returns a vector of the distance from the surface for each unit\n cell starting at 0 (dStart) and starting at the end of the first\n unit cell (dEnd) and from the center of each unit cell (dMid).\n\n ToDo: add argument to return distances in according unit or only\n numbers.\n\n \"\"\"\n c_axes = self.get_unit_cell_property_vector('_c_axis')\n d_end = np.cumsum(c_axes)\n d_start = np.hstack([[0], d_end[0:-1]])\n d_mid = (d_start + c_axes)/2\n return d_start*u.m, d_end*u.m, d_mid*u.m\n\n def get_distances_of_interfaces(self):\n \"\"\"get_distances_of_interfaces\n\n Returns the distances from the surface of each interface of the\n structure.\n\n \"\"\"\n\n d_start, d_end, d_mid = self.get_distances_of_unit_cells()\n indices = np.r_[1, np.diff(self.get_unit_cell_vectors()[0])]\n return np.append(d_start[np.nonzero(indices)].magnitude, d_end[-1].magnitude)*u.m\n\n def interp_distance_at_interfaces(self):\n \"\"\"interp_distance_at_interfaces\"\"\"\n# % Returns a distance Vector of the center of UCs interpolated by an\n# % odd number N at the interface of sturctures.\n# function [distInterp originalIndicies] = interpDistanceAtInterfaces(obj,N)\n# [dStart,dEnd,dMid] = obj.getDistancesOfUnitCells();\n# % these are the distances of the interfaces\n# distIntf = 
obj.getDistancesOfInterfaces();\n# % we start with the distances of the centers of the unit cells\n# distInterp = dMid;\n#\n# N = floor(N); % make N an integer\n# if mod(N,2) == 0\n# % we want to have odd numbers\n# N = N+1;\n# end%if\n#\n# % traverse all distances\n# for i=1:length(distIntf)\n# x = distIntf(i); % this is the distance of an interface\n#\n# inda = finderb(x,dStart); % this is the index of an UC after the interface\n# indb = inda-1; % this is the index of an UC before the interface\n#\n# % now interpolate linearly N new distances at the interface\n# if indb == 0 % this is the surface interface\n# distInterp = vertcat(distInterp,linspace(0,dMid(inda),2+(N-1)/2)');\n# elseif inda >= length(dMid) % this is the bottom interface\n# distInterp = vertcat(distInterp,\n# linspace(dMid(inda),dEnd(end),2+(N-1)/2)');\n# else % this is a surface inside the structure\n# distInterp = vertcat(distInterp,linspace(dMid(indb),dMid(inda),2+N)');\n# end%if\n# end%for\n#\n# distInterp = unique(sort(distInterp)); % sort and unify the distances\n# % these are the indicies of the original distances in the interpolated new vector\n# originalIndicies = finderb(dMid,distInterp);\n# end%function\n pass\n\n def get_unit_cell_property_vector(self, property_name):\n \"\"\"get_unit_cell_property_vector\n\n Returns a vector for a property of all unitCells in the\n structure. The property is determined by the propertyName and\n returns a scalar value or a function handle.\n\n Args:\n property_name (str): type of property to return as vector\n\n \"\"\"\n # get the Handle to all unitCells in the Structure\n handles = self.get_unit_cell_vectors()[2]\n\n if callable(getattr(handles[0], property_name)):\n # it's a function\n prop = np.zeros([self.get_number_of_unit_cells()])\n for i in range(self.get_number_of_unit_cells()):\n prop[i] = getattr(handles[i], property_name)\n elif ((type(getattr(handles[0], property_name)) is list) or\n (type(getattr(handles[0], property_name)) is str)):\n # it's a list of functions or str\n prop = []\n for i in range(self.get_number_of_unit_cells()):\n # Prop = Prop + getattr(Handles[i],types)\n prop.append(getattr(handles[i], property_name))\n elif type(getattr(handles[0], property_name)) is Q_:\n # its a pint quantity\n unit = getattr(handles[0], property_name).units\n prop = np.empty([self.get_number_of_unit_cells()])\n for i in range(self.get_number_of_unit_cells()):\n prop[i] = getattr(handles[i], property_name).magnitude\n prop *= unit\n else:\n # its a number or array\n ucs = self.get_unique_unit_cells()\n temp = np.zeros([len(ucs[0]), 1])\n for i, uc in enumerate(ucs[1]):\n try:\n temp[i] = len(getattr(uc, property_name))\n except TypeError:\n temp[i] = 1\n max_dim = int(np.max(temp))\n if max_dim > 1:\n prop = np.empty([self.get_number_of_unit_cells(), max_dim])\n else:\n prop = np.empty([self.get_number_of_unit_cells()])\n del temp\n # traverse all unitCells\n for i in range(self.get_number_of_unit_cells()):\n temp = getattr(handles[i], property_name)\n if max_dim > 1:\n prop[i, :] = temp\n else:\n prop[i] = temp\n\n return prop\n\n def get_unit_cell_handle(self, i):\n \"\"\"get_unit_cell_handle\n\n Returns the handle to the unitCell at position i in the\n structure.\n\n \"\"\"\n handles = self.get_unit_cell_vectors()[2]\n handle = handles[i]\n return handle\n"
] | [
[
"numpy.hstack",
"numpy.nonzero",
"numpy.cumsum",
"numpy.ones",
"numpy.max",
"numpy.append",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AmanDaVinci/lifelong-learning | [
"cd131860120748775b5be5a1ccdddd65cd53ba1a"
] | [
"datastreams/datastream.py"
] | [
"import random\nimport pandas as pd\nfrom functools import partial\nfrom torch.utils.data import DataLoader\nfrom datasets import load_dataset, concatenate_datasets\nfrom datasets import Features, Value, ClassLabel\nfrom datastreams.datasets import dataset_configs\n\n\nclass DataStream:\n features = Features({\n \"context\": Value(\"string\"),\n \"statement\": Value(\"string\"),\n \"label\": ClassLabel(2, names=[\"False\", \"True\"])\n })\n\n def __init__(self, dataset_names: list, split: str=\"train_split\"):\n self.dataset_names = dataset_names\n self.stream = []\n for name in dataset_names:\n config = dataset_configs[name] \n path = config[\"path\"]\n name = config.get(\"name\", None)\n dataset_split = config[split]\n dataset = load_dataset(path, name, split=dataset_split)\n filter_column = config.get(\"filter_column\", None)\n filter_value = config.get(\"filter_value\", None)\n if filter_column and filter_value:\n dataset = dataset.filter(lambda batch: batch[filter_column]==filter_value)\n transform = config[\"transform\"]\n dataset = dataset.map(transform, batched=True, remove_columns=dataset.column_names)\n try:\n dataset = dataset.cast(self.features)\n except:\n raise ValueError(f\"{transform} didn't transform to datastream features.\")\n self.stream.append(dataset)\n \n def summary(self):\n return pd.DataFrame(\n [(name, data.num_rows) for name, data in zip(self.dataset_names, self.stream)],\n columns=[\"dataset\", \"num_examples\"]\n )\n \n def save(self, path):\n path.mkdir(parents=True, exist_ok=True)\n for name, data in zip(self.dataset_names, self.stream):\n data.to_pandas().to_csv(path/f\"{name}.csv\", index=False)\n\n def sample_examples(self, num_per_dataset: int=1) -> pd.DataFrame:\n all_sample_data = []\n for name, data in zip(self.dataset_names, self.stream):\n sample_idxs = random.choices(range(data.num_rows), k=num_per_dataset)\n sample_data = data.select(sample_idxs).to_pandas()\n sample_data[\"dataset\"] = name\n all_sample_data.append(sample_data)\n return pd.concat(all_sample_data)\n\n def shuffle_datasets(self, seed: int=None):\n self.stream = [data.shuffle(seed) for data in self.stream]\n \n def limit_datasets(self, max_size: int):\n self.stream = [data.select(range(max_size)) if max_size<=data.num_rows else data\n for data in self.stream]\n\n def resize_datasets(self, new_size: int):\n new_stream = []\n for data in self.stream:\n if new_size <= data.num_rows:\n new_stream.append(data.select(range(new_size)))\n elif new_size > data.num_rows:\n size = data.num_rows\n num_duplications, remaining_rows = new_size//size, new_size%size\n # BUG: https://github.com/huggingface/datasets/pull/2025\n # HOTFIX: Create and cache a new dataset using flatten_indices()\n resized_data = [data.flatten_indices()] * num_duplications\n if remaining_rows:\n resized_data += [data.select(range(remaining_rows)).flatten_indices()] \n resized_data = concatenate_datasets(resized_data)\n new_stream.append(resized_data)\n self.stream = new_stream\n \n def remix_datasets(self, indices: list):\n assert len(self.stream) == len(indices), \\\n \"Must have indices for each dataset in the datastream.\"\n self.stream = [data.select(idxs) if max(idxs)<=data.num_rows else data\n for data, idxs in zip(self.stream, indices)]\n \n def get_dataloader(self, tokenizer, concatenate: bool, batch_size: int, shuffle_examples: bool):\n tokenizer = partial(tokenizer.batch_encode_plus, \n padding=\"max_length\", truncation=\"longest_first\")\n def dataloader(dataset):\n dataset = dataset.map(lambda x: 
tokenizer(list(zip(x[\"context\"], x[\"statement\"]))),\n batched=True, remove_columns=[\"context\", \"statement\"])\n dataset.set_format(type='torch', columns=['input_ids', 'token_type_ids', \n 'attention_mask', 'label'])\n return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle_examples)\n if concatenate:\n # BUG: https://github.com/huggingface/datasets/pull/2025\n # HOTFIX: Create and cache a new dataset using flatten_indices()\n self.stream = [data.flatten_indices() for data in self.stream]\n return dataloader(concatenate_datasets(self.stream))\n else:\n return [dataloader(dataset) for dataset in self.stream]\n"
] | [
[
"pandas.concat",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |