repo_name: string (lengths 8-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
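Taken together, the columns describe one record per repository snapshot. A minimal Python sketch of that record shape, inferred from the schema header and the sample rows below (the type name and the per-field comments are assumptions for illustration, not part of the dataset itself):

    from typing import TypedDict

    class ApiUsageRecord(TypedDict):
        # Illustrative only: field names and the string-length bounds come from
        # the schema header above; the "one entry per sampled file" reading of
        # the sequence fields is inferred from the rows below.
        repo_name: str           # e.g. "ntraut/example" (8-130 characters)
        hexsha: list[str]        # commit SHA each file's contents were taken at
        file_path: list[str]     # sampled file paths, e.g. ["run.py"]
        code: list[str]          # full source text of each sampled file
        apis: list[list[str]]    # fully-qualified API calls detected per file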
ntraut/example
[ "5b7501ee7c8ad4e7c61b3ff8e9b1d3c0380c33de" ]
[ "run.py" ]
[ "#!/usr/bin/env python3\nimport argparse\nimport os\nimport subprocess\nimport nibabel\nimport numpy\nfrom glob import glob\n\n__version__ = open(os.path.join(os.path.dirname(os.path.realpath(__file__)),\n 'version')).read()\n\ndef run(command, env={}):\n merged_env = os.environ\n merged_env.update(env)\n process = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, shell=True,\n env=merged_env)\n while True:\n line = process.stdout.readline()\n line = str(line, 'utf-8')[:-1]\n print(line)\n if line == '' and process.poll() != None:\n break\n if process.returncode != 0:\n raise Exception(\"Non zero return code: %d\"%process.returncode)\n\nparser = argparse.ArgumentParser(description='Example BIDS App entrypoint script.')\nparser.add_argument('bids_dir', help='The directory with the input dataset '\n 'formatted according to the BIDS standard.')\nparser.add_argument('output_dir', help='The directory where the output files '\n 'should be stored. If you are running group level analysis '\n 'this folder should be prepopulated with the results of the'\n 'participant level analysis.')\nparser.add_argument('analysis_level', help='Level of the analysis that will be performed. '\n 'Multiple participant level analyses can be run independently '\n '(in parallel) using the same output_dir.',\n choices=['participant', 'group'])\nparser.add_argument('--participant_label', help='The label(s) of the participant(s) that should be analyzed. The label '\n 'corresponds to sub-<participant_label> from the BIDS spec '\n '(so it does not include \"sub-\"). If this parameter is not '\n 'provided all subjects should be analyzed. Multiple '\n 'participants can be specified with a space separated list.',\n nargs=\"+\")\nparser.add_argument('--skip_bids_validator', help='Whether or not to perform BIDS dataset validation',\n action='store_true')\nparser.add_argument('-v', '--version', action='version',\n version='BIDS-App example version {}'.format(__version__))\n\n\nargs = parser.parse_args()\n\nif not args.skip_bids_validator:\n run('bids-validator %s'%args.bids_dir)\n\nsubjects_to_analyze = []\n# only for a subset of subjects\nif args.participant_label:\n subjects_to_analyze = args.participant_label\n# for all subjects\nelse:\n subject_dirs = glob(os.path.join(args.bids_dir, \"sub-*\"))\n subjects_to_analyze = [subject_dir.split(\"-\")[-1] for subject_dir in subject_dirs]\n\n# running participant level\nif args.analysis_level == \"participant\":\n\n # find all T1s and skullstrip them\n for subject_label in subjects_to_analyze:\n for T1_file in glob(os.path.join(args.bids_dir, \"sub-%s\"%subject_label,\n \"anat\", \"*_T1w.nii*\")) + glob(os.path.join(args.bids_dir,\"sub-%s\"%subject_label,\"ses-*\",\"anat\", \"*_T1w.nii*\")):\n out_file = os.path.split(T1_file)[-1].replace(\"_T1w.\", \"_brain.\")\n cmd = \"bet %s %s\"%(T1_file, os.path.join(args.output_dir, out_file))\n print(cmd)\n run(cmd)\n\n# running group level\nelif args.analysis_level == \"group\":\n brain_sizes = []\n for subject_label in subjects_to_analyze:\n for brain_file in glob(os.path.join(args.output_dir, \"sub-%s*.nii*\"%subject_label)):\n data = nibabel.load(brain_file).get_data()\n # calcualte average mask size in voxels\n brain_sizes.append((data != 0).sum())\n\n with open(os.path.join(args.output_dir, \"avg_brain_size.txt\"), 'w') as fp:\n fp.write(\"Average brain size is %g voxels\"%numpy.array(brain_sizes).mean())\n" ]
[ [ "numpy.array" ] ]
benbenti/fuzzyclustering
[ "f67224105528d82f6d950ec7692a50d927ca0621" ]
[ "tests/conftest.py" ]
[ "import pytest\nimport random\nimport numpy as np\nfrom numpy.random import rand\nimport lib.algorithms as al\n\[email protected](scope=\"session\")\ndef unif_1D():\n \"\"\"\n Test case: one dimension, samples evenly distributed.\n \"\"\"\n data = np.array([[0], [1], [2], [3], [4], [5], [6],\n [7], [8], [9], [10], [11], [12]\n ]\n )\n return data\n\[email protected](scope=\"session\")\ndef rng():\n return random.Random()\n\[email protected]\ndef dataset(rng):\n n_samples = rng.randint(100, 1000)\n n_features = rng.randint(10, 100)\n feature_range = rng.randint(1, 10)\n return (rand(n_samples, n_features) - 1/2) * feature_range\n\[email protected]\ndef nc(rng):\n return rng.randint(2, 50)\n\[email protected]\ndef FC_random(rng, dataset, nc):\n p = 1 + rng.random() * 2\n return al.FuzzyClustering(dataset, p, nc)\n\n# @pytest.fixture\n# def FCP_random(rng, dataset, nc):\n# p = rng.random()\n# return al.FuzzyClusteringPoly(dataset, p, nc)\n\n# @pytest.fixture\n# def FCRS_random(rng, dataset, nc):\n# p = rng.random() * 5\n# return al.FuzzyClusteringRegulSh(dataset, p, nc)\n\n# @pytest.fixture\n# def FCRQ_random(rng, dataset, nc):\n# p = rng.random() * 5\n# return al.FuzzyClusteringRegulQuad(dataset, p, nc)\n" ]
[ [ "numpy.array", "numpy.random.rand" ] ]
jssprz/video_captioning_with_visual_syntactic_embedding
[ "0687772b22c56f448dabbe46932422363964abd4" ]
[ "test.py" ]
[ "import os\nimport argparse\nimport pickle\n\nfrom utils import decode_from_tokens\nfrom vocabulary import Vocabulary\nfrom configuration_file import ConfigurationFile\nfrom model.encoder import SCNEncoder\nfrom model.decoder import SemSynANDecoder\n\nimport h5py\nimport torch\nimport numpy as np\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate captions por test samples')\n parser.add_argument('-chckpt', '--checkpoint_path', type=str, default='pretrain/chckpt.pt',\n help='Set the path to pre-trained model (default is pretrain/chckpt.pt).')\n parser.add_argument('-data', '--dataset_folder', type=str, default='data/MSVD',\n help='Set the path to dataset folder (default is data/MSVD).')\n parser.add_argument('-out', '--output_folder', type=str, default='results/MSVD',\n help='Set the path to output folder (default is results/MSVD).')\n\n args = parser.parse_args()\n\n # load vocabulary\n with open(os.path.join(args.dataset_folder, 'corpus.pkl'), \"rb\") as f:\n corpus = pickle.load(f)\n idx2word_dict = corpus[4]\n vocab = Vocabulary.from_idx2word_dict(idx2word_dict, False)\n print('Size of vocabulary: {}'.format(len(vocab)))\n\n # Pretrained Embedding\n pretrained_embedding = torch.Tensor(corpus[5])\n\n cnn_feature_size = 2048\n c3d_feature_size = 4096\n i3d_feature_size = 400\n eco_feature_size = 1536\n res_eco_features_size = 3584\n cnn_global_size = 512\n projected_size = 512\n hidden_size = 1024 # Number of hidden layer units of the cyclic network\n mid_size = 128 # The middle of the boundary detection layer represents the dimension\n\n n_tags = 300\n global_tagger_hidden_size = 1024\n specific_tagger_hidden_size = 128\n hidden_size = 1024\n embedding_size = 300 #1024\n rnn_in_size = 300 #1024\n rnn_hidden_size = 1024\n\n config = ConfigurationFile(os.path.join(args.dataset_folder, 'config.ini'), 'sem-syn-cn-max')\n\n # Models\n encoder = SCNEncoder(cnn_feature_size=cnn_feature_size,\n c3d_feature_size=c3d_feature_size,\n i3d_feature_size=i3d_feature_size,\n eco_feature_size=eco_feature_size,\n res_eco_features_size=res_eco_features_size,\n n_tags=n_tags,\n hidden_size=hidden_size,\n global_tagger_hidden_size=global_tagger_hidden_size,\n specific_tagger_hidden_size=specific_tagger_hidden_size,\n n_layers=config.encoder_num_layers,\n input_dropout_p=config.encoder_dropout_p,\n rnn_dropout_p=config.encoder_dropout_p,\n bidirectional=config.encoder_bidirectional,\n rnn_cell=config.encoder_rnn_cell,\n device='cpu')\n\n decoder = SemSynANDecoder(in_seq_length=config.max_frames, \n out_seq_length=config.max_words,\n n_feats=res_eco_features_size + cnn_global_size,\n# n_feats=cnn_feature_size+c3d_feature_size,\n n_tags=n_tags + 400, #+ 174,\n n_pos_emb=512,\n embedding_size=embedding_size,\n pretrained_embedding=pretrained_embedding,\n hidden_size=hidden_size, \n rnn_in_size=rnn_in_size, \n rnn_hidden_size=rnn_hidden_size,\n vocab=vocab,\n device='cpu',\n rnn_cell=config.decoder_rnn_cell,\n encoder_num_layers=config.encoder_num_layers,\n encoder_bidirectional=config.encoder_bidirectional,\n num_layers=config.decoder_num_layers,\n dropout_p=config.decoder_dropout_p,\n beam_size=config.decoder_beam_size,\n temperature=config.decoder_temperature, \n train_sample_max=config.decoder_train_sample_max,\n test_sample_max=config.decoder_test_sample_max,\n beam_search_logic=config.decoder_beam_search_logic,\n dataset_name=config.dataset_name)\n\n # Checkpoint\n checkpoint = torch.load(args.checkpoint_path, map_location='cpu')\n\n # 1. 
filter out unnecessary keys for encoder\n chckpt_dict = {k: v for k, v in checkpoint['encoder'].items() if k not in ['fc1.weight', 'fc1.bias', 'fc2.weight', 'fc2.bias']}\n encoder_dict = encoder.state_dict()\n encoder_dict.update(chckpt_dict)\n\n encoder.load_state_dict(encoder_dict)\n decoder.load_state_dict(checkpoint['decoder'])\n\n #load test set features\n test_vidxs = sorted(list(set(corpus[2][1])))\n\n with h5py.File(os.path.join(args.dataset_folder, config.features_path), 'r') as feats_file:\n print('loading visual feats...')\n dataset = feats_file[config.dataset_name]\n cnn_feats = torch.from_numpy(dataset['cnn_features'][test_vidxs]).float()\n c3d_feats = torch.from_numpy(dataset['c3d_features'][test_vidxs]).float()\n cnn_globals = torch.zeros(cnn_feats.size(0), 512) # torch.from_numpy(dataset['cnn_globals'][test_vidxs]).float()\n cnn_sem_globals = torch.from_numpy(dataset['cnn_sem_globals'][test_vidxs]).float()\n f_counts = dataset['count_features'][test_vidxs]\n print('visual feats loaded')\n\n res_eco_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'resnext_eco.npy'))[test_vidxs])\n tags_globals = torch.from_numpy(np.load(os.path.join(args.dataset_folder, 'tag_feats.npy'))[test_vidxs])\n\n encoder.eval()\n decoder.eval()\n\n with torch.no_grad():\n video_encoded = encoder(cnn_feats, c3d_feats, cnn_globals, cnn_sem_globals, tags_globals, res_eco_globals)\n logits, tokens = decoder(video_encoded, None, teacher_forcing_ratio=0)\n\n scores = logits.max(dim=2)[0].mean(dim=1)\n\n confidences, sentences = [], []\n for score, seq in zip(scores, tokens):\n s = decode_from_tokens(seq, vocab)\n print(score, s)\n sentences.append(s)\n confidences.append(score)\n\n if not os.path.exists(args.output_folder):\n os.makedirs(args.output_folder)\n\n with open(os.path.join(args.output_folder, 'predictions.txt'), 'w') as fo:\n for vidx, sentence in zip(test_vidxs, sentences):\n fo.write(f'{vidx}\\t{sentence}\\n')" ]
[ [ "torch.from_numpy", "torch.no_grad", "torch.load", "torch.Tensor" ] ]
vncsna/mirror
[ "0de84de6fa4f8a4569beed0bf2e313901d95a17d" ]
[ "app/routers/images.py" ]
[ "# TODO: Add exception checking\n# TODO: Use wrong uuids as input\n\nimport os\nimport cv2\nimport shutil\nimport numpy as np\nfrom enum import Enum\nfrom uuid import uuid4\nfrom pathlib import Path\nfrom dotenv import load_dotenv\nfrom fastapi import APIRouter, File, UploadFile\nfrom fastapi.responses import FileResponse\n\nload_dotenv()\nDATABASE_IMGE = os.environ['DATABASE_IMGE']\nDATABASE_TEXT = os.environ['DATABASE_TEXT']\nHAAR_CLF_PATH = os.environ['HAAR_CLF_PATH']\n\nCASCADE_CLASSIFIER = cv2.CascadeClassifier(HAAR_CLF_PATH)\n\n# ---------------------------------------\n\nclass FilterName(str, Enum):\n blur = \"blur\"\n cover = \"cover\"\n pixelate = \"pixelate\"\n\n# ---------------------------------------\n\nrouter = APIRouter(tags=['Image'])\n\[email protected]('/image')\ndef create_image(image: UploadFile = File(...)):\n uuid = uuid4()\n with open(f'{DATABASE_IMGE}/{uuid}.png', 'wb') as buffer:\n shutil.copyfileobj(image.file, buffer)\n return {'uuid': uuid}\n\[email protected]('/image/{uuid}')\ndef read_image(uuid: str):\n filepath = Path(f'{DATABASE_IMGE}/{uuid}.png')\n return FileResponse(filepath)\n\[email protected]('/image/{uuid}')\ndef update_image(uuid: str, image: UploadFile = File(...)):\n with open(f'{DATABASE_IMGE}/{uuid}.png', 'wb') as buffer:\n shutil.copyfileobj(image.file, buffer)\n return {'uuid': uuid}\n\[email protected]('/image/{uuid}')\ndef delete_image(uuid: str):\n filepath = Path(f'{DATABASE_IMGE}/{uuid}.png')\n filepath.unlink()\n return {'uuid': uuid}\n\[email protected]('/image/{uuid}/{filter_}')\ndef transform_image(uuid: str, filter_: FilterName):\n filepath = f'{DATABASE_IMGE}/{uuid}.png'\n image = cv2.imread(str(filepath))\n \n if filter_ == FilterName.blur:\n anonymized_image = anonymize_faces(image, blur)\n elif filter_ == FilterName.cover:\n anonymized_image = anonymize_faces(image, cover)\n elif filter_ == FilterName.pixelate:\n anonymized_image = anonymize_faces(image, pixelate)\n \n new_filepath = f'{DATABASE_IMGE}/{uuid}-{filter_}.png'\n cv2.imwrite(new_filepath, anonymized_image)\n return FileResponse(new_filepath)\n\[email protected]('/images')\ndef read_images():\n uuids = Path(f'{DATABASE_IMGE}').glob('*')\n uuids = [uuid.stem for uuid in uuids]\n return {'uuids': uuids}\n\n# ---------------------------------------\n\ndef blur(img, factor=3.0):\n # auto determine the size of blurring kernel\n (h, w) = img.shape[:2]\n kW = int(w / factor)\n kH = int(h / factor)\n\n # ensure that width and height are odd\n kW = kW if kW % 2 != 0 else kW - 1\n kH = kH if kH % 2 != 0 else kH - 1\n\n # apply a gaussian blue to image\n return cv2.GaussianBlur(img, (kW, kH), 0)\n\ndef cover(img):\n return np.zeros_like(img)\n\ndef pixelate(img):\n height, width = img.shape[:2]\n \n # downscale image\n output = cv2.resize(\n img, (6, 6), interpolation=cv2.INTER_LINEAR)\n \n # upscale image\n output = cv2.resize(\n output, (width, height), interpolation=cv2.INTER_NEAREST)\n \n return output\n\ndef anonymize_faces(img, filtr):\n # transform color to gray\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n \n # detect region of interest with\n # a haar cascade feature classifier\n faces = CASCADE_CLASSIFIER.detectMultiScale(gray, 1.1, 4)\n \n # loop faces and apply filter\n for (x0, y0, width, height) in faces:\n face = img[x0:x0 + width, y0:y0 + height, :]\n img[x0:x0 + width, y0:y0 + height, :] = filtr(face)\n \n return img" ]
[ [ "numpy.zeros_like" ] ]
alisafaya/shalstm
[ "3d85f29c82451b393975ba587d53e0db0e43fff9" ]
[ "shalstm/qa/model.py" ]
[ "import torch.nn as nn\nimport torch\n\nfrom shalstm import SHALSTM\nfrom shalstm.utils import top_k_top_p_filtering\n\nclass SHALSTMforQuestionAnswering(SHALSTM):\n\n def forward(self, input, attention_mask=None, type_ids=None, hidden=None, mems=None, return_loss=False, lm_loss=False):\n \"\"\"\n all arguments have shape (seq length, batch)\n padding should be on left for input, on right for targets (as in seq2seq models)\n \n - type_ids is used both for loss masking and attention masking. it should be 1 for the answer tokens and 0 otherwise. \n - attention_mask (attention mask) is 0 for paddings and 1 for other tokens \n \"\"\"\n x = input[:-1].to(self.device)\n targets = input[1:].to(self.device)\n\n seq_len, batch_size = x.shape\n\n if attention_mask is None:\n attention_mask = torch.ones(*x.shape)\n\n if type_ids is None:\n type_ids = torch.zeros(*input.shape)\n loss_mask = type_ids[1:].view(-1).to(self.device)\n\n # encode and dropout input\n h = self.encoder(x)\n h = self.idrop(h)\n\n # if memory is provided, trim it to fit max memory size\n if attention_mask is None:\n attn_mask = torch.full((seq_len, seq_len), -1e6, device=h.device, dtype=h.dtype) # instead of -Inf we use -1,000,000\n attn_mask = torch.triu(attn_mask, diagonal=1)\n \n # concatenate memories from the previous pass if provided\n if mems is not None:\n max_mems = max(len(m) for m in mems)\n mem_mask = torch.zeros((seq_len, max_mems), device=h.device, dtype=h.dtype)\n attn_mask = torch.cat([mem_mask, attn_mask], dim=-1)\n \n else:\n attention_mask = attention_mask.to(self.device)\n attn_mask = torch.full((batch_size, seq_len, seq_len), -1e6, device=self.device, dtype=h.dtype)\n attn_mask = torch.triu(attn_mask, diagonal=1)\n for b in range(batch_size):\n mask = torch.where(attention_mask[:-1, b] == 0) \n attn_mask[b, :, mask[0]] = -1e6\n attn_mask[b, mask[0], :] = -1e6\n\n # concatenate memories from the previous pass if provided\n if mems is not None:\n max_mems = max(len(m) for m in mems)\n mem_mask = torch.zeros((batch_size, seq_len, max_mems), device=h.device, dtype=h.dtype)\n attn_mask = torch.cat([mem_mask, attn_mask], dim=-1)\n\n\n # iterate over blocks\n new_hidden, new_mems = [], []\n for idx, block in enumerate(self.blocks):\n mem = mems[idx] if mems is not None else None\n hid = hidden[idx] if hidden is not None else None\n h, new_mem, new_hid = block(h, attn_mask, self.memory_size, memory=mem, hidden=hid)\n new_hidden.append(new_hid)\n new_mems.append(new_mem)\n\n # final dropout\n h = self.odrop(h)\n\n if return_loss:\n if not lm_loss:\n # calculate loss targets are provided\n loss = -(self.splitloss(h.view(-1, self.embed_size), input[1:].to(self.device).view(-1)).output * loss_mask).mean() # .view(*x.shape).mean(0).mean()\n else:\n # calculate loss on all tokens\n loss = self.ate(h.view(-1, self.embed_size), input[1:].to(self.device).view(-1)).loss\n return loss, h, new_hidden, new_mems\n else:\n # calculate predictions\n output = self.splitloss.log_prob(h.view(-1, self.embed_size))\n output = output.view(*x.shape, -1)\n return output, h, new_hidden, new_mems\n\n def conditional_generate(self, input, attention_mask, type_ids, eos_id=2, max_length=64, use_sampling=False, top_p=0.95, temperature=1.0):\n \"\"\" input sequence has shape [seq length, batch size] \"\"\"\n\n prompt = torch.cat([input, torch.zeros(1, input.shape[1], dtype=torch.long)])\n attention_mask = torch.cat([attention_mask, torch.ones(1, attention_mask.shape[1])])\n type_ids = torch.cat([type_ids, torch.zeros(1, type_ids.shape[1])])\n\n 
self.eval()\n sequences = torch.zeros(max_length, input.shape[1], dtype=torch.long)\n hidden, mems = None, None\n with torch.no_grad():\n output, h, hidden, mems = self(prompt[:-1], attention_mask=attention_mask[:-1], type_ids=type_ids[:-1], hidden=hidden, mems=mems)\n \n prompt = prompt[-2:]\n attention_mask=attention_mask[-2:]\n type_ids=type_ids[-2:]\n\n for i in range(max_length):\n output, h, hidden, mems = self(prompt, attention_mask=attention_mask, type_ids=type_ids, hidden=hidden, mems=mems)\n\n if use_sampling:\n raise NotImplementedError\n token_weights = top_k_top_p_filtering(torch.exp(output.view(-1)) / temperature, top_p=top_p, filter_value=0.0)\n output_idx = torch.multinomial(token_weights, num_samples=1)[0]\n \n else:\n output_idx = torch.argmax(output, dim=-1)\n\n prompt[0, :] = output_idx\n sequences[i, :] = output_idx\n\n if torch.all(output_idx == eos_id):\n break\n\n return sequences\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--model\", type=str, default=\"bin/base/model\")\n parser.add_argument(\"--tokenizer\", type=str, default=\"tokenizer/tokenizer.json\")\n parser.add_argument(\"--device\", type=str, default=\"cuda\")\n args = parser.parse_args()\n\n model = SHALSTMforQuestionAnswering.from_pretrained(args.model, device=torch.device(args.device))\n\n from tokenizer import SHALSTMTokenizer\n tokenizer = SHALSTMTokenizer.from_file(args.tokenizer)\n\n questions = [\n \"another thing there\",\n \"some length here\",\n ]\n\n answers = [\n \"brother Hi how\",\n \"this answer for question one\",\n ]\n\n input, attn_mask, type_ids, input_length = tokenizer.encode_for_qa(questions, answers)\n \n loss, h, hidden, mems = model(input, attn_mask, type_ids, return_loss=True)\n\n warmup = 5\n total_steps = 1500\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)\n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=[lambda x: float(x / warmup) if x < warmup else float((total_steps - x) / total_steps)])\n \n use_amp = False\n scaler = torch.cuda.amp.GradScaler(enabled=use_amp)\n\n import time\n starttime = time.time()\n\n model.train()\n for i in range(total_steps):\n\n with torch.cuda.amp.autocast(enabled=use_amp):\n loss, h, hidden, mems = model(input, attn_mask, type_ids, return_loss=True)\n\n scaler.scale(loss).backward()\n scaler.unscale_(optimizer)\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)\n\n scaler.step(optimizer)\n scaler.update()\n scheduler.step()\n\n if i % (total_steps // 10) == 0:\n print(loss.item())\n\n print(\"Excecution time =\", (time.time() - starttime) / total_steps, \"sec per batch\")\n\n questions = [\n \"question one ?\",\n \"some length here\",\n ]\n\n answers = [\n \"this answer to this one\",\n \"This is another answer for another question \",\n ]\n\n input, attn_mask, type_ids, input_length = tokenizer.encode_for_qa(questions, answers)\n \n with torch.no_grad():\n model.eval()\n output, h, hidden, mems = model(input, attn_mask, type_ids)\n output = torch.argmax(output, dim=-1)\n\n ids = output[input_length - 1:].t().cpu().tolist()\n ids = output.t().cpu().tolist()\n print(tokenizer.decode(ids[0]))\n print(tokenizer.decode(ids[1]))\n\n input, attn_mask, type_ids, input_length = tokenizer.encode_for_qa(questions, direction='left')\n sequence = model.conditional_generate(input, attn_mask, type_ids, max_length=10, use_sampling=False)\n \n print(\"Conditional generation\")\n print(tokenizer.decode_batch(sequence.t().cpu().tolist()))\n" ]
[ [ "torch.ones", "torch.cuda.amp.GradScaler", "torch.argmax", "torch.no_grad", "torch.multinomial", "torch.full", "torch.device", "torch.all", "torch.where", "torch.cuda.amp.autocast", "torch.zeros", "torch.triu", "torch.cat" ] ]
itsliya/modin
[ "d4ce5390816ae7eb8717bf271e1feabd3d5fabee" ]
[ "modin/pandas/test/utils.py" ]
[ "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport pytest\nimport numpy as np\nimport math\nimport pandas\nfrom pandas.testing import (\n assert_series_equal,\n assert_frame_equal,\n assert_index_equal,\n assert_extension_array_equal,\n)\nimport modin.pandas as pd\nfrom modin.utils import to_pandas\nfrom modin.config import TestDatasetSize, TrackFileLeaks\nfrom io import BytesIO\nimport os\nfrom string import ascii_letters\nimport csv\nimport psutil\nimport functools\n\nrandom_state = np.random.RandomState(seed=42)\n\nDATASET_SIZE_DICT = {\n \"Small\": (2 ** 2, 2 ** 3),\n \"Normal\": (2 ** 6, 2 ** 8),\n \"Big\": (2 ** 7, 2 ** 12),\n}\n\n# Size of test dataframes\nNCOLS, NROWS = DATASET_SIZE_DICT.get(TestDatasetSize.get(), DATASET_SIZE_DICT[\"Normal\"])\n\n# Range for values for test data\nRAND_LOW = 0\nRAND_HIGH = 100\n\n# Directory for storing I/O operations test data\nIO_OPS_DATA_DIR = os.path.join(os.path.dirname(__file__), \"io_tests_data\")\n\n# Input data and functions for the tests\n# The test data that we will test our code against\ntest_data = {\n # \"empty_data\": {},\n # \"columns_only\": {\"col1\": [], \"col2\": [], \"col3\": [], \"col4\": [], \"col5\": []},\n \"int_data\": {\n \"col{}\".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.randint(\n RAND_LOW, RAND_HIGH, size=(NROWS)\n )\n for i in range(NCOLS)\n },\n \"float_nan_data\": {\n \"col{}\".format(int((i - NCOLS / 2) % NCOLS + 1)): [\n x\n if (j % 4 == 0 and i > NCOLS // 2) or (j != i and i <= NCOLS // 2)\n else np.NaN\n for j, x in enumerate(\n random_state.uniform(RAND_LOW, RAND_HIGH, size=(NROWS))\n )\n ]\n for i in range(NCOLS)\n },\n # \"int_float_object_data\": {\n # \"col3\": [1, 2, 3, 4],\n # \"col4\": [4, 5, 6, 7],\n # \"col1\": [8.0, 9.4, 10.1, 11.3],\n # \"col2\": [\"a\", \"b\", \"c\", \"d\"],\n # },\n # \"datetime_timedelta_data\": {\n # \"col3\": [\n # np.datetime64(\"2010\"),\n # np.datetime64(\"2011\"),\n # np.datetime64(\"2011-06-15T00:00\"),\n # np.datetime64(\"2009-01-01\"),\n # ],\n # \"col4\": [\n # np.datetime64(\"2010\"),\n # np.datetime64(\"2011\"),\n # np.datetime64(\"2011-06-15T00:00\"),\n # np.datetime64(\"2009-01-01\"),\n # ],\n # \"col1\": [\n # np.timedelta64(1, \"M\"),\n # np.timedelta64(2, \"D\"),\n # np.timedelta64(3, \"Y\"),\n # np.timedelta64(20, \"D\"),\n # ],\n # \"col2\": [\n # np.timedelta64(1, \"M\"),\n # np.timedelta64(2, \"D\"),\n # np.timedelta64(3, \"Y\"),\n # np.timedelta64(20, \"D\"),\n # ],\n # },\n # \"all_data\": {\n # \"col3\": 1.0,\n # \"col4\": np.datetime64(\"2011-06-15T00:00\"),\n # \"col5\": np.array([3] * 4, dtype=\"int32\"),\n # \"col1\": \"foo\",\n # \"col2\": True,\n # },\n}\n\n# See details in #1403\ntest_data[\"int_data\"][\"index\"] = test_data[\"int_data\"].pop(\n \"col{}\".format(int(NCOLS / 2))\n)\n\nfor col in 
test_data[\"float_nan_data\"]:\n for row in range(NROWS // 2):\n if row % 16 == 0:\n test_data[\"float_nan_data\"][col][row] = np.NaN\n\ntest_data_values = list(test_data.values())\ntest_data_keys = list(test_data.keys())\n\ntest_bool_data = {\n \"col{}\".format(int((i - NCOLS / 2) % NCOLS + 1)): random_state.choice(\n [True, False], size=(NROWS)\n )\n for i in range(NCOLS)\n}\n\ntest_data_resample = {\n \"data\": {\"A\": range(12), \"B\": range(12)},\n \"index\": pandas.date_range(\"31/12/2000\", periods=12, freq=\"H\"),\n}\n\ntest_data_with_duplicates = {\n \"no_duplicates\": {\n \"col{}\".format(int((i - NCOLS / 2) % NCOLS + 1)): range(NROWS)\n for i in range(NCOLS)\n },\n \"all_duplicates\": {\n \"col{}\".format(int((i - NCOLS / 2) % NCOLS + 1)): [\n float(i) for _ in range(NROWS)\n ]\n for i in range(NCOLS)\n },\n \"some_duplicates\": {\n \"col{}\".format(int((i - NCOLS / 2) % NCOLS + 1)): [\n i if j % 7 == 0 else x for j, x in enumerate(range(NROWS))\n ]\n for i in range(NCOLS)\n },\n \"has_name_column\": {\n \"name\": [\"one\", \"two\", \"two\", \"three\"],\n \"col1\": [1, 2, 2, 3],\n \"col3\": [10, 20, 20, 3],\n \"col7\": [100, 201, 200, 300],\n },\n \"str_columns\": {\n \"col_str{}\".format(int((i - NCOLS / 2) % NCOLS + 1)): [\n \"s\" + str(x % 5) for x in range(NROWS)\n ]\n for i in range(NCOLS)\n },\n}\n\ntest_data_with_duplicates[\"float_nan\"] = test_data[\"float_nan_data\"]\n\ntest_data_small = {\n \"small\": {\n \"col0\": [1, 2, 3, 4],\n \"col1\": [8.0, 9.4, 10.1, 11.3],\n \"col2\": [4, 5, 6, 7],\n }\n}\n\ntest_data_diff_dtype = {\n \"int_col\": [-5, 2, 7, 16],\n \"float_col\": [np.NaN, -9.4, 10.1, np.NaN],\n \"str_col\": [\"a\", np.NaN, \"c\", \"d\"],\n \"bool_col\": [False, True, True, False],\n}\n\ntest_data_small_values = list(test_data_small.values())\ntest_data_small_keys = list(test_data_small.keys())\n\ntest_data_with_duplicates_values = list(test_data_with_duplicates.values())\ntest_data_with_duplicates_keys = list(test_data_with_duplicates.keys())\n\ntest_data_categorical = {\n \"ordered\": pandas.Categorical(list(\"testdata\"), ordered=True),\n \"unordered\": pandas.Categorical(list(\"testdata\"), ordered=False),\n}\n\ntest_data_categorical_values = list(test_data_categorical.values())\ntest_data_categorical_keys = list(test_data_categorical.keys())\n\nnumeric_dfs = [\n \"empty_data\",\n \"columns_only\",\n \"int_data\",\n \"float_nan_data\",\n \"with_index_column\",\n]\n\nno_numeric_dfs = [\"datetime_timedelta_data\"]\n\n# String test data\ntest_string_data = {\n \"separator data\": [\n \"abC|DeF,Hik\",\n \"234,3245.67\",\n \"gSaf,qWer|Gre\",\n \"asd3,4sad|\",\n np.NaN,\n ]\n}\n\ntest_string_data_values = list(test_string_data.values())\ntest_string_data_keys = list(test_string_data.keys())\n\n# List of strings test data\ntest_string_list_data = {\"simple string\": [[\"a\"], [\"CdE\"], [\"jDf\"], [\"werB\"]]}\n\ntest_string_list_data_values = list(test_string_list_data.values())\ntest_string_list_data_keys = list(test_string_list_data.keys())\n\nstring_seperators = {\"empty sep\": \"\", \"comma sep\": \",\", \"None sep\": None}\n\nstring_sep_values = list(string_seperators.values())\nstring_sep_keys = list(string_seperators.keys())\n\nstring_na_rep = {\"None na_rep\": None, \"- na_rep\": \"-\", \"nan na_rep\": np.NaN}\n\nstring_na_rep_values = list(string_na_rep.values())\nstring_na_rep_keys = list(string_na_rep.keys())\n\njoin_type = {\"left\": \"left\", \"right\": \"right\", \"inner\": \"inner\", \"outer\": \"outer\"}\n\njoin_type_keys = 
list(join_type.keys())\njoin_type_values = list(join_type.values())\n\n# Test functions for applymap\ntest_func = {\n \"plus one\": lambda x: x + 1,\n \"convert to string\": lambda x: str(x),\n \"square\": lambda x: x * x,\n \"identity\": lambda x: x,\n \"return false\": lambda x: False,\n}\ntest_func_keys = list(test_func.keys())\ntest_func_values = list(test_func.values())\n\nnumeric_test_funcs = [\"plus one\", \"square\"]\n\n# Test functions for query\nquery_func = {\n \"col1 < col2\": \"col1 < col2\",\n \"col3 > col4\": \"col3 > col4\",\n \"col1 == col2\": \"col1 == col2\",\n \"(col2 > col1) and (col1 < col3)\": \"(col2 > col1) and (col1 < col3)\",\n}\nquery_func_keys = list(query_func.keys())\nquery_func_values = list(query_func.values())\n\n# Test agg functions for apply, agg, and aggregate\nagg_func = {\n \"sum\": \"sum\",\n \"df sum\": lambda df: df.sum(),\n \"str\": str,\n \"sum mean\": [\"sum\", \"mean\"],\n \"sum df sum\": [\"sum\", lambda df: df.sum()],\n \"should raise TypeError\": 1,\n}\nagg_func_keys = list(agg_func.keys())\nagg_func_values = list(agg_func.values())\n\n# For this sort of parameters pandas throws an exception.\n# See details in pandas issue 36036.\nagg_func_except = {\n \"sum sum\": [\"sum\", \"sum\"],\n}\nagg_func_except_keys = list(agg_func_except.keys())\nagg_func_except_values = list(agg_func_except.values())\n\nnumeric_agg_funcs = [\"sum mean\", \"sum sum\", \"sum df sum\"]\n\nudf_func = {\n \"return self\": lambda df: lambda x, *args, **kwargs: type(x)(x.values),\n \"change index\": lambda df: lambda x, *args, **kwargs: pandas.Series(\n x.values, index=np.arange(-1, len(x.index) - 1)\n ),\n \"return none\": lambda df: lambda x, *args, **kwargs: None,\n \"return empty\": lambda df: lambda x, *args, **kwargs: pandas.Series(),\n \"access self\": lambda df: lambda x, other, *args, **kwargs: pandas.Series(\n x.values, index=other.index\n ),\n}\nudf_func_keys = list(udf_func.keys())\nudf_func_values = list(udf_func.values())\n\n# Test q values for quantiles\nquantiles = {\n \"0.25\": 0.25,\n \"0.5\": 0.5,\n \"0.75\": 0.75,\n \"0.66\": 0.66,\n \"0.01\": 0.01,\n \"list\": [0.25, 0.5, 0.75, 0.66, 0.01],\n}\nquantiles_keys = list(quantiles.keys())\nquantiles_values = list(quantiles.values())\n\n# Test indices for get, set_index, __contains__, insert\nindices = {\n \"col1\": \"col1\",\n \"col2\": \"col2\",\n \"A\": \"A\",\n \"B\": \"B\",\n \"does not exist\": \"does not exist\",\n}\nindices_keys = list(indices.keys())\nindices_values = list(indices.values())\n\n# Test functions for groupby apply\ngroupby_apply_func = {\"sum\": lambda df: df.sum(), \"negate\": lambda df: -df}\ngroupby_apply_func_keys = list(groupby_apply_func.keys())\ngroupby_apply_func_values = list(groupby_apply_func.values())\n\n# Test functions for groupby agg\ngroupby_agg_func = {\"min\": \"min\", \"max\": \"max\"}\ngroupby_agg_func_keys = list(groupby_agg_func.keys())\ngroupby_agg_func_values = list(groupby_agg_func.values())\n\n# Test functions for groupby transform\ngroupby_transform_func = {\n \"add 4\": lambda df: df + 4,\n \"negatie and minus 10\": lambda df: -df - 10,\n}\ngroupby_transform_func_keys = list(groupby_transform_func.keys())\ngroupby_transform_func_values = list(groupby_transform_func.values())\n\n# Test functions for groupby pipe\ngroupby_pipe_func = {\"sum\": lambda df: df.sum()}\ngroupby_pipe_func_keys = list(groupby_pipe_func.keys())\ngroupby_pipe_func_values = list(groupby_pipe_func.values())\n\n# END Test input data and functions\n\n# Parametrizations of common 
kwargs\naxis = {\n \"over_rows_int\": 0,\n \"over_rows_str\": \"rows\",\n \"over_columns_int\": 1,\n \"over_columns_str\": \"columns\",\n}\naxis_keys = list(axis.keys())\naxis_values = list(axis.values())\n\nbool_arg = {\"True\": True, \"False\": False, \"None\": None}\nbool_arg_keys = list(bool_arg.keys())\nbool_arg_values = list(bool_arg.values())\n\nint_arg = {\"-5\": -5, \"-1\": -1, \"0\": 0, \"1\": 1, \"5\": 5}\nint_arg_keys = list(int_arg.keys())\nint_arg_values = list(int_arg.values())\n\n# END parametrizations of common kwargs\n\njson_short_string = \"\"\"[{\"project\": \"modin\"}]\"\"\"\njson_long_string = \"\"\"{\n \"quiz\": {\n \"sport\": {\n \"q1\": {\n \"question\": \"Which one is correct team name in NBA?\",\n \"options\": [\n \"New York Bulls\",\n \"Los Angeles Kings\",\n \"Golden State Warriros\",\n \"Huston Rocket\"\n ],\n \"answer\": \"Huston Rocket\"\n }\n },\n \"maths\": {\n \"q1\": {\n \"question\": \"5 + 7 = ?\",\n \"options\": [\n \"10\",\n \"11\",\n \"12\",\n \"13\"\n ],\n \"answer\": \"12\"\n },\n \"q2\": {\n \"question\": \"12 - 8 = ?\",\n \"options\": [\n \"1\",\n \"2\",\n \"3\",\n \"4\"\n ],\n \"answer\": \"4\"\n }\n }\n }\n }\"\"\"\njson_long_bytes = BytesIO(json_long_string.encode(encoding=\"UTF-8\"))\njson_short_bytes = BytesIO(json_short_string.encode(encoding=\"UTF-8\"))\n\n\n# Text encoding types\nencoding_types = [\n \"ascii\",\n \"utf_32\",\n \"utf_32_be\",\n \"utf_32_le\",\n \"utf_16\",\n \"utf_16_be\",\n \"utf_16_le\",\n \"utf_7\",\n \"utf_8\",\n \"utf_8_sig\",\n]\n\n# raising of this exceptions can be caused by unexpected behavior\n# of I/O operation test, but can passed by eval_io function since\n# the type of this exceptions are the same\nio_ops_bad_exc = [TypeError, FileNotFoundError]\n\n# Files compression to extension mapping\nCOMP_TO_EXT = {\"gzip\": \"gz\", \"bz2\": \"bz2\", \"xz\": \"xz\", \"zip\": \"zip\"}\n\n\ndef categories_equals(left, right):\n assert (left.ordered and right.ordered) or (not left.ordered and not right.ordered)\n assert_extension_array_equal(left, right)\n\n\ndef df_categories_equals(df1, df2):\n if not hasattr(df1, \"select_dtypes\"):\n if isinstance(df1, pandas.CategoricalDtype):\n return categories_equals(df1, df2)\n elif isinstance(getattr(df1, \"dtype\"), pandas.CategoricalDtype) and isinstance(\n getattr(df1, \"dtype\"), pandas.CategoricalDtype\n ):\n return categories_equals(df1.dtype, df2.dtype)\n else:\n return True\n\n categories_columns = df1.select_dtypes(include=\"category\").columns\n for column in categories_columns:\n assert_extension_array_equal(\n df1[column].values,\n df2[column].values,\n check_dtype=False,\n )\n\n\ndef df_equals(df1, df2):\n \"\"\"Tests if df1 and df2 are equal.\n\n Args:\n df1: (pandas or modin DataFrame or series) dataframe to test if equal.\n df2: (pandas or modin DataFrame or series) dataframe to test if equal.\n\n Returns:\n True if df1 is equal to df2.\n \"\"\"\n # Gets AttributError if modin's groupby object is not import like this\n from modin.pandas.groupby import DataFrameGroupBy\n\n groupby_types = (pandas.core.groupby.DataFrameGroupBy, DataFrameGroupBy)\n\n # The typing behavior of how pandas treats its index is not consistent when the\n # length of the DataFrame or Series is 0, so we just verify that the contents are\n # the same.\n if (\n hasattr(df1, \"index\")\n and hasattr(df2, \"index\")\n and len(df1) == 0\n and len(df2) == 0\n ):\n if type(df1).__name__ == type(df2).__name__:\n if hasattr(df1, \"name\") and hasattr(df2, \"name\") and df1.name == df2.name:\n return\n 
if (\n hasattr(df1, \"columns\")\n and hasattr(df2, \"columns\")\n and df1.columns.equals(df2.columns)\n ):\n return\n assert False\n\n if isinstance(df1, (list, tuple)) and all(\n isinstance(d, (pd.DataFrame, pd.Series, pandas.DataFrame, pandas.Series))\n for d in df1\n ):\n assert isinstance(df2, type(df1)), \"Different type of collection\"\n assert len(df1) == len(df2), \"Different length result\"\n return (df_equals(d1, d2) for d1, d2 in zip(df1, df2))\n\n # Convert to pandas\n if isinstance(df1, (pd.DataFrame, pd.Series)):\n df1 = to_pandas(df1)\n if isinstance(df2, (pd.DataFrame, pd.Series)):\n df2 = to_pandas(df2)\n\n if isinstance(df1, pandas.DataFrame) and isinstance(df2, pandas.DataFrame):\n if (df1.empty and not df2.empty) or (df2.empty and not df1.empty):\n assert False, \"One of the passed frames is empty, when other isn't\"\n elif df1.empty and df2.empty and type(df1) != type(df2):\n assert (\n False\n ), f\"Empty frames have different types: {type(df1)} != {type(df2)}\"\n\n if isinstance(df1, pandas.DataFrame) and isinstance(df2, pandas.DataFrame):\n assert_frame_equal(\n df1,\n df2,\n check_dtype=False,\n check_datetimelike_compat=True,\n check_index_type=False,\n check_column_type=False,\n check_categorical=False,\n )\n df_categories_equals(df1, df2)\n elif isinstance(df1, pandas.Index) and isinstance(df2, pandas.Index):\n assert_index_equal(df1, df2)\n elif isinstance(df1, pandas.Series) and isinstance(df2, pandas.Series):\n assert_series_equal(df1, df2, check_dtype=False, check_series_type=False)\n elif isinstance(df1, groupby_types) and isinstance(df2, groupby_types):\n for g1, g2 in zip(df1, df2):\n assert g1[0] == g2[0]\n df_equals(g1[1], g2[1])\n elif (\n isinstance(df1, pandas.Series)\n and isinstance(df2, pandas.Series)\n and df1.empty\n and df2.empty\n ):\n assert all(df1.index == df2.index)\n assert df1.dtypes == df2.dtypes\n elif isinstance(df1, pandas.core.arrays.numpy_.PandasArray):\n assert isinstance(df2, pandas.core.arrays.numpy_.PandasArray)\n assert df1 == df2\n elif isinstance(df1, np.recarray) and isinstance(df2, np.recarray):\n np.testing.assert_array_equal(df1, df2)\n else:\n if df1 != df2:\n np.testing.assert_almost_equal(df1, df2)\n\n\ndef modin_df_almost_equals_pandas(modin_df, pandas_df):\n df_categories_equals(modin_df._to_pandas(), pandas_df)\n\n modin_df = to_pandas(modin_df)\n\n if hasattr(modin_df, \"select_dtypes\"):\n modin_df = modin_df.select_dtypes(exclude=[\"category\"])\n if hasattr(pandas_df, \"select_dtypes\"):\n pandas_df = pandas_df.select_dtypes(exclude=[\"category\"])\n\n difference = modin_df - pandas_df\n diff_max = difference.max()\n if isinstance(diff_max, pandas.Series):\n diff_max = diff_max.max()\n assert (\n modin_df.equals(pandas_df)\n or diff_max < 0.0001\n or (all(modin_df.isna().all()) and all(pandas_df.isna().all()))\n )\n\n\ndef df_is_empty(df):\n \"\"\"Tests if df is empty.\n\n Args:\n df: (pandas or modin DataFrame) dataframe to test if empty.\n\n Returns:\n True if df is empty.\n \"\"\"\n assert df.size == 0 and df.empty\n assert df.shape[0] == 0 or df.shape[1] == 0\n\n\ndef arg_keys(arg_name, keys):\n \"\"\"Appends arg_name to the front of all values in keys.\n\n Args:\n arg_name: (string) String containing argument name.\n keys: (list of strings) Possible inputs of argument.\n\n Returns:\n List of strings with arg_name append to front of keys.\n \"\"\"\n return [\"{0}_{1}\".format(arg_name, key) for key in keys]\n\n\ndef name_contains(test_name, vals):\n \"\"\"Determines if any string in vals is a substring of 
test_name.\n\n Args:\n test_name: (string) String to determine if contains substrings.\n vals: (list of strings) List of substrings to test for.\n\n Returns:\n True if a substring in vals is in test_name, else False.\n \"\"\"\n return any(val in test_name for val in vals)\n\n\ndef check_df_columns_have_nans(df, cols):\n \"\"\"Checks if there are NaN values in specified columns of a dataframe.\n\n :param df: Dataframe to check.\n :param cols: One column name or list of column names.\n :return:\n True if specified columns of dataframe contains NaNs.\n \"\"\"\n return (\n pandas.api.types.is_list_like(cols)\n and (\n any(isinstance(x, str) and x in df.columns and df[x].hasnans for x in cols)\n or any(\n isinstance(x, pd.Series) and x._parent is df and x.hasnans for x in cols\n )\n )\n ) or (\n not pandas.api.types.is_list_like(cols)\n and cols in df.columns\n and df[cols].hasnans\n )\n\n\ndef eval_general(\n modin_df,\n pandas_df,\n operation,\n comparator=df_equals,\n __inplace__=False,\n check_exception_type=True,\n raising_exceptions=None,\n check_kwargs_callable=True,\n md_extra_kwargs=None,\n **kwargs,\n):\n if raising_exceptions:\n assert (\n check_exception_type\n ), \"if raising_exceptions is not None or False, check_exception_type should be True\"\n md_kwargs, pd_kwargs = {}, {}\n\n def execute_callable(fn, inplace=False, md_kwargs={}, pd_kwargs={}):\n try:\n pd_result = fn(pandas_df, **pd_kwargs)\n except Exception as pd_e:\n if check_exception_type is None:\n return None\n with pytest.raises(Exception) as md_e:\n # repr to force materialization\n repr(fn(modin_df, **md_kwargs))\n if check_exception_type:\n assert isinstance(md_e.value, type(pd_e))\n if raising_exceptions:\n assert not isinstance(\n md_e.value, tuple(raising_exceptions)\n ), f\"not acceptable exception type: {md_e.value}\"\n else:\n md_result = fn(modin_df, **md_kwargs)\n return (md_result, pd_result) if not __inplace__ else (modin_df, pandas_df)\n\n for key, value in kwargs.items():\n if check_kwargs_callable and callable(value):\n values = execute_callable(value)\n # that means, that callable raised an exception\n if values is None:\n return\n else:\n md_value, pd_value = values\n else:\n md_value, pd_value = value, value\n\n md_kwargs[key] = md_value\n pd_kwargs[key] = pd_value\n\n if md_extra_kwargs:\n assert isinstance(md_extra_kwargs, dict)\n md_kwargs.update(md_extra_kwargs)\n\n values = execute_callable(\n operation, md_kwargs=md_kwargs, pd_kwargs=pd_kwargs, inplace=__inplace__\n )\n if values is not None:\n comparator(*values)\n\n\ndef eval_io(\n fn_name,\n comparator=df_equals,\n cast_to_str=False,\n check_exception_type=True,\n raising_exceptions=io_ops_bad_exc,\n check_kwargs_callable=True,\n modin_warning=None,\n md_extra_kwargs=None,\n *args,\n **kwargs,\n):\n \"\"\"Evaluate I/O operation outputs equality check.\n\n Parameters\n ----------\n fn_name: str\n I/O operation name (\"read_csv\" for example).\n comparator: obj\n Function to perform comparison.\n cast_to_str: bool\n There could be some missmatches in dtypes, so we're\n casting the whole frame to `str` before comparison.\n See issue #1931 for details.\n check_exception_type: bool\n Check or not exception types in the case of operation fail\n (compare exceptions types raised by Pandas and Modin).\n raising_exceptions: Exception or list of Exceptions\n Exceptions that should be raised even if they are raised\n both by Pandas and Modin (check evaluated only if\n `check_exception_type` passed as `True`).\n modin_warning: obj\n Warning that should 
be raised by Modin.\n md_extra_kwargs: dict\n Modin operation specific kwargs.\n \"\"\"\n\n def applyier(module, *args, **kwargs):\n result = getattr(module, fn_name)(*args, **kwargs)\n if cast_to_str:\n result = result.astype(str)\n return result\n\n def call_eval_general():\n eval_general(\n pd,\n pandas,\n applyier,\n check_exception_type=check_exception_type,\n raising_exceptions=raising_exceptions,\n check_kwargs_callable=check_kwargs_callable,\n md_extra_kwargs=md_extra_kwargs,\n *args,\n **kwargs,\n )\n\n if modin_warning:\n with pytest.warns(modin_warning):\n call_eval_general()\n else:\n call_eval_general()\n\n\ndef eval_io_from_str(csv_str: str, unique_filename: str, **kwargs):\n \"\"\"Evaluate I/O operation outputs equality check by using `csv_str`\n data passed as python str (csv test file will be created from `csv_str`).\n\n Parameters\n ----------\n csv_str: str\n Test data for storing to csv file.\n unique_filename: str\n csv file name.\n \"\"\"\n try:\n with open(unique_filename, \"w\") as f:\n f.write(csv_str)\n\n eval_io(\n filepath_or_buffer=unique_filename,\n fn_name=\"read_csv\",\n **kwargs,\n )\n\n finally:\n if os.path.exists(unique_filename):\n try:\n os.remove(unique_filename)\n except PermissionError:\n pass\n\n\ndef create_test_dfs(*args, **kwargs):\n post_fn = kwargs.pop(\"post_fn\", lambda df: df)\n return map(\n post_fn, [pd.DataFrame(*args, **kwargs), pandas.DataFrame(*args, **kwargs)]\n )\n\n\ndef generate_dfs():\n df = pandas.DataFrame(\n {\n \"col1\": [0, 1, 2, 3],\n \"col2\": [4, 5, 6, 7],\n \"col3\": [8, 9, 10, 11],\n \"col4\": [12, 13, 14, 15],\n \"col5\": [0, 0, 0, 0],\n }\n )\n\n df2 = pandas.DataFrame(\n {\n \"col1\": [0, 1, 2, 3],\n \"col2\": [4, 5, 6, 7],\n \"col3\": [8, 9, 10, 11],\n \"col6\": [12, 13, 14, 15],\n \"col7\": [0, 0, 0, 0],\n }\n )\n return df, df2\n\n\ndef generate_multiindex_dfs(axis=1):\n def generate_multiindex(index):\n return pandas.MultiIndex.from_tuples(\n [(\"a\", x) for x in index.values], names=[\"name1\", \"name2\"]\n )\n\n df1, df2 = generate_dfs()\n df1.axes[axis], df2.axes[axis] = map(\n generate_multiindex, [df1.axes[axis], df2.axes[axis]]\n )\n return df1, df2\n\n\ndef generate_multiindex(elements_number, nlevels=2, is_tree_like=False):\n def generate_level(length, nlevel):\n src = [\"bar\", \"baz\", \"foo\", \"qux\"]\n return [src[i % len(src)] + f\"-{nlevel}-{i}\" for i in range(length)]\n\n if is_tree_like:\n for penalty_level in [0, 1]:\n lvl_len_f, lvl_len_d = math.modf(\n round(elements_number ** (1 / (nlevels - penalty_level)), 12)\n )\n if lvl_len_d >= 2 and lvl_len_f == 0:\n break\n\n if lvl_len_d < 2 or lvl_len_f != 0:\n raise RuntimeError(\n f\"Can't generate Tree-like MultiIndex with lenght: {elements_number} and number of levels: {nlevels}\"\n )\n\n lvl_len = int(lvl_len_d)\n result = pd.MultiIndex.from_product(\n [generate_level(lvl_len, i) for i in range(nlevels - penalty_level)],\n names=[f\"level-{i}\" for i in range(nlevels - penalty_level)],\n )\n if penalty_level:\n result = pd.MultiIndex.from_tuples(\n [(\"base_level\", *ml_tuple) for ml_tuple in result],\n names=[f\"level-{i}\" for i in range(nlevels)],\n )\n return result.sort_values()\n else:\n base_level = [\"first\"] * (elements_number // 2 + elements_number % 2) + [\n \"second\"\n ] * (elements_number // 2)\n primary_levels = [generate_level(elements_number, i) for i in range(1, nlevels)]\n arrays = [base_level] + primary_levels\n return pd.MultiIndex.from_tuples(\n list(zip(*arrays)), names=[f\"level-{i}\" for i in range(nlevels)]\n 
).sort_values()\n\n\ndef generate_none_dfs():\n df = pandas.DataFrame(\n {\n \"col1\": [0, 1, 2, 3],\n \"col2\": [4, 5, None, 7],\n \"col3\": [8, 9, 10, 11],\n \"col4\": [12, 13, 14, 15],\n \"col5\": [None, None, None, None],\n }\n )\n\n df2 = pandas.DataFrame(\n {\n \"col1\": [0, 1, 2, 3],\n \"col2\": [4, 5, 6, 7],\n \"col3\": [8, 9, 10, 11],\n \"col6\": [12, 13, 14, 15],\n \"col7\": [0, 0, 0, 0],\n }\n )\n return df, df2\n\n\ndef get_unique_filename(\n test_name: str = \"test\",\n kwargs: dict = {},\n extension: str = \"csv\",\n data_dir: str = IO_OPS_DATA_DIR,\n suffix: str = \"\",\n debug_mode=False,\n):\n \"\"\"Returns unique file name with specified parameters.\n\n Parameters\n ----------\n test_name: str\n name of the test for which the unique file name is needed.\n kwargs: list of ints\n Unique combiantion of test parameters for creation of unique name.\n extension: str\n Extension of unique file.\n data_dir: str\n Data directory where test files will be created.\n suffix: str\n String to append to the resulted name.\n debug_mode: bool\n Get unique filename containing kwargs values.\n Otherwise kwargs values will be replaced with hash equivalent.\n\n Returns\n -------\n Unique file name.\n \"\"\"\n suffix_part = f\"_{suffix}\" if suffix else \"\"\n extension_part = f\".{extension}\" if extension else \"\"\n if debug_mode:\n # shortcut if kwargs parameter are not provided\n if len(kwargs) == 0 and extension == \"csv\" and suffix == \"\":\n return os.path.join(data_dir, (test_name + suffix_part + f\".{extension}\"))\n\n assert \".\" not in extension, \"please provide pure extension name without '.'\"\n prohibited_chars = ['\"', \"\\n\"]\n non_prohibited_char = \"np_char\"\n char_counter = 0\n kwargs_name = dict(kwargs)\n for key, value in kwargs_name.items():\n for char in prohibited_chars:\n if isinstance(value, str) and char in value or callable(value):\n kwargs_name[key] = non_prohibited_char + str(char_counter)\n char_counter += 1\n parameters_values = \"_\".join(\n [\n str(value)\n if not isinstance(value, (list, tuple))\n else \"_\".join([str(x) for x in value])\n for value in kwargs_name.values()\n ]\n )\n return os.path.join(\n data_dir, test_name + parameters_values + suffix_part + extension_part\n )\n else:\n import uuid\n\n return os.path.join(data_dir, uuid.uuid1().hex + suffix_part + extension_part)\n\n\ndef get_random_string():\n random_string = \"\".join(\n random_state.choice([x for x in ascii_letters], size=10).tolist()\n )\n return random_string\n\n\ndef insert_lines_to_csv(\n csv_name: str,\n lines_positions: list,\n lines_type: str = \"blank\",\n encoding: str = None,\n **csv_reader_writer_params,\n):\n \"\"\"Insert lines to \".csv\" file.\n\n Parameters\n ----------\n csv_name: str\n \".csv\" file that should be modified.\n lines_positions: list of ints\n Lines postions that sghould be modified (serial number\n of line - begins from 0, ends in <rows_number> - 1).\n lines_type: str\n Lines types that should be inserted to \".csv\" file. 
Possible types:\n \"blank\" - empty line without any delimiters/separators,\n \"bad\" - lines with len(lines_data) > cols_number\n encoding: str\n Encoding type that should be used during file reading and writing.\n \"\"\"\n cols_number = len(pandas.read_csv(csv_name, nrows=1).columns)\n if lines_type == \"blank\":\n lines_data = []\n elif lines_type == \"bad\":\n cols_number = len(pandas.read_csv(csv_name, nrows=1).columns)\n lines_data = [x for x in range(cols_number + 1)]\n else:\n raise ValueError(\n f\"acceptable values for parameter are ['blank', 'bad'], actually passed {lines_type}\"\n )\n lines = []\n dialect = \"excel\"\n with open(csv_name, \"r\", encoding=encoding, newline=\"\") as read_file:\n try:\n dialect = csv.Sniffer().sniff(read_file.read())\n read_file.seek(0)\n except Exception:\n dialect = None\n\n reader = csv.reader(\n read_file,\n dialect=dialect if dialect is not None else \"excel\",\n **csv_reader_writer_params,\n )\n counter = 0\n for row in reader:\n if counter in lines_positions:\n lines.append(lines_data)\n else:\n lines.append(row)\n counter += 1\n with open(csv_name, \"w\", encoding=encoding, newline=\"\") as write_file:\n writer = csv.writer(\n write_file,\n dialect=dialect if dialect is not None else \"excel\",\n **csv_reader_writer_params,\n )\n writer.writerows(lines)\n\n\ndef _get_open_files():\n \"\"\"\n psutil open_files() can return a lot of extra information that we can allow to\n be different, like file position; for simplicity we care about path and fd only.\n \"\"\"\n return sorted((info.path, info.fd) for info in psutil.Process().open_files())\n\n\ndef check_file_leaks(func):\n \"\"\"\n A decorator that ensures that no *newly* opened file handles are left\n after decorated function is finished.\n \"\"\"\n if not TrackFileLeaks.get():\n return func\n\n @functools.wraps(func)\n def check(*a, **kw):\n fstart = _get_open_files()\n try:\n return func(*a, **kw)\n finally:\n leaks = []\n for item in _get_open_files():\n try:\n fstart.remove(item)\n except ValueError:\n # ignore files in /proc/, as they have nothing to do with\n # modin reading any data (and this is what we care about)\n if not item[0].startswith(\"/proc/\"):\n leaks.append(item)\n assert (\n not leaks\n ), f\"Unexpected open handles left for: {', '.join(item[0] for item in leaks)}\"\n\n return check\n\n\ndef dummy_decorator():\n \"\"\"A problematic decorator that does not use `functools.wraps`. This introduces unwanted local variables for\n inspect.currentframe. 
This decorator is used in test_io to test `read_csv` and `read_table`\n \"\"\"\n\n def wrapper(method):\n def wrapped_function(self, *args, **kwargs):\n result = method(self, *args, **kwargs)\n return result\n\n return wrapped_function\n\n return wrapper\n\n\ndef generate_dataframe(row_size=NROWS, additional_col_values=None):\n dates = pandas.date_range(\"2000\", freq=\"h\", periods=row_size)\n data = {\n \"col1\": np.arange(row_size) * 10,\n \"col2\": [str(x.date()) for x in dates],\n \"col3\": np.arange(row_size) * 10,\n \"col4\": [str(x.time()) for x in dates],\n \"col5\": [get_random_string() for _ in range(row_size)],\n \"col6\": random_state.uniform(low=0.0, high=10000.0, size=row_size),\n }\n\n if additional_col_values is not None:\n assert isinstance(additional_col_values, (list, tuple))\n data.update(\n {\n \"col7\": random_state.choice(additional_col_values, size=row_size),\n }\n )\n return pandas.DataFrame(data)\n\n\ndef _make_csv_file(filenames):\n def _csv_file_maker(\n filename,\n row_size=NROWS,\n force=True,\n delimiter=\",\",\n encoding=None,\n compression=\"infer\",\n additional_col_values=None,\n remove_randomness=False,\n add_blank_lines=False,\n add_bad_lines=False,\n add_nan_lines=False,\n thousands_separator=None,\n decimal_separator=None,\n comment_col_char=None,\n quoting=csv.QUOTE_MINIMAL,\n quotechar='\"',\n doublequote=True,\n escapechar=None,\n line_terminator=None,\n ):\n if os.path.exists(filename) and not force:\n pass\n else:\n df = generate_dataframe(row_size, additional_col_values)\n if remove_randomness:\n df = df[[\"col1\", \"col2\", \"col3\", \"col4\"]]\n if add_nan_lines:\n for i in range(0, row_size, row_size // (row_size // 10)):\n df.loc[i] = pandas.Series()\n if comment_col_char:\n char = comment_col_char if isinstance(comment_col_char, str) else \"#\"\n df.insert(\n loc=0,\n column=\"col_with_comments\",\n value=[char if (x + 2) == 0 else x for x in range(row_size)],\n )\n\n if thousands_separator:\n for col_id in [\"col1\", \"col3\"]:\n df[col_id] = df[col_id].apply(\n lambda x: f\"{x:,d}\".replace(\",\", thousands_separator)\n )\n df[\"col6\"] = df[\"col6\"].apply(\n lambda x: f\"{x:,f}\".replace(\",\", thousands_separator)\n )\n filename = (\n f\"{filename}.{COMP_TO_EXT[compression]}\"\n if compression != \"infer\"\n else filename\n )\n df.to_csv(\n filename,\n sep=delimiter,\n encoding=encoding,\n compression=compression,\n index=False,\n decimal=decimal_separator if decimal_separator else \".\",\n line_terminator=line_terminator,\n quoting=quoting,\n quotechar=quotechar,\n doublequote=doublequote,\n escapechar=escapechar,\n )\n csv_reader_writer_params = {\n \"delimiter\": delimiter,\n \"doublequote\": doublequote,\n \"escapechar\": escapechar,\n \"lineterminator\": line_terminator if line_terminator else os.linesep,\n \"quotechar\": quotechar,\n \"quoting\": quoting,\n }\n if add_blank_lines:\n insert_lines_to_csv(\n csv_name=filename,\n lines_positions=[\n x for x in range(5, row_size, row_size // (row_size // 10))\n ],\n lines_type=\"blank\",\n encoding=encoding,\n **csv_reader_writer_params,\n )\n if add_bad_lines:\n insert_lines_to_csv(\n csv_name=filename,\n lines_positions=[\n x for x in range(6, row_size, row_size // (row_size // 10))\n ],\n lines_type=\"bad\",\n encoding=encoding,\n **csv_reader_writer_params,\n )\n filenames.append(filename)\n return df\n\n return _csv_file_maker\n\n\ndef teardown_test_file(test_path):\n if os.path.exists(test_path):\n # PermissionError can occure because of issue #2533\n try:\n 
os.remove(test_path)\n except PermissionError:\n pass\n\n\ndef teardown_test_files(test_paths: list):\n for path in test_paths:\n teardown_test_file(path)\n\n\ndef sort_index_for_equal_values(series, ascending=False):\n if series.index.dtype == np.float64:\n # HACK: workaround for pandas bug:\n # https://github.com/pandas-dev/pandas/issues/34455\n series.index = series.index.astype(\"str\")\n res = series.groupby(series, sort=False).apply(\n lambda df: df.sort_index(ascending=ascending)\n )\n if res.index.nlevels > series.index.nlevels:\n # Sometimes GroupBy adds an extra level with 'by' to the result index.\n # GroupBy is very inconsistent about when it's doing this, so that's\n # why this clumsy if-statement is used.\n res.index = res.index.droplevel(0)\n res.name = series.name\n return res\n" ]
[ [ "numpy.testing.assert_almost_equal", "pandas.Series", "pandas.date_range", "pandas.api.types.is_list_like", "pandas.read_csv", "pandas.DataFrame", "numpy.testing.assert_array_equal", "numpy.random.RandomState", "pandas.testing.assert_index_equal", "numpy.arange", "pandas.MultiIndex.from_tuples", "pandas.testing.assert_extension_array_equal", "pandas.testing.assert_series_equal", "pandas.testing.assert_frame_equal" ] ]
Razorro/xalpha
[ "bcecd53dc9d081deb1b8235437a4f6b74951c23d" ]
[ "xalpha/cons.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nbasic constants and utility functions\n\"\"\"\n\nimport datetime as dt\nimport os\nimport time\nimport logging\nimport inspect\nfrom decimal import Decimal\nimport requests\nfrom functools import wraps\nfrom simplejson.errors import JSONDecodeError\n\nimport pandas as pd\nfrom pyecharts.options import (\n AxisOpts,\n DataZoomOpts,\n LegendOpts,\n TooltipOpts,\n VisualMapOpts,\n)\nfrom numpy import sqrt\nfrom scipy import optimize\n\nfrom xalpha import __path__\nfrom .exceptions import HttpStatusError\n\nlogger = logging.getLogger(__name__)\n\n# date obj of today\n# today = lambda: dt.datetime.combine(dt.date.today(), dt.time.min)\n\ntz_bj = dt.timezone(dt.timedelta(hours=8))\n\n\ndef today_obj():\n \"\"\"\n today obj in beijing timezone with no tzinfo\n\n :return: datetime.datetime\n \"\"\"\n now = dt.datetime.now(tz=tz_bj)\n return now.replace(hour=0, minute=0, second=0, microsecond=0).replace(tzinfo=None)\n\n\n# datetime obj for yesterdate date with time set to be 0:0:0\nyesterdayobj = lambda: (dt.datetime.now(tz_bj).replace(tzinfo=None) - dt.timedelta(1))\n\n# string for yesterday, only used for indexinfo url\nyesterday = lambda: dt.datetime.strftime(yesterdayobj(), \"%Y%m%d\")\n\n# string for yesterday with dash\nyesterdaydash = lambda: dt.datetime.strftime(yesterdayobj(), \"%Y-%m-%d\")\n\n\n# list: all the trade date of domestic stock market in the form of string\ncaldate = pd.read_csv(os.path.join(__path__[0], \"caldate.csv\"))\nopendate = list(caldate[caldate[\"is_open\"] == 1][\"cal_date\"])\n# opendate = list(ts.trade_cal()[ts.trade_cal()['isOpen']==1]['calendarDate'])\nopendate_set = set(opendate) # for speed checking?\n\n# fund code list which always round down for the purchase share approximation\ndroplist = [\"003318\", \"000311\", \"000601\", \"009989\"]\n\nsqrt_days_in_year = sqrt(250.0)\n\n\ndef calendar_selfcheck():\n # 国内链接 githubusercontent.com 大概率存在问题,因此设计成联网自动更新日历大概率无用。\n # 也许之后考虑一些较稳定的第三方资源托管服务\n current_year = dt.datetime.now().year\n if str(current_year) != opendate[-1][:4]:\n logger.warning(\n \"Please update xalpha via `pip install -U xalpha` to keep the trade calendar up-to-date\"\n )\n print(\"请更新 xalpha 版本以更新最新年份的 A 股交易日历, 否则将可能无法正确获取和处理最新的基金净值\")\n\n\ncalendar_selfcheck()\n\n\nregion_trans = {\n \"瑞士\": \"CH\",\n \"日本\": \"JP\",\n \"韩国\": \"KR\",\n \"美国\": \"US\",\n \"香港\": \"HK\",\n \"中国香港\": \"HK\",\n \"德国\": \"DE\",\n \"英国\": \"UK\",\n \"法国\": \"FR\",\n \"中国\": \"CN\",\n \"墨西哥\": \"MX\",\n \"澳大利亚\": \"AU\",\n \"新加坡\": \"SG\",\n \"印度\": \"IN\",\n \"台湾\": \"TW\",\n \"中国台湾\": \"TW\",\n}\n\n# extract from xa.misc.get_tdx_holidays\nholidays = {\n \"AU\": [\n \"2020-01-01\",\n \"2020-01-27\",\n \"2020-04-10\",\n \"2020-04-13\",\n \"2020-04-25\",\n \"2020-06-08\",\n \"2020-12-24\",\n \"2020-12-25\",\n \"2020-12-28\",\n \"2020-12-31\",\n \"2021-01-01\",\n \"2021-01-26\",\n \"2021-04-02\",\n \"2021-04-05\",\n \"2021-06-14\",\n \"2021-12-24\",\n \"2021-12-27\",\n \"2021-12-28\",\n \"2021-12-31\",\n ],\n \"CH\": [\n \"2020-01-01\",\n \"2020-01-02\",\n \"2020-04-10\",\n \"2020-04-13\",\n \"2020-05-01\",\n \"2020-05-21\",\n \"2020-06-01\",\n \"2020-12-24\",\n \"2020-12-25\",\n \"2020-12-31\",\n \"2021-01-01\",\n \"2021-04-02\",\n \"2021-04-05\",\n \"2021-05-13\",\n \"2021-05-24\",\n \"2021-12-24\",\n \"2021-12-31\",\n ],\n \"CN\": [\n \"2020-01-01\",\n \"2020-01-24\",\n \"2020-01-27\",\n \"2020-01-28\",\n \"2020-01-29\",\n \"2020-01-30\",\n \"2020-01-31\",\n \"2020-04-06\",\n \"2020-05-01\",\n \"2020-05-04\",\n \"2020-05-05\",\n 
\"2020-06-25\",\n \"2020-06-26\",\n \"2020-10-01\",\n \"2020-10-02\",\n \"2020-10-05\",\n \"2020-10-06\",\n \"2020-10-07\",\n \"2020-10-08\",\n \"2021-01-01\",\n \"2021-02-11\",\n \"2021-02-12\",\n \"2021-02-15\",\n \"2021-02-16\",\n \"2021-02-17\",\n \"2021-04-05\",\n \"2021-05-03\",\n \"2021-05-04\",\n \"2021-05-05\",\n \"2021-06-14\",\n \"2021-09-20\",\n \"2021-09-21\",\n \"2021-10-01\",\n \"2021-10-04\",\n \"2021-10-05\",\n \"2021-10-06\",\n \"2021-10-07\",\n ],\n \"DE\": [\n \"2020-01-01\",\n \"2020-04-10\",\n \"2020-04-13\",\n \"2020-05-01\",\n \"2020-06-01\",\n \"2020-12-24\",\n \"2020-12-25\",\n \"2020-12-31\",\n \"2021-01-01\",\n \"2021-04-02\",\n \"2021-04-05\",\n \"2021-05-24\",\n \"2021-12-24\",\n \"2021-12-31\",\n ],\n \"FR\": [\n \"2020-01-01\",\n \"2020-04-10\",\n \"2020-04-13\",\n \"2020-05-01\",\n \"2020-12-24\",\n \"2020-12-25\",\n \"2020-12-31\",\n \"2021-01-01\",\n \"2021-04-02\",\n \"2021-04-05\",\n \"2021-12-24\",\n \"2021-12-31\",\n ],\n \"HK\": [\n \"2020-01-01\",\n \"2020-01-27\",\n \"2020-01-28\",\n \"2020-04-10\",\n \"2020-04-13\",\n \"2020-04-30\",\n \"2020-05-01\",\n \"2020-06-25\",\n \"2020-07-01\",\n \"2020-10-01\",\n \"2020-10-02\",\n \"2020-10-26\",\n \"2020-12-25\",\n \"2021-01-01\",\n \"2021-02-11\",\n \"2021-02-12\",\n \"2021-02-15\",\n \"2021-04-02\",\n \"2021-04-05\",\n \"2021-04-06\",\n \"2021-05-19\",\n \"2021-06-14\",\n \"2021-07-01\",\n \"2021-09-22\",\n \"2021-10-01\",\n \"2021-10-14\",\n \"2021-12-24\",\n \"2021-12-27\",\n \"2021-12-31\",\n ],\n \"IN\": [\n \"2020-02-21\",\n \"2020-03-10\",\n \"2020-04-02\",\n \"2020-04-06\",\n \"2020-04-10\",\n \"2020-04-14\",\n \"2020-05-01\",\n \"2020-05-25\",\n \"2020-10-02\",\n \"2020-11-16\",\n \"2020-11-30\",\n \"2020-12-25\",\n \"2021-01-26\",\n \"2021-03-11\",\n \"2021-03-29\",\n \"2021-04-02\",\n \"2021-04-14\",\n \"2021-04-21\",\n \"2021-05-13\",\n \"2021-07-20\",\n \"2021-08-19\",\n \"2021-09-10\",\n \"2021-10-15\",\n \"2021-11-04\",\n \"2021-11-19\",\n ],\n \"JP\": [\n \"2020-01-01\",\n \"2020-01-02\",\n \"2020-01-03\",\n \"2020-01-13\",\n \"2020-02-11\",\n \"2020-02-24\",\n \"2020-03-20\",\n \"2020-04-29\",\n \"2020-05-04\",\n \"2020-05-05\",\n \"2020-05-06\",\n \"2020-07-23\",\n \"2020-07-24\",\n \"2020-08-10\",\n \"2020-09-21\",\n \"2020-09-22\",\n \"2020-11-03\",\n \"2020-11-23\",\n \"2020-12-31\",\n \"2021-01-01\",\n \"2021-01-11\",\n \"2021-02-11\",\n \"2021-02-23\",\n \"2021-04-29\",\n \"2021-05-03\",\n \"2021-05-04\",\n \"2021-05-05\",\n \"2021-07-22\",\n \"2021-07-23\",\n \"2021-08-09\",\n \"2021-09-20\",\n \"2021-09-23\",\n \"2021-11-03\",\n \"2021-11-23\",\n \"2021-12-31\",\n ],\n \"KR\": [\n \"2020-01-01\",\n \"2020-01-24\",\n \"2020-01-27\",\n \"2020-04-30\",\n \"2020-05-01\",\n \"2020-05-05\",\n \"2020-09-30\",\n \"2020-10-01\",\n \"2020-10-02\",\n \"2020-10-09\",\n \"2020-12-25\",\n \"2020-12-31\",\n \"2021-01-01\",\n \"2021-02-11\",\n \"2021-02-12\",\n \"2021-03-01\",\n \"2021-05-05\",\n \"2021-05-19\",\n \"2021-09-20\",\n \"2021-09-21\",\n \"2021-09-22\",\n \"2021-12-31\",\n ],\n \"SG\": [\n \"2020-01-01\",\n \"2020-01-24\",\n \"2020-04-10\",\n \"2020-05-01\",\n \"2020-05-07\",\n \"2020-05-21\",\n \"2020-07-31\",\n \"2020-08-10\",\n \"2020-12-24\",\n \"2020-12-25\",\n \"2020-12-31\",\n \"2021-01-01\",\n \"2021-02-11\",\n \"2021-02-12\",\n \"2021-04-02\",\n \"2021-05-13\",\n \"2021-05-26\",\n \"2021-07-20\",\n \"2021-08-09\",\n \"2021-11-04\",\n \"2021-12-24\",\n \"2021-12-31\",\n ],\n \"TW\": [\n \"2020-01-01\",\n \"2020-01-21\",\n \"2020-01-22\",\n \"2020-01-23\",\n 
\"2020-01-24\",\n \"2020-01-27\",\n \"2020-01-28\",\n \"2020-01-29\",\n \"2020-02-28\",\n \"2020-04-02\",\n \"2020-04-03\",\n \"2020-05-01\",\n \"2020-06-25\",\n \"2020-06-26\",\n \"2020-10-01\",\n \"2020-10-02\",\n \"2020-10-09\",\n \"2021-01-01\",\n \"2021-02-08\",\n \"2021-02-09\",\n \"2021-02-10\",\n \"2021-02-11\",\n \"2021-02-12\",\n \"2021-02-15\",\n \"2021-02-16\",\n \"2021-03-01\",\n \"2021-04-02\",\n \"2021-04-05\",\n \"2021-04-30\",\n \"2021-06-14\",\n \"2021-09-20\",\n \"2021-09-21\",\n \"2021-10-11\",\n \"2021-12-31\",\n ],\n \"UK\": [\n \"2020-01-01\",\n \"2020-04-10\",\n \"2020-04-13\",\n \"2020-05-08\",\n \"2020-05-25\",\n \"2020-08-31\",\n \"2020-12-24\",\n \"2020-12-25\",\n \"2020-12-28\",\n \"2020-12-31\",\n \"2021-01-01\",\n \"2021-01-01\",\n \"2021-04-02\",\n \"2021-04-05\",\n \"2021-05-03\",\n \"2021-05-31\",\n \"2021-08-30\",\n \"2021-12-24\",\n \"2021-12-27\",\n \"2021-12-28\",\n \"2021-12-31\",\n \"2022-01-03\",\n ],\n \"US\": [\n \"2020-01-01\",\n \"2020-01-20\",\n \"2020-02-17\",\n \"2020-03-08\",\n \"2020-04-10\",\n \"2020-05-25\",\n \"2020-07-03\",\n \"2020-09-07\",\n \"2020-11-01\",\n \"2020-11-26\",\n \"2020-11-27\",\n \"2020-12-24\",\n \"2020-12-25\",\n \"2021-01-01\",\n \"2021-01-01\",\n \"2021-01-18\",\n \"2021-02-15\",\n \"2021-03-14\",\n \"2021-04-02\",\n \"2021-05-31\",\n \"2021-07-05\",\n \"2021-09-06\",\n \"2021-11-07\",\n \"2021-11-25\",\n \"2021-11-26\",\n \"2021-12-24\",\n ],\n}\n\nconnection_errors = (\n HttpStatusError,\n ConnectionResetError,\n requests.exceptions.RequestException,\n requests.exceptions.ConnectionError,\n requests.exceptions.SSLError,\n JSONDecodeError,\n)\n\nline_opts = {\n \"datazoom_opts\": [\n DataZoomOpts(is_show=True, type_=\"slider\", range_start=50, range_end=100),\n DataZoomOpts(\n is_show=True,\n type_=\"slider\",\n orient=\"vertical\",\n range_start=50,\n range_end=100,\n ),\n ],\n \"tooltip_opts\": TooltipOpts(\n is_show=True, trigger=\"axis\", trigger_on=\"mousemove\", axis_pointer_type=\"cross\"\n ),\n}\n\nheatmap_opts = {\n \"visualmap_opts\": VisualMapOpts(\n min_=-1, max_=1, orient=\"horizontal\", pos_right=\"middle\", pos_top=\"bottom\"\n )\n}\n\n# pie_opts = {\n# \"tooltip_opts\": TooltipOpts(),\n# \"legend_opts\": LegendOpts(orient=\"vertical\", pos_left=\"left\"),\n# }\n\nthemeriver_opts = {\n \"xaxis_opts\": AxisOpts(type_=\"time\"),\n \"datazoom_opts\": [DataZoomOpts(range_start=60, range_end=100)],\n \"tooltip_opts\": TooltipOpts(trigger_on=\"mousemove\", trigger=\"item\"),\n \"legend_opts\": LegendOpts(pos_top=\"top\"),\n}\n\n\ndef xnpv(rate, cashflows):\n \"\"\"\n give the current cash value based on future cashflows\n\n :param rate: float, the preset year rate\n :param cashflows: a list, in which each element is a tuple of the form (date, amount),\n where date is a datetime object and amount is an integer or floating number.\n Cash outflows (investments) are represented with negative amounts,\n and cash inflows (returns) are positive amounts.\n :returns: a single float value which is the NPV of the given cash flows\n \"\"\"\n chron_order = sorted(cashflows, key=lambda x: x[0])\n t0 = chron_order[0][0]\n return sum([cf / (1 + rate) ** ((t - t0).days / 365.0) for (t, cf) in chron_order])\n\n\ndef xirr(cashflows, guess=0.1):\n \"\"\"\n calculate the Internal Rate of Return of a series of cashflows at irregular intervals.\n\n :param cashflows: a list, in which each element is a tuple of the form (date, amount),\n where date is a datetime object and amount is an integer or floating number.\n Cash outflows 
(investments) are represented with negative amounts,\n and cash inflows (returns) are positive amounts.\n :param guess: floating number, a guess at the xirr rate solution to be used\n as a starting point for the numerical solution\n :returns: the IRR as a single floating number\n \"\"\"\n return optimize.newton(lambda r: xnpv(r, cashflows), guess)\n\n\ndef myround(num, label=1):\n \"\"\"\n correct implementation of round with round half up, round to 2 decimals\n\n :param num: the floating number, to be rounded\n :param label: integer 1 or 2, 1 for round half up while 2 for always round down\n :returns: the float number after rounding, with two decimals\n \"\"\"\n if label == 1:\n res = float(\n Decimal(str(num)).quantize(Decimal(\"0.01\"), rounding=\"ROUND_HALF_UP\")\n )\n elif (\n label == 2\n ): # for jingshunchangcheng... who just omit the overflow share behind 2 decimal\n res = float(Decimal(str(num)).quantize(Decimal(\"0.01\"), rounding=\"ROUND_DOWN\"))\n return res\n\n\ndef convert_date(date):\n \"\"\"\n convert date into datetime object\n\n :param date: string of form '2017-01-01' or datetime object\n :returns: corresponding datetime object\n \"\"\"\n if isinstance(date, str):\n return pd.Timestamp(date)\n else:\n return date\n\n\ndef _date_check(dtobj, check=False):\n if not isinstance(dtobj, dt.datetime):\n dtobj = dt.datetime.strptime(dtobj.replace(\"/\", \"\").replace(\"-\", \"\"), \"%Y%m%d\")\n if check and (dtobj.year > dt.datetime.now().year or dtobj.year < 1991):\n raise ValueError(\n \"date goes beyond market range: %s\" % dtobj.strftime(\"%Y-%m-%d\")\n )\n return dtobj\n\n\ndef next_onday(dtobj):\n dtobj = _date_check(dtobj, check=True)\n dtobj += dt.timedelta(1)\n while dtobj.strftime(\"%Y-%m-%d\") not in opendate_set:\n dtobj += dt.timedelta(1)\n return dtobj\n\n\ndef last_onday(dtobj):\n dtobj = _date_check(dtobj, check=True)\n dtobj -= dt.timedelta(1)\n while dtobj.strftime(\"%Y-%m-%d\") not in opendate_set:\n dtobj -= dt.timedelta(1)\n return dtobj\n\n\ndef avail_dates(dtlist, future=False):\n \"\"\"\n make every day in the list the next open day\n\n :param dtlist: datetime obj list\n :param future: bool, default False, indicating the latest day in the list is yesterday\n :return: datetime obj list\n \"\"\"\n ndtlist = []\n for d in dtlist:\n if d.strftime(\"%Y-%m-%d\") not in opendate_set:\n nd = next_onday(d)\n else:\n nd = d\n if future is False:\n if (nd - yesterdayobj()).days > 0:\n continue\n ndtlist.append(nd)\n return ndtlist\n\n\ndef scale_dict(d, scale=1, ulimit=100, dlimit=50, aim=None):\n t = sum([v for _, v in d.items()])\n if t * scale > ulimit:\n scale = ulimit / t\n elif t * scale < dlimit:\n scale = dlimit / t\n if aim:\n scale = aim / t\n for k, v in d.items():\n d[k] = v * scale\n return d\n\n\ndef _float(n):\n try:\n n = n.replace(\",\", \"\")\n if n.endswith(\"K\") or n.endswith(\"k\"):\n n = float(n[:-1]) * 1000\n elif n.endswith(\"M\") or n.endswith(\"m\"):\n n = float(n[:-1]) * 1000 * 1000\n elif n.endswith(\"G\") or n.endswith(\"g\") or n.endswith(\"B\") or n.endswith(\"b\"):\n n = float(n[:-1]) * 1000 * 1000 * 1000\n elif n == \"-\":\n logger.info(\"_float met -, taken as 0\")\n return 0\n elif n.endswith(\"%\"):\n logger.info(\"_float met with %% as %s\" % n)\n return float(n[:-1]) / 100\n except AttributeError:\n pass\n if not n:\n logger.info(\"_float met with None as input arguments\")\n return 0.0\n return float(n)\n\n\ndef reconnect(tries=5, timeout=12):\n def robustify(f):\n default_header = {\n 'accept': 
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'\n 'application/signed-exchange;v=b3;q=0.9',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,ja;q=0.5',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/89.0.4389.114 Safari/537.36 Edg/89.0.774.76',\n }\n\n @wraps(f)\n def wrapper(*args, **kws):\n import xalpha.provider as xp\n\n if getattr(xp, \"proxy\", None):\n kws[\"proxies\"] = {\"http\": xp.proxy, \"https\": xp.proxy}\n kws[\"timeout\"] = timeout\n logger.debug(\"Using proxy %s\" % xp.proxy)\n if args:\n url = args[0]\n else:\n url = kws.get(\"url\", \"\")\n\n headers = kws.get(\"headers\", {})\n if len(headers) == 0:\n headers.update(default_header)\n\n kws[\"headers\"] = headers\n for count in range(tries):\n try:\n logger.debug(\"Fetching url: %s . Inside function `%s`\" % (url, inspect.stack()[1].function))\n r = f(*args, **kws)\n if getattr(r, \"status_code\", 200) != 200: # in case r is a json dict\n raise HttpStatusError\n return r\n except connection_errors as e:\n logger.warning(\"Fails at fetching url: %s. Try again.\" % url)\n\n if count == tries - 1:\n logger.error(\"Still wrong at fetching url: %s. after %s tries.\" % (url, tries))\n logger.error(\"Fails due to %s\" % e.args[0])\n raise e\n\n time.sleep(0.5 * count)\n\n return wrapper\n\n return robustify\n\n\nrget = reconnect()(requests.get)\nrpost = reconnect()(requests.post)\n\n\n@reconnect()\ndef rget_json(*args, **kws):\n r = requests.get(*args, **kws)\n return r.json()\n\n\n@reconnect()\ndef rpost_json(*args, **kws):\n r = requests.post(*args, **kws)\n return r.json()\n\n\n# def rget(*args, **kws):\n# tries = 5\n# for count in range(tries):\n# try:\n# r = requests.get(*args, **kws)\n# return r\n# except connection_errors as e:\n# if count == tries - 1:\n# print(*args, sep=\"\\n\")\n# print(\"still wrong after several tries\")\n# raise e\n# time.sleep(0.5*count)\n#\n#\n# def rget_json(*args, **kws):\n# tries = 5\n# for count in range(tries):\n# try:\n# r = requests.get(*args, **kws)\n# return r.json()\n# except connection_errors as e:\n# if count == tries - 1:\n# print(*args, sep=\"\\n\")\n# print(\"still wrong after several tries\")\n# raise e\n# time.sleep(0.5*count)\n#\n#\n# def rpost(*args, **kws):\n# tries = 5\n# for count in range(tries):\n# try:\n# r = requests.post(*args, **kws)\n# return r\n# except connection_errors as e:\n# if count == tries - 1:\n# print(*args, sep=\"\\n\")\n# print(\"still wrong after several tries\")\n# raise e\n# time.sleep(0.5*count)\n#\n#\n# def rpost_json(*args, **kws):\n# tries = 5\n# for count in range(tries):\n# try:\n# r = requests.post(*args, **kws)\n# return r.json()\n# except connection_errors as e:\n# if count == tries - 1:\n# print(*args, sep=\"\\n\")\n# print(\"still wrong after several tries\")\n# raise e\n# time.sleep(0.5*count)\n\n\n## simple subsitution for holdings.py\n\nholdings = {}\nholdings[\"501018\"] = {\n \"etfs/etfs-brent-1mth-uk\": 17.51,\n \"etfs/etfs-brent-crude\": 15.04,\n \"etfs/etfs-crude-oil\": 7.34,\n \"etfs/ipath-series-b-sp-gsci-crd-oil-tr\": 0.06,\n \"etfs/powershares-db-oil-fund\": 11.6,\n \"etfs/ubs-cmci-oil-sf-usd\": 8.68,\n \"etfs/united-states-12-month-oil\": 8.14,\n \"etfs/united-states-brent-oil-fund-lp\": 15.42,\n \"etfs/united-states-oil-fund\": 9.63,\n}\nholdings[\"501018rt\"] = {\n \"commodities/brent-oil\": {\"weight\": 49, \"time\": -1},\n \"commodities/crude-oil\": 
{\"weight\": 45, \"time\": 4},\n}\n" ]
[ [ "numpy.sqrt", "pandas.Timestamp" ] ]
huawei-noah/Disout
[ "dd4a131ee27043fd3da638056808216944722336" ]
[ "models/resnet_imagenet.py" ]
[ "#Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n\n#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 3-Clause License.\n\n#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD 3-Clause License for more details.\n\n\n\n\nimport torch.nn as nn\nimport math\nimport sys\nsys.path.append(\"..\")\n\nfrom disout import Disout,LinearScheduler\n\ndploc = [73, 77, 81, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 173,177, 181, 188, 192, 196, 200, 204, 208, 212]\nconvloc =[75, 79, 90, 90, 94, 98, 106, 106, 110, 114, 122, 122, 126, 130, 138, 138, 142, 146, 154, 154, 158, 162, 171, 171, 175, 179, 190, 190, 194, 198, 206, 206, 210, 214]\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,padding=1, bias=False)\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None,dist_prob=None,block_size=None,alpha=None,nr_steps=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck_disout(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None,dist_prob=0.05,block_size=6,alpha=30,nr_steps=5e3):\n super(Bottleneck_disout, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n \n self.disout1=LinearScheduler(Disout(dist_prob=dist_prob,block_size=block_size,alpha=alpha),\n start_value=0.,stop_value=dist_prob,nr_steps=nr_steps)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,padding=1, bias=False)\n \n self.disout2=LinearScheduler(Disout(dist_prob=dist_prob,block_size=block_size,alpha=alpha),\n start_value=0.,stop_value=dist_prob,nr_steps=nr_steps)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n \n self.disout3=LinearScheduler(Disout(dist_prob=dist_prob,block_size=block_size,alpha=alpha),\n start_value=0.,stop_value=dist_prob,nr_steps=nr_steps)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n \n self.downsample = downsample\n self.stride = stride\n self.disout4=LinearScheduler(Disout(dist_prob=dist_prob,block_size=block_size,alpha=alpha),\n start_value=0.,stop_value=dist_prob,nr_steps=nr_steps)\n\n def forward(self, x):\n \n residual = x\n\n out = self.conv1(x) \n out = self.bn1(out)\n out = self.relu(out)\n out=self.disout1(out)\n \n out = self.conv2(out) \n out = 
self.bn2(out)\n out = self.relu(out)\n out=self.disout2(out)\n \n out = self.conv3(out)\n out = self.bn3(out)\n out=self.disout3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n residual=self.disout4(residual)\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet_disout(nn.Module):\n\n def __init__(self, layers, num_classes=1000,dist_prob=0.05,block_size=6,alpha=30,nr_steps=5e3):\n super(ResNet_disout, self).__init__()\n \n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(Bottleneck, 64, layers[0])\n self.layer2 = self._make_layer(Bottleneck, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(Bottleneck_disout, 256, layers[2], stride=2,\n dist_prob=dist_prob/4,block_size=block_size,alpha=alpha,nr_steps=nr_steps)\n self.layer4 = self._make_layer(Bottleneck_disout, 512, layers[3], stride=2,\n dist_prob=dist_prob,block_size=block_size,alpha=alpha,nr_steps=nr_steps)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n self.fc = nn.Linear(512 * Bottleneck.expansion, num_classes)\n\n for name,m in self.named_modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m,nn.BatchNorm2d) and 'bn3'in name:\n m.weight.data.fill_(0)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1,dist_prob=0.05,block_size=6,alpha=30,nr_steps=5e3):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),)\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample,\n dist_prob=dist_prob,block_size=block_size,alpha=alpha,nr_steps=nr_steps))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes,\n dist_prob=dist_prob,block_size=block_size,alpha=alpha,nr_steps=nr_steps))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n \n gpu_id = str(x.get_device())\n modulelist=list(self.modules())\n for imodu in range(len(dploc)):\n modulelist[dploc[imodu]].weight_behind[gpu_id]=modulelist[convloc[imodu]].weight.data\n \n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef resnet50_disout(dist_prob=0.05,block_size=6,alpha=30,nr_steps=5e3):\n model = ResNet_disout([3, 4, 6, 3],dist_prob=dist_prob,block_size=block_size,alpha=alpha,nr_steps=nr_steps)\n return model\n\n\n\n" ]
[ [ "torch.nn.MaxPool2d", "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.AvgPool2d", "torch.nn.ReLU" ] ]
blocktorch/blocktorch
[ "044aa269813ab22c5fd27f84272e5fb540fc522b" ]
[ "ml_source/src/blocktorch/blocktorch/pipelines/components/estimators/classifiers/logistic_regression_classifier.py" ]
[ "\"\"\"Logistic Regression Classifier.\"\"\"\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression as SKLogisticRegression\nfrom skopt.space import Real\n\nfrom blocktorch.model_family import ModelFamily\nfrom blocktorch.pipelines.components.estimators import Estimator\nfrom blocktorch.problem_types import ProblemTypes\n\n\nclass LogisticRegressionClassifier(Estimator):\n \"\"\"Logistic Regression Classifier.\n\n Args:\n penalty ({\"l1\", \"l2\", \"elasticnet\", \"none\"}): The norm used in penalization. Defaults to \"l2\".\n C (float): Inverse of regularization strength. Must be a positive float. Defaults to 1.0.\n multi_class ({\"auto\", \"ovr\", \"multinomial\"}): If the option chosen is \"ovr\", then a binary problem is fit for each label.\n For \"multinomial\" the loss minimised is the multinomial loss fit across the entire probability distribution,\n even when the data is binary. \"multinomial\" is unavailable when solver=\"liblinear\".\n \"auto\" selects \"ovr\" if the data is binary, or if solver=\"liblinear\", and otherwise selects \"multinomial\". Defaults to \"auto\".\n solver ({\"newton-cg\", \"lbfgs\", \"liblinear\", \"sag\", \"saga\"}): Algorithm to use in the optimization problem.\n For small datasets, \"liblinear\" is a good choice, whereas \"sag\" and \"saga\" are faster for large ones.\n For multiclass problems, only \"newton-cg\", \"sag\", \"saga\" and \"lbfgs\" handle multinomial loss; \"liblinear\" is limited to one-versus-rest schemes.\n\n - \"newton-cg\", \"lbfgs\", \"sag\" and \"saga\" handle L2 or no penalty\n - \"liblinear\" and \"saga\" also handle L1 penalty\n - \"saga\" also supports \"elasticnet\" penalty\n - \"liblinear\" does not support setting penalty='none'\n\n Defaults to \"lbfgs\".\n n_jobs (int): Number of parallel threads used to run xgboost. Note that creating thread contention will significantly slow down the algorithm. Defaults to -1.\n random_seed (int): Seed for the random number generator. Defaults to 0.\n \"\"\"\n\n name = \"Logistic Regression Classifier\"\n hyperparameter_ranges = {\n \"penalty\": [\"l2\"],\n \"C\": Real(0.01, 10),\n }\n \"\"\"{\n \"penalty\": [\"l2\"],\n \"C\": Real(0.01, 10),\n }\"\"\"\n model_family = ModelFamily.LINEAR_MODEL\n \"\"\"ModelFamily.LINEAR_MODEL\"\"\"\n supported_problem_types = [\n ProblemTypes.BINARY,\n ProblemTypes.MULTICLASS,\n ProblemTypes.TIME_SERIES_BINARY,\n ProblemTypes.TIME_SERIES_MULTICLASS,\n ]\n \"\"\"[\n ProblemTypes.BINARY,\n ProblemTypes.MULTICLASS,\n ProblemTypes.TIME_SERIES_BINARY,\n ProblemTypes.TIME_SERIES_MULTICLASS,\n ]\"\"\"\n\n def __init__(\n self,\n penalty=\"l2\",\n C=1.0,\n multi_class=\"auto\",\n solver=\"lbfgs\",\n n_jobs=-1,\n random_seed=0,\n **kwargs,\n ):\n parameters = {\n \"penalty\": penalty,\n \"C\": C,\n \"n_jobs\": n_jobs,\n \"multi_class\": multi_class,\n \"solver\": solver,\n }\n parameters.update(kwargs)\n lr_classifier = SKLogisticRegression(random_state=random_seed, **parameters)\n super().__init__(\n parameters=parameters, component_obj=lr_classifier, random_seed=random_seed\n )\n\n @property\n def feature_importance(self):\n \"\"\"Feature importance for fitted logistic regression classifier.\"\"\"\n coef_ = self._component_obj.coef_\n # binary classification case\n if len(coef_) <= 2:\n return coef_[0]\n else:\n # multiclass classification case\n return np.linalg.norm(coef_, axis=0, ord=2)\n" ]
[ [ "numpy.linalg.norm", "sklearn.linear_model.LogisticRegression" ] ]
komorihi/GPyOpt
[ "5c8424f92ffaa745d3daebca3f38de2569500d6d" ]
[ "GPyOpt/util/general.py" ]
[ "# Copyright (c) 2016, the GPyOpt Authors\n# Licensed under the BSD 3-clause license (see LICENSE.txt)\n\nimport numpy as np\nfrom scipy.special import erfc\nimport time\nfrom ..core.errors import InvalidConfigError\n\ndef compute_integrated_acquisition(acquisition,x):\n '''\n Used to compute the acquisition function when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n '''\n\n acqu_x = 0\n\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]\n acqu_x += acquisition.acquisition_function(x)\n\n acqu_x = acqu_x/acquisition.model.num_hmc_samples\n return acqu_x\n\ndef compute_integrated_acquisition_withGradients(acquisition,x):\n '''\n Used to compute the acquisition function with gradients when samples of the hyper-parameters have been generated (used in GP_MCMC model).\n\n :param acquisition: acquisition function with GpyOpt model type GP_MCMC.\n :param x: location where the acquisition is evaluated.\n '''\n\n acqu_x = 0\n d_acqu_x = 0\n\n for i in range(acquisition.model.num_hmc_samples):\n acquisition.model.model.kern[:] = acquisition.model.hmc_samples[i,:]\n acqu_x_sample, d_acqu_x_sample = acquisition.acquisition_function_withGradients(x)\n acqu_x += acqu_x_sample\n d_acqu_x += d_acqu_x_sample\n\n acqu_x = acqu_x/acquisition.model.num_hmc_samples\n d_acqu_x = d_acqu_x/acquisition.model.num_hmc_samples\n\n return acqu_x, d_acqu_x\n\n\ndef best_guess(f,X):\n '''\n Gets the best current guess from a vector.\n :param f: function to evaluate.\n :param X: locations.\n '''\n n = X.shape[0]\n xbest = np.zeros(n)\n for i in range(n):\n ff = f(X[0:(i+1)])\n xbest[i] = ff[np.argmin(ff)]\n return xbest\n\n\ndef samples_multidimensional_uniform(bounds,num_data):\n '''\n Generates a multidimensional grid uniformly distributed.\n :param bounds: tuple defining the box constrains.\n :num_data: number of data points to generate.\n\n '''\n dim = len(bounds)\n Z_rand = np.zeros(shape=(num_data,dim))\n for k in range(0,dim): Z_rand[:,k] = np.random.uniform(low=bounds[k][0],high=bounds[k][1],size=num_data)\n return Z_rand\n\n\ndef reshape(x,input_dim):\n '''\n Reshapes x into a matrix with input_dim columns\n\n '''\n x = np.array(x)\n if x.size ==input_dim:\n x = x.reshape((1,input_dim))\n return x\n\ndef get_moments(model,x):\n '''\n Moments (mean and sdev.) of a GP model at x\n\n '''\n input_dim = model.X.shape[1]\n x = reshape(x,input_dim)\n fmin = min(model.predict(model.X)[0])\n m, v = model.predict(x)\n s = np.sqrt(np.clip(v, 0, np.inf))\n return (m,s, fmin)\n\ndef get_d_moments(model,x):\n '''\n Gradients with respect to x of the moments (mean and sdev.) 
of the GP\n :param model: GPy model.\n :param x: location where the gradients are evaluated.\n '''\n input_dim = model.input_dim\n x = reshape(x,input_dim)\n _, v = model.predict(x)\n dmdx, dvdx = model.predictive_gradients(x)\n dmdx = dmdx[:,:,0]\n dsdx = dvdx / (2*np.sqrt(v))\n return (dmdx, dsdx)\n\n\ndef get_quantiles(acquisition_par, fmin, m, s):\n '''\n Quantiles of the Gaussian distribution useful to determine the acquisition function values\n :param acquisition_par: parameter of the acquisition function\n :param fmin: current minimum.\n :param m: vector of means.\n :param s: vector of standard deviations.\n '''\n if isinstance(s, np.ndarray):\n s[s<1e-10] = 1e-10\n elif s< 1e-10:\n s = 1e-10\n u = (fmin-m-acquisition_par)/s\n phi = np.exp(-0.5 * u**2) / np.sqrt(2*np.pi)\n Phi = 0.5 * erfc(-u / np.sqrt(2))\n return (phi, Phi, u)\n\n\ndef best_value(Y,sign=1):\n '''\n Returns a vector whose components i are the minimum (default) or maximum of Y[:i]\n '''\n n = Y.shape[0]\n Y_best = np.ones(n)\n for i in range(n):\n if sign == 1:\n Y_best[i]=Y[:(i+1)].min()\n else:\n Y_best[i]=Y[:(i+1)].max()\n return Y_best\n\ndef spawn(f):\n '''\n Function for parallel evaluation of the acquisition function\n '''\n def fun(pipe,x):\n pipe.send(f(x))\n pipe.close()\n return fun\n\n\ndef evaluate_function(f,X):\n '''\n Returns the evaluation of a function *f* and the time per evaluation\n '''\n num_data, dim_data = X.shape\n Y_eval = np.zeros((num_data, dim_data))\n Y_time = np.zeros((num_data, 1))\n for i in range(num_data):\n time_zero = time.time()\n Y_eval[i,:] = f(X[i,:])\n Y_time[i,:] = time.time() - time_zero\n return Y_eval, Y_time\n\n\ndef values_to_array(input_values):\n '''\n Transforms a values of int, float and tuples to a column vector numpy array\n '''\n if type(input_values)==tuple:\n values = np.array(input_values).reshape(-1,1)\n elif type(input_values) == np.ndarray:\n values = np.atleast_2d(input_values)\n elif type(input_values)==int or type(input_values)==float or type(np.int64):\n values = np.atleast_2d(np.array(input_values))\n else:\n print('Type to transform not recognized')\n return values\n\n\ndef merge_values(values1,values2):\n '''\n Merges two numpy arrays by calculating all possible combinations of rows\n '''\n array1 = values_to_array(values1)\n array2 = values_to_array(values2)\n\n if array1.size == 0:\n return array2\n if array2.size == 0:\n return array1\n\n merged_array = []\n for row_array1 in array1:\n for row_array2 in array2:\n merged_row = np.hstack((row_array1,row_array2))\n merged_array.append(merged_row)\n return np.atleast_2d(merged_array)\n\ndef round_optimum(x_opt,domain):\n \"\"\"\n Rounds the some value x_opt to a feasible value in the function domain.\n \"\"\"\n\n x_opt_rounded = x_opt.copy()\n counter = 0\n\n for variable in domain:\n if variable.type == 'continuous':\n var_dim = 1\n\n elif variable.type == 'discrete':\n var_dim = 1\n x_opt_rounded[0,counter:(counter+var_dim)] = round_discrete(x_opt[0,counter:(counter+var_dim)],variable.domain)\n\n elif variable.type == 'categorical':\n var_dim = len(variable.domain)\n x_opt_rounded[0,counter:(counter+var_dim)] = round_categorical(x_opt[0,counter:(counter+var_dim)])\n\n elif variable.type == 'bandit':\n var_dim = variable.domain.shape[1]\n x_opt_rounded[0,counter:(counter+var_dim)] = round_bandit(x_opt[0,counter:(counter+var_dim)],variable.domain)\n else:\n raise Exception('Wrong type of variable')\n\n counter += var_dim\n return x_opt_rounded\n\n\ndef round_categorical(values):\n \"\"\"\n Rounds a 
categorical variable by taking setting to one the max of the given vector and to zero the rest of the entries.\n \"\"\"\n\n rounded_values = np.zeros(values.shape)\n rounded_values[np.argmax(values)] = 1\n return rounded_values\n\ndef round_discrete(value,domain):\n \"\"\"\n Rounds a discrete variable by selecting the closest point in the domain\n \"\"\"\n rounded_value = domain[0]\n\n for domain_value in domain:\n if np.abs(domain_value-value)< np.abs(rounded_value-value):\n rounded_value = domain_value\n return rounded_value\n\ndef round_bandit(value,domain):\n \"\"\"\n Rounds a discrete variable by selecting the closest point in the domain\n \"\"\"\n idx = np.argmin(((domain- value)**2).sum(1))\n return domain[idx,:]\n" ]
[ [ "numpy.random.uniform", "numpy.ones", "numpy.sqrt", "numpy.atleast_2d", "numpy.zeros", "numpy.argmin", "numpy.abs", "numpy.exp", "numpy.argmax", "numpy.hstack", "numpy.clip", "numpy.array" ] ]
dawidkski/federated-faceid
[ "95b1f4b7da0e8baf1cac35edf3b49528c650c491", "95b1f4b7da0e8baf1cac35edf3b49528c650c491" ]
[ "src/federatedid/federated.py", "src/facenet/datasets/generate_csv_files.py" ]
[ "import copy\nfrom dataclasses import dataclass\nfrom typing import List, Optional\n\nimport torch\nfrom torch.nn import CrossEntropyLoss, Module\nfrom torch.utils.data import DataLoader\n\n\ndef federated_averaging(models: List[Module]) -> Module:\n global_model = copy.deepcopy(models[0])\n global_weights = global_model.state_dict()\n\n local_weights = [m.state_dict() for m in models]\n\n for k in global_weights.keys():\n for i in range(1, len(local_weights)):\n global_weights[k] += local_weights[i][k]\n global_weights[k] = torch.div(global_weights[k], len(local_weights))\n\n global_model.load_state_dict(global_weights)\n return global_model\n\n\nclass ModelAccumulator:\n def __init__(self):\n self.model_counter: int = 0\n self.global_model = None\n self.global_weights = None\n\n def update(self, model):\n local_weights = model.state_dict()\n\n if self.global_model is None:\n self.global_model = model\n self.global_weights = local_weights\n self.model_counter += 1\n else:\n for k in self.global_weights.keys():\n self.global_weights[k] += local_weights[k]\n self.model_counter += 1\n\n def get(self):\n for k in self.global_weights.keys():\n self.global_weights[k] = torch.div(\n self.global_weights[k], self.model_counter\n )\n\n self.global_model.load_state_dict(self.global_weights)\n return self.global_model\n\n def reset(self):\n self.global_model = None\n self.global_weights = None\n self.model_counter = 0\n\n\n@dataclass\nclass EdgeDeviceSettings:\n batch_size: int\n epochs: int\n learning_rate: float\n learning_rate_decay: float\n device: str\n\n\n@dataclass\nclass TrainingResult:\n loss: float\n steps: int\n learning_rate: float\n\n\nclass EdgeDevice:\n def __init__(\n self, device_id: int, settings: EdgeDeviceSettings, data_loader: DataLoader\n ):\n self.device_id = device_id\n self._data_loader = data_loader\n self.setting = copy.deepcopy(settings)\n self._loss_func = CrossEntropyLoss()\n self._model: Optional[Module] = None\n\n def download(self, model: Module):\n self._model = copy.deepcopy(model)\n\n def upload(self) -> Module:\n if self._model is not None:\n return copy.deepcopy(self._model)\n else:\n raise ValueError(\"Model not found on this device!\")\n\n def train(self) -> TrainingResult:\n if self._data_loader is None:\n raise ValueError(\"Dataset not found on this device!\")\n\n self._model.train()\n self.setting.learning_rate = (\n self.setting.learning_rate * self.setting.learning_rate_decay\n )\n optimizer = torch.optim.SGD(\n params=self._model.parameters(), lr=self.setting.learning_rate\n )\n epoch_loss = []\n local_steps: int = 0\n for _ in range(self.setting.epochs):\n batch_loss = []\n for i_batch, (images, labels) in enumerate(self._data_loader):\n self._model.zero_grad()\n images = images.to(self.setting.device)\n labels = labels.to(self.setting.device)\n\n logits = self._model(images)\n loss = self._loss_func(logits, labels)\n loss.backward()\n optimizer.step()\n local_steps += 1\n batch_loss.append(loss.item())\n epoch_loss.append(sum(batch_loss) / len(batch_loss))\n\n mean_loss = sum(epoch_loss) / len(epoch_loss)\n return TrainingResult(\n loss=mean_loss, steps=local_steps, learning_rate=self.setting.learning_rate\n )\n", "import argparse\nimport glob\nimport os\nimport time\n\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef generate_csv_file(data_dir: str, output_file: str):\n \"\"\"Generates a csv file containing the image paths of the VGGFace2 dataset for use in triplet selection in\n triplet loss training.\n\n Args:\n dataroot (str): absolute path to 
the training dataset.\n csv_name (str): name of the resulting csv file.\n \"\"\"\n print(\"\\nLoading image paths ...\")\n files = glob.glob(data_dir + \"/*/*\")\n start_time = time.time()\n list_rows = []\n\n print(data_dir)\n print(os.listdir(data_dir))\n print(files)\n\n print(\"Number of files: {}\".format(len(files)))\n print(\"\\nGenerating csv file ...\")\n\n for file_index, file in enumerate(tqdm(files)):\n face_id = os.path.basename(file).split(\".\")[0]\n face_label = os.path.basename(os.path.dirname(file))\n\n # Better alternative than dataframe.append()\n row = {\"id\": face_id, \"name\": face_label}\n list_rows.append(row)\n\n df = pd.DataFrame(list_rows)\n df = df.sort_values(by=[\"name\", \"id\"]).reset_index(drop=True)\n\n # Encode names as categorical classes\n df[\"class\"] = pd.factorize(df[\"name\"])[0]\n df.to_csv(path_or_buf=output_file, index=False)\n\n elapsed_time = time.time() - start_time\n print(\"\\nDone! Elapsed time: {:.2f} minutes.\".format(elapsed_time / 60))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Generating csv file for triplet loss!\"\n )\n parser.add_argument(\n \"--dataroot\",\n \"-d\",\n type=str,\n required=True,\n help=\"(REQUIRED) Absolute path to the dataset folder to generate a csv \"\n \"file containing the paths of the images for triplet loss. \",\n )\n\n parser.add_argument(\n \"--csv_name\",\n type=str,\n help=\"Required name of the csv file to be generated. (default: \"\n \"'vggface2.csv') \",\n )\n\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n generate_csv_file(data_dir=args.dataroot, output_file=args.csv_name)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.div" ], [ "pandas.DataFrame", "pandas.factorize" ] ]
ianhuang0630/CSQ
[ "5f1fe99a8d9da73692643b3911d675dce269a03d" ]
[ "setup.py" ]
[ "\"\"\"Setup learnable_primitives\"\"\"\n\nfrom distutils.core import setup\nfrom Cython.Build import cythonize\nfrom distutils.extension import Extension\n\nfrom itertools import dropwhile\nimport numpy as np\nfrom os import path\n\n\ndef collect_docstring(lines):\n \"\"\"Return document docstring if it exists\"\"\"\n lines = dropwhile(lambda x: not x.startswith('\"\"\"'), lines)\n doc = \"\"\n for line in lines:\n doc += line\n if doc.endswith('\"\"\"\\n'):\n break\n\n return doc[3:-4].replace(\"\\r\", \"\").replace(\"\\n\", \" \")\n\n\ndef collect_metadata():\n meta = {}\n with open(path.join(\"learnable_primitives\", \"__init__.py\")) as f:\n lines = iter(f)\n meta[\"description\"] = collect_docstring(lines)\n for line in lines:\n if line.startswith(\"__\"):\n key, value = map(lambda x: x.strip(), line.split(\"=\"))\n meta[key[2:-2]] = value[1:-1]\n\n return meta\n\n\ndef get_extensions():\n return cythonize([\n Extension(\n \"learnable_primitives.fast_sampler._sampler\",\n [\n \"learnable_primitives/fast_sampler/_sampler.pyx\",\n \"learnable_primitives/fast_sampler/sampling.cpp\"\n ],\n language=\"c++11\",\n libraries=[\"stdc++\"],\n include_dirs=[np.get_include()],\n extra_compile_args=[\"-std=c++11\", \"-O3\"]\n )\n ])\n\n\ndef get_install_requirements():\n return [\n \"numpy\",\n \"scikit-learn\",\n \"trimesh==2.38.42\",\n \"torch==0.4.1\",\n \"torchvision==0.1.8\",\n \"progress==1.4\",\n \"cython\",\n \"Pillow\",\n \"pyquaternion\",\n \"backports.functools_lru_cache\",\n \"sympy\",\n \"matplotlib==2.2.4\",\n \"seaborn\",\n \"mayavi\"\n ]\n\n\ndef setup_package():\n meta = collect_metadata()\n setup(\n name=\"learnable_primitives\",\n version=meta[\"version\"],\n maintainer=meta[\"maintainer\"],\n maintainer_email=meta[\"email\"],\n url=meta[\"url\"],\n license=meta[\"license\"],\n classifiers=[\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n ],\n install_requires=get_install_requirements(),\n ext_modules=get_extensions()\n )\n\n\nif __name__ == \"__main__\":\n setup_package()\n" ]
[ [ "numpy.get_include" ] ]
source-data/soda-roberta
[ "28f23ae68a1bb17c9844815a7c36d4c590e8c3d0" ]
[ "src/lm/metrics.py" ]
[ "from transformers import EvalPrediction\nfrom sklearn.metrics import precision_recall_fscore_support\nimport numpy as np\n\n\ndef compute_metrics(pred: EvalPrediction):\n \"\"\"Compute recall at the masked position\n \"\"\"\n mask = pred.label_ids != -100\n # filter everything except the masked position and flatten tensors\n labels = pred.label_ids[mask].flatten()\n preds = pred.predictions[mask].flatten()\n _, recall, _, _ = precision_recall_fscore_support(y_true=labels, y_pred=preds, average='micro')\n return {'recall': recall}\n\n\ndef self_test():\n pred = EvalPrediction(\n label_ids=np.array([\n [-100, 1, -100],\n [ 2, -100, -100],\n [-100, -100, 3],\n [-100, -100, 4]\n ]),\n predictions=np.array([\n [-100, 1, -100], # 1 true positive\n [ 2, -100, -100], # 1 true positive\n [ 2, 6, 8], # 1 false positive, irrelevant pos will be ignored\n [ 1, 7, 4] # 1 true positive, irrelevant pos will be ignored\n ]) \n )\n m = compute_metrics(pred)\n print(f\"recall={m['recall']}\")\n assert m['recall'] == 0.75\n print(\"Looks like it is working!\")\n\n\nif __name__ == \"__main__\":\n self_test()\n" ]
[ [ "numpy.array", "sklearn.metrics.precision_recall_fscore_support" ] ]
jlmaurer/tectosaur
[ "7cc5606d814f061395b19754e7a4b6c5e4c236e5", "7cc5606d814f061395b19754e7a4b6c5e4c236e5" ]
[ "tectosaur/fmm/ts_terms.py", "tests/test_farfield.py" ]
[ "from math import factorial\nimport scipy.special\nimport numpy as np\n\ndef sloppy_spherical(y):\n r = np.linalg.norm(y)\n costheta = y[2] / r\n theta = np.arccos(costheta)\n phi = np.arccos(y[0] / r / np.sin(theta))\n return r, theta, phi\n\ndef Rdirect(n_max, y):\n r, theta, phi = sloppy_spherical(y)\n real = np.zeros((n_max + 1, 2 * n_max + 1))\n imag = np.zeros((n_max + 1, 2 * n_max + 1))\n Pmn = scipy.special.lpmn(n_max, n_max, np.cos(theta))[0]\n for i in range(n_max + 1):\n for j in range(-i, i + 1):\n if j < 0:\n lp = (\n ((-1) ** (-j)) * (factorial(i + j) / factorial(i - j))\n * Pmn[-j, i] / ((-1) ** -j)\n )\n else:\n lp = Pmn[j, i] / ((-1) ** j)\n factor = (r ** i) * lp / factorial(i + j)\n real[i, n_max + j] = factor * np.cos(j * phi)\n imag[i, n_max + j] = factor * np.sin(j * phi)\n return real, imag\n\ndef Sdirect(n_max, y):\n r, theta, phi = sloppy_spherical(y)\n real = np.zeros((n_max + 1, 2 * n_max + 1))\n imag = np.zeros((n_max + 1, 2 * n_max + 1))\n Pmn = scipy.special.lpmn(n_max, n_max, np.cos(theta))[0]\n for i in range(n_max + 1):\n for j in range(-i, i + 1):\n if j < 0:\n lp = (\n ((-1) ** (-j)) * (factorial(i + j) / factorial(i - j))\n * Pmn[-j, i] / ((-1) ** -j)\n )\n else:\n lp = Pmn[j, i] / ((-1) ** j)\n factor = factorial(i - j) * lp / (r ** (i + 1))\n real[i, n_max + j] = factor * np.cos(j * phi)\n imag[i, n_max + j] = factor * np.sin(j * phi)\n return real, imag\n\ndef R(n_max, y):\n y1, y2, y3 = y\n real = np.zeros((n_max + 1, 2 * n_max + 1))\n imag = np.zeros((n_max + 1, 2 * n_max + 1))\n real[0, n_max] = 1.0\n for i in range(0, n_max):\n real[i + 1, n_max + i + 1] = (\n (y1 * real[i, n_max + i] - y2 * imag[i, n_max + i])\n / (2 * (i + 1))\n )\n imag[i + 1, n_max + i + 1] = (\n (y1 * imag[i, n_max + i] + y2 * real[i, n_max + i])\n / (2 * (i + 1))\n )\n\n t2f = np.linalg.norm(y) ** 2\n for j in range(n_max + 1):\n for i in range(j, n_max):\n factor = 1.0 / ((i + 1) ** 2 - j ** 2)\n t1f = (2 * i + 1) * y3\n real[i + 1, n_max + j] = factor * (t1f * real[i, n_max + j] - t2f * real[i - 1, n_max + j])\n imag[i + 1, n_max + j] = factor * (t1f * imag[i, n_max + j] - t2f * imag[i - 1, n_max + j])\n for i in range(n_max + 1):\n for j in range(1, n_max + 1):\n real[i, n_max - j] = ((-1) ** j) * real[i, n_max + j]\n imag[i, n_max - j] = ((-1) ** (j + 1)) * imag[i, n_max + j]\n return real, imag\n\ndef R_storagefree(n_max, y):\n def neg(real, imag, mi):\n return (\n ((-1) ** mi) * real,\n ((-1) ** (mi + 1)) * imag\n )\n\n y1, y2, y3 = y\n real = np.zeros((n_max + 1, 2 * n_max + 1))\n imag = np.zeros((n_max + 1, 2 * n_max + 1))\n\n t2f = np.linalg.norm(y) ** 2\n Rsr = 1.0\n Rsi = 0.0\n for mi in range(0, n_max + 1):\n real[mi, n_max + mi] = Rsr\n imag[mi, n_max + mi] = Rsi\n real[mi, n_max - mi], imag[mi, n_max - mi] = neg(Rsr, Rsi, mi)\n\n Rm2r = 0.0\n Rm2i = 0.0\n Rm1r = Rsr\n Rm1i = Rsi\n for ni in range(mi, n_max):\n factor = 1.0 / ((ni + 1) ** 2 - mi ** 2)\n t1f = (2 * ni + 1) * y3\n Rvr = factor * (t1f * Rm1r - t2f * Rm2r)\n Rvi = factor * (t1f * Rm1i - t2f * Rm2i)\n real[ni + 1, n_max + mi] = Rvr\n imag[ni + 1, n_max + mi] = Rvi\n real[ni + 1, n_max - mi], imag[ni + 1, n_max - mi] = neg(Rvr, Rvi, mi)\n Rm2r = Rm1r\n Rm2i = Rm1i\n Rm1r = Rvr\n Rm1i = Rvi\n Rsrold = Rsr\n Rsiold = Rsi\n Rsr = (y1 * Rsrold - y2 * Rsiold) / (2 * (mi + 1))\n Rsi = (y1 * Rsiold + y2 * Rsrold) / (2 * (mi + 1))\n return real, imag\n\ndef Rderivs(n_max, y, d):\n Rvr, Rvi = R(n_max + 1, y)\n real = np.zeros((n_max + 1, 2 * n_max + 1))\n imag = np.zeros((n_max + 1, 2 * n_max + 1))\n\n if d 
== 0:\n for i in range(n_max):\n for j in range(-i, i + 1):\n real[i, n_max + j] = 0.5 * (\n Rvr[i - 1, (n_max + 1) + j - 1]\n - Rvr[i - 1, (n_max + 1) + j + 1]\n )\n imag[i, n_max + j] = 0.5 * (\n Rvi[i - 1, (n_max + 1) + j - 1]\n - Rvi[i - 1, (n_max + 1) + j + 1]\n )\n elif d == 1:\n for i in range(n_max + 1):\n for j in range(-i, i + 1):\n real[i, n_max + j] = -0.5 * (\n Rvi[i - 1, (n_max + 1) + j - 1]\n + Rvi[i - 1, (n_max + 1) + j + 1]\n )\n imag[i, n_max + j] = 0.5 * (\n Rvr[i - 1, (n_max + 1) + j - 1]\n + Rvr[i - 1, (n_max + 1) + j + 1]\n )\n else:\n for i in range(n_max + 1):\n for j in range(-i, i + 1):\n real[i, n_max + j] = Rvr[i - 1, (n_max + 1) + j]\n imag[i, n_max + j] = Rvi[i - 1, (n_max + 1) + j]\n return real, imag\n\ndef S(n_max, y):\n y1, y2, y3 = y\n ynorm = np.linalg.norm(y)\n ynorm2 = ynorm ** 2\n real = np.zeros((n_max + 1, 2 * n_max + 1))\n imag = np.zeros((n_max + 1, 2 * n_max + 1))\n real[0, n_max] = 1.0 / ynorm\n for i in range(0, n_max):\n factor = (2 * i + 1) / ynorm2\n real[i + 1, n_max + i + 1] = factor * (\n (y1 * real[i, n_max + i] - y2 * imag[i, n_max + i])\n )\n imag[i + 1, n_max + i + 1] = factor * (\n (y1 * imag[i, n_max + i] + y2 * real[i, n_max + i])\n )\n\n for j in range(n_max + 1):\n for i in range(j, n_max):\n factor = 1.0 / ynorm2\n t1f = (2 * i + 1) * y3\n t2f = i ** 2 - j ** 2\n real[i + 1, n_max + j] = factor * (\n t1f * real[i, n_max + j] - t2f * real[i - 1, n_max + j]\n )\n imag[i + 1, n_max + j] = factor * (\n t1f * imag[i, n_max + j] - t2f * imag[i - 1, n_max + j]\n )\n for i in range(n_max + 1):\n for j in range(1, n_max + 1):\n real[i, n_max - j] = ((-1) ** j) * real[i, n_max + j]\n imag[i, n_max - j] = ((-1) ** (j + 1)) * imag[i, n_max + j]\n return real, imag\n\ndef S_storagefree(n_max, y):\n def neg(real, imag, mi):\n return (\n ((-1) ** mi) * real,\n ((-1) ** (mi + 1)) * imag\n )\n\n y1, y2, y3 = y\n real = np.zeros((n_max + 1, 2 * n_max + 1))\n imag = np.zeros((n_max + 1, 2 * n_max + 1))\n\n ynorm = np.linalg.norm(y)\n ynorm2 = ynorm ** 2\n Ssr = 1.0 / ynorm\n Ssi = 0.0\n for mi in range(0, n_max + 1):\n real[mi, n_max + mi] = Ssr\n imag[mi, n_max + mi] = Ssi\n real[mi, n_max - mi], imag[mi, n_max - mi] = neg(Ssr, Ssi, mi)\n\n Sm2r = 0.0\n Sm2i = 0.0\n Sm1r = Ssr\n Sm1i = Ssi\n for ni in range(mi, n_max):\n factor = 1.0 / ynorm2\n t1f = (2 * ni + 1) * y3\n t2f = ni ** 2 - mi ** 2\n Svr = factor * (t1f * Sm1r - t2f * Sm2r)\n Svi = factor * (t1f * Sm1i - t2f * Sm2i)\n real[ni + 1, n_max + mi] = Svr\n imag[ni + 1, n_max + mi] = Svi\n real[ni + 1, n_max - mi], imag[ni + 1, n_max - mi] = neg(Svr, Svi, mi)\n Sm2r = Sm1r\n Sm2i = Sm1i\n Sm1r = Svr\n Sm1i = Svi\n Ssrold = Ssr\n Ssiold = Ssi\n factor = (2 * mi + 1) / ynorm2\n Ssr = factor * (y1 * Ssrold - y2 * Ssiold)\n Ssi = factor * (y1 * Ssiold + y2 * Ssrold)\n return real, imag\n\ndef Sderivs(n_max, y, d):\n Svr, Svi = S(n_max + 1, y)\n real = np.zeros((n_max + 1, 2 * n_max + 1))\n imag = np.zeros((n_max + 1, 2 * n_max + 1))\n\n if d == 0:\n for i in range(n_max + 1):\n for j in range(-i, i + 1):\n real[i, n_max + j] = 0.5 * (\n Svr[i + 1, (n_max + 1) + j - 1]\n - Svr[i + 1, (n_max + 1) + j + 1]\n )\n imag[i, n_max + j] = 0.5 * (\n Svi[i + 1, (n_max + 1) + j - 1]\n - Svi[i + 1, (n_max + 1) + j + 1]\n )\n elif d == 1:\n for i in range(n_max + 1):\n for j in range(-i, i + 1):\n real[i, n_max + j] = -0.5 * (\n Svi[i + 1, (n_max + 1) + j - 1]\n + Svi[i + 1, (n_max + 1) + j + 1]\n )\n imag[i, n_max + j] = 0.5 * (\n Svr[i + 1, (n_max + 1) + j - 1]\n + Svr[i + 1, (n_max + 1) + j + 1]\n )\n 
else:\n for i in range(n_max + 1):\n for j in range(-i, i + 1):\n real[i, n_max + j] = -Svr[i + 1, (n_max + 1) + j]\n imag[i, n_max + j] = -Svi[i + 1, (n_max + 1) + j]\n return real, imag\n\n", "import time\nimport numpy as np\n\nfrom tectosaur.farfield import farfield_pts_direct, get_gpu_module\nfrom tectosaur.ops.sparse_farfield_op import TriToTriDirectFarfieldOp\nfrom tectosaur.util.geometry import normalize\nfrom tectosaur.mesh.mesh_gen import make_rect\nfrom tectosaur.mesh.modify import concat\n\ndef make_meshes(n_m = 8, sep = 2, w = 1, n_m2 = None):\n if n_m2 is None:\n n_m2 = n_m\n\n m1 = make_rect(n_m, n_m, [\n [-w, 0, w], [-w, 0, -w],\n [w, 0, -w], [w, 0, w]\n ])\n m2 = make_rect(n_m2, n_m2, [\n [-w, sep, w], [-w, sep, -w],\n [w, sep, -w], [w, sep, w]\n ])\n m = concat(m1, m2)\n surf1_idxs = np.arange(m1[1].shape[0])\n surf2_idxs = (surf1_idxs[-1] + 1) + surf1_idxs\n return m, surf1_idxs, surf2_idxs\n\ndef test_tri_tri_farfield():\n m, surf1_idxs, surf2_idxs = make_meshes()\n T1, T2 = [\n C(\n 2, 'elasticT3', [1.0,0.25], m[0], m[1],\n np.float32, obs_subset = surf1_idxs,\n src_subset = surf2_idxs\n ) for C in [PtToPtDirectFarfieldOp, TriToTriDirectFarfieldOp]\n ]\n in_vals = np.random.rand(T1.shape[1])\n out1 = T1.dot(in_vals)\n out2 = T2.dot(in_vals)\n np.testing.assert_almost_equal(out1, out2)\n\ndef timing(n, runtime, name, flops):\n print(\"for \" + name)\n cycles = runtime * 5e12\n entries = ((n * 3) ** 2)\n cycles_per_entry = cycles / entries\n print(\"total time: \" + str(runtime))\n print(\"pts: \" + str(n))\n print(\"interacts: \" + str(entries))\n print(\"cycles/interact: \" + str(cycles_per_entry))\n print(\"total flop count: \" + str(flops * n ** 2))\n print(\"Tflop/s: \" + str(flops * n ** 2 / runtime / 1e12))\n\ndef run_kernel(n, k_name, flops, testit = False, timeit = False):\n block_size = 128\n np.random.seed(100)\n obs_pts = np.random.rand(n, 3)\n obs_ns = normalize(np.random.rand(n, 3))\n src_pts = np.random.rand(n, 3)\n src_ns = obs_ns\n weights = np.random.rand(n, 3).flatten()\n\n start = time.time()\n params = [1.0, 0.25]\n result = farfield_pts_direct(\n k_name, obs_pts, obs_ns, src_pts, src_ns, weights, params, np.float32\n ).reshape((n, 3))\n runtime = time.time() - start\n if timeit:\n timing(n, runtime, k_name, flops)\n\n # if testit:\n # correct = fmm.direct_eval(\n # \"elastic\" + k_name, obs_pts, obs_ns, src_pts, src_ns, [1.0, 0.25]\n # )\n # correct = correct.reshape((n * 3, n * 3))\n # correct = correct.dot(weights.reshape(n * 3)).reshape((n, 3))\n # np.testing.assert_almost_equal(\n # np.abs((result - correct) / correct),\n # np.zeros_like(result), 2\n # )\n\ndef test_U():\n run_kernel(1000, 'elasticU3', 28, testit = True)\n\ndef test_T():\n run_kernel(1000, 'elasticT3', 63, testit = True)\n\ndef test_A():\n run_kernel(1000, 'elasticA3', 63, testit = True)\n\ndef test_H():\n run_kernel(1000, 'elasticH3', 102, testit = True)\n\nif __name__ == '__main__':\n n = 32 * 512\n run_kernel(n, 'elasticU3', 28, timeit = True)\n run_kernel(n, 'elasticA3', 63, timeit = True)\n run_kernel(n, 'elasticT3', 63, timeit = True)\n run_kernel(n, 'elasticH3', 102, timeit = True)\n run_kernel(n, 'laplaceS3', 3, timeit = True)\n" ]
[ [ "numpy.zeros", "numpy.arccos", "numpy.cos", "numpy.sin", "numpy.linalg.norm" ], [ "numpy.arange", "numpy.random.seed", "numpy.random.rand", "numpy.testing.assert_almost_equal" ] ]
pyjhzwh/hiddenlayer
[ "59f84299986d9aed7e0534147a87f7dd491ab08d" ]
[ "hiddenlayer/graph.py" ]
[ "\"\"\"\nHiddenLayer\n\nImplementation of the Graph class. A framework independent directed graph to\nrepresent a neural network.\n\nWritten by Waleed Abdulla. Additions by Phil Ferriere.\nLicensed under the MIT License\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport os\nimport re\nfrom random import getrandbits\nimport inspect\nimport numpy as np\n\n\nTHEMES = {\n \"basic\": {\n \"background_color\": \"#FFFFFF\",\n \"fill_color\": \"#E8E8E8\",\n \"outline_color\": \"#000000\",\n \"font_color\": \"#000000\",\n \"font_name\": \"Times\",\n \"font_size\": \"10\",\n \"margin\": \"0,0\",\n \"padding\": \"1.0,0.5\",\n },\n \"blue\": {\n \"background_color\": \"#FFFFFF\",\n \"fill_color\": \"#BCD6FC\",\n \"outline_color\": \"#7C96BC\",\n \"font_color\": \"#202020\",\n \"font_name\": \"Verdana\",\n \"font_size\": \"10\",\n \"margin\": \"0,0\",\n \"padding\": \"1.0,0.5\",\n },\n}\n\n\n###########################################################################\n# Utility Functions\n###########################################################################\n\ndef detect_framework(value):\n # Get all base classes\n classes = inspect.getmro(value.__class__)\n for c in classes:\n if c.__module__.startswith(\"torch\"):\n return \"torch\"\n elif c.__module__.startswith(\"tensorflow\"):\n return \"tensorflow\"\n\n\n###########################################################################\n# Node\n###########################################################################\n\nclass Node():\n \"\"\"Represents a framework-agnostic neural network layer in a directed graph.\"\"\"\n\n def __init__(self, uid, name, op, output_shape=None, params=None):\n \"\"\"\n uid: unique ID for the layer that doesn't repeat in the computation graph.\n name: Name to display\n op: Framework-agnostic operation name.\n \"\"\"\n self.id = uid\n self.name = name # TODO: clarify the use of op vs name vs title\n self.op = op\n self.repeat = 1\n if output_shape:\n assert isinstance(output_shape, (tuple, list)),\\\n \"output_shape must be a tuple or list but received {}\".format(type(output_shape))\n self.output_shape = output_shape\n self.params = params if params else {}\n self._caption = \"\"\n\n @property\n def title(self):\n # Default\n title = self.name or self.op\n\n if \"kernel_shape\" in self.params:\n # Kernel\n kernel = self.params[\"kernel_shape\"]\n title += \"x\".join(map(str, kernel))\n if \"stride\" in self.params:\n stride = self.params[\"stride\"]\n if np.unique(stride).size == 1:\n stride = stride[0]\n if stride != 1:\n title += \"/s{}\".format(str(stride))\n # # Transposed\n # if node.transposed:\n # name = \"Transposed\" + name\n return title\n\n @property\n def caption(self):\n if self._caption:\n return self._caption\n\n caption = \"\"\n\n # Stride\n # if \"stride\" in self.params:\n # stride = self.params[\"stride\"]\n # if np.unique(stride).size == 1:\n # stride = stride[0]\n # if stride != 1:\n # caption += \"/{}\".format(str(stride))\n return caption\n\n def __repr__(self):\n args = (self.op, self.name, self.id, self.title, self.repeat)\n f = \"<Node: op: {}, name: {}, id: {}, title: {}, repeat: {}\"\n if self.output_shape:\n args += (str(self.output_shape),)\n f += \", shape: {:}\"\n if self.params:\n args += (str(self.params),)\n f += \", params: {:}\"\n f += \">\"\n return f.format(*args)\n\n\n###########################################################################\n# Graph\n###########################################################################\n\ndef 
build_graph(model=None, args=None, input_names=None,\n transforms=\"default\", framework_transforms=\"default\"):\n # Initialize an empty graph\n g = Graph()\n\n # Detect framwork\n framework = detect_framework(model)\n if framework == \"torch\":\n from .pytorch_builder import import_graph, FRAMEWORK_TRANSFORMS\n assert args is not None, \"Argument args must be provided for Pytorch models.\"\n import_graph(g, model, args)\n elif framework == \"tensorflow\":\n from .tf_builder import import_graph, FRAMEWORK_TRANSFORMS\n import_graph(g, model)\n else:\n raise ValueError(\"`model` input param must be a PyTorch, TensorFlow, or Keras-with-TensorFlow-backend model.\") \n\n # Apply Transforms\n if framework_transforms:\n if framework_transforms == \"default\":\n framework_transforms = FRAMEWORK_TRANSFORMS\n for t in framework_transforms:\n g = t.apply(g)\n if transforms:\n if transforms == \"default\":\n from .transforms import SIMPLICITY_TRANSFORMS\n transforms = SIMPLICITY_TRANSFORMS\n for t in transforms:\n g = t.apply(g)\n return g\n\n\nclass Graph():\n \"\"\"Tracks nodes and edges of a directed graph and supports basic operations on them.\"\"\"\n\n def __init__(self, model=None, args=None, input_names=None,\n transforms=\"default\", framework_transforms=\"default\",\n meaningful_ids=False):\n self.nodes = {}\n self.edges = []\n self.meaningful_ids = meaningful_ids # TODO\n self.theme = THEMES[\"basic\"]\n\n if model:\n # Detect framwork\n framework = detect_framework(model)\n if framework == \"torch\":\n from .pytorch_builder import import_graph, FRAMEWORK_TRANSFORMS\n assert args is not None, \"Argument args must be provided for Pytorch models.\"\n import_graph(self, model, args)\n elif framework == \"tensorflow\":\n from .tf_builder import import_graph, FRAMEWORK_TRANSFORMS\n import_graph(self, model)\n \n # Apply Transforms\n if framework_transforms:\n if framework_transforms == \"default\":\n framework_transforms = FRAMEWORK_TRANSFORMS\n for t in framework_transforms:\n t.apply(self)\n if transforms:\n if transforms == \"default\":\n from .transforms import SIMPLICITY_TRANSFORMS\n transforms = SIMPLICITY_TRANSFORMS\n for t in transforms:\n t.apply(self)\n\n\n def id(self, node):\n \"\"\"Returns a unique node identifier. If the node has an id\n attribute (preferred), it's used. 
Otherwise, the hash() is returned.\"\"\"\n return node.id if hasattr(node, \"id\") else hash(node)\n\n def add_node(self, node):\n id = self.id(node)\n # assert(id not in self.nodes)\n self.nodes[id] = node\n\n def add_edge(self, node1, node2, label=None):\n # If the edge is already present, don't add it again.\n # TODO: If an edge exists with a different label, still don't add it again.\n edge = (self.id(node1), self.id(node2), label)\n if edge not in self.edges:\n self.edges.append(edge)\n\n def add_edge_by_id(self, vid1, vid2, label=None):\n self.edges.append((vid1, vid2, label))\n\n def outgoing(self, node):\n \"\"\"Returns nodes connecting out of the given node (or list of nodes).\"\"\"\n nodes = node if isinstance(node, list) else [node]\n node_ids = [self.id(n) for n in nodes]\n # Find edges outgoing from this group but not incoming to it\n outgoing = [self[e[1]] for e in self.edges\n if e[0] in node_ids and e[1] not in node_ids]\n return outgoing\n\n def incoming(self, node):\n \"\"\"Returns nodes connecting to the given node (or list of nodes).\"\"\"\n nodes = node if isinstance(node, list) else [node]\n node_ids = [self.id(n) for n in nodes]\n # Find edges incoming to this group but not outgoing from it\n incoming = [self[e[0]] for e in self.edges\n if e[1] in node_ids and e[0] not in node_ids]\n return incoming\n\n def siblings(self, node):\n \"\"\"Returns all nodes that share the same parent (incoming node) with\n the given node, including the node itself.\n \"\"\"\n incoming = self.incoming(node)\n # TODO: Not handling the case of multiple incoming nodes yet\n if len(incoming) == 1:\n incoming = incoming[0]\n siblings = self.outgoing(incoming)\n return siblings\n else:\n return [node]\n\n def __getitem__(self, key):\n if isinstance(key, list):\n return [self.nodes.get(k) for k in key]\n else:\n return self.nodes.get(key)\n\n def remove(self, nodes):\n \"\"\"Remove a node and its edges.\"\"\"\n nodes = nodes if isinstance(nodes, list) else [nodes]\n for node in nodes:\n k = self.id(node)\n self.edges = list(filter(lambda e: e[0] != k and e[1] != k, self.edges))\n del self.nodes[k]\n\n def replace(self, nodes, node):\n \"\"\"Replace nodes with node. Edges incoming to nodes[0] are connected to\n the new node, and nodes outgoing from nodes[-1] become outgoing from\n the new node.\"\"\"\n nodes = nodes if isinstance(nodes, list) else [nodes]\n # Is the new node part of the replace nodes (i.e. want to collapse\n # a group of nodes into one of them)?\n collapse = self.id(node) in self.nodes\n # Add new node and edges\n if not collapse:\n self.add_node(node)\n for in_node in self.incoming(nodes):\n # TODO: check specifically for output_shape is not generic. Consider refactoring.\n self.add_edge(in_node, node, in_node.output_shape if hasattr(in_node, \"output_shape\") else None)\n for out_node in self.outgoing(nodes):\n self.add_edge(node, out_node, node.output_shape if hasattr(node, \"output_shape\") else None)\n # Remove the old nodes\n for n in nodes:\n if collapse and n == node:\n continue\n self.remove(n)\n\n def search(self, pattern):\n \"\"\"Searches the graph for a sub-graph that matches the given pattern\n and returns the first match it finds.\n \"\"\"\n for node in self.nodes.values():\n match, following = pattern.match(self, node)\n if match:\n return match, following\n return [], None\n\n\n def sequence_id(self, sequence):\n \"\"\"Make up an ID for a sequence (list) of nodes.\n Note: `getrandbits()` is very uninformative as a \"readable\" ID. 
Here, we build a name\n such that when the mouse hovers over the drawn node in Jupyter, one can figure out\n which original nodes make up the sequence. This is actually quite useful.\n \"\"\"\n if self.meaningful_ids:\n # TODO: This might fail if the ID becomes too long\n return \"><\".join([node.id for node in sequence])\n else:\n return getrandbits(64)\n\n def build_dot(self):\n \"\"\"Generate a GraphViz Dot graph.\n\n Returns a GraphViz Digraph object.\n \"\"\"\n from graphviz import Digraph\n\n # Build GraphViz Digraph\n dot = Digraph()\n dot.attr(\"graph\", \n bgcolor=self.theme[\"background_color\"],\n color=self.theme[\"outline_color\"],\n fontsize=self.theme[\"font_size\"],\n fontcolor=self.theme[\"font_color\"],\n fontname=self.theme[\"font_name\"],\n margin=self.theme[\"margin\"],\n rankdir=\"LR\",\n pad=self.theme[\"padding\"])\n dot.attr(\"node\", shape=\"box\", \n style=\"filled\", margin=\"0,0\",\n fillcolor=self.theme[\"fill_color\"],\n color=self.theme[\"outline_color\"],\n fontsize=self.theme[\"font_size\"],\n fontcolor=self.theme[\"font_color\"],\n fontname=self.theme[\"font_name\"])\n dot.attr(\"edge\", style=\"solid\", \n color=self.theme[\"outline_color\"],\n fontsize=self.theme[\"font_size\"],\n fontcolor=self.theme[\"font_color\"],\n fontname=self.theme[\"font_name\"])\n\n for k, n in self.nodes.items():\n label = \"<tr><td cellpadding='6'>{}</td></tr>\".format(n.title)\n if n.caption:\n label += \"<tr><td>{}</td></tr>\".format(n.caption)\n if n.repeat > 1:\n label += \"<tr><td align='right' cellpadding='2'>x{}</td></tr>\".format(n.repeat)\n label = \"<<table border='0' cellborder='0' cellpadding='0'>\" + label + \"</table>>\"\n dot.node(str(k), label)\n for a, b, label in self.edges:\n if isinstance(label, (list, tuple)):\n label = \"x\".join([str(l or \"?\") for l in label])\n\n dot.edge(str(a), str(b), label)\n return dot\n\n def _repr_svg_(self):\n \"\"\"Allows Jupyter notebook to render the graph automatically.\"\"\"\n return self.build_dot()._repr_image_svg_xml()\n \n def save(self, path, format=\"pdf\"):\n # TODO: assert on acceptable format values\n dot = self.build_dot()\n dot.format = format\n directory, file_name = os.path.split(path)\n # Remove extension from file name. dot.render() adds it.\n file_name = file_name.replace(\".\" + format, \"\")\n dot.render(file_name, directory=directory, cleanup=True)\n" ]
[ [ "numpy.unique" ] ]
SungbinChoi/traffic4cast2021
[ "3d63b7e90ad0d9c7346f2a6c6c89d605849bf49e" ]
[ "train/t2m2/run.py" ]
[ "import random\nfrom random import shuffle\nimport numpy as np\nfrom datetime import datetime\nimport time\nimport queue\nimport threading\nimport logging\nfrom PIL import Image\nimport itertools\nimport re\nimport os\nimport glob\nimport shutil\nimport sys\nimport copy\nimport h5py\nfrom typing import Any, List, Tuple\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.nn.parallel.data_parallel import data_parallel\nimport torch.utils.checkpoint as cp\nfrom collections import OrderedDict\nfrom torch import Tensor\n\n\n\ntarget_city = 'ANTWERP'\nother_city_list = ['ANTWERP', 'BANGKOK', 'BARCELONA', 'MOSCOW', 'BERLIN', 'CHICAGO', 'ISTANBUL', 'MELBOURNE', ]\n\n\ninput_train_data_folder_path = '../../0_data/' + target_city + '/' + 'training'\ninput_static_data_path = '../../0_data/' + target_city + '/' + target_city + \"_static.h5\"\nout_dir = 'output'\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nSEED = int(time.time())\nnum_train_file = 180\nnum_frame_per_day = 288\nnum_frame_before = 12\nnum_frame_sequence = 24\nnum_frame_out = 6 \nnum_sequence_per_day = num_frame_per_day - num_frame_sequence + 1 \nheight=495\nwidth =436\nnum_channel=8\nnum_channel_out=8\nnum_channel_static = 9\nvisual_input_channels=105\nvisual_output_channels=48 \nvector_input_channels=1 \nnum_epoch_to_train = 100000000\nsave_per_iteration = 5000\nglobal_step_start = 0 \ninitial_checkpoint = None\ninitial_checkpoint_optimizer = None\nLEARNING_RATE = 3e-4\nbatch_size = 2\nbatch_size_val = 1 \nnum_thread=2\nnum_groups = 8\nEPS = 1e-12\nnp.set_printoptions(precision=8)\nNUM_INPUT_CHANNEL = visual_input_channels\nNUM_OUTPUT_CHANNEL = visual_output_channels\n\ndef get_data_filepath_list_by_year(input_data_folder_path):\n data_filepath_list_1 = []\n data_filepath_list_2 = []\n for filename in os.listdir(input_data_folder_path):\n if filename.split('.')[-1] != 'h5': \n continue\n if filename.startswith('2019'):\n data_filepath_list_1.append(os.path.join(input_data_folder_path, filename))\n elif filename.startswith('2020'):\n data_filepath_list_2.append(os.path.join(input_data_folder_path, filename))\n else:\n print('Error - Unknown data year\\t', filename)\n exit(-1)\n data_filepath_list_1 = sorted(data_filepath_list_1)\n data_filepath_list_2 = sorted(data_filepath_list_2)\n return data_filepath_list_1, data_filepath_list_2\n\nclass Deconv3x3Block(nn.Sequential):\n def __init__(self, \n in_size: int, \n h_size: int, ) -> None:\n super(Deconv3x3Block, self).__init__()\n self.add_module('deconv', nn.ConvTranspose2d(in_size, h_size, kernel_size=3, stride=2, padding=1, bias=True))\n self.add_module('elu', nn.ELU(inplace=True)) \n self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size)) \n\nclass Conv1x1Block(nn.Sequential):\n def __init__(self, \n in_size: int, \n h_size: int, ) -> None:\n super(Conv1x1Block, self).__init__()\n self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=1, stride=1, padding=0, bias=True))\n\nclass Conv3x3Block(nn.Sequential):\n def __init__(self, \n in_size: int, \n h_size: int, ) -> None:\n super(Conv3x3Block, self).__init__()\n self.add_module('conv', nn.Conv2d(in_size, h_size, kernel_size=3, stride=1, padding=1, bias=True))\n self.add_module('elu', nn.ELU(inplace=True)) \n self.add_module('norm', nn.GroupNorm(num_groups=num_groups, num_channels=h_size)) \n\nclass AvgBlock(nn.Sequential):\n def __init__(self, \n kernel_size: int, \n stride: int, \n padding: int) -> None:\n super(AvgBlock, self).__init__()\n 
self.add_module('pool', nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=padding)) \n \nclass MaxBlock(nn.Sequential):\n def __init__(self, \n kernel_size: int, \n stride: int, \n padding: int) -> None:\n super(MaxBlock, self).__init__()\n self.add_module('pool', nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding)) \n\nclass DownBlock(nn.Module):\n \n\n def __init__(self, \n in_size: int, \n h_size: int, \n out_size: int, \n do_pool: int = True):\n \n super(DownBlock, self).__init__() \n\n self.do_pool = do_pool\n \n in_size_cum = in_size \n \n self.conv_1 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)\n in_size_cum += h_size\n \n self.conv_3 = Conv3x3Block( in_size=in_size_cum, h_size=h_size)\n in_size_cum += h_size\n \n self.conv_2 = Conv1x1Block( in_size=in_size_cum, h_size=out_size)\n\n def forward(self, x):\n \n batch_size = len(x)\n\n if self.do_pool:\n x = F.interpolate(x, scale_factor=0.7, mode='bilinear', align_corners=False, recompute_scale_factor=None)\n\n x_list = []\n x_list.append(x)\n \n x = self.conv_1(x)\n x_list.append(x)\n x = torch.cat(x_list, 1)\n \n x = self.conv_3(x)\n x_list.append(x)\n x = torch.cat(x_list, 1)\n \n x = self.conv_2(x)\n\n return x\n\n def cuda(self, ):\n super(DownBlock, self).cuda() \n \n self.conv_1.cuda()\n self.conv_3.cuda()\n self.conv_2.cuda()\n \n return self\n\n\nclass UpBlock(nn.Module):\n def __init__(self, \n in_size: int, \n in_size_2: int, \n h_size: int, \n out_size: int, \n ):\n super(UpBlock, self).__init__() \n self.deconv = Conv3x3Block( in_size=in_size, h_size=h_size)\n self.out_conv = Conv3x3Block( in_size=h_size + in_size_2, h_size=out_size)\n\n def forward(self, x1, x2):\n x1 = self.deconv(x1)\n x1 = F.interpolate(x1, size=x2.size()[2:4], scale_factor=None, mode='bilinear', align_corners=False, recompute_scale_factor=None)\n x = torch.cat([x2, x1], dim=1)\n return self.out_conv(x)\n\n def cuda(self, ):\n super(UpBlock, self).cuda() \n self.deconv.cuda()\n self.out_conv.cuda()\n return self\n \n\nclass NetA(nn.Module):\n\n def __init__(self,):\n super(NetA, self).__init__()\n self.block0 = DownBlock(in_size=NUM_INPUT_CHANNEL, h_size=128, out_size=128, do_pool=False)\n self.block1 = DownBlock(in_size=128, h_size=128, out_size=128,)\n self.block2 = DownBlock(in_size=128, h_size=128, out_size=128, )\n self.block3 = DownBlock(in_size=128, h_size=128, out_size=128, )\n self.block4 = DownBlock(in_size=128, h_size=128, out_size=128, )\n self.block5 = DownBlock(in_size=128, h_size=128, out_size=128, )\n self.block6 = DownBlock(in_size=128, h_size=128, out_size=128,)\n self.block7 = DownBlock(in_size=128, h_size=128, out_size=128,)\n \n self.block20 = Conv3x3Block(in_size=128, h_size=128)\n\n self.block16 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,) \n self.block15 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,) \n self.block14 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,) \n self.block13 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,) \n self.block12 = UpBlock(in_size=128, in_size_2=128, h_size=128, out_size=128,) \n self.block11 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,) \n self.block10 = UpBlock(in_size=128, in_size_2=128 , h_size=128, out_size=128,) \n\n self.out_conv = nn.Sequential(nn.Conv2d(128*1, NUM_OUTPUT_CHANNEL, kernel_size=3, stride=1, padding=1, bias=True))\n \n if 1:\n for name, m in self.named_modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n 
nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.GroupNorm):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n\n batch_size = len(x)\n x0 = self.block0(x)\n x1 = self.block1(x0)\n x2 = self.block2(x1)\n x3 = self.block3(x2)\n x4 = self.block4(x3)\n x5 = self.block5(x4)\n x6 = self.block6(x5)\n x7 = self.block7(x6)\n \n x = self.block20(x7)\n \n x = self.block16(x, x6)\n x = self.block15(x, x5)\n x = self.block14(x, x4)\n x = self.block13(x, x3)\n x = self.block12(x, x2)\n x = self.block11(x, x1)\n x = self.block10(x, x0)\n\n x = self.out_conv(x)\n x = torch.sigmoid(x)\n return x\n\n def cuda(self, ):\n super(NetA, self).cuda()\n \n self.block0.cuda()\n self.block1.cuda()\n self.block2.cuda()\n self.block3.cuda()\n self.block4.cuda()\n self.block5.cuda()\n self.block6.cuda()\n self.block7.cuda()\n \n self.block20.cuda()\n \n self.block16.cuda()\n self.block15.cuda()\n self.block14.cuda()\n self.block13.cuda()\n self.block12.cuda()\n self.block11.cuda()\n self.block10.cuda()\n \n self.out_conv.cuda()\n return self \n \n\n \n \nif __name__ == '__main__':\n \n if initial_checkpoint == None:\n assert global_step_start == 0 \n else:\n assert global_step_start > 0 \n\n random.seed(SEED)\n np.random.seed(SEED)\n torch.manual_seed(SEED)\n torch.cuda.manual_seed_all(SEED)\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = True \n torch.backends.cudnn.deterministic = False \n\n try:\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n except Exception:\n print('out_dir not made')\n\n net = NetA().cuda()\n optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=LEARNING_RATE)\n loss_func2 = nn.MSELoss() \n if initial_checkpoint is not None:\n print('Loading ', initial_checkpoint)\n state_dict = torch.load(initial_checkpoint, map_location=lambda storage, loc: storage)\n net.load_state_dict(state_dict, strict=True)\n optimizer_state_dict_ = torch.load(initial_checkpoint_optimizer, map_location=lambda storage, loc: storage)\n optimizer_state_dict = optimizer_state_dict_['optimizer']\n optimizer.load_state_dict(optimizer_state_dict)\n\n static_data = None\n if 1:\n file_path = input_static_data_path\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = np.asarray(fr[a_group_key], np.uint8)\n static_data = data[np.newaxis,:,:,:]\n static_data = static_data.astype(np.float32)\n static_data = static_data / 255.0\n static_data_list = []\n if 1:\n for other_city in other_city_list:\n file_path = '../../0_data/' + other_city + '/' + other_city + \"_static.h5\"\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = np.asarray(fr[a_group_key], np.uint8)\n static_data_ = data[np.newaxis,:,:,:]\n static_data_ = static_data_.astype(np.float32)\n static_data_ = static_data_ / 255.0\n static_data_list.append(static_data_)\n\n train_static_data_index_list = []\n train_data_filepath_list, val_data_filepath_list = get_data_filepath_list_by_year(input_train_data_folder_path)\n target_city_i = other_city_list.index(target_city)\n for _ in range(len(train_data_filepath_list)):\n train_static_data_index_list.append(target_city_i)\n for o, other_city in enumerate(other_city_list):\n if o == target_city_i:\n continue\n train_data_filepath_list_one, _ = get_data_filepath_list_by_year('../../0_data/' + other_city + '/' + 
'training')\n for _ in range(len(train_data_filepath_list_one)):\n train_static_data_index_list.append(o)\n train_data_filepath_list += train_data_filepath_list_one\n\n train_set = [] \n for i in range(len(train_data_filepath_list)):\n for j in range(num_sequence_per_day):\n train_set.append( (i,j) )\n num_iteration_per_epoch = int(len(train_set) / batch_size)\n print('num_iteration_per_epoch:', num_iteration_per_epoch)\n assert num_iteration_per_epoch > 10\n val_set = []\n val_skip_k = 0\n val_skip_ratio = 5\n for i in range(len(val_data_filepath_list)):\n for j in range(0, num_sequence_per_day, num_frame_sequence):\n val_skip_k += 1\n if val_skip_k % val_skip_ratio == 0:\n val_set.append( (i,j) )\n num_val_iteration_per_epoch = int(len(val_set) / batch_size_val) \n print('num_val_iteration_per_epoch:', num_val_iteration_per_epoch)\n\n \n train_input_queue = queue.Queue()\n train_output_queue = queue.Queue()\n def load_train_multithread():\n while True:\n if train_input_queue.empty() or train_output_queue.qsize() > 8:\n time.sleep(0.1)\n continue\n i_j_list = train_input_queue.get()\n train_orig_data_batch_list = []\n train_data_batch_list = [] \n train_data_mask_list = [] \n train_stat_batch_list = [] \n train_static_data_batch_list = []\n for train_i_j in i_j_list:\n (i,j) = train_i_j\n file_path = train_data_filepath_list[i]\n train_static_data_batch_list.append(static_data_list[train_static_data_index_list[i]])\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = fr[a_group_key] \n train_data_batch_list.append(data[j:j+num_frame_sequence,:,:,:][np.newaxis,:,:,:,:]) \n train_data_batch = np.concatenate(train_data_batch_list, axis=0)\n train_static_data_batch = np.concatenate(train_static_data_batch_list,axis=0)\n input_data = train_data_batch[:,:num_frame_before ,:,:,:] \n orig_label = train_data_batch[:, num_frame_before:,:,:,:num_channel_out] \n true_label = np.concatenate((orig_label[:, 0:3, :,:,:], orig_label[:, 5::3,:,:,:] ), axis=1)\n input_data = input_data.astype(np.float32)\n true_label = true_label.astype(np.float32)\n input_data = input_data / 255.0\n true_label = true_label / 255.0\n \n flip_dr = np.random.randint(0,2)\n if flip_dr == 1:\n input_data_flipped = copy.deepcopy(input_data) \n input_data_flipped[:,:,:,:,4:8] = input_data[:,:,:,:,0:4]\n input_data_flipped[:,:,:,:,0:4] = input_data[:,:,:,:,4:8]\n input_data = input_data_flipped[:,:,::-1,::-1,:]\n true_label_flipped = copy.deepcopy(true_label)\n true_label_flipped[:,:,:,:,4:8] = true_label[:,:,:,:,0:4]\n true_label_flipped[:,:,:,:,0:4] = true_label[:,:,:,:,4:8]\n true_label = true_label_flipped[:,:,::-1,::-1,:] \n train_static_data_batch_flipped = copy.deepcopy(train_static_data_batch)\n train_static_data_batch_flipped[:,5:9,:,:] = train_static_data_batch[:,1:5,:,:]\n train_static_data_batch_flipped[:,1:5,:,:] = train_static_data_batch[:,5:9,:,:]\n train_static_data_batch = train_static_data_batch_flipped[:,:,::-1,::-1]\n\n input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size, -1, height, width)) \n true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size, -1, height, width)) \n input_data = np.concatenate((input_data, train_static_data_batch), axis=1)\n train_output_queue.put( (input_data, true_label) )\n thread_list = []\n assert num_thread > 0\n for i in range(num_thread):\n t = threading.Thread(target=load_train_multithread)\n t.start()\n \n \n net.train() \n sum_train_loss = 0.0\n sum_train_iter = 0\n global_step = global_step_start\n for epoch in 
range(num_epoch_to_train):\n np.random.shuffle(train_set)\n for a in range(num_iteration_per_epoch):\n i_j_list = [] \n for train_i_j in train_set[a * batch_size : (a+1) * batch_size]:\n i_j_list.append(train_i_j)\n train_input_queue.put(i_j_list)\n \n for a in range(num_iteration_per_epoch):\n\n if global_step % save_per_iteration == 0:\n net.eval()\n state_dict_0 = copy.deepcopy(net.state_dict())\n torch.save(state_dict_0, out_dir + '/%09d_model.pth' % (global_step))\n torch.save(\n {\n 'optimizer': optimizer.state_dict(),\n 'global_step': global_step,\n 'epoch': epoch,\n }, \n out_dir + '/%09d_optimizer.pth' % (global_step)) \n \n eval_loss_list = list()\n eval_loss_list = [0]\n with torch.no_grad():\n for a in range(num_val_iteration_per_epoch):\n val_orig_data_batch_list = []\n val_data_batch_list = [] \n val_data_mask_list = [] \n val_stat_batch_list = [] \n for i_j in val_set[a * batch_size_val : (a+1) * batch_size_val]:\n (i,j) = i_j\n file_path = val_data_filepath_list[i]\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = fr[a_group_key]\n val_data_batch_list.append(data[j:j+num_frame_sequence,:,:,:][np.newaxis,:,:,:,:])\n val_data_batch = np.concatenate(val_data_batch_list, axis=0)\n input_data = val_data_batch[:,:num_frame_before ,:,:,:] \n orig_label = val_data_batch[:, num_frame_before:,:,:,:num_channel_out] \n true_label = np.concatenate((orig_label[:, 0:3, :,:,:], orig_label[:, 5::3,:,:,:]), axis=1)\n input_data = input_data.astype(np.float32)\n true_label = true_label.astype(np.float32)\n input_data = input_data / 255.0\n true_label = true_label / 255.0\n input_data = np.moveaxis(input_data, -1, 2).reshape((batch_size_val, -1, height, width))\n true_label = np.moveaxis(true_label, -1, 2).reshape((batch_size_val, -1, height, width))\n input_data = np.concatenate((input_data,np.repeat(static_data, batch_size_val, axis=0)), axis=1)\n input = torch.from_numpy(input_data).float().cuda() \n target = torch.from_numpy(true_label).float().cuda() \n prediction = net(input)\n loss = loss_func2(prediction, target) \n eval_loss_list.append(loss.item())\n avg_train_loss = sum_train_loss / (float(sum_train_iter)+EPS)\n sum_train_loss = 0.0\n sum_train_iter = 0\n\n print('global_step:', global_step, '\\t', 'epoch:', epoch, \\\n '\\t', 'train_loss:', avg_train_loss, \\\n '\\t', 'eval_loss:', np.mean(eval_loss_list), \\\n '\\t', datetime.now(), )\n debug_out = open('res.txt', 'a')\n debug_out.write(str(global_step))\n debug_out.write('\\t')\n debug_out.write('%.8f' % float(avg_train_loss))\n debug_out.write('\\t')\n debug_out.write('%.8f' % float(np.mean(eval_loss_list)))\n debug_out.write('\\n')\n debug_out.close()\n net.train()\n\n while train_output_queue.empty():\n time.sleep(0.1)\n (input_data, true_label) = train_output_queue.get()\n optimizer.zero_grad()\n input = torch.from_numpy(input_data).float().cuda() \n target = torch.from_numpy(true_label).float().cuda() \n prediction = net(input)\n loss = loss_func2(prediction, target) \n sum_train_iter += 1\n sum_train_loss += loss.item()\n loss.backward()\n optimizer.step()\n global_step += 1\n\n" ]
[ [ "torch.cuda.manual_seed_all", "torch.no_grad", "numpy.random.seed", "numpy.asarray", "numpy.moveaxis", "torch.nn.Conv2d", "torch.cat", "torch.nn.ConvTranspose2d", "torch.nn.init.kaiming_normal_", "torch.nn.GroupNorm", "torch.save", "numpy.set_printoptions", "torch.from_numpy", "torch.nn.AvgPool2d", "torch.nn.ELU", "torch.sigmoid", "numpy.mean", "torch.nn.MaxPool2d", "torch.load", "torch.manual_seed", "numpy.repeat", "numpy.random.shuffle", "torch.nn.MSELoss", "torch.nn.init.constant_", "numpy.concatenate", "numpy.random.randint", "torch.nn.functional.interpolate" ] ]
jwsiegel2510/ESPEI
[ "cb72f676138c96d560d8b83cea6b7ca2da100078" ]
[ "espei/datasets.py" ]
[ "import fnmatch, warnings, json, os\n\nimport numpy as np\nfrom six import string_types\nfrom tinydb.storages import MemoryStorage\nfrom tinydb import where\n\nfrom espei.utils import PickleableTinyDB\nfrom espei.core_utils import recursive_map\n\nclass DatasetError(Exception):\n \"\"\"Exception raised when datasets are invalid.\"\"\"\n pass\n\n\ndef check_dataset(dataset):\n \"\"\"Ensure that the dataset is valid and consistent.\n\n Currently supports the following validation checks:\n * data shape is valid\n * phases and components used match phases and components entered\n * individual shapes of keys, such as ZPF, sublattice configs and site ratios\n\n Planned validation checks:\n * all required keys are present\n\n Note that this follows some of the implicit assumptions in ESPEI at the time\n of writing, such that conditions are only P, T, configs for single phase and\n essentially only T for ZPF data.\n\n Parameters\n ----------\n dataset : dict\n Dictionary of the standard ESPEI dataset.\n\n Returns\n -------\n None\n\n Raises\n ------\n DatasetError\n If an error is found in the dataset\n \"\"\"\n is_activity = dataset['output'].startswith('ACR')\n is_zpf = dataset['output'] == 'ZPF'\n is_single_phase = (not is_zpf) and (not is_activity)\n components = dataset['components']\n conditions = dataset['conditions']\n values = dataset['values']\n phases = dataset['phases']\n if is_single_phase:\n solver = dataset['solver']\n sublattice_configurations = solver['sublattice_configurations']\n sublattice_site_ratios = solver['sublattice_site_ratios']\n sublattice_occupancies = solver.get('sublattice_occupancies', None)\n # check for mixing\n is_mixing = any([any([isinstance(subl, list) for subl in config]) for config in sublattice_configurations])\n # pad the values of sublattice occupancies if there is no mixing\n if sublattice_occupancies is None and not is_mixing:\n sublattice_occupancies = [None]*len(sublattice_configurations)\n elif sublattice_occupancies is None:\n raise DatasetError('At least one sublattice in the following sublattice configurations is mixing, but the \"sublattice_occupancies\" key is empty: {}'.format(sublattice_configurations))\n if is_activity:\n conditions = dataset['conditions']\n ref_state = dataset['reference_state']\n comp_conditions = {k: v for k, v in conditions.items() if k.startswith('X_')}\n\n\n # check that the shape of conditions match the values\n num_pressure = np.atleast_1d(conditions['P']).size\n num_temperature = np.atleast_1d(conditions['T']).size\n if is_activity:\n values_shape = np.array(values).shape\n # check each composition condition is the same shape\n num_x_conds = [len(v) for _, v in comp_conditions.items()]\n if num_x_conds.count(num_x_conds[0]) != len(num_x_conds):\n raise DatasetError('All compositions in conditions are not the same shape. Note that conditions cannot be broadcast. 
Composition conditions are {}'.format(comp_conditions))\n conditions_shape = (num_pressure, num_temperature, num_x_conds[0])\n if conditions_shape != values_shape:\n raise DatasetError('Shape of conditions (P, T, compositions): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))\n elif is_single_phase:\n values_shape = np.array(values).shape\n num_configs = len(dataset['solver']['sublattice_configurations'])\n conditions_shape = (num_pressure, num_temperature, num_configs)\n if conditions_shape != values_shape:\n raise DatasetError('Shape of conditions (P, T, configs): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))\n elif is_zpf:\n values_shape = (len(values))\n conditions_shape = (num_temperature)\n if conditions_shape != values_shape:\n raise DatasetError('Shape of conditions (T): {} does not match the shape of the values {}.'.format(conditions_shape, values_shape))\n\n # check that all of the correct phases are present\n if is_zpf:\n phases_entered = set(phases)\n phases_used = set()\n for zpf in values:\n for tieline in zpf:\n phases_used.add(tieline[0])\n if len(phases_entered - phases_used) > 0:\n raise DatasetError('Phases entered {} do not match phases used {}.'.format(phases_entered, phases_used))\n\n # check that all of the components used match the components entered\n components_entered = set(components)\n components_used = set()\n if is_single_phase:\n for config in sublattice_configurations:\n for sl in config:\n if isinstance(sl, list):\n components_used.update(set(sl))\n else:\n components_used.add(sl)\n comp_dof = 0\n elif is_activity:\n components_used.update({c.split('_')[1] for c in comp_conditions.keys()})\n # mass balance of components\n comp_dof = len(comp_conditions.keys())\n elif is_zpf:\n for zpf in values:\n for tieline in zpf:\n tieline_comps = set(tieline[1])\n components_used.update(tieline_comps)\n if len(components_entered - tieline_comps - {'VA'}) != 1:\n raise DatasetError('Degree of freedom error for entered components {} in tieline {} of ZPF {}'.format(components_entered, tieline, zpf))\n # handle special case of mass balance in ZPFs\n comp_dof = 1\n if len(components_entered - components_used - {'VA'}) > comp_dof or len(components_used - components_entered) > 0:\n raise DatasetError('Components entered {} do not match components used {}.'.format(components_entered, components_used))\n\n # check that the ZPF values are formatted properly\n if is_zpf:\n for zpf in values:\n for tieline in zpf:\n phase = tieline[0]\n component_list = tieline[1]\n mole_fraction_list = tieline[2]\n # check that the phase is a string, components a list of strings,\n # and the fractions are a list of float\n if not isinstance(phase, string_types):\n raise DatasetError('The first element in the tieline {} for the ZPF point {} should be a string. Instead it is a {} of value {}'.format(tieline, zpf, type(phase), phase))\n if not all([isinstance(comp, string_types) for comp in component_list]):\n raise DatasetError('The second element in the tieline {} for the ZPF point {} should be a list of strings. Instead it is a {} of value {}'.format(tieline, zpf, type(component_list), component_list))\n if not all([(isinstance(mole_frac, (int, float)) or mole_frac is None) for mole_frac in mole_fraction_list]):\n raise DatasetError('The last element in the tieline {} for the ZPF point {} should be a list of numbers. 
Instead it is a {} of value {}'.format(tieline, zpf, type(mole_fraction_list), mole_fraction_list))\n # check that the shape of components list and mole fractions list is the same\n if len(component_list) != len(mole_fraction_list):\n raise DatasetError('The length of the components list and mole fractions list in tieline {} for the ZPF point {} should be the same.'.format(tieline, zpf))\n # check that all mole fractions are less than one\n mf_sum = np.nansum(np.array(mole_fraction_list, dtype=np.float))\n if any([mf is not None for mf in mole_fraction_list]) and mf_sum > 1.0:\n raise DatasetError('Mole fractions for tieline {} for the ZPF point {} sum to greater than one.'.format(tieline, zpf))\n\n # check that the site ratios are valid as well as site occupancies, if applicable\n if is_single_phase:\n nconfigs = len(sublattice_configurations)\n noccupancies = len(sublattice_occupancies)\n if nconfigs != noccupancies:\n raise DatasetError('Number of sublattice configurations ({}) does not match the number of sublattice occupancies ({})'.format(nconfigs, noccupancies))\n for configuration, occupancy in zip(sublattice_configurations, sublattice_occupancies):\n if len(configuration) != len(sublattice_site_ratios):\n raise DatasetError('Sublattice configuration {} and sublattice site ratio {} describe different numbers of sublattices ({} and {}).'.format(configuration, sublattice_site_ratios, len(configuration), len(sublattice_site_ratios)))\n if is_mixing:\n configuration_shape = tuple(len(sl) if isinstance(sl, list) else 1 for sl in configuration)\n occupancy_shape = tuple(len(sl) if isinstance(sl, list) else 1 for sl in occupancy)\n if configuration_shape != occupancy_shape:\n raise DatasetError('The shape of sublattice configuration {} ({}) does not match the shape of occupancies {} ({})'.format(configuration, configuration_shape, occupancy, occupancy_shape))\n # check that sublattice interactions are in sorted. Related to sorting in espei.core_utils.get_samples\n for subl in configuration:\n if isinstance(subl, (list, tuple)) and sorted(subl) != subl:\n raise DatasetError('Sublattice {} in configuration {} is must be sorted in alphabetic order ({})'.format(subl, configuration, sorted(subl)))\n\n\ndef clean_dataset(dataset):\n \"\"\"\n Clean an ESPEI dataset dictionary.\n\n Parameters\n ----------\n dataset : dict\n Dictionary of the standard ESPEI dataset. dataset : dic\n\n Returns\n -------\n dict\n Modified dataset that has been cleaned\n\n Notes\n -----\n Assumes a valid, checked dataset. 
Currently handles\n * Converting expected numeric values to floats\n\n \"\"\"\n dataset[\"conditions\"] = {k: recursive_map(float, v) for k, v in dataset[\"conditions\"].items()}\n\n solver = dataset.get(\"solver\")\n if solver is not None:\n solver[\"sublattice_site_ratios\"] = recursive_map(float, solver[\"sublattice_site_ratios\"])\n occupancies = solver.get(\"sublattice_occupancies\")\n if occupancies is not None:\n solver[\"sublattice_occupancies\"] = recursive_map(float, occupancies)\n\n if dataset[\"output\"] == \"ZPF\":\n values = dataset[\"values\"]\n new_values = []\n for tieline in values:\n new_tieline = []\n for tieline_point in tieline:\n if all([comp is None for comp in tieline_point[2]]):\n # this is a null tieline point\n new_tieline.append(tieline_point)\n else:\n new_tieline.append([tieline_point[0], tieline_point[1], recursive_map(float, tieline_point[2])])\n new_values.append(new_tieline)\n dataset[\"values\"] = new_values\n else:\n # values should be all numerical\n dataset[\"values\"] = recursive_map(float, dataset[\"values\"])\n\n return dataset\n\n\ndef apply_tags(datasets, tags):\n \"\"\"\n Modify datasets using the tags system\n\n Parameters\n ----------\n datasets : PickleableTinyDB\n Datasets to modify\n tags : dict\n Dictionary of {tag: update_dict}\n\n Returns\n -------\n PickleableTinyDB\n\n Notes\n -----\n In general, everything replaces or is additive. We use the following update rules:\n 1. If the update value is a list, extend the existing list (empty list if key does not exist)\n 2. If the update value is scalar, override the previous (deleting any old value, if present)\n 3. If the update value is a dict, update the exist dict (empty dict if dict does not exist)\n 4. Otherwise, the value is updated, overriding the previous\n\n Examples\n --------\n >>> from espei.utils import PickleableTinyDB\n >>> from tinydb.storages import MemoryStorage\n >>> ds = PickleableTinyDB(storage=MemoryStorage)\n >>> doc_id = ds.insert({'tags': ['dft'], 'excluded_model_contributions': ['contrib']})\n >>> my_tags = {'dft': {'excluded_model_contributions': ['idmix', 'mag'], 'weight': 5.0}}\n >>> from espei.datasets import apply_tags\n >>> apply_tags(ds, my_tags)\n >>> all_data = ds.all()\n >>> all(d['excluded_model_contributions'] == ['contrib', 'idmix', 'mag'] for d in all_data)\n True\n >>> all(d['weight'] == 5.0 for d in all_data)\n True\n\n \"\"\"\n for tag, update_dict in tags.items():\n matching_datasets = datasets.search(where(\"tags\").test(lambda x: tag in x))\n for newkey, newval in update_dict.items():\n for match in matching_datasets:\n if isinstance(newval, list):\n match[newkey] = match.get(newkey, []) + newval\n elif np.isscalar(newval):\n match[newkey] = newval\n elif isinstance(newval, dict):\n d = match.get(newkey, dict())\n d.update(newval)\n match[newkey] = d\n else:\n match[newkey] = newval\n datasets.write_back(matching_datasets)\n\n\ndef add_ideal_exclusions(datasets):\n \"\"\"\n If there are single phase datasets present and none of them have an\n `excluded_model_contributions` key, add ideal exclusions automatically and\n emit a DeprecationWarning that this feature will be going away.\n\n Parameters\n ----------\n datasets : PickleableTinyDB\n\n Returns\n -------\n PickleableTinyDB\n\n \"\"\"\n all_single_phase = datasets.search(where('solver').exists())\n no_exclusions = datasets.search(where('solver').exists() & (~where('excluded_model_contributions').exists()))\n if len(all_single_phase) > 0 and len(all_single_phase) == len(no_exclusions):\n 
idmix_warning = \"Single phase datasets are present, but there are no specified `excluded_model_contributions` keys present. \" + \\\n \"'idmix' exclusion will be added automatically for backwards compatibility, but this will go away in ESPEI v0.8. \" + \\\n \"If you want ideal mixing contributions to be excluded, see the documentation for building datasets: http://espei.org/en/latest/input_data.html\"\n warnings.warn(idmix_warning, DeprecationWarning)\n print(idmix_warning)\n import espei\n if int(espei.__version__.split('.')[1]) >= 8 or int(espei.__version__.split('.')[0]) > 0:\n raise ValueError(\"ESPEI developer: remove the automatic addition of ideal mixing exclusions\")\n for ds in all_single_phase:\n ds['excluded_model_contributions'] = ['idmix']\n datasets.write_back(all_single_phase)\n return datasets\n\ndef load_datasets(dataset_filenames):\n \"\"\"\n Create a PickelableTinyDB with the data from a list of filenames.\n\n Parameters\n ----------\n dataset_filenames : [str]\n List of filenames to load as datasets\n\n Returns\n -------\n PickleableTinyDB\n \"\"\"\n ds_database = PickleableTinyDB(storage=MemoryStorage)\n for fname in dataset_filenames:\n with open(fname) as file_:\n try:\n d = json.load(file_)\n check_dataset(d)\n ds_database.insert(clean_dataset(d))\n except ValueError as e:\n raise ValueError('JSON Error in {}: {}'.format(fname, e))\n except DatasetError as e:\n raise DatasetError('Dataset Error in {}: {}'.format(fname, e))\n return ds_database\n\n\ndef recursive_glob(start, pattern='*.json'):\n \"\"\"\n Recursively glob for the given pattern from the start directory.\n\n Parameters\n ----------\n start : str\n Path of the directory to walk while for file globbing\n pattern : str\n Filename pattern to match in the glob.\n\n Returns\n -------\n [str]\n List of matched filenames\n\n \"\"\"\n matches = []\n for root, dirnames, filenames in os.walk(start):\n for filename in fnmatch.filter(filenames, pattern):\n matches.append(os.path.join(root, filename))\n return sorted(matches)\n" ]
[ [ "numpy.array", "numpy.isscalar", "numpy.atleast_1d" ] ]
kornesh/text
[ "f762def9dbb14f8f182936dd25af154af79f366e" ]
[ "tensorflow_text/python/ops/bert_tokenizer.py" ]
[ "# coding=utf-8\n# Copyright 2019 TF.Text Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Basic tokenization ops for BERT preprocessing.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow_text.python.ops import regex_split_ops\nfrom tensorflow_text.python.ops.normalize_ops import case_fold_utf8\nfrom tensorflow_text.python.ops.normalize_ops import normalize_utf8\nfrom tensorflow_text.python.ops.tokenization import TokenizerWithOffsets\nfrom tensorflow_text.python.ops.wordpiece_tokenizer import WordpieceTokenizer\n\n\n_DELIM_REGEX = [\n r\"\\s+\",\n r\"|\".join([\n r\"[!-/]\",\n r\"[:-@]\",\n r\"[\\[-`]\",\n r\"[{-~]\",\n r\"[\\p{P}]\",\n ]),\n r\"|\".join([\n r\"[\\x{4E00}-\\x{9FFF}]\",\n r\"[\\x{3400}-\\x{4DBF}]\",\n r\"[\\x{20000}-\\x{2A6DF}]\",\n r\"[\\x{2A700}-\\x{2B73F}]\",\n r\"[\\x{2B740}-\\x{2B81F}]\",\n r\"[\\x{2B820}-\\x{2CEAF}]\",\n r\"[\\x{F900}-\\x{FAFF}]\",\n r\"[\\x{2F800}-\\x{2FA1F}]\",\n ]),\n]\n\n_DELIM_REGEX_PATTERN = \"|\".join(_DELIM_REGEX)\n_KEEP_DELIM_NO_WHITESPACE = copy.deepcopy(_DELIM_REGEX)\n_KEEP_DELIM_NO_WHITESPACE.remove(r\"\\s+\")\n\n_KEEP_DELIM_NO_WHITESPACE_PATTERN = \"|\".join(_KEEP_DELIM_NO_WHITESPACE)\n\n\nclass BasicTokenizer(TokenizerWithOffsets):\n \"\"\"Basic tokenizer for for tokenizing text.\n\n A basic tokenizer that tokenizes using some deterministic rules:\n - For most languages, this tokenizer will split on whitespace.\n - For Chinese, Japanese, and Korean characters, this tokenizer will split on\n Unicode characters.\n\n Attributes:\n lower_case: bool - If true, a preprocessing step is added to lowercase the\n text, apply NFD normalization, and strip accents characters.\n keep_whitespace: bool - If true, preserves whitespace characters instead of\n stripping them away.\n normalization_form: If true and lower_case=False, the input text will be\n normalized to `normalization_form`. 
See normalize_utf8() op for a list of\n valid values.\n \"\"\"\n\n def __init__(self,\n lower_case=False,\n keep_whitespace=False,\n normalization_form=None):\n self._lower_case = lower_case\n if not keep_whitespace:\n self._keep_delim_regex_pattern = _KEEP_DELIM_NO_WHITESPACE_PATTERN\n else:\n self._keep_delim_regex_pattern = _DELIM_REGEX_PATTERN\n self._normalization_form = normalization_form\n\n def tokenize(self, text_input):\n tokens, _, _ = self.tokenize_with_offsets(text_input)\n return tokens\n\n def tokenize_with_offsets(self, text_input):\n \"\"\"Performs basic word tokenization for BERT.\n\n Args:\n text_input: A `Tensor` or `RaggedTensor` of untokenized UTF-8 strings.\n Returns:\n A `RaggedTensor` of tokenized strings from text_input.\n \"\"\"\n # lowercase and strip accents (if option is set)\n if self._lower_case:\n text_input = case_fold_utf8(text_input)\n text_input = normalize_utf8(text_input, \"NFD\")\n text_input = string_ops.regex_replace(text_input, r\"\\p{Mn}\", \"\")\n else:\n # utf8 normalization\n if self._normalization_form is not None:\n text_input = normalize_utf8(text_input, self._normalization_form)\n\n # strip out control characters\n text_input = string_ops.regex_replace(text_input, r\"\\p{Cc}|\\p{Cf}\", \" \")\n\n return regex_split_ops.regex_split_with_offsets(\n text_input, _DELIM_REGEX_PATTERN, self._keep_delim_regex_pattern,\n \"BertBasicTokenizer\")\n\n\nclass BertTokenizer(TokenizerWithOffsets):\n \"\"\"Tokenizer used for BERT.\n\n This tokenizer applies an end-to-end, text string to wordpiece tokenization.\n It first applies basic tokenization, and then follwed by wordpiece\n tokenization.\n\n See BasicTokenizer and WordpieceTokenizer for their respective details.\n\n Attributes:\n vocab_lookup_table: A lookup table implementing the LookupInterface\n containing the vocabulary of subwords or a string which is the file path\n to the vocab.txt file.\n suffix_indicator: (optional) The characters prepended to a wordpiece to\n indicate that it is a suffix to another subword. Default is '##'.\n max_bytes_per_word: (optional) Max size of input token. Default is 100.\n max_chars_per_token: (optional) Max size of subwords, excluding suffix\n indicator. If known, providing this improves the efficiency of decoding\n long words.\n token_out_type: (optional) The type of the token to return. This can be\n `tf.int64` IDs, or `tf.string` subwords. The default is `tf.int64`.\n unknown_token: (optional) The value to use when an unknown token is found.\n Default is \"[UNK]\". If this is set to a string, and `token_out_type` is\n `tf.int64`, the `vocab_lookup_table` is used to convert the\n `unknown_token` to an integer. If this is set to `None`,\n out-of-vocabulary tokens are left as is.\n split_unknown_characters: (optional) Whether to split out single unknown\n characters as subtokens. If False (default), words containing unknown\n characters will be treated as single unknown tokens.\n lower_case: bool - If true, a preprocessing step is added to lowercase the\n text, apply NFD normalization, and strip accents characters.\n keep_whitespace: bool - If true, preserves whitespace characters instead of\n stripping them away.\n normalization_form: If true and lower_case=False, the input text will be\n normalized to `normalization_form`. 
See normalize_utf8() op for a list\n of valid values.\n \"\"\"\n\n def __init__(self,\n vocab_lookup_table,\n suffix_indicator=\"##\",\n max_bytes_per_word=100,\n max_chars_per_token=None,\n token_out_type=dtypes.int64,\n unknown_token=\"[UNK]\",\n split_unknown_characters=False,\n lower_case=False,\n keep_whitespace=False,\n normalization_form=None):\n if isinstance(vocab_lookup_table, str):\n init = lookup_ops.TextFileIdTableInitializer(vocab_lookup_table)\n vocab_lookup_table = lookup_ops.StaticVocabularyTableV1(\n init, num_oov_buckets=1, lookup_key_dtype=dtypes.string)\n\n self._basic_tokenizer = BasicTokenizer(lower_case, keep_whitespace,\n normalization_form)\n self._wordpiece_tokenizer = WordpieceTokenizer(\n vocab_lookup_table, suffix_indicator, max_bytes_per_word,\n max_chars_per_token, token_out_type, unknown_token,\n split_unknown_characters)\n\n def tokenize_with_offsets(self, text_input):\n tokens, begin, _ = self._basic_tokenizer.tokenize_with_offsets(text_input)\n wordpieces, wp_begin, wp_end = (\n self._wordpiece_tokenizer.tokenize_with_offsets(tokens))\n begin_expanded = array_ops.expand_dims(begin, axis=2)\n final_begin = begin_expanded + wp_begin\n final_end = begin_expanded + wp_end\n return wordpieces, final_begin, final_end\n\n def tokenize(self, text_input):\n \"\"\"Performs untokenized text to wordpiece tokenization for BERT.\n\n Args:\n text_input: input: A `Tensor` or `RaggedTensor` of untokenized UTF-8\n strings.\n Returns:\n A `RaggedTensor` of tokens where `tokens[i1...iN, j]` is the string\n contents (or ID in the vocab_lookup_table representing that string)\n of the `jth` token in `input[i1...iN]`\n \"\"\"\n tokens = self._basic_tokenizer.tokenize(text_input)\n return self._wordpiece_tokenizer.tokenize(tokens)\n" ]
[ [ "tensorflow.python.ops.array_ops.expand_dims", "tensorflow.python.ops.lookup_ops.StaticVocabularyTableV1", "tensorflow.python.ops.string_ops.regex_replace", "tensorflow.python.ops.lookup_ops.TextFileIdTableInitializer" ] ]
Abner0627/IPRV_Optical-Flow
[ "85c0650f671ad44c8bbe1d820a761be42cbe56d0" ]
[ "func.py" ]
[ "import cv2\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# %%\ndef _pick(L, ty, path):\n L_ = [cv2.imread(os.path.join(path, i)) for i in L if i.split('_')[0]==ty]\n # 輸入影像\n return L_\n\ndef _gray(img):\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ndef _Pos(img, idx):\n def on_press(event):\n L.append(np.array([int(event.xdata), int(event.ydata)]))\n # 紀錄點選的座標點\n if len(L)>=2: \n plt.close()\n # 當點選次數大於等於2時,關閉視窗\n np.save('./npy/loc_' + idx + '.npy', np.array(L))\n # 儲存紀錄座標點\n fig = plt.figure()\n plt.imshow(img, animated= True)\n L = []\n fig.canvas.mpl_connect('button_press_event', on_press)\n # 用動態圖的形式產生介面供使用者點選目標點\n plt.show() \n\ndef _PlotPos(img, idx):\n img_c = np.copy(img)\n src = np.load('./npy/loc_' + idx + '.npy')\n # 輸入儲存的選取座標\n print('Choose point 1: ({}, {})'.format(src[0, 0], src[0, 1]))\n print('Choose point 2: ({}, {})'.format(src[1, 0], src[1, 1]))\n cv2.circle(img_c, (src[0, 0], src[0, 1]), 3, (0, 38, 255), -1)\n cv2.circle(img_c, (src[1, 0], src[1, 1]), 3, (0, 38, 255), -1)\n # 畫上座標點\n return img_c\n\n# def _flow(pre_img, nxt_img, pt_x, pt_y, param, init_flow=None):\n# XL, YL = [0], [0]\n# PX, PY = [pt_x], [pt_y]\n# flow = init_flow\n# ep = 1000\n# i=0\n# while ep>1e-2:\n# if i==0:\n# fg = 0\n# else:\n# fg = cv2.OPTFLOW_USE_INITIAL_FLOW\n# flow = cv2.calcOpticalFlowFarneback(pre_img, nxt_img, flow=flow, flags=fg, **param)\n \n# XL.append(flow[pt_y, pt_x, 0])\n# YL.append(flow[pt_y, pt_x, 1])\n# PX.append(int(pt_x + flow[pt_y, pt_x, 0]))\n# PY.append(int(pt_y + flow[pt_y, pt_x, 1]))\n# print('iter:{}, ep:{}\\nu = {:.4f}, v = {:.4f}'.format(i, ep, XL[i], YL[i]))\n# print('x = {:.4f}, y = {:.4f}'.format(PX[i], PY[i]))\n# print('======================')\n# i+=1\n# if i>0:\n# ep = np.sum(np.abs(XL[i-1] - XL[i])) + np.sum(np.abs(YL[i-1] - YL[i]))\n# return PX, PY\n\ndef _LKflow(pre_img, nxt_img, pt_x, pt_y, lk_params):\n p0 = np.array([[pt_x, pt_y]]).astype(np.float32)\n i = 0\n PX, PY = [pt_x], [pt_y]\n XL, YL = [], []\n ep = 1e3\n # 初始化各參數\n while ep>1e-2:\n if i==0:\n p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, None, **lk_params)\n else:\n p1, _, _ = cv2.calcOpticalFlowPyrLK(pre_img, nxt_img, p0, p1, flags=cv2.OPTFLOW_USE_INITIAL_FLOW, **lk_params)\n # 用迴圈計算每個iteration的輸出座標\n PX.append(p1[0][0])\n PY.append(p1[0][1])\n XL.append(PX[i] - PX[i+1])\n YL.append(PY[i] - PY[i+1])\n # 紀錄輸出座標與位移向量\n if i>0:\n ep = np.sum(np.abs(XL[i-1] - XL[i])) + np.sum(np.abs(YL[i-1] - YL[i])) \n # 與前一個iteration位移向量之差值,\n # 當差值<0.01時則停止迴圈\n print('iter:{}, ep:{}\\nu = {:.4f}, v = {:.4f}'.format(i, ep, XL[i], YL[i]))\n print('x = {:.4f}, y = {:.4f}'.format(PX[i+1], PY[i+1]))\n print('======================') \n i+=1 \n return PX, PY \n\ndef _plot(img, PX, PY):\n PX = np.array(PX).astype(np.int)\n PY = np.array(PY).astype(np.int)\n for j in range(len(PX)):\n if j!=0:\n cv2.line(img, (PX[j-1], PY[j-1]), (PX[j], PY[j]), (250, 5, 216), 2)\n for k in range(len(PX)):\n if k==0:\n c = (0, 38, 255)\n elif k==len(PX)-1:\n c = (182, 255, 0)\n else:\n c = (255, 0, 0)\n cv2.circle(img, (PX[k], PY[k]), 3, c, -1) \n # 依每個iteration輸出的座標畫上標點\n return img\n\n# param = dict(pyr_scale=0.8,\n# levels=25,\n# iterations=1,\n# winsize=5,\n# poly_n=5,\n# poly_sigma=1.1)\n\nlk_params = dict(winSize = (15, 15),\n maxLevel = 3,\n criteria = (cv2.TERM_CRITERIA_COUNT, 1, 0.03)) " ]
[ [ "numpy.load", "matplotlib.pyplot.figure", "numpy.abs", "numpy.copy", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "matplotlib.pyplot.close", "numpy.array" ] ]
Rossil2012/mindspore
[ "55372b41fdfae6d2b88d7078971e06d537f6c558" ]
[ "mindspore/ops/operations/nn_ops.py" ]
[ "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\n\"\"\"Operators for nn.\"\"\"\n\nimport math\nimport operator\nfrom functools import reduce\n\nimport numpy as np\n\nfrom ... import context\nfrom .. import signature as sig\nfrom ..._checkparam import Validator as validator\nfrom ..._checkparam import Rel\nfrom ...common import dtype as mstype\nfrom ..primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register\nfrom ..operations.math_ops import _infer_shape_reduce\n\n\ndef _check_positive_int_or_tuple(arg_name, arg_value, prim_name, allow_four=False, ret_four=False):\n \"\"\"\n Checks whether an argument is a positive int or tuple with 2 or 4(when allow_four is True) positive int elements.\n \"\"\"\n\n def _raise_message():\n raise ValueError(f\"For '{prim_name}' attr '{arg_name}' should be an positive int number or a tuple of two \"\n f\"{'or four ' if allow_four else ''}positive int numbers, but got {arg_value}\")\n\n def _get_return_value():\n if isinstance(arg_value, int):\n ret = (1, 1, arg_value, arg_value) if ret_four else (arg_value, arg_value)\n elif len(arg_value) == 2:\n ret = (1, 1, arg_value[0], arg_value[1]) if ret_four else arg_value\n elif len(arg_value) == 4:\n if not allow_four:\n _raise_message()\n ret = arg_value if ret_four else (arg_value[2], arg_value[3])\n else:\n _raise_message()\n return ret\n\n validator.check_value_type(arg_name, arg_value, (int, tuple), prim_name)\n ret_value = _get_return_value()\n for item in ret_value:\n if isinstance(item, int) and item > 0:\n continue\n _raise_message()\n return ret_value\n\n\nclass Flatten(PrimitiveWithInfer):\n r\"\"\"\n Flattens a tensor without changing its batch size on the 0-th axis.\n\n Inputs:\n - **input_x** (Tensor) - Tensor of shape :math:`(N, \\ldots)` to be flattened.\n\n Outputs:\n Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is\n the product of the remaining dimension.\n\n Examples:\n >>> input_tensor = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)\n >>> flatten = P.Flatten()\n >>> output = flatten(input_tensor)\n >>> assert output.shape == (1, 24)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n pass\n\n def infer_shape(self, input_x):\n validator.check_integer('input_x rank', len(input_x), 1, Rel.GE, self.name)\n prod = 1 if len(input_x) == 1 else reduce(operator.mul, input_x[1:])\n return input_x[0], prod\n\n def infer_dtype(self, input_x):\n validator.check_subclass(\"input_x\", input_x, mstype.tensor, self.name)\n return input_x\n\n\nclass Softmax(PrimitiveWithInfer):\n r\"\"\"\n Softmax operation.\n\n Applies the Softmax operation to the input tensor on the specified axis.\n Suppose a slice in the given aixs :math:`x` then for each element :math:`x_i`\n the Softmax function is shown as follows:\n\n .. 
math::\n \\text{output}(x_i) = \\frac{exp(x_i)}{\\sum_{j = 0}^{N-1}\\exp(x_j)},\n\n where :math:`N` is the length of the tensor.\n\n Args:\n axis (Union[int, tuple]): The axis to do the Softmax operation. Default: -1.\n\n Inputs:\n - **logits** (Tensor) - The input of Softmax, with float16 or float32 data type.\n\n Outputs:\n Tensor, with the same type and shape as the logits.\n\n Examples:\n >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)\n >>> softmax = P.Softmax()\n >>> softmax(input_x)\n [0.01165623, 0.03168492, 0.08612854, 0.23412167, 0.6364086]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=-1):\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n validator.check_value_type(\"axis\", axis, [int, tuple], self.name)\n if isinstance(axis, int):\n self.add_prim_attr('axis', (axis,))\n for item in self.axis:\n validator.check_value_type(\"item of axis\", item, [int], self.name)\n\n def infer_shape(self, logits):\n validator.check_integer(\"length of axis\", len(self.axis), 1, Rel.GE, self.name)\n rank = len(logits)\n for axis_v in self.axis:\n validator.check_int_range(\"axis\", axis_v, -rank, rank, Rel.INC_LEFT, self.name)\n return logits\n\n def infer_dtype(self, logits):\n validator.check_subclass(\"logits\", logits, mstype.tensor, self.name)\n validator.check_tensor_type_same({\"logits\": logits}, mstype.float_type, self.name)\n return logits\n\n\nclass LogSoftmax(PrimitiveWithInfer):\n r\"\"\"\n Log Softmax activation function.\n\n Applies the Log Softmax function to the input tensor on the specified axis.\n Suppose a slice in the given aixs :math:`x` then for each element :math:`x_i`\n the Log Softmax function is shown as follows:\n\n .. math::\n \\text{output}(x_i) = \\log \\left(\\frac{exp(x_i)} {\\sum_{j = 0}^{N-1}\\exp(x_j)}\\right),\n\n where :math:`N` is the length of the Tensor.\n\n Args:\n axis (int): The axis to do the Log softmax operation. Default: -1.\n\n Inputs:\n - **logits** (Tensor) - The input of Log Softmax, with float16 or float32 data type.\n\n Outputs:\n Tensor, with the same type and shape as the logits.\n\n Examples:\n >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)\n >>> log_softmax = P.LogSoftmax()\n >>> log_softmax(input_x)\n [-4.4519143, -3.4519143, -2.4519143, -1.4519144, -0.4519144]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=-1):\n validator.check_value_type(\"axis\", axis, [int], self.name)\n\n def infer_shape(self, logits):\n rank = len(logits)\n validator.check_int_range('axis', self.axis, -rank, rank, Rel.INC_LEFT, self.name)\n return logits\n\n def infer_dtype(self, logits):\n validator.check_subclass(\"logits\", logits, mstype.tensor, self.name)\n validator.check_tensor_type_same({\"logits\": logits}, mstype.float_type, self.name)\n return logits\n\n\nclass Softplus(PrimitiveWithInfer):\n r\"\"\"\n Softplus activation function.\n\n Softplus is a smooth approximation to the ReLU function.\n The function is shown as follows:\n\n .. 
math::\n \\text{output} = \\log(1 + \\exp(\\text{input_x})),\n\n Inputs:\n - **input_x** (Tensor) - The input tensor whose data type should be float.\n\n Outputs:\n Tensor, with the same type and shape as the `input_x`.\n\n Examples:\n >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)\n >>> softplus = P.Softplus()\n >>> softplus(input_x)\n [1.3132615, 2.126928, 3.0485873, 4.01815, 5.0067153]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init Softplus\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n\n def infer_shape(self, input_x):\n return input_x\n\n def infer_dtype(self, input_x):\n validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)\n return input_x\n\n\nclass Softsign(PrimitiveWithInfer):\n r\"\"\"\n Softsign activation function.\n\n The function is shown as follows:\n\n .. math::\n \\text{output} = \\frac{\\text{input_x}}{1 + \\left| \\text{input_x} \\right|},\n\n Inputs:\n - **input_x** (Tensor) - The input tensor whose data type should be float16 or float32.\n\n Outputs:\n Tensor, with the same type and shape as the `input_x`.\n\n Examples:\n >>> input_x = Tensor(np.array([0, -1, 2, 30, -30]), mindspore.float32)\n >>> softsign = P.Softsign()\n >>> softsign(input_x)\n [0. -0.5 0.6666667 0.9677419 -0.9677419]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init Softsign\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n\n def infer_shape(self, input_x):\n return input_x\n\n def infer_dtype(self, input_x):\n validator.check_tensor_type_same({'input_x': input_x}, [mstype.float16, mstype.float32], self.name)\n return input_x\n\n\nclass ReLU(PrimitiveWithInfer):\n r\"\"\"\n Computes ReLU(Rectified Linear Unit) of input tensor element-wise.\n\n It returns :math:`\\max(x,\\ 0)` element-wise.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor.\n\n Outputs:\n Tensor, with the same type and shape as the `input_x`.\n\n Examples:\n >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)\n >>> relu = P.ReLU()\n >>> result = relu(input_x)\n [[0, 4.0, 0.0], [2.0, 0.0, 9.0]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init ReLU\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n\n def infer_shape(self, input_x):\n return input_x\n\n def infer_dtype(self, input_x):\n validator.check_tensor_type_same({'input_x': input_x}, mstype.number_type, self.name)\n return input_x\n\n\nclass ReLU6(PrimitiveWithInfer):\n r\"\"\"\n Computes ReLU(Rectified Linear Unit) upper bounded by 6 of input tensor element-wise.\n\n It returns :math:`\\min(\\max(0,x), 6)` element-wise.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor. 
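# Illustrative NumPy sketch (not the MindSpore kernels): reference versions of the
# Softplus, Softsign and ReLU formulas documented above, handy for checking the
# element-wise math. The helper names below are hypothetical, not library APIs.
import numpy as np

def softplus_ref(x):
    # log(1 + exp(x)), computed in a numerically stable way
    return np.logaddexp(0.0, x)

def softsign_ref(x):
    # x / (1 + |x|)
    return x / (1.0 + np.abs(x))

def relu_ref(x):
    # max(x, 0)
    return np.maximum(x, 0.0)

if __name__ == "__main__":
    x = np.array([1, 2, 3, 4, 5], dtype=np.float32)
    print(softplus_ref(x))   # ~[1.3133 2.1269 3.0486 4.0182 5.0067], matching the Softplus example
    print(softsign_ref(np.array([0, -1, 2, 30, -30], dtype=np.float32)))
    print(relu_ref(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], dtype=np.float32)))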
With float16 or float32 data type.\n\n Outputs:\n Tensor, with the same type and shape as the `input_x`.\n\n Examples:\n >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)\n >>> relu6 = P.ReLU6()\n >>> result = relu6(input_x)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init ReLU6\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n\n def infer_shape(self, input_x):\n return input_x\n\n def infer_dtype(self, input_x):\n validator.check_tensor_type_same({'input_x': input_x}, (mstype.float16, mstype.float32), self.name)\n return input_x\n\n\nclass ReLUV2(PrimitiveWithInfer):\n r\"\"\"\n Computes ReLU(Rectified Linear Unit) of input tensor element-wise.\n\n It returns :math:`\\max(x,\\ 0)` element-wise.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor should be a 4-D tensor.\n\n Outputs:\n - **output** (Tensor) - Has the same type and shape as the `input_x`.\n - **mask** (Tensor) - A tensor whose data type must be uint8.\n\n Examples:\n >>> input_x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mindspore.float32)\n >>> relu_v2 = P.ReLUV2()\n >>> output = relu_v2(input_x)\n ([[[[1., 0.], [0., 4.]], [[0., 6.], [7., 0.]]]],\n [[[[1, 0], [2, 0]], [[2, 0], [1, 0]]]])\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init ReLUV2\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['output', 'mask'])\n\n def __infer__(self, input_x):\n input_shape = list(input_x['shape'])\n input_dtype = input_x['dtype']\n mask_shape = []\n if len(input_shape) != 4:\n raise ValueError(\"The `input_x` should be a 4-D tensor, \"\n f\"but got a {len(input_shape)}-D tensor whose shape is {input_shape}\")\n for i in enumerate(input_shape):\n if i[0] == 1:\n if input_dtype in (mstype.uint8, mstype.int8):\n mask_shape.append((input_shape[1] + 31) // 32)\n else:\n mask_shape.append((input_shape[1] + 15) // 16)\n else:\n mask_shape.append(i[1])\n if input_dtype in (mstype.uint8, mstype.int8):\n mask_shape.append(4)\n else:\n mask_shape.append(2)\n\n output_shape = (input_x['shape'], mask_shape)\n validator.check_subclass(\"input_x\", input_dtype, mstype.tensor, self.name)\n validator.check_tensor_type_same({'input_x': input_dtype}, mstype.number_type, self.name)\n mask_dtype = mstype.uint8\n output_dtype = (input_dtype, mask_dtype)\n\n return {'shape': output_shape,\n 'dtype': output_dtype,\n 'value': None}\n\n\nclass Elu(PrimitiveWithInfer):\n r\"\"\"\n Computes exponential linear: `alpha * (exp(x) - 1)` if x < 0, `x` otherwise.\n The data type of input tensor should be float.\n\n Args:\n alpha (float): The coefficient of negative factor whose type is float,\n only support '1.0' currently. 
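# Illustrative NumPy sketch (not the MindSpore kernels): the ReLU6 and Elu formulas
# described above, plus the mask-shape rule used by ReLUV2.__infer__ for a 4-D NCHW
# input. Helper names are hypothetical; they only mirror the math/shape logic above.
import numpy as np

def relu6_ref(x):
    # min(max(0, x), 6)
    return np.minimum(np.maximum(x, 0.0), 6.0)

def elu_ref(x, alpha=1.0):
    # alpha * (exp(x) - 1) if x < 0 else x
    return np.where(x < 0, alpha * (np.exp(x) - 1.0), x)

def reluv2_mask_shape(input_shape, packed_bits=16, last_dim=2):
    # Mirror of the shape arithmetic in ReLUV2.__infer__: the channel axis is packed into
    # ceil(C / 16) groups (ceil(C / 32) for int8/uint8) and a trailing dim 2 (or 4) is added.
    n, c, h, w = input_shape
    return [n, (c + packed_bits - 1) // packed_bits, h, w, last_dim]

if __name__ == "__main__":
    x = np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]], dtype=np.float32)
    print(relu6_ref(x))                      # clips 9.0 down to 6.0
    print(elu_ref(x))                        # ~[[-0.632  4.  -1. ] [ 2.  -0.993  9. ]]
    print(reluv2_mask_shape([1, 2, 2, 2]))   # [1, 1, 2, 2, 2]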
Default: 1.0.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor whose data type should be float.\n\n Outputs:\n Tensor, has the same shape and data type as `input_x`.\n\n Examples:\n >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)\n >>> elu = P.Elu()\n >>> result = elu(input_x)\n Tensor([[-0.632 4.0 -0.999]\n [2.0 -0.993 9.0 ]], shape=(2, 3), dtype=mindspore.float32)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, alpha=1.0):\n \"\"\"Init Elu\"\"\"\n validator.check_value_type(\"alpha\", alpha, [float], self.name)\n validator.check_number(\"alpha\", alpha, 1.0, Rel.EQ, self.name)\n\n def infer_shape(self, input_x):\n return input_x\n\n def infer_dtype(self, input_x):\n validator.check_tensor_type_same({'input_x': input_x}, mstype.float_type, self.name)\n return input_x\n\n\nclass HSwish(PrimitiveWithInfer):\n r\"\"\"\n Hard swish activation function.\n\n Applies hswish-type activation element-wise. The input is a Tensor with any valid shape.\n\n Hard swish is defined as:\n\n .. math::\n \\text{hswish}(x_{i}) = x_{i} * \\frac{ReLU6(x_{i} + 3)}{6},\n\n where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.\n\n Inputs:\n - **input_data** (Tensor) - The input of HSwish, data type should be float16 or float32.\n\n Outputs:\n Tensor, with the same type and shape as the `input_data`.\n\n Examples:\n >>> hswish = P.HSwish()\n >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)\n >>> result = hswish(input_x)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n\n def infer_shape(self, xshape):\n return xshape\n\n def infer_dtype(self, x_dtype):\n validator.check_tensor_type_same({\"x\": x_dtype}, (mstype.float16, mstype.float32), self.name)\n return x_dtype\n\n\nclass Sigmoid(PrimitiveWithInfer):\n r\"\"\"\n Sigmoid activation function.\n\n Computes Sigmoid of input element-wise. The Sigmoid function is defined as:\n\n .. math::\n \\text{sigmoid}(x_i) = \\frac{1}{1 + exp(-x_i)},\n\n where :math:`x_i` is the element of the input.\n\n Inputs:\n - **input_x** (Tensor) - The input of Sigmoid, data type should be float16 or float32.\n\n Outputs:\n Tensor, with the same type and shape as the input_x.\n\n Examples:\n >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)\n >>> sigmoid = P.Sigmoid()\n >>> sigmoid(input_x)\n [0.73105866, 0.880797, 0.9525742, 0.98201376, 0.9933071]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n\n def infer_shape(self, input_x):\n return input_x\n\n def infer_dtype(self, input_x):\n validator.check_tensor_type_same({\"input_x\": input_x}, (mstype.float16, mstype.float32), self.name)\n return input_x\n\n\nclass HSigmoid(PrimitiveWithInfer):\n r\"\"\"\n Hard sigmoid activation function.\n\n Applies hard sigmoid activation element-wise. The input is a Tensor with any valid shape.\n\n Hard sigmoid is defined as:\n\n .. 
math::\n \\text{hsigmoid}(x_{i}) = max(0, min(1, \\frac{x_{i} + 3}{6})),\n\n where :math:`x_{i}` is the :math:`i`-th slice in the given dimension of the input Tensor.\n\n Inputs:\n - **input_data** (Tensor) - The input of HSigmoid, data type should be float16 or float32.\n\n Outputs:\n Tensor, with the same type and shape as the `input_data`.\n\n Examples:\n >>> hsigmoid = P.HSigmoid()\n >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)\n >>> result = hsigmoid(input_x)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n\n def infer_shape(self, x_shape):\n return x_shape\n\n def infer_dtype(self, x_dtype):\n validator.check_tensor_type_same({\"x\": x_dtype}, (mstype.float16, mstype.float32), self.name)\n return x_dtype\n\n\nclass Tanh(PrimitiveWithInfer):\n r\"\"\"\n Tanh activation function.\n\n Computes hyperbolic tangent of input element-wise. The Tanh function is defined as:\n\n .. math::\n tanh(x_i) = \\frac{\\exp(x_i) - \\exp(-x_i)}{\\exp(x_i) + \\exp(-x_i)} = \\frac{\\exp(2x_i) - 1}{\\exp(2x_i) + 1},\n\n where :math:`x_i` is an element of the input Tensor.\n\n Inputs:\n - **input_x** (Tensor) - The input of Tanh.\n\n Outputs:\n Tensor, with the same type and shape as the input_x.\n\n Examples:\n >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)\n >>> tanh = P.Tanh()\n >>> tanh(input_x)\n [0.7615941, 0.9640276, 0.9950548, 0.9993293, 0.99990916]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n pass\n\n def infer_shape(self, input_x):\n return input_x\n\n def infer_dtype(self, input_x):\n validator.check_subclass(\"input_x\", input_x, mstype.tensor, self.name)\n return input_x\n\n\nclass FusedBatchNorm(Primitive):\n r\"\"\"\n FusedBatchNorm is a BatchNorm that moving mean and moving variance will be computed instead of being loaded.\n\n Batch Normalization is widely used in convolutional networks. This operation applies\n Batch Normalization over input to avoid internal covariate shift as described in the\n paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal\n Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the\n feature using a mini-batch of data and the learned parameters which can be described\n in the following formula.\n\n .. math::\n y = \\frac{x - mean}{\\sqrt{variance + \\epsilon}} * \\gamma + \\beta\n\n where :math:`\\gamma` is scale, :math:`\\beta` is bias, :math:`\\epsilon` is epsilon.\n\n Args:\n mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.\n epsilon (float): A small value added for numerical stability. Default: 1e-5.\n momentum (float): The hyper parameter to compute moving average for running_mean and running_var\n (e.g. :math:`new\\_running\\_mean = momentum * running\\_mean + (1 - momentum) * current\\_mean`).\n Momentum value should be [0, 1]. 
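# Illustrative NumPy sketch (not the MindSpore kernels): the Sigmoid, Tanh and the
# hard-variant formulas (HSigmoid / HSwish) written out exactly as documented above.
# The helper names are hypothetical reference functions.
import numpy as np

def sigmoid_ref(x):
    # 1 / (1 + exp(-x))
    return 1.0 / (1.0 + np.exp(-x))

def hsigmoid_ref(x):
    # max(0, min(1, (x + 3) / 6))
    return np.clip((x + 3.0) / 6.0, 0.0, 1.0)

def hswish_ref(x):
    # x * ReLU6(x + 3) / 6
    return x * np.clip(x + 3.0, 0.0, 6.0) / 6.0

def tanh_ref(x):
    # (exp(2x) - 1) / (exp(2x) + 1)
    return np.tanh(x)

if __name__ == "__main__":
    x = np.array([1, 2, 3, 4, 5], dtype=np.float32)
    print(sigmoid_ref(x))   # ~[0.7311 0.8808 0.9526 0.9820 0.9933], as in the Sigmoid example
    print(tanh_ref(x))      # ~[0.7616 0.9640 0.9951 0.9993 0.9999], as in the Tanh example
    print(hsigmoid_ref(np.array([-1, -2, 0, 2, 1], dtype=np.float32)))
    print(hswish_ref(np.array([-1, -2, 0, 2, 1], dtype=np.float32)))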
Default: 0.9.\n\n Inputs:\n - **input_x** (Tensor) - Tensor of shape :math:`(N, C)`.\n - **scale** (Tensor) - Tensor of shape :math:`(C,)`.\n - **bias** (Tensor) - Tensor of shape :math:`(C,)`.\n - **mean** (Tensor) - Tensor of shape :math:`(C,)`.\n - **variance** (Tensor) - Tensor of shape :math:`(C,)`.\n\n Outputs:\n Tuple of 5 Tensor, the normalized input and the updated parameters.\n\n - **output_x** (Tensor) - The same type and shape as the `input_x`.\n - **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.\n - **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.\n - **updated_moving_mean** (Tensor) - Tensor of shape :math:`(C,)`.\n - **updated_moving_variance** (Tensor) - Tensor of shape :math:`(C,)`.\n\n Examples:\n >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)\n >>> scale = Tensor(np.ones([64]), mindspore.float32)\n >>> bias = Tensor(np.ones([64]), mindspore.float32)\n >>> mean = Tensor(np.ones([64]), mindspore.float32)\n >>> variance = Tensor(np.ones([64]), mindspore.float32)\n >>> op = P.FusedBatchNorm()\n >>> output = op(input_x, scale, bias, mean, variance)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):\n self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],\n outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])\n self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)\n self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)\n self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)\n self._update_parameter = True\n\n\nclass FusedBatchNormEx(PrimitiveWithInfer):\n r\"\"\"\n FusedBatchNormEx is an extension of FusedBatchNorm, FusedBatchNormEx has one more output(output reserve)\n than FusedBatchNorm, reserve will be used in backpropagation phase. FusedBatchNorm is a BatchNorm that\n moving mean and moving variance will be computed instead of being loaded.\n\n Batch Normalization is widely used in convolutional networks. This operation applies\n Batch Normalization over input to avoid internal covariate shift as described in the\n paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal\n Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the\n feature using a mini-batch of data and the learned parameters which can be described\n in the following formula.\n\n .. math::\n y = \\frac{x - mean}{\\sqrt{variance + \\epsilon}} * \\gamma + \\beta\n\n where :math:`\\gamma` is scale, :math:`\\beta` is bias, :math:`\\epsilon` is epsilon.\n\n Args:\n mode (int): Mode of batch normalization, value is 0 or 1. Default: 0.\n epsilon (float): A small value added for numerical stability. Default: 1e-5.\n momentum (float): The hyper parameter to compute moving average for running_mean and running_var\n (e.g. :math:`new\\_running\\_mean = momentum * running\\_mean + (1 - momentum) * current\\_mean`).\n Momentum value should be [0, 1]. 
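# Illustrative NumPy sketch (not the fused device kernel): the per-channel normalization
# y = (x - mean) / sqrt(var + eps) * gamma + beta and the documented moving-average update
# new_running_mean = momentum * running_mean + (1 - momentum) * current_mean, for an NCHW
# input. The helper name and return convention are hypothetical.
import numpy as np

def batch_norm_train_ref(x, gamma, beta, running_mean, running_var, momentum, eps=1e-5):
    # Batch statistics over N, H, W for each channel C.
    mean = x.mean(axis=(0, 2, 3))
    var = x.var(axis=(0, 2, 3))
    x_hat = (x - mean[None, :, None, None]) / np.sqrt(var[None, :, None, None] + eps)
    y = gamma[None, :, None, None] * x_hat + beta[None, :, None, None]
    new_running_mean = momentum * running_mean + (1.0 - momentum) * mean
    new_running_var = momentum * running_var + (1.0 - momentum) * var
    return y, new_running_mean, new_running_var

if __name__ == "__main__":
    x = np.random.randn(8, 4, 5, 5)
    gamma, beta = np.ones(4), np.zeros(4)
    y, rm, rv = batch_norm_train_ref(x, gamma, beta, np.zeros(4), np.ones(4), momentum=0.1)
    print(y.mean(axis=(0, 2, 3)))   # ~0 per channel after normalization
    print(rm, rv)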
Default: 0.9.\n\n Inputs:\n - **input_x** (Tensor) - The input of FusedBatchNormEx, Tensor of shape :math:`(N, C)`,\n data type: float16 or float32.\n - **scale** (Tensor) - Parameter scale, same with gamma above-mentioned, Tensor of shape :math:`(C,)`,\n data type: float32.\n - **bias** (Tensor) - Parameter bias, same with beta above-mentioned, Tensor of shape :math:`(C,)`,\n data type: float32.\n - **mean** (Tensor) - mean value, Tensor of shape :math:`(C,)`, data type: float32.\n - **variance** (Tensor) - variance value, Tensor of shape :math:`(C,)`, data type: float32.\n\n Outputs:\n Tuple of 6 Tensors, the normalized input, the updated parameters and reserve.\n\n - **output_x** (Tensor) - The input of FusedBatchNormEx, same type and shape as the `input_x`.\n - **updated_scale** (Tensor) - Updated parameter scale, Tensor of shape :math:`(C,)`, data type: float32.\n - **updated_bias** (Tensor) - Updated parameter bias, Tensor of shape :math:`(C,)`, data type: float32.\n - **updated_moving_mean** (Tensor) - Updated mean value, Tensor of shape :math:`(C,)`, data type: float32.\n - **updated_moving_variance** (Tensor) - Updated variance value, Tensor of shape :math:`(C,)`,\n data type: float32.\n - **reserve** (Tensor) - reserve space, Tensor of shape :math:`(C,)`, data type: float32.\n\n Examples:\n >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)\n >>> scale = Tensor(np.ones([64]), mindspore.float32)\n >>> bias = Tensor(np.ones([64]), mindspore.float32)\n >>> mean = Tensor(np.ones([64]), mindspore.float32)\n >>> variance = Tensor(np.ones([64]), mindspore.float32)\n >>> op = P.FusedBatchNormEx()\n >>> output = op(input_x, scale, bias, mean, variance)\n \"\"\"\n __mindspore_signature__ = (\n sig.make_sig('input_x', dtype=sig.sig_dtype.T2),\n sig.make_sig('scale', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('bias', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('mean', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('variance', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self, mode=0, epsilon=1e-5, momentum=0.1):\n self.init_prim_io_names(inputs=['x', 'scale', 'b', 'mean', 'variance'],\n outputs=['y', 'save_scale', 'save_bias', 'save_mean', 'save_inv_variance', 'reserve'])\n self.mode = validator.check_integer('mode', mode, [0, 1], Rel.IN, self.name)\n self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)\n self.momentum = validator.check_number_range('momentum', momentum, 0, 1, Rel.INC_BOTH, self.name)\n self._update_parameter = True\n self.add_prim_attr('data_format', \"NCHW\")\n\n def infer_shape(self, input_x, scale, bias, mean, variance):\n validator.check_integer(\"scale rank\", len(scale), 1, Rel.EQ, self.name)\n validator.check(\"scale shape\", scale, \"bias shape\", bias, Rel.EQ, self.name)\n validator.check(\"scale shape[0]\", scale[0], \"input_x shape[1]\", input_x[1], Rel.EQ, self.name)\n validator.check_integer(\"mean rank\", len(mean), 1, Rel.EQ, self.name)\n validator.check(\"mean shape\", mean, \"variance shape\", variance, Rel.EQ, self.name)\n validator.check(\"mean shape\", mean, \"scale shape\", scale, Rel.EQ, self.name)\n return (input_x, scale, scale, scale, scale, scale)\n\n def infer_dtype(self, input_x, scale, bias, mean, variance):\n validator.check_tensor_type_same({\"input_x\": input_x}, [mstype.float16, mstype.float32], self.name)\n args = {\"scale\": scale, \"bias\": bias}\n validator.check_tensor_type_same(args, 
[mstype.float32], self.name)\n args_moving = {\"mean\": mean, \"variance\": variance}\n valid_types = [mstype.tensor_type(mstype.float32)]\n validator.check_type_same(args_moving, valid_types, self.name)\n return (input_x, scale, scale, scale, scale, scale)\n\n\nclass BNTrainingReduce(PrimitiveWithInfer):\n \"\"\"\n reduce sum at axis [0, 2, 3].\n\n Inputs:\n - **x** (Tensor) - Tensor of shape :math:`(N, C)`.\n\n Outputs:\n - **sum** (Tensor) - Tensor of shape :math:`(C,)`.\n - **square_sum** (Tensor) - Tensor of shape :math:`(C,)`.\n\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['x'], outputs=['sum', 'square_sum'])\n\n def infer_shape(self, x_shape):\n validator.check_integer(\"x rank\", len(x_shape), 4, Rel.EQ, self.name)\n return ([x_shape[1]], [x_shape[1]])\n\n def infer_dtype(self, x_type):\n return (x_type, x_type)\n\n\nclass BNTrainingUpdate(PrimitiveWithInfer):\n \"\"\"\n The primitive operator of the register and info descriptor in bn_training_update.\n \"\"\"\n @prim_attr_register\n def __init__(self, isRef=True, epsilon=1e-5, factor=0.1):\n self.init_prim_io_names(inputs=['x', 'sum', 'square_sum', 'scale', 'b', 'mean', 'variance'],\n outputs=['y', 'running_mean', 'running_variance', 'save_mean', 'save_inv_variance'])\n #self.isRef = validator.check_integer('isRef', isRef, [0, 1], Rel.IN)\n self.epsilon = validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, 'BNTrainingUpdate')\n self.factor = validator.check_number_range('factor', factor, 0, 1, Rel.INC_BOTH, 'BNTrainingUpdate')\n\n def infer_shape(self, x, sum, square_sum, scale, b, mean, variance):\n return (x, variance, variance, variance, variance)\n\n def infer_dtype(self, x, sum, square_sum, scale, b, mean, variance):\n return (x, variance, variance, variance, variance)\n\n\nclass BatchNorm(PrimitiveWithInfer):\n r\"\"\"\n Batch Normalization for input data and updated parameters.\n\n Batch Normalization is widely used in convolutional neural networks. This operation\n applies Batch Normalization over input to avoid internal covariate shift as described\n in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal\n Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the\n features using a mini-batch of data and the learned parameters which can be described\n in the following formula,\n\n .. math::\n y = \\frac{x - mean}{\\sqrt{variance + \\epsilon}} * \\gamma + \\beta\n\n where :math:`\\gamma` is scale, :math:`\\beta` is bias, :math:`\\epsilon` is epsilon.\n\n Args:\n is_training (bool): If `is_training` is True, `mean` and `variance` are computed during training.\n If `is_training` is False, they're loaded from checkpoint during inference. Default: False.\n epsilon (float): A small value added for numerical stability. Default: 1e-5.\n\n Inputs:\n - **input_x** (Tensor) - Tensor of shape :math:`(N, C)`, with float16 or float32 data type.\n - **scale** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.\n - **bias** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `scale`.\n - **mean** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.\n - **variance** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `mean`.\n\n Outputs:\n Tuple of 5 Tensor, the normalized inputs and the updated parameters.\n\n - **output_x** (Tensor) - The same type and shape as the input_x. 
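# Illustrative NumPy sketch of what BNTrainingReduce produces for a 4-D NCHW tensor:
# a per-channel sum and sum of squares over axes (0, 2, 3), from which a later step such
# as BNTrainingUpdate can derive mean and variance. This is a reference for the reduction
# math only, not the device kernel; the helper name is hypothetical.
import numpy as np

def bn_training_reduce_ref(x):
    # x has shape (N, C, H, W); both outputs have shape (C,)
    sum_ = x.sum(axis=(0, 2, 3))
    square_sum = (x * x).sum(axis=(0, 2, 3))
    return sum_, square_sum

if __name__ == "__main__":
    x = np.random.randn(2, 3, 4, 4)
    s, sq = bn_training_reduce_ref(x)
    count = x.shape[0] * x.shape[2] * x.shape[3]
    mean = s / count
    var = sq / count - mean ** 2          # E[x^2] - E[x]^2
    print(np.allclose(mean, x.mean(axis=(0, 2, 3))))
    print(np.allclose(var, x.var(axis=(0, 2, 3))))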
The shape is :math:`(N, C)`.\n - **updated_scale** (Tensor) - Tensor of shape :math:`(C,)`.\n - **updated_bias** (Tensor) - Tensor of shape :math:`(C,)`.\n - **reserve_space_1** (Tensor) - Tensor of shape :math:`(C,)`.\n - **reserve_space_2** (Tensor) - Tensor of shape :math:`(C,)`.\n\n Examples:\n >>> input_x = Tensor(np.ones([128, 64, 32, 64]), mindspore.float32)\n >>> scale = Tensor(np.ones([64]), mindspore.float32)\n >>> bias = Tensor(np.ones([64]), mindspore.float32)\n >>> mean = Tensor(np.ones([64]), mindspore.float32)\n >>> variance = Tensor(np.ones([64]), mindspore.float32)\n >>> batch_norm = P.BatchNorm()\n >>> output = batch_norm(input_x, scale, bias, mean, variance)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, is_training=False, epsilon=1e-5):\n validator.check_value_type('is_training', is_training, (bool,), self.name)\n validator.check_number_range('epsilon', epsilon, 0, 1, Rel.INC_RIGHT, self.name)\n self.add_prim_attr('data_format', \"NCHW\")\n self.init_prim_io_names(inputs=['x', 'scale', 'offset', 'mean', 'variance'],\n outputs=['y', 'batch_mean', 'batch_variance', 'reserve_space_1', 'reserve_space_2'])\n\n def infer_shape(self, input_x, scale, bias, mean, variance):\n validator.check_integer(\"scale rank\", len(scale), 1, Rel.EQ, self.name)\n validator.check(\"scale shape\", scale, \"bias shape\", bias, Rel.EQ, self.name)\n validator.check(\"scale shape[0]\", scale[0], \"input_x shape[1]\", input_x[1], Rel.EQ, self.name)\n if not self.is_training:\n validator.check_integer(\"mean rank\", len(mean), 1, Rel.EQ, self.name)\n validator.check(\"mean shape\", mean, \"variance shape\", variance, Rel.EQ, self.name)\n validator.check(\"mean shape\", mean, \"scale shape\", scale, Rel.EQ, self.name)\n return (input_x, scale, scale, scale, scale)\n\n def infer_dtype(self, input_x, scale, bias, mean, variance):\n validator.check_tensor_type_same({\"input_x\": input_x}, [mstype.float16, mstype.float32], self.name)\n args = {\"scale\": scale, \"bias\": bias}\n validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)\n args_moving = {\"mean\": mean, \"variance\": variance}\n if self.is_training:\n valid_types = [mstype.tensor_type(mstype.float16), mstype.tensor_type(mstype.float32), None]\n validator.check_type_same(args_moving, valid_types, self.name)\n else:\n args_moving = {\"mean\": mean, \"variance\": variance}\n validator.check_tensor_type_same(args_moving, [mstype.float16, mstype.float32], self.name)\n return (input_x, scale, bias, input_x, input_x)\n\n\nclass Conv2D(PrimitiveWithInfer):\n r\"\"\"\n 2D convolution layer.\n\n Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,\n where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape\n :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:\n\n .. math::\n\n out_j = \\sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,\n\n where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges\n from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th\n filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice\n of kernel and it has shape :math:`(\\text{ks_h}, \\text{ks_w})`, where :math:`\\text{ks_h}` and\n :math:`\\text{ks_w}` are the height and width of the convolution kernel. 
The full kernel has shape\n :math:`(C_{out}, C_{in} // \\text{group}, \\text{ks_h}, \\text{ks_w})`, where group is the group number\n to split the input in the channel dimension.\n\n If the 'pad_mode' is set to be \"valid\", the output height and width will be\n :math:`\\left \\lfloor{1 + \\frac{H_{in} + 2 \\times \\text{padding} - \\text{ks_h} -\n (\\text{ks_h} - 1) \\times (\\text{dilation} - 1) }{\\text{stride}}} \\right \\rfloor` and\n :math:`\\left \\lfloor{1 + \\frac{W_{in} + 2 \\times \\text{padding} - \\text{ks_w} -\n (\\text{ks_w} - 1) \\times (\\text{dilation} - 1) }{\\text{stride}}} \\right \\rfloor` respectively.\n\n\n The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition\n <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_. More detailed introduction can be found here:\n http://cs231n.github.io/convolutional-networks/.\n\n Args:\n out_channel (int): The dimension of the output.\n kernel_size (Union[int, tuple[int]]): The kernel size of the 2D convolution.\n mode (int): Modes for different convolutions. 0 Math convolutiuon, 1 cross-correlation convolution ,\n 2 deconvolution, 3 depthwise convolution. Default: 1.\n pad_mode (str): Modes to fill padding. It could be \"valid\", \"same\", or \"pad\". Default: \"valid\".\n pad (Union(int, tuple[int])): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of\n top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the\n padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.\n stride (Union(int, tuple[int])): The stride to be applied to the convolution filter. Default: 1.\n dilation (Union(int, tuple[int])): Specify the space to use between kernel elements. Default: 1.\n group (int): Split input into groups. 
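# Illustrative sketch of the output-size arithmetic for the three pad modes: it mirrors the
# "valid" formula given in the docstring above and the "same"/"pad" arithmetic implemented
# in Conv2D.infer_shape below. The helper name and return convention are hypothetical.
import math

def conv2d_out_hw(in_hw, kernel, stride, dilation, pad_mode="valid", padding=(0, 0, 0, 0)):
    h_in, w_in = in_hw
    k_h, k_w = kernel
    s_h, s_w = stride
    d_h, d_w = dilation
    if pad_mode == "valid":
        h_out = math.ceil((h_in - d_h * (k_h - 1)) / s_h)
        w_out = math.ceil((w_in - d_w * (k_w - 1)) / s_w)
    elif pad_mode == "same":
        h_out = math.ceil(h_in / s_h)
        w_out = math.ceil(w_in / s_w)
    else:  # "pad": explicit (top, bottom, left, right) padding
        top, bottom, left, right = padding
        h_out = math.floor(1 + (h_in + top + bottom - k_h - (k_h - 1) * (d_h - 1)) / s_h)
        w_out = math.floor(1 + (w_in + left + right - k_w - (k_w - 1) * (d_w - 1)) / s_w)
    return h_out, w_out

if __name__ == "__main__":
    # The Conv2D docstring example below: 32x32 input, 3x3 kernel, stride 1, dilation 1, "valid"
    print(conv2d_out_hw((32, 32), (3, 3), (1, 1), (1, 1), "valid"))   # (30, 30)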
Default: 1.\n\n Returns:\n Tensor, the value that applied 2D convolution.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.\n - **weight** (Tensor) - Set size of kernel is :math:`(K_1, K_2)`, then the shape is\n :math:`(C_{out}, C_{in}, K_1, K_2)`.\n\n Outputs:\n Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.\n\n Examples:\n >>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)\n >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)\n >>> conv2d = P.Conv2D(out_channel=32, kernel_size=3)\n >>> conv2d(input, weight)\n \"\"\"\n\n @prim_attr_register\n def __init__(self,\n out_channel,\n kernel_size,\n mode=1,\n pad_mode=\"valid\",\n pad=0,\n stride=1,\n dilation=1,\n group=1):\n \"\"\"init Conv2D\"\"\"\n self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])\n self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)\n self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=True)\n self.add_prim_attr('stride', self.stride)\n self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)\n self.add_prim_attr('dilation', self.dilation)\n validator.check_value_type('pad', pad, (int, tuple), self.name)\n if isinstance(pad, int):\n pad = (pad,) * 4\n else:\n validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)\n self.padding = pad\n self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)\n\n if pad_mode != 'pad' and pad != (0, 0, 0, 0):\n raise ValueError(f\"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.\")\n if self.pad_mode == 'pad':\n for item in pad:\n validator.check_integer('pad item', item, 0, Rel.GE, self.name)\n\n self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)\n self.add_prim_attr('data_format', \"NCHW\")\n self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)\n self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)\n self.add_prim_attr('offset_a', 0)\n\n def infer_shape(self, x_shape, w_shape, b_shape=None):\n validator.check_integer(\"weight rank\", len(w_shape), 4, Rel.EQ, self.name)\n validator.check_integer(\"x rank\", len(x_shape), 4, Rel.EQ, self.name)\n validator.check(f\"x_shape[1] / group\", x_shape[1] // self.group, \"w_shape[1]\", w_shape[1], Rel.EQ, self.name)\n validator.check('out_channel', self.out_channel, 'w_shape[0]', w_shape[0], Rel.EQ, self.name)\n validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)\n\n kernel_size_h = w_shape[2]\n kernel_size_w = w_shape[3]\n stride_h = self.stride[2]\n stride_w = self.stride[3]\n dilation_h = self.dilation[2]\n dilation_w = self.dilation[3]\n\n if self.pad_mode == \"valid\":\n h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)\n w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)\n pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0\n elif self.pad_mode == \"same\":\n h_out = math.ceil(x_shape[2] / stride_h)\n w_out = math.ceil(x_shape[3] / stride_w)\n\n pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])\n pad_top = math.floor(pad_needed_h / 2)\n pad_bottom = pad_needed_h - pad_top\n\n pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])\n pad_left = math.floor(pad_needed_w / 2)\n 
pad_right = pad_needed_w - pad_left\n elif self.pad_mode == 'pad':\n pad_top, pad_bottom, pad_left, pad_right = self.padding\n\n h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \\\n / stride_h\n w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \\\n / stride_w\n h_out = math.floor(h_out)\n w_out = math.floor(w_out)\n\n self.pad_list = [pad_top, pad_bottom, pad_left, pad_right]\n self.add_prim_attr('pad_list', (pad_top, pad_bottom, pad_left, pad_right))\n out_channel = self.out_channel\n out_shape = [x_shape[0], out_channel, h_out, w_out]\n return out_shape\n\n def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):\n args = {'x': x_dtype, 'w': w_dtype}\n valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]\n validator.check_tensor_type_same(args, valid_types, self.name)\n if x_dtype.element_type() == mstype.int8:\n return mstype.tensor_type(mstype.int32)\n return x_dtype\n\n\nclass DepthwiseConv2dNative(PrimitiveWithInfer):\n r\"\"\"\n Returns the depth-wise convolution value for the input.\n\n Applies depthwise conv2d for the input, which will generate more channels with channel_multiplier.\n Given an input tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` where :math:`N` is the batch size and a\n filter tensor with kernel size :math:`(ks_{h}, ks_{w})`, containing :math:`C_{in} * \\text{channel_multiplier}`\n convolutional filters of depth 1; it applies different filters to each input channel (channel_multiplier channels\n for each input channel has the default value 1), then concatenates the results together. The output has\n :math:`\\text{in_channels} * \\text{channel_multiplier}` channels.\n\n Args:\n channel_multiplier (int): The multipiler for the original output convolution. Its value must be greater than 0.\n kernel_size (Union[int, tuple[int]]): The size of the convolution kernel.\n mode (int): Modes for different convolutions. 0 Math convolution, 1 cross-correlation convolution ,\n 2 deconvolution, 3 depthwise convolution. Default: 3.\n pad_mode (str): Modes to fill padding. It could be \"valid\", \"same\", or \"pad\". Default: \"valid\".\n pad (Union[int, tuple[int]]): The pad value to be filled. If `pad` is an integer, the paddings of\n top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the padding\n of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly. Default: 0.\n stride (Union[int, tuple[int]]): The stride to be applied to the convolution filter. Default: 1.\n dilation (Union[int, tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.\n Default: 1.\n group (int): Splits input into groups. 
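# Illustrative sketch of the depthwise output shape described above: the channel count becomes
# in_channels * channel_multiplier, while H/W follow the same pad-mode arithmetic as a regular
# convolution ("valid" assumed here for brevity). The helper name is hypothetical; it only
# restates the docstring math.
import math

def depthwise_out_shape(x_shape, channel_multiplier, kernel, stride=(1, 1), dilation=(1, 1)):
    # x_shape is (N, C_in, H_in, W_in)
    n, c_in, h_in, w_in = x_shape
    k_h, k_w = kernel
    s_h, s_w = stride
    d_h, d_w = dilation
    h_out = math.ceil((h_in - d_h * (k_h - 1)) / s_h)
    w_out = math.ceil((w_in - d_w * (k_w - 1)) / s_w)
    return (n, c_in * channel_multiplier, h_out, w_out)

if __name__ == "__main__":
    # Matches the DepthwiseConv2dNative docstring example below:
    print(depthwise_out_shape((10, 32, 32, 32), channel_multiplier=3, kernel=(3, 3)))  # (10, 96, 30, 30)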
Default: 1.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.\n - **weight** (Tensor) - Set the size of kernel as :math:`(K_1, K_2)`, then the shape is\n :math:`(K, C_{in}, K_1, K_2)`, `K` must be 1.\n\n Outputs:\n Tensor of shape :math:`(N, C_{in} * \\text{channel_multiplier}, H_{out}, W_{out})`.\n\n Examples:\n >>> input = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)\n >>> weight = Tensor(np.ones([1, 32, 3, 3]), mindspore.float32)\n >>> depthwise_conv2d = P.DepthwiseConv2dNative(channel_multiplier = 3, kernel_size = (3, 3))\n >>> output = depthwise_conv2d(input, weight)\n >>> output.shape == (10, 96, 30, 30)\n \"\"\"\n\n @prim_attr_register\n def __init__(self,\n channel_multiplier,\n kernel_size,\n mode=3,\n pad_mode=\"valid\",\n pad=0,\n stride=1,\n dilation=1,\n group=1):\n \"\"\"init DepthwiseConv2dNative\"\"\"\n self.init_prim_io_names(inputs=['x', 'w'], outputs=['output'])\n self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)\n self.stride = _check_positive_int_or_tuple('stride', stride, self.name)\n if self.stride[0] != self.stride[1]:\n raise ValueError(\"The height and width of stride should be equal,\"\n f\"but got height:{self.stride[0]}, width:{self.stride[1]}\")\n self.add_prim_attr('stride', (1, 1, self.stride[0], self.stride[1]))\n\n self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name)\n if self.dilation[0] != self.dilation[1]:\n raise ValueError(\"The height and width of dilation should be equal,\"\n f\"but got height:{self.dilation[0]}, width:{self.dilation[1]}\")\n self.add_prim_attr('dilation', (1, 1, self.dilation[0], self.dilation[1]))\n validator.check_value_type('pad', pad, (int, tuple), self.name)\n if isinstance(pad, int):\n pad = (pad,) * 4\n else:\n validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)\n self.padding = pad\n self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)\n if pad_mode != 'pad' and pad != (0, 0, 0, 0):\n raise ValueError(f\"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.\")\n if self.pad_mode == 'pad':\n for item in pad:\n validator.check_integer('pad item', item, 0, Rel.GE, self.name)\n self.mode = validator.check_integer(\"mode\", mode, 3, Rel.EQ, self.name)\n self.add_prim_attr('data_format', \"NCHW\")\n self.channel_multiplier = validator.check_integer(\"channel_multiplier\", channel_multiplier, 0, Rel.GT,\n self.name)\n self.group = validator.check_integer(\"group\", group, 0, Rel.GT, self.name)\n self.add_prim_attr('offset_a', 0)\n\n def infer_shape(self, x_shape, w_shape, b_shape=None):\n validator.check_integer(\"weight rank\", len(w_shape), 4, Rel.EQ, self.name)\n validator.check_integer(\"x rank\", len(x_shape), 4, Rel.EQ, self.name)\n validator.check(\"x_shape[1]\", x_shape[1], \"w_shape[1]\", w_shape[1], Rel.EQ, self.name)\n validator.check('kernel_size', self.kernel_size, 'w_shape[2:4]', tuple(w_shape[2:4]), Rel.EQ, self.name)\n\n kernel_size_n, _, kernel_size_h, kernel_size_w = w_shape\n _, _, stride_h, stride_w = self.stride\n _, _, dilation_h, dilation_w = self.dilation\n if kernel_size_n != 1:\n raise ValueError(f\"The batch of input weight should be 1, but got {kernel_size_n}\")\n if self.pad_mode == \"valid\":\n h_out = math.ceil((x_shape[2] - dilation_h * (kernel_size_h - 1)) / stride_h)\n w_out = math.ceil((x_shape[3] - dilation_w * (kernel_size_w - 1)) / stride_w)\n pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0\n elif 
self.pad_mode == \"same\":\n h_out = math.ceil(x_shape[2] / stride_h)\n w_out = math.ceil(x_shape[3] / stride_w)\n\n pad_needed_h = max(0, (h_out - 1) * stride_h + dilation_h * (kernel_size_h - 1) + 1 - x_shape[2])\n pad_top = math.floor(pad_needed_h / 2)\n pad_bottom = pad_needed_h - pad_top\n\n pad_needed_w = max(0, (w_out - 1) * stride_w + dilation_w * (kernel_size_w - 1) + 1 - x_shape[3])\n pad_left = math.floor(pad_needed_w / 2)\n pad_right = pad_needed_w - pad_left\n elif self.pad_mode == 'pad':\n pad_top, pad_bottom, pad_left, pad_right = self.padding\n\n h_out = 1 + (x_shape[2] + pad_top + pad_bottom - kernel_size_h - (kernel_size_h - 1) * (dilation_h - 1)) \\\n / stride_h\n w_out = 1 + (x_shape[3] + pad_left + pad_right - kernel_size_w - (kernel_size_w - 1) * (dilation_w - 1)) \\\n / stride_w\n h_out = math.floor(h_out)\n w_out = math.floor(w_out)\n\n self.pad_list = (pad_top, pad_bottom, pad_left, pad_right)\n self.add_prim_attr('pads', self.pad_list)\n\n out_channel = self.channel_multiplier * x_shape[1]\n out_shape = [x_shape[0], out_channel, h_out, w_out]\n return out_shape\n\n def infer_dtype(self, x_dtype, w_dtype, b_dtype=None):\n args = {'x': x_dtype, 'w': w_dtype}\n validator.check_tensor_type_same(args, mstype.number_type, self.name)\n if x_dtype.element_type() == mstype.int8:\n return mstype.tensor_type(mstype.int32)\n return x_dtype\n\n\nclass _Pool(PrimitiveWithInfer):\n r\"\"\"\n Performs max/avg pooling operation.\n\n Args:\n ksize (Union[int, tuple[int]]): The size of the kernel, that should be a tuple\n of two `int` for height and width. Default: 1.\n strides (Union[int, tuple[int]]): The stride of the window, that should be\n a tuple of two `int` for height and width. Default: 1.\n padding (str): The optional value for pad mode, is \"same\" or \"valid\", not case sensitive.\n Default: \"valid\".\n \"\"\"\n\n @prim_attr_register\n def __init__(self, ksize=1, strides=1, padding=\"valid\"):\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n validator.check_value_type('ksize', ksize, [int, tuple], self.name)\n validator.check_value_type('strides', strides, [int, tuple], self.name)\n self.padding = validator.check_string('padding', padding.upper(), ['VALID', 'SAME'], self.name)\n self.add_prim_attr(\"padding\", self.padding)\n self.is_maxpoolwithargmax = (self.name == \"MaxPoolWithArgmax\")\n if not self.is_maxpoolwithargmax:\n self.add_prim_attr('data_format', \"NCHW\")\n\n self.ksize = _check_positive_int_or_tuple(\"ksize\", ksize, self.name, allow_four=False, ret_four=True)\n if self.is_maxpoolwithargmax:\n self.ksize = (1, self.ksize[-2], self.ksize[-1], 1)\n self.add_prim_attr(\"ksize\", self.ksize)\n\n self.strides = _check_positive_int_or_tuple(\"strides\", strides, self.name, allow_four=False, ret_four=True)\n if self.is_maxpoolwithargmax:\n self.strides = (1, self.strides[-2], self.strides[-1], 1)\n self.add_prim_attr(\"strides\", self.strides)\n\n def infer_shape(self, x_shape):\n validator.check_integer(\"x rank\", len(x_shape), 4, Rel.EQ, self.name)\n batch, channel, input_h, input_w = x_shape\n if self.is_maxpoolwithargmax:\n _, kernel_h, kernel_w, _ = self.ksize\n _, stride_h, stride_w, _ = self.strides\n else:\n _, _, kernel_h, kernel_w = self.ksize\n _, _, stride_h, stride_w = self.strides\n\n if self.padding == \"VALID\":\n out_h = math.ceil((input_h - (kernel_h - 1)) / stride_h)\n out_w = math.ceil((input_w - (kernel_w - 1)) / stride_w)\n elif self.padding == \"SAME\":\n out_h = math.ceil(input_h / stride_h)\n out_w = math.ceil(input_w / 
stride_w)\n out_shape = [batch, channel, out_h, out_w]\n\n for shape_value in out_shape:\n if shape_value <= 0:\n raise ValueError(f\"For '{self.name}' The kernel size is not valid, \"\n f\"please check it if is larger than data's shape size.\")\n return out_shape\n\n def infer_dtype(self, x_dtype):\n validator.check_subclass(\"input\", x_dtype, mstype.tensor, self.name)\n return x_dtype\n\n\nclass MaxPool(_Pool):\n r\"\"\"\n Max pooling operation.\n\n Applies a 2D max pooling over an input Tensor which can be regarded as a composition of 2D planes.\n\n Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs\n regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size\n :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.\n\n .. math::\n \\text{output}(N_i, C_j, h, w) = \\max_{m=0, \\ldots, h_{ker}-1} \\max_{n=0, \\ldots, w_{ker}-1}\n \\text{input}(N_i, C_j, s_0 \\times h + m, s_1 \\times w + n)\n\n Args:\n ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value,\n is an int number that represents height and width are both ksize, or a tuple\n of two int numbers that represent height and width respectively. Default: 1.\n strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents\n the height and width of movement are both strides, or a tuple of two int numbers that\n represent height and width of movement respectively. Default: 1.\n padding (str): The optional value for pad mode, is \"same\" or \"valid\", not case sensitive.\n Default: \"valid\".\n\n - same: Adopts the way of completion. The height and width of the output will be the same as\n the input. The total number of padding will be calculated in horizontal and vertical\n directions and evenly distributed to top and bottom, left and right if possible.\n Otherwise, the last extra padding will be done from the bottom and the right side.\n\n - valid: Adopts the way of discarding. The possible largest height and width of output\n will be returned without padding. Extra pixels will be discarded.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.\n\n Outputs:\n Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.\n\n Examples:\n >>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)\n >>> maxpool_op = P.MaxPool(padding=\"VALID\", ksize=2, strides=1)\n >>> output_tensor = maxpool_op(input_tensor)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, ksize=1, strides=1, padding=\"valid\"):\n super(MaxPool, self).__init__(ksize, strides, padding)\n\n\nclass MaxPoolWithArgmax(_Pool):\n r\"\"\"\n Performs max pooling on the input Tensor and return both max values and indices.\n\n Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, MaxPool outputs\n regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size\n :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.\n\n .. math::\n \\text{output}(N_i, C_j, h, w) = \\max_{m=0, \\ldots, h_{ker}-1} \\max_{n=0, \\ldots, w_{ker}-1}\n \\text{input}(N_i, C_j, s_0 \\times h + m, s_1 \\times w + n)\n\n Args:\n ksize (Union[int, tuple[int]]): The size of kernel used to take the maximum value and arg value,\n is an int number that represents height and width are both ksize, or a tuple of\n two int numbers that represent height and width respectively. 
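# Illustrative sketch of the pooling output-size rule used by _Pool.infer_shape above:
# "VALID" discards incomplete windows, "SAME" pads so the output is ceil(input / stride).
# The helper name is hypothetical; it only restates that arithmetic.
import math

def pool_out_hw(in_hw, kernel, stride, padding="VALID"):
    h_in, w_in = in_hw
    k_h, k_w = kernel
    s_h, s_w = stride
    if padding == "VALID":
        return (math.ceil((h_in - (k_h - 1)) / s_h),
                math.ceil((w_in - (k_w - 1)) / s_w))
    # "SAME"
    return math.ceil(h_in / s_h), math.ceil(w_in / s_w)

if __name__ == "__main__":
    # The MaxPool docstring example above: 3x4 spatial input, ksize 2, stride 1, "VALID"
    print(pool_out_hw((3, 4), (2, 2), (1, 1), "VALID"))   # (2, 3)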
Default: 1.\n strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents\n the height and width of movement are both strides, or a tuple of two int numbers that\n represent height and width of movement respectively. Default: 1.\n padding (str): The optional value for pad mode, is \"same\" or \"valid\", not case sensitive.\n Default: \"valid\".\n\n - same: Adopts the way of completion. The height and width of the output will be the same as\n the input. The total number of padding will be calculated in horizontal and vertical\n directions and evenly distributed to top and bottom, left and right if possible.\n Otherwise, the last extra padding will be done from the bottom and the right side.\n\n - valid: Adopts the way of discarding. The possible largest height and width of output\n will be returned without padding. Extra pixels will be discarded.\n\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.\n Data type should be float16 or float32.\n\n Outputs:\n Tuple of 2 Tensor, the maxpool result and where max values from.\n\n - **output** (Tensor) - Maxpooling result, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.\n - **mask** (Tensor) - Max values' index represented by the mask.\n\n Examples:\n >>> input_tensor = Tensor(np.arange(1 * 3 * 3 * 4).reshape((1, 3, 3, 4)), mindspore.float32)\n >>> maxpool_arg_op = P.MaxPoolWithArgmax(padding=\"VALID\", ksize=2, strides=1)\n >>> output_tensor, argmax = maxpool_arg_op(input_tensor)\n \"\"\"\n\n def __init__(self, ksize=1, strides=1, padding=\"valid\"):\n super(MaxPoolWithArgmax, self).__init__(ksize, strides, padding)\n self.is_tbe = context.get_context(\"device_target\") == \"Ascend\"\n self.is_gpu = context.get_context(\"device_target\") == \"GPU\"\n\n def infer_shape(self, x_shape):\n out_shape = _Pool.infer_shape(self, x_shape)\n _, _, out_h, out_w = out_shape\n _, kernel_h, kernel_w, _ = self.ksize\n\n argmax_shape = []\n if self.is_tbe:\n for i in range(4):\n if i == 2:\n dim = kernel_h * kernel_w\n argmax_shape.append(dim)\n elif i == 3:\n dim = math.ceil(out_h * out_w / 16) + 1\n argmax_shape.append(dim)\n else:\n argmax_shape.append(x_shape[i])\n else:\n argmax_shape = out_shape\n\n return out_shape, argmax_shape\n\n def infer_dtype(self, x_dtype):\n out_dtype = x_dtype\n validator.check_tensor_type_same({\"x\": x_dtype}, (mstype.float16, mstype.float32), self.name)\n argmax_dtype = mstype.uint16\n if self.is_gpu:\n argmax_dtype = mstype.int32\n return out_dtype, argmax_dtype\n\n\nclass AvgPool(_Pool):\n r\"\"\"\n Average pooling operation.\n\n Applies a 2D average pooling over an input Tensor which can be regarded as a composition of 2D input planes.\n Typically the input is of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, AvgPool2d outputs\n regional average in the :math:`(H_{in}, W_{in})`-dimension. Given kernel size\n :math:`ks = (h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1)`, the operation is as follows.\n\n .. math::\n \\text{output}(N_i, C_j, h, w) = \\frac{1}{h_{ker} * w_{ker}} \\sum_{m=0}^{h_{ker}-1} \\sum_{n=0}^{w_{ker}-1}\n \\text{input}(N_i, C_j, s_0 \\times h + m, s_1 \\times w + n)\n\n Args:\n ksize (Union[int, tuple[int]]): The size of kernel used to take the average value,\n is an int number that represents height and width are both ksize, or a tuple\n of two int numbers that represent height and width respectively. 
Default: 1.\n strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents\n the height and width of movement are both strides, or a tuple of two int numbers that\n represent height and width of movement respectively. Default: 1.\n padding (str): The optional value for pad mode, is \"same\" or \"valid\", not case sensitive.\n Default: \"valid\".\n\n - same: Adopts the way of completion. The height and width of the output will be the same as\n the input. The total number of padding will be calculated in horizontal and vertical\n directions and evenly distributed to top and bottom, left and right if possible.\n Otherwise, the last extra padding will be done from the bottom and the right side.\n\n - valid: Adopts the way of discarding. The possible largest height and width of output\n will be returned without padding. Extra pixels will be discarded.\n\n Inputs:\n - **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.\n\n Outputs:\n Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.\n\n Examples:\n >>> import mindspore\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.avgpool_op = P.AvgPool(padding=\"VALID\", ksize=2, strides=1)\n >>>\n >>> def construct(self, x):\n >>> result = self.avgpool_op(x)\n >>> return result\n >>>\n >>> input_x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)\n >>> net = Net()\n >>> result = net(input_x)\n [[[[ 2.5 3.5 4.5]\n [ 6.5 7.5 8.5]]\n [[ 14.5 15.5 16.5]\n [ 18.5 19.5 20.5]]\n [[ 26.5 27.5 28.5]\n [ 30.5 31.5 32.5]]]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, ksize=1, strides=1, padding=\"valid\"):\n if context.get_context(\"device_target\") == \"GPU\":\n self.target = \"GPU\"\n elif context.get_context(\"enable_ge\"):\n self.target = \"GE\"\n else:\n self.target = \"OTHER\"\n super(AvgPool, self).__init__(ksize, strides, padding)\n\n\nclass Conv2DBackpropInput(PrimitiveWithInfer):\n \"\"\"\n Computes the gradients of convolution with respect to the input.\n\n Args:\n out_channel (int): The dimensionality of the output space.\n kernel_size (Union[int, tuple[int]]): The size of the convolution window.\n pad_mode (str): Modes to fill padding. It could be \"valid\", \"same\", or \"pad\". Default: \"valid\".\n pad (Union[int, tuple[int]]): The pad value to be filled. Default: 0. If `pad` is an integer, the paddings of\n top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers, the\n padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3] correspondingly.\n mode (int): Modes for different convolutions. 0 Math convolutiuon, 1 cross-correlation convolution ,\n 2 deconvolution, 3 depthwise convolution. Default: 1.\n stride (Union[int. tuple[int]]): The stride to be applied to the convolution filter. Default: 1.\n dilation (Union[int. tuple[int]]): Specifies the dilation rate to be used for the dilated convolution.\n Default: 1.\n group (int): Splits input into groups. 
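# Illustrative NumPy sketch (not the AvgPool kernel): a naive NCHW average pooling with
# "VALID" padding, which reproduces the values shown in the AvgPool example above.
# The helper name is hypothetical.
import numpy as np

def avg_pool_valid_ref(x, ksize, stride):
    # x: (N, C, H, W); ksize/stride: (kh, kw)/(sh, sw)
    n, c, h, w = x.shape
    kh, kw = ksize
    sh, sw = stride
    h_out = (h - kh) // sh + 1
    w_out = (w - kw) // sw + 1
    out = np.empty((n, c, h_out, w_out), dtype=x.dtype)
    for i in range(h_out):
        for j in range(w_out):
            window = x[:, :, i * sh:i * sh + kh, j * sw:j * sw + kw]
            out[:, :, i, j] = window.mean(axis=(2, 3))
    return out

if __name__ == "__main__":
    x = np.arange(1 * 3 * 3 * 4, dtype=np.float32).reshape(1, 3, 3, 4)
    print(avg_pool_valid_ref(x, (2, 2), (1, 1)))
    # First channel: [[2.5 3.5 4.5] [6.5 7.5 8.5]], matching the example output above.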
Default: 1.\n\n Returns:\n Tensor, the gradients of convolution.\n\n Examples:\n >>> dout = Tensor(np.ones([10, 32, 30, 30]), mindspore.float32)\n >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)\n >>> x = Tensor(np.ones([10, 32, 32, 32]))\n >>> conv2d_backprop_input = P.Conv2DBackpropInput(out_channel=32, kernel_size=3)\n >>> conv2d_backprop_input(dout, weight, F.shape(x))\n \"\"\"\n\n @prim_attr_register\n def __init__(self,\n out_channel,\n kernel_size,\n pad_mode=\"valid\",\n pad=0,\n pad_list=None,\n mode=1,\n stride=1,\n dilation=1,\n group=1):\n \"\"\"init Conv2DBackpropInput\"\"\"\n self.init_prim_io_names(inputs=['out_backprop', 'filter', 'input_sizes'], outputs=['output'])\n self.out_channel = validator.check_integer('out_channel', out_channel, 0, Rel.GT, self.name)\n self.kernel_size = _check_positive_int_or_tuple('kernel_size', kernel_size, self.name)\n self.stride = _check_positive_int_or_tuple('stride', stride, self.name, allow_four=True, ret_four=False)\n self.add_prim_attr('stride', self.stride)\n self.dilation = _check_positive_int_or_tuple('dilation', dilation, self.name, allow_four=True, ret_four=True)\n self.add_prim_attr('dilation', self.dilation)\n\n validator.check_value_type('pad', pad, (int, tuple), self.name)\n if isinstance(pad, int):\n pad = (pad,) * 4\n else:\n validator.check_integer('pad size', len(pad), 4, Rel.EQ, self.name)\n self.padding = pad\n self.pad_mode = validator.check_string('pad_mode', pad_mode, ['valid', 'same', 'pad'], self.name)\n if pad_mode != 'pad' and pad != (0, 0, 0, 0):\n raise ValueError(f\"For '{self.name}', padding must be zero when pad_mode is '{pad_mode}'.\")\n if self.pad_mode == 'pad':\n for item in pad:\n validator.check_integer('pad item', item, 0, Rel.GE, self.name)\n\n pad_mode = pad_mode.upper()\n self.add_prim_attr('pad_mode', pad_mode)\n self.mode = validator.check_integer('mode', mode, 1, Rel.EQ, self.name)\n self.group = validator.check_integer('group', group, 0, Rel.GT, self.name)\n self.add_prim_attr('data_format', \"NCHW\")\n if pad_list:\n for x in pad_list:\n validator.check_integer('element of pad_list', x, 0, Rel.GE, self.name)\n self.pad_list = pad_list\n\n def __infer__(self, doutput, w, x_size):\n x_size_v = x_size['value']\n validator.check_value_type('x_size', x_size_v, [tuple], self.name)\n for i, dim_len in enumerate(x_size_v):\n validator.check_value_type(\"x_size[%d]\" % i, dim_len, [int], self.name)\n args = {'doutput': doutput['dtype'], 'w': w['dtype']}\n valid_types = [mstype.int8, mstype.int32, mstype.float16, mstype.float32]\n validator.check_tensor_type_same(args, valid_types, self.name)\n\n # infer shape\n dout_shape = doutput['shape']\n kernel_h = self.kernel_size[0]\n kernel_w = self.kernel_size[1]\n stride_h = self.stride[0]\n stride_w = self.stride[1]\n dilation_h = self.dilation[2]\n dilation_w = self.dilation[3]\n # default pad mode is valid\n pad_list = (0, 0, 0, 0)\n if self.pad_list:\n pad_list = tuple(self.pad_list)\n elif self.pad_mode == \"SAME\":\n pad_needed_h = max(0, (dout_shape[2] - 1) * stride_h + dilation_h * (kernel_h - 1) + 1 - x_size_v[2])\n pad_top = math.floor(pad_needed_h / 2)\n pad_bottom = pad_needed_h - pad_top\n\n pad_needed_w = max(0, (dout_shape[3] - 1) * stride_w + dilation_w * (kernel_w - 1) + 1 - x_size_v[3])\n pad_left = math.floor(pad_needed_w / 2)\n pad_right = pad_needed_w - pad_left\n pad_list = (pad_top, pad_bottom, pad_left, pad_right)\n elif self.pad_mode == 'PAD':\n pad_list = self.padding\n self.add_prim_attr('pad_list', pad_list)\n out = 
{\n 'value': None,\n 'shape': x_size_v,\n 'dtype': doutput['dtype'],\n }\n return out\n\n\nclass BiasAdd(PrimitiveWithInfer):\n r\"\"\"\n Returns sum of input and bias tensor.\n\n Adds the 1-D bias tensor to the input tensor, and broadcasts the shape on all axis\n except for the channel axis.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor. The shape can be 2-4 dimensions.\n - **bias** (Tensor) - The bias tensor, with shape :math:`(C)`.\n The shape of `bias` must be the same as `input_x` in the second dimension.\n\n Outputs:\n Tensor, with the same shape and type as `input_x`.\n\n Examples:\n >>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)\n >>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)\n >>> bias_add = P.BiasAdd()\n >>> bias_add(input_x, bias)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n self.init_prim_io_names(inputs=['x', 'b'], outputs=['output'])\n self.add_prim_attr('data_format', 'NCHW')\n\n def infer_shape(self, x_shape, b_shape):\n validator.check_integer(\"x rank\", len(x_shape), 2, Rel.GE, self.name)\n validator.check_integer(\"bias rank\", len(b_shape), 1, Rel.EQ, self.name)\n validator.check(\"b_shape[0]\", b_shape[0], \"x_shape[1]\", x_shape[1], Rel.EQ, self.name)\n return x_shape\n\n def infer_dtype(self, x_type, b_type):\n args = {\"input_x\": x_type, \"bias\": b_type}\n validator.check_tensor_type_same(args, mstype.number_type, self.name)\n return x_type\n\n\nclass TopK(PrimitiveWithInfer):\n \"\"\"\n Finds values and indices of the `k` largest entries along the last dimension.\n\n Args:\n sorted (bool): If true, the resulting elements will\n be sorted by the values in descending order. Default: False.\n\n Inputs:\n - **input_x** (Tensor) - Input to be computed, data type should be float16, float32 or int32.\n - **k** (int) - Number of top elements to be computed along the last dimension, constant input is needed.\n\n Outputs:\n Tuple of 2 Tensor, the values and the indices.\n\n - **values** (Tensor) - The `k` largest elements along each last dimensional slice.\n - **indices** (Tensor) - The indices of values within the last dimension of input.\n\n Examples:\n >>> topk = P.TopK(sorted=True)\n >>> input_x = Tensor([1, 2, 3, 4, 5], mindspore.float16)\n >>> k = 3\n >>> values, indices = topk(input_x, k)\n >>> assert values == Tensor(np.array([5, 4, 3]), mstype.float16)\n >>> assert indices == Tensor(np.array([4, 3, 2]), mstype.int32)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, sorted=False):\n validator.check_value_type(\"sorted\", sorted, [bool], self.name)\n self.init_prim_io_names(inputs=['input', 'k'],\n outputs=['values', 'indices'])\n\n def __infer__(self, input_x, k):\n x_dtype = input_x['dtype']\n valid_types = (mstype.int32, mstype.float16, mstype.float32)\n validator.check_tensor_type_same({'x': x_dtype}, valid_types, self.name)\n k_v = k['value']\n validator.check_value_type('k', k_v, (int,), self.name)\n x_shape = list(input_x['shape'])\n ndim = len(x_shape) - 1\n x_shape[ndim] = k_v\n return {'shape': (x_shape, x_shape),\n 'dtype': (x_dtype, mstype.int32),\n 'value': None}\n\n\nclass SoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):\n r\"\"\"\n Gets the softmax cross-entropy value between logits and labels which shoule be one-hot encoding.\n\n Note:\n Sets input logits as `X`, input label as `Y`, output as `loss`. Then,\n\n .. math::\n p_{ij} = softmax(X_{ij}) = \\frac{exp(x_i)}{\\sum_{j = 0}^{N-1}\\exp(x_j)}\n\n .. 
math::\n loss_{ij} = -\\sum_j{Y_{ij} * ln(p_{ij})}\n\n Inputs:\n - **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type should be float16 or float32.\n - **labels** (Tensor) - Ground truth labels, with shape :math:`(N, C)`, has the same data type with `logits`.\n\n Outputs:\n Tuple of 2 Tensor, the loss shape is `(N,)`, and the dlogits with the same shape as `logits`.\n\n Examples:\n >>> logits = Tensor([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], mindspore.float32)\n >>> labels = Tensor([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], mindspore.float32)\n >>> softmax_cross = P.SoftmaxCrossEntropyWithLogits()\n >>> loss, backprop = softmax_cross(logits, labels)\n ([0.5899297, 0.52374405], [[0.02760027, 0.20393994, 0.01015357, 0.20393994, -0.44563377],\n [0.08015892, 0.02948882, 0.08015892, -0.4077012, 0.21789455]])\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n pass\n\n def infer_shape(self, logits_shape, labels_shape):\n validator.check(\"logits_shape\", logits_shape, \"labels_shape\", labels_shape, Rel.EQ, self.name)\n loss_shape = [logits_shape[0]]\n dlogits_shape = logits_shape\n return (loss_shape, dlogits_shape)\n\n def infer_dtype(self, logits_type, labels_type):\n args = {\"logits\": logits_type, \"labels\": labels_type}\n validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)\n return (logits_type, logits_type)\n\n\nclass SparseSoftmaxCrossEntropyWithLogits(PrimitiveWithInfer):\n r\"\"\"\n Computes the softmax cross-entropy value between logits and sparse encoding labels.\n\n Note:\n Sets input logits as `X`, input label as `Y`, output as `loss`. Then,\n\n .. math::\n p_{ij} = softmax(X_{ij}) = \\frac{exp(x_i)}{\\sum_{j = 0}^{N-1}\\exp(x_j)}\n\n .. math::\n loss_{ij} = \\begin{cases} -ln(p_{ij}), &j = y_i \\cr -ln(1 - p_{ij}), & j \\neq y_i \\end{cases}\n\n .. math::\n loss = \\sum_{ij} loss_{ij}\n\n Args:\n is_grad (bool): If it's true, this operation returns the computed gradient. Default: False.\n\n Inputs:\n - **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. 
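As a cross-check of the SoftmaxCrossEntropyWithLogits formulas and doctest values shown above, here is a minimal NumPy sketch (an illustrative helper of ours, not the MindSpore kernel); it reproduces both the per-sample loss and the `softmax(X) - Y` backprop values from the example:

    import numpy as np

    def softmax_cross_entropy_with_logits(logits, labels):
        # Illustrative reference only: row-wise softmax with the usual max-shift for stability.
        shifted = logits - logits.max(axis=1, keepdims=True)
        exp = np.exp(shifted)
        p = exp / exp.sum(axis=1, keepdims=True)
        loss = -(labels * np.log(p)).sum(axis=1)   # shape (N,)
        dlogits = p - labels                       # shape (N, C)
        return loss, dlogits

    logits = np.array([[2, 4, 1, 4, 5], [2, 1, 2, 4, 3]], dtype=np.float32)
    labels = np.array([[0, 0, 0, 0, 1], [0, 0, 0, 1, 0]], dtype=np.float32)
    loss, dlogits = softmax_cross_entropy_with_logits(logits, labels)
    # loss is approximately [0.5899, 0.5237], matching the doctest output above.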
Data type should be float16 or float32.\n - **labels** (Tensor) - Ground truth labels, with shape :math:`(N)`.\n Data type should be int32 or int64.\n\n Outputs:\n Tensor, if `is_grad` is False, the output tensor is the value of loss which is a scalar tensor;\n if `is_grad` is True, the output tensor is the gradient of input with the same shape as `logits`.\n\n Examples:\n Please refer to the usage in nn.SoftmaxCrossEntropyWithLogits source code.\n \"\"\"\n\n @prim_attr_register\n def __init__(self, is_grad=False):\n self.init_prim_io_names(inputs=['features', 'labels'], outputs=['output'])\n self.is_grad = is_grad\n self.add_prim_attr('sens', 1.0)\n\n def infer_shape(self, logits_shape, labels_shape):\n validator.check(\"logits_shape[0]\", logits_shape[0], \"labels_shape[0]\", labels_shape[0], Rel.EQ, self.name)\n loss_shape = []\n if self.is_grad:\n return logits_shape\n return loss_shape\n\n def infer_dtype(self, logits_type, labels_type):\n validator.check_tensor_type_same({\"logits\": logits_type}, (mstype.float16, mstype.float32), self.name)\n validator.check_tensor_type_same({\"labels\": labels_type}, (mstype.int32, mstype.int64), self.name)\n return logits_type\n\n\nclass ApplyMomentum(PrimitiveWithInfer):\n \"\"\"\n Optimizer that implements the Momentum algorithm.\n\n Refer to the paper `On the importance of initialization and momentum in deep\n learning <https://dl.acm.org/doi/10.5555/3042817.3043064>`_ for more details.\n\n Inputs of `variable`, `accumulation` and `gradient` comply with the implicit type conversion rules\n to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n Data type conversion of Parameter is not supported. RuntimeError exception will be thrown.\n\n Args:\n use_locking (bool): Enable a lock to protect the update of variable and accumlation tensors. Default: False.\n use_nesterov (bool): Enable Nesterov momentum. Default: False.\n gradient_scale (float): The scale of the gradient. Default: 1.0.\n\n Inputs:\n - **variable** (Parameter) - Weights to be updated. 
data type should be float.\n - **accumulation** (Parameter) - Accumulated gradient value by moment weight.\n Has the same data type with `variable`.\n - **learning_rate** (Union[Number, Tensor]) - The learning rate value, should be a float number or\n a scalar tensor with float data type.\n - **gradient** (Tensor) - Gradients, has the same data type as `variable`.\n - **momentum** (Union[Number, Tensor]) - Momentum, should be a float number or\n a scalar tensor with float data type.\n\n Outputs:\n Tensor, parameters to be updated.\n\n Examples:\n Please refer to the usage in nn.ApplyMomentum.\n \"\"\"\n __mindspore_signature__ = (\n sig.make_sig('variable', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accumulation', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('learning_rate', dtype=sig.sig_dtype.T1),\n sig.make_sig('gradient', dtype=sig.sig_dtype.T),\n sig.make_sig('momentum', dtype=sig.sig_dtype.T2),\n )\n\n @prim_attr_register\n def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0):\n self.init_prim_io_names(inputs=['variable', 'accumulation', 'learning_rate', 'gradient', 'momentum'],\n outputs=['output'])\n self.is_tbe = context.get_context(\"device_target\") == \"Ascend\"\n self.is_ge = context.get_context(\"enable_ge\")\n\n def infer_shape(self, v_shape, a_shape, l_shape, g_shape, m_shape):\n if not self.is_ge and self.is_tbe:\n return v_shape, v_shape\n return v_shape\n\n def infer_dtype(self, v_dtype, a_dtype, l_dtype, g_dtype, m_dtype):\n valid_types = [mstype.float16, mstype.float32, mstype.float64]\n if v_dtype != mstype.type_refkey and a_dtype != mstype.type_refkey:\n validator.check_tensor_type_same({\"v\": v_dtype}, valid_types, self.name)\n validator.check_tensor_type_same({\"a\": a_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"l_dtype\": l_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"g_dtype\": g_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"m_dtype\": m_dtype}, valid_types, self.name)\n if not self.is_ge and self.is_tbe:\n return g_dtype, g_dtype\n return g_dtype\n\n\nclass SmoothL1Loss(PrimitiveWithInfer):\n r\"\"\"\n Computes smooth L1 loss, a robust L1 loss.\n\n SmoothL1Loss is a Loss similar to MSELoss but less sensitive to outliers as described in the\n `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_ by Ross Girshick.\n\n Note:\n Sets input prediction as `X`, input target as `Y`, output as `loss`. Then,\n\n .. math::\n \\text{SmoothL1Loss} = \\begin{cases} \\frac{0.5 x^{2}}{\\text{beta}}, &if \\left |x \\right | < \\text{beta} \\cr\n \\left |x \\right|-0.5 \\text{beta}, &\\text{otherwise}\\end{cases}\n\n Args:\n beta (float): A parameter used to control the point where the function will change from\n quadratic to linear. Default: 1.0.\n\n Inputs:\n - **prediction** (Tensor) - Predict data. 
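To make the `ApplyMomentum` inputs concrete, below is a hedged NumPy sketch of the usual accumulator-based momentum update; the helper name and the exact handling of `use_nesterov` and `gradient_scale` are assumptions here, not taken from the operator's source:

    import numpy as np

    def apply_momentum(variable, accumulation, learning_rate, gradient, momentum,
                       use_nesterov=False):
        # Illustrative sketch: accumulation keeps a decaying sum of past gradients.
        accumulation = momentum * accumulation + gradient
        if use_nesterov:
            variable = variable - learning_rate * (gradient + momentum * accumulation)
        else:
            variable = variable - learning_rate * accumulation
        return variable, accumulation

    var = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    accum = np.zeros_like(var)
    grad = np.array([0.1, 0.1, 0.1], dtype=np.float32)
    var, accum = apply_momentum(var, accum, 0.01, grad, momentum=0.9)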
Data type should be float16 or float32.\n - **target** (Tensor) - Ground truth data, with the same type and shape as `prediction`.\n\n Outputs:\n Tensor, with the same type and shape as `prediction`.\n\n Examples:\n >>> loss = P.SmoothL1Loss()\n >>> input_data = Tensor(np.array([1, 2, 3]), mindspore.float32)\n >>> target_data = Tensor(np.array([1, 2, 2]), mindspore.float32)\n >>> loss(input_data, target_data)\n [0, 0, 0.5]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, beta=1.0):\n validator.check_value_type('beta', beta, [float], self.name)\n validator.check('beta', beta, '', 0, Rel.GT, self.name)\n self.init_prim_io_names(inputs=['prediction', 'target'], outputs=['output'])\n\n def infer_shape(self, prediction, target):\n validator.check('prediction shape', prediction, 'target shape', target, Rel.EQ, self.name)\n return prediction\n\n def infer_dtype(self, prediction, target):\n args = {\"prediction\": prediction, \"target\": target}\n validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)\n return prediction\n\n\nclass L2Loss(PrimitiveWithInfer):\n \"\"\"\n Calculates half of the L2 norm of a tensor without using the `sqrt`.\n\n Set `input_x` as x and output as loss.\n\n .. math::\n loss = sum(x ** 2) / 2\n\n Inputs:\n - **input_x** (Tensor) - An input Tensor. Data type should be float16 or float32.\n\n Outputs:\n Tensor, has the same dtype as `input_x`. The output tensor is the value of loss which is a scalar tensor.\n\n Examples:\n >>> input_x = Tensor(np.array([1, 2, 3]), mindspore.float16)\n >>> l2_loss = P.L2Loss()\n >>> l2_loss(input_x)\n 7.0\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init L2Loss\"\"\"\n\n def infer_shape(self, input_x):\n loss_shape = []\n return loss_shape\n\n def infer_dtype(self, x_type):\n validator.check_subclass(\"x_type\", x_type, mstype.tensor, self.name)\n valid_types = [mstype.float16, mstype.float32]\n validator.check_tensor_type_same({'x_type': x_type}, valid_types, self.name)\n return x_type\n\n\nclass DataFormatDimMap(PrimitiveWithInfer):\n \"\"\"\n Returns the dimension index in the destination data format given in the source data format.\n\n Args:\n src_format (string): An optional value for source data format. Default: 'NHWC'.\n dst_format (string): An optional value for destination data format. Default: 'NCHW'.\n\n Inputs:\n - **input_x** (Tensor) - A Tensor with each element as a dimension index in source data format.\n The suggested values are in the range [-4, 4). 
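The piecewise SmoothL1Loss definition above is easy to verify with a NumPy sketch (an illustrative helper, not the operator itself); for the doctest inputs it yields the documented [0, 0, 0.5]:

    import numpy as np

    def smooth_l1_loss(prediction, target, beta=1.0):
        # Quadratic inside the beta interval, linear outside, as in the formula above.
        diff = np.abs(prediction - target)
        return np.where(diff < beta, 0.5 * diff ** 2 / beta, diff - 0.5 * beta)

    pred = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    target = np.array([1.0, 2.0, 2.0], dtype=np.float32)
    print(smooth_l1_loss(pred, target))   # [0.  0.  0.5]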
Its data type is int32.\n\n Outputs:\n Tensor, has the same type as the `input_x`.\n\n Examples:\n >>> x = Tensor([0, 1, 2, 3], mindspore.int32)\n >>> dfdm = P.DataFormatDimMap()\n >>> dfdm(x)\n [0 3 1 2]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, src_format='NHWC', dst_format='NCHW'):\n valid_values = ['NHWC', 'NCHW']\n self.src_format = validator.check_string(\"src_format\", src_format, valid_values, self.name)\n self.dst_format = validator.check_string(\"dst_format\", dst_format, valid_values, self.name)\n self.init_prim_io_names(inputs=['input_x'], outputs=['output'])\n\n def infer_shape(self, x_shape):\n return x_shape\n\n def infer_dtype(self, x_type):\n validator.check_subclass(\"x\", x_type, mstype.tensor, self.name)\n valid_types = [mstype.int32]\n validator.check_tensor_type_same({\"x\": x_type}, valid_types, self.name)\n return x_type\n\n\nclass RNNTLoss(PrimitiveWithInfer):\n \"\"\"\n Computes the RNNTLoss and its gradient with respect to the softmax outputs.\n\n Args:\n blank_label (int): The blank label. Default: 0.\n\n Inputs:\n - **acts** (Tensor) - Tensor of shape :math:`(B, T, U, V)`. Data type should be float16 or float32.\n - **labels** (Tensor[int32]) - Tensor of shape :math:`(B, U-1)`.\n - **input_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.\n - **label_lengths** (Tensor[int32]) - Tensor of shape :math:`(B,)`.\n\n Outputs:\n - **costs** (Tensor[int32]) - Tensor of shape :math:`(B,)`.\n - **grads** (Tensor[int32]) - Has the same shape as `acts`.\n\n Examples:\n >>> B, T, U, V = 1, 2, 3, 5\n >>> acts = np.random.random((B, T, U, V)).astype(np.float32)\n >>> labels = np.array([[1, 2]]).astype(np.int32)\n >>> input_length = np.array([T] * B).astype(np.int32)\n >>> label_length = np.array([len(l) for l in labels]).astype(np.int32)\n >>> rnnt_loss = P.RNNTLoss(blank_label=0)\n >>> costs, grads = rnnt_loss(Tensor(acts), Tensor(labels), Tensor(input_length), Tensor(label_length))\n \"\"\"\n\n @prim_attr_register\n def __init__(self, blank_label=0):\n validator.check_value_type('blank_label', blank_label, [int], self.name)\n self.init_prim_io_names(inputs=['acts', 'labels', 'input_length', 'label_length'],\n outputs=['costs', 'grads'])\n\n def infer_shape(self, acts_shape, labels_shape, input_length_shape, label_length_shape):\n validator.check_integer('acts_rank', len(acts_shape), 4, Rel.EQ, self.name)\n validator.check_integer('labels_rank', len(labels_shape), 2, Rel.EQ, self.name)\n validator.check_integer('input_length_rank', len(input_length_shape), 1, Rel.EQ, self.name)\n validator.check_integer('label_length_rank', len(label_length_shape), 1, Rel.EQ, self.name)\n validator.check('labels shape[0]', labels_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)\n validator.check('labels shape[1]', labels_shape[1], 'acts shape[2]-1', acts_shape[2]-1, Rel.EQ, self.name)\n validator.check('input_length size', input_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)\n validator.check('label_length size', label_length_shape[0], 'acts shape[0]', acts_shape[0], Rel.EQ, self.name)\n costs_shape = (acts_shape[0],)\n return (costs_shape, acts_shape)\n\n def infer_dtype(self, acts_type, labels_type, input_length_type, label_length_type):\n validator.check_subclass(\"acts_type\", acts_type, mstype.tensor, self.name)\n validator.check_subclass(\"labels_type\", labels_type, mstype.tensor, self.name)\n validator.check_subclass(\"input_length_type\", input_length_type, mstype.tensor, self.name)\n validator.check_subclass(\"label_length_type\", 
label_length_type, mstype.tensor, self.name)\n validator.check_tensor_type_same({\"acts_type\": acts_type}, [mstype.float32, mstype.float16], self.name)\n validator.check_tensor_type_same({\"labels_type\": labels_type}, [mstype.int32], self.name)\n validator.check_tensor_type_same({\"input_length_type\": input_length_type}, [mstype.int32], self.name)\n validator.check_tensor_type_same({\"label_length_type\": label_length_type}, [mstype.int32], self.name)\n return (acts_type, acts_type)\n\n\nclass SGD(PrimitiveWithInfer):\n \"\"\"\n Computes stochastic gradient descent (optionally with momentum).\n\n Nesterov momentum is based on the formula from On the importance of\n initialization and momentum in deep learning.\n\n Note:\n For details, please refer to `nn.SGD` source code.\n\n Args:\n dampening (float): The dampening for momentum. Default: 0.0.\n weight_decay (float): Weight decay (L2 penalty). Default: 0.0.\n nesterov (bool): Enable Nesterov momentum. Default: False.\n\n Inputs:\n - **parameters** (Tensor) - Parameters to be updated. With float16 or float32 data type.\n - **gradient** (Tensor) - Gradients. With float16 or float32 data type.\n - **learning_rate** (Tensor) - Learning rate, a scalar tensor with float16 or float32 data type.\n e.g. Tensor(0.1, mindspore.float32)\n - **accum** (Tensor) - Accum(velocity) to be updated. With float16 or float32 data type.\n - **momentum** (Tensor) - Momentum, a scalar tensor with float16 or float32 data type.\n e.g. Tensor(0.1, mindspore.float32).\n - **stat** (Tensor) - States to be updated with the same shape as gradient. With float16 or float32 data type.\n\n Outputs:\n Tensor, parameters to be updated.\n\n Examples:\n >>> sgd = P.SGD()\n >>> parameters = Tensor(np.array([2, -0.5, 1.7, 4]), mindspore.float32)\n >>> gradient = Tensor(np.array([1, -1, 0.5, 2]), mindspore.float32)\n >>> learning_rate = Tensor(0.01, mindspore.float32)\n >>> accum = Tensor(np.array([0.1, 0.3, -0.2, -0.1]), mindspore.float32)\n >>> momentum = Tensor(0.1, mindspore.float32)\n >>> stat = Tensor(np.array([1.5, -0.3, 0.2, -0.7]), mindspore.float32)\n >>> result = sgd(parameters, gradient, learning_rate, accum, momentum, stat)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, dampening=0.0, weight_decay=0.0, nesterov=False):\n validator.check_value_type(\"nesterov\", nesterov, [bool], self.name)\n if nesterov and dampening != 0:\n raise ValueError(f\"Nesterov need zero dampening!\")\n self.init_prim_io_names(inputs=['parameters', 'gradient', 'learning_rate', 'accum', 'momentum', 'stat'],\n outputs=['output'])\n\n def infer_shape(self, parameters_shape, gradient_shape, learning_rate_shape,\n accum_shape, momentum_shape, stat_shape):\n validator.check_integer(f'parameters rank', len(parameters_shape), 0, Rel.GT, self.name)\n validator.check_integer(f'gradient rank', len(gradient_shape), 0, Rel.GE, self.name)\n validator.check_integer(f'learning rate rank', len(learning_rate_shape), 0, Rel.GE, self.name)\n validator.check_integer(f'accumulation rank', len(accum_shape), 0, Rel.GT, self.name)\n validator.check_integer(f'momentum rank', len(momentum_shape), 0, Rel.GE, self.name)\n validator.check_integer(f'stat rank', len(stat_shape), 0, Rel.GE, self.name)\n validator.check(\"gradient shape\", gradient_shape, \"stat shape\", stat_shape, Rel.EQ, self.name)\n return parameters_shape\n\n def infer_dtype(self, parameters_dtype, gradient_dtype, learning_rate_dtype,\n accum_dtype, momentum_dtype, stat_dtype):\n valid_types = [mstype.float16, mstype.float32]\n 
validator.check_tensor_type_same({\"parameters\": parameters_dtype}, valid_types, self.name)\n validator.check_tensor_type_same({\"gradient\": gradient_dtype}, valid_types, self.name)\n validator.check_tensor_type_same({\"learning_rate\": learning_rate_dtype}, valid_types, self.name)\n validator.check_tensor_type_same({\"accum\": accum_dtype}, valid_types, self.name)\n validator.check_tensor_type_same({\"momentum\": momentum_dtype}, valid_types, self.name)\n validator.check_tensor_type_same({\"stat\": stat_dtype}, valid_types, self.name)\n return parameters_dtype\n\n\nclass ApplyRMSProp(PrimitiveWithInfer):\n \"\"\"\n Optimizer that implements the Root Mean Square prop(RMSProp) algorithm.\n Please refer to the usage in source code of `nn.RMSProp`.\n\n Note:\n Update `var` according to the RMSProp algorithm.\n\n .. math::\n s_{t} = \\\\rho s_{t-1} + (1 - \\\\rho)(\\\\nabla Q_{i}(w))^2\n\n .. math::\n m_{t} = \\\\beta m_{t-1} + \\\\frac{\\\\eta} {\\\\sqrt{s_{t} + \\\\epsilon}} \\\\nabla Q_{i}(w)\n\n .. math::\n w = w - m_{t}\n\n where :math:`w` represents `var`, which will be updated.\n :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,\n :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.\n :math:`\\\\rho` represents `decay`. :math:`\\\\beta` is the momentum term, represents `momentum`.\n :math:`\\\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.\n :math:`\\\\eta` represents `learning_rate`. :math:`\\\\nabla Q_{i}(w)` represents `grad`.\n\n Args:\n use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.\n\n Inputs:\n - **var** (Tensor) - Weights to be update.\n - **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.\n - **moment** (Tensor) - Delta of `var`, must have the same type as `var`.\n - **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or\n a scalar tensor with float16 or float32 data type.\n - **grad** (Tensor) - Gradients, must have the same type as `var`.\n - **decay** (float) - Decay rate. Only constant value is allowed.\n - **momentum** (float) - Momentum. Only constant value is allowed.\n - **epsilon** (float) - Ridge term. 
Only constant value is allowed.\n\n Outputs:\n Tensor, parameters to be update.\n\n Examples:\n >>> apply_rms = P.ApplyRMSProp()\n >>> input_x = Tensor(1., mindspore.float32)\n >>> mean_square = Tensor(2., mindspore.float32)\n >>> moment = Tensor(1., mindspore.float32)\n >>> grad = Tensor(2., mindspore.float32 )\n >>> learning_rate = Tensor(0.9, mindspore.float32)\n >>> decay = 0.0\n >>> momentum = 1e-10\n >>> epsilon = 0.001\n >>> result = apply_rms(input_x, mean_square, moment, learning_rate, grad, decay, momentum, epsilon)\n (-2.9977674, 0.80999994, 1.9987665)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, use_locking=False):\n self.use_locking = validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n self.init_prim_io_names(inputs=['var', 'mean_square', 'moment', 'learning_rate', 'grad',\n 'rho', 'momentum', 'epsilon'], outputs=['output'])\n self.is_ge = context.get_context(\"enable_ge\")\n self.is_d = context.get_context(\"device_target\") == \"Ascend\"\n\n def infer_shape(self, var_shape, mean_square_shape, moment_shape, learning_rate_shape, grad_shape, decay_shape,\n momentum_shape, epsilon_shape):\n validator.check(\"var_shape\", var_shape, \"mean_square_shape\", mean_square_shape, Rel.EQ, self.name)\n validator.check(\"var_shape\", var_shape, \"moment_shape\", moment_shape, Rel.EQ, self.name)\n validator.check(\"var_shape\", var_shape, \"grad_shape\", grad_shape, Rel.EQ, self.name)\n if not self.is_ge and self.is_d:\n return var_shape, var_shape, var_shape\n return var_shape\n\n def infer_dtype(self, var_dtype, mean_square_dtype, moment_dtype, learning_rate_dtype, grad_dtype, decay_dtype,\n momentum_dtype, epsilon_dtype):\n args = {\"var\": var_dtype, \"mean_square\": mean_square_dtype, \"moment\": moment_dtype, \"grad\": grad_dtype}\n validator.check_tensor_type_same(args, mstype.number_type, self.name)\n\n valid_types = [mstype.float16, mstype.float32]\n args_decay = {\"decay\": decay_dtype, 'momentum': momentum_dtype, \"epsilon\": epsilon_dtype}\n validator.check_type_same(args_decay, valid_types, self.name)\n args_lr = {\"learning_rate\": learning_rate_dtype, \"decay\": decay_dtype}\n validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)\n if not self.is_ge and self.is_d:\n return var_dtype, var_dtype, var_dtype\n return var_dtype\n\n def infer_value(self, var, mean_square, moment, learning_rate, grad, decay, momentum, epsilon):\n if decay is None or momentum is None or epsilon is None:\n raise ValueError(f\"For {self.name}, decay, momentum, epsilon must be const.\")\n\n\nclass ApplyCenteredRMSProp(PrimitiveWithInfer):\n \"\"\"\n Optimizer that implements the centered RMSProp algorithm.\n Please refer to the usage in source code of `nn.RMSProp`.\n\n Note:\n Update `var` according to the centered RMSProp algorithm.\n\n .. math::\n g_{t} = \\\\rho g_{t-1} + (1 - \\\\rho)\\\\nabla Q_{i}(w)\n\n .. math::\n s_{t} = \\\\rho s_{t-1} + (1 - \\\\rho)(\\\\nabla Q_{i}(w))^2\n\n .. math::\n m_{t} = \\\\beta m_{t-1} + \\\\frac{\\\\eta} {\\\\sqrt{s_{t} - g_{t}^2 + \\\\epsilon}} \\\\nabla Q_{i}(w)\n\n .. math::\n w = w - m_{t}\n\n where :math:`w` represents `var`, which will be updated.\n :math:`g_{t}` represents `mean_gradient`, :math:`g_{t-1}` is the last momentent of :math:`g_{t}`.\n :math:`s_{t}` represents `mean_square`, :math:`s_{t-1}` is the last momentent of :math:`s_{t}`,\n :math:`m_{t}` represents `moment`, :math:`m_{t-1}` is the last momentent of :math:`m_{t}`.\n :math:`\\\\rho` represents `decay`. 
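The three ApplyRMSProp equations quoted above translate directly into NumPy. The helper below is an illustrative sketch of the update rule only; it does not claim to reproduce the exact numeric doctest output of the fused operator:

    import numpy as np

    def rmsprop_step(w, mean_square, moment, grad, lr, decay, momentum, epsilon):
        # Illustrative reference of the documented equations.
        mean_square = decay * mean_square + (1.0 - decay) * grad ** 2
        moment = momentum * moment + lr * grad / np.sqrt(mean_square + epsilon)
        w = w - moment
        return w, mean_square, moment

    w = np.array([1.0], dtype=np.float32)
    ms, mom = np.zeros_like(w), np.zeros_like(w)
    grad = np.array([2.0], dtype=np.float32)
    w, ms, mom = rmsprop_step(w, ms, mom, grad, lr=0.01, decay=0.9, momentum=0.9, epsilon=1e-10)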
:math:`\\\\beta` is the momentum term, represents `momentum`.\n :math:`\\\\epsilon` is a smoothing term to avoid division by zero, represents `epsilon`.\n :math:`\\\\eta` represents `learning_rate`. :math:`\\\\nabla Q_{i}(w)` represents `grad`.\n\n Args:\n use_locking (bool): Enable a lock to protect the update of variable tensors. Default: False.\n\n Inputs:\n - **var** (Tensor) - Weights to be update.\n - **mean_gradient** (Tensor) - Mean gradients, must have the same type as `var`.\n - **mean_square** (Tensor) - Mean square gradients, must have the same type as `var`.\n - **moment** (Tensor) - Delta of `var`, must have the same type as `var`.\n - **grad** (Tensor) - Gradients, must have the same type as `var`.\n - **learning_rate** (Union[Number, Tensor]) - Learning rate. Should be a float number or\n a scalar tensor with float16 or float32 data type.\n - **decay** (float) - Decay rate.\n - **momentum** (float) - Momentum.\n - **epsilon** (float) - Ridge term.\n\n Outputs:\n Tensor, parameters to be update.\n\n Examples:\n >>> centered_rms_prop = P.ApplyCenteredRMSProp()\n >>> input_x = Tensor(np.arange(-6, 6).astype(np.float32).reshape(2, 3, 2), mindspore.float32)\n >>> mean_grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)\n >>> mean_square = Tensor(np.arange(-8, 4).astype(np.float32).reshape(2, 3, 2), mindspore.float32)\n >>> moment = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)\n >>> grad = Tensor(np.arange(12).astype(np.float32).reshape(2, 3, 2), mindspore.float32)\n >>> learning_rate = Tensor(0.9, mindspore.float32)\n >>> decay = 0.0\n >>> momentum = 1e-10\n >>> epsilon = 0.05\n >>> result = centered_rms_prop(input_x, mean_grad, mean_square, moment, grad,\n >>> learning_rate, decay, momentum, epsilon)\n [[[ -6. 
-9.024922]\n [-12.049845 -15.074766]\n [-18.09969 -21.124613]]\n [[-24.149532 -27.174456]\n [-30.199379 -33.2243 ]\n [-36.249226 -39.274143]]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, use_locking=False):\n self.use_locking = validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n self.is_ascend = context.get_context(\"device_target\") == \"Ascend\"\n\n def infer_shape(self, var_shape, mean_gradient_shape, mean_square_shape, moment_shape, grad_shape,\n learning_rate_shape, decay_shape, momentum_shape, epsilon_shape):\n validator.check(\"var_shape\", var_shape, \"mean_gradient_shape\", mean_gradient_shape, Rel.EQ, self.name)\n validator.check(\"var_shape\", var_shape, \"mean_square_shape\", mean_square_shape, Rel.EQ, self.name)\n validator.check(\"var_shape\", var_shape, \"moment_shape\", moment_shape, Rel.EQ, self.name)\n validator.check(\"var_shape\", var_shape, \"grad_shape\", grad_shape, Rel.EQ, self.name)\n if self.is_ascend:\n return var_shape, mean_gradient_shape, mean_square_shape, moment_shape\n return var_shape\n\n def infer_dtype(self, var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype, grad_dtype,\n learning_rate_dtype, rho_dtype, momentum_dtype, epsilon_dtype):\n args = {\"var\": var_dtype, \"mean_gradient\": mean_gradient_dtype,\n \"mean_square\": mean_square_dtype, \"moment\": moment_dtype, \"grad\": grad_dtype}\n validator.check_tensor_type_same(args, mstype.number_type, self.name)\n\n valid_types = [mstype.float16, mstype.float32]\n args_rho = {\"rho\": rho_dtype, 'momentum': momentum_dtype, \"epsilon\": epsilon_dtype}\n validator.check_type_same(args_rho, valid_types, self.name)\n args_lr = {\"learning_rate\": learning_rate_dtype, \"rho\": rho_dtype}\n validator.check_scalar_or_tensor_type_same(args_lr, valid_types, self.name, allow_mix=True)\n if self.is_ascend:\n return var_dtype, mean_gradient_dtype, mean_square_dtype, moment_dtype\n return var_dtype\n\n\nclass LayerNorm(Primitive):\n r\"\"\"\n Applies the Layer Normalization to the input tensor.\n\n This operator will normalize the input tensor on given axis. LayerNorm is described in the paper\n `Layer Normalization <https://arxiv.org/abs/1607.06450>`_.\n\n .. math::\n y = \\frac{x - mean}{\\sqrt{variance + \\epsilon}} * \\gamma + \\beta\n\n where :math:`\\gamma` is scale, :math:`\\beta` is bias, :math:`\\epsilon` is epsilon.\n\n Args:\n begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNorm,\n the value should be in [-1, rank(input)). Default: 1.\n begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to\n apply LayerNorm, the value should be in [-1, rank(input)). Default: 1.\n epsilon (float): A value added to the denominator for numerical stability. 
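For comparison with the plain RMSProp sketch earlier, the centered variant described for ApplyCenteredRMSProp also tracks a running mean of the gradient and subtracts its square inside the root. Again a hedged NumPy illustration of the quoted equations, not the device kernel:

    import numpy as np

    def centered_rmsprop_step(w, mean_grad, mean_square, moment, grad,
                              lr, decay, momentum, epsilon):
        # Illustrative reference: the mean_grad term centers the second moment.
        mean_grad = decay * mean_grad + (1.0 - decay) * grad
        mean_square = decay * mean_square + (1.0 - decay) * grad ** 2
        denom = np.sqrt(mean_square - mean_grad ** 2 + epsilon)
        moment = momentum * moment + lr * grad / denom
        return w - moment, mean_grad, mean_square, moment

    w, mg, ms, mom = centered_rmsprop_step(np.ones(2), np.zeros(2), np.zeros(2), np.zeros(2),
                                           np.full(2, 0.5), lr=0.01, decay=0.9,
                                           momentum=0.9, epsilon=1e-10)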
Default: 1e-7.\n\n Inputs:\n - **input_x** (Tensor) - Tensor of shape :math:`(N, \\ldots)`.\n The input of LayerNorm.\n - **gamma** (Tensor) - Tensor of shape :math:`(P_0, \\ldots, P_\\text{begin_params_axis})`.\n The learnable parameter `gamma` as the scale on norm.\n - **beta** (Tensor) - Tensor of shape :math:`(P_0, \\ldots, P_\\text{begin_params_axis})`.\n The learnable parameter `beta` as the scale on norm.\n\n Outputs:\n tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.\n\n - **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.\n The shape is :math:`(N, C)`.\n - **mean** (Tensor) - Tensor of shape :math:`(C,)`.\n - **variance** (Tensor) - Tensor of shape :math:`(C,)`.\n\n Examples:\n >>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)\n >>> gamma = Tensor(np.ones([3]), mindspore.float32)\n >>> beta = Tensor(np.ones([3]), mindspore.float32)\n >>> layer_norm = P.LayerNorm()\n >>> output = layer_norm(input_x, gamma, beta)\n ([[-0.22474492, 1., 2.2247488], [-0.22474492, 1., 2.2247488]],\n [[2.], [2.]], [[0.6666667], [0.6666667]])\n \"\"\"\n\n @prim_attr_register\n def __init__(self, begin_norm_axis=1, begin_params_axis=1, epsilon=1e-7):\n validator.check_value_type('begin_norm_axis', begin_norm_axis, [int], self.name)\n validator.check_value_type('begin_params_axis', begin_params_axis, [int], self.name)\n validator.check_value_type('epsilon', epsilon, [float], self.name)\n\n\nclass L2Normalize(PrimitiveWithInfer):\n r\"\"\"\n L2 normalization Operator.\n\n This operator will normalizes the input using the given axis. The function is shown as follows:\n\n .. math::\n \\text{output} = \\frac{x}{\\sqrt{\\text{max}(\\text{sum} (\\text{input_x}^2), \\epsilon)}},\n\n where :math:`\\epsilon` is epsilon.\n\n Args:\n axis (int): The begin axis for the input to apply L2 normalize. Default: 0.\n epsilon (float): A small value added for numerical stability. Default: 1e-4.\n\n Inputs:\n - **input_x** (Tensor) - Input to compute the normalization. Data type should be float16 or float32.\n\n Outputs:\n Tensor, with the same type and shape as the input.\n\n Examples:\n >>> l2_normalize = P.L2Normalize()\n >>> input_x = Tensor(np.random.randint(-256, 256, (2, 3, 4)), mindspore.float32)\n >>> result = l2_normalize(input_x)\n [[[-0.47247353 -0.30934513 -0.4991462 0.8185567 ]\n [-0.08070751 -0.9961299 -0.5741758 0.09262337]\n [-0.9916556 -0.3049123 0.5730487 -0.40579924]\n [[-0.88134485 0.9509498 -0.86651784 0.57442576]\n [ 0.99673784 0.08789381 -0.8187321 0.9957012 ]\n [ 0.12891524 -0.9523804 -0.81952125 0.91396334]]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=0, epsilon=1e-4):\n validator.check_value_type('axis', axis, [int], self.name)\n validator.check_value_type('epsilon', epsilon, [int, float], self.name)\n\n def infer_shape(self, input_x):\n dim = len(input_x)\n validator.check_int_range('axis value', self.axis, -dim, dim, Rel.INC_LEFT, self.name)\n return input_x\n\n def infer_dtype(self, input_x):\n validator.check_subclass(\"x\", input_x, mstype.tensor, self.name)\n validator.check_tensor_type_same({\"input_x\": input_x}, [mstype.float16, mstype.float32], self.name)\n return input_x\n\n\nclass DropoutGenMask(Primitive):\n \"\"\"\n Generates the mask value for the input shape.\n\n Args:\n Seed0 (int): Seed0 value for random generating. Default: 0.\n Seed1 (int): Seed1 value for random generating. 
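A small NumPy sketch of the LayerNorm formula above (an illustrative helper of ours), normalizing over the axes from `begin_norm_axis` onward; for the doctest input it reproduces the documented output, mean and variance up to float rounding:

    import numpy as np

    def layer_norm(x, gamma, beta, begin_norm_axis=1, epsilon=1e-7):
        axes = tuple(range(begin_norm_axis, x.ndim))
        mean = x.mean(axis=axes, keepdims=True)
        var = x.var(axis=axes, keepdims=True)
        return (x - mean) / np.sqrt(var + epsilon) * gamma + beta, mean, var

    x = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
    y, mean, var = layer_norm(x, np.ones(3, np.float32), np.ones(3, np.float32))
    # y ~ [[-0.2247, 1.0, 2.2247], [-0.2247, 1.0, 2.2247]]
    # mean = [[2.], [2.]], var = [[0.6667], [0.6667]]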
Default: 0.\n\n Inputs:\n - **shape** (tuple[int]) - The shape of target mask.\n - **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,\n means dropping out 10% of input units.\n\n Outputs:\n Tensor, the value of generated mask for input shape.\n\n Examples:\n >>> dropout_gen_mask = P.DropoutGenMask()\n >>> shape = (20, 16, 50)\n >>> keep_prob = Tensor(0.5, mindspore.float32)\n >>> mask = dropout_gen_mask(shape, keep_prob)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, Seed0=0, Seed1=0):\n self.init_prim_io_names(inputs=['shape', 'keep_prob'], outputs=['output'])\n validator.check_value_type(\"Seed0\", Seed0, [int], self.name)\n validator.check_value_type(\"Seed1\", Seed1, [int], self.name)\n self.add_prim_attr(\"_random_effect\", True)\n\n\nclass DropoutDoMask(PrimitiveWithInfer):\n \"\"\"\n Applies dropout mask on the input tensor.\n\n Take the mask output of DropoutGenMask as input, and apply dropout on the input.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor.\n - **mask** (Tensor) - The mask to be applied on `input_x`, which is the output of `DropoutGenMask`. And the\n shape of `input_x` must be the same as the value of `DropoutGenMask`'s input `shape`. If input wrong `mask`,\n the output of `DropoutDoMask` are unpredictable.\n - **keep_prob** (Tensor) - The keep rate, between 0 and 1, e.g. keep_prob = 0.9,\n means dropping out 10% of input units. The value of `keep_prob` is the same as the input `keep_prob` of\n `DropoutGenMask`.\n\n Outputs:\n Tensor, the value that applied dropout on.\n\n Examples:\n >>> x = Tensor(np.ones([20, 16, 50]), mindspore.float32)\n >>> shape = (20, 16, 50)\n >>> keep_prob = Tensor(0.5, mindspore.float32)\n >>> dropout_gen_mask = P.DropoutGenMask()\n >>> dropout_do_mask = P.DropoutDoMask()\n >>> mask = dropout_gen_mask(shape, keep_prob)\n >>> output = dropout_do_mask(x, mask, keep_prob)\n >>> assert output.shape == (20, 16, 50)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n pass\n\n def __infer__(self, input_x, mask, keep_prob):\n input_x_shape = input_x['shape']\n mask_shape = mask['shape']\n keep_prob_shape = keep_prob['shape']\n validator.check(\"keep_prob's dim\", len(keep_prob_shape), '0(scalar)', 0, Rel.EQ, self.name)\n size_x = reduce(lambda x, y: x * y, input_x_shape)\n if len(mask_shape) != 1:\n raise ValueError(\"DropoutDoMask mask shape should be 1-dimension.\")\n size_y = mask_shape[0] * 8\n if size_x > size_y:\n raise ValueError(f\"DropoutDoMask y mask do not math input input_x shape:\"\n \"{input_x_shape}, mask shape: {mask_shape}.\")\n\n validator.check_tensor_type_same({\"input_x\": input_x['dtype']}, [mstype.float32, mstype.float16, mstype.int32],\n self.name)\n validator.check_tensor_type_same({\"input_mask\": mask['dtype']}, [mstype.uint8], self.name)\n\n keep_prob_v = keep_prob['value']\n if keep_prob_v is not None:\n validator.check_number_range('keep_prob', keep_prob_v.asnumpy(), 0, 1, Rel.INC_BOTH, self.name)\n\n out = {'shape': input_x_shape,\n 'dtype': input_x['dtype'],\n 'value': None}\n return out\n\n\nclass ResizeBilinear(PrimitiveWithInfer):\n r\"\"\"\n Resizes the image to certain size using bilinear interpolation.\n\n The resizing only affects the lower two dimensions which represent the height and width. 
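The `size_y = mask_shape[0] * 8` check in DropoutDoMask above implies that the uint8 mask produced by DropoutGenMask packs one keep/drop decision per bit. The sketch below illustrates that packing and one typical way to apply it with rescaling; the helper names, the bit layout and the random generator are assumptions for illustration, not documented kernel behaviour:

    import numpy as np

    def make_bit_mask(shape, keep_prob, rng):
        # One keep/drop bit per element, packed 8 bits to a uint8 byte (illustrative).
        keep = rng.random(int(np.prod(shape))) < keep_prob
        return np.packbits(keep)

    def apply_bit_mask(x, mask, keep_prob):
        keep = np.unpackbits(mask)[:x.size].reshape(x.shape).astype(x.dtype)
        return x * keep / keep_prob   # rescale kept units by 1/keep_prob

    rng = np.random.default_rng(0)
    x = np.ones((20, 16, 50), dtype=np.float32)
    mask = make_bit_mask(x.shape, 0.5, rng)
    out = apply_bit_mask(x, mask, 0.5)
    assert out.shape == x.shape and mask.dtype == np.uint8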
The input images\n can be represented by different data types, but the data types of output images are always float32.\n\n Args:\n size (tuple[int]): A tuple of 2 int elements `(new_height, new_width)`, the new size for the images.\n align_corners (bool): If it's true, rescale input by `(new_height - 1) / (height - 1)`,\n which exactly aligns the 4 corners of images and resized images. If it's false,\n rescale by `new_height / height`. Default: False.\n\n Inputs:\n - **input** (Tensor) - Image to be resized. Tensor of shape `(N_i, ..., N_n, height, width)`,\n with data type of float32 or float16.\n\n Outputs:\n Tensor, resized image. Tensor of shape `(N_i, ..., N_n, new_height, new_width)` in `float32`.\n\n Examples:\n >>> tensor = Tensor([[[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]]], mindspore.float32)\n >>> resize_bilinear = P.ResizeBilinear((5, 5))\n >>> result = resize_bilinear(tensor)\n >>> assert result.shape == (1, 1, 5, 5)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, size, align_corners=False):\n pass\n\n def infer_shape(self, input_shape):\n input_shape = list(input_shape)\n batch, channel, _, _ = input_shape\n out_shape = [batch, channel]\n for i in self.size:\n out_shape.append(int(i))\n return out_shape\n\n def infer_dtype(self, input_dtype):\n validator.check_tensor_type_same({'input_dtype': input_dtype}, [mstype.float16, mstype.float32], self.name)\n return mstype.tensor_type(mstype.float32)\n\n\nclass OneHot(PrimitiveWithInfer):\n r\"\"\"\n Computes a one-hot tensor.\n\n Makes a new tensor, whose locations represented by indices in `indices` take value `on_value`, while all\n other locations take value `off_value`.\n\n Note:\n If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.\n\n Args:\n axis (int): Position to insert the value. e.g. If `indices` shape is [n, c], and `axis` is `-1` the output shape\n will be [n, c, depth], If `axis` is `0` the output shape will be [depth, n, c]. Default: -1.\n\n Inputs:\n - **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \\ldots, X_n)`.\n Data type must be int32.\n - **depth** (int) - A scalar defining the depth of the one hot dimension.\n - **on_value** (Tensor) - A value to fill in output when `indices[j] = i`. With data type of float16 or float32.\n - **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.\n Has the same data type with as `on_value`.\n\n Outputs:\n Tensor, one_hot tensor. 
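The two `align_corners` scale factors named in the ResizeBilinear docstring above determine which source coordinate each output pixel samples from. A short NumPy illustration of just that mapping; the interpolation weights and any half-pixel offsets of the real kernel are not claimed here:

    import numpy as np

    def source_coords(new_size, old_size, align_corners):
        out_idx = np.arange(new_size, dtype=np.float32)
        if align_corners:
            scale = (old_size - 1) / (new_size - 1)   # endpoints map onto endpoints
        else:
            scale = old_size / new_size
        return out_idx * scale

    print(source_coords(5, 2, align_corners=True))    # [0.   0.25 0.5  0.75 1.  ]
    print(source_coords(5, 2, align_corners=False))   # [0.  0.4 0.8 1.2 1.6]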
Tensor of shape :math:`(X_0, \\ldots, X_{axis}, \\text{depth} ,X_{axis+1}, \\ldots, X_n)`.\n\n Examples:\n >>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)\n >>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)\n >>> onehot = P.OneHot()\n >>> result = onehot(indices, depth, on_value, off_value)\n [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, axis=-1):\n self.init_prim_io_names(inputs=['indices', 'depth', 'on_value', 'off_value'], outputs=['output'])\n validator.check_value_type(\"axis\", axis, [int], self.name)\n\n def __infer__(self, indices, depth, on_value, off_value):\n # check type\n validator.check_tensor_type_same({\"indices\": indices['dtype']}, (mstype.int32,), self.name)\n validator.check_type_name(\"depth\", depth['dtype'], mstype.int_type, self.name)\n args = {\"on_value\": on_value['dtype'], \"off_value\": off_value['dtype']}\n validator.check_tensor_type_same(args, (mstype.float16, mstype.float32), self.name)\n\n # check shape\n indices_shp = indices['shape']\n validator.check_int_range(\"axis\", self.axis, -1, len(indices_shp), Rel.INC_BOTH, self.name)\n depth_val = depth['value']\n validator.check_integer(\"depth\", depth_val, 0, Rel.GE, self.name)\n # create new dimension at end if self.axis is -1\n _ = indices_shp.insert(self.axis, depth_val) if self.axis >= 0 else indices_shp.append(depth_val)\n\n return {'shape': indices_shp,\n 'dtype': on_value['dtype'],\n 'value': None}\n\n\nclass Gelu(PrimitiveWithInfer):\n r\"\"\"\n Gaussian Error Linear Units activation function.\n\n GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.\n And also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding\n <https://arxiv.org/abs/1810.04805>`_.\n\n Gelu is defined as follows:\n\n .. math::\n \\text{output} = 0.5 * x * (1 + erf(x / \\sqrt{2})),\n\n where :math:`erf` is the \"Gauss error function\" .\n\n Inputs:\n - **input_x** (Tensor) - Input to compute the Gelu with data type of float16 or float32.\n\n Outputs:\n Tensor, with the same type and shape as input.\n\n Examples:\n >>> tensor = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)\n >>> gelu = P.Gelu()\n >>> result = gelu(tensor)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init GeLU\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['output'])\n\n def infer_shape(self, input_x):\n return input_x\n\n def infer_dtype(self, input_x):\n validator.check_tensor_type_same({\"input_x\": input_x}, (mstype.float16, mstype.float32), self.name)\n return input_x\n\n\nclass GetNext(PrimitiveWithInfer):\n \"\"\"\n Returns the next element in the dataset queue.\n\n Note:\n The GetNext operation needs to be associated with network and it also depends on the init_dataset interface,\n it can't be used directly as a single operation.\n For details, please refer to `nn.DataWrapper` source code.\n\n Args:\n types (list[:class:`mindspore.dtype`]): The type of the outputs.\n shapes (list[tuple[int]]): The dimensionality of the outputs.\n output_num (int): The output number, length of `types` and `shapes`.\n shared_name (str): The queue name of `init_dataset` interface.\n\n Inputs:\n No inputs.\n\n Outputs:\n tuple[Tensor], the output of Dataset. 
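The erf-based Gelu definition above can be spot-checked with `math.erf`; a tiny sketch of ours, not the fused kernel:

    import math
    import numpy as np

    def gelu(x):
        # 0.5 * x * (1 + erf(x / sqrt(2))), applied element-wise.
        erf = np.vectorize(math.erf)
        return 0.5 * x * (1.0 + erf(x / math.sqrt(2.0)))

    print(gelu(np.array([1.0, 2.0, 3.0])))   # approximately [0.8413 1.9545 2.996]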
The shape is described in `shapes`\n and the type is described is `types`.\n\n Examples:\n >>> get_next = P.GetNext([mindspore.float32, mindspore.int32], [[32, 1, 28, 28], [10]], 2, 'shared_name')\n >>> feature, label = get_next()\n \"\"\"\n\n @prim_attr_register\n def __init__(self, types, shapes, output_num, shared_name):\n validator.check_value_type(\"types\", types, [list, tuple], self.name)\n validator.check_value_type(\"shapes\", shapes, [list, tuple], self.name)\n validator.check(\"types length\", len(types), \"shapes length\", len(shapes), Rel.EQ, self.name)\n validator.check_value_type(\"output_num\", output_num, [int], self.name)\n\n def infer_shape(self):\n return tuple(self.shapes)\n\n def infer_dtype(self):\n return tuple(self.types)\n\n\nclass PReLU(PrimitiveWithInfer):\n r\"\"\"\n Parametric Rectified Linear Unit activation function.\n\n PReLU is described in the paper `Delving Deep into Rectifiers: Surpassing Human-Level Performance on\n ImageNet Classification <https://arxiv.org/abs/1502.01852>`_. Defined as follows:\n\n .. math::\n prelu(x_i)= \\max(0, x_i) + \\min(0, w * x_i),\n\n where :math:`x_i` is an element of an channel of the input.\n\n Note:\n 1-dimensional input_x is not supported.\n\n Inputs:\n - **input_x** (Tensor) - Float tensor, representing the output of the preview layer.\n With data type of float16 or float32.\n - **weight** (Tensor) - Float Tensor, w > 0, there is only two shapes are legitimate,\n 1 or the number of channels at input. With data type of float16 or float32.\n\n Outputs:\n Tensor, with the same type as `input_x`.\n\n Detailed information, please refer to `nn.PReLU`.\n\n Examples:\n >>> import mindspore\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.prelu = P.PReLU()\n >>> def construct(self, input_x, weight):\n >>> result = self.prelu(input_x, weight)\n >>> return result\n >>>\n >>> input_x = Tensor(np.random.randint(-3, 3, (2, 3, 2)), mindspore.float32)\n >>> weight = Tensor(np.array([0.1, 0.6, -0.3]), mindspore.float32)\n >>> net = Net()\n >>> result = net(input_x, weight)\n [[[-0.1 1. ]\n [ 0. 2. ]\n [0. 0. ]]\n\n [[-0.2 -0.1 ]\n [2. 
-1.8000001]\n [0.6 0.6 ]]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n pass\n\n def infer_shape(self, input_x_shape, weight_shape):\n input_x_dim = len(input_x_shape)\n weight_dim = len(weight_shape)\n\n if input_x_dim == 1:\n raise ValueError(f'For \\'{self.name}\\' input_x rank 1 is not supported.')\n\n if weight_dim != 1:\n raise ValueError(f'For \\'{self.name}\\' weight_dim must be 1, while weight_dim is {weight_dim}.')\n\n if weight_shape[0] != input_x_shape[1] and weight_shape[0] != 1:\n raise ValueError(f'For \\'{self.name}\\' channel of input_x and weight must be matched,'\n f' while channel of input_x is {input_x_shape[1]},'\n f' weight_shape[0] is {weight_shape[0]}.')\n\n return input_x_shape\n\n def infer_dtype(self, input_x_dtype, weight_dtype):\n valid_types = (mstype.float16, mstype.float32)\n validator.check_tensor_type_same({\"input_x\": input_x_dtype}, valid_types, self.name)\n validator.check_tensor_type_same({\"weight\": weight_dtype}, valid_types, self.name)\n return input_x_dtype\n\n\nclass LSTM(PrimitiveWithInfer):\n \"\"\"\n Performs the long short term memory(LSTM) on the input.\n\n Detailed information, please refer to `nn.LSTM`.\n \"\"\"\n\n @prim_attr_register\n def __init__(self, input_size, hidden_size, num_layers, has_bias, bidirectional, dropout):\n self.input_size = validator.check_integer(\"input_size\", input_size, 0, Rel.GT, self.name)\n self.hidden_size = validator.check_integer(\"hidden_size\", hidden_size, 0, Rel.GT, self.name)\n self.num_layers = validator.check_integer(\"num_layers\", num_layers, 0, Rel.GT, self.name)\n self.has_bias = validator.check_value_type(\"has_bias\", has_bias, (bool,), self.name)\n self.bidirectional = validator.check_value_type(\"bidirectional\", bidirectional, (bool,), self.name)\n self.dropout = validator.check_value_type(\"dropout\", dropout, [float], self.name)\n self.dropout = validator.check_number_range('dropout', dropout, 0, 1, Rel.INC_BOTH, self.name)\n\n if bidirectional:\n self.num_directions = 2\n else:\n self.num_directions = 1\n\n def infer_shape(self, x_shape, h_shape, c_shape, w_shape):\n # (seq, batch_size, feature)\n validator.check_integer(\"x rank\", len(x_shape), 3, Rel.EQ, self.name)\n validator.check_integer(\"x[2]\", x_shape[2], self.input_size, Rel.EQ, self.name)\n\n # h and c should be same shape\n validator.check_integer(\"h rank\", len(h_shape), 3, Rel.EQ, self.name)\n validator.check(\"h_shape\", h_shape, \"c_shape\", c_shape, Rel.EQ, self.name)\n\n # (num_layers * num_directions, batch, hidden_size)\n validator.check_integer(\"h[0]\", h_shape[0], self.num_layers * self.num_directions, Rel.EQ, self.name)\n validator.check_integer(\"h[1]\", h_shape[1], x_shape[1], Rel.EQ, self.name)\n validator.check_integer(\"h[2]\", h_shape[2], self.hidden_size, Rel.EQ, self.name)\n\n y_shape = (x_shape[0], x_shape[1], self.hidden_size * self.num_directions)\n\n # set arbitrary shape for reserved space\n type_size = 4\n gates_ws_ld = self.get_good_ld(self.hidden_size * 4, type_size)\n states_ws_ld = self.get_good_ld(max(self.hidden_size, self.input_size), type_size)\n self.ws_gates_size = self.num_layers * self.num_directions * x_shape[0] * x_shape[1] * gates_ws_ld * type_size\n self.ws_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[\n 1] * states_ws_ld * type_size\n self.ws_c_states_size = (self.num_layers + 1) * self.num_directions * (x_shape[0] + 1) * x_shape[\n 1] * states_ws_ld * type_size\n self.ws_diff_states_size = (self.num_layers + 1) * 
self.num_directions * (x_shape[0] + 1) * (2 + 1) * x_shape[\n 1] * states_ws_ld * type_size\n self.ws_grid_comp_size = 0\n self.page_size = 4096\n current_offset = 0\n current_offset += self.ws_gates_size\n current_offset = self.rnd_up(current_offset, self.page_size)\n current_offset += self.ws_states_size\n current_offset = self.rnd_up(current_offset, self.page_size)\n current_offset += self.ws_c_states_size\n current_offset = self.rnd_up(current_offset, self.page_size)\n current_offset += self.ws_diff_states_size\n current_offset = self.rnd_up(current_offset, self.page_size)\n current_offset += self.ws_grid_comp_size\n reserved_shape = (current_offset, 1)\n state_shape = (1, 1)\n return (y_shape, h_shape, c_shape, reserved_shape, state_shape)\n\n def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype):\n args = {'x': x_dtype, 'h': h_dtype, 'c': c_dtype, 'w': w_dtype}\n validator.check_tensor_type_same(args, (mstype.float32, mstype.float16), self.name)\n return (x_dtype, x_dtype, x_dtype, x_dtype, x_dtype)\n\n def rnd_up(self, current_offset, page_size):\n return ((current_offset + page_size - 1) // page_size) * page_size\n\n def get_good_ld(self, dim, type_size):\n ld = self.rnd_up(dim, 64 // type_size)\n if ld * 256 == 0:\n return ld + 64 // type_size\n return ld\n\n\nclass SigmoidCrossEntropyWithLogits(PrimitiveWithInfer):\n r\"\"\"\n Uses the given logits to compute sigmoid cross entropy.\n\n Note:\n Sets input logits as `X`, input label as `Y`, output as `loss`. Then,\n\n .. math::\n p_{ij} = sigmoid(X_{ij}) = \\frac{1}{1 + e^{-X_{ij}}}\n\n .. math::\n loss_{ij} = -[Y_{ij} * ln(p_{ij}) + (1 - Y_{ij})ln(1 - p_{ij})]\n\n Inputs:\n - **logits** (Tensor) - Input logits.\n - **label** (Tensor) - Ground truth label.\n\n Outputs:\n Tensor, with the same shape and type as input `logits`.\n\n Examples:\n >>> logits = Tensor(np.random.randn(2, 3).astype(np.float16))\n >>> labels = Tensor(np.random.randn(2, 3).astype(np.float16))\n >>> sigmoid = P.SigmoidCrossEntropyWithLogits()\n >>> sigmoid(logits, labels)\n \"\"\"\n\n @prim_attr_register\n def __init__(self):\n \"\"\"Init SigmoidCrossEntropyWithLogits\"\"\"\n self.init_prim_io_names(inputs=['predict', 'target'], outputs=['loss'])\n\n def infer_shape(self, x_shape, y_shape):\n validator.check(\"x_shape\", x_shape, \"y_shape\", y_shape, Rel.EQ, self.name)\n return x_shape\n\n def infer_dtype(self, x_dtype, y_dtype):\n args = {\"x_dtype\": x_dtype, \"y_dtype\": y_dtype}\n validator.check_tensor_type_same(args, mstype.number_type, self.name)\n return x_dtype\n\n\nclass Pad(PrimitiveWithInfer):\n \"\"\"\n Pads input tensor according to the paddings.\n\n Args:\n paddings (tuple): The shape of parameter `paddings` is (N, 2). N is the rank of input data. All elements of\n paddings are int type. For the input in `D` th dimension, paddings[D, 0] indicates how many sizes to be\n extended ahead of the input tensor in the `D` th dimension, and paddings[D, 1] indicates how many sizes to\n be extended behind of the input tensor in the `D` th dimension.\n\n Inputs:\n - **input_x** (Tensor) - The input tensor.\n\n Outputs:\n Tensor, the tensor after padding.\n\n Examples:\n >>> input_tensor = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)\n >>> pad_op = P.Pad(((1, 2), (2, 1)))\n >>> output_tensor = pad_op(input_tensor)\n >>> assert output_tensor == Tensor(np.array([[ 0. , 0. , 0. , 0. , 0. , 0. ],\n >>> [ 0. , 0. , -0.1, 0.3, 3.6, 0. ],\n >>> [ 0. , 0. , 0.4, 0.5, -3.2, 0. ],\n >>> [ 0. , 0. , 0. , 0. , 0. , 0. ],\n >>> [ 0. , 0. 
, 0. , 0. , 0. , 0. ]]), mindspore.float32)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, paddings):\n \"\"\"Init Pad\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['y'])\n if not isinstance(paddings, tuple):\n raise TypeError('Paddings must be tuple type.')\n for item in paddings:\n if len(item) != 2:\n raise ValueError('The shape of paddings must be (n, 2).')\n self.paddings = paddings\n\n def infer_shape(self, x):\n paddings = np.array(self.paddings)\n validator.check_integer('paddings.shape', paddings.size, len(x) * 2, Rel.EQ, self.name)\n if not np.all(paddings >= 0):\n raise ValueError('All elements of paddings must be >= 0.')\n y_shape = ()\n for i in range(int(paddings.size / 2)):\n y_shape += ((x[i] + paddings[i, 0] + paddings[i, 1]),)\n return y_shape\n\n def infer_dtype(self, x):\n validator.check_subclass(\"input_x\", x, mstype.tensor, self.name)\n return x\n\n\nclass MirrorPad(PrimitiveWithInfer):\n \"\"\"\n Pads the input tensor according to the paddings and mode.\n\n Args:\n mode (str): Specifies padding mode. The optional values are \"REFLECT\", \"SYMMETRIC\".\n Default: \"REFLECT\".\n\n Inputs:\n - **input_x** (Tensor) - The input tensor.\n - **paddings** (Tensor) - The paddings tensor. The value of `paddings` is a matrix(list),\n and its shape is (N, 2). N is the rank of input data. All elements of paddings\n are int type. For the input in `D` th dimension, paddings[D, 0] indicates how many sizes to be\n extended ahead of the input tensor in the `D` th dimension, and paddings[D, 1] indicates how many sizes to\n be extended behind of the input tensor in the `D` th dimension.\n\n Outputs:\n Tensor, the tensor after padding.\n\n - If `mode` is \"REFLECT\", it uses a way of symmetrical copying throught the axis of symmetry to fill in.\n If the `input_x` is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the\n Outputs is [[6,5,4,5,6,5,4],[3,2,1,2,3,2,1],[6,5,4,5,6,5,4],[9,8,7,8,9,8,7],[6,5,4,5,6,5,4]].\n - If `mode` is \"SYMMETRIC\", the filling method is similar to the \"REFLECT\". It is also copied\n according to the symmetry axis, except that it includes the symmetry axis. 
If the `input_x`\n is [[1,2,3],[4,5,6],[7,8,9]] and `paddings` is [[1,1],[2,2]], then the Outputs is\n [[2,1,1,2,3,3,2],[2,1,1,2,3,3,2],[5,4,4,5,6,6,5],[8,7,7,8,9,9,8],[8,7,7,8,9,9,8]].\n\n Examples:\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.pad = P.MirrorPad(mode=\"REFLECT\")\n >>> def construct(self, x, paddings):\n >>> return self.pad(x, paddings)\n >>> x = np.random.random(size=(2, 3)).astype(np.float32)\n >>> paddings = Tensor([[1,1],[2,2]])\n >>> pad = Net()\n >>> ms_output = pad(Tensor(x), paddings)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, mode='REFLECT'):\n \"\"\"Init Pad\"\"\"\n validator.check_string('mode', mode, ['REFLECT', 'SYMMETRIC'], self.name)\n self.mode = mode\n self.set_const_input_indexes([1])\n\n def __infer__(self, input_x, paddings):\n validator.check_subclass(\"input_x\", input_x['dtype'], mstype.tensor, self.name)\n validator.check_subclass(\"paddings\", paddings['dtype'], mstype.tensor, self.name)\n x_shape = list(input_x['shape'])\n paddings_value = paddings['value'].asnumpy()\n paddings_size = paddings_value.size\n validator.check_integer('paddings.shape', paddings_size, len(x_shape) * 2, Rel.EQ, self.name)\n if not np.all(paddings_value >= 0):\n raise ValueError('All elements of paddings must be >= 0.')\n adjust = 0\n if self.mode == 'SYMMETRIC':\n adjust = 1\n for i in range(0, int(paddings_size / 2)):\n if (paddings_value[i, 0] >= x_shape[i] + adjust) or (paddings_value[i, 1] >= x_shape[i] + adjust):\n raise ValueError('At least one dim has too high a padding value for this input and mode')\n y_shape = ()\n for i in range(0, int(paddings_size / 2)):\n y_shape += ((x_shape[i] + paddings_value[i, 0] + paddings_value[i, 1]),)\n return {'shape': y_shape,\n 'dtype': input_x['dtype'],\n 'value': None}\n\n\nclass ROIAlign(PrimitiveWithInfer):\n \"\"\"\n Computes Region of Interest (RoI) Align operator.\n\n The operator computes the value of each sampling point by bilinear interpolation from the nearby grid points on the\n feature map. No quantization is performed on any coordinates involved in the RoI, its bins, or the sampling\n points. The details of (RoI) Align operator are described in `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.\n\n Args:\n pooled_height (int): The output features' height.\n pooled_width (int): The output features' width.\n spatial_scale (float): A scaling factor that maps the raw image coordinates to the input\n feature map coordinates. Suppose the height of a RoI is `ori_h` in the raw image and `fea_h` in the\n input feature map, the `spatial_scale` should be `fea_h / ori_h`.\n sample_num (int): Number of sampling points. Default: 2.\n roi_end_mode (int): Number must be 0 or 1. Default: 1.\n\n Inputs:\n - **features** (Tensor) - The input features, whose shape should be `(N, C, H, W)`.\n - **rois** (Tensor) - The shape is `(rois_n, 5)`. With data type of float16 or float32.\n `rois_n` represents the number of RoI. The size of the second dimension should be `5` and the `5` colunms\n are `(image_index, top_left_x, top_left_y, bottom_right_x, bottom_right_y)`. `image_index` represents the\n index of image. `top_left_x` and `top_left_y` represent the `x, y` coordinates of the top left corner\n of corresponding RoI, respectively. 
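NumPy's `np.pad` implements the same two filling rules that the MirrorPad docstring above describes, so it can serve as a reference for the documented REFLECT and SYMMETRIC outputs:

    import numpy as np

    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    paddings = ((1, 1), (2, 2))

    reflect = np.pad(x, paddings, mode='reflect')      # excludes the symmetry axis
    symmetric = np.pad(x, paddings, mode='symmetric')  # includes the symmetry axis

    print(reflect[1])    # [3 2 1 2 3 2 1], second row of the REFLECT example above
    print(symmetric[0])  # [2 1 1 2 3 3 2], first row of the SYMMETRIC example above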
`bottom_right_x` and `bottom_right_y` represent the `x, y`\n coordinates of the bottom right corner of corresponding RoI, respectively.\n\n Outputs:\n Tensor, the shape is `(rois_n, C, pooled_height, pooled_width)`.\n\n Examples:\n >>> input_tensor = Tensor(np.array([[[[1., 2.], [3., 4.]]]]), mindspore.float32)\n >>> rois = Tensor(np.array([[0, 0.2, 0.3, 0.2, 0.3]]), mindspore.float32)\n >>> roi_align = P.ROIAlign(2, 2, 0.5, 2)\n >>> output_tensor = roi_align(input_tensor, rois)\n >>> assert output_tensor == Tensor(np.array([[[[2.15]]]]), mindspore.float32)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, pooled_height, pooled_width, spatial_scale, sample_num=2, roi_end_mode=1):\n \"\"\"init ROIAlign\"\"\"\n validator.check_value_type(\"pooled_height\", pooled_height, [int], self.name)\n validator.check_value_type(\"pooled_width\", pooled_width, [int], self.name)\n validator.check_value_type(\"spatial_scale\", spatial_scale, [float], self.name)\n validator.check_value_type(\"sample_num\", sample_num, [int], self.name)\n validator.check_value_type(\"roi_end_mode\", roi_end_mode, [int], self.name)\n validator.check_int_range(\"roi_end_mode\", roi_end_mode, 0, 1, Rel.INC_BOTH, self.name)\n self.pooled_height = pooled_height\n self.pooled_width = pooled_width\n self.spatial_scale = spatial_scale\n self.sample_num = sample_num\n self.roi_end_mode = roi_end_mode\n\n def infer_shape(self, inputs_shape, rois_shape):\n return [rois_shape[0], inputs_shape[1], self.pooled_height, self.pooled_width]\n\n def infer_dtype(self, inputs_type, rois_type):\n valid_types = (mstype.float16, mstype.float32)\n validator.check_tensor_type_same({\"inputs_type\": inputs_type}, valid_types, self.name)\n validator.check_tensor_type_same({\"rois_type\": rois_type}, valid_types, self.name)\n return inputs_type\n\n\nclass Adam(PrimitiveWithInfer):\n r\"\"\"\n Updates gradients by Adaptive Moment Estimation (Adam) algorithm.\n\n The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.\n\n The updating formulas are as follows,\n\n .. math::\n \\begin{array}{ll} \\\\\n m = \\beta_1 * m + (1 - \\beta_1) * g \\\\\n v = \\beta_2 * v + (1 - \\beta_2) * g * g \\\\\n l = \\alpha * \\frac{\\sqrt{1-\\beta_2^t}}{1-\\beta_1^t} \\\\\n w = w - l * \\frac{m}{\\sqrt{v} + \\epsilon}\n \\end{array}\n\n :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents\n `gradient`, :math:`l` represents scaling factor `lr`, :math:`\\beta_1, \\beta_2` represent `beta1` and `beta2`,\n :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and\n `beta2_power`, :math:`\\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\\epsilon` represents\n `epsilon`.\n\n Args:\n use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.\n If true, updates of the var, m, and v tensors will be protected by a lock.\n If false, the result is unpredictable. Default: False.\n use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.\n If true, update the gradients using NAG.\n If true, update the gradients without using NAG. 
Default: False.\n\n Inputs:\n - **var** (Tensor) - Weights to be updated.\n - **m** (Tensor) - The 1st moment vector in the updating formula, has the same type as `var`.\n - **v** (Tensor) - the 2nd moment vector in the updating formula.\n Mean square gradients with the same type as `var`.\n - **beta1_power** (float) - :math:`beta_1^t` in the updating formula.\n - **beta2_power** (float) - :math:`beta_2^t` in the updating formula.\n - **lr** (float) - :math:`l` in the updating formula.\n - **beta1** (float) - The exponential decay rate for the 1st moment estimations.\n - **beta2** (float) - The exponential decay rate for the 2nd moment estimations.\n - **epsilon** (float) - Term added to the denominator to improve numerical stability.\n - **gradient** (Tensor) - Gradients, has the same type as `var`.\n\n Outputs:\n Tuple of 3 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **m** (Tensor) - The same shape and data type as `m`.\n - **v** (Tensor) - The same shape and data type as `v`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_adam = P.Adam()\n >>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name=\"var\")\n >>> self.m = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name=\"m\")\n >>> self.v = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name=\"v\")\n >>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):\n >>> out = self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,\n >>> epsilon, grad)\n >>> return out\n >>> net = Net()\n >>> gradient = Tensor(np.random.rand(3, 3, 3).astype(np.float32))\n >>> result = net(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient)\n \"\"\"\n @prim_attr_register\n def __init__(self, use_locking=False, use_nesterov=False):\n validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n validator.check_value_type(\"use_nesterov\", use_nesterov, [bool], self.name)\n\n def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,\n beta1_shape, beta2_shape, epsilon_shape, grad_shape):\n validator.check(\"var_shape\", var_shape, \"m_shape\", m_shape, Rel.EQ, self.name)\n validator.check(\"var_shape\", var_shape, \"v_shape\", v_shape, Rel.EQ, self.name)\n validator.check(\"var_shape\", var_shape, \"grad_shape\", grad_shape, Rel.EQ, self.name)\n return var_shape, m_shape, v_shape\n\n def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,\n beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):\n args = {\"var\": var_dtype, \"m\": m_dtype, \"v\": v_dtype, \"grad\": grad_dtype}\n validator.check_tensor_type_same(args, mstype.number_type, self.name)\n\n args = {\"beta1_power\": beta1_power_dtype, \"beta2_power\": beta2_power_dtype, 'lr': lr_dtype,\n \"beta1\": beta1_dtype, \"beta2\": beta2_dtype, \"epsilon\": epsilon_dtype}\n validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)\n return var_dtype, m_dtype, v_dtype\n\n\nclass FusedSparseAdam(PrimitiveWithInfer):\n r\"\"\"\n Merge the duplicate value of the gradient and then update parameters by Adaptive Moment Estimation (Adam)\n algorithm. 
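The dense Adam step shared with the `Adam` primitive above can be sketched in NumPy as follows (an illustrative sketch only, not this primitive's API; `adam_step` and its arguments are placeholder names):\n\n >>> import numpy as np\n >>> def adam_step(var, m, v, grad, lr, beta1, beta2, eps, beta1_power, beta2_power):\n >>> # sketch of the update formulas below, not the fused kernel itself\n >>> m = beta1 * m + (1 - beta1) * grad\n >>> v = beta2 * v + (1 - beta2) * grad * grad\n >>> scale = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)\n >>> var = var - scale * m / (np.sqrt(v) + eps)\n >>> return var, m, v\n\n 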
This operator is used when the gradient is sparse.\n\n The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.\n\n The updating formulas are as follows,\n\n .. math::\n \\begin{array}{ll} \\\\\n m = \\beta_1 * m + (1 - \\beta_1) * g \\\\\n v = \\beta_2 * v + (1 - \\beta_2) * g * g \\\\\n l = \\alpha * \\frac{\\sqrt{1-\\beta_2^t}}{1-\\beta_1^t} \\\\\n w = w - l * \\frac{m}{\\sqrt{v} + \\epsilon}\n \\end{array}\n\n :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents\n `gradient`, :math:`l` represents scaling factor `lr`, :math:`\\beta_1, \\beta_2` represent `beta1` and `beta2`,\n :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and\n `beta2_power`, :math:`\\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\\epsilon` represents\n `epsilon`.\n\n All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.\n If true, updates of the var, m, and v tensors will be protected by a lock.\n If false, the result is unpredictable. Default: False.\n use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.\n If true, update the gradients using NAG.\n If true, update the gradients without using NAG. Default: False.\n\n Inputs:\n - **var** (Parameter) - Parameters to be updated with float32 data type.\n - **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with\n float32 data type.\n - **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients, has the same type as\n `var` with float32 data type.\n - **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.\n - **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.\n - **lr** (Tensor) - :math:`l` in the updating formula. 
With float32 data type.\n - **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.\n - **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.\n - **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.\n - **gradient** (Tensor) - Gradient value with float32 data type.\n - **indices** (Tensor) - Gradient indices with int32 data type.\n\n Outputs:\n Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.\n\n - **var** (Tensor) - A Tensor with shape (1,).\n - **m** (Tensor) - A Tensor with shape (1,).\n - **v** (Tensor) - A Tensor with shape (1,).\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> import mindspore.common.dtype as mstype\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.sparse_apply_adam = P.FusedSparseAdam()\n >>> self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name=\"var\")\n >>> self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name=\"m\")\n >>> self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name=\"v\")\n >>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):\n >>> out = self.sparse_apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,\n >>> epsilon, grad, indices)\n >>> return out\n >>> net = Net()\n >>> beta1_power = Tensor(0.9, mstype.float32)\n >>> beta2_power = Tensor(0.999, mstype.float32)\n >>> lr = Tensor(0.001, mstype.float32)\n >>> beta1 = Tensor(0.9, mstype.float32)\n >>> beta2 = Tensor(0.999, mstype.float32)\n >>> epsilon = Tensor(1e-8, mstype.float32)\n >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)\n >>> indices = Tensor([0, 1], mstype.int32)\n >>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)\n \"\"\"\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),\n sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T),\n sig.make_sig('beta1', dtype=sig.sig_dtype.T),\n sig.make_sig('beta2', dtype=sig.sig_dtype.T),\n sig.make_sig('epsilon', dtype=sig.sig_dtype.T),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n )\n\n @prim_attr_register\n def __init__(self, use_locking=False, use_nesterov=False):\n validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n validator.check_value_type(\"use_nesterov\", use_nesterov, [bool], self.name)\n self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',\n 'epsilon', 'grad', 'indices'],\n outputs=['var', 'm', 'v'])\n\n def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape,\n beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):\n validator.check(\"var_shape\", var_shape, \"m_shape\", m_shape, Rel.EQ, self.name)\n validator.check(\"var_shape\", var_shape, \"v_shape\", v_shape, Rel.EQ, self.name)\n validator.check_integer(\"indices rank\", len(indices_shape), 1, Rel.EQ, 
self.name)\n validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)\n if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:\n raise ValueError(f\"For '{self.name}', the shape of updates should be [] or \"\n f\"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, \"\n f\"indices_shape: {indices_shape}, grad_shape: {grad_shape}.\")\n return [1], [1], [1]\n\n def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,\n beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):\n args = {\"var\": var_dtype, \"m\": m_dtype, \"v\": v_dtype, \"grad\": grad_dtype}\n validator.check_tensor_type_same(args, mstype.number_type, self.name)\n\n args = {\"beta1_power\": beta1_power_dtype, \"beta2_power\": beta2_power_dtype, 'lr': lr_dtype,\n \"beta1\": beta1_dtype, \"beta2\": beta2_dtype, \"epsilon\": epsilon_dtype}\n validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)\n validator.check_tensor_type_same({\"indices_dtype\": indices_dtype}, [mstype.int32], self.name)\n return var_dtype, m_dtype, v_dtype\n\n\nclass FusedSparseLazyAdam(PrimitiveWithInfer):\n r\"\"\"\n Merge the duplicate value of the gradient and then update parameters by Adaptive Moment Estimation (Adam)\n algorithm. This operator is used when the gradient is sparse. The behavior is not equivalent to the\n original Adam algorithm, as only the current indices parameters will be updated.\n\n The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.\n\n The updating formulas are as follows,\n\n .. math::\n \\begin{array}{ll} \\\\\n m = \\beta_1 * m + (1 - \\beta_1) * g \\\\\n v = \\beta_2 * v + (1 - \\beta_2) * g * g \\\\\n l = \\alpha * \\frac{\\sqrt{1-\\beta_2^t}}{1-\\beta_1^t} \\\\\n w = w - l * \\frac{m}{\\sqrt{v} + \\epsilon}\n \\end{array}\n\n :math:`m` represents the 1st moment vector, :math:`v` represents the 2nd moment vector, :math:`g` represents\n `gradient`, :math:`l` represents scaling factor `lr`, :math:`\\beta_1, \\beta_2` represent `beta1` and `beta2`,\n :math:`t` represents updating step while :math:`beta_1^t` and :math:`beta_2^t` represent `beta1_power` and\n `beta2_power`, :math:`\\alpha` represents `learning_rate`, :math:`w` represents `var`, :math:`\\epsilon` represents\n `epsilon`.\n\n All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.\n If true, updates of the var, m, and v tensors will be protected by a lock.\n If false, the result is unpredictable. Default: False.\n use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.\n If true, update the gradients using NAG.\n If true, update the gradients without using NAG. Default: False.\n\n Inputs:\n - **var** (Parameter) - Parameters to be updated with float32 data type.\n - **m** (Parameter) - The 1st moment vector in the updating formula, has the same type as `var` with\n float32 data type.\n - **v** (Parameter) - The 2nd moment vector in the updating formula. 
Mean square gradients, has the same type as\n `var` with float32 data type.\n - **beta1_power** (Tensor) - :math:`beta_1^t` in the updating formula with float32 data type.\n - **beta2_power** (Tensor) - :math:`beta_2^t` in the updating formula with float32 data type.\n - **lr** (Tensor) - :math:`l` in the updating formula with float32 data type.\n - **beta1** (Tensor) - The exponential decay rate for the 1st moment estimations with float32 data type.\n - **beta2** (Tensor) - The exponential decay rate for the 2nd moment estimations with float32 data type.\n - **epsilon** (Tensor) - Term added to the denominator to improve numerical stability with float32 data type.\n - **gradient** (Tensor) - Gradient value with float32 data type.\n - **indices** (Tensor) - Gradient indices with int32 data type.\n\n Outputs:\n Tuple of 3 Tensors, this operator will update the input parameters directly, the outputs are useless.\n\n - **var** (Tensor) - A Tensor with shape (1,).\n - **m** (Tensor) - A Tensor with shape (1,).\n - **v** (Tensor) - A Tensor with shape (1,).\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> import mindspore.common.dtype as mstype\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.sparse_apply_lazyadam = P.FusedSparseLazyAdam()\n >>> self.var = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name=\"var\")\n >>> self.m = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name=\"m\")\n >>> self.v = Parameter(Tensor(np.ones([3, 1, 2]).astype(np.float32)), name=\"v\")\n >>> def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices):\n >>> out = self.sparse_apply_lazyadam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1,\n >>> beta2, epsilon, grad, indices)\n >>> return out\n >>> net = Net()\n >>> beta1_power = Tensor(0.9, mstype.float32)\n >>> beta2_power = Tensor(0.999, mstype.float32)\n >>> lr = Tensor(0.001, mstype.float32)\n >>> beta1 = Tensor(0.9, mstype.float32)\n >>> beta2 = Tensor(0.999, mstype.float32)\n >>> epsilon = Tensor(1e-8, mstype.float32)\n >>> gradient = Tensor(np.random.rand(2, 1, 2), mstype.float32)\n >>> indices = Tensor([0, 1], mstype.int32)\n >>> result = net(beta1_power, beta2_power, lr, beta1, beta2, epsilon, gradient, indices)\n \"\"\"\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('beta1_power', dtype=sig.sig_dtype.T),\n sig.make_sig('beta2_power', dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T),\n sig.make_sig('beta1', dtype=sig.sig_dtype.T),\n sig.make_sig('beta2', dtype=sig.sig_dtype.T),\n sig.make_sig('epsilon', dtype=sig.sig_dtype.T),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n )\n\n @prim_attr_register\n def __init__(self, use_locking=False, use_nesterov=False):\n validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n validator.check_value_type(\"use_nesterov\", use_nesterov, [bool], self.name)\n self.init_prim_io_names(inputs=['var', 'm', 'v', 'beta1_power', 'beta2_power', 'lr', 'beta1', 'beta2',\n 'epsilon', 'grad', 'indices'],\n outputs=['var', 'm', 'v'])\n\n def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, 
beta2_power_shape, lr_shape,\n beta1_shape, beta2_shape, epsilon_shape, grad_shape, indices_shape):\n validator.check(\"var_shape\", var_shape, \"m_shape\", m_shape, Rel.EQ, self.name)\n validator.check(\"var_shape\", var_shape, \"v_shape\", v_shape, Rel.EQ, self.name)\n validator.check_integer(\"indices rank\", len(indices_shape), 1, Rel.EQ, self.name)\n validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)\n if len(var_shape) > 1 and grad_shape != indices_shape + var_shape[1:]:\n raise ValueError(f\"For '{self.name}', the shape of updates should be [] or \"\n f\"grad_shape = indices_shape + var_shape[1:], but got var_shape: {var_shape}, \"\n f\"indices_shape: {indices_shape}, grad_shape: {grad_shape}.\")\n return [1], [1], [1]\n\n def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, beta2_power_dtype, lr_dtype,\n beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype, indices_dtype):\n args = {\"var\": var_dtype, \"m\": m_dtype, \"v\": v_dtype, \"grad\": grad_dtype}\n validator.check_tensor_type_same(args, mstype.number_type, self.name)\n\n args = {\"beta1_power\": beta1_power_dtype, \"beta2_power\": beta2_power_dtype, 'lr': lr_dtype,\n \"beta1\": beta1_dtype, \"beta2\": beta2_dtype, \"epsilon\": epsilon_dtype}\n validator.check_scalar_or_tensor_type_same(args, [mstype.float16, mstype.float32], self.name, True)\n\n validator.check_tensor_type_same({\"indices_dtype\": indices_dtype}, [mstype.int32], self.name)\n return var_dtype, m_dtype, v_dtype\n\n\nclass FusedSparseFtrl(PrimitiveWithInfer):\n \"\"\"\n Merge the duplicate value of the gradient and then update relevant entries according to the FTRL-proximal scheme.\n\n All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n lr (float): The learning rate value, must be positive.\n l1 (float): l1 regularization strength, must be greater than or equal to zero.\n l2 (float): l2 regularization strength, must be greater than or equal to zero.\n lr_power (float): Learning rate power controls how the learning rate decreases during training,\n must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.\n use_locking (bool): Use locks for updating operation if True . Default: False.\n\n Inputs:\n - **var** (Parameter) - The variable to be updated. The data type must be float32.\n - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.\n - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.\n - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.\n - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The shape\n of `indices` must be the same as `grad` in first dimension. 
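For example, consistent with the example below: if `var` has shape (3, 1, 2) and two rows are updated, `grad` has shape (2, 1, 2) and `indices` has shape (2,). 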
The type must be int32.\n\n Outputs:\n Tuple of 3 Tensor, this operator will update the input parameters directly, the outputs are useless.\n\n - **var** (Tensor) - A Tensor with shape (1,).\n - **accum** (Tensor) - A Tensor with shape (1,).\n - **linear** (Tensor) - A Tensor with shape (1,).\n\n Examples:\n >>> import mindspore\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> from mindspore import Parameter\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> class SparseApplyFtrlNet(nn.Cell):\n >>> def __init__(self):\n >>> super(SparseApplyFtrlNet, self).__init__()\n >>> self.sparse_apply_ftrl = P.FusedSparseFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)\n >>> self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name=\"accum\")\n >>> self.linear = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name=\"linear\")\n >>>\n >>> def construct(self, grad, indices):\n >>> out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)\n >>> return out\n >>>\n >>> net = SparseApplyFtrlNet()\n >>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))\n >>> indices = Tensor(np.array([0, 1]).astype(np.int32))\n >>> output = net(grad, indices)\n \"\"\"\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n )\n\n @prim_attr_register\n def __init__(self, lr, l1, l2, lr_power, use_locking=False):\n self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'indices'],\n outputs=['output'])\n validator.check_value_type(\"lr\", lr, [float], self.name)\n validator.check_value_type(\"l1\", l1, [float], self.name)\n validator.check_value_type(\"l2\", l2, [float], self.name)\n validator.check_value_type(\"lr_power\", lr_power, [float], self.name)\n self.lr = validator.check_number_range(\"lr\", lr, 0.0, float(\"inf\"), Rel.INC_NEITHER, self.name)\n self.l1 = validator.check_number_range(\"l1\", l1, 0.0, float(\"inf\"), Rel.INC_LEFT, self.name)\n self.l2 = validator.check_number_range(\"l2\", l2, 0.0, float(\"inf\"), Rel.INC_LEFT, self.name)\n self.lr_power = validator.check_number(\"lr_power\", lr_power, 0, Rel.LE, self.name)\n self.use_locking = validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n\n def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):\n validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)\n validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)\n if len(var_shape) > 1:\n validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)\n validator.check_integer(\"indices rank\", len(indices_shape), 1, Rel.EQ, self.name)\n validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)\n return [1], [1], [1]\n\n def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):\n args = {\"var_dtype\": var_dtype, \"accum_dtype\": accum_dtype,\n \"linear_dtype\": linear_dtype, \"grad_dtype\": grad_dtype}\n validator.check_tensor_type_same(args, [mstype.float32], self.name)\n 
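# indices must be int32; the float32 check above already covers var, accum, linear and grad\n 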
validator.check_tensor_type_same({\"indices_dtype\": indices_dtype}, [mstype.int32], self.name)\n return var_dtype, accum_dtype, linear_dtype\n\n\nclass FusedSparseProximalAdagrad(PrimitiveWithInfer):\n r\"\"\"\n Merge the duplicate value of the gradient and then update relevant entries according to the proximal adagrad\n algorithm.\n\n .. math::\n accum += grad * grad\n .. math::\n \\text{prox_v} = var - lr * grad * \\frac{1}{\\sqrt{accum}}\n .. math::\n var = \\frac{sign(\\text{prox_v})}{1 + lr * l2} * \\max(\\left| \\text{prox_v} \\right| - lr * l1, 0)\n\n All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n use_locking (bool): If true, the variable and accumulation tensors will be protected from being updated.\n Default: False.\n\n Inputs:\n - **var** (Parameter) - Variable tensor to be updated. The data type must be float32.\n - **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.\n - **lr** (Tensor) - The learning rate value. The data type must be float32.\n - **l1** (Tensor) - l1 regularization strength. The data type must be float32.\n - **l2** (Tensor) - l2 regularization strength. The data type must be float32.\n - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient. The data type must be float32.\n - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`. The data type\n must be int32.\n\n Outputs:\n Tuple of 2 Tensors, this operator will update the input parameters directly, the outputs are useless.\n\n - **var** (Tensor) - A Tensor with shape (1,).\n - **accum** (Tensor) - A Tensor with shape (1,).\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.sparse_apply_proximal_adagrad = P.FusedSparseProximalAdagrad()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 1, 2).astype(np.float32)), name=\"accum\")\n >>> self.lr = Tensor(0.01, mstype.float32)\n >>> self.l1 = Tensor(0.0, mstype.float32)\n >>> self.l2 = Tensor(0.0, mstype.float32)\n >>> def construct(self, grad, indices):\n >>> out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,\n >>> self.l2, grad, indices)\n >>> return out\n >>> net = Net()\n >>> grad = Tensor(np.random.rand(2, 1, 2).astype(np.float32))\n >>> indices = Tensor(np.array([0, 1]).astype(np.int32))\n >>> output = net(grad, indices)\n \"\"\"\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T),\n sig.make_sig('l1', dtype=sig.sig_dtype.T),\n sig.make_sig('l2', dtype=sig.sig_dtype.T),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n )\n\n @prim_attr_register\n def __init__(self, use_locking=False):\n self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],\n outputs=['output'])\n self.use_locking = 
validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n\n def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):\n validator.check_integer(\"indices rank\", len(indices_shape), 1, Rel.EQ, self.name)\n return [1], [1]\n\n def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, indices_dtype):\n args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}\n validator.check_tensor_type_same(args, [mstype.float32], self.name)\n validator.check_scalar_or_tensor_type_same({\"lr\": lr_dtype}, [mstype.float32], self.name)\n validator.check_scalar_or_tensor_type_same({\"l1\": l1_dtype}, [mstype.float32], self.name)\n validator.check_scalar_or_tensor_type_same({\"l2\": l2_dtype}, [mstype.float32], self.name)\n valid_types = [mstype.int16, mstype.int32, mstype.int64,\n mstype.uint16, mstype.uint32, mstype.uint64]\n validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)\n return var_dtype, accum_dtype\n\n\nclass KLDivLoss(PrimitiveWithInfer):\n r\"\"\"\n Computes the Kullback-Leibler divergence between the target and the output.\n\n Note:\n Sets input as :math:`x`, input label as :math:`y`, output as :math:`\\ell(x, y)`.\n Let,\n\n .. math::\n L = \\{l_1,\\dots,l_N\\}^\\top, \\quad\n l_n = y_n \\cdot (\\log y_n - x_n)\n\n Then,\n\n .. math::\n \\ell(x, y) = \\begin{cases}\n L, & \\text{if reduction} = \\text{`none';}\\\\\n \\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\\n \\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}\n \\end{cases}\n\n Args:\n reduction (str): Specifies the reduction to be applied to the output.\n Its value should be one of 'none', 'mean', 'sum'. Default: 'mean'.\n\n Inputs:\n - **input_x** (Tensor) - The input Tensor. The data type must be float32.\n - **input_y** (Tensor) - The label Tensor which has the same shape as `input_x`. The data type must be float32.\n\n Outputs:\n Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input_x`.\n Otherwise it is a scalar.\n\n Examples:\n >>> import mindspore\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.kldiv_loss = P.KLDivLoss()\n >>> def construct(self, x, y):\n >>> result = self.kldiv_loss(x, y)\n >>> return result\n >>>\n >>> net = Net()\n >>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)\n >>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)\n >>> result = net(input_x, input_y)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, reduction='mean'):\n self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)\n\n def infer_shape(self, x_shape, y_shape):\n validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)\n if self.reduction in ('mean', 'sum'):\n shape = []\n else:\n shape = x_shape\n return shape\n\n def infer_dtype(self, x_type, y_type):\n args = {'x': x_type, 'y': y_type}\n valid_types = (mstype.float16, mstype.float32)\n validator.check_tensor_type_same(args, valid_types, self.name)\n return x_type\n\n\nclass BinaryCrossEntropy(PrimitiveWithInfer):\n r\"\"\"\n Computes the Binary Cross Entropy between the target and the output.\n\n Note:\n Sets input as :math:`x`, input label as :math:`y`, output as :math:`\\ell(x, y)`.\n Let,\n\n .. 
math::\n L = \\{l_1,\\dots,l_N\\}^\\top, \\quad\n l_n = - w_n \\left[ y_n \\cdot \\log x_n + (1 - y_n) \\cdot \\log (1 - x_n) \\right]\n\n Then,\n\n .. math::\n \\ell(x, y) = \\begin{cases}\n L, & \\text{if reduction} = \\text{`none';}\\\\\n \\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\\n \\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}\n \\end{cases}\n\n Args:\n reduction (str): Specifies the reduction to be applied to the output.\n Its value should be one of 'none', 'mean', 'sum'. Default: 'mean'.\n\n Inputs:\n - **input_x** (Tensor) - The input Tensor. The data type should be float16 or float32.\n - **input_y** (Tensor) - The label Tensor which has same shape and data type as `input_x`.\n - **weight** (Tensor, optional) - A rescaling weight applied to the loss of each batch element.\n And it should have same shape and data type as `input_x`. Default: None.\n\n Outputs:\n Tensor or Scalar, if `reduction` is 'none', then output is a tensor and has the same shape as `input_x`.\n Otherwise, the output is a scalar.\n\n Examples:\n >>> import mindspore\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.binary_cross_entropy = P.BinaryCrossEntropy()\n >>> def construct(self, x, y, weight):\n >>> result = self.binary_cross_entropy(x, y, weight)\n >>> return result\n >>>\n >>> net = Net()\n >>> input_x = Tensor(np.array([0.2, 0.7, 0.1]), mindspore.float32)\n >>> input_y = Tensor(np.array([0., 1., 0.]), mindspore.float32)\n >>> weight = Tensor(np.array([1, 2, 2]), mindspore.float32)\n >>> result = net(input_x, input_y, weight)\n 0.38240486\n \"\"\"\n\n @prim_attr_register\n def __init__(self, reduction='mean'):\n self.reduction = validator.check_string('reduction', reduction, ['none', 'mean', 'sum'], self.name)\n\n def infer_shape(self, x_shape, y_shape, weight_shape):\n validator.check('x_shape', x_shape, 'y_shape', y_shape, Rel.EQ, self.name)\n if weight_shape:\n validator.check('y_shape', y_shape, 'weight_shape', weight_shape, Rel.EQ, self.name)\n if self.reduction in ('mean', 'sum'):\n shape = []\n else:\n shape = x_shape\n return shape\n\n def infer_dtype(self, x_type, y_type, weight_type):\n args = {'x': x_type, 'y': y_type}\n valid_types = (mstype.float16, mstype.float32)\n validator.check_tensor_type_same(args, valid_types, self.name)\n if weight_type:\n validator.check_tensor_type_same({'x': x_type, 'weight': weight_type}, valid_types, self.name)\n return x_type\n\n\nclass ApplyAdaMax(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the adamax scheme.\n\n The updating formulas are as follows,\n\n .. 
math::\n \\begin{array}{ll} \\\\\n m_{t} = \\beta_1 * m_{t-1} + (1 - \\beta_1) * g \\\\\n v_{t} = \\max(\\beta_2 * v_{t-1}, \\left| g \\right|) \\\\\n var = var - \\frac{l}{1 - \\beta_1^t} * \\frac{m_{t}}{v_{t} + \\epsilon}\n \\end{array}\n\n :math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`\n is the last momentent of :math:`m_{t}`, :math:`v` represents the 2nd moment vector, :math:`v_{t-1}`\n is the last momentent of :math:`v_{t}`, :math:`l` represents scaling factor `lr`,\n :math:`g` represents `grad`, :math:`\\beta_1, \\beta_2` represent `beta1` and `beta2`,\n :math:`beta_1^t` represents `beta1_power`, :math:`var` represents the variable to be updated,\n :math:`\\epsilon` represents `epsilon`.\n\n Inputs of `var`, `m`, `v` and `grad` comply with the implicit type conversion rules\n to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Inputs:\n - **var** (Parameter) - Variable to be updated. With float32 or float16 data type.\n - **m** (Parameter) - The 1st moment vector in the updating formula, has the same shape and type as `var`.\n With float32 or float16 data type.\n - **v** (Parameter) - The 2nd moment vector in the updating formula. Mean square gradients\n with the same shape and type as `var`. With float32 or float16 data type.\n - **beta1_power** (Union[Number, Tensor]) - :math:`beta_1^t` in the updating formula, should be scalar.\n With float32 or float16 data type.\n - **lr** (Union[Number, Tensor]) - Learning rate, :math:`l` in the updating formula, should be scalar.\n With float32 or float16 data type.\n - **beta1** (Union[Number, Tensor]) - The exponential decay rate for the 1st moment estimations,\n should be scalar. With float32 or float16 data type.\n - **beta2** (Union[Number, Tensor]) - The exponential decay rate for the 2nd moment estimations,\n should be scalar. 
With float32 or float16 data type.\n - **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.\n With float32 or float16 data type.\n - **grad** (Tensor) - A tensor for gradient, has the same shape and type as `var`.\n With float32 or float16 data type.\n\n Outputs:\n Tuple of 3 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **m** (Tensor) - The same shape and data type as `m`.\n - **v** (Tensor) - The same shape and data type as `v`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> import mindspore.common.dtype as mstype\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_ada_max = P.ApplyAdaMax()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"m\")\n >>> self.v = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"v\")\n >>> def construct(self, beta1_power, lr, beta1, beta2, epsilon, grad):\n >>> out = self.apply_ada_max(self.var, self.m, self.v, beta1_power, lr, beta1, beta2, epsilon, grad)\n >>> return out\n >>> net = Net()\n >>> beta1_power =Tensor(0.9, mstype.float32)\n >>> lr = Tensor(0.001, mstype.float32)\n >>> beta1 = Tensor(0.9, mstype.float32)\n >>> beta2 = Tensor(0.99, mstype.float32)\n >>> epsilon = Tensor(1e-10, mstype.float32)\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> result = net(beta1_power, lr, beta1, beta2, epsilon, grad)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('beta1_power', dtype=sig.sig_dtype.T1),\n sig.make_sig('lr', dtype=sig.sig_dtype.T2),\n sig.make_sig('beta1', dtype=sig.sig_dtype.T3),\n sig.make_sig('beta2', dtype=sig.sig_dtype.T4),\n sig.make_sig('epsilon', dtype=sig.sig_dtype.T5),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init ApplyAdaMax\"\"\"\n\n def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, lr_shape,\n beta1_shape, beta2_shape, epsilon_shape, grad_shape):\n validator.check(\"m_shape\", m_shape, \"var_shape\", var_shape, Rel.EQ, self.name)\n validator.check(\"v_shape\", v_shape, \"var_shape\", var_shape, Rel.EQ, self.name)\n validator.check(\"grad_shape\", grad_shape, \"var_shape\", var_shape, Rel.EQ, self.name)\n beta1_power_shp_len = len(beta1_power_shape)\n validator.check_integer(\"beta1 power's rank\", beta1_power_shp_len, 1, Rel.LE, self.name)\n if beta1_power_shp_len == 1:\n validator.check_integer(\"beta1_power_shape[0]\", beta1_power_shape[0], 1, Rel.EQ, self.name)\n lr_shp_len = len(lr_shape)\n validator.check_integer(\"lr's rank\", lr_shp_len, 1, Rel.LE, self.name)\n if lr_shp_len == 1:\n validator.check_integer(\"lr_shape[0]\", lr_shape[0], 1, Rel.EQ, self.name)\n beta1_shp_len = len(beta1_shape)\n validator.check_integer(\"beta1's rank\", beta1_shp_len, 1, Rel.LE, self.name)\n if beta1_shp_len == 1:\n validator.check_integer(\"beta1_shape[0]\", beta1_shape[0], 1, Rel.EQ, self.name)\n beta2_shp_len = len(beta2_shape)\n validator.check_integer(\"beta2's rank\", beta2_shp_len, 1, Rel.LE, self.name)\n if beta2_shp_len == 1:\n 
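# like the scalar hyper-parameters above, beta2 may be a rank-0 scalar or a tensor with a single element\n 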
validator.check_integer(\"beta2_shape[0]\", beta2_shape[0], 1, Rel.EQ, self.name)\n epsilon_shp_len = len(epsilon_shape)\n validator.check_integer(\"epsilon's rank\", epsilon_shp_len, 1, Rel.LE, self.name)\n if epsilon_shp_len == 1:\n validator.check_integer(\"epsilon_shape[0]\", epsilon_shape[0], 1, Rel.EQ, self.name)\n return var_shape, m_shape, v_shape\n\n def infer_dtype(self, var_dtype, m_dtype, v_dtype, beta1_power_dtype, lr_dtype,\n beta1_dtype, beta2_dtype, epsilon_dtype, grad_dtype):\n valid_types = [mstype.float16, mstype.float32]\n args = {\"var\": var_dtype, \"m\": m_dtype, \"v\": v_dtype, \"grad\": grad_dtype}\n validator.check_tensor_type_same(args, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"beta1_power\": beta1_power_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"lr\": lr_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"beta1\": beta1_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"beta2\": beta2_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"epsilon\": epsilon_dtype}, valid_types, self.name)\n return var_dtype, m_dtype, v_dtype\n\n\nclass ApplyAdadelta(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the adadelta scheme.\n\n .. math::\n accum = \\rho * accum + (1 - \\rho) * grad^2\n .. math::\n \\text{update} = \\sqrt{\\text{accum_update} + \\epsilon} * \\frac{grad}{\\sqrt{accum + \\epsilon}}\n .. math::\n \\text{accum_update} = \\rho * \\text{accum_update} + (1 - \\rho) * update^2\n .. math::\n var -= lr * update\n\n Inputs of `var`, `accum`, `accum_update` and `grad` comply with the implicit type conversion rules\n to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Inputs:\n - **var** (Parameter) - Weights to be updated. With float32 or float16 data type.\n - **accum** (Parameter) - Accumulation to be updated, has the same shape and type as `var`.\n With float32 or float16 data type.\n - **accum_update** (Parameter) - Accum_update to be updated, has the same shape and type as `var`.\n With float32 or float16 data type.\n - **lr** (Union[Number, Tensor]) - Learning rate, should be scalar. With float32 or float16 data type.\n - **rho** (Union[Number, Tensor]) - Decay rate, should be scalar. With float32 or float16 data type.\n - **epsilon** (Union[Number, Tensor]) - A small value added for numerical stability, should be scalar.\n With float32 or float16 data type.\n - **grad** (Tensor) - Gradients, has the same shape and type as `var`. 
With float32 or float16 data type.\n\n Outputs:\n Tuple of 3 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **accum** (Tensor) - The same shape and data type as `accum`.\n - **accum_update** (Tensor) - The same shape and data type as `accum_update`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> import mindspore.common.dtype as mstype\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_adadelta = P.ApplyAdadelta()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"accum\")\n >>> self.accum_update = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"accum_update\")\n >>> def construct(self, lr, rho, epsilon, grad):\n >>> out = self.apply_adadelta(self.var, self.accum, self.accum_update, lr, rho, epsilon, grad)\n >>> return out\n >>> net = Net()\n >>> lr = Tensor(0.001, mstype.float32)\n >>> rho = Tensor(0.0, mstype.float32)\n >>> epsilon = Tensor(1e-6, mstype.float32)\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> result = net(lr, rho, epsilon, grad)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum_update', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T1),\n sig.make_sig('rho', dtype=sig.sig_dtype.T2),\n sig.make_sig('epsilon', dtype=sig.sig_dtype.T3),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self):\n \"\"\"init ApplyAdadelta\"\"\"\n\n def infer_shape(self, var_shape, accum_shape, accum_update_shape, lr_shape, rho_shape,\n epsilon_shape, grad_shape):\n validator.check(\"accum_shape\", accum_shape, \"var_shape\", var_shape, Rel.EQ, self.name)\n validator.check(\"accum_update_shape\", accum_update_shape, \"var_shape\", var_shape, Rel.EQ, self.name)\n validator.check(\"grad_shape\", grad_shape, \"var_shape\", var_shape, Rel.EQ, self.name)\n lr_shp_len = len(lr_shape)\n validator.check_integer(\"lr's rank\", lr_shp_len, 1, Rel.LE, self.name)\n if lr_shp_len == 1:\n validator.check_integer(\"lr_shape[0]\", lr_shape[0], 1, Rel.EQ, self.name)\n rho_shp_len = len(rho_shape)\n validator.check_integer(\"rho's rank\", rho_shp_len, 1, Rel.LE, self.name)\n if rho_shp_len == 1:\n validator.check_integer(\"rho_shape[0]\", rho_shape[0], 1, Rel.EQ, self.name)\n epsilon_shp_len = len(epsilon_shape)\n validator.check_integer(\"lepsilon's rank\", epsilon_shp_len, 1, Rel.LE, self.name)\n if epsilon_shp_len == 1:\n validator.check_integer(\"epsilon_shape[0]\", epsilon_shape[0], 1, Rel.EQ, self.name)\n return var_shape, accum_shape, accum_update_shape\n\n def infer_dtype(self, var_dtype, accum_dtype, accum_update_dtype, lr_dtype, rho_dtype,\n epsilon_dtype, grad_dtype):\n valid_types = [mstype.float16, mstype.float32]\n args = {\"var\": var_dtype, \"accum\": accum_dtype, \"accum_update\": accum_update_dtype, \"grad\": grad_dtype}\n validator.check_tensor_type_same(args, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"lr\": lr_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"rho\": rho_dtype}, valid_types, self.name)\n 
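# epsilon, like lr and rho above, may be passed as a scalar or a tensor; only its float16/float32 dtype is checked here\n 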
validator.check_scalar_or_tensor_type_same({\"epsilon\": epsilon_dtype}, valid_types, self.name)\n return var_dtype, accum_dtype, accum_update_dtype\n\n\nclass ApplyAdagrad(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the adagrad scheme.\n\n .. math::\n accum += grad * grad\n .. math::\n var -= lr * grad * \\frac{1}{\\sqrt{accum}}\n\n Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules\n to make the data types consistent..\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n update_slots (bool): If `True`, `accum` will be updated. Default: True.\n\n Inputs:\n - **var** (Parameter) - Variable to be updated. With float32 or float16 data type.\n - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.\n With float32 or float16 data type.\n - **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. With float32 or float16 data type.\n - **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.\n With float32 or float16 data type.\n\n Outputs:\n Tuple of 2 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **accum** (Tensor) - The same shape and data type as `accum`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> import mindspore.common.dtype as mstype\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_adagrad = P.ApplyAdagrad()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"accum\")\n >>> def construct(self, lr, grad):\n >>> out = self.apply_adagrad(self.var, self.accum, lr, grad)\n >>> return out\n >>> net = Net()\n >>> lr = Tensor(0.001, mstype.float32)\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> result = net(lr, grad)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T1),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self, update_slots=True):\n validator.check_value_type(\"update_slots\", update_slots, [bool], self.name)\n\n def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):\n validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)\n validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)\n lr_shp_len = len(lr_shape)\n validator.check_integer(\"lr's rank\", lr_shp_len, 1, Rel.LE, self.name)\n if lr_shp_len == 1:\n validator.check_integer(\"lr_shape[0]\", lr_shape[0], 1, Rel.EQ, self.name)\n return var_shape, accum_shape\n\n def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):\n args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}\n valid_types = [mstype.float16, mstype.float32]\n validator.check_tensor_type_same(args, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, valid_types, self.name)\n return var_dtype, accum_dtype\n\n\nclass 
ApplyAdagradV2(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the adagradv2 scheme.\n\n .. math::\n accum += grad * grad\n .. math::\n var -= lr * grad * \\frac{1}{\\sqrt{accum} + \\epsilon}\n\n Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules\n to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n epsilon (float): A small value added for numerical stability.\n update_slots (bool): If `True`, `accum` will be updated. Default: True.\n\n Inputs:\n - **var** (Parameter) - Variable to be updated. With float16 or float32 data type.\n - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.\n With float16 or float32 data type.\n - **lr** (Union[Number, Tensor]) - The learning rate value, should be a float number or\n a scalar tensor with float16 or float32 data type.\n - **grad** (Tensor) - A tensor for gradient. The shape and dtype should be the same as `var`.\n With float16 or float32 data type.\n\n Outputs:\n Tuple of 2 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **accum** (Tensor) - The same shape and data type as `m`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> import mindspore.common.dtype as mstype\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_adagrad_v2 = P.ApplyAdagradV2(epsilon=1e-6)\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"accum\")\n >>> def construct(self, lr, grad):\n >>> out = self.apply_adagrad_v2(self.var, self.accum, lr, grad)\n >>> return out\n >>> net = Net()\n >>> lr = Tensor(0.001, mstype.float32)\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> result = net(lr, grad)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T1),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self, epsilon, update_slots=True):\n validator.check_value_type(\"epsilon\", epsilon, [float], self.name)\n validator.check_value_type(\"update_slots\", update_slots, [bool], self.name)\n\n def infer_shape(self, var_shape, accum_shape, lr_shape, grad_shape):\n validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)\n validator.check('var shape', var_shape, 'grad shape', grad_shape, Rel.EQ, self.name)\n lr_shp_len = len(lr_shape)\n validator.check_integer(\"lr's rank\", lr_shp_len, 1, Rel.LE, self.name)\n if lr_shp_len == 1:\n validator.check_integer(\"lr_shape[0]\", lr_shape[0], 1, Rel.EQ, self.name)\n return var_shape, accum_shape\n\n def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, grad_dtype):\n args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}\n validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)\n validator.check_scalar_or_tensor_type_same({'lr': lr_dtype}, [mstype.float16, mstype.float32], self.name)\n return 
var_dtype, accum_dtype\n\n\nclass SparseApplyAdagrad(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the adagrad scheme.\n\n .. math::\n accum += grad * grad\n .. math::\n var -= lr * grad * (1 / sqrt(accum))\n\n Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules\n to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n lr (float): Learning rate.\n update_slots (bool): If `True`, `accum` will be updated. Default: True.\n use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.\n Default: False.\n\n Inputs:\n - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.\n - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.\n - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.\n Has the same data type as `var`.\n - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.\n The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.\n\n Outputs:\n Tuple of 2 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **accum** (Tensor) - The same shape and data type as `accum`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> import mindspore.common.dtype as mstype\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.sparse_apply_adagrad = P.SparseApplyAdagrad(lr=1e-8)\n >>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name=\"accum\")\n >>> def construct(self, grad, indices):\n >>> out = self.sparse_apply_adagrad(self.var, self.accum, grad, indices)\n >>> return out\n >>> net = Net()\n >>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))\n >>> indices = Tensor([0, 1, 2], mstype.int32)\n >>> result = net(grad, indices)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n )\n\n @prim_attr_register\n def __init__(self, lr, update_slots=True, use_locking=False):\n validator.check_value_type(\"lr\", lr, [float], self.name)\n validator.check_number_range(\"lr\", lr, float(\"-inf\"), float(\"inf\"), Rel.INC_NEITHER, self.name)\n validator.check_value_type(\"update_slots\", update_slots, [bool], self.name)\n validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n\n def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):\n validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)\n validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)\n if len(var_shape) > 1:\n validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)\n validator.check_integer(\"indices rank\", len(indices_shape), 1, Rel.EQ, self.name)\n 
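# indices must be 1-D and its length must equal the first dimension of grad\n 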
validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)\n return var_shape, accum_shape\n\n def infer_dtype(self, var_type, accum_type, grad_type, indices_type):\n args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}\n validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)\n validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)\n return var_type, accum_type\n\n\nclass SparseApplyAdagradV2(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the adagrad scheme.\n\n .. math::\n accum += grad * grad\n .. math::\n var -= lr * grad * \\frac{1}{\\sqrt{accum} + \\epsilon}\n\n Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules\n to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n lr (float): Learning rate.\n epsilon (float): A small value added for numerical stability.\n use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.\n Default: False.\n update_slots (bool): If `True`, the computation logic will be different to `False`. Default: True.\n\n Inputs:\n - **var** (Parameter) - Variable to be updated. The data type must be float16 or float32.\n - **accum** (Parameter) - Accumulation to be updated. The shape and dtype should be the same as `var`.\n - **grad** (Tensor) - Gradient. The shape must be the same as `var`'s shape except first dimension.\n Has the same data type as `var`.\n - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.\n The shape of `indices` must be the same as `grad` in first dimension, the type must be int32.\n\n Outputs:\n Tuple of 2 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **accum** (Tensor) - The same shape and data type as `accum`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> import mindspore.common.dtype as mstype\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.sparse_apply_adagrad_v2 = P.SparseApplyAdagradV2(lr=1e-8, epsilon=1e-6)\n >>> self.var = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.ones([3, 3, 3]).astype(np.float32)), name=\"accum\")\n >>>\n >>> def construct(self, grad, indices):\n >>> out = self.sparse_apply_adagrad_v2(self.var, self.accum, grad, indices)\n >>> return out\n >>> net = Net()\n >>> grad = Tensor(np.random.rand(3, 3, 3).astype(np.float32))\n >>> indices = Tensor([0, 1, 2], mstype.int32)\n >>> result = net(grad, indices)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n )\n\n @prim_attr_register\n def __init__(self, lr, epsilon, use_locking=False, update_slots=True):\n self.lr = validator.check_value_type(\"lr\", lr, [float], self.name)\n self.epsilon = validator.check_value_type(\"epsilon\", epsilon, [float], self.name)\n self.use_locking = 
validator.check_value_type(\"update_slots\", update_slots, [bool], self.name)\n self.update_slots = validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n\n def infer_shape(self, var_shape, accum_shape, grad_shape, indices_shape):\n validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)\n validator.check('len of var shape', len(var_shape), 'len of grad shape', len(grad_shape), Rel.EQ, self.name)\n if len(var_shape) > 1:\n validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)\n validator.check_integer(\"indices rank\", len(indices_shape), 1, Rel.EQ, self.name)\n validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)\n return var_shape, accum_shape\n\n def infer_dtype(self, var_type, accum_type, grad_type, indices_type):\n args = {'var': var_type, 'accum': accum_type, 'grad': grad_type}\n validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)\n validator.check_tensor_type_same({'indices': indices_type}, [mstype.int32], self.name)\n return var_type, accum_type\n\n\nclass ApplyProximalAdagrad(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the proximal adagrad algorithm.\n\n .. math::\n accum += grad * grad\n .. math::\n \\text{prox_v} = var - lr * grad * \\frac{1}{\\sqrt{accum}}\n .. math::\n var = \\frac{sign(\\text{prox_v})}{1 + lr * l2} * \\max(\\left| \\text{prox_v} \\right| - lr * l1, 0)\n\n Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules\n to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.\n Default: False.\n\n Inputs:\n - **var** (Parameter) - Variable to be updated. The data type should be float16 or float32.\n - **accum** (Parameter) - Accumulation to be updated. Must has the same shape and dtype as `var`.\n - **lr** (Union[Number, Tensor]) - The learning rate value, should be scalar. The data type should be\n float16 or float32.\n - **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar. The data type should be\n float16 or float32.\n - **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar. 
The data type should be\n float16 or float32.\n - **grad** (Tensor) - Gradient with the same shape and dtype as `var`.\n\n Outputs:\n Tuple of 2 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **accum** (Tensor) - The same shape and data type as `accum`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_proximal_adagrad = P.ApplyProximalAdagrad()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"accum\")\n >>> self.lr = 0.01\n >>> self.l1 = 0.0\n >>> self.l2 = 0.0\n >>> def construct(self, grad):\n >>> out = self.apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1, self.l2, grad)\n >>> return out\n >>> net = Net()\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> output = net(grad)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T1),\n sig.make_sig('l1', dtype=sig.sig_dtype.T2),\n sig.make_sig('l2', dtype=sig.sig_dtype.T3),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self, use_locking=False):\n self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad'],\n outputs=['var', 'accum'])\n self.use_locking = validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n\n def infer_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape):\n validator.check('accum shape', accum_shape, 'var shape', var_shape, Rel.EQ, self.name)\n validator.check('grad shape', grad_shape, 'var shape', var_shape, Rel.EQ, self.name)\n lr_shp_len = len(lr_shape)\n validator.check_integer(\"lr's rank\", lr_shp_len, 1, Rel.LE, self.name)\n if lr_shp_len == 1:\n validator.check_integer(\"lr_shape[0]\", lr_shape[0], 1, Rel.EQ, self.name)\n l1_shp_len = len(l1_shape)\n validator.check_integer(\"l1's rank\", l1_shp_len, 1, Rel.LE, self.name)\n if l1_shp_len == 1:\n validator.check_integer(\"l1_shape[0]\", l1_shape[0], 1, Rel.EQ, self.name)\n l2_shp_len = len(l2_shape)\n validator.check_integer(\"l2's rank\", l2_shp_len, 1, Rel.LE, self.name)\n if l2_shp_len == 1:\n validator.check_integer(\"l2_shape[0]\", l2_shape[0], 1, Rel.EQ, self.name)\n return var_shape, accum_shape\n\n def infer_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype):\n valid_types = [mstype.float16, mstype.float32]\n args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}\n validator.check_tensor_type_same(args, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"lr\": lr_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"l1\": l1_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"l2\": l2_dtype}, valid_types, self.name)\n return var_dtype, accum_dtype\n\n\nclass SparseApplyProximalAdagrad(PrimitiveWithCheck):\n r\"\"\"\n Update relevant entries according to the proximal adagrad algorithm. Compared with ApplyProximalAdagrad,\n an additional index tensor is input.\n\n .. math::\n accum += grad * grad\n .. 
math::\n \\text{prox_v} = var - lr * grad * \\frac{1}{\\sqrt{accum}}\n .. math::\n var = \\frac{sign(\\text{prox_v})}{1 + lr * l2} * \\max(\\left| \\text{prox_v} \\right| - lr * l1, 0)\n\n Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules\n to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n use_locking (bool): If true, the var and accumulation tensors will be protected from being updated.\n Default: False.\n\n Inputs:\n - **var** (Parameter) - Variable tensor to be updated. The data type must be float16 or float32.\n - **accum** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.\n - **lr** (Union[Number, Tensor]) - The learning rate value. Tshould be a float number or\n a scalar tensor with float16 or float32 data type.\n - **l1** (Union[Number, Tensor]) - l1 regularization strength. should be a float number or\n a scalar tensor with float16 or float32 data type.\n - **l2** (Union[Number, Tensor]) - l2 regularization strength. should be a float number or\n a scalar tensor with float16 or float32 data type..\n - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.\n - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.\n\n Outputs:\n Tuple of 2 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **accum** (Tensor) - The same shape and data type as `accum`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.sparse_apply_proximal_adagrad = P.SparseApplyProximalAdagrad()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"accum\")\n >>> self.lr = 0.01\n >>> self.l1 = 0.0\n >>> self.l2 = 0.0\n >>> def construct(self, grad, indices):\n >>> out = self.sparse_apply_proximal_adagrad(self.var, self.accum, self.lr, self.l1,\n self.l2, grad, indices)\n >>> return out\n >>> net = Net()\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> indices = Tensor(np.ones((3,), np.int32))\n >>> output = net(grad, indices)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T1),\n sig.make_sig('l1', dtype=sig.sig_dtype.T2),\n sig.make_sig('l2', dtype=sig.sig_dtype.T3),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T4),\n )\n\n @prim_attr_register\n def __init__(self, use_locking=False):\n self.init_prim_io_names(inputs=['var', 'accum', 'lr', 'l1', 'l2', 'grad', 'indices'],\n outputs=['var', 'accum'])\n self.use_locking = validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n\n def check_shape(self, var_shape, accum_shape, lr_shape, l1_shape, l2_shape, grad_shape, indices_shape):\n validator.check_integer(\"indices rank\", len(indices_shape), 1, Rel.EQ, self.name)\n\n def check_dtype(self, var_dtype, accum_dtype, lr_dtype, l1_dtype, l2_dtype, grad_dtype, 
indices_dtype):\n args = {'var': var_dtype, 'accum': accum_dtype, 'grad': grad_dtype}\n validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)\n validator.check_scalar_or_tensor_type_same({\"lr\": lr_dtype}, [mstype.float16, mstype.float32], self.name)\n validator.check_scalar_or_tensor_type_same({\"l1\": l1_dtype}, [mstype.float16, mstype.float32], self.name)\n validator.check_scalar_or_tensor_type_same({\"l2\": l2_dtype}, [mstype.float16, mstype.float32], self.name)\n valid_types = [mstype.int16, mstype.int32, mstype.int64,\n mstype.uint16, mstype.uint32, mstype.uint64]\n validator.check_tensor_type_same({'indices': indices_dtype}, valid_types, self.name)\n\n\nclass ApplyAddSign(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the AddSign algorithm.\n\n .. math::\n \\begin{array}{ll} \\\\\n m_{t} = \\beta * m_{t-1} + (1 - \\beta) * g \\\\\n \\text{update} = (\\alpha + \\text{sign_decay} * sign(g) * sign(m)) * g \\\\\n var = var - lr_{t} * \\text{update}\n \\end{array}\n\n :math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`\n is the last momentent of :math:`m_{t}`, :math:`lr` represents scaling factor `lr`, :math:`g` represents `grad`.\n\n Inputs of `var`, `accum` and `grad` comply with the implicit type conversion rules\n to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Inputs:\n - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.\n - **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.\n - **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.\n With float32 or float16 data type.\n - **alpha** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.\n - **sign_decay** (Union[Number, Tensor]) - Should be a scalar. 
With float32 or float16 data type.\n - **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.\n With float32 or float16 data type.\n - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.\n\n Outputs:\n Tuple of 2 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **m** (Tensor) - The same shape and data type as `m`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_add_sign = P.ApplyAddSign()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"m\")\n >>> self.lr = 0.001\n >>> self.alpha = 1.0\n >>> self.sign_decay = 0.99\n >>> self.beta = 0.9\n >>> def construct(self, grad):\n >>> out = self.apply_add_sign(self.var, self.m, self.lr, self.alpha, self.sign_decay, self.beta, grad)\n >>> return out\n >>> net = Net()\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> output = net(grad)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T1),\n sig.make_sig('alpha', dtype=sig.sig_dtype.T2),\n sig.make_sig('sign_decay', dtype=sig.sig_dtype.T3),\n sig.make_sig('beta', dtype=sig.sig_dtype.T3),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self):\n \"init ApplyAddSign\"\n\n def infer_shape(self, var_shape, m_shape, lr_shape, alpha_shape, sign_decay_shape, beta_shape, grad_shape):\n validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)\n validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)\n lr_shape_len = len(lr_shape)\n validator.check_integer(\"lr's rank\", lr_shape_len, 1, Rel.LE, self.name)\n if lr_shape_len == 1:\n validator.check_integer(\"lr_shape[0]\", lr_shape[0], 1, Rel.EQ, self.name)\n alpha_shape_len = len(alpha_shape)\n validator.check_integer(\"alpha's rank\", alpha_shape_len, 1, Rel.LE, self.name)\n if alpha_shape_len == 1:\n validator.check_integer(\"alpha_shape[0]\", alpha_shape[0], 1, Rel.EQ, self.name)\n sign_decay_shape_len = len(sign_decay_shape)\n validator.check_integer(\"sign_decay's rank\", sign_decay_shape_len, 1, Rel.LE, self.name)\n if sign_decay_shape_len == 1:\n validator.check_integer(\"sign_decay_shape[0]\", sign_decay_shape[0], 1, Rel.EQ, self.name)\n beta_shape_len = len(beta_shape)\n validator.check_integer(\"beta's rank\", beta_shape_len, 1, Rel.LE, self.name)\n if beta_shape_len == 1:\n validator.check_integer(\"beta_shape[0]\", beta_shape[0], 1, Rel.EQ, self.name)\n return var_shape, m_shape\n\n def infer_dtype(self, var_dtype, m_dtype, lr_dtype, alpha_dtype, sign_decay_dtype, beta_dtype, grad_dtype):\n valid_types = [mstype.float16, mstype.float32]\n args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}\n validator.check_tensor_type_same(args, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"lr\": lr_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"alpha\": alpha_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"sign_decay\": sign_decay_dtype}, valid_types, self.name)\n 
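# beta, the exponential decay rate, follows the same rule as lr, alpha and sign_decay: it may be a\n        # Python number or a scalar tensor, but its element type must be float16 or float32.\n        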
validator.check_scalar_or_tensor_type_same({\"beta\": beta_dtype}, valid_types, self.name)\n return var_dtype, m_dtype\n\n\nclass ApplyPowerSign(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the AddSign algorithm.\n\n .. math::\n \\begin{array}{ll} \\\\\n m_{t} = \\beta * m_{t-1} + (1 - \\beta) * g \\\\\n \\text{update} = \\exp(\\text{logbase} * \\text{sign_decay} * sign(g) * sign(m)) * g \\\\\n var = var - lr_{t} * \\text{update}\n \\end{array}\n\n :math:`t` represents updating step while :math:`m` represents the 1st moment vector, :math:`m_{t-1}`\n is the last momentent of :math:`m_{t}`, :math:`lr` represents scaling factor `lr`, :math:`g` represents `grad`.\n\n All of inputs comply with the implicit type conversion rules to make the data types consistent.\n If `lr`, `logbase`, `sign_decay` or `beta` is a number, the number is automatically converted to Tensor,\n and the data type is consistent with the Tensor data type involved in the operation.\n If inputs are tensors and have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Inputs:\n - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.\n If data type of `var` is float16, all inputs must have the same data type as `var`.\n - **m** (Parameter) - Variable tensor to be updated, has the same dtype as `var`.\n - **lr** (Union[Number, Tensor]) - The learning rate value, should be a scalar.\n With float32 or float16 data type.\n - **logbase** (Union[Number, Tensor]) - Should be a scalar. With float32 or float16 data type.\n - **sign_decay** (Union[Number, Tensor]) - Should be a scalar. 
With float32 or float16 data type.\n - **beta** (Union[Number, Tensor]) - The exponential decay rate, should be a scalar.\n With float32 or float16 data type.\n - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.\n\n Outputs:\n Tuple of 2 Tensor, the updated parameters.\n\n - **var** (Tensor) - The same shape and data type as `var`.\n - **m** (Tensor) - The same shape and data type as `m`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_power_sign = P.ApplyPowerSign()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.m = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"m\")\n >>> self.lr = 0.001\n >>> self.logbase = np.e\n >>> self.sign_decay = 0.99\n >>> self.beta = 0.9\n >>> def construct(self, grad):\n >>> out = self.apply_power_sign(self.var, self.m, self.lr, self.logbase,\n self.sign_decay, self.beta, grad)\n >>> return out\n >>> net = Net()\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> output = net(grad)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('lr', dtype=sig.sig_dtype.T),\n sig.make_sig('logbase', dtype=sig.sig_dtype.T),\n sig.make_sig('sign_decay', dtype=sig.sig_dtype.T),\n sig.make_sig('beta', dtype=sig.sig_dtype.T),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self):\n \"init ApplyPowerSign\"\n\n def infer_shape(self, var_shape, m_shape, lr_shape, logbase_shape, sign_decay_shape, beta_shape, grad_shape):\n validator.check('m_shape', m_shape, 'var_shape', var_shape, Rel.EQ, self.name)\n validator.check('grad_shape', grad_shape, 'var_shape', var_shape, Rel.EQ, self.name)\n lr_shape_len = len(lr_shape)\n validator.check_integer(\"lr's rank\", lr_shape_len, 1, Rel.LE, self.name)\n if lr_shape_len == 1:\n validator.check_integer(\"lr_shape[0]\", lr_shape[0], 1, Rel.EQ, self.name)\n logbase_shape_len = len(logbase_shape)\n validator.check_integer(\"logbase's rank\", logbase_shape_len, 1, Rel.LE, self.name)\n if logbase_shape_len == 1:\n validator.check_integer(\"logbase_shape[0]\", logbase_shape[0], 1, Rel.EQ, self.name)\n sign_decay_shape_len = len(sign_decay_shape)\n validator.check_integer(\"sign_decay's rank\", sign_decay_shape_len, 1, Rel.LE, self.name)\n if sign_decay_shape_len == 1:\n validator.check_integer(\"sign_decay_shape[0]\", sign_decay_shape[0], 1, Rel.EQ, self.name)\n beta_shape_len = len(beta_shape)\n validator.check_integer(\"beta's rank\", beta_shape_len, 1, Rel.LE, self.name)\n if beta_shape_len == 1:\n validator.check_integer(\"beta_shape[0]\", beta_shape[0], 1, Rel.EQ, self.name)\n return var_shape, m_shape\n\n def infer_dtype(self, var_dtype, m_dtype, lr_dtype, logbase_dtype, sign_decay_dtype, beta_dtype, grad_dtype):\n valid_types = [mstype.float16, mstype.float32]\n args = {'var': var_dtype, 'm': m_dtype, 'grad': grad_dtype}\n validator.check_tensor_type_same(args, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"lr\": lr_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"logbase\": logbase_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"sign_decay\": 
sign_decay_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"beta\": beta_dtype}, valid_types, self.name)\n return var_dtype, m_dtype\n\n\nclass ApplyGradientDescent(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the following formula.\n\n .. math::\n var = var - \\alpha * \\delta\n\n Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Inputs:\n - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.\n - **alpha** (Union[Number, Tensor]) - Scaling factor, should be a scalar. With float32 or float16 data type.\n - **delta** (Tensor) - A tensor for the change, has the same type as `var`.\n\n Outputs:\n Tensor, represents the updated `var`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_gradient_descent = P.ApplyGradientDescent()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.alpha = 0.001\n >>> def construct(self, delta):\n >>> out = self.apply_gradient_descent(self.var, self.alpha, delta)\n >>> return out\n >>> net = Net()\n >>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> output = net(delta)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('alpha', dtype=sig.sig_dtype.T1),\n sig.make_sig('delta', dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self):\n \"init ApplyGradientDescent\"\n\n def infer_shape(self, var_shape, alpha_shape, delta_shape):\n validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)\n alpha_shape_len = len(alpha_shape)\n validator.check_integer(\"alpha's rank\", alpha_shape_len, 1, Rel.LE, self.name)\n if alpha_shape_len == 1:\n validator.check_integer(\"alpha_shape[0]\", alpha_shape[0], 1, Rel.EQ, self.name)\n return var_shape\n\n def infer_dtype(self, var_dtype, alpha_dtype, delta_dtype):\n valid_types = [mstype.float16, mstype.float32]\n args = {'var': var_dtype, 'delta': delta_dtype}\n validator.check_tensor_type_same(args, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"alpha\": alpha_dtype}, valid_types, self.name)\n return var_dtype\n\n\nclass ApplyProximalGradientDescent(PrimitiveWithInfer):\n r\"\"\"\n Update relevant entries according to the FOBOS(Forward Backward Splitting) algorithm.\n\n .. math::\n \\text{prox_v} = var - \\alpha * \\delta\n .. math::\n var = \\frac{sign(\\text{prox_v})}{1 + \\alpha * l2} * \\max(\\left| \\text{prox_v} \\right| - alpha * l1, 0)\n\n Inputs of `var` and `delta` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Inputs:\n - **var** (Parameter) - Variable tensor to be updated. With float32 or float16 data type.\n - **alpha** (Union[Number, Tensor]) - Saling factor, should be a scalar. 
With float32 or float16 data type.\n - **l1** (Union[Number, Tensor]) - l1 regularization strength, should be scalar.\n With float32 or float16 data type.\n - **l2** (Union[Number, Tensor]) - l2 regularization strength, should be scalar.\n With float32 or float16 data type.\n - **delta** (Tensor) - A tensor for the change, has the same type as `var`.\n\n Outputs:\n Tensor, represents the updated `var`.\n\n Examples:\n >>> import numpy as np\n >>> import mindspore.nn as nn\n >>> from mindspore import Tensor, Parameter\n >>> from mindspore.ops import operations as P\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.apply_proximal_gradient_descent = P.ApplyProximalGradientDescent()\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.alpha = 0.001\n >>> self.l1 = 0.0\n >>> self.l2 = 0.0\n >>> def construct(self, delta):\n >>> out = self.apply_proximal_gradient_descent(self.var, self.alpha, self.l1, self.l2, delta)\n >>> return out\n >>> net = Net()\n >>> delta = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> output = net(delta)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('alpha', dtype=sig.sig_dtype.T1),\n sig.make_sig('l1', dtype=sig.sig_dtype.T2),\n sig.make_sig('l2', dtype=sig.sig_dtype.T3),\n sig.make_sig('delta', dtype=sig.sig_dtype.T),\n )\n\n @prim_attr_register\n def __init__(self):\n \"init ApplyGradientDescent\"\n\n def infer_shape(self, var_shape, alpha_shape, l1_shape, l2_shape, delta_shape):\n validator.check('delta shape', delta_shape, 'var shape', var_shape, Rel.EQ, self.name)\n alpha_shape_len = len(alpha_shape)\n validator.check_integer(\"alpha's rank\", alpha_shape_len, 1, Rel.LE, self.name)\n if alpha_shape_len == 1:\n validator.check_integer(\"alpha_shape[0]\", alpha_shape[0], 1, Rel.EQ, self.name)\n l1_shape_len = len(l1_shape)\n validator.check_integer(\"l1's rank\", l1_shape_len, 1, Rel.LE, self.name)\n if l1_shape_len == 1:\n validator.check_integer(\"l1_shape[0]\", l1_shape[0], 1, Rel.EQ, self.name)\n l2_shape_len = len(l2_shape)\n validator.check_integer(\"l2's rank\", l2_shape_len, 1, Rel.LE, self.name)\n if l2_shape_len == 1:\n validator.check_integer(\"l2_shape[0]\", l2_shape[0], 1, Rel.EQ, self.name)\n return var_shape\n\n def infer_dtype(self, var_dtype, alpha_dtype, l1_dtype, l2_dtype, delta_dtype):\n valid_types = [mstype.float16, mstype.float32]\n args = {'var': var_dtype, 'delta': delta_dtype}\n validator.check_tensor_type_same(args, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"alpha\": alpha_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"l1\": l1_dtype}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"l2\": l2_dtype}, valid_types, self.name)\n return var_dtype\n\n\nclass LARSUpdate(PrimitiveWithInfer):\n \"\"\"\n Conduct lars (layer-wise adaptive rate scaling) update on the square sum of gradient.\n\n Args:\n epsilon (float): Term added to the denominator to improve numerical stability. Default: 1e-05.\n hyperpara (float): Trust coefficient for calculating the local learning rate. Default: 0.001.\n use_clip (bool): Whether to use clip operation for calculating the local learning rate. 
Default: False.\n\n Inputs:\n - **weight** (Tensor) - The weight to be updated.\n - **gradient** (Tensor) - The gradient of weight, which has the same shape and dtype with weight.\n - **norm_weight** (Tensor) - A scalar tensor, representing the square sum of weight.\n - **norm_gradient** (Tensor) - A scalar tensor, representing the square sum of gradient.\n - **weight_decay** (Union[Number, Tensor]) - Weight decay. It should be a scalar tensor or number.\n - **learning_rate** (Union[Number, Tensor]) - Learning rate. It should be a scalar tensor or number.\n\n Outputs:\n Tensor, represents the new gradient.\n\n Examples:\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> from mindspore.ops import functional as F\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> class Net(nn.Cell):\n >>> def __init__(self):\n >>> super(Net, self).__init__()\n >>> self.lars = P.LARSUpdate()\n >>> self.reduce = P.ReduceSum()\n >>> def construct(self, weight, gradient):\n >>> w_square_sum = self.reduce(F.square(weight))\n >>> grad_square_sum = self.reduce(F.square(gradient))\n >>> grad_t = self.lars(weight, gradient, w_square_sum, grad_square_sum, 0.0, 1.0)\n >>> return grad_t\n >>> weight = np.random.random(size=(2, 3)).astype(np.float32)\n >>> gradient = np.random.random(size=(2, 3)).astype(np.float32)\n >>> net = Net()\n >>> ms_output = net(Tensor(weight), Tensor(gradient))\n \"\"\"\n\n @prim_attr_register\n def __init__(self, epsilon=1e-05, hyperpara=0.001, use_clip=False):\n \"\"\"init\"\"\"\n validator.check_value_type(\"epsilon\", epsilon, [float], self.name)\n validator.check_value_type(\"hyperpara\", hyperpara, [float], self.name)\n validator.check_value_type(\"use_clip\", use_clip, [bool], self.name)\n\n def infer_shape(self, weight_shape, gradient_shape, norm_weight_shape, norm_gradient_shape, weight_decay_shape,\n learning_rate_shape):\n validator.check(\"weight shape\", weight_shape, \"gradient shape\", gradient_shape, Rel.EQ, self.name)\n validator.check(\"norm weight shape\", norm_weight_shape, \"norm gradient shape\", norm_gradient_shape, Rel.EQ,\n self.name)\n shp_len = len(weight_decay_shape)\n validator.check_integer(\"weight decay's rank\", shp_len, 1, Rel.LE, self.name)\n if shp_len == 1:\n validator.check_integer(\"weight_decay_shape[0]\", weight_decay_shape[0], 1, Rel.EQ, self.name)\n shp_len = len(learning_rate_shape)\n validator.check_integer(\"learning rate's rank\", shp_len, 1, Rel.LE, self.name)\n if shp_len == 1:\n validator.check_integer(\"learning_rate_shape[0]\", learning_rate_shape[0], 1, Rel.EQ, self.name)\n return weight_shape\n\n def infer_dtype(self, weight_dtype, gradient_dtype, norm_weight_dtype, norm_gradient_dtype,\n weight_decay_dtype, learning_rate_dtype):\n args = {\"Weight dtype\": weight_dtype, \"gradient dtype\": gradient_dtype, \"norm weight dtype\": norm_weight_dtype,\n \"norm gradient dtype\": norm_gradient_dtype}\n validator.check_tensor_type_same(args, [mstype.float16, mstype.float32, mstype.int16, mstype.int32], self.name)\n validator.check_scalar_or_tensor_type_same({\"weight_decay\": weight_decay_dtype},\n [mstype.float16, mstype.float32, mstype.float64], self.name)\n validator.check_scalar_or_tensor_type_same({\"learning_rate\": learning_rate_dtype},\n [mstype.float16, mstype.float32, mstype.float64], self.name)\n return weight_dtype\n\n\nclass ApplyFtrl(PrimitiveWithInfer):\n \"\"\"\n Update relevant entries according to the FTRL scheme.\n\n Args:\n use_locking (bool): Use locks for updating operation if 
True . Default: False.\n\n Inputs:\n - **var** (Parameter) - The variable to be updated. The data type should be float16 or float32.\n - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.\n - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.\n - **grad** (Tensor) - Gradient. The data type should be float16 or float32.\n - **lr** (Union[Number, Tensor]) - The learning rate value, must be positive. Default: 0.001.\n It should be a float number or a scalar tensor with float16 or float32 data type.\n - **l1** (Union[Number, Tensor]) - l1 regularization strength, must be greater than or equal to zero.\n Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.\n - **l2** (Union[Number, Tensor]) - l2 regularization strength, must be greater than or equal to zero.\n Default: 0.0. It should be a float number or a scalar tensor with float16 or float32 data type.\n - **lr_power** (Union[Number, Tensor]) - Learning rate power controls how the learning rate decreases\n during training, must be less than or equal to zero. Use fixed learning rate if lr_power is zero.\n Default: -0.5. It should be a float number or a scalar tensor with float16 or float32 data type.\n\n Outputs:\n Tensor, represents the updated `var`.\n\n Examples:\n >>> import mindspore\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> from mindspore import Parameter\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> class ApplyFtrlNet(nn.Cell):\n >>> def __init__(self):\n >>> super(ApplyFtrlNet, self).__init__()\n >>> self.apply_ftrl = P.ApplyFtrl()\n >>> self.lr = 0.001\n >>> self.l1 = 0.0\n >>> self.l2 = 0.0\n >>> self.lr_power = -0.5\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"accum\")\n >>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"linear\")\n >>>\n >>> def construct(self, grad):\n >>> out = self.apply_ftrl(self.var, self.accum, self.linear, grad, self.lr, self.l1, self.l2,\n >>> self.lr_power)\n >>> return out\n >>>\n >>> net = ApplyFtrlNet()\n >>> input_x = Tensor(np.random.randint(-4, 4, (3, 3)), mindspore.float32)\n >>> result = net(input_x)\n [[0.67455846 0.14630564 0.160499 ]\n [0.16329421 0.00415689 0.05202988]\n [0.18672481 0.17418946 0.36420345]]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, use_locking=False):\n self.init_prim_io_names(inputs=['var', 'accum', 'linear', 'grad', 'lr', 'l1', 'l2', 'lr_power'],\n outputs=['output'])\n self.use_locking = validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n self.is_tbe = context.get_context(\"device_target\") == \"Ascend\"\n\n def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, lr_shape, l1_shape, l2_shape,\n lr_power_shape):\n validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)\n validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)\n if self.is_tbe:\n return var_shape, var_shape, var_shape\n return var_shape\n\n def infer_dtype(self, var_type, accum_type, linear_type, grad_type, lr_type, l1_type, l2_type, lr_power_type):\n valid_types = [mstype.float16, mstype.float32]\n args = {'var': var_type, 'accum': accum_type, 'linear': linear_type, 'grad': grad_type}\n validator.check_tensor_type_same(args, valid_types, 
self.name)\n\n validator.check_scalar_or_tensor_type_same({\"lr\": lr_type}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"l1\": l1_type}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"l2\": l2_type}, valid_types, self.name)\n validator.check_scalar_or_tensor_type_same({\"lr_power\": lr_power_type}, valid_types, self.name)\n if self.is_tbe:\n return var_type, var_type, var_type\n return var_type\n\n\nclass SparseApplyFtrl(PrimitiveWithCheck):\n \"\"\"\n Update relevant entries according to the FTRL-proximal scheme.\n\n All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n lr (float): The learning rate value, must be positive.\n l1 (float): l1 regularization strength, must be greater than or equal to zero.\n l2 (float): l2 regularization strength, must be greater than or equal to zero.\n lr_power (float): Learning rate power controls how the learning rate decreases during training,\n must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.\n use_locking (bool): Use locks for updating operation if True . Default: False.\n\n Inputs:\n - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.\n - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.\n - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.\n - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.\n - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.\n The shape of `indices` must be the same as `grad` in first dimension. 
The type must be int32.\n\n Outputs:\n - **var** (Tensor) - Tensor, has the same shape and type as `var`.\n - **accum** (Tensor) - Tensor, has the same shape and type as `accum`.\n - **linear** (Tensor) - Tensor, has the same shape and type as `linear`.\n\n Examples:\n >>> import mindspore\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> from mindspore import Parameter\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> class SparseApplyFtrlNet(nn.Cell):\n >>> def __init__(self):\n >>> super(SparseApplyFtrlNet, self).__init__()\n >>> self.sparse_apply_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5)\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"accum\")\n >>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"linear\")\n >>>\n >>> def construct(self, grad, indices):\n >>> out = self.sparse_apply_ftrl(self.var, self.accum, self.linear, grad, indices)\n >>> return out\n >>>\n >>> net = SparseApplyFtrlNet()\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> indices = Tensor(np.ones([3]), mindspore.int32)\n >>> output = net(grad, indices)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n )\n\n @prim_attr_register\n def __init__(self, lr, l1, l2, lr_power, use_locking=False):\n validator.check_value_type(\"lr\", lr, [float], self.name)\n validator.check_value_type(\"l1\", l1, [float], self.name)\n validator.check_value_type(\"l2\", l2, [float], self.name)\n validator.check_value_type(\"lr_power\", lr_power, [float], self.name)\n self.lr = validator.check_number_range(\"lr\", lr, 0.0, float(\"inf\"), Rel.INC_NEITHER, self.name)\n self.l1 = validator.check_number_range(\"l1\", l1, 0.0, float(\"inf\"), Rel.INC_LEFT, self.name)\n self.l2 = validator.check_number_range(\"l2\", l2, 0.0, float(\"inf\"), Rel.INC_LEFT, self.name)\n self.lr_power = validator.check_number(\"lr_power\", lr_power, 0, Rel.LE, self.name)\n self.use_locking = validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n\n def check_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):\n validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)\n validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)\n if len(var_shape) > 1:\n validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)\n validator.check_integer(\"indices rank\", len(indices_shape), 1, Rel.EQ, self.name)\n validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)\n\n def check_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):\n args = {\"var_dtype\": var_dtype, \"accum_dtype\": accum_dtype,\n \"linear_dtype\": linear_dtype, \"grad_dtype\": grad_dtype}\n validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)\n validator.check_tensor_type_same({\"indices_dtype\": indices_dtype}, [mstype.int32], self.name)\n\n\nclass SparseApplyFtrlV2(PrimitiveWithInfer):\n \"\"\"\n Update relevant entries 
according to the FTRL-proximal scheme.\n\n All of inputs except `indices` comply with the implicit type conversion rules to make the data types consistent.\n If they have different data types, lower priority data type will be converted to\n relatively highest priority data type.\n RuntimeError exception will be thrown when the data type conversion of Parameter is required.\n\n Args:\n lr (float): The learning rate value, must be positive.\n l1 (float): l1 regularization strength, must be greater than or equal to zero.\n l2 (float): l2 regularization strength, must be greater than or equal to zero.\n l2_shrinkage (float): L2 shrinkage regularization.\n lr_power (float): Learning rate power controls how the learning rate decreases during training,\n must be less than or equal to zero. Use fixed learning rate if `lr_power` is zero.\n use_locking (bool): If `True`, the var and accumulation tensors will be protected from being updated.\n Default: False.\n\n Inputs:\n - **var** (Parameter) - The variable to be updated. The data type must be float16 or float32.\n - **accum** (Parameter) - The accumulation to be updated, must be same type and shape as `var`.\n - **linear** (Parameter) - the linear coefficient to be updated, must be same type and shape as `var`.\n - **grad** (Tensor) - A tensor of the same type as `var`, for the gradient.\n - **indices** (Tensor) - A vector of indices into the first dimension of `var` and `accum`.\n The shape of `indices` must be the same as `grad` in first dimension. The type must be int32.\n\n Outputs:\n Tuple of 3 Tensor, the updated parameters.\n\n - **var** (Tensor) - Tensor, has the same shape and type as `var`.\n - **accum** (Tensor) - Tensor, has the same shape and type as `accum`.\n - **linear** (Tensor) - Tensor, has the same shape and type as `linear`.\n\n Examples:\n >>> import mindspore\n >>> import mindspore.nn as nn\n >>> import numpy as np\n >>> from mindspore import Parameter\n >>> from mindspore import Tensor\n >>> from mindspore.ops import operations as P\n >>> class SparseApplyFtrlV2Net(nn.Cell):\n >>> def __init__(self):\n >>> super(SparseApplyFtrlV2Net, self).__init__()\n >>> self.sparse_apply_ftrl_v2 = P.SparseApplyFtrlV2(lr=0.01, l1=0.0, l2=0.0,\n l2_shrinkage=0.0, lr_power=-0.5)\n >>> self.var = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"var\")\n >>> self.accum = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"accum\")\n >>> self.linear = Parameter(Tensor(np.random.rand(3, 3).astype(np.float32)), name=\"linear\")\n >>>\n >>> def construct(self, grad, indices):\n >>> out = self.sparse_apply_ftrl_v2(self.var, self.accum, self.linear, grad, indices)\n >>> return out\n >>>\n >>> net = SparseApplyFtrlV2Net()\n >>> grad = Tensor(np.random.rand(3, 3).astype(np.float32))\n >>> indices = Tensor(np.ones([3]), mindspore.int32)\n >>> output = net(grad, indices)\n \"\"\"\n\n __mindspore_signature__ = (\n sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('accum', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('linear', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),\n sig.make_sig('grad', dtype=sig.sig_dtype.T),\n sig.make_sig('indices', dtype=sig.sig_dtype.T1),\n )\n\n @prim_attr_register\n def __init__(self, lr, l1, l2, l2_shrinkage, lr_power, use_locking=False):\n validator.check_value_type(\"lr\", lr, [float], self.name)\n validator.check_value_type(\"l1\", l1, [float], self.name)\n validator.check_value_type(\"l2\", l2, [float], self.name)\n 
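# lr_power must also be a plain float; the range checks below then require lr > 0, l1 >= 0, l2 >= 0\n        # and lr_power <= 0 (a zero lr_power means the learning rate stays fixed during training).\n        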
validator.check_value_type(\"lr_power\", lr_power, [float], self.name)\n self.lr = validator.check_number_range(\"lr\", lr, 0.0, float(\"inf\"), Rel.INC_NEITHER, self.name)\n self.l1 = validator.check_number_range(\"l1\", l1, 0.0, float(\"inf\"), Rel.INC_LEFT, self.name)\n self.l2 = validator.check_number_range(\"l2\", l2, 0.0, float(\"inf\"), Rel.INC_LEFT, self.name)\n self.lr_power = validator.check_number(\"lr_power\", lr_power, 0, Rel.LE, self.name)\n self.l2_shrinkage = validator.check_value_type(\"l2_shrinkage\", l2_shrinkage, [float], self.name)\n self.use_locking = validator.check_value_type(\"use_locking\", use_locking, [bool], self.name)\n\n def infer_shape(self, var_shape, accum_shape, linear_shape, grad_shape, indices_shape):\n validator.check('var shape', var_shape, 'accum shape', accum_shape, Rel.EQ, self.name)\n validator.check('var shape', var_shape, 'linear shape', linear_shape, Rel.EQ, self.name)\n if len(var_shape) > 1:\n validator.check('var_shape[1:]', var_shape[1:], 'grad_shape[1:]', grad_shape[1:], Rel.EQ, self.name)\n validator.check_integer(\"indices rank\", len(indices_shape), 1, Rel.EQ, self.name)\n validator.check('grad_shape[0]', grad_shape[0], 'indices_shape[0]', indices_shape[0], Rel.EQ, self.name)\n return var_shape, accum_shape, linear_shape\n\n def infer_dtype(self, var_dtype, accum_dtype, linear_dtype, grad_dtype, indices_dtype):\n args = {\"var_dtype\": var_dtype, \"accum_dtype\": accum_dtype,\n \"linear_dtype\": linear_dtype, \"grad_dtype\": grad_dtype}\n validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)\n validator.check_tensor_type_same({\"indices_dtype\": indices_dtype}, [mstype.int32], self.name)\n return var_dtype, accum_dtype, linear_dtype\n\n\nclass ConfusionMulGrad(PrimitiveWithInfer):\n \"\"\"\n `output0` is the dot product result of input0 and input1.\n\n `output1` is the dot product result of input0 and input1, then apply the reducesum operation on it.\n\n Args:\n axis (Union[int, tuple[int], list[int]]): The dimensions to reduce.\n Default:(), reduce all dimensions. Only constant value is allowed.\n keep_dims (bool):\n - If true, keep these reduced dimensions and the length as 1.\n - If false, don't keep these dimensions. Default:False.\n\n Inputs:\n - **input_0** (Tensor) - The input Tensor.\n - **input_1** (Tensor) - The input Tensor.\n - **input_2** (Tensor) - The input Tensor.\n\n Outputs:\n - **output_0** (Tensor) - The same shape as `input0`.\n - **output_1** (Tensor)\n\n - If axis is (), and keep_dims is false, the output is a 0-D array representing\n the sum of all elements in the input array.\n - If axis is int, set as 2, and keep_dims is false,\n the shape of output is :math:`(x_1,x_3,...,x_R)`.\n - If axis is tuple(int), set as (2,3), and keep_dims is false,\n the shape of output is :math:`(x_1,x_4,...x_R)`.\n\n Examples:\n >>> confusion_mul_grad = P.ConfusionMulGrad()\n >>> input_0 = Tensor(np.random.randint(-2, 2, (2, 3)), mindspore.float32)\n >>> input_1 = Tensor(np.random.randint(0, 4, (2, 3)), mindspore.float32)\n >>> input_2 = Tensor(np.random.randint(-4, 0, (2, 3)), mindspore.float32)\n >>> output_0, output_1 = confusion_mul_grad(input_0, input_1, input_2)\n output_0:\n [[ 3. 1. 0.]\n [-6. 2. 
-2.]]\n        output_1:\n            -3.0\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, axis=(), keep_dims=False):\n        self.init_prim_io_names(inputs=[\"input0\", \"input1\", \"input2\"], outputs=[\"output0\", \"output1\"])\n        self.axis_ = validator.check_value_type(\"axis\", axis, [int, tuple, list], self.name)\n        self.keep_dims_ = validator.check_value_type(\"keep_dims\", keep_dims, [bool], self.name)\n\n    def infer_shape(self, input0_shape, input1_shape, input2_shape):\n        outshape0 = input0_shape\n        outshape1 = _infer_shape_reduce(input1_shape, self.axis_, self.keep_dims_, self.name)\n        return outshape0, outshape1\n\n    def infer_dtype(self, input0_dtype, input1_dtype, input2_dtype):\n        validator.check_subclass(\"input0_dtype\", input0_dtype, mstype.tensor, self.name)\n        validator.check_subclass(\"input1_dtype\", input1_dtype, mstype.tensor, self.name)\n        validator.check_subclass(\"input2_dtype\", input2_dtype, mstype.tensor, self.name)\n        return input0_dtype, input1_dtype\n\n\nclass Dropout(PrimitiveWithInfer):\n    \"\"\"\n    During training, randomly zeroes some of the elements of the input tensor with probability 1 - `keep_prob`.\n\n    Args:\n        keep_prob (float): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,\n            means dropping out 10% of input units.\n\n    Inputs:\n        - **x** (Tensor) - The input tensor, with float16 or float32 data type.\n\n    Outputs:\n        Tuple of 2 Tensor, each with the same shape and data type as `x`.\n\n        - **output** (Tensor) - The input tensor after dropout.\n        - **mask** (Tensor) - The mask applied to `x`.\n\n    Examples:\n        >>> dropout = P.Dropout(keep_prob=0.5)\n        >>> input_x = Tensor(np.ones([2, 2, 3]).astype(np.float32))\n        >>> output, mask = dropout(input_x)\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, keep_prob=0.5):\n        self.keep_prob = validator.check_number_range(\"keep_prob\", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)\n\n    def infer_shape(self, x_shape):\n        validator.check_integer(\"x_shape\", len(x_shape), 1, Rel.GE, self.name)\n        mask_shape = x_shape\n        return x_shape, mask_shape\n\n    def infer_dtype(self, x_dtype):\n        valid_types = (mstype.float16, mstype.float32)\n        validator.check_subclass(\"x\", x_dtype, mstype.tensor, self.name)\n        validator.check_tensor_type_same({\"x_dtype\": x_dtype}, valid_types, self.name)\n        return x_dtype, x_dtype\n\n\nclass DropoutGrad(PrimitiveWithInfer):\n    \"\"\"\n    The gradient of Dropout. It applies the mask generated by the forward Dropout operation\n    to the output gradient.\n\n    Args:\n        keep_prob (float): The keep rate, between 0 and 1, e.g. 
keep_prob = 0.9,\n            means dropping out 10% of input units.\n\n    Inputs:\n        - **dy** (Tensor) - The gradient of the Dropout output, with float16 or float32 data type.\n        - **mask** (Tensor) - The mask generated by the forward Dropout operation.\n\n    Outputs:\n        Tensor, the gradient of the Dropout input, with the same shape and data type as `dy`.\n\n    Examples:\n        >>> dropout_grad = P.DropoutGrad(keep_prob=0.5)\n        >>> dy = Tensor(np.ones([2, 2, 3]).astype(np.float32))\n        >>> mask = Tensor(np.ones([2, 2, 3]).astype(np.float32))\n        >>> output = dropout_grad(dy, mask)\n    \"\"\"\n\n    @prim_attr_register\n    def __init__(self, keep_prob=0.5):\n        self.keep_prob = validator.check_number_range(\"keep_prob\", keep_prob, 0, 1, Rel.INC_RIGHT, self.name)\n\n    def infer_shape(self, dy_shape, mask_shape):\n        return dy_shape\n\n    def infer_dtype(self, dy_dtype, mask_dtype):\n        valid_types = (mstype.float16, mstype.float32)\n        validator.check_subclass(\"dy\", dy_dtype, mstype.tensor, self.name)\n        validator.check_subclass(\"mask\", mask_dtype, mstype.tensor, self.name)\n        validator.check_tensor_type_same({\"dy_dtype\": dy_dtype}, valid_types, self.name)\n        return dy_dtype\n\n\nclass CTCLoss(PrimitiveWithInfer):\n    \"\"\"\n    Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.\n\n    Args:\n        preprocess_collapse_repeated (bool): If true, repeated labels will be collapsed prior to the CTC calculation.\n            Default: False.\n        ctc_merge_repeated (bool): If false, during CTC calculation, repeated non-blank labels will not be merged\n            and these labels will be interpreted as individual ones. This is a simplified\n            version of CTC. Default: True.\n        ignore_longer_outputs_than_inputs (bool): If True, sequences with longer outputs than inputs will be ignored.\n            Default: False.\n\n    Inputs:\n        - **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is\n          :math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`\n          indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.\n          Data type must be float16, float32 or float64.\n        - **labels_indices** (Tensor) - The indices of labels. `labels_indices[i, :] == [b, t]` means `labels_values[i]`\n          stores the id for `(batch b, time t)`. The type must be int64 and rank must be 2.\n        - **labels_values** (Tensor) - A `1-D` input tensor. The values are associated with the given batch and time.\n          The type must be int32. `labels_values[i]` must be in the range of `[0, num_classes)`.\n        - **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.\n          The type must be int32. Each value in the tensor should not be greater than `max_time`.\n\n    Outputs:\n        - **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`. 
The tensor has\n the same type with `inputs`.\n - **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.\n\n Examples:\n >>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)\n >>> labels_indices = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int64)\n >>> labels_values = Tensor(np.array([2, 2]), mindspore.int32)\n >>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)\n >>> ctc_loss = P.CTCLoss()\n >>> output = ctc_loss(inputs, labels_indices, labels_values, sequence_length)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, preprocess_collapse_repeated=False, ctc_merge_repeated=True,\n ignore_longer_outputs_than_inputs=False):\n self.init_prim_io_names(inputs=[\"inputs\", \"labels_indices\", \"labels_values\", \"sequence_length\"],\n outputs=[\"loss\", \"gradient\"])\n validator.check_value_type(\"preprocess_collapse_repeated\", preprocess_collapse_repeated, [bool], self.name)\n self.preprocess_collapse_repeated_ = preprocess_collapse_repeated\n self.ctc_merge_repeated_ = validator.check_value_type(\"ctc_merge_repeated\", ctc_merge_repeated,\n [bool], self.name)\n validator.check_value_type(\"ignore_longer_outputs_than_inputs\",\n ignore_longer_outputs_than_inputs, [bool], self.name)\n self.ignore_longer_outputs_than_inputs_ = ignore_longer_outputs_than_inputs\n\n def infer_shape(self, inputs, labels_indices, labels_values, sequence_length):\n validator.check_integer(\"inputs rank\", len(inputs), 3, Rel.EQ, self.name)\n validator.check_integer(\"labels_indices rank\", len(labels_indices), 2, Rel.EQ, self.name)\n validator.check_integer(\"labels_indices dim one\", labels_indices[1], 2, Rel.EQ, self.name)\n validator.check_integer(\"labels_values rank\", len(labels_values), 1, Rel.EQ, self.name)\n validator.check_integer(\"sequence_length rank\", len(sequence_length), 1, Rel.EQ, self.name)\n validator.check('labels_indices size', labels_indices[0], 'labels_values size',\n labels_values[0], Rel.EQ, self.name)\n validator.check('inputs batch_size', inputs[1], 'sequence_length batch_size',\n sequence_length[0], Rel.EQ, self.name)\n batch_size = []\n batch_size.append(inputs[1])\n return batch_size, inputs\n\n def infer_dtype(self, inputs, labels_indices, labels_values, sequence_length):\n valid_dtype = [mstype.float16, mstype.float32, mstype.double]\n validator.check_tensor_type_same({\"inputs_dtype\": inputs}, valid_dtype, self.name)\n validator.check_tensor_type_same({\"labels_indices_dtype\": labels_indices}, [mstype.int64], self.name)\n validator.check_tensor_type_same({\"labels_values_dtype\": labels_values}, [mstype.int32], self.name)\n validator.check_tensor_type_same({\"sequence_length_dtype\": sequence_length}, [mstype.int32], self.name)\n return inputs, inputs\n\n\nclass CTCGreedyDecoder(PrimitiveWithInfer):\n \"\"\"\n Performs greedy decoding on the logits given in inputs.\n\n Args:\n merge_repeated (bool): If True, merge repeated classes in output. Default: True.\n\n Inputs:\n - **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is\n :math:`(max_time, batch_size, num_classes)`. `num_classes` should be `num_labels + 1` classes, `num_labels`\n indicates the number of actual labels. Blank labels are reserved. Default blank label is `num_classes - 1`.\n Data type must be float32 or float64.\n - **sequence_length** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.\n The type must be int32. 
Each value in the tensor should not greater than `max_time`.\n\n Outputs:\n - **decoded_indices** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs, 2)`.\n Data type is int64.\n - **decoded_values** (Tensor) - A tensor with shape of :math:`(total_decoded_outputs)`,\n it stores the decoded classes. Data type is int64.\n - **decoded_shape** (Tensor) - The value of tensor is :math:`[batch_size, max_decoded_legth]`.\n Data type is int64.\n - **log_probability** (Tensor) - A tensor with shape of :math:`(batch_size, 1)`,\n containing sequence log-probability, has the same type as `inputs`.\n\n Examples:\n >>> class CTCGreedyDecoderNet(nn.Cell):\n >>> def __init__(self):\n >>> super(CTCGreedyDecoderNet, self).__init__()\n >>> self.ctc_greedy_decoder = P.CTCGreedyDecoder()\n >>> self.assert_op = P.Assert(300)\n >>>\n >>> def construct(self, inputs, sequence_length):\n >>> out = self.ctc_greedy_decoder(inputs,sequence_length)\n >>> self.assert_op(True, (out[0], out[1], out[2], out[3]))\n >>> return out[2]\n >>>\n >>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)\n >>> sequence_length = Tensor(np.array([2, 2]), mindspore.int32)\n >>> net = CTCGreedyDecoderNet()\n >>> output = net(inputs, sequence_length)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, merge_repeated=True):\n self.merge_repeated = validator.check_value_type(\"merge_repeated\", merge_repeated, [bool], self.name)\n\n def infer_shape(self, inputs_shape, sequence_length_shape):\n validator.check_integer(\"inputs rank\", len(inputs_shape), 3, Rel.EQ, self.name)\n validator.check_integer(\"sequence_length rank\", len(sequence_length_shape), 1, Rel.EQ, self.name)\n validator.check('inputs batch_size', inputs_shape[1], 'sequence_length batch_size',\n sequence_length_shape[0], Rel.EQ, self.name)\n total_decoded_outputs = -1\n decoded_indices_shape = [total_decoded_outputs, 2]\n decoded_values = [total_decoded_outputs]\n decoded_shape = [2]\n log_probability_shape = [inputs_shape[1], 1]\n return decoded_indices_shape, decoded_values, decoded_shape, log_probability_shape\n\n def infer_dtype(self, inputs_dtype, sequence_length_dtype):\n validator.check_tensor_type_same({\"inputs_dtype\": inputs_dtype}, [mstype.float32, mstype.double], self.name)\n validator.check_tensor_type_same({\"sequence_length_dtype\": sequence_length_dtype}, [mstype.int32], self.name)\n decoded_type = mstype.tensor_type(mstype.int64)\n return decoded_type, decoded_type, decoded_type, inputs_dtype\n\n\nclass BasicLSTMCell(PrimitiveWithInfer):\n r\"\"\"\n Applies the long short-term memory (LSTM) to the input.\n\n .. math::\n \\begin{array}{ll} \\\\\n i_t = \\sigma(W_{ix} x_t + b_{ix} + W_{ih} h_{(t-1)} + b_{ih}) \\\\\n f_t = \\sigma(W_{fx} x_t + b_{fx} + W_{fh} h_{(t-1)} + b_{fh}) \\\\\n \\tilde{c}_t = \\tanh(W_{cx} x_t + b_{cx} + W_{ch} h_{(t-1)} + b_{ch}) \\\\\n o_t = \\sigma(W_{ox} x_t + b_{ox} + W_{oh} h_{(t-1)} + b_{oh}) \\\\\n c_t = f_t * c_{(t-1)} + i_t * \\tilde{c}_t \\\\\n h_t = o_t * \\tanh(c_t) \\\\\n \\end{array}\n\n Here :math:`\\sigma` is the sigmoid function, and :math:`*` is the Hadamard product. :math:`W, b`\n are learnable weights between the output and the input in the formula. 
For instance,\n :math:`W_{ix}, b_{ix}` are the weight and bias used to transform from input :math:`x` to :math:`i`.\n Details can be found in paper `LONG SHORT-TERM MEMORY\n <https://www.bioinf.jku.at/publications/older/2604.pdf>`_ and\n `Long Short-Term Memory Recurrent Neural Network Architectures for Large Scale Acoustic Modeling\n <https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/43905.pdf>`_.\n\n Args:\n keep_prob (float): If not 1.0, append `Dropout` layer on the outputs of each\n LSTM layer except the last layer. Default 1.0. The range of dropout is [0.0, 1.0].\n forget_bias (float): Add forget bias to forget gate biases in order to decrease former scale. Default: 1.0.\n state_is_tuple (bool): If true, the state is a tuple of 2 tensors, containing h and c; If false, the state is\n a tensor and it needs to be split first. Default: True.\n activation (str): Activation. Default: \"tanh\". Only \"tanh\" is currently supported.\n\n Inputs:\n - **x** (Tensor) - Current words. Tensor of shape (`batch_size`, `input_size`).\n The data type must be float16 or float32.\n - **h** (Tensor) - Hidden state last moment. Tensor of shape (`batch_size`, `hidden_size`).\n The data type must be float16 or float32.\n - **c** (Tensor) - Cell state last moment. Tensor of shape (`batch_size`, `hidden_size`).\n The data type must be float16 or float32.\n - **w** (Tensor) - Weight. Tensor of shape (`input_size + hidden_size`, `4 x hidden_size`).\n The data type must be float16 or float32.\n - **b** (Tensor) - Bias. Tensor of shape (`4 x hidden_size`).\n The data type must be the same as `c`.\n\n Outputs:\n - **ct** (Tensor) - Forward :math:`c_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).\n Has the same type with input `c`.\n - **ht** (Tensor) - Cell output. Tensor of shape (`batch_size`, `hidden_size`). With data type of float16.\n - **it** (Tensor) - Forward :math:`i_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).\n Has the same type with input `c`.\n - **jt** (Tensor) - Forward :math:`j_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).\n Has the same type with input `c`.\n - **ft** (Tensor) - Forward :math:`f_t` cache at moment `t`. Tensor of shape (`batch_size`, `hidden_size`).\n Has the same type with input `c`.\n - **ot** (Tensor) - Forward :math:`o_t` cache at moment `t`. 
Tensor of shape (`batch_size`, `hidden_size`).\n Has the same type with input `c`.\n - **tanhct** (Tensor) - Forward :math:`tanh c_t` cache at moment `t`.\n Tensor of shape (`batch_size`, `hidden_size`), has the same type with input `c`.\n\n Examples:\n >>> x = Tensor(np.random.rand(1, 32).astype(np.float16))\n >>> h = Tensor(np.random.rand(1, 64).astype(np.float16))\n >>> c = Tensor(np.random.rand(1, 64).astype(np.float16))\n >>> w = Tensor(np.random.rand(96, 256).astype(np.float16))\n >>> b = Tensor(np.random.rand(256, ).astype(np.float16))\n >>> lstm = P.BasicLSTMCell(keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh')\n >>> lstm(x, h, c, w, b)\n \"\"\"\n\n @prim_attr_register\n def __init__(self, keep_prob=1.0, forget_bias=1.0, state_is_tuple=True, activation='tanh'):\n self.keep_prob = validator.check_value_type(\"keep_prob\", keep_prob, [float], self.name)\n self.keep_prob = validator.check_number_range(\"keep_prob\", keep_prob, 0.0, 1.0, Rel.INC_BOTH, self.name)\n self.forget_bias = validator.check_value_type(\"forget_bias\", forget_bias, [float], self.name)\n self.state_is_tuple = validator.check_value_type(\"state_is_tuple\", state_is_tuple, [bool], self.name)\n self.activation = validator.check_string(\"activation\", activation, ['tanh'], self.name)\n self.add_prim_attr(\"io_format\", \"ND\")\n\n def infer_shape(self, x_shape, h_shape, c_shape, w_shape, b_shape):\n validator.check_integer(\"x rank\", len(x_shape), 2, Rel.EQ, self.name)\n validator.check_integer(\"h rank\", len(h_shape), 2, Rel.EQ, self.name)\n validator.check_integer(\"c rank\", len(c_shape), 2, Rel.EQ, self.name)\n validator.check_integer(\"w rank\", len(w_shape), 2, Rel.EQ, self.name)\n validator.check_integer(\"b rank\", len(b_shape), 1, Rel.EQ, self.name)\n validator.check(\"x_shape[0]\", x_shape[0], \"h_shape[0]\", h_shape[0], Rel.EQ, self.name)\n validator.check(\"c_shape[0]\", c_shape[0], \"h_shape[0]\", h_shape[0], Rel.EQ, self.name)\n validator.check(\"c_shape[1]\", c_shape[1], \"h_shape[1]\", h_shape[1], Rel.EQ, self.name)\n validator.check(\"w_shape[1]\", w_shape[1], \"4*h_shape[1]\", 4 * h_shape[1], Rel.EQ, self.name)\n validator.check(\"w_shape[0]\", w_shape[0], \"x_shape[1]+h_shape[1]\", x_shape[1] + h_shape[1], Rel.EQ, self.name)\n validator.check(\"b_shape[0]\", b_shape[0], \"4*h_shape[1]\", 4 * h_shape[1], Rel.EQ, self.name)\n ct_shape = c_shape\n ht_shape = c_shape\n it_shape = c_shape\n jt_shape = c_shape\n ft_shape = c_shape\n ot_shape = c_shape\n tanhct_shape = c_shape\n\n return (ct_shape, ht_shape, it_shape, jt_shape, ft_shape, ot_shape, tanhct_shape)\n\n def infer_dtype(self, x_dtype, h_dtype, c_dtype, w_dtype, b_dtype):\n validator.check_tensor_type_same({\"x_dtype\": x_dtype}, [mstype.float16, mstype.float32], self.name)\n validator.check_tensor_type_same({\"h_dtype\": h_dtype}, [mstype.float16, mstype.float32], self.name)\n validator.check_tensor_type_same({\"w_dtype\": w_dtype}, [mstype.float16, mstype.float32], self.name)\n\n args = {\"c_dtype\": c_dtype, \"b_dtype\": b_dtype}\n validator.check_tensor_type_same(args, [mstype.float16, mstype.float32], self.name)\n return (c_dtype, mstype.float16, c_dtype, c_dtype, c_dtype, c_dtype, c_dtype)\n\n\nclass InTopK(PrimitiveWithInfer):\n r\"\"\"\n Whether the targets are in the top `k` predictions.\n\n Args:\n k (int): Specify the number of top elements to be used for computing precision.\n\n Inputs:\n - **x1** (Tensor) - A 2D Tensor defines the predictions of a batch of samples with float16 or float32 data type.\n - 
**x2** (Tensor) - A 1D Tensor defines the labels of a batch of samples with int32 data type.\n\n Outputs:\n Tensor has 1 dimension of type bool and the same shape with `x2`. For labeling sample `i` in `x2`,\n if the label in the first `k` predictions for sample `i` is in `x1`, then the value is True, otherwise False.\n\n Examples:\n >>> x1 = Tensor(np.array([[1, 8, 5, 2, 7], [4, 9, 1, 3, 5]]), mindspore.float32)\n >>> x2 = Tensor(np.array([1, 3]), mindspore.int32)\n >>> in_top_k = P.InTopK(3)\n >>> result = in_top_k(x1, x2)\n [True False]\n \"\"\"\n\n @prim_attr_register\n def __init__(self, k):\n \"\"\"Init InTopK\"\"\"\n self.init_prim_io_names(inputs=['x1', 'x2', 'k'], outputs=['y'])\n validator.check_value_type(\"k\", k, [int], self.name)\n\n def infer_dtype(self, x1_dtype, x2_dtype):\n validator.check_tensor_type_same({\"x1\": x1_dtype}, (mstype.float16, mstype.float32,), self.name)\n validator.check_tensor_type_same({\"x2\": x2_dtype}, (mstype.int32,), self.name)\n\n return mstype.tensor_type(mstype.bool_)\n\n def infer_shape(self, x1_shape, x2_shape):\n validator.check(\"x1\", len(x1_shape), \"\", 2, Rel.EQ, self.name)\n validator.check(\"x2\", len(x2_shape), \"\", 1, Rel.EQ, self.name)\n validator.check(\"size of x2\", x2_shape[0], \"x1's first dimension\", x1_shape[0], Rel.EQ, self.name)\n return x2_shape\n\n\nclass LRN(PrimitiveWithInfer):\n r\"\"\"\n Local Response Normalization\n\n Args:\n depth_radius (int): Half-width of the 1-D normalization window. Shape of 0-D.\n bias (float): An offset (usually positive to avoid dividing by 0).\n alpha (float): A scale factor, usually positive.\n beta (float): An exponent.\n norm_region (str): Specify normalization region. Options: \"ACROSS_CHANNELS\". Default: \"ACROSS_CHANNELS\".\n\n Inputs:\n - **x** (Tensor) - A 4D Tensor with float16 or float32 data type.\n\n Outputs:\n Tensor, With shape and data type same as the input tensor.\n\n Examples:\n >>> x = Tensor(np.random.rand(1, 10, 4, 4)), mindspore.float32)\n >>> lrn = P.LRN()\n >>> lrn(x)\n \"\"\"\n @prim_attr_register\n def __init__(self, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5, norm_region=\"ACROSS_CHANNELS\"):\n \"\"\"Init LRN\"\"\"\n self.init_prim_io_names(inputs=['x'], outputs=['y'])\n validator.check_value_type(\"depth_radius\", depth_radius, [int], self.name)\n validator.check_value_type(\"bias\", bias, [float], self.name)\n validator.check_value_type(\"alpha\", alpha, [float], self.name)\n validator.check_value_type(\"beta\", beta, [float], self.name)\n validator.check_value_type(\"norm_region\", norm_region, [str], self.name)\n validator.check_string('norm_region', norm_region, ['ACROSS_CHANNELS'], self.name)\n validator.check_integer(\"depth_radius\", depth_radius, 0, Rel.GE, self.name)\n\n def infer_dtype(self, x_dtype):\n validator.check_tensor_type_same({\"x\": x_dtype}, (mstype.float16, mstype.float32,), self.name)\n return x_dtype\n\n def infer_shape(self, x_shape):\n validator.check_integer(\"x_shape\", len(x_shape), 4, Rel.EQ, self.name)\n return x_shape\n\n\nclass CTCLossV2(PrimitiveWithInfer):\n r\"\"\"\n Calculates the CTC (Connectionist Temporal Classification) loss and the gradient.\n Note:\n - Cudnn Uses label value of for the `blank`\n\n Inputs:\n - **inputs** (Tensor) - The input Tensor should be a `3-D` tensor whose shape is\n :math:`(max_time, batch_size, num_class)`. `num_class` should be `num_labels + 1` classes, `num_labels`\n indicates the number of actual labels. 
Blank labels are reserved.\n - **labels** (Tensor) - The labels Tensor should be a `1-D` tensor whose shape is\n :math:`(\\sigma{label_lengths})`\n or `2-D` tensor whose shape is\n :math:`(max_time, max{label_lengths})`\n The type must be int32.\n - **input_lengths** (Tensor) - A `1-D` input tensor whose shape is\n :math:`(batch_size,)`. The values should be batch. The type must be int32.\n - **label_lengths** (Tensor) - A tensor containing sequence lengths with the shape of :math:`(batch_size)`.\n The type must be int32. Each value in the tensor should not greater than `max_time`.\n\n Outputs:\n - **loss** (Tensor) - A tensor containing log-probabilities, the shape is :math:`(batch_size)`, has the same\n type with `inputs`.\n - **gradient** (Tensor) - The gradient of `loss`, has the same type and shape with `inputs`.\n\n Examples:\n >>> inputs = Tensor(np.random.random((2, 2, 3)), mindspore.float32)\n >>> labels = Tensor(np.array([[0, 0], [1, 0]]), mindspore.int32)\n >>> input_lengths = Tensor(np.array([3, 3, 3]), mindspore.int32)\n >>> label_lengths = Tensor(np.array([3, 3, 3]), mindspore.int32)\n >>> ctc_loss = P.CTCLossV2()\n >>> output = ctc_loss(inputs, labels, input_lengths, label_lengths)\n \"\"\"\n @prim_attr_register\n def __init__(self):\n pass\n\n def infer_dtype(self, input_dtype, labels_dtype, input_lengths_dtype, label_lengths_dtype):\n validator.check_tensor_type_same({\"input\": input_dtype}, (mstype.float32,), self.name)\n validator.check_tensor_type_same({\"labels\": labels_dtype}, (mstype.int32,), self.name)\n validator.check_tensor_type_same({\"input_lengths\": input_lengths_dtype}, (mstype.int32,), self.name)\n validator.check_tensor_type_same({\"target_lengths\": label_lengths_dtype}, (mstype.int32,), self.name)\n return mstype.float32, mstype.float32\n\n def infer_shape(self, input_shape, labels_shape, input_lengths_shape, label_lengths_shape):\n validator.check_integer(\"input shape\", len(input_shape), 3, Rel.EQ, self.name)\n validator.check_number_range(\"labels shape\", len(labels_shape), 1, 2, Rel.INC_BOTH, self.name)\n validator.check_integer(\"input lengths shape\", len(input_lengths_shape), 1, Rel.EQ, self.name)\n validator.check_integer(\"label lengths shape\", len(label_lengths_shape), 1, Rel.EQ, self.name)\n validator.check_integer(\"input[1]\", input_shape[1], input_lengths_shape[0], Rel.EQ, self.name)\n validator.check_integer(\"input[1]\", input_shape[1], label_lengths_shape[0], Rel.EQ, self.name)\n return (input_shape[1],), input_shape\n" ]
[ [ "numpy.array", "numpy.all" ] ]
nfuster2017/AmazonWebCrawler
[ "d45e2dec826b5cadd632ed8a94c2c4c127430000" ]
[ "venv/Scripts/f2py.py" ]
[ "#!D:\\School\\UMD\\INST326\\Group Project\\venv\\Scripts\\python.exe\n# See http://cens.ioc.ee/projects/f2py2e/\nfrom __future__ import division, print_function\n\nimport os\nimport sys\nfor mode in [\"g3-numpy\", \"2e-numeric\", \"2e-numarray\", \"2e-numpy\"]:\n try:\n i = sys.argv.index(\"--\" + mode)\n del sys.argv[i]\n break\n except ValueError:\n pass\nos.environ[\"NO_SCIPY_IMPORT\"] = \"f2py\"\nif mode == \"g3-numpy\":\n sys.stderr.write(\"G3 f2py support is not implemented, yet.\\\\n\")\n sys.exit(1)\nelif mode == \"2e-numeric\":\n from f2py2e import main\nelif mode == \"2e-numarray\":\n sys.argv.append(\"-DNUMARRAY\")\n from f2py2e import main\nelif mode == \"2e-numpy\":\n from numpy.f2py import main\nelse:\n sys.stderr.write(\"Unknown mode: \" + repr(mode) + \"\\\\n\")\n sys.exit(1)\nmain()\n" ]
[ [ "numpy.f2py.main" ] ]
mjsML/fast_flax
[ "d982b59b715524884d08d6ed506ab325e8be1ece" ]
[ "examples/lm1b/main.py" ]
[ "# Copyright 2021 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Main file for running the Language Modelling example with LM1B.\n\nThis file is intentionally kept short. The majority for logic is in libraries\nthan can be easily tested and imported in Colab.\n\"\"\"\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nfrom clu import platform\nimport train\nimport jax\nfrom ml_collections import config_flags\nimport tensorflow as tf\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string('workdir', None, 'Directory to store model data.')\nconfig_flags.DEFINE_config_file(\n 'config',\n 'configs/default.py',\n 'File path to the training hyperparameter configuration.',\n lock_config=True)\nflags.mark_flags_as_required(['config', 'workdir'])\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n # Hide any GPUs form TensorFlow. Otherwise TF might reserve memory and make\n # it unavailable to JAX.\n tf.config.experimental.set_visible_devices([], 'GPU')\n\n logging.info('JAX process: %d / %d', jax.process_index(), jax.process_count())\n logging.info('JAX local devices: %r', jax.local_devices())\n\n # Add a note so that we can tell which task is which JAX host.\n # (Depending on the platform task 0 is not guaranteed to be host 0)\n platform.work_unit().set_task_status(f'process_index: {jax.process_index()}, '\n f'process_count: {jax.process_count()}')\n platform.work_unit().create_artifact(platform.ArtifactType.DIRECTORY,\n FLAGS.workdir, 'workdir')\n\n train.train_and_evaluate(FLAGS.config, FLAGS.workdir)\n\n\nif __name__ == '__main__':\n jax.config.parse_flags_with_absl()\n app.run(main)\n" ]
[ [ "tensorflow.config.experimental.set_visible_devices" ] ]
keonlee9420/DiffSinger
[ "2bfcae4a78068c2061eae64ee675959a077aa54b" ]
[ "model/optimizer.py" ]
[ "import torch\r\nimport numpy as np\r\n\r\n\r\nclass ScheduledOptim:\r\n \"\"\" A simple wrapper class for learning rate scheduling \"\"\"\r\n\r\n def __init__(self, model, train_config, model_config, current_step):\r\n\r\n self._optimizer = torch.optim.Adam(\r\n model.parameters(),\r\n betas=train_config[\"optimizer\"][\"betas\"],\r\n eps=train_config[\"optimizer\"][\"eps\"],\r\n weight_decay=train_config[\"optimizer\"][\"weight_decay\"],\r\n )\r\n self.n_warmup_steps = train_config[\"optimizer\"][\"warm_up_step\"]\r\n self.anneal_steps = train_config[\"optimizer\"][\"anneal_steps\"]\r\n self.anneal_rate = train_config[\"optimizer\"][\"anneal_rate\"]\r\n self.current_step = current_step\r\n self.init_lr = train_config[\"optimizer\"][\"init_lr\"]\r\n\r\n def step_and_update_lr(self):\r\n self._update_learning_rate()\r\n self._optimizer.step()\r\n\r\n def zero_grad(self):\r\n # print(\"self.init_lr:\", self.init_lr)\r\n self._optimizer.zero_grad()\r\n\r\n def load_state_dict(self, path):\r\n self._optimizer.load_state_dict(path)\r\n\r\n def _get_lr_scale(self):\r\n lr = np.min(\r\n [\r\n np.power(self.current_step, -0.5),\r\n np.power(self.n_warmup_steps, -1.5) * self.current_step,\r\n ]\r\n )\r\n for s in self.anneal_steps:\r\n if self.current_step > s:\r\n lr = lr * self.anneal_rate\r\n return lr\r\n\r\n def _update_learning_rate(self):\r\n \"\"\" Learning rate scheduling per step \"\"\"\r\n self.current_step += 1\r\n lr = self.init_lr\r\n\r\n for param_group in self._optimizer.param_groups:\r\n param_group[\"lr\"] = lr\r\n" ]
[ [ "numpy.power" ] ]
Joevaen/Scikit-image_On_CT
[ "e3bf0eeadc50691041b4b7c44a19d07546a85001" ]
[ "Feature/structure_tensor_eigenvalues.py" ]
[ "# 计算结构张量的特征值。\n\nfrom skimage.feature import structure_tensor\nfrom skimage.feature import structure_tensor_eigenvalues\nimport numpy as np\nsquare = np.zeros((5, 5))\nsquare[2, 2] = 1\nA_elems = structure_tensor(square, sigma=0.1, order='rc')\nprint(structure_tensor_eigenvalues(A_elems)[0])\n\n\n\n\n" ]
[ [ "numpy.zeros" ] ]
desertfireballnetwork/DFN_darkflight
[ "f41d2a2b82ce96f380f26acfe278c0afa536b9cd" ]
[ "orbital_utilities.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nFunctions and objects to deal with meteoroids orbits\n\"\"\"\n\n__author__ = \"Hadrien A.R. Devillepoix, Trent Jansen-Sturgeon \"\n__copyright__ = \"Copyright 2016-2017, Desert Fireball Network\"\n__license__ = \"MIT\"\n__version__ = \"1.0\"\n\nimport numpy as np\nfrom numpy.linalg import norm\nimport matplotlib.pyplot as plt\n\nfrom astropy import units as u\nfrom astropy.time import Time\nfrom astropy.coordinates import HCRS, ITRS, GCRS\nfrom astropy.utils.iers import IERS_A, IERS_A_URL, IERS\nfrom astropy.utils.data import download_file\n\nfrom trajectory_utilities import ECEF2LLH, \\\n EarthPosition, HCRS2HCI, HCI2ECI_pos, \\\n OrbitalElements2PosVel, ECI2ECEF_pos\n\n\ntry:\n iers_a_file = download_file(IERS_A_URL, cache=True)\n iers_a = IERS_A.open(iers_a_file)\n IERS.iers_table = iers_a\nexcept:\n print('IERS_A_URL is temporarily unavailable')\n pass\n\n\nAU = 1*u.au.to(u.m)\nSMA_JUPITER = 5.20336301 * u.au\n\n\n\ndef tisserand_wrt_jupiter(a, e, i):\n '''\n Calculate the Tisserrand criterion with respect to Jupiter\n '''\n T_j = (SMA_JUPITER / a +\n 2 * np.cos(i) *\n np.sqrt(a / SMA_JUPITER * (1 - e**2)))\n return T_j\n\n# Conversion vector\nAU_Deg2m_Rad = np.vstack((AU, 1, np.pi / 180 * np.ones((4, 1))))\n\nPlanets = {'Mercury': np.vstack((0.387099, 0.205636, 7.004979, 29.127030, 48.330766, 252.250324)),\n 'Venus': np.vstack((0.723336, 0.006777, 3.394676, 54.922625, 76.679843, 181.979100)),\n 'Earth': np.vstack((1.000003, 0.016711, -0.000015, 102.937682, 0.000000, 100.464572)),\n 'Mars': np.vstack((1.523710, 0.093394, 1.849691, -73.503169, 49.559539, -4.553432)),\n 'Jupiter': np.vstack((5.202887, 0.048386, 1.304397, -85.745429, 100.473909, 34.396441)),\n 'Saturn': np.vstack((9.536676,0.053862,2.485992,-21.063546,113.662424,49.954244)),\n 'Uranus': np.vstack((19.189165,0.047257,0.772638,96.937351,74.016925,313.238105)),\n 'Neptune': np.vstack((30.069923,0.008590,1.770043,-86.819463,131.784226,-55.120030))}\n\nclass OrbitObject(object):\n \"\"\"\n Solar system object osculating orbit\n \"\"\"\n\n def __init__(self,\n orbit_type,\n a, e, i, omega, Omega, theta,\n ra_corr=np.nan*u.rad, dec_corr=np.nan*u.rad,\n v_g=np.nan*u.m/u.second):\n self.semi_major_axis = a.to(u.au)\n self.eccentricity = e\n self.inclination = i.to(u.deg)\n self.argument_periapsis = omega.to(u.deg)\n self.longitude_ascending_node = Omega.to(u.deg)\n self.longitude_perihelion = (self.longitude_ascending_node + self.argument_periapsis) % (360 * u.deg)\n\n self.true_anomaly = theta.to(u.deg)\n self.orbit_type = orbit_type\n\n self.perihelion = (1 - self.eccentricity) * self.semi_major_axis\n self.aphelion = (1 + self.eccentricity) * self.semi_major_axis\n\n self.corr_radiant_ra = (ra_corr.to(u.deg)) % (360 * u.deg)\n self.corr_radiant_dec = dec_corr.to(u.deg)\n\n radiant = HCRS(ra=self.corr_radiant_ra, dec=self.corr_radiant_dec, distance=1.0*u.au)\n ecpliptic_radiant = HCRS2HCI(np.vstack(radiant.cartesian.xyz.value))\n self.ecliptic_latitude = np.rad2deg(np.arcsin(ecpliptic_radiant[2] / norm(ecpliptic_radiant)))*u.deg\n\n self.velocity_g = v_g.to(u.m / u.second)\n\n self.T_j = self.tisserand_criterion_wrt_jupiter()\n\n def tisserand_criterion_wrt_jupiter(self):\n '''\n Calculate the Tisserrand criterion with respect to Jupiter\n '''\n return tisserand_wrt_jupiter(self.semi_major_axis, self.eccentricity, self.inclination)\n\n def __str__(self):\n return str(\"Semi-major axis: \" + str(self.semi_major_axis) + \"\\n\" +\n \"Eccentricity: \" + str(self.eccentricity) + \"\\n\" +\n 
\"Inclination: \" + str(self.inclination) + \"\\n\" +\n \"Argument of Periapsis: \" + str(self.argument_periapsis) + \"\\n\" +\n \"Longitude of Ascending Node: \" + str(self.longitude_ascending_node) + \"\\n\" +\n \"True Anomaly: \" + str(self.true_anomaly) + \"\\n\\n\" +\n \"Ra_corrected: \" + str(self.corr_radiant_ra) + \"\\n\" +\n \"Dec_corrected: \" + str(self.corr_radiant_dec) + \"\\n\" +\n \"Vel_g: \" + str(self.velocity_g))\n\n\n'''\nFunction delibaretely outside of native StateVector class to allow multithreaded call\n'''\n\ndef random_compute_orbit_ceplecha(sv):\n sv.randomize_velocity_vector()\n sv.computeOrbit(orbit_computation_method='Ceplecha')\n return sv\n\ndef random_compute_orbit_integration_EOE(sv):\n sv.randomize_velocity_vector()\n sv.computeOrbit(orbit_computation_method='integrate_EOE')\n return sv\n\ndef random_compute_orbit_integration_posvel(sv):\n sv.randomize_velocity_vector()\n sv.computeOrbit(orbit_computation_method='integrate_posvel')\n return sv\n\n\ndef PlotOrbitalElements(COE, t_jd, t_soi, Sol):\n\n Colour = ['b', 'g', 'r', 'c', 'm', 'y', 'k']\n i = 2 #FIXME error\n\n plt.figure()\n plt.subplot(321)\n plt.plot(t_jd, COE[0] / AU, Colour[i])\n plt.axvline(x=t_soi[0], color='b'); plt.grid()\n plt.xlabel(\"Time (JD)\"); plt.ylabel(\"Semi-major Axis (AU)\")\n# plt.axvline(x=t_soi[1], color='k')\n# plt.axvline(x=t_soi[2], color='c')\n\n\n plt.subplot(322)\n plt.plot(t_jd, COE[1], Colour[i])\n plt.axvline(x=t_soi[0], color='b'); plt.grid()\n plt.xlabel(\"Time (JD)\"); plt.ylabel(\"Eccentricity\")\n# plt.axvline(x=t_soi[1], color='k')\n# plt.axvline(x=t_soi[2], color='c')\n\n plt.subplot(323)\n plt.plot(t_jd, COE[2] * 180 / np.pi, Colour[i])\n plt.axvline(x=t_soi[0], color='b'); plt.grid()\n plt.xlabel(\"Time (JD)\"); plt.ylabel(\"Inclination (deg)\")\n# plt.axvline(x=t_soi[1], color='k')\n# plt.axvline(x=t_soi[2], color='c')\n\n plt.subplot(324)\n plt.plot(t_jd, COE[3] * 180 / np.pi, Colour[i])\n plt.axvline(x=t_soi[0], color='b'); plt.grid()\n plt.xlabel(\"Time (JD)\"); plt.ylabel(\"Argument of Periapsis (deg)\")\n# plt.axvline(x=t_soi[1], color='k')\n# plt.axvline(x=t_soi[2], color='c')\n\n plt.subplot(325)\n plt.plot(t_jd, COE[4] * 180 / np.pi, Colour[i])\n plt.axvline(x=t_soi[0], color='b'); plt.grid()\n plt.xlabel(\"Time (JD)\"); plt.ylabel(\"Longitude of the Ascending Node (deg)\")\n# plt.axvline(x=t_soi[1], color='k')\n# plt.axvline(x=t_soi[2], color='c')\n\n plt.subplot(326)\n plt.plot(t_jd, COE[5] * 180 / np.pi, Colour[i])\n plt.axvline(x=t_soi[0], color='b'); plt.grid()\n plt.xlabel(\"Time (JD)\"); plt.ylabel(\"True Anomaly (deg)\")\n# plt.axvline(x=t_soi[1], color='k')\n# plt.axvline(x=t_soi[2], color='c')\n\n if Sol != 'NoSol':\n plt.subplot(321)\n plt.axhline(Sol.semi_major_axis.value, color='g')\n plt.subplot(322)\n plt.axhline(Sol.eccentricity, color='g')\n plt.subplot(323)\n plt.axhline(Sol.inclination.value, color='g')\n plt.subplot(324)\n plt.axhline(Sol.argument_periapsis.value, color='g')\n plt.subplot(325)\n plt.axhline(Sol.longitude_ascending_node.value, color='g')\n plt.subplot(326)\n plt.axhline(Sol.true_anomaly.value, color='g')\n\n plt.show()\n\n\ndef PlotOrbit3D(OrbObjList, t0=2457535.0, Sol='NoSol'):\n\n from mpl_toolkits.mplot3d import Axes3D\n\n ''' 3D Orbit Plot'''\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for OrbObj in OrbObjList:\n COE = np.vstack((OrbObj.semi_major_axis.value,\n OrbObj.eccentricity,\n OrbObj.inclination.value,\n OrbObj.argument_periapsis.value,\n OrbObj.longitude_ascending_node.value,\n 
OrbObj.true_anomaly.value)) * AU_Deg2m_Rad\n COE = COE + np.vstack((np.zeros((5, 100)), np.linspace(0, 2 * np.pi, 100)))\n [Pos_HCI, Vel_HCI] = OrbitalElements2PosVel(COE, 'Sun', 'Classical')\n ax.plot(Pos_HCI[0]/AU, Pos_HCI[1]/AU, Pos_HCI[2]/AU, color='r', label='Determined Orbit')\n\n ''' Plot the planets'''\n for Planet in Planets:\n COE = Planets[Planet] * AU_Deg2m_Rad\n COEs = COE + np.vstack((np.zeros((5, 200)), np.linspace(0, 2 * np.pi, 200)))\n [pos, vel] = OrbitalElements2PosVel(COEs, 'Sun', 'Classical')\n ax.plot(pos[0]/AU, pos[1]/AU, pos[2]/AU, color='b')\n\n # t_yr = t0 + np.linspace(0, 365.25, 100)\n # pos_earth = EarthPosition(t_yr)\n # ax.plot(pos_earth[0]/AU, pos_earth[1]/AU, pos_earth[2]/AU,\n # color='b', linewidth=2.0, label='Earth')\n\n ''' Plot the solution (if given) '''\n if Sol != 'NoSol':\n Sol_oe = np.vstack((Sol.semi_major_axis.value,\n Sol.eccentricity,\n Sol.inclination.value,\n Sol.argument_periapsis.value,\n Sol.longitude_ascending_node.value,\n Sol.true_anomaly.value)) * AU_Deg2m_Rad\n Sol_oe = Sol_oe + np.vstack((np.zeros((5, 100)), np.linspace(0, 2 * np.pi, 100)))\n [pos, vel] = OrbitalElements2PosVel(Sol_oe, 'Sun', 'Classical')\n ax.plot(pos[0]/AU, pos[1]/AU, pos[2]/AU, color='g', label='Published Orbit')\n\n plt.legend()\n ax.set_xlim([-5, 5])\n ax.set_ylim([-5, 5])\n ax.set_zlim([-5, 5])\n\n plt.show()\n\ndef PlotPerts(Pert):\n \n PPert = np.vstack(Pert).T; t = PPert[0]\n \n plt.figure(figsize=(16,9))\n t_rel = t - np.max(t) # Days\n plt.plot(t_rel, PPert[1], '-b', linewidth=3.0, label='Earth')\n plt.plot(t_rel, PPert[2], '--k', linewidth=3.0, label='Moon')\n plt.plot(t_rel, PPert[3], '-.r', linewidth=3.0, label='Sun')\n PertJ2 = PPert[4][~np.isnan(PPert[4])]\n plt.plot(t_rel[~np.isnan(PPert[4])], PertJ2, ':g', linewidth=3.0, label='J2')\n PertDrag = PPert[5][~np.isnan(PPert[5])]\n plt.plot(t_rel[~np.isnan(PPert[5])], PertDrag, '-.c', linewidth=3.0, label='Drag')\n plt.yscale('log'); plt.grid(True); plt.legend(loc='best')\n plt.xlabel('Relative Time [days]'); plt.ylabel('Perturbation Acceleration [m/s^2]')\n \n plt.show()\n\ndef PlotIntStep(t):\n \n dt=[]\n for k in range(len(t)-1):\n dt.append((t[k+1] - t[k]) * 24*60*60)\n \n plt.figure(figsize=(16,9))\n t_rel = t - np.max(t) # Days\n plt.plot(t_rel[1:], abs(np.array(dt)))\n plt.yscale('log'); plt.grid(True)#; plt.legend()\n plt.xlabel('Relative Time [days]'); plt.ylabel('Timestep [sec]')\n \n plt.show()\n\n\ndef ThirdBodyPerturbation(Pos, rho, mu):\n '''\n Pos is the position of the meteoroid (m)\n rho is the position of the third body (m)\n mu is the standard gravitational parameter of the third body (m3/s2)\n '''\n\n # Battin's scalar formula for vector difference\n q = np.dot(Pos.T, (Pos - 2 * rho) / (np.dot(rho.T, rho)))\n f = (3 * q + 3 * q**2 + q**3) / (1 + (1 + q)**1.5)\n\n # Third body perturbation acceleration (with indirect term)\n u = -mu * (Pos + f * rho) / ((norm(Pos - rho))**3)\n\n return u\n\n\ndef NRLMSISE_00(pos, time, pos_type='eci'):\n ''' Courtesy of Ellie Sansom '''\n \"\"\"\n Inputs: inertial position and time\n Outputs: [altitude, temp, atm_pres, atm density, sos, dyn_vis]\n \"\"\"\n\n from nrlmsise_00_header import nrlmsise_input, nrlmsise_output, nrlmsise_flags\n from nrlmsise_00 import gtd7\n\n time = Time(time, format='jd', scale='utc')\n\n # Convert ECI to LLH coordinates\n if pos_type == 'eci':\n Pos_LLH = ECEF2LLH(ECI2ECEF_pos(pos, time))\n elif pos_type == 'ecef':\n Pos_LLH = ECEF2LLH(pos)\n elif pos_type == 'llh':\n Pos_LLH = pos\n else:\n print('NRLMSISE_00 error: 
Invalid pos_type')\n exit()\n g_lat = np.rad2deg(Pos_LLH[0][0])\n g_long = np.rad2deg(Pos_LLH[1][0])\n alt = Pos_LLH[2][0]\n\n # Break up time into year, day of year, and seconds of the day\n yDay = time.yday.split(':'); yr = float(yDay[0]); doy = float(yDay[1])\n sec = float(yDay[2]) * 60*60 + float(yDay[3]) * 60 + float(yDay[4])\n\n # Assign our variables into the nrmsise inputs\n Input = nrlmsise_input(yr, doy, sec, alt/1000, g_lat, g_long)\n Output = nrlmsise_output(); Flags = nrlmsise_flags()\n\n # Switches\n for i in range(1, 24):\n Flags.switches[i]=1\n\n # GTD7 atmospheric model subroutine\n gtd7(Input, Flags, Output)\n\n # Temperature at alt [deg K]\n T = Output.t[1]\n\n # Molecular number densities [m-3]\n He = Output.d[0] # He\n O = Output.d[1] # O\n N2 = Output.d[2] # N2\n O2 = Output.d[3] # O2\n Ar = Output.d[4] # Ar\n H = Output.d[6] # H\n N = Output.d[7] # N\n# ano_O = Output.d[8] # Anomalous oxygen\n sum_mass = He + O + N2 + O2 + Ar + H + N\n\n # Molar mass\n He_mass = 4.0026 # g/mol\n O_mass = 15.9994 # g/mol\n N2_mass = 28.013 # g/mol\n O2_mass = 31.998 # g/mol\n Ar_mass = 39.948 # g/mol\n H_mass = 1.0079 # g/mol\n N_mass = 14.0067 # g/mol\n\n # Molecular weight of air [kg/mol]\n mol_mass_air = (He_mass * He + O_mass * O + N2_mass * N2 + O2_mass * O2\n + Ar_mass * Ar + H_mass * H + N_mass * N) / (1000 * sum_mass)\n\n # Total mass density [kg*m-3]\n po = Output.d[5] * 1000\n\n Ru = 8.3144621 # Universal gas constant [J/(K*mol)]\n R = Ru / mol_mass_air # Individual gas constant [J/(kg*K)] #287.058\n\n # Ideal gas law\n atm_pres = po * T * R\n\n # Speed of sound in atm\n sos = 331.3 * np.sqrt(1 + T / 273.15)\n\n # Dynamic viscosity (http://en.wikipedia.org/wiki/Viscosity)\n C = 120 #Sutherland's constant for air [deg K]\n mu_ref = 18.27e-6 # Reference viscosity [[mu_Pa s] * e-6]\n T_ref = 291.15 # Reference temperature [deg K]\n\n dyn_vis = mu_ref * (T_ref + C) / (T + C) * (T / T_ref)**1.5\n\n return T, atm_pres, po, sos, dyn_vis\n\n# def compute_infinity_radiant(stateVec):\n# ''' This method computing the apparent radiant, it doesn't consider the zenith attraction '''\n\n# Pos_geo = stateVec.position\n# Vel_geo = stateVec.vel_xyz\n# t0 = stateVec.epoch\n\n# # Compute radiant (apparent ORIGIN of meteoroid)\n# Vel_eci = ECEF2ECI(Pos_geo, Vel_geo, t0)[1]\n# ra_eci = np.arctan2(-Vel_eci[1], -Vel_eci[0])\n# dec_eci = np.arcsin(-Vel_eci[2] / norm(Vel_eci))\n# # ^-- redundant information. 
Already have it in metadata\n\n# return ra_eci, dec_eci\n\n\ndef compute_cartesian_velocities_from_radiant(stateVec):\n '''\n Turn apparent ecef radiant and velocity into cartesian velocity component\n '''\n\n vel_geo = -(stateVec.velocity_inf *\n np.vstack((np.cos(np.deg2rad(stateVec.ra_ecef_inf)) * np.cos(np.deg2rad(stateVec.dec_ecef_inf)),\n np.sin(np.deg2rad(stateVec.ra_ecef_inf)) * np.cos(np.deg2rad(stateVec.dec_ecef_inf)),\n np.sin(np.deg2rad(stateVec.dec_ecef_inf)))))\n\n return vel_geo\n\n\n\ndef SimilarityCriterion(COE1, COE2, method='SH'):\n '''\n Southworth & Hawkins similarity criterion (1963); or\n Drummond's similarity criterion (1981); or\n Jopek's similarity criterion (1993).\n '''\n if type(COE1) == np.ndarray:\n a1 = COE1[0]/AU; a2 = COE2[0]/AU # [AU]\n e1 = COE1[1]; e2 = COE2[1] # []\n i1 = COE1[2]; i2 = COE2[2] # [rad]\n w1 = COE1[3]; w2 = COE2[3] # [rad]\n W1 = COE1[4]; W2 = COE2[4] # [rad]\n\n else:\n a1 = COE1.semi_major_axis.value; a2 = COE2.semi_major_axis.value # [AU]\n e1 = COE1.eccentricity; e2 = COE2.eccentricity # []\n i1 = COE1.inclination.to(u.rad).value; i2 = COE2.inclination.to(u.rad).value # [rad]\n w1 = COE1.argument_periapsis.to(u.rad).value; w2 = COE2.argument_periapsis.to(u.rad).value # [rad]\n W1 = COE1.longitude_ascending_node.to(u.rad).value; W2 = COE2.longitude_ascending_node.to(u.rad).value # [rad]\n\n q1 = a1 * (1 - e1) # [AU]\n q2 = a2 * (1 - e2) # [AU]\n\n # Angle between the orbital planes (I21)\n var = (2 * np.sin((i2 - i1) / 2))**2 + np.sin(i1) * np.sin(i2) * (2 * np.sin((W2 - W1) / 2))**2\n I21 = 2 * np.arcsin(np.sqrt(var) / 2)\n\n if method == 'SH':\n # Difference between orbits longitude of perihelion (pi21)\n pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))\n\n Similarity2 = (e2 - e1)**2 + (q2 - q1)**2 + var + (((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2\n Similarity = np.sqrt(Similarity2)\n\n elif method == 'D':\n # Angle between the orbital lines of apsides (theta21)\n# l1 = W1 + np.arcsin(np.cos(i1) * np.tan(w1)); b1 = np.arcsin(np.sin(i1) * np.sin(w1))\n# l2 = W2 + np.arcsin(np.cos(i2) * np.tan(w2)); b2 = np.arcsin(np.sin(i2) * np.sin(w2))\n l1 = W1 + np.arctan(np.cos(i1) * np.tan(w1)); b1 = np.arcsin(np.sin(i1) * np.sin(w1))\n l2 = W2 + np.arctan(np.cos(i2) * np.tan(w2)); b2 = np.arcsin(np.sin(i2) * np.sin(w2))\n theta21 = np.arccos(np.sin(b1) * np.sin(b2) + np.cos(b1) * np.cos(b2) * np.cos(l2 - l1))\n\n Similarity2 = ((e2 - e1) / (e2 + e1))**2 + ((q2 - q1) / (q2 + q1))**2 + \\\n (I21 / np.pi)**2 + ((e2 + e1) / 2)**2 * (theta21 / np.pi)**2\n Similarity = np.sqrt(Similarity2)\n\n elif method == 'H':\n # Difference between orbits longitude of perihelion (pi21)\n pi21 = w2 - w1 + 2 * np.arcsin(np.cos((i2 + i1) / 2) * np.sin((W2 - W1) / 2) / np.cos(I21 / 2))\n\n Similarity2 = (e2 - e1)**2 + ((q2 - q1) / (q2 + q1))**2 + var + \\\n (((e2 + e1) / 2) * (2 * np.sin(pi21 / 2)))**2\n Similarity = np.sqrt(Similarity2)\n\n return Similarity\n\ndef generate_ephemeris(pos_hci, t_jd):\n\n # Save the datetime\n ephem_dict = {'datetime': Time(t_jd, format='jd', scale='utc').isot}\n ephem_dict['MJD'] = Time(t_jd, format='jd', scale='utc').mjd\n \n # distance to sun\n ephem_dict['distance_to_sun'] = norm(pos_hci, axis=0) / 1000 #km\n\n # Convert to eci coordinates\n pos_eci = HCI2ECI_pos(pos_hci, t_jd)\n ephem_dict['pos_eci_x'] = pos_eci[0]\n ephem_dict['pos_eci_y'] = pos_eci[1]\n ephem_dict['pos_eci_z'] = pos_eci[2]\n pos_hcrs = HCI2HCRS(pos_hci)\n\n # Calculate phase angle\n ephem_dict['phase_angle'] = 
np.rad2deg(np.arccos(np.sum(pos_hcrs * pos_eci, axis=0)\n / (norm(pos_hcrs, axis=0) * norm(pos_eci, axis=0))))\n\n # Calculate elongation angle\n pos_sun = pos_eci - pos_hcrs\n ephem_dict['elongation_angle'] = np.rad2deg(np.arccos(np.sum(pos_sun * pos_eci, axis=0)\n / (norm(pos_sun, axis=0) * norm(pos_eci, axis=0))))\n\n # Calculate ephemeris\n dist = norm(pos_eci, axis=0) #m\n ephem_dict['ra'] = np.rad2deg(np.arctan2(pos_eci[1], pos_eci[0]))%360 #deg\n ephem_dict['dec'] = np.rad2deg(np.arcsin(pos_eci[2] / dist)) #deg\n ephem_dict['distance_to_earth'] = norm(pos_eci, axis=0) / 1000 #km\n\n return ephem_dict\n" ]
[ [ "numpy.ones", "numpy.sum", "matplotlib.pyplot.yscale", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "numpy.vstack", "matplotlib.pyplot.figure", "numpy.cos", "numpy.isnan", "numpy.linspace", "numpy.deg2rad", "numpy.sqrt", "matplotlib.pyplot.axvline", "numpy.zeros", "numpy.rad2deg", "matplotlib.pyplot.axhline", "numpy.max", "numpy.tan", "numpy.linalg.norm", "numpy.arctan2", "matplotlib.pyplot.legend", "numpy.arcsin", "matplotlib.pyplot.grid", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.array", "numpy.sin", "numpy.dot", "matplotlib.pyplot.xlabel" ] ]
idigitopia/Distributed-VI
[ "323be8c50862d8dff9cae68313c518080a9df72e" ]
[ "vi_engine_s.py" ]
[ "import numpy as np\nimport ray\n\nray.shutdown()\nray.init()\n\n\n# A : Action Space\n# S : State Space \n\[email protected]\nclass VI_worker(object):\n def __init__(self, list_of_actions, tran_dict, reward_dict, beta, backup_states, true_action_prob=0.8,\n unknown_value=0):\n self.backup_states = backup_states\n self.list_of_actions = list_of_actions\n self.tran_dict = tran_dict\n self.reward_dict = reward_dict\n self.beta = beta\n self.unknown_value = unknown_value # Default Value for any states that do not have transitions defined.\n\n self.true_action_prob = true_action_prob\n self.slip_prob = 1 - self.true_action_prob\n self.slip_action_prob = self.slip_prob / len(self.list_of_actions)\n\n def compute(self, V_t, backup_states=None):\n \"\"\"\n \n :param V_t: Value Vector at t\n :return: \n \"\"\"\n backup_states = backup_states or self.backup_states\n\n V_tplus1 = {s: 0 for s in backup_states}\n max_vals = {s: float(\"-inf\") for s in backup_states}\n\n max_error = 0\n\n for s in backup_states:\n for a in self.tran_dict[s]:\n expected_ns_val = 0\n for ns in self.tran_dict[s][a]:\n try:\n expected_ns_val += self.tran_dict[s][a][ns] * V_t[ns]\n except:\n expected_ns_val += self.tran_dict[s][a][ns] * self.unknown_value\n\n expect_s_val = self.reward_dict[s][a] + self.beta * expected_ns_val\n max_vals[s] = max(max_vals[s], expect_s_val)\n V_tplus1[s] += self.slip_action_prob * expect_s_val\n V_tplus1[s] += (self.true_action_prob - self.slip_action_prob) * max_vals[s]\n\n max_error = max(max_error, abs(V_tplus1[s] - V_t[s]))\n\n return V_tplus1, max_error\n\n\ndef distributed_value_iteration(S, A, reward_dict, tran_dict, seed_value=None, unknown_value=0, true_action_prob=0.8,\n beta=0.99, epsilon=0.01, workers_num=4, verbose=True):\n # Split the state space evenly to be distributed to VI workers\n state_chunks = [a.tolist() for a in np.array_split(np.array(S), workers_num)]\n V_t = {s: 0 for s in S} if seed_value is None else seed_value\n\n # Make VI workers\n workers_list = [VI_worker.remote(list_of_actions=A,\n tran_dict=tran_dict,\n reward_dict=reward_dict,\n beta=beta,\n backup_states=state_chunk,\n unknown_value=unknown_value,\n true_action_prob=true_action_prob)\n for state_chunk in state_chunks]\n\n # Do VI computation\n error = float('inf')\n while error > epsilon:\n object_list = [workers_list[i].compute.remote(V_t) for i in range(workers_num)]\n error_list = []\n for i in range(workers_num):\n finish_id = ray.wait(object_list, num_returns=1, timeout=None)[0][0]\n object_list.remove(finish_id)\n V_tplus1, error = ray.get(finish_id)\n\n V_t.update(V_tplus1)\n error_list.append(error)\n\n if (verbose):\n print(\"Error:\", error)\n\n error = max(error_list)\n\n pi = get_pi_from_value(V_t, A, tran_dict, reward_dict, beta)\n\n return V_t, pi\n\n\ndef simple_value_iteration(S, A, reward_dict, tran_dict, seed_value=None, unknown_value=0, true_action_prob=0.8,\n beta=0.99, epsilon=0.01, workers_num=4, verbose=True):\n slip_prob = 1 - true_action_prob\n slip_action_prob = slip_prob / len(A)\n\n V_t = {s: 0 for s in S} if seed_value is None else seed_value\n error = float(\"inf\")\n\n while error > epsilon:\n V_tplus1 = {s: 0 for s in S}\n max_vals = {s: float(\"-inf\") for s in S}\n\n max_error = 0\n\n for s in S:\n for a in tran_dict[s]:\n expected_ns_val = 0\n for ns in tran_dict[s][a]:\n try:\n expected_ns_val += tran_dict[s][a][ns] * V_t[ns]\n except:\n expected_ns_val += tran_dict[s][a][ns] * unknown_value\n\n expect_s_val = reward_dict[s][a] + beta * expected_ns_val\n max_vals[s] 
= max(max_vals[s], expect_s_val)\n V_tplus1[s] += slip_action_prob * expect_s_val\n V_tplus1[s] += (true_action_prob - slip_action_prob) * max_vals[s]\n\n max_error = max(max_error, abs(V_tplus1[s] - V_t[s]))\n\n V_t.update(V_tplus1)\n error = max_error\n\n if (verbose):\n print(\"Error:\", error)\n\n pi = get_pi_from_value(V_t, A, tran_dict, reward_dict, beta)\n\n return V_t, pi\n\n\ndef get_pi_from_value(V, list_of_actions, tran_dict, reward_dict, beta):\n v_max = {s: float('-inf') for s in V}\n pi = {}\n\n for s in V:\n for a in tran_dict[s]:\n expected_val = 0\n for ns in tran_dict[s][a]:\n try:\n expected_val += tran_dict[s][a][ns] * V[ns]\n except:\n expected_val += tran_dict[s][a][ns] * 0\n expect_s_val = reward_dict[s][a] + beta * expected_val\n if expect_s_val > v_max[s]:\n v_max[s] = expect_s_val\n pi[s] = a\n\n return pi\n" ]
[ [ "numpy.array" ] ]
yyuting/learning_from_program_trace
[ "e0e4ac9bc2d4069eef64bdc2de64a87a735fa508" ]
[ "apps/render_mandelbulb_slim.py" ]
[ "from render_util import *\nfrom render_single import *\nimport numpy\nimport skimage\nimport skimage.io\n\ndef mb(p, time):\n\n z = [p[0], p[1], p[2]]\n dr = 1.0\n t0 = 1.0\n\n cond = True\n\n power = 20.0\n\n for i in range(4):\n r = sqrt(z[0] ** 2.0 + z[1] ** 2.0 + z[2] ** 2.0)\n #cond *= r <= 2.0\n #cond = select(r <= 2.0, cond, False)\n cond = r <= 2.0\n theta = atan(z[1] / z[0]) * power\n phi = (asin(z[2] / r) + time * 0.1) * power\n\n #dr = select(cond, (r ** (power - 1.0)) * dr * power + 1.0, dr)\n\n #r = select(cond, r ** power, r)\n\n this_power = select(cond, power, 1.0)\n new_dr = (r ** (this_power - 1.0)) * dr * power + 1.0\n dr = select(cond, new_dr, dr)\n r = select(cond, r ** this_power, r)\n\n cos_phi = cos(phi)\n\n z[0] = select(cond, r * cos(theta) * cos_phi + p[0], z[0])\n z[1] = select(cond, r * sin(theta) * cos_phi + p[1], z[1])\n z[2] = select(cond, r * sin(phi) + p[2], z[2])\n\n t0 = select(cond, min_nosmooth(t0, r), t0)\n\n return [0.5 * log(r) * r / dr, t0]\n\ndef f(p, time):\n new_p = rotation_y(p, time * 0.2)\n return mb(new_p, time)\n\ndef intersect(ro, rd, time, orig_t):\n t = orig_t\n res_t = ConstExpr(0.0)\n res_c1 = ConstExpr(0.0)\n max_error = ConstExpr(1000.0)\n d = ConstExpr(1.0)\n pd = ConstExpr(100.0)\n os = ConstExpr(0.0)\n step = ConstExpr(0.0)\n error = ConstExpr(1000.0)\n cond1 = True\n c = [ConstExpr(0.0), ConstExpr(0.0)]\n for i in loop_generator(48, is_raymarching=True):\n compiler.DEFAULT_FOR_LOOP_ITER = i\n #cond1 *= (error >= 0.0) * (t <= 20.0)\n cond1 = (error >= 0.0) * (t <= 20.0)\n\n c = f(ro + rd * t, time)\n d = select(cond1, c[0], d)\n\n cond2 = d > os\n os = select(cond2, 0.4 * d * d / pd, 0.0)\n step = select(cond2, d + os, -os)\n pd = select(cond2, d, 100.0)\n d = select(cond2, d, 1.0)\n\n error = select(cond1, d / t, error)\n\n cond3 = cond1 * (error < max_error)\n\n max_error = select(cond3, error, max_error)\n res_t = select(cond3, t, res_t)\n res_c1 = select(cond3, c[1], res_c1)\n\n t = select(cond1, t + step, t)\n\n #compiler.DEFAULT_FOR_LOOP_NAME = None\n #compiler.DEFAULT_FOR_LOOP_ITER = None\n ro_len = sqrt(ro[0] ** 2 + ro[1] ** 2 + ro[2] ** 2)\n res_t = select(t > ro_len, -1.0, res_t)\n #res_t = select(t > 2.0, -1.0, res_t)\n #res_t = Var('res_t', select(t <= 1.0, -10.0, res_t))\n return [res_t, res_c1]\n\ndef mandelbulb_slim(ray_dir_p, ray_origin, time):\n\n sundir = numpy.array([0.1, 0.8, 0.6])\n sundir /= numpy.linalg.norm(sundir)\n\n sun = numpy.array([1.64, 1.27, 0.99])\n skycolor = numpy.array([0.6, 1.5, 1.0])\n\n ray_origin = numpy.array(ray_origin)\n ray_dir_p = numpy.array(ray_dir_p)\n\n orig_t = (ray_origin[0] ** 2.0 + ray_origin[1] ** 2.0 + ray_origin[2] ** 2.0) ** 0.5 / 3.0\n \n res = intersect(ray_origin, ray_dir_p, time, orig_t)\n\n t_ray = Var(log_prefix + 't_ray', res[0])\n t_ray.log_intermediates_rank = 2\n\n cond = t_ray > 0.0\n p = ray_origin + res[0] * ray_dir_p\n \n n = normal_functor(lambda x: f(x, time)[0], 0.001, 3)(p)\n\n # change log_intermediates_rank for input arguments\n old_log_intermediates_rank = compiler.log_intermediates_rank\n compiler.log_intermediates_rank = 1\n\n for list in [ray_dir_p, ray_origin, [time], [res[0]], n]:\n for item in list:\n item.log_intermediates_rank = compiler.log_intermediates_rank\n\n dif = max_nosmooth(0.0, n[0] * sundir[0] + n[1] * sundir[1] + n[2] * sundir[2])\n sky = 0.6 + 0.4 * max_nosmooth(0.0, n[1])\n bac = max_nosmooth(0.0, 0.3 + 0.7 * (-n[0] * sundir[0] - n[1] - n[2] * sundir[2]))\n \n \n\n lin_coef_a = 4.5 * dif + 0.8 * bac\n lin_coef_b = 0.6 * sky\n 
lin0 = sun[0] * lin_coef_a + skycolor[0] * lin_coef_b\n lin1 = sun[1] * lin_coef_a + skycolor[1] * lin_coef_b\n lin2 = sun[2] * lin_coef_a + skycolor[2] * lin_coef_b\n\n tc0_coef = 3.0 + 4.2 * (res[1] ** 0.55)\n col0 = lin0 * 0.9 * 0.2 * (0.5 + 0.5 * sin(tc0_coef))\n col1 = lin1 * 0.8 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 0.5))\n col2 = lin2 * 0.6 * 0.2 * (0.5 + 0.5 * sin(tc0_coef + 1.0))\n\n col0 = select(cond, col0 ** 0.45, 0.0)\n col1 = select(cond, col1 ** 0.45, 0.0)\n col2 = select(cond, col2 ** 0.45, 0.0)\n\n col = numpy.array([col0, col1, col2])\n col = col * 0.6 + 0.4 * col * col * (3.0 - 2.0 * col)\n col = col * 1.5 - 0.5 * 0.33 * (col[0] + col[1] + col[2])\n\n #col = select(res[0] <= -2.0, numpy.array([1.0, 1.0, 1.0]), col)\n\n compiler.log_intermediates_rank = old_log_intermediates_rank\n\n for expr in col.tolist() + n.tolist() + [t_ray]:\n expr.log_intermediates_subset_rank = 1\n\n return output_color(col)\n\nshaders = [mandelbulb_slim]\nis_color = True\n# use a different rotation parameterization so can easily compute direction to world coord origin\nfov = 'small_seperable'\n\nx_center = 0.0\ny_center = 0.0\nz_center = 0.0\noffset = np.array([0.4, 0.4, 0.4])\n\ndef pos_solver(x0, x1, x2):\n \"\"\"\n given x (length 3) as camera position,\n solve a camera direction that satisfies:\n the center of the image points to the point (0.0, 0.4, 0.0) plus some noise,\n the actual center is (0.0, 0.4, 0.0) + (random(3) * 2.0 - 1.0) * (0.2, 0.2, 0.07)\n the horizonal axis in image is perpendicular to the upward (y axis) in world,\n the vertical axis upward in image is in the same direction of the upward y axis in world.\n \"\"\"\n random_offset = (np.random.rand(3) * 2.0 - 1.0) * offset\n a = x_center - x0 + random_offset[0]\n b = y_center - x1 + random_offset[1]\n c = z_center - x2 + random_offset[2]\n norm = (a ** 2 + b ** 2 + c ** 2) ** 0.5\n d = a / norm\n e = b / norm\n f = c / norm\n \n ang1 = np.random.rand() * 2 * np.pi\n \n de_norm = (d ** 2 + e ** 2) ** 0.5\n if de_norm > 0:\n # assume cos2 > 0\n ang3 = math.atan2(e / de_norm, d / de_norm)\n \n cos3 = np.cos(ang3)\n if cos3 != 0:\n ang2 = math.atan2(-f, d / cos3)\n else:\n sin3 = np.sin(ang3)\n ang2 = math.atan2(-f, e / sin3)\n else:\n if f > 0:\n ang2 = - np.pi / 2\n else:\n ang2 = np.pi / 2\n ang3 = np.random.rand() * 2 * np.pi\n\n return ang1, ang2, ang3\n\ndef main():\n \n if len(sys.argv) < 3:\n print('Usage: python render_[shader].py base_mode base_dir')\n raise\n \n base_mode = sys.argv[1]\n base_dir = sys.argv[2]\n \n camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_with_bg')\n preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb')\n \n if not os.path.exists(camera_dir):\n os.makedirs(camera_dir, exist_ok=True)\n \n if not os.path.exists(preprocess_dir):\n os.makedirs(preprocess_dir, exist_ok=True)\n \n if base_mode == 'collect_raw':\n \n camera_pos = numpy.load(os.path.join(camera_dir, 'train.npy'))\n render_t = numpy.load(os.path.join(camera_dir, 'train_time.npy'))\n nframes = render_t.shape[0]\n \n train_start = numpy.load(os.path.join(camera_dir, 'train_start.npy'))\n render_single(os.path.join(preprocess_dir, 'train'), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = (80, 80), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': 'train_small', 'tile_only': True, 'tile_start': train_start, 'collect_loop_and_features': True, 
'log_only_return_def_raymarching': True})\n \n elif base_mode == 'generate_dataset':\n for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:\n camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy')) \n nframes = camera_pos.shape[0]\n \n if mode in ['train', 'validate']:\n tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]\n render_size = (320, 320)\n tile_only = True\n render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))\n else:\n tile_start = None\n render_size = (640, 960)\n tile_only = False\n render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))\n if mode == 'test_close':\n render_t = render_t_pool[:5]\n elif mode == 'test_far':\n render_t = render_t_pool[5:10]\n else:\n render_t = render_t_pool[10:]\n \n render_t = render_t[:nframes]\n \n \n outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')\n \n render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})\n \n if mode in ['train', 'validate']:\n target_dir = os.path.join(camera_dir, mode + '_img')\n else:\n target_dir = os.path.join(camera_dir, 'test_img')\n \n if not os.path.exists(target_dir):\n os.mkdir(target_dir)\n \n \n for file in os.listdir(outdir):\n if file.startswith('%s_ground' % mode) and file.endswith('.png'):\n os.rename(os.path.join(outdir, file),\n os.path.join(target_dir, file))\n \n elif base_mode == 'sample_camera_pos':\n \n test_render_t = None\n \n t_range = 31.5\n \n for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:\n \n x_min = -4\n x_max = 4\n y_min = -4\n y_max = 4\n z_min = -4\n z_max = 4\n \n if mode == 'train':\n nframes = 800\n x_max = 3.5\n y_max = 3.5\n elif mode == 'validate':\n nframes = 80\n x_max = 3.5\n y_max = 3.5\n elif mode == 'test_close':\n nframes = 5\n x_min = 3.5\n elif mode == 'test_far':\n nframes = 5\n y_min = 3.5\n elif mode == 'test_middle':\n nframes = 20\n x_max = 3.5\n y_max = 3.5\n\n camera_pos = numpy.empty([nframes, 6])\n\n for i in range(nframes):\n while True:\n x = numpy.random.rand() * (x_max - x_min) + x_min\n y = numpy.random.rand() * (y_max - y_min) + y_min\n z = numpy.random.rand() * (z_max - z_min) + z_min\n if (x ** 2 + y ** 2 + z ** 2) > 1.8 ** 2:\n break\n ang1, ang2, ang3 = pos_solver(x, y, z)\n camera_pos[i] = np.array([x, y, z, ang1, ang2, ang3])\n\n numpy.save(os.path.join(preprocess_dir, '%s.npy' % mode), camera_pos)\n \n if mode in ['train', 'validate']:\n expand_boundary = 160\n render_t = np.random.rand(nframes) * t_range\n numpy.save(os.path.join(preprocess_dir, mode + '_time.npy'), render_t)\n else:\n expand_boundary = 0\n if test_render_t is None:\n test_render_t = np.random.rand(30) * t_range\n np.save(os.path.join(preprocess_dir, 'test_time.npy'), render_t)\n \n if mode == 'test_close':\n render_t = test_render_t[:5]\n elif mode == 'test_far':\n render_t = test_render_t[5:10]\n else:\n render_t = test_render_t[10:]\n \n render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = 
(640, 960), render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'expand_boundary': expand_boundary})\n \n elif base_mode == 'generate_temporal_dataset':\n \n camera_dir = os.path.join(base_dir, 'datasets/datas_mandelbulb_temporal_with_bg')\n preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_temporal')\n \n if not os.path.exists(camera_dir):\n os.makedirs(camera_dir, exist_ok=True)\n\n if not os.path.exists(preprocess_dir):\n os.makedirs(preprocess_dir, exist_ok=True)\n \n for mode in ['train', 'test', 'validate']:\n \n if mode in ['train', 'validate']:\n tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))\n render_size = (320, 320)\n tile_only = True\n render_t_base = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))\n camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy')) \n t_schedule = np.arange(8)\n else:\n tile_start = None\n render_size = (640, 960)\n tile_only = False\n render_t_base = numpy.load(os.path.join(camera_dir, 'test_time.npy'))\n \n camera_pos = np.concatenate((np.load(os.path.join(camera_dir, 'test_close.npy')),\n np.load(os.path.join(camera_dir, 'test_far.npy')),\n np.load(os.path.join(camera_dir, 'test_middle.npy'))), axis=0)\n t_schedule = [0, 1, 29]\n \n nframes = camera_pos.shape[0]\n outdir = get_shader_dirname(os.path.join(preprocess_dir, mode), shaders[0], 'none', 'none')\n \n for t_val in t_schedule:\n render_t = render_t_base + t_val / 30\n\n render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=False, render_size = render_size, render_kw={'render_t': render_t, 'compute_f': False, 'ground_truth_samples': 1000, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_ground_%d' % (mode, t_val), 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True})\n \n target_dir = os.path.join(camera_dir, '%s_img' % mode)\n \n if not os.path.exists(target_dir):\n os.mkdir(target_dir)\n\n for file in os.listdir(outdir):\n if file.startswith('%s_ground' % mode) and file.endswith('.png'):\n os.rename(os.path.join(outdir, file),\n os.path.join(target_dir, file))\n \n elif base_mode == 'generate_blur_additional':\n \n preprocess_dir = os.path.join(base_dir, 'preprocess/mandelbulb_blur')\n \n for mode in ['train', 'test_close', 'test_far', 'test_middle', 'validate']:\n camera_pos = numpy.load(os.path.join(camera_dir, mode + '.npy')) \n nframes = camera_pos.shape[0]\n \n if mode in ['train', 'validate']:\n tile_start = numpy.load(os.path.join(camera_dir, mode + '_start.npy'))[:nframes]\n render_size = (320, 320)\n tile_only = True\n render_t = numpy.load(os.path.join(camera_dir, mode + '_time.npy'))\n else:\n tile_start = None\n render_size = (640, 960)\n tile_only = False\n render_t_pool = numpy.load(os.path.join(camera_dir, 'test_time.npy'))\n if mode == 'test_close':\n render_t = render_t_pool[:5]\n elif mode == 'test_far':\n render_t = render_t_pool[5:10]\n else:\n render_t = render_t_pool[10:]\n \n render_t = render_t[:nframes]\n \n render_single(os.path.join(preprocess_dir, mode), 'render_mandelbulb_slim', 'none', 'none', sys.argv[1:], nframes=nframes, log_intermediates=True, render_size = render_size, render_kw={'render_t': render_t, 
'compute_f': False, 'ground_truth_samples': 1, 'random_camera': True, 'camera_pos': camera_pos, 'zero_samples': False, 'gname': '%s_noisy' % mode, 'tile_only': tile_only, 'tile_start': tile_start, 'collect_loop_and_features': True, 'log_only_return_def_raymarching': True, 'log_t_ray': True, 'log_intermediates_level': 2})\n \n return\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "numpy.random.rand", "numpy.linalg.norm", "numpy.empty" ] ]
yeonseok-jeong-cm/multimodal_research
[ "bb1140f13f76d4cda6175a072806a0ee0908bd0d" ]
[ "downstream/UNITER/adapter/src/transformers/adapters/models/gpt2.py" ]
[ "from typing import Union\n\nimport torch\nfrom torch import nn\n\nfrom ..composition import AdapterCompositionBlock, parse_composition\nfrom ..heads import CausalLMHead, ClassificationHead, MultiLabelClassificationHead\nfrom ..model_mixin import InvertibleAdaptersMixin, ModelAdaptersMixin\nfrom .bert import (\n BertEncoderAdaptersMixin,\n BertOutputAdaptersMixin,\n BertSelfOutputAdaptersMixin,\n ModelWithFlexibleHeadsAdaptersMixin,\n)\n\n\nclass GPT2AttentionAdaptersModule(BertSelfOutputAdaptersMixin, nn.Module):\n \"\"\"Adds attention adapters to the Transformer module of DistilBert.\"\"\"\n\n def __init__(self, parent):\n super().__init__()\n # keep a reference to the parent module without registering as a submodule\n object.__setattr__(self, \"parent\", parent)\n self.config = parent.config\n\n @property\n def transformer_layer_norm(self):\n return None\n\n\nclass GPT2OutputAdaptersModule(BertOutputAdaptersMixin, nn.Module):\n \"\"\"Adds output adapters to the Transformer module of DistilBert.\"\"\"\n\n def __init__(self, parent):\n super().__init__()\n # keep a reference to the parent module without registering as a submodule\n object.__setattr__(self, \"parent\", parent)\n self.config = parent.config\n\n @property\n def transformer_layer_norm(self):\n return None\n\n\nclass GPT2DecoderBlockAdaptersMixin(BertEncoderAdaptersMixin):\n \"\"\"Adds adapters to the TransformerBlock module of DistilBert.\"\"\"\n\n def _init_adapter_modules(self):\n self.attention_adapters = GPT2AttentionAdaptersModule(self)\n self.output_adapters = GPT2OutputAdaptersModule(self)\n self.attention_adapters._init_adapter_modules()\n self.output_adapters._init_adapter_modules()\n\n def add_fusion_layer(self, adapter_names):\n self.attention_adapters.add_fusion_layer(adapter_names)\n self.output_adapters.add_fusion_layer(adapter_names)\n\n def add_adapter(self, adapter_name: str, layer_idx: int):\n self.attention_adapters.add_adapter(adapter_name, layer_idx)\n self.output_adapters.add_adapter(adapter_name, layer_idx)\n\n def delete_adapter(self, adapter_name):\n self.attention_adapters.delete_adapter(adapter_name)\n self.output_adapters.delete_adapter(adapter_name)\n\n def delete_fusion_layer(self, adapter_names):\n self.attention_adapters.delete_fusion_layer(adapter_names)\n self.output_adapters.delete_fusion_layer(adapter_names)\n\n def enable_adapters(self, adapter_names: list, unfreeze_adapters: bool, unfreeze_attention: bool):\n self.attention_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)\n self.output_adapters.enable_adapters(adapter_names, unfreeze_adapters, unfreeze_attention)\n\n\nclass GPT2ModelAdapterMixin(InvertibleAdaptersMixin, ModelAdaptersMixin):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def _init_adapter_modules(self):\n super()._init_adapter_modules()\n\n # add adapters specified in config; invertible adapter will only be added if required\n for adapter_name in self.config.adapters.adapters:\n self._add_adapter(adapter_name)\n # fusion\n if hasattr(self.config, \"fusion_models\"):\n for fusion_adapter_names in self.config.fusion_models:\n self.add_fusion_layer(fusion_adapter_names)\n\n def _add_adapter(self, adapter_name: str):\n adapter_config = self.config.adapters.get(adapter_name)\n leave_out = adapter_config.get(\"leave_out\", [])\n for i, layer in enumerate(self.base_model.h):\n if i not in leave_out:\n layer.add_adapter(adapter_name, i)\n\n self.add_invertible_adapter(adapter_name)\n\n def train_adapter(self, 
adapter_setup: Union[list, AdapterCompositionBlock]):\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.enable_adapters(adapter_setup, True, False)\n self.enable_invertible_adapters(adapter_setup.flatten())\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n self.train()\n self.freeze_model(True)\n adapter_setup = parse_composition(adapter_setup)\n self.enable_adapters(adapter_setup, unfreeze_adapters, True)\n # use the adapters to be trained by default in every forward pass\n self.set_active_adapters(adapter_setup)\n\n def enable_adapters(\n self, adapter_setup: AdapterCompositionBlock, unfreeze_adapters: bool, unfreeze_attention: bool\n ):\n for layer in self.base_model.h:\n layer.enable_adapters(adapter_setup, unfreeze_adapters, unfreeze_attention)\n\n def adjust_attention_mask_for_parallel(self, hidden_states, attention_mask):\n if attention_mask is not None and hidden_states.shape[0] != attention_mask.shape[0]:\n repeats = [1] * len(attention_mask.shape)\n repeats[0] = hidden_states.shape[0] // attention_mask.shape[0]\n attention_mask = attention_mask.repeat(*repeats)\n return attention_mask\n\n def _add_fusion_layer(self, adapter_names):\n for layer in self.base_model.h:\n layer.add_fusion_layer(adapter_names)\n\n def _delete_adapter(self, adapter_name: str):\n for layer in self.base_model.h:\n layer.delete_adapter(adapter_name)\n self.delete_invertible_adapter(adapter_name)\n\n def _delete_fusion_layer(self, adapter_names):\n for layer in self.base_model.h:\n layer.delete_fusion_layer(adapter_names)\n\n def get_fusion_regularization_loss(self):\n reg_loss = 0.0\n target = torch.zeros((self.config.hidden_size, self.config.hidden_size)).fill_diagonal_(1.0).to(self.device)\n for _, v in self.base_model.h._modules.items():\n\n for _, layer_fusion in v.output_adapters.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n for _, layer_fusion in v.attention_adapters.adapter_fusion_layer.items():\n if hasattr(layer_fusion, \"value\"):\n reg_loss += 0.01 * (target - layer_fusion.value.weight).pow(2).sum()\n\n return reg_loss\n\n def get_adapter(self, name):\n return_adapters = {}\n for idx, layer in enumerate(self.h):\n adapters = {\n \"attention\": layer.attention_adapters.adapters,\n \"output\": layer.output_adapters.adapters,\n }\n for key, adapt in adapters.items():\n if hasattr(adapt, name):\n if idx not in return_adapters:\n return_adapters[idx] = {}\n return_adapters[idx][key] = getattr(adapt, name)\n\n return return_adapters\n\n\nclass GPT2ModelHeadsMixin(ModelWithFlexibleHeadsAdaptersMixin):\n \"\"\"Adds flexible heads to a GPT-2 model.\"\"\"\n\n head_types = {\n \"classification\": ClassificationHead,\n \"multilabel_classification\": MultiLabelClassificationHead,\n \"causal_lm\": CausalLMHead,\n }\n\n def add_classification_head(\n self,\n head_name,\n num_labels=2,\n layers=2,\n activation_function=\"tanh\",\n overwrite_ok=False,\n multilabel=False,\n id2label=None,\n ):\n \"\"\"\n Adds a sequence classification head on top of the model.\n\n Args:\n head_name (str): The name of the head.\n num_labels (int, optional): Number of classification labels. Defaults to 2.\n layers (int, optional): Number of layers. 
Defaults to 2.\n activation_function (str, optional): Activation function. Defaults to 'tanh'.\n overwrite_ok (bool, optional): Force overwrite if a head with the same name exists. Defaults to False.\n multilabel (bool, optional): Enable multilabel classification setup. Defaults to False.\n \"\"\"\n\n if multilabel:\n head = MultiLabelClassificationHead(self, head_name, num_labels, layers, activation_function, id2label)\n else:\n head = ClassificationHead(self, head_name, num_labels, layers, activation_function, id2label)\n self.add_prediction_head(head, overwrite_ok)\n\n def add_causal_lm_head(self, head_name, overwrite_ok=False):\n \"\"\"\n Adds a causal language modeling head on top of the model.\n\n Args:\n head_name (str): The name of the head.\n overwrite_ok (bool, optional): Force overwrite if a head with the same name exists. Defaults to False.\n \"\"\"\n head = CausalLMHead(self, head_name)\n self.add_prediction_head(head, overwrite_ok=overwrite_ok)\n" ]
[ [ "torch.zeros" ] ]
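A minimal, self-contained sketch of the fusion regularization computed by `get_fusion_regularization_loss` in the file above: each AdapterFusion value projection is pulled toward the identity matrix with an L2 penalty scaled by 0.01. The hidden size of 8 and the plain `nn.Linear` stand-in for `layer_fusion.value` are illustrative assumptions, not part of the original file.

```python
import torch
import torch.nn as nn

# Identity target, as in get_fusion_regularization_loss above.
hidden_size = 8                                   # demo value; the real model uses config.hidden_size
target = torch.zeros(hidden_size, hidden_size).fill_diagonal_(1.0)

# Stand-in for one fusion layer's value projection (layer_fusion.value).
value = nn.Linear(hidden_size, hidden_size, bias=False)

# Squared Frobenius distance to the identity, weighted by 0.01.
reg_loss = 0.01 * (target - value.weight).pow(2).sum()
print(float(reg_loss))
```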
SzymonSzyszko/AeroPy
[ "b061c690e5926fdd834b7c50837c25108e908156" ]
[ "examples/structural/beam.py" ]
[ "from aeropy.geometry.parametric import poly\nfrom aeropy.structural.stable_solution import (structure, mesh_1D, properties,\n boundary_conditions)\nfrom aeropy.xfoil_module import output_reader\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport numpy as np\nimport pickle\n\nabaqus_primary = pickle.load(open(\"save.p\", \"rb\"), encoding='latin1')\nabaqus_secondary = output_reader('secondary_variables.txt')\n# sort data\nabaqus_data = np.array(sorted(zip(abaqus_primary['C_U']['x'],\n abaqus_primary['C_U']['y'],\n abaqus_primary['U'][:, 0],\n abaqus_primary['U'][:, 1],)))\nabq_x, abq_y, abq_u1, abq_u2 = abaqus_data.T\nabq_y = -abq_y + .005\nabq_u2 = -abq_u2\n# Convert log strains into engineering strain\nabaqus_secondary['LE11'] = np.exp(np.array(abaqus_secondary['LE11'])) - 1\nabaqus_secondary['LE12'] = np.exp(np.array(abaqus_secondary['LE12'])) - 1\nabaqus_secondary['LE22'] = np.exp(np.array(abaqus_secondary['LE22'])) - 1\ncoefficients = np.array([0, 0, 0, 0])\n\nbp = properties()\nbc = boundary_conditions(load=np.array([[0, -1]]))\nanalytical_solution = bc.concentrated_load[0][1]/(6*bp.young*bp.inertia) * \\\n np.array([-1, 3, 0, 0])\nmesh = mesh_1D(mesh_n=10)\ncurve_parent = poly(a=[0, 0, 0, 0])\ncurve_child = poly(a=analytical_solution)\n\nbeam = structure(curve_parent, curve_child, mesh, bp, bc)\nbeam.calculate_position()\nstrain = beam.strain()\nstress = beam.stress(loading_condition='plane_stress')\n\n# Plot beam results\nplt.figure()\nu = beam.u()\nu1 = beam.u(diff='x1')\nu2 = beam.u(diff='x2')\nplt.plot(beam.r_p[0], beam.r_p[1], label='parent')\nplt.scatter(beam.r_p[0], beam.r_p[1], label='parent')\nplt.plot(beam.r_c[0], beam.r_c[1], label='child')\nplt.scatter(beam.r_c[0], beam.r_c[1], label='child')\nplt.plot(abq_x, abq_y, label='Abaqus')\nplt.title('Position')\nplt.grid()\nplt.legend()\n\n# Plot beam results\nplt.figure()\nr1_p, r1_c = beam.calculate_position(diff='x1')\nr2_p, r2_c = beam.calculate_position(diff='x2')\n# plt.plot(beam.r_p[0], r1_p[0], label='$r_{1,1}^p$')\nplt.plot(beam.r_p[0], r1_p[1], label='$r_{2,1}^p$')\n# plt.plot(beam.r_p[0], r2_p[0], label='$r_{1,2}^p$')\nplt.plot(beam.r_p[0], r2_p[1], label='$r_{2,2}^p$')\n# plt.plot(beam.r_p[0], r1_c[0], label='$r_{1,1}^c$')\nplt.plot(beam.r_p[0], r1_c[1], label='$r_{2,1}^c$')\n# plt.plot(beam.r_p[0], r2_c[0], label='$r_{1,2}^c$')\nplt.plot(beam.r_p[0], r2_c[1], label='$r_{2,2}^c$')\nplt.title('Position gradients')\nplt.grid()\nplt.legend()\n\n# Plot beam results\nplt.figure()\nu = beam.u()\nu1 = beam.u(diff='x1')\nu2 = beam.u(diff='x2')\nplt.scatter(beam.mesh.x_p, u[0], label=r'$u_1$')\nplt.scatter(beam.mesh.x_p, u[1], label=r'$u_2$')\nplt.plot(beam.mesh.x_p, u[0], label=r'$u_1$')\nplt.plot(beam.mesh.x_p, u[1], label=r'$u_2$')\n# plt.plot(abq_x, abq_u1, label=r'Abaqus $u_1$')\n# plt.plot(abq_x, abq_u2, label=r'Abaqus $u_2$')\nplt.title('Displacement diff')\nplt.legend()\n\n\nplt.figure()\nplt.plot(beam.mesh.x_p, strain[0][0], label=r'$\\epsilon_{11}$')\nplt.plot(beam.mesh.x_p, strain[0][1], label=r'$\\epsilon_{12}$')\nplt.plot(beam.mesh.x_p, strain[1][1], label=r'$\\epsilon_{22}$')\nplt.plot(abaqus_secondary['X'], abaqus_secondary['LE11'],\n label=r'Abaqus $\\epsilon_{11}$')\nplt.plot(abaqus_secondary['X'], abaqus_secondary['LE12'],\n label=r'Abaqus $\\epsilon_{12}$')\nplt.plot(abaqus_secondary['X'], abaqus_secondary['LE22'],\n label=r'Abaqus $\\epsilon_{22}$')\nplt.title('Strain')\nplt.legend()\n\nplt.figure()\nplt.plot(beam.mesh.x_p, stress[0][0], 
label=r'$\\sigma_{11}$')\nplt.plot(beam.mesh.x_p, stress[0][1], label=r'$\\sigma_{12}$')\nplt.plot(beam.mesh.x_p, stress[1][1], label=r'$\\sigma_{22}$')\nplt.plot(abaqus_secondary['X'], abaqus_secondary['S11'],\n label=r'Abaqus $\\sigma_{11}$')\nplt.plot(abaqus_secondary['X'], abaqus_secondary['S12'],\n label=r'Abaqus $\\sigma_{12}$')\nplt.plot(abaqus_secondary['X'], abaqus_secondary['S22'],\n label=r'Abaqus $\\sigma_{22}$')\nplt.legend()\nplt.title('Stress')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "matplotlib.pyplot.grid", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.plot", "matplotlib.pyplot.scatter" ] ]
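As a quick cross-check of the `analytical_solution` coefficients used in the beam script above, here is a standalone sketch that evaluates the Euler-Bernoulli deflection of a cantilever under a tip load. It assumes the coefficient vector [-1, 3, 0, 0] is ordered from the cubic term down and that the span is normalized to 1; the stiffness values are placeholders rather than the `properties()` defaults.

```python
import numpy as np

P = -1.0                      # tip load, matching boundary_conditions(load=[[0, -1]])
E, I = 70e9, 1e-8             # placeholder Young's modulus and second moment of area

# w(x) = P/(6 E I) * (3 x^2 - x^3): cantilever deflection under a tip load.
coeffs = P / (6 * E * I) * np.array([-1, 3, 0, 0])

x = np.linspace(0, 1, 11)     # normalized span
w = np.polyval(coeffs, x)     # deflection sampled along the beam
print(w[-1], P / (3 * E * I)) # tip deflection matches the closed-form value
```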
sandymule/Credit-Card-Default
[ "c9d67feffa65fb7aad514bd9c1991766e8e2777b" ]
[ "credit_default/app/views.py" ]
[ "import logging\nimport json\n\nimport pandas as pd\nfrom flask import render_template\nfrom flask_wtf import Form\nfrom wtforms import fields\nfrom wtforms.validators import Required\n\nfrom . import app, estimator, target_names\n\nlogger = logging.getLogger('app')\n\nclass PredictForm(Form):\n \"\"\"Fields for Predict\"\"\"\n # sepal_length = fields.DecimalField('Sepal Length:', places=2, validators=[Required()])\n # sepal_width = fields.DecimalField('Sepal Width:', places=2, validators=[Required()])\n # petal_length = fields.DecimalField('Petal Length:', places=2, validators=[Required()])\n # petal_width = fields.DecimalField('Petal Width:', places=2, validators=[Required()])\n Limit_bal = fields.DecimalField('Limit Balance:', places=2, validators=[Required()])\n Gender_list = [(1, \"Male\"), (2, \"Female\")]\n Gender = fields.SelectField(\"Gender\", choices=Gender_list, coerce=int)\n Education_list = [(1, \"Graduate school\"), (2, \"College\"), (3, \"High school\"), (4, \"Less than high school\")]\n Education = fields.SelectField(\"Education\", choices=Education_list, coerce=int)\n Marriage_list = [(1, \"Married\"), (2, \"Single\"), (3, \"Separated, Divorced, or Widowed\")]\n Marriage = fields.SelectField(\"Marriage\", choices=Marriage_list, coerce=int)\n Age= fields.DecimalField('Age:', places=2, validators=[Required()])\n Percent_1_monthago = fields.DecimalField('Percent Paid 1 Month Ago:', places=2, validators=[Required()])\n Percent_2_monthago = fields.DecimalField('Percent Paid 2 Months Ago:', places=2, validators=[Required()])\n Percent_3_monthago = fields.DecimalField('Percent Paid 3 Months Ago:', places=2, validators=[Required()])\n Percent_4_monthago = fields.DecimalField('Percent Paid 4 Months Ago:', places=2, validators=[Required()])\n Percent_5_monthago = fields.DecimalField('Percent Paid 5 Months Ago:', places=2, validators=[Required()])\n Percent_6_monthago = fields.DecimalField('Percent Paid 6 Months Ago:', places=2, validators=[Required()])\n\n submit = fields.SubmitField('Submit')\n\[email protected]('/',methods=('GET','POST'))\ndef predict():\n return render_template('homepage.html')\n\[email protected]('/visualize',methods=('GET','POST'))\ndef visualize():\n datastuff = []\n\n\n \"\"\"Index page\"\"\"\n form = PredictForm()\n # predicted_iris = None\n result = None\n\n if form.validate_on_submit():\n # store the submitted values\n\n submitted_data = form.data\n\n # Retrieve values from form\n # sepal_length = float(submitted_data['sepal_length'])\n # sepal_width = float(submitted_data['sepal_width'])\n # petal_length = float(submitted_data['petal_length'])\n # petal_width = float(submitted_data['petal_width'])\n Limit_bal = float(submitted_data['Limit_bal'])\n Gender = float(submitted_data['Gender'])\n Education = float(submitted_data['Education'])\n Marriage = float(submitted_data['Marriage'])\n Age = float(submitted_data['Age'])\n Percent_1_monthago = float(submitted_data['Percent_1_monthago'])\n Percent_2_monthago = float(submitted_data['Percent_2_monthago'])\n Percent_3_monthago = float(submitted_data['Percent_3_monthago'])\n Percent_4_monthago = float(submitted_data['Percent_4_monthago'])\n Percent_5_monthago = float(submitted_data['Percent_5_monthago'])\n Percent_6_monthago = float(submitted_data['Percent_6_monthago'])\n\n # Create array from values\n # flower_instance = [sepal_length, sepal_width, petal_length, petal_width]\n default_instance = [Limit_bal, Gender, Education, Marriage, Age,\n Percent_1_monthago, Percent_2_monthago, Percent_3_monthago,\n 
Percent_4_monthago, Percent_5_monthago, Percent_6_monthago]\n # my_prediction = estimator.predict(flower_instance)\n result = estimator.predict(default_instance)[0] # Target Predicted\n\n df = pd.DataFrame([{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", 
\"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 
5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", 
\"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n }\n])\n\n\n datastuff = df.to_json(orient=\"records\")\n else:\n print (form.data)\n\n return render_template('visualize.html',\n form=form,\n # prediction=predicted_iris\n prediction=result, data=datastuff)\n\n\n\n\[email protected]('/predict', methods=('GET', 'POST'))\ndef index():\n\n datastuff = []\n\n\n \"\"\"Index page\"\"\"\n form = PredictForm()\n # predicted_iris = None\n result = None\n\n if form.validate_on_submit():\n # store the submitted values\n\n submitted_data = form.data\n\n # Retrieve values from form\n # sepal_length = float(submitted_data['sepal_length'])\n # sepal_width = float(submitted_data['sepal_width'])\n # petal_length = float(submitted_data['petal_length'])\n # petal_width = float(submitted_data['petal_width'])\n Limit_bal = float(submitted_data['Limit_bal'])\n Gender = float(submitted_data['Gender'])\n Education = float(submitted_data['Education'])\n Marriage = float(submitted_data['Marriage'])\n Age = 
float(submitted_data['Age'])\n Percent_1_monthago = float(submitted_data['Percent_1_monthago'])\n Percent_2_monthago = float(submitted_data['Percent_2_monthago'])\n Percent_3_monthago = float(submitted_data['Percent_3_monthago'])\n Percent_4_monthago = float(submitted_data['Percent_4_monthago'])\n Percent_5_monthago = float(submitted_data['Percent_5_monthago'])\n Percent_6_monthago = float(submitted_data['Percent_6_monthago'])\n\n # Create array from values\n # flower_instance = [sepal_length, sepal_width, petal_length, petal_width]\n default_instance = [Limit_bal, Gender, Education, Marriage, Age,\n Percent_1_monthago, Percent_2_monthago, Percent_3_monthago,\n Percent_4_monthago, Percent_5_monthago, Percent_6_monthago]\n # my_prediction = estimator.predict(flower_instance)\n result = estimator.predict(default_instance)[0] # Target Predicted\n\n df = pd.DataFrame([{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n 
\"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 
Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Over 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Over 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 
Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Over 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Over 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Over 4%\", \"Payment 6 Less Than 5%\"]\n },{\n \"name\": \"Payment 6 Over 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Over 5%\"]\n },{\n \"name\": \"Payment 6 Less Than 5%\",\n \"taxonomy\": [\"Payment 1 Under 0%\", \"Payment 2 Under 1%\", \"Payment 3 Under 2%\", \"Payment 4 Under 3%\", \"Payment 5 Under 4%\", \"Payment 6 Less Than 5%\"]\n }\n])\n\n\n\n\n\n datastuff = df.to_json(orient=\"records\")\n else:\n print (form.data)\n\n return render_template('predict.html',\n form=form,\n # prediction=predicted_iris\n prediction=result, data=datastuff)\n" ]
[ [ "pandas.DataFrame" ] ]
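A minimal sketch of the serialization step both view functions above end with: `DataFrame.to_json(orient="records")` turns the name/taxonomy rows into a JSON array the template can hand to the front-end. The two rows here are an abbreviated stand-in for the full table.

```python
import pandas as pd

df = pd.DataFrame([
    {"name": "Payment 6 Over 5%",
     "taxonomy": ["Payment 1 Over 0%", "Payment 6 Over 5%"]},       # shortened demo taxonomy
    {"name": "Payment 6 Less Than 5%",
     "taxonomy": ["Payment 1 Over 0%", "Payment 6 Less Than 5%"]},
])

# One JSON object per row, e.g. [{"name":"...","taxonomy":["...","..."]}, ...]
datastuff = df.to_json(orient="records")
print(datastuff)
```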
xiangzhemeng/epfl-ml2017-project2
[ "16345b3e453989dfeba70667773b76362897a782" ]
[ "cnn_training.py" ]
[ "import pandas as pd\nimport numpy as np\nimport pickle\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers.embeddings import Embedding\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\nfrom keras.layers import LSTM\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nfrom keras.callbacks import EarlyStopping\n\n\n# Main function of cnn training\n\ndef run_neural_network():\n print(\" == Enter into CNN training step ==\")\n\n np.random.seed(0)\n\n x_train = pd.read_pickle(\"data/pickles/train_after_preprocess.pkl\")\n x_train = np.array(x_train['tweet'])\n\n x_test = pd.read_pickle(\"data/pickles/test_after_preprocess.pkl\")\n x_test = np.array(x_test['tweet'])\n\n y = np.array(int(2500000 / 2) * [0] + int(2500000 / 2) * [1])\n print(\"Data loading finish!\")\n\n # Tokenization\n tokenizer = Tokenizer(filters='')\n tokenizer.fit_on_texts(x_train)\n\n # Turn x_train into sequence form\n sequence_train = tokenizer.texts_to_sequences(x_train)\n # Turn x_test into sequence form\n sequence_test = tokenizer.texts_to_sequences(x_test)\n\n # Transform sequence_train into into a 2D Numpy array\n sequence_train = sequence.pad_sequences(sequence_train, maxlen = 30)\n # Transform sequence_test into into a 2D Numpy array\n sequence_test = sequence.pad_sequences(sequence_test, maxlen = 30)\n\n # Affect input dimension\n input_dim = len(tokenizer.word_index) + 1\n input_length = sequence_train.shape[1]\n print(\"Tokenization finish!\")\n\n # Shuffle training dataset\n new_index = np.arange(sequence_train.shape[0])\n np.random.shuffle(new_index)\n sequence_train = sequence_train[new_index]\n y = y[new_index]\n print(\"Data shuffling finish!\")\n\n earlyStopping = EarlyStopping(monitor = 'val_loss', patience = 2)\n\n\n ### Model 1 ###\n print(\"Build model1!\")\n np.random.seed(1)\n model = Sequential()\n model.add(Embedding(input_dim, 50, input_length = input_length))\n model.add(Conv1D(padding = \"same\", kernel_size = 3, filters = 32, activation = \"relu\"))\n model.add(MaxPooling1D(pool_size = 2))\n model.add(Flatten())\n model.add(Dense(250, activation = 'relu'))\n model.add(Dense(1, activation = 'sigmoid'))\n model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])\n\n print(\"Fit model1!\")\n model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])\n\n print(\"Generate prediction!\")\n train_model1 = model.predict(sequence_train, batch_size = 128)\n pickle.dump(train_model1, open('data/xgboost/train_model1.txt', 'wb'))\n test_model1 = model.predict(sequence_test)\n pickle.dump(test_model1, open('data/xgboost/test_model1.txt', 'wb'))\n print(\"Model1 finished!\")\n\n\n ### Model 2 ###\n print(\"Build model2!\")\n np.random.seed(2)\n model = Sequential()\n model.add(Embedding(input_dim, 50, input_length = input_length))\n model.add(LSTM(100, recurrent_dropout = 0.2, dropout = 0.2))\n model.add(Dense(1, activation = 'sigmoid'))\n model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])\n\n print(\"Fit model2!\")\n model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])\n\n print(\"Generate prediction!\")\n train_model2 = model.predict(sequence_train, batch_size = 128)\n pickle.dump(train_model2, 
open('data/xgboost/train_model2.txt', 'wb'))\n test_model2 = model.predict(sequence_test)\n pickle.dump(test_model2, open('data/xgboost/test_model2.txt', 'wb'))\n print(\"Model2 finished!\")\n\n\n ### Model 3 ###\n print(\"Build model3!\")\n np.random.seed(3)\n model = Sequential()\n model.add(Embedding(input_dim, 50, input_length = input_length))\n model.add(Conv1D(padding = \"same\", kernel_size = 3, filters = 32, activation = \"relu\"))\n model.add(MaxPooling1D(pool_size = 2))\n model.add(LSTM(100))\n model.add(Dense(1, activation = 'sigmoid'))\n model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])\n\n print(\"Fit model3!\")\n model.fit(sequence_train, y, validation_split = 0.1, epochs = 10, batch_size = 128, verbose = 1, shuffle = True, callbacks = [earlyStopping])\n\n print(\"Generate prediction!\")\n train_model3 = model.predict(sequence_train, batch_size = 128)\n pickle.dump(train_model3, open('data/xgboost/train_model3.txt', 'wb'))\n test_model3 = model.predict(sequence_test)\n pickle.dump(test_model3, open('data/xgboost/test_model3.txt', 'wb'))\n print(\"Model3 finished!\")\n\n\nif __name__ == \"__main__\":\n run_neural_network()\n" ]
[ [ "pandas.read_pickle", "numpy.random.shuffle", "numpy.random.seed", "numpy.arange", "numpy.array" ] ]
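A small, self-contained illustration of the tokenize-then-pad step at the top of `run_neural_network` above, using three toy tweets instead of the pickled training set; `maxlen=30` matches the script, everything else is made up for the demo.

```python
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence

tweets = np.array(["good movie", "really bad movie", "great"])   # toy corpus

tokenizer = Tokenizer(filters='')
tokenizer.fit_on_texts(tweets)

seqs = tokenizer.texts_to_sequences(tweets)       # lists of word indices
padded = sequence.pad_sequences(seqs, maxlen=30)  # 2-D array, zero-padded on the left

print(padded.shape)                   # (3, 30)
print(len(tokenizer.word_index) + 1)  # input_dim for the Embedding layer
```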
IlyaKodua/colorization_with_averaging_ab_channels_test
[ "425a9f3e8b875b21c76424e892cbf489a9e408cb" ]
[ "sig.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass SIGGRAPHGenerator(nn.Module):\n def __init__(self, norm_layer=nn.BatchNorm2d, classes=529):\n super(SIGGRAPHGenerator, self).__init__()\n\n # Conv1\n model1=[nn.Conv2d(4, 64, kernel_size=3, stride=1, padding=1, bias=True),]\n model1+=[nn.ReLU(True),]\n model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True),]\n model1+=[nn.ReLU(True),]\n model1+=[norm_layer(64),]\n # add a subsampling operation\n\n # Conv2\n model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]\n model2+=[nn.ReLU(True),]\n model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]\n model2+=[nn.ReLU(True),]\n model2+=[norm_layer(128),]\n # add a subsampling layer operation\n\n # Conv3\n model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),]\n model3+=[nn.ReLU(True),]\n model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]\n model3+=[nn.ReLU(True),]\n model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]\n model3+=[nn.ReLU(True),]\n model3+=[norm_layer(256),]\n # add a subsampling layer operation\n\n # Conv4\n model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),]\n model4+=[nn.ReLU(True),]\n model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]\n model4+=[nn.ReLU(True),]\n model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]\n model4+=[nn.ReLU(True),]\n model4+=[norm_layer(512),]\n\n # Conv5\n model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]\n model5+=[nn.ReLU(True),]\n model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]\n model5+=[nn.ReLU(True),]\n model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]\n model5+=[nn.ReLU(True),]\n model5+=[norm_layer(512),]\n\n # Conv6\n model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]\n model6+=[nn.ReLU(True),]\n model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]\n model6+=[nn.ReLU(True),]\n model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),]\n model6+=[nn.ReLU(True),]\n model6+=[norm_layer(512),]\n\n # Conv7\n model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]\n model7+=[nn.ReLU(True),]\n model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]\n model7+=[nn.ReLU(True),]\n model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),]\n model7+=[nn.ReLU(True),]\n model7+=[norm_layer(512),]\n\n # Conv7\n model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True)]\n model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]\n\n model8=[nn.ReLU(True),]\n model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]\n model8+=[nn.ReLU(True),]\n model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),]\n model8+=[nn.ReLU(True),]\n model8+=[norm_layer(256),]\n\n # Conv9\n model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True),]\n model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]\n # add the two feature maps above \n\n model9=[nn.ReLU(True),]\n model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),]\n model9+=[nn.ReLU(True),]\n model9+=[norm_layer(128),]\n\n # 
Conv10\n model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True),]\n model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),]\n # add the two feature maps above\n\n model10=[nn.ReLU(True),]\n model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=True),]\n model10+=[nn.LeakyReLU(negative_slope=.2),]\n\n # classification output\n model_class=[nn.Conv2d(256, classes, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]\n\n # regression output\n model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),]\n model_out+=[nn.Tanh()]\n\n self.model1 = nn.Sequential(*model1)\n self.model2 = nn.Sequential(*model2)\n self.model3 = nn.Sequential(*model3)\n self.model4 = nn.Sequential(*model4)\n self.model5 = nn.Sequential(*model5)\n self.model6 = nn.Sequential(*model6)\n self.model7 = nn.Sequential(*model7)\n self.model8up = nn.Sequential(*model8up)\n self.model8 = nn.Sequential(*model8)\n self.model9up = nn.Sequential(*model9up)\n self.model9 = nn.Sequential(*model9)\n self.model10up = nn.Sequential(*model10up)\n self.model10 = nn.Sequential(*model10)\n self.model3short8 = nn.Sequential(*model3short8)\n self.model2short9 = nn.Sequential(*model2short9)\n self.model1short10 = nn.Sequential(*model1short10)\n\n self.model_class = nn.Sequential(*model_class)\n self.model_out = nn.Sequential(*model_out)\n\n self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='bilinear'),])\n self.softmax = nn.Sequential(*[nn.Softmax(dim=1),])\n\n def forward(self, input_A, input_B=None, mask_B=None):\n if(input_B is None):\n input_B = torch.cat((input_A*0, input_A*0), dim=1)\n if(mask_B is None):\n mask_B = input_A*0\n\n conv1_2 = self.model1(torch.cat((input_A,input_B,mask_B),dim=1))\n conv2_2 = self.model2(conv1_2[:,:,::2,::2])\n conv3_3 = self.model3(conv2_2[:,:,::2,::2])\n conv4_3 = self.model4(conv3_3[:,:,::2,::2])\n conv5_3 = self.model5(conv4_3)\n conv6_3 = self.model6(conv5_3)\n conv7_3 = self.model7(conv6_3)\n\n conv8_up = self.re_pad_sum(self.model8up(conv7_3), self.model3short8(conv3_3))\n conv8_3 = self.model8(conv8_up)\n conv9_up = self.re_pad_sum(self.model9up(conv8_3),self.model2short9(conv2_2))\n conv9_3 = self.model9(conv9_up)\n conv10_up = self.re_pad_sum(self.model10up(conv9_3),self.model1short10(conv1_2))\n conv10_2 = self.model10(conv10_up)\n out_reg = self.model_out(conv10_2)\n\n conv9_up = self.re_pad_sum(self.model9up(conv8_3), self.model2short9(conv2_2))\n conv9_3 = self.model9(conv9_up)\n conv10_up = self.re_pad_sum(self.model10up(conv9_3), self.model1short10(conv1_2))\n conv10_2 = self.model10(conv10_up)\n out_reg = self.model_out(conv10_2)\n\n return out_reg\n\n def re_pad_sum(self, x, y):\n\n\n diffY = y.size()[2] - x.size()[2]\n diffX = y.size()[3] - x.size()[3]\n\n x = F.pad(x, [diffX // 2, diffX - diffX // 2,\n diffY // 2, diffY - diffY // 2])\n return x + y \n\n\ndef siggraph17(pretrained=True):\n model = SIGGRAPHGenerator()\n if(pretrained):\n import torch.utils.model_zoo as model_zoo\n model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/siggraph17-df00044c.pth',map_location='cpu',check_hash=True))\n return model" ]
[ [ "torch.nn.LeakyReLU", "torch.nn.functional.pad", "torch.nn.Softmax", "torch.nn.Tanh", "torch.nn.Upsample", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.utils.model_zoo.load_url", "torch.nn.ReLU", "torch.cat", "torch.nn.ConvTranspose2d" ] ]
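A standalone sketch of the `re_pad_sum` skip connection used in the forward pass above: the upsampled tensor is zero-padded until its spatial size matches the encoder feature map, then the two are summed. The tensor shapes below are arbitrary demo values.

```python
import torch
import torch.nn.functional as F

def re_pad_sum(x, y):
    # Pad x (last two dims: left, right, top, bottom) up to y's spatial size, then add.
    diff_h = y.size(2) - x.size(2)
    diff_w = y.size(3) - x.size(3)
    x = F.pad(x, [diff_w // 2, diff_w - diff_w // 2,
                  diff_h // 2, diff_h - diff_h // 2])
    return x + y

x = torch.randn(1, 16, 13, 13)   # e.g. output of a ConvTranspose2d upsampling stage
y = torch.randn(1, 16, 14, 14)   # encoder feature map from the matching shortcut conv
print(re_pad_sum(x, y).shape)    # torch.Size([1, 16, 14, 14])
```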
TuranSKT/detectron2_class
[ "c90e68abbd39afa8c34d83ac760cabf3b5d02868" ]
[ "imgcls/modeling/backbone/mobilenet.py" ]
[ "'''\n@Copyright (c) tkianai All Rights Reserved.\n@Author : tkianai\n@Github : https://github.com/tkianai\n@Date : 2020-04-26 14:14:18\n@FilePath : /ImageCls.detectron2/imgcls/modeling/backbone/mobilenet.py\n@Description : \n'''\n\n\nimport torch\nimport torch.nn as nn\nfrom detectron2.layers import Conv2d, ShapeSpec\nfrom detectron2.modeling.backbone.build import BACKBONE_REGISTRY\nfrom detectron2.modeling.backbone import Backbone\nfrom detectron2.modeling.backbone.fpn import FPN, LastLevelMaxPool, LastLevelP6P7\n\n\n__all__ = [\n 'build_mnetv1_backbone',\n 'build_mnetv2_backbone',\n]\n\n\ndef conv_bn_leaky(inp, oup, stride=1, leaky=0):\n return nn.Sequential(\n Conv2d(inp, oup, 3, stride, 1, bias=False),\n nn.BatchNorm2d(oup),\n nn.LeakyReLU(negative_slope=leaky, inplace=True)\n )\n\n\ndef conv_dw_leaky(inp, oup, stride, leaky=0.1):\n return nn.Sequential(\n Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),\n nn.BatchNorm2d(inp),\n nn.LeakyReLU(negative_slope=leaky, inplace=True),\n\n Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n nn.LeakyReLU(negative_slope=leaky, inplace=True),\n )\n\n\nclass MobileNetV1(Backbone):\n def __init__(self, cfg, data_channel, width_mult=1.0, out_features=None, num_classes=None):\n super().__init__()\n self.num_classes = num_classes\n \n input_channel = 32\n # scale input channel\n input_channel = int(input_channel * width_mult)\n # stem\n current_stride = 2\n name = \"stem\"\n self.stem = conv_bn_leaky(\n data_channel, input_channel, current_stride, leaky=0.1)\n\n self._out_feature_strides = {name: current_stride}\n self._out_feature_channels = {name: input_channel}\n\n # body\n dw_setting = [\n # c, n, s\n [64, 1, 1],\n [128, 2, 2],\n [256, 2, 2],\n [512, 6, 2],\n [1024, 2, 2],\n ]\n\n self.return_features_indices = [3, 5, 11, 13]\n self.features = nn.ModuleList([])\n # building depthwise conv block\n for c, n, s in dw_setting:\n output_channel = int(c * width_mult)\n for i in range(n):\n # the first one applying stride\n if i == 0:\n self.features.append(conv_dw_leaky(\n input_channel, output_channel, s))\n else:\n self.features.append(conv_dw_leaky(\n input_channel, output_channel, 1))\n # update input channel for next block\n input_channel = output_channel\n # check output this feature map?\n if len(self.features) in self.return_features_indices:\n name = \"mob{}\".format(\n self.return_features_indices.index(len(self.features)) + 2)\n self._out_feature_channels.update({\n name: output_channel\n })\n current_stride *= 2\n self._out_feature_strides.update({\n name: current_stride\n })\n\n if num_classes is not None:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(input_channel, num_classes)\n nn.init.normal_(self.linear.weight, std=0.01)\n name = \"linear\"\n\n if out_features is None:\n out_features = [name]\n self._out_features = out_features\n assert len(self._out_features)\n\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, (2. 
/ n) ** 0.5)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n # n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def freeze(self, freeze_at):\n if freeze_at > 0:\n # freeze stem\n for p in self.stem.parameters():\n p.requires_grad = False\n if freeze_at > 1:\n # freeze features\n freeze_at = freeze_at - 2\n freeze_layers = self.return_features_indices[freeze_at] if freeze_at < len(\n self.return_features_indices) else self.return_features_indices[-1]\n for layer_index in range(freeze_layers):\n for p in self.features[layer_index].parameters():\n p.requires_grad = False\n return self\n\n def forward(self, x):\n outputs = {}\n x = self.stem(x)\n if \"stem\" in self._out_features:\n outputs[\"stem\"] = x\n for i, m in enumerate(self.features, 1):\n x = m(x)\n if i in self.return_features_indices:\n name = \"mob{}\".format(\n self.return_features_indices.index(i) + 2)\n if name in self._out_features:\n outputs[name] = x\n if self.num_classes is not None:\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.linear(x)\n if \"linear\" in self._out_features:\n outputs[\"linear\"] = x\n return outputs\n\n\ndef conv_bn(inp, oup, stride):\n return nn.Sequential(\n Conv2d(inp, oup, 3, stride, 1, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU6(inplace=True)\n )\n\n\ndef conv_1x1_bn(inp, oup):\n return nn.Sequential(\n Conv2d(inp, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n nn.ReLU6(inplace=True)\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super().__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = self.stride == 1 and inp == oup\n\n if expand_ratio == 1:\n self.conv = nn.Sequential(\n # dw\n Conv2d(inp, hidden_dim, 3, stride, 1,\n groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n else:\n self.conv = nn.Sequential(\n # pw\n Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # dw\n Conv2d(hidden_dim, hidden_dim, 3, stride,\n 1, groups=hidden_dim, bias=False),\n nn.BatchNorm2d(hidden_dim),\n nn.ReLU6(inplace=True),\n # pw-linear\n Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n )\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass MobileNetV2(Backbone):\n def __init__(self, cfg, data_channel, width_mult=1.0, out_features=None, num_classes=None):\n super().__init__()\n self.num_classes = num_classes\n \n input_channel = 32\n # scale input channel\n input_channel = int(input_channel * width_mult)\n # stem\n current_stride = 2\n name = \"stem\"\n self.stem = conv_bn(data_channel, input_channel, current_stride)\n\n self._out_feature_strides = {name: current_stride}\n self._out_feature_channels = {name: input_channel}\n\n # body\n block = InvertedResidual\n inverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n ]\n self.return_features_indices = [3, 6, 13, 17]\n self.features = nn.ModuleList([])\n\n # building inverted residual blocks\n for t, c, n, s in inverted_residual_setting:\n output_channel = int(c * width_mult)\n for i in 
range(n):\n # the first one applying stride\n if i == 0:\n self.features.append(\n block(input_channel, output_channel, s, expand_ratio=t))\n else:\n self.features.append(\n block(input_channel, output_channel, 1, expand_ratio=t))\n # update input channel for next block\n input_channel = output_channel\n # check output this feature map?\n if len(self.features) in self.return_features_indices:\n name = \"mob{}\".format(\n self.return_features_indices.index(len(self.features)) + 2)\n self._out_feature_channels.update({\n name: output_channel\n })\n current_stride *= 2\n self._out_feature_strides.update({\n name: current_stride\n })\n \n if num_classes is not None:\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.linear = nn.Linear(input_channel, num_classes)\n nn.init.normal_(self.linear.weight, std=0.01)\n name = \"linear\"\n\n if out_features is None:\n out_features = [name]\n self._out_features = out_features\n assert len(self._out_features)\n\n self._initialize_weights()\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, (2. / n) ** 0.5)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n # n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\n def freeze(self, freeze_at):\n if freeze_at > 0:\n # freeze stem\n for p in self.stem.parameters():\n p.requires_grad = False\n if freeze_at > 1:\n # freeze features\n freeze_at = freeze_at - 2\n freeze_layers = self.return_features_indices[freeze_at] if freeze_at < len(\n self.return_features_indices) else self.return_features_indices[-1]\n for layer_index in range(freeze_layers):\n for p in self.features[layer_index].parameters():\n p.requires_grad = False\n return self\n\n def forward(self, x):\n outputs = {}\n x = self.stem(x)\n if \"stem\" in self._out_features:\n outputs[\"stem\"] = x\n # res2 -> stride 2**2\n # res3 -> stride 2**3\n # output downsample stride: [4, 8, 16, 32]\n for i, m in enumerate(self.features, 1):\n x = m(x)\n if i in self.return_features_indices:\n name = \"mob{}\".format(\n self.return_features_indices.index(i) + 2)\n if name in self._out_features:\n outputs[name] = x\n \n if self.num_classes is not None:\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.linear(x)\n if \"linear\" in self._out_features:\n outputs[\"linear\"] = x\n return outputs\n\n\n@BACKBONE_REGISTRY.register()\ndef build_mnetv1_backbone(cfg, input_shape: ShapeSpec):\n freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT\n out_features = cfg.MODEL.MNET.OUT_FEATURES\n width_mult = cfg.MODEL.MNET.WIDTH_MULT\n num_classes = cfg.MODEL.CLSNET.NUM_CLASSES if cfg.MODEL.CLSNET.ENABLE else None\n model = MobileNetV1(cfg, input_shape.channels, width_mult=width_mult,\n out_features=out_features, num_classes=num_classes).freeze(freeze_at)\n return model\n\n\n@BACKBONE_REGISTRY.register()\ndef build_mnetv2_backbone(cfg, input_shape: ShapeSpec):\n freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT\n out_features = cfg.MODEL.MNET.OUT_FEATURES\n width_mult = cfg.MODEL.MNET.WIDTH_MULT\n num_classes = cfg.MODEL.CLSNET.NUM_CLASSES if cfg.MODEL.CLSNET.ENABLE else None\n model = MobileNetV2(cfg, input_shape.channels, width_mult=width_mult,\n out_features=out_features, num_classes=num_classes).freeze(freeze_at)\n return model\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Linear", "torch.nn.AdaptiveAvgPool2d", "torch.flatten", "torch.nn.init.normal_", "torch.nn.ReLU6", "torch.nn.ModuleList", "torch.nn.LeakyReLU" ] ]
kokoff/mlflow
[ "062722b172f403e613c41f9bb024b3e1673dfe31" ]
[ "tests/onnx/test_onnx_model_export.py" ]
[ "import sys\nimport os\nimport pytest\nimport mock\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport sklearn.datasets as datasets\nimport pandas as pd\nimport numpy as np\nimport yaml\n\nimport tensorflow as tf\nimport mlflow.pyfunc.scoring_server as pyfunc_scoring_server\nfrom mlflow import pyfunc\nfrom mlflow.models import infer_signature, Model\nfrom mlflow.models.utils import _read_example\nfrom mlflow.utils.file_utils import TempDir\nfrom tests.helper_functions import pyfunc_serve_and_score_model\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.environment import _mlflow_conda_env\nfrom mlflow.utils.model_utils import _get_flavor_configuration\n\npytestmark = pytest.mark.skipif(\n (sys.version_info < (3, 6)), reason=\"Tests require Python 3 to run!\"\n)\n\n\[email protected](scope=\"module\")\ndef data():\n iris = datasets.load_iris()\n data = pd.DataFrame(\n data=np.c_[iris[\"data\"], iris[\"target\"]], columns=iris[\"feature_names\"] + [\"target\"]\n )\n y = data[\"target\"]\n x = data.drop(\"target\", axis=1)\n return x, y\n\n\[email protected](scope=\"module\")\ndef model(data):\n x, y = data\n model = Sequential()\n model.add(Dense(3, input_dim=4))\n model.add(Dense(1))\n model.compile(loss=\"mean_squared_error\", optimizer=\"SGD\")\n model.fit(x, y)\n return model\n\n\[email protected](scope=\"module\")\ndef onnx_model(model):\n import onnxmltools\n\n return onnxmltools.convert_keras(model)\n\n\[email protected](scope=\"module\")\ndef sklearn_model(data):\n from sklearn.linear_model import LogisticRegression\n\n x, y = data\n model = LogisticRegression()\n model.fit(x, y)\n return model\n\n\[email protected](scope=\"module\")\ndef onnx_sklearn_model(sklearn_model):\n import onnxmltools\n from skl2onnx.common.data_types import FloatTensorType\n\n initial_type = [(\"float_input\", FloatTensorType([None, 4]))]\n onx = onnxmltools.convert_sklearn(sklearn_model, initial_types=initial_type)\n return onx\n\n\[email protected](scope=\"module\")\ndef predicted(model, data):\n return model.predict(data[0])\n\n\[email protected](scope=\"module\")\ndef tf_model_multiple_inputs_float64():\n graph = tf.Graph()\n with graph.as_default():\n t_in1 = tf.placeholder(tf.float64, 10, name=\"first_input\")\n t_in2 = tf.placeholder(tf.float64, 10, name=\"second_input\")\n t_out = tf.multiply(t_in1, t_in2)\n tf.identity(t_out, name=\"output\")\n return graph\n\n\[email protected](scope=\"module\")\ndef tf_model_multiple_inputs_float32():\n graph = tf.Graph()\n with graph.as_default():\n t_in1 = tf.placeholder(tf.float32, 10, name=\"first_input\")\n t_in2 = tf.placeholder(tf.float32, 10, name=\"second_input\")\n t_out = tf.multiply(t_in1, t_in2)\n tf.identity(t_out, name=\"output\")\n return graph\n\n\[email protected](scope=\"module\")\ndef onnx_model_multiple_inputs_float64(tf_model_multiple_inputs_float64):\n import tf2onnx\n\n sess = tf.Session(graph=tf_model_multiple_inputs_float64)\n\n onnx_graph = tf2onnx.tfonnx.process_tf_graph(\n sess.graph, input_names=[\"first_input:0\", \"second_input:0\"], output_names=[\"output:0\"]\n )\n model_proto = onnx_graph.make_model(\"test\")\n return model_proto\n\n\[email protected](scope=\"module\")\ndef onnx_model_multiple_inputs_float32(tf_model_multiple_inputs_float32):\n import tf2onnx\n\n sess = tf.Session(graph=tf_model_multiple_inputs_float32)\n\n onnx_graph = tf2onnx.tfonnx.process_tf_graph(\n sess.graph, input_names=[\"first_input:0\", \"second_input:0\"], 
output_names=[\"output:0\"]\n )\n model_proto = onnx_graph.make_model(\"test\")\n return model_proto\n\n\[email protected](scope=\"module\")\ndef data_multiple_inputs():\n return pd.DataFrame(\n {\"first_input:0\": np.random.random(10), \"second_input:0\": np.random.random(10)}\n )\n\n\[email protected](scope=\"module\")\ndef predicted_multiple_inputs(data_multiple_inputs):\n return pd.DataFrame(\n data_multiple_inputs[\"first_input:0\"] * data_multiple_inputs[\"second_input:0\"]\n )\n\n\[email protected]\ndef model_path(tmpdir):\n return os.path.join(tmpdir.strpath, \"model\")\n\n\[email protected]\ndef onnx_custom_env(tmpdir):\n conda_env = os.path.join(str(tmpdir), \"conda_env.yml\")\n _mlflow_conda_env(\n conda_env,\n additional_conda_deps=[\"pytest\", \"keras\"],\n additional_pip_deps=[\"onnx\", \"onnxmltools\"],\n )\n return conda_env\n\n\[email protected]\ndef test_cast_float64_to_float32():\n import mlflow.onnx\n\n df = pd.DataFrame([[1.0, 2.1], [True, False]], columns=[\"col1\", \"col2\"])\n df[\"col1\"] = df[\"col1\"].astype(np.float64)\n df[\"col2\"] = df[\"col2\"].astype(np.bool)\n df2 = mlflow.onnx._OnnxModelWrapper._cast_float64_to_float32(df, df.columns)\n assert df2[\"col1\"].dtype == np.float32 and df2[\"col2\"].dtype == np.bool\n\n\n# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime\n# library\[email protected]\ndef test_model_save_load(onnx_model, model_path, onnx_custom_env):\n import onnx\n import mlflow.onnx\n\n mlflow.onnx.save_model(onnx_model, model_path, conda_env=onnx_custom_env)\n\n # Loading ONNX model\n onnx.checker.check_model = mock.Mock()\n mlflow.onnx.load_model(model_path)\n assert onnx.checker.check_model.called\n\n\[email protected]\ndef test_signature_and_examples_are_saved_correctly(onnx_model, data, onnx_custom_env):\n import mlflow.onnx\n\n model = onnx_model\n signature_ = infer_signature(*data)\n example_ = data[0].head(3)\n for signature in (None, signature_):\n for example in (None, example_):\n with TempDir() as tmp:\n path = tmp.path(\"model\")\n mlflow.onnx.save_model(\n model,\n path=path,\n conda_env=onnx_custom_env,\n signature=signature,\n input_example=example,\n )\n mlflow_model = Model.load(path)\n assert signature == mlflow_model.signature\n if example is None:\n assert mlflow_model.saved_input_example_info is None\n else:\n assert all((_read_example(mlflow_model, path) == example).all())\n\n\n# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library\[email protected]\ndef test_model_save_load_evaluate_pyfunc_format(onnx_model, model_path, data, predicted):\n import mlflow.onnx\n\n x = data[0]\n mlflow.onnx.save_model(onnx_model, model_path)\n\n # Loading pyfunc model\n pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)\n assert np.allclose(pyfunc_loaded.predict(x).values, predicted, rtol=1e-05, atol=1e-05)\n\n # pyfunc serve\n scoring_response = pyfunc_serve_and_score_model(\n model_uri=os.path.abspath(model_path),\n data=x,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n )\n assert np.allclose(\n pd.read_json(scoring_response.content, orient=\"records\").values.astype(np.float32),\n predicted,\n rtol=1e-05,\n atol=1e-05,\n )\n\n\n# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime\n# library\[email protected]\ndef test_model_save_load_multiple_inputs(\n onnx_model_multiple_inputs_float64, model_path, onnx_custom_env\n):\n import onnx\n import mlflow.onnx\n\n mlflow.onnx.save_model(\n 
onnx_model_multiple_inputs_float64, model_path, conda_env=onnx_custom_env\n )\n\n # Loading ONNX model\n onnx.checker.check_model = mock.Mock()\n mlflow.onnx.load_model(model_path)\n assert onnx.checker.check_model.called\n\n\n# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library\[email protected]\ndef test_model_save_load_evaluate_pyfunc_format_multiple_inputs(\n onnx_model_multiple_inputs_float64, data_multiple_inputs, predicted_multiple_inputs, model_path\n):\n import mlflow.onnx\n\n mlflow.onnx.save_model(onnx_model_multiple_inputs_float64, model_path)\n\n # Loading pyfunc model\n pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)\n assert np.allclose(\n pyfunc_loaded.predict(data_multiple_inputs).values,\n predicted_multiple_inputs.values,\n rtol=1e-05,\n atol=1e-05,\n )\n\n # pyfunc serve\n scoring_response = pyfunc_serve_and_score_model(\n model_uri=os.path.abspath(model_path),\n data=data_multiple_inputs,\n content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,\n )\n assert np.allclose(\n pd.read_json(scoring_response.content, orient=\"records\").values,\n predicted_multiple_inputs.values,\n rtol=1e-05,\n atol=1e-05,\n )\n\n\n# TODO: Remove test, along with explicit casting, when https://github.com/mlflow/mlflow/issues/1286\n# is fixed.\n# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library\[email protected]\ndef test_pyfunc_representation_of_float32_model_casts_and_evalutes_float64_inputs(\n onnx_model_multiple_inputs_float32, model_path, data_multiple_inputs, predicted_multiple_inputs\n):\n \"\"\"\n The ``python_function`` representation of an MLflow model with the ONNX flavor\n casts 64-bit floats to 32-bit floats automatically before evaluating, as opposed\n to throwing an unexpected type exception. 
This behavior is implemented due\n to the issue described in https://github.com/mlflow/mlflow/issues/1286 where\n the JSON representation of a Pandas DataFrame does not always preserve float\n precision (e.g., 32-bit floats may be converted to 64-bit floats when persisting a\n DataFrame as JSON).\n \"\"\"\n import mlflow.onnx\n\n mlflow.onnx.save_model(onnx_model_multiple_inputs_float32, model_path)\n\n # Loading pyfunc model\n pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_path)\n assert np.allclose(\n pyfunc_loaded.predict(data_multiple_inputs.astype(\"float64\")).values,\n predicted_multiple_inputs.astype(\"float32\").values,\n rtol=1e-05,\n atol=1e-05,\n )\n\n with pytest.raises(RuntimeError):\n pyfunc_loaded.predict(data_multiple_inputs.astype(\"int32\"))\n\n\n# TODO: Use the default conda environment once MLflow's Travis build supports the onnxruntime\n# library\[email protected]\ndef test_model_log(onnx_model, onnx_custom_env):\n # pylint: disable=unused-argument\n\n import onnx\n import mlflow.onnx\n\n # should_start_run tests whether or not calling log_model() automatically starts a run.\n for should_start_run in [False, True]:\n try:\n if should_start_run:\n mlflow.start_run()\n artifact_path = \"onnx_model\"\n mlflow.onnx.log_model(\n onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env\n )\n model_uri = \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path\n )\n\n # Load model\n onnx.checker.check_model = mock.Mock()\n mlflow.onnx.load_model(model_uri)\n assert onnx.checker.check_model.called\n finally:\n mlflow.end_run()\n\n\ndef test_log_model_calls_register_model(onnx_model, onnx_custom_env):\n import mlflow.onnx\n\n artifact_path = \"model\"\n register_model_patch = mock.patch(\"mlflow.register_model\")\n with mlflow.start_run(), register_model_patch:\n mlflow.onnx.log_model(\n onnx_model=onnx_model,\n artifact_path=artifact_path,\n conda_env=onnx_custom_env,\n registered_model_name=\"AdsModel1\",\n )\n model_uri = \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path\n )\n mlflow.register_model.assert_called_once_with(model_uri, \"AdsModel1\")\n\n\ndef test_log_model_no_registered_model_name(onnx_model, onnx_custom_env):\n import mlflow.onnx\n\n artifact_path = \"model\"\n register_model_patch = mock.patch(\"mlflow.register_model\")\n with mlflow.start_run(), register_model_patch:\n mlflow.onnx.log_model(\n onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env\n )\n mlflow.register_model.assert_not_called()\n\n\n# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library\[email protected]\ndef test_model_log_evaluate_pyfunc_format(onnx_model, data, predicted):\n import mlflow.onnx\n\n x = data[0]\n # should_start_run tests whether or not calling log_model() automatically starts a run.\n for should_start_run in [False, True]:\n try:\n if should_start_run:\n mlflow.start_run()\n artifact_path = \"onnx_model\"\n mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path=artifact_path)\n model_uri = \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path\n )\n\n # Loading pyfunc model\n pyfunc_loaded = mlflow.pyfunc.load_pyfunc(model_uri=model_uri)\n assert np.allclose(pyfunc_loaded.predict(x).values, predicted, rtol=1e-05, atol=1e-05)\n finally:\n mlflow.end_run()\n\n\[email protected]\ndef 
test_model_save_persists_specified_conda_env_in_mlflow_model_directory(\n onnx_model, model_path, onnx_custom_env\n):\n import mlflow.onnx\n\n mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=onnx_custom_env)\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n assert saved_conda_env_path != onnx_custom_env\n\n with open(onnx_custom_env, \"r\") as f:\n onnx_custom_env_parsed = yaml.safe_load(f)\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == onnx_custom_env_parsed\n\n\n# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library\[email protected]\ndef test_model_save_accepts_conda_env_as_dict(onnx_model, model_path):\n import mlflow.onnx\n\n conda_env = dict(mlflow.onnx.get_default_conda_env())\n conda_env[\"dependencies\"].append(\"pytest\")\n mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=conda_env)\n\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == conda_env\n\n\[email protected]\ndef test_model_log_persists_specified_conda_env_in_mlflow_model_directory(\n onnx_model, onnx_custom_env\n):\n import mlflow.onnx\n\n artifact_path = \"model\"\n with mlflow.start_run():\n mlflow.onnx.log_model(\n onnx_model=onnx_model, artifact_path=artifact_path, conda_env=onnx_custom_env\n )\n model_path = _download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path\n )\n )\n\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n assert os.path.exists(saved_conda_env_path)\n assert saved_conda_env_path != onnx_custom_env\n\n with open(onnx_custom_env, \"r\") as f:\n onnx_custom_env_parsed = yaml.safe_load(f)\n with open(saved_conda_env_path, \"r\") as f:\n saved_conda_env_parsed = yaml.safe_load(f)\n assert saved_conda_env_parsed == onnx_custom_env_parsed\n\n\n# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library\[email protected]\ndef test_model_save_without_specified_conda_env_uses_default_env_with_expected_dependencies(\n onnx_model, model_path\n):\n import mlflow.onnx\n\n mlflow.onnx.save_model(onnx_model=onnx_model, path=model_path, conda_env=None)\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n with open(conda_env_path, \"r\") as f:\n conda_env = yaml.safe_load(f)\n\n assert conda_env == mlflow.onnx.get_default_conda_env()\n\n\n# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library\[email protected]\ndef test_model_log_without_specified_conda_env_uses_default_env_with_expected_dependencies(\n onnx_model,\n):\n import mlflow.onnx\n\n artifact_path = \"model\"\n with mlflow.start_run():\n mlflow.onnx.log_model(onnx_model=onnx_model, artifact_path=artifact_path, conda_env=None)\n model_path = 
_download_artifact_from_uri(\n \"runs:/{run_id}/{artifact_path}\".format(\n run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path\n )\n )\n\n pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)\n conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])\n with open(conda_env_path, \"r\") as f:\n conda_env = yaml.safe_load(f)\n\n assert conda_env == mlflow.onnx.get_default_conda_env()\n\n\n# TODO: Mark this as large once MLflow's Travis build supports the onnxruntime library\[email protected]\ndef test_pyfunc_predict_supports_models_with_list_outputs(onnx_sklearn_model, model_path, data):\n \"\"\"\n https://github.com/mlflow/mlflow/issues/2499\n User encountered issue where an sklearn model, converted to onnx, would return a list response.\n The issue resulted in an error because MLflow assumed it would be a numpy array. Therefore,\n the this test validates the service does not receive that error when using such a model.\n \"\"\"\n import mlflow.onnx\n\n x = data[0]\n mlflow.onnx.save_model(onnx_sklearn_model, model_path)\n wrapper = mlflow.pyfunc.load_model(model_path)\n wrapper.predict(pd.DataFrame(x))\n" ]
[ [ "tensorflow.placeholder", "pandas.DataFrame", "tensorflow.multiply", "pandas.read_json", "tensorflow.Graph", "numpy.random.random", "tensorflow.Session", "sklearn.linear_model.LogisticRegression", "tensorflow.identity", "sklearn.datasets.load_iris" ] ]
JohnZhang000/adaptive-jpeg-compression
[ "f54e4798c01169812958f4d5539a03927dbdc313", "f54e4798c01169812958f4d5539a03927dbdc313" ]
[ "remove_code/sotas/SSAH-adversarial-attack-main/utils/fid_score.py", "remove_code/my_data_mining.py" ]
[ "\"\"\"Calculates the Frechet Inception Distance (FID) to evalulate GANs\n\nThe FID metric calculates the distance between two distributions of images.\nTypically, we have summary statistics (mean & covariance matrix) of one\nof these distributions, while the 2nd distribution is given by a GAN.\n\nWhen run as a stand-alone program, it compares the distribution of\nimages that are stored as PNG/JPEG at a specified location with a\ndistribution given by summary statistics (in pickle format).\n\nThe FID is calculated by assuming that X_1 and X_2 are the activations of\nthe pool_3 layer of the inception net for generated samples and real world\nsamples respectively.\n\nSee --help to see further details.\n\nCode apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead\nof Tensorflow\n\nCopyright 2018 Institute of Bioinformatics, JKU Linz\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport os\nimport pathlib\nfrom argparse import ArgumentDefaultsHelpFormatter, ArgumentParser\nfrom multiprocessing import cpu_count\n\nimport numpy as np\nimport torch\nimport torchvision.transforms as TF\nfrom PIL import Image\nfrom scipy import linalg\nfrom torch.nn.functional import adaptive_avg_pool2d\n\ntry:\n from tqdm import tqdm\nexcept ImportError:\n # If tqdm is not available, provide a mock version of it\n def tqdm(x):\n return x\n\nfrom utils.inception import InceptionV3\n\nprint(InceptionV3.BLOCK_INDEX_BY_DIM)\nIMAGE_EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm',\n 'tif', 'tiff', 'webp'}\n\n\nclass ImagePathDataset(torch.utils.data.Dataset):\n def __init__(self, files, transforms=None):\n self.files = files\n self.transforms = transforms\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, i):\n path = self.files[i]\n img = Image.open(path).convert('RGB')\n if self.transforms is not None:\n img = self.transforms(img)\n return img\n\n\ndef get_activations(files, model, batch_size=50, dims=2048, device='cuda'):\n \"\"\"Calculates the activations of the pool_3 layer for all images.\n\n Params:\n -- files : List of image files paths\n -- model : Instance of inception model\n -- batch_size : Batch size of images for the model to process at once.\n Make sure that the number of samples is a multiple of\n the batch size, otherwise some samples are ignored. This\n behavior is retained to match the original FID score\n implementation.\n -- dims : Dimensionality of features returned by Inception\n -- device : Device to run calculations\n\n Returns:\n -- A numpy array of dimension (num images, dims) that contains the\n activations of the given tensor when feeding inception with the\n query tensor.\n \"\"\"\n model.eval()\n print(len(files), batch_size)\n\n if batch_size > len(files):\n print(('Warning: batch size is bigger than the data size. 
'\n 'Setting batch size to data size'))\n batch_size = len(files)\n\n dataset = ImagePathDataset(files, transforms=TF.ToTensor())\n\n dataloader = torch.utils.data.DataLoader(dataset,\n batch_size=batch_size,\n shuffle=False,\n drop_last=False,\n num_workers=cpu_count())\n\n pred_arr = np.empty((len(files), dims))\n\n start_idx = 0\n\n for batch in tqdm(dataloader):\n batch = batch.to(device)\n\n with torch.no_grad():\n pred = model(batch)[0]\n\n # If model output is not scalar, apply global spatial average pooling.\n # This happens if you choose a dimensionality not equal 2048.\n if pred.size(2) != 1 or pred.size(3) != 1:\n pred = adaptive_avg_pool2d(pred, output_size=(1, 1))\n\n pred = pred.squeeze(3).squeeze(2).cpu().numpy()\n\n pred_arr[start_idx:start_idx + pred.shape[0]] = pred\n\n start_idx = start_idx + pred.shape[0]\n\n return pred_arr\n\n\ndef calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):\n \"\"\"Numpy implementation of the Frechet Distance.\n The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)\n and X_2 ~ N(mu_2, C_2) is\n d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).\n\n Stable version by Dougal J. Sutherland.\n\n Params:\n -- mu1 : Numpy array containing the activations of a layer of the\n inception net (like returned by the function 'get_predictions')\n for generated samples.\n -- mu2 : The sample mean over activations, precalculated on an\n representative data set.\n -- sigma1: The covariance matrix over activations for generated samples.\n -- sigma2: The covariance matrix over activations, precalculated on an\n representative data set.\n\n Returns:\n -- : The Frechet Distance.\n \"\"\"\n\n mu1 = np.atleast_1d(mu1)\n mu2 = np.atleast_1d(mu2)\n\n sigma1 = np.atleast_2d(sigma1)\n sigma2 = np.atleast_2d(sigma2)\n\n assert mu1.shape == mu2.shape, \\\n 'Training and test mean vectors have different lengths'\n assert sigma1.shape == sigma2.shape, \\\n 'Training and test covariances have different dimensions'\n\n diff = mu1 - mu2\n\n # Product might be almost singular\n covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)\n if not np.isfinite(covmean).all():\n msg = ('fid calculation produces singular product; '\n 'adding %s to diagonal of cov estimates') % eps\n print(msg)\n offset = np.eye(sigma1.shape[0]) * eps\n covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))\n\n # Numerical error might give slight imaginary component\n if np.iscomplexobj(covmean):\n if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):\n m = np.max(np.abs(covmean.imag))\n raise ValueError('Imaginary component {}'.format(m))\n covmean = covmean.real\n\n tr_covmean = np.trace(covmean)\n\n return (diff.dot(diff) + np.trace(sigma1)\n + np.trace(sigma2) - 2 * tr_covmean)\n\n\ndef calculate_activation_statistics(files, model, batch_size=50, dims=2048,\n device='cuda'):\n \"\"\"Calculation of the statistics used by the FID.\n Params:\n -- files : List of image files paths\n -- model : Instance of inception model\n -- batch_size : The images numpy array is split into batches with\n batch size batch_size. 
A reasonable batch size\n depends on the hardware.\n -- dims : Dimensionality of features returned by Inception\n -- device : Device to run calculations\n\n Returns:\n -- mu : The mean over samples of the activations of the pool_3 layer of\n the inception model.\n -- sigma : The covariance matrix of the activations of the pool_3 layer of\n the inception model.\n \"\"\"\n act = get_activations(files, model, batch_size, dims, device)\n mu = np.mean(act, axis=0)\n sigma = np.cov(act, rowvar=False)\n return mu, sigma\n\n\ndef compute_statistics_of_path(path, model, batch_size, dims, device):\n if path.endswith('.npz'):\n with np.load(path) as f:\n m, s = f['mu'][:], f['sigma'][:]\n else:\n path = pathlib.Path(path)\n files = sorted([file for ext in IMAGE_EXTENSIONS\n for file in path.glob('*.{}'.format(ext))])\n m, s = calculate_activation_statistics(files, model, batch_size,\n dims, device)\n\n return m, s\n\n\ndef calculate_fid_given_paths(paths, batch_size, device, dims):\n \"\"\"Calculates the FID of two paths\"\"\"\n print('paths is :', paths)\n for p in paths:\n if not os.path.exists(p):\n raise RuntimeError('Invalid path: %s' % p)\n\n block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]\n\n model = InceptionV3([block_idx]).to(device)\n\n m1, s1 = compute_statistics_of_path(paths[0], model, batch_size,\n dims, device)\n m2, s2 = compute_statistics_of_path(paths[1], model, batch_size,\n dims, device)\n fid_value = calculate_frechet_distance(m1, s1, m2, s2)\n\n return fid_value\n\n\ndef return_fid(path1, path2):\n device = torch.device('cuda' if (torch.cuda.is_available()) else 'cpu')\n\n fid_value = calculate_fid_given_paths(paths=[path1, path2],\n batch_size=50,\n device=device,\n dims=2048)\n return fid_value\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 26 17:33:03 2021\n\n@author: ubuntu204\n\"\"\"\nimport numpy as np\nfrom scipy import stats\nimport statsmodels.stats.multitest as multitest\nimport matplotlib.pyplot as plt\nimport os\n\nimport pandas as pd\nfrom pandas import Series,DataFrame\n# import seaborn as sns\n# import palettable\nfrom sklearn import datasets\nfrom tqdm import tqdm\n\nplt.rcParams['font.sans-serif']=['SimHei']\n# plt.rcParams['axes.unicode_mnius']=False\nepsilon=1e-10\n\ndef volcano_mine(data1,data2,method='hs',flag_output_src=0,flag_plot=0):\n data1=data1+epsilon\n data2=data2+epsilon\n \n mdata1=data1.mean(axis=0)\n mdata2=data2.mean(axis=0) \n fold_change=(mdata2)/(mdata1)\n log2_fold_change=np.log2(fold_change)\n \n p_values=np.zeros_like(mdata1)\n for i in tqdm(range(len(p_values))):\n t,p=stats.ttest_ind(data1[:,i],data2[:,i])\n p_values[i]=p\n rejects,pvals_corrected,alphaSidak,alphaBonf=multitest.multipletests(p_values,method=method)\n log10_pvals_corrected=np.log10(pvals_corrected+epsilon)*(-1)\n \n return log2_fold_change,log10_pvals_corrected\n \ndef plot_volume(log2_fold_change,log10_pvals_corrected,title=None,saved_name=None):\n npt=len(log2_fold_change)\n colors=list(['grey']*npt)\n idx_green=(log2_fold_change>=np.log2(1.2))&(log10_pvals_corrected>(-np.log10(0.05)))\n for i in range(len(idx_green)):\n if idx_green[i]:\n colors[i]='green'\n idx_red=(log2_fold_change<=-np.log2(1.2))&(log10_pvals_corrected>(-np.log10(0.05)))\n for i in range(len(idx_red)):\n if idx_red[i]:\n colors[i]='red'\n # colors[idx_red]='red'\n \n plt.figure()\n plt.style.use('seaborn-whitegrid')\n plt.scatter(log2_fold_change, log10_pvals_corrected, color=colors)\n plt.xlabel('Log2 Fold Change')\n plt.ylabel('-Log10 P-Value')\n if title:\n 
plt.title(title)\n if saved_name:\n plt.savefig(saved_name,bbox_inches='tight',dpi=300)\n return\n\n# def plot_heatmap(data,row_c=None,dpi=300,figsize=(8/2.54,16/2.54),saved_name=None):\n# # plt.figure(dpi=dpi)\n# data_show=data.copy()\n# # data_show=data.drop(['class'],axis=1)\n# if row_c:\n# row_colors=data['class'].map(row_c)\n# sns.clustermap(data=data_show,method='single',metric='euclidean',\n# figsize=figsize,row_cluster=False,col_cluster=False,\n# cmap='rainbow')\n# sns.set(font_scale=1.5)\n# if saved_name:\n# plt.savefig(saved_name,bbox_inches='tight',dpi=dpi)\n \n \nif __name__=='__main__': \n # data1=np.random.rand(5, 10)\n # data2=np.random.rand(5, 10)\n # data2[:,0]=data1[:,0]*2.5\n # data2[:,1]=data1[:,1]*10\n # data2[:,2]=data1[:,2]/2.5\n # data2[:,3]=data1[:,3]/10\n # logFC,logP=volcano_mine(data1, data2)\n # plot_volume(logFC,logP)\n \n iris=datasets.load_iris()\n x,y=iris.data,iris.target\n data=np.hstack((x,y.reshape(150,1)))\n pd_iris=pd.DataFrame(data,columns=['sepal length(cm)','sepal width(cm)','petal length(cm)','petal width(cm)','class'])\n row_c=dict(zip(pd_iris['class'].unique(),['green','yellow','pink']))\n # plot_heatmap(pd_iris,row_c=row_c)" ]
[ [ "numpy.eye", "numpy.load", "numpy.atleast_2d", "numpy.cov", "torch.no_grad", "numpy.abs", "numpy.atleast_1d", "numpy.trace", "torch.nn.functional.adaptive_avg_pool2d", "torch.cuda.is_available", "numpy.isfinite", "numpy.iscomplexobj", "numpy.diagonal", "numpy.mean" ], [ "numpy.log2", "matplotlib.pyplot.style.use", "numpy.zeros_like", "matplotlib.pyplot.figure", "scipy.stats.ttest_ind", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.ylabel", "numpy.log10", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.scatter", "sklearn.datasets.load_iris" ] ]
prcalopa/reactable-autocalibration
[ "1985d4c73fabd5f08f54b922e73a9306e09c77a5" ]
[ "autocalibration/lib/python2.7/site-packages/matplotlib/tests/test_units.py" ]
[ "from matplotlib.cbook import iterable\nimport matplotlib.pyplot as plt\nfrom matplotlib.testing.decorators import image_comparison\nimport matplotlib.units as munits\nimport numpy as np\n\ntry:\n # mock in python 3.3+\n from unittest.mock import MagicMock\nexcept ImportError:\n from mock import MagicMock\n\n\n# Basic class that wraps numpy array and has units\nclass Quantity(object):\n def __init__(self, data, units):\n self.magnitude = data\n self.units = units\n\n def to(self, new_units):\n factors = {('hours', 'seconds'): 3600, ('minutes', 'hours'): 1 / 60,\n ('minutes', 'seconds'): 60, ('feet', 'miles'): 1 / 5280.,\n ('feet', 'inches'): 12, ('miles', 'inches'): 12 * 5280}\n if self.units != new_units:\n mult = factors[self.units, new_units]\n return Quantity(mult * self.magnitude, new_units)\n else:\n return Quantity(self.magnitude, self.units)\n\n def __getattr__(self, attr):\n return getattr(self.magnitude, attr)\n\n def __getitem__(self, item):\n return Quantity(self.magnitude[item], self.units)\n\n def __array__(self):\n return np.asarray(self.magnitude)\n\n\n# Tests that the conversion machinery works properly for classes that\n# work as a facade over numpy arrays (like pint)\n@image_comparison(baseline_images=['plot_pint'],\n extensions=['png'], remove_text=False, style='mpl20')\ndef test_numpy_facade():\n # Create an instance of the conversion interface and\n # mock so we can check methods called\n qc = munits.ConversionInterface()\n\n def convert(value, unit, axis):\n if hasattr(value, 'units'):\n return value.to(unit).magnitude\n elif iterable(value):\n try:\n return [v.to(unit).magnitude for v in value]\n except AttributeError:\n return [Quantity(v, axis.get_units()).to(unit).magnitude\n for v in value]\n else:\n return Quantity(value, axis.get_units()).to(unit).magnitude\n\n qc.convert = MagicMock(side_effect=convert)\n qc.axisinfo = MagicMock(side_effect=lambda u, a: munits.AxisInfo(label=u))\n qc.default_units = MagicMock(side_effect=lambda x, a: x.units)\n\n # Register the class\n munits.registry[Quantity] = qc\n\n # Simple test\n y = Quantity(np.linspace(0, 30), 'miles')\n x = Quantity(np.linspace(0, 5), 'hours')\n\n fig, ax = plt.subplots()\n fig.subplots_adjust(left=0.15) # Make space for label\n ax.plot(x, y, 'tab:blue')\n ax.axhline(Quantity(26400, 'feet'), color='tab:red')\n ax.axvline(Quantity(120, 'minutes'), color='tab:green')\n ax.yaxis.set_units('inches')\n ax.xaxis.set_units('seconds')\n\n assert qc.convert.called\n assert qc.axisinfo.called\n assert qc.default_units.called\n\n\n# Tests gh-8908\n@image_comparison(baseline_images=['plot_masked_units'],\n extensions=['png'], remove_text=True, style='mpl20')\ndef test_plot_masked_units():\n data = np.linspace(-5, 5)\n data_masked = np.ma.array(data, mask=(data > -2) & (data < 2))\n data_masked_units = Quantity(data_masked, 'meters')\n\n fig, ax = plt.subplots()\n ax.plot(data_masked_units)\n" ]
[ [ "matplotlib.cbook.iterable", "numpy.asarray", "matplotlib.pyplot.subplots", "matplotlib.units.AxisInfo", "numpy.ma.array", "matplotlib.testing.decorators.image_comparison", "matplotlib.units.ConversionInterface", "numpy.linspace" ] ]
jolinlaw/turicreate
[ "6b2057dc29533da225d18138e93cc15680eea85d" ]
[ "src/unity/python/turicreate/toolkits/drawing_classifier/drawing_classifier.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright © 2019 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\n\nimport turicreate as _tc\nimport numpy as _np\nimport time as _time\nfrom turicreate.toolkits._model import CustomModel as _CustomModel\nfrom turicreate.toolkits._model import PythonProxy as _PythonProxy\nfrom turicreate.toolkits import evaluation as _evaluation\nimport turicreate.toolkits._internal_utils as _tkutl\nfrom turicreate.toolkits._main import ToolkitError as _ToolkitError\nfrom turicreate import extensions as _extensions\nfrom .. import _pre_trained_models\n\nBITMAP_WIDTH = 28\nBITMAP_HEIGHT = 28\nTRAIN_VALIDATION_SPLIT = .95\n\ndef _raise_error_if_not_drawing_classifier_input_sframe(\n dataset, feature, target):\n \"\"\"\n Performs some sanity checks on the SFrame provided as input to \n `turicreate.drawing_classifier.create` and raises a ToolkitError\n if something in the dataset is missing or wrong.\n \"\"\"\n from turicreate.toolkits._internal_utils import _raise_error_if_not_sframe\n _raise_error_if_not_sframe(dataset)\n if feature not in dataset.column_names():\n raise _ToolkitError(\"Feature column '%s' does not exist\" % feature)\n if target not in dataset.column_names():\n raise _ToolkitError(\"Target column '%s' does not exist\" % target)\n if (dataset[feature].dtype != _tc.Image and dataset[feature].dtype != list):\n raise _ToolkitError(\"Feature column must contain images\" \n + \" or stroke-based drawings encoded as lists of strokes\" \n + \" where each stroke is a list of points and\" \n + \" each point is stored as a dictionary\")\n if dataset[target].dtype != int and dataset[target].dtype != str:\n raise _ToolkitError(\"Target column contains \" + str(dataset[target].dtype)\n + \" but it must contain strings or integers to represent\" \n + \" labels for drawings.\")\n if len(dataset) == 0:\n raise _ToolkitError(\"Input Dataset is empty!\")\n\ndef create(input_dataset, target, feature=None, validation_set='auto',\n warm_start='auto', batch_size=256, \n max_iterations=100, verbose=True):\n \"\"\"\n Create a :class:`DrawingClassifier` model.\n\n Parameters\n ----------\n dataset : SFrame\n Input data. The columns named by the ``feature`` and ``target``\n parameters will be extracted for training the drawing classifier.\n\n target : string\n Name of the column containing the target variable. The values in this\n column must be of string or integer type.\n\n feature : string optional\n Name of the column containing the input drawings. 'None' (the default)\n indicates the column in `dataset` named \"drawing\" should be used as the\n feature.\n The feature column can contain both bitmap-based drawings as well as\n stroke-based drawings. Bitmap-based drawing input can be a grayscale\n tc.Image of any size.\n Stroke-based drawing input must be in the following format:\n Every drawing must be represented by a list of strokes, where each\n stroke must be a list of points in the order in which they were drawn\n on the canvas.\n Each point must be a dictionary with two keys, \"x\" and \"y\", and their\n respective values must be numerical, i.e. 
either integer or float.\n\n validation_set : SFrame optional\n A dataset for monitoring the model's generalization performance.\n The format of this SFrame must be the same as the training set.\n By default this argument is set to 'auto' and a validation set is\n automatically sampled and used for progress printing. If\n validation_set is set to None, then no additional metrics\n are computed. The default value is 'auto'.\n\n warm_start : string optional\n A string to denote which pretrained model to use. Set to \"auto\"\n by default which uses a model trained on 245 of the 345 classes in the\n Quick, Draw! dataset. To disable warm start, pass in None to this \n argument. Here is a list of all the pretrained models that\n can be passed in as this argument:\n \"auto\": Uses quickdraw_245_v0\n \"quickdraw_245_v0\": Uses a model trained on 245 of the 345 classes in the\n Quick, Draw! dataset.\n None: No Warm Start\n\n batch_size: int optional\n The number of drawings per training step. If not set, a default\n value of 256 will be used. If you are getting memory errors,\n try decreasing this value. If you have a powerful computer, increasing\n this value may improve performance.\n\n max_iterations : int optional\n The maximum number of allowed passes through the data. More passes over\n the data can result in a more accurately trained model. \n\n verbose : bool optional\n If True, print progress updates and model details.\n\n Returns\n -------\n out : DrawingClassifier\n A trained :class:`DrawingClassifier` model.\n\n See Also\n --------\n DrawingClassifier\n\n Examples\n --------\n .. sourcecode:: python\n\n # Train a drawing classifier model\n >>> model = turicreate.drawing_classifier.create(data)\n\n # Make predictions on the training set and as column to the SFrame\n >>> data['predictions'] = model.predict(data)\n\n \"\"\"\n \n import mxnet as _mx\n from mxnet import autograd as _autograd\n from ._model_architecture import Model as _Model\n from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter\n from .._mxnet import _mxnet_utils\n \n start_time = _time.time()\n accepted_values_for_warm_start = [\"auto\", \"quickdraw_245_v0\", None]\n\n # @TODO: Should be able to automatically choose number of iterations\n # based on data size: Tracked in Github Issue #1576\n\n # automatically infer feature column\n if feature is None:\n feature = _tkutl._find_only_drawing_column(input_dataset)\n\n _raise_error_if_not_drawing_classifier_input_sframe(\n input_dataset, feature, target)\n\n if batch_size is not None and not isinstance(batch_size, int):\n raise TypeError(\"'batch_size' must be an integer >= 1\")\n if batch_size is not None and batch_size < 1:\n raise ValueError(\"'batch_size' must be >= 1\")\n if max_iterations is not None and not isinstance(max_iterations, int):\n raise TypeError(\"'max_iterations' must be an integer >= 1\")\n if max_iterations is not None and max_iterations < 1:\n raise ValueError(\"'max_iterations' must be >= 1\")\n\n is_stroke_input = (input_dataset[feature].dtype != _tc.Image)\n dataset = _extensions._drawing_classifier_prepare_data(\n input_dataset, feature) if is_stroke_input else input_dataset\n\n iteration = 0\n\n classes = dataset[target].unique()\n classes = sorted(classes)\n class_to_index = {name: index for index, name in enumerate(classes)}\n\n validation_set_corrective_string = (\"'validation_set' parameter must be \"\n + \"an SFrame, or None, or must be set to 'auto' for the toolkit to \" \n + \"automatically create a validation set.\")\n if 
isinstance(validation_set, _tc.SFrame):\n _raise_error_if_not_drawing_classifier_input_sframe(\n validation_set, feature, target)\n is_validation_stroke_input = (validation_set[feature].dtype != _tc.Image)\n validation_dataset = _extensions._drawing_classifier_prepare_data(\n validation_set, feature) if is_validation_stroke_input else validation_set\n elif isinstance(validation_set, str):\n if validation_set == 'auto':\n if dataset.num_rows() >= 100:\n if verbose:\n print ( \"PROGRESS: Creating a validation set from 5 percent of training data. This may take a while.\\n\"\n \" You can set ``validation_set=None`` to disable validation tracking.\\n\")\n dataset, validation_dataset = dataset.random_split(TRAIN_VALIDATION_SPLIT, exact=True)\n else:\n validation_set = None\n validation_dataset = _tc.SFrame()\n else:\n raise _ToolkitError(\"Unrecognized value for 'validation_set'. \"\n + validation_set_corrective_string)\n elif validation_set is None:\n validation_dataset = _tc.SFrame()\n else:\n raise TypeError(\"Unrecognized type for 'validation_set'.\"\n + validation_set_corrective_string)\n\n train_loader = _SFrameClassifierIter(dataset, batch_size,\n feature_column=feature,\n target_column=target,\n class_to_index=class_to_index,\n load_labels=True,\n shuffle=True,\n iterations=max_iterations)\n train_loader_to_compute_accuracy = _SFrameClassifierIter(dataset, batch_size,\n feature_column=feature,\n target_column=target,\n class_to_index=class_to_index,\n load_labels=True,\n shuffle=True,\n iterations=1)\n validation_loader = _SFrameClassifierIter(validation_dataset, batch_size,\n feature_column=feature,\n target_column=target,\n class_to_index=class_to_index,\n load_labels=True,\n shuffle=True,\n iterations=1)\n if verbose and iteration == 0:\n column_names = ['iteration', 'train_loss', 'train_accuracy', 'time']\n column_titles = ['Iteration', 'Training Loss', 'Training Accuracy', 'Elapsed Time (seconds)']\n if validation_set is not None:\n column_names.insert(3, 'validation_accuracy')\n column_titles.insert(3, 'Validation Accuracy')\n table_printer = _tc.util._ProgressTablePrinter(\n column_names, column_titles)\n\n ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size)\n model = _Model(num_classes = len(classes), prefix=\"drawing_\")\n model_params = model.collect_params()\n model_params.initialize(_mx.init.Xavier(), ctx=ctx)\n\n if warm_start is not None:\n if type(warm_start) is not str:\n raise TypeError(\"'warm_start' must be a string or None. \" \n + \"'warm_start' can take in the following values: \" \n + str(accepted_values_for_warm_start))\n if warm_start not in accepted_values_for_warm_start:\n raise _ToolkitError(\"Unrecognized value for 'warm_start': \" \n + warm_start + \". 
'warm_start' can take in the following \" \n + \"values: \" + str(accepted_values_for_warm_start))\n pretrained_model = _pre_trained_models.DrawingClassifierPreTrainedModel(\n warm_start)\n pretrained_model_params_path = pretrained_model.get_model_path()\n model.load_params(pretrained_model_params_path, \n ctx=ctx, \n allow_missing=True)\n softmax_cross_entropy = _mx.gluon.loss.SoftmaxCrossEntropyLoss()\n model.hybridize()\n trainer = _mx.gluon.Trainer(model.collect_params(), 'adam')\n\n train_accuracy = _mx.metric.Accuracy()\n validation_accuracy = _mx.metric.Accuracy()\n\n def get_data_and_label_from_batch(batch):\n if batch.pad is not None:\n size = batch_size - batch.pad\n sliced_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)\n sliced_label = _mx.nd.slice_axis(batch.label[0], axis=0, begin=0, end=size)\n num_devices = min(sliced_data.shape[0], len(ctx))\n batch_data = _mx.gluon.utils.split_and_load(sliced_data, ctx_list=ctx[:num_devices], even_split=False)\n batch_label = _mx.gluon.utils.split_and_load(sliced_label, ctx_list=ctx[:num_devices], even_split=False)\n else:\n batch_data = _mx.gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)\n batch_label = _mx.gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)\n return batch_data, batch_label\n\n def compute_accuracy(accuracy_metric, batch_loader):\n batch_loader.reset()\n accuracy_metric.reset()\n for batch in batch_loader:\n batch_data, batch_label = get_data_and_label_from_batch(batch)\n outputs = []\n for x, y in zip(batch_data, batch_label):\n if x is None or y is None: continue\n z = model(x)\n outputs.append(z)\n accuracy_metric.update(batch_label, outputs)\n\n for train_batch in train_loader:\n train_batch_data, train_batch_label = get_data_and_label_from_batch(train_batch)\n with _autograd.record():\n # Inside training scope\n for x, y in zip(train_batch_data, train_batch_label):\n z = model(x)\n # Computes softmax cross entropy loss.\n loss = softmax_cross_entropy(z, y)\n # Backpropagate the error for one iteration.\n loss.backward()\n\n # Make one step of parameter update. 
Trainer needs to know the\n # batch size of data to normalize the gradient by 1/batch_size.\n trainer.step(train_batch.data[0].shape[0])\n # calculate training metrics\n train_loss = loss.mean().asscalar()\n train_time = _time.time() - start_time\n\n if train_batch.iteration > iteration:\n # Compute training accuracy\n compute_accuracy(train_accuracy, train_loader_to_compute_accuracy)\n # Compute validation accuracy\n if validation_set is not None:\n compute_accuracy(validation_accuracy, validation_loader)\n iteration = train_batch.iteration\n if verbose:\n kwargs = { \"iteration\": iteration,\n \"train_loss\": float(train_loss),\n \"train_accuracy\": train_accuracy.get()[1],\n \"time\": train_time}\n if validation_set is not None:\n kwargs[\"validation_accuracy\"] = validation_accuracy.get()[1]\n table_printer.print_row(**kwargs)\n\n state = {\n '_model': model,\n '_class_to_index': class_to_index,\n 'num_classes': len(classes),\n 'classes': classes,\n 'input_image_shape': (1, BITMAP_WIDTH, BITMAP_HEIGHT),\n 'batch_size': batch_size,\n 'training_loss': train_loss,\n 'training_accuracy': train_accuracy.get()[1],\n 'training_time': train_time,\n 'validation_accuracy': validation_accuracy.get()[1], \n # nan if validation_set=None\n 'max_iterations': max_iterations,\n 'target': target,\n 'feature': feature,\n 'num_examples': len(input_dataset)\n }\n return DrawingClassifier(state)\n\nclass DrawingClassifier(_CustomModel):\n \"\"\"\n A trained model that is ready to use for classification, and to be \n exported to Core ML.\n\n This model should not be constructed directly.\n \"\"\"\n\n _PYTHON_DRAWING_CLASSIFIER_VERSION = 1\n def __init__(self, state):\n self.__proxy__ = _PythonProxy(state)\n\n @classmethod\n def _native_name(cls):\n return \"drawing_classifier\"\n\n def _get_native_state(self):\n from .._mxnet import _mxnet_utils\n state = self.__proxy__.get_state()\n mxnet_params = state['_model'].collect_params()\n state['_model'] = _mxnet_utils.get_gluon_net_params_state(mxnet_params)\n return state\n\n def _get_version(self):\n return self._PYTHON_DRAWING_CLASSIFIER_VERSION\n\n @classmethod\n def _load_version(cls, state, version):\n _tkutl._model_version_check(version, \n cls._PYTHON_DRAWING_CLASSIFIER_VERSION)\n from ._model_architecture import Model as _Model\n from .._mxnet import _mxnet_utils\n net = _Model(num_classes = len(state['classes']), prefix = 'drawing_')\n ctx = _mxnet_utils.get_mxnet_context(max_devices=state['batch_size'])\n net_params = net.collect_params()\n _mxnet_utils.load_net_params_from_state(\n net_params, state['_model'], ctx=ctx \n )\n state['_model'] = net\n # For a model trained on integer classes, when saved and loaded back,\n # the classes are loaded as floats. 
The following if statement casts\n # the loaded \"float\" classes back to int.\n if len(state['classes']) > 0 and isinstance(state['classes'][0], float):\n state['classes'] = list(map(int, state['classes']))\n return DrawingClassifier(state)\n\n def __str__(self):\n \"\"\"\n Return a string description of the model to the ``print`` method.\n\n Returns\n -------\n out : string\n A description of the DrawingClassifier.\n \"\"\"\n return self.__repr__()\n\n def __repr__(self):\n \"\"\"\n Returns a string description of the model when the model name is \n entered in the terminal.\n \"\"\"\n\n width = 40\n sections, section_titles = self._get_summary_struct()\n out = _tkutl._toolkit_repr_print(self, sections, section_titles,\n width=width)\n return out\n\n def _get_summary_struct(self):\n \"\"\"\n Returns a structured description of the model, including (where\n relevant) the schema of the training data, description of the training\n data, training statistics, and model hyperparameters.\n\n Returns\n -------\n sections : list (of list of tuples)\n A list of summary sections.\n Each section is a list.\n Each item in a section list is a tuple of the form:\n ('<label>','<field>')\n section_titles: list\n A list of section titles.\n The order matches that of the 'sections' object.\n \"\"\"\n model_fields = [\n ('Number of classes', 'num_classes'),\n ('Feature column', 'feature'),\n ('Target column', 'target')\n ]\n training_fields = [\n ('Training Iterations', 'max_iterations'),\n ('Training Accuracy', 'training_accuracy'),\n ('Validation Accuracy', 'validation_accuracy'),\n ('Training Time', 'training_time'),\n ('Number of Examples', 'num_examples'),\n ('Batch Size', 'batch_size'),\n ('Final Loss (specific to model)', 'training_loss')\n ]\n\n section_titles = ['Schema', 'Training summary']\n return([model_fields, training_fields], section_titles)\n\n def export_coreml(self, filename, verbose=False):\n \"\"\"\n Save the model in Core ML format. 
The Core ML model takes a grayscale \n drawing of fixed size as input and produces two outputs: \n `classLabel` and `labelProbabilities`.\n\n The first one, `classLabel` is an integer or string (depending on the\n classes the model was trained on) to store the label of the top \n prediction by the model.\n\n The second one, `labelProbabilities`, is a dictionary with all the \n class labels in the dataset as the keys, and their respective \n probabilities as the values.\n\n See Also\n --------\n save\n\n Parameters\n ----------\n filename : string\n The path of the file where we want to save the Core ML model.\n\n verbose : bool optional\n If True, prints export progress.\n\n\n Examples\n --------\n >>> model.export_coreml('drawing_classifier.mlmodel')\n \"\"\"\n import mxnet as _mx\n from .._mxnet._mxnet_to_coreml import _mxnet_converter\n import coremltools as _coremltools\n\n batch_size = 1\n image_shape = (batch_size,) + (1, BITMAP_WIDTH, BITMAP_HEIGHT)\n s_image = _mx.sym.Variable(self.feature,\n shape=image_shape, dtype=_np.float32)\n\n from copy import copy as _copy\n net = _copy(self._model)\n s_ymap = net(s_image)\n \n mod = _mx.mod.Module(symbol=s_ymap, label_names=None, data_names=[self.feature])\n mod.bind(for_training=False, data_shapes=[(self.feature, image_shape)])\n mod.init_params()\n \n arg_params, aux_params = mod.get_params()\n net_params = net.collect_params()\n\n new_arg_params = {}\n for k, param in arg_params.items():\n new_arg_params[k] = net_params[k].data(net_params[k].list_ctx()[0])\n new_aux_params = {}\n for k, param in aux_params.items():\n new_aux_params[k] = net_params[k].data(net_params[k].list_ctx()[0])\n mod.set_params(new_arg_params, new_aux_params)\n\n coreml_model = _mxnet_converter.convert(mod, mode='classifier',\n class_labels=self.classes,\n input_shape=[(self.feature, image_shape)],\n builder=None, verbose=verbose,\n preprocessor_args={\n 'image_input_names': [self.feature],\n 'image_scale': 1.0/255\n })\n\n DESIRED_OUTPUT_NAME = self.target + \"Probabilities\"\n spec = coreml_model._spec\n class_label_output_index = 0 if spec.description.output[0].name == \"classLabel\" else 1\n probabilities_output_index = 1-class_label_output_index\n spec.neuralNetworkClassifier.labelProbabilityLayerName = DESIRED_OUTPUT_NAME\n spec.neuralNetworkClassifier.layers[-1].name = DESIRED_OUTPUT_NAME\n spec.neuralNetworkClassifier.layers[-1].output[0] = DESIRED_OUTPUT_NAME\n spec.description.predictedProbabilitiesName = DESIRED_OUTPUT_NAME\n spec.description.output[probabilities_output_index].name = DESIRED_OUTPUT_NAME\n from turicreate.toolkits import _coreml_utils\n model_type = \"drawing classifier\"\n spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(model_type)\n spec.description.input[0].shortDescription = self.feature\n spec.description.output[probabilities_output_index].shortDescription = 'Prediction probabilities'\n spec.description.output[class_label_output_index].shortDescription = 'Class Label of Top Prediction'\n from coremltools.models.utils import save_spec as _save_spec\n _save_spec(spec, filename)\n\n\n def _predict_with_probabilities(self, input_dataset, batch_size=None, \n verbose=True):\n \"\"\"\n Predict with probabilities. 
The core prediction part that both \n `evaluate` and `predict` share.\n\n Returns an SFrame with two columns, self.target, and \"probabilities\".\n\n The column with column name, self.target, contains the predictions made\n by the model for the provided dataset.\n\n The \"probabilities\" column contains the probabilities for each class \n that the model predicted for the data provided to the function.\n \"\"\"\n\n from .._mxnet import _mxnet_utils\n import mxnet as _mx\n from ._sframe_loader import SFrameClassifierIter as _SFrameClassifierIter\n\n is_stroke_input = (input_dataset[self.feature].dtype != _tc.Image)\n dataset = _extensions._drawing_classifier_prepare_data(\n input_dataset, self.feature) if is_stroke_input else input_dataset\n \n batch_size = self.batch_size if batch_size is None else batch_size\n loader = _SFrameClassifierIter(dataset, batch_size,\n class_to_index=self._class_to_index,\n feature_column=self.feature,\n target_column=self.target,\n load_labels=False,\n shuffle=False,\n iterations=1)\n\n dataset_size = len(dataset)\n ctx = _mxnet_utils.get_mxnet_context()\n\n index = 0\n last_time = 0\n done = False\n\n from turicreate import SArrayBuilder\n from array import array\n\n classes = self.classes\n all_predicted_builder = SArrayBuilder(dtype=type(classes[0]))\n all_probabilities_builder = SArrayBuilder(dtype=array)\n\n for batch in loader:\n if batch.pad is not None:\n size = batch_size - batch.pad\n batch_data = _mx.nd.slice_axis(batch.data[0], \n axis=0, begin=0, end=size)\n else:\n batch_data = batch.data[0]\n size = batch_size\n\n num_devices = min(batch_data.shape[0], len(ctx))\n split_data = _mx.gluon.utils.split_and_load(batch_data, ctx_list=ctx[:num_devices], even_split=False)\n\n for data in split_data:\n z = self._model(data).asnumpy()\n predicted = list(map(lambda x: classes[x], z.argmax(axis=1)))\n split_length = z.shape[0]\n all_predicted_builder.append_multiple(predicted)\n all_probabilities_builder.append_multiple(z.tolist())\n index += split_length\n if index == dataset_size - 1:\n done = True\n\n cur_time = _time.time()\n # Do not print progress if only a few samples are predicted\n if verbose and (dataset_size >= 5\n and cur_time > last_time + 10 or done):\n print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(\n cur_n = index + 1,\n max_n = dataset_size,\n width = len(str(dataset_size))))\n last_time = cur_time\n\n return (_tc.SFrame({self.target: all_predicted_builder.close(),\n 'probability': all_probabilities_builder.close()}))\n\n def evaluate(self, dataset, metric='auto', batch_size=None, verbose=True):\n \"\"\"\n Evaluate the model by making predictions of target values and comparing\n these to actual values.\n \n Parameters\n ----------\n dataset : SFrame\n Dataset of new observations. Must include columns with the same\n names as the feature and target columns used for model training.\n Additional columns are ignored.\n \n metric : str, optional\n Name of the evaluation metric. 
Possible values are:\n \n - 'auto' : Returns all available metrics.\n - 'accuracy' : Classification accuracy (micro average).\n - 'auc' : Area under the ROC curve (macro average)\n - 'precision' : Precision score (macro average)\n - 'recall' : Recall score (macro average)\n - 'f1_score' : F1 score (macro average)\n - 'confusion_matrix' : An SFrame with counts of possible \n prediction/true label combinations.\n - 'roc_curve' : An SFrame containing information needed for an\n ROC curve\n \n verbose : bool, optional\n If True, prints prediction progress.\n\n Returns\n -------\n out : dict\n Dictionary of evaluation results where the key is the name of the\n evaluation metric (e.g. `accuracy`) and the value is the evaluation\n score.\n \n See Also\n ----------\n create, predict\n \n Examples\n ----------\n .. sourcecode:: python\n \n >>> results = model.evaluate(data)\n >>> print(results['accuracy'])\n \"\"\"\n\n if self.target not in dataset.column_names():\n raise _ToolkitError(\"Must provide ground truth column, '\" \n + self.target + \"' in the evaluation dataset.\")\n\n predicted = self._predict_with_probabilities(dataset, batch_size, verbose)\n\n avail_metrics = ['accuracy', 'auc', 'precision', 'recall',\n 'f1_score', 'confusion_matrix', 'roc_curve']\n\n _tkutl._check_categorical_option_type(\n 'metric', metric, avail_metrics + ['auto'])\n\n metrics = avail_metrics if metric == 'auto' else [metric]\n \n ret = {}\n if 'accuracy' in metrics:\n ret['accuracy'] = _evaluation.accuracy(\n dataset[self.target], predicted[self.target])\n if 'auc' in metrics:\n ret['auc'] = _evaluation.auc(\n dataset[self.target], predicted['probability'], \n index_map=self._class_to_index)\n if 'precision' in metrics:\n ret['precision'] = _evaluation.precision(\n dataset[self.target], predicted[self.target])\n if 'recall' in metrics:\n ret['recall'] = _evaluation.recall(\n dataset[self.target], predicted[self.target])\n if 'f1_score' in metrics:\n ret['f1_score'] = _evaluation.f1_score(\n dataset[self.target], predicted[self.target])\n if 'confusion_matrix' in metrics:\n ret['confusion_matrix'] = _evaluation.confusion_matrix(\n dataset[self.target], predicted[self.target])\n if 'roc_curve' in metrics:\n ret['roc_curve'] = _evaluation.roc_curve(\n dataset[self.target], predicted['probability'], \n index_map=self._class_to_index)\n \n return ret\n\n def predict_topk(self, dataset, output_type=\"probability\", k=3,\n batch_size=None):\n \"\"\"\n Return top-k predictions for the ``dataset``, using the trained model.\n Predictions are returned as an SFrame with three columns: `id`,\n `class`, and `probability` or `rank`, depending on the ``output_type``\n parameter.\n\n Parameters\n ----------\n dataset : SFrame | SArray | turicreate.Image | list\n Drawings to be classified.\n If dataset is an SFrame, it must include columns with the same\n names as the features used for model training, but does not require\n a target column. Additional columns are ignored.\n\n output_type : {'probability', 'rank'}, optional\n Choose the return type of the prediction:\n\n - `probability`: Probability associated with each label in the \n prediction.\n - `rank` : Rank associated with each label in the prediction.\n \n k : int, optional\n Number of classes to return for each input example.\n\n batch_size : int, optional\n If you are getting memory errors, try decreasing this value. 
If you\n have a powerful computer, increasing this value may improve\n performance.\n\n Returns\n -------\n out : SFrame\n An SFrame with model predictions.\n\n See Also\n --------\n predict, evaluate\n\n Examples\n --------\n >>> pred = m.predict_topk(validation_data, k=3)\n >>> pred\n +----+-------+-------------------+\n | id | class | probability |\n +----+-------+-------------------+\n | 0 | 4 | 0.995623886585 |\n | 0 | 9 | 0.0038311756216 |\n | 0 | 7 | 0.000301006948575 |\n | 1 | 1 | 0.928708016872 |\n | 1 | 3 | 0.0440889261663 |\n | 1 | 2 | 0.0176190119237 |\n | 2 | 3 | 0.996967732906 |\n | 2 | 2 | 0.00151345680933 |\n | 2 | 7 | 0.000637513934635 |\n | 3 | 1 | 0.998070061207 |\n | .. | ... | ... |\n +----+-------+-------------------+\n [35688 rows x 3 columns]\n \"\"\"\n _tkutl._check_categorical_option_type(\"output_type\", output_type, \n [\"probability\", \"rank\"])\n \n if not isinstance(k, int): \n raise TypeError(\"'k' must be an integer >= 1\")\n if k <= 0: \n raise ValueError(\"'k' must be >= 1\")\n if batch_size is not None and not isinstance(batch_size, int):\n raise TypeError(\"'batch_size' must be an integer >= 1\")\n if batch_size is not None and batch_size < 1:\n raise ValueError(\"'batch_size' must be >= 1\")\n\n prob_vector = self.predict(\n dataset, output_type='probability_vector', batch_size=batch_size)\n\n classes = self.classes\n if output_type == 'probability':\n results = prob_vector.apply(lambda p: [\n {'class': classes[i], 'probability': p[i]}\n for i in reversed(_np.argsort(p)[-k:])]\n )\n else:\n assert(output_type == 'rank')\n results = prob_vector.apply(lambda p: [\n {'class': classes[index], 'rank': rank}\n for rank, index in enumerate(reversed(_np.argsort(p)[-k:]))]\n )\n\n results = _tc.SFrame({'X': results})\n results = results.add_row_number()\n results = results.stack('X', new_column_name='X')\n results = results.unpack('X', column_name_prefix='')\n return results\n \n\n def predict(self, data, output_type='class', batch_size=None, verbose=True):\n \"\"\"\n Predict on an SFrame or SArray of drawings, or on a single drawing.\n\n Parameters\n ----------\n data : SFrame | SArray | tc.Image | list\n The drawing(s) on which to perform drawing classification.\n If dataset is an SFrame, it must have a column with the same name\n as the feature column during training. Additional columns are\n ignored.\n If the data is a single drawing, it can be either of type tc.Image,\n in which case it is a bitmap-based drawing input,\n or of type list, in which case it is a stroke-based drawing input.\n\n output_type : {'probability', 'class', 'probability_vector'}, optional\n Form of the predictions which are one of:\n \n - 'class': Class prediction. For multi-class classification, this\n returns the class with maximum probability.\n - 'probability': Prediction probability associated with the True\n class (not applicable for multi-class classification)\n - 'probability_vector': Prediction probability associated with each\n class as a vector. Label ordering is dictated by the ``classes``\n member variable.\n\n batch_size : int, optional\n If you are getting memory errors, try decreasing this value. If you\n have a powerful computer, increasing this value may improve\n performance.\n\n verbose : bool, optional\n If True, prints prediction progress.\n\n Returns\n -------\n out : SArray\n An SArray with model predictions. Each element corresponds to\n a drawing and contains a single value corresponding to the\n predicted label. 
Each prediction will have type integer or string\n depending on the type of the classes the model was trained on.\n If `data` is a single drawing, the return value will be a single\n prediction.\n\n See Also\n --------\n evaluate\n\n Examples\n --------\n .. sourcecode:: python\n\n # Make predictions\n >>> pred = model.predict(data)\n\n # Print predictions, for a better overview\n >>> print(pred)\n dtype: int\n Rows: 10\n [3, 4, 3, 3, 4, 5, 8, 8, 8, 4]\n \"\"\"\n _tkutl._check_categorical_option_type(\"output_type\", output_type, \n [\"probability\", \"class\", \"probability_vector\"])\n if isinstance(data, _tc.SArray):\n predicted = self._predict_with_probabilities(\n _tc.SFrame({\n self.feature: data\n }),\n batch_size,\n verbose\n )\n elif isinstance(data, _tc.SFrame):\n predicted = self._predict_with_probabilities(data, batch_size, verbose)\n else:\n # single input\n predicted = self._predict_with_probabilities(\n _tc.SFrame({\n self.feature: [data]\n }),\n batch_size,\n verbose\n )\n if output_type == \"class\":\n return predicted[self.target]\n elif output_type == \"probability\":\n _class_to_index = self._class_to_index\n target = self.target\n return predicted.apply(\n lambda row: row[\"probability\"][_class_to_index[row[target]]])\n else:\n assert (output_type == \"probability_vector\")\n return predicted[\"probability\"]\n" ]
[ [ "numpy.argsort" ] ]
jaydenmedia/OpenCV3-Python
[ "e0bfed6582447c567f100c507f5a8c59b621dfe1" ]
[ "opencv3_align_images.py" ]
[ "# -*- coding: utf-8 -*-\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will\n# list the files in the input directory from subprocess import check_output\n#print(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n\n\n#ORB is basically a fusion of FAST keypoint detector and BRIEF descriptor with\n# many modifications to enhance the performance. First it use FAST to find\n# keypoints, then apply Harris corner measure to find top N points among them.\n#For any feature set of n binary tests at location (x_i, y_i),\n# define a 2 \\times n matrix, S which contains the coordinates of these pixels.\n# Then using the orientation of patch, \\theta, its rotation matrix is found\n# and rotates the S to get steered(rotated) version S_\\theta.\n#ORB runs a greedy search among all possible binary tests to find the ones that\n# have both high variance and means close to 0.5, as well as being uncorrelated.\n\n# Any results write to the current directory are saved as output.\n\nimport numpy as np # linear algebra\nimport cv2\nimport os\n\nimport csv\nimport sys\nfrom time import sleep\n\n\ndef im_align_orb(imp1, imp2, nf=10000):\n \"\"\"\n :param imp1: image1 file path\n :param imp2: image2 file path\n :param nf: max number of ORB key points\n :return: transformed image2, so that it can be aligned with image1\n \"\"\"\n img1 = cv2.imread(imp1, 0)\n img2 = cv2.imread(imp2, 0)\n h2, w2 = img2.shape[:2]\n\n orb = cv2.ORB_create(nfeatures=nf, WTA_K=2)\n kp1, des1 = orb.detectAndCompute(img1, None)\n kp2, des2 = orb.detectAndCompute(img2, None)\n\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)\n\n matches = bf.knnMatch(des1, des2, 2)\n\n matches_ = []\n for m in matches:\n if len(m) == 2 and m[0].distance < m[1].distance * 0.75:\n matches_.append((m[0].trainIdx, m[0].queryIdx))\n\n kp1_ = np.float32([kp1[m[1]].pt for m in matches_]).reshape(-1, 1, 2)\n kp2_ = np.float32([kp2[m[0]].pt for m in matches_]).reshape(-1, 1, 2)\n\n H, mask = cv2.findHomography(kp2_, kp1_, cv2.RANSAC, 1.0)\n\n h1, w1 = img1.shape[:2]\n\n img2 = cv2.warpPerspective(cv2.imread(imp2), H, (w1, h1))\n return img2\n\n\ndef align_set_by_id(setid, setvalue, isTrain=True, nFeatures=20000):\n \"\"\"\n :param setid: image set id values\n :param isTrain: train (true) or test (false) path\n :return: aligned images into output path\n \"\"\"\n train_path = '../output/train_sm/'\n test_path = '../output/test_sm/'\n\n counter = 0\n\n if isTrain:\n image_path = train_path\n fn1 = train_path + \"set\" + key + \"_\" + elem[0] + \".jpg\"\n outputpath = \"./train_output/\"\n else:\n image_path = test_path\n fn1 = train_path + \"set\" + key + \"_\" + elem[0] + \".jpg\"\n print(fn1)\n outputpath = \"./test_output/\"\n\n result = list()\n\n result.append(cv2.cvtColor(cv2.imread(fn1), cv2.COLOR_BGR2RGB))\n for id in elem: # outputmatrix elem\n fn2 = image_path + \"set\" + str(setid) + \"_\" + str(id) + \".jpg\"\n print(\"fn1=%s, fn2=%s\" % (os.path.basename(fn1), os.path.basename(fn2)))\n im = im_align_orb(fn1, fn2, nFeatures)\n cv2.imwrite(outputpath + os.path.basename(fn2), im)\n result.append(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n counter += 1\n for i in range(21):\n sys.stdout.write('\\r')\n sys.stdout.write(\n '[%-20s] %d%% %d/%d ' % ('=' * i, 5 * i, counter, om_len)\n )\n sys.stdout.flush()\n sleep(0.25)\n\n return result\n\n\ndef align_all_set(path, isTrain=True):\n allfiles = os.listdir(path)\n allfiles = [\n os.path.basename(file) for file in 
allfiles if file.startswith('set')]\n allsets = np.unique([f.split(\"_\")[0].replace(\"set\", \"\") for f in allfiles])\n\n for s in allsets:\n align_set_by_id(s, isTrain=True, nFeatures=20000)\n\n#align_all_set(path='../output/train_sm')\n\n\ndef csv_lists(path):\n row = []\n matrix = {}\n\n with open(path) as f:\n csv_reader = csv.reader(f)\n csv_list = list(csv_reader)\n\n for idx, val in enumerate(csv_list):\n if not row:\n row.extend([val[0]])\n if row[0] == val[0]:\n row.extend([val[1]])\n elif row != val[0]:\n row = [val[0]]\n row.extend([val[1]])\n if len(row) is 6:\n matrix.update({row[0]: row[1:]})\n return matrix\n\noutputmatrix = csv_lists('../output/features_means_train.csv')\nom_len = len(outputmatrix)\n\nfor key, elem in list(outputmatrix.items()):\n align_set_by_id(key, elem, isTrain=True, nFeatures=15000)" ]
[ [ "numpy.float32" ] ]
josepablocam/janus-public
[ "4713092b27d02386bdb408213d8edc0dc5859eec" ]
[ "kaggle/ghouls-goblins-and-ghosts-boo/script_3.py" ]
[ "#Libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('whitegrid')\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.calibration import CalibratedClassifierCV\nimport xgboost as xgb\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import svm\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.naive_bayes import GaussianNB\ntrain = pd.read_csv('../input/train.csv')\ntest = pd.read_csv('../input/test.csv')\ntrain.info()\ntrain.describe(include='all')\ntrain.head()\nplt.subplot(1,4,1)\ntrain.groupby('type').mean()['rotting_flesh'].plot(kind='bar',figsize=(7,4), color='r')\nplt.subplot(1,4,2)\ntrain.groupby('type').mean()['bone_length'].plot(kind='bar',figsize=(7,4), color='g')\nplt.subplot(1,4,3)\ntrain.groupby('type').mean()['hair_length'].plot(kind='bar',figsize=(7,4), color='y')\nplt.subplot(1,4,4)\ntrain.groupby('type').mean()['has_soul'].plot(kind='bar',figsize=(7,4), color='teal')\nsns.factorplot(\"type\", col=\"color\", col_wrap=4, data=train, kind=\"count\", size=2.4, aspect=.8)\n#The graphs look much better with higher figsize.\nfig, ax = plt.subplots(2, 2, figsize = (16, 12))\nsns.pointplot(x=\"color\", y=\"rotting_flesh\", hue=\"type\", data=train, ax = ax[0, 0])\nsns.pointplot(x=\"color\", y=\"bone_length\", hue=\"type\", data=train, ax = ax[0, 1])\nsns.pointplot(x=\"color\", y=\"hair_length\", hue=\"type\", data=train, ax = ax[1, 0])\nsns.pointplot(x=\"color\", y=\"has_soul\", hue=\"type\", data=train, ax = ax[1, 1])\nsns.pairplot(train, hue='type')\ntrain['hair_soul'] = train['hair_length'] * train['has_soul']\ntrain['hair_bone'] = train['hair_length'] * train['bone_length']\ntest['hair_soul'] = test['hair_length'] * test['has_soul']\ntest['hair_bone'] = test['hair_length'] * test['bone_length']\ntrain['hair_soul_bone'] = train['hair_length'] * train['has_soul'] * train['bone_length']\ntest['hair_soul_bone'] = test['hair_length'] * test['has_soul'] * test['bone_length']\n#test_id will be used later, so save it\ntest_id = test['id']\ntrain.drop(['id'], axis=1, inplace=True)\ntest.drop(['id'], axis=1, inplace=True)\n#Deal with 'color' column\ncol = 'color'\ndummies = pd.get_dummies(train[col], drop_first=False)\ndummies = dummies.add_prefix(\"{}#\".format(col))\ntrain.drop(col, axis=1, inplace=True)\ntrain = train.join(dummies)\ndummies = pd.get_dummies(test[col], drop_first=False)\ndummies = dummies.add_prefix(\"{}#\".format(col))\ntest.drop(col, axis=1, inplace=True)\ntest = test.join(dummies)\nX_train = train.drop('type', axis=1)\nle = LabelEncoder()\nY_train = le.fit_transform(train.type.values)\nX_test = test\nclf = RandomForestClassifier(n_estimators=200)\nclf = clf.fit(X_train, Y_train)\nindices = np.argsort(clf.feature_importances_)[::-1]\n\n# Print the feature ranking\nprint('Feature ranking:')\n\nfor f in range(X_train.shape[1]):\n print('%d. 
feature %d %s (%f)' % (f + 1, indices[f], X_train.columns[indices[f]],\n clf.feature_importances_[indices[f]]))\nbest_features=X_train.columns[indices[0:7]]\nX = X_train[best_features]\nXt = X_test[best_features]\n#Splitting data for validation\nXtrain, Xtest, ytrain, ytest = train_test_split(X, Y_train, test_size=0.20, random_state=36)\nforest = RandomForestClassifier(max_depth = 100, \n min_samples_split =7,\n min_weight_fraction_leaf = 0.0,\n max_leaf_nodes = 60)\n\nparameter_grid = {'n_estimators' : [10, 20, 100, 150],\n 'criterion' : ['gini', 'entropy'],\n 'max_features' : ['auto', 'sqrt', 'log2', None]\n }\n\ngrid_search = GridSearchCV(forest, param_grid=parameter_grid, scoring='accuracy', cv=StratifiedKFold(5))\ngrid_search.fit(Xtrain, ytrain)\nprint('Best score: {}'.format(grid_search.best_score_))\nprint('Best parameters: {}'.format(grid_search.best_params_))\nforest = RandomForestClassifier(n_estimators = 150,\n criterion = 'entropy',\n max_features = 'auto')\nparameter_grid = {\n 'max_depth' : [None, 5, 20, 100],\n 'min_samples_split' : [2, 5, 7],\n 'min_weight_fraction_leaf' : [0.0, 0.1],\n 'max_leaf_nodes' : [40, 60, 80],\n }\n\ngrid_search = GridSearchCV(forest, param_grid=parameter_grid, scoring='accuracy', cv=StratifiedKFold(5))\ngrid_search.fit(Xtrain, ytrain)\nprint('Best score: {}'.format(grid_search.best_score_))\nprint('Best parameters: {}'.format(grid_search.best_params_))\n#Optimal parameters\nclf = RandomForestClassifier(n_estimators=150, n_jobs=-1, criterion = 'entropy', max_features = 'auto',\n min_samples_split=7, min_weight_fraction_leaf=0.0,\n max_leaf_nodes=40, max_depth=20)\n#Calibration improves probability predictions\ncalibrated_clf = CalibratedClassifierCV(clf, method='sigmoid', cv=5)\ncalibrated_clf.fit(Xtrain, ytrain)\ny_val = calibrated_clf.predict_proba(Xtest)\n\nprint(\"Validation accuracy: \", sum(pd.DataFrame(y_val, columns=le.classes_).idxmax(axis=1).values\n == le.inverse_transform(ytest))/len(ytest))\nsvc = svm.SVC(kernel='linear')\nsvc.fit(Xtrain, ytrain)\ny_val_s = svc.predict(Xtest)\nprint(\"Validation accuracy: \", sum(le.inverse_transform(y_val_s)\n == le.inverse_transform(ytest))/len(ytest))\n#The last model is logistic regression\nlogreg = LogisticRegression()\n\nparameter_grid = {'solver' : ['newton-cg', 'lbfgs'],\n 'multi_class' : ['ovr', 'multinomial'],\n 'C' : [0.005, 0.01, 1, 10, 100, 1000],\n 'tol': [0.0001, 0.001, 0.005]\n }\n\ngrid_search = GridSearchCV(logreg, param_grid=parameter_grid, cv=StratifiedKFold(5))\ngrid_search.fit(Xtrain, ytrain)\nprint('Best score: {}'.format(grid_search.best_score_))\nprint('Best parameters: {}'.format(grid_search.best_params_))\nlog_reg = LogisticRegression(C = 1, tol = 0.0001, solver='newton-cg', multi_class='multinomial')\nlog_reg.fit(Xtrain, ytrain)\ny_val_l = log_reg.predict_proba(Xtest)\nprint(\"Validation accuracy: \", sum(pd.DataFrame(y_val_l, columns=le.classes_).idxmax(axis=1).values\n == le.inverse_transform(ytest))/len(ytest))\nclf = RandomForestClassifier(n_estimators=20, n_jobs=-1, criterion = 'gini', max_features = 'sqrt',\n min_samples_split=2, min_weight_fraction_leaf=0.0,\n max_leaf_nodes=40, max_depth=100)\n\ncalibrated_clf = CalibratedClassifierCV(clf, method='sigmoid', cv=5)\n\nlog_reg = LogisticRegression(C = 1, tol = 0.0001, solver='newton-cg', multi_class='multinomial')\n\ngnb = GaussianNB()\ncalibrated_clf1 = CalibratedClassifierCV(RandomForestClassifier())\n\nlog_reg1 = LogisticRegression()\n\ngnb1 = GaussianNB()\nVclf1 = VotingClassifier(estimators=[('LR', log_reg1), 
('CRF', calibrated_clf1),\n ('GNB', gnb1)], voting='hard')\nVclf = VotingClassifier(estimators=[('LR', log_reg), ('CRF', calibrated_clf),\n ('GNB', gnb)], voting='soft', weights=[1,1,1])\nhard_predict = le.inverse_transform(Vclf1.fit(X, Y_train).predict(Xt))\nsoft_predict = le.inverse_transform(Vclf.fit(X, Y_train).predict(Xt))\n#Let's see the differences:\nfor i in range(len(hard_predict)):\n if hard_predict[i] != soft_predict[i]:\n print(i, hard_predict[i], soft_predict[i])\nsubmission = pd.DataFrame({'id':test_id, 'type':hard_predict})\nsubmission.to_csv('GGG_submission.csv', index=False)\n" ]
[ [ "sklearn.calibration.CalibratedClassifierCV", "sklearn.svm.SVC", "sklearn.model_selection.StratifiedKFold", "pandas.read_csv", "pandas.DataFrame", "numpy.argsort", "matplotlib.pyplot.subplots", "matplotlib.pyplot.subplot", "sklearn.ensemble.VotingClassifier", "sklearn.preprocessing.LabelEncoder", "sklearn.ensemble.RandomForestClassifier", "sklearn.linear_model.LogisticRegression", "sklearn.model_selection.train_test_split", "pandas.get_dummies", "sklearn.naive_bayes.GaussianNB" ] ]
macky168/gaopt
[ "bf2785325d3cb4489513f47ed06f745a059262f8" ]
[ "example.py" ]
[ "import gaopt\nfrom gaopt import search_space\n\nimport pandas as pd\nimport numpy as np\n\nimport lightgbm as lgb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import r2_score\nfrom sklearn.datasets import load_diabetes\n\nparams_range={\n 'lambda_l1': search_space.discrete_int(-8, 2),\n 'lambda_l2': search_space.discrete_int(-8, 2),\n 'num_leaves': search_space.discrete(2, 100, 4),\n 'feature_fraction': search_space.discrete(0.1, 1.0, 0.02),\n 'bagging_fraction': search_space.discrete(0.1, 1.0, 0.02),\n 'bagging_freq': search_space.discrete_int(0,1),\n 'min_child_samples': search_space.discrete_int(1,30),\n}\ncal_time_lst = []\ndate_start = None\n\n\ndef objective1(params): \n diabetes = load_diabetes()\n X = diabetes.data\n y = diabetes.target\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)\n X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size = 0.3, random_state = 0)\n\n lgb_train = lgb.Dataset(data=X_train, label=y_train)\n lgb_valid = lgb.Dataset(data=X_valid, label=y_valid)\n \n params ={\n 'lambda_l1': 10**params.lambda_l1,\n 'lambda_l2': 10**params.lambda_l2,\n 'num_leaves': params.num_leaves,\n 'feature_fraction': params.feature_fraction,\n 'bagging_fraction': params.bagging_fraction,\n 'bagging_freq': params.bagging_freq,\n 'min_child_samples': params.min_child_samples,\n 'objective': 'regression',\n 'metric': 'rmse',\n \"verbosity\": -1,\n \"seed\": 0\n }\n\n model = lgb.train(params,\n train_set=lgb_train,\n valid_sets=lgb_valid,\n verbose_eval=False\n )\n \n y_pred_lgb = model.predict(X_test)\n fitness = r2_score(y_test, y_pred_lgb)\n \n return fitness\n\n\ndef main():\n p_m = 0.10\n p_c = 0.7\n\n population = 30\n generation = 50\n\n instance = gaopt.GAOpt(params_range, objective=objective1, generation=generation, population=population,\n p_m=p_m, p_c=p_c, elitism=True,\n history=2, verbose=2, maximizing=True)\n best_params, best_fitness, best_fitness_lst, worst_fitness_lst, mean_fitness_lst, median_fitness_lst, sd_fitness_lst, search_history_lst = instance.fit()\n print(\"best params: \", best_params)\n print(\"best fitness: \", best_fitness)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.metrics.r2_score", "sklearn.model_selection.train_test_split", "sklearn.datasets.load_diabetes" ] ]
t-imamichi/qiskit-core
[ "8d2eeeac44f97af1e10514cdae4157e5923ff2e5" ]
[ "qiskit/tools/jupyter/backend_overview.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"A module for monitoring backends.\"\"\"\n\nimport time\nimport threading\nimport types\nfrom IPython.display import display\nfrom IPython.core.magic import line_magic, Magics, magics_class\nfrom IPython.core import magic_arguments\nimport matplotlib.pyplot as plt\nimport ipywidgets as widgets\nfrom qiskit.tools.monitor.overview import get_unique_backends\nfrom qiskit.visualization.gate_map import plot_gate_map\n\n\n@magics_class\nclass BackendOverview(Magics):\n \"\"\"A class of status magic functions.\"\"\"\n\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument(\n \"-i\", \"--interval\", type=float, default=60, help=\"Interval for status check.\"\n )\n def qiskit_backend_overview(self, line=\"\"):\n \"\"\"A Jupyter magic function to monitor backends.\"\"\"\n args = magic_arguments.parse_argstring(self.qiskit_backend_overview, line)\n\n unique_hardware_backends = get_unique_backends()\n _value = \"<h2 style ='color:#ffffff; background-color:#000000;\"\n _value += \"padding-top: 1%; padding-bottom: 1%;padding-left: 1%;\"\n _value += \"margin-top: 0px'>Backend Overview</h2>\"\n backend_title = widgets.HTML(value=_value, layout=widgets.Layout(margin=\"0px 0px 0px 0px\"))\n\n build_back_widgets = [backend_widget(b) for b in unique_hardware_backends]\n\n _backends = []\n # Sort backends by operational or not\n oper_ord_backends = []\n for n, back in enumerate(unique_hardware_backends):\n if back.status().operational:\n oper_ord_backends = [build_back_widgets[n]] + oper_ord_backends\n _backends = [back] + _backends\n else:\n oper_ord_backends = oper_ord_backends + [build_back_widgets[n]]\n _backends = _backends + [back]\n\n qubit_label = widgets.Label(value=\"Num. Qubits\")\n qv_label = widgets.Label(value=\"Quantum Vol.\")\n pend_label = widgets.Label(\n value=\"Pending Jobs\", layout=widgets.Layout(margin=\"5px 0px 0px 0px\")\n )\n least_label = widgets.Label(\n value=\"Least Busy\", layout=widgets.Layout(margin=\"10px 0px 0px 0px\")\n )\n oper_label = widgets.Label(\n value=\"Operational\", layout=widgets.Layout(margin=\"5px 0px 0px 0px\")\n )\n t12_label = widgets.Label(\n value=\"Avg. T1 / T2\", layout=widgets.Layout(margin=\"10px 0px 0px 0px\")\n )\n cx_label = widgets.Label(\n value=\"Avg. CX Err.\", layout=widgets.Layout(margin=\"8px 0px 0px 0px\")\n )\n meas_label = widgets.Label(\n value=\"Avg. Meas. 
Err.\", layout=widgets.Layout(margin=\"8px 0px 0px 0px\")\n )\n\n labels_widget = widgets.VBox(\n [\n qubit_label,\n qv_label,\n pend_label,\n oper_label,\n least_label,\n t12_label,\n cx_label,\n meas_label,\n ],\n layout=widgets.Layout(margin=\"295px 0px 0px 0px\", min_width=\"100px\"),\n )\n\n backend_grid = GridBox_with_thread(\n children=oper_ord_backends,\n layout=widgets.Layout(\n grid_template_columns=\"250px \" * len(unique_hardware_backends),\n grid_template_rows=\"auto\",\n grid_gap=\"0px 25px\",\n ),\n )\n\n backend_grid._backends = _backends # pylint: disable=attribute-defined-outside-init\n backend_grid._update = types.MethodType( # pylint: disable=attribute-defined-outside-init\n update_backend_info, backend_grid\n )\n\n backend_grid._thread = threading.Thread( # pylint: disable=attribute-defined-outside-init\n target=backend_grid._update, args=(args.interval,)\n )\n backend_grid._thread.start()\n\n back_box = widgets.HBox([labels_widget, backend_grid])\n\n back_monitor = widgets.VBox([backend_title, back_box])\n display(back_monitor)\n\n\nclass GridBox_with_thread(widgets.GridBox): # pylint: disable=invalid-name\n \"\"\"A GridBox that will close an attached thread\"\"\"\n\n def __del__(self):\n \"\"\"Object disposal\"\"\"\n if hasattr(self, \"_thread\"):\n try:\n self._thread.do_run = False\n self._thread.join()\n except Exception: # pylint: disable=broad-except\n pass\n self.close()\n\n\ndef backend_widget(backend):\n \"\"\"Creates a backend widget.\"\"\"\n config = backend.configuration().to_dict()\n props = backend.properties().to_dict()\n\n name = widgets.HTML(value=f\"<h4>{backend.name()}</h4>\", layout=widgets.Layout())\n\n num_qubits = config[\"n_qubits\"]\n\n qv_val = \"-\"\n if \"quantum_volume\" in config.keys():\n if config[\"quantum_volume\"]:\n qv_val = config[\"quantum_volume\"]\n\n qubit_count = widgets.HTML(\n value=f\"<h5><b>{num_qubits}</b></h5>\",\n layout=widgets.Layout(justify_content=\"center\"),\n )\n\n qv_value = widgets.HTML(\n value=f\"<h5>{qv_val}</h5>\",\n layout=widgets.Layout(justify_content=\"center\"),\n )\n\n cmap = widgets.Output(\n layout=widgets.Layout(\n min_width=\"250px\",\n max_width=\"250px\",\n max_height=\"250px\",\n min_height=\"250px\",\n justify_content=\"center\",\n align_items=\"center\",\n margin=\"0px 0px 0px 0px\",\n )\n )\n\n with cmap:\n _cmap_fig = plot_gate_map(backend, plot_directed=False, label_qubits=False)\n if _cmap_fig is not None:\n display(_cmap_fig)\n # Prevents plot from showing up twice.\n plt.close(_cmap_fig)\n\n pending = generate_jobs_pending_widget()\n\n is_oper = widgets.HTML(value=\"<h5></h5>\", layout=widgets.Layout(justify_content=\"center\"))\n\n least_busy = widgets.HTML(value=\"<h5></h5>\", layout=widgets.Layout(justify_content=\"center\"))\n\n t1_units = props[\"qubits\"][0][0][\"unit\"]\n avg_t1 = round(sum(q[0][\"value\"] for q in props[\"qubits\"]) / num_qubits, 1)\n avg_t2 = round(sum(q[1][\"value\"] for q in props[\"qubits\"]) / num_qubits, 1)\n t12_widget = widgets.HTML(\n value=f\"<h5>{avg_t1} / {avg_t2} {t1_units}</h5>\",\n layout=widgets.Layout(),\n )\n\n avg_cx_err = \"NA\"\n if config[\"coupling_map\"]:\n sum_cx_err = 0\n num_cx = 0\n for gate in props[\"gates\"]:\n if gate[\"gate\"] == \"cx\":\n for param in gate[\"parameters\"]:\n if param[\"name\"] == \"gate_error\":\n # Value == 1.0 means gate effectively off\n if param[\"value\"] != 1.0:\n sum_cx_err += param[\"value\"]\n num_cx += 1\n if num_cx > 0:\n avg_cx_err = round(sum_cx_err / num_cx, 4)\n\n cx_widget = 
widgets.HTML(value=f\"<h5>{avg_cx_err}</h5>\", layout=widgets.Layout())\n\n avg_meas_err = 0\n for qub in props[\"qubits\"]:\n for item in qub:\n if item[\"name\"] == \"readout_error\":\n avg_meas_err += item[\"value\"]\n avg_meas_err = round(avg_meas_err / num_qubits, 4)\n meas_widget = widgets.HTML(value=f\"<h5>{avg_meas_err}</h5>\", layout=widgets.Layout())\n\n out = widgets.VBox(\n [\n name,\n cmap,\n qubit_count,\n qv_value,\n pending,\n is_oper,\n least_busy,\n t12_widget,\n cx_widget,\n meas_widget,\n ],\n layout=widgets.Layout(display=\"inline-flex\", flex_flow=\"column\", align_items=\"center\"),\n )\n\n out._is_alive = True\n return out\n\n\ndef update_backend_info(self, interval=60):\n \"\"\"Updates the monitor info\n Called from another thread.\n \"\"\"\n my_thread = threading.current_thread()\n current_interval = 0\n started = False\n all_dead = False\n stati = [None] * len(self._backends)\n while getattr(my_thread, \"do_run\", True) and not all_dead:\n if current_interval == interval or started is False:\n for ind, back in enumerate(self._backends):\n _value = self.children[ind].children[2].value\n _head = _value.split(\"<b>\")[0]\n try:\n _status = back.status()\n stati[ind] = _status\n except Exception: # pylint: disable=broad-except\n self.children[ind].children[2].value = _value.replace(\n _head, \"<h5 style='color:#ff5c49'>\"\n )\n self.children[ind]._is_alive = False\n else:\n self.children[ind]._is_alive = True\n self.children[ind].children[2].value = _value.replace(_head, \"<h5>\")\n\n idx = list(range(len(self._backends)))\n pending = [s.pending_jobs for s in stati]\n _, least_idx = zip(*sorted(zip(pending, idx)))\n\n # Make sure least pending is operational\n for ind in least_idx:\n if stati[ind].operational:\n least_pending_idx = ind\n break\n\n for var in idx:\n if var == least_pending_idx:\n self.children[var].children[6].value = \"<h5 style='color:#34bc6e'>True</h5>\"\n else:\n self.children[var].children[6].value = \"<h5 style='color:#dc267f'>False</h5>\"\n\n self.children[var].children[4].children[1].max = max(\n self.children[var].children[4].children[1].max, pending[var] + 10\n )\n self.children[var].children[4].children[1].value = pending[var]\n if stati[var].operational:\n self.children[var].children[5].value = \"<h5 style='color:#34bc6e'>True</h5>\"\n else:\n self.children[var].children[5].value = \"<h5 style='color:#dc267f'>False</h5>\"\n\n started = True\n current_interval = 0\n time.sleep(1)\n all_dead = not any(wid._is_alive for wid in self.children)\n current_interval += 1\n\n\ndef generate_jobs_pending_widget():\n \"\"\"Generates a jobs_pending progress bar widget.\"\"\"\n pbar = widgets.IntProgress(\n value=0,\n min=0,\n max=50,\n description=\"\",\n orientation=\"horizontal\",\n layout=widgets.Layout(max_width=\"180px\"),\n )\n pbar.style.bar_color = \"#71cddd\"\n\n pbar_current = widgets.Label(value=str(pbar.value), layout=widgets.Layout(min_width=\"auto\"))\n pbar_max = widgets.Label(value=str(pbar.max), layout=widgets.Layout(min_width=\"auto\"))\n\n def _on_max_change(change):\n pbar_max.value = str(change[\"new\"])\n\n def _on_val_change(change):\n pbar_current.value = str(change[\"new\"])\n\n pbar.observe(_on_max_change, names=\"max\")\n pbar.observe(_on_val_change, names=\"value\")\n\n jobs_widget = widgets.HBox(\n [pbar_current, pbar, pbar_max],\n layout=widgets.Layout(max_width=\"250px\", min_width=\"250px\", justify_content=\"center\"),\n )\n\n return jobs_widget\n" ]
[ [ "matplotlib.pyplot.close" ] ]
wattanapong/DFA
[ "c05851beca2f8739f80531eb4de2f61639715cab" ]
[ "pysot/datasets/dataset_template.py" ]
[ "# Copyright (c) SenseTime. All Rights Reserved.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport sys\nimport os\n\nimport cv2\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom pysot.utils.bbox import center2corner, Center\nfrom pysot.datasets.anchor_target import AnchorTarget\nfrom pysot.datasets.augmentation import Augmentation\nfrom pysot.core.config import cfg\n\nlogger = logging.getLogger(\"global\")\n\n# setting opencv\npyv = sys.version[0]\nif pyv[0] == '3':\n cv2.ocl.setUseOpenCL(False)\n\n\nclass SubDataset(object):\n def __init__(self, name, root, anno, frame_range, num_use, start_idx):\n cur_path = os.path.dirname(os.path.realpath(__file__))\n self.name = name\n self.root = os.path.join(cur_path, '../../', root)\n self.anno = os.path.join(cur_path, '../../', anno)\n self.frame_range = frame_range\n self.num_use = num_use\n self.start_idx = start_idx\n logger.info(\"loading \" + name)\n with open(self.anno, 'r') as f:\n meta_data = json.load(f)\n meta_data = self._filter_zero(meta_data)\n\n for video in list(meta_data.keys()):\n for track in meta_data[video]:\n frames = meta_data[video][track]\n frames = list(map(int,\n filter(lambda x: x.isdigit(), frames.keys())))\n frames.sort()\n meta_data[video][track]['frames'] = frames\n if len(frames) <= 0:\n logger.warning(\"{}/{} has no frames\".format(video, track))\n del meta_data[video][track]\n\n for video in list(meta_data.keys()):\n if len(meta_data[video]) <= 0:\n logger.warning(\"{} has no tracks\".format(video))\n del meta_data[video]\n\n self.labels = meta_data\n self.num = len(self.labels)\n self.num_use = self.num if self.num_use == -1 else self.num_use\n self.videos = list(meta_data.keys())\n logger.info(\"{} loaded\".format(self.name))\n self.path_format = '{}.{}.{}.jpg'\n self.pick = self.shuffle()\n\n def _filter_zero(self, meta_data):\n meta_data_new = {}\n for video, tracks in meta_data.items():\n new_tracks = {}\n for trk, frames in tracks.items():\n new_frames = {}\n for frm, bbox in frames.items():\n if not isinstance(bbox, dict):\n if len(bbox) == 4:\n x1, y1, x2, y2 = bbox\n w, h = x2 - x1, y2 - y1\n else:\n w, h = bbox\n if w <= 0 or h <= 0:\n continue\n new_frames[frm] = bbox\n if len(new_frames) > 0:\n new_tracks[trk] = new_frames\n if len(new_tracks) > 0:\n meta_data_new[video] = new_tracks\n return meta_data_new\n\n def log(self):\n logger.info(\"{} start-index {} select [{}/{}] path_format {}\".format(\n self.name, self.start_idx, self.num_use,\n self.num, self.path_format))\n\n def shuffle(self):\n lists = list(range(self.start_idx, self.start_idx + self.num))\n pick = []\n while len(pick) < self.num_use:\n np.random.shuffle(lists)\n pick += lists\n return pick[:self.num_use]\n\n def get_image_anno(self, video, track, frame):\n frame = \"{:06d}\".format(frame)\n image_path = os.path.join(self.root, video,\n self.path_format.format(frame, track, 'x'))\n image_anno = self.labels[video][track][frame]\n return image_path, image_anno\n\n # track is tracking object in video\n # video is one of subfolder under ILSVRC2015_VID_train_000{0-3}, for example, ILSVRC2015_train_00004000\n def get_positive_pair(self, index):\n video_name = self.videos[index]\n video = self.labels[video_name]\n track = np.random.choice(list(video.keys()))\n track_info = video[track]\n\n frames = track_info['frames']\n template_frame = np.random.randint(0, len(frames))\n 
template_frame = frames[template_frame]\n return self.get_image_anno(video_name, track, template_frame)\n\n def get_random_target(self, index=-1):\n if index == -1:\n index = np.random.randint(0, self.num)\n video_name = self.videos[index]\n video = self.labels[video_name]\n track = np.random.choice(list(video.keys()))\n track_info = video[track]\n frames = track_info['frames']\n frame = np.random.choice(frames)\n return self.get_image_anno(video_name, track, frame)\n\n def __len__(self):\n return self.num\n\n\nclass TrkDataset(Dataset):\n def __init__(self,):\n super(TrkDataset, self).__init__()\n\n desired_size = (cfg.TRAIN.SEARCH_SIZE - cfg.TRAIN.EXEMPLAR_SIZE) / \\\n cfg.ANCHOR.STRIDE + 1 + cfg.TRAIN.BASE_SIZE\n if desired_size != cfg.TRAIN.OUTPUT_SIZE:\n raise Exception('size not match!')\n\n # create anchor target\n self.anchor_target = AnchorTarget()\n\n # create sub dataset\n self.all_dataset = []\n start = 0\n self.num = 0\n for name in cfg.DATASET.NAMES:\n subdata_cfg = getattr(cfg.DATASET, name)\n sub_dataset = SubDataset(\n name,\n subdata_cfg.ROOT,\n subdata_cfg.ANNO,\n subdata_cfg.FRAME_RANGE,\n subdata_cfg.NUM_USE,\n start\n )\n start += sub_dataset.num\n self.num += sub_dataset.num_use\n\n sub_dataset.log()\n self.all_dataset.append(sub_dataset)\n\n # data augmentation\n self.template_aug = Augmentation(\n cfg.DATASET.TEMPLATE.SHIFT,\n cfg.DATASET.TEMPLATE.SCALE,\n cfg.DATASET.TEMPLATE.BLUR,\n cfg.DATASET.TEMPLATE.FLIP,\n cfg.DATASET.TEMPLATE.COLOR\n )\n self.search_aug = Augmentation(\n cfg.DATASET.SEARCH.SHIFT,\n cfg.DATASET.SEARCH.SCALE,\n cfg.DATASET.SEARCH.BLUR,\n cfg.DATASET.SEARCH.FLIP,\n cfg.DATASET.SEARCH.COLOR\n )\n videos_per_epoch = cfg.DATASET.VIDEOS_PER_EPOCH\n self.num = videos_per_epoch if videos_per_epoch > 0 else self.num\n self.num *= cfg.TRAIN.EPOCH\n self.pick = self.shuffle()\n\n def shuffle(self):\n pick = []\n m = 0\n while m < self.num:\n p = []\n for sub_dataset in self.all_dataset:\n sub_p = sub_dataset.pick\n p += sub_p\n np.random.shuffle(p)\n pick += p\n m = len(pick)\n logger.info(\"shuffle done!\")\n logger.info(\"dataset length {}\".format(self.num))\n return pick[:self.num]\n\n def _find_dataset(self, index):\n for dataset in self.all_dataset:\n if dataset.start_idx + dataset.num > index:\n return dataset, index - dataset.start_idx\n\n def _get_bbox(self, image, shape):\n imh, imw = image.shape[:2]\n if len(shape) == 4:\n w, h = shape[2]-shape[0], shape[3]-shape[1]\n else:\n w, h = shape\n context_amount = 0.5\n exemplar_size = cfg.TRAIN.EXEMPLAR_SIZE\n wc_z = w + context_amount * (w+h)\n hc_z = h + context_amount * (w+h)\n s_z = np.sqrt(wc_z * hc_z)\n scale_z = exemplar_size / s_z\n w = w*scale_z\n h = h*scale_z\n cx, cy = imw//2, imh//2\n bbox = center2corner(Center(cx, cy, w, h))\n return bbox\n\n def __len__(self):\n return self.num\n\n def __getitem__(self, index):\n index = self.pick[index]\n dataset, index = self._find_dataset(index)\n\n gray = cfg.DATASET.GRAY and cfg.DATASET.GRAY > np.random.random()\n neg = cfg.DATASET.NEG and cfg.DATASET.NEG > np.random.random()\n\n # get one dataset\n if neg:\n print('please check this suspension due to it was removed negative function (distractor)')\n import pdb\n pdb.set_trace()\n template = dataset.get_random_target(index)\n search = np.random.choice(self.all_dataset).get_random_target()\n else:\n template = dataset.get_positive_pair(index)\n\n if not os.path.exists(template[0]):\n print(template[0])\n\n # get image\n template_image = cv2.imread(template[0])\n\n # get bounding box\n 
template_box = self._get_bbox(template_image, template[1])\n\n # augmentation\n template, _ = self.template_aug(template_image,\n template_box,\n cfg.TRAIN.EXEMPLAR_SIZE,\n gray=gray)\n\n template = template.transpose((2, 0, 1)).astype(np.float32)\n\n return {\n 'template': template,\n 'gt': template_box\n }\n" ]
[ [ "numpy.random.shuffle", "numpy.random.choice", "numpy.random.random", "numpy.sqrt", "numpy.random.randint" ] ]
yifeim/gluon-nlp
[ "ea30d3399d87404b731d513535af9a31a5672799" ]
[ "src/gluonnlp/data/utils.py" ]
[ "# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Utility classes and functions. They help organize and keep statistics of datasets.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\n__all__ = [\n 'Counter', 'count_tokens', 'concat_sequence', 'slice_sequence', 'train_valid_split',\n 'line_splitter', 'whitespace_splitter', 'Splitter'\n]\n\nimport os\nimport collections\nimport zipfile\nimport tarfile\nimport numpy as np\n\nfrom mxnet.gluon.data import SimpleDataset\nfrom mxnet.gluon.utils import _get_repo_url, download, check_sha1\n\nfrom .. import _constants as C\n\n\nclass Counter(collections.Counter): # pylint: disable=abstract-method\n \"\"\"Counter class for keeping token frequencies.\"\"\"\n\n def discard(self, min_freq, unknown_token):\n \"\"\"Discards tokens with frequency below min_frequency and represents them\n as `unknown_token`.\n\n Parameters\n ----------\n min_freq: int\n Tokens whose frequency is under min_freq is counted as `unknown_token` in\n the Counter returned.\n unknown_token: str\n The representation for any unknown token.\n\n Returns\n -------\n The Counter instance.\n\n Examples\n --------\n >>> a = gluonnlp.data.Counter({'a': 10, 'b': 1, 'c': 1})\n >>> a.discard(3, '<unk>')\n Counter({'a': 10, '<unk>': 2})\n \"\"\"\n freq = 0\n ret = Counter({})\n for token, count in self.items():\n if count < min_freq:\n freq += count\n else:\n ret[token] = count\n ret[unknown_token] = ret.get(unknown_token, 0) + freq\n return ret\n\n\nclass DefaultLookupDict(dict):\n \"\"\"Dictionary class with fall-back look-up with default value set in the constructor.\"\"\"\n\n def __init__(self, default, d=None):\n if d:\n super(DefaultLookupDict, self).__init__(d)\n else:\n super(DefaultLookupDict, self).__init__()\n self._default = default\n\n def __getitem__(self, k):\n return self.get(k, self._default)\n\n\ndef count_tokens(tokens, to_lower=False, counter=None):\n r\"\"\"Counts tokens in the specified string.\n\n For token_delim='(td)' and seq_delim='(sd)', a specified string of two sequences of tokens may\n look like::\n\n (td)token1(td)token2(td)token3(td)(sd)(td)token4(td)token5(td)(sd)\n\n\n Parameters\n ----------\n tokens : list of str\n A source list of tokens.\n to_lower : bool, default False\n Whether to convert the source source_str to the lower case.\n counter : Counter or None, default None\n The Counter instance to be updated with the counts of `tokens`. If\n None, return a new Counter instance counting tokens from `tokens`.\n\n Returns\n -------\n The `counter` Counter instance after being updated with the token\n counts of `source_str`. 
If `counter` is None, return a new Counter\n instance counting tokens from `source_str`.\n\n Examples\n --------\n >>> import re\n >>> source_str = ' Life is great ! \\n life is good . \\n'\n >>> source_str_tokens = filter(None, re.split(' |\\n', source_str))\n >>> gluonnlp.data.count_tokens(source_str_tokens)\n Counter({'is': 2, 'Life': 1, 'great': 1, '!': 1, 'life': 1, 'good': 1, '.': 1})\n\n \"\"\"\n if to_lower:\n tokens = [t.lower() for t in tokens]\n\n if counter is None:\n return Counter(tokens)\n else:\n counter.update(tokens)\n return counter\n\n\ndef concat_sequence(sequences):\n \"\"\"Concatenate sequences of tokens into a single flattened list of tokens.\n\n Parameters\n ----------\n sequences : list of list of object\n Sequences of tokens, each of which is an iterable of tokens.\n\n Returns\n -------\n Flattened list of tokens.\n\n \"\"\"\n return [token for seq in sequences for token in seq if token]\n\n\ndef slice_sequence(sequence, length, pad_last=False, pad_val=C.PAD_TOKEN, overlap=0):\n \"\"\"Slice a flat sequence of tokens into sequences tokens, with each\n inner sequence's length equal to the specified `length`, taking into account the requested\n sequence overlap.\n\n Parameters\n ----------\n sequence : list of object\n A flat list of tokens.\n length : int\n The length of each of the samples.\n pad_last : bool, default False\n Whether to pad the last sequence when its length doesn't align. If the last sequence's\n length doesn't align and ``pad_last`` is False, it will be dropped.\n pad_val : object, default\n The padding value to use when the padding of the last sequence is enabled. In general,\n the type of ``pad_val`` should be the same as the tokens.\n overlap : int, default 0\n The extra number of items in current sample that should overlap with the\n next sample.\n\n Returns\n -------\n List of list of tokens, with the length of each inner list equal to `length`.\n\n \"\"\"\n if length <= overlap:\n raise ValueError('length needs to be larger than overlap')\n\n if pad_last:\n pad_len = _slice_pad_length(len(sequence), length, overlap)\n sequence = sequence + [pad_val] * pad_len\n num_samples = (len(sequence)-length) // (length-overlap) + 1\n return [sequence[i*(length-overlap):((i+1)*length-i*overlap)] for i in range(num_samples)]\n\n\ndef _slice_pad_length(num_items, length, overlap=0):\n \"\"\"Calculate the padding length needed for sliced samples in order not to discard data.\n\n Parameters\n ----------\n num_items : int\n Number of items in dataset before collating.\n length : int\n The length of each of the samples.\n overlap : int, default 0\n The extra number of items in current sample that should overlap with the\n next sample.\n\n Returns\n -------\n Length of paddings.\n\n \"\"\"\n if length <= overlap:\n raise ValueError('length needs to be larger than overlap')\n\n step = length-overlap\n span = num_items-length\n residual = span % step\n if residual:\n return step - residual\n else:\n return 0\n\n\n_vocab_sha1 = {'wikitext-2': 'be36dc5238c2e7d69720881647ab72eb506d0131',\n 'gbw': 'ebb1a287ca14d8fa6f167c3a779e5e7ed63ac69f',\n 'WMT2014_src': '230ebb817b1d86950d71e2e765f192a4e4f34415',\n 'WMT2014_tgt': '230ebb817b1d86950d71e2e765f192a4e4f34415',\n 'book_corpus_wiki_en_cased': '2d62af22535ed51f35cc8e2abb607723c89c2636',\n 'book_corpus_wiki_en_uncased': 'a66073971aa0b1a262453fe51342e57166a8abcf',\n 'wiki_multilingual_cased': '71bb9e248dc75dce9227d3c8c16fde3993588b9e',\n 'wiki_cn': 'a1e06f8e39ae51ab8a92b8458e6a658b8b1f72bf',\n 'wiki_multilingual': 
'2b2514cc539047b9179e9d98a4e68c36db05c97a'}\n\n\n_url_format = '{repo_url}gluon/dataset/vocab/{file_name}.zip'\n\n\ndef train_valid_split(dataset, valid_ratio=0.05):\n \"\"\"Split the dataset into training and validation sets.\n\n Parameters\n ----------\n train : list\n A list of training samples.\n valid_ratio : float, default 0.05\n Proportion of training samples to use for validation set\n range: [0, 1]\n\n Returns\n -------\n train : SimpleDataset\n valid : SimpleDataset\n \"\"\"\n if not 0.0 <= valid_ratio <= 1.0:\n raise ValueError('valid_ratio should be in [0, 1]')\n\n num_train = len(dataset)\n num_valid = np.ceil(num_train * valid_ratio).astype('int')\n indices = np.arange(num_train)\n\n np.random.shuffle(indices)\n valid = SimpleDataset([dataset[indices[i]] for i in range(num_valid)])\n train = SimpleDataset([dataset[indices[i + num_valid]] for i in range(num_train - num_valid)])\n return train, valid\n\n\ndef short_hash(name):\n if name not in _vocab_sha1:\n raise ValueError('Vocabulary for {name} is not available.'.format(name=name))\n return _vocab_sha1[name][:8]\n\n\ndef _load_pretrained_vocab(name, root=os.path.join('~', '.mxnet', 'models'), cls=None):\n \"\"\"Load the accompanying vocabulary object for pre-trained model.\n\n Parameters\n ----------\n name : str\n Name of the vocabulary, usually the name of the dataset.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n cls : nlp.Vocab or nlp.vocab.BERTVocab, default nlp.Vocab\n\n Returns\n -------\n Vocab or nlp.bert.BERTVocab\n Loaded vocabulary object for the pre-trained model.\n \"\"\"\n file_name = '{name}-{short_hash}'.format(name=name,\n short_hash=short_hash(name))\n root = os.path.expanduser(root)\n file_path = os.path.join(root, file_name+'.vocab')\n sha1_hash = _vocab_sha1[name]\n if os.path.exists(file_path):\n if check_sha1(file_path, sha1_hash):\n return _load_vocab_file(file_path, cls)\n else:\n print('Detected mismatch in the content of model vocab file. Downloading again.')\n else:\n print('Vocab file is not found. Downloading.')\n\n if not os.path.exists(root):\n os.makedirs(root)\n\n zip_file_path = os.path.join(root, file_name+'.zip')\n repo_url = _get_repo_url()\n if repo_url[-1] != '/':\n repo_url = repo_url + '/'\n download(_url_format.format(repo_url=repo_url, file_name=file_name),\n path=zip_file_path,\n overwrite=True)\n with zipfile.ZipFile(zip_file_path) as zf:\n zf.extractall(root)\n os.remove(zip_file_path)\n\n if check_sha1(file_path, sha1_hash):\n return _load_vocab_file(file_path, cls)\n else:\n raise ValueError('Downloaded file has different hash. 
Please try again.')\n\n\ndef _load_vocab_file(file_path, cls):\n with open(file_path, 'r') as f:\n if cls is None:\n from ..vocab import Vocab\n cls = Vocab\n\n return cls.from_json(f.read())\n\n\ndef _get_home_dir():\n \"\"\"Get home directory for storing datasets/models/pre-trained word embeddings\"\"\"\n _home_dir = os.environ.get('MXNET_HOME', os.path.join('~', '.mxnet'))\n # expand ~ to actual path\n _home_dir = os.path.expanduser(_home_dir)\n return _home_dir\n\n\ndef _extract_archive(file, target_dir):\n \"\"\"Extract archive file\n\n Parameters\n ----------\n file : str\n Absolute path of the archive file.\n target_dir : str\n Target directory of the archive to be uncompressed\n\n \"\"\"\n if file.endswith('.gz') or file.endswith('.tar') or file.endswith('.tgz'):\n archive = tarfile.open(file, 'r')\n elif file.endswith('.zip'):\n archive = zipfile.ZipFile(file, 'r')\n else:\n raise Exception('Unrecognized file type: ' + file)\n archive.extractall(path=target_dir)\n archive.close()\n\n\ndef line_splitter(s):\n \"\"\"Split a string at newlines.\n\n Parameters\n ----------\n s : str\n The string to be split\n\n Returns\n --------\n List[str]\n List of strings. Obtained by calling s.splitlines().\n\n \"\"\"\n return s.splitlines()\n\n\ndef whitespace_splitter(s):\n \"\"\"Split a string at whitespace (space, tab, newline, return, formfeed).\n\n Parameters\n ----------\n s : str\n The string to be split\n\n Returns\n --------\n List[str]\n List of strings. Obtained by calling s.split().\n \"\"\"\n return s.split()\n\n\nclass Splitter(object):\n \"\"\"Split a string based on a separator.\n\n Parameters\n ----------\n separator : str\n The separator based on which string is split.\n \"\"\"\n\n def __init__(self, separator=None):\n self._separator = separator\n\n def __call__(self, s):\n \"\"\"Split a string based on the separator.\n\n Parameters\n ----------\n s : str\n The string to be split\n\n Returns\n --------\n List[str]\n List of strings. Obtained by calling s.split(separator).\n \"\"\"\n return s.split(self._separator)\n" ]
[ [ "numpy.arange", "numpy.random.shuffle", "numpy.ceil" ] ]
philiptmassey/Cirq
[ "b8b457c2fc484d76bf8a82a73f6ecc11756229a6", "b8b457c2fc484d76bf8a82a73f6ecc11756229a6" ]
[ "cirq/ops/common_gates.py", "cirq/optimizers/merge_interactions.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantum gates that are commonly used in the literature.\n\nThis module creates Gate instances for the following gates:\n X,Y,Z: Pauli gates.\n H,S: Clifford gates.\n T: A non-Clifford gate.\n CZ: Controlled phase gate.\n CNOT: Controlled not gate.\n SWAP: the swap gate.\n ISWAP: a swap gate with a phase on the swapped subspace.\n\nEach of these are implemented as EigenGates, which means that they can be\nraised to a power (i.e. cirq.H**0.5). See the definition in EigenGate.\n\nIn addition MeasurementGate is defined and convenience methods for\nmeasurements are provided\n measure\n measure_each\n\"\"\"\nfrom typing import (\n Any, Callable, cast, Iterable, List, Optional, Tuple, Union,\n)\n\nimport numpy as np\n\nfrom cirq import linalg, protocols, value\nfrom cirq.ops import gate_features, eigen_gate, raw_types, gate_operation\n\nfrom cirq.type_workarounds import NotImplementedType\n\n# Note: avoiding 'from/as' because it creates a circular dependency in python 2.\nimport cirq.ops.phased_x_gate\n\n\nclass XPowGate(eigen_gate.EigenGate,\n gate_features.SingleQubitGate):\n \"\"\"A gate that rotates around the X axis of the Bloch sphere.\n\n The unitary matrix of ``XPowGate(exponent=t)`` is:\n\n [[g·c, -i·g·s],\n [-i·g·s, g·c]]\n\n where:\n\n c = cos(π·t/2)\n s = sin(π·t/2)\n g = exp(i·π·t/2).\n\n Note in particular that this gate has a global phase factor of\n e^{i·π·t/2} vs the traditionally defined rotation matrices\n about the Pauli X axis. See `cirq.Rx` for rotations without the global\n phase. 
The global phase factor can be adjusted by using the `global_shift`\n parameter when initializing.\n\n `cirq.X`, the Pauli X gate, is an instance of this gate at exponent=1.\n \"\"\"\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs\n ) -> Optional[np.ndarray]:\n if self._exponent != 1:\n return None\n zero = args.subspace_index(0)\n one = args.subspace_index(1)\n args.available_buffer[zero] = args.target_tensor[one]\n args.available_buffer[one] = args.target_tensor[zero]\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n args.available_buffer *= p\n return args.available_buffer\n\n def _eigen_components(self):\n return [\n (0, np.array([[0.5, 0.5], [0.5, 0.5]])),\n (1, np.array([[0.5, -0.5], [-0.5, 0.5]])),\n ]\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> Union[str, protocols.CircuitDiagramInfo]:\n if self._global_shift == -0.5:\n return _rads_func_symbol(\n 'Rx',\n args,\n self._diagram_exponent(args, ignore_global_phase=False))\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=('X',),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('x {0};\\n', qubits[0])\n else:\n return args.format('rx({0:half_turns}) {1};\\n',\n self._exponent, qubits[0])\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n return cirq.ops.phased_x_gate.PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=phase_turns * 2)\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'X'\n return 'X**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == -0.5 and not protocols.is_parameterized(self):\n return 'cirq.Rx(np.pi*{!r})'.format(self._exponent)\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.X'\n return '(cirq.X**{!r})'.format(self._exponent)\n return (\n 'cirq.XPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\nclass YPowGate(eigen_gate.EigenGate,\n gate_features.SingleQubitGate):\n \"\"\"A gate that rotates around the Y axis of the Bloch sphere.\n\n The unitary matrix of ``YPowGate(exponent=t)`` is:\n\n [[g·c, g·s],\n [-g·s, g·c]]\n\n where:\n\n c = cos(π·t/2)\n s = sin(π·t/2)\n g = exp(i·π·t/2).\n\n Note in particular that this gate has a global phase factor of\n e^{i·π·t/2} vs the traditionally defined rotation matrices\n about the Pauli Y axis. See `cirq.Ry` for rotations without the global\n phase. 
The global phase factor can be adjusted by using the `global_shift`\n parameter when initializing.\n\n `cirq.Y`, the Pauli Y gate, is an instance of this gate at exponent=1.\n \"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.array([[0.5, -0.5j], [0.5j, 0.5]])),\n (1, np.array([[0.5, 0.5j], [-0.5j, 0.5]])),\n ]\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> Union[str, protocols.CircuitDiagramInfo]:\n if self._global_shift == -0.5:\n return _rads_func_symbol(\n 'Ry',\n args,\n self._diagram_exponent(args, ignore_global_phase=False))\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=('Y',),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('y {0};\\n', qubits[0])\n else:\n return args.format('ry({0:half_turns}) {1};\\n',\n self._exponent, qubits[0])\n\n def _phase_by_(self, phase_turns, qubit_index):\n \"\"\"See `cirq.SupportsPhase`.\"\"\"\n return cirq.ops.phased_x_gate.PhasedXPowGate(\n exponent=self._exponent,\n phase_exponent=0.5 + phase_turns * 2)\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'Y'\n return 'Y**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == -0.5 and not protocols.is_parameterized(self):\n return 'cirq.Ry(np.pi*{!r})'.format(self._exponent)\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.Y'\n return '(cirq.Y**{!r})'.format(self._exponent)\n return (\n 'cirq.YPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\nclass ZPowGate(eigen_gate.EigenGate,\n gate_features.SingleQubitGate):\n \"\"\"A gate that rotates around the Z axis of the Bloch sphere.\n\n The unitary matrix of ``ZPowGate(exponent=t)`` is:\n\n [[1, 0],\n [0, g]]\n\n where:\n\n g = exp(i·π·t).\n\n Note in particular that this gate has a global phase factor of\n e^{i·π·t/2} vs the traditionally defined rotation matrices\n about the Pauli Z axis. See `cirq.Rz` for rotations without the global\n phase. 
The global phase factor can be adjusted by using the `global_shift`\n parameter when initializing.\n\n `cirq.Z`, the Pauli Z gate, is an instance of this gate at exponent=1.\n \"\"\"\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs\n ) -> Optional[np.ndarray]:\n if protocols.is_parameterized(self):\n return None\n\n one = args.subspace_index(1)\n c = 1j**(self._exponent * 2)\n args.target_tensor[one] *= c\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n args.target_tensor *= p\n return args.target_tensor\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0])),\n (1, np.diag([0, 1])),\n ]\n\n def _phase_by_(self, phase_turns: float, qubit_index: int):\n return self\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> Union[str, protocols.CircuitDiagramInfo]:\n if self._global_shift == -0.5:\n return _rads_func_symbol(\n 'Rz',\n args,\n self._diagram_exponent(args, ignore_global_phase=False))\n\n e = self._diagram_exponent(args)\n if e in [-0.25, 0.25]:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('T',),\n exponent=cast(float, e) * 4)\n\n if e in [-0.5, 0.5]:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('S',),\n exponent=cast(float, e) * 2)\n\n return protocols.CircuitDiagramInfo(\n wire_symbols=('Z',),\n exponent=e)\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('z {0};\\n', qubits[0])\n else:\n return args.format('rz({0:half_turns}) {1};\\n',\n self._exponent, qubits[0])\n\n def __str__(self) -> str:\n if self._exponent == 0.25:\n return 'T'\n if self._exponent == -0.25:\n return 'T**-1'\n if self._exponent == 0.5:\n return 'S'\n if self._exponent == -0.5:\n return 'S**-1'\n if self._exponent == 1:\n return 'Z'\n return 'Z**{}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == -0.5 and not protocols.is_parameterized(self):\n return 'cirq.Rz(np.pi*{!r})'.format(self._exponent)\n if self._global_shift == 0:\n if self._exponent == 0.25:\n return 'cirq.T'\n if self._exponent == -0.25:\n return '(cirq.T**-1)'\n if self._exponent == 0.5:\n return 'cirq.S'\n if self._exponent == -0.5:\n return '(cirq.S**-1)'\n if self._exponent == 1:\n return 'cirq.Z'\n return '(cirq.Z**{!r})'.format(self._exponent)\n return (\n 'cirq.ZPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\[email protected]_equality\nclass MeasurementGate(raw_types.Gate):\n \"\"\"A gate that measures qubits in the computational basis.\n\n The measurement gate contains a key that is used to identify results\n of measurements.\n \"\"\"\n\n def __init__(self,\n key: str = '',\n invert_mask: Tuple[bool, ...] = ()) -> None:\n \"\"\"\n Args:\n key: The string key of the measurement.\n invert_mask: A list of values indicating whether the corresponding\n qubits should be flipped. 
The list's length must not be longer\n than the number of qubits, but it is permitted to be shorter.\n Qubits with indices past the end of the mask are not flipped.\n \"\"\"\n self.key = key\n self.invert_mask = invert_mask or ()\n\n @staticmethod\n def is_measurement(op: Union[raw_types.Gate, raw_types.Operation]) -> bool:\n if isinstance(op, MeasurementGate):\n return True\n if (isinstance(op, gate_operation.GateOperation) and\n isinstance(op.gate, MeasurementGate)):\n return True\n return False\n\n def with_bits_flipped(self, *bit_positions: int) -> 'MeasurementGate':\n \"\"\"Toggles whether or not the measurement inverts various outputs.\"\"\"\n old_mask = self.invert_mask or ()\n n = max(len(old_mask) - 1, *bit_positions) + 1\n new_mask = [k < len(old_mask) and old_mask[k] for k in range(n)]\n for b in bit_positions:\n new_mask[b] = not new_mask[b]\n return MeasurementGate(key=self.key, invert_mask=tuple(new_mask))\n\n def validate_args(self, qubits):\n if (self.invert_mask is not None and\n len(self.invert_mask) > len(qubits)):\n raise ValueError('len(invert_mask) > len(qubits)')\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n n = (max(1, len(self.invert_mask))\n if args.known_qubit_count is None\n else args.known_qubit_count)\n symbols = ['M'] * n\n\n # Show which output bits are negated.\n if self.invert_mask:\n for i, b in enumerate(self.invert_mask):\n if b:\n symbols[i] = '!M'\n\n # Mention the measurement key.\n if (not args.known_qubits or\n self.key != _default_measurement_key(args.known_qubits)):\n symbols[0] += \"('{}')\".format(self.key)\n\n return protocols.CircuitDiagramInfo(tuple(symbols))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n invert_mask = self.invert_mask\n if len(invert_mask) < len(qubits):\n invert_mask = (invert_mask\n + (False,) * (len(qubits) - len(invert_mask)))\n lines = []\n for i, (qubit, inv) in enumerate(zip(qubits, invert_mask)):\n if inv:\n lines.append(args.format(\n 'x {0}; // Invert the following measurement\\n', qubit))\n lines.append(args.format('measure {0} -> {1:meas}[{2}];\\n',\n qubit, self.key, i))\n return ''.join(lines)\n\n def __repr__(self):\n return 'cirq.MeasurementGate({}, {})'.format(repr(self.key),\n repr(self.invert_mask))\n\n def _value_equality_values_(self):\n return self.key, self.invert_mask\n\n\ndef _default_measurement_key(qubits: Iterable[raw_types.QubitId]) -> str:\n return ','.join(str(q) for q in qubits)\n\n\ndef measure(*qubits: raw_types.QubitId,\n key: Optional[str] = None,\n invert_mask: Tuple[bool, ...] = ()\n ) -> gate_operation.GateOperation:\n \"\"\"Returns a single MeasurementGate applied to all the given qubits.\n\n The qubits are measured in the computational basis.\n\n Args:\n *qubits: The qubits that the measurement gate should measure.\n key: The string key of the measurement. If this is None, it defaults\n to a comma-separated list of the target qubits' str values.\n invert_mask: A list of Truthy or Falsey values indicating whether\n the corresponding qubits should be flipped. None indicates no\n inverting should be done.\n\n Returns:\n An operation targeting the given qubits with a measurement.\n\n Raises:\n ValueError if the qubits are not instances of QubitId.\n \"\"\"\n for qubit in qubits:\n if isinstance(qubit, np.ndarray):\n raise ValueError(\n 'measure() was called a numpy ndarray. 
Perhaps you meant '\n 'to call measure_state_vector on numpy array?'\n )\n elif not isinstance(qubit, raw_types.QubitId):\n raise ValueError(\n 'measure() was called with type different than QubitId.')\n\n if key is None:\n key = _default_measurement_key(qubits)\n return MeasurementGate(key, invert_mask).on(*qubits)\n\n\ndef measure_each(*qubits: raw_types.QubitId,\n key_func: Callable[[raw_types.QubitId], str] = str\n ) -> List[gate_operation.GateOperation]:\n \"\"\"Returns a list of operations individually measuring the given qubits.\n\n The qubits are measured in the computational basis.\n\n Args:\n *qubits: The qubits to measure.\n key_func: Determines the key of the measurements of each qubit. Takes\n the qubit and returns the key for that qubit. Defaults to str.\n\n Returns:\n A list of operations individually measuring the given qubits.\n \"\"\"\n return [MeasurementGate(key_func(q)).on(q) for q in qubits]\n\n\nclass HPowGate(eigen_gate.EigenGate, gate_features.SingleQubitGate):\n \"\"\"A Gate that performs a rotation around the X+Z axis of the Bloch sphere.\n\n\n The unitary matrix of ``HPowGate(exponent=t)`` is:\n\n [[g·(c-i·s/sqrt(2)), -i·g·s/sqrt(2)],\n [-i·g·s/sqrt(2)], g·(c+i·s/sqrt(2))]]\n\n where\n\n c = cos(π·t/2)\n s = sin(π·t/2)\n g = exp(i·π·t/2).\n\n Note in particular that for `t=1`, this gives the Hadamard matrix.\n\n `cirq.H`, the Hadamard gate, is an instance of this gate at `exponent=1`.\n \"\"\"\n\n def _eigen_components(self):\n s = np.sqrt(2)\n\n component0 = np.array([\n [3 + 2 * s, 1 + s],\n [1 + s, 1]\n ]) / (4 + 2 * s)\n\n component1 = np.array([\n [3 - 2 * s, 1 - s],\n [1 - s, 1]\n ]) / (4 - 2 * s)\n\n return [(0, component0), (1, component1)]\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs\n ) -> Optional[np.ndarray]:\n if self._exponent != 1:\n return None\n\n zero = args.subspace_index(0)\n one = args.subspace_index(1)\n args.target_tensor[one] -= args.target_tensor[zero]\n args.target_tensor[one] *= -0.5\n args.target_tensor[zero] -= args.target_tensor[one]\n p = 1j**(2 * self._exponent * self._global_shift)\n args.target_tensor *= np.sqrt(2) * p\n return args.target_tensor\n\n def _decompose_(self, qubits):\n q = qubits[0]\n\n if self._exponent == 1:\n yield cirq.Y(q)**0.5\n yield cirq.XPowGate(global_shift=-0.25).on(q)\n return\n\n yield Y(q)**0.25\n yield X(q)**self._exponent\n yield Y(q)**-0.25\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(('H',))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._exponent == 1:\n return args.format('h {0};\\n', qubits[0])\n else:\n return args.format('ry({0:half_turns}) {3};\\n'\n 'rx({1:half_turns}) {3};\\n'\n 'ry({2:half_turns}) {3};\\n',\n 0.25, self._exponent, -0.25, qubits[0])\n\n def __str__(self):\n if self._exponent == 1:\n return 'H'\n return 'H^{}'.format(self._exponent)\n\n def __repr__(self):\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.H'\n return '(cirq.H**{!r})'.format(self._exponent)\n return (\n 'cirq.HPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\nclass CZPowGate(eigen_gate.EigenGate,\n gate_features.TwoQubitGate,\n gate_features.InterchangeableQubitsGate):\n \"\"\"A gate that applies a phase to the |11⟩ state of two qubits.\n\n The unitary matrix of `CZPowGate(exponent=t)` is:\n\n [[1, 0, 0, 0],\n [0, 1, 
0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, g]]\n\n where:\n\n g = exp(i·π·t/2).\n\n `cirq.CZ`, the controlled Z gate, is an instance of this gate at\n `exponent=1`.\n \"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 1, 1, 0])),\n (1, np.diag([0, 0, 0, 1])),\n ]\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs\n ) -> Union[np.ndarray, NotImplementedType]:\n if protocols.is_parameterized(self):\n return NotImplemented\n\n c = 1j**(2 * self._exponent)\n one_one = linalg.slice_for_qubits_equal_to(args.axes, 0b11)\n args.target_tensor[one_one] *= c\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n args.target_tensor *= p\n return args.target_tensor\n\n def _phase_by_(self, phase_turns, qubit_index):\n return self\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('@', '@'),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n if self._exponent != 1:\n return None # Don't have an equivalent gate in QASM\n args.validate_version('2.0')\n return args.format('cz {0},{1};\\n', qubits[0], qubits[1])\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'CZ'\n return 'CZ**{!r}'.format(self._exponent)\n\n def __repr__(self) -> str:\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.CZ'\n return '(cirq.CZ**{!r})'.format(self._exponent)\n return (\n 'cirq.CZPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\ndef _rads_func_symbol(func_name: str,\n args: protocols.CircuitDiagramInfoArgs,\n half_turns: Any) -> str:\n unit = 'π' if args.use_unicode_characters else 'pi'\n if half_turns == 1:\n return '{}({})'.format(func_name, unit)\n if half_turns == -1:\n return '{}(-{})'.format(func_name, unit)\n return '{}({}{})'.format(func_name, half_turns, unit)\n\n\nclass CNotPowGate(eigen_gate.EigenGate, gate_features.TwoQubitGate):\n \"\"\"A gate that applies a controlled power of an X gate.\n\n When applying CNOT (controlled-not) to qubits, you can either use\n positional arguments CNOT(q1, q2), where q2 is toggled when q1 is on,\n or named arguments CNOT(control=q1, target=q2).\n (Mixing the two is not permitted.)\n\n The unitary matrix of `CNotPowGate(exponent=t)` is:\n\n [[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, g·c, -i·g·s],\n [0, 0, -i·g·s, g·c]]\n\n where:\n\n c = cos(π·t/2)\n s = sin(π·t/2)\n g = exp(i·π·t/2).\n\n `cirq.CNOT`, the controlled NOT gate, is an instance of this gate at\n `exponent=1`.\n \"\"\"\n\n def _decompose_(self, qubits):\n c, t = qubits\n yield Y(t)**-0.5\n yield CZ(c, t)**self._exponent\n yield Y(t)**0.5\n\n def _eigen_components(self):\n return [\n (0, np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0.5, 0.5],\n [0, 0, 0.5, 0.5]])),\n (1, np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0.5, -0.5],\n [0, 0, -0.5, 0.5]])),\n ]\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('@', 'X'),\n exponent=self._diagram_exponent(args))\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs\n ) -> Optional[np.ndarray]:\n if self._exponent != 1:\n return None\n\n oo = args.subspace_index(0b11)\n zo = args.subspace_index(0b01)\n args.available_buffer[oo] = args.target_tensor[oo]\n args.target_tensor[oo] = args.target_tensor[zo]\n 
args.target_tensor[zo] = args.available_buffer[oo]\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n args.target_tensor *= p\n return args.target_tensor\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n if self._exponent != 1:\n return None # Don't have an equivalent gate in QASM\n args.validate_version('2.0')\n return args.format('cx {0},{1};\\n', qubits[0], qubits[1])\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'CNOT'\n return 'CNOT**{!r}'.format(self._exponent)\n\n def __repr__(self):\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.CNOT'\n return '(cirq.CNOT**{!r})'.format(self._exponent)\n return (\n 'cirq.CNotPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n def on(self, *args: raw_types.QubitId,\n **kwargs: raw_types.QubitId) -> gate_operation.GateOperation:\n if not kwargs:\n return super().on(*args)\n if not args and set(kwargs.keys()) == {'control', 'target'}:\n return super().on(kwargs['control'], kwargs['target'])\n raise ValueError(\n \"Expected two positional argument or else 'target' AND 'control' \"\n \"keyword arguments. But got args={!r}, kwargs={!r}.\".format(\n args, kwargs))\n\n\nclass SwapPowGate(eigen_gate.EigenGate,\n gate_features.TwoQubitGate,\n gate_features.InterchangeableQubitsGate):\n \"\"\"The SWAP gate, possibly raised to a power. Exchanges qubits.\n\n SwapPowGate()**t = SwapPowGate(exponent=t) and acts on two qubits in the\n computational basis as the matrix:\n\n [[1, 0, 0, 0],\n [0, g·c, -i·g·s, 0],\n [0, -i·g·s, g·c, 0],\n [0, 0, 0, 1]]\n\n where:\n\n c = cos(π·t/2)\n s = sin(π·t/2)\n g = exp(i·π·t/2).\n\n `cirq.SWAP`, the swap gate, is an instance of this gate at exponent=1.\n \"\"\"\n\n def _decompose_(self, qubits):\n \"\"\"See base class.\"\"\"\n a, b = qubits\n yield CNOT(a, b)\n yield CNOT(b, a) ** self._exponent\n yield CNOT(a, b)\n\n def _eigen_components(self):\n return [\n (0, np.array([[1, 0, 0, 0],\n [0, 0.5, 0.5, 0],\n [0, 0.5, 0.5, 0],\n [0, 0, 0, 1]])),\n (1, np.array([[0, 0, 0, 0],\n [0, 0.5, -0.5, 0],\n [0, -0.5, 0.5, 0],\n [0, 0, 0, 0]])),\n ]\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs\n ) -> Optional[np.ndarray]:\n if self._exponent != 1:\n return None\n\n zo = args.subspace_index(0b01)\n oz = args.subspace_index(0b10)\n args.available_buffer[zo] = args.target_tensor[zo]\n args.target_tensor[zo] = args.target_tensor[oz]\n args.target_tensor[oz] = args.available_buffer[zo]\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n args.target_tensor *= p\n return args.target_tensor\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n if not args.use_unicode_characters:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('swap', 'swap'),\n exponent=self._diagram_exponent(args))\n return protocols.CircuitDiagramInfo(\n wire_symbols=('×', '×'),\n exponent=self._diagram_exponent(args))\n\n def _qasm_(self,\n args: protocols.QasmArgs,\n qubits: Tuple[raw_types.QubitId, ...]) -> Optional[str]:\n if self._exponent != 1:\n return None # Don't have an equivalent gate in QASM\n args.validate_version('2.0')\n return args.format('swap {0},{1};\\n', qubits[0], qubits[1])\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'SWAP'\n return 'SWAP**{!r}'.format(self._exponent)\n\n def __repr__(self):\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.SWAP'\n return 
'(cirq.SWAP**{!r})'.format(self._exponent)\n return (\n 'cirq.SwapPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\nclass ISwapPowGate(eigen_gate.EigenGate,\n gate_features.InterchangeableQubitsGate,\n gate_features.TwoQubitGate):\n \"\"\"Rotates the |01⟩-vs-|10⟩ subspace of two qubits around its Bloch X-axis.\n\n When exponent=1, swaps the two qubits and phases |01⟩ and |10⟩ by i. More\n generally, this gate's matrix is defined as follows:\n\n ISWAP**t ≡ exp(+i π t (X⊗X + Y⊗Y) / 4)\n\n which is given by the matrix:\n\n [[1, 0, 0, 0],\n [0, c, i·s, 0],\n [0, i·s, c, 0],\n [0, 0, 0, 1]]\n\n where:\n\n c = cos(π·t/2)\n s = sin(π·t/2)\n\n `cirq.ISWAP`, the swap gate that applies -i to the |01> and |10> states,\n is an instance of this gate at exponent=1.\n \"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 0, 0, 1])),\n (+0.5, np.array([[0, 0, 0, 0],\n [0, 0.5, 0.5, 0],\n [0, 0.5, 0.5, 0],\n [0, 0, 0, 0]])),\n (-0.5, np.array([[0, 0, 0, 0],\n [0, 0.5, -0.5, 0],\n [0, -0.5, 0.5, 0],\n [0, 0, 0, 0]])),\n ]\n\n def _decompose_(self, qubits):\n a, b = qubits\n\n yield CNOT(a, b)\n yield H(a)\n yield CNOT(b, a)\n yield S(a)**self._exponent\n yield CNOT(b, a)\n yield S(a)**-self._exponent\n yield H(a)\n yield CNOT(a, b)\n\n def _apply_unitary_(self, args: protocols.ApplyUnitaryArgs\n ) -> Optional[np.ndarray]:\n if self._exponent != 1:\n return None\n\n zo = args.subspace_index(0b01)\n oz = args.subspace_index(0b10)\n args.available_buffer[zo] = args.target_tensor[zo]\n args.target_tensor[zo] = args.target_tensor[oz]\n args.target_tensor[oz] = args.available_buffer[zo]\n args.target_tensor[zo] *= 1j\n args.target_tensor[oz] *= 1j\n p = 1j**(2 * self._exponent * self._global_shift)\n if p != 1:\n args.target_tensor *= p\n return args.target_tensor\n\n def _circuit_diagram_info_(self, args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n return protocols.CircuitDiagramInfo(\n wire_symbols=('iSwap', 'iSwap'),\n exponent=self._diagram_exponent(args))\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'ISWAP'\n return 'ISWAP**{!r}'.format(self._exponent)\n\n def __repr__(self):\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.ISWAP'\n return '(cirq.ISWAP**{!r})'.format(self._exponent)\n return (\n 'cirq.ISwapPowGate(exponent={!r}, '\n 'global_shift={!r})'\n ).format(self._exponent, self._global_shift)\n\n\ndef Rx(rads: float) -> XPowGate:\n \"\"\"Returns a gate with the matrix e^{-i X rads / 2}.\"\"\"\n return XPowGate(exponent=rads / np.pi, global_shift=-0.5)\n\n\ndef Ry(rads: float) -> YPowGate:\n \"\"\"Returns a gate with the matrix e^{-i Y rads / 2}.\"\"\"\n return YPowGate(exponent=rads / np.pi, global_shift=-0.5)\n\n\ndef Rz(rads: float) -> ZPowGate:\n \"\"\"Returns a gate with the matrix e^{-i Z rads / 2}.\"\"\"\n return ZPowGate(exponent=rads / np.pi, global_shift=-0.5)\n\n\nX = XPowGate()\n\"\"\"The Pauli X gate.\n\nMatrix:\n\n [[0, 1],\n [1, 0]]\n\"\"\"\n\n\n#: The Pauli Y gate.\n#:\n#: Matrix:\n#:\n#: [[0, -i],\n#: [i, 0]]\nY = YPowGate()\n\n\n# The Pauli Z gate.\n#\n# Matrix:\n#\n# [[1, 0],\n# [0, -1]]\nZ = ZPowGate()\n\n\n# The Hadamard gate.\n#\n# Matrix:\n#\n# [[s, s],\n# [s, -s]]\n# where s = sqrt(0.5).\nH = HPowGate()\n\n\n# The Clifford S gate.\n#\n# Matrix:\n#\n# [[1, 0],\n# [0, i]]\nS = Z**0.5\n\n\n# The T gate.\n#\n# Matrix:\n#\n# [[1, 0]\n# [0, exp(i pi / 4)]]\nT = Z**0.25\n\n\n# The controlled Z gate.\n#\n# Matrix:\n#\n# [[1, 0, 0, 0],\n# [0, 1, 0, 0],\n# 
[0, 0, 1, 0],\n# [0, 0, 0, -1]]\nCZ = CZPowGate()\n\n\n# The controlled NOT gate.\n#\n# Matrix:\n#\n# [[1, 0, 0, 0],\n# [0, 1, 0, 0],\n# [0, 0, 0, 1],\n# [0, 0, 1, 0]]\nCNOT = CNotPowGate()\n\n\n# The swap gate.\n#\n# Matrix:\n#\n# [[1, 0, 0, 0],\n# [0, 0, 1, 0],\n# [0, 1, 0, 0],\n# [0, 0, 0, 1]]\nSWAP = SwapPowGate()\n\n\n# The iswap gate.\n#\n# Matrix:\n#\n# [[1, 0, 0, 0],\n# [0, 0, i, 0],\n# [0, i, 0, 0],\n# [0, 0, 0, 1]]\nISWAP = ISwapPowGate()\n", "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An optimization pass that combines adjacent single-qubit rotations.\"\"\"\n\nfrom typing import Callable, List, Optional, Sequence, Tuple, cast\n\nimport numpy as np\n\nfrom cirq import circuits, ops, protocols\nfrom cirq.optimizers import two_qubit_decompositions\n\n\nclass MergeInteractions(circuits.PointOptimizer):\n \"\"\"Combines series of adjacent one and two-qubit gates operating on a pair\n of qubits.\"\"\"\n\n def __init__(self,\n tolerance: float = 1e-8,\n allow_partial_czs: bool = True,\n post_clean_up: Callable[\n [Sequence[ops.Operation]], ops.OP_TREE\n ] = lambda op_list: op_list) -> None:\n super().__init__(post_clean_up=post_clean_up)\n self.tolerance = tolerance\n self.allow_partial_czs = allow_partial_czs\n\n def optimization_at(self,\n circuit: circuits.Circuit,\n index: int,\n op: ops.Operation\n ) -> Optional[circuits.PointOptimizationSummary]:\n if len(op.qubits) != 2:\n return None\n\n old_operations, indices, matrix = (\n self._scan_two_qubit_ops_into_matrix(circuit, index, op.qubits))\n\n old_interaction_count = len([old_op for old_op in old_operations\n if len(old_op.qubits) == 2])\n\n switch_to_new = False\n switch_to_new |= any(len(old_op.qubits) == 2 and\n not (isinstance(old_op, ops.GateOperation) and\n isinstance(old_op.gate, ops.CZPowGate))\n for old_op in old_operations)\n if not self.allow_partial_czs:\n switch_to_new |= any(isinstance(old_op, ops.GateOperation) and\n isinstance(old_op.gate, ops.CZPowGate)\n and old_op.gate.exponent != 1\n for old_op in old_operations)\n\n # This point cannot be optimized using this method\n if not switch_to_new and old_interaction_count <= 1:\n return None\n\n # Find a max-3-cz construction.\n new_operations = (\n two_qubit_decompositions.two_qubit_matrix_to_operations(\n op.qubits[0],\n op.qubits[1],\n matrix,\n self.allow_partial_czs,\n self.tolerance))\n new_interaction_count = len([new_op for new_op in new_operations\n if len(new_op.qubits) == 2])\n\n switch_to_new |= new_interaction_count < old_interaction_count\n\n if not switch_to_new:\n return None\n\n return circuits.PointOptimizationSummary(\n clear_span=max(indices) + 1 - index,\n clear_qubits=op.qubits,\n new_operations=new_operations)\n\n def _op_to_matrix(self,\n op: Optional[ops.Operation],\n qubits: Tuple[ops.QubitId, ...]\n ) -> Optional[np.ndarray]:\n \"\"\"Determines the effect of an operation on the given qubits.\n\n If the operation is a 1-qubit operation on one of the given qubits,\n or a 2-qubit operation 
on both of the given qubits, and also the\n operation has a known matrix, then a matrix is returned. Otherwise None\n is returned.\n\n Args:\n op: The operation to understand.\n qubits: The qubits we care about. Order determines matrix tensor\n order.\n\n Returns:\n None, or else a matrix equivalent to the effect of the operation.\n \"\"\"\n q1, q2 = qubits\n\n matrix = protocols.unitary(op, None)\n if matrix is None:\n return None\n\n assert op is not None\n if op.qubits == qubits:\n return matrix\n if op.qubits == (q2, q1):\n return MergeInteractions._flip_kron_order(matrix)\n if op.qubits == (q1,):\n return np.kron(matrix, np.eye(2))\n if op.qubits == (q2,):\n return np.kron(np.eye(2), matrix)\n\n return None\n\n def _scan_two_qubit_ops_into_matrix(\n self,\n circuit: circuits.Circuit,\n index: Optional[int],\n qubits: Tuple[ops.QubitId, ...]\n ) -> Tuple[List[ops.Operation], List[int], np.ndarray]:\n \"\"\"Accumulates operations affecting the given pair of qubits.\n\n The scan terminates when it hits the end of the circuit, finds an\n operation without a known matrix, or finds an operation that interacts\n the given qubits with other qubits.\n\n Args:\n circuit: The circuit to scan for operations.\n index: The index to start scanning forward from.\n qubits: The pair of qubits we care about.\n\n Returns:\n A tuple containing:\n 0. The operations.\n 1. The moment indices those operations were on.\n 2. A matrix equivalent to the effect of the scanned operations.\n \"\"\"\n\n product = np.eye(4, dtype=np.complex128)\n all_operations = []\n touched_indices = []\n\n while index is not None:\n operations = list({circuit.operation_at(q, index) for q in qubits})\n op_data = [\n self._op_to_matrix(op, qubits)\n for op in operations\n if op is not None\n ]\n\n # Stop at any non-constant or non-local interaction.\n if any(e is None for e in op_data):\n break\n present_ops = [op for op in operations if op]\n present_op_data = cast(List[np.ndarray], op_data)\n\n for op_mat in present_op_data:\n product = np.dot(op_mat, product)\n all_operations.extend(present_ops)\n\n touched_indices.append(index)\n index = circuit.next_moment_operating_on(qubits, index + 1)\n\n return all_operations, touched_indices, product\n\n @staticmethod\n def _flip_kron_order(mat4x4: np.ndarray) -> np.ndarray:\n \"\"\"Given M = sum(kron(a_i, b_i)), returns M' = sum(kron(b_i, a_i)).\"\"\"\n result = np.array([[0] * 4] * 4, dtype=np.complex128)\n order = [0, 2, 1, 3]\n for i in range(4):\n for j in range(4):\n result[order[i], order[j]] = mat4x4[i, j]\n return result\n" ]
[ [ "numpy.sqrt", "numpy.array", "numpy.diag" ], [ "numpy.array", "numpy.eye", "numpy.dot" ] ]
PEI-I1/Nos_Tech_Problems
[ "cf8b0b51285a912988a96cc96438f81c75fa45b7" ]
[ "NTP_Bot/msg_interpreter.py" ]
[ "#!/usr/bin/env python3\nimport tensorflow_hub as hub\nimport numpy as np\nimport tensorflow_text\nimport json, re, os\nfrom threading import Thread\nfrom keywords import keywords\n\nembeddings = {}\nembed = None\n\ndef loadModelData():\n ''' Loads Tensorflow enconder and pre-encodes the problem data\n '''\n global embed\n global embeddings\n\n embed = hub.load(\"https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/2\")\n feature_types = ['Sintoma', 'Tipificacao_Nivel_1', 'Tipificacao_Nivel_2', 'Tipificacao_Nivel_3']\n\n with open(os.getcwd() + '/input_options.json') as json_file:\n data = json.load(json_file)\n for typ in feature_types:\n embedProblemData(data, typ, embeddings)\n\n \ndef embedProblemData(data, feature_type, embeddings):\n ''' Calculates embeddings for all the values of feature_type\n :param: data\n :param: feature type\n :param: dict that maps feature values to their embeddings\n '''\n raw_features = [x for x in data[feature_type]]\n proc_features = [x.lower() for x in raw_features]\n feature_embeddings = embed(proc_features)[\"outputs\"]\n for i in range(0, len(raw_features)):\n embeddings[raw_features[i]] = feature_embeddings[i]\n\n\ndef replaceWithKeywords(line, keywords):\n ''' Replaces matches in line with a keyword\n :param: string to look for expressions\n :param: dictionary object that matches keywords with expressions\n :return: list of versions of the line with replaced expressions\n '''\n keyworded_versions = [line]\n for keyword, matches in keywords.items():\n keyworded_versions.extend([re.sub(match, keyword, line) for match in matches if re.search(match, line)])\n\n return keyworded_versions\n\n\ndef getFeatureSuggestion(line, keywords, ss_vals, ss_embeddings, category):\n ''' Calculates feature from category that is semantically closest to the one described in\n line\n :param: target\n :param: \n '''\n ll = line.lower()\n \n line_versions = replaceWithKeywords(ll, keywords['common'])\n if category>0:\n line_versions.extend(replaceWithKeywords(ll, keywords['tip_'+str(category)]))\n \n sentence_embeddings = [embed(line_version)[\"outputs\"] for line_version in line_versions]\n similarity_matrices = [list(np.inner(sent_emb, ss_embeddings)[0])\n for sent_emb in sentence_embeddings]\n max_values = [max(similarity_matrice) for similarity_matrice in similarity_matrices]\n max_abs = max(max_values)\n similarity_matrix = similarity_matrices[max_values.index(max_abs)]\n sugestao = ss_vals[similarity_matrix.index(max_abs)]\n return sugestao, max_abs\n\n\ndef extractProblemData(prob_desc, search_space, category):\n ''' Extracts the string in the search space that is semantically \n closest to the problem description\n :param: problem description\n :param: search space of the possible strings\n :param: search space category (simptome or typification)\n :return: closest string that belongs to search_space and confidence\n '''\n ss_embeddings = [embeddings[ss_val] for ss_val in search_space]\n return getFeatureSuggestion(prob_desc, keywords, search_space, ss_embeddings, category)\n" ]
[ [ "numpy.inner" ] ]
Nobu575/AppItk
[ "91de313115b753a6fb1ae67f53d4979580ef768b" ]
[ "opening2d.py" ]
[ "import numpy as np\nimport itk\nimport matplotlib.pyplot as plt\n\n# Input file name\ninput_filename = './jenga_g_150.png'\n\n# Set dimension\nDimension = 2\n\n# Read input image\nitk_image = itk.imread(input_filename)\n\n# Setting for input image (Grayscale)\nInputPixelType = itk.UC\nInputImageType = itk.Image[InputPixelType, Dimension]\n\n# Loading\nreader = itk.ImageFileReader[InputImageType].New()\nreader.SetFileName(input_filename)\n\n# Apply a filter: Thresholding\nthresholdFilter = itk.BinaryThresholdImageFilter[InputImageType,InputImageType].New()\nthresholdFilter.SetInput(reader.GetOutput())\nthresholdFilter.SetUpperThreshold(200)\nthresholdFilter.SetOutsideValue(1)\nthresholdFilter.SetInsideValue(0)\n\nStructuringElementType = itk.FlatStructuringElement[Dimension]\nstructuringElement = StructuringElementType.Ball(3)\n\n# Apply Opening (erosion and dilation)\nerodeFilter = itk.BinaryErodeImageFilter[InputImageType,InputImageType,StructuringElementType].New()\nerodeFilter.SetInput(thresholdFilter.GetOutput())\nerodeFilter.SetKernel(structuringElement)\nerodeFilter.SetForegroundValue(1)\n\ndilateFilter = itk.BinaryDilateImageFilter[InputImageType,InputImageType,StructuringElementType].New()\ndilateFilter.SetInput(erodeFilter.GetOutput())\ndilateFilter.SetKernel(structuringElement)\ndilateFilter.SetForegroundValue(1)\n\ndilateFilter.Update()\n\n# Plot the input and output images.\nplt.figure(figsize=(12, 4), dpi=50)\nplt.subplot(1,3,1),plt.title(\"original\"),plt.imshow(itk_image, cmap=\"gray\")\nplt.subplot(1,3,2),plt.title(\"threshold\"),plt.imshow(thresholdFilter.GetOutput())\nplt.subplot(1,3,3),plt.title(\"output\"),plt.imshow(dilateFilter.GetOutput())\nplt.savefig(\"./img/jenga_opening2d.png\")" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.imshow" ] ]
veo-ibd/Genie
[ "735e3aa0dc71aab0c404fd0cb3a34c8e1d9784c2" ]
[ "genie/assay.py" ]
[ "import os\nimport logging\nimport subprocess\nimport yaml\n\nimport pandas as pd\n\nfrom .example_filetype_format import FileTypeFormat\nfrom . import process_functions\n\nlogger = logging.getLogger(__name__)\n\n\nclass Assayinfo(FileTypeFormat):\n '''\n Assay information file type\n '''\n _fileType = \"assayinfo\"\n\n _process_kwargs = [\"newPath\", \"databaseSynId\"]\n\n def _validateFilename(self, filepath_list):\n assert os.path.basename(filepath_list[0]) == \"assay_information.yaml\"\n\n def process_steps(self, assay_info_df, newPath, databaseSynId):\n # databaseSynId = kwargs['databaseSynId']\n # Must pass in a list\n process_assay_info_df = self._process(assay_info_df)\n col = ['SEQ_ASSAY_ID', 'is_paired_end', 'library_selection',\n 'library_strategy', 'platform', 'read_length',\n 'instrument_model', 'gene_padding', 'number_of_genes',\n 'variant_classifications', 'CENTER']\n process_functions.updateData(\n self.syn,\n databaseSynId,\n process_assay_info_df,\n self.center,\n col=col,\n filterByColumn=\"CENTER\",\n toDelete=True)\n process_assay_info_df.to_csv(newPath, sep=\"\\t\", index=False)\n return(newPath)\n\n def _process(self, df):\n '''\n Processing function for Assay information\n - Standardizes SEQ_ASSAY_ID\n - Default 10 for gene_padding\n - Fills in variant_classifications\n\n Args:\n df: Assay information dataframe\n\n Returns:\n dataframe: Processed dataframe\n '''\n seq_assay_ids = [\n assay.upper().replace('_', '-') for assay in df['SEQ_ASSAY_ID']]\n df['SEQ_ASSAY_ID'] = seq_assay_ids\n if process_functions.checkColExist(df, \"gene_padding\"):\n df['gene_padding'] = df['gene_padding'].fillna(10)\n df['gene_padding'] = df['gene_padding'].astype(int)\n else:\n df['gene_padding'] = 10\n\n if not process_functions.checkColExist(df, \"variant_classifications\"):\n df['variant_classifications'] = pd.np.nan\n\n df['CENTER'] = self.center\n return(df)\n\n def _get_dataframe(self, filepath_list):\n '''\n Takes in yaml file, returns dataframe\n '''\n filepath = filepath_list[0]\n try:\n with open(filepath, 'r') as yamlfile:\n # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n # Must add this because yaml load deprecation\n panel_info_dict = yaml.load(yamlfile, Loader=yaml.FullLoader)\n except Exception:\n raise ValueError(\n \"assay_information.yaml: Can't read in your file. 
\"\n \"Please make sure the file is a correctly formatted yaml\")\n assay_info_df = pd.DataFrame(panel_info_dict)\n assay_info_df = assay_info_df.transpose()\n assay_info_df['SEQ_ASSAY_ID'] = assay_info_df.index\n assay_info_df.reset_index(drop=True, inplace=True)\n return(assay_info_df)\n\n def _validate(self, assay_info_df):\n '''\n Validates the values of assay information file\n\n Args:\n assay_info_df: assay information dataframe\n\n Returns:\n tuple: error and warning\n '''\n\n total_error = \"\"\n warning = \"\"\n\n if process_functions.checkColExist(assay_info_df, \"SEQ_ASSAY_ID\"):\n all_seq_assays = assay_info_df.SEQ_ASSAY_ID.unique()\n if not all([assay.startswith(self.center)\n for assay in all_seq_assays]):\n total_error += \\\n \"Assay_information.yaml: Please make sure your all your\" +\\\n \" SEQ_ASSAY_IDs start with your center abbreviation.\\n\"\n else:\n total_error += \\\n \"Assay_information.yaml: Must have SEQ_ASSAY_ID column.\\n\"\n\n read_group_dict = process_functions.get_gdc_data_dictionary(\n \"read_group\")\n read_group_headers = read_group_dict['properties']\n\n warn, error = process_functions.check_col_and_values(\n assay_info_df,\n 'is_paired_end',\n [True, False],\n filename=\"Assay_information.yaml\",\n required=True)\n warning += warn\n total_error += error\n\n warn, error = process_functions.check_col_and_values(\n assay_info_df, 'library_selection',\n read_group_headers['library_selection']['enum'],\n filename=\"Assay_information.yaml\",\n required=True)\n\n warning += warn\n total_error += error\n warn, error = process_functions.check_col_and_values(\n assay_info_df,\n 'library_strategy',\n read_group_headers['library_strategy']['enum'],\n filename=\"Assay_information.yaml\",\n required=True)\n\n warning += warn\n total_error += error\n warn, error = process_functions.check_col_and_values(\n assay_info_df,\n 'platform',\n read_group_headers['platform']['enum'],\n filename=\"Assay_information.yaml\",\n required=True)\n\n warning += warn\n total_error += error\n\n instrument_model = read_group_headers['instrument_model']['enum']\n instrument_model.append(None)\n warn, error = process_functions.check_col_and_values(\n assay_info_df,\n 'instrument_model',\n instrument_model,\n filename=\"Assay_information.yaml\",\n required=True)\n\n warning += warn\n total_error += error\n\n variant_classes = \\\n ['Splice_Site', 'Nonsense_Mutation', 'Frame_Shift_Del',\n 'Frame_Shift_Ins', 'Nonstop_Mutation', 'Translation_Start_Site',\n 'In_Frame_Ins', 'In_Frame_Del', 'Missense_Mutation',\n 'Intron', 'Splice_Region', 'Silent', 'RNA', \"5'UTR\", \"3'UTR\",\n 'IGR', \"5'Flank\", \"3'Flank\", None]\n warn, error = process_functions.check_col_and_values(\n assay_info_df,\n 'variant_classifications',\n variant_classes,\n filename=\"Assay_information.yaml\",\n na_allowed=True)\n\n warning += warn\n total_error += error\n\n # if not process_functions.checkColExist(\n # assay_info_df, \"target_capture_kit\"):\n # total_error += (\"Assay_information.yaml: \"\n # \"Must have target_capture_kit column.\\n\")\n\n if process_functions.checkColExist(assay_info_df, \"read_length\"):\n if not all([process_functions.checkInt(i)\n for i in assay_info_df[\"read_length\"]\n if i is not None and not pd.isnull(i)]):\n total_error += \\\n (\"Assay_information.yaml: \"\n \"Please double check your read_length. 
\"\n \"It must be an integer or null.\\n\")\n else:\n total_error += \\\n (\"Assay_information.yaml: \"\n \"Must have read_length column.\\n\")\n\n if process_functions.checkColExist(assay_info_df, \"number_of_genes\"):\n if not all([process_functions.checkInt(i)\n for i in assay_info_df[\"number_of_genes\"]]):\n total_error += \\\n (\"Assay_information.yaml: \"\n \"Please double check your number_of_genes. \"\n \"It must be an integer.\\n\")\n else:\n total_error += \\\n (\"Assay_information.yaml: \"\n \"Must have number_of_genes column.\\n\")\n\n if process_functions.checkColExist(assay_info_df, \"gene_padding\"):\n if not all([process_functions.checkInt(i)\n for i in assay_info_df[\"gene_padding\"]\n if i is not None and not pd.isnull(i)]):\n total_error += \\\n (\"Assay_information.yaml: \"\n \"Please double check your gene_padding. \"\n \"It must be an integer or blank.\\n\")\n else:\n warning += \\\n (\"Assay_information.yaml: \"\n \"gene_padding is by default 10 if not specified.\\n\")\n\n return(total_error, warning)\n" ]
[ [ "pandas.isnull", "pandas.DataFrame" ] ]
jamesgregson/easy_image_io
[ "4b5af29f3ccc37e4b10fbdc1e18d508ed04b882d" ]
[ "setup.py" ]
[ "from setuptools import setup, Extension\nimport numpy\nimport os\nimport config\n\ndef find(name, path):\n for root, dirs, files in os.walk(path):\n if name in files:\n return os.path.join(root, name)\n return '';\n\nprint('locating directories...')\ndefines = [ ('MAJOR_VERSION',0),('MINOR_VERSION',1) ]\ninclude_dirs = [ numpy.get_include() ]\nlibraries = []\nlibrary_dirs = []\n\nprint('checking for tiffio.h...')\nif find('tiffio.h', config.tiff_include_dir) != '':\n defines.append( ('cimg_use_tiff',1) )\n include_dirs.append( config.tiff_include_dir )\n libraries.append( 'tiff' )\n library_dirs.append( config.tiff_library_dir )\n\nprint('checking for png.h...')\nif find('png.h', config.png_include_dir ) != '':\n defines.append( ('cimg_use_png',1) )\n include_dirs.append( config.png_include_dir )\n libraries.append( 'png' )\n library_dirs.append( config.png_library_dir )\n\nfor lib in config.libs:\n libraries.append( lib )\n\nprint('Setting up extension...')\neasy_image_io = Extension('easy_image_io',\n define_macros=defines,\n sources=['easy_image_io.cpp'],\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries )\n\nprint('Building extension...')\nsetup(name='easy_image_io', version='0.1', ext_modules=[ easy_image_io ] )\n" ]
[ [ "numpy.get_include" ] ]
UMBCvision/Consistent-Explanations-by-Contrastive-Learning
[ "589ff89cbcc96a1d8bd8d5b7bd7a785448ed2de3" ]
[ "TorchRay/torchray/benchmark/evaluate_imagenet_gradcam_energy_inside_bbox.py" ]
[ "import argparse\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data.distributed\nimport torchvision.transforms as transforms\nimport resnet_multigpu_cgc as resnet\nimport cv2\nimport datasets as pointing_datasets\n\n\"\"\" \n Here, we evaluate the content heatmap (Grad-CAM heatmap within object bounding box) on the imagenet dataset.\n\"\"\"\n\nmodel_names = ['resnet18', 'resnet50']\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('data', metavar='DIR', help='path to dataset')\nparser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',\n choices=model_names,\n help='model architecture: ' +\n ' | '.join(model_names) +\n ' (default: resnet18)')\nparser.add_argument('-j', '--workers', default=16, type=int, metavar='N',\n help='number of data loading workers (default: 16)')\nparser.add_argument('-b', '--batch-size', default=256, type=int,\n metavar='N', help='mini-batch size (default: 96)')\nparser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\nparser.add_argument('-g', '--num-gpus', default=1, type=int,\n metavar='N', help='number of GPUs to match (default: 4)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('--input_resize', default=224, type=int,\n metavar='N', help='Resize for smallest side of input (default: 224)')\n\n\ndef main():\n global args\n args = parser.parse_args()\n\n if args.pretrained:\n print(\"=> using pre-trained model '{}'\".format(args.arch))\n if args.arch.startswith('resnet'):\n model = resnet.__dict__[args.arch](pretrained=True)\n else:\n assert False, 'Unsupported architecture: {}'.format(args.arch)\n else:\n print(\"=> creating model '{}'\".format(args.arch))\n if args.arch.startswith('resnet'):\n model = resnet.__dict__[args.arch]()\n \n model = torch.nn.DataParallel(model).cuda()\n\n if args.resume:\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n model.load_state_dict(checkpoint['state_dict'])\n\n if (not args.resume) and (not args.pretrained):\n assert False, \"Please specify either the pre-trained model or checkpoint for evaluation\"\n\n cudnn.benchmark = True\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n\n # Here, we don't resize the images. 
We feed the full image and use AdaptivePooling before FC.\n # We will resize Gradcam heatmap to image size and compare the actual bbox co-ordinates\n val_dataset = pointing_datasets.ImageNetDetection(args.data,\n transform=transforms.Compose([\n transforms.Resize(args.input_resize),\n transforms.ToTensor(),\n normalize,\n ]))\n\n # we set batch size=1 since we are loading full resolution images.\n val_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=1, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n validate_multi(val_loader, val_dataset, model)\n\n\ndef validate_multi(val_loader, val_dataset, model):\n batch_time = AverageMeter()\n heatmap_inside_bbox = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n for i, (images, annotation, targets) in enumerate(val_loader):\n images = images.cuda(non_blocking=True)\n targets = targets.cuda(non_blocking=True)\n\n # we assume batch size == 1 and unwrap the first elem of every list in annotation object\n annotation = unwrap_dict(annotation)\n image_size = val_dataset.as_image_size(annotation)\n\n output, feats = model(images, vanilla_with_feats=True)\n output_gradcam = compute_gradcam(output, feats, targets)\n output_gradcam_np = output_gradcam.data.cpu().numpy()[0] # since we have batch size==1\n resized_output_gradcam = cv2.resize(output_gradcam_np, image_size)\n spatial_sum = resized_output_gradcam.sum()\n if spatial_sum <= 0:\n # We ignore images with zero Grad-CAM\n continue\n\n # resized_output_gradcam is now normalized and can be considered as probabilities\n resized_output_gradcam = resized_output_gradcam / spatial_sum\n\n mask = pointing_datasets.imagenet_as_mask(annotation, targets[0].item())\n\n mask = mask.type(torch.ByteTensor)\n mask = mask.cpu().data.numpy()\n\n gcam_inside_gt_mask = mask * resized_output_gradcam\n\n # Now we sum the heatmap inside the object bounding box\n total_gcam_inside_gt_mask = gcam_inside_gt_mask.sum()\n heatmap_inside_bbox.update(total_gcam_inside_gt_mask)\n\n if i % 1000 == 0:\n print('\\nResults after {} examples: '.format(i+1))\n print('Curr % of heatmap inside bbox: {:.4f} ({:.4f})'.format(heatmap_inside_bbox.val * 100,\n heatmap_inside_bbox.avg * 100))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n print('\\nFinal Results - ')\n print('\\n\\n% of heatmap inside bbox: {:.4f}'.format(heatmap_inside_bbox.avg * 100))\n\n return\n\n\ndef compute_gradcam(output, feats, target):\n \"\"\"\n Compute the gradcam for the top predicted category\n :param output:\n :param feats:\n :param target:\n :return:\n \"\"\"\n eps = 1e-8\n relu = nn.ReLU(inplace=True)\n\n target = target.cpu().numpy()\n one_hot = np.zeros((output.shape[0], output.shape[-1]), dtype=np.float32)\n indices_range = np.arange(output.shape[0])\n one_hot[indices_range, target[indices_range]] = 1\n one_hot = torch.from_numpy(one_hot)\n one_hot.requires_grad = True\n\n # Compute the Grad-CAM for the original image\n one_hot_cuda = torch.sum(one_hot.cuda() * output)\n dy_dz1, = torch.autograd.grad(one_hot_cuda, feats, grad_outputs=torch.ones(one_hot_cuda.size()).cuda(),\n retain_graph=True, create_graph=True)\n # Changing to dot product of grad and features to preserve grad spatial locations\n gcam512_1 = dy_dz1 * feats\n gradcam = gcam512_1.sum(dim=1)\n gradcam = relu(gradcam)\n spatial_sum1 = gradcam.sum(dim=[1, 2]).unsqueeze(-1).unsqueeze(-1)\n gradcam = (gradcam / (spatial_sum1 + eps)) + eps\n\n return gradcam\n\n\ndef unwrap_dict(dict_object):\n 
new_dict = {}\n for k, v in dict_object.items():\n if k == 'object':\n new_v_list = []\n for elem in v:\n new_v_list.append(unwrap_dict(elem))\n new_dict[k] = new_v_list\n continue\n if isinstance(v, dict):\n new_v = unwrap_dict(v)\n elif isinstance(v, list) and len(v) == 1:\n new_v = v[0]\n else:\n new_v = v\n new_dict[k] = new_v\n return new_dict\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "numpy.zeros", "numpy.arange", "torch.from_numpy", "torch.nn.DataParallel", "torch.nn.ReLU" ] ]
500kg/learn2branch
[ "693d6f68def3ce290a0f5f289820e708019c019a" ]
[ "04_test.py" ]
[ "import os\nimport sys\nimport importlib\nimport argparse\nimport csv\nimport numpy as np\nimport time\nimport pickle\nimport pathlib\nimport gzip\n\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\n\nimport svmrank\n\nimport utilities\n\nfrom utilities_tf import load_batch_gcnn\n\n\ndef load_batch_flat(sample_files, feats_type, augment_feats, normalize_feats):\n cand_features = []\n cand_choices = []\n cand_scoress = []\n\n for i, filename in enumerate(sample_files):\n cand_states, cand_scores, cand_choice = utilities.load_flat_samples(filename, feats_type, 'scores', augment_feats, normalize_feats)\n\n cand_features.append(cand_states)\n cand_choices.append(cand_choice)\n cand_scoress.append(cand_scores)\n\n n_cands_per_sample = [v.shape[0] for v in cand_features]\n\n cand_features = np.concatenate(cand_features, axis=0).astype(np.float32, copy=False)\n cand_choices = np.asarray(cand_choices).astype(np.int32, copy=False)\n cand_scoress = np.concatenate(cand_scoress, axis=0).astype(np.float32, copy=False)\n n_cands_per_sample = np.asarray(n_cands_per_sample).astype(np.int32, copy=False)\n\n return cand_features, n_cands_per_sample, cand_choices, cand_scoress\n\n\ndef padding(output, n_vars_per_sample, fill=-1e8):\n n_vars_max = tf.reduce_max(n_vars_per_sample)\n\n output = tf.split(\n value=output,\n num_or_size_splits=n_vars_per_sample,\n axis=1,\n )\n output = tf.concat([\n tf.pad(\n x,\n paddings=[[0, 0], [0, n_vars_max - tf.shape(x)[1]]],\n mode='CONSTANT',\n constant_values=fill)\n for x in output\n ], axis=0)\n\n return output\n\n\ndef process(policy, dataloader, top_k):\n mean_kacc = np.zeros(len(top_k))\n\n n_samples_processed = 0\n for batch in dataloader:\n\n if policy['type'] == 'gcnn':\n c, ei, ev, v, n_cs, n_vs, n_cands, cands, best_cands, cand_scores = batch\n\n pred_scores = policy['model']((c, ei, ev, v, tf.reduce_sum(n_cs, keepdims=True), tf.reduce_sum(n_vs, keepdims=True)), tf.convert_to_tensor(False))\n\n # filter candidate variables\n pred_scores = tf.expand_dims(tf.gather(tf.squeeze(pred_scores, 0), cands), 0)\n\n elif policy['type'] == 'ml-competitor':\n cand_feats, n_cands, best_cands, cand_scores = batch\n\n # move to numpy\n cand_feats = cand_feats.numpy()\n n_cands = n_cands.numpy()\n\n # feature normalization\n cand_feats = (cand_feats - policy['feat_shift']) / policy['feat_scale']\n\n pred_scores = policy['model'].predict(cand_feats)\n\n # move back to TF\n pred_scores = tf.convert_to_tensor(pred_scores.reshape((1, -1)), dtype=tf.float32)\n\n # padding\n pred_scores = padding(pred_scores, n_cands)\n true_scores = padding(tf.reshape(cand_scores, (1, -1)), n_cands)\n true_bestscore = tf.reduce_max(true_scores, axis=-1, keepdims=True)\n\n assert all(true_bestscore.numpy() == np.take_along_axis(true_scores.numpy(), best_cands.numpy().reshape((-1, 1)), axis=1))\n\n kacc = []\n for k in top_k:\n pred_top_k = tf.nn.top_k(pred_scores, k=k)[1].numpy()\n pred_top_k_true_scores = np.take_along_axis(true_scores.numpy(), pred_top_k, axis=1)\n kacc.append(np.mean(np.any(pred_top_k_true_scores == true_bestscore.numpy(), axis=1)))\n kacc = np.asarray(kacc)\n\n batch_size = int(n_cands.shape[0])\n mean_kacc += kacc * batch_size\n n_samples_processed += batch_size\n\n mean_kacc /= n_samples_processed\n\n return mean_kacc\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'problem',\n help='MILP instance type to process.',\n choices=['setcover', 'cauctions', 'facilities', 'indset'],\n )\n parser.add_argument(\n '-g', 
'--gpu',\n help='CUDA GPU id (-1 for CPU).',\n type=int,\n default=0,\n )\n args = parser.parse_args()\n\n print(f\"problem: {args.problem}\")\n print(f\"gpu: {args.gpu}\")\n\n os.makedirs(\"results\", exist_ok=True)\n result_file = f\"results/{args.problem}_validation_{time.strftime('%Y%m%d-%H%M%S')}.csv\"\n seeds = [0, 1, 2, 3, 4]\n gcnn_models = ['baseline']\n other_models = ['extratrees_gcnn_agg', 'lambdamart_khalil', 'svmrank_khalil']\n test_batch_size = 128\n top_k = [1, 3, 5, 10]\n\n problem_folders = {\n 'setcover': 'setcover/500r_1000c_0.05d',\n 'cauctions': 'cauctions/100_500',\n 'facilities': 'facilities/100_100_5',\n 'indset': 'indset/500_4',\n }\n problem_folder = problem_folders[args.problem]\n\n if args.problem == 'setcover':\n gcnn_models += ['mean_convolution', 'no_prenorm']\n\n result_file = f\"results/{args.problem}_test_{time.strftime('%Y%m%d-%H%M%S')}\"\n\n result_file = result_file + '.csv'\n os.makedirs('results', exist_ok=True)\n\n ### TENSORFLOW SETUP ###\n if args.gpu == -1:\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n else:\n os.environ['CUDA_VISIBLE_DEVICES'] = f'{args.gpu}'\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n tf.enable_eager_execution(config)\n tf.executing_eagerly()\n\n test_files = list(pathlib.Path(f\"data/samples/{problem_folder}/test\").glob('sample_*.pkl'))\n test_files = [str(x) for x in test_files]\n\n print(f\"{len(test_files)} test samples\")\n\n evaluated_policies = [['gcnn', model] for model in gcnn_models] + \\\n [['ml-competitor', model] for model in other_models]\n\n fieldnames = [\n 'policy',\n 'seed',\n ] + [\n f'acc@{k}' for k in top_k\n ]\n with open(result_file, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for policy_type, policy_name in evaluated_policies:\n print(f\"{policy_type}:{policy_name}...\")\n for seed in seeds:\n rng = np.random.RandomState(seed)\n tf.set_random_seed(rng.randint(np.iinfo(int).max))\n\n policy = {}\n policy['name'] = policy_name\n policy['type'] = policy_type\n\n if policy['type'] == 'gcnn':\n # load model\n sys.path.insert(0, os.path.abspath(f\"models/{policy['name']}\"))\n import model\n importlib.reload(model)\n del sys.path[0]\n policy['model'] = model.GCNPolicy()\n policy['model'].restore_state(f\"trained_models/{args.problem}/{policy['name']}/{seed}/best_params.pkl\")\n policy['model'].call = tfe.defun(policy['model'].call, input_signature=policy['model'].input_signature)\n policy['batch_datatypes'] = [tf.float32, tf.int32, tf.float32,\n tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32]\n policy['batch_fun'] = load_batch_gcnn\n else:\n # load feature normalization parameters\n try:\n with open(f\"trained_models/{args.problem}/{policy['name']}/{seed}/normalization.pkl\", 'rb') as f:\n policy['feat_shift'], policy['feat_scale'] = pickle.load(f)\n except:\n policy['feat_shift'], policy['feat_scale'] = 0, 1\n\n # load model\n if policy_name.startswith('svmrank'):\n policy['model'] = svmrank.Model().read(f\"trained_models/{args.problem}/{policy['name']}/{seed}/model.txt\")\n else:\n with open(f\"trained_models/{args.problem}/{policy['name']}/{seed}/model.pkl\", 'rb') as f:\n policy['model'] = pickle.load(f)\n\n # load feature specifications\n with open(f\"trained_models/{args.problem}/{policy['name']}/{seed}/feat_specs.pkl\", 'rb') as f:\n feat_specs = pickle.load(f)\n\n policy['batch_datatypes'] = [tf.float32, tf.int32, tf.int32, tf.float32]\n policy['batch_fun'] = lambda x: 
load_batch_flat(x, feat_specs['type'], feat_specs['augment'], feat_specs['qbnorm'])\n\n test_data = tf.data.Dataset.from_tensor_slices(test_files)\n test_data = test_data.batch(test_batch_size)\n test_data = test_data.map(lambda x: tf.py_func(\n policy['batch_fun'], [x], policy['batch_datatypes']))\n test_data = test_data.prefetch(2)\n\n test_kacc = process(policy, test_data, top_k)\n print(f\" {seed} \" + \" \".join([f\"acc@{k}: {100*acc:4.1f}\" for k, acc in zip(top_k, test_kacc)]))\n\n writer.writerow({\n **{\n 'policy': f\"{policy['type']}:{policy['name']}\",\n 'seed': seed,\n },\n **{\n f'acc@{k}': test_kacc[i] for i, k in enumerate(top_k)\n },\n })\n csvfile.flush()\n" ]
[ [ "tensorflow.contrib.eager.defun", "tensorflow.enable_eager_execution", "tensorflow.reduce_max", "tensorflow.reshape", "tensorflow.shape", "tensorflow.nn.top_k", "numpy.asarray", "tensorflow.reduce_sum", "numpy.random.RandomState", "tensorflow.squeeze", "numpy.iinfo", "tensorflow.convert_to_tensor", "tensorflow.executing_eagerly", "numpy.concatenate", "tensorflow.split", "tensorflow.ConfigProto", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.py_func" ] ]
marpyr/forecast_predictability
[ "2285b37e20095ae6f67533595bcb0580882924a2" ]
[ "predictability_utils/utils/helpers.py" ]
[ "import numpy as np\n\ndef compute_anomaly_corrs(out_true, out_pred):\n \n anomaly_corrs = np.zeros(out_pred.shape[1])\n for i in range(anomaly_corrs.size):\n anomaly_corrs[i] = np.corrcoef(out_pred[:,i], out_true[:,i])[0,1]\n \n return anomaly_corrs\n\ndef split_train_data(train_months, test_months, train_years, test_years):\n\n def make_idx(months, years): # based on simple broadcasting\n return np.asarray(months).reshape(-1,1)+(12*np.asarray(years).flatten())\n\n idx_source_train = make_idx(train_months, train_years)\n idx_target_train = make_idx(test_months, train_years)\n\n idx_source_test = make_idx(train_months, test_years)\n idx_target_test = make_idx(test_months, test_years)\n\n return idx_source_train, idx_target_train, idx_source_test, idx_target_test" ]
[ [ "numpy.asarray", "numpy.zeros", "numpy.corrcoef" ] ]
dataiku/dss-plugin-model-error-analysis
[ "4c0f42a5c0aa1710005db3d81ca9bd9d7f829e6b" ]
[ "python-lib/dku_error_analysis_mpp/dku_error_visualizer.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom graphviz import Source\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom dku_error_analysis_mpp.dku_error_analyzer import DkuErrorAnalyzer\nfrom mealy import _BaseErrorVisualizer, ErrorAnalyzerConstants\nfrom dku_error_analysis_utils import safe_str, format_float\n\nimport logging\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO, format='Error Analysis Plugin | %(levelname)s - %(message)s')\n\nplt.rc('font', family=\"sans-serif\")\nSMALL_SIZE, MEDIUM_SIZE, BIGGER_SIZE = 8, 10, 12\nplt.rc('axes', titlesize=BIGGER_SIZE, labelsize=MEDIUM_SIZE)\nplt.rc('xtick', labelsize=SMALL_SIZE) \nplt.rc('ytick', labelsize=SMALL_SIZE)\nplt.rc('legend', fontsize=SMALL_SIZE)\nplt.rc(\"hatch\", color=\"white\", linewidth=4)\n\nclass DkuErrorVisualizer(_BaseErrorVisualizer):\n \"\"\"\n ErrorVisualizer provides visual utilities to analyze the error classifier in ErrorAnalyzer and DkuErrorAnalyzer.\n \"\"\"\n\n def __init__(self, error_analyzer):\n\n if not isinstance(error_analyzer, DkuErrorAnalyzer):\n raise TypeError('You need to input a DkuErrorAnalyzer object.')\n\n super(DkuErrorVisualizer, self).__init__(error_analyzer)\n\n self._tree = error_analyzer.tree\n\n def plot_error_tree(self, size=(50, 50)):\n \"\"\" Plot the graph of the decision tree\n Args:\n size (tuple): Size of the output plot as (width, length), in inches.\n\n \"\"\"\n\n return Source(self._tree.to_dot_string(size))\n\n def plot_feature_distributions_on_leaves(self, leaf_selector=None, top_k_features=ErrorAnalyzerConstants.TOP_K_FEATURES,\n show_global=True, show_class=False, rank_leaves_by=\"total_error_fraction\", nr_bins=10, figsize=(15, 10)):\n \"\"\" Return plot of error node feature distribution and compare to global baseline \"\"\"\n\n leaf_nodes = self._get_ranked_leaf_ids(leaf_selector, rank_leaves_by)\n ranked_features = self._tree.ranked_features[:top_k_features]\n nr_leaves, nr_features = len(leaf_nodes), len(ranked_features)\n logger.info(\"{} lea{} selected: {}\".format(nr_leaves,\n \"f\" if nr_leaves == 1 else \"ves\",\n leaf_nodes))\n logger.info(\"{} feature distribution{} plotted: {}\".format(nr_features,\n \"\" if nr_features == 1 else \"s\",\n [f[\"name\"] for f in ranked_features]))\n\n for leaf_id in leaf_nodes:\n leaf = self._tree.get_node(leaf_id)\n suptitle = 'Leaf {} ({}: {}'.format(leaf.id, leaf.probabilities[0][0], format_float(leaf.probabilities[0][1], 3))\n suptitle += ', {}: {})'.format(leaf.probabilities[1][0], format_float(leaf.probabilities[1][1], 3))\n for feature in ranked_features:\n feature_name = feature[\"name\"]\n\n leaf_stats = self._tree.get_stats(leaf.id, feature_name, nr_bins)\n feature_is_numerical = feature[\"numerical\"]\n bins = leaf_stats[\"bin_edge\"] if feature_is_numerical else leaf_stats[\"bin_value\"]\n\n if show_global:\n root_samples = self._tree.get_node(0).samples[0]\n root_stats = self._tree.get_stats(0, feature_name, nr_bins, bins) # TODO: optimize\n if show_class:\n root_hist_data = {}\n for class_value, bar_heights in root_stats[\"target_distrib\"].items():\n root_hist_data[class_value] = np.array(bar_heights)/root_samples\n else:\n root_hist_data, root_prediction = {}, self._tree.get_node(0).prediction\n root_hist_data[root_prediction] = np.array(root_stats[\"count\"])/root_samples\n else:\n root_hist_data = None\n\n if bins:\n leaf_hist_data = {}\n if show_class:\n for class_value, bar_heights in leaf_stats[\"target_distrib\"].items():\n 
leaf_hist_data[class_value] = np.array(bar_heights)/leaf.samples[0]\n else:\n leaf_hist_data = {leaf.prediction: np.array(leaf_stats[\"count\"])/leaf.samples[0]}\n else:\n leaf_hist_data = None\n logger.info(\"No values for the feature {} at the leaf {}\".format(feature_name, leaf.id))\n if show_global:\n bins = root_stats[\"bin_edge\"] if feature_is_numerical else root_stats[\"bin_value\"]\n\n x_ticks = range(len(bins))\n _BaseErrorVisualizer._add_new_plot(figsize, bins, x_ticks, feature_name, suptitle)\n _BaseErrorVisualizer._plot_feature_distribution(x_ticks, feature_is_numerical, leaf_hist_data, root_hist_data)\n\n plt.show()\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.rc" ] ]
SmallMunich/Smoke
[ "591a03bdb5cad962999914c9a97c7a8bed9e529b" ]
[ "smoke/data/build.py" ]
[ "import logging\nimport copy\nimport bisect\nimport numpy as np\n\nimport torch.utils.data\n\nfrom smoke.utils.comm import get_world_size\nfrom smoke.utils.imports import import_file\nfrom smoke.utils.envs import seed_all_rng\n\nfrom . import datasets as D\nfrom . import samplers\nfrom .transforms import build_transforms\nfrom .collate_batch import BatchCollator\n\n\ndef build_dataset(cfg, transforms, dataset_catalog, is_train=True):\n '''\n Args:\n dataset_list (list[str]): Contains the names of the datasets.\n transforms (callable): transforms to apply to each (image, target) sample\n dataset_catalog (DatasetCatalog): contains the information on how to\n construct a dataset.\n is_train (bool): whether to setup the dataset for training or testing\n\n Returns:\n\n '''\n dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST\n if not isinstance(dataset_list, (list, tuple)):\n raise RuntimeError(\n \"dataset_list should be a list of strings, got {}\".format(dataset_list)\n )\n datasets = []\n for dataset_name in dataset_list:\n data = dataset_catalog.get(dataset_name)\n factory = getattr(D, data[\"factory\"])\n args = data[\"args\"]\n\n args[\"cfg\"] = cfg\n args[\"is_train\"] = is_train\n args[\"transforms\"] = transforms\n # make dataset from factory\n dataset = factory(**args)\n datasets.append(dataset)\n\n # for testing, return a list of datasets\n if not is_train:\n return datasets\n\n # for training, concatenate all datasets into a single one\n dataset = datasets[0]\n if len(datasets) > 1:\n dataset = D.ConcatDataset(datasets)\n\n return [dataset]\n\n\ndef make_data_loader(cfg, is_train=True):\n num_gpus = get_world_size()\n if is_train:\n images_per_batch = cfg.SOLVER.IMS_PER_BATCH\n assert images_per_batch % num_gpus == 0, \\\n \"SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.\" \\\n .format(images_per_batch, num_gpus)\n\n images_per_gpu = images_per_batch // num_gpus\n else:\n images_per_batch = cfg.TEST.IMS_PER_BATCH\n assert images_per_batch % num_gpus == 0, \\\n \"SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used.\" \\\n .format(images_per_batch, num_gpus)\n\n images_per_gpu = images_per_batch // num_gpus\n\n # if images_per_gpu > 1:\n # logger = logging.getLogger(__name__)\n # logger.warning(\n # \"When using more than one image per GPU you may encounter \"\n # \"an out-of-memory (OOM) error if your GPU does not have \"\n # \"sufficient memory. If this happens, you can reduce \"\n # \"SOLVER.IMS_PER_BATCH (for training) or \"\n # \"TEST.IMS_PER_BATCH (for inference). For training, you must \"\n # \"also adjust the learning rate and schedule length according \"\n # \"to the linear scaling rule. See for example: \"\n # \"https://github.com/facebookresearch/Detectron/blob/master/configs/getting_started/tutorial_1gpu_e2e_faster_rcnn_R-50-FPN.yaml#L14\"\n # )\n\n # group images which have similar aspect ratio. 
In this case, we only\n # group in two cases: those with width / height > 1, and the other way around,\n # but the code supports more general grouping strategy\n aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []\n\n path_catalog = import_file(\n \"smoke.config.paths_catalog\", cfg.PATHS_CATALOG, True\n )\n DatasetCatalog = path_catalog.DatasetCatalog\n\n transforms = build_transforms(cfg, is_train)\n datasets = build_dataset(cfg, transforms, DatasetCatalog, is_train)\n\n data_loaders = []\n for dataset in datasets:\n sampler = samplers.TrainingSampler(len(dataset))\n batch_sampler = torch.utils.data.sampler.BatchSampler(\n sampler, images_per_gpu, drop_last=True\n )\n collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)\n num_workers = cfg.DATALOADER.NUM_WORKERS\n # import pdb; pdb.set_trace()\n data_loader = torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=batch_sampler,\n collate_fn=collator,\n worker_init_fn=worker_init_reset_seed,\n )\n data_loaders.append(data_loader)\n\n if is_train:\n # during training, a single (possibly concatenated) data_loader is returned\n assert len(data_loaders) == 1\n return data_loaders[0]\n return data_loaders\n\n\ndef build_test_loader(cfg, is_train=False):\n path_catalog = import_file(\n \"smoke.config.paths_catalog\", cfg.PATHS_CATALOG, True\n )\n DatasetCatalog = path_catalog.DatasetCatalog\n\n transforms = build_transforms(cfg, is_train)\n datasets = build_dataset(cfg, transforms, DatasetCatalog, is_train)\n\n data_loaders = []\n for dataset in datasets:\n sampler = samplers.InferenceSampler(len(dataset))\n batch_sampler = torch.utils.data.sampler.BatchSampler(\n sampler, 1, drop_last=False\n )\n collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)\n num_workers = cfg.DATALOADER.NUM_WORKERS\n data_loader = torch.utils.data.DataLoader(\n dataset,\n num_workers=num_workers,\n batch_sampler=batch_sampler,\n collate_fn=collator,\n )\n data_loaders.append(data_loader)\n\n # Origin is data_loader, Now I think this should be data_loaders\n return data_loader\n\n\ndef trivial_batch_collator(batch):\n \"\"\"\n A batch collator that does nothing.\n \"\"\"\n return batch\n\n\ndef worker_init_reset_seed(worker_id):\n seed_all_rng(np.random.randint(2 ** 31) + worker_id)\n" ]
[ [ "numpy.random.randint" ] ]
aalekhpatel07/retworkx
[ "ae93fcab17d55bc259476c65a677221b4177870a" ]
[ "tests/graph/test_floyd_warshall.py" ]
[ "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport unittest\n\nimport numpy\n\nimport retworkx\n\n\nclass TestFloydWarshall(unittest.TestCase):\n parallel_threshold = 300\n\n def test_vs_dijkstra_all_pairs(self):\n graph = retworkx.PyGraph()\n a = graph.add_node(\"A\")\n b = graph.add_node(\"B\")\n c = graph.add_node(\"C\")\n d = graph.add_node(\"D\")\n e = graph.add_node(\"E\")\n f = graph.add_node(\"F\")\n edge_list = [\n (a, b, 7),\n (c, a, 9),\n (a, d, 14),\n (b, c, 10),\n (d, c, 2),\n (d, e, 9),\n (b, f, 15),\n (c, f, 11),\n (e, f, 6),\n ]\n graph.add_edges_from(edge_list)\n\n dijkstra_lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(\n graph, float\n )\n\n expected = {k: {**v, k: 0.0} for k, v in dijkstra_lengths.items()}\n\n result = retworkx.graph_floyd_warshall(\n graph, float, parallel_threshold=self.parallel_threshold\n )\n\n self.assertEqual(result, expected)\n\n def test_vs_dijkstra_all_pairs_with_node_removal(self):\n graph = retworkx.PyGraph()\n a = graph.add_node(\"A\")\n b = graph.add_node(\"B\")\n c = graph.add_node(\"C\")\n d = graph.add_node(\"D\")\n e = graph.add_node(\"E\")\n f = graph.add_node(\"F\")\n edge_list = [\n (a, b, 7),\n (c, a, 9),\n (a, d, 14),\n (b, c, 10),\n (d, c, 2),\n (d, e, 9),\n (b, f, 15),\n (c, f, 11),\n (e, f, 6),\n ]\n graph.add_edges_from(edge_list)\n graph.remove_node(d)\n\n dijkstra_lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(\n graph, float\n )\n\n expected = {k: {**v, k: 0.0} for k, v in dijkstra_lengths.items()}\n\n result = retworkx.graph_floyd_warshall(\n graph, float, parallel_threshold=self.parallel_threshold\n )\n\n self.assertEqual(result, expected)\n\n def test_floyd_warshall_empty_graph(self):\n graph = retworkx.PyGraph()\n self.assertEqual({}, retworkx.graph_floyd_warshall(graph, float))\n\n def test_floyd_warshall_graph_no_edges(self):\n graph = retworkx.PyGraph()\n graph.add_nodes_from(list(range(1000)))\n expected = {x: {} for x in range(1000)}\n self.assertEqual(\n expected,\n retworkx.graph_floyd_warshall(graph, float),\n )\n\n def test_floyd_warshall_numpy_three_edges(self):\n graph = retworkx.PyGraph()\n graph.add_nodes_from(list(range(6)))\n weights = [2, 12, 1, 5, 1]\n graph.add_edges_from([(i, i + 1, weights[i]) for i in range(5)])\n graph.add_edge(5, 0, 10)\n dist = retworkx.graph_floyd_warshall_numpy(\n graph, lambda x: x, parallel_threshold=self.parallel_threshold\n )\n self.assertEqual(dist[0, 3], 15)\n self.assertEqual(dist[3, 0], 15)\n\n def test_weighted_numpy_two_edges(self):\n graph = retworkx.PyGraph()\n graph.add_nodes_from(list(range(8)))\n graph.add_edges_from(\n [\n (0, 1, 2),\n (1, 2, 2),\n (2, 3, 1),\n (3, 4, 1),\n (4, 5, 1),\n (5, 6, 1),\n (6, 7, 1),\n (7, 0, 1),\n ]\n )\n dist = retworkx.graph_floyd_warshall_numpy(\n graph, lambda x: x, parallel_threshold=self.parallel_threshold\n )\n self.assertEqual(dist[0, 2], 4)\n self.assertEqual(dist[2, 0], 4)\n\n def test_weighted_numpy_negative_cycle(self):\n graph = retworkx.PyGraph()\n graph.add_nodes_from(list(range(4)))\n 
graph.add_edges_from(\n [\n (0, 1, 1),\n (1, 2, -1),\n (2, 3, -1),\n (3, 0, -1),\n ]\n )\n dist = retworkx.graph_floyd_warshall_numpy(\n graph, lambda x: x, parallel_threshold=self.parallel_threshold\n )\n self.assertTrue(numpy.all(numpy.diag(dist) < 0))\n\n def test_floyd_warshall_numpy_cycle(self):\n graph = retworkx.PyGraph()\n graph.add_nodes_from(list(range(7)))\n graph.add_edges_from_no_data(\n [(0, 1), (0, 6), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)]\n )\n dist = retworkx.graph_floyd_warshall_numpy(\n graph, lambda x: 1, parallel_threshold=self.parallel_threshold\n )\n self.assertEqual(dist[0, 3], 3)\n self.assertEqual(dist[0, 4], 3)\n\n def test_numpy_no_edges(self):\n graph = retworkx.PyGraph()\n graph.add_nodes_from(list(range(4)))\n dist = retworkx.graph_floyd_warshall_numpy(\n graph, lambda x: x, parallel_threshold=self.parallel_threshold\n )\n expected = numpy.full((4, 4), numpy.inf)\n numpy.fill_diagonal(expected, 0)\n self.assertTrue(numpy.array_equal(dist, expected))\n\n def test_floyd_warshall_numpy_graph_cycle_with_removals(self):\n graph = retworkx.PyGraph()\n graph.add_nodes_from(list(range(8)))\n graph.remove_node(0)\n graph.add_edges_from_no_data(\n [(1, 2), (1, 7), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)]\n )\n dist = retworkx.graph_floyd_warshall_numpy(\n graph, lambda x: 1, parallel_threshold=self.parallel_threshold\n )\n self.assertEqual(dist[0, 3], 3)\n self.assertEqual(dist[0, 4], 3)\n\n def test_floyd_warshall_numpy_graph_cycle_no_weight_fn(self):\n graph = retworkx.PyGraph()\n graph.add_nodes_from(list(range(8)))\n graph.remove_node(0)\n graph.add_edges_from_no_data(\n [(1, 2), (1, 7), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)]\n )\n dist = retworkx.graph_floyd_warshall_numpy(graph)\n self.assertEqual(dist[0, 3], 3)\n self.assertEqual(dist[0, 4], 3)\n\n def test_floyd_warshall_numpy_graph_cycle_default_weight(self):\n graph = retworkx.PyGraph()\n graph.add_nodes_from(list(range(8)))\n graph.remove_node(0)\n graph.add_edges_from_no_data(\n [(1, 2), (1, 7), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)]\n )\n dist = retworkx.graph_floyd_warshall_numpy(\n graph, default_weight=2, parallel_threshold=self.parallel_threshold\n )\n self.assertEqual(dist[0, 3], 6)\n self.assertEqual(dist[0, 4], 6)\n\n\nclass TestParallelFloydWarshall(TestFloydWarshall):\n parallel_threshold = 0\n" ]
[ [ "numpy.diag", "numpy.full", "numpy.fill_diagonal", "numpy.array_equal" ] ]
melfm/lsdr
[ "36b0a85e970fdcaae828eeff6c147432aa767c93" ]
[ "lsdr/envs/analysis.py" ]
[ "import numpy as np\nimport torch\nimport matplotlib.pyplot as plt\nimport os\nimport math\nimport scipy.stats as stats\nimport lsdr.envs.environment_sampler as env_sampler\nfrom enum import IntEnum\n\n\n############################\n# Optimization Loss Opt\n############################\nclass Objectives(IntEnum):\n REWARDS = 1\n KL_OPT = 2\n REW_AND_KL = 3\n\n\ndef reward_function(x):\n return np.exp(-(x-20)**2)\n\ndef reward_function_v2(x):\n\n return np.sin(np.sqrt(x**2))\n\ndef calculate_reward(x):\n\n return reward_function(x)\n\ndef setup_distributions():\n\n ##############################\n # Initial distribution configs\n ##############################\n test_params = [\n np.array([-30.0, 50.0])\n ]\n\n # This can be modified for the initial distributions\n # to be different.\n ranges = np.asarray(test_params)\n mean = ranges.mean(-1)\n covar = (((ranges[:, 1] - ranges[:, 0])**2.0) / 12.0) * np.eye(\n ranges.shape[0])\n mu_train, L_train = mean, np.linalg.cholesky(covar)\n\n dist_params = [mu_train, L_train]\n\n\n sampler = env_sampler.init_env_sampler(\n 'hopper',\n seed=0,\n experiment_id='test_kl_div_loss_0',\n init_dist_params=dist_params,\n dist_type='gaussian',\n test_dist_params=None)\n\n ############################\n # Train Distribution\n ############################\n p_train = sampler.train_dist\n\n ############################\n # Test Distribution\n ############################\n\n ranges = np.asarray(test_params)\n mean = ranges.mean(-1)\n covar = (((ranges[:, 1] - ranges[:, 0])**2.0) / 12.0) * np.eye(\n ranges.shape[0])\n mu_test, L_test = mean, np.linalg.cholesky(covar)\n\n mu_test = torch.tensor(mu_test)\n L_test = torch.tensor(L_test)\n\n mu_test = mu_test.float().detach().requires_grad_(False)\n L_test = L_test.float().detach().requires_grad_(False)\n p_test = torch.distributions.MultivariateNormal(mu_test,\n scale_tril=L_test)\n\n train_mean = p_train.mean.detach()\n train_std = (p_train._unbroadcasted_scale_tril).diag().detach()\n test_mean = p_test.mean.detach()\n test_std = (p_test._unbroadcasted_scale_tril).diag().detach()\n\n print('Initial Distributions')\n print('Train Distribution Mean ', train_mean)\n print('Train Distribution STD ', train_std)\n print('Test Distribution Mean ', test_mean)\n print('Test Distribution STD ', test_std)\n\n ############################\n # Plot Initial Distribution\n ############################\n\n plot_distrs(train_mean, train_std,\n test_mean, test_std,\n plot_name='initial_train_distr')\n\n return sampler, p_train, p_test\n\n\ndef plot_distrs(train_mean, train_var,\n test_mean, test_var,\n plot_name='distributions'):\n\n plt.figure()\n mu = train_mean\n variance = train_var\n sigma = math.sqrt(variance)\n x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)\n plt.plot(x, stats.norm.pdf(x, mu, sigma), color='green',\n label='$p_{\\phi}(z)$',\n linestyle='-.')\n mu = test_mean\n variance = test_var\n sigma = math.sqrt(variance)\n x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)\n plt.plot(x, stats.norm.pdf(x, mu, sigma), color='red', label='$p(z)$')\n\n rew_func_range = np.arange(-20, 50, 1)\n plt.plot(rew_func_range, calculate_reward(rew_func_range),\n color='orange',\n label='$R(\\Theta, z)$')\n\n plt.legend(loc='upper left')\n\n res_dir = 'grad_analysis'\n if not os.path.exists(res_dir):\n os.makedirs(res_dir)\n plotname = res_dir + '/' + plot_name + '.png'\n plt.savefig(plotname)\n\n\ndef optimize_distribution(sampler, p_train, p_test, objective_opt):\n epochs, n_samples = 10000, 1000\n\n alpha = 1e-5\n\n opt = 
torch.optim.Adam(sampler.params, 1e-2)\n\n mu_grads = []\n var_grads = []\n\n def store_mu_grad_rew(grad):\n mu_grads.append(np.copy(grad))\n\n def store_tril_grad_rew(grad):\n var_grads.append(np.copy(grad))\n\n for _ in range(epochs):\n opt.zero_grad()\n\n ####################\n # Sample from p_test\n ####################\n z = p_test.sample(torch.Size([n_samples]))\n contexts = p_train.sample(torch.Size([n_samples]))\n\n ################\n # Eval Log probs\n ################\n log_p_train = p_train.log_prob(z)\n log_p_test = p_test.log_prob(z)\n\n ################\n # Calculate KL\n ################\n kl_samples = log_p_test - log_p_train\n kl_loss = kl_samples.mean(0)\n\n #######################\n # Calculate Reward term\n #######################\n log_probs_context = p_train.log_prob(contexts)\n reward_loss = (calculate_reward(contexts) * log_probs_context).mean(0)\n\n if objective_opt == Objectives.REWARDS:\n # For this to converge to the reward function,\n # need to change `z` sampling to be from train\n # distribution.\n total_loss = - reward_loss\n\n elif objective_opt == Objectives.KL_OPT:\n total_loss = kl_loss\n\n elif objective_opt == Objectives.REW_AND_KL:\n total_loss = (-(reward_loss) + (alpha*kl_loss))\n\n else:\n raise ValueError('Invalid op')\n\n total_loss.mean().backward()\n opt.step()\n\n train_mean = p_train.mean.detach()\n train_std = (p_train._unbroadcasted_scale_tril).diag().detach()\n test_mean = p_test.mean.detach()\n test_std = (p_test._unbroadcasted_scale_tril).diag().detach()\n\n print('Updated Distributions')\n print('######################')\n print('Train Distribution Mean ', train_mean)\n print('Train Distribution STD ', train_std)\n print('Test Distribution Mean ', test_mean)\n print('Test Distribution STD ', test_std)\n\n plot_distrs(train_mean, train_std,\n test_mean, test_std,\n plot_name='final_distributions')\n\n\nif __name__ == '__main__':\n sampler, p_train, p_test = setup_distributions()\n\n # objective_opt = Objectives.REWARDS\n # objective_opt = Objectives.KL_OPT\n objective_opt = Objectives.REW_AND_KL\n\n optimize_distribution(sampler,\n p_train,\n p_test,\n objective_opt)\n" ]
[ [ "numpy.eye", "matplotlib.pyplot.legend", "torch.Size", "numpy.array", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "scipy.stats.norm.pdf", "torch.tensor", "numpy.asarray", "numpy.exp", "numpy.copy", "numpy.arange", "torch.optim.Adam", "numpy.linalg.cholesky", "numpy.sqrt", "numpy.linspace", "torch.distributions.MultivariateNormal" ] ]
ImperialCollegeLondon/al_cfd_benchmark
[ "03b51d7e7d4def804e2ac18084deee8401636851" ]
[ "examples/pitz_daily/pitz_daily_runner.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Pitz Daily\n\nThis case uses the pitzDaily example from the OpenFOAM tutorials \nand varies two parameters: Reynolds number and height of the inlet. \nIt returns the pressure difference between inlet and outlet.\n\n\"\"\"\n\nimport numpy as np\nfrom active_learning_cfd.cfd_case import CFDCase\n\nimport os\n\n\nclass PitzDaily(CFDCase):\n mesher = \"blockMesh\"\n solver = \"simpleFoam\"\n template = \"pitzDaily\"\n parameter_names = (\"reynolds\", \"entryHeight\")\n output_list = ((\"deltaP\", \"subtract\\(p\\) = (.+)\"),)\n\n def __call__(self, parameters):\n assert len(parameters) == len(self.parameter_names)\n parameter_dict = dict(zip(self.parameter_names, parameters))\n parameter_dict[\"reynolds\"] = np.power(10, parameter_dict[\"reynolds\"])\n self.solve(parameter_dict)\n return self.results[\"deltaP\"]\n\n\nif __name__ == \"__main__\":\n case = PitzDaily()\n reynolds = 50800.0\n entryHeight = 25.4\n print(\"deltaP = {}\".format(case([np.log10(reynolds), entryHeight])))\n" ]
[ [ "numpy.power", "numpy.log10" ] ]
li-ziang/cogdl
[ "60022d3334e3abae2d2a505e6e049a26acf10f39", "60022d3334e3abae2d2a505e6e049a26acf10f39", "60022d3334e3abae2d2a505e6e049a26acf10f39" ]
[ "cogdl/oag/dual_position_bert_model.py", "cogdl/utils/evaluator.py", "cogdl/models/nn/diffpool.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss\nimport logging\nfrom .bert_model import BertPreTrainedModel, BertPreTrainingHeads, BertModel, BertEncoder, BertPooler, BertLayerNorm\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DualPositionBertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\"\"\"\n\n def __init__(self, config):\n super(DualPositionBertEmbeddings, self).__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.position_embeddings_second = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load\n # any TensorFlow checkpoint file\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids, token_type_ids, position_ids, position_ids_second):\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n words_embeddings = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n position_embeddings_second = self.position_embeddings(position_ids_second)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = words_embeddings + position_embeddings + position_embeddings_second + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass DualPositionBertModel(BertModel):\n def __init__(self, config):\n super(DualPositionBertModel, self).__init__(config)\n self.embeddings = DualPositionBertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n self.apply(self.init_bert_weights)\n logger.info(\"Init BERT pretrain model\")\n\n def forward(\n self,\n input_ids,\n token_type_ids=None,\n attention_mask=None,\n output_all_encoded_layers=True,\n checkpoint_activations=False,\n position_ids=None,\n position_ids_second=None,\n ):\n if attention_mask is None:\n attention_mask = torch.ones_like(input_ids)\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n if len(attention_mask.shape) == 2:\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n elif len(attention_mask.shape) == 3:\n extended_attention_mask = attention_mask.unsqueeze(1)\n else:\n raise Exception(\"invalid attention mask shape! 
shape: %s\" % (attention_mask.shape))\n\n extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n embedding_output = self.embeddings(input_ids, token_type_ids, position_ids, position_ids_second)\n encoded_layers = self.encoder(\n embedding_output,\n extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers,\n checkpoint_activations=checkpoint_activations,\n )\n sequence_output = encoded_layers[-1]\n pooled_output = self.pooler(sequence_output)\n\n if not output_all_encoded_layers:\n encoded_layers = encoded_layers[-1]\n return encoded_layers, pooled_output\n\n\nclass DualPositionBertForPreTrainingPreLN(BertPreTrainedModel):\n \"\"\"BERT model with pre-training heads and dual position\n Params:\n config: a BertConfig class instance with the configuration to build a new model.\n\n \"\"\"\n\n def __init__(self, config):\n super(DualPositionBertForPreTrainingPreLN, self).__init__(config)\n self.bert = DualPositionBertModel(config)\n self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)\n self.apply(self.init_bert_weights)\n\n def forward(\n self,\n input_ids,\n token_type_ids=None,\n attention_mask=None,\n masked_lm_labels=None,\n position_ids=None,\n position_ids_second=None,\n log=True,\n ):\n sequence_output, pooled_output = self.bert(\n input_ids=input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n output_all_encoded_layers=False,\n checkpoint_activations=False,\n position_ids=position_ids,\n position_ids_second=position_ids_second,\n )\n\n if masked_lm_labels is not None:\n # filter out all masked labels.\n masked_token_indexes = torch.nonzero((masked_lm_labels + 1).view(-1)).view(-1)\n prediction_scores, _ = self.cls(sequence_output, pooled_output, masked_token_indexes)\n target = torch.index_select(masked_lm_labels.view(-1), 0, masked_token_indexes)\n\n loss_fct = CrossEntropyLoss(ignore_index=-1)\n masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), target)\n return masked_lm_loss\n else:\n prediction_scores, _ = self.cls(sequence_output, pooled_output)\n return prediction_scores\n", "from typing import Union, Callable\nimport numpy as np\nimport warnings\n\nimport torch\nimport torch.nn as nn\n\nfrom sklearn.metrics import f1_score\n\n\ndef setup_evaluator(metric: Union[str, Callable]):\n if isinstance(metric, str):\n metric = metric.lower()\n if metric == \"acc\" or metric == \"accuracy\":\n return Accuracy()\n elif metric == \"multilabel_microf1\" or \"microf1\" or \"micro_f1\":\n return MultiLabelMicroF1()\n elif metric == \"multiclass_microf1\":\n return MultiClassMicroF1()\n else:\n raise NotImplementedError\n else:\n return BaseEvaluator(metric)\n\n\nclass BaseEvaluator(object):\n def __init__(self, eval_func):\n self.y_pred = list()\n self.y_true = list()\n self.eval_func = eval_func\n\n def __call__(self, y_pred, y_true):\n metric = self.eval_func(y_pred, y_true)\n self.y_pred.append(y_pred.cpu())\n self.y_true.append(y_true.cpu())\n return metric\n\n def clear(self):\n self.y_pred = list()\n self.y_true = list()\n\n def evaluate(self):\n if len(self.y_pred) > 0:\n y_pred = torch.cat(self.y_pred, dim=0)\n y_true = torch.cat(self.y_true, dim=0)\n self.clear()\n return self.eval_func(y_pred, y_true)\n return 0\n\n\nclass Accuracy(object):\n def __init__(self, mini_batch=False):\n super(Accuracy, self).__init__()\n self.mini_batch = mini_batch\n 
self.tp = list()\n self.total = list()\n\n def __call__(self, y_pred, y_true):\n pred = (y_pred.argmax(1) == y_true).int()\n tp = pred.sum().int()\n total = pred.shape[0]\n if torch.is_tensor(tp):\n tp = tp.item()\n\n # if self.mini_batch:\n self.tp.append(tp)\n self.total.append(total)\n\n return tp / total\n\n def evaluate(self):\n if len(self.tp) > 0:\n tp = np.sum(self.tp)\n total = np.sum(self.total)\n self.tp = list()\n self.total = list()\n return tp / total\n warnings.warn(\"pre-computing list is empty\")\n return 0\n\n def clear(self):\n self.tp = list()\n self.total = list()\n\n\nclass MultiLabelMicroF1(Accuracy):\n def __init__(self, mini_batch=False):\n super(MultiLabelMicroF1, self).__init__(mini_batch)\n\n def __call__(self, y_pred, y_true, sigmoid=False):\n if sigmoid:\n border = 0.5\n else:\n border = 0\n y_pred[y_pred >= border] = 1\n y_pred[y_pred < border] = 0\n tp = (y_pred * y_true).sum().to(torch.float32).item()\n fp = ((1 - y_true) * y_pred).sum().to(torch.float32).item()\n fn = (y_true * (1 - y_pred)).sum().to(torch.float32).item()\n total = tp + fp + fn\n\n # if self.mini_batch:\n self.tp.append(int(tp))\n self.total.append(int(total))\n\n if total == 0:\n return 0\n return float(tp / total)\n\n\nclass MultiClassMicroF1(Accuracy):\n def __init__(self, mini_batch=False):\n super(MultiClassMicroF1, self).__init__(mini_batch)\n\n\nclass CrossEntropyLoss(nn.Module):\n def __call__(self, y_pred, y_true):\n y_true = y_true.long()\n y_pred = torch.nn.functional.log_softmax(y_pred, dim=-1)\n return torch.nn.functional.nll_loss(y_pred, y_true)\n\n\nclass BCEWithLogitsLoss(nn.Module):\n def __call__(self, y_pred, y_true, reduction=\"mean\"):\n y_true = y_true.float()\n loss = torch.nn.BCEWithLogitsLoss(reduction=reduction)(y_pred, y_true)\n if reduction == \"none\":\n loss = torch.sum(torch.mean(loss, dim=0))\n return loss\n\n\ndef multilabel_f1(y_pred, y_true, sigmoid=False):\n if sigmoid:\n y_pred[y_pred > 0.5] = 1\n y_pred[y_pred <= 0.5] = 0\n else:\n y_pred[y_pred > 0] = 1\n y_pred[y_pred <= 0] = 0\n tp = (y_true * y_pred).sum().to(torch.float32)\n # tn = ((1 - y_true) * (1 - y_pred)).sum().to(torch.float32)\n fp = ((1 - y_true) * y_pred).sum().to(torch.float32)\n fn = (y_true * (1 - y_pred)).sum().to(torch.float32)\n\n epsilon = 1e-7\n precision = tp / (tp + fp + epsilon)\n recall = tp / (tp + fn + epsilon)\n f1 = (2 * precision * recall) / (precision + recall + epsilon)\n return f1.item()\n\n\ndef multiclass_f1(y_pred, y_true):\n y_true = y_true.squeeze().long()\n preds = y_pred.max(1)[1]\n preds = preds.cpu().detach().numpy()\n labels = y_true.cpu().detach().numpy()\n micro = f1_score(labels, preds, average=\"micro\")\n return micro\n\n\ndef accuracy(y_pred, y_true):\n y_true = y_true.squeeze().long()\n preds = y_pred.max(1)[1].type_as(y_true)\n correct = preds.eq(y_true).double()\n correct = correct.sum().item()\n return correct / len(y_true)\n\n\ndef cross_entropy_loss(y_pred, y_true):\n y_true = y_true.long()\n y_pred = torch.nn.functional.log_softmax(y_pred, dim=-1)\n return torch.nn.functional.nll_loss(y_pred, y_true)\n\n\ndef bce_with_logits_loss(y_pred, y_true, reduction=\"mean\"):\n y_true = y_true.float()\n loss = torch.nn.BCEWithLogitsLoss(reduction=reduction)(y_pred, y_true)\n if reduction == \"none\":\n loss = torch.sum(torch.mean(loss, dim=0))\n return loss\n", "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom scipy.linalg import block_diag\n\nfrom cogdl.layers import SAGELayer\nfrom cogdl.utils import 
split_dataset_general\n\nfrom .. import BaseModel\n\n\nclass EntropyLoss(nn.Module):\n # Return Scalar\n def forward(self, adj, anext, s_l):\n # entropy.mean(-1).mean(-1): 1/n in node and batch\n # entropy = (torch.distributions.Categorical(\n # probs=s_l).entropy()).sum(-1).mean(-1)\n entropy = (torch.distributions.Categorical(probs=s_l).entropy()).mean()\n assert not torch.isnan(entropy)\n return entropy\n\n\nclass LinkPredLoss(nn.Module):\n def forward(self, adj, anext, s_l):\n link_pred_loss = (adj - s_l.matmul(s_l.transpose(-1, -2))).norm(dim=(1, 2))\n link_pred_loss = link_pred_loss / (adj.size(1) * adj.size(2))\n return link_pred_loss.mean()\n\n\nclass GraphSAGE(nn.Module):\n r\"\"\"GraphSAGE from `\"Inductive Representation Learning on Large Graphs\" <https://arxiv.org/pdf/1706.02216.pdf>`__.\n\n ..math::\n h^{i+1}_{\\mathcal{N}(v)}=AGGREGATE_{k}(h_{u}^{k})\n h^{k+1}_{v} = \\sigma(\\mathbf{W}^{k}·CONCAT(h_{v}^{k}, h_{\\mathcal{N}(v)}))\n\n Args:\n in_feats (int) : Size of each input sample.\n hidden_dim (int) : Size of hidden layer dimension.\n out_feats (int) : Size of each output sample.\n num_layers (int) : Number of GraphSAGE Layers.\n dropout (float, optional) : Size of dropout, default: ``0.5``.\n normalize (bool, optional) : Normalze features after each layer if True, default: ``True``.\n \"\"\"\n\n def __init__(\n self, in_feats, hidden_dim, out_feats, num_layers, dropout=0.5, normalize=False, concat=False, use_bn=False\n ):\n super(GraphSAGE, self).__init__()\n self.convlist = nn.ModuleList()\n self.bn_list = nn.ModuleList()\n self.num_layers = num_layers\n self.dropout = dropout\n self.use_bn = use_bn\n aggr = \"concat\" if concat else \"mean\"\n if num_layers == 1:\n self.convlist.append(SAGELayer(in_feats, out_feats, normalize, aggr))\n else:\n self.convlist.append(SAGELayer(in_feats, hidden_dim, normalize, aggr))\n if use_bn:\n self.bn_list.append(nn.BatchNorm1d(hidden_dim))\n for _ in range(num_layers - 2):\n self.convlist.append(SAGELayer(hidden_dim, hidden_dim, normalize, aggr))\n if use_bn:\n self.bn_list.append(nn.BatchNorm1d(hidden_dim))\n self.convlist.append(SAGELayer(hidden_dim, out_feats, normalize, aggr))\n\n def forward(self, graph, x):\n h = x\n for i in range(self.num_layers - 1):\n h = F.dropout(h, p=self.dropout, training=self.training)\n h = self.convlist[i](graph, h)\n if self.use_bn:\n h = self.bn_list[i](h)\n return self.convlist[self.num_layers - 1](graph, h)\n\n\nclass BatchedGraphSAGE(nn.Module):\n r\"\"\"GraphSAGE with mini-batch\n\n Args:\n in_feats (int) : Size of each input sample.\n out_feats (int) : Size of each output sample.\n use_bn (bool) : Apply batch normalization if True, default: ``True``.\n self_loop (bool) : Add self loop if True, default: ``True``.\n \"\"\"\n\n def __init__(self, in_feats, out_feats, use_bn=True, self_loop=True):\n super(BatchedGraphSAGE, self).__init__()\n self.self_loop = self_loop\n self.use_bn = use_bn\n self.weight = nn.Linear(in_feats, out_feats, bias=True)\n\n nn.init.xavier_uniform_(self.weight.weight.data, gain=nn.init.calculate_gain(\"relu\"))\n\n def forward(self, x, adj):\n device = x.device\n if self.self_loop:\n adj = adj + torch.eye(x.shape[1]).to(device)\n adj = adj / adj.sum(dim=1, keepdim=True)\n h = torch.matmul(adj, x)\n h = self.weight(h)\n h = F.normalize(h, dim=2, p=2)\n h = F.relu(h)\n # TODO: shape = [a, 0, b]\n # if self.use_bn and h.shape[1] > 0:\n # self.bn = nn.BatchNorm1d(h.shape[1]).to(device)\n # h = self.bn(h)\n return h\n\n\nclass BatchedDiffPoolLayer(nn.Module):\n 
r\"\"\"DIFFPOOL from paper `\"Hierarchical Graph Representation Learning\n with Differentiable Pooling\" <https://arxiv.org/pdf/1806.08804.pdf>`__.\n\n .. math::\n X^{(l+1)} = S^{l)}^T Z^{(l)}\n A^{(l+1)} = S^{(l)}^T A^{(l)} S^{(l)}\n Z^{(l)} = GNN_{l, embed}(A^{(l)}, X^{(l)})\n S^{(l)} = softmax(GNN_{l,pool}(A^{(l)}, X^{(l)}))\n\n Parameters\n ----------\n in_feats : int\n Size of each input sample.\n out_feats : int\n Size of each output sample.\n assign_dim : int\n Size of next adjacency matrix.\n batch_size : int\n Size of each mini-batch.\n dropout : float, optional\n Size of dropout, default: ``0.5``.\n link_pred_loss : bool, optional\n Use link prediction loss if True, default: ``True``.\n \"\"\"\n\n def __init__(\n self, in_feats, out_feats, assign_dim, batch_size, dropout=0.5, link_pred_loss=True, entropy_loss=True\n ):\n super(BatchedDiffPoolLayer, self).__init__()\n self.assign_dim = assign_dim\n self.dropout = dropout\n self.use_link_pred = link_pred_loss\n self.batch_size = batch_size\n self.embd_gnn = SAGELayer(in_feats, out_feats, normalize=False)\n self.pool_gnn = SAGELayer(in_feats, assign_dim, normalize=False)\n\n self.loss_dict = dict()\n\n def forward(self, graph, x, batch):\n embed = self.embd_gnn(graph, x)\n pooled = F.softmax(self.pool_gnn(graph, x), dim=-1)\n device = x.device\n masked_tensor = []\n value_set, value_counts = torch.unique(batch, return_counts=True)\n batch_size = len(value_set)\n for i in value_counts:\n masked = torch.ones((i, int(pooled.size()[1] / batch_size)))\n masked_tensor.append(masked)\n masked = torch.FloatTensor(block_diag(*masked_tensor)).to(device)\n\n result = torch.nn.functional.softmax(masked * pooled, dim=-1)\n result = result * masked\n result = result / (result.sum(dim=-1, keepdim=True) + 1e-13)\n # result = masked_softmax(pooled, masked, memory_efficient=False)\n\n h = torch.matmul(result.t(), embed)\n adj = torch.sparse_coo_tensor(torch.stack(graph.edge_index), graph.edge_weight)\n adj_new = torch.sparse.mm(adj, result)\n adj_new = torch.mm(result.t(), adj_new)\n\n if self.use_link_pred:\n adj_loss = torch.norm((adj.to_dense() - torch.mm(result, result.t()))) / np.power((len(batch)), 2)\n self.loss_dict[\"adj_loss\"] = adj_loss\n entropy_loss = (torch.distributions.Categorical(probs=pooled).entropy()).mean()\n assert not torch.isnan(entropy_loss)\n self.loss_dict[\"entropy_loss\"] = entropy_loss\n return adj_new, h\n\n def get_loss(self):\n loss_n = 0\n for _, value in self.loss_dict.items():\n loss_n += value\n return loss_n\n\n\nclass BatchedDiffPool(nn.Module):\n r\"\"\"DIFFPOOL layer with batch forward\n\n Parameters\n ----------\n in_feats : int\n Size of each input sample.\n next_size : int\n Size of next adjacency matrix.\n emb_size : int\n Dimension of next node feature matrix.\n use_bn : bool, optional\n Apply batch normalization if True, default: ``True``.\n self_loop : bool, optional\n Add self loop if True, default: ``True``.\n use_link_loss : bool, optional\n Use link prediction loss if True, default: ``True``.\n use_entropy : bool, optioinal\n Use entropy prediction loss if True, default: ``True``.\n \"\"\"\n\n def __init__(\n self, in_feats, next_size, emb_size, use_bn=True, self_loop=True, use_link_loss=False, use_entropy=True\n ):\n super(BatchedDiffPool, self).__init__()\n self.use_link_loss = use_link_loss\n self.use_bn = use_bn\n self.feat_trans = BatchedGraphSAGE(in_feats, emb_size)\n self.assign_trans = BatchedGraphSAGE(in_feats, next_size)\n\n self.link_loss = LinkPredLoss()\n self.entropy = 
EntropyLoss()\n\n self.loss_module = nn.ModuleList()\n if use_link_loss:\n self.loss_module.append(LinkPredLoss())\n if use_entropy:\n self.loss_module.append(EntropyLoss())\n self.loss = {}\n\n def forward(self, x, adj):\n h = self.feat_trans(x, adj)\n next_l = F.softmax(self.assign_trans(x, adj), dim=-1)\n\n h = torch.matmul(next_l.transpose(-1, -2), h)\n next = torch.matmul(next_l.transpose(-1, -2), torch.matmul(adj, next_l))\n\n for layer in self.loss_module:\n self.loss[str(type(layer).__name__)] = layer(adj, next, next_l)\n\n return h, next\n\n def get_loss(self):\n value = 0\n for _, v in self.loss.items():\n value += v\n return value\n\n\ndef toBatchedGraph(batch_adj, batch_feat, node_per_pool_graph):\n adj_list = [\n batch_adj[i : i + node_per_pool_graph, i : i + node_per_pool_graph]\n for i in range(0, batch_adj.size()[0], node_per_pool_graph)\n ]\n feat_list = [batch_feat[i : i + node_per_pool_graph, :] for i in range(0, batch_adj.size()[0], node_per_pool_graph)]\n adj_list = list(map(lambda x: torch.unsqueeze(x, 0), adj_list))\n feat_list = list(map(lambda x: torch.unsqueeze(x, 0), feat_list))\n adj = torch.cat(adj_list, dim=0)\n feat = torch.cat(feat_list, dim=0)\n return adj, feat\n\n\nclass DiffPool(BaseModel):\n r\"\"\"DIFFPOOL from paper `Hierarchical Graph Representation Learning\n with Differentiable Pooling <https://arxiv.org/pdf/1806.08804.pdf>`__.\n\n Parameters\n ----------\n in_feats : int\n Size of each input sample.\n hidden_dim : int\n Size of hidden layer dimension of GNN.\n embed_dim : int\n Size of embeded node feature, output size of GNN.\n num_classes : int\n Number of target classes.\n num_layers : int\n Number of GNN layers.\n num_pool_layers : int\n Number of pooling.\n assign_dim : int\n Embedding size after the first pooling.\n pooling_ratio : float\n Size of each poolling ratio.\n batch_size : int\n Size of each mini-batch.\n dropout : float, optional\n Size of dropout, default: `0.5`.\n no_link_pred : bool, optional\n If True, use link prediction loss, default: `True`.\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n parser.add_argument(\"--num-layers\", type=int, default=2)\n parser.add_argument(\"--num-pooling-layers\", type=int, default=1)\n parser.add_argument(\"--no-link-pred\", dest=\"no_link_pred\", action=\"store_true\")\n parser.add_argument(\"--pooling-ratio\", type=float, default=0.15)\n parser.add_argument(\"--embedding-dim\", type=int, default=64)\n parser.add_argument(\"--hidden-size\", type=int, default=64)\n parser.add_argument(\"--dropout\", type=float, default=0.1)\n parser.add_argument(\"--batch-size\", type=int, default=20)\n parser.add_argument(\"--train-ratio\", type=float, default=0.7)\n parser.add_argument(\"--test-ratio\", type=float, default=0.1)\n parser.add_argument(\"--lr\", type=float, default=0.001)\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(\n args.num_features,\n args.hidden_size,\n args.embedding_dim,\n args.num_classes,\n args.num_layers,\n args.num_pooling_layers,\n int(args.max_graph_size * args.pooling_ratio) * args.batch_size,\n args.pooling_ratio,\n args.batch_size,\n args.dropout,\n args.no_link_pred,\n )\n\n @classmethod\n def split_dataset(cls, dataset, args):\n return split_dataset_general(dataset, args)\n\n def __init__(\n self,\n in_feats,\n hidden_dim,\n embed_dim,\n num_classes,\n num_layers,\n num_pool_layers,\n assign_dim,\n pooling_ratio,\n batch_size,\n dropout=0.5,\n no_link_pred=True,\n concat=False,\n use_bn=False,\n ):\n super(DiffPool, self).__init__()\n 
self.assign_dim = assign_dim\n self.assign_dim_list = [assign_dim]\n self.use_bn = use_bn\n self.dropout = dropout\n self.use_link_loss = not no_link_pred\n # assert num_layers > 3, \"layers > 3\"\n self.diffpool_layers = nn.ModuleList()\n self.before_pooling = GraphSAGE(\n in_feats, hidden_dim, embed_dim, num_layers=num_layers, dropout=dropout, use_bn=self.use_bn\n )\n self.init_diffpool = BatchedDiffPoolLayer(\n embed_dim, hidden_dim, assign_dim, batch_size, dropout, self.use_link_loss\n )\n\n pooled_emb_dim = embed_dim\n self.after_pool = nn.ModuleList()\n after_per_pool = nn.ModuleList()\n for _ in range(num_layers - 1):\n after_per_pool.append(BatchedGraphSAGE(hidden_dim, hidden_dim))\n after_per_pool.append(BatchedGraphSAGE(hidden_dim, pooled_emb_dim))\n self.after_pool.append(after_per_pool)\n\n for _ in range(num_pool_layers - 1):\n self.assign_dim = int(self.assign_dim // batch_size * pooling_ratio) * batch_size\n self.diffpool_layers.append(\n BatchedDiffPool(\n pooled_emb_dim, self.assign_dim, hidden_dim, use_bn=self.use_bn, use_link_loss=self.use_link_loss\n )\n )\n\n for _ in range(num_layers - 1):\n after_per_pool.append(BatchedGraphSAGE(hidden_dim, hidden_dim))\n after_per_pool.append(BatchedGraphSAGE(hidden_dim, pooled_emb_dim))\n self.after_pool.append(after_per_pool)\n\n self.assign_dim_list.append(self.assign_dim)\n\n if concat:\n out_dim = pooled_emb_dim * (num_pool_layers + 1)\n else:\n out_dim = pooled_emb_dim\n self.fc = nn.Linear(out_dim, num_classes)\n\n def reset_parameters(self):\n for i in self.modules():\n if isinstance(i, nn.Linear):\n nn.init.xavier_uniform_(i.weight.data, gain=nn.init.calculate_gain(\"relu\"))\n if i.bias is not None:\n nn.init.constant_(i.bias.data, 0.0)\n\n def after_pooling_forward(self, gnn_layers, adj, x, concat=False):\n readouts = []\n h = x\n for layer in gnn_layers:\n h = layer(h, adj)\n readouts.append(h)\n # readout = torch.cat(readouts, dim=1)\n return h\n\n def forward(self, batch):\n readouts_all = []\n\n init_emb = self.before_pooling(batch, batch.x)\n adj, h = self.init_diffpool(batch, init_emb, batch.batch)\n value_set, value_counts = torch.unique(batch.batch, return_counts=True)\n batch_size = len(value_set)\n adj, h = toBatchedGraph(adj, h, adj.size(0) // batch_size)\n h = self.after_pooling_forward(self.after_pool[0], adj, h)\n readout = torch.sum(h, dim=1)\n readouts_all.append(readout)\n\n for i, diff_layer in enumerate(self.diffpool_layers):\n h, adj = diff_layer(h, adj)\n h = self.after_pooling_forward(self.after_pool[i + 1], adj, h)\n readout = torch.sum(h, dim=1)\n readouts_all.append(readout)\n pred = self.fc(readout)\n return pred\n\n def graph_classificatoin_loss(self, batch):\n pred = self.forward(batch)\n pred = F.log_softmax(pred, dim=-1)\n loss_n = F.nll_loss(pred, batch.y)\n loss_n += self.init_diffpool.get_loss()\n for layer in self.diffpool_layers:\n loss_n += layer.get_loss()\n return loss_n\n" ]
[ [ "torch.ones_like", "torch.zeros_like", "torch.nn.Embedding", "torch.nn.CrossEntropyLoss", "torch.nn.Dropout" ], [ "numpy.sum", "torch.nn.functional.log_softmax", "torch.nn.functional.nll_loss", "sklearn.metrics.f1_score", "torch.is_tensor", "torch.nn.BCEWithLogitsLoss", "torch.cat", "torch.mean" ], [ "torch.stack", "torch.nn.functional.softmax", "torch.nn.functional.nll_loss", "torch.nn.ModuleList", "torch.eye", "torch.cat", "torch.nn.functional.dropout", "torch.nn.BatchNorm1d", "torch.unique", "torch.sparse.mm", "torch.unsqueeze", "torch.distributions.Categorical", "torch.nn.functional.normalize", "scipy.linalg.block_diag", "torch.isnan", "torch.sum", "torch.nn.init.calculate_gain", "torch.nn.functional.log_softmax", "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.functional.relu", "torch.matmul" ] ]
QinHan-Erin/AMOS
[ "634bf48edf4015e4a69a8c32d49b96bce2b5f16f" ]
[ "python/tvm/tensor_graph/testing/relay_examples/lenet.py" ]
[ "import tvm\nimport numpy as np\nfrom tvm import relay\nfrom tvm.relay.testing import run_infer_type, gradient\n\ndef get_lenet(batch_size,\n num_classes=10,\n image_shape=(1, 28, 28),\n dtype=\"float32\"):\n \"\"\"Get lenet funciton\n\n Parameters\n ----------\n batch_size : int\n The batch size used in the model\n\n num_classes : int, optional\n Number of claseses\n\n image_shape : tuple, optional\n The input image shape\n\n dtype : str, optional\n The data type\n\n Returns\n -------\n net : relay.Function\n The dataflow.\n \"\"\"\n data_shape = (batch_size,) + image_shape\n data = relay.TensorType(data_shape, dtype=dtype)\n data = relay.var(\"data\", data)\n conv_w1 = relay.var('c1.weight')\n c1 = relay.nn.conv2d(data=data, weight=conv_w1, channels=6, kernel_size=(5, 5),\n strides=(1, 1), padding=(2, 2))\n conv_b1 = relay.var('c1.bias', dtype=dtype)\n c1 = relay.nn.bias_add(c1, conv_b1, axis=-1)\n act_c1 = relay.nn.relu(data=c1)\n # Max-pooling\n # [64, 6, 14, 14]\n conv_w2 = relay.var('c2.weight', dtype=dtype)\n conv_b2 = relay.var('c2.bias', dtype=dtype)\n p1 = relay.nn.conv2d(data=act_c1, weight=conv_w2, channels=6, kernel_size=(2, 2),\n strides=(2, 2), padding=(0, 0))\n p1 = relay.nn.bias_add(p1, conv_b2, axis=-1)\n # Convolution\n conv_w3 = relay.var('c3.weight', dtype=dtype)\n conv_b3 = relay.var('c3.bias', dtype=dtype)\n c2 = relay.nn.conv2d(data=p1, weight=conv_w3, channels=6, kernel_size=(5, 5),\n strides=(1, 1), padding=(0, 0))\n c2 = relay.nn.bias_add(c2, conv_b3, axis=-1)\n # [64, 6, 28, 28]conv2d(p1, 16, (5, 5), (1, 1), (0, 0), 'c2') # [64, 16, 10, 10]\n act_c2 = relay.nn.relu(data=c2)\n # Max-pooling\n # [64, 16, 5, 5]\n conv_w4 = relay.var('c4.weight', dtype=dtype)\n conv_b4 = relay.var('c4.bias', dtype=dtype)\n p2 = relay.nn.conv2d(data=act_c2, weight=conv_w4, channels=6, kernel_size=(2, 2),\n strides=(2, 2), padding=(0, 0))\n p2 = relay.nn.bias_add(p2, conv_b4, axis=-1)\n # reshape\n r1 = relay.nn.batch_flatten(data=p2)\n w1 = relay.var('fc1.weight', dtype=dtype)\n b1 = relay.var('fc1.bias', dtype=dtype)\n fc1 = relay.nn.dense(data=r1, weight=w1, units=128)\n fc1 = relay.nn.bias_add(fc1, b1, axis=-1)\n act1 = relay.nn.relu(data=fc1)\n w2 = relay.var('fc2.weight', dtype=dtype)\n b2 = relay.var('fc2.bias', dtype=dtype)\n fc2 = relay.nn.dense(data=act1, weight=w2, units=64)\n fc2 = relay.nn.bias_add(fc2, b2, axis=-1)\n act2 = relay.nn.relu(data=fc2)\n w3 = relay.var('fc3.weight', dtype=dtype)\n b3 = relay.var('fc3.bias', dtype=dtype)\n fc3 = relay.nn.dense(data=act2, weight=w3, units=num_classes)\n fc3 = relay.nn.bias_add(fc3, b3, axis=-1)\n lenet = relay.nn.softmax(data=fc3)\n argu_list = [conv_w1, conv_b1, conv_w2, conv_b2, w1, b1, w2, b2, w3, b3]\n return relay.Function(relay.analysis.free_vars(lenet), lenet), argu_list\n\n\ndef make_sgd_update_net(loss_function, var, lr=0.002, scale=1.0, wd=0.0, clip=None):\n type_loss_function = run_infer_type(loss_function)\n grad_func = run_infer_type(gradient(type_loss_function))\n grads = relay.TupleWrapper(relay.TupleGetItem(grad_func.body, 1), len(loss_function.params))\n useful_grad = []\n type_var = []\n for var_item in var:\n for index, value_item in enumerate(type_loss_function.params):\n if var_item.name_hint == value_item.name_hint:\n useful_grad.append(grads[index])\n type_var.append(value_item)\n break\n else:\n raise(\"can't get required params from loss function, internal error\")\n updates = []\n for i, v in enumerate(type_var):\n g = useful_grad[i]\n g = relay.multiply(g, relay.const(scale, \"float32\"))\n if clip 
is not None:\n g = relay.clip(g, a_min=-1 * clip, a_max=clip)\n g = relay.subtract(v, \n relay.multiply(relay.const(lr, \"float32\"), \n relay.add(g, \n relay.multiply(relay.const(wd, \"float32\"), \n v))))\n updates.append(g)\n sgd_body = relay.Tuple(updates)\n return relay.Function(relay.analysis.free_vars(sgd_body), sgd_body)\n\n\ndef make_adam_update_net(loss_function, var, lr=0.001, beta1=0.9, beta2=0.99, scale=1.0, wd=0.0, clip=None, name=\"adam\", dtype='float32'):\n type_loss_function = run_infer_type(loss_function)\n grad_func = run_infer_type(gradient(type_loss_function))\n grads = relay.TupleWrapper(relay.TupleGetItem(grad_func.body, 1), len(loss_function.params))\n useful_grad = []\n type_var = []\n for var_item in var:\n for index, value_item in enumerate(type_loss_function.params):\n if var_item.name_hint == value_item.name_hint:\n useful_grad.append(grads[index])\n type_var.append(value_item)\n break\n else:\n raise(\"can't get required params from loss function, internal error\")\n print(type_var)\n updates = []\n m = []\n t = relay.zeros(shape=[1], dtype=dtype)\n epsilon = 1e-04\n const_1 = relay.const(1, dtype=dtype)\n const_beta1 = relay.const(beta1, dtype=dtype)\n const_beta2 = relay.const(beta2, dtype=dtype)\n for i, va in enumerate(type_var):\n m.append(relay.zeros_like(va))\n update_t = relay.add(t, const_1)\n rate = relay.divide(relay.sqrt(relay.subtract(const_1, relay.power(const_beta2, update_t))),\n relay.subtract(const_1, relay.power(const_beta1, update_t)))\n lr_t = relay.multiply(relay.const(lr, dtype=dtype), rate)\n for var, g, m in zip(type_var, useful_grad, m):\n update_m = relay.add(relay.multiply(const_beta1, m), \n relay.multiply(relay.subtract(const_1, const_beta1), g))\n update_v = relay.add(relay.multiply(const_beta2, m), \n relay.multiply(relay.subtract(const_1, const_beta2), \n relay.multiply(g, g)))\n update_var = relay.subtract(var, \n relay.divide(relay.multiply(lr_t, update_m), \n relay.add(relay.sqrt(update_v), \n relay.const(epsilon, dtype=\"float32\"))))\n updates.append(update_var)\n adam_body = relay.Tuple(updates)\n return relay.Function(relay.analysis.free_vars(adam_body), adam_body)\n\n\ndef mse_loss(lenet_function, target):\n sub = relay.subtract(lenet_function.body, target)\n loss_body = relay.sum(relay.multiply(sub, sub))\n return relay.Function(relay.analysis.free_vars(loss_body), loss_body)\n # return sum((predict - target)**2) / 2.0\n\n\ndef cross_entropy_loss(lenet_function, target):\n loss_body = relay.negative(relay.sum(relay.multiply(relay.log(relay.add(lenet_function.body, \n relay.const(1e-5, dtype=\"float32\"))), \n target)))\n return relay.Function(relay.analysis.free_vars(loss_body), loss_body)\n\n\ndef make_loss_net(lenet_function, target, optim=\"CROSS\"):\n \"\"\"Get loss funtion for lenet\n\n Parameters\n ----------\n lenet_function : relay.Function\n\n target : relay.Expr\n\n optim : str, optional\n loss_function strategy, \"CROSS\" or \"MSE\"\n\n Returns\n -------\n net : relay.Function\n The dataflow.\n \"\"\"\n if optim == \"CROSS\":\n return cross_entropy_loss(lenet_function, target)\n if optim == \"MSE\":\n return mse_loss(lenet_function, target)\n raise(\"unknown optim, use 'CROSS' or 'MSE'.\")\n\n\ndef make_grad_net(loss_function):\n \"\"\"Get updated funtion for lenet\n\n Parameters\n ----------\n loss_function : relay.Function\n\n Returns\n -------\n net : relay.Function\n The dataflow.\n \"\"\"\n type_loss_function = run_infer_type(loss_function)\n grad_func = run_infer_type(gradient(type_loss_function))\n 
return grad_func\n\n\ndef make_update_net(loss_function, weights, optim=\"SGD\"):\n \"\"\"Get updated funtion for lenet\n\n Parameters\n ----------\n loss_function : relay.Function\n\n weights : [relay.var]\n vars to compute gradient\n\n optim : str, optional\n updated_function strategy, \"ADAM\" or \"SGD\"\n\n Returns\n -------\n net : relay.Function\n The dataflow.\n \"\"\"\n if optim == \"ADAM\":\n return make_adam_update_net(loss_function, weights)\n if optim == \"SGD\":\n return make_sgd_update_net(loss_function, weights)\n raise(\"unknown optim, use 'ADAM' or 'SGD'.\")\n\n\ndef create_workload(net, initializer=None, seed=0):\n \"\"\"Helper function to create benchmark image classification workload.\n\n Parameters\n ----------\n net : tvm.relay.Function\n The selected function of the network.\n\n initializer : Initializer\n The initializer used\n\n seed : int\n The seed used in initialization.\n\n Returns\n -------\n mod : tvm.IRModule\n The created relay module.\n\n params : dict of str to NDArray\n The parameters.\n \"\"\"\n mod = tvm.IRModule.from_expr(net)\n mod = relay.transform.InferType()(mod)\n shape_dict = {\n v.name_hint : v.checked_type for v in mod[\"main\"].params}\n np.random.seed(seed)\n initializer = initializer if initializer else Xavier()\n params = {}\n for k, v in shape_dict.items():\n # modify here, skip \"label\" as well\n if k == \"data\" or k == \"label\":\n continue\n init_value = np.zeros(v.concrete_shape).astype(v.dtype)\n initializer(k, init_value)\n params[k] = tvm.nd.array(init_value, ctx=tvm.cpu(0))\n return mod, params\n" ]
[ [ "numpy.random.seed", "numpy.zeros" ] ]
chen1i/fedlearner
[ "981514dadbd0aa49ae87d185dd247d310e35605c" ]
[ "test/data_join/test_data_block_dumper.py" ]
[ "# Copyright 2020 The FedLearner Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n\nimport unittest\nimport os\n\nimport tensorflow.compat.v1 as tf\ntf.enable_eager_execution()\nfrom google.protobuf import text_format, timestamp_pb2\nimport tensorflow_io\nfrom tensorflow.compat.v1 import gfile\n\nfrom fedlearner.common import db_client\nfrom fedlearner.common import common_pb2 as common_pb\nfrom fedlearner.common import data_join_service_pb2 as dj_pb\nfrom fedlearner.data_join import (\n data_block_manager, common, data_block_dumper,\n raw_data_manifest_manager, raw_data_visitor, visitor\n)\nfrom fedlearner.data_join.data_block_manager import DataBlockBuilder\nfrom fedlearner.data_join.raw_data_iter_impl.tf_record_iter import TfExampleItem\n\nclass TestDataBlockDumper(unittest.TestCase):\n def setUp(self):\n data_source_f = common_pb.DataSource()\n data_source_f.data_source_meta.name = \"milestone\"\n data_source_f.data_source_meta.partition_num = 1\n data_source_f.output_base_dir = \"./output-f\"\n self.data_source_f = data_source_f\n if gfile.Exists(self.data_source_f.output_base_dir):\n gfile.DeleteRecursively(self.data_source_f.output_base_dir)\n data_source_l = common_pb.DataSource()\n data_source_l.data_source_meta.name = \"milestone\"\n data_source_l.data_source_meta.partition_num = 1\n data_source_l.output_base_dir = \"./output-l\"\n self.raw_data_dir_l = \"./raw_data-l\"\n self.data_source_l = data_source_l\n if gfile.Exists(self.data_source_l.output_base_dir):\n gfile.DeleteRecursively(self.data_source_l.output_base_dir)\n if gfile.Exists(self.raw_data_dir_l):\n gfile.DeleteRecursively(self.raw_data_dir_l)\n self.kvstore = db_client.DBClient('etcd', True)\n self.kvstore.delete_prefix(common.data_source_kvstore_base_dir(self.data_source_l.data_source_meta.name))\n self.manifest_manager = raw_data_manifest_manager.RawDataManifestManager(\n self.kvstore, self.data_source_l)\n\n def generate_follower_data_block(self):\n dbm = data_block_manager.DataBlockManager(self.data_source_f, 0)\n self.assertEqual(dbm.get_dumped_data_block_count(), 0)\n self.assertEqual(dbm.get_lastest_data_block_meta(), None)\n leader_index = 0\n follower_index = 65536\n self.dumped_metas = []\n for i in range(5):\n builder = DataBlockBuilder(\n common.data_source_data_block_dir(self.data_source_f),\n self.data_source_f.data_source_meta.name,\n 0, i, dj_pb.WriterOptions(output_writer='TF_RECORD'), None\n )\n builder.set_data_block_manager(dbm)\n for j in range(1024):\n feat = {}\n example_id = '{}'.format(i * 1024 + j).encode()\n feat['example_id'] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[example_id]))\n event_time = 150000000 + i * 1024 + j\n feat['event_time'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[event_time]))\n feat['leader_index'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[leader_index]))\n feat['follower_index'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[follower_index]))\n example = 
tf.train.Example(features=tf.train.Features(feature=feat))\n builder.append_item(TfExampleItem(example.SerializeToString()),\n leader_index, follower_index)\n leader_index += 3\n follower_index += 1\n meta = builder.finish_data_block()\n self.dumped_metas.append(meta)\n self.leader_start_index = 0\n self.leader_end_index = leader_index\n self.assertEqual(dbm.get_dumped_data_block_count(), 5)\n for (idx, meta) in enumerate(self.dumped_metas):\n self.assertEqual(dbm.get_data_block_meta_by_index(idx), meta)\n\n def generate_leader_raw_data(self):\n dbm = data_block_manager.DataBlockManager(self.data_source_l, 0)\n raw_data_dir = os.path.join(self.raw_data_dir_l, common.partition_repr(0))\n if gfile.Exists(raw_data_dir):\n gfile.DeleteRecursively(raw_data_dir)\n gfile.MakeDirs(raw_data_dir)\n rdm = raw_data_visitor.RawDataManager(self.kvstore, self.data_source_l, 0)\n block_index = 0\n builder = DataBlockBuilder(\n self.raw_data_dir_l,\n self.data_source_l.data_source_meta.name,\n 0, block_index, dj_pb.WriterOptions(output_writer='TF_RECORD'), None\n )\n process_index = 0\n start_index = 0\n for i in range(0, self.leader_end_index + 3):\n if (i > 0 and i % 2048 == 0) or (i == self.leader_end_index + 2):\n meta = builder.finish_data_block()\n if meta is not None:\n ofname = common.encode_data_block_fname(\n self.data_source_l.data_source_meta.name,\n meta\n )\n fpath = os.path.join(raw_data_dir, ofname)\n self.manifest_manager.add_raw_data(\n 0,\n [dj_pb.RawDataMeta(file_path=fpath,\n timestamp=timestamp_pb2.Timestamp(seconds=3))],\n False)\n process_index += 1\n start_index += len(meta.example_ids)\n block_index += 1\n builder = DataBlockBuilder(\n self.raw_data_dir_l,\n self.data_source_l.data_source_meta.name,\n 0, block_index, dj_pb.WriterOptions(output_writer='TF_RECORD'), None\n )\n feat = {}\n pt = i + 1 << 30\n if i % 3 == 0:\n pt = i // 3\n example_id = '{}'.format(pt).encode()\n feat['example_id'] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[example_id]))\n event_time = 150000000 + pt\n feat['event_time'] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=[event_time]))\n example = tf.train.Example(features=tf.train.Features(feature=feat))\n builder.append_item(TfExampleItem(example.SerializeToString()), i, i)\n fpaths = [os.path.join(raw_data_dir, f)\n for f in gfile.ListDirectory(raw_data_dir)\n if not gfile.IsDirectory(os.path.join(raw_data_dir, f))]\n for fpath in fpaths:\n if not fpath.endswith(common.DataBlockSuffix):\n gfile.Remove(fpath)\n \n def test_data_block_dumper(self):\n self.generate_follower_data_block()\n self.generate_leader_raw_data()\n dbd = data_block_dumper.DataBlockDumperManager(\n self.kvstore, self.data_source_l, 0,\n dj_pb.RawDataOptions(raw_data_iter='TF_RECORD', read_ahead_size=1<<20, read_batch_size=128),\n dj_pb.WriterOptions(output_writer='TF_RECORD')\n )\n self.assertEqual(dbd.get_next_data_block_index(), 0)\n for (idx, meta) in enumerate(self.dumped_metas):\n success, next_index = dbd.add_synced_data_block_meta(meta)\n self.assertTrue(success)\n self.assertEqual(next_index, idx + 1)\n self.assertTrue(dbd.need_dump())\n self.assertEqual(dbd.get_next_data_block_index(), len(self.dumped_metas))\n with dbd.make_data_block_dumper() as dumper:\n dumper()\n dbm_f = data_block_manager.DataBlockManager(self.data_source_f, 0)\n dbm_l = data_block_manager.DataBlockManager(self.data_source_l, 0)\n self.assertEqual(dbm_f.get_dumped_data_block_count(), len(self.dumped_metas))\n self.assertEqual(dbm_f.get_dumped_data_block_count(),\n 
dbm_l.get_dumped_data_block_count())\n for (idx, meta) in enumerate(self.dumped_metas):\n self.assertEqual(meta.data_block_index, idx)\n self.assertEqual(dbm_l.get_data_block_meta_by_index(idx), meta)\n self.assertEqual(dbm_f.get_data_block_meta_by_index(idx), meta)\n meta_fpth_l = os.path.join(\n common.data_source_data_block_dir(self.data_source_l),\n common.partition_repr(0),\n common.encode_data_block_meta_fname(\n self.data_source_l.data_source_meta.name,\n 0, meta.data_block_index\n )\n )\n mitr = tf.io.tf_record_iterator(meta_fpth_l)\n meta_l = text_format.Parse(next(mitr), dj_pb.DataBlockMeta())\n self.assertEqual(meta_l, meta)\n meta_fpth_f = os.path.join(\n common.data_source_data_block_dir(self.data_source_f),\n common.partition_repr(0),\n common.encode_data_block_meta_fname(\n self.data_source_f.data_source_meta.name,\n 0, meta.data_block_index\n )\n )\n mitr = tf.io.tf_record_iterator(meta_fpth_f)\n meta_f = text_format.Parse(next(mitr), dj_pb.DataBlockMeta())\n self.assertEqual(meta_f, meta)\n data_fpth_l = os.path.join(\n common.data_source_data_block_dir(self.data_source_l),\n common.partition_repr(0),\n common.encode_data_block_fname(\n self.data_source_l.data_source_meta.name,\n meta_l\n )\n )\n for (iidx, record) in enumerate(tf.io.tf_record_iterator(data_fpth_l)):\n example = tf.train.Example()\n example.ParseFromString(record)\n feat = example.features.feature\n self.assertEqual(feat['example_id'].bytes_list.value[0],\n meta.example_ids[iidx])\n self.assertEqual(len(meta.example_ids), iidx + 1)\n data_fpth_f = os.path.join(\n common.data_source_data_block_dir(self.data_source_f),\n common.partition_repr(0),\n common.encode_data_block_fname(\n self.data_source_l.data_source_meta.name,\n meta_f\n )\n )\n for (iidx, record) in enumerate(tf.io.tf_record_iterator(data_fpth_f)):\n example = tf.train.Example()\n example.ParseFromString(record)\n feat = example.features.feature\n self.assertEqual(feat['example_id'].bytes_list.value[0],\n meta.example_ids[iidx])\n self.assertEqual(len(meta.example_ids), iidx +1)\n\n def tearDown(self):\n if gfile.Exists(self.data_source_f.output_base_dir):\n gfile.DeleteRecursively(self.data_source_f.output_base_dir)\n if gfile.Exists(self.data_source_l.output_base_dir):\n gfile.DeleteRecursively(self.data_source_l.output_base_dir)\n if gfile.Exists(self.raw_data_dir_l):\n gfile.DeleteRecursively(self.raw_data_dir_l)\n self.kvstore.delete_prefix(common.data_source_kvstore_base_dir(self.data_source_l.data_source_meta.name))\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "tensorflow.compat.v1.gfile.Remove", "tensorflow.compat.v1.train.Int64List", "tensorflow.compat.v1.train.Features", "tensorflow.compat.v1.train.BytesList", "tensorflow.compat.v1.gfile.Exists", "tensorflow.compat.v1.io.tf_record_iterator", "tensorflow.compat.v1.gfile.DeleteRecursively", "tensorflow.compat.v1.train.Example", "tensorflow.compat.v1.enable_eager_execution", "tensorflow.compat.v1.gfile.MakeDirs", "tensorflow.compat.v1.gfile.ListDirectory" ] ]
ctralie/GeometricBeatTracking
[ "2c35183f638c4afb51808c09e46da0f74384cba6" ]
[ "TheoryValidation/CirculantGraphs.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.sparse as sparse\nimport sys\nsys.path.append(\"..\")\nfrom Laplacian import *\n\ndef getCirculantAdj(N, lags):\n #Setup circular parts\n I = range(N)*(len(lags)+2)\n J = range(1, N+1) + range(-1, N-1)\n J[N-1] = 0\n J[N] = N-1\n for lag in lags:\n J = J + (np.mod(np.arange(N) + lag, N)).tolist()\n V = np.ones(len(I))\n return sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()\n\ndef getOneOnK(N, k):\n lags = [i*N/k for i in range(1, k)]\n return getCirculantAdj(N, lags)\n\ndef getCircleEigs(N):\n lambdas = np.zeros(N)\n for i in range(1, N/2+1):\n val = 2 - 2*np.cos(2*np.pi*i/N)\n i1 = i*2-1\n i2 = i*2\n lambdas[i1] = val\n if i2 < N:\n lambdas[i2] = val\n return lambdas\n\ndef getMoebiusEigs(N):\n lambdas = np.zeros(N)\n for i in range(1, N/2+1):\n val = 3 - 2*np.cos(2*np.pi*i/N) - (-1)**i\n i1 = i*2-1\n i2 = i*2\n lambdas[i1] = val\n if i2 < N:\n lambdas[i2] = val\n return (lambdas, np.sort(lambdas))\n\ndef get3WayEigs(N):\n lambdas = np.zeros(N)\n for i in range(1, N/2+1):\n val = 4 - 2*np.cos(2*np.pi*i/N) - 2*np.cos(2*np.pi*i/3)\n i1 = i*2-1\n i2 = i*2\n lambdas[i1] = val\n if i2 < N:\n lambdas[i2] = val\n return (lambdas, np.sort(lambdas))\n\nif __name__ == '__main__':\n N = 100\n A = getOneOnK(N, 2)\n #A = getCirculantAdj(N, [30, 60, 80])\n A = A.toarray()\n (w, v, L) = getLaplacianEigsDense(A, A.shape[0])\n \n (lambdas, lambdassorted) = get3WayEigs(N)\n \n plt.figure(figsize=(15, 4))\n plt.subplot(132)\n plt.plot(lambdas)\n plt.title(\"Eigenvalues\")\n plt.xlabel(\"Eigenvalue Number\")\n plt.ylabel(\"Eigenvalue\")\n \n# plt.subplot(224)\n# plt.scatter(w, lambdassorted)\n# plt.xlabel(\"Numerically Computed\")\n# plt.ylabel(\"Analytic\")\n# plt.axis('equal')\n# plt.title(\"Checking accuracy\")\n \n plt.subplot(131)\n plt.imshow(A, interpolation = 'nearest', cmap = 'gray')\n plt.title(\"Adjacency Matrix\")\n \n plt.subplot(133)\n plt.imshow(v, cmap = 'afmhot', aspect = 'auto', interpolation = 'nearest')\n plt.xlabel(\"k-th Smallest Eigenvector\")\n plt.title(\"Eigenvectors\")\n \n plt.savefig(\"Eigs.svg\", bbox_inches = 'tight')\n" ]
[ [ "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "numpy.cos", "matplotlib.pyplot.title", "scipy.sparse.coo_matrix", "matplotlib.pyplot.subplot", "matplotlib.pyplot.imshow", "matplotlib.pyplot.ylabel", "numpy.arange", "numpy.sort", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
adrienycart/PEAMT
[ "d3ae41e86dedeb64fcf54e2454c9feee993574f9" ]
[ "peamt/features/polyphony.py" ]
[ "import numpy as np\n\n\n########################################\n### Polyphony --- discarded\n########################################\n\ndef polyphony_level_diff(roll_output,roll_target):\n poly_output = np.sum(roll_output,axis=0)\n poly_target = np.sum(roll_target,axis=0)\n\n poly_diff = np.abs(poly_output-poly_target)\n\n return np.mean(poly_diff),np.std(poly_diff),np.min(poly_diff),np.max(poly_diff)\n\n\n# discarded\ndef false_negative_polyphony_level(roll_target,intervals_target,match):\n fs = 100\n\n if len(match) == 0:\n unmatched_targets = list(range(intervals_target))\n else:\n matched_targets, matched_outputs = zip(*match)\n # unmatched_targets= list(set(range(len(vel_target)))-set(matched_targets))\n unmatched_targets= list(set(range(len(intervals_target)))-set(matched_targets))\n\n unmatched_intervals = intervals_target[unmatched_targets,:]\n\n all_avg_poly = []\n\n for [start,end] in unmatched_intervals:\n start_idx = int(round(start*fs))\n end_idx = int(round(end*fs))\n avg_poly = np.mean(np.sum(roll_target[:,start_idx:end_idx],axis=0))\n all_avg_poly += [avg_poly]\n\n return all_avg_poly\n" ]
[ [ "numpy.sum", "numpy.abs", "numpy.max", "numpy.min", "numpy.std", "numpy.mean" ] ]
KnowingNothing/akg-test
[ "114d8626b824b9a31af50a482afc07ab7121862b" ]
[ "tests/common/test_run/round_run.py" ]
[ "# Copyright 2019 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport secrets\nfrom tests.common.tensorio import compare_tensor\nfrom akg.utils import kernel_exec as utils\nfrom tests.common.test_op import round\nfrom tests.common.gen_random import random_gaussian\nsecretsGenerator = secrets.SystemRandom()\ndef round_run(shape, dtype, attrs):\n in_shape = [shape]\n in_dtype = [dtype]\n\n if 'tuning' in attrs.keys():\n t = attrs.get(\"tuning\", False)\n kernel_name = attrs.get(\"kernel_name\", False)\n mod = utils.op_build_test(round.round_value, in_shape, in_dtype, kernel_name=kernel_name, attrs=attrs, tuning=t)\n if t:\n expect, input, output = gen_data(dtype, shape)\n return mod, expect, (input, output)\n else:\n return mod\n else:\n mod = utils.op_build_test(round.round_value, in_shape, in_dtype, kernel_name='round', attrs=attrs)\n expect, input, output = gen_data(dtype, shape)\n output = utils.mod_launch(mod, (input, output), expect=expect)\n return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)\n\n\ndef gen_data(dtype, shape):\n input = random_gaussian(shape, miu=1, sigma=10).astype(dtype)\n a = secretsGenerator.randint(0, 9)\n if a % 2 == 0:\n input = input.astype('int32') + 0.5\n input = input.astype(dtype)\n input_f16 = input.astype(np.float16)\n expect = np.round(input_f16).astype(\"int32\")\n output = np.full(shape, np.nan, \"int32\")\n return expect, input, output\n" ]
[ [ "numpy.round", "numpy.full" ] ]
rise-lang/iree
[ "46ad3fe392d38ce3df6eff7826cc1ab331a40b72" ]
[ "integrations/tensorflow/e2e/conv_test.py" ]
[ "# Lint as: python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom pyiree.tf.support import tf_test_utils\nimport tensorflow.compat.v2 as tf\n\n\nclass Conv2dModule(tf.Module):\n\n @tf.function(input_signature=[\n tf.TensorSpec([1, 4, 5, 1], tf.float32),\n tf.TensorSpec([1, 1, 1, 1], tf.float32),\n ])\n def conv2d_1451x1111_valid(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"VALID\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([2, 4, 5, 1], tf.float32),\n tf.TensorSpec([1, 1, 1, 1], tf.float32),\n ])\n def conv2d_2451x1111_valid(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"VALID\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([1, 4, 5, 1], tf.float32),\n tf.TensorSpec([2, 3, 1, 1], tf.float32),\n ])\n def conv2d_1451x2311_valid(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"VALID\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([1, 4, 5, 1], tf.float32),\n tf.TensorSpec([2, 3, 1, 1], tf.float32),\n ])\n def conv2d_1451x2311_same(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"SAME\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([2, 4, 5, 1], tf.float32),\n tf.TensorSpec([2, 3, 1, 1], tf.float32),\n ])\n def conv2d_2451x2311_same(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"SAME\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([1, 4, 5, 2], tf.float32),\n tf.TensorSpec([3, 2, 2, 1], tf.float32),\n ])\n def conv2d_1452x3221_same(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"SAME\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([1, 4, 5, 1], tf.float32),\n tf.TensorSpec([1, 1, 1, 2], tf.float32),\n ])\n def conv2d_1451x1112_same(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"SAME\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([1, 4, 5, 2], tf.float32),\n tf.TensorSpec([1, 1, 2, 2], tf.float32),\n ])\n def conv2d_1452x1122_same(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"SAME\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([1, 4, 5, 2], tf.float32),\n tf.TensorSpec([2, 2, 2, 3], tf.float32),\n ])\n def conv2d_1452x2223_same(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"SAME\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([1, 4, 5, 2], tf.float32),\n tf.TensorSpec([2, 2, 2, 3], tf.float32),\n ])\n def conv2d_1452x2223_valid(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"VALID\", name=\"result\")\n\n @tf.function(input_signature=[\n tf.TensorSpec([2, 4, 5, 2], tf.float32),\n tf.TensorSpec([2, 2, 2, 3], tf.float32),\n ])\n def conv2d_2452x2223_valid(self, img, kernel):\n return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], \"VALID\", 
name=\"result\")\n\n\n@tf_test_utils.compile_module(Conv2dModule)\nclass ConvTest(tf_test_utils.SavedModelTestCase):\n\n def test_id_batch_size_1(self):\n i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])\n k = np.ones([1, 1, 1, 1], dtype=np.float32)\n r = self.get_module().conv2d_1451x1111_valid(i, k)\n r.print().assert_all_close()\n\n def test_id_batch_size_2(self):\n i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])\n k = np.ones([1, 1, 1, 1], dtype=np.float32)\n r = self.get_module().conv2d_2451x1111_valid(i, k)\n r.print().assert_all_close()\n\n def test_asym_kernel(self):\n i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])\n k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)\n r = self.get_module().conv2d_1451x2311_valid(i, k)\n r.print().assert_all_close()\n\n def test_padding(self):\n i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])\n k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)\n r = self.get_module().conv2d_1451x2311_same(i, k)\n r.print().assert_all_close()\n\n def test_batched_padding(self):\n i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])\n k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)\n r = self.get_module().conv2d_2451x2311_same(i, k)\n r.print().assert_all_close()\n\n def test_feature_reduce(self):\n i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])\n k = np.ones([3, 2, 2, 1], dtype=np.float32)\n r = self.get_module().conv2d_1452x3221_same(i, k)\n r.print().assert_all_close()\n\n def test_feature_inflate(self):\n i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])\n k = np.arange(2, dtype=np.float32).reshape([1, 1, 1, 2])\n r = self.get_module().conv2d_1451x1112_same(i, k)\n r.print().assert_all_close()\n\n def test_feature_mix(self):\n i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])\n k = np.arange(4, dtype=np.float32).reshape([1, 1, 2, 2])\n r = self.get_module().conv2d_1452x1122_same(i, k)\n r.print().assert_all_close()\n\n def test_feature_padded(self):\n i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])\n k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])\n r = self.get_module().conv2d_1452x2223_same(i, k)\n r.print().assert_all_close()\n\n def test_feature_unpadded(self):\n i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])\n k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])\n r = self.get_module().conv2d_1452x2223_valid(i, k)\n r.print().assert_all_close()\n\n def test_batched_feature_unpadded(self):\n i = np.arange(80, dtype=np.float32).reshape([2, 4, 5, 2])\n k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])\n r = self.get_module().conv2d_2452x2223_valid(i, k)\n r.print().assert_all_close()\n\n\nif __name__ == \"__main__\":\n if hasattr(tf, \"enable_v2_behavior\"):\n tf.enable_v2_behavior()\n tf.test.main()\n" ]
[ [ "numpy.ones", "tensorflow.compat.v2.nn.conv2d", "tensorflow.compat.v2.test.main", "numpy.arange", "tensorflow.compat.v2.enable_v2_behavior", "numpy.array", "tensorflow.compat.v2.TensorSpec" ] ]
bruinxiong/fedlearner
[ "9cdeaf44b279acedd5bc88bbffd4a390697b06aa" ]
[ "fedlearner/trainer/estimator.py" ]
[ "# Copyright 2020 The FedLearner Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding: utf-8\n# pylint: disable=protected-access\n\nimport os\nimport logging\nimport time\nimport tensorflow.compat.v1 as tf\n\nfrom tensorflow.compat import as_str_any\nfrom tensorflow.compat.v1.train import Optimizer\nfrom tensorflow.compat.v1.estimator import ModeKeys\nfrom tensorflow_estimator.python.estimator import model_fn as model_fn_lib\n\nfrom fedlearner.common.mysql_client import DBClient\nfrom fedlearner.common.summary_hook import SummaryHook\nfrom fedlearner.trainer import patch # pylint: disable=unused-import\nfrom fedlearner.common import metrics\nfrom fedlearner.data_join.common import get_kvstore_config\n\nSYNC_PATH = '/sync/'\nDATA_CHECKPOINT_INIT_VALUE = \"_init_value\"\n\nclass DataCheckpointSaverListener(tf.estimator.CheckpointSaverListener):\n def __init__(self, tm, appid):\n self._trainer_master = tm\n self._application_id = appid\n\n def begin(self):\n ckpt = tf.placeholder(tf.string, name=\"data_checkpoint_plhd\")\n var_tmp = tf.Variable(DATA_CHECKPOINT_INIT_VALUE, \\\n name=\"data_checkpoint\")\n self._ckpt_tensor = var_tmp.assign(ckpt)\n\n def before_save(self, session, global_step_value):\n logging.info('About to write a checkpoint at step %d', \\\n global_step_value)\n data_checkpoint = self._trainer_master.get_data_block_checkpoint(\n self._application_id)\n #if empty block from checkpoint fetched due to exception or\n # master not ready, no need to save.\n if len(data_checkpoint) == 0:\n return\n res = session.run(self._ckpt_tensor, {\"data_checkpoint_plhd:0\":\n \",\".join(data_checkpoint)})\n logging.info(\"data checkpoint saved result: %s\", res)\n\nclass FLModel(object):\n def __init__(self, role, bridge, example_ids, exporting=False):\n self._role = role\n self._bridge = bridge\n self._example_ids = example_ids\n self._exporting = exporting\n\n self._train_ops = []\n self._recvs = []\n self._sends = []\n self._outputs = []\n\n @property\n def train_ops(self):\n return self._train_ops\n\n @property\n def sends(self):\n return [(n, t) for n, t, _ in self._sends]\n\n @property\n def recvs(self):\n return [(n, t) for n, t, _ in self._recvs]\n\n def verify_example_ids(self):\n tensor = tf.strings.to_hash_bucket_fast(self._example_ids, 2**31 - 1)\n if self._role == 'leader':\n self.send('_verify_example_ids', tensor)\n else:\n recv_tensor = self.recv('_verify_example_ids', tensor.dtype)\n op = tf.assert_equal(tensor, recv_tensor)\n self._train_ops.append(op)\n\n def send(self, name, tensor, require_grad=False):\n with tf.control_dependencies([self._example_ids]):\n op = self._bridge.send_op(name, tensor)\n self._train_ops.append(op)\n self._sends.append((name, tensor, require_grad))\n if require_grad:\n return self.recv(name + '_grad', tensor.dtype)\n return None\n\n def recv(self, name, dtype=tf.float32, require_grad=False):\n with tf.control_dependencies([self._example_ids]):\n tensor = self._bridge.receive_op(name, dtype)\n 
self._recvs.append((name, tensor, require_grad))\n return tensor\n\n def minimize(self,\n optimizer,\n loss,\n global_step=None,\n var_list=None,\n gate_gradients=Optimizer.GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n name=None,\n grad_loss=None):\n recv_grads = [i for i in self._recvs if i[2]]\n\n if var_list is None:\n var_list = \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) + \\\n tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)\n var_list = [v for _, v, _ in recv_grads] + var_list\n\n grads_and_vars = optimizer.compute_gradients(\n loss,\n var_list=var_list,\n gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n grad_loss=grad_loss)\n\n send_grads = grads_and_vars[:len(recv_grads)]\n for (n, _, _), (grad, _) in zip(recv_grads, send_grads):\n if grad is not None:\n self.send(n + '_grad', grad)\n\n if grads_and_vars[len(recv_grads):]:\n train_op = optimizer.apply_gradients(\n grads_and_vars[len(recv_grads):],\n global_step=global_step,\n name=name)\n else:\n train_op = tf.no_op()\n\n return train_op\n\n def _append_summary_hook(self, training_hooks):\n if not training_hooks:\n training_hooks = []\n summary_hook = SummaryHook.get_hook()\n if summary_hook:\n training_hooks.append(summary_hook)\n return training_hooks\n\n def make_spec(self,\n mode,\n predictions=None,\n loss=None,\n train_op=None,\n eval_metric_ops=None,\n training_chief_hooks=None,\n training_hooks=None,\n evaluation_hooks=None,\n prediction_hooks=None):\n if isinstance(predictions, tf.Tensor):\n predictions = {'output': predictions}\n if mode == ModeKeys.TRAIN:\n train_op = tf.group([train_op] + self._train_ops)\n training_hooks = self._append_summary_hook(training_hooks)\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n training_chief_hooks=training_chief_hooks,\n training_hooks=training_hooks,\n evaluation_hooks=evaluation_hooks,\n prediction_hooks=prediction_hooks)\n\n\nclass FLEstimator(object):\n def __init__(self,\n model_fn,\n bridge,\n trainer_master,\n role,\n worker_rank=0,\n application_id=None,\n cluster_spec=None):\n self._model_fn = model_fn\n self._bridge = bridge\n self._trainer_master = trainer_master\n self._role = role\n self._worker_rank = worker_rank\n self._cluster_spec = cluster_spec\n self._application_id = application_id\n\n def _get_features_and_labels_from_input_fn(self, input_fn, mode):\n dataset = input_fn(self._bridge, self._trainer_master)\n features, labels = dataset.make_one_shot_iterator().get_next()\n return features, labels\n\n def _get_model_spec(self, features, labels, mode):\n model = FLModel(self._role, self._bridge,\n features.get('example_id', None),\n exporting=(mode == ModeKeys.PREDICT))\n spec = self._model_fn(model, features, labels, mode)\n return spec, model\n\n def _restore_datablock(self, blk_ids):\n # only chief worker restores from checkpoint.\n if self._worker_rank != 0 or blk_ids is None:\n return True\n block_id_str = as_str_any(blk_ids)\n block_ids = []\n if block_id_str != DATA_CHECKPOINT_INIT_VALUE:\n block_ids = block_id_str.split(\",\")\n logging.info(\"restore: %s\", block_id_str)\n return self._trainer_master.restore_data_block_checkpoint(\n self._application_id, block_ids)\n\n def _cheif_barriar(self, is_chief=False, sync_times=300):\n worker_replicas = os.environ.get('REPLICA_NUM', 0)\n kvstore_type = os.environ.get('KVSTORE_TYPE', 
'etcd')\n db_database, db_addr, db_username, db_password, _ = \\\n get_kvstore_config(kvstore_type)\n kvstore_client = DBClient(db_database,\n db_addr,\n db_username,\n db_password,\n SYNC_PATH)\n sync_path = '%s/%s' % (os.environ['APPLICATION_ID'],\n os.environ['WORKER_RANK'])\n logging.info('Creating a sync flag at %s', sync_path)\n kvstore_client.set_data(sync_path, \"1\")\n if is_chief:\n for _ in range(sync_times):\n sync_list = kvstore_client.get_prefix_kvs(\n os.environ['APPLICATION_ID'])\n logging.info('Sync file pattern is: %s', sync_list)\n if len(sync_list) < worker_replicas:\n logging.info('Count of ready workers is %d',\n len(sync_list))\n time.sleep(6)\n else:\n break\n\n def train(self,\n input_fn,\n checkpoint_path=None,\n save_checkpoint_steps=None,\n save_checkpoint_secs=None):\n if self._cluster_spec is not None:\n device_fn = tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:%d\" % self._worker_rank,\n merge_devices=True,\n cluster=self._cluster_spec)\n cluster_def = self._cluster_spec.as_cluster_def()\n local_address = self._cluster_spec.job_tasks('worker')[\n self._worker_rank]\n server = tf.train.Server(tf.train.ClusterSpec(\n {'local': {\n 0: local_address\n }}),\n job_name='local',\n task_index=0)\n target = 'grpc://' + local_address\n else:\n device_fn = None\n cluster_def = None\n target = None\n\n config = tf.ConfigProto(cluster_def=cluster_def)\n config.inter_op_parallelism_threads = 4\n config.intra_op_parallelism_threads = 4\n config.experimental.share_session_state_in_clusterspec_propagation \\\n = True\n tf.config.set_soft_device_placement(False)\n\n with tf.Graph().as_default() as g:\n with tf.device(device_fn):\n features, labels = self._get_features_and_labels_from_input_fn(\n input_fn, ModeKeys.TRAIN)\n spec, _ = self._get_model_spec(features, labels, ModeKeys.TRAIN)\n\n # Explicitly add a Saver\n if not tf.get_collection(tf.GraphKeys.SAVERS):\n saver = tf.train.Saver(\n sharded=True,\n defer_build=True,\n save_relative_paths=True) # Must set for portability\n tf.add_to_collection(tf.GraphKeys.SAVERS, saver)\n\n listener = DataCheckpointSaverListener(self._trainer_master,\n self._application_id)\n saver_hook = tf.estimator.CheckpointSaverHook(\n checkpoint_path, save_secs=save_checkpoint_secs,\n save_steps=save_checkpoint_steps, listeners=[listener])\n self._bridge.connect()\n\n try:\n with tf.train.MonitoredTrainingSession(\n master=target,\n config=config,\n is_chief=(self._worker_rank == 0),\n chief_only_hooks=[saver_hook],\n checkpoint_dir=checkpoint_path,\n save_checkpoint_steps=save_checkpoint_steps,\n save_checkpoint_secs=save_checkpoint_secs,\n hooks=spec.training_hooks) as sess:\n iter_id = 0\n\n data_checkpoint_value = None\n if hasattr(saver_hook, \"data_checkpoint\"):\n data_checkpoint_value = saver_hook.data_checkpoint\n if not self._restore_datablock(data_checkpoint_value):\n raise ValueError(\"Restore data checkpoint error\")\n\n while not sess.should_stop():\n self._bridge.start(iter_id)\n logging.debug('after bridge start.')\n start_time = time.time()\n sess.run(spec.train_op, feed_dict={})\n end_time = time.time()\n metrics.emit_timer(\n name=\"iter_timer\",\n value=end_time-start_time,\n tags={})\n logging.debug('after session run.')\n self._bridge.commit()\n logging.debug('after bridge commit.')\n iter_id += 1\n finally:\n self._bridge.terminate()\n\n return self\n\n def evaluate(self,\n input_fn,\n checkpoint_path=None):\n if not tf.train.latest_checkpoint(checkpoint_path):\n raise ValueError(\n \"Could not find 
trained model at %s\" % checkpoint_path)\n\n with tf.Graph().as_default():\n features, labels = self._get_features_and_labels_from_input_fn(\n input_fn, ModeKeys.EVAL)\n spec, model = self._get_model_spec(features, labels, ModeKeys.EVAL)\n\n # Track the average loss in default\n eval_metric_ops = spec.eval_metric_ops or {}\n if model_fn_lib.LOSS_METRIC_KEY not in eval_metric_ops:\n loss_metric = tf.metrics.mean(spec.loss)\n eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric\n\n # Create the real eval op\n update_ops, eval_dict = _extract_metric_update_ops(eval_metric_ops)\n update_ops.extend(model._train_ops)\n eval_op = tf.group(*update_ops)\n\n # Also track the global step\n if tf.GraphKeys.GLOBAL_STEP in eval_dict:\n raise ValueError(\n 'Metric with name `global_step` is not allowed, because '\n 'Estimator already defines a default metric with the '\n 'same name.')\n eval_dict[tf.GraphKeys.GLOBAL_STEP] = \\\n tf.train.get_or_create_global_step()\n\n # Prepare the session creator.\n scaffold = tf.train.Scaffold()\n session_creator = tf.train.ChiefSessionCreator(\n scaffold=scaffold,\n checkpoint_dir=checkpoint_path)\n\n # Prepare hooks\n all_hooks = list(spec.evaluation_hooks) or []\n final_ops_hook = tf.train.FinalOpsHook(eval_dict)\n all_hooks.append(final_ops_hook)\n\n # Evaluate over dataset\n self._bridge.connect()\n try:\n with tf.train.MonitoredSession(\n session_creator=session_creator, hooks=all_hooks) as sess:\n if not self._restore_datablock(DATA_CHECKPOINT_INIT_VALUE):\n raise ValueError(\"Restore data checkpoint error\")\n iter_id = 0\n while not sess.should_stop():\n self._bridge.start(iter_id)\n logging.debug('after bridge start.')\n start_time = time.time()\n sess.run(eval_op)\n end_time = time.time()\n metrics.emit_timer(\n name=\"iter_timer\",\n value=end_time-start_time,\n tags={})\n logging.debug('after session run.')\n self._bridge.commit()\n logging.debug('after bridge commit.')\n iter_id += 1\n finally:\n self._bridge.terminate()\n\n # Print result\n logging.info('Metrics for iteration %d: %s',\n iter_id, _dict_to_str(final_ops_hook.final_ops_values))\n return final_ops_hook.final_ops_values\n\n def export_saved_model(self,\n export_dir_base,\n serving_input_receiver_fn,\n checkpoint_path=None):\n with tf.Graph().as_default():\n receiver = serving_input_receiver_fn()\n spec, model = self._get_model_spec(receiver.features, None,\n ModeKeys.PREDICT)\n assert not model.sends, \"Exported model cannot send\"\n assert not model.recvs, \"Exported model cannot receive\"\n\n with tf.Session() as sess:\n saver_for_restore = tf.train.Saver(sharded=True)\n saver_for_restore.restore(\n sess, tf.train.latest_checkpoint(checkpoint_path))\n tf.saved_model.simple_save(sess, export_dir_base,\n receiver.receiver_tensors,\n spec.predictions, None)\n\n return export_dir_base\n\n\ndef _extract_metric_update_ops(eval_dict):\n \"\"\"Separate update operations from metric value operations.\"\"\"\n update_ops = []\n value_ops = {}\n # Sort metrics lexicographically so graph is identical every time.\n for name in sorted(eval_dict.keys()):\n metric_tensor, update_op = eval_dict[name]\n value_ops[name] = metric_tensor\n update_ops.append(update_op)\n return update_ops, value_ops\n\n\ndef _dict_to_str(dictionary):\n \"\"\"Get a `str` representation of a `dict`.\n\n Args:\n dictionary: The `dict` to be represented as `str`.\n\n Returns:\n A `str` representing the `dictionary`.\n \"\"\"\n return ', '.join('%s = %s' % (k, v)\n for k, v in sorted(dictionary.items())\n if not isinstance(v, 
bytes))\n" ]
[ [ "tensorflow.compat.v1.assert_equal", "tensorflow.compat.v1.train.ChiefSessionCreator", "tensorflow.compat.v1.strings.to_hash_bucket_fast", "tensorflow.compat.as_str_any", "tensorflow.compat.v1.train.MonitoredTrainingSession", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.config.set_soft_device_placement", "tensorflow.compat.v1.control_dependencies", "tensorflow.compat.v1.estimator.CheckpointSaverHook", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.train.Saver", "tensorflow.compat.v1.group", "tensorflow.compat.v1.Variable", "tensorflow.compat.v1.train.Scaffold", "tensorflow.compat.v1.device", "tensorflow.compat.v1.saved_model.simple_save", "tensorflow.compat.v1.add_to_collection", "tensorflow.compat.v1.no_op", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.estimator.EstimatorSpec", "tensorflow.compat.v1.train.FinalOpsHook", "tensorflow.compat.v1.metrics.mean", "tensorflow.compat.v1.train.replica_device_setter", "tensorflow.compat.v1.train.MonitoredSession", "tensorflow.compat.v1.ConfigProto", "tensorflow.compat.v1.train.latest_checkpoint", "tensorflow.compat.v1.train.ClusterSpec", "tensorflow.compat.v1.train.get_or_create_global_step", "tensorflow.compat.v1.get_collection" ] ]
Nazukixv/OpenNMT-py
[ "6265ddbbe9053b018714ac1fb4be9ec8adbaa128" ]
[ "onmt/model_builder.py" ]
[ "\"\"\"\nThis file is for models creation, which consults options\nand creates each encoder and decoder accordingly.\n\"\"\"\nimport re\nimport torch\nimport torch.nn as nn\nfrom torch.nn.init import xavier_uniform_\n\nimport onmt.inputters as inputters\nimport onmt.modules\nfrom onmt.encoders.rnn_encoder import RNNEncoder\nfrom onmt.encoders.transformer import TransformerEncoder\nfrom onmt.encoders.cnn_encoder import CNNEncoder\nfrom onmt.encoders.mean_encoder import MeanEncoder\nfrom onmt.encoders.audio_encoder import AudioEncoder\nfrom onmt.encoders.image_encoder import ImageEncoder\n\nfrom onmt.decoders.decoder import InputFeedRNNDecoder, StdRNNDecoder\nfrom onmt.decoders.transformer import TransformerDecoder\nfrom onmt.decoders.cnn_decoder import CNNDecoder\n\nfrom onmt.modules import Embeddings, CopyGenerator\nfrom onmt.utils.misc import use_gpu\nfrom onmt.utils.logging import logger\n\n\ndef build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):\n \"\"\"\n Build an Embeddings instance.\n Args:\n opt: the option in current environment.\n word_dict(Vocab): words dictionary.\n feature_dicts([Vocab], optional): a list of feature dictionary.\n for_encoder(bool): build Embeddings for encoder or decoder?\n \"\"\"\n if for_encoder:\n embedding_dim = opt.src_word_vec_size\n else:\n embedding_dim = opt.tgt_word_vec_size\n\n word_padding_idx = word_dict.stoi[inputters.PAD_WORD]\n num_word_embeddings = len(word_dict)\n\n feats_padding_idx = [feat_dict.stoi[inputters.PAD_WORD]\n for feat_dict in feature_dicts]\n num_feat_embeddings = [len(feat_dict) for feat_dict in\n feature_dicts]\n\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n feat_merge=opt.feat_merge,\n feat_vec_exponent=opt.feat_vec_exponent,\n feat_vec_size=opt.feat_vec_size,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n feat_padding_idx=feats_padding_idx,\n word_vocab_size=num_word_embeddings,\n feat_vocab_sizes=num_feat_embeddings,\n sparse=opt.optim == \"sparseadam\")\n\n\ndef build_encoder(opt, embeddings):\n \"\"\"\n Various encoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this encoder.\n \"\"\"\n if opt.encoder_type == \"transformer\":\n return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size,\n opt.heads, opt.transformer_ff,\n opt.dropout, embeddings)\n elif opt.encoder_type == \"cnn\":\n return CNNEncoder(opt.enc_layers, opt.enc_rnn_size,\n opt.cnn_kernel_width,\n opt.dropout, embeddings)\n elif opt.encoder_type == \"mean\":\n return MeanEncoder(opt.enc_layers, embeddings)\n else:\n # \"rnn\" or \"brnn\"\n return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,\n opt.enc_rnn_size, opt.dropout, embeddings,\n opt.bridge)\n\n\ndef build_decoder(opt, embeddings):\n \"\"\"\n Various decoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this decoder.\n \"\"\"\n if opt.decoder_type == \"transformer\":\n return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size,\n opt.heads, opt.transformer_ff,\n opt.global_attention, opt.copy_attn,\n opt.self_attn_type,\n opt.dropout, embeddings)\n elif opt.decoder_type == \"cnn\":\n return CNNDecoder(opt.dec_layers, opt.dec_rnn_size,\n opt.global_attention, opt.copy_attn,\n opt.cnn_kernel_width, opt.dropout,\n embeddings)\n elif opt.input_feed:\n return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,\n opt.dec_layers, opt.dec_rnn_size,\n opt.global_attention,\n 
opt.global_attention_function,\n opt.coverage_attn,\n opt.context_gate,\n opt.copy_attn,\n opt.dropout,\n embeddings,\n opt.reuse_copy_attn)\n else:\n return StdRNNDecoder(opt.rnn_type, opt.brnn,\n opt.dec_layers, opt.dec_rnn_size,\n opt.global_attention,\n opt.global_attention_function,\n opt.coverage_attn,\n opt.context_gate,\n opt.copy_attn,\n opt.dropout,\n embeddings,\n opt.reuse_copy_attn)\n\n\ndef load_test_model(opt, dummy_opt, model_path=None):\n if model_path is None:\n model_path = opt.models[0]\n checkpoint = torch.load(model_path,\n map_location=lambda storage, loc: storage)\n fields = inputters.load_fields_from_vocab(\n checkpoint['vocab'], data_type=opt.data_type)\n\n model_opt = checkpoint['opt']\n\n for arg in dummy_opt:\n if arg not in model_opt:\n model_opt.__dict__[arg] = dummy_opt[arg]\n model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)\n model.eval()\n model.generator.eval()\n return fields, model, model_opt\n\n\ndef build_base_model(model_opt, fields, gpu, checkpoint=None):\n \"\"\"\n Args:\n model_opt: the option loaded from checkpoint.\n fields: `Field` objects for the model.\n gpu(bool): whether to use gpu.\n checkpoint: the model gnerated by train phase, or a resumed snapshot\n model from a stopped training.\n Returns:\n the NMTModel.\n \"\"\"\n assert model_opt.model_type in [\"text\", \"img\", \"audio\"], \\\n (\"Unsupported model type %s\" % (model_opt.model_type))\n\n # for backward compatibility\n if model_opt.rnn_size != -1:\n model_opt.enc_rnn_size = model_opt.rnn_size\n model_opt.dec_rnn_size = model_opt.rnn_size\n if model_opt.model_type == 'text' and \\\n model_opt.enc_rnn_size != model_opt.dec_rnn_size:\n raise AssertionError(\"\"\"We do not support different encoder and\n decoder rnn sizes for translation now.\"\"\")\n\n # Build encoder.\n if model_opt.model_type == \"text\":\n src_dict = fields[\"src\"].vocab\n feature_dicts = inputters.collect_feature_vocabs(fields, 'src')\n src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)\n encoder = build_encoder(model_opt, src_embeddings)\n elif model_opt.model_type == \"img\":\n if (\"image_channel_size\" not in model_opt.__dict__):\n image_channel_size = 3\n else:\n image_channel_size = model_opt.image_channel_size\n\n encoder = ImageEncoder(model_opt.enc_layers,\n model_opt.brnn,\n model_opt.enc_rnn_size,\n model_opt.dropout,\n image_channel_size)\n elif model_opt.model_type == \"audio\":\n encoder = AudioEncoder(model_opt.rnn_type,\n model_opt.enc_layers,\n model_opt.dec_layers,\n model_opt.brnn,\n model_opt.enc_rnn_size,\n model_opt.dec_rnn_size,\n model_opt.audio_enc_pooling,\n model_opt.dropout,\n model_opt.sample_rate,\n model_opt.window_size)\n\n # Build decoder.\n tgt_dict = fields[\"tgt\"].vocab\n feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')\n tgt_embeddings = build_embeddings(model_opt, tgt_dict,\n feature_dicts, for_encoder=False)\n\n # Share the embedding matrix - preprocess with share_vocab required.\n if model_opt.share_embeddings:\n # src/tgt vocab should be the same if `-share_vocab` is specified.\n if src_dict != tgt_dict:\n raise AssertionError('The `-share_vocab` should be set during '\n 'preprocess if you use share_embeddings!')\n\n tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight\n\n decoder = build_decoder(model_opt, tgt_embeddings)\n\n # Build NMTModel(= encoder + decoder).\n device = torch.device(\"cuda\" if gpu else \"cpu\")\n model = onmt.models.NMTModel(encoder, decoder)\n\n # Build Generator.\n if not 
model_opt.copy_attn:\n if model_opt.generator_function == \"sparsemax\":\n gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)\n else:\n gen_func = nn.LogSoftmax(dim=-1)\n generator = nn.Sequential(\n nn.Linear(model_opt.dec_rnn_size, len(fields[\"tgt\"].vocab)),\n gen_func\n )\n if model_opt.share_decoder_embeddings:\n generator[0].weight = decoder.embeddings.word_lut.weight\n else:\n generator = CopyGenerator(model_opt.dec_rnn_size,\n fields[\"tgt\"].vocab)\n\n # Load the model states from checkpoint or initialize them.\n if checkpoint is not None:\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.b_2',\n r'\\1.layer_norm\\2.bias', s)\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.a_2',\n r'\\1.layer_norm\\2.weight', s)\n return s\n\n checkpoint['model'] = \\\n {fix_key(k): v for (k, v) in checkpoint['model'].items()}\n # end of patch for backward compatibility\n\n model.load_state_dict(checkpoint['model'], strict=False)\n generator.load_state_dict(checkpoint['generator'], strict=False)\n else:\n if model_opt.param_init != 0.0:\n for p in model.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n for p in generator.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n if model_opt.param_init_glorot:\n for p in model.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if hasattr(model.encoder, 'embeddings'):\n model.encoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)\n if hasattr(model.decoder, 'embeddings'):\n model.decoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)\n\n # Add generator to model (this registers it as parameter of model).\n model.generator = generator\n model.to(device)\n\n return model\n\n\ndef build_model(model_opt, opt, fields, checkpoint):\n \"\"\" Build the Model \"\"\"\n logger.info('Building model...')\n model = build_base_model(model_opt, fields,\n use_gpu(opt), checkpoint)\n logger.info(model)\n return model\n" ]
[ [ "torch.nn.LogSoftmax", "torch.nn.init.xavier_uniform_", "torch.device", "torch.load" ] ]
eepsmedia/ping-pong-bounce
[ "8e06363032da88976f14146704af26d9312d195a" ]
[ "code/extractWAVdata.py" ]
[ "\"\"\"Convert a .wav file to .csv\n\nUses the `wave` package to convert a .wav file to a .csv. \nAssumes that the file is monoaural (one channel).\n\nBe sure to edit the code to point to correct values of `inFileName` and `outFileName`\n\"\"\"\n\nimport wave\nimport numpy\n\ninFileName = \"../data/pingpong.wav\"\noutFileName = '../data/pingpong raw redux.csv'\n\nf = wave.open(inFileName, 'rb')\nparams = f.getparams()\n\nprint(\"There are {} frames.\".format(params.nframes))\n\nbytesData = f.readframes(params.nframes)\nf.close()\n\na = numpy.frombuffer(bytesData, dtype=numpy.dtype('i2')) # answer is an ndarray\n\ni = 0\n\nwith open(outFileName, 'w') as out:\n\n out.write('time, sound\\n')\n\n for val in a:\n time = 1000 * i / params.framerate # milliseconds\n theLine = '{:g}, {:g}\\n'.format(time, val)\n out.write(theLine)\n i += 1\n\nprint(\"Wrote {} frames.\".format(i))\n" ]
[ [ "numpy.dtype" ] ]
m4rkl1u/tensorflow
[ "90a8825c7ae9719e8969d45040b4155b0e7de130" ]
[ "tensorflow/python/ops/variables.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Variable class.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport enum # pylint: disable=g-bad-import-order\nimport functools\nimport os\nimport six\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import variable_pb2\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import gen_state_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training.checkpointable import base as checkpointable\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import tf_should_use\nfrom tensorflow.python.util.deprecation import deprecated\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef default_variable_creator(_, **kwds):\n del kwds\n raise NotImplementedError(\"variable_scope needs to be imported\")\n\n\ndef default_variable_creator_v2(_, **kwds):\n del kwds\n raise NotImplementedError(\"variable_scope needs to be imported\")\n\n\ndef _make_getter(captured_getter, captured_previous):\n \"\"\"To avoid capturing loop variables.\"\"\"\n def getter(**kwargs):\n return captured_getter(captured_previous, **kwargs)\n return getter\n\n\ndef _has_cycle(op, path):\n \"\"\"Detect cycles in the dependencies of `initial_value`.\"\"\"\n if op.name in path:\n return True\n path.add(op.name)\n for op_input in op.inputs:\n if _has_cycle(op_input.op, path):\n return True\n for op_control_input in op.control_inputs:\n if _has_cycle(op_control_input, path):\n return True\n path.remove(op.name)\n return False\n\n\n@tf_export(\"VariableSynchronization\")\nclass VariableSynchronization(enum.Enum):\n \"\"\"Indicates when a distributed variable will be synced.\n\n * `AUTO`: Indicates that the synchronization will be determined by the current\n `DistributionStrategy` (eg. With `MirroredStrategy` this would be\n `ON_WRITE`).\n * `NONE`: Indicates that there will only be one copy of the variable, so\n there is no need to sync.\n * `ON_WRITE`: Indicates that the variable will be updated across devices\n every time it is written.\n * `ON_READ`: Indicates that the variable will be aggregated across devices\n when it is read (eg. 
when checkpointing or when evaluating an op that uses\n the variable).\n \"\"\"\n AUTO = 0\n NONE = 1\n ON_WRITE = 2\n ON_READ = 3\n\n\n@tf_export(\"VariableAggregation\", v1=[])\nclass VariableAggregationV2(enum.Enum):\n \"\"\"Indicates how a distributed variable will be aggregated.\n\n `tf.contrib.distribute.DistributionStrategy` distributes a model by making\n multiple copies (called \"replicas\") acting data-parallel on different elements\n of the input batch. When performing some variable-update operation, say\n `var.assign_add(x)`, in a model, we need to resolve how to combine the\n different values for `x` computed in the different replicas.\n\n * `NONE`: This is the default, giving an error if you use a\n variable-update operation with multiple replicas.\n * `SUM`: Add the updates across replicas.\n * `MEAN`: Take the arithmetic mean (\"average\") of the updates across replicas.\n * `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same\n update, but we only want to perform the update once. Used, e.g., for the\n global step counter.\n \"\"\"\n NONE = 0\n SUM = 1\n MEAN = 2\n ONLY_FIRST_REPLICA = 3\n\n\n@tf_export(v1=[\"VariableAggregation\"])\nclass VariableAggregation(enum.Enum):\n NONE = 0\n SUM = 1\n MEAN = 2\n ONLY_FIRST_REPLICA = 3\n ONLY_FIRST_TOWER = 3 # DEPRECATED\n\n\nVariableAggregation.__doc__ = (\n VariableAggregationV2.__doc__ +\n \"* `ONLY_FIRST_TOWER`: Deprecated alias for `ONLY_FIRST_REPLICA`.\\n \")\n\n\nclass VariableMetaclass(type):\n \"\"\"Metaclass to allow construction of tf.Variable to be overridden.\"\"\"\n\n def _variable_v1_call(cls,\n initial_value=None,\n trainable=None,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n expected_shape=None,\n import_scope=None,\n constraint=None,\n use_resource=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE):\n \"\"\"Call on Variable class. Useful to force the signature.\"\"\"\n previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)\n for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access\n previous_getter = _make_getter(getter, previous_getter)\n\n # Reset `aggregation` that is explicitly set as `None` to the enum NONE.\n if aggregation is None:\n aggregation = VariableAggregation.NONE\n return previous_getter(\n initial_value=initial_value,\n trainable=trainable,\n collections=collections,\n validate_shape=validate_shape,\n caching_device=caching_device,\n name=name,\n variable_def=variable_def,\n dtype=dtype,\n expected_shape=expected_shape,\n import_scope=import_scope,\n constraint=constraint,\n use_resource=use_resource,\n synchronization=synchronization,\n aggregation=aggregation)\n\n def _variable_v2_call(cls,\n initial_value=None,\n trainable=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n import_scope=None,\n constraint=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE):\n \"\"\"Call on Variable class. 
Useful to force the signature.\"\"\"\n previous_getter = lambda **kws: default_variable_creator_v2(None, **kws)\n for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access\n previous_getter = _make_getter(getter, previous_getter)\n\n # Reset `aggregation` that is explicitly set as `None` to the enum NONE.\n if aggregation is None:\n aggregation = VariableAggregation.NONE\n return previous_getter(\n initial_value=initial_value,\n trainable=trainable,\n validate_shape=validate_shape,\n caching_device=caching_device,\n name=name,\n variable_def=variable_def,\n dtype=dtype,\n import_scope=import_scope,\n constraint=constraint,\n synchronization=synchronization,\n aggregation=aggregation)\n\n def __call__(cls, *args, **kwargs):\n if cls is VariableV1:\n return cls._variable_v1_call(*args, **kwargs)\n elif cls is Variable:\n return cls._variable_v2_call(*args, **kwargs)\n else:\n return super(VariableMetaclass, cls).__call__(*args, **kwargs)\n\n\n@tf_export(\"Variable\", v1=[])\nclass Variable(six.with_metaclass(VariableMetaclass,\n checkpointable.CheckpointableBase)):\n \"\"\"See the [Variables Guide](https://tensorflow.org/guide/variables).\n\n A variable maintains state in the graph across calls to `run()`. You add a\n variable to the graph by constructing an instance of the class `Variable`.\n\n The `Variable()` constructor requires an initial value for the variable,\n which can be a `Tensor` of any type and shape. The initial value defines the\n type and shape of the variable. After construction, the type and shape of\n the variable are fixed. The value can be changed using one of the assign\n methods.\n\n If you want to change the shape of a variable later you have to use an\n `assign` Op with `validate_shape=False`.\n\n Just like any `Tensor`, variables created with `Variable()` can be used as\n inputs for other Ops in the graph. Additionally, all the operators\n overloaded for the `Tensor` class are carried over to variables, so you can\n also add nodes to the graph by just doing arithmetic on variables.\n\n ```python\n import tensorflow as tf\n\n # Create a variable.\n w = tf.Variable(<initial-value>, name=<optional-name>)\n\n # Use the variable in the graph like any Tensor.\n y = tf.matmul(w, ...another variable or tensor...)\n\n # The overloaded operators are available too.\n z = tf.sigmoid(w + y)\n\n # Assign a new value to the variable with `assign()` or a related method.\n w.assign(w + 1.0)\n w.assign_add(1.0)\n ```\n\n When you launch the graph, variables have to be explicitly initialized before\n you can run Ops that use their value. You can initialize a variable by\n running its *initializer op*, restoring the variable from a save file, or\n simply running an `assign` Op that assigns a value to the variable. In fact,\n the variable *initializer op* is just an `assign` Op that assigns the\n variable's initial value to the variable itself.\n\n ```python\n # Launch the graph in a session.\n with tf.Session() as sess:\n # Run the variable initializer.\n sess.run(w.initializer)\n # ...you now can run ops that use the value of 'w'...\n ```\n\n The most common initialization pattern is to use the convenience function\n `global_variables_initializer()` to add an Op to the graph that initializes\n all the variables. 
You then run that Op after launching the graph.\n\n ```python\n # Add an Op to initialize global variables.\n init_op = tf.global_variables_initializer()\n\n # Launch the graph in a session.\n with tf.Session() as sess:\n # Run the Op that initializes global variables.\n sess.run(init_op)\n # ...you can now run any Op that uses variable values...\n ```\n\n If you need to create a variable with an initial value dependent on another\n variable, use the other variable's `initialized_value()`. This ensures that\n variables are initialized in the right order.\n\n All variables are automatically collected in the graph where they are\n created. By default, the constructor adds the new variable to the graph\n collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function\n `global_variables()` returns the contents of that collection.\n\n When building a machine learning model it is often convenient to distinguish\n between variables holding the trainable model parameters and other variables\n such as a `global step` variable used to count training steps. To make this\n easier, the variable constructor supports a `trainable=<bool>` parameter. If\n `True`, the new variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. The convenience function\n `trainable_variables()` returns the contents of this collection. The\n various `Optimizer` classes use this collection as the default list of\n variables to optimize.\n\n WARNING: tf.Variable objects by default have a non-intuitive memory model. A\n Variable is represented internally as a mutable Tensor which can\n non-deterministically alias other Tensors in a graph. The set of operations\n which consume a Variable and can lead to aliasing is undetermined and can\n change across TensorFlow versions. Avoid writing code which relies on the\n value of a Variable either changing or not changing as other operations\n happen. For example, using Variable objects or simple functions thereof as\n predicates in a `tf.cond` is dangerous and error-prone:\n\n ```\n v = tf.Variable(True)\n tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken.\n ```\n\n Here replacing adding `use_resource=True` when constructing the variable will\n fix any nondeterminism issues:\n ```\n v = tf.Variable(True, use_resource=True)\n tf.cond(v, lambda: v.assign(False), my_false_fn)\n ```\n\n To use the replacement for variables which does\n not have these issues:\n\n * Add `use_resource=True` when constructing `tf.Variable`;\n * Call `tf.get_variable_scope().set_use_resource(True)` inside a\n `tf.variable_scope` before the `tf.get_variable()` call.\n \"\"\"\n\n def __init__(self,\n initial_value=None,\n trainable=True,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n import_scope=None,\n constraint=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE):\n \"\"\"Creates a new variable with value `initial_value`.\n\n The new variable is added to the graph collections listed in `collections`,\n which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n\n If `trainable` is `True` the variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`.\n\n This constructor creates both a `variable` Op and an `assign` Op to set the\n variable to its initial value.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. 
The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n trainable: If `True`, the default, GradientTapes automatically watch uses\n of this variable.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device.\n If not `None`, caches on another device. Typical use is to cache\n on the device where the Ops using the Variable reside, to deduplicate\n copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n variable_def: `VariableDef` protocol buffer. If not `None`, recreates\n the Variable object with its contents, referencing the variable's nodes\n in the graph, which must already exist. The graph is not changed.\n `variable_def` and the other arguments are mutually exclusive.\n dtype: If set, initial_value will be converted to the given type.\n If `None`, either the datatype will be kept (if `initial_value` is\n a Tensor), or `convert_to_tensor` will decide.\n import_scope: Optional `string`. Name scope to add to the\n `Variable.` Only used when initializing from protocol buffer.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize. If `synchronization` is set to `ON_READ`,\n `trainable` must not be set to `True`.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n\n Raises:\n ValueError: If both `variable_def` and initial_value are specified.\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n raise NotImplementedError\n\n def __repr__(self):\n raise NotImplementedError\n\n def value(self):\n \"\"\"Returns the last snapshot of this variable.\n\n You usually do not need to call this method as all ops that need the value\n of the variable call it automatically through a `convert_to_tensor()` call.\n\n Returns a `Tensor` which holds the value of the variable. You can not\n assign a new value to this tensor as it is not a reference to the variable.\n\n To avoid copies, if the consumer of the returned value is on the same device\n as the variable, this actually returns the live value of the variable, not\n a copy. Updates to the variable are seen by the consumer. 
If the consumer\n is on a different device it will get a copy of the variable.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n raise NotImplementedError\n\n def read_value(self):\n \"\"\"Returns the value of this variable, read in the current context.\n\n Can be different from value() if it's on another device, with control\n dependencies, etc.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n raise NotImplementedError\n\n def set_shape(self, shape):\n \"\"\"Overrides the shape for this variable.\n\n Args:\n shape: the `TensorShape` representing the overridden shape.\n \"\"\"\n raise NotImplementedError\n\n @property\n def trainable(self):\n raise NotImplementedError\n\n def eval(self, session=None):\n \"\"\"In a session, computes and returns the value of this variable.\n\n This is not a graph construction method, it does not add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. See `tf.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n print(v.eval(sess))\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n print(v.eval())\n ```\n\n Args:\n session: The session to use to evaluate this variable. If\n none, the default session is used.\n\n Returns:\n A numpy `ndarray` with a copy of the value of this variable.\n \"\"\"\n raise NotImplementedError\n\n def initialized_value(self):\n \"\"\"Returns the value of the initialized variable.\n\n You should use this instead of the variable itself to initialize another\n variable with a value that depends on the value of this variable.\n\n ```python\n # Initialize 'v' with a random tensor.\n v = tf.Variable(tf.truncated_normal([10, 40]))\n # Use `initialized_value` to guarantee that `v` has been\n # initialized before its value is used to initialize `w`.\n # The random values are picked only once.\n w = tf.Variable(v.initialized_value() * 2.0)\n ```\n\n Returns:\n A `Tensor` holding the value of this variable after its initializer\n has run.\n \"\"\"\n raise NotImplementedError\n\n @property\n def initial_value(self):\n \"\"\"Returns the Tensor used as the initial value for the variable.\n\n Note that this is different from `initialized_value()` which runs\n the op that initializes the variable before returning its value.\n This method returns the tensor that is used by the op that initializes\n the variable.\n\n Returns:\n A `Tensor`.\n \"\"\"\n raise NotImplementedError\n\n @property\n def constraint(self):\n \"\"\"Returns the constraint function associated with this variable.\n\n Returns:\n The constraint function that was passed to the variable constructor.\n Can be `None` if no constraint was passed.\n \"\"\"\n raise NotImplementedError\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n \"\"\"Assigns a new value to the variable.\n\n This is essentially a shortcut for `assign(self, value)`.\n\n Args:\n value: A `Tensor`. 
The new value for this variable.\n use_locking: If `True`, use locking during the assignment.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the assignment has completed.\n \"\"\"\n raise NotImplementedError\n\n def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Adds a value to this variable.\n\n This is essentially a shortcut for `assign_add(self, delta)`.\n\n Args:\n delta: A `Tensor`. The value to add to this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the addition has completed.\n \"\"\"\n raise NotImplementedError\n\n def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Subtracts a value from this variable.\n\n This is essentially a shortcut for `assign_sub(self, delta)`.\n\n Args:\n delta: A `Tensor`. The value to subtract from this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the subtraction has completed.\n \"\"\"\n raise NotImplementedError\n\n def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Subtracts `IndexedSlices` from this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be subtracted from this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_add(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Adds `IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_update(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Assigns `IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_nd_sub(self, indices, updates, name=None):\n \"\"\"Applies sparse subtraction to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing 
indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_sub(indices, updates)\n with tf.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, -9, 3, -6, -6, 6, 7, -4]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_nd_add(self, indices, updates, name=None):\n \"\"\"Applies sparse addition to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. 
In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n add = ref.scatter_nd_add(indices, updates)\n with tf.Session() as sess:\n print sess.run(add)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def scatter_nd_update(self, indices, updates, name=None):\n \"\"\"Applies sparse assignment to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_assign(indices, updates)\n with tf.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n raise NotImplementedError\n\n def count_up_to(self, limit):\n \"\"\"Increments this variable until it reaches `limit`.\n\n When that Op is run it tries to increment the variable by `1`. If\n incrementing the variable would bring it above `limit` then the Op raises\n the exception `OutOfRangeError`.\n\n If no error is raised, the Op outputs the value of the variable before\n the increment.\n\n This is essentially a shortcut for `count_up_to(self, limit)`.\n\n Args:\n limit: value at which incrementing the variable raises an error.\n\n Returns:\n A `Tensor` that will hold the variable value before the increment. If no\n other Op modifies this variable, the values produced will all be\n distinct.\n \"\"\"\n raise NotImplementedError\n\n def load(self, value, session=None):\n \"\"\"Load new value into this variable.\n\n Writes new value to variable's memory. Doesn't add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. 
See `tf.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n v.load([2, 3], sess)\n print(v.eval(sess)) # prints [2 3]\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n v.load([3, 4], sess)\n print(v.eval()) # prints [3 4]\n ```\n\n Args:\n value: New variable value\n session: The session to use to evaluate this variable. If\n none, the default session is used.\n\n Raises:\n ValueError: Session is not passed and no default session\n \"\"\"\n raise NotImplementedError\n\n # Conversion to tensor.\n @staticmethod\n def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name\n \"\"\"Utility function for converting a Variable to a Tensor.\"\"\"\n _ = name\n if dtype and not dtype.is_compatible_with(v.dtype):\n raise ValueError(\n \"Incompatible type conversion requested to type '%s' for variable \"\n \"of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n return v._ref() # pylint: disable=protected-access\n else:\n return v.value()\n\n @classmethod\n def _OverloadAllOperators(cls): # pylint: disable=invalid-name\n \"\"\"Register overloads for all operators.\"\"\"\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n cls._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(cls, \"__getitem__\", array_ops._SliceHelperVar)\n\n @classmethod\n def _OverloadOperator(cls, operator): # pylint: disable=invalid-name\n \"\"\"Defer an operator overload to `ops.Tensor`.\n\n We pull the operator out of ops.Tensor dynamically to avoid ordering issues.\n\n Args:\n operator: string. The operator name.\n \"\"\"\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args, **kwargs):\n # pylint: disable=protected-access\n return tensor_oper(a._AsTensor(), *args, **kwargs)\n\n functools.update_wrapper(_run_op, tensor_oper)\n setattr(cls, operator, _run_op)\n\n def __iter__(self):\n \"\"\"Dummy method to prevent iteration. Do not call.\n\n NOTE(mrry): If we register __getitem__ as an overloaded operator,\n Python will valiantly attempt to iterate over the variable's Tensor from 0\n to infinity. 
Declaring this method prevents this unintended behavior.\n\n Raises:\n TypeError: when invoked.\n \"\"\"\n raise TypeError(\"'Variable' object is not iterable.\")\n\n # NOTE(mrry): This enables the Variable's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Variable class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Variables interact\n # with ndarrays.\n __array_priority__ = 100\n\n @property\n def name(self):\n \"\"\"The name of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def initializer(self):\n \"\"\"The initializer operation for this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def device(self):\n \"\"\"The device of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def dtype(self):\n \"\"\"The `DType` of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def op(self):\n \"\"\"The `Operation` of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def graph(self):\n \"\"\"The `Graph` of this variable.\"\"\"\n raise NotImplementedError\n\n @property\n def shape(self):\n \"\"\"The `TensorShape` of this variable.\n\n Returns:\n A `TensorShape`.\n \"\"\"\n raise NotImplementedError\n\n def get_shape(self):\n \"\"\"Alias of Variable.shape.\"\"\"\n raise NotImplementedError\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts a `Variable` to a `VariableDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `VariableDef` protocol buffer, or `None` if the `Variable` is not\n in the specified name scope.\n \"\"\"\n raise NotImplementedError\n\n @staticmethod\n def from_proto(variable_def, import_scope=None):\n \"\"\"Returns a `Variable` object created from `variable_def`.\"\"\"\n return RefVariable(variable_def=variable_def,\n import_scope=import_scope)\n\n class SaveSliceInfo(object):\n \"\"\"Information on how to save this Variable as a slice.\n\n Provides internal support for saving variables as slices of a larger\n variable. This API is not public and is subject to change.\n\n Available properties:\n\n * full_name\n * full_shape\n * var_offset\n * var_shape\n \"\"\"\n\n def __init__(self,\n full_name=None,\n full_shape=None,\n var_offset=None,\n var_shape=None,\n save_slice_info_def=None,\n import_scope=None):\n \"\"\"Create a `SaveSliceInfo`.\n\n Args:\n full_name: Name of the full variable of which this `Variable` is a\n slice.\n full_shape: Shape of the full variable, as a list of int.\n var_offset: Offset of this `Variable` into the full variable, as a\n list of int.\n var_shape: Shape of this `Variable`, as a list of int.\n save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,\n recreates the SaveSliceInfo object its contents.\n `save_slice_info_def` and other arguments are mutually\n exclusive.\n import_scope: Optional `string`. Name scope to add. 
Only used\n when initializing from protocol buffer.\n \"\"\"\n if save_slice_info_def:\n assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)\n self.full_name = ops.prepend_name_scope(\n save_slice_info_def.full_name, import_scope=import_scope)\n self.full_shape = [i for i in save_slice_info_def.full_shape]\n self.var_offset = [i for i in save_slice_info_def.var_offset]\n self.var_shape = [i for i in save_slice_info_def.var_shape]\n else:\n self.full_name = full_name\n self.full_shape = full_shape\n self.var_offset = var_offset\n self.var_shape = var_shape\n\n @property\n def spec(self):\n \"\"\"Computes the spec string used for saving.\"\"\"\n full_shape_str = \" \".join([\"%d\" % d for d in self.full_shape]) + \" \"\n sl_spec = \":\".join([\n \"%d,%d\" % (o, s) for o, s in zip(self.var_offset, self.var_shape)\n ])\n return full_shape_str + sl_spec\n\n def to_proto(self, export_scope=None):\n \"\"\"Returns a SaveSliceInfoDef() proto.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not\n in the specified name scope.\n \"\"\"\n if (export_scope is None or\n self.full_name.startswith(export_scope)):\n save_slice_info_def = variable_pb2.SaveSliceInfoDef()\n save_slice_info_def.full_name = ops.strip_name_scope(\n self.full_name, export_scope)\n for i in self.full_shape:\n save_slice_info_def.full_shape.append(i)\n for i in self.var_offset:\n save_slice_info_def.var_offset.append(i)\n for i in self.var_shape:\n save_slice_info_def.var_shape.append(i)\n return save_slice_info_def\n else:\n return None\n\n def __iadd__(self, other):\n raise NotImplementedError\n\n def __isub__(self, other):\n raise NotImplementedError\n\n def __imul__(self, other):\n raise NotImplementedError\n\n def __idiv__(self, other):\n raise NotImplementedError\n\n def __itruediv__(self, other):\n raise NotImplementedError\n\n def __irealdiv__(self, other):\n raise NotImplementedError\n\n def __ipow__(self, other):\n raise NotImplementedError\n\n\n@tf_export(v1=[\"Variable\"])\nclass VariableV1(Variable):\n \"\"\"See the [Variables Guide](https://tensorflow.org/guide/variables).\n\n A variable maintains state in the graph across calls to `run()`. You add a\n variable to the graph by constructing an instance of the class `Variable`.\n\n The `Variable()` constructor requires an initial value for the variable,\n which can be a `Tensor` of any type and shape. The initial value defines the\n type and shape of the variable. After construction, the type and shape of\n the variable are fixed. The value can be changed using one of the assign\n methods.\n\n If you want to change the shape of a variable later you have to use an\n `assign` Op with `validate_shape=False`.\n\n Just like any `Tensor`, variables created with `Variable()` can be used as\n inputs for other Ops in the graph. 
Additionally, all the operators\n overloaded for the `Tensor` class are carried over to variables, so you can\n also add nodes to the graph by just doing arithmetic on variables.\n\n ```python\n import tensorflow as tf\n\n # Create a variable.\n w = tf.Variable(<initial-value>, name=<optional-name>)\n\n # Use the variable in the graph like any Tensor.\n y = tf.matmul(w, ...another variable or tensor...)\n\n # The overloaded operators are available too.\n z = tf.sigmoid(w + y)\n\n # Assign a new value to the variable with `assign()` or a related method.\n w.assign(w + 1.0)\n w.assign_add(1.0)\n ```\n\n When you launch the graph, variables have to be explicitly initialized before\n you can run Ops that use their value. You can initialize a variable by\n running its *initializer op*, restoring the variable from a save file, or\n simply running an `assign` Op that assigns a value to the variable. In fact,\n the variable *initializer op* is just an `assign` Op that assigns the\n variable's initial value to the variable itself.\n\n ```python\n # Launch the graph in a session.\n with tf.Session() as sess:\n # Run the variable initializer.\n sess.run(w.initializer)\n # ...you now can run ops that use the value of 'w'...\n ```\n\n The most common initialization pattern is to use the convenience function\n `global_variables_initializer()` to add an Op to the graph that initializes\n all the variables. You then run that Op after launching the graph.\n\n ```python\n # Add an Op to initialize global variables.\n init_op = tf.global_variables_initializer()\n\n # Launch the graph in a session.\n with tf.Session() as sess:\n # Run the Op that initializes global variables.\n sess.run(init_op)\n # ...you can now run any Op that uses variable values...\n ```\n\n If you need to create a variable with an initial value dependent on another\n variable, use the other variable's `initialized_value()`. This ensures that\n variables are initialized in the right order.\n\n All variables are automatically collected in the graph where they are\n created. By default, the constructor adds the new variable to the graph\n collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function\n `global_variables()` returns the contents of that collection.\n\n When building a machine learning model it is often convenient to distinguish\n between variables holding the trainable model parameters and other variables\n such as a `global step` variable used to count training steps. To make this\n easier, the variable constructor supports a `trainable=<bool>` parameter. If\n `True`, the new variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. The convenience function\n `trainable_variables()` returns the contents of this collection. The\n various `Optimizer` classes use this collection as the default list of\n variables to optimize.\n\n WARNING: tf.Variable objects by default have a non-intuitive memory model. A\n Variable is represented internally as a mutable Tensor which can\n non-deterministically alias other Tensors in a graph. The set of operations\n which consume a Variable and can lead to aliasing is undetermined and can\n change across TensorFlow versions. Avoid writing code which relies on the\n value of a Variable either changing or not changing as other operations\n happen. 
For example, using Variable objects or simple functions thereof as\n predicates in a `tf.cond` is dangerous and error-prone:\n\n ```\n v = tf.Variable(True)\n tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken.\n ```\n\n Here replacing adding `use_resource=True` when constructing the variable will\n fix any nondeterminism issues:\n ```\n v = tf.Variable(True, use_resource=True)\n tf.cond(v, lambda: v.assign(False), my_false_fn)\n ```\n\n To use the replacement for variables which does\n not have these issues:\n\n * Add `use_resource=True` when constructing `tf.Variable`;\n * Call `tf.get_variable_scope().set_use_resource(True)` inside a\n `tf.variable_scope` before the `tf.get_variable()` call.\n \"\"\"\n\n def __init__(self, # pylint: disable=super-init-not-called\n initial_value=None,\n trainable=True,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n expected_shape=None,\n import_scope=None,\n constraint=None,\n use_resource=None,\n synchronization=VariableSynchronization.AUTO,\n aggregation=VariableAggregation.NONE):\n \"\"\"Creates a new variable with value `initial_value`.\n\n The new variable is added to the graph collections listed in `collections`,\n which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n\n If `trainable` is `True` the variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`.\n\n This constructor creates both a `variable` Op and an `assign` Op to set the\n variable to its initial value.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n trainable: If `True`, the default, also adds the variable to the graph\n collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as\n the default list of variables to use by the `Optimizer` classes.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device.\n If not `None`, caches on another device. Typical use is to cache\n on the device where the Ops using the Variable reside, to deduplicate\n copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n variable_def: `VariableDef` protocol buffer. If not `None`, recreates\n the Variable object with its contents, referencing the variable's nodes\n in the graph, which must already exist. The graph is not changed.\n `variable_def` and the other arguments are mutually exclusive.\n dtype: If set, initial_value will be converted to the given type.\n If `None`, either the datatype will be kept (if `initial_value` is\n a Tensor), or `convert_to_tensor` will decide.\n expected_shape: A TensorShape. If set, initial_value is expected\n to have this shape.\n import_scope: Optional `string`. 
Name scope to add to the\n `Variable.` Only used when initializing from protocol buffer.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n use_resource: whether to use resource variables.\n synchronization: unused\n aggregation: unused\n\n Raises:\n ValueError: If both `variable_def` and initial_value are specified.\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n\n SaveSliceInfo = Variable.SaveSliceInfo\n\n\n# TODO(apassos): do not repeat all comments here\nclass RefVariable(VariableV1):\n \"\"\"Ref-based implementation of variables.\"\"\"\n\n def __init__(self, # pylint: disable=super-init-not-called\n initial_value=None,\n trainable=True,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n variable_def=None,\n dtype=None,\n expected_shape=None,\n import_scope=None,\n constraint=None):\n \"\"\"Creates a new variable with value `initial_value`.\n\n The new variable is added to the graph collections listed in `collections`,\n which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n\n If `trainable` is `True` the variable is also added to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`.\n\n This constructor creates both a `variable` Op and an `assign` Op to set the\n variable to its initial value.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called. In\n that case, `dtype` must be specified. (Note that initializer functions\n from init_ops.py must first be bound to a shape before being used here.)\n trainable: If `True`, the default, also adds the variable to the graph\n collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as\n the default list of variables to use by the `Optimizer` classes.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string describing where the Variable\n should be cached for reading. Defaults to the Variable's device.\n If not `None`, caches on another device. Typical use is to cache\n on the device where the Ops using the Variable reside, to deduplicate\n copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n variable_def: `VariableDef` protocol buffer. If not `None`, recreates\n the Variable object with its contents, referencing the variable's nodes\n in the graph, which must already exist. 
The graph is not changed.\n `variable_def` and the other arguments are mutually exclusive.\n dtype: If set, initial_value will be converted to the given type.\n If `None`, either the datatype will be kept (if `initial_value` is\n a Tensor), or `convert_to_tensor` will decide.\n expected_shape: A TensorShape. If set, initial_value is expected\n to have this shape.\n import_scope: Optional `string`. Name scope to add to the\n `Variable.` Only used when initializing from protocol buffer.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n\n Raises:\n ValueError: If both `variable_def` and initial_value are specified.\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If eager execution is enabled.\n \"\"\"\n self._in_graph_mode = True\n if variable_def:\n # If variable_def is provided, recreates the variable from its fields.\n if initial_value:\n raise ValueError(\"variable_def and initial_value are mutually \"\n \"exclusive.\")\n self._init_from_proto(variable_def, import_scope=import_scope)\n else:\n # Create from initial_value.\n self._init_from_args(\n initial_value=initial_value,\n trainable=trainable,\n collections=collections,\n validate_shape=validate_shape,\n caching_device=caching_device,\n name=name,\n dtype=dtype,\n expected_shape=expected_shape,\n constraint=constraint)\n\n def __repr__(self):\n if context.executing_eagerly() and not self._in_graph_mode:\n return \"<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>\" % (\n self.name, self.get_shape(), self.dtype.name,\n ops.numpy_text(self.read_value(), is_repr=True))\n else:\n return \"<tf.Variable '%s' shape=%s dtype=%s>\" % (\n self.name, self.get_shape(), self.dtype.name)\n\n def _init_from_args(self,\n initial_value=None,\n trainable=True,\n collections=None,\n validate_shape=True,\n caching_device=None,\n name=None,\n dtype=None,\n expected_shape=None,\n constraint=None):\n \"\"\"Creates a new variable from arguments.\n\n Args:\n initial_value: A `Tensor`, or Python object convertible to a `Tensor`,\n which is the initial value for the Variable. The initial value must have\n a shape specified unless `validate_shape` is set to False. Can also be a\n callable with no argument that returns the initial value when called.\n (Note that initializer functions from init_ops.py must first be bound\n to a shape before being used here.)\n trainable: If `True`, the default, also adds the variable to the graph\n collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as\n the default list of variables to use by the `Optimizer` classes.\n collections: List of graph collections keys. The new variable is added to\n these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.\n validate_shape: If `False`, allows the variable to be initialized with a\n value of unknown shape. If `True`, the default, the shape of\n `initial_value` must be known.\n caching_device: Optional device string or function describing where the\n Variable should be cached for reading. Defaults to the Variable's\n device. If not `None`, caches on another device. 
Typical use is to\n cache on the device where the Ops using the Variable reside, to\n deduplicate copying through `Switch` and other conditional statements.\n name: Optional name for the variable. Defaults to `'Variable'` and gets\n uniquified automatically.\n dtype: If set, initial_value will be converted to the given type.\n If None, either the datatype will be kept (if initial_value is\n a Tensor) or float32 will be used (if it is a Python object convertible\n to a Tensor).\n expected_shape: Deprecated. Ignored.\n constraint: An optional projection function to be applied to the variable\n after being updated by an `Optimizer` (e.g. used to implement norm\n constraints or value constraints for layer weights). The function must\n take as input the unprojected Tensor representing the value of the\n variable and return the Tensor for the projected value\n (which must have the same shape). Constraints are not safe to\n use when doing asynchronous distributed training.\n\n Raises:\n ValueError: If the initial value is not specified, or does not have a\n shape and `validate_shape` is `True`.\n RuntimeError: If lifted into the eager context.\n \"\"\"\n _ = expected_shape\n if initial_value is None:\n raise ValueError(\"initial_value must be specified.\")\n init_from_fn = callable(initial_value)\n\n if collections is None:\n collections = [ops.GraphKeys.GLOBAL_VARIABLES]\n if not isinstance(collections, (list, tuple, set)):\n raise ValueError(\n \"collections argument to Variable constructor must be a list, tuple, \"\n \"or set. Got %s of type %s\" % (collections, type(collections)))\n if constraint is not None and not callable(constraint):\n raise ValueError(\"The `constraint` argument must be a callable.\")\n\n # Store the graph key so optimizers know how to only retrieve variables from\n # this graph.\n self._graph_key = ops.get_default_graph()._graph_key # pylint: disable=protected-access\n if isinstance(initial_value, checkpointable.CheckpointInitialValue):\n self._maybe_initialize_checkpointable()\n self._update_uid = initial_value.checkpoint_position.restore_uid\n initial_value = initial_value.wrapped_value\n\n self._trainable = trainable\n if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:\n collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]\n with ops.init_scope():\n # Ensure that we weren't lifted into the eager context.\n if context.executing_eagerly():\n raise RuntimeError(\n \"RefVariable not supported when eager execution is enabled. 
\")\n with ops.name_scope(name, \"Variable\", [] if init_from_fn else\n [initial_value]) as name:\n\n if init_from_fn:\n # Use attr_scope and device(None) to simulate the behavior of\n # colocate_with when the variable we want to colocate with doesn't\n # yet exist.\n true_name = ops._name_from_scope_name(name) # pylint: disable=protected-access\n attr = attr_value_pb2.AttrValue(\n list=attr_value_pb2.AttrValue.ListValue(\n s=[compat.as_bytes(\"loc:@%s\" % true_name)]))\n # pylint: disable=protected-access\n with ops.get_default_graph()._attr_scope({\"_class\": attr}):\n with ops.name_scope(\"Initializer\"), ops.device(None):\n self._initial_value = ops.convert_to_tensor(\n initial_value(), name=\"initial_value\", dtype=dtype)\n shape = (self._initial_value.get_shape()\n if validate_shape else tensor_shape.unknown_shape())\n self._variable = state_ops.variable_op_v2(\n shape,\n self._initial_value.dtype.base_dtype,\n name=name)\n # pylint: enable=protected-access\n\n # Or get the initial value from a Tensor or Python object.\n else:\n self._initial_value = ops.convert_to_tensor(\n initial_value, name=\"initial_value\", dtype=dtype)\n # pylint: disable=protected-access\n if self._initial_value.op._get_control_flow_context() is not None:\n raise ValueError(\n \"Initializer for variable %s is from inside a control-flow \"\n \"construct, such as a loop or conditional. When creating a \"\n \"variable inside a loop or conditional, use a lambda as the \"\n \"initializer.\" % name)\n # pylint: enable=protected-access\n shape = (self._initial_value.get_shape()\n if validate_shape else tensor_shape.unknown_shape())\n # In this case, the variable op can't be created until after the\n # initial_value has been converted to a Tensor with a known type.\n self._variable = state_ops.variable_op_v2(\n shape,\n self._initial_value.dtype.base_dtype,\n name=name)\n\n # Manually overrides the variable's shape with the initial value's.\n if validate_shape:\n initial_value_shape = self._initial_value.get_shape()\n if not initial_value_shape.is_fully_defined():\n raise ValueError(\"initial_value must have a shape specified: %s\" %\n self._initial_value)\n\n # If 'initial_value' makes use of other variables, make sure we don't\n # have an issue if these other variables aren't initialized first by\n # using their initialized_value() method.\n self._initializer_op = state_ops.assign(\n self._variable,\n self._try_guard_against_uninitialized_dependencies(\n self._initial_value),\n validate_shape=validate_shape).op\n\n # TODO(vrv): Change this class to not take caching_device, but\n # to take the op to colocate the snapshot with, so we can use\n # colocation rather than devices.\n if caching_device is not None:\n with ops.device(caching_device):\n self._snapshot = array_ops.identity(self._variable, name=\"read\")\n else:\n with ops.colocate_with(self._variable.op):\n self._snapshot = array_ops.identity(self._variable, name=\"read\")\n ops.add_to_collections(collections, self)\n\n self._caching_device = caching_device\n self._save_slice_info = None\n self._constraint = constraint\n\n def _init_from_proto(self, variable_def, import_scope=None):\n \"\"\"Recreates the Variable object from a `VariableDef` protocol buffer.\n\n Args:\n variable_def: `VariableDef` protocol buffer, describing a variable\n whose nodes already exists in the graph.\n import_scope: Optional `string`. 
Name scope to add.\n \"\"\"\n assert isinstance(variable_def, variable_pb2.VariableDef)\n # Create from variable_def.\n g = ops.get_default_graph()\n self._variable = g.as_graph_element(\n ops.prepend_name_scope(variable_def.variable_name,\n import_scope=import_scope))\n self._initializer_op = g.as_graph_element(\n ops.prepend_name_scope(variable_def.initializer_name,\n import_scope=import_scope))\n # Tests whether initial_value_name exists first for backwards compatibility.\n if (hasattr(variable_def, \"initial_value_name\") and\n variable_def.initial_value_name):\n self._initial_value = g.as_graph_element(\n ops.prepend_name_scope(variable_def.initial_value_name,\n import_scope=import_scope))\n else:\n self._initial_value = None\n self._trainable = getattr(variable_def, \"trainable\", True)\n self._snapshot = g.as_graph_element(\n ops.prepend_name_scope(variable_def.snapshot_name,\n import_scope=import_scope))\n if variable_def.HasField(\"save_slice_info_def\"):\n self._save_slice_info = Variable.SaveSliceInfo(\n save_slice_info_def=variable_def.save_slice_info_def,\n import_scope=import_scope)\n else:\n self._save_slice_info = None\n self._caching_device = None\n self._constraint = None\n\n def _as_graph_element(self):\n \"\"\"Conversion function for Graph.as_graph_element().\"\"\"\n return self._variable\n\n def _AsTensor(self): # pylint: disable=invalid-name\n \"\"\"Converts this variable to a Tensor.\n\n See `tf.Variable.value`.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n return self._snapshot\n\n def value(self):\n \"\"\"Returns the last snapshot of this variable.\n\n You usually do not need to call this method as all ops that need the value\n of the variable call it automatically through a `convert_to_tensor()` call.\n\n Returns a `Tensor` which holds the value of the variable. You can not\n assign a new value to this tensor as it is not a reference to the variable.\n\n To avoid copies, if the consumer of the returned value is on the same device\n as the variable, this actually returns the live value of the variable, not\n a copy. Updates to the variable are seen by the consumer. If the consumer\n is on a different device it will get a copy of the variable.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n return self._snapshot\n\n def read_value(self):\n \"\"\"Returns the value of this variable, read in the current context.\n\n Can be different from value() if it's on another device, with control\n dependencies, etc.\n\n Returns:\n A `Tensor` containing the value of the variable.\n \"\"\"\n return array_ops.identity(self._variable, name=\"read\")\n\n def _ref(self):\n \"\"\"Returns a reference to this variable.\n\n You usually do not need to call this method as all ops that need a reference\n to the variable call it automatically.\n\n Returns is a `Tensor` which holds a reference to the variable. 
You can\n assign a new value to the variable by passing the tensor to an assign op.\n See `tf.Variable.value` if you want to get the value of the\n variable.\n\n Returns:\n A `Tensor` that is a reference to the variable.\n \"\"\"\n return self._variable\n\n def set_shape(self, shape):\n \"\"\"Overrides the shape for this variable.\n\n Args:\n shape: the `TensorShape` representing the overridden shape.\n \"\"\"\n self._ref().set_shape(shape)\n self.value().set_shape(shape)\n\n @property\n def trainable(self):\n return self._trainable\n\n def eval(self, session=None):\n \"\"\"In a session, computes and returns the value of this variable.\n\n This is not a graph construction method, it does not add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. See `tf.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n print(v.eval(sess))\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n print(v.eval())\n ```\n\n Args:\n session: The session to use to evaluate this variable. If\n none, the default session is used.\n\n Returns:\n A numpy `ndarray` with a copy of the value of this variable.\n \"\"\"\n return self._variable.eval(session=session)\n\n def initialized_value(self):\n \"\"\"Returns the value of the initialized variable.\n\n You should use this instead of the variable itself to initialize another\n variable with a value that depends on the value of this variable.\n\n ```python\n # Initialize 'v' with a random tensor.\n v = tf.Variable(tf.truncated_normal([10, 40]))\n # Use `initialized_value` to guarantee that `v` has been\n # initialized before its value is used to initialize `w`.\n # The random values are picked only once.\n w = tf.Variable(v.initialized_value() * 2.0)\n ```\n\n Returns:\n A `Tensor` holding the value of this variable after its initializer\n has run.\n \"\"\"\n with ops.init_scope():\n return control_flow_ops.cond(is_variable_initialized(self),\n self.read_value,\n lambda: self.initial_value)\n\n @property\n def initial_value(self):\n \"\"\"Returns the Tensor used as the initial value for the variable.\n\n Note that this is different from `initialized_value()` which runs\n the op that initializes the variable before returning its value.\n This method returns the tensor that is used by the op that initializes\n the variable.\n\n Returns:\n A `Tensor`.\n \"\"\"\n return self._initial_value\n\n @property\n def constraint(self):\n \"\"\"Returns the constraint function associated with this variable.\n\n Returns:\n The constraint function that was passed to the variable constructor.\n Can be `None` if no constraint was passed.\n \"\"\"\n return self._constraint\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n \"\"\"Assigns a new value to the variable.\n\n This is essentially a shortcut for `assign(self, value)`.\n\n Args:\n value: A `Tensor`. 
The new value for this variable.\n use_locking: If `True`, use locking during the assignment.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the assignment has completed.\n \"\"\"\n assign = state_ops.assign(self._variable, value, use_locking=use_locking,\n name=name)\n if read_value:\n return assign\n return assign.op\n\n def assign_add(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Adds a value to this variable.\n\n This is essentially a shortcut for `assign_add(self, delta)`.\n\n Args:\n delta: A `Tensor`. The value to add to this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the addition has completed.\n \"\"\"\n assign = state_ops.assign_add(\n self._variable, delta, use_locking=use_locking, name=name)\n if read_value:\n return assign\n return assign.op\n\n def assign_sub(self, delta, use_locking=False, name=None, read_value=True):\n \"\"\"Subtracts a value from this variable.\n\n This is essentially a shortcut for `assign_sub(self, delta)`.\n\n Args:\n delta: A `Tensor`. The value to subtract from this variable.\n use_locking: If `True`, use locking during the operation.\n name: The name of the operation to be created\n read_value: if True, will return something which evaluates to the\n new value of the variable; if False will return the assign op.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the subtraction has completed.\n \"\"\"\n assign = state_ops.assign_sub(\n self._variable, delta, use_locking=use_locking, name=name)\n if read_value:\n return assign\n return assign.op\n\n def scatter_sub(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Subtracts `IndexedSlices` from this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be subtracted from this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise ValueError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_sub(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_add(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Adds `IndexedSlices` from this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be added to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise ValueError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_add(\n self._variable,\n sparse_delta.indices,\n 
sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_update(self, sparse_delta, use_locking=False, name=None):\n \"\"\"Assigns `IndexedSlices` to this variable.\n\n Args:\n sparse_delta: `IndexedSlices` to be assigned to this variable.\n use_locking: If `True`, use locking during the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n if not isinstance(sparse_delta, ops.IndexedSlices):\n raise ValueError(\"sparse_delta is not IndexedSlices: %s\" % sparse_delta)\n return gen_state_ops.scatter_update(\n self._variable,\n sparse_delta.indices,\n sparse_delta.values,\n use_locking=use_locking,\n name=name)\n\n def scatter_nd_sub(self, indices, updates, name=None):\n \"\"\"Applies sparse subtraction to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_sub(indices, updates)\n with tf.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, -9, 3, -6, -6, 6, 7, -4]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n return gen_state_ops.scatter_nd_sub(\n self._variable, indices, updates, use_locking=True, name=name)\n\n def scatter_nd_add(self, indices, updates, name=None):\n \"\"\"Applies sparse addition to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. 
In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n add = ref.scatter_nd_add(indices, updates)\n with tf.Session() as sess:\n print sess.run(add)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 13, 3, 14, 14, 6, 7, 20]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n return gen_state_ops.scatter_nd_add(\n self._variable, indices, updates, use_locking=True, name=name)\n\n def scatter_nd_update(self, indices, updates, name=None):\n \"\"\"Applies sparse assignment to individual values or slices in a Variable.\n\n `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.\n\n `indices` must be integer tensor, containing indices into `ref`.\n It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.\n\n The innermost dimension of `indices` (with length `K`) corresponds to\n indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th\n dimension of `ref`.\n\n `updates` is `Tensor` of rank `Q-1+P-K` with shape:\n\n ```\n [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].\n ```\n\n For example, say we want to add 4 scattered elements to a rank-1 tensor to\n 8 elements. In Python, that update would look like this:\n\n ```python\n ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])\n indices = tf.constant([[4], [3], [1] ,[7]])\n updates = tf.constant([9, 10, 11, 12])\n op = ref.scatter_nd_update(indices, updates)\n with tf.Session() as sess:\n print sess.run(op)\n ```\n\n The resulting update to ref would look like this:\n\n [1, 11, 3, 10, 9, 6, 7, 12]\n\n See `tf.scatter_nd` for more details about how to make updates to\n slices.\n\n Args:\n indices: The indices to be used in the operation.\n updates: The values to be used in the operation.\n name: the name of the operation.\n\n Returns:\n A `Tensor` that will hold the new value of this variable after\n the scattered subtraction has completed.\n\n Raises:\n ValueError: if `sparse_delta` is not an `IndexedSlices`.\n \"\"\"\n return gen_state_ops.scatter_nd_update(\n self._variable, indices, updates, use_locking=True, name=name)\n\n def _strided_slice_assign(self,\n begin,\n end,\n strides,\n value,\n name,\n begin_mask,\n end_mask,\n ellipsis_mask,\n new_axis_mask,\n shrink_axis_mask):\n return gen_array_ops.strided_slice_assign(ref=self._ref(),\n begin=begin,\n end=end,\n strides=strides,\n value=value,\n name=name,\n begin_mask=begin_mask,\n end_mask=end_mask,\n ellipsis_mask=ellipsis_mask,\n new_axis_mask=new_axis_mask,\n shrink_axis_mask=shrink_axis_mask)\n\n def count_up_to(self, limit):\n \"\"\"Increments this variable until it reaches `limit`.\n\n When that Op is run it tries to increment the variable by `1`. 
If\n incrementing the variable would bring it above `limit` then the Op raises\n the exception `OutOfRangeError`.\n\n If no error is raised, the Op outputs the value of the variable before\n the increment.\n\n This is essentially a shortcut for `count_up_to(self, limit)`.\n\n Args:\n limit: value at which incrementing the variable raises an error.\n\n Returns:\n A `Tensor` that will hold the variable value before the increment. If no\n other Op modifies this variable, the values produced will all be\n distinct.\n \"\"\"\n return state_ops.count_up_to(self._variable, limit=limit)\n\n def load(self, value, session=None):\n \"\"\"Load new value into this variable.\n\n Writes new value to variable's memory. Doesn't add ops to the graph.\n\n This convenience method requires a session where the graph\n containing this variable has been launched. If no session is\n passed, the default session is used. See `tf.Session` for more\n information on launching a graph and on sessions.\n\n ```python\n v = tf.Variable([1, 2])\n init = tf.global_variables_initializer()\n\n with tf.Session() as sess:\n sess.run(init)\n # Usage passing the session explicitly.\n v.load([2, 3], sess)\n print(v.eval(sess)) # prints [2 3]\n # Usage with the default session. The 'with' block\n # above makes 'sess' the default session.\n v.load([3, 4], sess)\n print(v.eval()) # prints [3 4]\n ```\n\n Args:\n value: New variable value\n session: The session to use to evaluate this variable. If\n none, the default session is used.\n\n Raises:\n ValueError: Session is not passed and no default session\n \"\"\"\n if context.executing_eagerly():\n self.assign(value)\n else:\n session = session or ops.get_default_session()\n if session is None:\n raise ValueError(\n \"Either session argument should be provided or default session \"\n \"should be established\")\n session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})\n\n # Conversion to tensor.\n @staticmethod\n def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name\n \"\"\"Utility function for converting a Variable to a Tensor.\"\"\"\n _ = name\n if dtype and not dtype.is_compatible_with(v.dtype):\n raise ValueError(\n \"Incompatible type conversion requested to type '%s' for variable \"\n \"of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n return v._ref() # pylint: disable=protected-access\n else:\n return v.value()\n\n def _gather_saveables_for_checkpoint(self):\n \"\"\"For implementing `Checkpointable`. This object is saveable on its own.\"\"\"\n return {checkpointable.VARIABLE_VALUE_KEY: self}\n\n def _try_guard_against_uninitialized_dependencies(self, initial_value):\n \"\"\"Attempt to guard against dependencies on uninitialized variables.\n\n Replace references to variables in `initial_value` with references to the\n variable's initialized values. The initialized values are essentially\n conditional TensorFlow graphs that return a variable's value if it is\n initialized or its `initial_value` if it hasn't been initialized. 
This\n replacement is done on a best effort basis:\n\n - If the `initial_value` graph contains cycles, we don't do any\n replacements for that graph.\n - If the variables that `initial_value` depends on are not present in the\n `GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them.\n\n In these cases, it is up to the caller to ensure that the `initial_value`\n graph uses initialized variables or that they guard access to variables\n using their `initialized_value` method.\n\n Args:\n initial_value: `Tensor`. The initial value.\n Returns:\n A `Tensor` suitable to initialize a variable.\n Raises:\n TypeError: If `initial_value` is not a `Tensor`.\n \"\"\"\n if not isinstance(initial_value, ops.Tensor):\n raise TypeError(\"initial_value needs to be a Tensor: %s\" % initial_value)\n\n # Don't modify initial_value if it contains any cyclic dependencies.\n if _has_cycle(initial_value.op, path=set()):\n return initial_value\n\n return self._safe_initial_value_from_tensor(initial_value, op_cache={})\n\n def _safe_initial_value_from_tensor(self, tensor, op_cache):\n \"\"\"Replace dependencies on variables with their initialized values.\n\n Args:\n tensor: A `Tensor`. The tensor to replace.\n op_cache: A dict mapping operation names to `Operation`s. Used to memoize\n the results so as to avoid creating redundant operations.\n Returns:\n A `Tensor` compatible with `tensor`. Any inputs that lead to variable\n values will be replaced with a corresponding graph that uses the\n variable's initialized values. This is done on a best-effort basis. If no\n modifications need to be made then `tensor` will be returned unchanged.\n \"\"\"\n op = tensor.op\n new_op = op_cache.get(op.name)\n if new_op is None:\n new_op = self._safe_initial_value_from_op(op, op_cache)\n op_cache[op.name] = new_op\n return new_op.outputs[tensor.value_index]\n\n def _safe_initial_value_from_op(self, op, op_cache):\n \"\"\"Replace dependencies on variables with their initialized values.\n\n Args:\n op: An `Operation`. The operation to replace.\n op_cache: A dict mapping operation names to `Operation`s. Used to memoize\n the results so as to avoid creating redundant operations.\n Returns:\n An `Operation` compatible with `op`. Any inputs that lead to variable\n values will be replaced with a corresponding graph that uses the\n variable's initialized values. This is done on a best-effort basis. 
If no\n modifications need to be made then `op` will be returned unchanged.\n \"\"\"\n op_type = op.node_def.op\n if op_type in (\"IsVariableInitialized\", \"VarIsInitializedOp\",\n \"ReadVariableOp\"):\n return op\n\n # Attempt to find the initialized_value of any variable reference / handles.\n # TODO(b/70206927): Fix handling of ResourceVariables.\n if op_type in (\"Variable\", \"VariableV2\", \"VarHandleOp\"):\n initialized_value = self._find_initialized_value_for_variable(op)\n return op if initialized_value is None else initialized_value.op\n\n # Recursively build initializer expressions for inputs.\n modified = False\n new_op_inputs = []\n for op_input in op.inputs:\n new_op_input = self._safe_initial_value_from_tensor(op_input, op_cache)\n new_op_inputs.append(new_op_input)\n modified = modified or (new_op_input != op_input)\n\n # If at least one input was modified, replace the op.\n if modified:\n new_op_type = op_type\n if new_op_type == \"RefSwitch\":\n new_op_type = \"Switch\"\n new_op_name = op.node_def.name + \"_\" + self.name\n new_op_name = new_op_name.replace(\":\", \"_\")\n return self.graph.create_op(\n new_op_type, new_op_inputs,\n op._output_types, # pylint: disable=protected-access\n name=new_op_name, attrs=op.node_def.attr)\n\n return op\n\n def _find_initialized_value_for_variable(self, variable_op):\n \"\"\"Find the initialized value for a variable op.\n\n To do so, lookup the variable op in the variables collection.\n\n Args:\n variable_op: A variable `Operation`.\n Returns:\n A `Tensor` representing the initialized value for the variable or `None`\n if the initialized value could not be found.\n \"\"\"\n try:\n var_names = [variable_op.node_def.name, variable_op.node_def.name + \":0\"]\n for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES,\n ops.GraphKeys.LOCAL_VARIABLES):\n for var in self.graph.get_collection(collection_name):\n if var.name in var_names:\n return var.initialized_value()\n except AttributeError:\n # Return None when an incomplete user-defined variable type was put in\n # the collection.\n return None\n return None\n\n # NOTE(mrry): This enables the Variable's overloaded \"right\" binary\n # operators to run when the left operand is an ndarray, because it\n # accords the Variable class higher priority than an ndarray, or a\n # numpy matrix.\n # TODO(mrry): Convert this to using numpy's __numpy_ufunc__\n # mechanism, which allows more control over how Variables interact\n # with ndarrays.\n __array_priority__ = 100\n\n @property\n def name(self):\n \"\"\"The name of this variable.\"\"\"\n return self._variable.name\n\n @property\n def _shared_name(self):\n \"\"\"The shared name of the variable.\n\n Unlike name(), shared_name doesn't have \":0\" suffix. 
It is user-specified\n name with name scope prefix.\n\n Returns:\n variable name.\n \"\"\"\n return self.name[:-2]\n\n @property\n def initializer(self):\n \"\"\"The initializer operation for this variable.\"\"\"\n return self._initializer_op\n\n @property\n def device(self):\n \"\"\"The device of this variable.\"\"\"\n return self._variable.device\n\n @property\n def dtype(self):\n \"\"\"The `DType` of this variable.\"\"\"\n return self._variable.dtype\n\n @property\n def op(self):\n \"\"\"The `Operation` of this variable.\"\"\"\n return self._variable.op\n\n @property\n def graph(self):\n \"\"\"The `Graph` of this variable.\"\"\"\n return self._variable.graph\n\n @property\n def shape(self):\n \"\"\"The `TensorShape` of this variable.\n\n Returns:\n A `TensorShape`.\n \"\"\"\n return self._variable.get_shape()\n\n def get_shape(self):\n \"\"\"Alias of Variable.shape.\"\"\"\n return self.shape\n\n def to_proto(self, export_scope=None):\n \"\"\"Converts a `Variable` to a `VariableDef` protocol buffer.\n\n Args:\n export_scope: Optional `string`. Name scope to remove.\n\n Returns:\n A `VariableDef` protocol buffer, or `None` if the `Variable` is not\n in the specified name scope.\n \"\"\"\n if (export_scope is None or\n self._variable.name.startswith(export_scope)):\n var_def = variable_pb2.VariableDef()\n var_def.variable_name = ops.strip_name_scope(\n self._variable.name, export_scope)\n if self._initial_value is not None:\n # For backwards compatibility.\n var_def.initial_value_name = ops.strip_name_scope(\n self._initial_value.name, export_scope)\n var_def.trainable = self.trainable\n var_def.initializer_name = ops.strip_name_scope(\n self.initializer.name, export_scope)\n var_def.snapshot_name = ops.strip_name_scope(\n self._snapshot.name, export_scope)\n if self._save_slice_info:\n var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(\n export_scope=export_scope))\n return var_def\n else:\n return None\n\n def __iadd__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable += will be deprecated. Use variable.assign_add\"\n \" if you want assignment to the variable value or 'x = x + y'\"\n \" if you want a new python Tensor object.\", 1)\n return self + other\n\n def __isub__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable -= will be deprecated. Use variable.assign_sub\"\n \" if you want assignment to the variable value or 'x = x - y'\"\n \" if you want a new python Tensor object.\", 1)\n return self - other\n\n def __imul__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable *= will be deprecated. Use `var.assign(var * other)`\"\n \" if you want assignment to the variable value or `x = x * y`\"\n \" if you want a new python Tensor object.\", 1)\n return self * other\n\n def __idiv__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable /= will be deprecated. Use `var.assign(var / other)`\"\n \" if you want assignment to the variable value or `x = x / y`\"\n \" if you want a new python Tensor object.\", 1)\n return self / other\n\n def __itruediv__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable /= will be deprecated. Use `var.assign(var / other)`\"\n \" if you want assignment to the variable value or `x = x / y`\"\n \" if you want a new python Tensor object.\", 1)\n return self / other\n\n def __irealdiv__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable /= will be deprecated. 
Use `var.assign(var / other)`\"\n \" if you want assignment to the variable value or `x = x / y`\"\n \" if you want a new python Tensor object.\", 1)\n return self / other\n\n def __ipow__(self, other):\n logging.log_first_n(\n logging.WARN,\n \"Variable **= will be deprecated. Use `var.assign(var ** other)`\"\n \" if you want assignment to the variable value or `x = x ** y`\"\n \" if you want a new python Tensor object.\", 1)\n return self ** other\n\n def _set_save_slice_info(self, save_slice_info):\n \"\"\"Sets the slice info for this `Variable`.\n\n Args:\n save_slice_info: A `Variable.SaveSliceInfo` object.\n \"\"\"\n self._save_slice_info = save_slice_info\n\n def _get_save_slice_info(self):\n return self._save_slice_info\n\n\nclass PartitionedVariable(object):\n \"\"\"A container for partitioned `Variable` objects.\n\n @compatibility(eager) `tf.PartitionedVariable` is not compatible with\n eager execution. Use `tf.Variable` instead which is compatible\n with both eager execution and graph construction. See [the\n TensorFlow Eager Execution\n guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/g3doc/guide.md#variables-and-optimizers)\n for details on how variables work in eager execution.\n @end_compatibility\n \"\"\"\n\n def __init__(self, name, shape, dtype, variable_list, partitions):\n \"\"\"Creates a new partitioned variable wrapper.\n\n Variables passed via the variable_list must contain a save_slice_info\n field. Concatenation and iteration is in lexicographic order according\n to the var_offset property of the save_slice_info.\n\n Args:\n name: String. Overall name of the variables.\n shape: List of integers. Overall shape of the variables.\n dtype: Type of the variables.\n variable_list: List of `Variable` that comprise this partitioned variable.\n partitions: List of integers. Number of partitions for each dimension.\n\n Raises:\n TypeError: If `variable_list` is not a list of `Variable` objects, or\n `partitions` is not a list.\n ValueError: If `variable_list` is empty, or the `Variable` shape\n information does not match `shape`, or `partitions` has invalid values.\n \"\"\"\n if not isinstance(variable_list, (list, tuple)):\n raise TypeError(\n \"variable_list is not a list or tuple: %s\" % variable_list)\n if not isinstance(partitions, (list, tuple)):\n raise TypeError(\"partitions is not a list or tuple: %s\" % partitions)\n if not all(p >= 1 for p in partitions):\n raise ValueError(\"partition values must be positive: %s\" % partitions)\n if not variable_list:\n raise ValueError(\"variable_list may not be empty\")\n # pylint: disable=protected-access\n for v in variable_list:\n # Sort the variable_list lexicographically according to var offset value.\n if not all(v._get_save_slice_info() is not None for v in variable_list):\n raise ValueError(\n \"All variables must have a save_slice_info available: %s\"\n % [v.name for v in variable_list])\n if len(shape) != len(partitions):\n raise ValueError(\"len(shape) != len(partitions): %s vs. 
%s\"\n % (shape, partitions))\n if v._get_save_slice_info().full_shape != shape:\n raise ValueError(\n \"All variables' full shapes must match shape: %s; \"\n \"but full shapes were: %s\"\n % (shape, str([v._get_save_slice_info().full_shape])))\n self._variable_list = sorted(\n variable_list, key=lambda v: v._get_save_slice_info().var_offset)\n # pylint: enable=protected-access\n\n self._name = name\n self._shape = shape\n self._dtype = dtype\n self._partitions = partitions\n self._as_tensor = None\n\n def __iter__(self):\n \"\"\"Return an iterable for accessing the underlying partition Variables.\"\"\"\n return iter(self._variable_list)\n\n def __len__(self):\n num_partition_axes = len(self._partition_axes())\n if num_partition_axes > 1:\n raise ValueError(\"Cannot get a length for %d > 1 partition axes\"\n % num_partition_axes)\n return len(self._variable_list)\n\n def _partition_axes(self):\n if all(p == 1 for p in self._partitions):\n return [0]\n else:\n return [i for i, p in enumerate(self._partitions) if p > 1]\n\n def _concat(self):\n \"\"\"Returns the overall concatenated value as a `Tensor`.\n\n This is different from using the partitioned variable directly as a tensor\n (through tensor conversion and `as_tensor`) in that it creates a new set of\n operations that keeps the control dependencies from its scope.\n\n Returns:\n `Tensor` containing the concatenated value.\n \"\"\"\n if len(self._variable_list) == 1:\n with ops.name_scope(None):\n return array_ops.identity(self._variable_list[0], name=self._name)\n\n partition_axes = self._partition_axes()\n\n if len(partition_axes) > 1:\n raise NotImplementedError(\n \"Cannot concatenate along more than one dimension: %s. \"\n \"Multi-axis partition concat is not supported\" % str(partition_axes))\n partition_ix = partition_axes[0]\n\n with ops.name_scope(self._name + \"/ConcatPartitions/\"):\n concatenated = array_ops.concat(self._variable_list, partition_ix)\n\n with ops.name_scope(None):\n return array_ops.identity(concatenated, name=self._name)\n\n def as_tensor(self):\n \"\"\"Returns the overall concatenated value as a `Tensor`.\n\n The returned tensor will not inherit the control dependencies from the scope\n where the value is used, which is similar to getting the value of\n `Variable`.\n\n Returns:\n `Tensor` containing the concatenated value.\n \"\"\"\n with ops.control_dependencies(None):\n return self._concat()\n\n @staticmethod\n def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):\n # pylint: disable=invalid-name\n _ = name\n if dtype is not None and not dtype.is_compatible_with(v.dtype):\n raise ValueError(\n \"Incompatible type conversion requested to type '%s' for variable \"\n \"of type '%s'\" % (dtype.name, v.dtype.name))\n if as_ref:\n raise NotImplementedError(\n \"PartitionedVariable doesn't support being used as a reference.\")\n else:\n return v.as_tensor()\n\n @property\n def name(self):\n return self._name\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def shape(self):\n return self.get_shape()\n\n def get_shape(self):\n return self._shape\n\n def _get_variable_list(self):\n return self._variable_list\n\n def _get_partitions(self):\n return self._partitions\n\n def _apply_assign_fn(self, assign_fn, value):\n partition_axes = self._partition_axes()\n if len(partition_axes) > 1:\n raise NotImplementedError(\n \"Cannot do assign action along more than one dimension: %s. 
\"\n \"Multi-axis partition assign action is not supported \" %\n str(partition_axes))\n if isinstance(value, list):\n assert len(value) == len(self._variable_list)\n value_list = value\n elif isinstance(value, PartitionedVariable):\n value_list = [var_part for var_part in value]\n else:\n partition_ix = partition_axes[0]\n size_splits_list = [\n tensor_shape.dimension_value(var.shape[partition_ix])\n for var in self._variable_list\n ]\n value_list = array_ops.split(value, size_splits_list, axis=partition_ix)\n\n op_list = [\n assign_fn(var, value_list[idx])\n for idx, var in enumerate(self._variable_list)\n ]\n return op_list\n\n def assign(self, value, use_locking=False, name=None, read_value=True):\n assign_fn = lambda var, r_value: var.assign(\n r_value, use_locking=use_locking,\n name=name, read_value=read_value)\n assign_list = self._apply_assign_fn(assign_fn, value)\n if read_value:\n return assign_list\n return [assign.op for assign in assign_list]\n\n def assign_add(self, value, use_locking=False, name=None, read_value=True):\n assign_fn = lambda var, r_value: var.assign_add(\n r_value, use_locking=use_locking,\n name=name, read_value=read_value)\n assign_list = self._apply_assign_fn(assign_fn, value)\n if read_value:\n return assign_list\n return [assign.op for assign in assign_list]\n\n def assign_sub(self, value, use_locking=False, name=None, read_value=True):\n assign_fn = lambda var, r_value: var.assign_sub(\n r_value, use_locking=use_locking,\n name=name, read_value=read_value)\n assign_list = self._apply_assign_fn(assign_fn, value)\n if read_value:\n return assign_list\n return [assign.op for assign in assign_list]\n\n@tf_export(v1=[\"global_variables\"])\ndef global_variables(scope=None):\n \"\"\"Returns global variables.\n\n Global variables are variables that are shared across machines in a\n distributed environment. The `Variable()` constructor or `get_variable()`\n automatically adds new variables to the graph collection\n `GraphKeys.GLOBAL_VARIABLES`.\n This convenience function returns the contents of that collection.\n\n An alternative to global variables are local variables. See\n `tf.local_variables`\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of `Variable` objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)\n\n\n@tf_export(v1=[\"all_variables\"])\n@deprecated(\"2017-03-02\", \"Please use tf.global_variables instead.\")\ndef all_variables():\n \"\"\"See `tf.global_variables`.\"\"\"\n return global_variables()\n\n\ndef _all_saveable_objects(scope=None):\n \"\"\"Returns all variables and `SaveableObject`s that must be checkpointed.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. 
The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of `Variable` and `SaveableObject` to be checkpointed\n \"\"\"\n # TODO(andreasst): make this function public once things are settled.\n return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope) +\n ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope))\n\n\n@tf_export(v1=[\"local_variables\"])\ndef local_variables(scope=None):\n \"\"\"Returns local variables.\n\n Local variables - per process variables, usually not saved/restored to\n checkpoint and used for temporary or intermediate values.\n For example, they can be used as counters for metrics computation or\n number of epochs this machine has read data.\n The `tf.contrib.framework.local_variable()` function automatically adds the\n new variable to `GraphKeys.LOCAL_VARIABLES`.\n This convenience function returns the contents of that collection.\n\n An alternative to local variables are global variables. See\n `tf.global_variables`\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of local `Variable` objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)\n\n\n@tf_export(v1=[\"model_variables\"])\ndef model_variables(scope=None):\n \"\"\"Returns all variables in the MODEL_VARIABLES collection.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of local Variable objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)\n\n\n@tf_export(v1=[\"trainable_variables\"])\ndef trainable_variables(scope=None):\n \"\"\"Returns all variables created with `trainable=True`.\n\n When passed `trainable=True`, the `Variable()` constructor automatically\n adds new variables to the graph collection\n `GraphKeys.TRAINABLE_VARIABLES`. This convenience function returns the\n contents of that collection.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of Variable objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)\n\n\n@tf_export(v1=[\"moving_average_variables\"])\ndef moving_average_variables(scope=None):\n \"\"\"Returns all variables that maintain their moving averages.\n\n If an `ExponentialMovingAverage` object is created and the `apply()`\n method is called on a list of variables, these variables will\n be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.\n This convenience function returns the contents of that collection.\n\n Args:\n scope: (Optional.) A string. If supplied, the resulting list is filtered\n to include only items whose `name` attribute matches `scope` using\n `re.match`. 
Items without a `name` attribute are never returned if a\n scope is supplied. The choice of `re.match` means that a `scope` without\n special tokens filters by prefix.\n\n Returns:\n A list of Variable objects.\n \"\"\"\n return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)\n\n\n@tf_export(v1=[\"initializers.variables\", \"variables_initializer\"])\ndef variables_initializer(var_list, name=\"init\"):\n \"\"\"Returns an Op that initializes a list of variables.\n\n After you launch the graph in a session, you can run the returned Op to\n initialize all the variables in `var_list`. This Op runs all the\n initializers of the variables in `var_list` in parallel.\n\n Calling `initialize_variables()` is equivalent to passing the list of\n initializers to `Group()`.\n\n If `var_list` is empty, however, the function still returns an Op that can\n be run. That Op just has no effect.\n\n Args:\n var_list: List of `Variable` objects to initialize.\n name: Optional name for the returned operation.\n\n Returns:\n An Op that run the initializers of all the specified variables.\n \"\"\"\n if var_list and not context.executing_eagerly():\n return control_flow_ops.group(*[v.initializer for v in var_list], name=name)\n return control_flow_ops.no_op(name=name)\n\n\n@tf_export(v1=[\"initialize_variables\"])\n@tf_should_use.should_use_result\n@deprecated(\"2017-03-02\", \"Use `tf.variables_initializer` instead.\")\ndef initialize_variables(var_list, name=\"init\"):\n \"\"\"See `tf.variables_initializer`.\"\"\"\n return variables_initializer(var_list, name=name)\n\n\n@tf_export(v1=[\"initializers.global_variables\", \"global_variables_initializer\"])\ndef global_variables_initializer():\n \"\"\"Returns an Op that initializes global variables.\n\n This is just a shortcut for `variables_initializer(global_variables())`\n\n Returns:\n An Op that initializes global variables in the graph.\n \"\"\"\n if context.executing_eagerly():\n return control_flow_ops.no_op(name=\"global_variables_initializer\")\n return variables_initializer(global_variables())\n\n\n@tf_export(v1=[\"initialize_all_variables\"])\n@tf_should_use.should_use_result\n@deprecated(\"2017-03-02\", \"Use `tf.global_variables_initializer` instead.\")\ndef initialize_all_variables():\n \"\"\"See `tf.global_variables_initializer`.\"\"\"\n return global_variables_initializer()\n\n\n@tf_export(v1=[\"initializers.local_variables\", \"local_variables_initializer\"])\ndef local_variables_initializer():\n \"\"\"Returns an Op that initializes all local variables.\n\n This is just a shortcut for `variables_initializer(local_variables())`\n\n Returns:\n An Op that initializes all local variables in the graph.\n \"\"\"\n if context.executing_eagerly():\n return control_flow_ops.no_op(name=\"local_variables_initializer\")\n return variables_initializer(local_variables())\n\n\n@tf_export(v1=[\"initialize_local_variables\"])\n@tf_should_use.should_use_result\n@deprecated(\"2017-03-02\", \"Use `tf.local_variables_initializer` instead.\")\ndef initialize_local_variables():\n \"\"\"See `tf.local_variables_initializer`.\"\"\"\n return local_variables_initializer()\n\n\n@tf_export(v1=[\"is_variable_initialized\"])\n@tf_should_use.should_use_result\ndef is_variable_initialized(variable):\n \"\"\"Tests if a variable has been initialized.\n\n Args:\n variable: A `Variable`.\n\n Returns:\n Returns a scalar boolean Tensor, `True` if the variable has been\n initialized, `False` otherwise.\n \"\"\"\n return 
state_ops.is_variable_initialized(variable)\n\n\n@tf_export(v1=[\"assert_variables_initialized\"])\n@tf_should_use.should_use_result\ndef assert_variables_initialized(var_list=None):\n \"\"\"Returns an Op to check if variables are initialized.\n\n NOTE: This function is obsolete and will be removed in 6 months. Please\n change your implementation to use `report_uninitialized_variables()`.\n\n When run, the returned Op will raise the exception `FailedPreconditionError`\n if any of the variables has not yet been initialized.\n\n Note: This function is implemented by trying to fetch the values of the\n variables. If one of the variables is not initialized a message may be\n logged by the C++ runtime. This is expected.\n\n Args:\n var_list: List of `Variable` objects to check. Defaults to the\n value of `global_variables().`\n\n Returns:\n An Op, or None if there are no variables.\n \"\"\"\n if var_list is None:\n var_list = global_variables() + local_variables()\n # Backwards compatibility for old-style variables. TODO(touts): remove.\n if not var_list:\n var_list = []\n for op in ops.get_default_graph().get_operations():\n if op.type in [\"Variable\", \"VariableV2\", \"AutoReloadVariable\"]:\n var_list.append(op.outputs[0])\n if not var_list:\n return None\n else:\n ranks = []\n for var in var_list:\n with ops.colocate_with(var.op):\n ranks.append(array_ops.rank_internal(var, optimize=False))\n if len(ranks) == 1:\n return ranks[0]\n else:\n return array_ops.stack(ranks)\n\n\n@tf_export(v1=[\"report_uninitialized_variables\"])\n@tf_should_use.should_use_result\ndef report_uninitialized_variables(var_list=None,\n name=\"report_uninitialized_variables\"):\n \"\"\"Adds ops to list the names of uninitialized variables.\n\n When run, it returns a 1-D tensor containing the names of uninitialized\n variables if there are any, or an empty array if there are none.\n\n Args:\n var_list: List of `Variable` objects to check. Defaults to the\n value of `global_variables() + local_variables()`\n name: Optional name of the `Operation`.\n\n Returns:\n A 1-D tensor containing names of the uninitialized variables, or an empty\n 1-D tensor if there are no variables or no uninitialized variables.\n \"\"\"\n if var_list is None:\n var_list = global_variables() + local_variables()\n # Backwards compatibility for old-style variables. 
TODO(touts): remove.\n if not var_list:\n var_list = []\n for op in ops.get_default_graph().get_operations():\n if op.type in [\"Variable\", \"VariableV2\", \"AutoReloadVariable\"]:\n var_list.append(op.outputs[0])\n with ops.name_scope(name):\n # Run all operations on CPU\n if var_list:\n init_vars = [state_ops.is_variable_initialized(v) for v in var_list]\n local_device = os.environ.get(\n \"TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING\", \"/cpu:0\")\n with ops.device(local_device):\n if not var_list:\n # Return an empty tensor so we only need to check for returned tensor\n # size being 0 as an indication of model ready.\n return array_ops.constant([], dtype=dtypes.string)\n else:\n # Get a 1-D boolean tensor listing whether each variable is initialized.\n variables_mask = math_ops.logical_not(array_ops.stack(init_vars))\n # Get a 1-D string tensor containing all the variable names.\n variable_names_tensor = array_ops.constant(\n [s.op.name for s in var_list])\n # Return a 1-D tensor containing all the names of\n # uninitialized variables.\n return array_ops.boolean_mask(variable_names_tensor, variables_mask)\n\n# pylint: disable=protected-access\nVariable._OverloadAllOperators()\n\nops.register_tensor_conversion_function(\n PartitionedVariable, PartitionedVariable._TensorConversionFunction)\n# pylint: enable=protected-access\n\n\nops.register_dense_tensor_like_type(Variable)\n" ]
[ [ "tensorflow.python.framework.tensor_shape.unknown_shape", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.framework.ops.colocate_with", "tensorflow.python.framework.ops.register_tensor_conversion_function", "tensorflow.python.ops.state_ops.assign_sub", "tensorflow.python.framework.ops.get_default_session", "tensorflow.python.ops.array_ops.rank_internal", "tensorflow.python.ops.state_ops.variable_op_v2", "tensorflow.python.ops.gen_state_ops.scatter_update", "tensorflow.python.framework.tensor_shape.dimension_value", "tensorflow.python.framework.ops.add_to_collections", "tensorflow.python.ops.state_ops.is_variable_initialized", "tensorflow.python.ops.gen_state_ops.scatter_sub", "tensorflow.python.util.tf_export.tf_export", "tensorflow.core.framework.variable_pb2.SaveSliceInfoDef", "tensorflow.python.platform.tf_logging.log_first_n", "tensorflow.python.framework.ops.register_dense_tensor_like_type", "tensorflow.python.framework.ops.strip_name_scope", "tensorflow.python.framework.ops._name_from_scope_name", "tensorflow.python.ops.state_ops.assign_add", "tensorflow.python.ops.gen_state_ops.scatter_nd_update", "tensorflow.python.ops.control_flow_ops.group", "tensorflow.python.framework.ops.device", "tensorflow.python.ops.gen_state_ops.scatter_nd_sub", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.framework.ops.prepend_name_scope", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.ops.array_ops.constant", "tensorflow.python.ops.state_ops.count_up_to", "tensorflow.python.framework.ops.init_scope", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.gen_state_ops.scatter_nd_add", "tensorflow.python.ops.state_ops.assign", "tensorflow.python.ops.gen_state_ops.scatter_add", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.ops.array_ops.boolean_mask", "tensorflow.python.framework.ops.get_collection", "tensorflow.python.ops.array_ops.split", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.array_ops.stack", "tensorflow.python.ops.array_ops.concat", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.control_flow_ops.no_op", "tensorflow.core.framework.variable_pb2.VariableDef" ] ]
johnnylord/trytry-segmentation
[ "a88d75571ddba92bd10ac2d7303bee9426188b62" ]
[ "agent/segmentation.py" ]
[ "import os\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.transforms as T\nfrom torch.utils.data import DataLoader\nfrom tensorboardX import SummaryWriter\n\nfrom data.segmentation import SegmentDataset\nfrom model.segmentation.fcn import FCN32\nfrom model.segmentation.unet import UNet, UNetVGG16\n\n\n__all__ = [ \"SegmentAgent\" ]\n\nclass SegmentAgent:\n \"\"\"Train Image Segmentation model\n\n Requirements:\n Simple baseline\n - (15%) validation mIoU > 0.635\n - (15%) testing mIoU > 0.625\n \"\"\"\n def __init__(self, config):\n self.config = config\n\n # Check environment\n if torch.cuda.is_available():\n self.device = torch.device(config['train']['device'])\n else:\n raise RuntimeError(\"Please train your model with GPU\")\n\n # Create dataset\n tr_transform = T.Compose([\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]), ])\n te_transform = T.Compose([\n T.ToTensor(),\n T.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]), ])\n train_dataset = SegmentDataset(root=config['dataset']['train']['root'],\n transform=tr_transform)\n valid_dataset = SegmentDataset(root=config['dataset']['valid']['root'],\n transform=te_transform)\n\n # Create dataloader\n self.train_loader = DataLoader(train_dataset,\n batch_size=config['loader']['batch_size'],\n num_workers=config['loader']['num_workers'],\n shuffle=True)\n self.valid_loader = DataLoader(valid_dataset,\n batch_size=config['loader']['batch_size'],\n num_workers=config['loader']['num_workers'],\n shuffle=False)\n\n # Create model\n if config['train']['model'] == 'fcn':\n self.model = FCN32(n_classes=7)\n elif config['train']['model'] == 'unet':\n self.model = UNetVGG16(n_classes=7)\n self.model.to(self.device)\n\n # Create optimizer\n self.optimizer = optim.Adam(self.model.parameters(), lr=config['optim']['lr'])\n\n # Create loss function\n self.criterion = nn.CrossEntropyLoss()\n\n # Create tensorboard\n tensorboard_dir = osp.join(config['train']['log_dir'], config['train']['exp_name'])\n self.writer = SummaryWriter(tensorboard_dir)\n\n # Logging\n self.start_epoch = 0\n self.current_epoch = -1\n self.current_loss = 10000\n\n # Resume training or not\n if config['train']['resume']:\n checkpoint_file = osp.join(config['train']['log_dir'],\n config['train']['checkpoint_dir'],\n 'best.pth')\n checkpoint = torch.load(checkpoint_file)\n self.model.load_state_dict(checkpoint['model'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = config['optim']['lr']\n self.current_epoch = checkpoint['current_epoch'] + 1\n self.start_epoch = self.current_epoch + 1\n print(\"Resume training at epoch {}\".format(self.start_epoch))\n\n def train(self):\n for epoch in range(self.start_epoch, self.config['train']['n_epochs']):\n self.current_epoch = epoch\n self.train_one_epoch()\n self.validate()\n\n def train_one_epoch(self):\n running_loss = 0\n\n self.model.train()\n for i, (imgs, targets) in enumerate(self.train_loader):\n imgs = imgs.to(self.device)\n targets = targets.to(self.device)\n\n # Forward & Backward\n self.optimizer.zero_grad()\n outputs = self.model(imgs) # (n, c, h, w)\n preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)\n labels = targets.flatten()\n loss = self.criterion(preds, labels)\n loss.backward()\n self.optimizer.step()\n\n # Cumulate result\n running_loss += loss.item() * len(imgs)\n\n # Show 
training information\n if (i % self.config['train']['interval']) == 0:\n print(\"Epoch {}:{}({}%), Loss: {:.2f}\".format(\n self.current_epoch, self.config['train']['n_epochs'],\n int(i*100/len(self.train_loader)), loss.item()))\n\n train_loss = running_loss / len(self.train_loader.dataset)\n print(\"Epoch {}:{}, Train Loss: {:.2f}\".format(\n self.current_epoch, self.config['train']['n_epochs'], train_loss))\n\n # Export result to tensorboard\n self.writer.add_scalar(\"Train Loss\", train_loss, self.current_epoch)\n\n def validate(self):\n running_loss = 0\n pred_masks = []\n true_masks = []\n\n self.model.eval()\n with torch.no_grad():\n for imgs, targets in self.valid_loader:\n imgs = imgs.to(self.device)\n targets = targets.to(self.device)\n\n outputs = self.model(imgs) # (n, c, h, w)\n\n # Save segmenation mask\n pred_mask = np.argmax(outputs.detach().cpu().numpy(), axis=1)\n pred_masks.append(pred_mask)\n true_masks.append(targets.detach().cpu().numpy())\n\n # Compute loss\n preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)\n labels = targets.flatten()\n loss = self.criterion(preds, labels)\n\n # Validation Loss\n running_loss += loss.item() * len(imgs)\n\n # Show validation result\n pred_masks = np.vstack(pred_masks)\n true_masks = np.vstack(true_masks)\n miou = self._mean_iou_score(pred_masks, true_masks)\n valid_loss = running_loss / len(self.valid_loader.dataset)\n print(\"Epoch {}:{}, Valid Loss: {:.2f}, mIoU: {:.3f}\".format(\n self.current_epoch, self.config['train']['n_epochs'],\n valid_loss, miou))\n\n # Save training checkpoints\n if valid_loss < self.current_loss:\n self.current_loss = valid_loss\n self._save_checkpoint()\n\n # Export result to tensorboard\n self.writer.add_scalar(\"Valid Loss\", valid_loss, self.current_epoch)\n\n def finalize(self):\n pass\n\n def _save_checkpoint(self):\n checkpoints = { 'model': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'current_epoch': self.current_epoch,\n 'current_loss': self.current_loss }\n checkpoint_file = osp.join(self.config['train']['log_dir'],\n self.config['train']['checkpoint_dir'],\n 'best.pth')\n if not osp.exists(osp.dirname(checkpoint_file)):\n os.makedirs(osp.dirname(checkpoint_file))\n\n torch.save(checkpoints, checkpoint_file)\n print(\"Save checkpoint to '{}'\".format(checkpoint_file))\n\n def _mean_iou_score(self, pred_masks, true_masks):\n \"\"\"Compute mean IoU score over 6 classes\"\"\"\n mean_iou = 0\n for i in range(6):\n tp_fp = np.sum(pred_masks == i)\n tp_fn = np.sum(true_masks == i)\n tp = np.sum((pred_masks == i) * (true_masks == i))\n iou = tp / (tp_fp + tp_fn - tp)\n mean_iou += iou / 6\n return mean_iou\n" ]
[ [ "numpy.vstack", "torch.utils.data.DataLoader", "numpy.sum", "torch.load", "torch.save", "torch.no_grad", "torch.nn.CrossEntropyLoss", "torch.cuda.is_available", "torch.device" ] ]
BradyBromley/DeepCTR
[ "3d12ffc0e0a5e893dce8bd315824c180445b772e" ]
[ "deepctr/models/din.py" ]
[ "# -*- coding:utf-8 -*-\n\"\"\"\nAuthor:\n Weichen Shen,[email protected]\n\nReference:\n [1] Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068. (https://arxiv.org/pdf/1706.06978.pdf)\n\"\"\"\n\n\nfrom tensorflow.python.keras.layers import Dense,Concatenate, Flatten\nfrom tensorflow.python.keras.models import Model\n\nfrom ..inputs import build_input_features,create_embedding_matrix,SparseFeat,VarLenSparseFeat,DenseFeat,embedding_lookup,get_dense_input,varlen_embedding_lookup,get_varlen_pooling_list,combined_dnn_input\nfrom ..layers.core import DNN, PredictionLayer\nfrom ..layers.sequence import AttentionSequencePoolingLayer\nfrom ..layers.utils import concat_fun, NoMask\n\n\ndef DIN(dnn_feature_columns, history_feature_list, embedding_size=8, hist_len_max=16, dnn_use_bn=False,\n dnn_hidden_units=(200, 80), dnn_activation='relu', att_hidden_size=(80, 40), att_activation=\"dice\",\n att_weight_normalization=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024,\n task='binary'):\n \"\"\"Instantiates the Deep Interest Network architecture.\n\n :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.\n :param history_feature_list: list,to indicate sequence sparse field\n :param embedding_size: positive integer,sparse feature embedding_size.\n :param hist_len_max: positive int, to indicate the max length of seq input\n :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net\n :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net\n :param dnn_activation: Activation function to use in deep net\n :param att_hidden_size: list,list of positive integer , the layer number and units in each layer of attention net\n :param att_activation: Activation function to use in attention net\n :param att_weight_normalization: bool.Whether normalize the attention score of local activation unit.\n :param l2_reg_dnn: float. L2 regularizer strength applied to DNN\n :param l2_reg_embedding: float. 
L2 regularizer strength applied to embedding vector\n :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.\n :param init_std: float,to use as the initialize std of embedding vector\n :param seed: integer ,to use as random seed.\n :param task: str, ``\"binary\"`` for binary logloss or ``\"regression\"`` for regression loss\n :return: A Keras model instance.\n\n \"\"\"\n\n\n features = build_input_features(dnn_feature_columns)\n\n sparse_feature_columns = list(filter(lambda x:isinstance(x,SparseFeat),dnn_feature_columns)) if dnn_feature_columns else []\n dense_feature_columns = list(\n filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else []\n varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else []\n\n\n history_feature_columns = []\n sparse_varlen_feature_columns = []\n history_fc_names = list(map(lambda x: \"hist_\" + x, history_feature_list))\n for fc in varlen_sparse_feature_columns:\n feature_name = fc.name\n if feature_name in history_fc_names:\n history_feature_columns.append(fc)\n else:\n sparse_varlen_feature_columns.append(fc)\n\n\n inputs_list = list(features.values())\n\n\n embedding_dict = create_embedding_matrix(dnn_feature_columns,l2_reg_embedding,init_std,seed,embedding_size, prefix=\"\")\n\n\n query_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,history_feature_list,history_feature_list)#query是单独的\n keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns, history_fc_names, history_fc_names)\n dnn_input_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,mask_feat_list=history_feature_list)\n dense_value_list = get_dense_input(features, dense_feature_columns)\n\n sequence_embed_dict = varlen_embedding_lookup(embedding_dict,features,sparse_varlen_feature_columns)\n sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, features, sparse_varlen_feature_columns)\n dnn_input_emb_list += sequence_embed_list\n\n\n keys_emb = concat_fun(keys_emb_list,mask=True)\n deep_input_emb = concat_fun(dnn_input_emb_list)\n query_emb = concat_fun(query_emb_list,mask=True)\n\n hist = AttentionSequencePoolingLayer(att_hidden_size, att_activation,\n weight_normalization=att_weight_normalization, supports_masking=True)([\n query_emb, keys_emb])\n\n deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])\n deep_input_emb = Flatten()(deep_input_emb)\n dnn_input = combined_dnn_input([deep_input_emb],dense_value_list)\n output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,\n dnn_dropout, dnn_use_bn, seed)(dnn_input)\n final_logit = Dense(1, use_bias=False)(output)\n\n output = PredictionLayer(task)(final_logit)\n\n model = Model(inputs=inputs_list, outputs=output)\n return model\n" ]
[ [ "tensorflow.python.keras.layers.Concatenate", "tensorflow.python.keras.layers.Flatten", "tensorflow.python.keras.layers.Dense", "tensorflow.python.keras.models.Model" ] ]
bernssolg/pyntcloud-master
[ "84cf000b7a7f69a2c1b36f9624f05f65160bf992" ]
[ "pyntcloud/structures/kdtree.py" ]
[ "from scipy.spatial import cKDTree\n\nfrom .base import Structure\n\n\nclass KDTree(cKDTree, Structure):\n\n def __init__(self, *, points, leafsize=16, compact_nodes=False, balanced_tree=False):\n Structure.__init__(self, points=points)\n self._leafsize = leafsize\n self._compact_nodes = compact_nodes\n self._balanced_tree = balanced_tree\n\n def compute(self):\n self.id = \"K({},{},{})\".format(self._leafsize, self._compact_nodes, self._balanced_tree)\n cKDTree.__init__(\n self,\n self._points,\n leafsize=self._leafsize,\n compact_nodes=self._compact_nodes,\n balanced_tree=self._balanced_tree)\n" ]
[ [ "scipy.spatial.cKDTree.__init__" ] ]
bhargavyagnik/FaceMaskDetection
[ "990c41a921a2a8a7760492a8dd21e4ab51391e51" ]
[ "facemask.py" ]
[ "import tensorflow as tf\r\nimport cv2\r\nimport numpy as np\r\n\r\nmodel = tf.keras.models.load_model('saved_model/model_3.h5')\r\nface_clsfr = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\r\n\r\nsource = cv2.VideoCapture(1)\r\n\r\nlabels_dict = {0: 'with_mask', 1: 'without_mask'}\r\ncolor_dict = {0: (0, 255, 0), 1: (0, 0, 255)}\r\n\r\nwhile (True):\r\n\r\n ret, img = source.read()\r\n faces = face_clsfr.detectMultiScale(img)\r\n print(img.shape)\r\n for x, y, w, h in faces:\r\n face_img = img[y:y + w, x:x + w]\r\n resized = cv2.resize(face_img, (128, 128))\r\n normalized = resized / 255.0\r\n reshaped = np.reshape(normalized, (1, 128, 128, 3))\r\n result = model.predict(reshaped)\r\n print(result)\r\n label=int(result.round().flatten())\r\n cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)\r\n cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)\r\n cv2.putText(\r\n img, labels_dict[label],\r\n (x, y - 10),\r\n cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)\r\n\r\n cv2.imshow('LIVE', img)\r\n key = cv2.waitKey(1)\r\n\r\n if (key == 27):\r\n break\r\n\r\ncv2.destroyAllWindows()\r\nsource.release()" ]
[ [ "numpy.reshape", "tensorflow.keras.models.load_model" ] ]
nlpming/tensorflow-DSMM
[ "dc982cc49bf03f474da2895e4dd4fb37061c0271" ]
[ "dssm/data_input.py" ]
[ "#!/usr/bin/env python\n# encoding=utf-8\nfrom inspect import getblock\nimport json\nimport os\nfrom os import read\nfrom numpy.core.fromnumeric import mean\nimport numpy as np\nimport paddlehub as hub\nimport six\nimport math\nimport random\nimport sys\nfrom util import read_file\nfrom config import Config\n# 配置文件\nconf = Config()\n\n\nclass Vocabulary(object):\n def __init__(self, meta_file, max_len, allow_unk=0, unk=\"$UNK$\", pad=\"$PAD$\",):\n self.voc2id = {}\n self.id2voc = {}\n self.unk = unk\n self.pad = pad\n self.max_len = max_len\n self.allow_unk = allow_unk\n with open(meta_file, encoding='utf-8') as f:\n for i, line in enumerate(f):\n line = convert_to_unicode(line.strip(\"\\n\"))\n self.voc2id[line] = i\n self.id2voc[i] = line\n self.size = len(self.voc2id)\n self.oov_num = self.size + 1\n\n def fit(self, words_list):\n \"\"\"\n :param words_list: [[w11, w12, ...], [w21, w22, ...], ...]\n :return:\n \"\"\"\n word_lst = []\n word_lst_append = word_lst.append\n for words in words_list:\n if not isinstance(words, list):\n print(words)\n continue\n for word in words:\n word = convert_to_unicode(word)\n word_lst_append(word)\n word_counts = Counter(word_lst)\n if self.max_num_word < 0:\n self.max_num_word = len(word_counts)\n sorted_voc = [w for w, c in word_counts.most_common(self.max_num_word)]\n self.max_num_word = len(sorted_voc)\n self.oov_index = self.max_num_word + 1\n self.voc2id = dict(zip(sorted_voc, range(1, self.max_num_word + 1)))\n return self\n\n def _transform2id(self, word):\n word = convert_to_unicode(word)\n if word in self.voc2id:\n return self.voc2id[word]\n elif self.allow_unk:\n return self.voc2id[self.unk]\n else:\n print(word)\n raise ValueError(\"word:{} Not in voc2id, please check\".format(word))\n\n def _transform_seq2id(self, words, padding=0):\n out_ids = []\n words = convert_to_unicode(words)\n if self.max_len:\n words = words[:self.max_len]\n for w in words:\n out_ids.append(self._transform2id(w))\n if padding and self.max_len:\n while len(out_ids) < self.max_len:\n out_ids.append(0)\n return out_ids\n \n def _transform_intent2ont_hot(self, words, padding=0):\n # 将多标签意图转为 one_hot\n out_ids = np.zeros(self.size, dtype=np.float32)\n words = convert_to_unicode(words)\n for w in words:\n out_ids[self._transform2id(w)] = 1.0\n return out_ids\n\n def _transform_seq2bert_id(self, words, padding=0):\n out_ids, seq_len = [], 0\n words = convert_to_unicode(words)\n if self.max_len:\n words = words[:self.max_len]\n seq_len = len(words)\n # 插入 [CLS], [SEP]\n out_ids.append(self._transform2id(\"[CLS]\"))\n for w in words:\n out_ids.append(self._transform2id(w))\n mask_ids = [1 for _ in out_ids]\n if padding and self.max_len:\n while len(out_ids) < self.max_len + 1:\n out_ids.append(0)\n mask_ids.append(0)\n seg_ids = [0 for _ in out_ids]\n return out_ids, mask_ids, seg_ids, seq_len\n\n @staticmethod\n def _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n def _transform_2seq2bert_id(self, seq1, seq2, padding=0):\n out_ids, seg_ids, seq_len = [], [1], 0\n seq1 = [x for x in convert_to_unicode(seq1)]\n seq2 = [x for x in convert_to_unicode(seq2)]\n # 截断\n self._truncate_seq_pair(seq1, seq2, self.max_len - 2)\n # 插入 [CLS], [SEP]\n out_ids.append(self._transform2id(\"[CLS]\"))\n for w in seq1:\n 
out_ids.append(self._transform2id(w))\n seg_ids.append(0)\n out_ids.append(self._transform2id(\"[SEP]\"))\n seg_ids.append(0)\n for w in seq2:\n out_ids.append(self._transform2id(w))\n seg_ids.append(1)\n mask_ids = [1 for _ in out_ids]\n if padding and self.max_len:\n while len(out_ids) < self.max_len + 1:\n out_ids.append(0)\n mask_ids.append(0)\n seg_ids.append(0)\n return out_ids, mask_ids, seg_ids, seq_len\n\n def transform(self, seq_list, is_bert=0):\n if is_bert:\n return [self._transform_seq2bert_id(seq) for seq in seq_list]\n else:\n return [self._transform_seq2id(seq) for seq in seq_list]\n\n def __len__(self):\n return len(self.voc2id)\n\ndef convert_to_unicode(text):\n \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n if six.PY3:\n if isinstance(text, str):\n return text\n elif isinstance(text, bytes):\n return text.decode(\"utf-8\", \"ignore\")\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n elif six.PY2:\n if isinstance(text, str):\n return text.decode(\"utf-8\", \"ignore\")\n elif isinstance(text, unicode):\n return text\n else:\n raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n else:\n raise ValueError(\"Not running on Python2 or Python 3?\")\n\ndef gen_word_set(file_path, out_path='./data/words.txt'):\n word_set = set()\n with open(file_path, encoding='utf-8') as f:\n for line in f.readlines():\n spline = line.strip().split('\\t')\n if len(spline) < 4:\n continue\n prefix, query_pred, title, tag, label = spline\n if label == '0':\n continue\n cur_arr = [prefix, title]\n query_pred = json.loads(query_pred)\n for w in prefix:\n word_set.add(w)\n for each in query_pred:\n for w in each:\n word_set.add(w)\n with open(word_set, 'w', encoding='utf-8') as o:\n for w in word_set:\n o.write(w + '\\n')\n pass\n\ndef convert_word2id(query, vocab_map):\n ids = []\n for w in query:\n if w in vocab_map:\n ids.append(vocab_map[w])\n else:\n ids.append(vocab_map[conf.unk])\n while len(ids) < conf.max_seq_len:\n ids.append(vocab_map[conf.pad])\n return ids[:conf.max_seq_len]\n\n\ndef convert_seq2bow(query, vocab_map):\n bow_ids = np.zeros(conf.nwords)\n for w in query:\n if w in vocab_map:\n bow_ids[vocab_map[w]] += 1\n else:\n bow_ids[vocab_map[conf.unk]] += 1\n return bow_ids\n\n\ndef get_data(file_path):\n \"\"\"\n gen datasets, convert word into word ids.\n :param file_path:\n :return: [[query, pos sample, 4 neg sample]], shape = [n, 6]\n \"\"\"\n data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [], 'doc_neg': [], 'doc_neg_len': []}\n with open(file_path, encoding='utf8') as f:\n for line in f.readlines():\n spline = line.strip().split('\\t')\n if len(spline) < 4:\n continue\n prefix, query_pred, title, tag, label = spline\n if label == '0':\n continue\n cur_arr, cur_len = [], []\n query_pred = json.loads(query_pred)\n # only 4 negative sample\n for each in query_pred:\n if each == title:\n continue\n cur_arr.append(convert_word2id(each, conf.vocab_map))\n each_len = len(each) if len(each) < conf.max_seq_len else conf.max_seq_len\n cur_len.append(each_len)\n if len(cur_arr) >= 4:\n data_map['query'].append(convert_word2id(prefix, conf.vocab_map))\n data_map['query_len'].append(len(prefix) if len(prefix) < conf.max_seq_len else conf.max_seq_len)\n data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))\n data_map['doc_pos_len'].append(len(title) if len(title) < conf.max_seq_len else conf.max_seq_len)\n data_map['doc_neg'].extend(cur_arr[:4])\n 
data_map['doc_neg_len'].extend(cur_len[:4])\n pass\n return data_map\n\n\ndef get_data_siamese_rnn(file_path):\n \"\"\"\n gen datasets, convert word into word ids.\n :param file_path:\n :return: [[query, pos sample, 4 neg sample]], shape = [n, 6]\n \"\"\"\n data_arr = []\n with open(file_path, encoding='utf8') as f:\n for line in f.readlines():\n spline = line.strip().split('\\t')\n if len(spline) < 4:\n continue\n prefix, _, title, tag, label = spline\n prefix_seq = convert_word2id(prefix, conf.vocab_map)\n title_seq = convert_word2id(title, conf.vocab_map)\n data_arr.append([prefix_seq, title_seq, int(label)])\n return data_arr\n\n\ndef get_data_bow(file_path):\n \"\"\"\n gen datasets, convert word into word ids.\n :param file_path:\n :return: [[query, prefix, label]], shape = [n, 3]\n \"\"\"\n data_arr = []\n with open(file_path, encoding='utf8') as f:\n for line in f.readlines():\n spline = line.strip().split('\\t')\n if len(spline) < 4:\n continue\n prefix, _, title, tag, label = spline\n prefix_ids = convert_seq2bow(prefix, conf.vocab_map)\n title_ids = convert_seq2bow(title, conf.vocab_map)\n data_arr.append([prefix_ids, title_ids, int(label)])\n return data_arr\n\ndef trans_lcqmc(dataset):\n \"\"\"\n 最大长度\n \"\"\"\n out_arr, text_len = [], []\n for each in dataset:\n t1, t2, label = each.text_a, each.text_b, int(each.label)\n t1_ids = convert_word2id(t1, conf.vocab_map)\n t1_len = conf.max_seq_len if len(t1) > conf.max_seq_len else len(t1)\n t2_ids = convert_word2id(t2, conf.vocab_map)\n t2_len = conf.max_seq_len if len(t2) > conf.max_seq_len else len(t2)\n # t2_len = len(t2) \n out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label])\n # out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label, t1, t2])\n text_len.extend([len(t1), len(t2)])\n pass\n print(\"max len\", max(text_len), \"avg len\", mean(text_len), \"cover rate:\", np.mean([x <= conf.max_seq_len for x in text_len]))\n return out_arr\n\ndef get_lcqmc():\n \"\"\"\n 使用LCQMC数据集,并将其转为word_id\n \"\"\"\n dataset = hub.dataset.LCQMC()\n train_set = trans_lcqmc(dataset.train_examples)\n dev_set = trans_lcqmc(dataset.dev_examples)\n test_set = trans_lcqmc(dataset.test_examples)\n return train_set, dev_set, test_set\n # return test_set, test_set, test_set\n\ndef trans_lcqmc_bert(dataset:list, vocab:Vocabulary, is_merge=0):\n \"\"\"\n 最大长度\n \"\"\"\n out_arr, text_len = [], []\n for each in dataset:\n t1, t2, label = each.text_a, each.text_b, int(each.label)\n if is_merge:\n out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)\n out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, label])\n text_len.extend([len(t1) + len(t2)])\n else:\n out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)\n out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)\n out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2, label])\n text_len.extend([len(t1), len(t2)])\n pass\n print(\"max len\", max(text_len), \"avg len\", mean(text_len), \"cover rate:\", np.mean([x <= conf.max_seq_len for x in text_len]))\n return out_arr\n\ndef get_lcqmc_bert(vocab:Vocabulary, is_merge=0):\n \"\"\"\n 使用LCQMC数据集,并将每个query其转为word_id,\n \"\"\"\n dataset = hub.dataset.LCQMC()\n train_set = trans_lcqmc_bert(dataset.train_examples, vocab, is_merge)\n dev_set = trans_lcqmc_bert(dataset.dev_examples, vocab, is_merge)\n test_set = trans_lcqmc_bert(dataset.test_examples, vocab, is_merge)\n return train_set, dev_set, test_set\n # 
test_set = test_set[:100]\n # return test_set, test_set, test_set\n\ndef get_test(file_:str, vocab:Vocabulary):\n test_arr = read_file(file_, '\\t') # [[q1, q2],...]\n out_arr = []\n for line in test_arr:\n if len(line) != 2:\n print('wrong line size=', len(line))\n t1, t2 = line # [t1_ids, t1_len, t2_ids, t2_len, label]\n t1_ids = vocab._transform_seq2id(t1, padding=1)\n t1_len = vocab.max_len if len(t1) > vocab.max_len else len(t1)\n t2_ids = vocab._transform_seq2id(t2, padding=1)\n t2_len = vocab.max_len if len(t2) > vocab.max_len else len(t2)\n out_arr.append([t1_ids, t1_len, t2_ids, t2_len])\n return out_arr, test_arr\n\ndef get_test_bert(file_:str, vocab:Vocabulary, is_merge=0):\n test_arr = read_file(file_, '\\t') # [[q1, q2],...]\n out_arr, _ = get_test_bert_by_arr(test_arr, vocab, is_merge)\n return out_arr, test_arr\n\ndef get_test_bert_by_arr(test_arr:list, vocab:Vocabulary, is_merge=0):\n # test_arr # [[q1, q2],...]\n out_arr = []\n for line in test_arr:\n if len(line) != 2:\n print('wrong line size=', len(line))\n t1, t2 = line # [t1_ids, t1_len, t2_ids, t2_len, label]\n if is_merge:\n out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)\n out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])\n else:\n out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)\n out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)\n out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2])\n return out_arr, test_arr\n\ndef get_test_bert_single(file_:str, vocab:Vocabulary, is_merge=0):\n test_arr = read_file(file_) # [q1,...]\n out_arr = []\n for line in test_arr:\n t1 = line # [t1_ids, t1_len, t2_ids, t2_len, label]\n out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)\n out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])\n return out_arr, test_arr\n\ndef get_batch(dataset, batch_size=None, is_test=0):\n # tf Dataset太难用,不如自己实现\n # https://stackoverflow.com/questions/50539342/getting-batches-in-tensorflow\n # dataset:每个元素是一个特征,[[x1, x2, x3,...], ...], 如果是测试集,可能就没有标签\n if not batch_size:\n batch_size = 32\n if not is_test:\n random.shuffle(dataset)\n steps = int(math.ceil(float(len(dataset)) / batch_size))\n for i in range(steps):\n idx = i * batch_size\n cur_set = dataset[idx: idx + batch_size]\n cur_set = zip(*cur_set)\n yield cur_set\n\n\nif __name__ == '__main__':\n # prefix, query_prediction, title, tag, label\n # query_prediction 为json格式。\n file_train = './data/oppo_round1_train_20180929.txt'\n file_vali = './data/oppo_round1_vali_20180929.txt'\n # data_train = get_data(file_train)\n # data_train = get_data(file_vali)\n # print(len(data_train['query']), len(data_train['doc_pos']), len(data_train['doc_neg']))\n dataset = get_lcqmc()\n print(dataset[1][:3])\n for each in get_batch(dataset[1][:3], batch_size=2):\n t1_ids, t1_len, t2_ids, t2_len, label = each\n print(each)\n pass\n" ]
[ [ "numpy.mean", "numpy.core.fromnumeric.mean", "numpy.zeros" ] ]
baagaard-usgs/groundmotion-processing
[ "6be2b4460d598bba0935135efa85af2655578565", "6be2b4460d598bba0935135efa85af2655578565" ]
[ "gmprocess/waveform_processing/clipping/clipping_check.py", "tests/gmprocess/waveform_processing/corner_frequencies_test.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom obspy.geodetics.base import gps2dist_azimuth\n\nfrom gmprocess.waveform_processing.clipping.clipping_ann import clipNet\nfrom gmprocess.waveform_processing.clipping.max_amp import Max_Amp\nfrom gmprocess.waveform_processing.clipping.histogram import Histogram\nfrom gmprocess.waveform_processing.clipping.ping import Ping\n\nM_TO_KM = 1.0 / 1000\n\n\ndef check_clipping(st, origin, threshold=0.2):\n \"\"\"Apply clicking check.\n\n Lower thresholds will pass fewer streams but will give less false negatives\n (i.e., streams in which clipping actually occurred but were missed).\n\n Args:\n st (StationStream):\n Trace of data.\n origin (ScalarEvent):\n ScalarEvent object.\n threshold (float):\n Threshold probability.\n\n Returns:\n StationStream checked for clipping.\n\n \"\"\"\n # Don't bother with test for strong motion instruments\n chan_code = st.get_id().split(\".\")[2]\n if chan_code[1] == \"N\":\n return st\n\n # Don't bother with test if it has already failed\n if not st.passed:\n return st\n\n event_mag = origin.magnitude\n event_lon = origin.longitude\n event_lat = origin.latitude\n dist = (\n gps2dist_azimuth(\n lat1=event_lat,\n lon1=event_lon,\n lat2=st[0].stats[\"coordinates\"][\"latitude\"],\n lon2=st[0].stats[\"coordinates\"][\"longitude\"],\n )[0]\n * M_TO_KM\n )\n\n # Clip mag/dist to range of training dataset\n event_mag = np.clip(event_mag, 4.0, 8.8)\n dist = np.clip(dist, 0.0, 445.0)\n\n clip_nnet = clipNet()\n\n max_amp_method = Max_Amp(st, max_amp_thresh=6e6)\n hist_method = Histogram(st)\n ping_method = Ping(st)\n inputs = [\n event_mag,\n dist,\n max_amp_method.is_clipped,\n hist_method.is_clipped,\n ping_method.is_clipped,\n ]\n prob_clip = clip_nnet.evaluate(inputs)[0][0]\n\n if prob_clip >= threshold:\n for tr in st:\n tr.fail(f\"Failed clipping check: prob_clip = {prob_clip:.2f}.\")\n\n return st\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\n\nimport numpy as np\n\nfrom gmprocess.core.streamcollection import StreamCollection\nfrom gmprocess.io.read import read_data\nfrom gmprocess.utils.test_utils import read_data_dir\nfrom gmprocess.utils.config import get_config\n\nfrom gmprocess.waveform_processing.windows import signal_split\nfrom gmprocess.waveform_processing.windows import signal_end\nfrom gmprocess.waveform_processing.windows import window_checks\n\nfrom gmprocess.waveform_processing.processing import get_corner_frequencies\nfrom gmprocess.waveform_processing.snr import compute_snr\n\n\ndef test_corner_frequencies():\n # Default config has 'constant' corner frequency method, so the need\n # here is to force the 'snr' method.\n data_files, origin = read_data_dir(\"geonet\", \"us1000778i\", \"*.V1A\")\n streams = []\n for f in data_files:\n streams += read_data(f)\n\n sc = StreamCollection(streams)\n\n config = get_config()\n\n window_conf = config[\"windows\"]\n\n processed_streams = sc.copy()\n for st in processed_streams:\n if st.passed:\n # Estimate noise/signal split time\n event_time = origin.time\n event_lon = origin.longitude\n event_lat = origin.latitude\n st = signal_split(st, origin)\n\n # Estimate end of signal\n end_conf = window_conf[\"signal_end\"]\n event_mag = origin.magnitude\n print(st)\n st = signal_end(\n st,\n event_time=event_time,\n event_lon=event_lon,\n event_lat=event_lat,\n event_mag=event_mag,\n **end_conf\n )\n wcheck_conf = window_conf[\"window_checks\"]\n st = window_checks(\n st,\n 
min_noise_duration=wcheck_conf[\"min_noise_duration\"],\n min_signal_duration=wcheck_conf[\"min_signal_duration\"],\n )\n\n pconfig = config[\"processing\"]\n\n # Run SNR check\n # I think we don't do this anymore.\n test = [d for d in pconfig if list(d.keys())[0] == \"compute_snr\"]\n snr_config = test[0][\"compute_snr\"]\n snr_config[\"check\"][\"min_freq\"] = 0.2\n for stream in processed_streams:\n stream = compute_snr(stream, mag=origin.magnitude, **snr_config)\n\n # Run get_corner_frequencies\n test = [d for d in pconfig if list(d.keys())[0] == \"get_corner_frequencies\"]\n cf_config = test[0][\"get_corner_frequencies\"]\n snr_config = cf_config[\"snr\"]\n\n # With same_horiz False\n snr_config[\"same_horiz\"] = False\n\n lp = []\n hp = []\n for stream in processed_streams:\n if not stream.passed:\n continue\n stream = get_corner_frequencies(stream, method=\"snr\", snr=snr_config)\n if stream[0].hasParameter(\"corner_frequencies\"):\n cfdict = stream[0].getParameter(\"corner_frequencies\")\n lp.append(cfdict[\"lowpass\"])\n hp.append(cfdict[\"highpass\"])\n np.testing.assert_allclose(\n np.sort(hp), [0.00467919, 0.00584742, 0.01026485], atol=1e-6\n )\n\n st = processed_streams.select(station=\"HSES\")[0]\n lps = [tr.getParameter(\"corner_frequencies\")[\"lowpass\"] for tr in st]\n hps = [tr.getParameter(\"corner_frequencies\")[\"highpass\"] for tr in st]\n np.testing.assert_allclose(np.sort(lps), [100.0, 100.0, 100.0], atol=1e-6)\n np.testing.assert_allclose(\n np.sort(hps), [0.00542478, 0.01026485, 0.02527502], atol=1e-6\n )\n\n # With same_horiz True\n snr_config[\"same_horiz\"] = True\n\n lp = []\n hp = []\n for stream in processed_streams:\n if not stream.passed:\n continue\n stream = get_corner_frequencies(stream, method=\"snr\", snr=snr_config)\n if stream[0].hasParameter(\"corner_frequencies\"):\n cfdict = stream[0].getParameter(\"corner_frequencies\")\n lp.append(cfdict[\"lowpass\"])\n hp.append(cfdict[\"highpass\"])\n\n np.testing.assert_allclose(\n np.sort(hp), [0.00467919, 0.01026485, 0.01787214], atol=1e-6\n )\n\n st = processed_streams.select(station=\"HSES\")[0]\n lps = [tr.getParameter(\"corner_frequencies\")[\"lowpass\"] for tr in st]\n hps = [tr.getParameter(\"corner_frequencies\")[\"highpass\"] for tr in st]\n np.testing.assert_allclose(np.sort(lps), [100.0, 100.0, 100.0], atol=1e-6)\n np.testing.assert_allclose(np.sort(hps), [0.010265, 0.010265, 0.025275], atol=1e-6)\n\n\nif __name__ == \"__main__\":\n os.environ[\"CALLED_FROM_PYTEST\"] = \"True\"\n test_corner_frequencies()\n" ]
[ [ "numpy.clip" ], [ "numpy.sort" ] ]
HKUST-KnowComp/HPHG
[ "48b704b28c217e4590edf4dd3c7825495dffb76e" ]
[ "src/hypergraph.py" ]
[ "import numpy as np\nfrom tqdm import tqdm\n\n\nclass Hypergraph(object):\n def __init__(self,graph_type='0',nums_type=None):\n self._nodes = {} # node set\n self._edges = {} # edge set (hash index)\n self.graph_type = graph_type # graph type, homogeneous:0, heterogeneous:1\n self.nums_type = nums_type # for heterogeneous graph, number of different node type\n self.cumsum = np.cumsum(self.nums_type) if self.graph_type=='1' else None # cumsum of nums_type\n\n def add_edge(self, edge_name, e):\n '''\n Add a hyperedge.\n edge_name: name of hyperedge\n edge: node list of hyperedge\n weight: weight of hyperedge\n '''\n edge = tuple(sorted(e))\n\n self._edges[edge] = self._edges.get(edge,0)+1\n\n for v in edge:\n node_dict = self._nodes.get(v, {})\n\n neighbors = node_dict.get('neighbors', set())\n for v0 in edge:\n if v0!=v:\n neighbors.add(v0)\n node_dict['neighbors'] = neighbors\n\n if self.graph_type=='1':\n for i,k in enumerate(self.cumsum):\n if int(v) < k:\n break\n node_dict['type'] = i\n\n self._nodes[v] = node_dict\n\n def edge_weight(self, e):\n '''weight of weight e'''\n return self._edges.get(e,0)\n\n def nodes(self):\n '''node set'''\n return self._nodes.keys()\n\n def edges(self):\n '''edge set'''\n return self._edges.keys()\n\n def neighbors(self, n):\n '''neighbors of node n'''\n return self._nodes[n]['neighbors']\n\n def node_type(self, n):\n '''type of node n'''\n return self._nodes[n]['type'] \n\n\ndef get_indecom_factor(G, r):\n '''\n Get the indecomposable factor of heterogeneous hyper-network G.\n '''\n edges = list(G.edges())\n\n k = len(G.nums_type)\n m = len(edges)\n\n dcnt = []\n for i in range(k):\n dcnt.append({})\n for edge in edges:\n edge = list(edge)\n for i in range(k):\n subedge = tuple(sorted(edge[:i]+edge[i+1:]))\n dcnt[i][subedge] = dcnt[i].get(subedge,0)+1\n\n factors = [0]*k\n for edge in edges:\n edge = list(edge)\n for i in range(k):\n subedge = tuple(sorted(edge[:i]+edge[i+1:]))\n if dcnt[i].get(subedge,0)>1:\n factors[i]+=1\n\n factors = [factor/m for factor in factors]\n\n cumsum = [0]+list(G.cumsum)\n ps = [0]*k\n neg_num = m*r # sample enough random edges\n\n for i in tqdm(range(neg_num),ascii=True):\n random_edge = []\n for i in range(k):\n random_edge.append(np.random.randint(cumsum[i],cumsum[i+1]))\n for i in range(k):\n subedge = tuple(sorted(random_edge[:i]+random_edge[i+1:]))\n if dcnt[i].get(subedge,0)>1 or (dcnt[i].get(subedge,0)>0 and tuple(random_edge) not in edges):\n ps[i]+=1\n\n ps = [p/neg_num for p in ps]\n indecom_factors = [ps[i]/factors[i] for i in range(k)]\n\n return indecom_factors\n" ]
[ [ "numpy.random.randint", "numpy.cumsum" ] ]
pingsutw/tfx
[ "bf0d1d74e3f6ea429989fc7b80b82bea08077857" ]
[ "tfx/components/example_gen/base_example_gen_executor_test.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tfx.components.example_gen.base_example_gen_executor.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport random\nimport apache_beam as beam\nimport tensorflow as tf\nfrom google.protobuf import json_format\nfrom tfx.components.example_gen import base_example_gen_executor\nfrom tfx.proto import example_gen_pb2\nfrom tfx.types import artifact_utils\nfrom tfx.types import standard_artifacts\n\n\[email protected]_fn\ndef _TestInputSourceToExamplePTransform(\n pipeline,\n input_dict, # pylint: disable=unused-argument\n exec_properties, # pylint: disable=unused-argument\n split_pattern):\n mock_examples = []\n size = 0\n if split_pattern == 'single/*':\n size = 30000\n elif split_pattern == 'train/*':\n size = 20000\n elif split_pattern == 'eval/*':\n size = 10000\n assert size != 0\n for i in range(size):\n feature = {}\n feature['i'] = tf.train.Feature() if random.randrange(\n 10) == 0 else tf.train.Feature(\n int64_list=tf.train.Int64List(value=[i]))\n feature['f'] = tf.train.Feature() if random.randrange(\n 10) == 0 else tf.train.Feature(\n float_list=tf.train.FloatList(value=[float(i)]))\n feature['s'] = tf.train.Feature() if random.randrange(\n 10) == 0 else tf.train.Feature(\n bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(str(i))]))\n example_proto = tf.train.Example(\n features=tf.train.Features(feature=feature))\n mock_examples.append(example_proto)\n return pipeline | beam.Create(mock_examples)\n\n\nclass TestExampleGenExecutor(base_example_gen_executor.BaseExampleGenExecutor):\n\n def GetInputSourceToExamplePTransform(self):\n return _TestInputSourceToExamplePTransform\n\n\nclass BaseExampleGenExecutorTest(tf.test.TestCase):\n\n def setUp(self):\n super(BaseExampleGenExecutorTest, self).setUp()\n output_data_dir = os.path.join(\n os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),\n self._testMethodName)\n\n # Create output dict.\n examples = standard_artifacts.Examples()\n examples.uri = output_data_dir\n examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])\n self._output_dict = {'examples': [examples]}\n\n self._train_output_file = os.path.join(examples.uri, 'train',\n 'data_tfrecord-00000-of-00001.gz')\n self._eval_output_file = os.path.join(examples.uri, 'eval',\n 'data_tfrecord-00000-of-00001.gz')\n\n def testDoInputSplit(self):\n # Create exec proterties.\n exec_properties = {\n 'input_config':\n json_format.MessageToJson(\n example_gen_pb2.Input(splits=[\n example_gen_pb2.Input.Split(\n name='train', pattern='train/*'),\n example_gen_pb2.Input.Split(name='eval', pattern='eval/*')\n ]),\n preserving_proto_field_name=True),\n 'output_config':\n json_format.MessageToJson(\n example_gen_pb2.Output(), preserving_proto_field_name=True)\n }\n\n # Run executor.\n example_gen = TestExampleGenExecutor()\n 
example_gen.Do({}, self._output_dict, exec_properties)\n\n # Check example gen outputs.\n self.assertTrue(tf.io.gfile.exists(self._train_output_file))\n self.assertTrue(tf.io.gfile.exists(self._eval_output_file))\n # Input train split is bigger than eval split.\n self.assertGreater(\n tf.io.gfile.GFile(self._train_output_file).size(),\n tf.io.gfile.GFile(self._eval_output_file).size())\n\n def testDoOutputSplit(self):\n # Create exec proterties.\n exec_properties = {\n 'input_config':\n json_format.MessageToJson(\n example_gen_pb2.Input(splits=[\n example_gen_pb2.Input.Split(\n name='single', pattern='single/*'),\n ]),\n preserving_proto_field_name=True),\n 'output_config':\n json_format.MessageToJson(\n example_gen_pb2.Output(\n split_config=example_gen_pb2.SplitConfig(splits=[\n example_gen_pb2.SplitConfig.Split(\n name='train', hash_buckets=2),\n example_gen_pb2.SplitConfig.Split(\n name='eval', hash_buckets=1)\n ])))\n }\n\n # Run executor.\n example_gen = TestExampleGenExecutor()\n example_gen.Do({}, self._output_dict, exec_properties)\n\n # Check example gen outputs.\n self.assertTrue(tf.io.gfile.exists(self._train_output_file))\n self.assertTrue(tf.io.gfile.exists(self._eval_output_file))\n # Output split ratio: train:eval=2:1.\n self.assertGreater(\n tf.io.gfile.GFile(self._train_output_file).size(),\n tf.io.gfile.GFile(self._eval_output_file).size())\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.io.gfile.exists", "tensorflow.io.gfile.GFile", "tensorflow.train.Int64List", "tensorflow.train.Feature", "tensorflow.train.Features", "tensorflow.test.main" ] ]
uta-smile/CD-MVGNN
[ "b48f4cd14befed298980a83edb417ab6809f0af6", "b48f4cd14befed298980a83edb417ab6809f0af6" ]
[ "service/moleprop.py", "dglt/contrib/moses/moses/model/gvae/model_cvae.py" ]
[ "import os\nimport time\nimport math\nimport numpy as np\nimport torch\n\n# torch.multiprocessing.set_start_method('spawn')\ntorch.multiprocessing.set_start_method('forkserver', force=True)\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom argparse import Namespace\nfrom typing import List\nfrom dglt.data.dataset.molecular import MoleculeDataset\nfrom dglt.data.transformer.scaler import StandardScaler\nfrom dglt.data.transformer.collator import MolCollator\nfrom dglt.data.dataset.utils import get_data, get_data_from_smiles\nfrom dglt.utils import load_args, load_checkpoint, load_scalers\nfrom deploy import get_newest_train_args\nfrom third_party.dimorphite_dl.acid_base import mol_cls\n\nclass MoleProp(object):\n \"\"\"Molecular Properties Prediction Service\"\"\"\n\n def __init__(self, checkpoint_dir, debug=print):\n self.debug_ = debug\n self.checkpoint_paths_ = []\n for root, _, files in os.walk(checkpoint_dir):\n for fname in files:\n if fname.endswith('.pt'):\n self.checkpoint_paths_.append(os.path.join(root, fname))\n\n def load_model(self, args: Namespace):\n \"\"\"\n Load checkpoints\n\n :param args: Arguments.\n :return:\n \"\"\"\n self.scaler_, self.features_scaler_ = load_scalers(self.checkpoint_paths_[0])\n self.train_args = load_args(self.checkpoint_paths_[0])\n self.args_ = args\n for key, value in vars(self.train_args).items():\n if not hasattr(self.args_, key):\n setattr(self.args_, key, value)\n\n # update args with newest training args\n newest_train_args = get_newest_train_args()\n for key, value in vars(newest_train_args).items():\n if not hasattr(args, key):\n setattr(args, key, value)\n if args.features_path:\n args.features_path = None\n args.features_generator = ['rdkit_2d_normalized']\n self.models_ = []\n for checkpoint_path in tqdm(self.checkpoint_paths_, total=len(self.checkpoint_paths_)):\n self.models_.append(load_checkpoint(checkpoint_path, cuda=self.args_.cuda, current_args=self.args_))\n\n def inference(self,\n model: nn.Module,\n data: MoleculeDataset,\n args,\n batch_size: int,\n shared_dict,\n scaler: StandardScaler = None\n ) -> List[List[float]]:\n \"\"\"\n Do inference\n :param model: model.\n :param data: input data.\n :param args: Arguments.\n :param batch_size: batch size.\n :param shared_dict: shared_dict of model.\n :param scaler: scaler of input data.\n :return: prediction of molecular properties.\n \"\"\"\n # model.share_memory()\n model.eval()\n args.bond_drop_rate = 0\n preds = []\n iter_count = 0\n mol_collator = MolCollator(args=args, shared_dict=shared_dict)\n mol_loader = DataLoader(data, batch_size=batch_size, shuffle=False, num_workers=0, collate_fn=mol_collator)\n for i, item in enumerate(mol_loader):\n smiles_batch, batch, features_batch, mask, _ = item\n\n with torch.no_grad():\n batch_preds = model(batch, features_batch)\n iter_count += args.batch_size\n batch_preds = batch_preds.data.cpu().numpy()\n if scaler is not None:\n batch_preds = scaler.inverse_transform(batch_preds)\n batch_preds = batch_preds.tolist()\n preds.extend(batch_preds)\n\n return preds\n\n def postprocessing(self, task: str = None, smiles: List[str] = None, preds: np.ndarray = None):\n if task == 'caco2':\n for i in range(preds.shape[0]):\n if preds[i] is not None:\n for j in range(len(preds[i])):\n preds[i][j] = (math.pow(10, preds[i][j]) - 1) / 10\n elif task == 'pka':\n acid_base = mol_cls(smiles)\n preds[acid_base == None] = np.nan\n preds = np.column_stack((preds, np.array(acid_base, dtype=np.float)))\n elif task 
== 'ppb':\n preds[preds > 1] = 1\n preds[preds < 0] = 0\n return preds\n\n def predict(self, task: str = None, smiles: List[str] = None):\n \"\"\"\n Predict molecular properties.\n :param smiles: input data.\n :return: molecular properties.\n \"\"\"\n self.debug_('Loading data')\n tic = time.time()\n self.args_.max_workers = 30\n if smiles is not None:\n test_data = get_data_from_smiles(smiles=smiles, skip_invalid_smiles=True, args=self.args_)\n else:\n test_data = get_data(path=self.args_.input_file, args=self.args_,\n use_compound_names=self.args_.use_compound_names,\n skip_invalid_smiles=True)\n toc = time.time()\n self.debug_('loading data: {}s'.format(toc - tic))\n self.debug_('Validating SMILES')\n tic = time.time()\n valid_indices = [i for i in range(len(test_data)) if test_data[i].mol is not None]\n full_data = test_data\n test_data = MoleculeDataset([test_data[i] for i in valid_indices])\n\n # Edge case if empty list of smiles is provided\n if len(test_data) == 0:\n return [None] * len(full_data)\n\n # Normalize features\n if self.train_args.features_scaling:\n test_data.normalize_features(self.features_scaler)\n\n sum_preds = np.zeros((len(test_data), self.args_.num_tasks))\n toc = time.time()\n self.debug_('validating smiles: {}s'.format(toc - tic))\n self.debug_(f'Predicting...')\n tic = time.time()\n shared_dict = {}\n for model in self.models_:\n model_preds = self.inference(\n model=model,\n data=test_data,\n batch_size=self.args_.batch_size,\n scaler=self.scaler_,\n shared_dict=shared_dict,\n args=self.args_\n )\n sum_preds += np.array(model_preds)\n toc = time.time()\n self.debug_('predicting: {}s'.format(toc - tic))\n avg_preds = sum_preds / len(self.checkpoint_paths_)\n avg_preds = self.postprocessing(task=task, smiles=smiles, preds=avg_preds)\n avg_preds = avg_preds.tolist()\n assert len(test_data) == len(avg_preds)\n test_smiles = test_data.smiles()\n res = {}\n for i in range(len(avg_preds)):\n res[test_smiles[i]] = avg_preds[i]\n\n return {'task': task, 'task_score': res}\n", "import torch\nimport torch.nn as nn\nfrom torch.distributions.gumbel import Gumbel\n\nfrom dglt.contrib.moses.moses.model.gvae.utils import Flatten\n\nclass CVAE(nn.Module):\n \"\"\"Character Variational Autoencoder.\"\"\"\n\n def __init__(self, vocab, config):\n \"\"\"Constructor function.\n\n Args:\n * vocab: model's vocabulary\n * config: model's configuration\n \"\"\"\n\n super().__init__()\n\n # get the vocabulary and configurations\n self.eos_symbol = '&' # symbol '&' is not used in SMILES\n self.vocab = [self.eos_symbol] + vocab\n self.config = config\n self.vocab_len = len(self.vocab)\n\n # build encoder & decoder\n self.__build_encoder()\n self.__build_decoder()\n\n # pack all modules into one\n self.vae = nn.ModuleList([\n self.encoder,\n self.q_mu,\n self.q_logvar,\n self.mapper,\n self.decoder,\n self.decoder_fc\n ])\n\n def forward(self, x):\n \"\"\"Perform the forward passing and compute losses.\n\n Args:\n * x: training samples (as torch.tensor)\n\n Returns:\n * kl_loss: KL-divergence loss\n * recon_loss: reconstruction loss\n \"\"\"\n\n # send the data to model's device\n x = x.to(self.device)\n\n # encode into latent vectors\n x_trans = torch.transpose(x, 1, 2)\n x_encoded = self.encoder(x_trans)\n mu = self.q_mu(x_encoded)\n logvar = self.q_logvar(x_encoded)\n\n # decode from latent vectors\n z = mu + (logvar / 2).exp() * torch.randn_like(mu)\n z_mapped = self.mapper(z)\n z_tiled = z_mapped.unsqueeze(1).repeat(1, self.config.smiles_maxlen, 1)\n z_decoded, __ = 
self.decoder(z_tiled, None)\n y = self.decoder_fc(z_decoded)\n\n # compute KL-divergence & re-construction losses\n kl_loss = 0.5 * (logvar.exp() + mu ** 2 - 1 - logvar).sum(1).mean()\n recon_loss = -(x * y.log()).sum([1, 2]).mean()\n\n return kl_loss, recon_loss\n\n def sample(self, n_batch, max_len=100):\n \"\"\"Sample SMILES strings from the prior distribution.\n\n Args:\n * n_batch: # of SMILES strings\n * max_len: maximal length of a SMILES string\n\n Returns:\n * string_list: list of SMILES strings\n \"\"\"\n\n with torch.no_grad():\n # sample latent vectors from the prior distribution\n z = torch.randn(n_batch, self.config.d_z)\n z = z.to(self.device)\n\n # decode from latent vectors\n z_mapped = self.mapper(z)\n z_tiled = z_mapped.unsqueeze(1).repeat(1, max_len, 1)\n z_decoded, __ = self.decoder(z_tiled, None)\n y = self.decoder_fc(z_decoded)\n\n # convert tensors into SMILES strings\n m = Gumbel(torch.tensor([0.0]), torch.tensor([0.1]))\n noise = torch.squeeze(m.sample(y.size()))\n noise = noise.to(self.device)\n y_idxs = torch.argmax(y.log() + noise, dim=-1)\n tensor_list = torch.split(y_idxs, 1, dim=0)\n string_list = [self.tensor2string(tensor) for tensor in tensor_list]\n\n return string_list\n\n def string2tensor(self, string, device='model'):\n \"\"\"Convert a SMILES string to torch.tensor.\n\n Args:\n * string: SMILES string\n * device: where to place the torch.tensor\n\n Returns:\n * tensor: torch.tensor consists of one-hot vectors\n \"\"\"\n\n # obtain a list of non-zero entries' indices\n string += self.eos_symbol * (self.config.smiles_maxlen - len(string))\n ids = list(map(lambda x: self.vocab.index(x), string))\n\n # convert into a 2-D tensor consists of one-hot vectors\n tensor = torch.zeros(self.config.smiles_maxlen, self.vocab_len)\n tensor.scatter_(1, torch.tensor(ids).view([-1, 1]), 1)\n tensor.to(self.device if device == 'model' else device)\n\n return tensor\n\n def tensor2string(self, tensor):\n \"\"\"Convert a torch.tensor to SMILES string.\n\n Args:\n * tensor: torch.tensor consists of non-zero entries' indices\n\n Returns:\n * string: SMILES string\n \"\"\"\n\n # convert into a SMILES string with end-of-sequence characters removed\n ids = tensor.view(-1).tolist()\n string = ''.join([self.vocab[id] for id in ids])\n string = string.replace(self.eos_symbol, '')\n\n return string\n\n @property\n def device(self):\n \"\"\"The model's device.\"\"\"\n\n return next(self.parameters()).device\n\n def __build_encoder(self):\n \"\"\"Build the encoder sub-network.\n\n NOTE: encoder's input must be of size <batch_size * vocab_len * smiles_maxlen>.\n \"\"\"\n\n # configure parameters for convolutional and linear layers\n conv1_param = (self.vocab_len, 9, 9) # in_channels / out_channels / kernel_size\n conv2_param = (9, 9, 9)\n conv3_param = (9, 10, 11)\n nb_idims_fc = conv3_param[1] * (self.config.smiles_maxlen -\n (conv1_param[2] - 1) - (conv2_param[2] - 1) - (conv3_param[2] - 1))\n nb_odims_fc = 435\n\n # encoder sub-network\n self.encoder = nn.Sequential(\n nn.Conv1d(conv1_param[0], conv1_param[1], conv1_param[2]),\n nn.ReLU(),\n nn.Conv1d(conv2_param[0], conv2_param[1], conv2_param[2]),\n nn.ReLU(),\n nn.Conv1d(conv3_param[0], conv3_param[1], conv3_param[2]),\n nn.ReLU(),\n Flatten(),\n nn.Linear(nb_idims_fc, nb_odims_fc),\n nn.ReLU()\n )\n\n # latent vector's mean & variance\n self.q_mu = nn.Linear(nb_odims_fc, self.config.d_z)\n self.q_logvar = nn.Linear(nb_odims_fc, self.config.d_z)\n\n def __build_decoder(self):\n \"\"\"Build the decoder sub-network.\"\"\"\n\n # 
map the latent vector for decoding\n # <self.mapper>'s output should be repeated before feeding into <self.decoder>\n self.mapper = nn.Sequential(\n nn.Linear(self.config.d_z, self.config.d_z),\n nn.ReLU()\n )\n\n # decoder sub-network\n self.decoder = nn.GRU(\n self.config.d_z,\n self.config.d_d_h,\n num_layers=3,\n batch_first=True,\n dropout=self.config.d_dropout if self.config.d_n_layers > 1 else 0\n )\n\n # probabilistic outputs\n self.decoder_fc = nn.Sequential(\n nn.Linear(self.config.d_d_h, self.vocab_len),\n nn.Softmax(dim=-1),\n )\n" ]
[ [ "numpy.array", "torch.utils.data.DataLoader", "torch.multiprocessing.set_start_method", "torch.no_grad" ], [ "torch.nn.Linear", "torch.randn_like", "torch.split", "torch.randn", "torch.nn.Softmax", "torch.no_grad", "torch.tensor", "torch.nn.GRU", "torch.nn.Conv1d", "torch.nn.ModuleList", "torch.zeros", "torch.nn.ReLU", "torch.transpose" ] ]
ipovalyaev/events
[ "64ec6324368dd21f9cedd464304eed01e1737024" ]
[ "cv-competition-1/pytorch_baseline/compute_overlaps_np.py" ]
[ "import time\nimport numpy as np\nfrom compute_overlap import compute_overlap\n\n\ndef compute_overlap_np(a: np.array, b: np.array) -> np.array:\n \"\"\"\n Args\n a: (N, 4) ndarray of float [xmin, ymin, xmax, ymax]\n b: (K, 4) ndarray of float [xmin, ymin, xmax, ymax]\n\n Returns\n overlaps: (N, K) ndarray of overlap between boxes a and boxes b\n \"\"\"\n N, K = len(a), len(b)\n overlaps = np.zeros(shape=(N, K))\n for n in range(N):\n a_area = (a[n, 2] - a[n, 0]) * (a[n, 3] - a[n, 1])\n for k in range(K):\n dx = min(a[n, 2], b[k, 2]) - max(a[n, 0], b[k, 0])\n if dx >= 0:\n dy = min(a[n, 3], b[k, 3]) - max(a[n, 1], b[k, 1])\n if dy >= 0:\n b_area = (b[k, 2] - b[k, 0]) * (b[k, 3] - b[k, 1])\n intersection = max(dx, 0) * max(dy, 0)\n union = a_area + b_area - intersection\n overlaps[n, k] = intersection / union\n\n return overlaps\n\n\ndef test_overlap_1():\n a = np.array([[1, 1, 3, 3]], dtype=np.float)\n b = np.array([[2, 2, 4, 4]], dtype=np.float)\n assert compute_overlap_np(a, b)[0][0] == 1. / 7\n\n\ndef test_overlap_0():\n a = np.array([[1, 1, 3, 3]], dtype=np.float)\n b = np.array([[3, 3, 4, 4]], dtype=np.float)\n assert compute_overlap_np(a, b)[0][0] == 0.\n\n\ndef test_overlap_n(a_len, b_len, box_size=100):\n a = np.random.randint(0, 3000, (a_len, 4))\n b = np.random.randint(0, 4000, (b_len, 4))\n a = a.astype(np.float)\n b = b.astype(np.float)\n a[:, 2] = a[:, 0] + box_size\n b[:, 2] = b[:, 0] + box_size\n a[:, 3] = a[:, 1] + box_size\n b[:, 3] = b[:, 1] + box_size\n\n t1 = time.time()\n o_np = compute_overlap_np(a, b)\n t2 = time.time()\n o_c = compute_overlap(a, b)\n t3 = time.time()\n assert np.array_equal(o_np, o_c)\n\n print('Numpy time = ', t2 - t1)\n print('C_ext time = ', t3 - t2)\n\n\nif __name__ == '__main__':\n test_overlap_1()\n test_overlap_0()\n test_overlap_n(100, 5, 300)\n" ]
[ [ "numpy.array", "numpy.random.randint", "numpy.zeros", "numpy.array_equal" ] ]
wesleytao/Checkers-Reinforcement-Learning
[ "80d45f1c29fb7cd4503cdadedf344267553cad31" ]
[ "policy_value_net_numpy.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nImplement the policy value network using numpy, so that we can play with the\ntrained AI model without installing any DL framwork\n\n@author: Junxiao Song\n\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\n\n\n# some utility functions\ndef softmax(x):\n probs = np.exp(x - np.max(x))\n probs /= np.sum(probs)\n return probs\n\n\ndef relu(X):\n out = np.maximum(X, 0)\n return out\n\n\ndef conv_forward(X, W, b, stride=1, padding=1):\n n_filters, d_filter, h_filter, w_filter = W.shape\n # theano conv2d flips the filters (rotate 180 degree) first\n # while doing the calculation\n W = W[:, :, ::-1, ::-1]\n n_x, d_x, h_x, w_x = X.shape\n h_out = (h_x - h_filter + 2 * padding) / stride + 1\n w_out = (w_x - w_filter + 2 * padding) / stride + 1\n h_out, w_out = int(h_out), int(w_out)\n X_col = im2col_indices(X, h_filter, w_filter,\n padding=padding, stride=stride)\n W_col = W.reshape(n_filters, -1)\n out = (np.dot(W_col, X_col).T + b).T\n out = out.reshape(n_filters, h_out, w_out, n_x)\n out = out.transpose(3, 0, 1, 2)\n return out\n\n\ndef fc_forward(X, W, b):\n out = np.dot(X, W) + b\n return out\n\n\ndef get_im2col_indices(x_shape, field_height,\n field_width, padding=1, stride=1):\n # First figure out what the size of the output should be\n N, C, H, W = x_shape\n assert (H + 2 * padding - field_height) % stride == 0\n assert (W + 2 * padding - field_height) % stride == 0\n out_height = int((H + 2 * padding - field_height) / stride + 1)\n out_width = int((W + 2 * padding - field_width) / stride + 1)\n\n i0 = np.repeat(np.arange(field_height), field_width)\n i0 = np.tile(i0, C)\n i1 = stride * np.repeat(np.arange(out_height), out_width)\n j0 = np.tile(np.arange(field_width), field_height * C)\n j1 = stride * np.tile(np.arange(out_width), out_height)\n i = i0.reshape(-1, 1) + i1.reshape(1, -1)\n j = j0.reshape(-1, 1) + j1.reshape(1, -1)\n\n k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)\n\n return (k.astype(int), i.astype(int), j.astype(int))\n\n\ndef im2col_indices(x, field_height, field_width, padding=1, stride=1):\n \"\"\" An implementation of im2col based on some fancy indexing \"\"\"\n # Zero-pad the input\n p = padding\n x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')\n\n k, i, j = get_im2col_indices(x.shape, field_height,\n field_width, padding, stride)\n\n cols = x_padded[:, k, i, j]\n C = x.shape[1]\n cols = cols.transpose(1, 2, 0).reshape(field_height * field_width * C, -1)\n return cols\n\n\nclass PolicyValueNet():\n \"\"\"policy-value network in numpy \"\"\"\n def __init__(self, board_width, board_height, net_params):\n self.board_width = board_width\n self.board_height = board_height\n self.params = net_params\n\n def policy_value_fn(self, board):\n \"\"\"\n input: board\n output: a list of (action, probability) tuples for each available\n action and the score of the board state\n \"\"\"\n legal_positions = board.availables\n current_state = board.current_state()\n\n X = current_state.reshape(-1, 4, self.board_width, self.board_height)\n # first 3 conv layers with ReLu nonlinearity\n for i in [0, 2, 4]:\n X = relu(conv_forward(X, self.params[i], self.params[i+1]))\n # policy head\n X_p = relu(conv_forward(X, self.params[6], self.params[7], padding=0))\n X_p = fc_forward(X_p.flatten(), self.params[8], self.params[9])\n act_probs = softmax(X_p)\n # value head\n X_v = relu(conv_forward(X, self.params[10],\n self.params[11], padding=0))\n X_v = relu(fc_forward(X_v.flatten(), 
self.params[12], self.params[13]))\n value = np.tanh(fc_forward(X_v, self.params[14], self.params[15]))[0]\n act_probs = zip(legal_positions, act_probs.flatten()[legal_positions])\n return act_probs, value\n" ]
[ [ "numpy.sum", "numpy.tile", "numpy.dot", "numpy.arange", "numpy.max", "numpy.maximum", "numpy.pad" ] ]
arvincsh/multiobjectdetection
[ "26b4d43ce981a7a4cd031611df70b8f7c08757df" ]
[ "yolo_app/etc/commons/opencv_helpers.py" ]
[ "import cv2\nimport numpy as np\nfrom math import sqrt\nfrom scipy.spatial import distance\nfrom yolo_app.etc.config import config\n\n\ndef crop_image(save_path, img, xywh):\n x = xywh[0]\n y = xywh[1]\n w = xywh[2]\n h = xywh[3]\n crop_img = img[y:y + h, x:x + w]\n cv2.imwrite(save_path, crop_img)\n\n\ndef np_xyxy2xywh(xyxy, data_type=int):\n # Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]\n xywh = np.zeros_like(xyxy)\n x1 = xyxy[0]\n y1 = xyxy[1]\n x2 = xyxy[2]\n y2 = xyxy[3]\n\n xywh[0] = xyxy[0]\n xywh[1] = xyxy[1]\n xywh[2] = data_type(abs(x2 - x1))\n xywh[3] = data_type(abs(y1 - y2))\n return xywh\n\n\ndef torch2np_xyxy(xyxy, data_type=int):\n # Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]\n\n # CPU Mode\n try:\n np_xyxy = np.zeros_like(xyxy)\n # GPU Mode\n except:\n np_xyxy = np.zeros_like(xyxy.data.cpu().numpy())\n\n np_xyxy[0] = data_type(xyxy[0])\n np_xyxy[1] = data_type(xyxy[1])\n np_xyxy[2] = data_type(xyxy[2])\n np_xyxy[3] = data_type(xyxy[3])\n\n return np_xyxy\n\n\ndef get_det_xyxy(det):\n numpy_xyxy = torch2np_xyxy(det[:4])\n return numpy_xyxy\n\n\n# Merged of 2 bounding boxes (xyxy and xyxy)\ndef get_mbbox(obj_1, obj_2):\n box1_x1 = obj_1[0]\n box1_y1 = obj_1[1]\n box1_x2 = obj_1[2]\n box1_y2 = obj_1[3]\n\n box2_x1 = obj_2[0]\n box2_y1 = obj_2[1]\n box2_x2 = obj_2[2]\n box2_y2 = obj_2[3]\n\n mbbox = [\n min(box1_x1, box2_x1),\n min(box1_y1, box2_y1),\n max(box1_x2, box2_x2),\n max(box1_y2, box2_y2)\n ]\n return mbbox\n\n\ndef np_xyxy2centroid(xyxy):\n centroid_x = (xyxy[0] + xyxy[2]) / 2\n centroid_y = (xyxy[1] + xyxy[3]) / 2\n return np.asarray([centroid_x, centroid_y])\n\n\ndef get_xyxy_distance(xyxy_1, xyxy_2):\n o1cx_o2cx = pow((xyxy_1[0] - xyxy_2[0]), 2)\n o1cy_o2cy = pow((xyxy_1[1] - xyxy_2[1]), 2)\n dist = sqrt(o1cx_o2cx + o1cy_o2cy)\n return dist\n\n\ndef get_xyxy_distance_manhattan(xyxy_1, xyxy_2):\n o1cx_o2cx = pow((xyxy_1[0] - xyxy_2[0]), 2)\n o1cy_o2cy = pow((xyxy_1[1] - xyxy_2[1]), 2)\n dist = sqrt(distance.cityblock(o1cx_o2cx, o1cy_o2cy))\n return dist\n\n\ndef save_txt(save_path, txt_format, bbox_xyxy=None, w_type='a', img_ext=\".png\", cls=None, conf=1.0):\n txt_path = save_path.replace(img_ext, '')\n with open(txt_path + '.txt', w_type) as file:\n if bbox_xyxy is None:\n file.write(\"\")\n else:\n if cls is None:\n cls = config[\"bbox_config\"][\"default_label\"]\n if txt_format == \"default\":\n file.write(('%g ' * 6 + '\\n') % (bbox_xyxy, cls, conf))\n elif txt_format == \"cartucho\":\n str_output = cls + \" \"\n str_output += str(conf) + \" \"\n str_output += str(int(bbox_xyxy[0])) + \" \" + \\\n str(int(bbox_xyxy[1])) + \" \" + \\\n str(int(bbox_xyxy[2])) + \" \" + \\\n str(int(bbox_xyxy[3])) + \"\\n\"\n\n file.write(str_output)\n else:\n pass\n" ]
[ [ "numpy.zeros_like", "numpy.asarray", "scipy.spatial.distance.cityblock" ] ]
MilesQLi/highway-networks
[ "de1875c33e311c12df7dc33decda67706dbf250a" ]
[ "python/caffe/detector.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nDo windowed detection by classifying a number of images/crops at once,\noptionally using the selective search window proposal method.\n\nThis implementation follows ideas in\n Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik.\n Rich feature hierarchies for accurate object detection and semantic\n segmentation.\n http://arxiv.org/abs/1311.2524\n\nThe selective_search_ijcv_with_python code required for the selective search\nproposal mode is available at\n https://github.com/sergeyk/selective_search_ijcv_with_python\n\"\"\"\nimport numpy as np\nimport os\n\nimport caffe\n\n\nclass Detector(caffe.Net):\n \"\"\"\n Detector extends Net for windowed detection by a list of crops or\n selective search proposals.\n \"\"\"\n def __init__(self, model_file, pretrained_file, mean=None,\n input_scale=None, raw_scale=None, channel_swap=None,\n context_pad=None):\n \"\"\"\n Take\n mean, input_scale, raw_scale, channel_swap: params for\n preprocessing options.\n context_pad: amount of surrounding context to take s.t. a `context_pad`\n sized border of pixels in the network input image is context, as in\n R-CNN feature extraction.\n \"\"\"\n caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)\n\n # configure pre-processing\n in_ = self.inputs[0]\n self.transformer = caffe.io.Transformer(\n {in_: self.blobs[in_].data.shape})\n self.transformer.set_transpose(in_, (2, 0, 1))\n if mean is not None:\n self.transformer.set_mean(in_, mean)\n if input_scale is not None:\n self.transformer.set_input_scale(in_, input_scale)\n if raw_scale is not None:\n self.transformer.set_raw_scale(in_, raw_scale)\n if channel_swap is not None:\n self.transformer.set_channel_swap(in_, channel_swap)\n\n self.configure_crop(context_pad)\n\n def detect_windows(self, images_windows):\n \"\"\"\n Do windowed detection over given images and windows. 
Windows are\n extracted then warped to the input dimensions of the net.\n\n Parameters\n ----------\n images_windows: (image filename, window list) iterable.\n context_crop: size of context border to crop in pixels.\n\n Returns\n -------\n detections: list of {filename: image filename, window: crop coordinates,\n predictions: prediction vector} dicts.\n \"\"\"\n # Extract windows.\n window_inputs = []\n for image_fname, windows in images_windows:\n image = caffe.io.load_image(image_fname).astype(np.float32)\n for window in windows:\n window_inputs.append(self.crop(image, window))\n\n # Run through the net (warping windows to input dimensions).\n in_ = self.inputs[0]\n caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])\n + self.blobs[in_].data.shape[2:],\n dtype=np.float32)\n for ix, window_in in enumerate(window_inputs):\n caffe_in[ix] = self.transformer.preprocess(in_, window_in)\n out = self.forward_all(**{in_: caffe_in})\n predictions = out[self.outputs[0]].squeeze(axis=(2, 3))\n\n # Package predictions with images and windows.\n detections = []\n ix = 0\n for image_fname, windows in images_windows:\n for window in windows:\n detections.append({\n 'window': window,\n 'prediction': predictions[ix],\n 'filename': image_fname\n })\n ix += 1\n return detections\n\n def detect_selective_search(self, image_fnames):\n \"\"\"\n Do windowed detection over Selective Search proposals by extracting\n the crop and warping to the input dimensions of the net.\n\n Parameters\n ----------\n image_fnames: list\n\n Returns\n -------\n detections: list of {filename: image filename, window: crop coordinates,\n predictions: prediction vector} dicts.\n \"\"\"\n import selective_search_ijcv_with_python as selective_search\n # Make absolute paths so MATLAB can find the files.\n image_fnames = [os.path.abspath(f) for f in image_fnames]\n windows_list = selective_search.get_windows(\n image_fnames,\n cmd='selective_search_rcnn'\n )\n # Run windowed detection on the selective search list.\n return self.detect_windows(zip(image_fnames, windows_list))\n\n def crop(self, im, window):\n \"\"\"\n Crop a window from the image for detection. Include surrounding context\n according to the `context_pad` configuration.\n\n Parameters\n ----------\n im: H x W x K image ndarray to crop.\n window: bounding box coordinates as ymin, xmin, ymax, xmax.\n\n Returns\n -------\n crop: cropped window.\n \"\"\"\n # Crop window from the image.\n crop = im[window[0]:window[2], window[1]:window[3]]\n\n if self.context_pad:\n box = window.copy()\n crop_size = self.blobs[self.inputs[0]].width # assumes square\n scale = crop_size / (1. 
* crop_size - self.context_pad * 2)\n # Crop a box + surrounding context.\n half_h = (box[2] - box[0] + 1) / 2.\n half_w = (box[3] - box[1] + 1) / 2.\n center = (box[0] + half_h, box[1] + half_w)\n scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))\n box = np.round(np.tile(center, 2) + scaled_dims)\n full_h = box[2] - box[0] + 1\n full_w = box[3] - box[1] + 1\n scale_h = crop_size / full_h\n scale_w = crop_size / full_w\n pad_y = round(max(0, -box[0]) * scale_h) # amount out-of-bounds\n pad_x = round(max(0, -box[1]) * scale_w)\n\n # Clip box to image dimensions.\n im_h, im_w = im.shape[:2]\n box = np.clip(box, 0., [im_h, im_w, im_h, im_w])\n clip_h = box[2] - box[0] + 1\n clip_w = box[3] - box[1] + 1\n assert(clip_h > 0 and clip_w > 0)\n crop_h = round(clip_h * scale_h)\n crop_w = round(clip_w * scale_w)\n if pad_y + crop_h > crop_size:\n crop_h = crop_size - pad_y\n if pad_x + crop_w > crop_size:\n crop_w = crop_size - pad_x\n\n # collect with context padding and place in input\n # with mean padding\n context_crop = im[box[0]:box[2], box[1]:box[3]]\n context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))\n crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean\n crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop\n\n return crop\n\n def configure_crop(self, context_pad):\n \"\"\"\n Configure crop dimensions and amount of context for cropping.\n If context is included, make the special input mean for context padding.\n\n Parameters\n ----------\n context_pad : amount of context for cropping.\n \"\"\"\n # crop dimensions\n in_ = self.inputs[0]\n tpose = self.transformer.transpose[in_]\n inv_tpose = [tpose[t] for t in tpose]\n self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose]\n #.transpose(inv_tpose)\n # context padding\n self.context_pad = context_pad\n if self.context_pad:\n in_ = self.inputs[0]\n transpose = self.transformer.transpose.get(in_)\n channel_order = self.transformer.channel_swap.get(in_)\n raw_scale = self.transformer.raw_scale.get(in_)\n # Padding context crops needs the mean in unprocessed input space.\n mean = self.transformer.mean.get(in_)\n if mean is not None:\n inv_transpose = [transpose[t] for t in transpose]\n crop_mean = mean.copy().transpose(inv_transpose)\n if channel_order is not None:\n channel_order_inverse = [channel_order.index(i)\n for i in range(crop_mean.shape[2])]\n crop_mean = crop_mean[:, :, channel_order_inverse]\n if raw_scale is not None:\n crop_mean /= raw_scale\n self.crop_mean = crop_mean\n else:\n self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)\n" ]
[ [ "numpy.ones", "numpy.tile", "numpy.zeros", "numpy.clip", "numpy.array" ] ]
geometrikal/tensorflow_models
[ "44a82f3f18a2e62b1cd99b94922f752be0672f46" ]
[ "research/object_detection/training/TFLite_detection_video.py" ]
[ "######## Webcam Object Detection Using Tensorflow-trained Classifier #########\n#\n# Author: Evan Juras\n# Date: 10/2/19\n# Description: \n# This program uses a TensorFlow Lite model to perform object detection on a\n# video. It draws boxes and scores around the objects of interest in each frame\n# from the video.\n#\n# This code is based off the TensorFlow Lite image classification example at:\n# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py\n#\n# I added my own method of drawing boxes and labels using OpenCV.\n\n# Import packages\nimport os\nimport argparse\nimport cv2\nimport numpy as np\nimport sys\nimport importlib.util\n\ndef increase_brightness(img, value=30):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n h, s, v = cv2.split(hsv)\n\n lim = 255 - value\n v[v > lim] = 255\n v[v <= lim] += value\n\n final_hsv = cv2.merge((h, s, v))\n img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)\n return img\n\n\n# Define and parse input arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('--modeldir', help='Folder the .tflite file is located in',\n required=True)\nparser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',\n default='detect.tflite')\nparser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',\n default='labelmap.txt')\nparser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',\n default=0.5)\nparser.add_argument('--video', help='Name of the video file',\n default='test.mp4')\nparser.add_argument('--edgetpu', help='Use Coral Edge TPU Accelerator to speed up detection',\n action='store_true')\nparser.add_argument('--subsample', type=int, default=1, help='Subsample the input image')\nparser.add_argument('--offset', type=int, default=0, help='Offset into file')\n\nargs = parser.parse_args()\n\nMODEL_NAME = args.modeldir\nGRAPH_NAME = args.graph\nLABELMAP_NAME = args.labels\nVIDEO_NAME = args.video\nmin_conf_threshold = float(args.threshold)\nuse_TPU = args.edgetpu\n\n# Import TensorFlow libraries\n# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow\n# If using Coral Edge TPU, import the load_delegate library\npkg = importlib.util.find_spec('tflite_runtime')\nif pkg:\n from tflite_runtime.interpreter import Interpreter\n if use_TPU:\n from tflite_runtime.interpreter import load_delegate\nelse:\n from tensorflow.lite.python.interpreter import Interpreter\n if use_TPU:\n from tensorflow.lite.python.interpreter import load_delegate\n\n# If using Edge TPU, assign filename for Edge TPU model\nif use_TPU:\n # If user has specified the name of the .tflite file, use that name, otherwise use default 'edgetpu.tflite'\n if (GRAPH_NAME == 'detect.tflite'):\n GRAPH_NAME = 'edgetpu.tflite' \n\n# Get path to current working directory\nCWD_PATH = os.getcwd()\n\n# Path to video file\nVIDEO_PATH = os.path.join(CWD_PATH,VIDEO_NAME)\n\n# Path to .tflite file, which contains the model that is used for object detection\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)\n\n# Path to label map file\nPATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)\n\n# Load the label map\nwith open(PATH_TO_LABELS, 'r') as f:\n labels = [line.strip() for line in f.readlines()]\n\n# Have to do a weird fix for label map if using the COCO \"starter model\" from\n# https://www.tensorflow.org/lite/models/object_detection/overview\n# First label is '???', which 
has to be removed.\nif labels[0] == '???':\n del(labels[0])\n\n# Load the Tensorflow Lite model.\n# If using Edge TPU, use special load_delegate argument\nif use_TPU:\n interpreter = Interpreter(model_path=PATH_TO_CKPT,\n experimental_delegates=[load_delegate('libedgetpu.so.1.0')])\n print(PATH_TO_CKPT)\nelse:\n interpreter = Interpreter(model_path=PATH_TO_CKPT)\n\ninterpreter.allocate_tensors()\n\n# Get model details\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\nheight = input_details[0]['shape'][1]\nwidth = input_details[0]['shape'][2]\n\n\nfloating_model = (input_details[0]['dtype'] == np.float32)\n\ninput_mean = 127.5\ninput_std = 127.5\n\n# Open video file\nvideo = cv2.VideoCapture(VIDEO_PATH)\nimW = video.get(cv2.CAP_PROP_FRAME_WIDTH)\nimH = video.get(cv2.CAP_PROP_FRAME_HEIGHT)\nout = cv2.VideoWriter('output.mp4', -1, 20.0, (int(imW),int(imH)))\n\nfidx = 0\nwhile(video.isOpened()):\n # Acquire frame and resize to expected shape [1xHxWx3]\n ret, frame = video.read()\n if not ret:\n print('Reached the end of the video!')\n break\n \n print(fidx)\n fidx += 1\n if fidx < args.offset: \n continue\n\n if args.subsample > 1:\n imH, imW, _ = frame.shape \n frame = cv2.resize(frame, (imW // args.subsample, imH // args.subsample)) \n \n # frame = increase_brightness(frame, value=70)\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame_resized = cv2.resize(frame_rgb, (width, height))\n input_data = np.expand_dims(frame_resized, axis=0)\n\n # Normalize pixel values if using a floating model (i.e. if model is non-quantized)\n if floating_model:\n input_data = (np.float32(input_data) - input_mean) / input_std\n\n # Perform the actual detection by running the model with the image as input\n interpreter.set_tensor(input_details[0]['index'],input_data)\n interpreter.invoke()\n\n # Retrieve detection results\n boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects\n classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects\n scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects\n num = interpreter.get_tensor(output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)\n\n # Loop over all detections and draw detection box if confidence is above minimum threshold\n for i in range(int(num)):\n # for i in range(len(scores)):\n if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):\n\n # Get bounding box coordinates and draw box\n # Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()\n ymin = int(max(1,(boxes[i][0] * imH)))\n xmin = int(max(1,(boxes[i][1] * imW)))\n ymax = int(min(imH,(boxes[i][2] * imH)))\n xmax = int(min(imW,(boxes[i][3] * imW)))\n \n cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 4)\n\n # Draw label\n object_name = labels[int(classes[i])] # Look up object name from \"labels\" array using class index\n label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size\n label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window\n cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in\n 
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text\n\n # All the results have been drawn on the frame, so it's time to display it.\n out.write(frame)\n cv2.imshow('Object detector', frame)\n \n\n # Press 'q' to quit\n if cv2.waitKey(1) == ord('q'):\n break\n\n# Clean up\nout.release()\nvideo.release()\ncv2.destroyAllWindows()\n\n\n\n" ]
[ [ "numpy.expand_dims", "tensorflow.lite.python.interpreter.Interpreter", "tensorflow.lite.python.interpreter.load_delegate", "numpy.float32" ] ]
RuoyuX-2018/6998DL
[ "a9b75ee63a92c6824db9ac25cc6d931713e0cae5" ]
[ "my_test/get_graph.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 14 15:47:45 2021\n\n@author: xuery\n\"\"\"\n\nimport cv2\nimport time\nimport numpy as np\nimport os\nimport copy\nimport pickle\nimport random\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy import spatial\nfrom skimage import morphology\nfrom sklearn.mixture import GaussianMixture\nfrom shapely.geometry import LineString, Point\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nclass get_graph():\n def __init__(self, raw_img):\n self.raw_img = cv2.resize(raw_img, (512,512))\n self.stride = 30\n self.all_centroids = []\n \n def get_binary(self):\n gray_img = cv2.cvtColor(self.raw_img, cv2.COLOR_RGB2GRAY)\n _, binary_img = cv2.threshold(gray_img, 100, 255, cv2.THRESH_BINARY_INV)\n return binary_img\n \n \n \n def ske2point(self):\n skeleton_img = self.get_binary()\n img_w, img_h = skeleton_img.shape\n\n for i in range(img_w//self.stride):\n for j in range(img_h//self.stride):\n small_img = skeleton_img[i*self.stride:(i+1)*self.stride, j*self.stride:(j+1)*self.stride]\n x_idx, y_idx = small_img.nonzero()\n if len(x_idx) == 0:\n continue\n x_center, y_center = sum(x_idx) / len(x_idx) + i * self.stride,\\\n sum(y_idx) / len(x_idx) + j * self.stride\n #all_centorids stores the idx of points\n self.all_centroids.append(np.array([int(x_center), int(y_center)]))\n self.all_centroids = np.array(self.all_centroids)\n self.centroids_copy = copy.deepcopy(self.all_centroids)\n \n \n def optimization(self, save_path=None):\n #for the points in all_centroid that don't belong to the rope, delete it\n noise_idx = []\n binary_img = self.get_binary()\n for i in range(len(self.all_centroids)):\n if binary_img[int(self.all_centroids[i][0])][int(self.all_centroids[i][1])] == 0:\n noise_idx.append(i)\n self.all_centroids = np.delete(self.all_centroids, noise_idx, axis=0)\n if save_path != None:\n self.img_point_write(save_path, all_centroids, binary_img)\n \n \n def visualization(self):\n self.optimization()\n plt.plot(self.all_centroids[:,0], self.all_centroids[:,1], 'bo', ms=5)\n plt.show()\n\n\n def graph(self, num_neigh_points = 10):\n self.ske2point()\n self.visualization()\n tree = spatial.KDTree(self.all_centroids)\n start_point = [500, 0]\n neigh_points_idx, neigh_points = self.find_neigh_points(tree, start_point, 2)\n next_point = neigh_points[0]\n query_pair = [start_point, next_point]\n point_order = query_pair\n while True:\n if len(self.all_centroids) < num_neigh_points:\n break\n if len(self.all_centroids) == 30:\n break\n tree = spatial.KDTree(self.all_centroids)\n neigh_points_idx, neigh_points = self.find_neigh_points(tree, query_pair[1], num_neigh_points)\n idx, next_point = self.find_path(query_pair, neigh_points)\n if idx == -99:\n print(\"end of construction...\")\n return point_order\n query_pair = [query_pair[1], next_point]\n point_order.append(next_point)\n #pop out the walked point\n self.all_centroids = self.all_centroids.tolist()\n self.all_centroids.pop(neigh_points_idx[idx])\n self.all_centroids = np.array(self.all_centroids)\n print(\"remain lens of points: \", len(self.all_centroids))\n return point_order\n \n \n def find_neigh_points(self, tree, centroid, num_points):\n dist, near_points_idx = tree.query(centroid, k=num_points) \n near_points = self.all_centroids[near_points_idx]\n return near_points_idx[1:], near_points[1:]\n \n \n def find_path(self, query_pair, neigh_points):\n v_query = query_pair[1] - query_pair[0]\n next_point = np.zeros_like(query_pair[0])\n angle_diff = np.pi\n next_idx = 
-99\n for i in range(len(neigh_points)):\n v_compare = query_pair[1] - neigh_points[i]\n #if the dist of all neigh_points is more than 65, break. This setting is for noise\n if np.linalg.norm(v_compare) >70:\n continue\n #calculate the angle of two vectors\n unit_v1 = v_query / np.linalg.norm(v_query)\n unit_v2 = v_compare / np.linalg.norm(v_compare)\n dot_product = np.dot(unit_v1, unit_v2)\n angle = np.arccos(dot_product) #radian\n if np.pi - angle < angle_diff:\n next_point = neigh_points[i]\n angle_diff = np.pi - angle\n next_idx = i\n return next_idx, next_point\n \n \n def find_crossing(self, point_order, visual=False):\n #create lines\n pairs = []\n crossing = []\n for i in range(len(point_order)-1):\n new_pair = np.array([point_order[i], point_order[i+1]])\n pairs.append(new_pair)\n for i in range(len(pairs)):\n for j in range(len(pairs)-i):\n intersec = self.intersection(pairs[i], pairs[j+i])\n if intersec is not False:\n crossing.append([intersec, pairs[i][0], pairs[j+i][0]])\n if visual == True:\n self.visualization_final_graph(point_order, crossing)\n return crossing\n \n \n \n #if no intersection, return False, else return the value of intersection\n def intersection(self, pair1, pair2):\n #if two pairs has a same point, break\n if np.all(pair1[0]-pair2[0]==0) or np.all(pair1[1]-pair2[0]==0) \\\n or np.all(pair1[0]-pair2[1]==0) or np.all(pair1[1]-pair2[1]==0):\n return False\n \n line1 = LineString([pair1[0], pair1[1]])\n line2 = LineString([pair2[0], pair2[1]])\n intersection_point = line1.intersection(line2)\n #no intersection\n if intersection_point.is_empty:\n return False\n else:\n return np.array([intersection_point.x, intersection_point.y])\n \n \n def visualization_final_graph(self, point_order, crossing):\n x, y = zip(*point_order)\n plt.plot(x, y, '-o', zorder=1)\n crossing = np.array(crossing)\n c_x = crossing[:,0,0]\n c_y = crossing[:,0,1]\n plt.scatter(c_x, c_y, 20, 'r', zorder=2)\n plt.show()\n\n\n def trajectory(self, env, sa, point_order, crossing, stride):\n picker_pos, particle_pos = sa.action_space.Picker._get_pos()\n print(particle_pos)\n particle_dist_2d = np.linalg.norm(particle_pos[0] - particle_pos[1])\n init_particle = particle_pos[random.randint(0,len(particle_pos))].tolist()\n particle_list = []\n particle_list.append(init_particle)\n for i in range(len(point_order)-stride):\n if i % stride != 0:\n continue\n \n curr_particle = particle_list[i//stride]\n y_o = point_order[i+stride][1] - point_order[i][1]\n x_o = point_order[i+stride][0] - point_order[i][0]\n orientation = abs(y_o / x_o)\n theta = math.atan(orientation)\n if x_o == 0:\n x_o = 0.1\n if y_o == 0:\n y_o = 0.1\n x = curr_particle[0] + math.cos(theta) * particle_dist_2d * x_o / abs(x_o)\n y = curr_particle[2] + math.sin(theta) * particle_dist_2d * y_o / abs(y_o)\n next_particle = [x, curr_particle[1], y, curr_particle[3]]\n particle_list.append(next_particle)\n \n for i in range(len(particle_list)):\n if i == 3:\n particle_list[i][1] = 0.0145\n if i == 4:\n particle_list[i][1] = 0.0245\n if i == 5:\n particle_list[i][1] = 0.0145\n if i == 9:\n particle_list[i][1] = 0.0145\n if i == 10:\n particle_list[i][1] = 0.0245\n if i == 11:\n particle_list[i][1] = 0.0145\n particle_list = np.array(particle_list)\n particle_x = particle_list[:, 0]\n particle_z = particle_list[:, 1]\n particle_y = particle_list[:, 2]\n fig=plt.figure()\n ax2 = Axes3D(fig)\n ax2.scatter3D(particle_x,particle_y,particle_z, cmap='Blues')\n ax2.plot3D(particle_x,particle_y,particle_z,'gray')\n plt.show()\n return 
particle_list" ]
[ [ "numpy.zeros_like", "scipy.spatial.KDTree", "matplotlib.pyplot.figure", "numpy.arccos", "matplotlib.pyplot.show", "numpy.all", "numpy.delete", "numpy.array", "matplotlib.pyplot.plot", "numpy.dot", "numpy.linalg.norm", "matplotlib.pyplot.scatter" ] ]
HotaekHan/FCOS
[ "8e3a0438cf1a53f8916d21ea81d892b260c100a9" ]
[ "datagen.py" ]
[ "'''Load image/labels/boxes from an annotation file.\n\nThe list file is like:\n\n img.jpg width height xmin ymin xmax ymax label xmin ymin xmax ymax label ...\n'''\nimport random\nimport numpy as np\nimport json\nimport os\n# from PIL import Image, ImageDraw, ImageFile\n# ImageFile.LOAD_TRUNCATED_IMAGES = True\nimport cv2\n\nimport torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\n\nfrom encoder import DataEncoder\n\nclass jsonDataset(data.Dataset):\n def __init__(self, path, classes, transform, input_image_size, num_crops, fpn_level, is_norm_reg_target, radius,\n view_image=False, min_cols=1, min_rows=1):\n '''\n Args:\n root: (str) ditectory to images.\n list_file: (str) path to index file.\n train: (boolean) train or test.\n transform: ([transforms]) image transforms.\n input_size: (int) image shorter side size.\n max_size: (int) maximum image longer side size.\n '''\n self.path = path\n self.classes = classes\n self.transform = transform\n self.input_size = input_image_size\n self.num_crops = num_crops\n self.view_img = view_image\n self.fpn_level = fpn_level\n self.is_norm_reg_target = is_norm_reg_target\n self.radius = radius\n\n self.fnames = list()\n self.offsets = list()\n self.boxes = list()\n self.labels = list()\n\n self.num_classes = len(self.classes)\n\n self.label_map = dict()\n self.class_idx_map = dict()\n # 0 is background class\n for idx in range(0, self.num_classes):\n self.label_map[self.classes[idx]] = idx+1 # 0 is background\n self.class_idx_map[idx+1] = self.classes[idx]\n\n self.data_encoder = DataEncoder(image_size=self.input_size,\n num_classes=self.num_classes + 1,\n fpn_level=self.fpn_level,\n is_norm_reg_target=self.is_norm_reg_target)\n\n fp_read = open(self.path, 'r')\n gt_dict = json.load(fp_read)\n\n all_boxes = list()\n all_labels = list()\n all_img_path = list()\n\n # read gt files\n for gt_key in gt_dict:\n gt_data = gt_dict[gt_key][0]\n\n box = list()\n label = list()\n\n num_boxes = len(gt_data['labels'])\n\n img = cv2.imread(gt_data['image_path'])\n img_rows = img.shape[0]\n img_cols = img.shape[1]\n\n for iter_box in range(0, num_boxes):\n xmin = gt_data['boxes'][iter_box][0]\n ymin = gt_data['boxes'][iter_box][1]\n xmax = gt_data['boxes'][iter_box][2]\n ymax = gt_data['boxes'][iter_box][3]\n rows = ymax - ymin\n cols = xmax - xmin\n\n if xmin < 0 or ymin < 0:\n print('negative coordinate: [xmin: ' + str(xmin) + ', ymin: ' + str(ymin) + ']')\n print(gt_data['image_path'])\n continue\n\n if xmax > img_cols or ymax > img_rows:\n print('over maximum size: [xmax: ' + str(xmax) + ', ymax: ' + str(ymax) + ']')\n print(gt_data['image_path'])\n continue\n\n if cols < min_cols:\n print('cols is lower than ' + str(min_cols) + ': [' + str(xmin) + ', ' + str(ymin) + ', ' +\n str(xmax) + ', ' + str(ymax) + '] '\n + str(gt_data['image_path']))\n continue\n if rows < min_rows:\n print('rows is lower than ' + str(min_rows) + ': [' + str(xmin) + ', ' + str(ymin) + ', ' +\n str(xmax) + ', ' + str(ymax) + '] '\n + str(gt_data['image_path']))\n continue\n\n class_name = gt_data['labels'][iter_box][0]\n if class_name not in self.label_map:\n print('weired class name: ' + class_name)\n print(gt_data['image_path'])\n continue\n\n class_idx = self.label_map[class_name]\n box.append([float(xmin), float(ymin), float(xmax), float(ymax)])\n label.append(int(class_idx))\n\n if len(box) == 0 or len(label) == 0:\n print('none of object exist in the image: ' + gt_data['image_path'])\n continue\n\n all_boxes.append(box)\n 
all_labels.append(label)\n all_img_path.append(gt_data['image_path'])\n\n if len(all_boxes) == len(all_labels) and len(all_boxes) == len(all_img_path):\n num_images = len(all_img_path)\n else:\n print('num. of boxes: ' + str(len(all_boxes)))\n print('num. of labels: ' + str(len(all_labels)))\n print('num. of paths: ' + str(len(all_img_path)))\n raise ValueError('num. of elements are different(all boxes, all_labels, all_img_path)')\n\n if num_crops <= 0:\n for idx in range(0, num_images, 1):\n self.fnames.append(all_img_path[idx])\n self.boxes.append(torch.tensor(all_boxes[idx], dtype=torch.float32))\n self.labels.append(torch.tensor(all_labels[idx], dtype=torch.int64))\n else:\n for idx in range(0, num_images, 1):\n ori_boxes = all_boxes[idx]\n ori_labels = all_labels[idx]\n\n ori_img = cv2.imread(all_img_path[idx])\n img_rows = ori_img.shape[0]\n img_cols = ori_img.shape[1]\n\n offsets, crop_boxes, crop_labels = self._do_crop(ori_img_rows=img_rows, ori_img_cols=img_cols,\n target_img_size=self.input_size,\n boxes=ori_boxes, labels=ori_labels)\n\n num_offsets = len(offsets)\n\n for idx_offset in range(0, num_offsets, 1):\n self.fnames.append(all_img_path[idx])\n self.offsets.append(offsets[idx_offset])\n self.boxes.append(torch.tensor(crop_boxes[idx_offset], dtype=torch.float32))\n self.labels.append(torch.tensor(crop_labels[idx_offset], dtype=torch.int64))\n\n self.num_samples = len(self.fnames)\n\n def __getitem__(self, idx):\n # Load image and boxes.\n fname = self.fnames[idx]\n boxes = self.boxes[idx]\n labels = self.labels[idx]\n img = cv2.imread(fname)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n if self.num_crops > 0:\n offset = self.offsets[idx]\n crop_rect = (int(offset[0]), int(offset[1]),\n int(offset[0]+self.input_size[1]), int(offset[1]+self.input_size[0]))\n\n if offset[0] < 0 or offset[1] < 0:\n raise ValueError(\"negative offset!\")\n for box in boxes:\n if box[0] < 0 or box[1] < 0 or box[2] > self.input_size[1] or box[3] > self.input_size[0]:\n raise ValueError(\"negative box coordinate!\")\n\n img = img[crop_rect[1]:crop_rect[3], crop_rect[0]:crop_rect[2]]\n\n bboxes = [bbox.tolist() + [label.item()] for bbox, label in zip(boxes, labels)]\n augmented = self.transform(image=img, bboxes=bboxes)\n img = augmented['image']\n rows, cols = img.shape[1:]\n boxes = augmented['bboxes']\n boxes = [list(bbox) for bbox in boxes]\n labels = [bbox.pop() for bbox in boxes]\n\n if self.view_img is True:\n np_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n np_img = np_img.numpy()\n np_img = np.transpose(np_img, (1, 2, 0))\n np_img = np.uint8(np_img * 255)\n np_img = np.ascontiguousarray(np_img)\n for idx_box, box in enumerate(boxes):\n cv2.rectangle(np_img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0))\n class_idx = labels[idx_box]\n text_size = cv2.getTextSize(self.class_idx_map[class_idx], cv2.FONT_HERSHEY_PLAIN, 1, 1)\n cv2.putText(np_img, self.class_idx_map[class_idx], (int(box[0]), int(box[1]) - text_size[1]), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)\n\n cv2.imwrite(os.path.join(\"crop_test\", str(idx)+\".jpg\"), np_img)\n\n boxes = torch.tensor(boxes, dtype=torch.float32)\n labels = torch.tensor(labels, dtype=torch.int64)\n\n return img, boxes, labels, fname\n\n def __len__(self):\n return self.num_samples\n\n # def _resize(self, img, boxes):\n # if isinstance(self.input_size, int) is True:\n # w = h = self.input_size\n # elif isinstance(self.input_size, tuple) is True:\n # h = self.input_size[0]\n # w = self.input_size[1]\n # else:\n # raise 
ValueError('input size should be int or tuple of ints')\n #\n # ws = 1.0 * w / img.shape[1]\n # hs = 1.0 * h / img.shape[0]\n # scale = torch.tensor([ws, hs, ws, hs], dtype=torch.float32)\n # if boxes.numel() == 0:\n # scaled_box = boxes\n # else:\n # scaled_box = scale * boxes\n # return cv2.resize(img, (w, h)), scaled_box\n\n def _do_crop(self, ori_img_rows, ori_img_cols, target_img_size, boxes, labels):\n num_boxes = len(boxes)\n num_labels = len(labels)\n\n if num_boxes != num_labels:\n print(\"error occur: Random crop\")\n\n rand_indices = [0, 1, 2, 3, 4]\n np.random.shuffle(rand_indices)\n\n output_offsets = []\n output_boxes = []\n output_labels = []\n\n for box in boxes:\n # box coordinate from 1. not 0.\n xmin = box[0]\n ymin = box[1]\n xmax = box[2]\n ymax = box[3]\n\n width = (xmax - xmin)+1\n height = (ymax - ymin)+1\n\n if width < 0 or height< 0:\n print(\"negative width/height\")\n continue\n\n for iter_crop in range(0, self.num_crops, 1):\n rand_idx = rand_indices[iter_crop]\n\n margin = np.random.randint(16, 128, size=1)\n\n # top-left\n if rand_idx == 0:\n offset_x = xmin-1-margin[0]\n offset_y = ymin-1-margin[0]\n crop_maxx = offset_x + target_img_size[1]\n crop_maxy = offset_y + target_img_size[0]\n\n if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:\n continue\n if offset_x < 0 or offset_y < 0:\n continue\n\n crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]\n\n in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)\n\n if len(in_boxes) == 0:\n continue\n\n output_offsets.append([offset_x, offset_y])\n output_boxes.append(in_boxes)\n output_labels.append(in_labels)\n # top-right\n elif rand_idx == 1:\n offset_x = xmin - (target_img_size[1] - width)-1+margin[0]\n offset_y = ymin-1-margin[0]\n crop_maxx = offset_x + target_img_size[1]\n crop_maxy = offset_y + target_img_size[0]\n\n if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:\n continue\n\n if offset_x < 0 or offset_y < 0:\n continue\n\n crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]\n\n in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)\n\n if len(in_boxes) == 0:\n continue\n\n output_offsets.append([offset_x, offset_y])\n output_boxes.append(in_boxes)\n output_labels.append(in_labels)\n # bottom-left\n elif rand_idx == 2:\n offset_x = xmin-1-margin[0]\n offset_y = ymin - (target_img_size[0] - height)-1+margin[0]\n crop_maxx = offset_x + target_img_size[1]\n crop_maxy = offset_y + target_img_size[0]\n\n if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:\n continue\n\n if offset_x < 0 or offset_y < 0:\n continue\n\n crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]\n\n in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)\n\n if len(in_boxes) == 0:\n continue\n\n output_offsets.append([offset_x, offset_y])\n output_boxes.append(in_boxes)\n output_labels.append(in_labels)\n # bottom-right\n elif rand_idx == 3:\n offset_x = xmin - (target_img_size[1] - width)-1+margin[0]\n offset_y = ymin - (target_img_size[0] - height)-1+margin[0]\n crop_maxx = offset_x + target_img_size[1]\n crop_maxy = offset_y + target_img_size[0]\n\n if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:\n continue\n\n if offset_x < 0 or offset_y < 0:\n continue\n\n crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]\n\n in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)\n\n if len(in_boxes) == 0:\n continue\n\n 
output_offsets.append([offset_x, offset_y])\n output_boxes.append(in_boxes)\n output_labels.append(in_labels)\n # center\n elif rand_idx == 4:\n rand_direction = np.random.randint(-1, 1, size=1)\n\n offset_x = (xmin - ((target_img_size[1]-width)/2)-1) + (rand_direction[0] * margin[0])\n offset_y = (ymin - ((target_img_size[0]-height)/2)-1) + (rand_direction[0] * margin[0])\n crop_maxx = offset_x + target_img_size[1]\n crop_maxy = offset_y + target_img_size[0]\n\n if crop_maxx > ori_img_cols-1 or crop_maxy > ori_img_rows-1:\n continue\n\n if offset_x < 0 or offset_y < 0:\n continue\n\n crop_rect = [offset_x, offset_y, target_img_size[1], target_img_size[0]]\n\n in_boxes, in_labels = self._find_boxes_in_crop(crop_rect, boxes, labels)\n\n if len(in_boxes) == 0:\n continue\n\n output_offsets.append([offset_x, offset_y])\n output_boxes.append(in_boxes)\n output_labels.append(in_labels)\n\n else:\n print(\"exceed possible crop num\")\n\n return output_offsets, output_boxes, output_labels\n\n\n def _find_boxes_in_crop(self, crop_rect, boxes, labels):\n num_boxes = len(boxes)\n num_labels = len(labels)\n\n if num_boxes != num_labels:\n print(\"error occur: Random crop\")\n\n boxes_in_crop=[]\n labels_in_crop = []\n for idx in range(0, num_boxes, 1):\n box_in_crop, label, is_contain = self._find_box_in_crop(crop_rect, boxes[idx], labels[idx])\n\n if is_contain is True:\n boxes_in_crop.append(box_in_crop)\n labels_in_crop.append(label)\n\n return boxes_in_crop, labels_in_crop\n\n\n def _find_box_in_crop(self, rect, box, label):\n rect_minx = rect[0]\n rect_miny = rect[1]\n rect_width = rect[2]\n rect_height = rect[3]\n\n box_minx = box[0]\n box_miny = box[1]\n box_maxx = box[2]\n box_maxy = box[3]\n box_width = (box_maxx - box_minx)+1\n box_height = (box_maxy - box_miny)+1\n\n # occlusion_ratio\n occlusion_ratio = 0.3\n occlusion_width = int(box_width * occlusion_ratio) * -1\n occlusion_height = int(box_height * occlusion_ratio) * -1\n\n box_in_crop_minx = box_minx - rect_minx\n if box_in_crop_minx <= occlusion_width or box_in_crop_minx >= rect_width:\n box_in_rect = []\n return box_in_rect, label, False\n\n box_in_crop_miny = box_miny - rect_miny\n if box_in_crop_miny <= occlusion_height or box_in_crop_miny >= rect_height:\n box_in_rect = []\n return box_in_rect, label, False\n\n box_in_crop_maxx = box_maxx - rect_minx\n if rect_width - box_in_crop_maxx <= occlusion_width or box_in_crop_maxx <= 0:\n box_in_rect = []\n return box_in_rect, label, False\n\n box_in_crop_maxy = box_maxy - rect_miny\n if rect_height - box_in_crop_maxy <= occlusion_height or box_in_crop_maxy <= 0:\n box_in_rect = []\n return box_in_rect, label, False\n\n if box_in_crop_minx < 0:\n box_in_crop_minx = 0\n if box_in_crop_miny < 0:\n box_in_crop_miny = 0\n if rect_width - box_in_crop_maxx < 0:\n box_in_crop_maxx = rect_width-1\n if rect_height - box_in_crop_maxy < 0:\n box_in_crop_maxy = rect_height-1\n\n box_in_rect = [box_in_crop_minx, box_in_crop_miny, box_in_crop_maxx, box_in_crop_maxy]\n return box_in_rect, label, True\n\n\n def collate_fn(self, batch):\n imgs = [x[0] for x in batch]\n boxes = [x[1] for x in batch]\n labels = [x[2] for x in batch]\n paths = [x[3] for x in batch]\n\n num_imgs = len(imgs)\n\n if isinstance(self.input_size, int) is True:\n inputs = torch.zeros([num_imgs, 3, self.input_size, self.input_size], dtype=torch.float32)\n elif isinstance(self.input_size, tuple) is True:\n inputs = torch.zeros([num_imgs, 3, self.input_size[0], self.input_size[1]], dtype=torch.float32)\n else:\n raise 
ValueError('input size should be int or tuple of ints')\n\n loc_targets = list()\n cls_targets = list()\n center_targets = list()\n\n for i in range(num_imgs):\n im = imgs[i]\n imh, imw = im.size(1), im.size(2)\n inputs[i, :, :imh, :imw] = im\n\n # Encode data.\n loc_target, cls_target, center_target = self.data_encoder.encode(boxes[i], labels[i], radius=self.radius)\n\n loc_targets.append(loc_target)\n cls_targets.append(cls_target)\n center_targets.append(center_target)\n\n return inputs, \\\n torch.stack(loc_targets, dim=0), \\\n torch.stack(cls_targets, dim=0), \\\n torch.stack(center_targets, dim=0), \\\n paths\n\n\ndef test():\n import torchvision\n\n # transform = transforms.Compose([\n # transforms.ToTensor(),\n # transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))\n # ])\n # set random seed\n random.seed(3000)\n np.random.seed(3000)\n torch.manual_seed(3000)\n\n transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\n classes = 'person|bicycle|car|motorcycle|bus|truck|cat|dog|rider'\n classes = classes.split('|')\n\n dataset = jsonDataset(path='data/voc.json', classes=classes,transform=transform,\n input_image_size=(256, 512), num_crops=-1, fpn_level=5, is_norm_reg_target=True, radius=0.8,\n view_image=True, do_aug=True)\n print(len(dataset))\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True, num_workers=0,\n collate_fn=dataset.collate_fn)\n\n for idx, (images, loc_targets, cls_targets, center_targets, paths) in enumerate(dataloader):\n print(loc_targets.shape)\n print(cls_targets.shape)\n print(center_targets.shape)\n pos_ind = cls_targets[:, :, 0] <= 0\n print(pos_ind.shape)\n print(pos_ind.data.long().sum())\n\nif __name__ == '__main__':\n test()\n" ]
[ [ "torch.utils.data.DataLoader", "numpy.random.shuffle", "numpy.transpose", "torch.stack", "torch.manual_seed", "torch.tensor", "numpy.random.seed", "torch.zeros", "numpy.ascontiguousarray", "numpy.random.randint", "numpy.uint8" ] ]
edawson/SigProfilerMatrixGenerator
[ "bd6d3bb15e87805cdc7e771c3fdd886f4a9fc29b" ]
[ "SigProfilerMatrixGenerator/install.py" ]
[ "#!/usr/bin/env python3\n\n#Author: Erik Bergstrom\n\n#Contact: [email protected]\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport re\nimport subprocess\nimport argparse\nimport time\nfrom scipy import spatial\nimport pandas as pd\nimport shutil\nimport logging\nimport hashlib\nfrom SigProfilerMatrixGenerator.scripts import convert_input_to_simple_files as convertIn\nfrom SigProfilerMatrixGenerator.scripts import SigProfilerMatrixGeneratorFunc as matGen\n\n\ndef md5(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return (hash_md5.hexdigest())\n\n\ndef install_chromosomes (genomes, ref_dir, custom, rsync, bash):\n\tif custom:\n\t\tfor genome in genomes:\n\t\t\tos.system(\"gzip -d references/chromosomes/fasta/\" + genome + \"/*.gz\")\n\t\t\tchromosome_fasta_path = \"references/chromosomes/fasta/\" + genome + \"/\"\n\t\t\tos.system(\"python scripts/save_chrom_strings.py -g \" + genome)\n\t\t\tprint(\"Chromosome string files for \" + genome + \" have been created. Continuing with installation.\")\n\t\t\t#os.system(\"rm -r \" + chromosome_fasta_path)\n\telse:\n\t\tfor genome in genomes:\n\t\t\tspecies = None\n\t\t\tchrom_number = None\n\t\t\tif genome == 'GRCh37' or genome == 'GRCh38': \n\t\t\t\tspecies = \"homo_sapiens\"\n\t\t\t\tchrom_number = 24\n\t\t\telif genome == 'mm10' or genome == 'mm9':\n\t\t\t\tspecies = \"mus_musculus\"\n\t\t\t\tchrom_number = 21\n\t\t\telif genome == 'rn6':\n\t\t\t\tspecies = 'rattus_norvegicus'\n\t\t\t\tchrom_number = 22\n\t\t\telse:\n\t\t\t\tprint(genome + \" is not supported. The following genomes are supported:\\nGRCh37, GRCh38, mm10\")\n\t\t\t\tsys.exit()\n\t\t\t\n\t\t\tchromosome_string_path = \"references/chromosomes/chrom_string/\" + genome + \"/\"\n\t\t\tchromosome_fasta_path = \"references/chromosomes/fasta/\" + genome + \"/\"\n\n\t\t\tif os.path.exists(ref_dir + \"chromosomes/tsb/\" + genome) and len(os.listdir(ref_dir + \"chromosomes/tsb/\" + genome)) >= chrom_number:\n\t\t\t\tbreak\n\t\t\twget_flag = True\n\t\t\tif os.path.exists(chromosome_string_path) == False or len(os.listdir(chromosome_string_path)) <= chrom_number:\n\t\t\t\tprint(\"[DEBUG] Chromosome string files found at: \" + ref_dir + chromosome_string_path)\n\t\t\t\tif os.path.exists(chromosome_fasta_path) == False or len(os.listdir(chromosome_fasta_path)) <= chrom_number:\n\t\t\t\t\tprint(\"[DEBUG] Chromosome fasta files found at: \" + ref_dir + chromosome_fasta_path)\n\t\t\t\t\tprint(\"Chromosomes are not currently saved as individual text files for \" + genome + \". Downloading the files now...\")\n\t\t\t\t\tif not rsync:\n\t\t\t\t\t#os.system(\"rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/grch37/update/fasta/homo_sapiens/dna/ \" + chromosome_fasta_path + \" 2>&1>> install.log\")\n\t\t\t\t\t\t# try:\n\t\t\t\t\t\t# \tp = subprocess.Popen(\"wget\", stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\t\t\t\t# except:\n\t\t\t\t\t\t# \tproceed = input(\"You may not have wget or homebrew installed. 
Download those dependencies now?[Y/N]\").upper()\n\t\t\t\t\t\t# \tif proceed == 'Y':\n\t\t\t\t\t\t# \t\ttry:\n\t\t\t\t\t\t# \t\t\tos.system(\"brew install wget\")\n\t\t\t\t\t\t# \t\texcept:\n\t\t\t\t\t\t# \t\t\tos.system('/usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"')\n\t\t\t\t\t\t# \t\t\tos.system(\"brew install wget\")\n\t\t\t\t\t\t# \telse:\n\t\t\t\t\t\t# \t\tprint(\"Installation has stopped. Please download the chromosome files before proceeding with the installation.\")\n\t\t\t\t\t\t# \t\twget_flag = False\n\t\t\t\t\t\t# \t\tsys.exit()\n\t\t\t\t\t\tif wget_flag:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tif genome == 'GRCh37':\n\t\t\t\t\t\t\t\t\tif bash:\n\t\t\t\t\t\t\t\t\t\tos.system(\"bash -c '\" + 'wget -r -l1 -c -nc --no-parent -A \"*.dna.chromosome.*\" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/grch37/current/fasta/homo_sapiens/dna/ 2>> install.log' + \"'\")\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tos.system('wget -r -l1 -c -nc --no-parent -A \"*.dna.chromosome.*\" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/grch37/current/fasta/homo_sapiens/dna/ 2>> install.log')\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t#os.system(\"wget -r -l1 -c -nc --no-parent -A '*.dna.chromosome.*' -nd -P \" + chromosome_fasta_path + \" ftp://ftp.ensembl.org/pub/grch37/update/fasta/homo_sapiens/dna/ 2>> install.log\")\n\t\t\t\t\t\t\t\telif genome == 'mm9':\n\t\t\t\t\t\t\t\t\tif bash:\n\t\t\t\t\t\t\t\t\t\tos.system(\"bash -c '\" + 'wget -r -l1 -c -nc --no-parent -A \"*.dna.chromosome.*\" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-67/fasta/mus_musculus/dna/ 2>> install.log' + \"'\")\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tos.system('wget -r -l1 -c -nc --no-parent -A \"*.dna.chromosome.*\" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-67/fasta/mus_musculus/dna/ 2>> install.log')\n\n\t\t\t\t\t\t\t\telif genome == 'rn6':\n\t\t\t\t\t\t\t\t\tif bash:\n\t\t\t\t\t\t\t\t\t\tos.system(\"bash -c '\" + 'wget -r -l1 -c -nc --no-parent -A \"*.dna.chromosome.*\" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-96/fasta/rattus_norvegicus/dna/ 2>> install.log' + \"'\")\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tos.system('wget -r -l1 -c -nc --no-parent -A \"*.dna.chromosome.*\" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-96/fasta/rattus_norvegicus/dna/ 2>> install.log')\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tif bash:\n\t\t\t\t\t\t\t\t\t\tos.system(\"bash -c '\" + 'wget -r -l1 -c -nc --no-parent -A \"*.dna.chromosome.*\" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-93/fasta/' +species+'/dna/ 2>> install.log' + \"'\")\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tos.system('wget -r -l1 -c -nc --no-parent -A \"*.dna.chromosome.*\" -nd -P ' + chromosome_fasta_path + ' ftp://ftp.ensembl.org/pub/release-93/fasta/' +species+'/dna/ 2>> install.log')\n\n\t\t\t\t\t\t\t\t#os.system(\"gunzip references/chromosomes/fasta/\" + genome + \"/*.gz\")\n\t\t\t\t\t\t\t\tos.system(\"gzip -d references/chromosomes/fasta/\" + genome + \"/*.gz\")\n\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tprint(\"The ensembl ftp site is not currently responding.\")\n\t\t\t\t\t\t\t\tsys.exit()\n\t\t\t\t\telse:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tif genome == 'GRCh37':\n\t\t\t\t\t\t\t\tif bash:\n\t\t\t\t\t\t\t\t\tos.system(\"bash -c '\" + \"rsync -av -m --include='*/' --include='*.dna.chromosome.*' 
--exclude='*' rsync://ftp.ensembl.org/ensembl/pub/grch37/current/fasta/homo_sapiens/dna/ \" + chromosome_fasta_path + \" 2>&1>> install.log\" + \"'\")\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tos.system(\"rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/grch37/current/fasta/homo_sapiens/dna/ \" + chromosome_fasta_path + \" 2>&1>> install.log\")\n\t\t\t\t\t\t\telif genome == 'mm9':\n\t\t\t\t\t\t\t\tif bash:\n\t\t\t\t\t\t\t\t\tos.system(\"bash -c '\" + \"rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-67/fasta/mus_musculus/dna/ \" + chromosome_fasta_path + \" 2>&1>> install.log\" + \"'\")\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tos.system(\"rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-67/fasta/mus_musculus/dna/ \" + chromosome_fasta_path + \" 2>&1>> install.log\")\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telif genome == 'rn6':\n\t\t\t\t\t\t\t\tif bash:\n\t\t\t\t\t\t\t\t\tos.system(\"bash -c '\" + \"rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-96/fasta/rattus_norvegicus/dna/ \" + chromosome_fasta_path + \" 2>> install.log\" + \"'\")\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tos.system(\"rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-96/fasta/rattus_norvegicus/dna/ \" + chromosome_fasta_path + \" 2>> install.log\")\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif bash:\n\t\t\t\t\t\t\t\t\tos.system(\"bash -c '\" + \"rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-93/fasta/\"+species+\"/dna/ \" + chromosome_fasta_path + \" 2>&1>> install.log\" + \"'\")\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tos.system(\"rsync -av -m --include='*/' --include='*.dna.chromosome.*' --exclude='*' rsync://ftp.ensembl.org/ensembl/pub/release-93/fasta/\"+species+\"/dna/ \" + chromosome_fasta_path + \" 2>&1>> install.log\")\n\n\t\t\t\t\t\t\t#os.system(\"gunzip references/chromosomes/fasta/\" + genome + \"/*.gz\")\n\t\t\t\t\t\t\tos.system(\"gzip -d references/chromosomes/fasta/\" + genome + \"/*.gz\")\n\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint(\"The ensembl ftp site is not currently responding.\")\n\t\t\t\t\t\t\tsys.exit()\n\n\t\t\t\tprint(\"Chromosome fasta files for \" + genome + \" have been installed. Creating the chromosome string files now...\")\n\t\t\t\tos.system(\"python scripts/save_chrom_strings.py -g \" + genome)\n\t\t\t\tprint(\"Chromosome string files for \" + genome + \" have been created. Continuing with installation.\")\n\t\t\t\t# os.system(\"rm -r \" + chromosome_fasta_path)\n\t\t\t\t# os.remove(chromosome_fasta_path)\n\t\t\t\tshutil.rmtree(chromosome_fasta_path)\n\n\t\t\telse:\n\t\t\t\tprint(\"Chromosome reference files exist for \" + genome + \". 
Continuing with installation.\")\n\n\ndef install_chromosomes_tsb (genomes, ref_dir, custom):\n\tcheck_sum = {'GRCh37':\n\t\t\t\t\t\t\t{'1':'a7d51305e943cf06ff2029146bd91bca','2':'d24d0185af89356d44614ab0d6fd6a68','3':'ea5e033147dcaf77bfd4c70f50688d37',\n\t\t\t\t\t\t\t '4':'00d7797c7184f1802367e33f6e2bc3da','5':'f74b1eeb329088242a9f22a16322b325','6':'b353cc4c4abc90340e7747509fe7b457',\n\t\t\t\t\t\t\t '7':'bbadde91b3ef958c7d20e2b1088c8cd2','8':'0ff695692e3efebaf00c7905d0d536d7','9':'40b75a18acb66748a37888c53a76dcdb',\n\t\t\t\t\t\t\t '10':'557881b744a932b4ceee8a06d6de85a4','11':'f8b8f118101d7cb04164b29a7acadca4','12':'52c18e9fefc3ed3e35c1d8771d1247de',\n\t\t\t\t\t\t\t '13':'a241d1cdcadccfd94db792300ab000bf','14':'ed3907128336795669bc19d77c0aa409','15':'bfc66ad087c4e9025076d7571cffa30e',\n\t\t\t\t\t\t\t '16':'bd251fddc42400bb54ef95d5e1002ece','17':'fcd36b1bf5c4bd74328dc4caaae244ae','18':'e015d4324c36374827582c5b1214a736',\n\t\t\t\t\t\t\t '19':'5cfa7d47e2d73dbdbf8d68f97c8e8b23','20':'2fa0717bf4e8dddac64cd393f4134ff5','21':'ba5559776d4601b80ca42c82f02102a4',\n\t\t\t\t\t\t\t '22':'ba762b6ae493df40d04d1ff63d9b2933','Y':'0303100be91874b966a998273cd7c8eb','X':'14e331d82736f6cfc177ff8c90f7bd78',\n\t\t\t\t\t\t\t 'MT':'dfd6db5743d399516d5c8dadee5bee78'},\n\n\t\t\t\t'GRCh38':\n\t\t\t\t\t\t\t{'1':'ebe083105e7703a49581a36d73732a96','2':'cd65e36dbdf12a8ac3d2c70ebac8cad4','3':'6c20a7008394f2fa9c304d231a1f391b',\n\t\t\t\t\t\t\t '4':'5c7443e1678868adadeac0e57558f6e8','5':'45573232c8097c679503a6598f61e60b','6':'cfc137c7434d3a9a872332d405b5c553',\n\t\t\t\t\t\t\t '7':'9d8210c22c1962db837e7b62a578975c','8':'665134fd44f21915cbeef955addf89ba','9':'758d0c0c71d8bafbe1ede86587191730',\n\t\t\t\t\t\t\t '10':'397bb21acff1ca3052ac802f2aee06e0','11':'07707ff8a2a964656469a7be7bb3e576','12':'506d02539075e080ee12ebdf63908080',\n\t\t\t\t\t\t\t '13':'03ed22f01ab43145733c0b6a647e0560','14':'8b93447086549e476c65699ed813a567','15':'cd0dfe9fa78cae2fc7becf8f8ec6c693',\n\t\t\t\t\t\t\t '16':'e17bbb66eb4d6b62b7b0e2fbf062b6a6','17':'8fc95bb3101d024d890aa3543eb454c5','18':'a4870628045bb033a90e8c89f818e24d',\n\t\t\t\t\t\t\t '19':'6a9d0c8298f0ba2fa13180e02b969f16','20':'aa75d35969cf3956bb4ace7bdc57b34e','21':'5d55f5ad6271d6a0d8806876924990f7',\n\t\t\t\t\t\t\t '22':'efdb4e1d23ab7964302b828062a33447','Y':'3b38c639ad164d60f1a055b46fcd2748','X':'d5edbea3cf5d1716765dd4a7b41b7656',\n\t\t\t\t\t\t\t 
'MT':'dfd6db5743d399516d5c8dadee5bee78'},\n\n\t\t\t\t'mm9':\n\t\t\t\t\t\t\t{'1':'c5afc4b3f7f2119696214511d7a04341','2':'a7b467475a1b032d2c893dac1c419a28','3':'f922bc529a17324f1cd858f9a8723d65',\n\t\t\t\t\t\t\t'4':'f3d6b74e3c04dbd229e2f1e363607506','5':'5fee4f1889c9fe20f7f8562c62bbeb0a','6':'481d47b87da45f3a20181c780fd796c2',\n\t\t\t\t\t\t\t'7':'454ef2bf49a5ba8cfea3d16dfcfc7f25','8':'2f4162d4c824db78a2a2a820cb4fec81','9':'0649e6aec61af1ab8ab4797ea8e54119',\n\t\t\t\t\t\t\t'10':'38296256bcfe886c8ae771418e4fd824','11':'b31cb0ce693e35eaa77031d44b12e474','12':'d2b3e4b015742b6aea30ceec5a972968',\n\t\t\t\t\t\t\t'13':'df77b6d0ed1b133224b128c189736372','14':'0ec3c0e6b3fa2cdb957541f19792e130','15':'44fcaf2ec9b82dae910f85ce41c3cfad',\n\t\t\t\t\t\t\t'16':'ad7a8dbdf46fa7077e0982a54eab70b7','17':'71aee1dee3cd2078e4619c485d88817e','18':'727ec4ed3128ecacd6cd2f7558083553',\n\t\t\t\t\t\t\t'19':'461a7119781ab7f4b654fdd9ef76e0ec','Y':'471ff3bbb4520c020cfaa7ca8371c543','X':'9ccadf96cd3aa0ed9d299894a3d7fde0',\n\t\t\t\t\t\t\t'MT':'a1d56043ed8308908965dd080a4d0c8d'},\n\n\t\t\t\t'mm10':\n\t\t\t\t\t\t\t{'1':'ef88c5ac276a32a2865c0408f92acd55','2':'ced7325ef9e2dfedea3fbe26428a6059','3':'9cd1794eeea27553077a018038303908',\n\t\t\t\t\t\t\t'4':'da616d7ed6c67f824487eb2ed09cd33b','5':'b327b82da6986bf947105d07c0ad6d2e','6':'fb9a8fa0b85561f8d4de633c22d5157a',\n\t\t\t\t\t\t\t'7':'12457fd80f6806779fc0d4cc8d36fbad','8':'5d98d86bd22bee1cb226406f49ee7caf','9':'b2f26613fcc622a4003e4c945ae55e25',\n\t\t\t\t\t\t\t'10':'e9f3589529e258ede66d2e77bb87d21d','11':'76bcd285c3c66471ad6fccfabe42294c','12':'ac34fc3616c9609d8e75a59069e9007a',\n\t\t\t\t\t\t\t'13':'f81b976e4e4617b25945d06f9aa30846','14':'95dc042eb2aa7d4cc0abe071d4d7966e','15':'fbf2477833aff73ae085537cd7ee0f85',\n\t\t\t\t\t\t\t'16':'77cbcd009ba50891571f785595717ec1','17':'cd9e4dfdd168ed3de05dac4d44c6e692', '18':'945e83694c7c8f69d6186e1a2abc9771',\n\t\t\t\t\t\t\t'19':'e57b25f8869de31a9dbce06510711db6','Y':'c2146ba4ab1ec262f5e38b2a1ebc5f5b','X':'9af543088be046fdc63976c2d41de94c',\n\t\t\t\t\t\t\t'MT':'a1d56043ed8308908965dd080a4d0c8d'},\n\t\t\t\t'rn6':\n\t\t\t\t\t\t\t{'1':'003723513cbdb3708fcc5d737c05199c','2':'53e52c5facc7f05462be533845f37425','3':'8d157a9b71fe9770cf783ea5459b19d7',\n\t\t\t\t\t\t\t'4':'a66dc1999bcc960ff11fe0b24c0d7b14','5':'601cf83411234adbdd9f911b89509564','6':'03b1f4af58fffdf213466ea85b570b3d',\n\t\t\t\t\t\t\t'7':'4ed05ddf9502ef79e121c02e391660e6','8':'3e2458daaf1b3e8ab4d0e0a9e60c067b','9':'8f83caeccec7ea6e35e404737138ee67',\n\t\t\t\t\t\t\t'10':'9c1af453a5facc9bfa821457bcfc4d30','11':'ef0480a905c55d76a3c58e295a85bc75','12':'643b6fe4a3a6363ffe64a6c316fa3e1a',\n\t\t\t\t\t\t\t'13':'102bb3fb420a4104c216bcdf99870374','14':'e26b8b63fba0ea7ced4f0330e93a8cdc','15':'da747616a1362d374d4786102fab6f9f',\n\t\t\t\t\t\t\t'16':'54e4f932eb0eda4cbf31156f96ef7235','17':'46c2facf5415e4eff8b0804161db722d', 
'18':'f1cb84f002967854b83bf266ec59a7a3',\n\t\t\t\t\t\t\t'19':'b85ca155fd1780fe5c327a4589c212a6','20':'899d3511352d78b9b9dc63f063d91b31','Y':'6a7a3539c329dc540dfa6db006003bb1',\n\t\t\t\t\t\t\t'X':'7a06bafab97c59a819f03633f0a6b7a2'},\n\t\t\t\t'c_elegans':\n\t\t\t\t\t\t\t{'I':'5a3ea8cf3dfbc641716b7bc805edcaae','II':'bf82edaa92809dd2fea2b791c38c9728','III':'d2df34b6743f41d3964549fc76c5f1a2',\n\t\t\t\t\t\t\t'IV':'23396bb57145d3acde2888947b5b8c3a','V':'09df3c53b12e5fd7d9035cc98ca221a3','X':'988046456f1409dfdb5e26444d84d238',\n\t\t\t\t\t\t\t'MtDNA':'48983f530959780de0125f74a87d4fc1'},\n\t\t\t\t'dog':\n\t\t\t\t\t\t\t{'1':'bef8283c1a36f9aef0e407de2ff6af00','2':'9cc961192bb5e58b3847060c3e9c1cfc','3':'d33263fa2de6666b41e140cb7a8da66c',\n\t\t\t\t\t\t\t '4':'cd4ed39ebac1c04800ccf30466ec69f5','5':'c0f48a4a764e58388b48835aca2ec0a4','6':'4b472a2f8d0a53ac75cce04e7dc9279a',\n\t\t\t\t\t\t\t '7':'12a61573a0da2c9306fff705bb1c39c1','8':'e22cf22a27560aa8523dc959ddcf6e25','9':'c079a73d719145cdd5c7c93969a1c392',\n\t\t\t\t\t\t\t '10':'45805a518147f7846bd0457ca038c8df','11':'f38cda8508463a7607dff14a581ee7b0','12':'adb5de197f58bb827fa01fe924eb3a1d',\n\t\t\t\t\t\t\t '13':'055a845ba97baad3b13d4d3359f88290','14':'27f0ba8e47996a058807a3827cf8e4a8','15':'2e9565c687a593eb0acbdd0962bb9255',\n\t\t\t\t\t\t\t '16':'89b2225bb78d88b0fd1d38d9514ab0cb','17':'f0378253e2f083e42b665ea202fde3b0','18':'04d124e273f3b54a685ad6526223cd03',\n\t\t\t\t\t\t\t '19':'67bae093919e6bb5ab6b9806c739d539','20':'5588387165a2e19c4533012cfb4998f3','21':'371cdf18a545728f7964b9db2fc72d5e',\n\t\t\t\t\t\t\t '22':'fbf76865f88a018d93506e036f6a68bc','23':'085145e01d9fd9f0f999fb9e8e8d4400','24':'69b75a9962fb766b447e7d1252cb31ac',\n\t\t\t\t\t\t\t '25':'12d5c6677b3e17170c317c1f5532d2a8','26':'13937d18e56b2b93d12fa5fcba48a138','27':'1d03d8ca5f201f4d156f5e1b38f7a67c',\n\t\t\t\t\t\t\t '28':'c33395dec7fdc13e9d8f10afaa946f8c','29':'174f2db104ecaa5efef770f44241e3b0','30':'047d420ef9aecb933a7d83b6af820b23',\n\t\t\t\t\t\t\t '31':'5be61f0c9944a5f2d7d1a5b2e75fb000','32':'212dcb867e95a642277a243fed8d8e41','33':'08a217b02cdd778cfdb0005dff4828b1',\n\t\t\t\t\t\t\t '34':'4245d6fc370d9049ef4c25314fbef239','35':'1344aba8755b8a4e304629180fc0591a','36':'e4fff6ed84777905dc999ca6d6bc2557',\n\t\t\t\t\t\t\t '37':'60d51ea6ae9e3f2fa316e3d03aff96b2','38':'4090ff76d94e6b38920916ae3ff2441c','X':'bce1372df64037d79b0995311d8ff971'}}\n\tfor genome in genomes:\n\t\tchrom_number = None\n\t\tif genome == 'GRCh37' or genome == 'GRCh38': \n\t\t\tchrom_number = 24\n\t\telif genome == 'mm10' or genome == 'mm9':\n\t\t\tchrom_number = 21\n\t\telif genome == 'rn6':\n\t\t\tchrom_number = 22\n\n\n\t\tchromosome_TSB_path = \"references/chromosomes/tsb/\" + genome + \"/\"\n\t\ttranscript_files = \"references/chromosomes/transcripts/\" + genome + \"/\"\n\t\tprint(\"[DEBUG] Chromosome tsb files found at: \" + ref_dir + chromosome_TSB_path)\n\n\n\t\tif os.path.exists(transcript_files) == False or len(os.listdir(transcript_files)) < 1:\n\t\t\tprint(\"Please download the transcript files before proceeding. You can download the files from 'http://www.ensembl.org/biomart/martview'.\")\n\t\t\tprint(\"Follow the format presented in the README file:\\n\\n\\tGene stable ID Transcript stable ID Chromosome/scaffold name Strand Transcript start (bp) Transcript end (bp)\\n\\n\\n\")\n\t\t\tsys.exit()\n\t\tif os.path.exists(chromosome_TSB_path) == False or len(os.listdir(chromosome_TSB_path)) < chrom_number:\n\t\t\tprint(\"The transcriptional reference data for \" + genome + \" has not been saved. 
Creating these files now\")\n\t\t\tos.system(\"python scripts/save_tsb_192.py -g \" + genome)\n\n\t\tcorrupt = False\n\t\tfor files in os.listdir(chromosome_TSB_path):\n\t\t\tif \"proportions\" in files:\n\t\t\t\tcontinue\n\t\t\tif \".DS_Store\" in files:\n\t\t\t\tcontinue\n\t\t\tchrom = files.split(\".\")\n\t\t\tchrom = chrom[0]\n\t\t\tcheck = md5(chromosome_TSB_path + files)\n\t\t\tif check_sum[genome][chrom] != check:\n\t\t\t\tcorrupt = True\n\t\t\t\tos.remove(chromosome_TSB_path + files)\n\t\t\t\tprint(\"[DEBUG] Chromosome \" + chrom + \" md5sum did not match => reference md5sum: \" + str(check_sum[genome][chrom]) + \" new file md5sum: \" + str(check))\n\t\tif corrupt:\n\t\t\tprint(\"The transcriptional reference data appears to be corrupted. Please reinstall the \" + genome + \" genome.\")\n\t\t\tsys.exit()\n\t\t\t\n\t\tprint(\"The transcriptional reference data for \" + genome + \" has been saved.\")\n\ndef install_chromosomes_tsb_BED (genomes, custom, ref_dir):\n\tfor genome in genomes:\n\t\tif not os.path.exists(ref_dir + \"chromosomes/tsb_BED/\" + genome + \"/\") or len(os.listdir(ref_dir + \"chromosomes/tsb_BED/\" + genome + \"/\")) < 19:\n\t\t\tos.system(\"python scripts/save_chrom_tsb_separate.py -g \" + genome)\n\t\t\tprint(\"The TSB BED files for \" + genome + \" have been saved.\")\n\ndef benchmark (genome, ref_dir):\n\t#current_dir = os.path.realpath(__file__)\n\t#ref_dir = re.sub('\\/install.py$', '', current_dir)\n\tref_dir = os.path.dirname(os.path.abspath(__file__))\n\tvcf_path = ref_dir + \"/references/vcf_files/\" + genome + \"_bench/\"\n\n\tstart_time = time.time()\n\tmatGen.SigProfilerMatrixGeneratorFunc(genome + \"_bench\", genome, vcf_path)\n\tend_time = time.time()\n\n\toriginal_matrix_96 = ref_dir + \"/scripts/Benchmark/\" + genome + \"_bench_orig_96.txt\"\n\toriginal_matrix_3072 = ref_dir + \"/scripts/Benchmark/\" + genome + \"_bench_orig_3072.txt\"\n\tnew_matrix_96 = vcf_path + \"output/SBS/\" + genome + \"_bench.SBS96.all\"\n\tnew_matrix_3072 = vcf_path + \"output/SBS/\" + genome + \"_bench.SBS6144.all\"\n\n\t#genome = \"GRCh37\"\n\n\t############# Cosine Test ###################################################\n\tdata_orig = pd.read_csv(original_matrix_96, sep='\\t', header=0)\n\tdata_new = pd.read_csv(new_matrix_96, sep='\\t', header=0)\n\tcount = 0\n\trange_count = min(len(data_orig.loc[0]), len(data_new.loc[0]))\n\tfor i in range (1, range_count, 1):\n\t orig_list = list(data_orig[data_orig.columns[i]])\n\t new_list = list(data_new[data_new.columns[i]])\n\t cosine_result = (1-spatial.distance.cosine(orig_list,new_list))\n\t if cosine_result != 1:\n\t count += 1\n\tif count != 0:\n\t print(\"There seems to be some errors in the newly generated matrix. The installation may not have been successful.\")\n\n\n\tdata_orig = pd.read_csv(original_matrix_3072, sep='\\t', header=0)\n\tdata_new = pd.read_csv(new_matrix_3072, sep='\\t', header=0)\n\tcount = 0\n\trange_count = min(len(data_orig.loc[0]), len(data_new.loc[0]))\n\tfor i in range (1, range_count, 1):\n\t orig_list = data_orig[data_orig.columns[i]]\n\t new_list = data_new[data_new.columns[i]]\n\t cosine_result = (1-spatial.distance.cosine(orig_list,new_list))\n\t if cosine_result <= 0.85:\n\t count += 1\n\tif count != 0:\n\t print(\"There seems to be some errors in the newly generated matrix. 
The installation may not have been successful.\")\n\n\tend_time = time.time()\n\tprint(\"Installation was succesful.\\nSigProfilerMatrixGenerator took \" + str(end_time-start_time) + \" seconds to complete.\")\n\n\ndef install (genome, custom=False, rsync=False, bash=True, ftp=True):\n\tfirst_path= os.getcwd()\n\tref_dir = os.path.dirname(os.path.abspath(__file__))\n\tos.chdir(ref_dir)\n\tif os.path.exists(\"install.log\"):\n\t\tos.remove(\"install.log\")\n\n\t#ref_dir += \"/references/\"\n\tchrom_string_dir = ref_dir + \"/references/chromosomes/chrom_string/\"\n\tchrom_fasta_dir = ref_dir + \"/references/chromosomes/fasta/\"\n\tchrom_tsb_dir = ref_dir + \"/references/chromosomes/tsb/\"\n\tmatrix_dir = ref_dir + \"/references/matrix/\"\n\tvcf_dir = ref_dir + \"/references/vcf_files/\"\n\tbed_dir = ref_dir + \"/references/vcf_files/BED/\"\n\tlog_dir = \"logs/\"\n\tnew_dirs = [ref_dir, chrom_string_dir, chrom_fasta_dir, chrom_tsb_dir, matrix_dir, vcf_dir, bed_dir, log_dir]\n\n\tfor dirs in new_dirs:\n\t\tif not os.path.exists(dirs):\n\t\t\tos.makedirs(dirs)\n\n\tif ftp:\n\t\tcheck_sum = {'GRCh37':\n\t\t\t\t\t\t\t\t{'1':'a7d51305e943cf06ff2029146bd91bca','2':'d24d0185af89356d44614ab0d6fd6a68','3':'ea5e033147dcaf77bfd4c70f50688d37',\n\t\t\t\t\t\t\t\t '4':'00d7797c7184f1802367e33f6e2bc3da','5':'f74b1eeb329088242a9f22a16322b325','6':'b353cc4c4abc90340e7747509fe7b457',\n\t\t\t\t\t\t\t\t '7':'bbadde91b3ef958c7d20e2b1088c8cd2','8':'0ff695692e3efebaf00c7905d0d536d7','9':'40b75a18acb66748a37888c53a76dcdb',\n\t\t\t\t\t\t\t\t '10':'557881b744a932b4ceee8a06d6de85a4','11':'f8b8f118101d7cb04164b29a7acadca4','12':'52c18e9fefc3ed3e35c1d8771d1247de',\n\t\t\t\t\t\t\t\t '13':'a241d1cdcadccfd94db792300ab000bf','14':'ed3907128336795669bc19d77c0aa409','15':'bfc66ad087c4e9025076d7571cffa30e',\n\t\t\t\t\t\t\t\t '16':'bd251fddc42400bb54ef95d5e1002ece','17':'fcd36b1bf5c4bd74328dc4caaae244ae','18':'e015d4324c36374827582c5b1214a736',\n\t\t\t\t\t\t\t\t '19':'5cfa7d47e2d73dbdbf8d68f97c8e8b23','20':'2fa0717bf4e8dddac64cd393f4134ff5','21':'ba5559776d4601b80ca42c82f02102a4',\n\t\t\t\t\t\t\t\t '22':'ba762b6ae493df40d04d1ff63d9b2933','Y':'0303100be91874b966a998273cd7c8eb','X':'14e331d82736f6cfc177ff8c90f7bd78',\n\t\t\t\t\t\t\t\t 'MT':'dfd6db5743d399516d5c8dadee5bee78'},\n\n\t\t\t\t\t'GRCh38':\n\t\t\t\t\t\t\t\t{'1':'ebe083105e7703a49581a36d73732a96','2':'cd65e36dbdf12a8ac3d2c70ebac8cad4','3':'6c20a7008394f2fa9c304d231a1f391b',\n\t\t\t\t\t\t\t\t '4':'5c7443e1678868adadeac0e57558f6e8','5':'45573232c8097c679503a6598f61e60b','6':'cfc137c7434d3a9a872332d405b5c553',\n\t\t\t\t\t\t\t\t '7':'9d8210c22c1962db837e7b62a578975c','8':'665134fd44f21915cbeef955addf89ba','9':'758d0c0c71d8bafbe1ede86587191730',\n\t\t\t\t\t\t\t\t '10':'397bb21acff1ca3052ac802f2aee06e0','11':'07707ff8a2a964656469a7be7bb3e576','12':'506d02539075e080ee12ebdf63908080',\n\t\t\t\t\t\t\t\t '13':'03ed22f01ab43145733c0b6a647e0560','14':'8b93447086549e476c65699ed813a567','15':'cd0dfe9fa78cae2fc7becf8f8ec6c693',\n\t\t\t\t\t\t\t\t '16':'e17bbb66eb4d6b62b7b0e2fbf062b6a6','17':'8fc95bb3101d024d890aa3543eb454c5','18':'a4870628045bb033a90e8c89f818e24d',\n\t\t\t\t\t\t\t\t '19':'6a9d0c8298f0ba2fa13180e02b969f16','20':'aa75d35969cf3956bb4ace7bdc57b34e','21':'5d55f5ad6271d6a0d8806876924990f7',\n\t\t\t\t\t\t\t\t '22':'efdb4e1d23ab7964302b828062a33447','Y':'3b38c639ad164d60f1a055b46fcd2748','X':'d5edbea3cf5d1716765dd4a7b41b7656',\n\t\t\t\t\t\t\t\t 
'MT':'dfd6db5743d399516d5c8dadee5bee78'},\n\n\t\t\t\t\t'mm9':\n\t\t\t\t\t\t\t\t{'1':'c5afc4b3f7f2119696214511d7a04341','2':'a7b467475a1b032d2c893dac1c419a28','3':'f922bc529a17324f1cd858f9a8723d65',\n\t\t\t\t\t\t\t\t'4':'f3d6b74e3c04dbd229e2f1e363607506','5':'5fee4f1889c9fe20f7f8562c62bbeb0a','6':'481d47b87da45f3a20181c780fd796c2',\n\t\t\t\t\t\t\t\t'7':'454ef2bf49a5ba8cfea3d16dfcfc7f25','8':'2f4162d4c824db78a2a2a820cb4fec81','9':'0649e6aec61af1ab8ab4797ea8e54119',\n\t\t\t\t\t\t\t\t'10':'38296256bcfe886c8ae771418e4fd824','11':'b31cb0ce693e35eaa77031d44b12e474','12':'d2b3e4b015742b6aea30ceec5a972968',\n\t\t\t\t\t\t\t\t'13':'df77b6d0ed1b133224b128c189736372','14':'0ec3c0e6b3fa2cdb957541f19792e130','15':'44fcaf2ec9b82dae910f85ce41c3cfad',\n\t\t\t\t\t\t\t\t'16':'ad7a8dbdf46fa7077e0982a54eab70b7','17':'71aee1dee3cd2078e4619c485d88817e','18':'727ec4ed3128ecacd6cd2f7558083553',\n\t\t\t\t\t\t\t\t'19':'461a7119781ab7f4b654fdd9ef76e0ec','Y':'471ff3bbb4520c020cfaa7ca8371c543','X':'9ccadf96cd3aa0ed9d299894a3d7fde0',\n\t\t\t\t\t\t\t\t'MT':'a1d56043ed8308908965dd080a4d0c8d'},\n\n\t\t\t\t\t'mm10':\n\t\t\t\t\t\t\t\t{'1':'ef88c5ac276a32a2865c0408f92acd55','2':'ced7325ef9e2dfedea3fbe26428a6059','3':'9cd1794eeea27553077a018038303908',\n\t\t\t\t\t\t\t\t'4':'da616d7ed6c67f824487eb2ed09cd33b','5':'b327b82da6986bf947105d07c0ad6d2e','6':'fb9a8fa0b85561f8d4de633c22d5157a',\n\t\t\t\t\t\t\t\t'7':'12457fd80f6806779fc0d4cc8d36fbad','8':'5d98d86bd22bee1cb226406f49ee7caf','9':'b2f26613fcc622a4003e4c945ae55e25',\n\t\t\t\t\t\t\t\t'10':'e9f3589529e258ede66d2e77bb87d21d','11':'76bcd285c3c66471ad6fccfabe42294c','12':'ac34fc3616c9609d8e75a59069e9007a',\n\t\t\t\t\t\t\t\t'13':'f81b976e4e4617b25945d06f9aa30846','14':'95dc042eb2aa7d4cc0abe071d4d7966e','15':'fbf2477833aff73ae085537cd7ee0f85',\n\t\t\t\t\t\t\t\t'16':'77cbcd009ba50891571f785595717ec1','17':'cd9e4dfdd168ed3de05dac4d44c6e692', '18':'945e83694c7c8f69d6186e1a2abc9771',\n\t\t\t\t\t\t\t\t'19':'e57b25f8869de31a9dbce06510711db6','Y':'c2146ba4ab1ec262f5e38b2a1ebc5f5b','X':'9af543088be046fdc63976c2d41de94c',\n\t\t\t\t\t\t\t\t'MT':'a1d56043ed8308908965dd080a4d0c8d'},\n\t\t\t\t\t'rn6':\n\t\t\t\t\t\t\t\t{'1':'003723513cbdb3708fcc5d737c05199c','2':'53e52c5facc7f05462be533845f37425','3':'8d157a9b71fe9770cf783ea5459b19d7',\n\t\t\t\t\t\t\t\t'4':'a66dc1999bcc960ff11fe0b24c0d7b14','5':'601cf83411234adbdd9f911b89509564','6':'03b1f4af58fffdf213466ea85b570b3d',\n\t\t\t\t\t\t\t\t'7':'4ed05ddf9502ef79e121c02e391660e6','8':'3e2458daaf1b3e8ab4d0e0a9e60c067b','9':'8f83caeccec7ea6e35e404737138ee67',\n\t\t\t\t\t\t\t\t'10':'9c1af453a5facc9bfa821457bcfc4d30','11':'ef0480a905c55d76a3c58e295a85bc75','12':'643b6fe4a3a6363ffe64a6c316fa3e1a',\n\t\t\t\t\t\t\t\t'13':'102bb3fb420a4104c216bcdf99870374','14':'e26b8b63fba0ea7ced4f0330e93a8cdc','15':'da747616a1362d374d4786102fab6f9f',\n\t\t\t\t\t\t\t\t'16':'54e4f932eb0eda4cbf31156f96ef7235','17':'46c2facf5415e4eff8b0804161db722d', 
'18':'f1cb84f002967854b83bf266ec59a7a3',\n\t\t\t\t\t\t\t\t'19':'b85ca155fd1780fe5c327a4589c212a6','20':'899d3511352d78b9b9dc63f063d91b31','Y':'6a7a3539c329dc540dfa6db006003bb1',\n\t\t\t\t\t\t\t\t'X':'7a06bafab97c59a819f03633f0a6b7a2'},\n\t\t\t\t\t'c_elegans':\n\t\t\t\t\t\t\t\t{'I':'5a3ea8cf3dfbc641716b7bc805edcaae','II':'bf82edaa92809dd2fea2b791c38c9728','III':'d2df34b6743f41d3964549fc76c5f1a2',\n\t\t\t\t\t\t\t\t'IV':'23396bb57145d3acde2888947b5b8c3a','V':'09df3c53b12e5fd7d9035cc98ca221a3','X':'988046456f1409dfdb5e26444d84d238',\n\t\t\t\t\t\t\t\t'MtDNA':'48983f530959780de0125f74a87d4fc1'},\n\t\t\t\t\t'dog':\n\t\t\t\t\t\t\t\t{'1':'bef8283c1a36f9aef0e407de2ff6af00','2':'9cc961192bb5e58b3847060c3e9c1cfc','3':'d33263fa2de6666b41e140cb7a8da66c',\n\t\t\t\t\t\t\t\t '4':'cd4ed39ebac1c04800ccf30466ec69f5','5':'c0f48a4a764e58388b48835aca2ec0a4','6':'4b472a2f8d0a53ac75cce04e7dc9279a',\n\t\t\t\t\t\t\t\t '7':'12a61573a0da2c9306fff705bb1c39c1','8':'e22cf22a27560aa8523dc959ddcf6e25','9':'c079a73d719145cdd5c7c93969a1c392',\n\t\t\t\t\t\t\t\t '10':'45805a518147f7846bd0457ca038c8df','11':'f38cda8508463a7607dff14a581ee7b0','12':'adb5de197f58bb827fa01fe924eb3a1d',\n\t\t\t\t\t\t\t\t '13':'055a845ba97baad3b13d4d3359f88290','14':'27f0ba8e47996a058807a3827cf8e4a8','15':'2e9565c687a593eb0acbdd0962bb9255',\n\t\t\t\t\t\t\t\t '16':'89b2225bb78d88b0fd1d38d9514ab0cb','17':'f0378253e2f083e42b665ea202fde3b0','18':'04d124e273f3b54a685ad6526223cd03',\n\t\t\t\t\t\t\t\t '19':'67bae093919e6bb5ab6b9806c739d539','20':'5588387165a2e19c4533012cfb4998f3','21':'371cdf18a545728f7964b9db2fc72d5e',\n\t\t\t\t\t\t\t\t '22':'fbf76865f88a018d93506e036f6a68bc','23':'085145e01d9fd9f0f999fb9e8e8d4400','24':'69b75a9962fb766b447e7d1252cb31ac',\n\t\t\t\t\t\t\t\t '25':'12d5c6677b3e17170c317c1f5532d2a8','26':'13937d18e56b2b93d12fa5fcba48a138','27':'1d03d8ca5f201f4d156f5e1b38f7a67c',\n\t\t\t\t\t\t\t\t '28':'c33395dec7fdc13e9d8f10afaa946f8c','29':'174f2db104ecaa5efef770f44241e3b0','30':'047d420ef9aecb933a7d83b6af820b23',\n\t\t\t\t\t\t\t\t '31':'5be61f0c9944a5f2d7d1a5b2e75fb000','32':'212dcb867e95a642277a243fed8d8e41','33':'08a217b02cdd778cfdb0005dff4828b1',\n\t\t\t\t\t\t\t\t '34':'4245d6fc370d9049ef4c25314fbef239','35':'1344aba8755b8a4e304629180fc0591a','36':'e4fff6ed84777905dc999ca6d6bc2557',\n\t\t\t\t\t\t\t\t '37':'60d51ea6ae9e3f2fa316e3d03aff96b2','38':'4090ff76d94e6b38920916ae3ff2441c','X':'bce1372df64037d79b0995311d8ff971'}}\n\n\t\tchromosome_fasta_path = ref_dir + \"/references/chromosomes/tsb/\"\n\t\tprint(\"Beginning installation. This may take up to 40 minutes to complete.\")\n\t\tif not rsync:\n\t\t\ttry:\n\t\t\t\tif bash:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.system(\"bash -c '\" + 'wget -r -l1 -c -nc --no-parent -nd -P ' + chromosome_fasta_path + 'ftp://alexandrovlab-ftp.ucsd.edu/pub/tools/SigProfilerMatrixGenerator/' + genome + '.tar.gz 2>> install.log' + \"'\")\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"The UCSD ftp site is not responding...pulling from sanger ftp now.\")\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.system(\"bash -c '\" + 'wget -r -l1 -c -nc --no-parent -nd -P ' + chromosome_fasta_path + ' ftp://ngs.sanger.ac.uk/scratch/project/mutographs/SigProf/' + genome + '.tar.gz 2>> install.log' + \"'\")\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"The Sanger ftp site is not responding. 
Please check your internet connection/try again later.\")\n\t\t\t\telse:\n\t\t\t\t\tos.system('wget -r -l1 -c -nc --no-parent -nd -P ' + chromosome_fasta_path + ' ftp://ngs.sanger.ac.uk/scratch/project/mutographs/SigProf/' + genome + '.tar.gz 2>> install.log')\t\t\t\t\t\t\t\t\t\t\n\t\t\t\tos.system(\"tar -xzf \" + ref_dir + \"/references/chromosomes/tsb/\" + genome + \".tar.gz -C \" + ref_dir + \"/references/chromosomes/tsb/\")\n\t\t\t\tos.remove(ref_dir + \"/references/chromosomes/tsb/\" + genome + \".tar.gz\")\n\t\t\texcept:\n\t\t\t\tprint(\"The ensembl ftp site is not currently responding.\")\n\t\t\t\tsys.exit()\n\t\telse:\n\t\t\tprint(\"Direct download for RSYNC is not yet supported\")\n\t\t\tsys.exit()\n\t\t\t# try:\n\t\t\t# \tif bash:\n\t\t\t# \t\tos.system(\"bash -c '\" + \"rsync -av -m --include='*/' rsync://ftp.ngs.sanger.ac.uk/scratch/project/mutographs/SigProf/\" + genome + \".tar.gz \" + chromosome_fasta_path + \" 2>&1>> install.log\" + \"'\")\n\t\t\t# \telse:\n\t\t\t# \t\tos.system(\"rsync -av -m rsync://ftp://ngs.sanger.ac.uk/scratch/project/mutographs/SigProf/\" + genome + \".tar.gz \" + chromosome_fasta_path + \" 2>&1>> install.log\")\n\t\t\t# \tos.system(\"tar -xzf \" + ref_dir + \"/references/chromosomes/tsb/\" + genome + \".tar.gz -C \" + ref_dir + \"/references/chromosomes/tsb/\")\n\t\t\t# \tos.remove(ref_dir + \"/references/chromosomes/tsb/\" + genome + \".tar.gz\")\n\t\t\t# except:\n\t\t\t# \tprint(\"The ensembl ftp site is not currently responding.\")\n\t\t\t# \tsys.exit()\n\n\t\tchromosome_TSB_path = chromosome_fasta_path + genome + \"/\"\n\t\tcorrupt = False\n\t\tfor files in os.listdir(chromosome_TSB_path):\n\t\t\tif \"proportions\" in files:\n\t\t\t\tcontinue\n\t\t\tif \".DS_Store\" in files:\n\t\t\t\tcontinue\n\t\t\tchrom = files.split(\".\")\n\t\t\tchrom = chrom[0]\n\t\t\tcheck = md5(chromosome_TSB_path + files)\n\t\t\tif check_sum[genome][chrom] != check:\n\t\t\t\tcorrupt = True\n\t\t\t\tos.remove(chromosome_TSB_path + files)\n\t\t\t\tprint(\"[DEBUG] Chromosome \" + chrom + \" md5sum did not match => reference md5sum: \" + str(check_sum[genome][chrom]) + \" new file md5sum: \" + str(check))\n\t\tif corrupt:\n\t\t\tprint(\"The transcriptional reference data appears to be corrupted. Please reinstall the \" + genome + \" genome.\")\n\t\t\tsys.exit()\n\t\tprint(\"The transcriptional reference data for \" + genome + \" has been saved.\")\n\n\n\telse:\n\t\tprint(\"Beginning installation. 
This may take up to 20 minutes to complete.\")\n\t\tfirst_path = os.getcwd()\n\t\tref_dir = os.path.dirname(os.path.abspath(__file__))\n\t\tos.chdir(ref_dir)\n\n\t\tprint(\"[DEBUG] Path to SigProfilerMatrixGenerator used for the install: \", ref_dir)\n\n\t\tgenomes = [genome]\n\n\t\tif os.path.exists(\"install.log\"):\n\t\t\tos.remove(\"install.log\")\n\n\t\t# ref_dir += \"/references/\"\n\t\t# chrom_string_dir = ref_dir + \"chromosomes/chrom_string/\"\n\t\t# chrom_fasta_dir = ref_dir + \"chromosomes/fasta/\"\n\t\t# chrom_tsb_dir = ref_dir + \"chromosomes/tsb/\"\n\t\t# matrix_dir = ref_dir + \"matrix/\"\n\t\t# vcf_dir = ref_dir + \"vcf_files/\"\n\t\t# bed_dir = ref_dir + \"vcf_files/BED/\"\n\t\t# log_dir = \"logs/\"\n\t\t# new_dirs = [ref_dir, chrom_string_dir, chrom_fasta_dir, chrom_tsb_dir, matrix_dir, vcf_dir, bed_dir, log_dir]\n\n\t\t# for dirs in new_dirs:\n\t\t# \tif not os.path.exists(dirs):\n\t\t# \t\tos.makedirs(dirs)\n\n\t\tinstall_chromosomes(genomes, ref_dir, custom, rsync, bash)\n\t\tinstall_chromosomes_tsb (genomes, ref_dir, custom)\n\n\tif os.path.exists(\"BRCA_example/\"):\n\t\tshutil.copy(\"BRCA_example/\", \"references/vcf_files/\")\n\tif os.path.exists(\"example_test\"):\n\t\tshutil.copy(\"example_test/\", \"references/vcf_files/\")\n\tif os.path.exists(\"context_distributions/\"):\n\t\tshutil.copy(\"context_distributions/\", \"references/chromosomes/\")\n\n\tprint(\"All reference files have been created.\")\n\tif genome != \"rn6\" and genome != 'dog' and genome != 'c_elegans':\n\t\tprint(\"Verifying and benchmarking installation now...\")\n\t\tbenchmark(genome, ref_dir)\n\n\tprint (\"To proceed with matrix_generation, please provide the path to your vcf files and an appropriate output path.\")\n\tshutil.rmtree(chrom_string_dir)\n\tprint(\"Installation complete.\")\n\tos.chdir(first_path)\n\ndef main ():\n\n\tfirst_path= os.getcwd()\n\tos.chdir(first_path + \"/sigProfilerMatrixGenerator/\")\n\tgenomes = ['mm9', 'mm10','GRCh37', 'GRCh38' ]\n\t#genomes = ['GRCh37']\n\tcustom = False\n\tparser = argparse.ArgumentParser(description=\"Provide the necessary arguments to install the reference files.\")\n\tparser.add_argument(\"-g\", \"--genome\", nargs='?', help=\"Optional parameter instructs script to install the custom genome.\")\n\tparser.add_argument(\"-ct\", \"--custom\", help=\"Optional parameter instructs script to create the reference files for a custom genome\", action='store_true')\n\targs = parser.parse_args()\n\n\tif args.genome:\n\t\tgenomes = [args.genome]\n\tif args.custom:\n\t\tcustom = True\n\n\tif os.path.exists(\"install.log\"):\n\t\tos.system(\"rm install.log\")\n\n\tref_dir = \"references/\"\n\tchrom_string_dir = ref_dir + \"chromosomes/chrom_string/\"\n\tchrom_fasta_dir = ref_dir + \"chromosomes/fasta/\"\n\tchrom_tsb_dir = ref_dir + \"chromosomes/tsb/\"\n\tmatrix_dir = ref_dir + \"matrix/\"\n\tvcf_dir = ref_dir + \"vcf_files/\"\n\tbed_dir = ref_dir + \"vcf_files/BED/\"\n\tlog_dir = \"logs/\"\n\tnew_dirs = [ref_dir, chrom_string_dir, chrom_fasta_dir, chrom_tsb_dir, matrix_dir, vcf_dir, bed_dir, log_dir]\n\n\tcurrent_dir = os.getcwd()\n\tfor dirs in new_dirs:\n\t\tif not os.path.exists(dirs):\n\t\t\tos.makedirs(dirs)\n\n\n\tinstall_chromosomes(genomes, ref_dir, custom)\n\tinstall_chromosomes_tsb (genomes, ref_dir, custom)\n\t#install_chromosomes_tsb_BED (genomes, custom, ref_dir)\n\tif os.path.exists(\"BRCA_example/\"):\n\t\tos.system(\"mv BRCA_example/ references/vcf_files/\")\n\tif os.path.exists(\"example_test\"):\n\t\tos.system(\"mv example_test/ 
references/vcf_files/\")\n\tif os.path.exists(\"context_distributions/\"):\n\t\tos.system(\"mv context_distributions/ references/chromosomes/\")\n\n\t\n\tif os.path.exists(chrom_tsb_dir + \"GRCh37/\"):\n\t\tprint(\"All reference files have been created.\\nVerifying and benchmarking installation now...\")\n\t\tbenchmark(ref_dir)\n\telse:\n\t\tprint(\"All reference files have been created.\")\n\tprint (\"Please place your vcf files for each sample into the 'references/vcf_files/[test]/[mutation_type]/' directory. Once you have done that, you can proceed with the matrix generation.\")\n\t#os.system(\"rm -r \" + chrom_string_dir)\n\tprint(\"Installation complete.\")\n\tos.chdir(first_path)\n\nif __name__ == '__main__':\n\tmain()" ]
[ [ "pandas.read_csv", "scipy.spatial.distance.cosine" ] ]
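A minimal sketch of the checksum check used in the install routine above, assuming a hypothetical md5() helper equivalent to the one the script calls (presumably defined elsewhere in the same file); the file path in the commented assertion is illustrative only.

import hashlib

def md5(path, chunk_size=1 << 20):
    # Stream the file in chunks so large chromosome files need not fit in memory.
    digest = hashlib.md5()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# check_sum[genome][chrom] above holds the reference digests; a mismatch marks the
# downloaded chromosome file as corrupt and aborts the install.
# assert md5("references/chromosomes/tsb/GRCh37/1.txt") == "a7d51305e943cf06ff2029146bd91bca"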
AnaharaYasuo/mlPractice
[ "1a3d110fdc6cf4084ee6b1268d215151de5939cb" ]
[ "src/subplot1.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == \"__main__\":\n x = np.linspace(-5, 5, 300)\n sin_x = np.sin(x)\n cos_x = np.cos(x)\n \n flg, aexs = plt.subplots(2, 1)\n aexs[0].set_ylim([-1.5,1.5])\n aexs[1].set_ylim([-1.5,1.5])\n aexs[0].plot(x,sin_x,color=\"r\") \n aexs[1].plot(x,cos_x,color=\"k\")\n \n plt.show()" ]
[ [ "numpy.cos", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.sin", "numpy.linspace" ] ]
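A minimal runnable restatement of the two-panel sine/cosine figure built in subplot1.py above, using conventional fig/axes names; it is a sketch, not the archived file itself.

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-5, 5, 300)

fig, axes = plt.subplots(2, 1)        # two stacked panels in one figure
axes[0].plot(x, np.sin(x), color="r")
axes[1].plot(x, np.cos(x), color="k")
for ax in axes:
    ax.set_ylim([-1.5, 1.5])          # same y-range on both panels

plt.show()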
Saran-nns/sorn
[ "619772c508b88aa711780ab9155fe5d0aa5214eb" ]
[ "sorn/utils.py" ]
[ "from __future__ import division\nimport numpy as np\nfrom scipy.stats import norm\nimport random\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy.optimize import curve_fit\nfrom scipy import stats\nimport networkx as nx\nimport pandas as pd\nfrom mpl_toolkits.axes_grid1.inset_locator import InsetPosition\n\n\nclass Initializer(object):\n \"\"\"\n Helper class to initialize the matrices for the SORN\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def generate_strong_inp(length: int, reservoir_size: int):\n \"\"\"Generate strong one-hot vector of input. Random neurons in the reservoir acts as inputs\n\n Args:\n length (int): Number of input neurons\n\n Returns:\n inp (array): Input vector of length equals the number of neurons in the reservoir\n with randomly chosen neuron set active\n\n idx (list): List of chosen input neurons \"\"\"\n\n inp = [0] * reservoir_size\n x = [0] * length\n idx = np.random.choice(length, np.random.randint(reservoir_size))\n\n for i in idx:\n x[i] = 1.0e4\n\n inp[: len(x)] = x\n\n return inp, idx\n\n # Generate multi-node one-hot strong inputs\n\n @staticmethod\n def multi_one_hot_inp(ne: int, inputs: list, n_nodes_per_inp: int):\n \"\"\"Generate multi(n_nodes_per_inp) one hot vector for each input.\n For each input, set n_nodes_per_inp equals one and the rest of\n neurons in the pool recieves no external stimuli\n\n Args:\n ne (int): Number of excitatory units in sorn\n\n inputs (list): input labels\n\n n_nodes_per_inp(int): Number of target units in pool that receives single input\n\n Returns:\n one_hot_vector for each label with length equals ne\n\n \"\"\"\n\n one_hot = np.zeros((ne, len(inputs)))\n\n idxs = []\n\n for _ in range(n_nodes_per_inp):\n idxs.append(random.sample(range(0, ne), len(inputs)))\n\n idxs = list(zip(*idxs))\n\n j = 0 # Max(j) = len(inputs)\n for idx_list in idxs:\n for i in idx_list:\n one_hot[i][j] = 1\n j += 1\n\n return one_hot, idxs\n\n @staticmethod\n def generate_gaussian_inputs(length: int, reservoir_size: int):\n\n \"\"\"Generate external stimuli sampled from Gaussian distribution.\n Randomly neurons in the reservoir receives this input at each timestep\n\n Args:\n length (int): Number of input neurons\n\n Returns:\n out (array): Input vector of length equals the number of neurons in the reservoir\n with randomly chosen neuron set active\n\n idx (int): List of chosen input neurons\n \"\"\"\n\n out = [0] * reservoir_size\n x = [0] * length\n idx = np.random.choice(length, np.random.randint(reservoir_size))\n inp = np.random.normal(length)\n\n for i in idx:\n x[i] = inp[i]\n\n out[: len(x)] = x\n\n return out, idx\n\n @staticmethod\n def normalize_weight_matrix(weight_matrix: np.array):\n\n # Applied only while initializing the weight. 
During simulation, Synaptic scaling applied on weight matrices\n\n \"\"\" Normalize the weights in the matrix such that incoming connections to a neuron sum up to 1\n\n Args:\n weight_matrix (array): Incoming Weights from W_ee or W_ei or W_ie\n\n Returns:\n weight_matrix (array): Normalized weight matrix\"\"\"\n\n normalized_weight_matrix = weight_matrix / np.sum(weight_matrix, axis=0)\n\n return normalized_weight_matrix\n\n @staticmethod\n def generate_lambd_connections(\n synaptic_connection: str, ne: int, ni: int, lambd_w: int, lambd_std: int\n ):\n\n \"\"\"Generate lambda incoming connections for Excitatory neurons and outgoing connections per Inhibitory neuron\n\n Args:\n synaptic_connection (str): Type of sysnpatic connection (EE,EI or IE)\n\n ne (int): Number of excitatory units\n\n ni (int): Number of inhibitory units\n\n lambd_w (int): Average number of incoming connections\n\n lambd_std (int): Standard deviation of average number of connections per neuron\n\n Returns:\n connection_weights (array) - Weight matrix\n\n \"\"\"\n\n if synaptic_connection == \"EE\":\n\n \"\"\"Choose random lamda connections per neuron\"\"\"\n\n # Draw normally distributed ne integers with mean lambd_w\n\n lambdas_incoming = norm.ppf(\n np.random.random(ne), loc=lambd_w, scale=lambd_std\n ).astype(int)\n\n # lambdas_outgoing = norm.ppf(np.random.random(ne), loc=lambd_w, scale=lambd_std).astype(int)\n\n # List of neurons\n\n list_neurons = list(range(ne))\n\n # Connection weights\n\n connection_weights = np.zeros((ne, ne))\n\n # For each lambd value in the above list,\n # generate weights for incoming and outgoing connections\n\n # -------------Gaussian Distribution of weights --------------\n\n # weight_matrix = np.random.randn(Sorn.ne, Sorn.ni) + 2 # Small random values from gaussian distribution\n # Centered around 2 to make all values positive\n\n # ------------Uniform Distribution --------------------------\n global_incoming_weights = np.random.uniform(0.0, 0.1, sum(lambdas_incoming))\n\n # Index Counter\n global_incoming_weights_idx = 0\n\n # Choose the neurons in order [0 to 199]\n\n for neuron in list_neurons:\n\n # Choose ramdom unique (lambdas[neuron]) neurons from list_neurons\n possible_connections = list_neurons.copy()\n\n possible_connections.remove(\n neuron\n ) # Remove the selected neuron from possible connections i!=j\n\n # Choose random presynaptic neurons\n possible_incoming_connections = random.sample(\n possible_connections, lambdas_incoming[neuron]\n )\n\n incoming_weights_neuron = global_incoming_weights[\n global_incoming_weights_idx : global_incoming_weights_idx\n + lambdas_incoming[neuron]\n ]\n\n # ---------- Update the connection weight matrix ------------\n\n # Update incoming connection weights for selected 'neuron'\n\n for incoming_idx, incoming_weight in enumerate(incoming_weights_neuron):\n connection_weights[possible_incoming_connections[incoming_idx]][\n neuron\n ] = incoming_weight\n\n global_incoming_weights_idx += lambdas_incoming[neuron]\n\n return connection_weights\n\n if synaptic_connection == \"EI\":\n\n \"\"\"Choose random lamda connections per neuron\"\"\"\n\n # Draw normally distributed ni integers with mean lambd_w\n lambdas = norm.ppf(\n np.random.random(ni), loc=lambd_w, scale=lambd_std\n ).astype(int)\n\n # List of neurons\n\n list_neurons = list(range(ni)) # Each i can connect with random ne neurons\n\n # Initializing connection weights variable\n\n connection_weights = np.zeros((ni, ne))\n\n # ------------Uniform Distribution 
-----------------------------\n global_outgoing_weights = np.random.uniform(0.0, 0.1, sum(lambdas))\n\n # Index Counter\n global_outgoing_weights_idx = 0\n\n # Choose the neurons in order [0 to 40]\n\n for neuron in list_neurons:\n\n # Choose random unique (lambdas[neuron]) neurons from list_neurons\n possible_connections = list(range(ne))\n\n possible_outgoing_connections = random.sample(\n possible_connections, lambdas[neuron]\n ) # possible_outgoing connections to the neuron\n\n # Update weights\n outgoing_weights = global_outgoing_weights[\n global_outgoing_weights_idx : global_outgoing_weights_idx\n + lambdas[neuron]\n ]\n\n # ---------- Update the connection weight matrix ------------\n\n # Update outgoing connections for the neuron\n\n for outgoing_idx, outgoing_weight in enumerate(\n outgoing_weights\n ): # Update the columns in the connection matrix\n connection_weights[neuron][\n possible_outgoing_connections[outgoing_idx]\n ] = outgoing_weight\n\n # Update the global weight values index\n global_outgoing_weights_idx += lambdas[neuron]\n\n return connection_weights\n\n @staticmethod\n def get_incoming_connection_dict(weights: np.array):\n \"\"\" Get the non-zero entries in columns is the incoming connections for the neurons\n\n Args:\n weights (np.array): Connection/Synaptic weights\n\n Returns:\n dict : Dictionary of incoming connections to each neuron\n \"\"\"\n\n # Indices of nonzero entries in the columns\n connection_dict = dict.fromkeys(range(1, len(weights) + 1), 0)\n\n for i in range(len(weights[0])): # For each neuron\n connection_dict[i] = list(np.nonzero(weights[:, i])[0])\n\n return connection_dict\n\n @staticmethod\n def get_outgoing_connection_dict(weights: np.array):\n \"\"\"Get the non-zero entries in rows is the outgoing connections for the neurons\n\n Args:\n weights (np.array): Connection/Synaptic weights\n\n Returns:\n dict : Dictionary of outgoing connections from each neuron\n \"\"\"\n\n # Indices of nonzero entries in the rows\n connection_dict = dict.fromkeys(range(1, len(weights) + 1), 1)\n\n for i in range(len(weights[0])): # For each neuron\n connection_dict[i] = list(np.nonzero(weights[i, :])[0])\n\n return connection_dict\n\n @staticmethod\n def prune_small_weights(weights: np.array, cutoff_weight: float):\n \"\"\"Prune the connections with negative connection strength. 
The weights less than cutoff_weight set to 0\n\n Args:\n weights (np.array): Synaptic strengths\n\n cutoff_weight (float): Lower weight threshold\n\n Returns:\n array: Connections weights with values less than cutoff_weight set to 0\n \"\"\"\n\n weights[weights <= cutoff_weight] = cutoff_weight\n\n return weights\n\n @staticmethod\n def set_max_cutoff_weight(weights: np.array, cutoff_weight: float):\n \"\"\" Set cutoff limit for the values in given array\n\n Args:\n weights (np.array): Synaptic strengths\n\n cutoff_weight (float): Higher weight threshold\n\n Returns:\n array: Connections weights with values greater than cutoff_weight set to 1\n \"\"\"\n\n weights[weights > cutoff_weight] = cutoff_weight\n\n return weights\n\n @staticmethod\n def get_unconnected_indexes(wee: np.array):\n \"\"\" Helper function for Structural plasticity to randomly select the unconnected units\n\n Args:\n wee (array): Weight matrix\n\n Returns:\n list (indices): (row_idx,col_idx)\"\"\"\n\n i, j = np.where(wee <= 0.0)\n indices = list(zip(i, j))\n\n self_conn_removed = []\n for i, idxs in enumerate(indices):\n\n if idxs[0] != idxs[1]:\n self_conn_removed.append(indices[i])\n\n return self_conn_removed\n\n @staticmethod\n def white_gaussian_noise(mu: float, sigma: float, t: int):\n\n \"\"\"Generates white gaussian noise with mean mu, standard deviation sigma and\n the noise length equals t\n\n Args:\n mu (float): Mean value of Gaussian noise\n\n sigma (float): Standard deviation of Gaussian noise\n\n t (int): Length of noise vector\n\n Returns:\n array: White gaussian noise of length t\n \"\"\"\n\n noise = np.random.normal(mu, sigma, t)\n\n return np.expand_dims(noise, 1)\n\n @staticmethod\n def zero_sum_incoming_check(weights: np.array):\n \"\"\"Make sure, each neuron in the pool has atleast 1 incoming connection\n\n Args:\n weights (array): Synaptic strengths\n\n Returns:\n array: Synaptic weights of neurons with atleast one positive (non-zero) incoming connection strength\n \"\"\"\n zero_sum_incomings = np.where(np.sum(weights, axis=0) == 0.0)\n if len(zero_sum_incomings[-1]) == 0:\n return weights\n else:\n for zero_sum_incoming in zero_sum_incomings[-1]:\n\n rand_indices = np.random.randint(\n int(weights.shape[0] * 0.2), size=2\n )\n rand_values = np.random.uniform(0.0, 0.1, 2)\n\n for i, idx in enumerate(rand_indices):\n weights[:, zero_sum_incoming][idx] = rand_values[i]\n\n return weights\n\n\nclass Plotter(object):\n \"\"\"Wrapper class to call plotting methods\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def hist_incoming_conn(\n weights: np.array, bin_size: int, histtype: str, savefig: bool\n ):\n \"\"\"Plot the histogram of number of presynaptic connections per neuron\n\n Args:\n weights (array): Connection weights\n\n bin_size (int): Histogram bin size\n\n histtype (str): Same as histtype matplotlib\n\n savefig (bool): If True plot will be saved as png file in the cwd\n\n Returns:\n plot (matplotlib.pyplot): plot object\n \"\"\"\n num_incoming_weights = np.sum(np.array(weights) > 0, axis=0)\n\n plt.figure(figsize=(12, 5))\n plt.xlabel(\"Number of connections\")\n plt.ylabel(\"Probability\")\n\n # Fit a normal distribution to the data\n mu, std = norm.fit(num_incoming_weights)\n plt.hist(num_incoming_weights, bins=bin_size, density=True, alpha=0.6, color='b')\n\n # PDF\n xmin, xmax = plt.xlim()\n x = np.linspace(xmin, xmax, max(num_incoming_weights))\n p = norm.pdf(x, mu, std)\n plt.plot(x, p, 'k', linewidth=2)\n title = \"Distribution of presynaptic connections: mu = %.2f, std = 
%.2f\" % (mu, std)\n plt.title(title)\n\n if savefig:\n plt.savefig(\"hist_incoming_conn\")\n\n return plt.show()\n\n\n @staticmethod\n def hist_outgoing_conn(\n weights: np.array, bin_size: int, histtype: str, savefig: bool\n ):\n \"\"\"Plot the histogram of number of incoming connections per neuron\n\n Args:\n weights (array): Connection weights\n\n bin_size (int): Histogram bin size\n\n histtype (str): Same as histtype matplotlib\n\n savefig (bool): If True plot will be saved as png file in the cwd\n\n Returns:\n plot object \"\"\"\n\n # Plot the histogram of distribution of number of incoming connections in the network\n\n num_outgoing_weights = np.sum(np.array(weights) > 0, axis=1)\n\n plt.figure(figsize=(12, 5))\n plt.xlabel(\"Number of connections\")\n plt.ylabel(\"Probability\")\n\n # Fit a normal distribution to the data\n mu, std = norm.fit(num_outgoing_weights)\n plt.hist(num_outgoing_weights, bins=bin_size, density=True, alpha=0.6, color='b')\n\n # PDF\n xmin, xmax = plt.xlim()\n x = np.linspace(xmin, xmax, max(num_outgoing_weights))\n p = norm.pdf(x, mu, std)\n plt.plot(x, p, 'k', linewidth=2)\n title = \"Distribution of post synaptic connections: mu = %.2f, std = %.2f\" % (mu, std)\n plt.title(title)\n\n if savefig:\n plt.savefig(\"hist_outgoing_conn\")\n\n return plt.show()\n\n @staticmethod\n def network_connection_dynamics(\n connection_counts: np.array, savefig: bool\n ):\n \"\"\"Plot number of positive connection in the excitatory pool\n\n Args:\n connection_counts (array) - 1D Array of number of connections in the network per time step\n\n savefig (bool) - If True plot will be saved as png file in the cwd\n\n Returns:\n plot object\n \"\"\"\n\n # Plot graph for entire simulation time period\n _, ax1 = plt.subplots(figsize=(12, 5))\n ax1.plot(connection_counts, label=\"Connection dynamics\")\n plt.margins(x=0)\n ax1.set_xticks(ax1.get_xticks()[::2])\n\n ax1.set_title(\"Network connection dynamics\")\n plt.ylabel(\"Number of active connections\")\n plt.xlabel(\"Time step\")\n plt.legend(loc=\"upper right\")\n plt.tight_layout()\n\n if savefig:\n plt.savefig(\"connection_dynamics\")\n\n return plt.show()\n\n @staticmethod\n def hist_firing_rate_network(spike_train: np.array, bin_size: int, savefig: bool):\n\n \"\"\" Plot the histogram of firing rate (total number of neurons spike at each time step)\n\n Args:\n spike_train (array): Array of spike trains\n\n bin_size (int): Histogram bin size\n\n savefig (bool): If True, plot will be saved in the cwd\n\n Returns:\n plot object \"\"\"\n\n fr = np.count_nonzero(spike_train.tolist(), 1)\n\n # Filter zero entries in firing rate list above\n fr = list(filter(lambda a: a != 0, fr))\n plt.title(\"Distribution of population activity without inactive time steps\")\n plt.xlabel(\"Spikes/time step\")\n plt.ylabel(\"Count\")\n\n plt.hist(fr, bin_size)\n\n if savefig:\n plt.savefig(\"hist_firing_rate_network.png\")\n\n return plt.show()\n\n @staticmethod\n def scatter_plot(spike_train: np.array, savefig: bool):\n\n \"\"\"Scatter plot of spike trains\n\n Args:\n spike_train (list): Array of spike trains\n\n with_firing_rates (bool): If True, firing rate of the network will be plotted\n\n savefig (bool): If True, plot will be saved in the cwd\n\n Returns:\n plot object\"\"\"\n\n # Conver the list of spike train into array\n spike_train = np.asarray(spike_train)\n # Get the indices where spike_train is 1\n x, y = np.argwhere(spike_train.T == 1).T\n\n plt.figure(figsize=(8, 5))\n\n firing_rates = 
Statistics.firing_rate_network(spike_train).tolist()\n plt.plot(firing_rates, label=\"Firing rate\")\n plt.legend(loc=\"upper left\")\n\n plt.scatter(y, x, s=0.1, color=\"black\")\n plt.title('Spike Trains')\n plt.xlabel(\"Time step\")\n plt.ylabel(\"Neuron\")\n plt.legend(loc=\"upper left\")\n\n if savefig:\n plt.savefig(\"ScatterSpikeTrain.png\")\n return plt.show()\n\n @staticmethod\n def raster_plot(spike_train: np.array, savefig: bool):\n\n \"\"\"Raster plot of spike trains\n\n Args:\n spike_train (array): Array of spike trains\n\n with_firing_rates (bool): If True, firing rate of the network will be plotted\n\n savefig (bool): If True, plot will be saved in the cwd\n\n Returns:\n plot object\"\"\"\n\n # Conver the list of spike train into array\n spike_train = np.asarray(spike_train)\n\n plt.figure(figsize=(11, 6))\n\n firing_rates = Statistics.firing_rate_network(spike_train).tolist()\n plt.plot(firing_rates, label=\"Firing rate\")\n plt.legend(loc=\"upper left\")\n plt.title('Spike Trains')\n # Get the indices where spike_train is 1\n x, y = np.argwhere(spike_train.T == 1).T\n\n plt.plot(y, x, \"|r\")\n plt.xlabel(\"Time step\")\n plt.ylabel(\"Neuron\")\n\n if savefig:\n plt.savefig(\"RasterSpikeTrain.png\")\n return plt.show()\n\n @staticmethod\n def correlation(corr: np.array, savefig: bool):\n\n \"\"\"Plot correlation between neurons\n\n Args:\n corr (array): Correlation matrix\n\n savefig (bool): If true will save the plot at the current working directory\n\n Returns:\n matplotlib.pyplot: Neuron Correlation plot\n \"\"\"\n\n # Generate a mask for the upper triangle\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n\n f, ax = plt.subplots(figsize=(11, 9))\n\n # Custom diverging colormap\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n sns.heatmap(\n corr,\n mask=mask,\n cmap=cmap,\n xticklabels=5,\n yticklabels=5,\n vmax=0.1,\n center=0,\n square=False,\n linewidths=0.0,\n cbar_kws={\"shrink\": 0.9},\n )\n\n if savefig:\n plt.savefig(\"Correlation between neurons\")\n return None\n\n @staticmethod\n def isi_exponential_fit(\n spike_train: np.array, neuron: int, bin_size: int, savefig: bool\n ):\n\n \"\"\"Plot Exponential fit on the inter-spike intervals during training or simulation phase\n\n Args:\n spike_train (array): Array of spike trains\n\n neuron (int): Target neuron\n\n bin_size (int): Spike train will be splitted into bins of size bin_size\n\n savefig (bool): If True, plot will be saved in the cwd\n\n Returns:\n plot object\"\"\"\n\n\n isi = Statistics.spike_time_intervals(spike_train[:,neuron])\n\n y, x = np.histogram(sorted(isi), bins=bin_size)\n\n x = [int(i) for i in x]\n y = [float(i) for i in y]\n\n def exponential_func(y, a, b, c):\n return a * np.exp(-b * np.array(y)) - c\n\n # Curve fit\n popt, _ = curve_fit(exponential_func, x[1:bin_size], y[1:bin_size])\n\n plt.plot(\n x[1:bin_size],\n exponential_func(x[1:bin_size], *popt),\n label=\"Exponential fit\",\n )\n plt.title('Distribution of Inter Spike Intervals and Exponential Curve Fit')\n plt.scatter(x[1:bin_size], y[1:bin_size], s=2.0, color=\"black\", label=\"ISI\")\n plt.xlabel(\"ISI\")\n plt.ylabel(\"Frequency\")\n plt.legend()\n\n if savefig:\n plt.savefig(\"isi_exponential_fit\")\n return plt.show()\n\n @staticmethod\n def weight_distribution(weights: np.array, bin_size: int, savefig: bool):\n\n \"\"\"Plot the distribution of synaptic weights\n\n Args:\n weights (array): Connection weights\n\n bin_size (int): Spike train will be splited into bins of size 
bin_size\n\n savefig (bool): If True, plot will be saved in the cwd\n\n Returns:\n plot object\"\"\"\n\n weights = weights[\n weights >= 0.01\n ] # Remove the weight values less than 0.01 # As reported in article SORN 2013\n y, x = np.histogram(weights, bins=bin_size) # Create histogram with bin_size\n plt.title('Synaptic weight distribution')\n plt.scatter(x[:-1], y, s=2.0, c=\"black\")\n plt.xlabel(\"Connection strength\")\n plt.ylabel(\"Frequency\")\n\n if savefig:\n plt.savefig(\"weight_distribution\")\n\n return plt.show()\n\n @staticmethod\n def linear_lognormal_fit(weights: np.array, num_points: int, savefig: bool):\n\n \"\"\"Lognormal curve fit on connection weight distribution\n\n Args:\n weights (array): Connection weights\n\n num_points(int): Number of points to be plotted in the x axis\n\n savefig(bool): If True, plot will be saved in the cwd\n\n Returns:\n plot object\"\"\"\n\n weights = np.array(weights.tolist())\n weights = weights[weights >= 0.01]\n\n M = float(np.mean(weights)) # Geometric mean\n s = float(np.std(weights)) # Geometric standard deviation\n\n # Lognormal distribution parameters\n\n mu = float(np.mean(np.log(weights))) # Mean of log(X)\n sigma = float(np.std(np.log(weights))) # Standard deviation of log(X)\n shape = sigma # Scipy's shape parameter\n scale = np.exp(mu) # Scipy's scale parameter\n median = np.exp(mu)\n\n mode = np.exp(mu - sigma ** 2) # Note that mode depends on both M and s\n mean = np.exp(mu + (sigma ** 2 / 2)) # Note that mean depends on both M and s\n x = np.linspace(\n np.min(weights), np.max(weights), num=num_points\n )\n\n pdf = stats.lognorm.pdf(\n x, shape, loc=0, scale=scale\n )\n\n plt.figure(figsize=(12, 4.5))\n plt.title('Curve fit on connection weight distribution')\n # Figure on linear scale\n plt.subplot(121)\n plt.plot(x, pdf)\n\n plt.vlines(mode, 0, pdf.max(), linestyle=\":\", label=\"Mode\")\n plt.vlines(\n mean,\n 0,\n stats.lognorm.pdf(mean, shape, loc=0, scale=scale),\n linestyle=\"--\",\n color=\"green\",\n label=\"Mean\",\n )\n plt.vlines(\n median,\n 0,\n stats.lognorm.pdf(median, shape, loc=0, scale=scale),\n color=\"blue\",\n label=\"Median\",\n )\n plt.ylim(ymin=0)\n plt.xlabel(\"Weight\")\n plt.title(\"Linear scale\")\n plt.legend()\n\n # Figure on logarithmic scale\n plt.subplot(122)\n plt.semilogx(x, pdf)\n\n plt.vlines(mode, 0, pdf.max(), linestyle=\":\", label=\"Mode\")\n plt.vlines(\n mean,\n 0,\n stats.lognorm.pdf(mean, shape, loc=0, scale=scale),\n linestyle=\"--\",\n color=\"green\",\n label=\"Mean\",\n )\n plt.vlines(\n median,\n 0,\n stats.lognorm.pdf(median, shape, loc=0, scale=scale),\n color=\"blue\",\n label=\"Median\",\n )\n plt.ylim(ymin=0)\n plt.xlabel(\"Weight\")\n plt.title(\"Logarithmic scale\")\n plt.legend()\n\n if savefig:\n plt.savefig(\"LinearLognormalFit\")\n\n return plt.show()\n\n @staticmethod\n def plot_network(corr: np.array, corr_thres: float, fig_name: str = None):\n\n \"\"\"Network x graphical visualization of the network using the correlation matrix\n\n Args:\n corr (array): Correlation between neurons\n\n corr_thres (array): Threshold to prune the connection. Smaller the threshold,\n higher the density of connections\n\n fig_name (array, optional): Name of the figure. 
Defaults to None.\n\n Returns:\n matplotlib.pyplot: Plot instance\n \"\"\"\n\n df = pd.DataFrame(corr)\n\n links = df.stack().reset_index()\n links.columns = [\"var1\", \"var2\", \"value\"]\n links_filtered = links.loc[\n (links[\"value\"] > corr_thres) & (links[\"var1\"] != links[\"var2\"])\n ]\n\n G = nx.from_pandas_edgelist(links_filtered, \"var1\", \"var2\")\n\n plt.figure(figsize=(50, 50))\n nx.draw(\n G,\n with_labels=True,\n node_color=\"orange\",\n node_size=50,\n linewidths=5,\n font_size=10,\n )\n plt.text(0.1, 0.9, \"%s\" % corr_thres)\n plt.savefig(\"%s\" % fig_name)\n plt.show()\n\n @staticmethod\n def hamming_distance(hamming_dist: list, savefig: bool):\n \"\"\"Hamming distance between true netorks states and perturbed network states\n\n Args:\n hamming_dist (list): Hamming distance values\n\n savefig (bool): If True, save the fig at current working directory\n\n Returns:\n matplotlib.pyplot: Hamming distance between true and perturbed network states\n \"\"\"\n\n plt.figure(figsize=(15, 6))\n plt.title(\"Hamming distance between actual and perturbed states\")\n plt.xlabel(\"Time steps\")\n plt.ylabel(\"Hamming distance\")\n plt.plot(hamming_dist)\n\n if savefig:\n plt.savefig(\"HammingDistance\")\n\n return plt.show()\n\n\nclass Statistics(object):\n \"\"\" Wrapper class for statistical analysis methods \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n def firing_rate_neuron(spike_train: np.array, neuron: int, bin_size: int):\n\n \"\"\"Measure spike rate of given neuron during given time window\n\n Args:\n spike_train (array): Array of spike trains\n\n neuron (int): Target neuron in the reservoir\n\n bin_size (int): Divide the spike trains into bins of size bin_size\n\n Returns:\n int: firing_rate \"\"\"\n\n time_period = len(spike_train[:, 0])\n\n neuron_spike_train = spike_train[:, neuron]\n\n # Split the list(neuron_spike_train) into sub lists of length time_step\n samples_spike_train = [\n neuron_spike_train[i : i + bin_size]\n for i in range(0, len(neuron_spike_train), bin_size)\n ]\n\n spike_rate = 0.0\n\n for _, spike_train in enumerate(samples_spike_train):\n spike_rate += list(spike_train).count(1.0)\n\n spike_rate = spike_rate * bin_size / time_period\n\n return time_period, bin_size, spike_rate\n\n @staticmethod\n def firing_rate_network(spike_train: np.array):\n\n \"\"\"Calculate number of neurons spikes at each time step.Firing rate of the network\n\n Args:\n spike_train (array): Array of spike trains\n\n Returns:\n int: firing_rate \"\"\"\n\n firing_rate = np.count_nonzero(spike_train.tolist(), 1)\n\n return firing_rate\n\n @staticmethod\n def scale_dependent_smoothness_measure(firing_rates: list):\n\n \"\"\"Smoothem the firing rate depend on its scale. Smaller values corresponds to smoother series\n\n Args:\n firing_rates (list): List of number of active neurons per time step\n\n Returns:\n sd_diff (list): Float value signifies the smoothness of the semantic changes in firing rates\n \"\"\"\n\n diff = np.diff(firing_rates)\n sd_diff = np.std(diff)\n\n return sd_diff\n\n @staticmethod\n def scale_independent_smoothness_measure(firing_rates: list):\n\n \"\"\"Smoothem the firing rate independent of its scale. 
Smaller values corresponds to smoother series\n\n Args:\n firing_rates (list): List of number of active neurons per time step\n\n Returns:\n coeff_var (list):Float value signifies the smoothness of the semantic changes in firing rates \"\"\"\n\n diff = np.diff(firing_rates)\n mean_diff = np.mean(diff)\n sd_diff = np.std(diff)\n\n coeff_var = sd_diff / abs(mean_diff)\n\n return coeff_var\n\n @staticmethod\n def autocorr(firing_rates: list, t: int = 2):\n \"\"\"\n Score interpretation\n - scores near 1 imply a smoothly varying series\n - scores near 0 imply that there's no overall linear relationship between a data point and the following one (that is, plot(x[-length(x)],x[-1]) won't give a scatter plot with any apparent linearity)\n\n - scores near -1 suggest that the series is jagged in a particular way: if one point is above the mean, the next is likely to be below the mean by about the same amount, and vice versa.\n\n Args:\n firing_rates (list): Firing rates of the network\n\n t (int, optional): Window size. Defaults to 2.\n\n Returns:\n array: Autocorrelation between neurons given their firing rates\n \"\"\"\n\n return np.corrcoef(\n np.array(\n [\n firing_rates[0 : len(firing_rates) - t],\n firing_rates[t : len(firing_rates)],\n ]\n )\n )\n\n @staticmethod\n def avg_corr_coeff(spike_train: np.array):\n\n \"\"\"Measure Average Pearson correlation coeffecient between neurons\n\n Args:\n spike_train (array): Neural activity\n\n Returns:\n array: Average correlation coeffecient\"\"\"\n\n corr_mat = np.corrcoef(np.asarray(spike_train).T)\n avg_corr = np.sum(corr_mat, axis=1) / 200\n corr_coeff = (\n avg_corr.sum() / 200\n ) # 2D to 1D and either upper or lower half of correlation matrix.\n\n return corr_mat, corr_coeff\n\n @staticmethod\n def spike_times(spike_train: np.array):\n\n \"\"\"Get the time instants at which neuron spikes\n\n Args:\n spike_train (array): Spike trains of neurons\n\n Returns:\n (array): Spike time of each neurons in the pool\"\"\"\n\n times = np.where(spike_train == 1.0)\n return times\n\n @staticmethod\n def spike_time_intervals(spike_train):\n\n \"\"\"Generate spike time intervals spike_trains\n\n Args:\n spike_train (array): Network activity\n\n Returns:\n list: Inter spike intervals for each neuron in the reservoir\n \"\"\"\n\n spike_times = Statistics.spike_times(spike_train)\n isi = np.diff(spike_times[-1])\n return isi\n\n @staticmethod\n def hamming_distance(actual_spike_train: np.array, perturbed_spike_train: np.array):\n \"\"\"Hamming distance between true netorks states and perturbed network states\n\n Args:\n actual_spike_train (np.array): True network's states\n\n perturbed_spike_train (np.array): Perturbated network's states\n\n Returns:\n float: Hamming distance between true and perturbed network states\n \"\"\"\n hd = [\n np.count_nonzero(actual_spike_train[i] != perturbed_spike_train[i])\n for i in range(len(actual_spike_train))\n ]\n return hd\n\n @staticmethod\n def fanofactor(spike_train: np.array, neuron: int, window_size: int):\n\n \"\"\"Investigate whether neuronal spike generation is a poisson process\n\n Args:\n spike_train (np.array): Spike train of neurons in the reservoir\n\n neuron (int): Target neuron in the pool\n\n window_size (int): Sliding window size for time step ranges to be considered for measuring the fanofactor\n\n Returns:\n float : Fano factor of the neuron spike train\n \"\"\"\n\n # Choose activity of random neuron\n neuron_act = spike_train[:, neuron]\n\n # Divide total observations into 'tws' time windows of size 'ws' 
for a neuron 60\n\n tws = np.split(neuron_act, window_size)\n fr = []\n for i in range(len(tws)):\n fr.append(np.count_nonzero(tws[i]))\n\n # print('Firing rate of the neuron during each time window of size %s is %s' %(ws,fr))\n\n mean_firing_rate = np.mean(fr)\n variance_firing_rate = np.var(fr)\n\n fano_factor = variance_firing_rate / mean_firing_rate\n\n return mean_firing_rate, variance_firing_rate, fano_factor\n\n\n @staticmethod\n def spike_source_entropy(spike_train: np.array, num_neurons: int):\n\n \"\"\"Measure the uncertainty about the origin of spike from the network using entropy\n\n Args:\n spike_train (np.array): Spike train of neurons\n\n num_neurons (int): Number of neurons in the reservoir\n\n Returns:\n int : Spike source entropy of the network\n \"\"\"\n # Number of spikes from each neuron during the interval\n n_spikes = np.count_nonzero(spike_train, axis=0)\n p = n_spikes / np.count_nonzero(\n spike_train\n ) # Probability of each neuron that can generate spike in next step\n # print(p) # Note: pi shouldn't be zero\n sse = np.sum([pi * np.log(pi) for pi in p]) / np.log(\n 1 / num_neurons\n ) # Spike source entropy\n\n return sse\n" ]
[ [ "scipy.stats.norm.fit", "numpy.sum", "scipy.optimize.curve_fit", "numpy.diff", "numpy.histogram", "matplotlib.pyplot.semilogx", "matplotlib.pyplot.tight_layout", "numpy.var", "numpy.argwhere", "numpy.asarray", "scipy.stats.lognorm.pdf", "numpy.log", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.margins", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "numpy.expand_dims", "matplotlib.pyplot.text", "matplotlib.pyplot.hist", "numpy.where", "numpy.nonzero", "matplotlib.pyplot.scatter", "numpy.mean", "numpy.random.uniform", "numpy.triu_indices_from", "numpy.zeros", "matplotlib.pyplot.subplots", "numpy.count_nonzero", "numpy.max", "numpy.min", "matplotlib.pyplot.ylim", "numpy.std", "numpy.array", "numpy.zeros_like", "matplotlib.pyplot.legend", "scipy.stats.norm.pdf", "pandas.DataFrame", "numpy.exp", "numpy.random.random", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.random.normal", "numpy.random.randint", "matplotlib.pyplot.xlabel", "numpy.split" ] ]
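A minimal sketch of the inter-spike-interval fit performed by Plotter.isi_exponential_fit above, using the same functional form; synthetic exponential ISIs and histogram bin centers stand in for a recorded spike train.

import numpy as np
from scipy.optimize import curve_fit

rng = np.random.default_rng(0)
isi = rng.exponential(scale=5.0, size=1000)    # stand-in for Statistics.spike_time_intervals output

counts, edges = np.histogram(isi, bins=30)
centers = 0.5 * (edges[:-1] + edges[1:])       # bin centers used as x values here

def exponential_func(t, a, b, c):
    # Same form as in isi_exponential_fit: a * exp(-b * t) - c
    return a * np.exp(-b * t) - c

popt, _ = curve_fit(exponential_func, centers, counts, p0=(counts.max(), 0.2, 0.0))
print("fitted (a, b, c):", popt)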
kc611/aeppl
[ "d24eee80a7448c48b55a8ec41aec150d1dd9d6a7" ]
[ "tests/test_joint_logprob.py" ]
[ "import aesara\nimport aesara.tensor as at\nimport numpy as np\nimport pytest\nimport scipy.stats.distributions as sp\nfrom aesara.graph.basic import Apply, ancestors, equal_computations\nfrom aesara.graph.op import Op\nfrom aesara.tensor.subtensor import (\n AdvancedIncSubtensor,\n AdvancedIncSubtensor1,\n AdvancedSubtensor,\n AdvancedSubtensor1,\n IncSubtensor,\n Subtensor,\n)\n\nfrom aeppl.abstract import MeasurableVariable\nfrom aeppl.joint_logprob import joint_logprob\nfrom aeppl.logprob import _logprob, logprob\nfrom aeppl.utils import rvs_to_value_vars, walk_model\nfrom tests.utils import assert_no_rvs\n\n\ndef test_joint_logprob_basic():\n # A simple check for when `joint_logprob` is the same as `logprob`\n a = at.random.uniform(0.0, 1.0)\n a.name = \"a\"\n a_value_var = a.clone()\n\n a_logp = joint_logprob({a: a_value_var}, sum=False)\n a_logp_exp = logprob(a, a_value_var)\n\n assert equal_computations([a_logp], [a_logp_exp])\n\n # Let's try a hierarchical model\n sigma = at.random.invgamma(0.5, 0.5)\n Y = at.random.normal(0.0, sigma)\n\n sigma_value_var = sigma.clone()\n y_value_var = Y.clone()\n\n total_ll = joint_logprob({Y: y_value_var, sigma: sigma_value_var}, sum=False)\n\n # We need to replace the reference to `sigma` in `Y` with its value\n # variable\n ll_Y = logprob(Y, y_value_var)\n (ll_Y,), _ = rvs_to_value_vars(\n [ll_Y],\n initial_replacements={sigma: sigma_value_var},\n )\n total_ll_exp = logprob(sigma, sigma_value_var) + ll_Y\n\n assert equal_computations([total_ll], [total_ll_exp])\n\n # Now, make sure we can compute a joint log-probability for a hierarchical\n # model with some non-`RandomVariable` nodes\n c = at.random.normal()\n c.name = \"c\"\n b_l = c * a + 2.0\n b = at.random.uniform(b_l, b_l + 1.0)\n b.name = \"b\"\n\n b_value_var = b.clone()\n c_value_var = c.clone()\n\n b_logp = joint_logprob({a: a_value_var, b: b_value_var, c: c_value_var})\n\n # There shouldn't be any `RandomVariable`s in the resulting graph\n assert_no_rvs(b_logp)\n\n res_ancestors = list(walk_model((b_logp,), walk_past_rvs=True))\n assert b_value_var in res_ancestors\n assert c_value_var in res_ancestors\n assert a_value_var in res_ancestors\n\n\ndef test_joint_logprob_multi_obs():\n\n a = at.random.uniform(0.0, 1.0)\n b = at.random.normal(0.0, 1.0)\n\n a_val = a.clone()\n b_val = b.clone()\n\n logp = joint_logprob({a: a_val, b: b_val}, sum=False)\n logp_exp = logprob(a, a_val) + logprob(b, b_val)\n\n assert equal_computations([logp], [logp_exp])\n\n x = at.random.normal(0, 1)\n y = at.random.normal(x, 1)\n\n x_val = x.clone()\n y_val = y.clone()\n\n logp = joint_logprob({x: x_val, y: y_val})\n exp_logp = joint_logprob({x: x_val, y: y_val})\n\n assert equal_computations([logp], [exp_logp])\n\n\ndef test_joint_logprob_diff_dims():\n M = at.matrix(\"M\")\n x = at.random.normal(0, 1, size=M.shape[1], name=\"X\")\n y = at.random.normal(M.dot(x), 1, name=\"Y\")\n\n x_vv = x.clone()\n x_vv.name = \"x\"\n y_vv = y.clone()\n y_vv.name = \"y\"\n\n logp = joint_logprob({x: x_vv, y: y_vv})\n\n M_val = np.random.normal(size=(10, 3))\n x_val = np.random.normal(size=(3,))\n y_val = np.random.normal(size=(10,))\n\n point = {M: M_val, x_vv: x_val, y_vv: y_val}\n logp_val = logp.eval(point)\n\n exp_logp_val = (\n sp.norm.logpdf(x_val, 0, 1).sum()\n + sp.norm.logpdf(y_val, M_val.dot(x_val), 1).sum()\n )\n assert exp_logp_val == pytest.approx(logp_val)\n\n\[email protected](\n \"indices, size\",\n [\n (slice(0, 2), 5),\n (np.r_[True, True, False, False, True], 5),\n (np.r_[0, 1, 4], 5),\n 
((np.array([0, 1, 4]), np.array([0, 1, 4])), (5, 5)),\n ],\n)\ndef test_joint_logprob_incsubtensor(indices, size):\n \"\"\"Make sure we can compute a joint log-probability for ``Y[idx] = data`` where ``Y`` is univariate.\"\"\"\n\n rng = np.random.RandomState(232)\n mu = np.power(10, np.arange(np.prod(size))).reshape(size)\n sigma = 0.001\n data = rng.normal(mu[indices], 1.0)\n y_val = rng.normal(mu, sigma, size=size)\n\n Y_rv = at.random.normal(mu, sigma, size=size)\n Y_rv.name = \"Y\"\n y_value_var = Y_rv.clone()\n y_value_var.name = \"y\"\n\n Y_sst = at.set_subtensor(Y_rv[indices], data)\n\n assert isinstance(\n Y_sst.owner.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1)\n )\n\n Y_sst_logp = joint_logprob({Y_rv: y_value_var, Y_sst: None}, sum=False)\n\n obs_logps = Y_sst_logp.eval({y_value_var: y_val})\n\n y_val_idx = y_val.copy()\n y_val_idx[indices] = data\n exp_obs_logps = sp.norm.logpdf(y_val_idx, mu, sigma)\n\n np.testing.assert_almost_equal(obs_logps, exp_obs_logps)\n\n\ndef test_joint_logprob_subtensor():\n \"\"\"Make sure we can compute a joint log-probability for ``Y[I]`` where ``Y`` and ``I`` are random variables.\"\"\"\n\n size = 5\n\n mu_base = np.power(10, np.arange(np.prod(size))).reshape(size)\n mu = np.stack([mu_base, -mu_base])\n sigma = 0.001\n rng = aesara.shared(np.random.RandomState(232), borrow=True)\n\n A_rv = at.random.normal(mu, sigma, rng=rng)\n A_rv.name = \"A\"\n\n p = 0.5\n\n I_rv = at.random.bernoulli(p, size=size, rng=rng)\n I_rv.name = \"I\"\n\n A_idx = A_rv[I_rv, at.ogrid[A_rv.shape[-1] :]]\n\n assert isinstance(\n A_idx.owner.op, (Subtensor, AdvancedSubtensor, AdvancedSubtensor1)\n )\n\n A_idx_value_var = A_idx.type()\n A_idx_value_var.name = \"A_idx_value\"\n\n I_value_var = I_rv.type()\n I_value_var.name = \"I_value\"\n\n A_idx_logp = joint_logprob({A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False)\n\n logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp)\n\n # The compiled graph should not contain any `RandomVariables`\n assert_no_rvs(logp_vals_fn.maker.fgraph.outputs[0])\n\n decimals = 6 if aesara.config.floatX == \"float64\" else 4\n\n test_val_rng = np.random.RandomState(3238)\n\n for i in range(10):\n bern_sp = sp.bernoulli(p)\n I_value = bern_sp.rvs(size=size, random_state=test_val_rng).astype(I_rv.dtype)\n\n norm_sp = sp.norm(mu[I_value, np.ogrid[mu.shape[1] :]], sigma)\n A_idx_value = norm_sp.rvs(random_state=test_val_rng).astype(A_idx.dtype)\n\n exp_obs_logps = norm_sp.logpdf(A_idx_value)\n exp_obs_logps += bern_sp.logpmf(I_value)\n\n logp_vals = logp_vals_fn(A_idx_value, I_value)\n\n np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals)\n\n\ndef test_persist_inputs():\n \"\"\"Make sure we don't unnecessarily clone variables.\"\"\"\n x = at.scalar(\"x\")\n beta_rv = at.random.normal(0, 1, name=\"beta\")\n Y_rv = at.random.normal(beta_rv * x, 1, name=\"y\")\n\n beta_vv = beta_rv.type()\n y_vv = Y_rv.clone()\n\n logp = joint_logprob({beta_rv: beta_vv, Y_rv: y_vv})\n\n assert x in ancestors([logp])\n\n # Make sure we don't clone value variables when they're graphs.\n y_vv_2 = y_vv * 2\n logp_2 = joint_logprob({beta_rv: beta_vv, Y_rv: y_vv_2})\n\n assert y_vv_2 in ancestors([logp_2])\n\n\ndef test_ignore_logprob():\n x = at.scalar(\"x\")\n beta_rv = at.random.normal(0, 1, name=\"beta\")\n beta_rv.tag.ignore_logprob = True\n y_rv = at.random.normal(beta_rv * x, 1, name=\"y\")\n\n beta = beta_rv.type()\n y = y_rv.type()\n\n logp = joint_logprob({beta_rv: beta, y_rv: y})\n\n y_rv_2 = 
at.random.normal(beta * x, 1, name=\"y\")\n logp_exp = joint_logprob({y_rv_2: y})\n\n assert equal_computations([logp], [logp_exp])\n\n\ndef test_ignore_logprob_multiout():\n class MyMultiOut(Op):\n @staticmethod\n def impl(a, b):\n res1 = 2 * a\n res2 = 2 * b\n return [res1, res2]\n\n def make_node(self, a, b):\n return Apply(self, [a, b], [a.type(), b.type()])\n\n def perform(self, node, inputs, outputs):\n res1, res2 = self.impl(inputs[0], inputs[1])\n outputs[0][0] = res1\n outputs[1][0] = res2\n\n MeasurableVariable.register(MyMultiOut)\n\n @_logprob.register(MyMultiOut)\n def logprob_MyMultiOut(op, value, *inputs, name=None, **kwargs):\n return at.zeros_like(value)\n\n Y_1_rv, Y_2_rv = MyMultiOut()(at.vector(), at.vector())\n\n Y_1_rv.tag.ignore_logprob = True\n Y_2_rv.tag.ignore_logprob = True\n\n y_1_vv = Y_1_rv.clone()\n y_2_vv = Y_2_rv.clone()\n\n logp_exp = joint_logprob({Y_1_rv: y_1_vv, Y_2_rv: y_2_vv})\n\n assert logp_exp is None\n\n\ndef test_multiple_rvs_to_same_value_raises():\n x_rv1 = at.random.normal(name=\"x1\")\n x_rv2 = at.random.normal(name=\"x2\")\n x = x_rv1.type()\n x.name = \"x\"\n\n msg = \"More than one logprob factor was assigned to the value var x\"\n with pytest.raises(ValueError, match=msg):\n joint_logprob({x_rv1: x, x_rv2: x})\n" ]
[ [ "numpy.testing.assert_almost_equal", "scipy.stats.distributions.bernoulli", "numpy.stack", "numpy.random.RandomState", "scipy.stats.distributions.norm", "numpy.prod", "numpy.random.normal", "numpy.array", "scipy.stats.distributions.norm.logpdf" ] ]
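A scipy-only reference computation mirroring the expected value assembled in test_joint_logprob_diff_dims above (prior on x plus likelihood of y given M and x); the random draws are illustrative, not the test's actual values.

import numpy as np
import scipy.stats.distributions as sp

rng = np.random.default_rng(42)
M_val = rng.normal(size=(10, 3))
x_val = rng.normal(size=(3,))
y_val = rng.normal(size=(10,))

# Joint log-density of x ~ N(0, 1) and y ~ N(M @ x, 1), summed elementwise.
joint_logp = (
    sp.norm.logpdf(x_val, 0, 1).sum()
    + sp.norm.logpdf(y_val, M_val.dot(x_val), 1).sum()
)
print(joint_logp)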
sanils2002/PYTHON-CODES
[ "607fadc2cba4b185a5529bd101faefa08f4c3469" ]
[ "Codes-B/matplotlib-py-files/matplot-image.py" ]
[ "# importing required libraries\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as img\r\n\r\n# reading the image\r\ntestImage = img.imread('g4g.png')\r\n\r\n# displaying the image\r\nplt.imshow(testImage)\r\n\r\n# displaying the image as an array\r\nprint(testImage)\r\n\r\n###############################################\r\n\r\n# In the output image, only the mode of the image is modified\r\n\r\n# reading the image\r\ntestImage = img.imread('g4g.png')\r\n \r\n# displaying the shape of the image\r\nprint(testImage.shape)\r\n \r\n# modifying the shape of the image\r\nmodifiedImage = testImage[:, :, 0]\r\n \r\n# displaying the modified image\r\nplt.imshow(modifiedImage)\r\n\r\n# Here the height of the image is 150 pixels (displaying from the 50th pixel), \r\n# width is 100 pixels (displaying from the 100th pixel) and mode value is 1.\r\n\r\n# reading the image\r\ntestImage = img.imread('g4g.png')\r\n\r\n# displaying the shape of the image\r\nprint(testImage.shape)\r\n\r\n# modifying the shape of the image\r\nmodifiedImage = testImage[50:200, 100:200, 1]\r\n\r\n# displaying the modified image\r\nplt.imshow(modifiedImage)\r\n\r\n\r\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.image.imread" ] ]
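A minimal self-contained sketch of the imread and channel-slicing steps from matplot-image.py above; a synthetic PNG stands in for 'g4g.png', which is not included in this listing.

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as img

# Write a small synthetic image so the example can run on its own.
plt.imsave("g4g.png", np.random.rand(150, 200, 3))

test_image = img.imread("g4g.png")    # float array, (H, W, 3) or (H, W, 4) depending on the PNG
print(test_image.shape)

single_channel = test_image[:, :, 0]  # keep only the first color channel
plt.imshow(single_channel)            # 2-D data is shown with the default colormap
plt.show()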
czgdp1807/numpy
[ "e4894aef5c93c845081388818a2eb4264c5e1d72" ]
[ "numpy/random/tests/test_generator_mt19937.py" ]
[ "import sys\nimport hashlib\n\nimport pytest\n\nimport numpy as np\nfrom numpy.linalg import LinAlgError\nfrom numpy.testing import (\n assert_, assert_raises, assert_equal, assert_allclose,\n assert_warns, assert_no_warnings, assert_array_equal,\n assert_array_almost_equal, suppress_warnings)\n\nfrom numpy.random import Generator, MT19937, SeedSequence\n\nrandom = Generator(MT19937())\n\nJUMP_TEST_DATA = [\n {\n \"seed\": 0,\n \"steps\": 10,\n \"initial\": {\"key_md5\": \"64eaf265d2203179fb5ffb73380cd589\", \"pos\": 9},\n \"jumped\": {\"key_md5\": \"8cb7b061136efceef5217a9ce2cc9a5a\", \"pos\": 598},\n },\n {\n \"seed\":384908324,\n \"steps\":312,\n \"initial\": {\"key_md5\": \"e99708a47b82ff51a2c7b0625b81afb5\", \"pos\": 311},\n \"jumped\": {\"key_md5\": \"2ecdbfc47a895b253e6e19ccb2e74b90\", \"pos\": 276},\n },\n {\n \"seed\": [839438204, 980239840, 859048019, 821],\n \"steps\": 511,\n \"initial\": {\"key_md5\": \"9fcd6280df9199785e17e93162ce283c\", \"pos\": 510},\n \"jumped\": {\"key_md5\": \"433b85229f2ed853cde06cd872818305\", \"pos\": 475},\n },\n]\n\[email protected](scope='module', params=[True, False])\ndef endpoint(request):\n return request.param\n\n\nclass TestSeed:\n def test_scalar(self):\n s = Generator(MT19937(0))\n assert_equal(s.integers(1000), 479)\n s = Generator(MT19937(4294967295))\n assert_equal(s.integers(1000), 324)\n\n def test_array(self):\n s = Generator(MT19937(range(10)))\n assert_equal(s.integers(1000), 465)\n s = Generator(MT19937(np.arange(10)))\n assert_equal(s.integers(1000), 465)\n s = Generator(MT19937([0]))\n assert_equal(s.integers(1000), 479)\n s = Generator(MT19937([4294967295]))\n assert_equal(s.integers(1000), 324)\n\n def test_seedsequence(self):\n s = MT19937(SeedSequence(0))\n assert_equal(s.random_raw(1), 2058676884)\n\n def test_invalid_scalar(self):\n # seed must be an unsigned 32 bit integer\n assert_raises(TypeError, MT19937, -0.5)\n assert_raises(ValueError, MT19937, -1)\n\n def test_invalid_array(self):\n # seed must be an unsigned integer\n assert_raises(TypeError, MT19937, [-0.5])\n assert_raises(ValueError, MT19937, [-1])\n assert_raises(ValueError, MT19937, [1, -2, 4294967296])\n\n def test_noninstantized_bitgen(self):\n assert_raises(ValueError, Generator, MT19937)\n\n\nclass TestBinomial:\n def test_n_zero(self):\n # Tests the corner case of n == 0 for the binomial distribution.\n # binomial(0, p) should be zero for any p in [0, 1].\n # This test addresses issue #3480.\n zeros = np.zeros(2, dtype='int')\n for p in [0, .5, 1]:\n assert_(random.binomial(0, p) == 0)\n assert_array_equal(random.binomial(zeros, p), zeros)\n\n def test_p_is_nan(self):\n # Issue #4571.\n assert_raises(ValueError, random.binomial, 1, np.nan)\n\n\nclass TestMultinomial:\n def test_basic(self):\n random.multinomial(100, [0.2, 0.8])\n\n def test_zero_probability(self):\n random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])\n\n def test_int_negative_interval(self):\n assert_(-5 <= random.integers(-5, -1) < -1)\n x = random.integers(-5, -1, 5)\n assert_(np.all(-5 <= x))\n assert_(np.all(x < -1))\n\n def test_size(self):\n # gh-3173\n p = [0.5, 0.5]\n assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))\n assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))\n assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,\n (2, 2, 2))\n\n 
assert_raises(TypeError, random.multinomial, 1, p,\n float(1))\n\n def test_invalid_prob(self):\n assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])\n assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])\n\n def test_invalid_n(self):\n assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])\n assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])\n\n def test_p_non_contiguous(self):\n p = np.arange(15.)\n p /= np.sum(p[1::3])\n pvals = p[1::3]\n random = Generator(MT19937(1432985819))\n non_contig = random.multinomial(100, pvals=pvals)\n random = Generator(MT19937(1432985819))\n contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))\n assert_array_equal(non_contig, contig)\n\n def test_multidimensional_pvals(self):\n assert_raises(ValueError, random.multinomial, 10, [[0, 1]])\n assert_raises(ValueError, random.multinomial, 10, [[0], [1]])\n assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])\n assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))\n\n\nclass TestMultivariateHypergeometric:\n\n def setup(self):\n self.seed = 8675309\n\n def test_argument_validation(self):\n # Error cases...\n\n # `colors` must be a 1-d sequence\n assert_raises(ValueError, random.multivariate_hypergeometric,\n 10, 4)\n\n # Negative nsample\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [2, 3, 4], -1)\n\n # Negative color\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [-1, 2, 3], 2)\n\n # nsample exceeds sum(colors)\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [2, 3, 4], 10)\n\n # nsample exceeds sum(colors) (edge case of empty colors)\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [], 1)\n\n # Validation errors associated with very large values in colors.\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [999999999, 101], 5, 1, 'marginals')\n\n int64_info = np.iinfo(np.int64)\n max_int64 = int64_info.max\n max_int64_index = max_int64 // int64_info.dtype.itemsize\n assert_raises(ValueError, random.multivariate_hypergeometric,\n [max_int64_index - 100, 101], 5, 1, 'count')\n\n @pytest.mark.parametrize('method', ['count', 'marginals'])\n def test_edge_cases(self, method):\n # Set the seed, but in fact, all the results in this test are\n # deterministic, so we don't really need this.\n random = Generator(MT19937(self.seed))\n\n x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)\n assert_array_equal(x, [0, 0, 0])\n\n x = random.multivariate_hypergeometric([], 0, method=method)\n assert_array_equal(x, [])\n\n x = random.multivariate_hypergeometric([], 0, size=1, method=method)\n assert_array_equal(x, np.empty((1, 0), dtype=np.int64))\n\n x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)\n assert_array_equal(x, [0, 0, 0])\n\n x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)\n assert_array_equal(x, [3, 0, 0])\n\n colors = [1, 1, 0, 1, 1]\n x = random.multivariate_hypergeometric(colors, sum(colors),\n method=method)\n assert_array_equal(x, colors)\n\n x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,\n method=method)\n assert_array_equal(x, [[3, 4, 5]]*3)\n\n # Cases for nsample:\n # nsample < 10\n # 10 <= nsample < colors.sum()/2\n # colors.sum()/2 < nsample < colors.sum() - 10\n # colors.sum() - 10 < nsample < colors.sum()\n @pytest.mark.parametrize('nsample', [8, 25, 45, 55])\n @pytest.mark.parametrize('method', ['count', 'marginals'])\n 
@pytest.mark.parametrize('size', [5, (2, 3), 150000])\n def test_typical_cases(self, nsample, method, size):\n random = Generator(MT19937(self.seed))\n\n colors = np.array([10, 5, 20, 25])\n sample = random.multivariate_hypergeometric(colors, nsample, size,\n method=method)\n if isinstance(size, int):\n expected_shape = (size,) + colors.shape\n else:\n expected_shape = size + colors.shape\n assert_equal(sample.shape, expected_shape)\n assert_((sample >= 0).all())\n assert_((sample <= colors).all())\n assert_array_equal(sample.sum(axis=-1),\n np.full(size, fill_value=nsample, dtype=int))\n if isinstance(size, int) and size >= 100000:\n # This sample is large enough to compare its mean to\n # the expected values.\n assert_allclose(sample.mean(axis=0),\n nsample * colors / colors.sum(),\n rtol=1e-3, atol=0.005)\n\n def test_repeatability1(self):\n random = Generator(MT19937(self.seed))\n sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,\n method='count')\n expected = np.array([[2, 1, 2],\n [2, 1, 2],\n [1, 1, 3],\n [2, 0, 3],\n [2, 1, 2]])\n assert_array_equal(sample, expected)\n\n def test_repeatability2(self):\n random = Generator(MT19937(self.seed))\n sample = random.multivariate_hypergeometric([20, 30, 50], 50,\n size=5,\n method='marginals')\n expected = np.array([[ 9, 17, 24],\n [ 7, 13, 30],\n [ 9, 15, 26],\n [ 9, 17, 24],\n [12, 14, 24]])\n assert_array_equal(sample, expected)\n\n def test_repeatability3(self):\n random = Generator(MT19937(self.seed))\n sample = random.multivariate_hypergeometric([20, 30, 50], 12,\n size=5,\n method='marginals')\n expected = np.array([[2, 3, 7],\n [5, 3, 4],\n [2, 5, 5],\n [5, 3, 4],\n [1, 5, 6]])\n assert_array_equal(sample, expected)\n\n\nclass TestSetState:\n def setup(self):\n self.seed = 1234567890\n self.rg = Generator(MT19937(self.seed))\n self.bit_generator = self.rg.bit_generator\n self.state = self.bit_generator.state\n self.legacy_state = (self.state['bit_generator'],\n self.state['state']['key'],\n self.state['state']['pos'])\n\n def test_gaussian_reset(self):\n # Make sure the cached every-other-Gaussian is reset.\n old = self.rg.standard_normal(size=3)\n self.bit_generator.state = self.state\n new = self.rg.standard_normal(size=3)\n assert_(np.all(old == new))\n\n def test_gaussian_reset_in_media_res(self):\n # When the state is saved with a cached Gaussian, make sure the\n # cached Gaussian is restored.\n\n self.rg.standard_normal()\n state = self.bit_generator.state\n old = self.rg.standard_normal(size=3)\n self.bit_generator.state = state\n new = self.rg.standard_normal(size=3)\n assert_(np.all(old == new))\n\n def test_negative_binomial(self):\n # Ensure that the negative binomial results take floating point\n # arguments without truncation.\n self.rg.negative_binomial(0.5, 0.5)\n\n\nclass TestIntegers:\n rfunc = random.integers\n\n # valid integer/boolean types\n itype = [bool, np.int8, np.uint8, np.int16, np.uint16,\n np.int32, np.uint32, np.int64, np.uint64]\n\n def test_unsupported_type(self, endpoint):\n assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)\n\n def test_bounds_checking(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, ubnd, lbnd,\n 
endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,\n dtype=dt)\n\n assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, 1, [0],\n endpoint=endpoint, dtype=dt)\n\n def test_bounds_checking_array(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)\n\n assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, [lbnd] * 2,\n [ubnd + 1] * 2, endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,\n endpoint=endpoint, dtype=dt)\n assert_raises(ValueError, self.rfunc, [1] * 2, 0,\n endpoint=endpoint, dtype=dt)\n\n def test_rng_zero_and_extremes(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n is_open = not endpoint\n\n tgt = ubnd - 1\n assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n\n tgt = lbnd\n assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n\n tgt = (lbnd + ubnd) // 2\n assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,\n endpoint=endpoint, dtype=dt), tgt)\n assert_equal(self.rfunc([tgt], [tgt + is_open],\n size=1000, endpoint=endpoint, dtype=dt),\n tgt)\n\n def test_rng_zero_and_extremes_array(self, endpoint):\n size = 1000\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n tgt = ubnd - 1\n assert_equal(self.rfunc([tgt], [tgt + 1],\n size=size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)\n\n tgt = lbnd\n assert_equal(self.rfunc([tgt], [tgt + 1],\n size=size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)\n\n tgt = (lbnd + ubnd) // 2\n assert_equal(self.rfunc([tgt], [tgt + 1],\n size=size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)\n assert_equal(self.rfunc(\n [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)\n\n def test_full_range(self, endpoint):\n # Test for ticket #1690\n\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n try:\n self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)\n except Exception as e:\n raise AssertionError(\"No error should have been raised, \"\n \"but one was with the following \"\n \"message:\\n\\n%s\" % str(e))\n\n def test_full_range_array(self, endpoint):\n # Test for ticket #1690\n\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n 
ubnd = ubnd - 1 if endpoint else ubnd\n\n try:\n self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)\n except Exception as e:\n raise AssertionError(\"No error should have been raised, \"\n \"but one was with the following \"\n \"message:\\n\\n%s\" % str(e))\n\n def test_in_bounds_fuzz(self, endpoint):\n # Don't use fixed seed\n random = Generator(MT19937())\n\n for dt in self.itype[1:]:\n for ubnd in [4, 8, 16]:\n vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,\n endpoint=endpoint, dtype=dt)\n assert_(vals.max() < ubnd)\n assert_(vals.min() >= 2)\n\n vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,\n dtype=bool)\n assert_(vals.max() < 2)\n assert_(vals.min() >= 0)\n\n def test_scalar_array_equiv(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n size = 1000\n random = Generator(MT19937(1234))\n scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,\n dtype=dt)\n\n random = Generator(MT19937(1234))\n scalar_array = random.integers([lbnd], [ubnd], size=size,\n endpoint=endpoint, dtype=dt)\n\n random = Generator(MT19937(1234))\n array = random.integers([lbnd] * size, [ubnd] *\n size, size=size, endpoint=endpoint, dtype=dt)\n assert_array_equal(scalar, scalar_array)\n assert_array_equal(scalar, array)\n\n def test_repeatability(self, endpoint):\n # We use a md5 hash of generated sequences of 1000 samples\n # in the range [0, 6) for all but bool, where the range\n # is [0, 2). Hashes are for little endian numbers.\n tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',\n 'int16': '39624ead49ad67e37545744024d2648b',\n 'int32': '5c4810373f979336c6c0c999996e47a1',\n 'int64': 'ab126c15edff26f55c50d2b7e37391ac',\n 'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',\n 'uint16': '39624ead49ad67e37545744024d2648b',\n 'uint32': '5c4810373f979336c6c0c999996e47a1',\n 'uint64': 'ab126c15edff26f55c50d2b7e37391ac',\n 'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}\n\n for dt in self.itype[1:]:\n random = Generator(MT19937(1234))\n\n # view as little endian for hash\n if sys.byteorder == 'little':\n val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,\n dtype=dt)\n else:\n val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,\n dtype=dt).byteswap()\n\n res = hashlib.md5(val).hexdigest()\n assert_(tgt[np.dtype(dt).name] == res)\n\n # bools do not depend on endianness\n random = Generator(MT19937(1234))\n val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,\n dtype=bool).view(np.int8)\n res = hashlib.md5(val).hexdigest()\n assert_(tgt[np.dtype(bool).name] == res)\n\n def test_repeatability_broadcasting(self, endpoint):\n for dt in self.itype:\n lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min\n ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n # view as little endian for hash\n random = Generator(MT19937(1234))\n val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,\n dtype=dt)\n\n random = Generator(MT19937(1234))\n val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,\n dtype=dt)\n\n assert_array_equal(val, val_bc)\n\n random = Generator(MT19937(1234))\n val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,\n endpoint=endpoint, dtype=dt)\n\n assert_array_equal(val, val_bc)\n\n @pytest.mark.parametrize(\n 'bound, expected',\n [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,\n 3769704066, 1170797179, 
4108474671])),\n (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,\n 3769704067, 1170797180, 4108474672])),\n (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,\n 1831631863, 1215661561, 3869512430]))]\n )\n def test_repeatability_32bit_boundary(self, bound, expected):\n for size in [None, len(expected)]:\n random = Generator(MT19937(1234))\n x = random.integers(bound, size=size)\n assert_equal(x, expected if size is not None else expected[0])\n\n def test_repeatability_32bit_boundary_broadcasting(self):\n desired = np.array([[[1622936284, 3620788691, 1659384060],\n [1417365545, 760222891, 1909653332],\n [3788118662, 660249498, 4092002593]],\n [[3625610153, 2979601262, 3844162757],\n [ 685800658, 120261497, 2694012896],\n [1207779440, 1586594375, 3854335050]],\n [[3004074748, 2310761796, 3012642217],\n [2067714190, 2786677879, 1363865881],\n [ 791663441, 1867303284, 2169727960]],\n [[1939603804, 1250951100, 298950036],\n [1040128489, 3791912209, 3317053765],\n [3155528714, 61360675, 2305155588]],\n [[ 817688762, 1335621943, 3288952434],\n [1770890872, 1102951817, 1957607470],\n [3099996017, 798043451, 48334215]]])\n for size in [None, (5, 3, 3)]:\n random = Generator(MT19937(12345))\n x = random.integers([[-1], [0], [1]],\n [2**32 - 1, 2**32, 2**32 + 1],\n size=size)\n assert_array_equal(x, desired if size is not None else desired[0])\n\n def test_int64_uint64_broadcast_exceptions(self, endpoint):\n configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),\n np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),\n (-2**63-1, -2**63-1))}\n for dtype in configs:\n for config in configs[dtype]:\n low, high = config\n high = high - endpoint\n low_a = np.array([[low]*10])\n high_a = np.array([high] * 10)\n assert_raises(ValueError, random.integers, low, high,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low_a, high,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low, high_a,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low_a, high_a,\n endpoint=endpoint, dtype=dtype)\n\n low_o = np.array([[low]*10], dtype=object)\n high_o = np.array([high] * 10, dtype=object)\n assert_raises(ValueError, random.integers, low_o, high,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low, high_o,\n endpoint=endpoint, dtype=dtype)\n assert_raises(ValueError, random.integers, low_o, high_o,\n endpoint=endpoint, dtype=dtype)\n\n def test_int64_uint64_corner_case(self, endpoint):\n # When stored in Numpy arrays, `lbnd` is casted\n # as np.int64, and `ubnd` is casted as np.uint64.\n # Checking whether `lbnd` >= `ubnd` used to be\n # done solely via direct comparison, which is incorrect\n # because when Numpy tries to compare both numbers,\n # it casts both to np.float64 because there is\n # no integer superset of np.int64 and np.uint64. 
However,\n # `ubnd` is too large to be represented in np.float64,\n # causing it be round down to np.iinfo(np.int64).max,\n # leading to a ValueError because `lbnd` now equals\n # the new `ubnd`.\n\n dt = np.int64\n tgt = np.iinfo(np.int64).max\n lbnd = np.int64(np.iinfo(np.int64).max)\n ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)\n\n # None of these function calls should\n # generate a ValueError now.\n actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)\n assert_equal(actual, tgt)\n\n def test_respect_dtype_singleton(self, endpoint):\n # See gh-7203\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n dt = np.bool_ if dt is bool else dt\n\n sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)\n assert_equal(sample.dtype, dt)\n\n for dt in (bool, int, np.compat.long):\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n\n # gh-7284: Ensure that we get Python data types\n sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)\n assert not hasattr(sample, 'dtype')\n assert_equal(type(sample), dt)\n\n def test_respect_dtype_array(self, endpoint):\n # See gh-7203\n for dt in self.itype:\n lbnd = 0 if dt is bool else np.iinfo(dt).min\n ubnd = 2 if dt is bool else np.iinfo(dt).max + 1\n ubnd = ubnd - 1 if endpoint else ubnd\n dt = np.bool_ if dt is bool else dt\n\n sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)\n assert_equal(sample.dtype, dt)\n sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,\n dtype=dt)\n assert_equal(sample.dtype, dt)\n\n def test_zero_size(self, endpoint):\n # See gh-7203\n for dt in self.itype:\n sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)\n assert sample.shape == (3, 0, 4)\n assert sample.dtype == dt\n assert self.rfunc(0, -10, 0, endpoint=endpoint,\n dtype=dt).shape == (0,)\n assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,\n (3, 0, 4))\n assert_equal(random.integers(0, -10, size=0).shape, (0,))\n assert_equal(random.integers(10, 10, size=0).shape, (0,))\n\n def test_error_byteorder(self):\n other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'\n with pytest.raises(ValueError):\n random.integers(0, 200, size=10, dtype=other_byteord_dt)\n\n # chi2max is the maximum acceptable chi-squared value.\n @pytest.mark.slow\n @pytest.mark.parametrize('sample_size,high,dtype,chi2max',\n [(5000000, 5, np.int8, 125.0), # p-value ~4.6e-25\n (5000000, 7, np.uint8, 150.0), # p-value ~7.7e-30\n (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25\n (50000000, 5000, np.uint16, 6500.0), # p-value ~3.5e-25\n ])\n def test_integers_small_dtype_chisquared(self, sample_size, high,\n dtype, chi2max):\n # Regression test for gh-14774.\n samples = random.integers(high, size=sample_size, dtype=dtype)\n\n values, counts = np.unique(samples, return_counts=True)\n expected = sample_size / high\n chi2 = ((counts - expected)**2 / expected).sum()\n assert chi2 < chi2max\n\n\nclass TestRandomDist:\n # Make sure the random distribution returns the correct value for a\n # given seed\n\n def setup(self):\n self.seed = 1234567890\n\n def test_integers(self):\n random = Generator(MT19937(self.seed))\n actual = random.integers(-99, 99, size=(3, 2))\n desired = np.array([[-80, -56], [41, 37], [-83, -16]])\n assert_array_equal(actual, desired)\n\n def test_integers_masked(self):\n # Test masked rejection 
sampling algorithm to generate array of\n # uint32 in an interval.\n random = Generator(MT19937(self.seed))\n actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)\n desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)\n assert_array_equal(actual, desired)\n\n def test_integers_closed(self):\n random = Generator(MT19937(self.seed))\n actual = random.integers(-99, 99, size=(3, 2), endpoint=True)\n desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])\n assert_array_equal(actual, desired)\n\n def test_integers_max_int(self):\n # Tests whether integers with closed=True can generate the\n # maximum allowed Python int that can be converted\n # into a C long. Previous implementations of this\n # method have thrown an OverflowError when attempting\n # to generate this integer.\n actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,\n endpoint=True)\n\n desired = np.iinfo('l').max\n assert_equal(actual, desired)\n\n def test_random(self):\n random = Generator(MT19937(self.seed))\n actual = random.random((3, 2))\n desired = np.array([[0.096999199829214, 0.707517457682192],\n [0.084364834598269, 0.767731206553125],\n [0.665069021359413, 0.715487190596693]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n random = Generator(MT19937(self.seed))\n actual = random.random()\n assert_array_almost_equal(actual, desired[0, 0], decimal=15)\n\n def test_random_float(self):\n random = Generator(MT19937(self.seed))\n actual = random.random((3, 2))\n desired = np.array([[0.0969992 , 0.70751746],\n [0.08436483, 0.76773121],\n [0.66506902, 0.71548719]])\n assert_array_almost_equal(actual, desired, decimal=7)\n\n def test_random_float_scalar(self):\n random = Generator(MT19937(self.seed))\n actual = random.random(dtype=np.float32)\n desired = 0.0969992\n assert_array_almost_equal(actual, desired, decimal=7)\n\n def test_random_unsupported_type(self):\n assert_raises(TypeError, random.random, dtype='int32')\n\n def test_choice_uniform_replace(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(4, 4)\n desired = np.array([0, 0, 2, 2], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n def test_choice_nonuniform_replace(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])\n desired = np.array([0, 1, 0, 1], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n def test_choice_uniform_noreplace(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(4, 3, replace=False)\n desired = np.array([2, 0, 3], dtype=np.int64)\n assert_array_equal(actual, desired)\n actual = random.choice(4, 4, replace=False, shuffle=False)\n desired = np.arange(4, dtype=np.int64)\n assert_array_equal(actual, desired)\n\n def test_choice_nonuniform_noreplace(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])\n desired = np.array([0, 2, 3], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n def test_choice_noninteger(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice(['a', 'b', 'c', 'd'], 4)\n desired = np.array(['a', 'a', 'c', 'c'])\n assert_array_equal(actual, desired)\n\n def test_choice_multidimensional_default_axis(self):\n random = Generator(MT19937(self.seed))\n actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)\n desired = np.array([[0, 1], [0, 1], [4, 5]])\n assert_array_equal(actual, desired)\n\n def test_choice_multidimensional_custom_axis(self):\n random = Generator(MT19937(self.seed))\n 
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)\n desired = np.array([[0], [2], [4], [6]])\n assert_array_equal(actual, desired)\n\n def test_choice_exceptions(self):\n sample = random.choice\n assert_raises(ValueError, sample, -1, 3)\n assert_raises(ValueError, sample, 3., 3)\n assert_raises(ValueError, sample, [], 3)\n assert_raises(ValueError, sample, [1, 2, 3, 4], 3,\n p=[[0.25, 0.25], [0.25, 0.25]])\n assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])\n assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])\n assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])\n assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)\n # gh-13087\n assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)\n assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)\n assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)\n assert_raises(ValueError, sample, [1, 2, 3], 2,\n replace=False, p=[1, 0, 0])\n\n def test_choice_return_shape(self):\n p = [0.1, 0.9]\n # Check scalar\n assert_(np.isscalar(random.choice(2, replace=True)))\n assert_(np.isscalar(random.choice(2, replace=False)))\n assert_(np.isscalar(random.choice(2, replace=True, p=p)))\n assert_(np.isscalar(random.choice(2, replace=False, p=p)))\n assert_(np.isscalar(random.choice([1, 2], replace=True)))\n assert_(random.choice([None], replace=True) is None)\n a = np.array([1, 2])\n arr = np.empty(1, dtype=object)\n arr[0] = a\n assert_(random.choice(arr, replace=True) is a)\n\n # Check 0-d array\n s = tuple()\n assert_(not np.isscalar(random.choice(2, s, replace=True)))\n assert_(not np.isscalar(random.choice(2, s, replace=False)))\n assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))\n assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))\n assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))\n assert_(random.choice([None], s, replace=True).ndim == 0)\n a = np.array([1, 2])\n arr = np.empty(1, dtype=object)\n arr[0] = a\n assert_(random.choice(arr, s, replace=True).item() is a)\n\n # Check multi dimensional array\n s = (2, 3)\n p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]\n assert_equal(random.choice(6, s, replace=True).shape, s)\n assert_equal(random.choice(6, s, replace=False).shape, s)\n assert_equal(random.choice(6, s, replace=True, p=p).shape, s)\n assert_equal(random.choice(6, s, replace=False, p=p).shape, s)\n assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)\n\n # Check zero-size\n assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))\n assert_equal(random.integers(0, -10, size=0).shape, (0,))\n assert_equal(random.integers(10, 10, size=0).shape, (0,))\n assert_equal(random.choice(0, size=0).shape, (0,))\n assert_equal(random.choice([], size=(0,)).shape, (0,))\n assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,\n (3, 0, 4))\n assert_raises(ValueError, random.choice, [], 10)\n\n def test_choice_nan_probabilities(self):\n a = np.array([42, 1, 2])\n p = [None, None, None]\n assert_raises(ValueError, random.choice, a, p=p)\n\n def test_choice_p_non_contiguous(self):\n p = np.ones(10) / 5\n p[1::2] = 3.0\n random = Generator(MT19937(self.seed))\n non_contig = random.choice(5, 3, p=p[::2])\n random = Generator(MT19937(self.seed))\n contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))\n assert_array_equal(non_contig, contig)\n\n def test_choice_return_type(self):\n # gh 9867\n p = np.ones(4) / 4.\n actual = random.choice(4, 2)\n assert actual.dtype == np.int64\n actual = random.choice(4, 
2, replace=False)\n assert actual.dtype == np.int64\n actual = random.choice(4, 2, p=p)\n assert actual.dtype == np.int64\n actual = random.choice(4, 2, p=p, replace=False)\n assert actual.dtype == np.int64\n\n def test_choice_large_sample(self):\n choice_hash = 'd44962a0b1e92f4a3373c23222244e21'\n random = Generator(MT19937(self.seed))\n actual = random.choice(10000, 5000, replace=False)\n if sys.byteorder != 'little':\n actual = actual.byteswap()\n res = hashlib.md5(actual.view(np.int8)).hexdigest()\n assert_(choice_hash == res)\n\n def test_bytes(self):\n random = Generator(MT19937(self.seed))\n actual = random.bytes(10)\n desired = b'\\x86\\xf0\\xd4\\x18\\xe1\\x81\\t8%\\xdd'\n assert_equal(actual, desired)\n\n def test_shuffle(self):\n # Test lists, arrays (of various dtypes), and multidimensional versions\n # of both, c-contiguous or not:\n for conv in [lambda x: np.array([]),\n lambda x: x,\n lambda x: np.asarray(x).astype(np.int8),\n lambda x: np.asarray(x).astype(np.float32),\n lambda x: np.asarray(x).astype(np.complex64),\n lambda x: np.asarray(x).astype(object),\n lambda x: [(i, i) for i in x],\n lambda x: np.asarray([[i, i] for i in x]),\n lambda x: np.vstack([x, x]).T,\n # gh-11442\n lambda x: (np.asarray([(i, i) for i in x],\n [(\"a\", int), (\"b\", int)])\n .view(np.recarray)),\n # gh-4270\n lambda x: np.asarray([(i, i) for i in x],\n [(\"a\", object, (1,)),\n (\"b\", np.int32, (1,))])]:\n random = Generator(MT19937(self.seed))\n alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])\n random.shuffle(alist)\n actual = alist\n desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])\n assert_array_equal(actual, desired)\n\n def test_shuffle_custom_axis(self):\n random = Generator(MT19937(self.seed))\n actual = np.arange(16).reshape((4, 4))\n random.shuffle(actual, axis=1)\n desired = np.array([[ 0, 3, 1, 2],\n [ 4, 7, 5, 6],\n [ 8, 11, 9, 10],\n [12, 15, 13, 14]])\n assert_array_equal(actual, desired)\n random = Generator(MT19937(self.seed))\n actual = np.arange(16).reshape((4, 4))\n random.shuffle(actual, axis=-1)\n assert_array_equal(actual, desired)\n\n def test_shuffle_axis_nonsquare(self):\n y1 = np.arange(20).reshape(2, 10)\n y2 = y1.copy()\n random = Generator(MT19937(self.seed))\n random.shuffle(y1, axis=1)\n random = Generator(MT19937(self.seed))\n random.shuffle(y2.T)\n assert_array_equal(y1, y2)\n\n def test_shuffle_masked(self):\n # gh-3263\n a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)\n b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)\n a_orig = a.copy()\n b_orig = b.copy()\n for i in range(50):\n random.shuffle(a)\n assert_equal(\n sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))\n random.shuffle(b)\n assert_equal(\n sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))\n\n def test_shuffle_exceptions(self):\n random = Generator(MT19937(self.seed))\n arr = np.arange(10)\n assert_raises(np.AxisError, random.shuffle, arr, 1)\n arr = np.arange(9).reshape((3, 3))\n assert_raises(np.AxisError, random.shuffle, arr, 3)\n assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))\n arr = [[1, 2, 3], [4, 5, 6]]\n assert_raises(NotImplementedError, random.shuffle, arr, 1)\n\n def test_permutation(self):\n random = Generator(MT19937(self.seed))\n alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n actual = random.permutation(alist)\n desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]\n assert_array_equal(actual, desired)\n\n random = Generator(MT19937(self.seed))\n arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T\n actual = random.permutation(arr_2d)\n 
assert_array_equal(actual, np.atleast_2d(desired).T)\n \n bad_x_str = \"abcd\"\n assert_raises(np.AxisError, random.permutation, bad_x_str)\n\n bad_x_float = 1.2\n assert_raises(np.AxisError, random.permutation, bad_x_float)\n\n random = Generator(MT19937(self.seed))\n integer_val = 10\n desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]\n\n actual = random.permutation(integer_val)\n assert_array_equal(actual, desired)\n\n def test_permutation_custom_axis(self):\n a = np.arange(16).reshape((4, 4))\n desired = np.array([[ 0, 3, 1, 2],\n [ 4, 7, 5, 6],\n [ 8, 11, 9, 10],\n [12, 15, 13, 14]])\n random = Generator(MT19937(self.seed))\n actual = random.permutation(a, axis=1)\n assert_array_equal(actual, desired)\n random = Generator(MT19937(self.seed))\n actual = random.permutation(a, axis=-1)\n assert_array_equal(actual, desired)\n\n def test_permutation_exceptions(self):\n random = Generator(MT19937(self.seed))\n arr = np.arange(10)\n assert_raises(np.AxisError, random.permutation, arr, 1)\n arr = np.arange(9).reshape((3, 3))\n assert_raises(np.AxisError, random.permutation, arr, 3)\n assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))\n\n def test_beta(self):\n random = Generator(MT19937(self.seed))\n actual = random.beta(.1, .9, size=(3, 2))\n desired = np.array(\n [[1.083029353267698e-10, 2.449965303168024e-11],\n [2.397085162969853e-02, 3.590779671820755e-08],\n [2.830254190078299e-04, 1.744709918330393e-01]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_binomial(self):\n random = Generator(MT19937(self.seed))\n actual = random.binomial(100.123, .456, size=(3, 2))\n desired = np.array([[42, 41],\n [42, 48],\n [44, 50]])\n assert_array_equal(actual, desired)\n\n random = Generator(MT19937(self.seed))\n actual = random.binomial(100.123, .456)\n desired = 42\n assert_array_equal(actual, desired)\n\n def test_chisquare(self):\n random = Generator(MT19937(self.seed))\n actual = random.chisquare(50, size=(3, 2))\n desired = np.array([[32.9850547060149, 39.0219480493301],\n [56.2006134779419, 57.3474165711485],\n [55.4243733880198, 55.4209797925213]])\n assert_array_almost_equal(actual, desired, decimal=13)\n\n def test_dirichlet(self):\n random = Generator(MT19937(self.seed))\n alpha = np.array([51.72840233779265162, 39.74494232180943953])\n actual = random.dirichlet(alpha, size=(3, 2))\n desired = np.array([[[0.5439892869558927, 0.45601071304410745],\n [0.5588917345860708, 0.4411082654139292 ]],\n [[0.5632074165063435, 0.43679258349365657],\n [0.54862581112627, 0.45137418887373015]],\n [[0.49961831357047226, 0.5003816864295278 ],\n [0.52374806183482, 0.47625193816517997]]])\n assert_array_almost_equal(actual, desired, decimal=15)\n bad_alpha = np.array([5.4e-01, -1.0e-16])\n assert_raises(ValueError, random.dirichlet, bad_alpha)\n\n random = Generator(MT19937(self.seed))\n alpha = np.array([51.72840233779265162, 39.74494232180943953])\n actual = random.dirichlet(alpha)\n assert_array_almost_equal(actual, desired[0, 0], decimal=15)\n\n def test_dirichlet_size(self):\n # gh-3173\n p = np.array([51.72840233779265162, 39.74494232180943953])\n assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))\n assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))\n assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))\n assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))\n\n assert_raises(TypeError, random.dirichlet, 
p, float(1))\n\n def test_dirichlet_bad_alpha(self):\n # gh-2089\n alpha = np.array([5.4e-01, -1.0e-16])\n assert_raises(ValueError, random.dirichlet, alpha)\n\n # gh-15876\n assert_raises(ValueError, random.dirichlet, [[5, 1]])\n assert_raises(ValueError, random.dirichlet, [[5], [1]])\n assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])\n assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))\n\n def test_dirichlet_alpha_non_contiguous(self):\n a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])\n alpha = a[::2]\n random = Generator(MT19937(self.seed))\n non_contig = random.dirichlet(alpha, size=(3, 2))\n random = Generator(MT19937(self.seed))\n contig = random.dirichlet(np.ascontiguousarray(alpha),\n size=(3, 2))\n assert_array_almost_equal(non_contig, contig)\n\n def test_dirichlet_small_alpha(self):\n eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.\n alpha = eps * np.array([1., 1.0e-3])\n random = Generator(MT19937(self.seed))\n actual = random.dirichlet(alpha, size=(3, 2))\n expected = np.array([\n [[1., 0.],\n [1., 0.]],\n [[1., 0.],\n [1., 0.]],\n [[1., 0.],\n [1., 0.]]\n ])\n assert_array_almost_equal(actual, expected, decimal=15)\n\n @pytest.mark.slow\n def test_dirichlet_moderately_small_alpha(self):\n # Use alpha.max() < 0.1 to trigger stick breaking code path\n alpha = np.array([0.02, 0.04, 0.03])\n exact_mean = alpha / alpha.sum()\n random = Generator(MT19937(self.seed))\n sample = random.dirichlet(alpha, size=20000000)\n sample_mean = sample.mean(axis=0)\n assert_allclose(sample_mean, exact_mean, rtol=1e-3)\n\n def test_exponential(self):\n random = Generator(MT19937(self.seed))\n actual = random.exponential(1.1234, size=(3, 2))\n desired = np.array([[0.098845481066258, 1.560752510746964],\n [0.075730916041636, 1.769098974710777],\n [1.488602544592235, 2.49684815275751 ]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_exponential_0(self):\n assert_equal(random.exponential(scale=0), 0)\n assert_raises(ValueError, random.exponential, scale=-0.)\n\n def test_f(self):\n random = Generator(MT19937(self.seed))\n actual = random.f(12, 77, size=(3, 2))\n desired = np.array([[0.461720027077085, 1.100441958872451],\n [1.100337455217484, 0.91421736740018 ],\n [0.500811891303113, 0.826802454552058]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_gamma(self):\n random = Generator(MT19937(self.seed))\n actual = random.gamma(5, 3, size=(3, 2))\n desired = np.array([[ 5.03850858902096, 7.9228656732049 ],\n [18.73983605132985, 19.57961681699238],\n [18.17897755150825, 18.17653912505234]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_gamma_0(self):\n assert_equal(random.gamma(shape=0, scale=0), 0)\n assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)\n\n def test_geometric(self):\n random = Generator(MT19937(self.seed))\n actual = random.geometric(.123456789, size=(3, 2))\n desired = np.array([[ 1, 10],\n [ 1, 12],\n [ 9, 10]])\n assert_array_equal(actual, desired)\n\n def test_geometric_exceptions(self):\n assert_raises(ValueError, random.geometric, 1.1)\n assert_raises(ValueError, random.geometric, [1.1] * 10)\n assert_raises(ValueError, random.geometric, -0.1)\n assert_raises(ValueError, random.geometric, [-0.1] * 10)\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, random.geometric, np.nan)\n assert_raises(ValueError, random.geometric, [np.nan] * 10)\n\n def test_gumbel(self):\n random = Generator(MT19937(self.seed))\n 
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))\n desired = np.array([[ 4.688397515056245, -0.289514845417841],\n [ 4.981176042584683, -0.633224272589149],\n [-0.055915275687488, -0.333962478257953]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_gumbel_0(self):\n assert_equal(random.gumbel(scale=0), 0)\n assert_raises(ValueError, random.gumbel, scale=-0.)\n\n def test_hypergeometric(self):\n random = Generator(MT19937(self.seed))\n actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))\n desired = np.array([[ 9, 9],\n [ 9, 9],\n [10, 9]])\n assert_array_equal(actual, desired)\n\n # Test nbad = 0\n actual = random.hypergeometric(5, 0, 3, size=4)\n desired = np.array([3, 3, 3, 3])\n assert_array_equal(actual, desired)\n\n actual = random.hypergeometric(15, 0, 12, size=4)\n desired = np.array([12, 12, 12, 12])\n assert_array_equal(actual, desired)\n\n # Test ngood = 0\n actual = random.hypergeometric(0, 5, 3, size=4)\n desired = np.array([0, 0, 0, 0])\n assert_array_equal(actual, desired)\n\n actual = random.hypergeometric(0, 15, 12, size=4)\n desired = np.array([0, 0, 0, 0])\n assert_array_equal(actual, desired)\n\n def test_laplace(self):\n random = Generator(MT19937(self.seed))\n actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))\n desired = np.array([[-3.156353949272393, 1.195863024830054],\n [-3.435458081645966, 1.656882398925444],\n [ 0.924824032467446, 1.251116432209336]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_laplace_0(self):\n assert_equal(random.laplace(scale=0), 0)\n assert_raises(ValueError, random.laplace, scale=-0.)\n\n def test_logistic(self):\n random = Generator(MT19937(self.seed))\n actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))\n desired = np.array([[-4.338584631510999, 1.890171436749954],\n [-4.64547787337966 , 2.514545562919217],\n [ 1.495389489198666, 1.967827627577474]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_lognormal(self):\n random = Generator(MT19937(self.seed))\n actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))\n desired = np.array([[ 0.0268252166335, 13.9534486483053],\n [ 0.1204014788936, 2.2422077497792],\n [ 4.2484199496128, 12.0093343977523]])\n assert_array_almost_equal(actual, desired, decimal=13)\n\n def test_lognormal_0(self):\n assert_equal(random.lognormal(sigma=0), 1)\n assert_raises(ValueError, random.lognormal, sigma=-0.)\n\n def test_logseries(self):\n random = Generator(MT19937(self.seed))\n actual = random.logseries(p=.923456789, size=(3, 2))\n desired = np.array([[14, 17],\n [3, 18],\n [5, 1]])\n assert_array_equal(actual, desired)\n\n def test_logseries_exceptions(self):\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, random.logseries, np.nan)\n assert_raises(ValueError, random.logseries, [np.nan] * 10)\n\n def test_multinomial(self):\n random = Generator(MT19937(self.seed))\n actual = random.multinomial(20, [1 / 6.] 
* 6, size=(3, 2))\n desired = np.array([[[1, 5, 1, 6, 4, 3],\n [4, 2, 6, 2, 4, 2]],\n [[5, 3, 2, 6, 3, 1],\n [4, 4, 0, 2, 3, 7]],\n [[6, 3, 1, 5, 3, 2],\n [5, 5, 3, 1, 2, 4]]])\n assert_array_equal(actual, desired)\n\n @pytest.mark.parametrize(\"method\", [\"svd\", \"eigh\", \"cholesky\"])\n def test_multivariate_normal(self, method):\n random = Generator(MT19937(self.seed))\n mean = (.123456789, 10)\n cov = [[1, 0], [0, 1]]\n size = (3, 2)\n actual = random.multivariate_normal(mean, cov, size, method=method)\n desired = np.array([[[-1.747478062846581, 11.25613495182354 ],\n [-0.9967333370066214, 10.342002097029821 ]],\n [[ 0.7850019631242964, 11.181113712443013 ],\n [ 0.8901349653255224, 8.873825399642492 ]],\n [[ 0.7130260107430003, 9.551628690083056 ],\n [ 0.7127098726541128, 11.991709234143173 ]]])\n\n assert_array_almost_equal(actual, desired, decimal=15)\n\n # Check for default size, was raising deprecation warning\n actual = random.multivariate_normal(mean, cov, method=method)\n desired = np.array([0.233278563284287, 9.424140804347195])\n assert_array_almost_equal(actual, desired, decimal=15)\n # Check that non symmetric covariance input raises exception when\n # check_valid='raises' if using default svd method.\n mean = [0, 0]\n cov = [[1, 2], [1, 2]]\n assert_raises(ValueError, random.multivariate_normal, mean, cov,\n check_valid='raise')\n\n # Check that non positive-semidefinite covariance warns with\n # RuntimeWarning\n cov = [[1, 2], [2, 1]]\n assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)\n assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,\n method='eigh')\n assert_raises(LinAlgError, random.multivariate_normal, mean, cov,\n method='cholesky')\n\n # and that it doesn't warn with RuntimeWarning check_valid='ignore'\n assert_no_warnings(random.multivariate_normal, mean, cov,\n check_valid='ignore')\n\n # and that it raises with RuntimeWarning check_valid='raises'\n assert_raises(ValueError, random.multivariate_normal, mean, cov,\n check_valid='raise')\n assert_raises(ValueError, random.multivariate_normal, mean, cov,\n check_valid='raise', method='eigh')\n\n # check degenerate samples from singular covariance matrix\n cov = [[1, 1], [1, 1]]\n if method in ('svd', 'eigh'):\n samples = random.multivariate_normal(mean, cov, size=(3, 2),\n method=method)\n assert_array_almost_equal(samples[..., 0], samples[..., 1],\n decimal=6)\n else:\n assert_raises(LinAlgError, random.multivariate_normal, mean, cov,\n method='cholesky')\n\n cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)\n with suppress_warnings() as sup:\n random.multivariate_normal(mean, cov, method=method)\n w = sup.record(RuntimeWarning)\n assert len(w) == 0\n\n mu = np.zeros(2)\n cov = np.eye(2)\n assert_raises(ValueError, random.multivariate_normal, mean, cov,\n check_valid='other')\n assert_raises(ValueError, random.multivariate_normal,\n np.zeros((2, 1, 1)), cov)\n assert_raises(ValueError, random.multivariate_normal,\n mu, np.empty((3, 2)))\n assert_raises(ValueError, random.multivariate_normal,\n mu, np.eye(3))\n\n @pytest.mark.parametrize(\"method\", [\"svd\", \"eigh\", \"cholesky\"])\n def test_multivariate_normal_basic_stats(self, method):\n random = Generator(MT19937(self.seed))\n n_s = 1000\n mean = np.array([1, 2])\n cov = np.array([[2, 1], [1, 2]])\n s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)\n s_center = s - mean\n cov_emp = (s_center.T @ s_center) / (n_s - 1)\n # these are pretty loose and are only designed to detect major errors\n assert 
np.all(np.abs(s_center.mean(-2)) < 0.1)\n assert np.all(np.abs(cov_emp - cov) < 0.2)\n\n def test_negative_binomial(self):\n random = Generator(MT19937(self.seed))\n actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))\n desired = np.array([[543, 727],\n [775, 760],\n [600, 674]])\n assert_array_equal(actual, desired)\n\n def test_negative_binomial_exceptions(self):\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, random.negative_binomial, 100, np.nan)\n assert_raises(ValueError, random.negative_binomial, 100,\n [np.nan] * 10)\n\n def test_negative_binomial_p0_exception(self):\n # Verify that p=0 raises an exception.\n with assert_raises(ValueError):\n x = random.negative_binomial(1, 0)\n\n def test_noncentral_chisquare(self):\n random = Generator(MT19937(self.seed))\n actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))\n desired = np.array([[ 1.70561552362133, 15.97378184942111],\n [13.71483425173724, 20.17859633310629],\n [11.3615477156643 , 3.67891108738029]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))\n desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],\n [1.14554372041263e+00, 1.38187755933435e-03],\n [1.90659181905387e+00, 1.21772577941822e+00]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n random = Generator(MT19937(self.seed))\n actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))\n desired = np.array([[0.82947954590419, 1.80139670767078],\n [6.58720057417794, 7.00491463609814],\n [6.31101879073157, 6.30982307753005]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_noncentral_f(self):\n random = Generator(MT19937(self.seed))\n actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,\n size=(3, 2))\n desired = np.array([[0.060310671139 , 0.23866058175939],\n [0.86860246709073, 0.2668510459738 ],\n [0.23375780078364, 1.88922102885943]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_noncentral_f_nan(self):\n random = Generator(MT19937(self.seed))\n actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)\n assert np.isnan(actual)\n\n def test_normal(self):\n random = Generator(MT19937(self.seed))\n actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))\n desired = np.array([[-3.618412914693162, 2.635726692647081],\n [-2.116923463013243, 0.807460983059643],\n [ 1.446547137248593, 2.485684213886024]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_normal_0(self):\n assert_equal(random.normal(scale=0), 0)\n assert_raises(ValueError, random.normal, scale=-0.)\n\n def test_pareto(self):\n random = Generator(MT19937(self.seed))\n actual = random.pareto(a=.123456789, size=(3, 2))\n desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],\n [7.2640150889064703e-01, 3.4650454783825594e+05],\n [4.5852344481994740e+04, 6.5851383009539105e+07]])\n # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this\n # matrix differs by 24 nulps. 
Discussion:\n # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html\n # Consensus is that this is probably some gcc quirk that affects\n # rounding but not in any important way, so we just use a looser\n # tolerance on this test:\n np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)\n\n def test_poisson(self):\n random = Generator(MT19937(self.seed))\n actual = random.poisson(lam=.123456789, size=(3, 2))\n desired = np.array([[0, 0],\n [0, 0],\n [0, 0]])\n assert_array_equal(actual, desired)\n\n def test_poisson_exceptions(self):\n lambig = np.iinfo('int64').max\n lamneg = -1\n assert_raises(ValueError, random.poisson, lamneg)\n assert_raises(ValueError, random.poisson, [lamneg] * 10)\n assert_raises(ValueError, random.poisson, lambig)\n assert_raises(ValueError, random.poisson, [lambig] * 10)\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, random.poisson, np.nan)\n assert_raises(ValueError, random.poisson, [np.nan] * 10)\n\n def test_power(self):\n random = Generator(MT19937(self.seed))\n actual = random.power(a=.123456789, size=(3, 2))\n desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],\n [2.482442984543471e-10, 1.527108843266079e-01],\n [8.188283434244285e-02, 3.950547209346948e-01]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_rayleigh(self):\n random = Generator(MT19937(self.seed))\n actual = random.rayleigh(scale=10, size=(3, 2))\n desired = np.array([[ 4.51734079831581, 15.6802442485758 ],\n [ 4.19850651287094, 17.08718809823704],\n [14.7907457708776 , 15.85545333419775]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_rayleigh_0(self):\n assert_equal(random.rayleigh(scale=0), 0)\n assert_raises(ValueError, random.rayleigh, scale=-0.)\n\n def test_standard_cauchy(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_cauchy(size=(3, 2))\n desired = np.array([[-1.489437778266206, -3.275389641569784],\n [ 0.560102864910406, -0.680780916282552],\n [-1.314912905226277, 0.295852965660225]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_standard_exponential(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_exponential(size=(3, 2), method='inv')\n desired = np.array([[0.102031839440643, 1.229350298474972],\n [0.088137284693098, 1.459859985522667],\n [1.093830802293668, 1.256977002164613]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_standard_expoential_type_error(self):\n assert_raises(TypeError, random.standard_exponential, dtype=np.int32)\n\n def test_standard_gamma(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_gamma(shape=3, size=(3, 2))\n desired = np.array([[0.62970724056362, 1.22379851271008],\n [3.899412530884 , 4.12479964250139],\n [3.74994102464584, 3.74929307690815]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_standard_gammma_scalar_float(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_gamma(3, dtype=np.float32)\n desired = 2.9242148399353027\n assert_array_almost_equal(actual, desired, decimal=6)\n\n def test_standard_gamma_float(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_gamma(shape=3, size=(3, 2))\n desired = np.array([[0.62971, 1.2238 ],\n [3.89941, 4.1248 ],\n [3.74994, 3.74929]])\n assert_array_almost_equal(actual, desired, decimal=5)\n\n def test_standard_gammma_float_out(self):\n actual = np.zeros((3, 2), dtype=np.float32)\n random = 
Generator(MT19937(self.seed))\n random.standard_gamma(10.0, out=actual, dtype=np.float32)\n desired = np.array([[10.14987, 7.87012],\n [ 9.46284, 12.56832],\n [13.82495, 7.81533]], dtype=np.float32)\n assert_array_almost_equal(actual, desired, decimal=5)\n\n random = Generator(MT19937(self.seed))\n random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)\n assert_array_almost_equal(actual, desired, decimal=5)\n\n def test_standard_gamma_unknown_type(self):\n assert_raises(TypeError, random.standard_gamma, 1.,\n dtype='int32')\n\n def test_out_size_mismatch(self):\n out = np.zeros(10)\n assert_raises(ValueError, random.standard_gamma, 10.0, size=20,\n out=out)\n assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),\n out=out)\n\n def test_standard_gamma_0(self):\n assert_equal(random.standard_gamma(shape=0), 0)\n assert_raises(ValueError, random.standard_gamma, shape=-0.)\n\n def test_standard_normal(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_normal(size=(3, 2))\n desired = np.array([[-1.870934851846581, 1.25613495182354 ],\n [-1.120190126006621, 0.342002097029821],\n [ 0.661545174124296, 1.181113712443012]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_standard_normal_unsupported_type(self):\n assert_raises(TypeError, random.standard_normal, dtype=np.int32)\n\n def test_standard_t(self):\n random = Generator(MT19937(self.seed))\n actual = random.standard_t(df=10, size=(3, 2))\n desired = np.array([[-1.484666193042647, 0.30597891831161 ],\n [ 1.056684299648085, -0.407312602088507],\n [ 0.130704414281157, -2.038053410490321]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_triangular(self):\n random = Generator(MT19937(self.seed))\n actual = random.triangular(left=5.12, mode=10.23, right=20.34,\n size=(3, 2))\n desired = np.array([[ 7.86664070590917, 13.6313848513185 ],\n [ 7.68152445215983, 14.36169131136546],\n [13.16105603911429, 13.72341621856971]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_uniform(self):\n random = Generator(MT19937(self.seed))\n actual = random.uniform(low=1.23, high=10.54, size=(3, 2))\n desired = np.array([[2.13306255040998 , 7.816987531021207],\n [2.015436610109887, 8.377577533009589],\n [7.421792588856135, 7.891185744455209]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_uniform_range_bounds(self):\n fmin = np.finfo('float').min\n fmax = np.finfo('float').max\n\n func = random.uniform\n assert_raises(OverflowError, func, -np.inf, 0)\n assert_raises(OverflowError, func, 0, np.inf)\n assert_raises(OverflowError, func, fmin, fmax)\n assert_raises(OverflowError, func, [-np.inf], [0])\n assert_raises(OverflowError, func, [0], [np.inf])\n\n # (fmax / 1e17) - fmin is within range, so this should not throw\n # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >\n # DBL_MAX by increasing fmin a bit\n random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)\n\n def test_scalar_exception_propagation(self):\n # Tests that exceptions are correctly propagated in distributions\n # when called with objects that throw exceptions when converted to\n # scalars.\n #\n # Regression test for gh: 8865\n\n class ThrowingFloat(np.ndarray):\n def __float__(self):\n raise TypeError\n\n throwing_float = np.array(1.0).view(ThrowingFloat)\n assert_raises(TypeError, random.uniform, throwing_float,\n throwing_float)\n\n class ThrowingInteger(np.ndarray):\n def __int__(self):\n raise TypeError\n\n throwing_int = 
np.array(1).view(ThrowingInteger)\n assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)\n\n def test_vonmises(self):\n random = Generator(MT19937(self.seed))\n actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))\n desired = np.array([[ 1.107972248690106, 2.841536476232361],\n [ 1.832602376042457, 1.945511926976032],\n [-0.260147475776542, 2.058047492231698]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_vonmises_small(self):\n # check infinite loop, gh-4720\n random = Generator(MT19937(self.seed))\n r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)\n assert_(np.isfinite(r).all())\n\n def test_vonmises_nan(self):\n random = Generator(MT19937(self.seed))\n r = random.vonmises(mu=0., kappa=np.nan)\n assert_(np.isnan(r))\n\n def test_wald(self):\n random = Generator(MT19937(self.seed))\n actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))\n desired = np.array([[0.26871721804551, 3.2233942732115 ],\n [2.20328374987066, 2.40958405189353],\n [2.07093587449261, 0.73073890064369]])\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_weibull(self):\n random = Generator(MT19937(self.seed))\n actual = random.weibull(a=1.23, size=(3, 2))\n desired = np.array([[0.138613914769468, 1.306463419753191],\n [0.111623365934763, 1.446570494646721],\n [1.257145775276011, 1.914247725027957]])\n assert_array_almost_equal(actual, desired, decimal=15)\n\n def test_weibull_0(self):\n random = Generator(MT19937(self.seed))\n assert_equal(random.weibull(a=0, size=12), np.zeros(12))\n assert_raises(ValueError, random.weibull, a=-0.)\n\n def test_zipf(self):\n random = Generator(MT19937(self.seed))\n actual = random.zipf(a=1.23, size=(3, 2))\n desired = np.array([[ 1, 1],\n [ 10, 867],\n [354, 2]])\n assert_array_equal(actual, desired)\n\n\nclass TestBroadcast:\n # tests that functions that broadcast behave\n # correctly when presented with non-scalar arguments\n def setup(self):\n self.seed = 123456789\n\n\n def test_uniform(self):\n random = Generator(MT19937(self.seed))\n low = [0]\n high = [1]\n uniform = random.uniform\n desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])\n\n random = Generator(MT19937(self.seed))\n actual = random.uniform(low * 3, high)\n assert_array_almost_equal(actual, desired, decimal=14)\n\n random = Generator(MT19937(self.seed))\n actual = random.uniform(low, high * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_normal(self):\n loc = [0]\n scale = [1]\n bad_scale = [-1]\n random = Generator(MT19937(self.seed))\n desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])\n\n random = Generator(MT19937(self.seed))\n actual = random.normal(loc * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.normal, loc * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n normal = random.normal\n actual = normal(loc, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, normal, loc, bad_scale * 3)\n\n def test_beta(self):\n a = [1]\n b = [2]\n bad_a = [-1]\n bad_b = [-2]\n desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])\n\n random = Generator(MT19937(self.seed))\n beta = random.beta\n actual = beta(a * 3, b)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, beta, bad_a * 3, b)\n assert_raises(ValueError, beta, a * 3, bad_b)\n\n random = Generator(MT19937(self.seed))\n actual = random.beta(a, b * 3)\n 
assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_exponential(self):\n scale = [1]\n bad_scale = [-1]\n desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])\n\n random = Generator(MT19937(self.seed))\n actual = random.exponential(scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.exponential, bad_scale * 3)\n\n def test_standard_gamma(self):\n shape = [1]\n bad_shape = [-1]\n desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])\n\n random = Generator(MT19937(self.seed))\n std_gamma = random.standard_gamma\n actual = std_gamma(shape * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, std_gamma, bad_shape * 3)\n\n def test_gamma(self):\n shape = [1]\n scale = [2]\n bad_shape = [-1]\n bad_scale = [-2]\n desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])\n\n random = Generator(MT19937(self.seed))\n gamma = random.gamma\n actual = gamma(shape * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, gamma, bad_shape * 3, scale)\n assert_raises(ValueError, gamma, shape * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n gamma = random.gamma\n actual = gamma(shape, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, gamma, bad_shape, scale * 3)\n assert_raises(ValueError, gamma, shape, bad_scale * 3)\n\n def test_f(self):\n dfnum = [1]\n dfden = [2]\n bad_dfnum = [-1]\n bad_dfden = [-2]\n desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])\n\n random = Generator(MT19937(self.seed))\n f = random.f\n actual = f(dfnum * 3, dfden)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, f, bad_dfnum * 3, dfden)\n assert_raises(ValueError, f, dfnum * 3, bad_dfden)\n\n random = Generator(MT19937(self.seed))\n f = random.f\n actual = f(dfnum, dfden * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, f, bad_dfnum, dfden * 3)\n assert_raises(ValueError, f, dfnum, bad_dfden * 3)\n\n def test_noncentral_f(self):\n dfnum = [2]\n dfden = [3]\n nonc = [4]\n bad_dfnum = [0]\n bad_dfden = [-1]\n bad_nonc = [-2]\n desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])\n\n random = Generator(MT19937(self.seed))\n nonc_f = random.noncentral_f\n actual = nonc_f(dfnum * 3, dfden, nonc)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))\n\n assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)\n assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)\n assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)\n\n random = Generator(MT19937(self.seed))\n nonc_f = random.noncentral_f\n actual = nonc_f(dfnum, dfden * 3, nonc)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)\n assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)\n assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)\n\n random = Generator(MT19937(self.seed))\n nonc_f = random.noncentral_f\n actual = nonc_f(dfnum, dfden, nonc * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)\n assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)\n assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)\n\n def 
test_noncentral_f_small_df(self):\n random = Generator(MT19937(self.seed))\n desired = np.array([0.04714867120827, 0.1239390327694])\n actual = random.noncentral_f(0.9, 0.9, 2, size=2)\n assert_array_almost_equal(actual, desired, decimal=14)\n\n def test_chisquare(self):\n df = [1]\n bad_df = [-1]\n desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])\n\n random = Generator(MT19937(self.seed))\n actual = random.chisquare(df * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.chisquare, bad_df * 3)\n\n def test_noncentral_chisquare(self):\n df = [1]\n nonc = [2]\n bad_df = [-1]\n bad_nonc = [-2]\n desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])\n\n random = Generator(MT19937(self.seed))\n nonc_chi = random.noncentral_chisquare\n actual = nonc_chi(df * 3, nonc)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)\n assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)\n\n random = Generator(MT19937(self.seed))\n nonc_chi = random.noncentral_chisquare\n actual = nonc_chi(df, nonc * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)\n assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)\n\n def test_standard_t(self):\n df = [1]\n bad_df = [-1]\n desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])\n\n random = Generator(MT19937(self.seed))\n actual = random.standard_t(df * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.standard_t, bad_df * 3)\n\n def test_vonmises(self):\n mu = [2]\n kappa = [1]\n bad_kappa = [-1]\n desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])\n\n random = Generator(MT19937(self.seed))\n actual = random.vonmises(mu * 3, kappa)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)\n\n random = Generator(MT19937(self.seed))\n actual = random.vonmises(mu, kappa * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)\n\n def test_pareto(self):\n a = [1]\n bad_a = [-1]\n desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])\n\n random = Generator(MT19937(self.seed))\n actual = random.pareto(a * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.pareto, bad_a * 3)\n\n def test_weibull(self):\n a = [1]\n bad_a = [-1]\n desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])\n\n random = Generator(MT19937(self.seed))\n actual = random.weibull(a * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.weibull, bad_a * 3)\n\n def test_power(self):\n a = [1]\n bad_a = [-1]\n desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])\n\n random = Generator(MT19937(self.seed))\n actual = random.power(a * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.power, bad_a * 3)\n\n def test_laplace(self):\n loc = [0]\n scale = [1]\n bad_scale = [-1]\n desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])\n\n random = Generator(MT19937(self.seed))\n laplace = random.laplace\n actual = laplace(loc * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, laplace, loc * 3, bad_scale)\n\n random = 
Generator(MT19937(self.seed))\n laplace = random.laplace\n actual = laplace(loc, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, laplace, loc, bad_scale * 3)\n\n def test_gumbel(self):\n loc = [0]\n scale = [1]\n bad_scale = [-1]\n desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])\n\n random = Generator(MT19937(self.seed))\n gumbel = random.gumbel\n actual = gumbel(loc * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, gumbel, loc * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n gumbel = random.gumbel\n actual = gumbel(loc, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, gumbel, loc, bad_scale * 3)\n\n def test_logistic(self):\n loc = [0]\n scale = [1]\n bad_scale = [-1]\n desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])\n\n random = Generator(MT19937(self.seed))\n actual = random.logistic(loc * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.logistic, loc * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n actual = random.logistic(loc, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.logistic, loc, bad_scale * 3)\n assert_equal(random.logistic(1.0, 0.0), 1.0)\n\n def test_lognormal(self):\n mean = [0]\n sigma = [1]\n bad_sigma = [-1]\n desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])\n\n random = Generator(MT19937(self.seed))\n lognormal = random.lognormal\n actual = lognormal(mean * 3, sigma)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, lognormal, mean * 3, bad_sigma)\n\n random = Generator(MT19937(self.seed))\n actual = random.lognormal(mean, sigma * 3)\n assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)\n\n def test_rayleigh(self):\n scale = [1]\n bad_scale = [-1]\n desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])\n\n random = Generator(MT19937(self.seed))\n actual = random.rayleigh(scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.rayleigh, bad_scale * 3)\n\n def test_wald(self):\n mean = [0.5]\n scale = [1]\n bad_mean = [0]\n bad_scale = [-2]\n desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])\n\n random = Generator(MT19937(self.seed))\n actual = random.wald(mean * 3, scale)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.wald, bad_mean * 3, scale)\n assert_raises(ValueError, random.wald, mean * 3, bad_scale)\n\n random = Generator(MT19937(self.seed))\n actual = random.wald(mean, scale * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, random.wald, bad_mean, scale * 3)\n assert_raises(ValueError, random.wald, mean, bad_scale * 3)\n\n def test_triangular(self):\n left = [1]\n right = [3]\n mode = [2]\n bad_left_one = [3]\n bad_mode_one = [4]\n bad_left_two, bad_mode_two = right * 2\n desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])\n\n random = Generator(MT19937(self.seed))\n triangular = random.triangular\n actual = triangular(left * 3, mode, right)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)\n assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)\n assert_raises(ValueError, 
triangular, bad_left_two * 3, bad_mode_two,\n right)\n\n random = Generator(MT19937(self.seed))\n triangular = random.triangular\n actual = triangular(left, mode * 3, right)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)\n assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)\n assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,\n right)\n\n random = Generator(MT19937(self.seed))\n triangular = random.triangular\n actual = triangular(left, mode, right * 3)\n assert_array_almost_equal(actual, desired, decimal=14)\n assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)\n assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)\n assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,\n right * 3)\n\n assert_raises(ValueError, triangular, 10., 0., 20.)\n assert_raises(ValueError, triangular, 10., 25., 20.)\n assert_raises(ValueError, triangular, 10., 10., 10.)\n\n def test_binomial(self):\n n = [1]\n p = [0.5]\n bad_n = [-1]\n bad_p_one = [-1]\n bad_p_two = [1.5]\n desired = np.array([0, 0, 1])\n\n random = Generator(MT19937(self.seed))\n binom = random.binomial\n actual = binom(n * 3, p)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, binom, bad_n * 3, p)\n assert_raises(ValueError, binom, n * 3, bad_p_one)\n assert_raises(ValueError, binom, n * 3, bad_p_two)\n\n random = Generator(MT19937(self.seed))\n actual = random.binomial(n, p * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, binom, bad_n, p * 3)\n assert_raises(ValueError, binom, n, bad_p_one * 3)\n assert_raises(ValueError, binom, n, bad_p_two * 3)\n\n def test_negative_binomial(self):\n n = [1]\n p = [0.5]\n bad_n = [-1]\n bad_p_one = [-1]\n bad_p_two = [1.5]\n desired = np.array([0, 2, 1], dtype=np.int64)\n\n random = Generator(MT19937(self.seed))\n neg_binom = random.negative_binomial\n actual = neg_binom(n * 3, p)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, neg_binom, bad_n * 3, p)\n assert_raises(ValueError, neg_binom, n * 3, bad_p_one)\n assert_raises(ValueError, neg_binom, n * 3, bad_p_two)\n\n random = Generator(MT19937(self.seed))\n neg_binom = random.negative_binomial\n actual = neg_binom(n, p * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, neg_binom, bad_n, p * 3)\n assert_raises(ValueError, neg_binom, n, bad_p_one * 3)\n assert_raises(ValueError, neg_binom, n, bad_p_two * 3)\n\n def test_poisson(self):\n\n lam = [1]\n bad_lam_one = [-1]\n desired = np.array([0, 0, 3])\n\n random = Generator(MT19937(self.seed))\n max_lam = random._poisson_lam_max\n bad_lam_two = [max_lam * 2]\n poisson = random.poisson\n actual = poisson(lam * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, poisson, bad_lam_one * 3)\n assert_raises(ValueError, poisson, bad_lam_two * 3)\n\n def test_zipf(self):\n a = [2]\n bad_a = [0]\n desired = np.array([1, 8, 1])\n\n random = Generator(MT19937(self.seed))\n zipf = random.zipf\n actual = zipf(a * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, zipf, bad_a * 3)\n with np.errstate(invalid='ignore'):\n assert_raises(ValueError, zipf, np.nan)\n assert_raises(ValueError, zipf, [0, 0, np.nan])\n\n def test_geometric(self):\n p = [0.5]\n bad_p_one = [-1]\n bad_p_two = [1.5]\n desired = np.array([1, 1, 3])\n\n random = Generator(MT19937(self.seed))\n geometric = random.geometric\n actual = geometric(p * 3)\n assert_array_equal(actual, 
desired)\n assert_raises(ValueError, geometric, bad_p_one * 3)\n assert_raises(ValueError, geometric, bad_p_two * 3)\n\n def test_hypergeometric(self):\n ngood = [1]\n nbad = [2]\n nsample = [2]\n bad_ngood = [-1]\n bad_nbad = [-2]\n bad_nsample_one = [-1]\n bad_nsample_two = [4]\n desired = np.array([0, 0, 1])\n\n random = Generator(MT19937(self.seed))\n actual = random.hypergeometric(ngood * 3, nbad, nsample)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)\n assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)\n assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)\n assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)\n\n random = Generator(MT19937(self.seed))\n actual = random.hypergeometric(ngood, nbad * 3, nsample)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)\n assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)\n assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)\n assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)\n\n random = Generator(MT19937(self.seed))\n hypergeom = random.hypergeometric\n actual = hypergeom(ngood, nbad, nsample * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)\n assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)\n assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)\n assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)\n\n assert_raises(ValueError, hypergeom, -1, 10, 20)\n assert_raises(ValueError, hypergeom, 10, -1, 20)\n assert_raises(ValueError, hypergeom, 10, 10, -1)\n assert_raises(ValueError, hypergeom, 10, 10, 25)\n\n # ValueError for arguments that are too big.\n assert_raises(ValueError, hypergeom, 2**30, 10, 20)\n assert_raises(ValueError, hypergeom, 999, 2**31, 50)\n assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)\n\n def test_logseries(self):\n p = [0.5]\n bad_p_one = [2]\n bad_p_two = [-1]\n desired = np.array([1, 1, 1])\n\n random = Generator(MT19937(self.seed))\n logseries = random.logseries\n actual = logseries(p * 3)\n assert_array_equal(actual, desired)\n assert_raises(ValueError, logseries, bad_p_one * 3)\n assert_raises(ValueError, logseries, bad_p_two * 3)\n\n def test_multinomial(self):\n random = Generator(MT19937(self.seed))\n actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))\n desired = np.array([[[0, 0, 2, 1, 2, 0],\n [2, 3, 6, 4, 2, 3]],\n [[1, 0, 1, 0, 2, 1],\n [7, 2, 2, 1, 4, 4]],\n [[0, 2, 0, 1, 2, 0],\n [3, 2, 3, 3, 4, 5]]], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n random = Generator(MT19937(self.seed))\n actual = random.multinomial([5, 20], [1 / 6.] 
* 6)\n desired = np.array([[0, 0, 2, 1, 2, 0],\n [2, 3, 6, 4, 2, 3]], dtype=np.int64)\n assert_array_equal(actual, desired)\n\n\nclass TestThread:\n # make sure each state produces the same sequence even in threads\n def setup(self):\n self.seeds = range(4)\n\n def check_function(self, function, sz):\n from threading import Thread\n\n out1 = np.empty((len(self.seeds),) + sz)\n out2 = np.empty((len(self.seeds),) + sz)\n\n # threaded generation\n t = [Thread(target=function, args=(Generator(MT19937(s)), o))\n for s, o in zip(self.seeds, out1)]\n [x.start() for x in t]\n [x.join() for x in t]\n\n # the same serial\n for s, o in zip(self.seeds, out2):\n function(Generator(MT19937(s)), o)\n\n # these platforms change x87 fpu precision mode in threads\n if np.intp().dtype.itemsize == 4 and sys.platform == \"win32\":\n assert_array_almost_equal(out1, out2)\n else:\n assert_array_equal(out1, out2)\n\n def test_normal(self):\n def gen_random(state, out):\n out[...] = state.normal(size=10000)\n\n self.check_function(gen_random, sz=(10000,))\n\n def test_exp(self):\n def gen_random(state, out):\n out[...] = state.exponential(scale=np.ones((100, 1000)))\n\n self.check_function(gen_random, sz=(100, 1000))\n\n def test_multinomial(self):\n def gen_random(state, out):\n out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)\n\n self.check_function(gen_random, sz=(10000, 6))\n\n\n# See Issue #4263\nclass TestSingleEltArrayInput:\n def setup(self):\n self.argOne = np.array([2])\n self.argTwo = np.array([3])\n self.argThree = np.array([4])\n self.tgtShape = (1,)\n\n def test_one_arg_funcs(self):\n funcs = (random.exponential, random.standard_gamma,\n random.chisquare, random.standard_t,\n random.pareto, random.weibull,\n random.power, random.rayleigh,\n random.poisson, random.zipf,\n random.geometric, random.logseries)\n\n probfuncs = (random.geometric, random.logseries)\n\n for func in funcs:\n if func in probfuncs: # p < 1.0\n out = func(np.array([0.5]))\n\n else:\n out = func(self.argOne)\n\n assert_equal(out.shape, self.tgtShape)\n\n def test_two_arg_funcs(self):\n funcs = (random.uniform, random.normal,\n random.beta, random.gamma,\n random.f, random.noncentral_chisquare,\n random.vonmises, random.laplace,\n random.gumbel, random.logistic,\n random.lognormal, random.wald,\n random.binomial, random.negative_binomial)\n\n probfuncs = (random.binomial, random.negative_binomial)\n\n for func in funcs:\n if func in probfuncs: # p <= 1\n argTwo = np.array([0.5])\n\n else:\n argTwo = self.argTwo\n\n out = func(self.argOne, argTwo)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(self.argOne[0], argTwo)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(self.argOne, argTwo[0])\n assert_equal(out.shape, self.tgtShape)\n\n def test_integers(self, endpoint):\n itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,\n np.int32, np.uint32, np.int64, np.uint64]\n func = random.integers\n high = np.array([1])\n low = np.array([0])\n\n for dt in itype:\n out = func(low, high, endpoint=endpoint, dtype=dt)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(low[0], high, endpoint=endpoint, dtype=dt)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(low, high[0], endpoint=endpoint, dtype=dt)\n assert_equal(out.shape, self.tgtShape)\n\n def test_three_arg_funcs(self):\n funcs = [random.noncentral_f, random.triangular,\n random.hypergeometric]\n\n for func in funcs:\n out = func(self.argOne, self.argTwo, self.argThree)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(self.argOne[0], 
self.argTwo, self.argThree)\n assert_equal(out.shape, self.tgtShape)\n\n out = func(self.argOne, self.argTwo[0], self.argThree)\n assert_equal(out.shape, self.tgtShape)\n\n\[email protected](\"config\", JUMP_TEST_DATA)\ndef test_jumped(config):\n # Each config contains the initial seed, a number of raw steps\n # the md5 hashes of the initial and the final states' keys and\n # the position of of the initial and the final state.\n # These were produced using the original C implementation.\n seed = config[\"seed\"]\n steps = config[\"steps\"]\n\n mt19937 = MT19937(seed)\n # Burn step\n mt19937.random_raw(steps)\n key = mt19937.state[\"state\"][\"key\"]\n if sys.byteorder == 'big':\n key = key.byteswap()\n md5 = hashlib.md5(key)\n assert mt19937.state[\"state\"][\"pos\"] == config[\"initial\"][\"pos\"]\n assert md5.hexdigest() == config[\"initial\"][\"key_md5\"]\n\n jumped = mt19937.jumped()\n key = jumped.state[\"state\"][\"key\"]\n if sys.byteorder == 'big':\n key = key.byteswap()\n md5 = hashlib.md5(key)\n assert jumped.state[\"state\"][\"pos\"] == config[\"jumped\"][\"pos\"]\n assert md5.hexdigest() == config[\"jumped\"][\"key_md5\"]\n" ]
[ [ "numpy.sum", "numpy.ones", "numpy.testing.assert_equal", "numpy.dtype", "numpy.asarray", "numpy.testing.assert_no_warnings", "numpy.nextafter", "numpy.testing.assert_warns", "numpy.ascontiguousarray", "numpy.isfinite", "numpy.vstack", "numpy.abs", "numpy.testing.assert_array_equal", "numpy.uint32", "numpy.random.SeedSequence", "numpy.isnan", "numpy.unique", "numpy.eye", "numpy.atleast_2d", "numpy.zeros", "numpy.testing.suppress_warnings", "numpy.arange", "numpy.testing.assert_array_almost_equal_nulp", "numpy.testing.assert_array_almost_equal", "numpy.all", "numpy.finfo", "numpy.intp", "numpy.testing.assert_raises", "numpy.empty", "numpy.errstate", "numpy.iinfo", "numpy.testing.assert_allclose", "numpy.array", "numpy.full", "numpy.random.MT19937", "numpy.testing.assert_" ] ]
daobook/ray
[ "c18caa4db36d466718bdbcb2229aa0b2dc03da1f" ]
[ "rllib/examples/env/parametric_actions_cartpole.py" ]
[ "import gym\nfrom gym.spaces import Box, Dict, Discrete\nimport numpy as np\nimport random\n\n\nclass ParametricActionsCartPole(gym.Env):\n \"\"\"Parametric action version of CartPole.\n\n In this env there are only ever two valid actions, but we pretend there are\n actually up to `max_avail_actions` actions that can be taken, and the two\n valid actions are randomly hidden among this set.\n\n At each step, we emit a dict of:\n - the actual cart observation\n - a mask of valid actions (e.g., [0, 0, 1, 0, 0, 1] for 6 max avail)\n - the list of action embeddings (w/ zeroes for invalid actions) (e.g.,\n [[0, 0],\n [0, 0],\n [-0.2322, -0.2569],\n [0, 0],\n [0, 0],\n [0.7878, 1.2297]] for max_avail_actions=6)\n\n In a real environment, the actions embeddings would be larger than two\n units of course, and also there would be a variable number of valid actions\n per step instead of always [LEFT, RIGHT].\n \"\"\"\n\n def __init__(self, max_avail_actions):\n # Use simple random 2-unit action embeddings for [LEFT, RIGHT]\n self.left_action_embed = np.random.randn(2)\n self.right_action_embed = np.random.randn(2)\n self.action_space = Discrete(max_avail_actions)\n self.wrapped = gym.make(\"CartPole-v0\")\n self.observation_space = Dict({\n \"action_mask\": Box(\n 0, 1, shape=(max_avail_actions, ), dtype=np.float32),\n \"avail_actions\": Box(-10, 10, shape=(max_avail_actions, 2)),\n \"cart\": self.wrapped.observation_space,\n })\n\n def update_avail_actions(self):\n self.action_assignments = np.array(\n [[0., 0.]] * self.action_space.n, dtype=np.float32)\n self.action_mask = np.array(\n [0.] * self.action_space.n, dtype=np.float32)\n self.left_idx, self.right_idx = random.sample(\n range(self.action_space.n), 2)\n self.action_assignments[self.left_idx] = self.left_action_embed\n self.action_assignments[self.right_idx] = self.right_action_embed\n self.action_mask[self.left_idx] = 1\n self.action_mask[self.right_idx] = 1\n\n def reset(self):\n self.update_avail_actions()\n return {\n \"action_mask\": self.action_mask,\n \"avail_actions\": self.action_assignments,\n \"cart\": self.wrapped.reset(),\n }\n\n def step(self, action):\n if action == self.left_idx:\n actual_action = 0\n elif action == self.right_idx:\n actual_action = 1\n else:\n raise ValueError(\n \"Chosen action was not one of the non-zero action embeddings\",\n action, self.action_assignments, self.action_mask,\n self.left_idx, self.right_idx)\n orig_obs, rew, done, info = self.wrapped.step(actual_action)\n self.update_avail_actions()\n self.action_mask = self.action_mask.astype(np.float32)\n obs = {\n \"action_mask\": self.action_mask,\n \"avail_actions\": self.action_assignments,\n \"cart\": orig_obs,\n }\n return obs, rew, done, info\n\n\nclass ParametricActionsCartPoleNoEmbeddings(gym.Env):\n \"\"\"Same as the above ParametricActionsCartPole.\n\n However, action embeddings are not published inside observations,\n but will be learnt by the model.\n\n At each step, we emit a dict of:\n - the actual cart observation\n - a mask of valid actions (e.g., [0, 0, 1, 0, 0, 1] for 6 max avail)\n - action embeddings (w/ \"dummy embedding\" for invalid actions) are\n outsourced in the model and will be learned.\n \"\"\"\n\n def __init__(self, max_avail_actions):\n # Randomly set which two actions are valid and available.\n self.left_idx, self.right_idx = random.sample(\n range(max_avail_actions), 2)\n self.valid_avail_actions_mask = np.array(\n [0.] 
* max_avail_actions, dtype=np.float32)\n self.valid_avail_actions_mask[self.left_idx] = 1\n self.valid_avail_actions_mask[self.right_idx] = 1\n self.action_space = Discrete(max_avail_actions)\n self.wrapped = gym.make(\"CartPole-v0\")\n self.observation_space = Dict({\n \"valid_avail_actions_mask\": Box(0, 1, shape=(max_avail_actions, )),\n \"cart\": self.wrapped.observation_space,\n })\n\n def reset(self):\n return {\n \"valid_avail_actions_mask\": self.valid_avail_actions_mask,\n \"cart\": self.wrapped.reset(),\n }\n\n def step(self, action):\n if action == self.left_idx:\n actual_action = 0\n elif action == self.right_idx:\n actual_action = 1\n else:\n raise ValueError(\n \"Chosen action was not one of the non-zero action embeddings\",\n action, self.valid_avail_actions_mask, self.left_idx,\n self.right_idx)\n orig_obs, rew, done, info = self.wrapped.step(actual_action)\n obs = {\n \"valid_avail_actions_mask\": self.valid_avail_actions_mask,\n \"cart\": orig_obs,\n }\n return obs, rew, done, info\n" ]
[ [ "numpy.array", "numpy.random.randn" ] ]
stelselim/python-control
[ "d73b635d2b130af5c2829eefd59c99b9bd53fb01" ]
[ "doc/pvtol-nested.py" ]
[ "# pvtol-nested.py - inner/outer design for vectored thrust aircraft\n# RMM, 5 Sep 09\n#\n# This file works through a fairly complicated control design and\n# analysis, corresponding to the planar vertical takeoff and landing\n# (PVTOL) aircraft in Astrom and Murray, Chapter 11. It is intended\n# to demonstrate the basic functionality of the python-control\n# package.\n#\n\nfrom __future__ import print_function\n\nimport os\nimport matplotlib.pyplot as plt # MATLAB plotting functions\nfrom control.matlab import * # MATLAB-like functions\nimport numpy as np\n\n# System parameters\nm = 4 # mass of aircraft\nJ = 0.0475 # inertia around pitch axis\nr = 0.25 # distance to center of force\ng = 9.8 # gravitational constant\nc = 0.05 # damping factor (estimated)\n\n# Transfer functions for dynamics\nPi = tf([r], [J, 0, 0]) # inner loop (roll)\nPo = tf([1], [m, c, 0]) # outer loop (position)\n\n#\n# Inner loop control design\n#\n# This is the controller for the pitch dynamics. Goal is to have\n# fast response for the pitch dynamics so that we can use this as a \n# control for the lateral dynamics\n#\n\n# Design a simple lead controller for the system\nk, a, b = 200, 2, 50\nCi = k*tf([1, a], [1, b]) # lead compensator\nLi = Pi*Ci\n\n# Bode plot for the open loop process\nplt.figure(1) \nbode(Pi)\n\n# Bode plot for the loop transfer function, with margins\nplt.figure(2)\nbode(Li)\n\n# Compute out the gain and phase margins\n#! Not implemented\n# gm, pm, wcg, wcp = margin(Li)\n\n# Compute the sensitivity and complementary sensitivity functions\nSi = feedback(1, Li)\nTi = Li*Si\n\n# Check to make sure that the specification is met\nplt.figure(3)\ngangof4(Pi, Ci)\n\n# Compute out the actual transfer function from u1 to v1 (see L8.2 notes)\n# Hi = Ci*(1-m*g*Pi)/(1+Ci*Pi)\nHi = parallel(feedback(Ci, Pi), -m*g*feedback(Ci*Pi, 1))\n\nplt.figure(4)\nplt.clf()\nplt.subplot(221)\nbode(Hi)\n\n# Now design the lateral control system\na, b, K = 0.02, 5, 2\nCo = -K*tf([1, 0.3], [1, 10]) # another lead compensator\nLo = -m*g*Po*Co\n\nplt.figure(5)\nbode(Lo) # margin(Lo)\n\n# Finally compute the real outer-loop loop gain + responses\nL = Co*Hi*Po\nS = feedback(1, L)\nT = feedback(L, 1)\n\n# Compute stability margins\ngm, pm, wgc, wpc = margin(L)\nprint(\"Gain margin: %g at %g\" % (gm, wgc))\nprint(\"Phase margin: %g at %g\" % (pm, wpc))\n\nplt.figure(6)\nplt.clf()\nbode(L, np.logspace(-4, 3))\n\n# Add crossover line to the magnitude plot\n#\n# Note: in matplotlib before v2.1, the following code worked:\n#\n# plt.subplot(211); hold(True);\n# loglog([1e-4, 1e3], [1, 1], 'k-')\n#\n# In later versions of matplotlib the call to plt.subplot will clear the\n# axes and so we have to extract the axes that we want to use by hand.\n# In addition, hold() is deprecated so we no longer require it.\n#\nfor ax in plt.gcf().axes:\n if ax.get_label() == 'control-bode-magnitude':\n break\nax.semilogx([1e-4, 1e3], 20*np.log10([1, 1]), 'k-')\n\n#\n# Replot phase starting at -90 degrees\n#\n# Get the phase plot axes\nfor ax in plt.gcf().axes:\n if ax.get_label() == 'control-bode-phase':\n break\n\n# Recreate the frequency response and shift the phase\nmag, phase, w = freqresp(L, np.logspace(-4, 3))\nphase = phase - 360\n\n# Replot the phase by hand\nax.semilogx([1e-4, 1e3], [-180, -180], 'k-')\nax.semilogx(w, np.squeeze(phase), 'b-')\nax.axis([1e-4, 1e3, -360, 0])\nplt.xlabel('Frequency [deg]')\nplt.ylabel('Phase [deg]')\n# plt.set(gca, 'YTick', [-360, -270, -180, -90, 0])\n# plt.set(gca, 'XTick', [10^-4, 10^-2, 1, 100])\n\n#\n# 
Nyquist plot for complete design\n#\nplt.figure(7)\nplt.clf()\nnyquist(L, (0.0001, 1000))\nplt.axis([-700, 5300, -3000, 3000])\n\n# Add a box in the region we are going to expand\nplt.plot([-400, -400, 200, 200, -400], [-100, 100, 100, -100, -100], 'r-')\n\n# Expanded region \nplt.figure(8)\nplt.clf()\nplt.subplot(231)\nnyquist(L)\nplt.axis([-10, 5, -20, 20])\n\n# set up the color\ncolor = 'b'\n\n# Add arrows to the plot\n# H1 = L.evalfr(0.4); H2 = L.evalfr(0.41);\n# arrow([real(H1), imag(H1)], [real(H2), imag(H2)], AM_normal_arrowsize, \\\n# 'EdgeColor', color, 'FaceColor', color);\n\n# H1 = freqresp(L, 0.35); H2 = freqresp(L, 0.36);\n# arrow([real(H2), -imag(H2)], [real(H1), -imag(H1)], AM_normal_arrowsize, \\\n# 'EdgeColor', color, 'FaceColor', color);\n\nplt.figure(9)\nYvec, Tvec = step(T, np.linspace(0, 20))\nplt.plot(Tvec.T, Yvec.T)\n\nYvec, Tvec = step(Co*S, np.linspace(0, 20))\nplt.plot(Tvec.T, Yvec.T)\n\nplt.figure(10)\nplt.clf()\nP, Z = pzmap(T, plot=True, grid=True)\nprint(\"Closed loop poles and zeros: \", P, Z)\n\n# Gang of Four\nplt.figure(11)\nplt.clf()\ngangof4(Hi*Po, Co)\n\nif 'PYCONTROL_TEST_EXAMPLES' not in os.environ:\n plt.show()\n" ]
[ [ "numpy.squeeze", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "matplotlib.pyplot.gcf", "matplotlib.pyplot.clf", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "numpy.log10", "numpy.logspace", "matplotlib.pyplot.plot", "numpy.linspace", "matplotlib.pyplot.xlabel" ] ]
machineko/jax
[ "1e3c4833c97302caf6046ff99656b8ff21430b8d" ]
[ "tests/jet_test.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom functools import reduce, partial\n\nfrom absl.testing import absltest\nimport numpy as np\nimport unittest\n\nimport jax\nfrom jax import test_util as jtu\nimport jax.numpy as jnp\nimport jax.scipy.special\nfrom jax import random\nfrom jax import jacfwd, jit\nfrom jax.experimental import stax\nfrom jax.experimental.jet import jet, fact, zero_series\nfrom jax import lax\n\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n\ndef jvp_taylor(fun, primals, series):\n # Computes the Taylor series the slow way, with nested jvp.\n order, = set(map(len, series))\n primals = tuple(jnp.asarray(p) for p in primals)\n def composition(eps):\n taylor_terms = [sum([eps ** (i+1) * terms[i] / fact(i + 1)\n for i in range(len(terms))]) for terms in series]\n nudged_args = [(x + t).astype(x.dtype) for x, t in zip(primals, taylor_terms)]\n return fun(*nudged_args)\n primal_out = fun(*primals)\n terms_out = [repeated(jacfwd, i+1)(composition)(0.) for i in range(order)]\n return primal_out, terms_out\n\ndef repeated(f, n):\n def rfun(p):\n return reduce(lambda x, _: f(x), range(n), p)\n return rfun\n\ndef transform(lims, x):\n return x * (lims[1] - lims[0]) + lims[0]\n\nclass JetTest(jtu.JaxTestCase):\n\n def check_jet(self, fun, primals, series, atol=1e-5, rtol=1e-5,\n check_dtypes=True):\n y, terms = jet(fun, primals, series)\n expected_y, expected_terms = jvp_taylor(fun, primals, series)\n\n self.assertAllClose(y, expected_y, atol=atol, rtol=rtol,\n check_dtypes=check_dtypes)\n\n self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol,\n check_dtypes=check_dtypes)\n\n def check_jet_finite(self, fun, primals, series, atol=1e-5, rtol=1e-5,\n check_dtypes=True):\n\n y, terms = jet(fun, primals, series)\n expected_y, expected_terms = jvp_taylor(fun, primals, series)\n\n def _convert(x):\n return jnp.where(jnp.isfinite(x), x, jnp.nan)\n\n y = _convert(y)\n expected_y = _convert(expected_y)\n\n terms = _convert(jnp.asarray(terms))\n expected_terms = _convert(jnp.asarray(expected_terms))\n\n self.assertAllClose(y, expected_y, atol=atol, rtol=rtol,\n check_dtypes=check_dtypes)\n\n self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol,\n check_dtypes=check_dtypes)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_dot(self):\n M, K, N = 2, 3, 4\n order = 3\n rng = np.random.RandomState(0)\n x1 = rng.randn(M, K)\n x2 = rng.randn(K, N)\n primals = (x1, x2)\n terms_in1 = [rng.randn(*x1.shape) for _ in range(order)]\n terms_in2 = [rng.randn(*x2.shape) for _ in range(order)]\n series_in = (terms_in1, terms_in2)\n self.check_jet(jnp.dot, primals, series_in)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_conv(self):\n order = 3\n input_shape = (1, 5, 5, 1)\n key = random.PRNGKey(0)\n # TODO(duvenaud): Check all types of padding\n init_fun, apply_fun = stax.Conv(3, (2, 2), padding='VALID')\n _, (W, b) = init_fun(key, input_shape)\n\n rng = np.random.RandomState(0)\n\n x = rng.randn(*input_shape)\n primals = (W, b, 
x)\n\n series_in1 = [rng.randn(*W.shape) for _ in range(order)]\n series_in2 = [rng.randn(*b.shape) for _ in range(order)]\n series_in3 = [rng.randn(*x.shape) for _ in range(order)]\n\n series_in = (series_in1, series_in2, series_in3)\n\n def f(W, b, x):\n return apply_fun((W, b), x)\n\n self.check_jet(f, primals, series_in, check_dtypes=False)\n\n def unary_check(self, fun, lims=[-2, 2], order=3, dtype=None, atol=1e-4,\n rtol=1e-4):\n dims = 2, 3\n rng = np.random.RandomState(0)\n if dtype is None:\n primal_in = transform(lims, rng.rand(*dims))\n terms_in = [rng.randn(*dims) for _ in range(order)]\n else:\n rng = jtu.rand_uniform(rng, *lims)\n primal_in = rng(dims, dtype)\n terms_in = [rng(dims, dtype) for _ in range(order)]\n self.check_jet(fun, (primal_in,), (terms_in,), atol, rtol)\n\n def binary_check(self, fun, lims=[-2, 2], order=3, finite=True, dtype=None):\n dims = 2, 3\n rng = np.random.RandomState(0)\n if isinstance(lims, tuple):\n x_lims, y_lims = lims\n else:\n x_lims, y_lims = lims, lims\n if dtype is None:\n primal_in = (transform(x_lims, rng.rand(*dims)),\n transform(y_lims, rng.rand(*dims)))\n series_in = ([rng.randn(*dims) for _ in range(order)],\n [rng.randn(*dims) for _ in range(order)])\n else:\n rng = jtu.rand_uniform(rng, *lims)\n primal_in = (rng(dims, dtype),\n rng(dims, dtype))\n series_in = ([rng(dims, dtype) for _ in range(order)],\n [rng(dims, dtype) for _ in range(order)])\n if finite:\n self.check_jet(fun, primal_in, series_in, atol=1e-4, rtol=1e-4)\n else:\n self.check_jet_finite(fun, primal_in, series_in, atol=1e-4, rtol=1e-4)\n\n def unary_check_float0(self, fun, lims=[-2, 2], order=3, dtype=None):\n # like unary_check but for functions that output integers (so their tangent\n # type is float0 arrays)\n raise unittest.SkipTest(\"jet tests must be adapted for integer-output functions\")\n\n def binary_check_float0(self, fun, lims=[-2, 2], order=3, finite=True, dtype=None):\n # like binary_check but for functions that output integers (so their tangent\n # type is float0 arrays)\n raise unittest.SkipTest(\"jet tests must be adapted for integer-output functions\")\n\n def expit_check(self, lims=[-2, 2], order=3):\n dims = 2, 3\n rng = np.random.RandomState(0)\n primal_in = transform(lims, rng.rand(*dims))\n terms_in = [rng.randn(*dims) for _ in range(order)]\n\n primals = (primal_in, )\n series = (terms_in, )\n\n y, terms = jax.experimental.jet._expit_taylor(primals, series)\n expected_y, expected_terms = jvp_taylor(jax.scipy.special.expit, primals, series)\n\n atol = 1e-4\n rtol = 1e-4\n self.assertAllClose(y, expected_y, atol=atol, rtol=rtol)\n\n self.assertAllClose(terms, expected_terms, atol=atol, rtol=rtol)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_int_pow(self):\n for p in range(6):\n self.unary_check(lambda x: x ** p, lims=[-2, 2])\n self.unary_check(lambda x: x ** 10, lims=[0, 0])\n\n @jtu.skip_on_devices(\"tpu\")\n def test_is_finite(self): self.unary_check_float0(lax.is_finite)\n @jtu.skip_on_devices(\"tpu\")\n def test_and(self): self.binary_check_float0(lax.bitwise_and, dtype=np.bool_)\n @jtu.skip_on_devices(\"tpu\")\n def test_or(self): self.binary_check_float0(lax.bitwise_or, dtype=np.bool_)\n @jtu.skip_on_devices(\"tpu\")\n def test_xor(self): self.binary_check_float0(jnp.bitwise_xor, dtype=np.bool_)\n @jtu.skip_on_devices(\"tpu\")\n def test_shift_left(self): self.binary_check_float0(lax.shift_left, dtype=np.int32)\n @jtu.skip_on_devices(\"tpu\")\n def test_shift_right_a(self): self.binary_check_float0(lax.shift_right_arithmetic, 
dtype=np.int32)\n @jtu.skip_on_devices(\"tpu\")\n def test_shift_right_l(self): self.binary_check_float0(lax.shift_right_logical, dtype=np.int32)\n @jtu.skip_on_devices(\"tpu\")\n def test_le(self): self.binary_check_float0(lambda x, y: x <= y)\n @jtu.skip_on_devices(\"tpu\")\n def test_gt(self): self.binary_check_float0(lambda x, y: x > y)\n @jtu.skip_on_devices(\"tpu\")\n def test_lt(self): self.binary_check_float0(lambda x, y: x < y)\n @jtu.skip_on_devices(\"tpu\")\n def test_ge(self): self.binary_check_float0(lambda x, y: x >= y)\n @jtu.skip_on_devices(\"tpu\")\n def test_eq(self): self.binary_check_float0(lambda x, y: x == y)\n @jtu.skip_on_devices(\"tpu\")\n def test_ne(self): self.binary_check_float0(lambda x, y: x != y)\n @jtu.skip_on_devices(\"tpu\")\n def test_not(self): self.unary_check_float0(lax.bitwise_not, dtype=np.bool_)\n\n @jtu.skip_on_devices(\"tpu\")\n def test_exp(self): self.unary_check(jnp.exp)\n @jtu.skip_on_devices(\"tpu\")\n def test_neg(self): self.unary_check(jnp.negative)\n @jtu.skip_on_devices(\"tpu\")\n def test_floor(self): self.unary_check(jnp.floor)\n @jtu.skip_on_devices(\"tpu\")\n def test_ceil(self): self.unary_check(jnp.ceil)\n @jtu.skip_on_devices(\"tpu\")\n def test_round(self): self.unary_check(lax.round)\n @jtu.skip_on_devices(\"tpu\")\n def test_sign(self): self.unary_check(lax.sign)\n @jtu.skip_on_devices(\"tpu\")\n def test_real(self): self.unary_check(lax.real, dtype=np.complex64)\n @jtu.skip_on_devices(\"tpu\")\n def test_conj(self): self.unary_check(lax.conj, dtype=np.complex64)\n @jtu.skip_on_devices(\"tpu\")\n def test_imag(self): self.unary_check(lax.imag, dtype=np.complex64)\n @jtu.skip_on_devices(\"tpu\")\n def test_log(self): self.unary_check(jnp.log, lims=[0.8, 4.0])\n @jtu.skip_on_devices(\"tpu\")\n def test_gather(self): self.unary_check(lambda x: x[1:])\n @jtu.skip_on_devices(\"tpu\")\n def test_reduce_max(self): self.unary_check(lambda x: x.max(axis=1))\n @jtu.skip_on_devices(\"tpu\")\n def test_reduce_min(self): self.unary_check(lambda x: x.min(axis=1))\n @jtu.skip_on_devices(\"tpu\")\n def test_all_max(self): self.unary_check(jnp.max)\n @jtu.skip_on_devices(\"tpu\")\n def test_all_min(self): self.unary_check(jnp.min)\n @jtu.skip_on_devices(\"tpu\")\n def test_stopgrad(self): self.unary_check(lax.stop_gradient)\n @jtu.skip_on_devices(\"tpu\")\n def test_abs(self): self.unary_check(jnp.abs)\n @jtu.skip_on_devices(\"tpu\")\n def test_fft(self): self.unary_check(jnp.fft.fft)\n @jtu.skip_on_devices(\"tpu\")\n def test_log1p(self): self.unary_check(jnp.log1p, lims=[0, 4.])\n @jtu.skip_on_devices(\"tpu\")\n def test_expm1(self): self.unary_check(jnp.expm1)\n @jtu.skip_on_devices(\"tpu\")\n def test_sin(self): self.unary_check(jnp.sin)\n @jtu.skip_on_devices(\"tpu\")\n def test_cos(self): self.unary_check(jnp.cos)\n @jtu.skip_on_devices(\"tpu\")\n def test_sinh(self): self.unary_check(jnp.sinh)\n @jtu.skip_on_devices(\"tpu\")\n def test_cosh(self): self.unary_check(jnp.cosh)\n @jtu.skip_on_devices(\"tpu\")\n def test_tanh(self): self.unary_check(jnp.tanh, lims=[-500, 500], order=5)\n @jtu.skip_on_devices(\"tpu\")\n def test_expit(self): self.unary_check(jax.scipy.special.expit, lims=[-500, 500], order=5)\n @jtu.skip_on_devices(\"tpu\")\n def test_expit2(self): self.expit_check(lims=[-500, 500], order=5)\n @jtu.skip_on_devices(\"tpu\")\n def test_sqrt(self): self.unary_check(jnp.sqrt, lims=[0, 5.])\n @jtu.skip_on_devices(\"tpu\")\n def test_rsqrt(self): self.unary_check(lax.rsqrt, lims=[0, 5000.])\n @jtu.skip_on_devices(\"tpu\")\n def 
test_asinh(self): self.unary_check(lax.asinh, lims=[-100, 100])\n @jtu.skip_on_devices(\"tpu\")\n def test_acosh(self): self.unary_check(lax.acosh, lims=[-100, 100])\n @jtu.skip_on_devices(\"tpu\")\n def test_atanh(self): self.unary_check(lax.atanh, lims=[-1, 1])\n @jtu.skip_on_devices(\"tpu\")\n def test_erf(self): self.unary_check(lax.erf)\n @jtu.skip_on_devices(\"tpu\")\n def test_erfc(self): self.unary_check(lax.erfc)\n @jtu.skip_on_devices(\"tpu\")\n def test_erf_inv(self): self.unary_check(lax.erf_inv, lims=[-1, 1])\n @jtu.skip_on_devices(\"tpu\")\n def test_cumsum(self): self.unary_check(jnp.cumsum)\n @jtu.skip_on_devices(\"tpu\")\n def test_cumprod(self): self.unary_check(jnp.cumprod)\n @jtu.skip_on_devices(\"tpu\")\n def test_cummax(self): self.unary_check(partial(lax.cummax, axis=0))\n @jtu.skip_on_devices(\"tpu\")\n def test_cummin(self): self.unary_check(partial(lax.cummin, axis=0))\n\n\n @jtu.skip_on_devices(\"tpu\")\n def test_div(self): self.binary_check(lambda x, y: x / y, lims=[0.8, 4.0])\n @jtu.skip_on_devices(\"tpu\")\n def test_rem(self): self.binary_check(lax.rem, lims=[0.8, 4.0])\n @jtu.skip_on_devices(\"tpu\")\n def test_complex(self): self.binary_check(lax.complex)\n @jtu.skip_on_devices(\"tpu\")\n def test_sub(self): self.binary_check(lambda x, y: x - y)\n @jtu.skip_on_devices(\"tpu\")\n def test_add(self): self.binary_check(lambda x, y: x + y)\n @jtu.skip_on_devices(\"tpu\")\n def test_mul(self): self.binary_check(lambda x, y: x * y)\n @jtu.skip_on_devices(\"tpu\")\n def test_max(self): self.binary_check(lax.max)\n @jtu.skip_on_devices(\"tpu\")\n def test_min(self): self.binary_check(lax.min)\n @jtu.skip_on_devices(\"tpu\")\n @jtu.ignore_warning(message=\"overflow encountered in power\")\n def test_pow(self): self.binary_check(lambda x, y: x ** y, lims=([0.2, 500], [-500, 500]), finite=False)\n @jtu.skip_on_devices(\"tpu\")\n def test_atan2(self): self.binary_check(lax.atan2, lims=[-40, 40])\n\n @jtu.skip_on_devices(\"tpu\")\n def test_clamp(self):\n lims = [-2, 2]\n order = 3\n dims = 2, 3\n rng = np.random.RandomState(0)\n primal_in = (transform(lims, rng.rand(*dims)),\n transform(lims, rng.rand(*dims)),\n transform(lims, rng.rand(*dims)))\n series_in = ([rng.randn(*dims) for _ in range(order)],\n [rng.randn(*dims) for _ in range(order)],\n [rng.randn(*dims) for _ in range(order)])\n\n self.check_jet(lax.clamp, primal_in, series_in, atol=1e-4, rtol=1e-4)\n\n def test_process_call(self):\n def f(x):\n return jit(lambda x: x * x)(x)\n self.unary_check(f, rtol=2e-4)\n\n def test_post_process_call(self):\n def f(x):\n return jit(lambda y: x * y)(2.)\n\n self.unary_check(f, rtol=5e-4)\n\n def test_select(self):\n M, K = 2, 3\n order = 3\n rng = np.random.RandomState(0)\n b = rng.rand(M, K) < 0.5\n x = rng.randn(M, K)\n y = rng.randn(M, K)\n primals = (b, x, y)\n terms_b = [rng.randn(*b.shape) for _ in range(order)]\n terms_x = [rng.randn(*x.shape) for _ in range(order)]\n terms_y = [rng.randn(*y.shape) for _ in range(order)]\n series_in = (terms_b, terms_x, terms_y)\n self.check_jet(jnp.where, primals, series_in, rtol=5e-4)\n\n def test_inst_zero(self):\n def f(x):\n return 2.\n def g(x):\n return 2. 
+ 0 * x\n x = jnp.ones(1)\n order = 3\n f_out_primals, f_out_series = jet(f, (x, ), ([jnp.ones_like(x) for _ in range(order)], ))\n assert f_out_series is not zero_series\n\n g_out_primals, g_out_series = jet(g, (x, ), ([jnp.ones_like(x) for _ in range(order)], ))\n\n assert g_out_primals == f_out_primals\n assert g_out_series == f_out_series\n\n def test_add_any(self):\n # https://github.com/google/jax/issues/5217\n f = lambda x, eps: x * eps + eps + x\n def g(eps):\n x = jnp.array(1.)\n return jax.grad(f)(x, eps)\n jet(g, (1.,), ([1.],)) # doesn't crash\n\n def test_scatter_add(self):\n # very basic test from https://github.com/google/jax/issues/5365\n def f(x):\n x0 = x[0]\n x1 = x[1]\n return (x0**5 + x1**5).sum()\n\n def h(eps):\n from jax import jacfwd, grad\n\n x = jnp.array([1., 1.])\n μ = eps * x\n\n def F(t):\n return f(x + t * μ)\n\n return grad(jacfwd(F))(0.)\n\n self.check_jet(h, (0.,), ([1., 2., 3.],), rtol=1e-3)\n\n\nif __name__ == '__main__':\n absltest.main(testLoader=jtu.JaxTestLoader())\n" ]
[ [ "numpy.random.RandomState" ] ]
bpbpublications/Time-Series-Forecasting-using-Deep-Learning
[ "fd84553d33e912edb4a1400af0f9374e72747457" ]
[ "Chapter 05/enc_dec/model.py" ]
[ "import numpy as np\nimport random\nimport torch\nimport torch.nn as nn\nfrom torch import optim\n\n\nclass Encoder(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_layers = 1):\n super(Encoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n\n self.lstm = nn.LSTM(input_size = input_size, hidden_size = hidden_size, num_layers = num_layers)\n\n def forward(self, x):\n flat = x.view(x.shape[0], x.shape[1], self.input_size)\n out, h = self.lstm(flat)\n return out, h\n\n\nclass Decoder(nn.Module):\n\n def __init__(self, input_size, hidden_size, output_size = 1, num_layers = 1):\n super(Decoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.output_size = output_size\n\n self.lstm = nn.LSTM(input_size = input_size, hidden_size = hidden_size, num_layers = num_layers)\n self.linear = nn.Linear(hidden_size, output_size)\n\n def forward(self, x, h):\n out, h = self.lstm(x.unsqueeze(0), h)\n y = self.linear(out.squeeze(0))\n return y, h\n\n\nclass EncoderDecoder(nn.Module):\n\n def __init__(self, hidden_size, input_size = 1, output_size = 1):\n super(EncoderDecoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n self.encoder = Encoder(input_size = input_size, hidden_size = hidden_size)\n self.decoder = Decoder(input_size = input_size, hidden_size = hidden_size, output_size = output_size)\n\n def train_model(\n self, train, target, epochs, target_len, method = 'recursive',\n tfr = 0.5, lr = 0.01, dynamic_tf = False\n ):\n losses = np.full(epochs, np.nan)\n optimizer = optim.Adam(self.parameters(), lr = lr)\n criterion = nn.MSELoss()\n\n for e in range(epochs):\n predicted = torch.zeros(target_len, train.shape[1], train.shape[2])\n optimizer.zero_grad()\n _, enc_h = self.encoder(train)\n\n dec_in = train[-1, :, :]\n dec_h = enc_h\n\n if method == 'recursive':\n for t in range(target_len):\n dec_out, dec_h = self.decoder(dec_in, dec_h)\n predicted[t] = dec_out\n dec_in = dec_out\n\n if method == 'teacher_forcing':\n # use teacher forcing\n if random.random() < tfr:\n for t in range(target_len):\n dec_out, dec_h = self.decoder(dec_in, dec_h)\n predicted[t] = dec_out\n dec_in = target[t, :, :]\n # predict recursively\n else:\n for t in range(target_len):\n dec_out, dec_h = self.decoder(dec_in, dec_h)\n predicted[t] = dec_out\n dec_in = dec_out\n\n if method == 'mixed_teacher_forcing':\n # predict using mixed teacher forcing\n for t in range(target_len):\n dec_out, dec_h = self.decoder(dec_in, dec_h)\n predicted[t] = dec_out\n # predict with teacher forcing\n if random.random() < tfr:\n dec_in = target[t, :, :]\n # predict recursively\n else:\n dec_in = dec_out\n\n loss = criterion(predicted, target)\n loss.backward()\n optimizer.step()\n\n losses[e] = loss.item()\n\n if e % 10 == 0:\n print(f'Epoch {e}/{epochs}: {round(loss.item(), 4)}')\n\n # dynamic teacher forcing\n if dynamic_tf and tfr > 0:\n tfr = tfr - 0.02\n\n return losses\n\n def predict(self, x, target_len):\n y = torch.zeros(target_len, x.shape[1], x.shape[2])\n\n _, enc_h = self.encoder(x)\n dec_in = x[-1, :, :]\n dec_h = enc_h\n\n for t in range(target_len):\n dec_out, dec_h = self.decoder(dec_in, dec_h)\n y[t] = dec_out\n dec_in = dec_out\n\n return y\n" ]
[ [ "torch.nn.LSTM", "torch.nn.Linear", "torch.nn.MSELoss", "torch.zeros", "numpy.full" ] ]
Challenging6/YoloDB
[ "bdc1c4239ec112c21a65df64b9f4dc8447b739fa" ]
[ "dataset/processes/make_center_points.py" ]
[ "import numpy as np\n\n#from concern.config import State\nfrom .data_process import DataProcess\n\n\nclass MakeCenterPoints(DataProcess):\n box_key = 'charboxes'\n size = 32\n\n def process(self, data):\n shape = data['image'].shape[:2]\n points = np.zeros((self.size, 2), dtype=np.float32)\n boxes = np.array(data[self.box_key])[:self.size]\n\n size = boxes.shape[0]\n points[:size] = boxes.mean(axis=1)\n data['points'] = (points / shape[::-1]).astype(np.float32)\n return data\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
BAD-Classifier/Signal-Processing
[ "8163657fb8b8e1ec32ea299a5b4cda9473b91fd0" ]
[ "Noise Elimination/mean_elimination_script.py" ]
[ "import numpy\nimport librosa\nimport glob\nimport os\nimport shutil\n\nfull_clips = glob.glob(\"Full_Clips/*.mp3\")\nprint(\"Number of full clips: \" + str(len(full_clips)))\n\nfor clip in full_clips:\n clip_name = clip[11:]\n print(\"Current clip: \" + clip_name)\n signal, fs = librosa.load(clip)\n signal_abs = numpy.absolute(signal)\n\n search_name = \"Cut_Clips/\" + clip_name[:-4] + \"*[0-9].*\"\n cut_clips = glob.glob(search_name)\n print(\"Number of clip segments: \" + str(len(cut_clips)))\n\n total_mean = numpy.mean(signal_abs)\n print(\"Signal Total mean: \" + str(total_mean))\n condition = total_mean*0.25\n\n for record in cut_clips:\n signal_segment, sample_rate_segment = librosa.load(record)\n mean = numpy.mean(numpy.abs(signal_segment))\n if mean < condition:\n print(record)\n print(\"Segment mean: \" + str(mean))\n shutil.move(record,\"Rejected_noise/\") \n \nrejected_clips = glob.glob(\"Rejected_noise/*.wav\")\nprint(rejected_clips)\nfor item in rejected_clips:\n name = item[15:]\n new_name = \"All_MFCCs/\" + name[:-3] + \"png\"\n if os.path.isfile(new_name):\n shutil.move(new_name, \"Rejected_MFCCS/\") \n" ]
[ [ "numpy.mean", "numpy.abs", "numpy.absolute" ] ]
bartekx43/AlphaTTT
[ "a01c38833a7f841483146bebeef73323d527d812" ]
[ "alphazero/mcts.py" ]
[ "import os\nimport sys\nimport math\nimport random\nimport numpy as np\nfrom copy import deepcopy\n\nsys.path.append(os.path.join(os.environ[\"HOME\"], \"AlphaTTT\"))\n\nfrom environment import Environment\n\nfrom alphazero.database import prepare_state\n\nnp.random.seed(80085)\nrandom.seed(80085)\n\ndef PUCT_score(child_value, child_prior, parent_visit_count, child_visit_count, c_puct):\n pb_c = child_prior * math.sqrt(parent_visit_count) / (child_visit_count + 1)\n return child_value + c_puct * pb_c\n\nclass MCTS():\n def __init__(self, model, root_state, args):\n '''\n model - class with predict method that returns a valid policy and value\n root_state - board_len x board_len array with the initial state of the game\n\n args:\n num_simulations - number of leaf node expansions per search\n alpha - mixing constant between policy and dirichlet noise\n dirichlet_alpha - dirichlet constant for generating dirichlet distribution\n c_puct - exploration constant in PUCT score\n '''\n\n self.model = model\n self.root = deepcopy(root_state)\n self.args = args\n\n self.Qsa = {} # self.Qsa(s, a) = Q value for (s, a)\n self.Nsa = {} # self.Nsa(s, a) = (s, a) visit count\n self.Ns = {} # self.Ns(s) = s visit count\n self.Ps = {} # self.Ps(s) = list of available actions in s and corresponding raw probabilities\n\n self.Es = {} # terminal states, potentially going to do this if not too computationally expensive and dirty\n\n # Add dirichlet noise to initial root node\n self.add_dirichlet()\n\n def add_dirichlet(self):\n rs = self.root.tobytes()\n if rs not in self.Ps:\n self.find_leaf(deepcopy(self.root))\n if self.Es[rs] == 10:\n dirichlet = np.random.dirichlet([self.args[\"dirichlet_alpha\"]]*len(self.Ps[rs]))\n for i, (move, prob) in enumerate(self.Ps[rs]):\n self.Ps[rs][i] = (move, (1 - self.args[\"alpha\"]) * prob + dirichlet[i] * self.args[\"alpha\"])\n\n def search(self): # builds the search tree from the root node\n for i in range(self.args[\"num_simulations\"]):\n self.find_leaf(deepcopy(self.root))\n return\n\n def find_leaf(self, state):\n s = state.tobytes()\n\n if s not in self.Es:\n self.Es[s] = Environment.game_over(state)\n if self.Es[s] != 10:\n # terminal state\n return -self.Es[s]\n\n if s not in self.Ps: # expand leaf node\n p, v = self.model.predict(prepare_state(state)) \n availability_mask = (state == 0)\n p *= availability_mask\n if np.sum(p) > 0.0:\n p /= np.sum(p) # re-normalize\n\n move_probs = []\n\n for i, row in enumerate(p): \n for j, prob in enumerate(row):\n if state[i][j] == 0:\n move_probs.append(((i, j), prob))\n \n self.Ps[s] = move_probs\n self.Ns[s] = 1\n return -v\n\n max_puct = -float('inf')\n max_action = None\n\n for move, prob in self.Ps[s]:\n (Nc, Qc) = (self.Nsa[(s, move)], self.Qsa[(s, move)]) if (s, move) in self.Nsa else (0, 0.0)\n puct = PUCT_score(Qc, prob, self.Ns[s], Nc, self.args[\"c_puct\"])\n if puct > max_puct:\n max_puct = puct\n max_action = move\n\n a = max_action\n state[a] = 1\n state *= -1\n\n v = self.find_leaf(state)\n\n if (s, a) in self.Nsa:\n self.Nsa[(s, a)] += 1\n self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)\n else:\n self.Nsa[(s, a)] = 1\n self.Qsa[(s, a)] = v\n \n self.Ns[s] += 1\n return -v\n\n def get_pi(self, tau=1.0, as_prob=True):\n move_dist = np.zeros((len(self.root), len(self.root)))\n rs = self.root.tobytes()\n for move, _ in self.Ps[rs]:\n move_dist[move] = self.Nsa[(rs, move)] if (rs, move) in self.Nsa else 0\n if as_prob is True:\n if tau < 0.1: # protecting from numerical 
overflow \n z = np.zeros(move_dist.shape)\n move = np.unravel_index(np.argmax(move_dist), move_dist.shape)\n z[move[0]][move[1]] = 1.0\n move_dist = z\n else:\n move_dist = np.power(move_dist, 1.0/tau)\n if np.sum(move_dist) > 0.0:\n move_dist /= np.sum(move_dist)\n return move_dist\n\n def select_move(self, tau=1.0, external_move=None):\n if external_move is None:\n probas = self.get_pi(tau)\n selected_move = int(np.random.choice(len(probas.flatten()), 1, p=probas.flatten()))\n selected_move = np.unravel_index(selected_move, probas.shape)\n else:\n selected_move = external_move\n\n self.root[selected_move] = 1\n self.root *= -1\n\n # Add dirichlet noise to new root node:\n self.add_dirichlet()\n\n return selected_move\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.random.seed", "numpy.argmax", "numpy.power", "numpy.unravel_index" ] ]
SultanAbuGhazal/3detr
[ "f9725ae655c6ced290c3ec2c53c07566350270f4" ]
[ "main.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport argparse\nimport os\nimport sys\nimport pickle\n\nimport numpy as np\nimport torch\nfrom torch.multiprocessing import set_start_method\nfrom torch.utils.data import DataLoader, DistributedSampler\n\n# 3DETR codebase specific imports\nfrom datasets import build_dataset\nfrom engine import evaluate, train_one_epoch\nfrom models import build_model\nfrom optimizer import build_optimizer\nfrom criterion import build_criterion\nfrom utils.dist import init_distributed, is_distributed, is_primary, get_rank, barrier\nfrom utils.misc import my_worker_init_fn\nfrom utils.io import save_checkpoint, resume_if_possible\nfrom utils.logger import Logger\n\n\ndef make_args_parser():\n parser = argparse.ArgumentParser(\"3D Detection Using Transformers\", add_help=False)\n\n ##### Optimizer #####\n parser.add_argument(\"--base_lr\", default=5e-4, type=float)\n parser.add_argument(\"--warm_lr\", default=1e-6, type=float)\n parser.add_argument(\"--warm_lr_epochs\", default=9, type=int)\n parser.add_argument(\"--final_lr\", default=1e-6, type=float)\n parser.add_argument(\"--lr_scheduler\", default=\"cosine\", type=str)\n parser.add_argument(\"--weight_decay\", default=0.1, type=float)\n parser.add_argument(\"--filter_biases_wd\", default=False, action=\"store_true\")\n parser.add_argument(\n \"--clip_gradient\", default=0.1, type=float, help=\"Max L2 norm of the gradient\"\n )\n\n ##### Model #####\n parser.add_argument(\n \"--model_name\",\n default=\"3detr\",\n type=str,\n help=\"Name of the model\",\n choices=[\"3detr\"],\n )\n ### Encoder\n parser.add_argument(\n \"--enc_type\", default=\"vanilla\", choices=[\"masked\", \"maskedv2\", \"vanilla\"]\n )\n # Below options are only valid for vanilla encoder\n parser.add_argument(\"--enc_nlayers\", default=3, type=int)\n parser.add_argument(\"--enc_dim\", default=256, type=int)\n parser.add_argument(\"--enc_ffn_dim\", default=128, type=int)\n parser.add_argument(\"--enc_dropout\", default=0.1, type=float)\n parser.add_argument(\"--enc_nhead\", default=4, type=int)\n parser.add_argument(\"--enc_pos_embed\", default=None, type=str)\n parser.add_argument(\"--enc_activation\", default=\"relu\", type=str)\n\n ### Decoder\n parser.add_argument(\"--dec_nlayers\", default=8, type=int)\n parser.add_argument(\"--dec_dim\", default=256, type=int)\n parser.add_argument(\"--dec_ffn_dim\", default=256, type=int)\n parser.add_argument(\"--dec_dropout\", default=0.1, type=float)\n parser.add_argument(\"--dec_nhead\", default=4, type=int)\n\n ### MLP heads for predicting bounding boxes\n parser.add_argument(\"--mlp_dropout\", default=0.3, type=float)\n parser.add_argument(\n \"--nsemcls\",\n default=-1,\n type=int,\n help=\"Number of semantic object classes. 
Can be inferred from dataset\",\n )\n\n ### Other model params\n parser.add_argument(\"--preenc_npoints\", default=2048, type=int)\n parser.add_argument(\n \"--pos_embed\", default=\"fourier\", type=str, choices=[\"fourier\", \"sine\"]\n )\n parser.add_argument(\"--nqueries\", default=256, type=int)\n parser.add_argument(\"--use_color\", default=False, action=\"store_true\")\n\n ##### Set Loss #####\n ### Matcher\n parser.add_argument(\"--matcher_giou_cost\", default=2, type=float)\n parser.add_argument(\"--matcher_cls_cost\", default=1, type=float)\n parser.add_argument(\"--matcher_center_cost\", default=0, type=float)\n parser.add_argument(\"--matcher_objectness_cost\", default=0, type=float)\n\n ### Loss Weights\n parser.add_argument(\"--loss_giou_weight\", default=0, type=float)\n parser.add_argument(\"--loss_sem_cls_weight\", default=1, type=float)\n parser.add_argument(\n \"--loss_no_object_weight\", default=0.2, type=float\n ) # \"no object\" or \"background\" class for detection\n parser.add_argument(\"--loss_angle_cls_weight\", default=0.1, type=float)\n parser.add_argument(\"--loss_angle_reg_weight\", default=0.5, type=float)\n parser.add_argument(\"--loss_center_weight\", default=5.0, type=float)\n parser.add_argument(\"--loss_size_weight\", default=1.0, type=float)\n\n ##### Dataset #####\n parser.add_argument(\n \"--dataset_name\", required=True, type=str, choices=[\"scannet\", \"sunrgbd\"]\n )\n parser.add_argument(\n \"--dataset_root_dir\",\n type=str,\n default=None,\n help=\"Root directory containing the dataset files. \\\n If None, default values from scannet.py/sunrgbd.py are used\",\n )\n # parser.add_argument(\n # \"--meta_data_dir\",\n # type=str,\n # default=None,\n # help=\"Root directory containing the metadata files. \\\n # If None, default values from scannet.py/sunrgbd.py are used\",\n # )\n parser.add_argument(\"--dataset_num_workers\", default=4, type=int)\n parser.add_argument(\"--batchsize_per_gpu\", default=8, type=int)\n\n ##### Training #####\n parser.add_argument(\"--start_epoch\", default=-1, type=int)\n parser.add_argument(\"--max_epoch\", default=720, type=int)\n parser.add_argument(\"--eval_every_epoch\", default=10, type=int)\n parser.add_argument(\"--seed\", default=0, type=int)\n\n ##### Testing #####\n parser.add_argument(\"--test_only\", default=False, action=\"store_true\")\n parser.add_argument(\"--test_ckpt\", default=None, type=str)\n\n ##### I/O #####\n parser.add_argument(\"--checkpoint_dir\", default=None, type=str)\n parser.add_argument(\"--log_every\", default=10, type=int)\n parser.add_argument(\"--log_metrics_every\", default=20, type=int)\n parser.add_argument(\"--save_separate_checkpoint_every_epoch\", default=100, type=int)\n\n ##### Distributed Training #####\n parser.add_argument(\"--ngpus\", default=1, type=int)\n parser.add_argument(\"--dist_url\", default=\"tcp://localhost:12345\", type=str)\n\n return parser\n\n\ndef do_train(\n args,\n model,\n model_no_ddp,\n optimizer,\n criterion,\n dataset_config,\n dataloaders,\n best_val_metrics,\n):\n \"\"\"\n Main training loop.\n This trains the model for `args.max_epoch` epochs and tests the model after every `args.eval_every_epoch`.\n We always evaluate the final checkpoint and report both the final AP and best AP on the val set.\n \"\"\"\n\n num_iters_per_epoch = len(dataloaders[\"train\"])\n num_iters_per_eval_epoch = len(dataloaders[\"test\"])\n print(f\"Model is {model}\")\n print(f\"Training started at epoch {args.start_epoch} until {args.max_epoch}.\")\n print(f\"One 
training epoch = {num_iters_per_epoch} iters.\")\n print(f\"One eval epoch = {num_iters_per_eval_epoch} iters.\")\n\n final_eval = os.path.join(args.checkpoint_dir, \"final_eval.txt\")\n final_eval_pkl = os.path.join(args.checkpoint_dir, \"final_eval.pkl\")\n\n if os.path.isfile(final_eval):\n print(f\"Found final eval file {final_eval}. Skipping training.\")\n return\n\n logger = Logger(args.checkpoint_dir)\n\n for epoch in range(args.start_epoch, args.max_epoch):\n if is_distributed():\n dataloaders[\"train_sampler\"].set_epoch(epoch)\n\n aps = train_one_epoch(\n args,\n epoch,\n model,\n optimizer,\n criterion,\n dataset_config,\n dataloaders[\"train\"],\n logger,\n )\n\n # latest checkpoint is always stored in checkpoint.pth\n save_checkpoint(\n args.checkpoint_dir,\n model_no_ddp,\n optimizer,\n epoch,\n args,\n best_val_metrics,\n filename=\"checkpoint.pth\",\n )\n\n metrics = aps.compute_metrics()\n metric_str = aps.metrics_to_str(metrics, per_class=False)\n metrics_dict = aps.metrics_to_dict(metrics)\n curr_iter = epoch * len(dataloaders[\"train\"])\n if is_primary():\n print(\"==\" * 10)\n print(f\"Epoch [{epoch}/{args.max_epoch}]; Metrics {metric_str}\")\n print(\"==\" * 10)\n logger.log_scalars(metrics_dict, curr_iter, prefix=\"Train/\")\n\n if (\n epoch > 0\n and args.save_separate_checkpoint_every_epoch > 0\n and epoch % args.save_separate_checkpoint_every_epoch == 0\n ):\n # separate checkpoints are stored as checkpoint_{epoch}.pth\n save_checkpoint(\n args.checkpoint_dir,\n model_no_ddp,\n optimizer,\n epoch,\n args,\n best_val_metrics,\n )\n\n if epoch % args.eval_every_epoch == 0 or epoch == (args.max_epoch - 1):\n ap_calculator = evaluate(\n args,\n epoch,\n model,\n criterion,\n dataset_config,\n dataloaders[\"test\"],\n logger,\n curr_iter,\n )\n metrics = ap_calculator.compute_metrics()\n ap25 = metrics[0.25][\"mAP\"]\n metric_str = ap_calculator.metrics_to_str(metrics, per_class=True)\n metrics_dict = ap_calculator.metrics_to_dict(metrics)\n if is_primary():\n print(\"==\" * 10)\n print(f\"Evaluate Epoch [{epoch}/{args.max_epoch}]; Metrics {metric_str}\")\n print(\"==\" * 10)\n logger.log_scalars(metrics_dict, curr_iter, prefix=\"Test/\")\n\n if is_primary() and (\n len(best_val_metrics) == 0 or best_val_metrics[0.25][\"mAP\"] < ap25\n ):\n best_val_metrics = metrics\n filename = \"checkpoint_best.pth\"\n save_checkpoint(\n args.checkpoint_dir,\n model_no_ddp,\n optimizer,\n epoch,\n args,\n best_val_metrics,\n filename=filename,\n )\n print(\n f\"Epoch [{epoch}/{args.max_epoch}] saved current best val checkpoint at {filename}; ap25 {ap25}\"\n )\n\n # always evaluate last checkpoint\n epoch = args.max_epoch - 1\n curr_iter = epoch * len(dataloaders[\"train\"])\n ap_calculator = evaluate(\n args,\n epoch,\n model,\n criterion,\n dataset_config,\n dataloaders[\"test\"],\n logger,\n curr_iter,\n )\n metrics = ap_calculator.compute_metrics()\n metric_str = ap_calculator.metrics_to_str(metrics)\n if is_primary():\n print(\"==\" * 10)\n print(f\"Evaluate Final [{epoch}/{args.max_epoch}]; Metrics {metric_str}\")\n print(\"==\" * 10)\n\n with open(final_eval, \"w\") as fh:\n fh.write(\"Training Finished.\\n\")\n fh.write(\"==\" * 10)\n fh.write(\"Final Eval Numbers.\\n\")\n fh.write(metric_str)\n fh.write(\"\\n\")\n fh.write(\"==\" * 10)\n fh.write(\"Best Eval Numbers.\\n\")\n fh.write(ap_calculator.metrics_to_str(best_val_metrics))\n fh.write(\"\\n\")\n\n with open(final_eval_pkl, \"wb\") as fh:\n pickle.dump(metrics, fh)\n\n\ndef test_model(args, model, model_no_ddp, 
criterion, dataset_config, dataloaders):\n if args.test_ckpt is None or not os.path.isfile(args.test_ckpt):\n print(f\"Please specify a test checkpoint using --test_ckpt. Found invalid value {args.test_ckpt}\")\n sys.exit(1)\n\n sd = torch.load(args.test_ckpt, map_location=torch.device(\"cpu\"))\n model_no_ddp.load_state_dict(sd[\"model\"])\n logger = Logger()\n criterion = None # do not compute loss for speed-up; Comment out to see test loss\n epoch = -1\n curr_iter = 0\n ap_calculator = evaluate(\n args,\n epoch,\n model,\n criterion,\n dataset_config,\n dataloaders[\"test\"],\n logger,\n curr_iter,\n )\n metrics = ap_calculator.compute_metrics()\n metric_str = ap_calculator.metrics_to_str(metrics)\n if is_primary():\n print(\"==\" * 10)\n print(f\"Test model; Metrics {metric_str}\")\n print(\"==\" * 10)\n\n\ndef main(local_rank, args):\n if args.ngpus > 1:\n print(\n \"Initializing Distributed Training. This is in BETA mode and hasn't been tested thoroughly. Use at your own risk :)\"\n )\n print(\"To get the maximum speed-up consider reducing evaluations on val set by setting --eval_every_epoch to greater than 50\")\n init_distributed(\n local_rank,\n global_rank=local_rank,\n world_size=args.ngpus,\n dist_url=args.dist_url,\n dist_backend=\"nccl\",\n )\n\n print(f\"Called with args: {args}\")\n torch.cuda.set_device(local_rank)\n np.random.seed(args.seed + get_rank())\n torch.manual_seed(args.seed + get_rank())\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(args.seed + get_rank())\n\n datasets, dataset_config = build_dataset(args)\n model, _ = build_model(args, dataset_config)\n model = model.cuda(local_rank)\n model_no_ddp = model\n\n if is_distributed():\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[local_rank]\n )\n criterion = build_criterion(args, dataset_config)\n criterion = criterion.cuda(local_rank)\n\n dataloaders = {}\n if args.test_only:\n dataset_splits = [\"test\"]\n else:\n dataset_splits = [\"train\", \"test\"]\n for split in dataset_splits:\n if split == \"train\":\n shuffle = True\n else:\n shuffle = False\n if is_distributed():\n sampler = DistributedSampler(datasets[split], shuffle=shuffle)\n elif shuffle:\n sampler = torch.utils.data.RandomSampler(datasets[split])\n else:\n sampler = torch.utils.data.SequentialSampler(datasets[split])\n\n dataloaders[split] = DataLoader(\n datasets[split],\n sampler=sampler,\n batch_size=args.batchsize_per_gpu,\n num_workers=args.dataset_num_workers,\n worker_init_fn=my_worker_init_fn,\n )\n dataloaders[split + \"_sampler\"] = sampler\n\n if args.test_only:\n criterion = None # faster evaluation\n test_model(args, model, model_no_ddp, criterion, dataset_config, dataloaders)\n else:\n assert (\n args.checkpoint_dir is not None\n ), f\"Please specify a checkpoint dir using --checkpoint_dir\"\n if is_primary() and not os.path.isdir(args.checkpoint_dir):\n os.makedirs(args.checkpoint_dir, exist_ok=True)\n optimizer = build_optimizer(args, model_no_ddp)\n loaded_epoch, best_val_metrics = resume_if_possible(\n args.checkpoint_dir, model_no_ddp, optimizer\n )\n args.start_epoch = loaded_epoch + 1\n do_train(\n args,\n model,\n model_no_ddp,\n optimizer,\n criterion,\n dataset_config,\n dataloaders,\n best_val_metrics,\n )\n\n\ndef launch_distributed(args):\n world_size = args.ngpus\n if world_size == 1:\n main(local_rank=0, args=args)\n else:\n torch.multiprocessing.spawn(main, nprocs=world_size, args=(args,))\n\n\nif __name__ ==
\"__main__\":\n parser = make_args_parser()\n args = parser.parse_args()\n try:\n set_start_method(\"spawn\")\n except RuntimeError:\n pass\n launch_distributed(args)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.multiprocessing.spawn", "torch.utils.data.DistributedSampler", "torch.utils.data.SequentialSampler", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.is_available", "torch.multiprocessing.set_start_method", "torch.utils.data.RandomSampler", "torch.device", "torch.cuda.set_device" ] ]
muglyon/https-github.com-muglyon-DCOP-Decentralised-Control-of-Intelligent-Devices
[ "68cb868a0875f5e119ac0dbea024c17d241347ac" ]
[ "app/features/steps/value_propagation_test.py" ]
[ "#! python3\n# value_propagation_test.py - Test the VALUE Propagation behavior\n\nfrom behave import *\nfrom hamcrest import *\n\nimport numpy\n\n\n@when('get VALUE from parent after UTIL propagation')\ndef step_impl(context):\n set_up(context)\n context.dpop_to_test.util_manager.JOIN = context.util_matrix\n\n\n@then('should select the optimal assignment')\ndef step_impl(context):\n index = context.dpop_to_test.value_manager.get_index_of_best_value_with(\n context.data,\n context.dpop_to_test.util_manager.matrix_dimensions_order,\n context.dpop_to_test.util_manager.JOIN\n )\n assert_that(index, equal_to(2))\n\n\n@when('parent send values to wrong format')\ndef step_impl(context):\n set_up(context)\n context.data = 'wrong format'\n\n\n@when('matrix is Null')\ndef step_impl(context):\n set_up(context)\n context.util_matrix = None\n\n\n@then('should raise an exception')\ndef step_impl(context):\n assert_that(calling(context.dpop_to_test.value_manager.get_index_of_best_value_with)\n .with_args(context.data, context.util_matrix), raises(Exception))\n\n\n@when('matrix has 1 dimension')\ndef step_impl(context):\n set_up(context)\n context.dpop_to_test.util_manager.JOIN = numpy.asmatrix([[1], [0], [1]])\n\n\n@then('should return the min value index')\ndef step_impl(context):\n index = context.dpop_to_test.value_manager.get_index_of_best_value_with(\n context.data,\n context.dpop_to_test.util_manager.matrix_dimensions_order,\n context.dpop_to_test.util_manager.JOIN\n )\n assert_that(index, equal_to(1)) \n\n###\n# Privates Methods\n###\n\n\ndef set_up(context):\n context.dpop_to_test.util_manager.matrix_dimensions_order = [1]\n context.util_matrix = numpy.arange(start=11, stop=2, step=-1).reshape(3, 3)\n context.data = {\"1\": 1}\n" ]
[ [ "numpy.arange", "numpy.asmatrix" ] ]
sdpython/manyapi
[ "dc2aadc58a5d72904f95424dbe57bb832d3ccd73" ]
[ "_unittests/ut_plotting/test_dummy.py" ]
[ "\"\"\"\n@brief test log(time=13s)\n\"\"\"\nimport unittest\nfrom pyquickhelper.pycode import ExtTestCase\nfrom manydataapi.plotting import plot_aggregated_ts, daily_timeseries\n\n\nclass TestDummm(ExtTestCase):\n\n def test_agg_raise(self):\n df = daily_timeseries()\n\n from matplotlib import pyplot as plt\n _, ax = plt.subplots(1, 1)\n plot_aggregated_ts(df, ax=ax, value='X', agg='year')\n plt.close('all')\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "matplotlib.pyplot.close", "matplotlib.pyplot.subplots" ] ]
irisTa56/ease4lmp
[ "0ad69632fbe0d8c2a55e58af13efd7be1d566394" ]
[ "tests/test_lammps_cycle.py" ]
[ "import unittest\n\nfrom ease4lmp import (\n BondedAtoms, LammpsWriter,\n create_atoms_from_data, create_atoms_from_molecule)\n\nfrom ase.build import bulk, molecule\n\nimport numpy as np\n\nimport os\nimport itertools\n\n\ndef write_files(atoms):\n writer = LammpsWriter(atoms, atom_style=\"molecular\")\n\n writer.set_atom_data(mol=[0]*len(atoms))\n\n writer.set_bond_types({\n seq: i+1 for i, seq in enumerate(writer.get_bond_patterns())\n })\n\n writer.set_angle_types({\n seq: i+1 for i, seq in enumerate(writer.get_angle_patterns())\n })\n\n writer.set_dihedral_types({\n seq: i+1 for i, seq in enumerate(writer.get_dihedral_patterns())\n })\n\n writer.set_improper_types({\n seq: i+1 for i, seq in enumerate(writer.get_improper_patterns())\n })\n\n writer.write_lammps_data(\"data.tmp\", mass=True)\n writer.write_lammps_molecule(\"molecule.tmp\", mass=True)\n\ndef remove_files():\n os.remove(\"data.tmp\")\n os.remove(\"molecule.tmp\")\n\n\nclass TestLammpsCycle(unittest.TestCase):\n\n def test_methanol(self):\n \"\"\"Test for equivalence between original and written/read data.\"\"\"\n atoms = BondedAtoms(molecule(\"CH3OH\"))\n\n # confirm atomic numbers\n self.assertTrue(np.allclose(\n atoms.get_atomic_numbers(), np.array([6, 8, 1, 1, 1, 1])))\n\n # confirm O-H distance\n self.assertTrue(np.allclose(atoms.get_distance(1, 3), 0.97))\n\n atoms.set_types([1, 2, 3, 4, 3, 3])\n\n positions = atoms.get_positions()\n\n bonded_pairs = [\n (i, j) for i, j in itertools.combinations(range(len(atoms)), 2)\n if np.linalg.norm(positions[i] - positions[j]) < 1.5]\n\n # there are five bonds in CH3OH\n self.assertEqual(len(bonded_pairs), 5)\n\n for pair in bonded_pairs:\n atoms.add_bond(*pair)\n\n atoms.sort_bonds()\n\n atoms.set_cell([[5., 0., 0.], [0., 5., 0.], [0., 0., 5.]])\n atoms.center()\n\n write_files(atoms)\n\n atoms_from_data = create_atoms_from_data(\"data.tmp\", \"molecular\")\n atoms_from_molecule = create_atoms_from_molecule(\"molecule.tmp\")\n\n # atoms from Lammps' data and molecule file must be eaqual.\n self.assertTrue(np.allclose(\n atoms_from_data.get_positions(), atoms_from_molecule.get_positions()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_masses(), atoms_from_molecule.get_masses()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_types(), atoms_from_molecule.get_types()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_bonds(), atoms_from_molecule.get_bonds()))\n\n # comparison with original atoms\n self.assertTrue(np.allclose(\n atoms_from_data.get_positions(), atoms.get_positions()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_masses(), atoms.get_masses()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_types(), atoms.get_types()))\n\n # storing order of bonds might be changed\n atoms_from_data.sort_bonds()\n self.assertTrue(np.allclose(\n atoms_from_data.get_bonds(), atoms.get_bonds()))\n\n remove_files()\n\n def test_acetic(self):\n \"\"\"Test for equivalence between original and written/read data.\"\"\"\n atoms = BondedAtoms(molecule(\"CH3COOH\"))\n\n # confirm atomic numbers\n self.assertTrue(np.allclose(\n atoms.get_atomic_numbers(), np.array([6, 8, 8, 1, 6, 1, 1, 1])))\n\n # confirm O-H distance < C=H distance\n self.assertTrue(all(\n atoms.get_distance(2, 3) < atoms.get_distance(i, j)\n for i, j in [(4, 5), (4, 6), (4, 7)]))\n\n atoms.set_types([1, 2, 3, 4, 5, 6, 6, 6])\n\n positions = atoms.get_positions()\n\n bonded_pairs = [\n (i, j) for i, j in itertools.combinations(range(len(atoms)), 2)\n if np.linalg.norm(positions[i] - 
positions[j]) < 1.5]\n\n # there are seven bonds in CH3COOH\n self.assertEqual(len(bonded_pairs), 7)\n\n for pair in bonded_pairs:\n atoms.add_bond(*pair)\n\n atoms.sort_bonds()\n\n atoms.set_cell([[5., 0., 0.], [0., 5., 0.], [0., 0., 5.]])\n atoms.center()\n\n write_files(atoms)\n\n atoms_from_data = create_atoms_from_data(\"data.tmp\", \"molecular\")\n atoms_from_molecule = create_atoms_from_molecule(\"molecule.tmp\")\n\n # atoms from Lammps' data and molecule file must be eaqual.\n self.assertTrue(np.allclose(\n atoms_from_data.get_positions(), atoms_from_molecule.get_positions()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_masses(), atoms_from_molecule.get_masses()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_types(), atoms_from_molecule.get_types()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_bonds(), atoms_from_molecule.get_bonds()))\n\n # comparison with original atoms\n self.assertTrue(np.allclose(\n atoms_from_data.get_positions(), atoms.get_positions()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_masses(), atoms.get_masses()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_types(), atoms.get_types()))\n\n # storing order of bonds might be changed\n atoms_from_data.sort_bonds()\n self.assertTrue(np.allclose(\n atoms_from_data.get_bonds(), atoms.get_bonds()))\n\n remove_files()\n\n def test_nacl(self):\n \"\"\"Test for equivalence between original and written/read data.\"\"\"\n atoms = BondedAtoms(bulk(\"NaCl\", \"rocksalt\", a=5.64, orthorhombic=True))\n\n # confirm atomic numbers\n self.assertTrue(np.allclose(\n atoms.get_atomic_numbers(), np.array([11, 17, 11, 17])))\n\n atoms.set_types([1, 2, 1, 2])\n\n atoms.change_max_bonds(6)\n\n cell = atoms.get_cell()\n positions = atoms.get_positions()\n\n for i, j in itertools.combinations(range(len(atoms)), 2):\n r_original = positions[j] - positions[i]\n for ix, iy, iz in itertools.product(*[(-1, 0, 1)]*3):\n r = r_original + ix * cell[0] + iy * cell[1] + iz * cell[2]\n if np.isclose(np.linalg.norm(r), 2.82):\n atoms.add_bond(i, j, img2=(ix, iy, iz))\n\n atoms *= 5\n\n atoms.sort_bonds()\n\n write_files(atoms)\n\n atoms_from_data = create_atoms_from_data(\n \"data.tmp\", \"molecular\", pbc=True)\n\n # comparison with original atoms\n self.assertTrue(np.allclose(\n atoms_from_data.get_positions(), atoms.get_positions()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_masses(), atoms.get_masses()))\n self.assertTrue(np.allclose(\n atoms_from_data.get_types(), atoms.get_types()))\n\n # storing order of bonds might be changed\n atoms_from_data.sort_bonds()\n self.assertTrue(np.allclose(\n atoms_from_data.get_bonds(), atoms.get_bonds()))\n\n remove_files()\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(TestLammpsCycle(\"test_methanol\"))\n suite.addTest(TestLammpsCycle(\"test_acetic\"))\n suite.addTest(TestLammpsCycle(\"test_nacl\"))\n return suite" ]
[ [ "numpy.array", "numpy.linalg.norm" ] ]