repo_name (string, 6–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
zmic/stylegan2-zmic | [
"49e1ee8f617598dfccfde7815a231b42d4a1e222"
] | [
"dnnlib/tflib/custom_ops.py"
] | [
"# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n#\n# This work is made available under the Nvidia Source Code License-NC.\n# To view a copy of this license, visit\n# https://nvlabs.github.io/stylegan2/license.html\n\n\"\"\"TensorFlow custom ops builder.\n\"\"\"\n\nimport os\nimport re\nimport uuid\nimport hashlib\nimport tempfile\nimport shutil\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib # pylint: disable=no-name-in-module\n\n#----------------------------------------------------------------------------\n# Global options.\n\ncuda_cache_path = os.path.join(os.path.dirname(__file__), '_cudacache')\ncuda_cache_version_tag = 'v1'\ndo_not_hash_included_headers = False # Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe!\nverbose = True # Print status messages to stdout.\n\ncompiler_bindir_search_path = [\n #'C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.14.26428/bin/Hostx64/x64',\n 'C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.16.27023/bin/HostX64/x64',\n #'C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.23.28105/bin/Hostx64/x64',\n #'C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin',\n]\n\n#----------------------------------------------------------------------------\n# Internal helper funcs.\n\ndef _find_compiler_bindir():\n for compiler_path in compiler_bindir_search_path:\n if os.path.isdir(compiler_path):\n return compiler_path\n return None\n\ndef _get_compute_cap(device):\n caps_str = device.physical_device_desc\n m = re.search('compute capability: (\\\\d+).(\\\\d+)', caps_str)\n major = m.group(1)\n minor = m.group(2)\n return (major, minor)\n\ndef _get_cuda_gpu_arch_string():\n gpus = [x for x in device_lib.list_local_devices() if x.device_type == 'GPU']\n if len(gpus) == 0:\n raise RuntimeError('No GPU devices found')\n (major, minor) = _get_compute_cap(gpus[0])\n return 'sm_%s%s' % (major, minor)\n\ndef _run_cmd(cmd):\n with os.popen(cmd) as pipe:\n output = pipe.read()\n status = pipe.close()\n if status is not None:\n raise RuntimeError('NVCC returned an error. See below for full command line and output log:\\n\\n%s\\n\\n%s' % (cmd, output))\n\ndef _prepare_nvcc_cli(opts):\n cmd = 'nvcc ' + opts.strip()\n cmd += ' --disable-warnings'\n cmd += ' --include-path \"%s\"' % tf.sysconfig.get_include()\n cmd += ' --include-path \"%s\"' % os.path.join(tf.sysconfig.get_include(), 'external', 'protobuf_archive', 'src')\n cmd += ' --include-path \"%s\"' % os.path.join(tf.sysconfig.get_include(), 'external', 'com_google_absl')\n cmd += ' --include-path \"%s\"' % os.path.join(tf.sysconfig.get_include(), 'external', 'eigen_archive')\n\n compiler_bindir = _find_compiler_bindir()\n if compiler_bindir is None:\n # Require that _find_compiler_bindir succeeds on Windows. Allow\n # nvcc to use whatever is the default on Linux.\n if os.name == 'nt':\n raise RuntimeError('Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in \"%s\".' 
% __file__)\n else:\n cmd += ' --compiler-bindir \"%s\"' % compiler_bindir\n cmd += ' 2>&1'\n return cmd\n\n#----------------------------------------------------------------------------\n# Main entry point.\n\n_plugin_cache = dict()\n\ndef get_plugin(cuda_file):\n cuda_file_base = os.path.basename(cuda_file)\n cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base)\n\n # Already in cache?\n if cuda_file in _plugin_cache:\n return _plugin_cache[cuda_file]\n\n # Setup plugin.\n if verbose:\n print('Setting up TensorFlow plugin \"%s\": ' % cuda_file_base, end='', flush=True)\n try:\n # Hash CUDA source.\n md5 = hashlib.md5()\n with open(cuda_file, 'rb') as f:\n md5.update(f.read())\n md5.update(b'\\n')\n\n # Hash headers included by the CUDA code by running it through the preprocessor.\n if not do_not_hash_included_headers:\n if verbose:\n print('Preprocessing... ', end='', flush=True)\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext)\n _run_cmd(_prepare_nvcc_cli('\"%s\" --preprocess -o \"%s\" --keep --keep-dir \"%s\"' % (cuda_file, tmp_file, tmp_dir)))\n with open(tmp_file, 'rb') as f:\n bad_file_str = ('\"' + cuda_file.replace('\\\\', '/') + '\"').encode('utf-8') # __FILE__ in error check macros\n good_file_str = ('\"' + cuda_file_base + '\"').encode('utf-8')\n for ln in f:\n if not ln.startswith(b'# ') and not ln.startswith(b'#line '): # ignore line number pragmas\n ln = ln.replace(bad_file_str, good_file_str)\n md5.update(ln)\n md5.update(b'\\n')\n\n # Select compiler options.\n compile_opts = ''\n if os.name == 'nt':\n compile_opts += '\"%s\"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib')\n elif os.name == 'posix':\n compile_opts += '\"%s\"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so')\n compile_opts += ' --compiler-options \\'-fPIC -D_GLIBCXX_USE_CXX11_ABI=0\\''\n else:\n assert False # not Windows or Linux, w00t?\n compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string()\n compile_opts += ' --use_fast_math'\n nvcc_cmd = _prepare_nvcc_cli(compile_opts)\n\n # Hash build configuration.\n md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\\n')\n md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\\n')\n md5.update(('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\\n')\n\n # Compile if not already compiled.\n bin_file_ext = '.dll' if os.name == 'nt' else '.so'\n bin_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + md5.hexdigest() + bin_file_ext)\n if not os.path.isfile(bin_file):\n if verbose:\n print('Compiling... ', end='', flush=True)\n with tempfile.TemporaryDirectory() as tmp_dir:\n tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + bin_file_ext)\n _run_cmd(nvcc_cmd + ' \"%s\" --shared -o \"%s\" --keep --keep-dir \"%s\"' % (cuda_file, tmp_file, tmp_dir))\n os.makedirs(cuda_cache_path, exist_ok=True)\n intermediate_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext)\n shutil.copyfile(tmp_file, intermediate_file)\n os.rename(intermediate_file, bin_file) # atomic\n\n # Load.\n if verbose:\n print('Loading... 
', end='', flush=True)\n plugin = tf.load_op_library(bin_file)\n\n # Add to cache.\n _plugin_cache[cuda_file] = plugin\n if verbose:\n print('Done.', flush=True)\n return plugin\n\n except:\n if verbose:\n print('Failed!', flush=True)\n raise\n\n#----------------------------------------------------------------------------\n"
] | [
[
"tensorflow.sysconfig.get_include",
"tensorflow.load_op_library",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.sysconfig.get_lib"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
cguccione/q2-feature-table | [
"90b75bb4848371bd640fe7c4baf14bc448d597c9"
] | [
"q2_feature_table/tests/test_rename.py"
] | [
"# ----------------------------------------------------------------------------\n# Copyright (c) 2016-2022, QIIME 2 development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport unittest\nimport warnings\n\nimport biom\nimport qiime2\nimport pandas as pd\nimport numpy as np\nimport numpy.testing as npt\n\nfrom q2_feature_table import _rename\n\n\nclass TestRename(unittest.TestCase):\n def setUp(self):\n self.old_ids = ['S1', 'S2', 'S3']\n self.name_map = pd.Series({'S1': 'S1_new',\n 'S2': 'S2_new',\n 'S4': 'S4_name'})\n self.known = {'S1': 'S1_new', 'S2': 'S2_new', 'S3': 'S3'}\n\n def test_generate_new_names_non_unique(self):\n name_map = pd.Series({'S1': 'S2_new', 'S2': 'S2_new'})\n with self.assertRaises(ValueError) as cm:\n _rename._generate_new_names(self.old_ids,\n name_map,\n strict=True,\n verbose=False)\n self.assertEqual(\n str(cm.exception),\n ('All new ids must be unique.\\n'\n 'Try the group method in this plugin if you want '\n 'to combine multiple samples in the same table.')\n )\n\n def test_generate_new_names_old_disjoint_strict(self):\n with self.assertRaises(ValueError) as cm:\n _rename._generate_new_names(self.old_ids,\n self.name_map,\n strict=True,\n verbose=False)\n self.assertEqual(\n str(cm.exception),\n (\"There are ids in the table which do not have new names.\\n\"\n \"Either turn off strict mode or provide a remapping for \"\n \"all ids.\\nThe following ids are not mapped:\\n S3\")\n )\n\n def test_generate_new_names_verbose_warnings(self):\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n new_names = \\\n _rename._generate_new_names(self.old_ids,\n self.name_map,\n strict=False,\n verbose=True)\n self.assertEqual(len(w), 2)\n self.assertTrue(isinstance(w[0].message, UserWarning))\n self.assertEqual(str(w[0].message),\n 'There are ids in the original table which do not '\n 'have new names.\\nThe following ids will not be '\n 'included:\\n S3')\n self.assertTrue(isinstance(w[1].message, UserWarning))\n self.assertEqual(str(w[1].message),\n 'There are ids supplied for renaming that are not in'\n ' the table.\\nThe following ids will not be mapped:'\n '\\n S4'\n )\n self.assertEqual(new_names.keys(), self.known.keys())\n for k, v in new_names.items():\n self.assertEqual(v, self.known[k])\n\n def test_generate_new_names_no_verbse(self):\n with warnings.catch_warnings(record=True) as w:\n # Cause all warnings to always be triggered.\n warnings.simplefilter(\"always\")\n new_names = \\\n _rename._generate_new_names(self.old_ids,\n self.name_map,\n strict=False,\n verbose=False)\n self.assertEqual(len(w), 0)\n self.assertEqual(new_names.keys(), self.known.keys())\n for k, v in new_names.items():\n self.assertEqual(v, self.known[k])\n\n def test_rename_samples(self):\n table = biom.Table(np.array([[0, 1, 2], [3, 4, 5]]),\n observation_ids=['01', '02'],\n sample_ids=['S1', 'S2', 'S3'])\n meta1 = qiime2.Metadata(pd.DataFrame(\n data=np.array([['cat'], ['rat'], ['dog']]),\n index=pd.Index(['S1', 'S2', 'S3'], name='sample-id'),\n columns=['animal']\n ))\n meta2 = qiime2.Metadata(pd.DataFrame(\n data=[['CATCATCAT'], ['WANTCAT']],\n index=pd.Index(['01', '02'], name='feature-id'),\n columns=['sequence']\n ))\n updated = _rename.rename_ids(table,\n meta1.get_column('animal'))\n updated = 
_rename.rename_ids(updated,\n meta2.get_column('sequence'),\n axis='feature')\n\n npt.assert_array_equal(np.array(updated.ids(axis='sample')),\n np.array(['cat', 'rat', 'dog']))\n npt.assert_array_equal(np.array(updated.ids(axis='observation')),\n np.array(['CATCATCAT', 'WANTCAT']))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"pandas.Index",
"numpy.array",
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ninikolov/lha | [
"b209684b8709e191e026892c5918b53402024e51"
] | [
"tools/embeddings.py"
] | [
"\"\"\"Code for computing embeddings of text.\"\"\"\n\nimport numpy as np\nfrom lha.preprocessing.clean_text import clean_text\nimport logging\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n\nEMBED_MODES = [\"avg\", \"sent2vec\"]\n\n\ndef get_word_vectors(txt, vector_dict, vector_size, weights=None, skip_missing=True):\n vectors = []\n missing_words = []\n for word in txt:\n if word in vector_dict.keys():\n word_embedding = vector_dict[word]\n vectors.append(word_embedding)\n else:\n missing_words.append(word)\n if not skip_missing or weights is not None:\n vectors.append(np.zeros(vector_size))\n if weights is not None:\n return np.array(vectors) * weights[:, None]\n return vectors\n\n\ndef get_word_vector_dic(txt, w2v):\n vectors = {}\n for word in txt:\n if word in w2v.wv.vocab:\n word_embedding = w2v.wv[word]\n vectors[word] = word_embedding\n return vectors\n\n\ndef get_word_vector_list(doc, w2v):\n \"\"\"Get all the vectors for a text\"\"\"\n vectors = []\n for word in doc:\n try:\n vectors.append(w2v.wv[word])\n except KeyError:\n continue\n return vectors\n\n\ndef average_word_embedding(txt, vector_dict, vector_size):\n \"\"\"Compute averaged embedding\"\"\"\n vectors = get_word_vectors(txt, vector_dict, vector_size)\n if len(vectors) == 0:\n # logging.error(\"Couldn't produce an embedding for {}\".format(txt))\n return np.ones(vector_size) * np.nan\n v = np.mean(vectors, 0)\n assert len(v) == vector_size\n return v\n\n\ndef get_tfidf_weights(sentence, model, vocab):\n \"\"\"Get the tfidf weights of a text, normed to sum to 1.\"\"\"\n tfidf_weights = model.transform([\" \".join(sentence)])\n weights = np.zeros(len(sentence))\n for i, word in enumerate(sentence):\n try:\n word_idx = vocab.get(word)\n weights[i] = tfidf_weights[0, word_idx]\n except IndexError:\n weights[i] = 0.\n s = np.sum(weights)\n if s > 0.:\n weights /= s\n return weights\n\n\ndef tf_idf_top_k_embedding(sentence, vector_dict, vector_size, tfidf_model, top_k=5):\n \"\"\"Compute tfidf embedding (weighted WCD).\"\"\"\n vocab = tfidf_model.get_params()['vect'].vocabulary_\n weights = get_tfidf_weights(sentence, tfidf_model, vocab)\n top_k_words = np.argsort(weights)[-top_k:][::-1]\n sent_vectors = get_word_vectors(sentence, vector_dict, vector_size, skip_missing=False)\n vectors = np.array(sent_vectors)\n k_vectors = np.array([sent_vectors[i] for i in top_k_words])\n try:\n projection = vectors.dot(k_vectors.T)\n v = np.sum(projection, 0)\n if len(v.shape) > 0 and v.shape[0] == top_k:\n return v\n return np.ones(top_k) * np.nan\n except Exception as e:\n print(sentence)\n raise e\n\n\ndef tf_idf_embedding(sentence, vector_dict, vector_size, tfidf_model):\n \"\"\"Compute tfidf embedding (weighted WCD).\"\"\"\n vocab = tfidf_model.get_params()['vect'].vocabulary_\n weights = get_tfidf_weights(sentence, tfidf_model, vocab)\n vectors = get_word_vectors(sentence, vector_dict, vector_size, weights)\n if len(vectors) == 0:\n return np.ones(vector_size) * np.nan\n return np.sum(vectors, 0)\n\n\ndef infersent(sentence, model):\n embeddings = model.encode(sentence, tokenize=True)\n return embeddings\n\n\ndef embed_text(txt, model, vector_dict=None, vector_size=None, mode=\"sent2vec\", clean=True):\n if clean:\n txt = clean_text(txt)\n\n if vector_dict is None and model is not None and mode != \"sent2vec\":\n vector_dict = get_word_vector_dic(txt, model)\n vector_size = model.vector_size\n\n assert mode in EMBED_MODES\n if mode == \"avg\":\n return average_word_embedding(txt, 
vector_dict, vector_size)\n elif mode == \"sent2vec\":\n if type(txt) == list:\n txt = \" \".join(txt)\n return model.embed_sentence(txt)\n elif mode == \"bert\":\n return model(txt)\n\n\ndef embed_bulk_text(txt, w2v, tfidf_model=None, mode=\"avg\", infersent_model=None):\n if tfidf_model is not None:\n mode = \"tfidf\"\n assert mode in EMBED_MODES\n if mode == \"avg\":\n return [average_word_embedding(t, w2v) for t in txt]\n elif mode == \"tfidf\":\n return tf_idf_embedding(txt, w2v, tfidf_model)\n"
] | [
[
"numpy.ones",
"numpy.mean",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
siddhantsrvstv284/Image-Classifier | [
"35c1ad22e352cd307a5eb9e621ca718a5ccc8604"
] | [
"train_utils.py"
] | [
"\nimport torch\nimport numpy as np\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom PIL import Image\n\nfrom torchvision import datasets, transforms, models\n\ndef load_data(data_dir):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n mean_norms = [0.485, 0.456, 0.406]\n std_devs = [0.229, 0.224, 0.225]\n \n data_transforms = {\n 'train': transforms.Compose([\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(30),\n transforms.ToTensor(),\n transforms.Normalize(mean_norms, std_devs)\n ]),\n 'test': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean_norms, std_devs)\n ]),\n 'valid': transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean_norms, std_devs)\n ]),\n } \n image_datasets = {\n 'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']),\n 'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']),\n 'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid'])\n }\n\n # TODO: Using the image datasets and the trainforms, define the dataloaders\n dataloaders = {\n 'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=64, shuffle=True),\n 'test': torch.utils.data.DataLoader(image_datasets['test'], batch_size=32),\n 'valid': torch.utils.data.DataLoader(image_datasets['valid'], batch_size=32, shuffle=True)\n }\n # trainloader, validloader and testloader for training, validation and testing\n trainloader = dataloaders['train']\n testloader = dataloaders['test']\n validloader = dataloaders['valid']\n \n return trainloader, testloader, validloader, image_datasets['train'], image_datasets['test'], image_datasets['valid']\n"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lordfjw/OptimalGradCheckpointing | [
"c6914657c20097bc58a43c5f17e5696170137446"
] | [
"graph.py"
] | [
"from torch import nn as nn\nfrom torch.utils.checkpoint import checkpoint\nfrom queue import Queue\nimport networkx as nx\nimport torch\nimport torch.nn.functional as F\nfrom net.layer import TupleConstruct, TupleIndexing, Mul2, Add2, BasicIdentity, Cat, ListConstruct, Flatten, View, FunctionWrapperV2\nfrom copy import deepcopy\n\n# todo: get shapes of all the tensors when tracing\n\nBasic_ops = (nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d, nn.AdaptiveMaxPool1d, nn.AdaptiveMaxPool2d,\n nn.AdaptiveMaxPool3d, nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,\n nn.Bilinear, nn.CELU, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d,\n nn.ConstantPad1d, nn.ConstantPad2d, nn.ConstantPad3d, nn.Dropout, nn.Dropout2d, nn.Dropout3d, nn.ELU, nn.Embedding,\n nn.EmbeddingBag, nn.FeatureAlphaDropout, nn.FractionalMaxPool2d, nn.FractionalMaxPool3d, nn.GELU, nn.GLU, nn.GroupNorm,\n nn.GRU, nn.GRUCell, nn.Hardtanh, nn.Identity, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,\n nn.LayerNorm, nn.LeakyReLU, nn.Linear, nn.LocalResponseNorm, nn.LogSigmoid, nn.LPPool1d, nn.LPPool2d, nn.LeakyReLU,\n nn.LogSoftmax, nn.LSTM, nn.LSTMCell, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d, nn.MaxUnpool1d, nn.MaxUnpool2d,\n nn.MaxUnpool3d, nn.MultiheadAttention, nn.PReLU, nn.ReLU, nn.ReLU6, nn.RReLU, nn.ReflectionPad1d, nn.ReflectionPad2d,\n nn.ReplicationPad1d, nn.ReplicationPad2d, nn.ReplicationPad3d, nn.SELU, nn.Sigmoid, nn.Softmax, nn.Softmax2d,\n nn.Softmin, nn.Softplus, nn.Softshrink, nn.Softshrink, nn.Tanh, nn.Tanhshrink, nn.Upsample, nn.UpsamplingBilinear2d,\n nn.UpsamplingNearest2d, nn.ZeroPad2d)\nMulti_input_ops = (TupleConstruct, ListConstruct)\n\n\ndef parse_computation_graph(module, inputs):\n '''\n :param module: nn.module to parse for computation graph\n :param input: torch.Tensor, example input tensor\n :return: nx.MultiDiGraph\n '''\n\n computation_graph, input_node_ids, output_node_ids = parse_raw_computation_graph_from_jit(module, inputs)\n computation_graph = optimize_computation_graph(computation_graph, input_node_ids, output_node_ids)\n\n sources, targets = get_source_target(computation_graph)\n if len(sources) > 1 or len(targets) > 1:\n raise Exception(\"Currently not supporting multi input or output graph, we are working on supporting it\")\n source, target = sources[0], targets[0]\n with torch.no_grad():\n tmp_parsed_segment = Segment(computation_graph, sources[0], targets[0], do_checkpoint=False, record_tensor_cost=True)\n output = tmp_parsed_segment.forward(inputs[0])\n return computation_graph, source, target\n\ndef parse_raw_computation_graph_from_jit(module, inputs):\n '''\n :param module: nn.module to parse for computation graph\n :param input: torch.Tensor, example input tensor\n :return: nx.MultiDiGraph\n '''\n add_input_tensor_hook_recursively(module)\n output = module.forward(*inputs)\n remove_input_tensor_hook_recursively(module)\n computation_graph, _, input_node_ids, output_node_ids = build_computation_graph_recursively(module, inputs, inputs_nodes_ids=None, outputs_nodes_ids=None, cur_node_idx=None)\n clean_up_input_tensor_recursively(module)\n return computation_graph, input_node_ids, output_node_ids\n\ndef classify_node_type(node_type):\n # todo: may need to refine\n if node_type[0] == '(' and node_type[-1] == ')':\n # parse tuple\n return 'Tuple'\n elif node_type == 'Tensor[]':\n return 'List'\n elif node_type in ['int', 'float', 'bool', 'int[]', 'float[]', 'bool[]', 
'None']:\n return node_type\n elif node_type == 'Tensor' or 'Float' in node_type or 'Long' in node_type:\n # todo: may need to add more dtype\n return 'Tensor'\n else:\n return 'Module'\n\ndef parse_node_op(node_op):\n splits = node_op.split('(')\n op_def = '('.join(splits[:-1])\n op_args = splits[-1].strip(')')\n if len(op_args) == 0:\n op_args = []\n else:\n op_args = op_args.split(', ')\n return {'op_def': op_def, 'op_args': op_args}\n\ndef retrieve_constant_value(local_graph_dict, node_class, node_op):\n if node_class in ['int', 'float', 'bool']:\n op_def = node_op['op_def']\n if 'prim::Constant' in op_def:\n dtype = eval(node_class)\n value_str = op_def.split('[')[-1].split(']')[0].replace('value=', '')\n return (dtype)(value_str)\n elif op_def == 'aten::size':\n op_args = node_op['op_args']\n tensor_node, index_node = op_args\n tensor_shape = local_graph_dict[tensor_node]['shape']\n index = local_graph_dict[index_node]['value']\n return tensor_shape[index]\n elif op_def == 'prim::NumToTensor':\n # todo: we are handling tensor from numToTensor as constant and directly treat its value as int/float/bool, this might have risk\n op_args = node_op['op_args']\n return local_graph_dict[op_args[0]]['value']\n elif op_def == 'aten::Int':\n op_args = node_op['op_args']\n return local_graph_dict[op_args[0]]['value']\n else:\n raise NotImplementedError\n elif node_class == 'None':\n return None\n elif node_class in ['int[]', 'float[]', 'bool[]']:\n op_args = node_op['op_args']\n return [local_graph_dict[n]['value'] for n in op_args]\n else:\n raise NotImplementedError\n\ndef parse_input_node_str(node_str):\n # remove comment\n node_str = node_str.split(' #')[0]\n node_groups = node_str.split(', %')\n for i in range(1, len(node_groups)):\n # add back )\n node_groups[i] = '%' + node_groups[i]\n node_dict = {}\n for node_group in node_groups:\n if ' = ' in node_group:\n node_def, node_op = node_group.split(' = ')\n else:\n node_def = node_group\n node_name, node_type = node_def.split(' : ')\n node_class = classify_node_type(node_type)\n if node_name not in node_dict:\n node_dict[node_name] = {'node_class': node_class, 'node_op': None, 'output_id': None}\n return node_dict\n\n\ndef parse_node_str(node_str):\n # remove comment\n node_str = node_str.split(' #')[0]\n op_groups = node_str.split('), %')\n for i in range(len(op_groups) - 1):\n # add back )\n op_groups[i] += ')'\n for i in range(1, len(op_groups)):\n # add back )\n op_groups[i] = '%' + op_groups[i]\n\n node_dict = {}\n for op_group in op_groups:\n node_group, node_op = op_group.split(' = ')\n node_op = parse_node_op(node_op)\n node_defs = node_group.split(', %')\n for i in range(1, len(node_defs)):\n # add back %\n node_defs[i] = '%' + node_defs[i]\n for i, node_def in enumerate(node_defs):\n node_name, node_type = node_def.split(' : ')\n node_class = classify_node_type(node_type)\n if node_name not in node_dict:\n node_dict[node_name] = {'node_class': node_class, 'node_op': node_op, 'output_id': i}\n if node_class == 'Tensor' and '(' in node_type and ')' in node_type:\n # try to get shape\n shape_str = node_type.split('(')[-1].split(')')[0]\n if ', ' in shape_str:\n shape = [int(s) for s in shape_str.split(', ')]\n node_dict[node_name]['shape'] = shape\n else:\n node_dict[node_name]['shape'] = []\n\n # if node_class in ['int', 'float', 'bool']:\n # value = retrieve_constant_value(node_class, node_op)\n # node_dict[node_name]['value'] = value\n\n '''\n # remove comment\n node_str = node_str.split(' #')[0]\n splits = node_str.split(', %')\n 
for i in range(1, len(splits)):\n # add back %\n splits[i] = '%' + splits[i]\n node_dict = {}\n queue = []\n for s in splits:\n if ' = ' in s:\n node_def, node_op = s.split(' = ')\n node_name, node_type = node_def.split(' : ')\n node_class = classify_node_type(node_type)\n node_dict[node_name] = {'node_class': node_class, 'node_op': None, 'output_id': None}\n queue.append(node_name)\n node_op = parse_node_op(node_op)\n for i, queued_node_name in enumerate(queue):\n node_dict[queued_node_name]['node_op'] = node_op\n node_dict[queued_node_name]['output_id'] = i\n queue = []\n else:\n node_name, node_type = s.split(' : ')\n node_class = classify_node_type(node_type)\n node_dict[node_name] = {'node_class': node_class, 'node_op': None, 'output_id': None}\n queue.append(node_name)\n '''\n return node_dict\n\n'''def parse_node_def(node_def):\n splits = node_def.split(' = ')\n if len(splits) == 1:\n node_type = splits[0]\n node_class = classify_node_type(node_type)\n return {'node_class': node_class, 'node_op': None}\n elif len(splits) == 2:\n node_type, node_op = splits\n node_class = classify_node_type(node_type)\n node_op = parse_node_op(node_op)\n return {'node_class': node_class, 'node_op': node_op}\n else:\n raise NotImplementedError\n\ndef parse_inputs(graph_inputs):\n local_graph_dict = {}\n for i in graph_inputs:\n input_str = str(i)\n node_strs = input_str.split(', ')\n for node_str in node_strs:\n # remove comment\n node_str = node_str.split(' #')[0]\n node_name, node_def = node_str.split(' : ')\n if node_name in local_graph_dict:\n continue\n # parse node_def\n local_graph_dict[node_name] = parse_node_def(node_def)\n return local_graph_dict\n\ndef parse_nodes(graph_nodes, local_graph_dict={}):\n for n in graph_nodes:\n node_str = str(n)\n # remove comment\n node_str = node_str.split(' #')[0]\n node_name, node_def = node_str.split(' : ')\n if node_name in local_graph_dict:\n continue\n # parse node_def\n local_graph_dict[node_name] = parse_node_def(node_def)\n return local_graph_dict\n\ndef parse_outputs(graph_outputs, local_graph_dict={}):\n for o in graph_outputs:\n node_str = str(o)\n # remove comment\n node_str = node_str.split(' #')[0]\n node_name, node_def = node_str.split(' : ')\n if node_name in local_graph_dict:\n continue\n # parse node_def\n local_graph_dict[node_name] = parse_node_def(node_def)\n return local_graph_dict'''\n\ndef get_python_module(local_graph_dict, module, node_name):\n node_info = local_graph_dict[node_name]\n node_class, node_op = node_info['node_class'], node_info['node_op']\n if node_class == 'Module':\n if node_op == None or node_name == '%self.1':\n local_graph_dict[node_name]['python_module'] = module\n return local_graph_dict\n op_def, op_args = node_op['op_def'], node_op['op_args']\n if len(op_args) == 1:\n if 'prim::GetAttr' in op_def:\n parent_node_name = op_args[0]\n if 'python_module' not in local_graph_dict[parent_node_name]:\n raise Exception(\"python_module not defined for {}\".format(parent_node_name))\n parent_module = local_graph_dict[parent_node_name]['python_module']\n attr_name = op_def.split('[')[-1].split(']')[0].replace(\"name=\", '').strip(\"\\\"\")\n local_graph_dict[node_name]['python_module'] = getattr(parent_module, attr_name)\n else:\n raise Exception(\n \"op_def {} conversion to python not implemented, please raise an issue on github\".format(op_def))\n else:\n raise Exception(\"Module {} not recognized, op def {}, op args {}\".format(node_name, op_def, op_args))\n return local_graph_dict\n\ndef 
get_python_modules(local_graph_dict, module):\n # translate all the modules in local_graph_dict to python modules\n for node_name in local_graph_dict:\n node_info = local_graph_dict[node_name]\n node_class, node_op = node_info['node_class'], node_info['node_op']\n if node_class == 'Module':\n if node_op == None or node_name == '%self.1':\n local_graph_dict[node_name]['python_module'] = module\n continue\n op_def, op_args = node_op['op_def'], node_op['op_args']\n if len(op_args) == 1:\n if 'prim::GetAttr' in op_def:\n parent_node_name = op_args[0]\n if 'python_module' not in local_graph_dict[parent_node_name]:\n raise Exception(\"python_module not defined for {}\".format(parent_node_name))\n parent_module = local_graph_dict[parent_node_name]['python_module']\n attr_name = op_def.split('[')[-1].split(']')[0].replace(\"name=\", '').strip(\"\\\"\")\n local_graph_dict[node_name]['python_module'] = getattr(parent_module, attr_name)\n else:\n raise Exception(\"op_def {} conversion to python not implemented, please raise an issue on github\".format(op_def))\n else:\n raise Exception(\"Module {} not recognized, op def {}, op args {}\".format(node_name, op_def, op_args))\n return local_graph_dict\n\ndef get_python_module_from_node_op(local_graph_dict, node_op, output_id, local_node_mapping):\n # todo: add drop out, concat, relu, ...\n # todo: add code for functions from torch.nn.functional\n # todo: add code for constant add, mul, ...\n op_def, op_args, op_output_id = node_op['op_def'], node_op['op_args'], output_id\n if 'prim::CallMethod' in op_def:\n module_node = op_args[0]\n python_module = local_graph_dict[module_node]['python_module']\n if isinstance(python_module, Basic_ops):\n basic_op = True\n else:\n basic_op = False\n input_nodes = op_args[1:]\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif 'prim::TupleConstruct' in op_def:\n python_module = TupleConstruct()\n basic_op = True\n input_nodes = op_args\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif 'prim::TupleUnpack' in op_def:\n python_module = TupleIndexing(index=op_output_id)\n basic_op = True\n input_nodes = op_args\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif 'prim::ListConstruct' in op_def:\n python_module = ListConstruct()\n basic_op = True\n input_nodes = op_args\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif 'aten::mul' in op_def:\n python_module = Mul2()\n basic_op = True\n input_nodes = op_args\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif 'aten::add' in op_def:\n python_module = Add2()\n basic_op = True\n input_nodes = op_args[:-1]\n #todo: not sure what constant do here\n constant = local_graph_dict[op_args[-1]]['value']\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif 'aten::cat' in op_def:\n assert len(op_args) == 2\n basic_op = True\n input_nodes = [op_args[0]]\n dim = 
local_graph_dict[op_args[1]]['value']\n python_module = Cat(dim=dim)\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif 'aten::flatten' in op_def:\n assert len(op_args) == 3\n basic_op = True\n input_nodes = [op_args[0]]\n # todo: not sure what this constant means\n constant = local_graph_dict[op_args[1]]['value']\n dim = local_graph_dict[op_args[2]]['value']\n python_module = Flatten(dim=dim)\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif 'aten::relu_' in op_def:\n assert len(op_args) == 1\n python_module = nn.ReLU(inplace=True)\n basic_op = True\n input_node_ids = [local_node_mapping[n] for n in op_args]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif 'aten::view' in op_def:\n assert len(op_args) == 2\n basic_op = True\n input_nodes = [op_args[0]]\n shape = local_graph_dict[op_args[1]]['value']\n python_module = View(shape=shape)\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n elif op_def in ['aten::max_pool2d', 'aten::adaptive_avg_pool2d', 'aten::avg_pool2d', 'aten::dropout']:\n func_name = op_def.replace('aten::', '')\n func = getattr(F, func_name)\n basic_op = True\n input_nodes = [op_args[0]]\n args = [local_graph_dict[n]['value'] for n in op_args[1:]]\n python_module = FunctionWrapperV2(run_func=func, run_args=args)\n input_node_ids = [local_node_mapping[n] for n in input_nodes]\n return {'python_module': python_module, 'input_node_ids': input_node_ids, 'basic_op': basic_op}\n else:\n raise Exception(\"op_def {} conversion to python not implemented, please raise an issue on github\".format(op_def))\n\ndef merge_dict_list(dict_list):\n new_dict = {}\n for d in dict_list:\n for key in d:\n if key not in new_dict:\n new_dict[key] = d[key]\n return new_dict\n\ndef build_computation_graph_recursively(module, inputs, inputs_nodes_ids=None, outputs_nodes_ids=None, cur_node_idx=None):\n device = inputs[0].device\n if cur_node_idx is None:\n cur_node_idx = 0\n with torch.no_grad():\n traced = torch.jit.trace(module.forward, tuple(inputs))\n del inputs\n traced_graph = traced.graph\n graph_inputs = [str(i.node()).strip('\\n') for i in traced_graph.inputs()]\n graph_nodes = [str(n).strip('\\n') for n in traced_graph.nodes()]\n graph_outputs = [str(o.node()).strip('\\n') for o in traced_graph.outputs()]\n\n input_node_dicts = [parse_input_node_str(i) for i in graph_inputs]\n internal_node_dicts = [parse_node_str(n) for n in graph_nodes]\n output_node_dicts = [parse_node_str(o) for o in graph_outputs]\n node_dicts = input_node_dicts + internal_node_dicts + output_node_dicts\n\n local_graph_dict = merge_dict_list(node_dicts)\n inputs_dict = merge_dict_list(input_node_dicts)\n outputs_dict = merge_dict_list(output_node_dicts)\n\n # local_graph_dict = parse_inputs(graph_inputs)\n # local_graph_dict = parse_nodes(graph_nodes, local_graph_dict)\n # local_graph_dict = parse_outputs(graph_outputs, local_graph_dict)\n\n local_graph_dict = get_python_modules(local_graph_dict, module)\n\n local_node_mapping = {}\n if inputs_nodes_ids is not None:\n input_node_names = [n for n in inputs_dict if inputs_dict[n]['node_class'] != 'Module']\n assert len(input_node_names) == len(inputs_nodes_ids)\n for 
input_node_name, input_node_id in zip(input_node_names, inputs_nodes_ids):\n local_node_mapping[input_node_name] = input_node_id\n else:\n # allocate input tensors first\n inputs_nodes_ids = []\n input_node_names = [n for n in inputs_dict if inputs_dict[n]['node_class'] != 'Module']\n for input_node_name in input_node_names:\n local_node_mapping[input_node_name] = cur_node_idx\n inputs_nodes_ids.append(cur_node_idx)\n cur_node_idx += 1\n if outputs_nodes_ids is not None:\n output_node_names = [n for n in outputs_dict if outputs_dict[n]['node_class'] != 'Module']\n assert len(output_node_names) == len(outputs_nodes_ids)\n for output_node_name, output_node_id in zip(output_node_names, outputs_nodes_ids):\n local_node_mapping[output_node_name] = output_node_id\n else:\n # allocate output tensors\n outputs_nodes_ids = []\n output_node_names = [n for n in outputs_dict if outputs_dict[n]['node_class'] != 'Module']\n for output_node_name in output_node_names:\n local_node_mapping[output_node_name] = cur_node_idx\n outputs_nodes_ids.append(cur_node_idx)\n cur_node_idx += 1\n\n graph = nx.MultiDiGraph()\n for node_name in local_graph_dict:\n node_info = local_graph_dict[node_name]\n node_class, node_op, node_output_id = node_info['node_class'], node_info['node_op'], node_info['output_id']\n # todo: rewrite node_class, a workaround, sometimes LongTensor will be created from int\n if node_op != None and node_op['op_def'] == 'prim::NumToTensor':\n node_class = 'int'\n if node_class in ['Tensor', 'Tuple', 'List']:\n if node_name not in local_node_mapping:\n # allocate node id\n node_idx = cur_node_idx\n local_node_mapping[node_name] = node_idx\n cur_node_idx += 1\n else:\n # use existing node id\n node_idx = local_node_mapping[node_name]\n\n graph.add_node(node_idx)\n if node_op != None:\n op_def, op_args = node_op['op_def'], node_op['op_args']\n if len(op_args) > 0:\n op_input_node_names = op_args\n # run a sanity check\n for input_node_name in op_input_node_names:\n if input_node_name not in local_node_mapping:\n raise Exception(\"{} is input node for op {}, but not recorded by local_node_mapping\".format(input_node_name, op_def))\n\n python_module_dict = get_python_module_from_node_op(local_graph_dict, node_op, node_output_id, local_node_mapping)\n python_module, node_input_ids, basic_op = python_module_dict['python_module'], python_module_dict['input_node_ids'], python_module_dict['basic_op']\n if basic_op:\n if len(node_input_ids) > 1:\n # multi-input op\n transition_op = python_module\n transition_input_order = []\n for node_input_id in node_input_ids:\n identity = BasicIdentity()\n graph.add_edge(node_input_id, node_idx, cost=0, module=identity)\n transition_input_order.append((node_input_id, 0))\n graph.nodes[node_idx]['transition'] = transition_op\n graph.nodes[node_idx]['transition_input_order'] = transition_input_order\n elif len(node_input_ids) == 1:\n node_input_id = node_input_ids[0]\n graph.add_edge(node_input_id, node_idx, cost=0, module=python_module)\n else:\n raise Exception(\"op_def {} has no input nodes\".format(op_def))\n else:\n # construct computation graph recursively\n node_inputs = python_module.__input_tensor__\n subgraph, cur_node_idx, _, _ = build_computation_graph_recursively(python_module, node_inputs,\n inputs_nodes_ids=node_input_ids, outputs_nodes_ids=[node_idx], cur_node_idx=cur_node_idx)\n del python_module.__input_tensor__\n # merge subgraph in graph\n for node in subgraph.nodes:\n if node not in graph.nodes:\n graph.add_nodes_from({node: subgraph.nodes[node]}, 
**subgraph.nodes[node])\n else:\n # add attributes\n for key in subgraph.nodes[node]:\n graph.nodes[node][key] = subgraph.nodes[node][key]\n for edge in subgraph.edges:\n graph.add_edges_from({edge: subgraph.edges[edge]}, **subgraph.edges[edge])\n elif 'prim::Param' in op_def:\n pass\n else:\n raise Exception(\"Unrecognized op_def {} with empty input args\".format(op_def))\n elif node_class in ['int[]', 'float[]', 'bool[]', 'int', 'float', 'bool', 'None']:\n # implement here to retreive constant list\n local_node_mapping[node_name] = None\n value = retrieve_constant_value(local_graph_dict, node_class, node_op)\n local_graph_dict[node_name]['value'] = value\n elif node_class == 'Module':\n local_node_mapping[node_name] = None\n local_graph_dict = get_python_module(local_graph_dict, module, node_name)\n else:\n local_node_mapping[node_name] = None\n\n return graph, cur_node_idx, inputs_nodes_ids, outputs_nodes_ids\n\ndef optimize_computation_graph(G, input_node_ids, output_node_ids):\n G = merge_tuple_op(G)\n G = trim_unused_nodes(G, input_node_ids, output_node_ids)\n G = rewrite_multi_input_op(G)\n G = merge_inplace_op(G)\n return G\n\ndef trim_unused_nodes(graph, input_node_ids, output_node_ids):\n '''\n remove unused nodes (no incoming edge or no outgoing edge)\n :param graph: nx.MultiDiGraph\n :param input_node_ids: list of input node indices\n :param output_node_ids: list of output node indices\n :return:\n '''\n edges = [e for e in graph.edges()]\n nodes = [n for n in graph.nodes()]\n source_set = set([e[0] for e in edges])\n target_set = set([e[1] for e in edges])\n used_node_set = source_set.intersection(target_set)\n for input_node_id in input_node_ids:\n used_node_set.add(input_node_id)\n for output_node_id in output_node_ids:\n used_node_set.add(output_node_id)\n unused_node_set = set(nodes).difference(used_node_set)\n if len(unused_node_set) == 0:\n return graph\n else:\n for node in unused_node_set:\n graph.remove_node(node)\n graph = trim_unused_nodes(graph, input_node_ids, output_node_ids)\n return graph\n\ndef merge_tuple_op(graph):\n '''\n remove tuple construct and tuple indexing edges and merge nodes\n :param graph: nx.MultiDiGraph\n :return:\n '''\n tuple_node_ids = [n for n in graph.nodes if 'transition' in graph.nodes[n] and isinstance(graph.nodes[n]['transition'], TupleConstruct)]\n for tuple_node_id in tuple_node_ids:\n input_edges = graph.nodes[tuple_node_id]['transition_input_order']\n output_edges = [None for _ in input_edges]\n for e in graph.edges:\n s, t, id = e\n op = graph.edges[e]['module']\n if s == tuple_node_id:\n output_edges[op.index] = (t, id)\n merge_flag = True\n for output_edge in output_edges:\n output_op = graph.edges[(tuple_node_id, output_edge[0], output_edge[1])]['module']\n if not isinstance(output_op, TupleIndexing):\n merge_flag = False\n break\n if not merge_flag:\n continue\n # reroute the edges, and merge nodes before and after tuple\n graph.remove_node(tuple_node_id)\n for input_edge, output_edge in zip(input_edges, output_edges):\n input_node_id, output_node_id = input_edge[0], output_edge[0]\n # merge output node into input node\n for edge in graph.out_edges(output_node_id):\n multi_edges = graph.get_edge_data(edge[0], edge[1])\n for id in multi_edges:\n edge_key = (edge[0], edge[1], id)\n new_edge_key = (input_node_id, edge[1], id)\n graph.add_edges_from({new_edge_key: graph.edges[edge_key]}, **graph.edges[edge_key])\n # rewrite transition_input_order\n if 'transition_input_order' in graph.nodes[edge[1]]:\n for i, (trans_s, trans_id) 
in enumerate(graph.nodes[edge[1]]['transition_input_order']):\n if trans_s == output_node_id:\n graph.nodes[edge[1]]['transition_input_order'][i] = (input_node_id, graph.nodes[edge[1]]['transition_input_order'][i][1])\n graph.remove_node(output_node_id)\n return graph\n\ndef merge_inplace_op(graph):\n '''\n merge inplace operation such as nn.ReLU(inplace=True) into previous operations\n :param graph: nx.MultiDiGraph\n :return:\n '''\n inplace_edges = []\n for e in graph.edges:\n op = graph.edges[e]['module']\n if hasattr(op, 'inplace') and getattr(op, 'inplace'):\n inplace_edges.append(e)\n for e in inplace_edges:\n s, t, id = e\n inplace_op = graph.edges[e]['module']\n if 'transition' in graph.nodes[s]:\n # if the previous op is a multi input op (transition op) then merge into transition op\n graph.nodes[s]['transition'] = nn.Sequential(graph.nodes[s]['transition'], inplace_op)\n else:\n # merge inplace op into previous op\n for edge in graph.in_edges(s):\n multi_edges = graph.get_edge_data(edge[0], edge[1])\n for id in multi_edges:\n edge_key = (edge[0], edge[1], id)\n edge_op = graph.edges[edge_key]['module']\n graph.edges[edge_key]['module'] = nn.Sequential(edge_op, inplace_op)\n # reroute outgoing edges\n for edge in graph.out_edges(t):\n multi_edges = graph.get_edge_data(edge[0], edge[1])\n for id in multi_edges:\n edge_key = (edge[0], edge[1], id)\n new_edge_key = (s, edge[1], id)\n graph.add_edges_from({new_edge_key: graph.edges[edge_key]}, **graph.edges[edge_key])\n # rewrite transition_input_order\n if 'transition_input_order' in graph.nodes[edge[1]]:\n for i, (trans_s, trans_id) in enumerate(graph.nodes[edge[1]]['transition_input_order']):\n if trans_s == t:\n graph.nodes[edge[1]]['transition_input_order'][i] = (s, graph.nodes[edge[1]]['transition_input_order'][i][1])\n graph.remove_node(t)\n return graph\n\ndef rewrite_multi_input_op(graph):\n multi_input_node_ids = [n for n in graph.nodes if 'transition' in graph.nodes[n] and isinstance(graph.nodes[n]['transition'], Multi_input_ops)]\n for multi_input_node_id in multi_input_node_ids:\n input_edges = graph.nodes[multi_input_node_id]['transition_input_order']\n\n for edge in graph.out_edges(multi_input_node_id):\n multi_edges = graph.get_edge_data(edge[0], edge[1])\n if len(multi_edges) > 1:\n raise Exception(\"More than 1 edges exist between 2 nodes when optimizing the graph\")\n for id in multi_edges:\n edge_key = (edge[0], edge[1], id)\n op = graph.edges[edge_key]['module']\n graph.nodes[edge[1]]['transition'] = op\n graph.nodes[edge[1]]['transition_input_order'] = deepcopy(input_edges)\n for (tran_s, trans_id) in input_edges:\n graph.add_edges_from({(tran_s, edge[1], trans_id): graph.edges[(tran_s, multi_input_node_id, trans_id)]},\n **graph.edges[(tran_s, multi_input_node_id, trans_id)])\n\n graph.remove_node(multi_input_node_id)\n\n return graph\n\n\ndef get_source_target(graph):\n edges = [e for e in graph.edges()]\n s_set = set([e[0] for e in edges])\n t_set = set([e[1] for e in edges])\n intermediate_node_set = s_set.intersection(t_set)\n source_set = s_set.difference(intermediate_node_set)\n target_set = t_set.difference(intermediate_node_set)\n return list(source_set), list(target_set)\n\n\ndef add_input_tensor_hook_recursively(module):\n if isinstance(module, Basic_ops):\n # handle = module.register_forward_hook(input_tensor_hook)\n # module.__hook_handle__ = handle\n pass\n else:\n handle = module.register_forward_hook(input_tensor_hook)\n module.__hook_handle__ = handle\n for name, sub_module in 
module._modules.items():\n add_input_tensor_hook_recursively(sub_module)\n\n\ndef input_tensor_hook(module, input, output):\n # module.__input_shape__ = [i.shape for i in input]\n module.__input_tensor__ = input\n\ndef remove_input_tensor_hook_recursively(module):\n if isinstance(module, Basic_ops):\n # module.__hook_handle__.remove()\n # del module.__hook_handle__\n pass\n else:\n module.__hook_handle__.remove()\n del module.__hook_handle__\n for name, sub_module in module._modules.items():\n remove_input_tensor_hook_recursively(sub_module)\n\ndef clean_up_input_tensor_recursively(module):\n if isinstance(module, Basic_ops):\n if hasattr(module, '__input_tensor__'):\n del module.__input_tensor__\n else:\n if hasattr(module, '__input_tensor__'):\n del module.__input_tensor__\n for name, sub_module in module._modules.items():\n clean_up_input_tensor_recursively(sub_module)\n\n\ndef tuple_to_dict(t):\n l = list(t)\n num = len(l) // 3\n d = {}\n for i in range(num):\n tensor, s, ind = t[i * 3], t[i * 3 + 1], t[i * 3 + 2]\n d[(int(s), int(ind))] = tensor\n return d\n\ndef dict_to_tuple(d):\n l = []\n for (s, ind) in d:\n tensor = d[(s, ind)]\n l.append(tensor)\n # has to use float otherwise throw requires_grad error\n l.append(torch.tensor([float(s)], requires_grad=True))\n l.append(torch.tensor([float(ind)], requires_grad=True))\n return tuple(l)\n\ndef set_segment_training(segment, train=True):\n set_graph_training(segment.G, train=train)\n\n\ndef set_graph_training(graph, train=True):\n for e in graph.edges:\n module = graph.edges[e]['module']\n if isinstance(module, Segment):\n set_graph_training(module.G, train=train)\n else:\n if train:\n graph.edges[e]['module'].train()\n else:\n graph.edges[e]['module'].eval()\n\ndef replace_subgraph(graph1, graph2, source, target, id):\n '''\n replace subgraph in graph1 with graph2\n :param graph1: networkx DiGraph\n :param graph2: networkx DiGraph\n :param source: source vertex in graph1\n :param target: target vertex in graph1\n :param id: if None, meaning source and target is not connected, else specify the connection id\n :return:\n '''\n if source not in graph1.nodes or target not in graph1.nodes:\n raise ValueError\n if id is None:\n nodes1 = set(nx.ancestors(graph1, target))\n nodes2 = set(nx.descendants(graph1, source))\n nodes = (nodes1.intersection(nodes2)).union(set({source, target}))\n edges_add_back = {}\n for node in nodes:\n for p in graph1.predecessors(node):\n if p not in nodes:\n es = graph1.get_edge_data(p, node)\n if es is not None:\n for e in es:\n edges_add_back[(p, node, e)] = es[e]\n for s in graph1.successors(node):\n if s not in nodes:\n es = graph1.get_edge_data(node, s)\n if es is not None:\n for e in es:\n edges_add_back[(node, s, e)] = es[e]\n for node in nodes:\n graph1.remove_node(node)\n for node in graph2.nodes:\n graph1.add_nodes_from({node: graph2.nodes[node]}, **graph2.nodes[node])\n for edge in graph2.edges:\n graph1.add_edges_from({edge: graph2.edges[edge]}, **graph2.edges[edge])\n for edge in edges_add_back:\n if edge not in graph1.edges:\n graph1.add_edges_from({edge: edges_add_back[edge]}, **edges_add_back[edge])\n return graph1\n else:\n graph1.remove_edge(source, target, id)\n for node in graph2.nodes:\n if node != source and node != target:\n graph1.add_nodes_from({node: graph2.nodes[node]}, **graph2.nodes[node])\n for edge in graph2.edges:\n graph1.add_edges_from({edge: graph2.edges[edge]}, **graph2.edges[edge])\n return graph1\n\n\ndef segment_checkpoint_forward(segment):\n def custom_forward(*inputs):\n 
outputs = segment(*inputs)\n return outputs\n\n return custom_forward\n\n# NOTE: checkpoint autograd.function doesn't allow dictionary output, so have to use tensor to hold vertex id\ndef graph_forward(x, G=None, source=None, target=None, successors_dict=None, predecessors_dict=None, edges_dict=None, do_checkpoint=True, record_tensor_cost=False):\n '''\n Do checkpoint forward with each vertex in G as gradient checkpoint or do regular forward with G\n :param G: networkx DAG\n :param source: source vertex key\n :param target: target vertex key\n :param x: input tensor\n :param do_checkpoint: whether to do regular forward or checkpoint forward\n :param record_tensor_cost: whether to record the tensor cost during execution and store in G\n :return:\n '''\n\n tensor_dict = {source: x}\n queue = Queue()\n queue.put(source)\n while not queue.empty():\n vertex_key = queue.get()\n for target_vertex_id in successors_dict[vertex_key]:\n edges = edges_dict[(vertex_key, target_vertex_id)]\n target_vertex = G.nodes[target_vertex_id]\n outputs = {}\n for id in edges:\n op = edges[id]['module']\n input = tensor_dict[vertex_key]\n if do_checkpoint:\n output = checkpoint(segment_checkpoint_forward(op), input)\n else:\n output = op(input)\n\n if type(output) == tuple:\n output = tuple_to_dict(output)\n for key in output:\n outputs[key] = output[key]\n else:\n outputs[(vertex_key, id)] = output\n\n\n transition = target_vertex.get('transition', None)\n if transition is None:\n tensor_dict[target_vertex_id] = outputs[list(outputs.keys())[0]]\n queue.put(target_vertex_id)\n else:\n # handle multi inputs\n transition_input_order = target_vertex['transition_input_order']\n num_input = len(transition_input_order)\n\n inputs_for_transit = tensor_dict.get(target_vertex_id, {})\n for key in outputs:\n inputs_for_transit[key] = outputs[key]\n if len(inputs_for_transit) == num_input:\n inputs = [inputs_for_transit[i] for i in transition_input_order]\n tensor_dict[target_vertex_id] = transition(inputs)\n queue.put(target_vertex_id)\n else:\n tensor_dict[target_vertex_id] = inputs_for_transit\n if record_tensor_cost:\n for node in tensor_dict:\n if type(tensor_dict[node]) == dict:\n pass\n else:\n node_cost = tensor_dict[node].numel()\n G.nodes[node]['cost'] = node_cost\n\n if type(tensor_dict[target]) == dict:\n return dict_to_tuple(tensor_dict[target])\n else:\n return tensor_dict[target]\n\n\nclass Segment(nn.Module):\n '''\n wrapper class for inference with DAG\n '''\n def __init__(self, G, source, target, do_checkpoint=False, record_tensor_cost=False):\n super(Segment, self).__init__()\n self.G = G\n self.source = source\n self.target = target\n self.info_dict = self.prepare_for_forward(G, source, target, do_checkpoint, record_tensor_cost)\n\n def prepare_for_forward(self, G, source, target, do_checkpoint, record_tensor_cost):\n info_dict = {'G': G, 'source': source, 'target': target}\n successors_dict, predecessors_dict, edges_dict = {}, {}, {}\n for v in G.nodes:\n predecessors_dict[v] = [n for n in G.predecessors(v)]\n successors_dict[v] = [n for n in G.successors(v)]\n for key in G.edges:\n e = G.edges[key]\n start, end, id = key\n if (start, end) not in edges_dict:\n edges_dict[(start, end)] = {}\n edges_dict[(start, end)][id] = e\n info_dict.update(successors_dict=successors_dict, predecessors_dict=predecessors_dict, edges_dict=edges_dict,\n do_checkpoint=do_checkpoint, record_tensor_cost=record_tensor_cost)\n return info_dict\n\n\n def forward(self, x):\n return graph_forward(x, **self.info_dict)"
] | [
[
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
akiraenduo/mytutorials | [
"8ede39030b1302c265b10832951ccb4efeb4b200"
] | [
"nbs/utils/utils.py"
] | [
"from __future__ import division\nimport torch\nimport numpy as np\nimport cv2\n\n\ndef nms(bbox, thresh, score=None, limit=None):\n \"\"\"Suppress bounding boxes according to their IoUs and confidence scores.\n Args:\n bbox (array): Bounding boxes to be transformed. The shape is\n :math:`(R, 4)`. :math:`R` is the number of bounding boxes.\n thresh (float): Threshold of IoUs.\n score (array): An array of confidences whose shape is :math:`(R,)`.\n limit (int): The upper bound of the number of the output bounding\n boxes. If it is not specified, this method selects as many\n bounding boxes as possible.\n Returns:\n array:\n An array with indices of bounding boxes that are selected. \\\n They are sorted by the scores of bounding boxes in descending \\\n order. \\\n The shape of this array is :math:`(K,)` and its dtype is\\\n :obj:`numpy.int32`. Note that :math:`K \\\\leq R`.\n from: https://github.com/chainer/chainercv\n \"\"\"\n\n if len(bbox) == 0:\n return np.zeros((0,), dtype=np.int32)\n\n if score is not None:\n order = score.argsort()[::-1]\n bbox = bbox[order]\n bbox_area = np.prod(bbox[:, 2:] - bbox[:, :2], axis=1)\n\n selec = np.zeros(bbox.shape[0], dtype=bool)\n for i, b in enumerate(bbox):\n tl = np.maximum(b[:2], bbox[selec, :2])\n br = np.minimum(b[2:], bbox[selec, 2:])\n area = np.prod(br - tl, axis=1) * (tl < br).all(axis=1)\n\n iou = area / (bbox_area[i] + bbox_area[selec] - area)\n if (iou >= thresh).any():\n continue\n\n selec[i] = True\n if limit is not None and np.count_nonzero(selec) >= limit:\n break\n\n selec = np.where(selec)[0]\n if score is not None:\n selec = order[selec]\n return selec.astype(np.int32)\n\n\ndef postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):\n \"\"\"\n Postprocess for the output of YOLO model\n perform box transformation, specify the class for each detection,\n and perform class-wise non-maximum suppression.\n Args:\n prediction (torch tensor): The shape is :math:`(N, B, 4)`.\n :math:`N` is the number of predictions,\n :math:`B` the number of boxes. 
The last axis consists of\n :math:`xc, yc, w, h` where `xc` and `yc` represent a center\n of a bounding box.\n num_classes (int):\n number of dataset classes.\n conf_thre (float):\n confidence threshold ranging from 0 to 1,\n which is defined in the config file.\n nms_thre (float):\n IoU threshold of non-max suppression ranging from 0 to 1.\n Returns:\n output (list of torch tensor):\n \"\"\"\n box_corner = prediction.new(prediction.shape)\n box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2\n box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2\n box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2\n box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2\n prediction[:, :, :4] = box_corner[:, :, :4]\n\n output = [None for _ in range(len(prediction))]\n for i, image_pred in enumerate(prediction):\n # Filter out confidence scores below threshold\n class_pred = torch.max(image_pred[:, 5:5 + num_classes], 1)\n class_pred = class_pred[0]\n conf_mask = (image_pred[:, 4] * class_pred >= conf_thre).squeeze()\n image_pred = image_pred[conf_mask]\n\n # If none are remaining => process next image\n if not image_pred.size(0):\n continue\n # Get detections with higher confidence scores than the threshold\n ind = (image_pred[:, 5:] * image_pred[:, 4][:, None] >= conf_thre).nonzero()\n # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n detections = torch.cat((\n image_pred[ind[:, 0], :5],\n image_pred[ind[:, 0], 5 + ind[:, 1]].unsqueeze(1),\n ind[:, 1].float().unsqueeze(1)\n ), 1)\n # Iterate through all predicted classes\n unique_labels = detections[:, -1].cpu().unique()\n if prediction.is_cuda:\n unique_labels = unique_labels.cuda()\n for c in unique_labels:\n # Get the detections with the particular class\n detections_class = detections[detections[:, -1] == c]\n nms_in = detections_class.cpu().numpy()\n nms_out_index = nms(\n nms_in[:, :4], nms_thre, score=nms_in[:, 4]*nms_in[:, 5])\n detections_class = detections_class[nms_out_index]\n if output[i] is None:\n output[i] = detections_class\n else:\n output[i] = torch.cat((output[i], detections_class))\n\n return output\n\n\ndef bboxes_iou(bboxes_a, bboxes_b, xyxy=True):\n \"\"\"Calculate the Intersection of Unions (IoUs) between bounding boxes.\n IoU is calculated as a ratio of area of the intersection\n and area of the union.\n Args:\n bbox_a (array): An array whose shape is :math:`(N, 4)`.\n :math:`N` is the number of bounding boxes.\n The dtype should be :obj:`numpy.float32`.\n bbox_b (array): An array similar to :obj:`bbox_a`,\n whose shape is :math:`(K, 4)`.\n The dtype should be :obj:`numpy.float32`.\n Returns:\n array:\n An array whose shape is :math:`(N, K)`. 
\\\n An element at index :math:`(n, k)` contains IoUs between \\\n :math:`n` th bounding box in :obj:`bbox_a` and :math:`k` th bounding \\\n box in :obj:`bbox_b`.\n from: https://github.com/chainer/chainercv\n \"\"\"\n if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4:\n raise IndexError\n\n # top left\n if xyxy:\n tl = torch.max(bboxes_a[:, None, :2], bboxes_b[:, :2])\n # bottom right\n br = torch.min(bboxes_a[:, None, 2:], bboxes_b[:, 2:])\n area_a = torch.prod(bboxes_a[:, 2:] - bboxes_a[:, :2], 1)\n area_b = torch.prod(bboxes_b[:, 2:] - bboxes_b[:, :2], 1)\n else:\n tl = torch.max((bboxes_a[:, None, :2] - bboxes_a[:, None, 2:] / 2),\n (bboxes_b[:, :2] - bboxes_b[:, 2:] / 2))\n # bottom right\n br = torch.min((bboxes_a[:, None, :2] + bboxes_a[:, None, 2:] / 2),\n (bboxes_b[:, :2] + bboxes_b[:, 2:] / 2))\n\n area_a = torch.prod(bboxes_a[:, 2:], 1)\n area_b = torch.prod(bboxes_b[:, 2:], 1)\n en = (tl < br).type(tl.type()).prod(dim=2)\n area_i = torch.prod(br - tl, 2) * en # * ((tl < br).all())\n return area_i / (area_a[:, None] + area_b - area_i)\n\n\ndef label2yolobox(labels, info_img, maxsize, lrflip):\n \"\"\"\n Transform coco labels to yolo box labels\n Args:\n labels (numpy.ndarray): label data whose shape is :math:`(N, 5)`.\n Each label consists of [class, x, y, w, h] where \\\n class (float): class index.\n x, y, w, h (float) : coordinates of \\\n left-top points, width, and height of a bounding box.\n Values range from 0 to width or height of the image.\n info_img : tuple of h, w, nh, nw, dx, dy.\n h, w (int): original shape of the image\n nh, nw (int): shape of the resized image without padding\n dx, dy (int): pad size\n maxsize (int): target image size after pre-processing\n lrflip (bool): horizontal flip flag\n Returns:\n labels:label data whose size is :math:`(N, 5)`.\n Each label consists of [class, xc, yc, w, h] where\n class (float): class index.\n xc, yc (float) : center of bbox whose values range from 0 to 1.\n w, h (float) : size of bbox whose values range from 0 to 1.\n \"\"\"\n h, w, nh, nw, dx, dy = info_img\n x1 = labels[:, 1] / w\n y1 = labels[:, 2] / h\n x2 = (labels[:, 1] + labels[:, 3]) / w\n y2 = (labels[:, 2] + labels[:, 4]) / h\n labels[:, 1] = (((x1 + x2) / 2) * nw + dx) / maxsize\n labels[:, 2] = (((y1 + y2) / 2) * nh + dy) / maxsize\n labels[:, 3] *= nw / w / maxsize\n labels[:, 4] *= nh / h / maxsize\n if lrflip:\n labels[:, 1] = 1 - labels[:, 1]\n return labels\n\n\ndef yolobox2label(box, info_img):\n \"\"\"\n Transform yolo box labels to yxyx box labels.\n Args:\n box (list): box data with the format of [yc, xc, w, h]\n in the coordinate system after pre-processing.\n info_img : tuple of h, w, nh, nw, dx, dy.\n h, w (int): original shape of the image\n nh, nw (int): shape of the resized image without padding\n dx, dy (int): pad size\n Returns:\n label (list): box data with the format of [y1, x1, y2, x2]\n in the coordinate system of the input image.\n \"\"\"\n h, w, nh, nw, dx, dy = info_img\n y1, x1, y2, x2 = box\n box_h = ((y2 - y1) / nh) * h\n box_w = ((x2 - x1) / nw) * w\n y1 = ((y1 - dy) / nh) * h\n x1 = ((x1 - dx) / nw) * w\n label = [y1, x1, y1 + box_h, x1 + box_w]\n return label\n\n\ndef preprocess(img, imgsize, jitter, random_placing=False):\n \"\"\"\n Image preprocess for yolo input\n Pad the shorter side of the image and resize to (imgsize, imgsize)\n Args:\n img (numpy.ndarray): input image whose shape is :math:`(H, W, C)`.\n Values range from 0 to 255.\n imgsize (int): target image size after pre-processing\n jitter (float): amplitude of 
jitter for resizing\n random_placing (bool): if True, place the image at random position\n Returns:\n img (numpy.ndarray): input image whose shape is :math:`(C, imgsize, imgsize)`.\n Values range from 0 to 1.\n info_img : tuple of h, w, nh, nw, dx, dy.\n h, w (int): original shape of the image\n nh, nw (int): shape of the resized image without padding\n dx, dy (int): pad size\n \"\"\"\n h, w, _ = img.shape\n img = img[:, :, ::-1]\n assert img is not None\n\n if jitter > 0:\n # add jitter\n dw = jitter * w\n dh = jitter * h\n new_ar = (w + np.random.uniform(low=-dw, high=dw))\\\n / (h + np.random.uniform(low=-dh, high=dh))\n else:\n new_ar = w / h\n\n if new_ar < 1:\n nh = imgsize\n nw = nh * new_ar\n else:\n nw = imgsize\n nh = nw / new_ar\n nw, nh = int(nw), int(nh)\n\n if random_placing:\n dx = int(np.random.uniform(imgsize - nw))\n dy = int(np.random.uniform(imgsize - nh))\n else:\n dx = (imgsize - nw) // 2\n dy = (imgsize - nh) // 2\n\n img = cv2.resize(img, (nw, nh))\n sized = np.ones((imgsize, imgsize, 3), dtype=np.uint8) * 127\n sized[dy:dy+nh, dx:dx+nw, :] = img\n\n info_img = (h, w, nh, nw, dx, dy)\n return sized, info_img\n\ndef rand_scale(s):\n \"\"\"\n calculate random scaling factor\n Args:\n s (float): range of the random scale.\n Returns:\n random scaling factor (float) whose range is\n from 1 / s to s .\n \"\"\"\n scale = np.random.uniform(low=1, high=s)\n if np.random.rand() > 0.5:\n return scale\n return 1 / scale\n\ndef random_distort(img, hue, saturation, exposure):\n \"\"\"\n perform random distortion in the HSV color space.\n Args:\n img (numpy.ndarray): input image whose shape is :math:`(H, W, C)`.\n Values range from 0 to 255.\n hue (float): random distortion parameter.\n saturation (float): random distortion parameter.\n exposure (float): random distortion parameter.\n Returns:\n img (numpy.ndarray)\n \"\"\"\n dhue = np.random.uniform(low=-hue, high=hue)\n dsat = rand_scale(saturation)\n dexp = rand_scale(exposure)\n\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img = np.asarray(img, dtype=np.float32) / 255.\n img[:, :, 1] *= dsat\n img[:, :, 2] *= dexp\n H = img[:, :, 0] + dhue\n\n if dhue > 0:\n H[H > 1.0] -= 1.0\n else:\n H[H < 0.0] += 1.0\n\n img[:, :, 0] = H\n img = (img * 255).clip(0, 255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n img = np.asarray(img, dtype=np.float32)\n\n return img\n\n\ndef get_coco_label_names():\n \"\"\"\n COCO label names and correspondence between the model's class index and COCO class index.\n Returns:\n coco_label_names (tuple of str) : all the COCO label names including background class.\n coco_class_ids (list of int) : index of 80 classes that are used in 'instance' annotations\n coco_cls_colors (np.ndarray) : randomly generated color vectors used for box visualization\n \"\"\"\n coco_label_names = ('background', # class zero\n 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',\n 'boat', 'traffic light', 'fire hydrant', 'street sign', 'stop sign',\n 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',\n 'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack', 'umbrella',\n 'shoe', 'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',\n 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',\n 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass',\n 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',\n 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n 'couch', 
'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk',\n 'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',\n 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book',\n 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n )\n coco_class_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,\n 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,\n 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67,\n 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n\n coco_cls_colors = np.random.randint(128, 255, size=(80, 3))\n\n return coco_label_names, coco_class_ids, coco_cls_colors"
] | [
[
"numpy.maximum",
"numpy.minimum",
"torch.max",
"torch.cat",
"numpy.asarray",
"torch.min",
"numpy.ones",
"numpy.random.rand",
"numpy.prod",
"numpy.count_nonzero",
"numpy.random.uniform",
"torch.prod",
"numpy.zeros",
"numpy.where",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
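The `nms` helper in the record above performs greedy IoU-based suppression in plain NumPy. Below is a minimal standalone sketch of the same greedy scheme on a hand-made set of corner-format boxes; the function name, box coordinates, and scores are illustrative and not taken from the repository.

import numpy as np

def greedy_nms(boxes, scores, iou_thresh):
    # Sort candidate boxes by confidence, highest first.
    order = scores.argsort()[::-1]
    boxes = boxes[order]
    areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
    keep = np.zeros(len(boxes), dtype=bool)
    for i, box in enumerate(boxes):
        # Intersection of this box with every box kept so far.
        tl = np.maximum(box[:2], boxes[keep, :2])
        br = np.minimum(box[2:], boxes[keep, 2:])
        inter = np.prod(br - tl, axis=1) * (tl < br).all(axis=1)
        iou = inter / (areas[i] + areas[keep] - inter)
        # Keep the box only if it does not overlap any kept box too much.
        if not (iou >= iou_thresh).any():
            keep[i] = True
    return order[np.where(keep)[0]]

boxes = np.array([[0., 0., 10., 10.],
                  [1., 1., 10., 10.],    # heavy overlap with the first box
                  [20., 20., 30., 30.]])
scores = np.array([0.9, 0.8, 0.7])
print(greedy_nms(boxes, scores, iou_thresh=0.5))  # expected: [0 2]

The heavily overlapping second box is suppressed by the higher-scoring first one, while the disjoint third box survives.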
xx812/CLBlast | [
"fc627d1fa19abad88485dc6d13bd0d3e65e52e7f"
] | [
"src/pyclblast/samples/override_parameters.py"
] | [
"#!/usr/bin/env python\n\n# This file is part of the CLBlast project. The project is licensed under Apache Version 2.0.\n# This file follows the PEP8 Python style guide and uses a max-width of 100 characters per line.\n#\n# Author(s):\n# Cedric Nugteren <www.cedricnugteren.nl>\n\nimport numpy as np\nimport pyopencl as cl\nfrom pyopencl.array import Array\nimport pyclblast\nfrom datetime import datetime\n\nif __name__ == \"__main__\":\n\n # Set up pyopencl:\n ctx = cl.create_some_context()\n queue = cl.CommandQueue(ctx)\n\n # Set up a basic sgemm example:\n m, n, k = 2, 3, 4\n a = np.random.rand(m, k).astype(dtype=np.float32)\n b = np.random.rand(k, n).astype(dtype=np.float32)\n c = np.empty((m, n), np.float32)\n cla = Array(queue, a.shape, a.dtype)\n clb = Array(queue, b.shape, b.dtype)\n clc = Array(queue, c.shape, c.dtype)\n cla.set(a)\n clb.set(b)\n clc.set(c)\n\n # Perform sgemm on these matrices, overriding the CLBlast parameters. In this example, we'll\n # just change the 'MWG' parameter a couple of times:\n params = { \"KWG\": 32, \"KWI\": 2, \"MDIMA\": 8, \"MDIMC\": 8, \"MWG\": 64, \"NDIMB\": 8, \"NDIMC\": 8,\n \"NWG\": 64, \"SA\": 0, \"SB\": 0, \"STRM\": 0, \"STRN\": 0, \"VWM\": 4, \"VWN\": 1 }\n for mwg in (32, 64, 256):\n print(\"Running sgemm tuned with MWG = %d\" % mwg)\n params[\"MWG\"] = mwg\n pyclblast.override_parameters(ctx.devices[0], 'Xgemm', 32, params)\n pyclblast.gemm(queue, m, n, k, cla, clb, clc, a_ld=k, b_ld=n, c_ld=n)\n assert np.allclose(clc.get(), a.dot(b)), \"uh-oh, xgemm isn't behaving correctly\"\n"
] | [
[
"numpy.random.rand",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
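The sample above calls `pyclblast.gemm` with `a_ld=k, b_ld=n, c_ld=n`. As a NumPy-only sketch (no OpenCL device required), the snippet below spells out why those are the right leading dimensions for row-major buffers; the naive `gemm_rowmajor` helper is an ad-hoc reference, not part of pyclblast.

import numpy as np

def gemm_rowmajor(m, n, k, a_flat, b_flat, c_flat, a_ld, b_ld, c_ld):
    # Naive reference GEMM over flat buffers: for row-major storage the
    # leading dimension is the row stride, i.e. the number of columns.
    for i in range(m):
        for j in range(n):
            acc = 0.0
            for p in range(k):
                acc += a_flat[i * a_ld + p] * b_flat[p * b_ld + j]
            c_flat[i * c_ld + j] = acc

m, n, k = 2, 3, 4
a = np.random.rand(m, k).astype(np.float32)
b = np.random.rand(k, n).astype(np.float32)
c = np.zeros(m * n, dtype=np.float32)
gemm_rowmajor(m, n, k, a.ravel(), b.ravel(), c, a_ld=k, b_ld=n, c_ld=n)
assert np.allclose(c.reshape(m, n), a @ b, atol=1e-5)
print("row-major leading dimensions check out")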
liuruoze/HierNet-SC2 | [
"7abfde0088e90416f11922d67c0f09659c7ecf81"
] | [
"mine_from_replay.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\" Transform replay data: \"\n\nimport shutil\nimport csv\nimport os\nimport sys\nimport traceback\nimport random\nimport pickle\nimport enum\nimport copy\n\nfrom absl import flags\nfrom absl import app\nfrom tqdm import tqdm\n\nimport matplotlib.pyplot as plt\n\nfrom pysc2.lib import point\nfrom pysc2.lib import features as Feat\nfrom pysc2.lib import actions as A\nfrom pysc2.lib.actions import FUNCTIONS as F\nfrom pysc2 import run_configs\n\nfrom s2clientprotocol import sc2api_pb2 as sc_pb\nfrom s2clientprotocol import common_pb2 as com_pb\n\nimport lib.utils as U\nimport lib.config as C\n\nfrom prefixspan import PrefixSpan\n\n__author__ = \"Ruo-Ze Liu\"\n\ndebug = False\n\nFLAGS = flags.FLAGS\nflags.DEFINE_bool(\"render\", True, \"Whether to render with pygame.\")\nflags.DEFINE_bool(\"realtime\", False, \"Whether to run in realtime mode.\")\nflags.DEFINE_bool(\"full_screen\", False, \"Whether to run full screen.\")\n\nflags.DEFINE_float(\"fps\", 22.4, \"Frames per second to run the game.\")\nflags.DEFINE_integer(\"step_mul\", 1, \"Game steps per observation.\")\nflags.DEFINE_bool(\"render_sync\", False, \"Turn on sync rendering.\")\nflags.DEFINE_integer(\"screen_resolution\", 64,\n \"Resolution for screen feature layers.\")\nflags.DEFINE_integer(\"minimap_resolution\", 64,\n \"Resolution for minimap feature layers.\")\n\nflags.DEFINE_integer(\"max_game_steps\", 0, \"Total game steps to run.\")\nflags.DEFINE_integer(\"max_episode_steps\", 0, \"Total game steps per episode.\")\n\nflags.DEFINE_integer(\"max_steps_of_replay\", int(22.4 * 60 * 60), \"Max game steps of a replay, max for 1 hour of game.\")\nflags.DEFINE_integer(\"small_max_steps_of_replay\", 256, \"Max game steps of a replay when debug.\")\n\n\nflags.DEFINE_bool(\"disable_fog\", False, \"Whether tp disable fog of war.\")\nflags.DEFINE_integer(\"observed_player\", 1, \"Which player to observe. 
For 2 player game, this can be 1 or 2.\")\n\nflags.DEFINE_integer(\"save_type\", 1, \"0 is torch_tensor, 1 is python_pickle, 2 is numpy_array\")\nflags.DEFINE_string(\"replay_version\", \"3.16.1\", \"the replays released by blizzard are all 3.16.1 version\")\n\n# note, replay path should be absoulte path\nflags.DEFINE_string(\"no_server_replay_path\", \"D:/work/Experiment/TODO/new_three_layer_server/data/replay/\", \"path of replay data\")\n\nflags.DEFINE_bool(\"save_data\", False, \"replays_save data or not\")\nflags.DEFINE_string(\"save_path\", \"./data/replay_data/\", \"path to replays_save replay data\")\nFLAGS(sys.argv)\n\n\nRACE = ['Terran', 'Zerg', 'Protoss', 'Random']\nRESULT = ['Victory', 'Defeat', 'Tie']\n\nRAW = False\n\nDATA_FROM = 0\nDATA_NUM = 30\nSTEPS = 20 * 60 * 22.4\n\nSTATISTIC_ACTIONS_INTERVAL = 5 * 22.4\n\nSELECT_SEPARATE_NUMBER = 2000\nSMART_TARGET_SEPARATE = 4500\n\n\nclass SaveType(enum.IntEnum):\n torch_tensor = 0\n python_pickle = 1\n numpy_array = 2\n\n\nif FLAGS.save_type == 0:\n SAVE_TYPE = SaveType.torch_tensor\nelif FLAGS.save_type == 1:\n SAVE_TYPE = SaveType.python_pickle\nelse:\n SAVE_TYPE = SaveType.numpy_array\n\n\ndef getFuncCall(o, feat):\n func_call = None\n\n #func_call = feat.reverse_action(o.actions[0])\n\n print('len(o.actions): ', len(o.actions)) if 1 else None\n\n action_list = []\n for action in o.actions:\n func_call = feat.reverse_action(action)\n\n if func_call.function == 0:\n # no op\n pass\n elif func_call.function == 1:\n # camera move\n pass\n elif func_call.function == 3:\n # we do not consider the smart rect\n pass \n else:\n print('func_call: ', func_call) if 1 else None\n action_list.append(func_call)\n\n if len(action_list) > 0:\n return action_list[0]\n else:\n return None\n\n\ndef transoform_replays(on_server=False):\n\n if on_server:\n REPLAY_PATH = P.replay_path \n max_steps_of_replay = FLAGS.max_steps_of_replay\n else:\n REPLAY_PATH = FLAGS.no_server_replay_path\n max_steps_of_replay = STEPS # 2000\n\n run_config = run_configs.get() # \n print('REPLAY_PATH:', REPLAY_PATH)\n replay_files = os.listdir(REPLAY_PATH)\n print('length of replay_files:', len(replay_files))\n replay_files.sort()\n\n screen_resolution = point.Point(FLAGS.screen_resolution, FLAGS.screen_resolution)\n minimap_resolution = point.Point(FLAGS.minimap_resolution, FLAGS.minimap_resolution)\n camera_width = 24\n\n interface = sc_pb.InterfaceOptions(\n\n )\n\n screen_resolution.assign_to(interface.feature_layer.resolution)\n minimap_resolution.assign_to(interface.feature_layer.minimap_resolution)\n\n replay_length_list = []\n noop_length_list = []\n\n all_func_call_list = []\n\n from_index = DATA_FROM\n end_index = DATA_FROM + DATA_NUM\n\n with run_config.start(game_version=FLAGS.replay_version, full_screen=False) as controller:\n\n for i, replay_file in enumerate(tqdm(replay_files)):\n try:\n replay_path = REPLAY_PATH + replay_file\n print('replay_path:', replay_path)\n\n do_write = False\n if i >= from_index:\n if end_index is None:\n do_write = True\n elif end_index is not None and i < end_index:\n do_write = True\n\n if not do_write:\n continue \n\n replay_data = run_config.replay_data(replay_path)\n replay_info = controller.replay_info(replay_data)\n\n print('replay_info', replay_info) if debug else None\n print('type(replay_info)', type(replay_info)) if debug else None\n\n print('replay_info.player_info:', replay_info.player_info) if debug else None\n infos = replay_info.player_info\n\n observe_id_list = []\n observe_result_list = []\n for info in infos:\n 
print('info:', info) if debug else None\n player_info = info.player_info\n result = info.player_result.result\n print('player_info', player_info) if debug else None\n if player_info.race_actual == com_pb.Protoss:\n observe_id_list.append(player_info.player_id)\n observe_result_list.append(result)\n\n print('observe_id_list', observe_id_list) if debug else None\n print('observe_result_list', observe_result_list) if debug else None\n\n win_observe_id = 0\n\n for i, result in enumerate(observe_result_list):\n if result == sc_pb.Victory:\n win_observe_id = observe_id_list[i]\n break\n\n # we observe the winning one\n print('win_observe_id', win_observe_id)\n\n if win_observe_id == 0:\n print('no win_observe_id found! continue')\n continue\n\n start_replay = sc_pb.RequestStartReplay(\n replay_data=replay_data,\n options=interface,\n disable_fog=False, # FLAGS.disable_fog\n observed_player_id=win_observe_id\n )\n\n print(\" Replay info \".center(60, \"-\")) if debug else None\n print(replay_info) if debug else None\n print(\"-\" * 60) if debug else None\n controller.start_replay(start_replay)\n\n feat = Feat.Features(controller.game_info()) \n\n print(\"feat obs spec:\", feat.observation_spec()) if debug else None\n print(\"feat action spec:\", feat.action_spec()) if debug else None\n\n prev_obs = None\n i = 0\n record_i = 0\n save_steps = 0\n noop_count = 0\n\n obs_list, func_call_list, z_list, delay_list = [], [], [], [] \n feature_list, label_list = [], []\n step_dict = {}\n\n # initial build order\n player_bo = []\n player_ucb = []\n\n no_op_window = []\n show = False\n\n unit_type = None\n\n while True:\n o = controller.observe()\n try:\n try:\n func_call = None\n no_op = False\n\n if i % STATISTIC_ACTIONS_INTERVAL == 0:\n all_func_call_list.append(func_call_list)\n func_call_list = []\n\n if o.actions:\n func_call = getFuncCall(o, feat)\n print('func_call', func_call)\n\n # we didn't consider move_camera (1)\n # in macro actions;\n if func_call is not None:\n if func_call.function == F.move_camera.id:\n func_call = None\n else:\n no_op = True\n\n if func_call is not None:\n save_steps += 1\n\n int_id = func_call.function\n print('func_call.function ', func_call.function)\n\n if int_id == F.select_point.id: # 2: # select_point \n arguments = func_call.arguments\n print('arguments', arguments)\n\n [x, y] = arguments[1]\n\n obs = feat.transform_obs(o.observation)\n unit_type_map = obs[\"screen\"][U._UNIT_TYPE]\n\n unit_type = unit_type_map[y, x]\n\n print('unit_type', unit_type)\n\n if show:\n imgplot = plt.imshow(unit_type_map)\n plt.show()\n\n if unit_type is not None:\n int_id = SELECT_SEPARATE_NUMBER + unit_type\n\n elif int_id == F.Smart_screen.id: # 451: # Smart_screen\n arguments = func_call.arguments\n print('arguments', arguments)\n [x, y] = arguments[1]\n\n obs = feat.transform_obs(o.observation)\n unit_type_map = obs[\"screen\"][U._UNIT_TYPE]\n\n unit_type = unit_type_map[y, x]\n\n print('unit_type', unit_type)\n\n if unit_type is not None:\n int_id = SMART_TARGET_SEPARATE + unit_type\n\n func_call_list.append(int_id)\n\n except Exception as e:\n traceback.print_exc()\n\n if i >= max_steps_of_replay: # test the first n frames\n print(\"max frames test, break out!\")\n break\n\n if o.player_result: # end of game\n print(o.player_result)\n break\n\n except Exception as inst:\n traceback.print_exc() \n\n controller.step()\n i += 1\n\n if SAVE_TYPE == SaveType.torch_tensor:\n pass\n\n elif SAVE_TYPE == SaveType.python_pickle:\n all_func_call_list.append(func_call_list)\n\n elif 
SAVE_TYPE == SaveType.numpy_array:\n pass\n\n replay_length_list.append(save_steps)\n noop_length_list.append(noop_count)\n # We only test the first one replay \n except Exception as inst:\n traceback.print_exc() \n\n print('begin save!')\n\n if SAVE_TYPE == SaveType.torch_tensor:\n pass\n\n elif SAVE_TYPE == SaveType.python_pickle:\n save_path = FLAGS.save_path + 'actions.dat'\n\n print(\"all_func_call_list\", all_func_call_list)\n\n with open(save_path, 'wb') as fp:\n pickle.dump(all_func_call_list, fp)\n\n elif SAVE_TYPE == SaveType.numpy_array:\n pass \n\n print('end save!')\n\n print(\"end\")\n print(\"replay_length_list:\", replay_length_list)\n print(\"noop_length_list:\", noop_length_list)\n\n\ndef analyse_data(on_server=False):\n read_path = FLAGS.save_path + 'actions.dat'\n print('read_path', read_path)\n\n with open(read_path, 'rb') as fp:\n all_func_call_list = pickle.load(fp)\n print('all_func_call_list', all_func_call_list) if 0 else None\n\n ps = PrefixSpan(all_func_call_list)\n\n # make sure the first actions should be select action, like select point (2), select group (4),\n # and select army (7).\n # meanwhile, the macro actions should not have the same actions, like len(set(patt)) equals to len(patt) \n result = ps.topk(75, filter=lambda patt, matches: len(set(patt)) == len(patt) and len(patt) >= 2 and\n (patt[0] in [F.select_army.id] or patt[0] > SELECT_SEPARATE_NUMBER))\n\n all_seq_output = []\n for i in result:\n print('frq', i[0]) if 0 else None\n seq = i[1]\n seq_output = []\n for j in seq:\n unit_type = None\n if j > SELECT_SEPARATE_NUMBER and j < SMART_TARGET_SEPARATE: # select point\n unit_type = j - SELECT_SEPARATE_NUMBER\n j = F.select_point.id\n\n elif j >= SMART_TARGET_SEPARATE: # smart screen\n unit_type = j - SMART_TARGET_SEPARATE\n\n # Function.ability(451, \"Smart_screen\", cmd_screen, 1),\n j = F.Smart_screen.id\n\n # specfila for smart screen on resource\n resource_minerals_list = [C.UNIT_MAP_INV['MineralField'], C.UNIT_MAP_INV['MineralField750'], C.UNIT_MAP_INV['MineralField450']]\n resource_gas_list = [C.UNIT_MAP_INV['Assimilator'], C.UNIT_MAP_INV['Extractor'], C.UNIT_MAP_INV['Refinery']]\n\n if unit_type in resource_minerals_list or unit_type in resource_gas_list:\n\n # Function.ability(264, \"Harvest_Gather_screen\", cmd_screen, 3666),\n j = F.Harvest_Gather_screen.id\n if unit_type in resource_minerals_list:\n\n # Unified into a single resource type\n unit_type = C.UNIT_MAP_INV['MineralField']\n\n if unit_type is not None: \n print(F[j].name, C.UNIT_MAP.get(unit_type, \"None\"), unit_type) if 0 else None\n else:\n print(F[j].name) if 0 else None\n\n seq_output.append([j, unit_type])\n all_seq_output.append([i[0], seq_output])\n\n # filter\n filter_seq_output = []\n for i in all_seq_output:\n frq = i[0]\n seq = i[1]\n\n select_point_size = 0\n first_is_select = True\n smart_screen_to_none = False\n first_select_type = None\n wrong_action_mapping = False\n for jdx, j in enumerate(seq):\n action = j[0]\n unit_type = j[1]\n\n # if the first action is not any select action, skip it\n if jdx == 0 and not (action in [F.select_point.id, F.select_control_group.id, F.select_army.id]):\n first_is_select = False\n break\n\n if jdx == 0 and action in [F.select_point.id]:\n first_select_type = unit_type\n\n # if the seq contains more than one select action, skip it\n if action in [F.select_point.id, F.select_control_group.id, F.select_army.id]:\n select_point_size += 1\n\n if jdx > 0 and first_select_type == C.UNIT_MAP_INV['Nexus']:\n\n # Function.ability(485, 
\"Train_Probe_quick\", cmd_quick, 1006),\n if action not in [F.Train_Probe_quick.id]:\n wrong_action_mapping = True\n break\n\n # if the smart_screen has a 0 target unit_type, skip it\n if action == F.Smart_screen.id and unit_type == 0:\n smart_screen_to_none = True\n break\n\n # if the first action is not any select action, skip it\n if not first_is_select:\n continue\n\n # if the seq contains more than one select action, skip it\n if select_point_size > 1:\n continue\n\n # if the smart_screen has a 0 target unit_type, skip it\n if smart_screen_to_none:\n continue\n\n # if the selected unit types can't match the subsequent actions, skit it\n if wrong_action_mapping:\n continue\n\n filter_seq_output.append([frq, seq])\n\n # transform to some easily reading format\n macros_list = []\n for i in filter_seq_output:\n frq = i[0]\n seq = i[1]\n out_string = 'frq:' + str(frq) + \" \"\n string_list = [] \n for jdx, j in enumerate(seq):\n action = j[0]\n unit_type = j[1]\n if unit_type is not None:\n string = F[action].name + \"(\" + C.UNIT_MAP.get(unit_type, \"None\") + \")\"\n else:\n string = F[action].name\n string_list.append(string)\n output_func = ' -> '.join(string_list)\n print(out_string + output_func) if 0 else None\n macros_list.append([out_string, output_func])\n\n macros_list_copy = copy.deepcopy(macros_list)\n\n # remove the repeated one\n filter_macros_list = []\n for i, macro in enumerate(macros_list):\n in_others = False\n print('macro', macro) if 0 else None\n for j, others in enumerate(macros_list_copy):\n if i < j and macro[1] in others[1]:\n in_others = True\n break\n print('in_others', in_others) if 0 else None\n if not in_others:\n filter_macros_list.append(macro)\n\n for i in filter_macros_list:\n print(i)\n\n output_file = FLAGS.save_path + \"generated_marco_actions.txt\"\n with open(output_file, 'w') as file: \n for i in filter_macros_list:\n file.write(i[0] + \" \" + i[1] + \"\\n\")\n\n\ndef run(analyse):\n if not analyse:\n transoform_replays()\n else:\n analyse_data()\n\n\nif __name__ == '__main__':\n run(analyse=0)\n run(analyse=1)\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
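The mining script above feeds per-replay action-id sequences to `PrefixSpan(...).topk(...)` with a filter that forces candidate macros to start with a select action and contain no repeated ids. The toy below shows the same call pattern on made-up integer ids; it assumes the same `prefixspan` package the script imports, and the ids and the expected output in the comment are illustrative only.

from prefixspan import PrefixSpan

SELECT, BUILD, TRAIN, MOVE = 2, 42, 485, 331   # stand-ins for SC2 function ids
episodes = [
    [SELECT, TRAIN, MOVE],
    [SELECT, TRAIN],
    [MOVE, SELECT, TRAIN, MOVE],
    [SELECT, BUILD, MOVE],
]

ps = PrefixSpan(episodes)
# Same style of constraint as the script: patterns must start with a select
# action, contain no repeated ids, and have length >= 2.
patterns = ps.topk(5, filter=lambda patt, matches:
                   patt[0] == SELECT and len(set(patt)) == len(patt) and len(patt) >= 2)
for freq, patt in patterns:
    print(freq, patt)   # e.g. 3 [2, 485]  ->  "select ... then train"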
eveiramirez/python_class | [
"7a3830cc92dc842b853b243c6b01e06993faa97e"
] | [
"Python2/tareas/tarea_7.py"
] | [
"\"\"\"\n\nNAME\n tarea_7.py\n\nVERSION\n [1.0]\n\nAUTHOR\n Ignacio Emmanuel Ramirez Bernabe\n\nCONTACT\n [email protected]\n\nGITHUB\n https://github.com/eveiramirez/python_class/blob/master/Python2/tareas/tarea_7.py\n\nDESCRIPTION\n Este programa contiene arrays estructurados para los arrays\n creados en el ejercicio 1, los cuales son:\n Produccion\n Costos\n Costos por g/L\n\nCATEGORY\n Numpy\n\n\"\"\"\nimport numpy as np\n# Crear array con la produccion de cada gen para cada temperatura\nproduction = np.array([(\"Gen1\", 5, 3), (\"Gen2\", 11, 7),\n (\"Gen3\", 4, 9), (\"Gen4\", 2, 6)],\n dtype=[(\"name\", (np.str_, 10)),\n (\"production_cond1\", np.int32),\n (\"production_cond2\", np.int32)])\n\n# Crear array con los costos de induccion\ncosts = np.array([(\"Gen1\", 3.5), (\"Gen2\", 5), (\"Gen3\", 7),\n (\"Gen4\", 4.3)], dtype=[(\"name\", (np.str_, 10)),\n (\"cost\", np.float64)])\n\n# Crear array con los costos por g/L para condicion 1\npc_cond1 = production[\"production_cond1\"]/costs[\"cost\"]\n\n# Crear array con los costos por g/L para temperatura 2\npc_cond2 = production[\"production_cond2\"]/costs[\"cost\"]\n\n# Crear lista con los costos por g/L para cada gene guardados en una\n# tupla\ngene_list = []\nfor gene in range(0, 4):\n gene_list.append((f\"Gen{gene+1}\", pc_cond1[gene], pc_cond2[gene]))\n\n# Crear array con los costos por g/L\nprod_costs = np.array(gene_list, dtype=[(\"name\", (np.str_, 10)),\n (\"pc_cond1\", np.float64),\n (\"pc_cond2\", np.float64)])\n\n# Imprimir array de los costos por g/L\nprint(prod_costs)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
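As a short follow-on to the structured arrays above, the sketch below shows two things the record's `prod_costs` layout enables directly: sorting by a named field and boolean filtering per field. The numeric values are approximate production-per-cost ratios derived from the record's inputs and are only illustrative.

import numpy as np

prod_costs = np.array([("Gen1", 1.43, 0.86), ("Gen2", 2.20, 1.40),
                       ("Gen3", 0.57, 1.29), ("Gen4", 0.47, 1.40)],
                      dtype=[("name", (np.str_, 10)),
                             ("pc_cond1", np.float64),
                             ("pc_cond2", np.float64)])

# Sort rows by the condition-1 ratio, using the field name directly.
print(np.sort(prod_costs, order="pc_cond1"))
# Boolean masks work per field while keeping the row structure intact.
print(prod_costs[prod_costs["pc_cond1"] > 1.0]["name"])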
CVML-Lab-NUS/Complex-gated-recurrent-neural-networks | [
"ea8733a8236be742a29769741cb82de2ce70147a"
] | [
"test/hilbert.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom IPython.core.debugger import Tracer\ndebug_here = Tracer()\n\n\n# ## numpy hilbert transform ##\ndef np_hilbert(xr):\n n = xr.shape[0]\n # fft over columns.\n x = np.fft.fft(xr.transpose()).transpose()\n h = np.zeros([n])\n if n > 0 and 2*np.fix(n/2) == n:\n # even and nonempty\n h[0:int(n/2+1)] = 1\n h[1:int(n/2)] = 2\n elif n > 0:\n # odd and nonempty\n h[0] = 1\n h[1:int((n+1)/2)] = 2\n if len(x.shape) == 2:\n hs = np.stack([h]*x.shape[-1], -1)\n elif len(x.shape) == 1:\n hs = h\n else:\n raise NotImplementedError\n print(hs)\n return np.fft.ifft((x*hs).transpose()).transpose()\n\n\n# Xr = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 1.0], [2.0, 3.0], [4.0, 5.0]])\n# Xr = np.array([1.0, 2.0, 3.0, 4.0])\nXr = np.array([0.3, 0.0])\nX = np_hilbert(Xr)\nprint(X)\n\n\n# # tensorflow hilbert transform ##\n\ndef hilbert(xr):\n with tf.variable_scope('hilbert_transform'):\n n = tf.Tensor.get_shape(xr).as_list()[0]\n # Run the fft on the columns no the rows.\n x = tf.transpose(tf.fft(tf.transpose(xr)))\n h = np.zeros([n])\n if n > 0 and 2*np.fix(n/2) == n:\n # even and nonempty\n h[0:int(n/2+1)] = 1\n h[1:int(n/2)] = 2\n elif n > 0:\n # odd and nonempty\n h[0] = 1\n h[1:int((n+1)/2)] = 2\n tf_h = tf.constant(h, name='h', dtype=tf.float32)\n if len(x.shape) == 2:\n hs = np.stack([h]*x.shape[-1], -1)\n reps = tf.Tensor.get_shape(x).as_list()[-1]\n hs = tf.stack([tf_h]*reps, -1)\n elif len(x.shape) == 1:\n hs = tf_h\n else:\n raise NotImplementedError\n tf_hc = tf.complex(hs, tf.zeros_like(hs))\n tmp = x*tf_hc\n return tf.transpose(tf.ifft(tf.transpose(tmp)))\n\n\ntest_graph = tf.Graph()\nwith test_graph.as_default():\n xr = tf.constant(Xr, dtype=tf.float32)\n xc = tf.complex(xr, tf.zeros_like(xr))\n X = hilbert(xc)\n\nwith tf.Session(graph=test_graph):\n print(X.eval())\n\n\n"
] | [
[
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.stack",
"numpy.stack",
"tensorflow.zeros_like",
"tensorflow.Session",
"tensorflow.Tensor.get_shape",
"tensorflow.variable_scope",
"numpy.fix",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
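The `np_hilbert`/`hilbert` pair above builds the analytic signal by zeroing negative frequencies and doubling positive ones in the FFT domain. Below is a quick NumPy-only sanity check of that construction (even-length branch only; the helper name and test signal are ad hoc): for a pure cosine the result should be approximately exp(i*w*t).

import numpy as np

def analytic_signal(x):
    # Same frequency-domain weighting as the even-n branch above.
    n = len(x)
    h = np.zeros(n)
    h[0] = 1.0
    h[n // 2] = 1.0
    h[1:n // 2] = 2.0
    return np.fft.ifft(np.fft.fft(x) * h)

t = np.arange(256)
x = np.cos(2 * np.pi * 8 * t / 256)
z = analytic_signal(x)
assert np.allclose(z.real, x, atol=1e-10)                               # real part is the input
assert np.allclose(z.imag, np.sin(2 * np.pi * 8 * t / 256), atol=1e-10)  # imaginary part is its Hilbert transform
print("analytic signal of cos(w*t) is exp(i*w*t) as expected")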
sangminwoo/BotoxNet-Trash-Classfication | [
"62b27e23a54d921382c6f0ef3a4e0cbe89fca060"
] | [
"resnet.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nfrom attention import AttentionModule\n#from cbam import CBAM\nfrom SE import SELayer\nimport matplotlib.pyplot as plt\n\n__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',\n 'resnet152']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, use_att=False, att_mode='ours'):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n self.use_att = use_att\n\n if use_att:\n assert att_mode in ['ours', 'cbam', 'se']\n if att_mode == 'ours':\n self.att = AttentionModule(planes)\n elif att_mode == 'cbam':\n self.att = CBAM(planes)\n elif att_mode == 'se':\n self.att = SELayer(planes)\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n if self.use_att:\n out = self.att(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, use_att=False, att_mode='ours'):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.use_att = use_att\n if use_att:\n assert att_mode in ['ours', 'cbam', 'se']\n if att_mode == 'ours':\n self.att = AttentionModule(planes * self.expansion)\n elif att_mode == 'cbam':\n self.att = CBAM(planes * self.expansion)\n elif att_mode == 'se':\n self.att = SELayer(planes * self.expansion)\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n if self.use_att:\n out = self.att(out)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, num_classes=1000, use_att=False, att_mode='ours'):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = 
nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n self.layer1 = self._make_layer(block, 64, layers[0], use_att=False, att_mode=att_mode)\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, use_att=False, att_mode=att_mode)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, use_att=False, att_mode=att_mode)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, use_att=False, att_mode=att_mode)\n self.avgpool = nn.AvgPool2d(7, stride=1)\n \n self.final_fc = nn.Linear(512 * block.expansion, num_classes)\n\n self.use_att = use_att\n if use_att:\n self.att = AttentionModule(512*block.expansion)\n self.num_classes = num_classes\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _make_layer(self, block, planes, blocks, stride=1, use_att=False, att_mode='ours'):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, use_att=use_att, att_mode=att_mode))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes, use_att=use_att, att_mode=att_mode))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n imgs = x\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x) # 64\n x = self.layer2(x) # 128\n x = self.layer3(x) # \n x4 = self.layer4(x)\n\n if self.use_att:\n x4 = self.att(x4)\n\n x = self.avgpool(x4)\n x = x.view(x.size(0), -1)\n x = self.final_fc(x)\n return x, x4\n\ndef resnet18(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n if pretrained:\n pretrained_dict = model_zoo.load_url(model_urls['resnet18'])\n model_dict = model.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n print('pre-trained model loaded successfully')\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model\n\n\ndef resnet34(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-34 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n pretrained_dict = model_zoo.load_url(model_urls['resnet34'])\n model_dict = model.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n print('pre-trained model loaded successfully')\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model\n\n\ndef resnet50(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n if pretrained:\n pretrained_dict = model_zoo.load_url(model_urls['resnet50'])\n 
model_dict = model.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n print('pre-trained model loaded successfully')\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n return model\n\n\ndef resnet101(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n pretrained_dict = model_zoo.load_url(model_urls['resnet101'])\n model_dict = model.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model\n\n\ndef resnet152(pretrained=False, **kwargs):\n \"\"\"Constructs a ResNet-152 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n if pretrained:\n pretrained_dict = model_zoo.load_url(model_urls['resnet152'])\n model_dict = model.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n model.load_state_dict(model_dict)\n # model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))\n return model\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.utils.model_zoo.load_url",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
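The ResNet blocks above optionally wrap their output in `SELayer(planes)` when `att_mode='se'`. Below is a minimal sketch of a standard squeeze-and-excitation layer to make that channel-attention step concrete; it follows the common SE-Net formulation and is not necessarily identical to the repository's `SE.py`.

import torch
import torch.nn as nn

class SimpleSELayer(nn.Module):
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)          # squeeze: global average per channel
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid(),                            # excitation: per-channel gate in (0, 1)
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        w = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
        return x * w                                 # reweight feature maps channel-wise

x = torch.randn(2, 64, 14, 14)
print(SimpleSELayer(64)(x).shape)   # torch.Size([2, 64, 14, 14])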
cantugba/Github_Tag_Analysis | [
"9ac76872e2977e67176ecd2119e76823f96516aa"
] | [
"apriori.py"
] | [
"#%%\n\nfrom apyori import apriori\nimport csv\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nall_items = set() # Tüm ögeler\nwith open(\"TestDatas/All_Data.csv\") as f:\n reader = csv.reader(f, delimiter=\",\") # her bir tag virgülle ayrıldıgı için burada belirtiyorum\n for i, line in enumerate(reader):\n all_items.update(line)\n# Her bir ögeden veriler içerisinde kaç tane oldugunun sayılması ve listenin buna göre güncellenmesi\ncounting = list()\nwith open(\"TestDatas/All_Data.csv\") as f:\n reader = csv.reader(f, delimiter=\",\")\n for i, line in enumerate(reader):\n row = {item: 0 for item in all_items}\n row.update({item:1 for item in line})\n counting.append(row)\ndeneme = pd.DataFrame(counting)\nprint(deneme.head()) # ss al burayı\nprint(deneme.shape)\n\n#%%\n\n# 1. Tüm satır toplamlarının toplamının toplam öğe sayısını bulun\ntot_item_count = sum(deneme.sum())\n#print(tot_item_count)\n\n# 2. İlk 20 öğeyi almak için satırları toplayın ve sıralama azalan düzendedir\nitem_sum = deneme.sum().sort_values(ascending=False).reset_index().head(n=20)\nitem_sum.rename(columns={item_sum.columns[0]:'Item_name',item_sum.columns[1]:'Item_count'}, inplace=True)\n#print(item_sum)\n\n# 3. Ne kadar katkıda bulunduğunu bilmemiz için yüzde değeri eklenir.\n# X'in toplam yüzdesi, toplam yüzdede x ve üzeri öğelerin yüzdesini, yani kümülatif toplamı belirler.\nitem_sum['Item_percent'] = item_sum['Item_count']/tot_item_count\nitem_sum['Tot_percent'] = item_sum.Item_percent.cumsum()\nitem_sum.head(20) # Yüzdelerle birlikte ilk 20 öğe listesi\n\n\n# sık geçen tagların cizimi\nobj = (list(item_sum['Item_name'].head(n=20)))\ny_pos = np.arange(len(obj))\nperformans = list(item_sum['Item_count'].head(n=20))\n#print(performans)\n\nplt.bar(y_pos,performans,align='center',alpha=0.9)\nplt.xticks(y_pos,obj,rotation='vertical')\nplt.ylabel('Frekans Sayısı')\nplt.title('Analiz Sonucu')\nplt.show(block=True)\nplt.interactive(False)\nplt.figure()\nprint(\"deneme: \")\nprint(deneme.shape)\n\n\n#%%\n\n#Dikkate alınacak öğe için Minimum Toplam Öge Yüzdesi -> Eşik değeri gibi\n# transaction islem Dikkate alınacak minimum işlem uzunluğu (yani arka arkaya minimum öğe sayısı).\n\ndef prune_dataset(olddf,len_transaction, tot_item_percent):\n if 'tot_items' in olddf.columns:\n del(olddf['tot_items'])\n\n # Her öğe için item_count ve toplam öğe sayısını bulma.\n # 3.adım gibi\n Item_count = olddf.sum().sort_values(ascending=False).reset_index()\n tot_items = sum(olddf.sum().sort_values(ascending=False))\n Item_count.rename(columns={Item_count.columns[0]:'Item_name', Item_count.columns[1]:'Item_count'},inplace = True)\n\n # Öge yuzdesi ve toplam yuzdeyi bulmak icin 3 adıma benzer\n Item_count['Item_percent'] = Item_count['Item_count'] / tot_items\n Item_count['Tot_percent'] = Item_count.Item_percent.cumsum()\n\n # Toplam yüzde için koşul / minimum eşiğe uyan öğeleri almak.\n selected_items = list(Item_count[Item_count.Tot_percent < tot_item_percent].Item_name)\n olddf['tot_items'] = olddf[selected_items].sum(axis=1)\n\n # İşlemin uzunluğu veya bir satırdaki öğe sayısı için koşul / minimum eşiğe uyan öğeleri almak.\n olddf = olddf[olddf.tot_items >= len_transaction]\n del(olddf['tot_items'])\n\n # Temizlenmis / Kısaltılmıs veri seti\n\n return olddf[selected_items],Item_count[Item_count.Tot_percent < tot_item_percent]\n\n\n#%%\n\n#Apriori için uygun bir veri seti elde etmek üzere şimdi len_transaction ve tot_item_percent için farklı değerler gireceğiz\n#Deneme 1\n\npruneddf ,Item_count= 
prune_dataset(deneme,3,0.7)\n\nprint(pruneddf.shape)\n#print(list(pruneddf.columns))\n# Çıktı (Sütun listesi aslında apriori için dikkate aldığımız öğelerdir.)\n\n\n#%%\n\n# DENEME 2\n#pruneddf,Item_count = prune_dataset(deneme,4,0.4)\nprint(pruneddf.shape)\n#print(list(pruneddf.columns))\n\n#%%\n\n\n\n#%%\n\n#\n# # deneme 3\n#pruneddf, Item_count = prune_dataset(deneme,4,0.2)\n#print(pruneddf.shape)\n#print(list(pruneddf.columns)\n\n\n\n#%%\n\n# İlk olarak, veri çerçevemizi, orijinal veri kümemiz gibi görünecek, ancak boyutu küçültülmüş bir csv dosyasına dönüştürmemiz gerekir. ½½\n# 1'leri uygun öğe adına dönüştürme (sütun adı)\n\ny = list(pruneddf.columns)\n#print(\"y\",y)\nfor s in y:\n pruneddf.loc[(pruneddf[s] == 1),s] = s\nprint(pruneddf)\n# Sıfırları Sil\nlol = pruneddf.values.tolist()\n#print(lol)\n\nfor a in lol:\n while(0 in a):\n a.remove(0)\n#print(\"sıfırsız lol\",lol)\n# Yeni bir temizlnemiş veri kümesi csv dosyası oluşturma\nwith open(\"Results/PrunedCVSs/prunedAll.csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(lol)\n\n#%%\n\nbirliktelik_kurali = apriori(lol, min_support=0.004, min_confidence=0.3, min_lift=3, min_length=4)\nbirliktelik_sonuc = list(birliktelik_kurali)\n\nprint(\"Türetilen Birliktelik İlişkisi {}.\".format(len(birliktelik_sonuc)))\n\nprint(\"Türetilen Kurallar: \")\nfor i in range(0, len(birliktelik_sonuc)):\n print(birliktelik_sonuc[i][0])\n\n\n#%%\n\n# Güven değerine göre sıralanması\n#sirali = sorted(birliktelik_sonuc, key=lambda x: int(x[2][0][2]))\n\n# Destek değerine göre sıralanması\n#sirali = sorted(birliktelik_sonuc, key=lambda x: int(x[1]))\n\n# Lift Değerine göre\n#sirali = sorted(birliktelik_sonuc, key=lambda x: int(x[2][0][3]))\n\n# for item in sirali:\n# # iç listenin ilk dizini\n# # Temel öğeyi içerir ve öğe ekler\n# pair = item[0]\n# items = [x for x in pair]\n# print(\"Kural: \" + items[0] + \" -> \" + items[1])\n#\n# # iç listenin ikinci dizini\n# print(\"Destek: \" + str(item[1]))\n#\n# # iç listenin üçüncü dizininin 0'ında bulunan listenin üçüncü dizini\n#\n# print(\"Güven: \" + str(item[2][0][2]))\n# print(\"Lift: \" + str(item[2][0][3]))\n# print(\"=====================================\")\n\n# iç içe liste -> nested list\n # İç içe listenin ilk dizi kuralı, ikinci dizini destek (support) değerini, ucuncu diznde destek(confidence) ve lift değeri bulunur\n\n\n\n#%%\n\nfor item in birliktelik_sonuc:\n # iç listenin ilk dizini\n # Temel öğeyi içerir ve öğe ekler\n\n pair = item[0]\n\n items = [x for x in pair]\n print(\"Kural: \" + items[0] + \" -> \" + items[1])\n\n # iç listenin ikinci dizini -> support\n print(\"Destek: \" + str(item[1]))\n\n # iç listenin üçüncü dizininin 0'ında bulunan listenin üçüncü dizini\n\n print(\"Güven: \" + str(item[2][0][2]))\n print(\"Lift: \" + str(item[2][0][3]))\n print(\"=====================================\")\n\n\n\n\n#%%\n\n# Sonucları csv dosyasına yazdırma\n#df = pd.DataFrame([[i[0], i[1],str(i[2][0][2]),str(i[2][0][3])] for i in birliktelik_sonuc],\n #columns=['taglar','destek','guven','lift'])\n\n\n\n#df.to_csv('Results/All_DataResults.csv', index=False)\n\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.interactive",
"pandas.DataFrame",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
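The apriori script above reports support, confidence, and lift for each mined tag rule. The small pure-Python sketch below computes those three quantities by hand on made-up tag baskets, to make the printed numbers concrete; it does not use the `apyori` package.

transactions = [
    {"python", "machine-learning", "tensorflow"},
    {"python", "machine-learning"},
    {"javascript", "react"},
    {"python", "tensorflow"},
    {"python", "machine-learning", "tensorflow"},
]

def metrics(antecedent, consequent, baskets):
    n = len(baskets)
    both = sum(1 for b in baskets if antecedent <= b and consequent <= b)
    ante = sum(1 for b in baskets if antecedent <= b)
    cons = sum(1 for b in baskets if consequent <= b)
    support = both / n                   # fraction of baskets containing both sides
    confidence = both / ante             # P(consequent | antecedent)
    lift = confidence / (cons / n)       # confidence relative to the consequent's base rate
    return support, confidence, lift

s, c, l = metrics({"machine-learning"}, {"tensorflow"}, transactions)
print(f"Rule: machine-learning -> tensorflow  support={s:.2f} confidence={c:.2f} lift={l:.2f}")
# -> support=0.40 confidence=0.67 lift=1.11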
iceshade000/MMCGAN | [
"addd41a8c19d9e898804bd34cafcb644cd7a87cf"
] | [
"layers.py"
] | [
"''' Layers\n This file contains various layers for the BigGAN models.\n'''\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import init\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.nn import Parameter as P\n\nfrom sync_batchnorm import SynchronizedBatchNorm2d as SyncBN2d\n\n\n# Projection of x onto y\ndef proj(x, y):\n return torch.mm(y, x.t()) * y / torch.mm(y, y.t())\n\n\n# Orthogonalize x wrt list of vectors ys\ndef gram_schmidt(x, ys):\n for y in ys:\n x = x - proj(x, y)\n return x\n\n\n# Apply num_itrs steps of the power method to estimate top N singular values.\ndef power_iteration(W, u_, update=True, eps=1e-12):\n # Lists holding singular vectors and values\n us, vs, svs = [], [], []\n for i, u in enumerate(u_):\n # Run one step of the power iteration\n with torch.no_grad():\n v = torch.matmul(u, W)\n # Run Gram-Schmidt to subtract components of all other singular vectors\n v = F.normalize(gram_schmidt(v, vs), eps=eps)\n # Add to the list\n vs =vs+ [v]\n # Update the other singular vector\n u = torch.matmul(v, W.t())\n # Run Gram-Schmidt to subtract components of all other singular vectors\n u = F.normalize(gram_schmidt(u, us), eps=eps)\n # Add to the list\n us =us+ [u]\n if update:\n u_[i][:] = u\n # Compute this singular value and add it to the list\n svs =svs+ [torch.squeeze(torch.matmul(torch.matmul(v, W.t()), u.t()))]\n #svs += [torch.sum(F.linear(u, W.transpose(0, 1)) * v)]\n return svs, us, vs\n\n\n# Convenience passthrough function\nclass identity(nn.Module):\n def forward(self, input):\n return input\n \n\n# Spectral normalization base class \nclass SN(object):\n def __init__(self, num_svs, num_itrs, num_outputs, transpose=False, eps=1e-12):\n # Number of power iterations per step\n self.num_itrs = num_itrs\n # Number of singular values\n self.num_svs = num_svs\n # Transposed?\n self.transpose = transpose\n # Epsilon value for avoiding divide-by-0\n self.eps = eps\n # Register a singular vector for each sv\n for i in range(self.num_svs):\n self.register_buffer('u%d' % i, torch.randn(1, num_outputs))\n self.register_buffer('sv%d' % i, torch.ones(1))\n \n # Singular vectors (u side)\n @property\n def u(self):\n return [getattr(self, 'u%d' % i) for i in range(self.num_svs)]\n\n # Singular values; \n # note that these buffers are just for logging and are not used in training. 
\n @property\n def sv(self):\n return [getattr(self, 'sv%d' % i) for i in range(self.num_svs)]\n \n # Compute the spectrally-normalized weight\n def W_(self):\n W_mat = self.weight.view(self.weight.size(0), -1)\n if self.transpose:\n W_mat = W_mat.t()\n # Apply num_itrs power iterations\n for _ in range(self.num_itrs):\n svs, us, vs = power_iteration(W_mat, self.u, update=self.training, eps=self.eps) \n # Update the svs\n if self.training:\n with torch.no_grad(): # Make sure to do this in a no_grad() context or you'll get memory leaks!\n for i, sv in enumerate(svs):\n self.sv[i][:] = sv \n return self.weight / svs[0]\n\n\n# 2D Conv layer with spectral norm\nclass SNConv2d(nn.Conv2d, SN):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True, \n num_svs=1, num_itrs=1, eps=1e-12):\n nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride, \n padding, dilation, groups, bias)\n SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps) \n def forward(self, x):\n return F.conv2d(x, self.W_(), self.bias, self.stride, \n self.padding, self.dilation, self.groups)\n\n\n# Linear layer with spectral norm\nclass SNLinear(nn.Linear, SN):\n def __init__(self, in_features, out_features, bias=True,\n num_svs=1, num_itrs=1, eps=1e-12):\n nn.Linear.__init__(self, in_features, out_features, bias)\n SN.__init__(self, num_svs, num_itrs, out_features, eps=eps)\n def forward(self, x):\n return F.linear(x, self.W_(), self.bias)\n\n\n# Embedding layer with spectral norm\n# We use num_embeddings as the dim instead of embedding_dim here\n# for convenience sake\nclass SNEmbedding(nn.Embedding, SN):\n def __init__(self, num_embeddings, embedding_dim, padding_idx=None, \n max_norm=None, norm_type=2, scale_grad_by_freq=False,\n sparse=False, _weight=None,\n num_svs=1, num_itrs=1, eps=1e-12):\n nn.Embedding.__init__(self, num_embeddings, embedding_dim, padding_idx,\n max_norm, norm_type, scale_grad_by_freq, \n sparse, _weight)\n SN.__init__(self, num_svs, num_itrs, num_embeddings, eps=eps)\n def forward(self, x):\n return F.embedding(x, self.W_())\n\n\n# A non-local block as used in SA-GAN\n# Note that the implementation as described in the paper is largely incorrect;\n# refer to the released code for the actual implementation.\nclass Attention(nn.Module):\n def __init__(self, ch, which_conv=SNConv2d, name='attention'):\n super(Attention, self).__init__()\n # Channel multiplier\n self.ch = ch\n self.which_conv = which_conv\n self.theta = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)\n self.phi = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False)\n self.g = self.which_conv(self.ch, self.ch // 2, kernel_size=1, padding=0, bias=False)\n self.o = self.which_conv(self.ch // 2, self.ch, kernel_size=1, padding=0, bias=False)\n # Learnable gain parameter\n self.gamma = P(torch.tensor(0.), requires_grad=True)\n def forward(self, x, y=None):\n # Apply convs\n theta = self.theta(x)\n phi = F.max_pool2d(self.phi(x), [2,2])\n g = F.max_pool2d(self.g(x), [2,2]) \n # Perform reshapes\n theta = theta.view(-1, self. ch // 8, x.shape[2] * x.shape[3])\n phi = phi.view(-1, self. ch // 8, x.shape[2] * x.shape[3] // 4)\n g = g.view(-1, self. 
ch // 2, x.shape[2] * x.shape[3] // 4)\n # Matmul and softmax to get attention maps\n beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1)\n # Attention map times g path\n o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.ch // 2, x.shape[2], x.shape[3]))\n return self.gamma * o + x\n\n\n# Fused batchnorm op\ndef fused_bn(x, mean, var, gain=None, bias=None, eps=1e-5):\n # Apply scale and shift--if gain and bias are provided, fuse them here\n # Prepare scale\n scale = torch.rsqrt(var + eps)\n # If a gain is provided, use it\n if gain is not None:\n scale = scale * gain\n # Prepare shift\n shift = mean * scale\n # If bias is provided, use it\n if bias is not None:\n shift = shift - bias\n return x * scale - shift\n #return ((x - mean) / ((var + eps) ** 0.5)) * gain + bias # The unfused way.\n\n\n# Manual BN\n# Calculate means and variances using mean-of-squares minus mean-squared\ndef manual_bn(x, gain=None, bias=None, return_mean_var=False, eps=1e-5):\n # Cast x to float32 if necessary\n float_x = x.float()\n # Calculate expected value of x (m) and expected value of x**2 (m2) \n # Mean of x\n m = torch.mean(float_x, [0, 2, 3], keepdim=True)\n # Mean of x squared\n m2 = torch.mean(float_x ** 2, [0, 2, 3], keepdim=True)\n # Calculate variance as mean of squared minus mean squared.\n var = (m2 - m **2)\n # Cast back to float 16 if necessary\n var = var.type(x.type())\n m = m.type(x.type())\n # Return mean and variance for updating stored mean/var if requested \n if return_mean_var:\n return fused_bn(x, m, var, gain, bias, eps), m.squeeze(), var.squeeze()\n else:\n return fused_bn(x, m, var, gain, bias, eps)\n\n\n# My batchnorm, supports standing stats \nclass myBN(nn.Module):\n def __init__(self, num_channels, eps=1e-5, momentum=0.1):\n super(myBN, self).__init__()\n # momentum for updating running stats\n self.momentum = momentum\n # epsilon to avoid dividing by 0\n self.eps = eps\n # Momentum\n self.momentum = momentum\n # Register buffers\n self.register_buffer('stored_mean', torch.zeros(num_channels))\n self.register_buffer('stored_var', torch.ones(num_channels))\n self.register_buffer('accumulation_counter', torch.zeros(1))\n # Accumulate running means and vars\n self.accumulate_standing = False\n \n # reset standing stats\n def reset_stats(self):\n self.stored_mean[:] = 0\n self.stored_var[:] = 0\n self.accumulation_counter[:] = 0\n \n def forward(self, x, gain, bias):\n if self.training:\n out, mean, var = manual_bn(x, gain, bias, return_mean_var=True, eps=self.eps)\n # If accumulating standing stats, increment them\n if self.accumulate_standing:\n self.stored_mean[:] = self.stored_mean + mean.data\n self.stored_var[:] = self.stored_var + var.data\n self.accumulation_counter += 1.0\n # If not accumulating standing stats, take running averages\n else:\n self.stored_mean[:] = self.stored_mean * (1 - self.momentum) + mean * self.momentum\n self.stored_var[:] = self.stored_var * (1 - self.momentum) + var * self.momentum\n return out\n # If not in training mode, use the stored statistics\n else: \n mean = self.stored_mean.view(1, -1, 1, 1)\n var = self.stored_var.view(1, -1, 1, 1)\n # If using standing stats, divide them by the accumulation counter \n if self.accumulate_standing:\n mean = mean / self.accumulation_counter\n var = var / self.accumulation_counter\n return fused_bn(x, mean, var, gain, bias, self.eps)\n\n\n# Simple function to handle groupnorm norm stylization \ndef groupnorm(x, norm_style):\n # If number of channels specified in norm_style:\n if 'ch' in 
norm_style:\n ch = int(norm_style.split('_')[-1])\n groups = max(int(x.shape[1]) // ch, 1)\n # If number of groups specified in norm style\n elif 'grp' in norm_style:\n groups = int(norm_style.split('_')[-1])\n # If neither, default to groups = 16\n else:\n groups = 16\n return F.group_norm(x, groups)\n\n\n# Class-conditional bn\n# output size is the number of channels, input size is for the linear layers\n# Andy's Note: this class feels messy but I'm not really sure how to clean it up\n# Suggestions welcome! (By which I mean, refactor this and make a pull request\n# if you want to make this more readable/usable). \nclass ccbn(nn.Module):\n def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1,\n cross_replica=False, mybn=False, norm_style='bn',):\n super(ccbn, self).__init__()\n self.output_size, self.input_size = output_size, input_size\n # Prepare gain and bias layers\n self.gain = which_linear(input_size, output_size)\n self.bias = which_linear(input_size, output_size)\n # epsilon to avoid dividing by 0\n self.eps = eps\n # Momentum\n self.momentum = momentum\n # Use cross-replica batchnorm?\n self.cross_replica = cross_replica\n # Use my batchnorm?\n self.mybn = mybn\n # Norm style?\n self.norm_style = norm_style\n \n if self.cross_replica:\n self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False)\n elif self.mybn:\n self.bn = myBN(output_size, self.eps, self.momentum)\n elif self.norm_style in ['bn', 'in']:\n self.register_buffer('stored_mean', torch.zeros(output_size))\n self.register_buffer('stored_var', torch.ones(output_size)) \n \n \n def forward(self, x, y):\n # Calculate class-conditional gains and biases\n gain = (1 + self.gain(y)).view(y.size(0), -1, 1, 1)\n bias = self.bias(y).view(y.size(0), -1, 1, 1)\n # If using my batchnorm\n if self.mybn or self.cross_replica:\n return self.bn(x, gain=gain, bias=bias)\n # else:\n else:\n if self.norm_style == 'bn':\n out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None,\n self.training, 0.1, self.eps)\n elif self.norm_style == 'in':\n out = F.instance_norm(x, self.stored_mean, self.stored_var, None, None,\n self.training, 0.1, self.eps)\n elif self.norm_style == 'gn':\n out = groupnorm(x, self.normstyle)\n elif self.norm_style == 'nonorm':\n out = x\n return out * gain + bias\n def extra_repr(self):\n s = 'out: {output_size}, in: {input_size},'\n s +=' cross_replica={cross_replica}'\n return s.format(**self.__dict__)\n\n\n# Normal, non-class-conditional BN\nclass bn(nn.Module):\n def __init__(self, output_size, eps=1e-5, momentum=0.1,\n cross_replica=False, mybn=False):\n super(bn, self).__init__()\n self.output_size= output_size\n # Prepare gain and bias layers\n self.gain = P(torch.ones(output_size), requires_grad=True)\n self.bias = P(torch.zeros(output_size), requires_grad=True)\n # epsilon to avoid dividing by 0\n self.eps = eps\n # Momentum\n self.momentum = momentum\n # Use cross-replica batchnorm?\n self.cross_replica = cross_replica\n # Use my batchnorm?\n self.mybn = mybn\n \n if self.cross_replica:\n self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False) \n elif mybn:\n self.bn = myBN(output_size, self.eps, self.momentum)\n # Register buffers if neither of the above\n else: \n self.register_buffer('stored_mean', torch.zeros(output_size))\n self.register_buffer('stored_var', torch.ones(output_size))\n \n def forward(self, x, y=None):\n if self.cross_replica or self.mybn:\n gain = self.gain.view(1,-1,1,1)\n bias = 
self.bias.view(1,-1,1,1)\n return self.bn(x, gain=gain, bias=bias)\n else:\n return F.batch_norm(x, self.stored_mean, self.stored_var, self.gain,\n self.bias, self.training, self.momentum, self.eps)\n\n \n# Generator blocks\n# Note that this class assumes the kernel size and padding (and any other\n# settings) have been selected in the main generator module and passed in\n# through the which_conv arg. Similar rules apply with which_bn (the input\n# size [which is actually the number of channels of the conditional info] must \n# be preselected)\nclass GBlock(nn.Module):\n def __init__(self, in_channels, out_channels,\n which_conv=nn.Conv2d, which_bn=bn, activation=None, \n upsample=None):\n super(GBlock, self).__init__()\n \n self.in_channels, self.out_channels = in_channels, out_channels\n self.which_conv, self.which_bn = which_conv, which_bn\n self.activation = activation\n self.upsample = upsample\n # Conv layers\n self.conv1 = self.which_conv(self.in_channels, self.out_channels)\n self.conv2 = self.which_conv(self.out_channels, self.out_channels)\n self.learnable_sc = in_channels != out_channels or upsample\n if self.learnable_sc:\n self.conv_sc = self.which_conv(in_channels, out_channels, \n kernel_size=1, padding=0)\n # Batchnorm layers\n self.bn1 = self.which_bn(in_channels)\n self.bn2 = self.which_bn(out_channels)\n # upsample layers\n self.upsample = upsample\n\n def forward(self, x, y):\n h = self.activation(self.bn1(x, y))\n if self.upsample:\n h = self.upsample(h)\n x = self.upsample(x)\n h = self.conv1(h)\n h = self.activation(self.bn2(h, y))\n h = self.conv2(h)\n if self.learnable_sc: \n x = self.conv_sc(x)\n return h + x\n \n \n# Residual block for the discriminator\nclass DBlock(nn.Module):\n def __init__(self, in_channels, out_channels, which_conv=SNConv2d, wide=True,\n preactivation=False, activation=None, downsample=None,):\n super(DBlock, self).__init__()\n self.in_channels, self.out_channels = in_channels, out_channels\n # If using wide D (as in SA-GAN and BigGAN), change the channel pattern\n self.hidden_channels = self.out_channels if wide else self.in_channels\n self.which_conv = which_conv\n self.preactivation = preactivation\n self.activation = activation\n self.downsample = downsample\n \n # Conv layers\n self.conv1 = self.which_conv(self.in_channels, self.hidden_channels)\n self.conv2 = self.which_conv(self.hidden_channels, self.out_channels)\n self.learnable_sc = True if (in_channels != out_channels) or downsample else False\n if self.learnable_sc:\n self.conv_sc = self.which_conv(in_channels, out_channels, \n kernel_size=1, padding=0)\n def shortcut(self, x):\n if self.preactivation:\n if self.learnable_sc:\n x = self.conv_sc(x)\n if self.downsample:\n x = self.downsample(x)\n else:\n if self.downsample:\n x = self.downsample(x)\n if self.learnable_sc:\n x = self.conv_sc(x)\n return x\n \n def forward(self, x):\n if self.preactivation:\n # h = self.activation(x) # NOT TODAY SATAN\n # Andy's note: This line *must* be an out-of-place ReLU or it \n # will negatively affect the shortcut connection.\n h = F.relu(x)\n else:\n h = x \n h = self.conv1(h)\n h = self.conv2(self.activation(h))\n if self.downsample:\n h = self.downsample(h) \n \n return h + self.shortcut(x)\n \n# dogball"
] | [
[
"torch.mean",
"torch.nn.functional.batch_norm",
"torch.ones",
"torch.zeros",
"torch.randn",
"torch.nn.Conv2d.__init__",
"torch.tensor",
"torch.nn.Embedding.__init__",
"torch.matmul",
"torch.rsqrt",
"torch.nn.functional.relu",
"torch.no_grad",
"torch.nn.functional.instance_norm",
"torch.nn.Linear.__init__",
"torch.nn.functional.group_norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
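As a side note on the row above: its `fused_bn` helper folds the affine gain and bias into the normalization scale and shift. The short PyTorch sketch below (illustrative shapes and values, not taken from the repository) checks that the fused form agrees with the textbook `(x - mean) / sqrt(var + eps) * gain + bias`.

```python
import torch

def fused_bn(x, mean, var, gain=None, bias=None, eps=1e-5):
    # Fold the affine gain into the scale and the bias into the shift,
    # mirroring the helper captured in the row above.
    scale = torch.rsqrt(var + eps)
    if gain is not None:
        scale = scale * gain
    shift = mean * scale
    if bias is not None:
        shift = shift - bias
    return x * scale - shift

torch.manual_seed(0)
x = torch.randn(4, 3, 8, 8)                              # hypothetical feature maps
m = x.mean([0, 2, 3], keepdim=True)                      # per-channel mean
var = (x ** 2).mean([0, 2, 3], keepdim=True) - m ** 2    # E[x^2] - E[x]^2
gain = 1.5 * torch.ones(1, 3, 1, 1)
bias = 0.1 * torch.ones(1, 3, 1, 1)

fused = fused_bn(x, m, var, gain, bias)
unfused = (x - m) / torch.sqrt(var + 1e-5) * gain + bias
print(torch.allclose(fused, unfused, atol=1e-5))  # expected: True
```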
DimitriPapadopoulos/nmrglue | [
"f47397dcda84854d2136395a9998fe0b57356cbf"
] | [
"nmrglue/analysis/peakpick.py"
] | [
"\"\"\"\nPeak picking routines, lineshape parameter guessing, and related functions.\n\"\"\"\n\nimport numpy as np\nimport scipy.ndimage as ndimage\n\nfrom .analysisbase import ndwindow_index, valid_pt\nfrom .lineshapes1d import gauss, ls_str2class\nfrom .segmentation import find_all_downward, find_all_upward\nfrom .segmentation import find_all_connected, find_all_nconnected\nfrom ..fileio import table\n\n\ndef pick(data, pthres, nthres=None, msep=None, algorithm='connected',\n est_params=True, lineshapes=None, edge=None, diag=False, c_struc=None,\n c_ndil=0, cluster=True, table=True, axis_names=['A', 'Z', 'Y', 'X']):\n \"\"\"\n Pick (find) peaks in a region of a NMR spectrum.\n\n Parameters\n ----------\n data : ndarray\n Region of NMR spectrum to pick peaks from.\n pthres : float\n Minimum peak height for positive peaks. None to not detect positive\n peaks.\n nthres : float\n Minimum peak height for negative peaks (typically a negative value).\n None to not detect negative peaks.\n msep : tuple of ints, optional\n N-tuple of minimum peak separations along each axis. Must be provided\n if algorithm is 'thresh' or 'thresh-fast'.\n algorithm : {'thres', thresh-fast', 'downward', 'connected'}, optional\n Peak picking algorithm to use. Default is 'connected'.\n est_params : bool, optional\n True to perform an estimate of linewidths and amplitude for all peaks\n picked. False, the default, will return only the peak locations.\n lineshapes : list, optional\n A list of lineshape classes or string shortcuts for each dimension.\n If not specified Gaussian type lineshapes with a FWHM linewidth\n parameter is assumed in each dimension. This parameter if only used\n if est_params is True.\n edge : tuple of ints, optional\n Tuple to add to peak locations representing the edge of the region.\n None, the default, skips this addition.\n diag : bool, optional\n True to consider diagonal points to be touching in peak finding\n algorithm and clustering.\n c_struc : ndarray, optional\n Structure element to use when applying dilation on segments before\n applying clustering algorithm. None will apply a default square\n structure with connectivity one will be applied.\n c_ndil : int, optional\n Number of dilations to perform on segments before applying clustering\n algorithm.\n cluster : bool, optional\n True to cluster touching peaks. False does not apply clustering.\n table : bool, optional\n True to return a table. False to return lists.\n axis_names : list. 
optional\n List of axis names, the last n will be used for column name prefixes\n in table where n is the number of dimensions.\n\n Returns\n -------\n locations : list, returned when table is False\n Peak locations.\n cluster_ids : list, returned when table is False and cluster is True\n Cluster numbers for peaks.\n scales : list, returned when table is False and est_params is True\n Estimated peak scales (linewidths).\n amps : list, returned when table is False and est_params is True\n Estimated peak amplitudes.\n table : recarray, returned when table is True\n Table of request peak parameters.\n\n \"\"\"\n ####################\n # Check parameters #\n ####################\n ndim = len(data.shape)\n\n # check msep\n if isinstance(msep, int):\n msep = (msep, )\n if algorithm in ['thres', 'thres-fast'] and len(msep) != ndim:\n raise ValueError(\"msep has incorrect length\")\n\n # check algorithm\n if algorithm not in ['thres', 'thres-fast', 'downward', 'connected']:\n raise ValueError('Invalid algorithm %s' % (algorithm))\n\n # check lineshapes\n if est_params:\n # expand None\n if lineshapes is None:\n lineshapes = [gauss() for i in range(ndim)]\n ls_classes = []\n\n # replace strings\n for l in lineshapes:\n if isinstance(l, str):\n ls_classes.append(ls_str2class(l))\n else:\n ls_classes.append(l)\n # check that all classes have 2 parameters\n for i, ls in enumerate(ls_classes):\n if ls.nparam(10) != 2:\n s = \"Lineshape class %i does not have two parameters\"\n raise ValueError(s % (i))\n\n if len(ls_classes) != ndim:\n raise ValueError(\"Incorrect number of lineshapes\")\n\n if edge is not None and len(edge) != ndim:\n raise ValueError(\"edge has incorrect length\")\n\n #######################\n # find positive peaks #\n #######################\n if pthres is None: # no locations\n ploc = []\n pseg = []\n\n elif est_params is True: # find locations and segments\n if algorithm == 'thres':\n ploc, pseg = find_all_thres_fast(data, pthres, msep, True)\n elif algorithm == 'thres-fast':\n ploc, pseg = find_all_thres_fast(data, pthres, msep, True)\n elif algorithm == 'downward':\n ploc, pseg = find_all_downward(data, pthres, True, diag)\n elif algorithm == 'connected':\n ploc, pseg = find_all_connected(data, pthres, True, diag)\n else:\n raise ValueError('Invalid algorithm %s' % (algorithm))\n\n else: # find only locations\n if algorithm == 'thres':\n ploc = find_all_thres_fast(data, pthres, msep, False)\n elif algorithm == 'thres-fast':\n ploc = find_all_thres_fast(data, pthres, msep, False)\n elif algorithm == 'downward':\n ploc = find_all_downward(data, pthres, False, diag)\n elif algorithm == 'connected':\n ploc = find_all_connected(data, pthres, False, diag)\n else:\n raise ValueError('Invalid algorithm %s' % (algorithm))\n\n #######################\n # find negative peaks #\n #######################\n if nthres is None: # no locations\n nloc = []\n nseg = []\n\n elif est_params is True: # find locations and segments\n if algorithm == 'thres':\n nloc, nseg = find_all_nthres(data, nthres, msep, True)\n elif algorithm == 'thres-fast':\n nloc, nseg = find_all_nthres_fast(data, nthres, msep, True)\n elif algorithm == 'downward':\n nloc, nseg = find_all_upward(data, nthres, True, diag)\n elif algorithm == 'connected':\n nloc, nseg = find_all_nconnected(data, nthres, True, diag)\n else:\n raise ValueError('Invalid algorithm %s' % (algorithm))\n\n else: # find only locations\n if algorithm == 'thres':\n nloc = find_all_nthres(data, nthres, msep, False)\n elif algorithm == 'thres-fast':\n nloc = 
find_all_nthres_fast(data, nthres, msep, False)\n elif algorithm == 'downward':\n nloc = find_all_upward(data, nthres, False, diag)\n elif algorithm == 'connected':\n nloc = find_all_nconnected(data, nthres, False, diag)\n else:\n raise ValueError('Invalid algorithm %s' % (algorithm))\n\n # combine the positive and negative peaks\n locations = ploc + nloc\n\n #########################################################\n # return locations if no parameter estimation requested #\n #########################################################\n if est_params is False:\n if cluster: # find clusters\n cluster_ids = clusters(data, locations, pthres, nthres, c_struc,\n None, c_ndil)\n locations = add_edge(locations, edge)\n if table:\n return pack_table(locations, cluster_ids,\n axis_names=axis_names)\n else:\n return locations, cluster_ids\n else: # Do not determine clusters\n locations = add_edge(locations, edge)\n if table:\n return pack_table(locations, axis_names=axis_names)\n else:\n return locations\n\n ##################################\n # estimate scales and amplitudes #\n ##################################\n seg_slices = pseg + nseg\n scales = [[]] * len(locations)\n amps = [[]] * len(locations)\n # scales = np.zeros(np.array(locations).shape,dtype=float)\n # amps = np.zeros(len(locations),dtype=float)\n\n for i, (l, seg_slice) in enumerate(zip(locations, seg_slices)):\n null, scales[i], amps[i] = guess_params_slice(data, l, seg_slice,\n ls_classes)\n\n ########################################################\n # return locations, scales and amplitudes as requested #\n ########################################################\n if cluster:\n cluster_ids = clusters(data, locations, pthres, nthres, c_struc, None,\n c_ndil)\n locations = add_edge(locations, edge)\n if table:\n return pack_table(locations, cluster_ids, scales, amps, axis_names)\n else:\n return locations, cluster_ids, scales, amps\n else:\n locations = add_edge(locations, edge)\n if table:\n return pack_table(locations, scales=scales, amps=amps,\n axis_names=axis_names)\n else:\n return locations, scales, amps\n\n\ndef add_edge(locations, edge):\n \"\"\"\n Add edge to list of locations, returning a list of edge-added locations\n \"\"\"\n if edge is None:\n return locations\n return [tuple([i + j for i, j in zip(edge, l)]) for l in locations]\n\n\ndef clusters(data, locations, pthres, nthres, d_struc=None, l_struc=None,\n ndil=0):\n \"\"\"\n Perform cluster analysis of peak locations.\n\n Parameters\n ----------\n data : ndarray\n Array of data which has been peak picked.\n locations : list\n List of peak locations.\n pthres : float\n Positive peak threshold. None for no positive peaks.\n nthres : float\n Negative peak threshold. 
None for no negative peaks.\n d_struc : ndarray, optional\n Structure of binary dilation to apply on segments before clustering.\n None uses a square structure with connectivity of one.\n l_struc : ndarray, optional\n Structure to use for determining segment connectivity in clustering.\n None uses square structure with connectivity of one.\n dnil : int, optional\n Number of dilation to apply on segments before determining clusters.\n\n Returns\n -------\n cluster_ids : list\n List of cluster number corresponding to peak locations.\n\n \"\"\"\n # make a binary array of regions above/below the noise thresholds\n if pthres is None: # negative peaks only\n input = data < nthres\n elif nthres is None: # positive peaks only\n input = data > pthres\n else: # both positive and negative\n input = np.bitwise_or(data < nthres, data > pthres)\n\n # apply dialations to these segments\n if ndil != 0:\n input = ndimage.binary_dilation(input, d_struc, iterations=ndil)\n\n # label this array, these are the clusters.\n labeled_array, num_features = ndimage.label(input, l_struc)\n\n return [labeled_array[i] for i in locations]\n\n\ndef pack_table(locations, cluster_ids=None, scales=None, amps=None,\n axis_names=[\"A\", \"Z\", \"Y\", \"X\"]):\n \"\"\"\n Create a table from peak information.\n\n Parameters\n ----------\n locations : list\n List of peak locations.\n cluster_ids : list, optional\n List of cluster numbers. None will not include cluster number in the\n table.\n scales : list, optional\n List of peak scales (linewidths). None will not include peak scales in\n the table.\n amps : list, optional\n List of peak amplitudes. None will not include peak amplitudes in the\n table.\n axis_names : list, optional\n List of axis names, the last n will be used for column name prefixes\n where n is the number of dimensions.\n\n Returns\n -------\n table : recarray\n nmrglue table with column representing peak parameters. Peak locations\n are given column names like 'X_AXIS', 'Y_AXIS', etc. Cluster_ids are\n given a column name of 'cID'. Peak scales (linewidths) are given\n column names like 'X_LW','Y_LW'. 
Peak amplitudes are given a column\n name of 'VOL'.\n\n \"\"\"\n ndim = len(locations[0])\n anames = axis_names[-ndim:]\n\n dt = [(a + \"_AXIS\", np.float) for a in anames]\n rec = np.rec.array(locations, dtype=dt)\n\n if cluster_ids is not None:\n rec = table.append_column(rec, cluster_ids, 'cID', 'int')\n if scales is not None:\n names = [a + \"_LW\" for a in anames]\n for n, c in zip(names, np.array(scales).T):\n rec = table.append_column(rec, c, n, 'float')\n if amps is not None:\n rec = table.append_column(rec, amps, 'VOL', 'float')\n\n return rec\n\n\ndef guess_params_slice(data, location, seg_slice, ls_classes):\n \"\"\"\n Guess the parameter of a peak in a segment.\n\n Parameters\n ----------\n data : ndarray\n NMR data.\n location : tuple\n Peak locations.\n seg_slice : list of slices\n List slices which slice data to give the desired segment.\n lineshapes : list\n List of lineshape classes.\n\n Returns\n -------\n location : list\n Peak locations.\n scale : list\n Peak scales (linewidths).\n amp : list\n Peak amplitudes.\n\n \"\"\"\n # find the rectangular region around the segment\n region = data[seg_slice]\n edge = [s.start for s in seg_slice]\n rlocation = [l - s.start for l, s in zip(location, seg_slice)]\n\n # amptide is estimated by the sum of all points in region\n amp = np.sum(region)\n\n scale = [] # list of linewidths\n nlocation = [] # list of peak centers\n\n # loop over the axes\n for axis, ls in enumerate(ls_classes):\n # create the 1D lineshape\n r = extract_1d(region, rlocation, axis)\n # estimate the linewidth\n loc, sc = ls.guessp(r)\n scale.append(float(sc))\n nlocation.append(float(loc))\n\n return tuple([l + e for l, e in zip(nlocation, edge)]), tuple(scale), amp\n\n\ndef extract_1d(data, location, axis):\n \"\"\"\n Extract a 1D slice from data along axis at location\n \"\"\"\n s = [slice(v, v + 1) for v in location]\n s[axis] = slice(None, None)\n return np.atleast_1d(np.squeeze(data[tuple(s)]))\n\n\n# algorithm specific peak picking routines\ndef find_all_thres(data, thres, msep, find_segs=False):\n \"\"\"\n Peak pick a spectrum using a threshhold-minimum distance algorithm.\n\n Find peaks (local maxima) in a arbitrary dimensional NMR spectra above a\n set threshold with a minimal distance between peaks. When the spectrum is\n small and multiple copies can fit into RAM use the _fast version of this\n function. Segments are found by finding the first point in each direction\n along each dimension which is below the threshold.\n\n Parameters\n ----------\n data : ndarray\n NMR data.\n thres : float\n Threshold value for minimum peak height\n msep : tuple\n Tuple of minimum peak separations along each axis.\n find_segs : bool, optional\n True to find segments and return a list of slices which select that\n segment. False performs no segmentation discovery.\n\n Returns\n -------\n locations : list\n List of peak locations\n seg_slices : list, optional\n List of slices which extract a region around each peak. 
Only returned\n when find_segs is True.\n\n \"\"\"\n locations = [] # create an empty list of peak locations\n wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*separation+1\n\n # loop over the windows\n for idx, s in ndwindow_index(data.shape, wsize):\n max = data[s].max()\n if max == data[idx] and max > thres:\n locations.append(idx)\n if find_segs:\n seg_slices = find_pseg_slice(data, locations, thres)\n return locations, seg_slices\n else:\n return locations\n\n\ndef find_all_nthres(data, thres, msep, find_segs=False):\n \"\"\"\n Peak pick a spectrum using a threshhold-minimum distance algorithm.\n\n Identical to find_all_thres except local minima are found below the\n given threshold. See :py:func:`find_all_thres` for a description of the\n algorithm and documentation.\n\n \"\"\"\n locations = [] # create an empty list of peak locations\n wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*separation+1\n\n # loop over the windows\n for idx, s in ndwindow_index(data.shape, wsize):\n min = data[s].min()\n if min == data[idx] and min < thres:\n locations.append(idx)\n if find_segs:\n seg_slices = find_pseg_slice(data, locations, thres)\n return locations, seg_slices\n else:\n return locations\n\n\ndef find_all_thres_fast(data, thres, msep, find_segs=False):\n \"\"\"\n Fast version of find_all_thres. See :py:func:`find_all_thres`.\n \"\"\"\n wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*separation+1\n\n # find local maxima mask\n mx = ndimage.maximum_filter(data, size=wsize, mode='constant') == data\n\n # find positive threshold mask\n pthres = np.ma.greater(data, thres)\n\n # peaks are bitwise and of maximum mask and threshold mask\n locations = np.transpose(np.nonzero(np.bitwise_and(pthres, mx)))\n locations = [tuple(i) for i in locations]\n\n if find_segs:\n seg_slices = [find_pseg_slice(data, l, thres) for l in locations]\n return locations, seg_slices\n else:\n return locations\n\n\ndef find_all_nthres_fast(data, thres, msep, find_segs=False):\n \"\"\"\n Fast version of find_all_nthres_fast. 
See :py:func:`find_all_thres`.\n \"\"\"\n wsize = tuple([2 * i + 1 for i in msep]) # window size is 2*separation+1\n\n # find local maxima mask\n mn = ndimage.minimum_filter(data, size=wsize, mode='constant') == data\n\n # find positive threshold mask\n nthres = np.ma.less(data, thres)\n\n # peaks are bitwise and of maximum mask and threshold mask\n locations = np.transpose(np.nonzero(np.bitwise_and(nthres, mn)))\n locations = [tuple(i) for i in locations]\n\n if find_segs:\n seg_slices = [find_pseg_slice(data, l, thres) for l in locations]\n return locations, seg_slices\n else:\n return locations\n\n\ndef find_pseg_slice(data, location, thres):\n \"\"\"\n Find slices which define a segment in data above thres.\n \"\"\"\n shape = data.shape\n seg_slice = []\n for dim, v in enumerate(location):\n # find start value\n al = list(location)\n start = v\n while(valid_pt(al, shape) and data[tuple(al)] > thres):\n start = start - 1\n al[dim] = start\n # find stop value\n al = list(location)\n stop = v\n while(valid_pt(al, shape) and data[tuple(al)] > thres):\n stop = stop + 1\n al[dim] = stop\n seg_slice.append(slice(start + 1, stop))\n return seg_slice\n\n\ndef find_nseg_slice(data, location, thres):\n \"\"\"\n Find slices which define a segment in data below thres.\n \"\"\"\n shape = data.shape\n seg_slice = []\n for dim, v in enumerate(location):\n # find start value\n al = list(location)\n start = v\n while(valid_pt(al, shape) and data[tuple(al)] < thres):\n start = start - 1\n al[dim] = start\n # find stop value\n al = list(location)\n stop = v\n while(valid_pt(al, shape) and data[tuple(al)] < thres):\n stop = stop + 1\n al[dim] = stop\n seg_slice.append(slice(start + 1, stop))\n return seg_slice\n"
] | [
[
"scipy.ndimage.binary_dilation",
"numpy.ma.less",
"scipy.ndimage.minimum_filter",
"numpy.rec.array",
"scipy.ndimage.maximum_filter",
"scipy.ndimage.label",
"numpy.bitwise_and",
"numpy.ma.greater",
"numpy.array",
"numpy.sum",
"numpy.bitwise_or"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
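The peak-picking module in the preceding row implements its fast threshold variant with `scipy.ndimage.maximum_filter` plus a threshold mask. A self-contained sketch of that idea on synthetic 2D data (hypothetical threshold and separations, not the nmrglue API itself):

```python
import numpy as np
import scipy.ndimage as ndimage

# Synthetic 2D "spectrum" with two bumps (hypothetical data for illustration).
y, x = np.mgrid[0:64, 0:64]
data = (np.exp(-((x - 20) ** 2 + (y - 30) ** 2) / 8.0)
        + 0.6 * np.exp(-((x - 45) ** 2 + (y - 10) ** 2) / 8.0))

thres = 0.3        # minimum peak height
msep = (3, 3)      # minimum separation along each axis
wsize = tuple(2 * i + 1 for i in msep)

# A point is a peak if it equals the local maximum of its window and exceeds thres.
local_max = ndimage.maximum_filter(data, size=wsize, mode='constant') == data
locations = [tuple(int(v) for v in idx)
             for idx in np.transpose(np.nonzero(local_max & (data > thres)))]
print(locations)  # expected: [(10, 45), (30, 20)]
```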
Ericsong2333/partia-flood-warning-system | [
"545a257f601535c62b3341059fdf2203e06c8e17"
] | [
"floodsystem/analysis.py"
] | [
"import matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef polyfit(dates, levels, p):\r\n return np.poly1d(np.polyfit(np.array(dates)-min(dates), levels,p)),min(dates)\r\n\r\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
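For context, the `polyfit` helper in the row above returns a `numpy.poly1d` object together with the date offset it subtracted; an illustrative call with made-up dates and levels might look like this.

```python
import numpy as np

def polyfit(dates, levels, p):
    # Same behaviour as the helper in the row above: shift dates by their
    # minimum before fitting, and return the offset so callers can evaluate.
    return np.poly1d(np.polyfit(np.array(dates) - min(dates), levels, p)), min(dates)

dates = [100.0, 100.5, 101.0, 101.5, 102.0]   # hypothetical timestamps (days)
levels = [0.6, 0.8, 1.1, 1.5, 2.0]            # hypothetical water levels (m)
poly, d0 = polyfit(dates, levels, 2)
print(poly(101.25 - d0))  # interpolated level at an intermediate date
```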
ebagdasa/pytorch-privacy | [
"767eec77463286ff8ff29e826d7d037aa9b3530e"
] | [
"training.py"
] | [
"import json\nfrom datetime import datetime\nimport argparse\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom helper import Helper\nfrom models.simple import Net, NetTF\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tqdm import tqdm as tqdm\nimport yaml\nimport logging\n\nlogger = logging.getLogger(\"logger\")\nwriter = SummaryWriter()\nlayout = {'accuracy_per_class': {\n 'accuracy_per_class': ['Multiline', ['accuracy_per_class/accuracy_var',\n 'accuracy_per_class/accuracy_min',\n 'accuracy_per_class/accuracy_max']]}}\nwriter.add_custom_scalars(layout)\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\ndef plot(x, y, name):\n writer.add_scalar(tag=name, scalar_value=y, global_step=x)\n\ndef create_table(params: dict):\n header = f\"| {' | '.join([x[:10] for x in params.keys()])} |\"\n line = f\"|{'|:'.join([3*'-' for x in range(len(params.keys()))])}|\"\n values = f\"| {' | '.join([str(x) for x in params.values()])} |\"\n return '\\n'.join([header, line, values])\n\n\ndef compute_norm(model, norm_type=2):\n total_norm = 0\n for p in model.parameters():\n param_norm = p.grad.data.norm(norm_type)\n total_norm += param_norm.item() ** norm_type\n total_norm = total_norm ** (1. / norm_type)\n return total_norm\n\n\ndef test(net, epoch, name, testloader, vis=True):\n net.eval()\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n inputs, labels = data\n inputs = inputs.to(device)\n labels = labels.to(device)\n outputs = net(inputs)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n logger.info(f'Name: {name}. Epoch {epoch}. acc: {100 * correct / total}')\n if vis:\n plot(epoch, 100 * correct / total, name)\n return 100 * correct / total\n\n\ndef train_dp(trainloader, model, optimizer, epoch):\n \"\"\"\n Differentially Private version of the training procedure\n\n :param trainloader:\n :param model:\n :param optimizer:\n :param epoch:\n :return:\n \"\"\"\n model.train()\n running_loss = 0.0\n for i, data in tqdm(enumerate(trainloader, 0), leave=True):\n inputs, labels = data\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n running_loss += torch.mean(loss).item()\n\n losses = torch.mean(loss.reshape(num_microbatches, -1), dim=1)\n saved_var = dict()\n for tensor_name, tensor in model.named_parameters():\n saved_var[tensor_name] = torch.zeros_like(tensor)\n\n for j in losses:\n j.backward(retain_graph=True)\n torch.nn.utils.clip_grad_norm_(model.parameters(), S)\n for tensor_name, tensor in model.named_parameters():\n new_grad = tensor.grad\n saved_var[tensor_name].add_(new_grad)\n model.zero_grad()\n\n for tensor_name, tensor in model.named_parameters():\n if device.type =='cuda':\n noise = torch.cuda.FloatTensor(tensor.grad.shape).normal_(0, sigma)\n else:\n noise = torch.FloatTensor(tensor.grad.shape).normal_(0, sigma)\n saved_var[tensor_name].add_(noise)\n tensor.grad = saved_var[tensor_name] / num_microbatches\n optimizer.step()\n\n if i > 0 and i % 20 == 0:\n # logger.info('[%d, %5d] loss: %.3f' %\n # (epoch + 1, i + 1, running_loss / 2000))\n plot(epoch * len(trainloader) + i, running_loss, 'Train Loss')\n running_loss = 0.0\n\n\ndef clip_grad(parameters, max_norm, norm_type=2):\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n total_norm = 0\n for p in parameters:\n param_norm = p.grad.data.norm(norm_type)\n total_norm += 
param_norm.item() ** norm_type\n\n\ndef train(trainloader, model, optimizer, epoch):\n \"\"\"\n Normal training\n\n \"\"\"\n model.train()\n running_loss = 0.0\n for i, data in tqdm(enumerate(trainloader, 0), leave=True):\n # get the inputs\n inputs, labels = data\n inputs = inputs.to(device)\n labels = labels.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n\n loss.backward()\n optimizer.step()\n # print statistics\n running_loss += loss.item()\n if i > 0 and i % 20 == 0:\n # logger.info('[%d, %5d] loss: %.3f' %\n # (epoch + 1, i + 1, running_loss / 2000))\n plot(epoch * len(trainloader) + i, running_loss, 'Train Loss')\n running_loss = 0.0\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='PPDL')\n parser.add_argument('--params', dest='params', default='utils/params.yaml')\n args = parser.parse_args()\n\n with open(args.params) as f:\n params = yaml.load(f)\n helper = Helper(current_time=datetime.now().strftime('%b.%d_%H.%M.%S'), params=params, name='utk')\n batch_size = int(helper.params['batch_size'])\n num_microbatches = int(helper.params['num_microbatches'])\n lr = float(helper.params['lr'])\n momentum = float(helper.params['momentum'])\n decay = float(helper.params['decay'])\n epochs = int(helper.params['epochs'])\n S = float(helper.params['S'])\n z = float(helper.params['z'])\n sigma = z * S\n dp = helper.params['dp']\n logger.info(f'DP: {dp}')\n\n logger.info(batch_size)\n logger.info(lr)\n logger.info(momentum)\n helper.load_data()\n helper.create_loaders()\n if helper.params['useTF']:\n net = NetTF()\n else:\n net = Net()\n print(count_parameters(net))\n net.to(device)\n\n if dp:\n criterion = nn.CrossEntropyLoss(reduction='none')\n else:\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum, weight_decay=decay)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=[0.5 * epochs,\n 0.75 * epochs],\n gamma=0.1)\n table = create_table(helper.params)\n writer.add_text('Model Params', table)\n name = \"accuracy\"\n\n for epoch in range(1, epochs): # loop over the dataset multiple times\n if dp:\n train_dp(helper.train_loader, net, optimizer, epoch)\n else:\n train(helper.train_loader, net, optimizer, epoch)\n if helper.params.get('scheduler', False):\n scheduler.step()\n acc = test(net, epoch, name, helper.test_loader, vis=True)\n acc_list = list()\n\n helper.save_model(net, epoch, acc)\n"
] | [
[
"torch.optim.lr_scheduler.MultiStepLR",
"torch.mean",
"torch.nn.CrossEntropyLoss",
"torch.max",
"torch.zeros_like",
"torch.cuda.FloatTensor",
"torch.no_grad",
"torch.FloatTensor",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
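The `train_dp` loop in the preceding row follows the standard DP-SGD recipe: clip each microbatch gradient to norm `S`, sum the clipped gradients, add Gaussian noise with scale `sigma = z * S`, and average. The sketch below strips that down to one step on a toy linear model; the hyperparameters are illustrative, not the repository's `params.yaml` values.

```python
import torch
import torch.nn as nn

torch.manual_seed(0)
model = nn.Linear(10, 2)                      # toy model
criterion = nn.CrossEntropyLoss(reduction='none')
S, z = 1.0, 1.1                               # clip norm and noise multiplier (illustrative)
sigma = z * S
num_microbatches = 4

inputs = torch.randn(8, 10)
labels = torch.randint(0, 2, (8,))
losses = criterion(model(inputs), labels).reshape(num_microbatches, -1).mean(dim=1)

accum = {name: torch.zeros_like(p) for name, p in model.named_parameters()}
for loss in losses:
    loss.backward(retain_graph=True)
    torch.nn.utils.clip_grad_norm_(model.parameters(), S)  # clip this microbatch's gradient
    for name, p in model.named_parameters():
        accum[name] += p.grad
    model.zero_grad()

for name, p in model.named_parameters():
    noisy = accum[name] + torch.randn_like(p) * sigma      # add Gaussian noise once per step
    p.grad = noisy / num_microbatches                      # average over microbatches
# an optimizer.step() would follow here
```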
wyb330/multi-speaker-tacotron-tensorflow | [
"4644d0b2bbce5c351a3f8d3af94ff7461b07a6d6"
] | [
"models/rnn_wrappers2.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib.rnn import RNNCell\nfrom .modules2 import prenet\n\n\nclass DecoderPrenetWrapper(RNNCell):\n '''Runs RNN inputs through a prenet before sending them to the cell.'''\n\n def __init__(self, cell, is_training):\n super(DecoderPrenetWrapper, self).__init__()\n self._cell = cell\n self._is_training = is_training\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def call(self, inputs, state):\n prenet_out = prenet(inputs, self._is_training, scope='decoder_prenet')\n return self._cell(prenet_out, state)\n\n def zero_state(self, batch_size, dtype):\n return self._cell.zero_state(batch_size, dtype)\n\n\nclass ConcatOutputAndAttentionWrapper(RNNCell):\n '''Concatenates RNN cell output with the attention context vector.\n\n This is expected to wrap a cell wrapped with an AttentionWrapper constructed with\n attention_layer_size=None and output_attention=False. Such a cell's state will include an\n \"attention\" field that is the context vector.\n '''\n\n def __init__(self, cell):\n super(ConcatOutputAndAttentionWrapper, self).__init__()\n self._cell = cell\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size + self._cell.state_size.attention\n\n def call(self, inputs, state):\n output, res_state = self._cell(inputs, state)\n return tf.concat([output, res_state.attention], axis=-1), res_state\n\n def zero_state(self, batch_size, dtype):\n return self._cell.zero_state(batch_size, dtype)\n"
] | [
[
"tensorflow.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
killvxk/iust_deep_fuzz | [
"32dbf8df3e436f2be7ff0e6129893fb9be4dccdf"
] | [
"test_pycharm.py"
] | [
"\nimport sys\nimport os\nimport random\nimport numpy as np\n\nimport pdf_object_extractor_2\n\nstr1= 'ali'\n# print('Hello')\n\ndef funv(arg1, arg2):\n print(arg1+arg2)\n# funv(2,3)\n\nseq1 = 'x obj yobjzobjcdoj'\n\n# print(seq1.count('obj'))\n\nsample = np.random.multinomial(1, [1/6.]*6)\nprint('sample 10',sample)\n\nsample = np.random.multinomial(1, [1/5., 1/5., 3/5.], 5)\nprint('sample 5', sample)\nx = random.random()\n\n# print(int(1e6))\n# print(int(5.95))\n\nx = 966\nx = str(x).zfill(10)\n# print(x)\n\nz = 'zekeri morteza'\n# print(z.find('mor'))\n\nsum = lambda x,y: x+y\n# print(sum(12,13))\n\n\n# print(pdf_object_extractor_2.get_xref.__doc__)\n\n\n# Test working with byte and reversing bits of bytes. System programming.\ndef reverse_all_bits_in_byte():\n x = bytes('xyz120456', encoding='ascii')\n # print('len bytes = ', len(x))\n print('x_all', x)\n print('x5', x[5])\n b = \"{0:b}\".format(x[5])\n print('x5_binary_str',b)\n breverse =''\n for i in range(len(b)):\n if b[i] == '1':\n breverse+='0'\n else:\n breverse += '1'\n\n print(breverse)\n breverse2 = int(breverse, 2)\n # breverse = bytes(breverse)\n # breverse2 = bytes([breverse2])\n breverse2 = breverse2.to_bytes(1, 'little')\n print('breverse2',breverse2)\n x = x[:5]+ breverse2 +x[6:]\n print('x5_rev=', x[5])\n print('x_all_new=', x)\n\n\nsys.stdout.write('Hello dolly!')\nprint()\nsys.stdout.write('\\nHello dolly2!')\n\nstring_obj = 'abcdefgendobj'\n\nprint(string_obj.endswith('endobj'))\n\nfor i in range(10):\n print(random.randint(0,9))\n"
] | [
[
"numpy.random.multinomial"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
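The scratch script above inverts the bits of one byte by editing its binary string representation (despite the function name, it complements bits rather than reversing their order, and skips leading zeros). The same byte-level mutation, a common fuzzing primitive, can be written arithmetically; a small illustrative sketch, not tied to the repository's fuzzer:

```python
def flip_byte(data: bytes, index: int) -> bytes:
    # Invert all eight bits of the byte at `index` (x ^ 0xFF == ~x & 0xFF).
    flipped = data[index] ^ 0xFF
    return data[:index] + bytes([flipped]) + data[index + 1:]

x = bytes('xyz120456', encoding='ascii')
print(x[5], flip_byte(x, 5)[5])  # 48 ('0') becomes 207
```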
pdn4kd/isochoric-expander | [
"56bfdc3c7efb3a242ff4ae4c556d70bb7f171e5f",
"56bfdc3c7efb3a242ff4ae4c556d70bb7f171e5f"
] | [
"planetsen.py",
"planetsfp.py"
] | [
"'''Attempts to generate configuration files for a given list of stars and their associated planets.'''\nimport numpy as np\nimport datetime\n\n#initializing\nstars = np.genfromtxt(\"planets.csv\", delimiter=',', names=True, dtype=(\"U23\", \"U9\", int, float, float, float, float, float, float, float, float, float, float))\nstar_name = stars[0][\"HIPnumber\"]\nmax_period = 3654.0\nfile_postfix = \"en.py\"\n\nfor i in np.arange(1,len(stars)):\n\tif (star_name != stars[i][\"HIPnumber\"]):\n\t\tplanets = stars[i-1][\"PlanetNumber\"]\n\t\t#start a new file\n\t\tnow = str((datetime.datetime.now()).isoformat())\n\t\tstar_config = open(star_name+file_postfix, 'w')\n\t\tstar_config.write(\"# Test Keplerian fit configuration file for \"+star_name+\"\\n\")\n\t\tstar_config.write(\"# Features: Short period (t<\"+str(max_period)+\" days) planets fit via period, tc, k; e fixed; trends fit\\n\")\n\t\tstar_config.write(\"# Generated on \"+now+\"\\n\\n\")\n\t\tstar_config.write(\"import os\\n\")\n\t\tstar_config.write(\"import pandas as pd\\n\")\n\t\tstar_config.write(\"import numpy as np\\n\")\n\t\tstar_config.write(\"import radvel\\n\")\n\t\tstar_config.write(\"starname = '\"+star_name+\"'\\n\")\n\t\tstar_config.write(\"instnames = [\\'NEID\\']\\n\")\n\t\tstar_config.write(\"ntels = len(instnames)\\n\")\n\t\tstar_config.write(\"fitting_basis = \\'per tc e w k\\'\\n\")\n\t\tstar_config.write(\"bjd0 = 2458850.\\n\")\n\t\tstar_config.write(\"planet_letters = {\")\n\t\tshort_planets = 0\n\t\tplanet_names = \"bcdefghijk\"\n\t\tfor\tj in np.arange(0, planets):\n\t\t\tif (stars[\"per\"][i-planets+j] < max_period):\n\t\t\t\tshort_planets += 1\n\t\t\t\tif (short_planets > 1):\n\t\t\t\t\tstar_config.write(\", \")\n\t\t\t\tstar_config.write(str(short_planets)+\": '\"+planet_names[j]+\"'\")\n\t\tstar_config.write(\"}\\n\\n\")\n\t\tstar_config.write(\"nplanets = \"+str(short_planets)+\" # out of \"+str(planets)+\"\\n\")\n\t\tstar_config.write(\"anybasis_params = radvel.Parameters(nplanets,basis='per tp e w k')\\n\\n\")\n\t\tstar_config.write(\"anybasis_params['dvdt'] = radvel.Parameter(value=0.0)\\n\")\n\t\tstar_config.write(\"anybasis_params['curv'] = radvel.Parameter(value=0.0)\\n\")\n\t\tstar_config.write(\"anybasis_params['gamma_NEID'] = radvel.Parameter(value=0.0)\\n\")\n\t\tstar_config.write(\"anybasis_params['jit_NEID'] = radvel.Parameter(value=0.0)\\n\")\n\t\t#loop to add planets (before)\n\t\tshort_planets = 0\n\t\tfor x in np.arange(0, planets):\n\t\t\tif (stars[\"per\"][i-planets+x] < max_period):\n\t\t\t\tshort_planets += 1\n\t\t\t\tstar_config.write(\"anybasis_params['per\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"per\"])+\")\\n\")\n\t\t\t\tstar_config.write(\"anybasis_params['tp\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"t_p\"])+\")\\n\")\n\t\t\t\tstar_config.write(\"anybasis_params['e\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"e\"])+\")\\n\")\n\t\t\t\tstar_config.write(\"anybasis_params['w\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"omega\"])+\")\\n\")\n\t\t\t\tstar_config.write(\"anybasis_params['k\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"K\"])+\")\\n\")\n\t\tstar_config.write(\"\\nparams = anybasis_params.basis.to_any_basis(anybasis_params,fitting_basis)\\n\\n\")\n\t\t#loop to add planets (after)\n\t\tshort_planets = 0\n\t\tfor x in np.arange(0, planets):\n\t\t\tif (stars[\"per\"][i-planets+x] < 
max_period):\n\t\t\t\tshort_planets += 1\n\t\t\t\tstar_config.write(\"params['per\"+str(short_planets)+\"'].vary = True\\n\")\n\t\t\t\tstar_config.write(\"params['tc\"+str(short_planets)+\"'].vary = True\\n\")\n\t\t\t\tstar_config.write(\"params['e\"+str(short_planets)+\"'].vary = False\\n\")\n\t\t\t\tstar_config.write(\"params['w\"+str(short_planets)+\"'].vary = False\\n\")\n\t\t\t\tstar_config.write(\"params['k\"+str(short_planets)+\"'].vary = True\\n\")\n\t\t#write end of file\n\t\tstar_config.write(\"params['dvdt'].vary = True\\n\")\n\t\tstar_config.write(\"params['curv'].vary = True\\n\")\n\t\tstar_config.write(\"params['gamma_NEID'].vary = True\\n\")\n\t\tstar_config.write(\"params['jit_NEID'].vary = False\\n\\n\")\n\t\tstar_config.write(\"path = '\"+str(stars['RVfilename'][i-1])+\"'\\n\")\n\t\tstar_config.write(\"data = pd.read_csv(path)\\n\")\n\t\tstar_config.write(\"data['time'] = (data.obs_start+data.obs_end)/2\\n\")\n\t\tstar_config.write(\"data['mnvel'] = data.mnvel\\n\")\n\t\tstar_config.write(\"data['errvel'] = data.rvprec\\n\")\n\t\tstar_config.write(\"data['tel'] = 'NEID'\\n\\n\")\n\t\tstar_config.write(\"priors = [\\n\")\n\t\tstar_config.write(\" radvel.prior.EccentricityPrior( nplanets, ),\\n\")\n\t\tstar_config.write(\" radvel.prior.PositiveKPrior( nplanets ),\\n\")\n\t\tstar_config.write(\" radvel.prior.HardBounds('jit_NEID', 0.0, 15.0)\\n\")\n\t\tstar_config.write(\"]\\n\\n\")\n\t\tstar_config.write(\"time_base = np.mean([np.min(data.time), np.max(data.time)])\\n\")\n\t\tstar_config.close()\n\t\tstar_name = stars[i][\"HIPnumber\"]\n# We missed the last star\nstar_name = stars[i][\"HIPnumber\"]\nplanets = stars[i][\"PlanetNumber\"]\n#start a new file\nnow = str((datetime.datetime.now()).isoformat())\nstar_name = stars[i][\"HIPnumber\"]\nstar_config = open(star_name+file_postfix, 'w')\nstar_config.write(\"# Test Keplerian fit configuration file for \"+star_name+\"\\n\")\nstar_config.write(\"# Features: Short period (t<\"+str(max_period)+\" days) planets fit via period, tc, k; e fixed; trends fit\\n\")\nstar_config.write(\"# Generated on \"+now+\"\\n\\n\")\nstar_config.write(\"import os\\n\")\nstar_config.write(\"import pandas as pd\\n\")\nstar_config.write(\"import numpy as np\\n\")\nstar_config.write(\"import radvel\\n\")\nstar_config.write(\"starname = '\"+star_name+\"'\\n\")\nstar_config.write(\"instnames = [\\'NEID\\']\\n\")\nstar_config.write(\"ntels = len(instnames)\\n\")\nstar_config.write(\"fitting_basis = \\'per tc e w k\\'\\n\")\nstar_config.write(\"bjd0 = 2458850.\\n\")\nstar_config.write(\"planet_letters = {\")\nshort_planets = 0\nplanet_names = \"bcdefghijk\"\nfor\tj in np.arange(1, planets+1):\n\tif (stars[\"per\"][i-planets+j] < max_period):\n\t\tshort_planets += 1\n\t\tif (short_planets > 1):\n\t\t\tstar_config.write(\", \")\n\t\tstar_config.write(str(short_planets)+\": '\"+planet_names[j]+\"'\")\nstar_config.write(\"}\\n\\n\")\nstar_config.write(\"nplanets = \"+str(short_planets)+\" # out of \"+str(planets)+\"\\n\")\nstar_config.write(\"anybasis_params = radvel.Parameters(nplanets,basis='per tp e w k')\\n\\n\")\nstar_config.write(\"anybasis_params['dvdt'] = radvel.Parameter(value=0.0)\\n\")\nstar_config.write(\"anybasis_params['curv'] = radvel.Parameter(value=0.0)\\n\")\nstar_config.write(\"anybasis_params['gamma_NEID'] = radvel.Parameter(value=0.0)\\n\")\nstar_config.write(\"anybasis_params['jit_NEID'] = radvel.Parameter(value=0.0)\\n\")\n#loop to add planets (before)\nshort_planets = 0\nfor x in np.arange(1, planets+1):\n\tif 
(stars[\"per\"][i-planets+x] < max_period):\n\t\tshort_planets += 1\n\t\tstar_config.write(\"anybasis_params['per\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"per\"])+\")\\n\")\n\t\tstar_config.write(\"anybasis_params['tp\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"t_p\"])+\")\\n\")\n\t\tstar_config.write(\"anybasis_params['e\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"e\"])+\")\\n\")\n\t\tstar_config.write(\"anybasis_params['w\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"omega\"])+\")\\n\")\n\t\tstar_config.write(\"anybasis_params['k\"+str(short_planets)+\"'] = radvel.Parameter(value=\"+str(stars[i-planets+x][\"K\"])+\")\\n\")\nstar_config.write(\"params = anybasis_params.basis.to_any_basis(anybasis_params,fitting_basis)\\n\\n\")\n#loop to add planets (after)\nshort_planets = 0\nfor x in np.arange(1, planets+1):\n\tif (stars[\"per\"][i-planets+x] < max_period):\n\t\tshort_planets += 1\n\t\tstar_config.write(\"params['per\"+str(short_planets)+\"'].vary = True\\n\")\n\t\tstar_config.write(\"params['tc\"+str(short_planets)+\"'].vary = True\\n\")\n\t\tstar_config.write(\"params['e\"+str(short_planets)+\"'].vary = False\\n\")\n\t\tstar_config.write(\"params['w\"+str(short_planets)+\"'].vary = False\\n\")\n\t\tstar_config.write(\"params['k\"+str(short_planets)+\"'].vary = True\\n\")\n#write end of file\nstar_config.write(\"params['dvdt'].vary = True\\n\")\nstar_config.write(\"params['curv'].vary = True\\n\")\nstar_config.write(\"params['gamma_NEID'].vary = True\\n\")\nstar_config.write(\"params['jit_NEID'].vary = False\\n\\n\")\nstar_config.write(\"path = '\"+str(stars['RVfilename'][i])+\"'\\n\")\nstar_config.write(\"data = pd.read_csv(path)\\n\")\nstar_config.write(\"data['time'] = data.obs_start\\n\")\nstar_config.write(\"data['mnvel'] = data.mnvel\\n\")\nstar_config.write(\"data['errvel'] = data.rvprec\\n\")\nstar_config.write(\"data['tel'] = 'NEID'\\n\\n\")\nstar_config.write(\"priors = [\\n\")\nstar_config.write(\" radvel.prior.EccentricityPrior( nplanets, ),\\n\")\nstar_config.write(\" radvel.prior.PositiveKPrior( nplanets ),\\n\")\nstar_config.write(\" radvel.prior.HardBounds('jit_NEID', 0.0, 15.0)\\n\")\nstar_config.write(\"]\\n\\n\")\nstar_config.write(\"time_base = np.mean([np.min(data.time), np.max(data.time)])\\n\")\nstar_config.close()\n",
"'''Attempts to generate configuration files for a given list of stars and their associated planets.'''\nimport numpy as np\nimport datetime\n\n#initializing\nstars = np.genfromtxt(\"planets.csv\", delimiter=',', names=True, dtype=(\"U23\", \"U9\", int, float, float, float, float, float, float, float, float, float, float))\nstar_name = stars[0][\"HIPnumber\"]\n\nfor i in np.arange(1,len(stars)):\n\tif (star_name != stars[i][\"HIPnumber\"]):\n\t\tplanets = stars[i-1][\"PlanetNumber\"]\n\t\t#start a new file\n\t\tnow = str((datetime.datetime.now()).isoformat())\n\t\tstar_config = open(star_name+\"fp.py\", 'w')\n\t\tstar_config.write(\"# Test Keplerian fit configuration file for \"+star_name+\"\\n\")\n\t\tstar_config.write(\"# Features: All planets fit via period, tc, k; actual values for e, tc used (e can vary)\\n\")\n\t\tstar_config.write(\"# Generated on \"+now+\"\\n\\n\")\n\t\tstar_config.write(\"import os\\n\")\n\t\tstar_config.write(\"import pandas as pd\\n\")\n\t\tstar_config.write(\"import numpy as np\\n\")\n\t\tstar_config.write(\"import radvel\\n\")\n\t\tstar_config.write(\"starname = '\"+star_name+\"'\\n\")\n\t\tstar_config.write(\"nplanets = \"+str(planets)+\"\\n\")\n\t\tstar_config.write(\"instnames = [\\'NEID\\']\\n\")\n\t\tstar_config.write(\"ntels = len(instnames)\\n\")\n\t\tstar_config.write(\"fitting_basis = \\'per tc e w k\\'\\n\")\n\t\tstar_config.write(\"bjd0 = 2458850.\\n\")\n\t\tstar_config.write(\"planet_letters = {1: 'b'\")\n\t\tif (planets >= 2):\n\t\t\tstar_config.write(\", 2: 'c'\")\n\t\tif (planets >= 3):\n\t\t\tstar_config.write(\", 3: 'd'\")\n\t\tif (planets >= 4):\n\t\t\tstar_config.write(\", 4: 'e'\")\n\t\tif (planets >= 5):\n\t\t\tstar_config.write(\", 5: 'f'\")\n\t\tif (planets >= 6):\n\t\t\tstar_config.write(\", 6: 'g'\")\n\t\tif (planets >= 7):\n\t\t\tstar_config.write(\", 7: 'h'\")\n\t\tif (planets >= 8):\n\t\t\tstar_config.write(\", 8: 'i'\")\n\t\tif (planets >= 9):\n\t\t\tstar_config.write(\", 9: 'j'\")\n\t\tif (planets >= 10):\n\t\t\tstar_config.write(\", 10: 'k'\")\n\t\tstar_config.write(\"}\\n\\n\")\n\t\tstar_config.write(\"anybasis_params = radvel.Parameters(nplanets,basis='per tp e w k')\\n\\n\")\n\t\tstar_config.write(\"anybasis_params['dvdt'] = radvel.Parameter(value=0.0)\\n\")\n\t\tstar_config.write(\"anybasis_params['curv'] = radvel.Parameter(value=0.0)\\n\")\n\t\tstar_config.write(\"anybasis_params['gamma_NEID'] = radvel.Parameter(value=0.0)\\n\")\n\t\tstar_config.write(\"anybasis_params['jit_NEID'] = radvel.Parameter(value=0.0)\\n\")\n\t\t#loop to add planets (before)\n\t\tfor x in np.arange(1, planets+1):\n\t\t\tstar_config.write(\"anybasis_params['per\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"per\"])+\")\\n\")\n\t\t\tstar_config.write(\"anybasis_params['tp\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"t_p\"])+\")\\n\")\n\t\t\tstar_config.write(\"anybasis_params['e\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"e\"])+\")\\n\")\n\t\t\tstar_config.write(\"anybasis_params['w\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"omega\"])+\")\\n\")\n\t\t\tstar_config.write(\"anybasis_params['k\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"K\"])+\")\\n\")\n\t\tstar_config.write(\"\\nparams = anybasis_params.basis.to_any_basis(anybasis_params,fitting_basis)\\n\\n\")\n\t\t#loop to add planets (after)\n\t\tfor x in np.arange(1, 
planets+1):\n\t\t\tstar_config.write(\"params['per\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = True\\n\")\n\t\t\tstar_config.write(\"params['tc\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = True\\n\")\n\t\t\tstar_config.write(\"params['e\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = True\\n\")\n\t\t\tstar_config.write(\"params['w\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = False\\n\")\n\t\t\tstar_config.write(\"params['k\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = True\\n\")\n\t\t#write end of file\n\t\tstar_config.write(\"params['dvdt'].vary = False\\n\")\n\t\tstar_config.write(\"params['curv'].vary = False\\n\")\n\t\tstar_config.write(\"params['gamma_NEID'].vary = False\\n\")\n\t\tstar_config.write(\"params['jit_NEID'].vary = False\\n\\n\")\n\t\tstar_config.write(\"path = '\"+str(stars['RVfilename'][i-1])+\"'\\n\")\n\t\tstar_config.write(\"data = pd.read_csv(path)\\n\")\n\t\tstar_config.write(\"data['time'] = (data.obs_start+data.obs_end)/2\\n\")\n\t\tstar_config.write(\"data['mnvel'] = data.mnvel\\n\")\n\t\tstar_config.write(\"data['errvel'] = data.rvprec\\n\")\n\t\tstar_config.write(\"data['tel'] = 'NEID'\\n\\n\")\n\t\tstar_config.write(\"priors = [\\n\")\n\t\tstar_config.write(\" radvel.prior.EccentricityPrior( nplanets, upperlims=0.99),\\n\")\n\t\tstar_config.write(\" radvel.prior.PositiveKPrior( nplanets ),\\n\")\n\t\tstar_config.write(\" radvel.prior.HardBounds('jit_NEID', 0.0, 15.0)\\n\")\n\t\tstar_config.write(\"]\\n\\n\")\n\t\tstar_config.write(\"time_base = np.mean([np.min(data.time), np.max(data.time)])\\n\")\n\t\tstar_config.close()\n\t\tstar_name = stars[i][\"HIPnumber\"]\nstar_name = stars[i][\"HIPnumber\"]\nplanets = stars[i][\"PlanetNumber\"]\n#start a new file\nnow = str((datetime.datetime.now()).isoformat())\nstar_name = stars[i][\"HIPnumber\"]\nstar_config = open(star_name+\"fp.py\", 'w')\nstar_config.write(\"# Test Keplerian fit configuration file for \"+star_name+\"\\n\")\nstar_config.write(\"# Features: All planets fit via period, tc, k; actual values for e, tc used (e can vary)\\n\")\nstar_config.write(\"# Generated on \"+now+\"\\n\\n\")\nstar_config.write(\"import os\\n\")\nstar_config.write(\"import pandas as pd\\n\")\nstar_config.write(\"import numpy as np\\n\")\nstar_config.write(\"import radvel\\n\")\nstar_config.write(\"starname = '\"+star_name+\"'\\n\")\nstar_config.write(\"nplanets = \"+str(planets)+\"\\n\")\nstar_config.write(\"instnames = [\\'NEID\\']\\n\")\nstar_config.write(\"ntels = len(instnames)\\n\")\nstar_config.write(\"fitting_basis = \\'per tc e w k\\'\\n\")\nstar_config.write(\"bjd0 = 2458850.\\n\")\nstar_config.write(\"planet_letters = {1: 'b'\")\nif (planets >= 2):\n\tstar_config.write(\", 2: 'c'\")\nif (planets >= 3):\n\tstar_config.write(\", 3: 'd'\")\nif (planets >= 4):\n\tstar_config.write(\", 4: 'e'\")\nif (planets >= 5):\n\tstar_config.write(\", 5: 'f'\")\nif (planets >= 6):\n\tstar_config.write(\", 6: 'g'\")\nif (planets >= 7):\n\tstar_config.write(\", 7: 'h'\")\nif (planets >= 8):\n\tstar_config.write(\", 8: 'i'\")\nif (planets >= 9):\n\tstar_config.write(\", 9: 'j'\")\nif (planets >= 10):\n\tstar_config.write(\", 10: 'k'\")\nstar_config.write(\"}\\n\\n\")\nstar_config.write(\"anybasis_params = radvel.Parameters(nplanets,basis='per tp e w k')\\n\\n\")\nstar_config.write(\"anybasis_params['dvdt'] = radvel.Parameter(value=0.0)\\n\")\nstar_config.write(\"anybasis_params['curv'] = radvel.Parameter(value=0.0)\\n\")\nstar_config.write(\"anybasis_params['gamma_NEID'] = 
radvel.Parameter(value=0.0)\\n\")\nstar_config.write(\"anybasis_params['jit_NEID'] = radvel.Parameter(value=0.0)\\n\")\n#loop to add planets (before)\nfor x in np.arange(0, planets):\n\tstar_config.write(\"anybasis_params['per\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"per\"])+\")\\n\")\n\tstar_config.write(\"anybasis_params['tp\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"t_p\"])+\")\\n\")\n\tstar_config.write(\"anybasis_params['e\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"e\"])+\")\\n\")\n\tstar_config.write(\"anybasis_params['w\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"omega\"])+\")\\n\")\n\tstar_config.write(\"anybasis_params['k\"+str(stars[\"PlanetNumber\"][i-x])+\"'] = radvel.Parameter(value=\"+str(stars[i-x][\"K\"])+\")\\n\")\nstar_config.write(\"params = anybasis_params.basis.to_any_basis(anybasis_params,fitting_basis)\\n\\n\")\n#loop to add planets (after)\nfor x in np.arange(0, planets):\n\tstar_config.write(\"params['per\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = True\\n\")\n\tstar_config.write(\"params['tc\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = True\\n\")\n\tstar_config.write(\"params['e\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = True\\n\")\n\tstar_config.write(\"params['w\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = False\\n\")\n\tstar_config.write(\"params['k\"+str(stars[\"PlanetNumber\"][i-x])+\"'].vary = True\\n\")\n#write end of file\nstar_config.write(\"params['dvdt'].vary = False\\n\")\nstar_config.write(\"params['curv'].vary = False\\n\")\nstar_config.write(\"params['gamma_NEID'].vary = False\\n\")\nstar_config.write(\"params['jit_NEID'].vary = False\\n\\n\")\nstar_config.write(\"path = '\"+str(stars['RVfilename'][i])+\"'\\n\")\nstar_config.write(\"data = pd.read_csv(path)\\n\")\nstar_config.write(\"data['time'] = data.obs_start\\n\")\nstar_config.write(\"data['mnvel'] = data.mnvel\\n\")\nstar_config.write(\"data['errvel'] = data.rvprec\\n\")\nstar_config.write(\"data['tel'] = 'NEID'\\n\\n\")\nstar_config.write(\"priors = [\\n\")\nstar_config.write(\" radvel.prior.EccentricityPrior( nplanets, upperlims=0.99),\\n\")\nstar_config.write(\" radvel.prior.PositiveKPrior( nplanets ),\\n\")\nstar_config.write(\" radvel.prior.HardBounds('jit_NEID', 0.0, 15.0)\\n\")\nstar_config.write(\"]\\n\\n\")\nstar_config.write(\"time_base = np.mean([np.min(data.time), np.max(data.time)])\\n\")\nstar_config.close()\n"
] | [
[
"numpy.arange",
"numpy.genfromtxt"
],
[
"numpy.arange",
"numpy.genfromtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
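Both generator scripts above load `planets.csv` with `np.genfromtxt(..., names=True)` and emit one radvel configuration per star by watching for changes in the `HIPnumber` column. A compact sketch of that grouping step, with an in-memory CSV of hypothetical values standing in for the real file:

```python
import io
import numpy as np

# Stand-in for planets.csv: two stars, three planets (hypothetical values).
csv = io.StringIO(
    "HIPnumber,RVfilename,PlanetNumber,per,t_p,e,omega,K\n"
    "HIP1,rv1.csv,1,12.3,2458851.0,0.1,90.0,3.2\n"
    "HIP1,rv1.csv,2,45.6,2458860.0,0.0,0.0,1.1\n"
    "HIP2,rv2.csv,1,7.8,2458852.0,0.2,45.0,5.0\n"
)
stars = np.genfromtxt(csv, delimiter=',', names=True,
                      dtype=("U23", "U9", int, float, float, float, float, float))

# Group consecutive rows that share a HIPnumber, as the scripts do implicitly.
groups = {}
for row in stars:
    groups.setdefault(row["HIPnumber"], []).append(row)
for star, rows in groups.items():
    print(star, [float(r["per"]) for r in rows])
# HIP1 [12.3, 45.6]
# HIP2 [7.8]
```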
hanlint/composer | [
"83d96b7efde533cbc2fff7dd7e0769da2b177807"
] | [
"composer/algorithms/mixup/mixup.py"
] | [
"# Copyright 2021 MosaicML. All Rights Reserved.\n\n\"\"\"Core MixUp classes and functions.\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport torch\n\nfrom composer.core import Algorithm, Event, State\nfrom composer.loggers import Logger\nfrom composer.loss.utils import ensure_targets_one_hot\n\nlog = logging.getLogger(__name__)\n\n__all__ = [\"MixUp\", \"mixup_batch\"]\n\n\ndef mixup_batch(input: torch.Tensor,\n target: torch.Tensor,\n mixing: Optional[float] = None,\n alpha: float = 0.2,\n indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor, float]:\n \"\"\"Create new samples using convex combinations of pairs of samples.\n\n This is done by taking a convex combination of ``input`` with a randomly\n permuted copy of ``input``. The permutation takes place along the sample\n axis (dim 0).\n\n The relative weight of the original ``input`` versus the permuted copy is\n defined by the ``mixing`` parameter. This parameter should be chosen\n from a ``Beta(alpha, alpha)`` distribution for some parameter ``alpha > 0``.\n Note that the same ``mixing`` is used for the whole batch.\n\n Args:\n input (torch.Tensor): input tensor of shape ``(minibatch, ...)``, where\n ``...`` indicates zero or more dimensions.\n target (torch.Tensor): target tensor of shape ``(minibatch, ...)``, where\n ``...`` indicates zero or more dimensions.\n mixing (float, optional): coefficient used to interpolate\n between the two examples. If provided, must be in :math:`[0, 1]`.\n If ``None``, value is drawn from a ``Beta(alpha, alpha)``\n distribution. Default: ``None``.\n alpha (float, optional): parameter for the Beta distribution over\n ``mixing``. Ignored if ``mixing`` is provided. Default: ``0.2``.\n indices (Tensor, optional): Permutation of the samples to use.\n Default: ``None``.\n\n Returns:\n input_mixed (torch.Tensor): batch of inputs after mixup has been applied\n target_perm (torch.Tensor): The labels of the mixed-in examples\n mixing (torch.Tensor): the amount of mixing used\n\n Example:\n .. testcode::\n\n import torch\n from composer.functional import mixup_batch\n\n N, C, H, W = 2, 3, 4, 5\n X = torch.randn(N, C, H, W)\n y = torch.randint(num_classes, size=(N,))\n X_mixed, y_perm, mixing = mixup_batch(\n X, y, alpha=0.2)\n \"\"\"\n if mixing is None:\n mixing = _gen_mixing_coef(alpha)\n # Create permuted versions of x and y in preparation for interpolation\n # Use given indices if there are any.\n if indices is None:\n permuted_idx = _gen_indices(input.shape[0])\n else:\n permuted_idx = indices\n x_permuted = input[permuted_idx]\n permuted_target = target[permuted_idx]\n # Interpolate between the inputs\n x_mixup = (1 - mixing) * input + mixing * x_permuted\n\n return x_mixup, permuted_target, mixing\n\n\nclass MixUp(Algorithm):\n \"\"\"`MixUp <https://arxiv.org/abs/1710.09412>`_ trains the network on convex combinations of pairs of examples and\n targets rather than individual examples and targets.\n\n This is done by taking a convex combination of a given batch X with a\n randomly permuted copy of X. The mixing coefficient is drawn from a\n ``Beta(alpha, alpha)`` distribution.\n\n Training in this fashion sometimes reduces generalization error.\n\n Args:\n alpha (float, optional): the psuedocount for the Beta distribution used to sample\n mixing parameters. As ``alpha`` grows, the two samples\n in each pair tend to be weighted more equally. 
As ``alpha``\n approaches 0 from above, the combination approaches only using\n one element of the pair. Default: ``0.2``.\n interpolate_loss (bool, optional): Interpolates the loss rather than the labels.\n A useful trick when using a cross entropy loss. Will produce incorrect behavior if the loss is not a linear\n function of the targets. Default: ``False``\n\n Example:\n .. testcode::\n\n from composer.algorithms import MixUp\n algorithm = MixUp(alpha=0.2)\n trainer = Trainer(\n model=model,\n train_dataloader=train_dataloader,\n eval_dataloader=eval_dataloader,\n max_duration=\"1ep\",\n algorithms=[algorithm],\n optimizers=[optimizer]\n )\n \"\"\"\n\n def __init__(self, alpha: float = 0.2, interpolate_loss: bool = False):\n self.alpha = alpha\n self.interpolate_loss = interpolate_loss\n self.mixing = 0.0\n self.indices = torch.Tensor()\n self.permuted_target = torch.Tensor()\n\n def match(self, event: Event, state: State) -> bool:\n if self.interpolate_loss:\n return event in [Event.BEFORE_FORWARD, Event.BEFORE_BACKWARD]\n else:\n return event in [Event.BEFORE_FORWARD, Event.BEFORE_LOSS]\n\n def apply(self, event: Event, state: State, logger: Logger) -> None:\n input, target = state.batch_pair\n\n if event == Event.BEFORE_FORWARD:\n if not isinstance(input, torch.Tensor):\n raise NotImplementedError(\"Multiple tensors for inputs not supported yet.\")\n if not isinstance(target, torch.Tensor):\n raise NotImplementedError(\"Multiple tensors for targets not supported yet.\")\n\n self.mixing = _gen_mixing_coef(self.alpha)\n self.indices = _gen_indices(input.shape[0])\n\n new_input, self.permuted_target, _ = mixup_batch(\n input,\n target,\n mixing=self.mixing,\n indices=self.indices,\n )\n\n state.batch = (new_input, target)\n\n if not self.interpolate_loss and event == Event.BEFORE_LOSS:\n # Interpolate the targets\n if not isinstance(state.outputs, torch.Tensor):\n raise NotImplementedError(\"Multiple output tensors not supported yet\")\n if not isinstance(target, torch.Tensor):\n raise NotImplementedError(\"Multiple target tensors not supported yet\")\n # Make sure that the targets are dense/one-hot\n target = ensure_targets_one_hot(state.outputs, target)\n permuted_target = ensure_targets_one_hot(state.outputs, self.permuted_target)\n # Interpolate to get the new target\n mixed_up_target = (1 - self.mixing) * target + self.mixing * permuted_target\n # Create the new batch\n state.batch = (input, mixed_up_target)\n\n if self.interpolate_loss and event == Event.BEFORE_BACKWARD:\n # Grab the loss function\n if hasattr(state.model, \"loss\"):\n loss_fn = state.model.loss\n elif hasattr(state.model, \"module\") and hasattr(state.model.module, \"loss\"):\n if isinstance(state.model.module, torch.nn.Module):\n loss_fn = state.model.module.loss\n else:\n raise TypeError(\"state.model.module must be a torch module\")\n else:\n raise AttributeError(\"Loss must be accesable via model.loss or model.module.loss\")\n # Verify that the loss is callable\n if not callable(loss_fn):\n raise TypeError(\"Loss must be callable\")\n # Interpolate the loss\n new_loss = loss_fn(state.outputs, (input, self.permuted_target))\n if not isinstance(state.loss, torch.Tensor):\n raise NotImplementedError(\"Multiple losses not supported yet\")\n if not isinstance(new_loss, torch.Tensor):\n raise NotImplementedError(\"Multiple losses not supported yet\")\n state.loss = (1 - self.mixing) * state.loss + self.mixing * new_loss\n\n\ndef _gen_mixing_coef(alpha: float) -> float:\n \"\"\"Samples ``max(z, 1-z), z ~ Beta(alpha, 
alpha)``.\"\"\"\n # First check if alpha is positive.\n assert alpha >= 0\n # Draw the mixing parameter from a beta distribution.\n # Check here is needed because beta distribution requires alpha > 0\n # but alpha = 0 is fine for mixup.\n if alpha == 0:\n mixing_lambda = 0\n else:\n mixing_lambda = np.random.beta(alpha, alpha)\n # for symmetric beta distribution, can always use 0 <= lambda <= .5;\n # this way the \"main\" label is always the original one, which keeps\n # the training accuracy meaningful\n return min(mixing_lambda, 1. - mixing_lambda)\n\n\ndef _gen_indices(num_samples: int) -> torch.Tensor:\n \"\"\"Generates a random permutation of the batch indices.\"\"\"\n return torch.randperm(num_samples)\n"
] | [
[
"torch.randperm",
"numpy.random.beta",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
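The MixUp entry above interpolates inputs and dense targets with a coefficient drawn from a symmetric Beta distribution and a random pairing permutation. Below is a minimal standalone sketch of that mixing step, outside the Trainer/Event machinery; the batch shapes, the number of classes, and the one-hot targets are illustrative assumptions, and mixup_plain is a hypothetical helper, not composer's mixup_batch.

import numpy as np
import torch
import torch.nn.functional as F

def gen_mixing_coef(alpha: float) -> float:
    # Beta(alpha, alpha) requires alpha > 0; alpha == 0 degenerates to no mixing.
    lam = 0.0 if alpha == 0 else np.random.beta(alpha, alpha)
    # Keep the coefficient in [0, 0.5] so the original sample always dominates.
    return min(lam, 1.0 - lam)

def mixup_plain(x: torch.Tensor, y_one_hot: torch.Tensor, alpha: float = 0.2):
    # Hypothetical helper: interpolate a batch with a permuted copy of itself
    # using a single mixing coefficient, as in the entry's apply() method.
    lam = gen_mixing_coef(alpha)
    perm = torch.randperm(x.shape[0])
    x_mix = (1 - lam) * x + lam * x[perm]
    y_mix = (1 - lam) * y_one_hot + lam * y_one_hot[perm]
    return x_mix, y_mix, lam

x = torch.randn(8, 3, 32, 32)
y = F.one_hot(torch.randint(0, 10, (8,)), num_classes=10).float()
x_mix, y_mix, lam = mixup_plain(x, y)
print(lam, x_mix.shape, y_mix.shape)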
Erotemic/utool | [
"9fbbceefed71ab4b38ab806b998fefc9b873f205"
] | [
"utool/Preferences.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nFIXME:\n This class is very old, convoluted, and coupled.\n It really needs to be rewritten efficiently.\n the __setattr__ __getattr__ stuff needs to be redone, and\n DynStruct probably needs to go away.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport six\nfrom six.moves import cPickle as pickle\ntry:\n import numpy as np\nexcept ImportError as ex:\n pass\nfrom utool import DynamicStruct\nfrom utool import util_dbg\nfrom utool import util_arg\nfrom utool import util_type\nfrom utool import util_inject\n# print, rrr, profile = util_inject.inject(__name__, '[pref]')\nutil_inject.noinject(__name__, '[pref]')\n\n# ---\n# GLOBALS\n# ---\nPrefNode = DynamicStruct.DynStruct\n\n\nVERBOSE_PREF = util_arg.get_argflag('--verbpref')\n\n\n# ---\n# Classes\n# ---\nclass PrefInternal(DynamicStruct.DynStruct):\n def __init__(_intern, name, doc, default, hidden, fpath, depeq, choices):\n super(PrefInternal, _intern).__init__(child_exclude_list=[])\n # self._intern describes this node\n _intern.name = name # A node has a name\n _intern.doc = doc # A node has a name\n _intern.value = default # A node has a value\n _intern.hidden = hidden # A node can be hidden\n _intern.fpath = fpath # A node is cached to\n _intern.depeq = depeq # A node depends on\n _intern.editable = True # A node can be uneditable\n _intern._frozen_type = None # A node's value type\n # Some preferences are constrained to a list of choices\n if choices is not None:\n _intern.value = PrefChoice(choices, default)\n\n def get_type(_intern):\n if _intern._frozen_type is not None:\n return _intern._frozen_type\n else:\n return type(_intern.value)\n\n def freeze_type(_intern):\n _intern._frozen_type = _intern.get_type()\n\n\nclass PrefTree(DynamicStruct.DynStruct):\n def __init__(_tree, parent):\n super(PrefTree, _tree).__init__(child_exclude_list=[])\n # self._tree describes node's children and the parents\n # relationship to them\n _tree.parent = parent # Children have parents\n _tree.hidden_children = [] # Children can be hidden\n _tree.child_list = [] # There can be many children\n _tree.child_names = [] # Each child has a name\n _tree.num_visible_children = 0 # Statistic\n _tree.aschildx = 0 # This node is the x-th child\n\n\nclass PrefChoice(DynamicStruct.DynStruct):\n def __init__(self, choices, default):\n super(PrefChoice, self).__init__(child_exclude_list=[])\n self.choices = choices\n self.sel = 0\n self.change_val(default)\n\n def change_val(self, new_val):\n # Try to select by index\n if isinstance(new_val, int):\n self.sel = new_val\n # Try to select by value\n elif isinstance(new_val, six.string_types):\n self.sel = self.choices.index(new_val)\n else:\n raise('Exception: Unknown newval=%r' % new_val)\n if self.sel < 0 or self.sel > len(self.choices):\n raise Exception('self.sel=%r is not in the self.choices=%r '\n % (self.sel, self.choices))\n\n def combo_val(self):\n return self.choices[self.sel]\n\n def get_tuple(self):\n return (self.sel, self.choices)\n\n\nclass Pref(PrefNode):\n \"\"\"\n Structure for Creating Preferences.\n Caveats: When using a value call with ['valname'] to be safe\n Features:\n * Can be saved and loaded.\n * Can be nested\n * Dynamically add/remove\n \"\"\"\n def __init__(self,\n default=PrefNode, # Default value for a Pref is to be itself\n doc='empty docs', # Documentation for a preference\n hidden=False, # Is a hidden preference?\n choices=None, # A list of choices\n depeq=None, # List of tuples representing 
dependencies\n fpath='', # Where to save to\n name='root', # Name of this node\n parent=None): # Reference to parent Pref\n \"\"\"\n Creates a pref struct that will save itself to pref_fpath if\n available and have init all members of some dictionary\n \"\"\"\n super(Pref, self).__init__(child_exclude_list=['_intern', '_tree'])\n # Private internal structure\n self._intern = PrefInternal(name, doc, default, hidden, fpath, depeq,\n choices)\n self._tree = PrefTree(parent)\n #if default is PrefNode:\n # print('----------')\n # print('new Pref(default=PrefNode)')\n\n def get_type(self):\n return self._intern.get_type()\n\n def ensure_attr(self, attr, default):\n if not hasattr(self, attr):\n setattr(self, attr, default)\n return getattr(self, attr)\n\n # -------------------\n # Attribute Setters\n def toggle(self, key):\n \"\"\" Toggles a boolean key \"\"\"\n val = self[key]\n assert isinstance(val, bool), 'key[%r] = %r is not a bool' % (key, val)\n self.pref_update(key, not val)\n\n def change_combo_val(self, new_val):\n \"\"\"\n Checks to see if a selection is a valid index or choice of a combo\n preference\n \"\"\"\n choice_obj = self._intern.value\n assert isinstance(self._intern.value, PrefChoice), 'must be a choice'\n return choice_obj.get_tuple()\n\n def __overwrite_child_attr(self, name, attr):\n # FIXME: when setting string preference nodes to lists, it casts\n # the list to a string!\n if VERBOSE_PREF:\n print('[pref.__overwrite_child_attr]: %s.%s = %r' % (self._intern.name, name, attr))\n # get child node to \"overwrite\"\n row = self._tree.child_names.index(name)\n child = self._tree.child_list[row]\n if isinstance(attr, Pref):\n # Do not break pointers when overwriting a Preference\n if issubclass(attr._intern.value, PrefNode):\n # Main Branch Logic\n for (key, val) in six.iteritems(attr):\n child.__setattr__(key, val)\n else:\n self.__overwrite_child_attr(name, attr.value())\n else: # Main Leaf Logic:\n #assert(not issubclass(child._intern.type, PrefNode), #(self.full_name() + ' Must be a leaf'))\n # Keep user-readonly map up to date with internals\n if isinstance(child._intern.value, PrefChoice):\n child.change_combo_val(attr)\n else:\n child_type = child._intern.get_type()\n if isinstance(attr, six.string_types) and issubclass(child_type, six.string_types):\n #import utool as ut\n #ut.embed()\n attr = child_type(attr)\n attr_type = type(attr)\n if attr is not None and child_type is not attr_type:\n if util_arg.VERBOSE:\n print('[pref] WARNING TYPE DIFFERENCE!')\n print('[pref] * expected child_type = %r' % (child_type,))\n print('[pref] * got attr_type = %r' % (attr_type,))\n print('[pref] * name = %r' % (name,))\n print('[pref] * attr = %r' % (attr,))\n attr = util_type.try_cast(attr, child_type, attr)\n child._intern.value = attr\n self.__dict__[name] = child.value()\n\n def __new_attr(self, name, attr):\n \"\"\"\n On a new child attribute:\n 1) Check to see if it is wrapped by a Pref object\n 2) If not do so, if so add it to the tree structure\n \"\"\"\n if isinstance(attr, Pref):\n # Child attribute already has a Pref wrapping\n if VERBOSE_PREF:\n print('[pref.__new_attr]: %s.%s = %r' % (self._intern.name, name, attr.value()))\n new_childx = len(self._tree.child_names)\n # Children know about parents\n attr._tree.parent = self # Give child parent\n attr._intern.name = name # Give child name\n if attr._intern.depeq is None:\n attr._intern.depeq = self._intern.depeq # Give child parent dependencies\n if attr._intern.hidden:\n self._tree.hidden_children.append(new_childx)\n 
self._tree.hidden_children.sort()\n # Used for QTIndexing\n attr._intern.aschildx = new_childx\n # Parents know about children\n self._tree.child_names.append(name) # Add child to tree\n self._tree.child_list.append(attr)\n self.__dict__[name] = attr.value() # Add child value to dict\n else:\n # The child attribute is not wrapped. Wrap with Pref and readd.\n pref_attr = Pref(default=attr)\n self.__new_attr(name, pref_attr)\n\n # Attributes are children\n def __setattr__(self, name, attr):\n \"\"\"\n Called when an attribute assignment is attempted. This is called instead\n of the normal mechanism (i.e. store the value in the instance\n dictionary). name is the attribute name, value is the value to be\n assigned to it.\n\n If __setattr__() wants to assign to an instance attribute, it should not\n simply execute self.name = value this would cause a recursive call to\n itself. Instead, it should insert the value in the dictionary of\n instance attributes, e.g., self.__dict__[name] = value. For new-style\n classes, rather than accessing the instance dictionary, it should call\n the base class method with the same name, for example,\n object.__setattr__(self, name, value).\n 'Wraps child attributes in a Pref object if not already done'\n \"\"\"\n # No wrapping for private vars: _printable_exclude, _intern, _tree\n if name.find('_') == 0:\n return super(DynamicStruct.DynStruct, self).__setattr__(name, attr)\n # Overwrite if child exists\n if name in self._tree.child_names:\n self.__overwrite_child_attr(name, attr)\n else:\n self.__new_attr(name, attr)\n\n # -------------------\n # Attribute Getters\n def value(self):\n # Return the wrapper in all its glory\n if self._intern.value == PrefNode:\n return self\n # Return basic types\n elif isinstance(self._intern.value, PrefChoice):\n return self._intern.value.combo_val()\n else:\n return self._intern.value # TODO AS REFERENCE\n\n def __getattr__(self, name):\n \"\"\"\n Called when an attribute lookup has not found the attribute in the usual\n places\n (i.e. 
it is not an instance attribute nor is it found in the class tree\n for self).\n name is the attribute name.\n This method should return the (computed) attribute value or raise an\n AttributeError exception.\n Get a child from this parent called as last resort.\n Allows easy access to internal prefs\n \"\"\"\n if name.find('_') == 0:\n # Names that start with underscore actually belong to the Pref object\n return super(PrefNode, self).__getitem__[name]\n if len(name) > 9 and name[-9:] == '_internal':\n # internal names belong to the internal structure\n attrx = self._tree.child_names.index(name[:-9])\n return self._tree.child_list[attrx]\n #print(self._internal.name)\n #print(self._tree)\n try:\n if six.PY2:\n return super(PrefNode, self).__getitem__[name]\n else:\n try:\n base_self1 = super(PrefNode, self)\n #base_self2 = super(DynStruct, self)\n #base_self3 = super()\n #import utool\n #utool.embed()\n return base_self1[name]\n except Exception as ex:\n print(ex)\n print('base_self1 = %r' % (base_self1,))\n #print('base_self2 = %r' % (base_self2,))\n #print('base_self3 = %r' % (base_self3,))\n print('name = %r' % (name,))\n raise\n except Exception as ex:\n if name == 'trait_names':\n # HACK FOR IPYTHON\n pass\n else:\n import utool as ut\n if ut.DEBUG2 and ut.VERBOSE:\n ut.printex(ex, 'Pref object missing named attribute',\n keys=['self._intern.name', 'name'],\n iswarning=True)\n raise AttributeError(\n ('Pref object is missing named attribute: name=%r.'\n 'You might try running ibeis with --nocache-pref '\n 'to see if that fixes things.') % name)\n #raise\n\n def iteritems(self):\n \"\"\"\n Wow this class is messed up. I had to overwrite items when\n moving to python3, just because I haden't called it yet\n \"\"\"\n for (key, val) in six.iteritems(self.__dict__):\n if key in self._printable_exclude:\n continue\n yield (key, val)\n\n def items(self):\n \"\"\"\n Wow this class is messed up. I had to overwrite items when\n moving to python3, just because I haden't called it yet\n \"\"\"\n for (key, val) in six.iteritems(self.__dict__):\n if key in self._printable_exclude:\n continue\n yield (key, val)\n\n #----------------\n # Disk caching\n def to_dict(self, split_structs_bit=False):\n \"\"\" Converts prefeters to a dictionary.\n Children Pref can be optionally separated \"\"\"\n pref_dict = {}\n struct_dict = {}\n for (key, val) in six.iteritems(self):\n if split_structs_bit and isinstance(val, Pref):\n struct_dict[key] = val\n continue\n pref_dict[key] = val\n if split_structs_bit:\n return (pref_dict, struct_dict)\n return pref_dict\n\n asdict = to_dict\n\n def save(self):\n \"\"\" Saves prefs to disk in dict format \"\"\"\n fpath = self.get_fpath()\n if fpath in ['', None]:\n if self._tree.parent is not None:\n if VERBOSE_PREF:\n print('[pref.save] Can my parent save me?') # ...to disk\n return self._tree.parent.save()\n if VERBOSE_PREF:\n print('[pref.save] I cannot be saved. I have no parents.')\n return False\n with open(fpath, 'wb') as f:\n print('[pref] Saving to ' + fpath)\n pref_dict = self.to_dict()\n pickle.dump(pref_dict, f, protocol=2) # Use protocol 2 to support python2 and 3\n return True\n\n def get_fpath(self):\n return self._intern.fpath\n\n def load(self):\n \"\"\" Read pref dict stored on disk. Overwriting current values. 
\"\"\"\n if VERBOSE_PREF:\n print('[pref.load()]')\n #if not os.path.exists(self._intern.fpath):\n # msg = '[pref] fpath=%r does not exist' % (self._intern.fpath)\n # return msg\n fpath = self.get_fpath()\n try:\n with open(fpath, 'rb') as f:\n if VERBOSE_PREF:\n print('load: %r' % fpath)\n pref_dict = pickle.load(f)\n except EOFError as ex1:\n util_dbg.printex(ex1, 'did not load pref fpath=%r correctly' % fpath, iswarning=True)\n #warnings.warn(msg)\n raise\n #return msg\n except ImportError as ex2:\n util_dbg.printex(ex2, 'did not load pref fpath=%r correctly' % fpath, iswarning=True)\n #warnings.warn(msg)\n raise\n #return msg\n if not util_type.is_dict(pref_dict):\n raise Exception('Preference file is corrupted')\n self.add_dict(pref_dict)\n return True\n\n #----------------------\n # String representation\n def __str__(self):\n if self._intern.value != PrefNode:\n ret = super(PrefNode, self).__str__()\n #.replace('\\n ', '')\n ret += '\\nLEAF ' + repr(self._intern.name) + ':' + repr(self._intern.value)\n return ret\n else:\n ret = repr(self._intern.value)\n return ret\n\n def full_name(self):\n \"\"\" returns name all the way up the tree \"\"\"\n if self._tree.parent is None:\n return self._intern.name\n return self._tree.parent.full_name() + '.' + self._intern.name\n\n def get_printable(self, type_bit=True, print_exclude_aug=[]):\n # Remove unsatisfied dependencies from the printed structure\n further_aug = print_exclude_aug[:]\n for child_name in self._tree.child_names:\n depeq = self[child_name + '_internal']._intern.depeq\n if depeq is not None and depeq[0].value() != depeq[1]:\n further_aug.append(child_name)\n return super(Pref, self).get_printable(type_bit, print_exclude_aug=further_aug)\n\n def customPrintableType(self, name):\n if name in self._tree.child_names:\n row = self._tree.child_names.index(name)\n #child = self._tree.child_list[row] # child node to \"overwrite\"\n _typestr = type(self._tree.child_list[row]._intern.value)\n if util_type.is_str(_typestr):\n return _typestr\n\n def pref_update(self, key, new_val):\n \"\"\" Changes a preference value and saves it to disk \"\"\"\n print('Update and save pref from: %s=%r, to: %s=%r' %\n (key, six.text_type(self[key]), key, six.text_type(new_val)))\n self.__setattr__(key, new_val)\n return self.save()\n\n def update(self, **kwargs):\n #print('Updating Preference: kwargs = %r' % (kwargs))\n self_keys = set(self.__dict__.keys())\n for key, val in six.iteritems(kwargs):\n if key in self_keys:\n self.__setattr__(key, val)\n\n # Method for QTWidget\n def createQWidget(self):\n # moving gui code away from utool\n try:\n #from utool._internal.PreferenceWidget import EditPrefWidget\n try:\n from guitool_ibeis.PreferenceWidget import EditPrefWidget\n except ImportError:\n from guitool.PreferenceWidget import EditPrefWidget\n editpref_widget = EditPrefWidget(self)\n editpref_widget.show()\n return editpref_widget\n except ImportError as ex:\n util_dbg.printex(ex, 'Cannot create preference widget. 
Is guitool and PyQt 4/5 Installed')\n raise\n\n def qt_get_parent(self):\n return self._tree.parent\n\n def qt_parents_index_of_me(self):\n return self._tree.aschildx\n\n def qt_get_child(self, row):\n row_offset = (np.array(self._tree.hidden_children) <= row).sum()\n return self._tree.child_list[row + row_offset]\n\n def qt_row_count(self):\n return len(self._tree.child_list) - len(self._tree.hidden_children)\n\n def qt_col_count(self):\n return 2\n\n def qt_get_data(self, column):\n if column == 0:\n return self._intern.name\n data = self.value()\n if isinstance(data, Pref): # Recursive Case: Pref\n data = ''\n elif data is None:\n # Check for a get of None\n data = 'None'\n return data\n\n def qt_is_editable(self):\n uneditable_hack = ['feat_type']\n if self._intern.name in uneditable_hack:\n return False\n if self._intern.depeq is not None:\n return self._intern.depeq[0].value() == self._intern.depeq[1]\n #return self._intern.value is not None\n return self._intern.value != PrefNode\n\n def qt_set_leaf_data(self, qvar):\n return _qt_set_leaf_data(self, qvar)\n\n\ndef _qt_set_leaf_data(self, qvar):\n \"\"\" Sets backend data using QVariants \"\"\"\n if VERBOSE_PREF:\n print('')\n print('+--- [pref.qt_set_leaf_data]')\n print('[pref.qt_set_leaf_data] qvar = %r' % qvar)\n print('[pref.qt_set_leaf_data] _intern.name=%r' % self._intern.name)\n print('[pref.qt_set_leaf_data] _intern.type_=%r' % self._intern.get_type())\n print('[pref.qt_set_leaf_data] type(_intern.value)=%r' % type(self._intern.value))\n print('[pref.qt_set_leaf_data] _intern.value=%r' % self._intern.value)\n #print('[pref.qt_set_leaf_data] qvar.toString()=%s' % six.text_type(qvar.toString()))\n if self._tree.parent is None:\n raise Exception('[Pref.qtleaf] Cannot set root preference')\n if self.qt_is_editable():\n new_val = '[Pref.qtleaf] BadThingsHappenedInPref'\n if self._intern.value == PrefNode:\n raise Exception('[Pref.qtleaf] Qt can only change leafs')\n elif self._intern.value is None:\n # None could be a number of types\n def cast_order(var, order=[bool, int, float, six.text_type]):\n for type_ in order:\n try:\n ret = type_(var)\n return ret\n except Exception:\n continue\n new_val = cast_order(six.text_type(qvar))\n self._intern.get_type()\n if isinstance(self._intern.value, bool):\n #new_val = bool(qvar.toBool())\n print('qvar = %r' % (qvar,))\n new_val = util_type.smart_cast(qvar, bool)\n #new_val = bool(eval(qvar, {}, {}))\n print('new_val = %r' % (new_val,))\n elif isinstance(self._intern.value, int):\n #new_val = int(qvar.toInt()[0])\n new_val = int(qvar)\n # elif isinstance(self._intern.value, float):\n elif self._intern.get_type() in util_type.VALID_FLOAT_TYPES:\n #new_val = float(qvar.toDouble()[0])\n new_val = float(qvar)\n elif isinstance(self._intern.value, six.string_types):\n #new_val = six.text_type(qvar.toString())\n new_val = six.text_type(qvar)\n elif isinstance(self._intern.value, PrefChoice):\n #new_val = qvar.toString()\n new_val = six.text_type(qvar)\n if new_val.upper() == 'NONE':\n new_val = None\n else:\n try:\n #new_val = six.text_type(qvar.toString())\n type_ = self._intern.get_type()\n if type_ is not None:\n new_val = type_(six.text_type(qvar))\n else:\n new_val = six.text_type(qvar)\n except Exception:\n raise NotImplementedError(\n ('[Pref.qtleaf] Unknown internal type. 
'\n 'type(_intern.value) = %r, '\n '_intern.get_type() = %r, ')\n % type(self._intern.value), self._intern.get_type())\n # Check for a set of None\n if isinstance(new_val, six.string_types):\n if new_val.lower() == 'none':\n new_val = None\n elif new_val.lower() == 'true':\n new_val = True\n elif new_val.lower() == 'false':\n new_val = False\n # save to disk after modifying data\n if VERBOSE_PREF:\n print('---')\n print('[pref.qt_set_leaf_data] new_val=%r' % new_val)\n print('[pref.qt_set_leaf_data] type(new_val)=%r' % type(new_val))\n print('L____ [pref.qt_set_leaf_data]')\n # TODO Add ability to set a callback function when certain\n # preferences are changed.\n return self._tree.parent.pref_update(self._intern.name, new_val)\n return 'PrefNotEditable'\n\n\ndef test_Preferences():\n r\"\"\"\n CommandLine:\n python -m utool.Preferences --test-test_Preferences --show --verbpref\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> # xdoctest: +REQUIRES(module:guitool_ibeis)\n >>> # FIXME depends on guitool_ibei\n >>> from utool.Preferences import * # NOQA\n >>> import utool as ut\n >>> import guitool_ibeis\n >>> guitool_ibeis.ensure_qtapp()\n >>> root = test_Preferences()\n >>> ut.quit_if_noshow()\n >>> widget = root.createQWidget()\n >>> #widget.show()\n >>> guitool_ibeis.qtapp_loop(widget)\n \"\"\"\n root = Pref()\n root.a = Pref()\n root.strvar = 'foobar'\n root.intvar = 1\n root.floatvar = -1.3\n root.boolvar = True\n root.listvar = [True, 1, 3]\n\n root.a.strvar = 'foobar1'\n root.a.intvar = -1\n root.a.floatvar = 2.4\n root.a.boolvar2 = False\n return root\n\n\nif __name__ == '__main__':\n \"\"\"\n CommandLine:\n python -m utool.Preferences\n python -m utool.Preferences --allexamples\n python -m utool.Preferences --allexamples --noface --nosrc\n \"\"\"\n import multiprocessing\n multiprocessing.freeze_support() # for win32\n import utool as ut # NOQA\n ut.doctest_funcs()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
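The utool Preferences entry above builds a nested Pref tree whose leaves are wrapped and read back through __setattr__ / __getattr__. The sketch below mirrors the row's own test_Preferences() doctest and only exercises the in-memory API; it assumes the utool package is importable and does not touch the save/load disk-caching path.

from utool.Preferences import Pref

root = Pref()
root.a = Pref()                 # nested preference node
root.strvar = 'foobar'          # plain values are wrapped into leaf Pref objects
root.intvar = 1
root.boolvar = True
root.a.floatvar = 2.4

print(root.strvar)              # leaves read back as their unwrapped values
print(root.a.floatvar)
print(root.full_name())         # 'root'
print(root.a.full_name())       # 'root.a'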
MSiam/segment-any-moving_detectron | [
"ce3a68ef114a2af9e07d7d2240ac3822e150b686"
] | [
"lib/datasets/roidb.py"
] | [
"# Copyright (c) 2017-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n##############################################################################\n\n\"\"\"Functions for common roidb manipulations.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport six\nimport logging\nimport numpy as np\n\nimport utils.boxes as box_utils\nimport utils.keypoints as keypoint_utils\nimport utils.segms as segm_utils\nimport utils.blob as blob_utils\nfrom core.config import cfg\nfrom . import load_dataset\n\nlogger = logging.getLogger(__name__)\n\n\ndef combined_roidb_for_training(dataset_names, proposal_files):\n \"\"\"Load and concatenate roidbs for one or more datasets, along with optional\n object proposals. The roidb entries are then prepared for use in training,\n which involves caching certain types of metadata for each roidb entry.\n \"\"\"\n def get_roidb(dataset_name, proposal_file):\n ds = load_dataset(dataset_name, cfg.DATA_LOADER.INPUT_FRAME_OFFSETS)\n roidb = ds.get_roidb(\n gt=True,\n proposal_file=proposal_file,\n crowd_filter_thresh=cfg.TRAIN.CROWD_FILTER_THRESH\n )\n if cfg.TRAIN.USE_FLIPPED:\n logger.info('Appending horizontally-flipped training examples...')\n extend_with_flipped_entries(roidb, ds)\n logger.info('Loaded dataset: {:s}'.format(ds.name))\n return roidb\n\n if isinstance(dataset_names, six.string_types):\n dataset_names = (dataset_names, )\n if isinstance(proposal_files, six.string_types):\n proposal_files = (proposal_files, )\n if len(proposal_files) == 0:\n proposal_files = (None, ) * len(dataset_names)\n assert len(dataset_names) == len(proposal_files)\n roidbs = [get_roidb(*args) for args in zip(dataset_names, proposal_files)]\n roidb = roidbs[0]\n for r in roidbs[1:]:\n roidb.extend(r)\n roidb = filter_for_training(roidb)\n\n if cfg.TRAIN.ASPECT_GROUPING or cfg.TRAIN.ASPECT_CROPPING:\n logger.info('Computing image aspect ratios and ordering the ratios...')\n ratio_list, ratio_index = rank_for_training(roidb)\n logger.info('done')\n else:\n ratio_list, ratio_index = None, None\n\n logger.info('Computing bounding-box regression targets...')\n add_bbox_regression_targets(roidb)\n logger.info('done')\n\n _compute_and_log_stats(roidb)\n\n return roidb, ratio_list, ratio_index\n\n\ndef extend_with_flipped_entries(roidb, dataset):\n \"\"\"Flip each entry in the given roidb and return a new roidb that is the\n concatenation of the original roidb and the flipped entries.\n\n \"Flipping\" an entry means that that image and associated metadata (e.g.,\n ground truth boxes and object proposals) are horizontally flipped.\n \"\"\"\n flipped_roidb = []\n for entry in roidb:\n width = entry['width']\n boxes = entry['boxes'].copy()\n oldx1 = boxes[:, 0].copy()\n oldx2 = boxes[:, 2].copy()\n boxes[:, 0] = width - oldx2 - 1\n boxes[:, 2] = width - oldx1 - 1\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n flipped_entry = {}\n dont_copy = ('boxes', 
'segms', 'gt_keypoints', 'flipped')\n for k, v in entry.items():\n if k not in dont_copy:\n flipped_entry[k] = v\n flipped_entry['boxes'] = boxes\n flipped_entry['segms'] = segm_utils.flip_segms(\n entry['segms'], entry['height'], entry['width']\n )\n if dataset.keypoints is not None:\n flipped_entry['gt_keypoints'] = keypoint_utils.flip_keypoints(\n dataset.keypoints, dataset.keypoint_flip_map,\n entry['gt_keypoints'], entry['width']\n )\n flipped_entry['flipped'] = True\n flipped_roidb.append(flipped_entry)\n roidb.extend(flipped_roidb)\n\n\ndef filter_for_training(roidb):\n \"\"\"Remove roidb entries that have no usable RoIs based on config settings.\n \"\"\"\n def is_valid(entry):\n # Valid images have:\n # (1) At least one foreground RoI OR\n # (2) At least one background RoI\n overlaps = entry['max_overlaps']\n # find boxes with sufficient overlap\n fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0]\n # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)\n bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) &\n (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]\n # image is only valid if such boxes exist\n valid = len(fg_inds) > 0 or len(bg_inds) > 0\n if cfg.MODEL.KEYPOINTS_ON:\n # If we're training for keypoints, exclude images with no keypoints\n valid = valid and entry['has_visible_keypoints']\n return valid\n\n num = len(roidb)\n filtered_roidb = [entry for entry in roidb if is_valid(entry)]\n num_after = len(filtered_roidb)\n logger.info('Filtered {} roidb entries: {} -> {}'.\n format(num - num_after, num, num_after))\n return filtered_roidb\n\n\ndef rank_for_training(roidb):\n \"\"\"Rank the roidb entries according to image aspect ration and mark for cropping\n for efficient batching if image is too long.\n\n Returns:\n ratio_list: ndarray, list of aspect ratios from small to large\n ratio_index: ndarray, list of roidb entry indices correspond to the ratios\n \"\"\"\n RATIO_HI = cfg.TRAIN.ASPECT_HI # largest ratio to preserve.\n RATIO_LO = cfg.TRAIN.ASPECT_LO # smallest ratio to preserve.\n\n need_crop_cnt = 0\n\n ratio_list = []\n for entry in roidb:\n width = entry['width']\n height = entry['height']\n ratio = width / float(height)\n\n if cfg.TRAIN.ASPECT_CROPPING:\n if ratio > RATIO_HI:\n entry['need_crop'] = True\n ratio = RATIO_HI\n need_crop_cnt += 1\n elif ratio < RATIO_LO:\n entry['need_crop'] = True\n ratio = RATIO_LO\n need_crop_cnt += 1\n else:\n entry['need_crop'] = False\n else:\n entry['need_crop'] = False\n\n ratio_list.append(ratio)\n\n if cfg.TRAIN.ASPECT_CROPPING:\n logging.info('Number of entries that need to be cropped: %d. 
Ratio bound: [%.2f, %.2f]',\n need_crop_cnt, RATIO_LO, RATIO_HI)\n ratio_list = np.array(ratio_list)\n ratio_index = np.argsort(ratio_list)\n return ratio_list[ratio_index], ratio_index\n\ndef add_bbox_regression_targets(roidb):\n \"\"\"Add information needed to train bounding-box regressors.\"\"\"\n for entry in roidb:\n entry['bbox_targets'] = _compute_targets(entry)\n\n\ndef _compute_targets(entry):\n \"\"\"Compute bounding-box regression targets for an image.\"\"\"\n # Indices of ground-truth ROIs\n rois = entry['boxes']\n overlaps = entry['max_overlaps']\n labels = entry['max_classes']\n gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]\n # Targets has format (class, tx, ty, tw, th)\n targets = np.zeros((rois.shape[0], 5), dtype=np.float32)\n if len(gt_inds) == 0:\n # Bail if the image has no ground-truth ROIs\n return targets\n\n # Indices of examples for which we try to make predictions\n ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]\n\n # Get IoU overlap between each ex ROI and gt ROI\n ex_gt_overlaps = box_utils.bbox_overlaps(\n rois[ex_inds, :].astype(dtype=np.float32, copy=False),\n rois[gt_inds, :].astype(dtype=np.float32, copy=False))\n\n # Find which gt ROI each ex ROI has max overlap with:\n # this will be the ex ROI's gt target\n gt_assignment = ex_gt_overlaps.argmax(axis=1)\n gt_rois = rois[gt_inds[gt_assignment], :]\n ex_rois = rois[ex_inds, :]\n # Use class \"1\" for all boxes if using class_agnostic_bbox_reg\n targets[ex_inds, 0] = (\n 1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])\n targets[ex_inds, 1:] = box_utils.bbox_transform_inv(\n ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)\n return targets\n\n\ndef _compute_and_log_stats(roidb):\n classes = roidb[0]['dataset'].classes\n char_len = np.max([len(c) for c in classes])\n hist_bins = np.arange(len(classes) + 1)\n\n # Histogram of ground-truth objects\n gt_hist = np.zeros((len(classes)), dtype=np.int)\n for entry in roidb:\n gt_inds = np.where(\n (entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]\n gt_classes = entry['gt_classes'][gt_inds]\n gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]\n logger.debug('Ground-truth class histogram:')\n for i, v in enumerate(gt_hist):\n logger.debug(\n '{:d}{:s}: {:d}'.format(\n i, classes[i].rjust(char_len), v))\n logger.debug('-' * char_len)\n logger.debug(\n '{:s}: {:d}'.format(\n 'total'.rjust(char_len), np.sum(gt_hist)))\n"
] | [
[
"numpy.sum",
"numpy.argsort",
"numpy.array",
"numpy.histogram",
"numpy.where",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
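A self-contained numpy sketch of the horizontal box flip performed in extend_with_flipped_entries above; the image width and the (x1, y1, x2, y2) boxes are made-up values, and the assertion mirrors the sanity check in the original entry.

import numpy as np

width = 640
boxes = np.array([[10, 20, 110, 220],
                  [300, 50, 400, 150]], dtype=np.float32)   # (x1, y1, x2, y2)

flipped = boxes.copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
flipped[:, 0] = width - oldx2 - 1   # the new left edge comes from the old right edge
flipped[:, 2] = width - oldx1 - 1   # the new right edge comes from the old left edge
assert (flipped[:, 2] >= flipped[:, 0]).all()
print(flipped)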
MichaelKonobeev/derl | [
"2b9e1b2e989889a60dac2f9235f549dcec7ee554"
] | [
"derl/env/normalize.py"
] | [
"\"\"\" MuJoCo env wrappers. \"\"\"\n# Adapted from https://github.com/openai/baselines\nimport gym\nimport numpy as np\n\n\nclass RunningMeanVar:\n \"\"\" Computes running mean and variance.\n\n Args:\n eps (float): a small constant used to initialize mean to zero and\n variance to 1.\n shape tuple(int): shape of the statistics.\n \"\"\"\n # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm\n def __init__(self, eps=1e-4, shape=()):\n self.mean = np.zeros(shape)\n self.var = np.ones(shape)\n self.count = eps\n\n def update(self, batch):\n \"\"\" Updates the running statistics given a batch of samples. \"\"\"\n if not batch.shape[1:] == self.mean.shape:\n raise ValueError(f\"batch has invalid shape: {batch.shape}, \"\n f\"expected shape {(None,) + self.mean.shape}\")\n batch_mean = np.mean(batch, axis=0)\n batch_var = np.var(batch, axis=0)\n batch_count = batch.shape[0]\n self.update_from_moments(batch_mean, batch_var, batch_count)\n\n def update_from_moments(self, batch_mean, batch_var, batch_count):\n \"\"\" Updates the running statistics given their new values on new data. \"\"\"\n self.mean, self.var, self.count = update_mean_var_count_from_moments(\n self.mean, self.var, self.count, batch_mean, batch_var, batch_count)\n\n def save(self, filename):\n \"\"\" Saves statistics to a file. \"\"\"\n np.savez(filename, mean=self.mean, var=self.var, count=self.count)\n\n def restore(self, filename):\n \"\"\" Restores statistics from a file. \"\"\"\n npfile = np.load(filename)\n self.mean, self.var, self.count = (\n npfile[key] for key in [\"mean\", \"var\", \"count\"])\n\n\ndef update_mean_var_count_from_moments(mean, var, count,\n batch_mean, batch_var, batch_count):\n \"\"\" Updates running mean statistics given a new batch. \"\"\"\n delta = batch_mean - mean\n tot_count = count + batch_count\n\n new_mean = mean + delta * batch_count / tot_count\n new_var = (\n var * (count / tot_count)\n + batch_var * (batch_count / tot_count)\n + np.square(delta) * (count * batch_count / tot_count ** 2))\n new_count = tot_count\n\n return new_mean, new_var, new_count\n\n\nclass Normalize(gym.Wrapper):\n \"\"\"\n A vectorized wrapper that normalizes the observations\n and returns from an environment.\n \"\"\"\n # pylint: disable=too-many-arguments\n def __init__(self, env, obs=True, ret=True,\n clipobs=10., cliprew=10., gamma=0.99, eps=1e-8):\n super().__init__(env)\n self.obs_rmv = (RunningMeanVar(shape=self.observation_space.shape)\n if obs else None)\n self.ret_rmv = RunningMeanVar(shape=()) if ret else None\n self.clipob = clipobs\n self.cliprew = cliprew\n self.ret = np.zeros(getattr(self.env.unwrapped, \"nenvs\", 1))\n self.gamma = gamma\n self.eps = eps\n\n def save_wrapper(self, filename):\n \"\"\" Saves normalization stats to files. \"\"\"\n if filename.endswith(\"npz\"):\n filename = filename[:-3]\n if self.obs_rmv is not None:\n self.obs_rmv.save(f\"{filename}-obs-rmv\")\n if self.ret_rmv is not None:\n self.ret_rmv.save(f\"{filename}-ret-rmv\")\n\n def restore_wrapper(self, filename):\n \"\"\" Restores normalization statistics from a file. \"\"\"\n if self.obs_rmv is not None:\n self.obs_rmv.restore(f\"{filename}-obs-rmv.npz\")\n if self.ret_rmv is not None:\n self.ret_rmv.restore(f\"{filename}-ret-rmv.npz\")\n\n def observation(self, obs):\n \"\"\" Preprocesses a given observation. 
\"\"\"\n if not self.obs_rmv:\n return obs\n rmv_batch = (np.expand_dims(obs, 0)\n if not hasattr(self.env.unwrapped, \"nenvs\")\n else obs)\n self.obs_rmv.update(rmv_batch)\n obs = (obs - self.obs_rmv.mean) / np.sqrt(self.obs_rmv.var + self.eps)\n obs = np.clip(obs, -self.clipob, self.clipob)\n return obs\n\n def step(self, action):\n obs, rews, resets, info = self.env.step(action)\n self.ret = self.ret * self.gamma + rews\n obs = self.observation(obs)\n if self.ret_rmv:\n self.ret_rmv.update(self.ret)\n rews = np.clip(rews / np.sqrt(self.ret_rmv.var + self.eps),\n -self.cliprew, self.cliprew)\n self.ret[resets] = 0.\n return obs, rews, resets, info\n\n def reset(self, **kwargs):\n self.ret = np.zeros(getattr(self.env.unwrapped, \"nenvs\", 1))\n obs = self.env.reset(**kwargs)\n return self.observation(obs)\n"
] | [
[
"numpy.square",
"numpy.savez",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.clip",
"numpy.ones",
"numpy.mean",
"numpy.var",
"numpy.load",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
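The RunningMeanVar class above merges per-batch moments with the parallel variance formula. The check below copies update_mean_var_count_from_moments from the entry and verifies, on synthetic data, that merging batch moments reproduces the moments of the concatenated array; the random data and seed are arbitrary.

import numpy as np

def update_mean_var_count_from_moments(mean, var, count,
                                       batch_mean, batch_var, batch_count):
    # Copied from the derl entry above.
    delta = batch_mean - mean
    tot_count = count + batch_count
    new_mean = mean + delta * batch_count / tot_count
    new_var = (var * (count / tot_count)
               + batch_var * (batch_count / tot_count)
               + np.square(delta) * (count * batch_count / tot_count ** 2))
    new_count = tot_count
    return new_mean, new_var, new_count

rng = np.random.default_rng(0)
a = rng.normal(size=(100, 3))
b = rng.normal(loc=2.0, size=(50, 3))

mean, var, count = np.mean(a, axis=0), np.var(a, axis=0), a.shape[0]
mean, var, count = update_mean_var_count_from_moments(
    mean, var, count, np.mean(b, axis=0), np.var(b, axis=0), b.shape[0])

full = np.concatenate([a, b])
assert np.allclose(mean, full.mean(axis=0))
assert np.allclose(var, full.var(axis=0))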
Napnel/DRL-Trading | [
"3eacb6462846af18dcfdad4db6971bc29509af3f"
] | [
"summarize_performance.py"
] | [
"import argparse\nimport glob\nimport os\nimport pickle\nfrom pathlib import Path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom empyrical import annual_return, max_drawdown, sharpe_ratio\n\npd.options.display.float_format = \"{:.2f}\".format\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--local_dir\", type=str, default=\"./ray_results\")\nargs = parser.parse_args()\nprint(args)\n\n\ndef summary_computation_time(local_dir: str):\n algo_expt_paths = sorted(glob.glob(os.path.join(local_dir, \"*__*\")))\n n_fold, n_cpus, n_workers = 5, 8, 2\n n_parallel = n_cpus / n_workers\n\n total_time = 0\n total_time_df = pd.DataFrame()\n print(algo_expt_paths)\n for expt_path in algo_expt_paths:\n algo = expt_path.split(\"__\")[0][-3:]\n print(\"===\" * 15, algo, \"===\" * 15)\n sample_paths = glob.glob(os.path.join(expt_path, \"ExperimentCV-*\"))\n total_time_per_algo = 0\n n_samples = len(sample_paths) / n_fold\n print(sample_paths)\n for sample_path in sample_paths:\n progress = pd.read_csv(os.path.join(sample_path, \"progress.csv\"))\n progress = progress[[\"timesteps_total\", \"time_this_iter_s\"]]\n progress = progress[progress[\"timesteps_total\"] <= 150000]\n total_time_per_algo += progress[\"time_this_iter_s\"].sum()\n\n times = pd.Series(\n [\n total_time_per_algo / 60 / n_samples,\n total_time_per_algo / 60 / n_samples / n_fold,\n ],\n index=[\"Per CV [min]\", \"Per Trial [min]\"],\n name=algo,\n )\n total_time_df = pd.concat([total_time_df, times], axis=1)\n total_time += total_time_per_algo * 4\n\n sum_total_time = total_time_df.sum(axis=1)\n sum_total_time.name = \"Sum\"\n total_time_df = pd.concat([total_time_df, sum_total_time], axis=1)\n total_time_df.to_csv(os.path.join(local_dir, \"times.csv\"), float_format=\"%.1f\")\n print(total_time_df)\n print(f\"Our machine takes {total_time / 60 / 60 / n_parallel:.2f} [hours]\")\n\n\ndef summary_learning_curve(local_dir: str):\n algo_expt_paths = sorted(glob.glob(os.path.join(local_dir, \"*__*\")))\n\n def get_bh_reward(data_type: str):\n bh_dir = os.path.join(local_dir, \"backtest-stats-buy&hold/test-*\")\n bh_folder_cv = sorted(glob.glob(bh_dir))\n bh_reward_cv = []\n for bh_folder in bh_folder_cv:\n equity_curve = pd.read_csv(\n os.path.join(bh_folder, \"equity_curve.csv\"), index_col=0\n )[\"Equity\"]\n reward = equity_curve.apply(np.log).diff().dropna().sum()\n bh_reward_cv.append(reward)\n\n bh_reward_mean = sum(bh_reward_cv) / len(bh_reward_cv)\n return bh_reward_mean\n\n with plt.style.context([\"science\", \"ieee\"]):\n fig, axes = plt.subplots(1, 2, figsize=(6, 3))\n\n min_last_timestep = 1e9\n max_first_timestep = 0\n timesteps = None\n for expt_path in algo_expt_paths:\n algo = expt_path.split(\"__\")[0][-3:]\n\n with open(os.path.join(expt_path, \"results.pkl\"), \"rb\") as f:\n results: pd.DataFrame = pickle.load(f)\n\n params = list(results.index.names)\n params.remove(\"timesteps_total\")\n alpha = 0.3\n # Get best results with considering reward condition\n bh_reward_train = get_bh_reward(\"train\")\n # results_filtered = results.query(\"episode_reward_mean > @bh_reward_train\")\n results_filtered = results.query(\"episode_reward_mean > -1\")\n top_train = (\n results_filtered[\"episode_reward_mean\"]\n .ewm(alpha=alpha)\n .mean()\n .groupby(params)\n .last()\n .sort_values(ascending=False)\n )\n top_valid = (\n results_filtered[\"evaluation/episode_reward_mean\"]\n .ewm(alpha=alpha)\n .mean()\n .groupby(params)\n .last()\n .sort_values(ascending=False)\n )\n best_config_train = 
dict(zip(params, top_train.index[0]))\n best_results_train = results.loc[tuple(best_config_train.values())]\n best_config_valid = dict(zip(params, top_valid.index[0]))\n best_results_valid = results.loc[tuple(best_config_valid.values())]\n\n # Plot\n timesteps = best_results_train.index.values\n min_last_timestep = min(min_last_timestep, timesteps[-1])\n max_first_timestep = max(max_first_timestep, timesteps[0])\n training_reward = (\n best_results_train[\"episode_reward_mean\"].ewm(alpha=alpha).mean()\n )\n validation_reward = (\n best_results_valid[\"evaluation/episode_reward_mean\"]\n .ewm(alpha=alpha)\n .mean()\n )\n axes[0].plot(timesteps, training_reward, label=algo)\n axes[1].plot(timesteps, validation_reward, label=algo)\n\n bh_reward_train = pd.Series(\n [get_bh_reward(\"train\")] * len(timesteps),\n index=training_reward.index,\n )\n bh_reward_eval = pd.Series(\n [get_bh_reward(\"eval\")] * len(timesteps),\n index=validation_reward.index,\n )\n axes[0].plot(\n timesteps, bh_reward_train, label=\"B\\&H\", linestyle=(10, (5, 3, 1, 3, 1, 3))\n )\n axes[1].plot(\n timesteps, bh_reward_eval, label=\"B\\&H\", linestyle=(10, (5, 3, 1, 3, 1, 3))\n )\n\n axes[0].set_title(\"Training\")\n axes[1].set_title(\"Validation\")\n axes[0].set_ylabel(\"Reward\")\n axes[0].set_xlabel(\"Timesteps\")\n axes[1].set_xlabel(\"Timesteps\")\n axes[0].set_xlim(max_first_timestep, min_last_timestep)\n axes[1].set_xlim(max_first_timestep, min_last_timestep)\n axes[0].legend(loc=\"upper left\")\n axes[1].legend(loc=\"upper left\")\n\n local_dir = str(Path(algo_expt_paths[0]).parent)\n plt.savefig(os.path.join(local_dir, \"Learning Curve\"))\n plt.close(\"all\")\n\n\ndef get_performance_from_equity(local_dir: str):\n algo_expt_paths = sorted(glob.glob(os.path.join(local_dir, \"*__*\")))\n algo_avg_performance = pd.DataFrame()\n\n def calc_performance(equity_curve: pd.Series, name: str = None):\n returns = equity_curve.pct_change()\n ann_return = annual_return(returns) * 100\n ann_sharpe_ratio = sharpe_ratio(returns, annualization=True)\n ann_max_drawdown = max_drawdown(returns) * 100\n index = [\"Cum Return [%]\", \"Max. 
Drawdown [%]\", \"Sharpe Ratio\"]\n performance = pd.Series(\n [ann_return, ann_max_drawdown, ann_sharpe_ratio], index=index, name=name\n )\n return performance\n\n for expt_path in algo_expt_paths:\n algo = expt_path.split(\"__\")[0][-3:]\n print(\"===\" * 15, algo, \"===\" * 15)\n backtest_paths = glob.glob(os.path.join(expt_path, \"backtest-stats-test*\"))\n backtest_paths = sorted(backtest_paths)\n all_performance = pd.DataFrame()\n for backtest_path in backtest_paths:\n equity_curve = pd.read_csv(\n os.path.join(backtest_path, \"equity_curve.csv\"), index_col=0\n )[\"Equity\"]\n performance = calc_performance(equity_curve, backtest_path[-1])\n all_performance = pd.concat([all_performance, performance], axis=1)\n\n avg_performance = pd.Series(\n np.nanmean(all_performance, axis=1), index=all_performance.index\n )\n avg_performance.name = \"Avg\"\n all_performance = pd.concat([all_performance, avg_performance], axis=1)\n print(all_performance)\n avg_performance.name = algo\n algo_avg_performance = pd.concat(\n [algo_avg_performance, avg_performance], axis=1\n )\n\n bh_dir = os.path.join(local_dir, \"backtest-stats-buy&hold/test-*\")\n bh_folder_cv = sorted(glob.glob(bh_dir))\n all_bh_performance = pd.DataFrame()\n for bh_folder in bh_folder_cv:\n equity_curve = pd.read_csv(\n os.path.join(bh_folder, \"equity_curve.csv\"), index_col=0\n )[\"Equity\"]\n performance = calc_performance(equity_curve, name=bh_folder[-1])\n all_bh_performance = pd.concat([all_bh_performance, performance], axis=1)\n\n avg_bh_performance = all_bh_performance.mean(axis=1)\n avg_bh_performance.name = \"Avg\"\n all_bh_performance = pd.concat([all_bh_performance, avg_bh_performance], axis=1)\n print(\"===\" * 15, \"B&H\", \"===\" * 15)\n print(all_bh_performance)\n\n avg_bh_performance.name = \"B&H\"\n algo_avg_performance = pd.concat([algo_avg_performance, avg_bh_performance], axis=1)\n print(\"===\" * 15, f\"Overall Performance\", \"===\" * 15)\n print(algo_avg_performance.T)\n algo_avg_performance.T.to_csv(\n os.path.join(local_dir, \"avg_performance.csv\"), float_format=\"%.2f\"\n )\n\n\nif __name__ == \"__main__\":\n local_dir = Path(args.local_dir).resolve()\n local_dir = os.path.join(local_dir)\n summary_computation_time(local_dir)\n summary_learning_curve(local_dir)\n get_performance_from_equity(local_dir)\n"
] | [
[
"pandas.concat",
"pandas.Series",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.style.context",
"numpy.nanmean",
"matplotlib.pyplot.close"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
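summarize_performance.py above reduces each equity_curve.csv column to two kinds of series: summed log returns (the buy-and-hold reward baseline in get_bh_reward) and simple percentage returns that are then passed to empyrical's annual_return / sharpe_ratio / max_drawdown. A small pandas-only sketch of those two reductions, on a synthetic equity curve:

import numpy as np
import pandas as pd

equity_curve = pd.Series([100.0, 101.5, 99.8, 103.2, 104.0], name="Equity")

# Buy-and-hold reward baseline: sum of log returns, as in get_bh_reward().
bh_reward = equity_curve.apply(np.log).diff().dropna().sum()
print(f"buy&hold log-return reward: {bh_reward:.4f}")   # equals log(104 / 100)

# Simple returns, the input handed to the empyrical metrics in calc_performance().
returns = equity_curve.pct_change()
print(returns)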
mourga/variational-lstm | [
"61bdcf55c38de9dda29a35e8171d52f4b770af41"
] | [
"locked_dropout.py"
] | [
"import torch.nn as nn\nfrom torch.autograd import Variable\n\n\"\"\"\nCode from https://github.com/salesforce/awd-lstm-lm\npaper: https://arxiv.org/pdf/1708.02182.pdf (see Section 4.2)\n\"\"\"\n\n\nclass LockedDropout(nn.Module):\n \"\"\"\n This function applies dropout to the input tensor x.\n The shape of the tensor x in our implementation is (batch_size, seq_len, feature_size)\n (contrary to Merity's AWD that uses (seq_len, batch_size, feature_size)).\n So, we sample a mask from the 'feature_size' dimension,\n but a different mask for each 'batch_size' dimension,\n and expand it in the 'sequence_length' dimension so that\n we apply the SAME mask FOR EACH TIMESTEP of the RNN (= 'seq_len' dim.).\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def forward(self, x, dropout=0.5):\n if not self.training or not dropout:\n return x\n batch_size, seq_length, feat_size = x.size()\n m = x.data.new(batch_size, 1, feat_size).bernoulli_(1 - dropout)\n mask = Variable(m, requires_grad=False) / (1 - dropout)\n mask = mask.expand_as(x)\n return mask * x\n"
] | [
[
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
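LockedDropout above samples one Bernoulli mask per (batch, feature) pair and broadcasts it across the sequence dimension, so a dropped feature stays dropped at every timestep. A small usage sketch, assuming the class is importable as locked_dropout (matching the file path in the entry) and using made-up shapes:

import torch
from locked_dropout import LockedDropout   # assumed module name

torch.manual_seed(0)
lockdrop = LockedDropout()
lockdrop.train()                  # dropout is a no-op unless the module is in training mode

x = torch.ones(4, 10, 8)          # (batch_size, seq_len, feature_size)
out = lockdrop(x, dropout=0.5)

# Every timestep of a given sample shares the same zero pattern.
zero_pattern = (out == 0)
assert (zero_pattern == zero_pattern[:, :1, :]).all()
# Surviving activations are rescaled by 1 / (1 - dropout).
assert set(out.unique().tolist()) <= {0.0, 2.0}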
weilonghu/Active-BERT-NER | [
"4aa345fca7559e7e4a3931561448fcf7fd384307"
] | [
"evaluate.py"
] | [
"\"\"\"Evaluate the model\"\"\"\n\nimport argparse\nimport random\nimport logging\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import trange\n\nfrom sequence_tagger import BertOnlyForSequenceTagging as BertForSequenceTagging\n\nfrom seqeval.metrics import f1_score, classification_report\n\nfrom data_loader import DataLoader\nimport utils\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', default='conll',\n help=\"Directory containing the dataset\")\nparser.add_argument('--seed', type=int, default=23,\n help=\"random seed for initialization\")\nparser.add_argument('--multi_gpu', default=False, action='store_true',\n help=\"Whether to use multiple GPUs if available\")\nparser.add_argument('--fp16', default=False, action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n\n\ndef evaluate(model, data_iterator, params, mark='Eval', verbose=False):\n \"\"\"Evaluate the model on `steps` batches.\"\"\"\n # set model to evaluation mode\n model.eval()\n\n idx2tag = params.idx2tag\n\n true_tags = []\n pred_tags = []\n\n # a running average object for loss\n loss_avg = utils.RunningAverage()\n\n one_epoch = trange(params.eval_steps)\n for step, batch in zip(one_epoch, data_iterator):\n # fetch the next evaluation batch\n input_ids, label_ids, attention_mask, sentence_ids, label_mask = batch\n\n with torch.no_grad():\n loss, logits, labels = model(input_ids, token_type_ids=sentence_ids,\n attention_mask=attention_mask, labels=label_ids, label_masks=label_mask)\n if params.n_gpu > 1 and params.multi_gpu:\n loss = loss.mean()\n loss_avg.update(loss.item())\n\n batch_output = torch.argmax(F.log_softmax(logits, dim=2), dim=2)\n batch_output = batch_output.detach().cpu().numpy()\n batch_tags = labels.to('cpu').numpy()\n\n batch_true_tags = [\n [idx2tag.get(idx) for idx in indices[np.where(indices != -1)]]\n for indices in batch_tags]\n batch_pred_tags = [\n [idx2tag.get(idx) for idx in indices[np.where(batch_tags[i] != -1)]]\n for i, indices in enumerate(batch_output)]\n\n true_tags.extend(batch_true_tags)\n pred_tags.extend(batch_pred_tags)\n\n one_epoch.set_postfix(eval_loss='{:05.3f}'.format(loss_avg()))\n\n assert len(pred_tags) == len(true_tags)\n\n # logging loss, f1 and report\n metrics = {}\n f1 = f1_score(true_tags, pred_tags)\n metrics['loss'] = loss_avg()\n metrics['f1'] = f1\n metrics_str = \"; \".join(\"{}: {:05.2f}\".format(k, v)\n for k, v in metrics.items())\n logging.info(\"- {} metrics: \".format(mark) + metrics_str)\n\n if verbose:\n report = classification_report(true_tags, pred_tags)\n logging.info(report)\n return metrics\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n tagger_model_dir = 'experiments/' + args.dataset\n # Load the parameters from json file\n json_path = os.path.join(tagger_model_dir, 'params.json')\n assert os.path.isfile(\n json_path), \"No json configuration file found at {}\".format(json_path)\n params = utils.Params(json_path)\n\n # Use GPUs if available\n params.device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu')\n params.n_gpu = torch.cuda.device_count()\n params.multi_gpu = args.multi_gpu\n\n # Set the random seed for reproducible experiments\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n if params.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed) # set random seed for all GPUs\n params.seed = args.seed\n\n # Set the logger\n utils.set_logger(os.path.join(tagger_model_dir, 'evaluate.log'))\n\n # Create the input data 
pipeline\n logging.info(\"Loading the dataset...\")\n\n # Initialize the DataLoader\n data_dir = 'data/' + args.dataset\n if args.dataset in [\"conll\"]:\n bert_model_dir = 'pretrained_bert_models/bert-base-cased/'\n elif args.dataset in [\"msra\"]:\n bert_model_dir = 'pretrained_bert_models/bert-base-chinese/'\n\n data_loader = DataLoader(data_dir, bert_model_dir, params)\n\n # Load data\n test_data = data_loader.load_data('test')\n\n # Specify the test set size\n params.test_size = test_data.__len__()\n params.eval_steps = params.test_size // params.batch_size\n test_data_iterator = data_loader.data_iterator(test_data, shuffle=False)\n\n logging.info(\"- done.\")\n\n # Define the model\n # config_path = os.path.join(args.bert_model_dir, 'config.json')\n # config = BertConfig.from_json_file(config_path)\n # model = BertForTokenClassification(config, num_labels=len(params.tag2idx))\n # model = BertForSequenceTagging(config)\n model = BertForSequenceTagging.from_pretrained(tagger_model_dir)\n model.to(params.device)\n\n if args.fp16:\n model.half()\n if params.n_gpu > 1 and args.multi_gpu:\n model = torch.nn.DataParallel(model)\n\n logging.info(\"Starting evaluation...\")\n test_metrics = evaluate(model, test_data_iterator,\n params, mark='Test', verbose=True)\n"
] | [
[
"torch.nn.functional.log_softmax",
"torch.manual_seed",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"torch.cuda.device_count",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
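In evaluate() above, predictions are decoded by taking the argmax of log-softmaxed logits and mapping indices to tag strings while skipping positions whose label id is -1 (padding and sub-word pieces). Below is a standalone sketch of that decoding step; the tag set, shapes, label ids, and random logits are illustrative.

import numpy as np
import torch
import torch.nn.functional as F

idx2tag = {0: "O", 1: "B-PER", 2: "I-PER"}

logits = torch.randn(2, 5, len(idx2tag))       # (batch, seq_len, num_tags)
label_ids = np.array([[0, 1, 2, -1, -1],       # -1 marks positions to ignore
                      [0, 0, 1, 2, 0]])

batch_output = torch.argmax(F.log_softmax(logits, dim=2), dim=2).cpu().numpy()

true_tags = [[idx2tag.get(idx) for idx in indices[np.where(indices != -1)]]
             for indices in label_ids]
pred_tags = [[idx2tag.get(idx) for idx in indices[np.where(label_ids[i] != -1)]]
             for i, indices in enumerate(batch_output)]

print(true_tags)   # ground truth, without the ignored positions
print(pred_tags)   # predictions aligned to the same positions

These two lists are what the entry then feeds to seqeval's f1_score and classification_report.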
julian-carpenter/beta-TCVAE | [
"572d9e31993ccce47ef7a072a49c027c9c944e5e"
] | [
"nn/visualize.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.cm import tab10\nfrom matplotlib.axes._axes import _log as matplotlib_axes_logger\n\nmatplotlib_axes_logger.setLevel('ERROR')\n\n\ndef image_grid(img, grid_size=25, ttl=None):\n \"\"\"Return a grid of the images as a matplotlib figure.\"\"\"\n # Create a figure to contain the plot.\n sns.set_style(\"white\")\n if np.floor(grid_size).astype(type(grid_size)) != grid_size:\n return None\n if not isinstance(img, (np.ndarray, np.generic)):\n img = np.array(img)\n f = plt.figure(figsize=(10, 10), dpi=300)\n for i in range(grid_size):\n # Start next subplot.\n ax = plt.subplot(int(np.sqrt(grid_size)), int(np.sqrt(grid_size)), i + 1)\n ax.imshow(np.squeeze(img[i]), cmap='turbo')\n ax.set_xticks([])\n ax.set_yticks([])\n ax.grid(False)\n ax.axis(\"off\")\n # f.set_size_inches(np.array(f.get_size_inches()) * grid_size)\n if ttl is not None:\n f.suptitle(ttl)\n plt.box(False)\n f.tight_layout(pad=0., h_pad=0., w_pad=0.)\n return f\n\n\ndef clean_img(img):\n \"\"\"Return a clean figure.\"\"\"\n f, ax = plt.subplots(1, 2, figsize=(8, 4))\n ax[0].imshow(np.squeeze(img[0]), cmap='turbo')\n ax[1].imshow(np.squeeze(img[1]), cmap='turbo')\n for a in ax:\n a.set_xticks([])\n a.set_yticks([])\n a.set_xticklabels([])\n a.set_yticklabels([])\n a.grid(False)\n ax[0].set_title(\"Real\")\n ax[1].set_title(\"Generated\")\n f.tight_layout()\n return f\n\n\ndef plot_cluster(emb, labels, ttl=\"\", txt=None, s=40, legend_labels=None):\n clustered_z = (labels >= 0)\n\n sns.set_style(\"whitegrid\")\n fig, ax = plt.subplots(1, 1, figsize=(20, 20))\n ax.scatter(emb[~clustered_z, 0],\n emb[~clustered_z, 1],\n c=\"#cccccc\",\n s=s,\n alpha=0.5)\n for ll in np.unique(labels):\n e = emb[labels == ll]\n c_i = clustered_z[labels == ll]\n scatter = ax.scatter(e[c_i, 0],\n e[c_i, 1],\n c=np.array(tab10.colors[:6])[int(ll)],\n s=s,\n label=legend_labels[int(ll)])\n\n if txt is not None:\n ax.text(0.15, 0.15, txt, horizontalalignment=\"left\", verticalalignment=\"center\",\n transform=ax.transAxes, bbox=dict(facecolor=\"w\", alpha=0.95, edgecolor=\"w\"))\n # ax.text(4, 1, txt, ha='left', rotation=0, wrap=True)\n\n legend1 = ax.legend(handles=scatter.legend_elements()[0], labels=legend_labels,\n loc=\"lower left\", title=\"Classes\",\n fontsize=\"xx-large\", markerscale=3)\n ax.add_artist(legend1)\n ax.set_title(\"{}\".format(ttl))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n # ax.legend()\n fig.tight_layout()\n return fig\n\n\ndef bar_plot(x_bars, y_bars, labels, ticks, names):\n f, ax = plt.subplots(1, 1, figsize=(12, 6))\n with sns.axes_style(\"whitegrid\"):\n ax.bar(x_bars[0], y_bars[0], align=\"center\", width=.2, label=labels[0])\n ax.bar(x_bars[1], y_bars[1], align=\"center\", width=.2, label=labels[1])\n ax.set_xticks(ticks)\n ax.set_xticklabels(names)\n f.legend()\n f.tight_layout()\n return f\n\n\nturbo_colormap_data = np.array(\n [[0.18995, 0.07176, 0.23217],\n [0.19483, 0.08339, 0.26149],\n [0.19956, 0.09498, 0.29024],\n [0.20415, 0.10652, 0.31844],\n [0.20860, 0.11802, 0.34607],\n [0.21291, 0.12947, 0.37314],\n [0.21708, 0.14087, 0.39964],\n [0.22111, 0.15223, 0.42558],\n [0.22500, 0.16354, 0.45096],\n [0.22875, 0.17481, 0.47578],\n [0.23236, 0.18603, 0.50004],\n [0.23582, 0.19720, 0.52373],\n [0.23915, 0.20833, 0.54686],\n [0.24234, 0.21941, 0.56942],\n [0.24539, 0.23044, 0.59142],\n [0.24830, 0.24143, 0.61286],\n [0.25107, 0.25237, 0.63374],\n [0.25369, 0.26327, 0.65406],\n [0.25618, 0.27412, 0.67381],\n [0.25853, 
0.28492, 0.69300],\n [0.26074, 0.29568, 0.71162],\n [0.26280, 0.30639, 0.72968],\n [0.26473, 0.31706, 0.74718],\n [0.26652, 0.32768, 0.76412],\n [0.26816, 0.33825, 0.78050],\n [0.26967, 0.34878, 0.79631],\n [0.27103, 0.35926, 0.81156],\n [0.27226, 0.36970, 0.82624],\n [0.27334, 0.38008, 0.84037],\n [0.27429, 0.39043, 0.85393],\n [0.27509, 0.40072, 0.86692],\n [0.27576, 0.41097, 0.87936],\n [0.27628, 0.42118, 0.89123],\n [0.27667, 0.43134, 0.90254],\n [0.27691, 0.44145, 0.91328],\n [0.27701, 0.45152, 0.92347],\n [0.27698, 0.46153, 0.93309],\n [0.27680, 0.47151, 0.94214],\n [0.27648, 0.48144, 0.95064],\n [0.27603, 0.49132, 0.95857],\n [0.27543, 0.50115, 0.96594],\n [0.27469, 0.51094, 0.97275],\n [0.27381, 0.52069, 0.97899],\n [0.27273, 0.53040, 0.98461],\n [0.27106, 0.54015, 0.98930],\n [0.26878, 0.54995, 0.99303],\n [0.26592, 0.55979, 0.99583],\n [0.26252, 0.56967, 0.99773],\n [0.25862, 0.57958, 0.99876],\n [0.25425, 0.58950, 0.99896],\n [0.24946, 0.59943, 0.99835],\n [0.24427, 0.60937, 0.99697],\n [0.23874, 0.61931, 0.99485],\n [0.23288, 0.62923, 0.99202],\n [0.22676, 0.63913, 0.98851],\n [0.22039, 0.64901, 0.98436],\n [0.21382, 0.65886, 0.97959],\n [0.20708, 0.66866, 0.97423],\n [0.20021, 0.67842, 0.96833],\n [0.19326, 0.68812, 0.96190],\n [0.18625, 0.69775, 0.95498],\n [0.17923, 0.70732, 0.94761],\n [0.17223, 0.71680, 0.93981],\n [0.16529, 0.72620, 0.93161],\n [0.15844, 0.73551, 0.92305],\n [0.15173, 0.74472, 0.91416],\n [0.14519, 0.75381, 0.90496],\n [0.13886, 0.76279, 0.89550],\n [0.13278, 0.77165, 0.88580],\n [0.12698, 0.78037, 0.87590],\n [0.12151, 0.78896, 0.86581],\n [0.11639, 0.79740, 0.85559],\n [0.11167, 0.80569, 0.84525],\n [0.10738, 0.81381, 0.83484],\n [0.10357, 0.82177, 0.82437],\n [0.10026, 0.82955, 0.81389],\n [0.09750, 0.83714, 0.80342],\n [0.09532, 0.84455, 0.79299],\n [0.09377, 0.85175, 0.78264],\n [0.09287, 0.85875, 0.77240],\n [0.09267, 0.86554, 0.76230],\n [0.09320, 0.87211, 0.75237],\n [0.09451, 0.87844, 0.74265],\n [0.09662, 0.88454, 0.73316],\n [0.09958, 0.89040, 0.72393],\n [0.10342, 0.89600, 0.71500],\n [0.10815, 0.90142, 0.70599],\n [0.11374, 0.90673, 0.69651],\n [0.12014, 0.91193, 0.68660],\n [0.12733, 0.91701, 0.67627],\n [0.13526, 0.92197, 0.66556],\n [0.14391, 0.92680, 0.65448],\n [0.15323, 0.93151, 0.64308],\n [0.16319, 0.93609, 0.63137],\n [0.17377, 0.94053, 0.61938],\n [0.18491, 0.94484, 0.60713],\n [0.19659, 0.94901, 0.59466],\n [0.20877, 0.95304, 0.58199],\n [0.22142, 0.95692, 0.56914],\n [0.23449, 0.96065, 0.55614],\n [0.24797, 0.96423, 0.54303],\n [0.26180, 0.96765, 0.52981],\n [0.27597, 0.97092, 0.51653],\n [0.29042, 0.97403, 0.50321],\n [0.30513, 0.97697, 0.48987],\n [0.32006, 0.97974, 0.47654],\n [0.33517, 0.98234, 0.46325],\n [0.35043, 0.98477, 0.45002],\n [0.36581, 0.98702, 0.43688],\n [0.38127, 0.98909, 0.42386],\n [0.39678, 0.99098, 0.41098],\n [0.41229, 0.99268, 0.39826],\n [0.42778, 0.99419, 0.38575],\n [0.44321, 0.99551, 0.37345],\n [0.45854, 0.99663, 0.36140],\n [0.47375, 0.99755, 0.34963],\n [0.48879, 0.99828, 0.33816],\n [0.50362, 0.99879, 0.32701],\n [0.51822, 0.99910, 0.31622],\n [0.53255, 0.99919, 0.30581],\n [0.54658, 0.99907, 0.29581],\n [0.56026, 0.99873, 0.28623],\n [0.57357, 0.99817, 0.27712],\n [0.58646, 0.99739, 0.26849],\n [0.59891, 0.99638, 0.26038],\n [0.61088, 0.99514, 0.25280],\n [0.62233, 0.99366, 0.24579],\n [0.63323, 0.99195, 0.23937],\n [0.64362, 0.98999, 0.23356],\n [0.65394, 0.98775, 0.22835],\n [0.66428, 0.98524, 0.22370],\n [0.67462, 0.98246, 0.21960],\n [0.68494, 0.97941, 0.21602],\n [0.69525, 0.97610, 0.21294],\n 
[0.70553, 0.97255, 0.21032],\n [0.71577, 0.96875, 0.20815],\n [0.72596, 0.96470, 0.20640],\n [0.73610, 0.96043, 0.20504],\n [0.74617, 0.95593, 0.20406],\n [0.75617, 0.95121, 0.20343],\n [0.76608, 0.94627, 0.20311],\n [0.77591, 0.94113, 0.20310],\n [0.78563, 0.93579, 0.20336],\n [0.79524, 0.93025, 0.20386],\n [0.80473, 0.92452, 0.20459],\n [0.81410, 0.91861, 0.20552],\n [0.82333, 0.91253, 0.20663],\n [0.83241, 0.90627, 0.20788],\n [0.84133, 0.89986, 0.20926],\n [0.85010, 0.89328, 0.21074],\n [0.85868, 0.88655, 0.21230],\n [0.86709, 0.87968, 0.21391],\n [0.87530, 0.87267, 0.21555],\n [0.88331, 0.86553, 0.21719],\n [0.89112, 0.85826, 0.21880],\n [0.89870, 0.85087, 0.22038],\n [0.90605, 0.84337, 0.22188],\n [0.91317, 0.83576, 0.22328],\n [0.92004, 0.82806, 0.22456],\n [0.92666, 0.82025, 0.22570],\n [0.93301, 0.81236, 0.22667],\n [0.93909, 0.80439, 0.22744],\n [0.94489, 0.79634, 0.22800],\n [0.95039, 0.78823, 0.22831],\n [0.95560, 0.78005, 0.22836],\n [0.96049, 0.77181, 0.22811],\n [0.96507, 0.76352, 0.22754],\n [0.96931, 0.75519, 0.22663],\n [0.97323, 0.74682, 0.22536],\n [0.97679, 0.73842, 0.22369],\n [0.98000, 0.73000, 0.22161],\n [0.98289, 0.72140, 0.21918],\n [0.98549, 0.71250, 0.21650],\n [0.98781, 0.70330, 0.21358],\n [0.98986, 0.69382, 0.21043],\n [0.99163, 0.68408, 0.20706],\n [0.99314, 0.67408, 0.20348],\n [0.99438, 0.66386, 0.19971],\n [0.99535, 0.65341, 0.19577],\n [0.99607, 0.64277, 0.19165],\n [0.99654, 0.63193, 0.18738],\n [0.99675, 0.62093, 0.18297],\n [0.99672, 0.60977, 0.17842],\n [0.99644, 0.59846, 0.17376],\n [0.99593, 0.58703, 0.16899],\n [0.99517, 0.57549, 0.16412],\n [0.99419, 0.56386, 0.15918],\n [0.99297, 0.55214, 0.15417],\n [0.99153, 0.54036, 0.14910],\n [0.98987, 0.52854, 0.14398],\n [0.98799, 0.51667, 0.13883],\n [0.98590, 0.50479, 0.13367],\n [0.98360, 0.49291, 0.12849],\n [0.98108, 0.48104, 0.12332],\n [0.97837, 0.46920, 0.11817],\n [0.97545, 0.45740, 0.11305],\n [0.97234, 0.44565, 0.10797],\n [0.96904, 0.43399, 0.10294],\n [0.96555, 0.42241, 0.09798],\n [0.96187, 0.41093, 0.09310],\n [0.95801, 0.39958, 0.08831],\n [0.95398, 0.38836, 0.08362],\n [0.94977, 0.37729, 0.07905],\n [0.94538, 0.36638, 0.07461],\n [0.94084, 0.35566, 0.07031],\n [0.93612, 0.34513, 0.06616],\n [0.93125, 0.33482, 0.06218],\n [0.92623, 0.32473, 0.05837],\n [0.92105, 0.31489, 0.05475],\n [0.91572, 0.30530, 0.05134],\n [0.91024, 0.29599, 0.04814],\n [0.90463, 0.28696, 0.04516],\n [0.89888, 0.27824, 0.04243],\n [0.89298, 0.26981, 0.03993],\n [0.88691, 0.26152, 0.03753],\n [0.88066, 0.25334, 0.03521],\n [0.87422, 0.24526, 0.03297],\n [0.86760, 0.23730, 0.03082],\n [0.86079, 0.22945, 0.02875],\n [0.85380, 0.22170, 0.02677],\n [0.84662, 0.21407, 0.02487],\n [0.83926, 0.20654, 0.02305],\n [0.83172, 0.19912, 0.02131],\n [0.82399, 0.19182, 0.01966],\n [0.81608, 0.18462, 0.01809],\n [0.80799, 0.17753, 0.01660],\n [0.79971, 0.17055, 0.01520],\n [0.79125, 0.16368, 0.01387],\n [0.78260, 0.15693, 0.01264],\n [0.77377, 0.15028, 0.01148],\n [0.76476, 0.14374, 0.01041],\n [0.75556, 0.13731, 0.00942],\n [0.74617, 0.13098, 0.00851],\n [0.73661, 0.12477, 0.00769],\n [0.72686, 0.11867, 0.00695],\n [0.71692, 0.11268, 0.00629],\n [0.70680, 0.10680, 0.00571],\n [0.69650, 0.10102, 0.00522],\n [0.68602, 0.09536, 0.00481],\n [0.67535, 0.08980, 0.00449],\n [0.66449, 0.08436, 0.00424],\n [0.65345, 0.07902, 0.00408],\n [0.64223, 0.07380, 0.00401],\n [0.63082, 0.06868, 0.00401],\n [0.61923, 0.06367, 0.00410],\n [0.60746, 0.05878, 0.00427],\n [0.59550, 0.05399, 0.00453],\n [0.58336, 0.04931, 0.00486],\n [0.57103, 0.04474, 
0.00529],\n [0.55852, 0.04028, 0.00579],\n [0.54583, 0.03593, 0.00638],\n [0.53295, 0.03169, 0.00705],\n [0.51989, 0.02756, 0.00780],\n [0.50664, 0.02354, 0.00863],\n [0.49321, 0.01963, 0.00955],\n [0.47960, 0.01583, 0.01055]])\n\n\ndef RGBToPyCmap(rgbdata):\n nsteps = rgbdata.shape[0]\n stepaxis = np.linspace(0, 1, nsteps)\n\n rdata = []\n gdata = []\n bdata = []\n for istep in range(nsteps):\n r = rgbdata[istep, 0]\n g = rgbdata[istep, 1]\n b = rgbdata[istep, 2]\n rdata.append((stepaxis[istep], r, r))\n gdata.append((stepaxis[istep], g, g))\n bdata.append((stepaxis[istep], b, b))\n\n mpl_data = {'red': rdata,\n 'green': gdata,\n 'blue': bdata}\n\n return mpl_data\n\n\nmpl_data = RGBToPyCmap(turbo_colormap_data)\nplt.register_cmap(name='turbo', data=mpl_data, lut=turbo_colormap_data.shape[0])\n"
] | [
[
"matplotlib.pyplot.box",
"numpy.sqrt",
"numpy.linspace",
"numpy.unique",
"numpy.squeeze",
"matplotlib.axes._axes._log.setLevel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.register_cmap",
"numpy.floor",
"numpy.array",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
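A minimal usage sketch for the colormap registered in the row above (assuming the RGBToPyCmap/register_cmap snippet has already run in the session, or Matplotlib >= 3.3, which ships a built-in 'turbo'):

```python
import numpy as np
import matplotlib.pyplot as plt

# Any plotting call can now refer to the registered colormap by name.
data = np.random.rand(32, 32)
fig, ax = plt.subplots()
im = ax.imshow(data, cmap='turbo')
fig.colorbar(im, ax=ax)
plt.show()
```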
closerbibi/bv_detection | [
"9bf9e75e26587ddeb9a3d39415408f7b7e5ffd7d"
] | [
"lib/datasets/imdb.py"
] | [
"# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport os\nimport os.path as osp\nimport PIL\nfrom utils.cython_bbox import bbox_overlaps\nimport numpy as np\nimport scipy.sparse\nfrom fast_rcnn.config import cfg\nimport scipy.io as sio\nimport pdb\n\nclass imdb(object):\n \"\"\"Image database.\"\"\"\n\n def __init__(self, name):\n self._name = name\n self._num_classes = 0\n self._classes = []\n self._image_index = []\n self._obj_proposer = 'selective_search'\n self._roidb = None\n self._roidb_handler = self.default_roidb\n # Use this dict for storing dataset specific config options\n self.config = {}\n\n @property\n def name(self):\n return self._name\n\n @property\n def num_classes(self):\n return len(self._classes)\n\n @property\n def classes(self):\n return self._classes\n\n @property\n def image_index(self):\n return self._image_index\n\n @property\n def roidb_handler(self):\n return self._roidb_handler\n\n @roidb_handler.setter\n def roidb_handler(self, val):\n self._roidb_handler = val\n\n def set_proposal_method(self, method):\n method = eval('self.' + method + '_roidb')\n self.roidb_handler = method\n\n @property\n def roidb(self):\n # A roidb is a list of dictionaries, each with the following keys:\n # boxes\n # gt_overlaps\n # gt_classes\n # flipped\n if self._roidb is not None:\n return self._roidb\n self._roidb = self.roidb_handler()\n return self._roidb\n\n @property\n def cache_path(self):\n cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))\n if not os.path.exists(cache_path):\n os.makedirs(cache_path)\n return cache_path\n\n @property\n def num_images(self):\n return len(self.image_index)\n\n def image_path_at(self, i):\n raise NotImplementedError\n\n def default_roidb(self):\n raise NotImplementedError\n\n def evaluate_detections(self, all_boxes, output_dir=None):\n \"\"\"\n all_boxes is a list of length number-of-classes.\n Each list element is a list of length number-of-images.\n Each of those list elements is either an empty list []\n or a numpy array of detection.\n\n all_boxes[class][image] = [] or np.array of shape #dets x 5\n \"\"\"\n raise NotImplementedError\n\n def _get_widths(self):\n #return [PIL.Image.open(self.image_path_at(i)).size[0]\n # for i in xrange(self.num_images)]\n return [sio.loadmat(self.image_path_at(i))['grid'].shape[1]\n for i in xrange(self.num_images)]\n\n def append_flipped_images(self):\n num_images = self.num_images\n widths = self._get_widths()\n for i in xrange(num_images):\n boxes = self.roidb[i]['boxes'].copy()\n oldx1 = boxes[:, 0].copy() # 0: xmin\n oldx2 = boxes[:, 2].copy() # 2: xmax\n boxes[:, 0] = widths[i] - oldx2 - 1 # width - max: suppose to be small\n boxes[:, 2] = widths[i] - oldx1 - 1 # width - min: supposed to be big\n assert (boxes[:, 2] >= boxes[:, 0]).all()\n entry = {'boxes' : boxes,\n 'gt_overlaps' : self.roidb[i]['gt_overlaps'],\n 'gt_classes' : self.roidb[i]['gt_classes'],\n 'flipped' : True}\n self.roidb.append(entry)\n self._image_index = self._image_index * 2\n\n def evaluate_recall(self, candidate_boxes=None, thresholds=None,\n area='all', limit=None):\n \"\"\"Evaluate detection proposal recall metrics.\n\n Returns:\n results: dictionary of results with keys\n 'ar': average recall\n 'recalls': vector recalls at each IoU overlap threshold\n 'thresholds': vector of IoU overlap thresholds\n 
'gt_overlaps': vector of all ground-truth overlaps\n \"\"\"\n # Record max overlap value for each gt box\n # Return vector of overlap values\n areas = { 'all': 0, 'small': 1, 'medium': 2, 'large': 3,\n '96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}\n area_ranges = [ [0**2, 1e5**2], # all\n [0**2, 32**2], # small\n [32**2, 96**2], # medium\n [96**2, 1e5**2], # large\n [96**2, 128**2], # 96-128\n [128**2, 256**2], # 128-256\n [256**2, 512**2], # 256-512\n [512**2, 1e5**2], # 512-inf\n ]\n assert areas.has_key(area), 'unknown area range: {}'.format(area)\n area_range = area_ranges[areas[area]]\n gt_overlaps = np.zeros(0)\n num_pos = 0\n for i in xrange(self.num_images):\n # Checking for max_overlaps == 1 avoids including crowd annotations\n # (...pretty hacking :/)\n max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)\n gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &\n (max_gt_overlaps == 1))[0]\n gt_boxes = self.roidb[i]['boxes'][gt_inds, :]\n gt_areas = self.roidb[i]['seg_areas'][gt_inds]\n valid_gt_inds = np.where((gt_areas >= area_range[0]) &\n (gt_areas <= area_range[1]))[0]\n gt_boxes = gt_boxes[valid_gt_inds, :]\n num_pos += len(valid_gt_inds)\n\n if candidate_boxes is None:\n # If candidate_boxes is not supplied, the default is to use the\n # non-ground-truth boxes from this roidb\n non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]\n boxes = self.roidb[i]['boxes'][non_gt_inds, :]\n else:\n boxes = candidate_boxes[i]\n if boxes.shape[0] == 0:\n continue\n if limit is not None and boxes.shape[0] > limit:\n boxes = boxes[:limit, :]\n\n overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n\n _gt_overlaps = np.zeros((gt_boxes.shape[0]))\n for j in xrange(gt_boxes.shape[0]):\n # find which proposal box maximally covers each gt box\n argmax_overlaps = overlaps.argmax(axis=0)\n # and get the iou amount of coverage for each gt box\n max_overlaps = overlaps.max(axis=0)\n # find which gt box is 'best' covered (i.e. 
'best' = most iou)\n gt_ind = max_overlaps.argmax()\n gt_ovr = max_overlaps.max()\n assert(gt_ovr >= 0)\n # find the proposal box that covers the best covered gt box\n box_ind = argmax_overlaps[gt_ind]\n # record the iou coverage of this gt box\n _gt_overlaps[j] = overlaps[box_ind, gt_ind]\n assert(_gt_overlaps[j] == gt_ovr)\n # mark the proposal box and the gt box as used\n overlaps[box_ind, :] = -1\n overlaps[:, gt_ind] = -1\n # append recorded iou coverage level\n gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))\n\n gt_overlaps = np.sort(gt_overlaps)\n if thresholds is None:\n step = 0.05\n thresholds = np.arange(0.5, 0.95 + 1e-5, step)\n recalls = np.zeros_like(thresholds)\n # compute recall for each iou threshold\n for i, t in enumerate(thresholds):\n recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)\n # ar = 2 * np.trapz(recalls, thresholds)\n ar = recalls.mean()\n return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,\n 'gt_overlaps': gt_overlaps}\n\n def create_roidb_from_box_list(self, box_list, gt_roidb):\n assert len(box_list) == self.num_images, \\\n 'Number of boxes must match number of ground-truth images'\n roidb = []\n for i in xrange(self.num_images):\n boxes = box_list[i]\n num_boxes = boxes.shape[0]\n overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)\n\n if gt_roidb is not None and gt_roidb[i]['boxes'].size > 0:\n gt_boxes = gt_roidb[i]['boxes']\n gt_classes = gt_roidb[i]['gt_classes']\n gt_overlaps = bbox_overlaps(boxes.astype(np.float),\n gt_boxes.astype(np.float))\n argmaxes = gt_overlaps.argmax(axis=1)\n maxes = gt_overlaps.max(axis=1)\n I = np.where(maxes > 0)[0]\n overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n roidb.append({\n 'boxes' : boxes,\n 'gt_classes' : np.zeros((num_boxes,), dtype=np.int32),\n 'gt_overlaps' : overlaps,\n 'flipped' : False,\n 'seg_areas' : np.zeros((num_boxes,), dtype=np.float32),\n })\n return roidb\n\n @staticmethod\n def merge_roidbs(a, b):\n assert len(a) == len(b)\n for i in xrange(len(a)):\n a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))\n a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],\n b[i]['gt_classes']))\n a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],\n b[i]['gt_overlaps']])\n a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],\n b[i]['seg_areas']))\n return a\n\n def competition_mode(self, on):\n \"\"\"Turn competition mode on or off.\"\"\"\n pass\n"
] | [
[
"numpy.hstack",
"numpy.arange",
"numpy.sort",
"numpy.zeros_like",
"numpy.zeros",
"numpy.where",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
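The tail of `evaluate_recall` in the row above reduces the recorded ground-truth overlaps to recall-at-IoU values and an average recall; a self-contained sketch of that reduction, with made-up overlap values standing in for real proposal/ground-truth IoUs:

```python
import numpy as np

# gt_overlaps stands in for the concatenated best-proposal IoU per ground-truth box.
gt_overlaps = np.sort(np.array([0.92, 0.55, 0.71, 0.48, 0.86, 0.63]))
num_pos = gt_overlaps.size

thresholds = np.arange(0.5, 0.95 + 1e-5, 0.05)
recalls = np.zeros_like(thresholds)
for i, t in enumerate(thresholds):
    recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)

ar = recalls.mean()  # average recall, as returned by evaluate_recall
print(ar, recalls)
```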
TimotheeMickus/onmt-selectrans | [
"ae73ba90161d1a1e663e05fe750734849b373713"
] | [
"onmt/modules/multi_headed_attn.py"
] | [
"\"\"\" Multi-Head Attention module \"\"\"\nimport math\nimport torch\nimport torch.nn as nn\n\nfrom onmt.utils.misc import generate_relative_positions_matrix,\\\n relative_matmul\n# from onmt.utils.misc import aeq\n\n\nclass MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention module from \"Attention is All You Need\"\n :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`.\n\n Similar to standard `dot` attention but uses\n multiple attention distributions simulataneously\n to select relevant items.\n\n .. mermaid::\n\n graph BT\n A[key]\n B[value]\n C[query]\n O[output]\n subgraph Attn\n D[Attn 1]\n E[Attn 2]\n F[Attn N]\n end\n A --> D\n C --> D\n A --> E\n C --> E\n A --> F\n C --> F\n D --> O\n E --> O\n F --> O\n B --> O\n\n Also includes several additional tricks.\n\n Args:\n head_count (int): number of parallel heads\n model_dim (int): the dimension of keys/values/queries,\n must be divisible by head_count\n dropout (float): dropout parameter\n \"\"\"\n\n def __init__(self, head_count, model_dim, dropout=0.1,\n max_relative_positions=0, use_sigmoid=False):\n assert model_dim % head_count == 0\n self.dim_per_head = model_dim // head_count\n self.model_dim = model_dim\n\n super(MultiHeadedAttention, self).__init__()\n self.head_count = head_count\n\n self.linear_keys = nn.Linear(model_dim,\n head_count * self.dim_per_head)\n self.linear_values = nn.Linear(model_dim,\n head_count * self.dim_per_head)\n self.linear_query = nn.Linear(model_dim,\n head_count * self.dim_per_head)\n self.softmax = nn.Softmax(dim=-1) if not use_sigmoid else nn.Sigmoid()\n self.dropout = nn.Dropout(dropout)\n self.final_linear = nn.Linear(model_dim, model_dim)\n\n self.max_relative_positions = max_relative_positions\n\n if max_relative_positions > 0:\n vocab_size = max_relative_positions * 2 + 1\n self.relative_positions_embeddings = nn.Embedding(\n vocab_size, self.dim_per_head)\n\n def forward(self, key, value, query, mask=None,\n layer_cache=None, type=None):\n \"\"\"\n Compute the context vector and the attention vectors.\n\n Args:\n key (FloatTensor): set of `key_len`\n key vectors ``(batch, key_len, dim)``\n value (FloatTensor): set of `key_len`\n value vectors ``(batch, key_len, dim)``\n query (FloatTensor): set of `query_len`\n query vectors ``(batch, query_len, dim)``\n mask: binary mask indicating which keys have\n non-zero attention ``(batch, query_len, key_len)``\n Returns:\n (FloatTensor, FloatTensor):\n\n * output context vectors ``(batch, query_len, dim)``\n * one of the attention vectors ``(batch, query_len, key_len)``\n \"\"\"\n\n # CHECKS\n # batch, k_len, d = key.size()\n # batch_, k_len_, d_ = value.size()\n # aeq(batch, batch_)\n # aeq(k_len, k_len_)\n # aeq(d, d_)\n # batch_, q_len, d_ = query.size()\n # aeq(batch, batch_)\n # aeq(d, d_)\n # aeq(self.model_dim % 8, 0)\n # if mask is not None:\n # batch_, q_len_, k_len_ = mask.size()\n # aeq(batch_, batch)\n # aeq(k_len_, k_len)\n # aeq(q_len_ == q_len)\n # END CHECKS\n\n batch_size = key.size(0)\n dim_per_head = self.dim_per_head\n head_count = self.head_count\n key_len = key.size(1)\n query_len = query.size(1)\n device = key.device\n\n def shape(x):\n \"\"\"Projection.\"\"\"\n return x.view(batch_size, -1, head_count, dim_per_head) \\\n .transpose(1, 2)\n\n def unshape(x):\n \"\"\"Compute context.\"\"\"\n return x.transpose(1, 2).contiguous() \\\n .view(batch_size, -1, head_count * dim_per_head)\n\n # 1) Project key, value, and query.\n if layer_cache is not None:\n if type == \"self\":\n query, key, value = 
self.linear_query(query),\\\n self.linear_keys(query),\\\n self.linear_values(query)\n key = shape(key)\n value = shape(value)\n if layer_cache[\"self_keys\"] is not None:\n key = torch.cat(\n (layer_cache[\"self_keys\"].to(device), key),\n dim=2)\n if layer_cache[\"self_values\"] is not None:\n value = torch.cat(\n (layer_cache[\"self_values\"].to(device), value),\n dim=2)\n layer_cache[\"self_keys\"] = key\n layer_cache[\"self_values\"] = value\n elif type == \"context\":\n query = self.linear_query(query)\n if layer_cache[\"memory_keys\"] is None:\n key, value = self.linear_keys(key),\\\n self.linear_values(value)\n key = shape(key)\n value = shape(value)\n else:\n key, value = layer_cache[\"memory_keys\"],\\\n layer_cache[\"memory_values\"]\n layer_cache[\"memory_keys\"] = key\n layer_cache[\"memory_values\"] = value\n else:\n key = self.linear_keys(key)\n value = self.linear_values(value)\n query = self.linear_query(query)\n key = shape(key)\n value = shape(value)\n\n if self.max_relative_positions > 0 and type == \"self\":\n key_len = key.size(2)\n # 1 or key_len x key_len\n relative_positions_matrix = generate_relative_positions_matrix(\n key_len, self.max_relative_positions,\n cache=True if layer_cache is not None else False)\n # 1 or key_len x key_len x dim_per_head\n relations_keys = self.relative_positions_embeddings(\n relative_positions_matrix.to(device))\n # 1 or key_len x key_len x dim_per_head\n relations_values = self.relative_positions_embeddings(\n relative_positions_matrix.to(device))\n\n query = shape(query)\n\n key_len = key.size(2)\n query_len = query.size(2)\n\n # 2) Calculate and scale scores.\n query = query / math.sqrt(dim_per_head)\n # batch x num_heads x query_len x key_len\n query_key = torch.matmul(query, key.transpose(2, 3))\n\n if self.max_relative_positions > 0 and type == \"self\":\n scores = query_key + relative_matmul(query, relations_keys, True)\n else:\n scores = query_key\n scores = scores.float()\n\n if mask is not None:\n mask = mask.unsqueeze(1) # [B, 1, 1, T_values]\n scores = scores.masked_fill(mask, -1e18)\n\n # 3) Apply attention dropout and compute context vectors.\n attn = self.softmax(scores).to(query.dtype)\n drop_attn = self.dropout(attn)\n\n context_original = torch.matmul(drop_attn, value)\n\n if self.max_relative_positions > 0 and type == \"self\":\n context = unshape(context_original\n + relative_matmul(drop_attn,\n relations_values,\n False))\n else:\n context = unshape(context_original)\n\n output = self.final_linear(context)\n # CHECK\n # batch_, q_len_, d_ = output.size()\n # aeq(q_len, q_len_)\n # aeq(batch, batch_)\n # aeq(d, d_)\n\n # Return one attn\n top_attn = attn \\\n .view(batch_size, head_count,\n query_len, key_len)[:, 0, :, :] \\\n .contiguous()\n\n return output, top_attn\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.Embedding",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.matmul"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
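A stand-alone sketch of the scaled dot-product core inside `MultiHeadedAttention.forward` from the row above (random tensors as stand-ins; shapes and the masking convention follow the module):

```python
import math
import torch

batch, heads, q_len, k_len, dim_per_head = 2, 8, 5, 7, 64
query = torch.randn(batch, heads, q_len, dim_per_head)
key = torch.randn(batch, heads, k_len, dim_per_head)
value = torch.randn(batch, heads, k_len, dim_per_head)
mask = torch.zeros(batch, 1, 1, k_len, dtype=torch.bool)  # True marks keys to ignore

query = query / math.sqrt(dim_per_head)
scores = torch.matmul(query, key.transpose(2, 3))      # (batch, heads, q_len, k_len)
scores = scores.masked_fill(mask, -1e18)
attn = torch.softmax(scores, dim=-1)
context = torch.matmul(attn, value)                    # (batch, heads, q_len, dim_per_head)
print(context.shape)
```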
Ascend/mindspore | [
"1509d3f848e6685660194d9f58646fc73ae0f0f0"
] | [
"tests/st/ops/gpu/test_cholesky_op.py"
] | [
"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport scipy as scp\nimport pytest\nimport mindspore.context as context\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore.ops import operations as P\nfrom mindspore.common import dtype as mstype\nfrom mindspore.ops import PrimitiveWithInfer\nfrom mindspore.ops import prim_attr_register\nfrom mindspore._checkparam import Validator as validator\n\ncontext.set_context(mode=context.GRAPH_MODE, device_target=\"GPU\")\n\n\nclass NetCholesky(nn.Cell):\n def __init__(self):\n super(NetCholesky, self).__init__()\n self.cholesky = P.Cholesky()\n\n def construct(self, x):\n return self.cholesky(x)\n\n\nclass ScipyCholesky(PrimitiveWithInfer):\n \"\"\"\n Inner API for Cholesky base class.\n \"\"\"\n\n @prim_attr_register\n def __init__(self, lower=False, clean=False):\n super().__init__(name=\"PureCholesky\")\n self.lower = validator.check_value_type(\"lower\", lower, [bool], self.lower)\n self.clean = validator.check_value_type(\"clean\", clean, [bool], self.clean)\n self.init_prim_io_names(inputs=['x'], outputs=['y'])\n\n def __infer__(self, x):\n x_shape = x['shape']\n x_dtype = x['dtype']\n return {\n 'shape': tuple(x_shape),\n 'dtype': x_dtype,\n 'value': None\n }\n\n\nclass ScipyNetCholesky(nn.Cell):\n def __init__(self, lower=False, clean=False):\n super(ScipyNetCholesky, self).__init__()\n self.cholesky = ScipyCholesky(lower, clean)\n\n def construct(self, x):\n return self.cholesky(x)\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_cholesky_fp32():\n \"\"\"\n Feature: ALL TO ALL\n Description: test cases for origin cholesky [N,N]\n Expectation: the result match np cholesky\n \"\"\"\n cholesky = NetCholesky()\n x = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]]).astype(np.float32)\n output = cholesky(Tensor(x, dtype=mstype.float32))\n expect = np.linalg.cholesky(x)\n tol = 1e-6\n assert (np.abs(output.asnumpy() - expect) < tol).all()\n\n\[email protected]\[email protected]_x86_gpu_training\[email protected]_onecard\ndef test_scipy_cholesky_fp32():\n \"\"\"\n Feature: ALL TO ALL\n Description: test cases for new scipy cholesky [N,N]\n Expectation: the result match scipy cholesky\n \"\"\"\n a = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]]).astype(np.float32)\n tensor_a = Tensor(a)\n cholesky = ScipyNetCholesky(lower=True, clean=False)\n output = cholesky(tensor_a)\n\n cholesky1 = ScipyNetCholesky(lower=False, clean=False)\n output1 = cholesky1(tensor_a)\n\n expect = scp.linalg.cholesky(a, lower=True)\n expect1 = scp.linalg.cholesky(a, lower=False)\n\n rtol = 1.e-4\n atol = 1.e-5\n assert np.allclose(expect, output.asnumpy(), rtol=rtol, atol=atol)\n assert np.allclose(expect1, output1.asnumpy(), rtol=rtol, atol=atol)\n"
] | [
[
"numpy.array",
"scipy.linalg.cholesky",
"numpy.linalg.cholesky"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
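The reference factorizations those tests compare against, computed directly with NumPy and SciPy on the same matrix (NumPy returns the lower factor; SciPy lets you choose):

```python
import numpy as np
import scipy.linalg

a = np.array([[4, 12, -16], [12, 37, -43], [-16, -43, 98]], dtype=np.float32)

l_np = np.linalg.cholesky(a)                  # lower triangular: a == l_np @ l_np.T
l_sp = scipy.linalg.cholesky(a, lower=True)   # matches l_np
u_sp = scipy.linalg.cholesky(a, lower=False)  # upper triangular: a == u_sp.T @ u_sp

assert np.allclose(l_np, l_sp, rtol=1e-4, atol=1e-5)
assert np.allclose(l_np @ l_np.T, a, atol=1e-3)
assert np.allclose(u_sp.T @ u_sp, a, atol=1e-3)
```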
derrowap/DNC-TensorFlow | [
"3e9ad109f8101265ae422ba9c20e058aa70ef7df"
] | [
"src/testing/external_memory_test.py"
] | [
"\"\"\"Tests the ExternalMemory class implementation.\"\"\"\n\nimport tensorflow as tf\nimport unittest\n\nfrom .. dnc import external_memory\nfrom numpy.testing import assert_array_almost_equal\n\n\ndef suite():\n \"\"\"Create testing suite for all tests in this module.\"\"\"\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(ExternalMemoryTest))\n return suite\n\n\nclass ExternalMemoryTest(unittest.TestCase):\n \"\"\"Tests for the ExternalMemory class.\"\"\"\n\n def test_construction(self):\n \"\"\"Test the construction of an ExternalMemory.\"\"\"\n memory = external_memory.ExternalMemory()\n self.assertIsInstance(memory, external_memory.ExternalMemory)\n\n def test_content_weights(self):\n \"\"\"Test content_weights function.\"\"\"\n graph = tf.Graph()\n with graph.as_default():\n with tf.Session(graph=graph) as sess:\n diff = 1e-4 # take into account floating precision\n tests = [\n { # equal content addressed probabilities\n 'memory': [[[1, 0, 1], [1, 0, 1], [1, 0, 1]]],\n 'read_keys': [[[1, 1, 1]]],\n 'read_strengths': [[1]],\n 'equal_indices': [[[0, 1, 2]]],\n 'indices_sorted': [],\n }, { # similar slots of memory have greater values in c_t\n 'memory': [[[1, 1, 1], [1, 0, 1], [0, 0, 1]]],\n 'read_keys': [[[1, 1, 1]]],\n 'read_strengths': [[1]],\n 'equal_indices': [],\n 'indices_sorted': [[[0, 1, 2]]],\n }, { # unequal dimensions of memory for NxM\n 'memory': [[[-5, -4], [-3, -2], [-1, 0], [1, 2],\n [3, 4]]],\n 'read_keys': [[[-1, 0], [-1, 0]]],\n 'read_strengths': [[1]],\n 'equal_indices': [],\n 'indices_sorted': [[[2, 3, 4], [2, 1, 0]]],\n }, { # independent slots are equal in probability\n 'memory': [[[1, 2], [50, 60], [1, 2], [-30, 70],\n [1, 2], [120, 85]]],\n 'read_keys': [[[1, 2]]],\n 'read_strengths': [[1]],\n 'equal_indices': [[[0, 2, 4]]],\n 'indices_sorted': [],\n }, { # tests that num_reads > 1 works as expected\n 'memory': [[[1, 1, 1], [1, 1, 1],\n [1, 1, 1], [1, 1, 1]]],\n 'read_keys': [[[1, 1, 1], [1, 1, 1], [1, 1, 1]]],\n 'read_strengths': [[1, 1, 1]],\n 'equal_indices': [[[0, 1, 2], [0, 1, 2], [0, 1, 2]]],\n 'indices_sorted': [],\n }, { # test batch sizes work as expected\n 'memory': [[[1, 0, 1], [1, 0, 1], [1, 0, 1]],\n [[1, 1, 1], [1, 0, 1], [0, 0, 1]],\n [[1, 1, 1], [1, 0, 1], [0, 0, 1]]],\n 'read_keys': [[[1, 1, 1]], [[1, 1, 1]], [[0, 0, 1]]],\n 'read_strengths': [[1], [1], [1]],\n 'equal_indices': [[[0, 1, 2]], [], []],\n 'indices_sorted': [[], [[0, 1, 2]], [[2, 1, 0]]],\n },\n ]\n for test in tests:\n mem = external_memory.ExternalMemory(\n memory_size=len(test['memory'][0]),\n word_size=len(test['memory'][0][0]))\n memory = tf.constant(test['memory'], dtype=tf.float32)\n read_keys = tf.constant(test['read_keys'],\n dtype=tf.float32)\n read_strengths = tf.constant(test['read_strengths'],\n dtype=tf.float32)\n c_t_op = mem.content_weights(read_keys, read_strengths,\n memory)\n c_t = sess.run(c_t_op)\n batch_num = 0\n for equal_indices_batch in test['equal_indices']:\n read_num = 0\n for equal_indices in equal_indices_batch:\n for i in range(len(equal_indices) - 1):\n index1 = equal_indices[i]\n index2 = equal_indices[i + 1]\n self.assertTrue(\n abs(c_t[batch_num][read_num][index1] -\n c_t[batch_num][read_num][index2]) <=\n diff,\n msg=\"Test {}: batch {} -> expected index\"\n \" {} and {} to be equal but were {} \"\n \"and {}\".format(\n tests.index(test), batch_num,\n index1, index2,\n c_t[batch_num][read_num][index1],\n c_t[batch_num][read_num][index2]))\n read_num += 1\n batch_num += 1\n batch_num = 0\n for sorted_batch in 
test['indices_sorted']:\n read_num = 0\n for sorted in sorted_batch:\n for i in range(len(sorted) - 1):\n index1 = sorted[i]\n index2 = sorted[i + 1]\n self.assertTrue(\n c_t[batch_num][read_num][index1] >\n c_t[batch_num][read_num][index2],\n msg=\"Test {}: batch {} -> c_t[{}]={} not >\"\n \" c_t[{}]={}\".format(\n tests.index(test), batch_num,\n index1,\n c_t[batch_num][read_num][index1],\n index2,\n c_t[batch_num][read_num][index2]))\n read_num += 1\n batch_num += 1\n\n def test_content_weights_read_strenghts(self):\n \"\"\"Test content_weights function for effective read_strenghts.\"\"\"\n graph = tf.Graph()\n with graph.as_default():\n with tf.Session(graph=graph) as sess:\n diff = 1e-4 # take into account floating precision\n tests = [\n { # tests that num_reads > 1 works as expected\n 'memory': [[[1, 1, 1], [1, 1, 1],\n [1, 1, 1], [1, 1, 1]]],\n 'read_keys': [[[1, 1, 1], [1, 1, 1], [1, 1, 1]]],\n 'read_strengths': [[1, 2, 3]],\n 'indices_sorted': [[0, 1, 2]],\n }, {\n 'memory': [[[1, 1], [1, 1]], [[1, 1], [1, 1]]],\n 'read_keys': [[[1, 1], [1, 1]], [[1, 1], [1, 1]]],\n 'read_strengths': [[1, 2], [2, 1]],\n 'indices_sorted': [[0, 1], [1, 0]],\n },\n ]\n for test in tests:\n mem = external_memory.ExternalMemory(\n memory_size=len(test['memory'][0]),\n word_size=len(test['memory'][0][0]))\n memory = tf.constant(test['memory'], dtype=tf.float32)\n read_keys = tf.constant(test['read_keys'],\n dtype=tf.float32)\n read_strengths = tf.constant(test['read_strengths'],\n dtype=tf.float32)\n c_t_op = mem.content_weights(read_keys, read_strengths,\n memory)\n c_t = sess.run(c_t_op)\n batch_num = 0\n for sorted_reads in test['indices_sorted']:\n for i in range(len(sorted_reads) - 1):\n index1 = sorted_reads[i]\n index2 = sorted_reads[i + 1]\n for j in range(len(c_t[batch_num][index1])):\n self.assertTrue(\n abs(c_t[batch_num][index1][j] -\n c_t[batch_num][index2][j]) <= diff,\n msg=\"Test {}: expected c_t[{}][{}][{}] = \"\n \"{} == c_t[{}][{}][{}] = {}\".format(\n tests.index(test),\n batch_num, index1, j,\n c_t[batch_num][index1][j],\n batch_num, index2, j,\n c_t[batch_num][index2][j]))\n batch_num += 1\n\n def test_write_operation(self):\n \"\"\"Test writing in external memory.\"\"\"\n graph = tf.Graph()\n with graph.as_default():\n with tf.Session(graph=graph) as sess:\n tests = [\n { # do not erase or add memory, stay the same\n 'memory': [[[1, 2, 3], [4, 5, 6]],\n [[8, 9, 10], [11, 12, 13]]],\n 'write_weightings': [[0, 0], [0, 0]],\n 'erase_vector': [[0, 0, 0], [0, 0, 0]],\n 'write_vector': [[0, 0, 0], [0, 0, 0]],\n 'expected': [[[1, 2, 3], [4, 5, 6]],\n [[8, 9, 10], [11, 12, 13]]],\n }, { # basic write operation\n 'memory': [[[0, 0, 0], [0, 0, 0], [0, 0, 0]]],\n 'write_weightings': [[1, 1, 1]],\n 'erase_vector': [[0, 0, 0]],\n 'write_vector': [[1, 1, 1]],\n 'expected': [[[1, 1, 1], [1, 1, 1], [1, 1, 1]]],\n }, { # tests multiple batches\n 'memory': [[[0, 0], [0, 0]],\n [[0, 0], [0, 0]],\n [[0, 0], [0, 0]]],\n 'write_weightings': [[1, 1], [1, 1], [1, 1]],\n 'erase_vector': [[0, 0], [0, 0], [0, 0]],\n 'write_vector': [[1, 2], [3, 4], [5, 6]],\n 'expected': [[[1, 2], [1, 2]],\n [[3, 4], [3, 4]],\n [[5, 6], [5, 6]]],\n }, { # write weightings heighten written values in add\n 'memory': [[[0, 0], [0, 0]]],\n 'write_weightings': [[1, 2]],\n 'erase_vector': [[0, 0]],\n 'write_vector': [[1, 1]],\n 'expected': [[[1, 1], [2, 2]]],\n }, { # erase vector erases correct positions\n 'memory': [[[1, 2], [3, 4]]],\n 'write_weightings': [[1, 1]],\n 'erase_vector': [[.5, 1]], # 0.5 erases half the value\n 
'write_vector': [[0, 0]],\n 'expected': [[[.5, 0], [1.5, 0]]],\n }, { # write weighting of 0 prevents erase to that slot\n 'memory': [[[1, 2], [3, 4]]],\n 'write_weightings': [[0, 1]],\n 'erase_vector': [[.5, 1]],\n 'write_vector': [[0, 0]],\n 'expected': [[[1, 2], [1.5, 0]]],\n }, { # write weighting of 0 prevents adding to that slot\n 'memory': [[[0, 0], [0, 0]]],\n 'write_weightings': [[1, 0]],\n 'erase_vector': [[0, 0]],\n 'write_vector': [[1, 1]],\n 'expected': [[[1, 1], [0, 0]]],\n }, { # write weighting heightens erase operation\n 'memory': [[[1, 1], [1, 1]]],\n 'write_weightings': [[.5, 1.5]],\n 'erase_vector': [[.2, .5]],\n 'write_vector': [[0, 0]],\n 'expected': [[[.9, .75], [.7, .25]]],\n }, { # write and erase at same time\n 'memory': [[[1, 2], [3, 4]]],\n 'write_weightings': [[1, 1]],\n 'erase_vector': [[0.5, 1]],\n 'write_vector': [[2, 4]],\n 'expected': [[[2.5, 4], [3.5, 4]]],\n },\n ]\n for test in tests:\n mem = external_memory.ExternalMemory(\n memory_size=len(test['memory'][0]),\n word_size=len(test['memory'][0][0]))\n write_weightings = tf.constant(\n test['write_weightings'], dtype=tf.float32)\n erase_vector = tf.constant(\n test['erase_vector'], dtype=tf.float32)\n write_vector = tf.constant(\n test['write_vector'], dtype=tf.float32)\n memory = tf.constant(\n test['memory'], dtype=tf.float32)\n next_memory_op = mem.write(write_weightings, erase_vector,\n write_vector, memory)\n got = sess.run(next_memory_op)\n assert_array_almost_equal(test['expected'], got)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n"
] | [
[
"tensorflow.Graph",
"tensorflow.constant",
"tensorflow.Session",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
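A rough NumPy sketch of the content-based addressing that `content_weights` is tested for above: cosine similarity between a read key and each memory row, scaled by the read strength and softmax-normalized. This follows the DNC formulation; the actual ExternalMemory implementation may differ in details.

```python
import numpy as np

def content_weights(memory, read_key, read_strength, eps=1e-8):
    # memory: (N, W) matrix of memory slots, read_key: (W,) vector
    dots = memory @ read_key
    norms = np.linalg.norm(memory, axis=1) * np.linalg.norm(read_key) + eps
    scores = read_strength * (dots / norms)   # cosine similarity, sharpened by strength
    exp = np.exp(scores - scores.max())
    return exp / exp.sum()                    # softmax over memory slots

memory = np.array([[1., 1., 1.], [1., 0., 1.], [0., 0., 1.]])
print(content_weights(memory, np.array([1., 1., 1.]), read_strength=1.0))
# the first slot matches the key best, so it receives the largest weight
```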
qgallouedec/stable-baselines3-contrib | [
"bec00386d14b505015c54413cd5cd968e6f85c72"
] | [
"sb3_contrib/common/maskable/distributions.py"
] | [
"from abc import ABC, abstractmethod\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\nimport torch as th\nfrom gym import spaces\nfrom stable_baselines3.common.distributions import Distribution\nfrom torch import nn\nfrom torch.distributions import Categorical\nfrom torch.distributions.utils import logits_to_probs\n\n\nclass MaskableCategorical(Categorical):\n \"\"\"\n Modified PyTorch Categorical distribution with support for invalid action masking.\n\n To instantiate, must provide either probs or logits, but not both.\n\n :param probs: Tensor containing finite non-negative values, which will be renormalized\n to sum to 1 along the last dimension.\n :param logits: Tensor of unnormalized log probabilities.\n :param validate_args: Whether or not to validate that arguments to methods like lob_prob()\n and icdf() match the distribution's shape, support, etc.\n :param masks: An optional boolean ndarray of compatible shape with the distribution.\n If True, the corresponding choice's logit value is preserved. If False, it is set to a\n large negative value, resulting in near 0 probability.\n \"\"\"\n\n def __init__(\n self,\n probs: Optional[th.Tensor] = None,\n logits: Optional[th.Tensor] = None,\n validate_args: Optional[bool] = None,\n masks: Optional[np.ndarray] = None,\n ):\n self.masks: Optional[th.Tensor] = None\n super().__init__(probs, logits, validate_args)\n self._original_logits = self.logits\n self.apply_masking(masks)\n\n def apply_masking(self, masks: Optional[np.ndarray]) -> None:\n \"\"\"\n Eliminate (\"mask out\") chosen categorical outcomes by setting their probability to 0.\n\n :param masks: An optional boolean ndarray of compatible shape with the distribution.\n If True, the corresponding choice's logit value is preserved. If False, it is set\n to a large negative value, resulting in near 0 probability. If masks is None, any\n previously applied masking is removed, and the original logits are restored.\n \"\"\"\n\n if masks is not None:\n device = self.logits.device\n self.masks = th.as_tensor(masks, dtype=th.bool, device=device).reshape(self.logits.shape)\n HUGE_NEG = th.tensor(-1e8, dtype=self.logits.dtype, device=device)\n\n logits = th.where(self.masks, self._original_logits, HUGE_NEG)\n else:\n self.masks = None\n logits = self._original_logits\n\n # Reinitialize with updated logits\n super().__init__(logits=logits)\n\n # self.probs may already be cached, so we must force an update\n self.probs = logits_to_probs(self.logits)\n\n def entropy(self) -> th.Tensor:\n if self.masks is None:\n return super().entropy()\n\n # Highly negative logits don't result in 0 probs, so we must replace\n # with 0s to ensure 0 contribution to the distribution's entropy, since\n # masked actions possess no uncertainty.\n device = self.logits.device\n p_log_p = self.logits * self.probs\n p_log_p = th.where(self.masks, p_log_p, th.tensor(0.0, device=device))\n return -p_log_p.sum(-1)\n\n\nclass MaskableDistribution(Distribution, ABC):\n @abstractmethod\n def apply_masking(self, masks: Optional[np.ndarray]) -> None:\n \"\"\"\n Eliminate (\"mask out\") chosen distribution outcomes by setting their probability to 0.\n\n :param masks: An optional boolean ndarray of compatible shape with the distribution.\n If True, the corresponding choice's logit value is preserved. If False, it is set\n to a large negative value, resulting in near 0 probability. 
If masks is None, any\n previously applied masking is removed, and the original logits are restored.\n \"\"\"\n\n\nclass MaskableCategoricalDistribution(MaskableDistribution):\n \"\"\"\n Categorical distribution for discrete actions. Supports invalid action masking.\n\n :param action_dim: Number of discrete actions\n \"\"\"\n\n def __init__(self, action_dim: int):\n super().__init__()\n self.distribution: Optional[MaskableCategorical] = None\n self.action_dim = action_dim\n\n def proba_distribution_net(self, latent_dim: int) -> nn.Module:\n \"\"\"\n Create the layer that represents the distribution:\n it will be the logits of the Categorical distribution.\n You can then get probabilities using a softmax.\n\n :param latent_dim: Dimension of the last layer\n of the policy network (before the action layer)\n :return:\n \"\"\"\n action_logits = nn.Linear(latent_dim, self.action_dim)\n return action_logits\n\n def proba_distribution(self, action_logits: th.Tensor) -> \"MaskableCategoricalDistribution\":\n # Restructure shape to align with logits\n reshaped_logits = action_logits.view(-1, self.action_dim)\n self.distribution = MaskableCategorical(logits=reshaped_logits)\n return self\n\n def log_prob(self, actions: th.Tensor) -> th.Tensor:\n assert self.distribution is not None, \"Must set distribution parameters\"\n return self.distribution.log_prob(actions)\n\n def entropy(self) -> th.Tensor:\n assert self.distribution is not None, \"Must set distribution parameters\"\n return self.distribution.entropy()\n\n def sample(self) -> th.Tensor:\n assert self.distribution is not None, \"Must set distribution parameters\"\n return self.distribution.sample()\n\n def mode(self) -> th.Tensor:\n assert self.distribution is not None, \"Must set distribution parameters\"\n return th.argmax(self.distribution.probs, dim=1)\n\n def actions_from_params(self, action_logits: th.Tensor, deterministic: bool = False) -> th.Tensor:\n # Update the proba distribution\n self.proba_distribution(action_logits)\n return self.get_actions(deterministic=deterministic)\n\n def log_prob_from_params(self, action_logits: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n actions = self.actions_from_params(action_logits)\n log_prob = self.log_prob(actions)\n return actions, log_prob\n\n def apply_masking(self, masks: Optional[np.ndarray]) -> None:\n assert self.distribution is not None, \"Must set distribution parameters\"\n self.distribution.apply_masking(masks)\n\n\nclass MaskableMultiCategoricalDistribution(MaskableDistribution):\n \"\"\"\n MultiCategorical distribution for multi discrete actions. 
Supports invalid action masking.\n\n :param action_dims: List of sizes of discrete action spaces\n \"\"\"\n\n def __init__(self, action_dims: List[int]):\n super().__init__()\n self.distributions: List[MaskableCategorical] = []\n self.action_dims = action_dims\n\n def proba_distribution_net(self, latent_dim: int) -> nn.Module:\n \"\"\"\n Create the layer that represents the distribution:\n it will be the logits (flattened) of the MultiCategorical distribution.\n You can then get probabilities using a softmax on each sub-space.\n\n :param latent_dim: Dimension of the last layer\n of the policy network (before the action layer)\n :return:\n \"\"\"\n\n action_logits = nn.Linear(latent_dim, sum(self.action_dims))\n return action_logits\n\n def proba_distribution(self, action_logits: th.Tensor) -> \"MaskableMultiCategoricalDistribution\":\n # Restructure shape to align with logits\n reshaped_logits = action_logits.view(-1, sum(self.action_dims))\n\n self.distributions = [\n MaskableCategorical(logits=split) for split in th.split(reshaped_logits, tuple(self.action_dims), dim=1)\n ]\n return self\n\n def log_prob(self, actions: th.Tensor) -> th.Tensor:\n assert len(self.distributions) > 0, \"Must set distribution parameters\"\n\n # Restructure shape to align with each categorical\n actions = actions.view(-1, len(self.action_dims))\n\n # Extract each discrete action and compute log prob for their respective distributions\n return th.stack(\n [dist.log_prob(action) for dist, action in zip(self.distributions, th.unbind(actions, dim=1))], dim=1\n ).sum(dim=1)\n\n def entropy(self) -> th.Tensor:\n assert len(self.distributions) > 0, \"Must set distribution parameters\"\n return th.stack([dist.entropy() for dist in self.distributions], dim=1).sum(dim=1)\n\n def sample(self) -> th.Tensor:\n assert len(self.distributions) > 0, \"Must set distribution parameters\"\n return th.stack([dist.sample() for dist in self.distributions], dim=1)\n\n def mode(self) -> th.Tensor:\n assert len(self.distributions) > 0, \"Must set distribution parameters\"\n return th.stack([th.argmax(dist.probs, dim=1) for dist in self.distributions], dim=1)\n\n def actions_from_params(self, action_logits: th.Tensor, deterministic: bool = False) -> th.Tensor:\n # Update the proba distribution\n self.proba_distribution(action_logits)\n return self.get_actions(deterministic=deterministic)\n\n def log_prob_from_params(self, action_logits: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:\n actions = self.actions_from_params(action_logits)\n log_prob = self.log_prob(actions)\n return actions, log_prob\n\n def apply_masking(self, masks: Optional[np.ndarray]) -> None:\n assert len(self.distributions) > 0, \"Must set distribution parameters\"\n\n split_masks = [None] * len(self.distributions)\n if masks is not None:\n masks = th.as_tensor(masks)\n\n # Restructure shape to align with logits\n masks = masks.view(-1, sum(self.action_dims))\n\n # Then split columnwise for each discrete action\n split_masks = th.split(masks, tuple(self.action_dims), dim=1)\n\n for distribution, mask in zip(self.distributions, split_masks):\n distribution.apply_masking(mask)\n\n\nclass MaskableBernoulliDistribution(MaskableMultiCategoricalDistribution):\n \"\"\"\n Bernoulli distribution for multibinary actions. 
Supports invalid action masking.\n\n :param action_dim: Number of binary actions\n \"\"\"\n\n def __init__(self, action_dim: int):\n # Two states per binary action\n action_dims = [2] * action_dim\n super().__init__(action_dims)\n\n\ndef make_masked_proba_distribution(action_space: spaces.Space) -> MaskableDistribution:\n \"\"\"\n Return an instance of Distribution for the correct type of action space\n\n :param action_space: the input action space\n :return: the appropriate Distribution object\n \"\"\"\n\n if isinstance(action_space, spaces.Discrete):\n return MaskableCategoricalDistribution(action_space.n)\n elif isinstance(action_space, spaces.MultiDiscrete):\n return MaskableMultiCategoricalDistribution(action_space.nvec)\n elif isinstance(action_space, spaces.MultiBinary):\n return MaskableBernoulliDistribution(action_space.n)\n else:\n raise NotImplementedError(\n \"Error: probability distribution, not implemented for action space\"\n f\"of type {type(action_space)}.\"\n \" Must be of type Gym Spaces: Discrete, MultiDiscrete.\"\n )\n"
] | [
[
"torch.distributions.utils.logits_to_probs",
"torch.tensor",
"torch.nn.Linear",
"torch.as_tensor",
"torch.where",
"torch.unbind",
"torch.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
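A small standalone illustration of the masking trick used by `MaskableCategorical.apply_masking` above: disallowed actions get a very large negative logit, so their softmax probability collapses to (numerically) zero.

```python
import torch as th

logits = th.tensor([[1.0, 2.0, 0.5, -1.0]])
mask = th.tensor([[True, False, True, False]])   # True = action allowed

HUGE_NEG = th.tensor(-1e8, dtype=logits.dtype)
masked_logits = th.where(mask, logits, HUGE_NEG)
probs = th.softmax(masked_logits, dim=-1)
print(probs)   # only the two allowed actions carry probability mass
```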
siyuzhou/NRI | [
"9d7d52b3d8fdc150ff91408ac0edce6e0ddda1bc"
] | [
"lstm_baseline.py"
] | [
"from __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport argparse\nimport pickle\nimport os\nimport datetime\n\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\n\nfrom utils import *\nfrom modules import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='Disables CUDA training.')\nparser.add_argument('--seed', type=int, default=42, help='Random seed.')\nparser.add_argument('--epochs', type=int, default=500,\n help='Number of epochs to train.')\nparser.add_argument('--batch-size', type=int, default=128,\n help='Number of samples per batch.')\nparser.add_argument('--lr', type=float, default=0.0005,\n help='Initial learning rate.')\nparser.add_argument('--hidden', type=int, default=256,\n help='Number of hidden units.')\nparser.add_argument('--num_atoms', type=int, default=5,\n help='Number of atoms in simulation.')\nparser.add_argument('--num-layers', type=int, default=2,\n help='Number of LSTM layers.')\nparser.add_argument('--suffix', type=str, default='_springs',\n help='Suffix for training data (e.g. \"_charged\".')\nparser.add_argument('--dropout', type=float, default=0.0,\n help='Dropout rate (1 - keep probability).')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='How many batches to wait before logging.')\nparser.add_argument('--save-folder', type=str, default='logs',\n help='Where to save the trained model.')\nparser.add_argument('--load-folder', type=str, default='',\n help='Where to load the trained model if finetunning. ' +\n 'Leave empty to train from scratch')\nparser.add_argument('--dims', type=int, default=4,\n help='The number of dimensions (position + velocity).')\nparser.add_argument('--timesteps', type=int, default=49,\n help='The number of time steps per sample.')\nparser.add_argument('--prediction-steps', type=int, default=10, metavar='N',\n help='Num steps to predict before using teacher forcing.')\nparser.add_argument('--lr-decay', type=int, default=200,\n help='After how epochs to decay LR by a factor of gamma.')\nparser.add_argument('--gamma', type=float, default=0.5,\n help='LR decay factor.')\nparser.add_argument('--motion', action='store_true', default=False,\n help='Use motion capture data loader.')\nparser.add_argument('--non-markov', action='store_true', default=False,\n help='Use non-Markovian evaluation setting.')\nparser.add_argument('--var', type=float, default=5e-5,\n help='Output variance.')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nprint(args)\n\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\nlog = None\n# Save model and meta-data. 
Always saves in a new folder.\nif args.save_folder:\n exp_counter = 0\n now = datetime.datetime.now()\n timestamp = now.isoformat()\n save_folder = '{}/exp{}/'.format(args.save_folder, timestamp)\n while os.path.isdir(save_folder):\n exp_counter += 1\n save_folder = os.path.join(args.save_folder,\n 'exp{}'.format(exp_counter))\n os.mkdir(save_folder)\n meta_file = os.path.join(save_folder, 'metadata.pkl')\n model_file = os.path.join(save_folder, 'model.pt')\n\n log_file = os.path.join(save_folder, 'log.txt')\n log = open(log_file, 'w')\n\n pickle.dump({'args': args}, open(meta_file, \"wb\"))\n\nelse:\n print(\"WARNING: No save_folder provided!\" +\n \"Testing (within this script) will throw an error.\")\n\ntrain_loader, valid_loader, test_loader, loc_max, loc_min, vel_max, vel_min = load_data(\n args.batch_size, args.suffix)\n\n\nclass RecurrentBaseline(nn.Module):\n \"\"\"LSTM model for joint trajectory prediction.\"\"\"\n\n def __init__(self, n_in, n_hid, n_out, n_atoms, n_layers, do_prob=0.):\n super(RecurrentBaseline, self).__init__()\n self.fc1_1 = nn.Linear(n_in, n_hid)\n self.fc1_2 = nn.Linear(n_hid, n_hid)\n self.rnn = nn.LSTM(n_atoms * n_hid, n_atoms * n_hid, n_layers)\n self.fc2_1 = nn.Linear(n_atoms * n_hid, n_atoms * n_hid)\n self.fc2_2 = nn.Linear(n_atoms * n_hid, n_atoms * n_out)\n\n self.bn = nn.BatchNorm1d(n_out)\n self.dropout_prob = do_prob\n\n self.init_weights()\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_normal(m.weight.data)\n m.bias.data.fill_(0.1)\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def batch_norm(self, inputs):\n x = inputs.view(inputs.size(0) * inputs.size(1), -1)\n x = self.bn(x)\n return x.view(inputs.size(0), inputs.size(1), -1)\n\n def step(self, ins, hidden=None):\n # Input shape: [num_sims, n_atoms, n_in]\n x = F.relu(self.fc1_1(ins))\n x = F.dropout(x, self.dropout_prob, training=self.training)\n x = F.relu(self.fc1_2(x))\n x = x.view(ins.size(0), -1)\n # [num_sims, n_atoms*n_hid]\n\n x = x.unsqueeze(0)\n x, hidden = self.rnn(x, hidden)\n x = x[0, :, :]\n\n x = F.relu(self.fc2_1(x))\n x = self.fc2_2(x)\n # [num_sims, n_out*n_atoms]\n\n x = x.view(ins.size(0), ins.size(1), -1)\n # [num_sims, n_atoms, n_out]\n\n # Predict position/velocity difference\n x = x + ins\n\n return x, hidden\n\n def forward(self, inputs, prediction_steps, burn_in=False, burn_in_steps=1):\n\n # Input shape: [num_sims, num_things, num_timesteps, n_in]\n\n outputs = []\n hidden = None\n\n for step in range(0, inputs.size(2) - 1):\n\n if burn_in:\n if step <= burn_in_steps:\n ins = inputs[:, :, step, :]\n else:\n ins = outputs[step - 1]\n else:\n # Use ground truth trajectory input vs. 
last prediction\n if not step % prediction_steps:\n ins = inputs[:, :, step, :]\n else:\n ins = outputs[step - 1]\n\n output, hidden = self.step(ins, hidden)\n\n # Predict position/velocity difference\n outputs.append(output)\n\n outputs = torch.stack(outputs, dim=2)\n\n return outputs\n\n\nmodel = RecurrentBaseline(args.dims, args.hidden, args.dims,\n args.num_atoms, args.num_layers, args.dropout)\nif args.load_folder:\n model_file = os.path.join(args.load_folder, 'model.pt')\n model.load_state_dict(torch.load(model_file))\n args.save_folder = False\n\noptimizer = optim.Adam(list(model.parameters()), lr=args.lr)\nscheduler = lr_scheduler.StepLR(optimizer, step_size=args.lr_decay,\n gamma=args.gamma)\n\n# Linear indices of an upper triangular mx, used for loss calculation\ntriu_indices = get_triu_offdiag_indices(args.num_atoms)\n\nif args.cuda:\n model.cuda()\n\n\ndef train(epoch, best_val_loss):\n t = time.time()\n loss_train = []\n loss_val = []\n mse_baseline_train = []\n mse_baseline_val = []\n mse_train = []\n mse_val = []\n\n model.train()\n scheduler.step()\n for batch_idx, (data, relations) in enumerate(train_loader):\n\n if args.cuda:\n data, relations = data.cuda(), relations.cuda()\n data, relations = Variable(data), Variable(relations)\n\n optimizer.zero_grad()\n\n output = model(data, 100,\n burn_in=True,\n burn_in_steps=args.timesteps - args.prediction_steps)\n\n target = data[:, :, 1:, :]\n loss = nll_gaussian(output, target, args.var)\n\n mse = F.mse_loss(output, target)\n mse_baseline = F.mse_loss(data[:, :, :-1, :], data[:, :, 1:, :])\n\n loss.backward()\n optimizer.step()\n\n loss_train.append(loss.data[0])\n mse_train.append(mse.data[0])\n mse_baseline_train.append(mse_baseline.data[0])\n\n model.eval()\n for batch_idx, (data, relations) in enumerate(valid_loader):\n if args.cuda:\n data, relations = data.cuda(), relations.cuda()\n data, relations = Variable(data, requires_grad=False), Variable(\n relations, requires_grad=False)\n\n output = model(data, 1)\n\n target = data[:, :, 1:, :]\n\n loss = nll_gaussian(output, target, args.var)\n\n mse = F.mse_loss(output, target)\n mse_baseline = F.mse_loss(data[:, :, :-1, :], data[:, :, 1:, :])\n\n loss_val.append(loss.data[0])\n mse_val.append(mse.data[0])\n mse_baseline_val.append(mse_baseline.data[0])\n\n print('Epoch: {:04d}'.format(epoch),\n 'nll_train: {:.10f}'.format(np.mean(loss_train)),\n 'mse_train: {:.12f}'.format(np.mean(mse_train)),\n 'mse_baseline_train: {:.10f}'.format(np.mean(mse_baseline_train)),\n 'nll_val: {:.10f}'.format(np.mean(loss_val)),\n 'mse_val: {:.12f}'.format(np.mean(mse_val)),\n 'mse_baseline_val: {:.10f}'.format(np.mean(mse_baseline_val)),\n 'time: {:.4f}s'.format(time.time() - t))\n if args.save_folder and np.mean(loss_val) < best_val_loss:\n torch.save(model.state_dict(), model_file)\n print('Best model so far, saving...')\n print('Epoch: {:04d}'.format(epoch),\n 'nll_train: {:.10f}'.format(np.mean(loss_train)),\n 'mse_train: {:.12f}'.format(np.mean(mse_train)),\n 'mse_baseline_train: {:.10f}'.format(np.mean(mse_baseline_train)),\n 'nll_val: {:.10f}'.format(np.mean(loss_val)),\n 'mse_val: {:.12f}'.format(np.mean(mse_val)),\n 'mse_baseline_val: {:.10f}'.format(np.mean(mse_baseline_val)),\n 'time: {:.4f}s'.format(time.time() - t), file=log)\n log.flush()\n return np.mean(loss_val)\n\n\ndef test():\n loss_test = []\n mse_baseline_test = []\n mse_test = []\n tot_mse = 0\n tot_mse_baseline = 0\n counter = 0\n\n model.eval()\n model.load_state_dict(torch.load(model_file))\n for batch_idx, (inputs, 
relations) in enumerate(test_loader):\n\n assert (inputs.size(2) - args.timesteps) >= args.timesteps\n\n if args.cuda:\n inputs = inputs.cuda()\n else:\n inputs = inputs.contiguous()\n inputs = Variable(inputs, volatile=True)\n\n ins_cut = inputs[:, :, -args.timesteps:, :].contiguous()\n\n output = model(ins_cut, 1)\n\n target = ins_cut[:, :, 1:, :]\n\n loss = nll_gaussian(output, target, args.var)\n\n mse = F.mse_loss(output, target)\n mse_baseline = F.mse_loss(ins_cut[:, :, :-1, :], ins_cut[:, :, 1:, :])\n\n loss_test.append(loss.data[0])\n mse_test.append(mse.data[0])\n mse_baseline_test.append(mse_baseline.data[0])\n\n if args.motion or args.non_markov:\n # RNN decoder evaluation setting\n\n # For plotting purposes\n output = model(inputs, 100, burn_in=True,\n burn_in_steps=args.timesteps)\n\n output = output[:, :, args.timesteps:, :]\n target = inputs[:, :, -args.timesteps:, :]\n mse = ((target - output) ** 2).mean(dim=0).mean(dim=0).mean(dim=-1)\n tot_mse += mse.data.cpu().numpy()\n counter += 1\n\n # Baseline over multiple steps\n baseline = inputs[:, :, -(args.timesteps + 1):-args.timesteps,\n :].expand_as(\n target)\n mse_baseline = ((target - baseline) ** 2).mean(dim=0).mean(\n dim=0).mean(\n dim=-1)\n tot_mse_baseline += mse_baseline.data.cpu().numpy()\n\n else:\n\n # For plotting purposes\n output = model(inputs, 100, burn_in=True,\n burn_in_steps=args.timesteps)\n\n output = output[:, :, args.timesteps:args.timesteps + 20, :]\n target = inputs[:, :, args.timesteps + 1:args.timesteps + 21, :]\n\n mse = ((target - output) ** 2).mean(dim=0).mean(dim=0).mean(dim=-1)\n tot_mse += mse.data.cpu().numpy()\n counter += 1\n\n # Baseline over multiple steps\n baseline = inputs[:, :, args.timesteps:args.timesteps + 1,\n :].expand_as(\n target)\n mse_baseline = ((target - baseline) ** 2).mean(dim=0).mean(\n dim=0).mean(\n dim=-1)\n tot_mse_baseline += mse_baseline.data.cpu().numpy()\n\n mean_mse = tot_mse / counter\n mse_str = '['\n for mse_step in mean_mse[:-1]:\n mse_str += \" {:.12f} ,\".format(mse_step)\n mse_str += \" {:.12f} \".format(mean_mse[-1])\n mse_str += ']'\n\n mean_mse_baseline = tot_mse_baseline / counter\n mse_baseline_str = '['\n for mse_step in mean_mse_baseline[:-1]:\n mse_baseline_str += \" {:.12f} ,\".format(mse_step)\n mse_baseline_str += \" {:.12f} \".format(mean_mse_baseline[-1])\n mse_baseline_str += ']'\n\n print('--------------------------------')\n print('--------Testing-----------------')\n print('--------------------------------')\n print('nll_test: {:.10f}'.format(np.mean(loss_test)),\n 'mse_test: {:.12f}'.format(np.mean(mse_test)),\n 'mse_baseline_test: {:.10f}'.format(np.mean(mse_baseline_test)))\n print('MSE: {}'.format(mse_str))\n print('MSE Baseline: {}'.format(mse_baseline_str))\n if args.save_folder:\n print('--------------------------------', file=log)\n print('--------Testing-----------------', file=log)\n print('--------------------------------', file=log)\n print('nll_test: {:.10f}'.format(np.mean(loss_test)),\n 'mse_test: {:.12f}'.format(np.mean(mse_test)),\n 'mse_baseline_test: {:.10f}'.format(np.mean(mse_baseline_test)),\n file=log)\n print('MSE: {}'.format(mse_str), file=log)\n print('MSE Baseline: {}'.format(mse_baseline_str), file=log)\n log.flush()\n\n\n# Train model\nt_total = time.time()\nbest_val_loss = np.inf\nbest_epoch = 0\nfor epoch in range(args.epochs):\n val_loss = train(epoch, best_val_loss)\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n best_epoch = epoch\nprint(\"Optimization Finished!\")\nprint(\"Best Epoch: 
{:04d}\".format(best_epoch))\nif args.save_folder:\n print(\"Best Epoch: {:04d}\".format(best_epoch), file=log)\n log.flush()\ntest()\nif log is not None:\n print(save_folder)\n log.close()\n"
] | [
[
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
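The learning-rate schedule wired up in the script above, isolated (step_size is shortened here so the decay is visible; the script defaults are --lr-decay 200 and --gamma 0.5):

```python
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler

model = nn.Linear(4, 4)                  # placeholder model
optimizer = optim.Adam(model.parameters(), lr=5e-4)
scheduler = lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5)

for epoch in range(6):
    optimizer.step()                     # stands in for one training epoch
    scheduler.step()
    print(epoch, optimizer.param_groups[0]["lr"])
```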
Patrickgsheng/GCN_detection_benchmark | [
"ded59653accc61aeeb8e437c2ea9203e4fe9e500"
] | [
"gcn/load_cora_data.py"
] | [
"import json\nimport networkx as nx\nfrom networkx.readwrite import json_graph\nimport numpy as np\nimport scipy.sparse as sp\nimport os\nfrom matplotlib import pyplot as plt\n\ndef sample_mask(idx, l):\n \"\"\"Create mask.\"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\ndef load_cora_data(path,prefix, normalize=True):\n G_data = json.load(open(path + prefix + \"-G.json\"))\n G = json_graph.node_link_graph(G_data)\n # change graph adjacency matrix to sparse matrix format\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(G.adj))\n print(\"The number of edges\")\n edge_num = G.number_of_edges()\n print(edge_num)\n print(\"The number of nodes\")\n nodes_num = G.number_of_nodes()\n print(nodes_num)\n\n if isinstance(G.nodes()[0], int):\n conversion = lambda n: int(n)\n else:\n conversion = lambda n: n\n\n if os.path.exists(path+prefix + \"-feats.npy\"):\n feats = np.load(path+prefix + \"-feats.npy\")\n else:\n print(\"No features present.. Only identity features will be used.\")\n feats = None\n id_map = json.load(open(path+prefix + \"-id_map.json\"))\n id_map = {conversion(k): int(v) for k, v in id_map.items()}\n\n # just print the id_map keys range:\n # id_map_range = np.sort(id_map.keys())\n walks = []\n class_map = json.load(open(path + prefix + \"-class_map.json\"))\n if isinstance(list(class_map.values())[0], list):\n lab_conversion = lambda n: n\n else:\n lab_conversion = lambda n: int(n)\n\n class_map = {conversion(k): lab_conversion(v) for k, v in class_map.items()}\n\n # just print the class_map keys range:\n class_map_int_list = []\n for j in class_map.keys():\n class_map_int_list.append(int(j))\n class_map_range = np.sort(class_map_int_list)\n\n # generate y_train, y_val, y_test ndarray\n y_train = np.array([0, 0, 0, 0, 0, 0, 0])\n y_val = np.array([0, 0, 0, 0, 0, 0, 0])\n y_test = np.array([0, 0, 0, 0, 0, 0, 0])\n idx_train = range(140)\n idx_val = []\n idx_test = []\n for node in G.nodes():\n if node in idx_train:\n print(\"Train,currrent n is %d\" % node)\n train_label = G.node[node]['labels']\n train_label = np.array(train_label)\n y_train = np.vstack((y_train, train_label))\n y_val = np.vstack((y_val, [0, 0, 0, 0, 0, 0, 0]))\n y_test = np.vstack((y_test, [0, 0, 0, 0, 0, 0, 0]))\n elif G.node[node]['test'] == False and G.node[node]['val'] == False:\n print(\"no label id,currrent n is %d\" % node)\n y_train = np.vstack((y_train, [0, 0, 0, 0, 0, 0, 0]))\n y_val = np.vstack((y_val, [0, 0, 0, 0, 0, 0, 0]))\n y_test = np.vstack((y_test, [0, 0, 0, 0, 0, 0, 0]))\n elif G.node[node]['test'] == False and G.node[node]['val'] == True:\n print(\"Validation, current n is %d\" % node)\n validation_label = G.node[node]['labels']\n validation_label = np.array(validation_label)\n y_val = np.vstack((y_val, validation_label))\n y_train = np.vstack((y_train, [0, 0, 0, 0, 0, 0, 0]))\n y_test = np.vstack((y_test, [0, 0, 0, 0, 0, 0, 0]))\n idx_val.append(node)\n elif G.node[node]['test'] == True and G.node[node]['val'] == False:\n print(\"Test, current n is %d\" % node)\n test_label = G.node[node]['labels']\n test_label = np.array(test_label)\n y_test = np.vstack((y_test, test_label))\n y_train = np.vstack((y_train, [0, 0, 0, 0, 0, 0, 0]))\n y_val = np.vstack((y_val, [0, 0, 0, 0, 0, 0, 0]))\n idx_test.append(node)\n\n print(\"training label shape is\")\n print(y_train.shape)\n y_train = np.delete(y_train, 0, axis=0)\n y_val = np.delete(y_val, 0, axis=0)\n y_test = np.delete(y_test, 0, axis=0)\n\n # generate train_mask, val_mask and test_mask\n train_mask = 
sample_mask(idx_train, len(G.node))\n val_mask = sample_mask(idx_val, len(G.node))\n test_mask = sample_mask(idx_test, len(G.node))\n\n # check how many train_mask is true:\n train_true_num = np.count_nonzero(train_mask)\n # Similarly for val_mask, test_mask\n val_true_num = np.count_nonzero(val_mask)\n test_true_num = np.count_nonzero(test_mask)\n\n\n node_degrees = list(G.degree().values())\n print(\"the maximum degree of the graph is %d\" % max(node_degrees))\n\n ## Remove all nodes that do not have val/test annotations\n ## (necessary because of networkx weirdness with the Reddit data)\n broken_count = 0\n for node in G.nodes():\n if not 'val' in G.node[node] or not 'test' in G.node[node]:\n G.remove_node(node)\n broken_count += 1\n print(\"Removed {:d} nodes that lacked proper annotations due to networkx versioning issues\".format(broken_count))\n\n ## Make sure the graph has edge train_removed annotations\n ## (some datasets might already have this..)\n print(\"Loaded data.. now preprocessing..\")\n # add the train_removed Flag for each edge in G.edges\n # temp_useful_edges =0\n for edge in G.edges():\n if (G.node[edge[0]]['val'] or G.node[edge[1]]['val'] or\n G.node[edge[0]]['test'] or G.node[edge[1]]['test']):\n G[edge[0]][edge[1]]['train_removed'] = True\n else:\n G[edge[0]][edge[1]]['train_removed'] = False\n # temp_useful_edges+=1\n # print (G.node[edge[0]])\n # print (\"The real edges that are taken account in is %d\" %(temp_useful_edges))\n # 1432 useful edges marked with train_removed = False\n\n ''' Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean \n and standard deviation are then stored to be used on later data using the transform method. \n If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn \n from other features correctly as expected.\n '''\n '''\n if normalize and not feats is None:\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n scaler.fit(feats)\n feats = scaler.transform(feats)\n '''\n\n feats = sp.csr_matrix(feats)\n\n '''\n # visualize the graph\n options = {\n 'arrows': True,\n 'node_color': 'blue',\n 'node_size': .05,\n 'line_color': 'black',\n 'linewidths': 1,\n 'width': 0.1,\n 'with_labels': False,\n 'node_shape': '.',\n 'node_list': range(G.number_of_nodes())\n }\n nx.draw_networkx(G, **options)\n #plt.savefig('/Users/april/Downloads/GraphSAGE_Benchmark-master/processed/kb/' + '/vis.png', dpi=1024)\n '''\n # print the diameter(maximum distance) of G\n '''\n k = nx.connected_component_subgraphs(G)\n diameter_list = []\n for i in k:\n print(\"Nodes in compoent.\", i.nodes())\n diameter_list.append(nx.diameter(i))\n '''\n return adj, feats, y_train, y_val, y_test, train_mask, val_mask, test_mask\n"
] | [
[
"numpy.sort",
"scipy.sparse.csr_matrix",
"numpy.delete",
"numpy.count_nonzero",
"numpy.load",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
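The load_cora_data record above builds boolean node masks and grows one-hot label matrices with numpy (numpy.zeros, numpy.array, numpy.vstack in its API list). A minimal sketch of that masking pattern, with made-up node counts and class indices rather than anything read from the Cora files:

```python
import numpy as np

def sample_mask(idx, n):
    """Boolean mask with True at the given node indices."""
    mask = np.zeros(n, dtype=bool)  # plain bool avoids the deprecated np.bool alias
    mask[idx] = True
    return mask

# Illustrative sizes: 5 nodes, 7 classes, nodes 0-2 in the training split.
n_nodes, n_classes = 5, 7
train_idx = [0, 1, 2]
train_mask = sample_mask(train_idx, n_nodes)

# The loader grows y_train row by row with np.vstack; preallocating and
# zeroing out the non-training rows gives the same shape without the loop.
labels = np.zeros((n_nodes, n_classes))
labels[np.arange(n_nodes), [2, 5, 1, 0, 6]] = 1  # made-up class per node
y_train = labels * train_mask[:, None]

print(train_mask)           # [ True  True  True False False]
print(y_train.sum(axis=1))  # [1. 1. 1. 0. 0.]
```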
ZilongJi/HippocampalSWRDynamics | [
"cda23b7fcc8a97222238a26c15d011d3593be39b"
] | [
"replay_structure/read_write.py"
] | [
"import pickle\nimport compress_pickle\nimport os\nimport numpy as np\nimport pandas as pd\nfrom typing import Optional, Union\n\nfrom replay_structure.metadata import (\n DATA_PATH,\n Likelihood_Function,\n RESULTS_PATH,\n DATA_PATH_O2,\n RESULTS_PATH_O2,\n Model_Name,\n Momentum,\n Data_Type_Name,\n Session_Name,\n SessionSpikemat_Name,\n Simulated_Session_Name,\n Session_Indicator,\n)\nfrom replay_structure.ratday_preprocessing import RatDay_Preprocessing\nfrom replay_structure.ripple_preprocessing import Ripple_Preprocessing\nfrom replay_structure.run_snippet_preprocessing import Run_Snippet_Preprocessing\nfrom replay_structure.highsynchronyevents import HighSynchronyEvents_Preprocessing\nfrom replay_structure.structure_analysis_input import Structure_Analysis_Input\nfrom replay_structure.structure_models_gridsearch import Structure_Gridsearch\nfrom replay_structure.structure_trajectory import Most_Likely_Trajectories\nfrom replay_structure.marginals import All_Models_Marginals\nfrom replay_structure.model_comparison import (\n Gridsearch_Marginalization,\n Model_Comparison,\n Factorial_Model_Comparison,\n)\nfrom replay_structure.deviance_models import Deviance_Explained\nfrom replay_structure.diffusion_constant import Diffusion_Constant\n\nfrom replay_structure.model_recovery import Model_Recovery_Trajectory_Set\nfrom replay_structure.simulated_neural_data import Simulated_Data_Preprocessing\nfrom replay_structure.pf_analysis import PF_Analysis\n\n\ndef load_data(filename, print_filename=True):\n if print_filename:\n print(\"loading \", filename)\n with open(filename, \"rb\") as file_object:\n raw_data = file_object.read()\n deserialized = pickle.loads(raw_data)\n return deserialized\n\n\ndef load_compressed_data(filename, print_filename=True):\n if print_filename:\n print(\"loading \", filename)\n with open(filename, \"rb\") as file_object:\n raw_data = file_object.read()\n deserialized = compress_pickle.loads(raw_data, \"gzip\")\n return deserialized\n\n\ndef save_data(data, filename, print_filename=True):\n if print_filename:\n print(\"saving \", filename)\n serialized = pickle.dumps(data)\n with open(filename, \"wb\") as file_object:\n file_object.write(serialized)\n\n\ndef save_compressed_data(data, filename, print_filename=True):\n if print_filename:\n print(\"saving \", filename)\n serialized = compress_pickle.dumps(data, \"gzip\")\n with open(filename, \"wb\") as file_object:\n file_object.write(serialized)\n\n\n# ----\n\n\ndef save_ratday_data(\n ratday: RatDay_Preprocessing,\n session_indicator: Session_Name,\n bin_size_cm: int = 4,\n placefields_rotated: bool = False,\n ext=\"\",\n) -> None:\n if placefields_rotated:\n filename = os.path.join(\n DATA_PATH,\n \"ratday\",\n f\"{session_indicator}_{bin_size_cm}cm_placefields_rotated{ext}.obj\",\n )\n else:\n filename = os.path.join(\n DATA_PATH, \"ratday\", f\"{session_indicator}_{bin_size_cm}cm{ext}.obj\"\n )\n save_data(ratday, filename)\n\n\ndef load_ratday_data(\n session_indicator: Session_Name,\n bin_size_cm: int = 4,\n placefields_rotated: bool = False,\n ext=\"\",\n) -> RatDay_Preprocessing:\n if placefields_rotated:\n filename = os.path.join(\n DATA_PATH,\n \"ratday\",\n f\"{session_indicator}_{bin_size_cm}cm_placefields_rotated{ext}.obj\",\n )\n else:\n filename = os.path.join(\n DATA_PATH, \"ratday\", f\"{session_indicator}_{bin_size_cm}cm{ext}.obj\"\n )\n ratday = load_data(filename)\n return ratday\n\n\n# ------\n\n\ndef save_spikemat_data(\n spikemat_data: Union[\n Ripple_Preprocessing,\n 
Run_Snippet_Preprocessing,\n Simulated_Data_Preprocessing,\n HighSynchronyEvents_Preprocessing,\n ],\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> None:\n filename = os.path.join(\n DATA_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms{ext}.obj\",\n )\n save_data(spikemat_data, filename)\n\n\ndef load_spikemat_data(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n bin_size_cm: int = 4,\n ext=\"\",\n print_filename: bool = True,\n) -> Union[\n Ripple_Preprocessing,\n Run_Snippet_Preprocessing,\n Simulated_Data_Preprocessing,\n HighSynchronyEvents_Preprocessing,\n]:\n filename = os.path.join(\n DATA_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms{ext}.obj\",\n )\n spikemat_data = load_data(filename, print_filename=print_filename)\n return spikemat_data\n\n\n# ----\n\n\ndef save_structure_data(\n structure_data: Structure_Analysis_Input,\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> None:\n if isinstance(session_indicator, SessionSpikemat_Name):\n folder = \"spikemat_structure_analysis_input\"\n else:\n folder = \"structure_analysis_input\"\n\n filename = os.path.join(\n DATA_PATH,\n folder,\n f\"{session_indicator}_{data_type}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}{ext}.obj\",\n )\n save_data(structure_data, filename)\n\n\ndef load_structure_data(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n o2: bool = False,\n ext=\"\",\n print_filename: bool = True,\n) -> Structure_Analysis_Input:\n if isinstance(session_indicator, SessionSpikemat_Name):\n folder = \"spikemat_structure_analysis_input\"\n else:\n folder = \"structure_analysis_input\"\n filename = os.path.join(\n f\"{DATA_PATH_O2 if o2 else DATA_PATH}\",\n folder,\n f\"{session_indicator}_{data_type}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}{ext}.obj\",\n )\n structure_data = load_data(filename, print_filename)\n return structure_data\n\n\n# ----\n\n\ndef save_structure_model_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n model: Model_Name,\n model_evidences: np.ndarray,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> None:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{model}{ext}.obj\",\n )\n save_data(model_evidences, filename)\n\n\ndef load_structure_model_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n model: Model_Name,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> np.ndarray:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{model}{ext}.obj\",\n )\n model_evidences = load_data(filename)\n return model_evidences\n\n\n# ----\n\n\ndef save_gridsearch_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n model: Model_Name,\n gridsearch_results: 
Structure_Gridsearch,\n bin_size_cm: int = 4,\n spikemat_ind: Optional[int] = None,\n o2: bool = False,\n ext=\"\",\n) -> None:\n if spikemat_ind is not None:\n filename = os.path.join(\n f\"{RESULTS_PATH_O2 if o2 else RESULTS_PATH}\",\n str(data_type),\n f\"{session_indicator}_spikemat{spikemat_ind}_{bin_size_cm}cm_\"\n f\"{time_window_ms}ms_{likelihood_function}_{model}_gridsearch{ext}.obj\",\n )\n else:\n filename = os.path.join(\n f\"{RESULTS_PATH_O2 if o2 else RESULTS_PATH}\",\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{model}_gridsearch{ext}.obj\",\n )\n save_data(gridsearch_results, filename)\n\n\ndef load_gridsearch_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n model: Model_Name,\n bin_size_cm: int = 4,\n spikemat_ind: Optional[int] = None,\n # o2: bool = False,\n print_filename=True,\n ext=\"\",\n) -> Optional[Structure_Gridsearch]:\n if spikemat_ind is not None:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_spikemat{spikemat_ind}_{bin_size_cm}cm_\"\n f\"{time_window_ms}ms_{likelihood_function}_{model}_gridsearch{ext}.obj\",\n )\n else:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{model}_gridsearch{ext}.obj\",\n )\n if os.path.isfile(filename):\n # if datetime.fromtimestamp(os.path.getmtime(filename)).month == 2:\n gridsearch_results = load_data(filename, print_filename=print_filename)\n return gridsearch_results\n else:\n print(f\"No file: {filename}\")\n return None\n\n\ndef aggregate_momentum_gridsearch(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n ext=\"\",\n):\n # get n_ripples\n structure_data = load_structure_data(\n session_indicator,\n time_window_ms,\n data_type,\n likelihood_function,\n bin_size_cm=bin_size_cm,\n )\n n_spikemats = len(structure_data.spikemats)\n\n # load first ripple and get gridsearch parameters\n momentum_gridsearch_0 = load_gridsearch_results(\n session_indicator,\n time_window_ms,\n data_type,\n likelihood_function,\n Momentum(),\n spikemat_ind=0,\n bin_size_cm=bin_size_cm,\n print_filename=True,\n ext=ext,\n )\n if momentum_gridsearch_0 is None:\n momentum_gridsearch_0 = load_gridsearch_results(\n session_indicator,\n time_window_ms,\n data_type,\n likelihood_function,\n Momentum(),\n spikemat_ind=2,\n bin_size_cm=bin_size_cm,\n print_filename=False,\n ext=ext,\n )\n assert isinstance(momentum_gridsearch_0, Structure_Gridsearch)\n n_sd = len(momentum_gridsearch_0.gridsearch_params[\"sd_array_meters\"])\n n_decay = len(momentum_gridsearch_0.gridsearch_params[\"decay_array\"])\n\n # fill out gridsearch_results\n gridsearch_results = np.full((n_spikemats, n_sd, n_decay), np.nan)\n to_run_on_o2_medium = np.array([])\n for ripple in range(n_spikemats):\n ripple_gridsearch = load_gridsearch_results(\n session_indicator,\n time_window_ms,\n data_type,\n likelihood_function,\n Momentum(),\n spikemat_ind=ripple,\n bin_size_cm=bin_size_cm,\n print_filename=False,\n ext=ext,\n )\n if isinstance(ripple_gridsearch, Structure_Gridsearch):\n gridsearch_results[ripple] = ripple_gridsearch.gridsearch_results\n else:\n to_run_on_o2_medium = np.append(to_run_on_o2_medium, ripple)\n print(\n f\"Session: {session_indicator}, run on o2 medium: 
{to_run_on_o2_medium}, \"\n f\"{len(to_run_on_o2_medium)} ripples total\"\n )\n\n # replace gridsearch_results and save\n momentum_gridsearch_aggregated = momentum_gridsearch_0\n momentum_gridsearch_aggregated.gridsearch_results = gridsearch_results\n save_gridsearch_results(\n session_indicator,\n time_window_ms,\n data_type,\n likelihood_function,\n Momentum(),\n momentum_gridsearch_aggregated,\n bin_size_cm=bin_size_cm,\n ext=ext,\n )\n\n\n# -----\n\n\ndef save_marginalized_gridsearch_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n model: Model_Name,\n marginalized_gridsearch: Gridsearch_Marginalization,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> None:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{model}_gridsearch_marginalization{ext}.obj\",\n )\n save_data(marginalized_gridsearch, filename)\n\n\ndef load_marginalized_gridsearch_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n model: Model_Name,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> Gridsearch_Marginalization:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{model}_gridsearch_marginalization{ext}.obj\",\n )\n marginalized_gridsearch = load_data(filename)\n return marginalized_gridsearch\n\n\n# -----\n\n\ndef save_model_comparison_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n mc_results: Model_Comparison,\n bin_size_cm: int = 4,\n ext=\"\",\n):\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_model_comparison{ext}.obj\",\n )\n save_data(mc_results, filename)\n\n\ndef load_model_comparison_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> Model_Comparison:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_model_comparison{ext}.obj\",\n )\n mc_results = load_data(filename)\n return mc_results\n\n\n# -----\n\n\ndef save_factorial_model_comparison_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n # likelihood_function: Likelihood_Function,\n mc_results: Factorial_Model_Comparison,\n bin_size_cm: int = 4,\n ext=\"\",\n):\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"factorial_model_comparison{ext}.obj\",\n )\n save_data(mc_results, filename)\n\n\ndef load_factorial_model_comparison_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n # likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> Factorial_Model_Comparison:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"factorial_model_comparison{ext}.obj\",\n )\n mc_results = load_data(filename)\n return mc_results\n\n\n# -----\n\n\ndef save_deviance_explained_results(\n 
session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n deviance_results: Deviance_Explained,\n bin_size_cm: int = 4,\n ext=\"\",\n):\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_deviance_explained{ext}.obj\",\n )\n save_data(deviance_results, filename)\n\n\ndef load_deviance_explained_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> Deviance_Explained:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_deviance_explained{ext}.obj\",\n )\n deviance_results = load_data(filename)\n return deviance_results\n\n\n# -----\n\n\ndef save_trajectory_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n trajectory_results: Most_Likely_Trajectories,\n bin_size_cm: int = 4,\n o2=False,\n ext=\"\",\n) -> None:\n # if data_type == \"ripple\":\n filename = os.path.join(\n f\"{RESULTS_PATH_O2 if o2 else RESULTS_PATH}\",\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_trajectories{ext}.obj\",\n )\n save_data(trajectory_results, filename)\n\n\ndef load_trajectory_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> Most_Likely_Trajectories:\n # if data_type == \"ripple\":\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_trajectories{ext}.obj\",\n )\n trajectory_results = load_data(filename)\n return trajectory_results\n\n\n# -----\n\n\ndef save_marginals(\n session_indicator: Session_Indicator,\n spikemat_ind: int,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n marginals: All_Models_Marginals,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> None:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_spikemat{spikemat_ind}_{bin_size_cm}cm_\"\n f\"{time_window_ms}ms_{likelihood_function}_marginals{ext}.obj\",\n )\n save_data(marginals, filename)\n\n\ndef load_marginals(\n session_indicator: Session_Indicator,\n spikemat_ind: int,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> All_Models_Marginals:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_spikemat{spikemat_ind}_{bin_size_cm}cm_\"\n f\"{time_window_ms}ms_{likelihood_function}_marginals{ext}.obj\",\n )\n marginals = load_data(filename, print_filename=False)\n return marginals\n\n\ndef save_diffusion_marginals(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n marginals: dict,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> None:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_\"\n f\"{time_window_ms}ms_{likelihood_function}_diffusion_marginals{ext}.obj\",\n )\n save_data(marginals, filename)\n\n\ndef 
load_diffusion_marginals(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> dict:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_\"\n f\"{time_window_ms}ms_{likelihood_function}_diffusion_marginals{ext}.obj\",\n )\n marginals = load_data(filename)\n return marginals\n\n\n# -----\n\n\ndef save_diffusion_constant_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n diffusion_constant_results: Diffusion_Constant,\n trajectory_type: str,\n bin_size_cm: int = 4,\n bin_space: bool = False,\n ext=\"\",\n) -> None:\n if bin_space:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{trajectory_type}_binned_trajectories_\"\n f\"diffusion_constant{ext}.obj\",\n )\n else:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{trajectory_type}_trajectories_\"\n f\"diffusion_constant{ext}.obj\",\n )\n save_data(diffusion_constant_results, filename)\n\n\ndef load_diffusion_constant_results(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n trajectory_type: str,\n bin_size_cm: int = 4,\n bin_space: bool = False,\n ext=\"\",\n) -> Diffusion_Constant:\n if bin_space:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{trajectory_type}_binned_trajectories_\"\n f\"diffusion_constant{ext}.obj\",\n )\n else:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{trajectory_type}_trajectories_\"\n f\"diffusion_constant{ext}.obj\",\n )\n diffusion_constant_results = load_data(filename)\n return diffusion_constant_results\n\n\n# -----\n\n\ndef save_descriptive_stats(\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n descriptive_stats: pd.DataFrame,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> None:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"descriptive_stats_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}{ext}.csv\",\n )\n descriptive_stats.to_csv(filename)\n\n\ndef load_descriptive_stats(\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n bin_size_cm: int = 4,\n ext=\"\",\n) -> pd.DataFrame:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"descriptive_stats_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}{ext}.csv\",\n )\n descriptive_stats = pd.read_csv(filename)\n return descriptive_stats\n\n\n# -----\n\n\ndef save_predictive_analysis(\n time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n trajectory_type: str,\n predictive_analysis,\n ext=\"\",\n bin_size_cm: int = 4,\n) -> None:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"predictive_analysis_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{trajectory_type}trajectories{ext}.obj\",\n )\n save_data(predictive_analysis, filename)\n\n\ndef load_predictive_analysis(\n 
time_window_ms: int,\n data_type: Data_Type_Name,\n likelihood_function: Likelihood_Function,\n trajectory_type: str,\n bin_size_cm: int = 4,\n ext=\"\",\n):\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"predictive_analysis_{bin_size_cm}cm_{time_window_ms}ms_\"\n f\"{likelihood_function}_{trajectory_type}trajectories{ext}.obj\",\n )\n predictive_analysis = load_data(filename)\n return predictive_analysis\n\n\n# -----\n\n\ndef save_model_recovery_simulated_trajectory_set(\n trajectory_set: Model_Recovery_Trajectory_Set,\n session_indicator: Simulated_Session_Name,\n data_type: Data_Type_Name,\n ext=\"\",\n):\n filename = os.path.join(\n DATA_PATH,\n str(data_type),\n f\"{session_indicator}_simulated_trajectories{ext}.obj\",\n )\n save_data(trajectory_set, filename)\n\n\ndef load_model_recovery_simulated_trajectory_set(\n data_type: Data_Type_Name, session_indicator: Simulated_Session_Name, ext=\"\"\n) -> Model_Recovery_Trajectory_Set:\n filename = os.path.join(\n DATA_PATH,\n str(data_type),\n f\"{session_indicator}_simulated_trajectories{ext}.obj\",\n )\n trajectory_set = load_data(filename)\n return trajectory_set\n\n\n# ------\n\n\ndef save_pf_analysis(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n map_analysis: PF_Analysis,\n decoding_type: str,\n bin_size_cm: int = 4,\n ext=\"\",\n):\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_pf_analysis_\"\n f\"{decoding_type}{ext}.obj\",\n )\n save_data(map_analysis, filename)\n\n\ndef load_pf_analysis(\n session_indicator: Session_Indicator,\n time_window_ms: int,\n data_type: Data_Type_Name,\n decoding_type: str,\n bin_size_cm: int = 4,\n ext=\"\",\n print_filename: bool = True,\n) -> PF_Analysis:\n filename = os.path.join(\n RESULTS_PATH,\n str(data_type),\n f\"{session_indicator}_{bin_size_cm}cm_{time_window_ms}ms_pf_analysis_\"\n f\"{decoding_type}{ext}.obj\",\n )\n map_analysis = load_data(filename, print_filename=print_filename)\n return map_analysis\n"
] | [
[
"numpy.append",
"numpy.array",
"pandas.read_csv",
"numpy.full"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
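The read_write record above is a set of save_*/load_* wrappers around pickle (and compress_pickle with gzip) plus filename construction from session metadata. A minimal stdlib-only sketch of the round-trip pattern, using a temporary directory and an illustrative payload instead of the module's DATA_PATH/RESULTS_PATH constants:

```python
import os
import pickle
import tempfile

def save_data(data, filename):
    """Serialize an object to disk, mirroring the wrapper style above."""
    with open(filename, "wb") as file_object:
        file_object.write(pickle.dumps(data))

def load_data(filename):
    """Read the bytes back and deserialize."""
    with open(filename, "rb") as file_object:
        return pickle.loads(file_object.read())

with tempfile.TemporaryDirectory() as tmp_dir:
    path = os.path.join(tmp_dir, "session_4cm_3ms.obj")  # illustrative name
    save_data({"bin_size_cm": 4, "time_window_ms": 3}, path)
    print(load_data(path))  # {'bin_size_cm': 4, 'time_window_ms': 3}
```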
CnBDM-Su/kdd2021_lsc_sy | [
"7e7be19a3c9751355470f4e60c518d077572281f"
] | [
"examples/lsc/mag240m/year_voting.py"
] | [
"import time\nimport argparse\nfrom tqdm import tqdm\n\nimport torch\nimport numpy as np\nimport pandas as pd\n\nfrom ogb.lsc import MAG240MDataset, MAG240MEvaluator\nfrom root import ROOT\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--device', type=int, default=0)\n parser.add_argument('--hidden_channels', type=int, default=512)\n parser.add_argument('--num_layers', type=int, default=2),\n parser.add_argument('--no_batch_norm', action='store_true')\n parser.add_argument('--relu_last', action='store_true')\n parser.add_argument('--dropout', type=float, default=0.5)\n parser.add_argument('--lr', type=float, default=0.01)\n parser.add_argument('--batch_size', type=int, default=380000)\n parser.add_argument('--epochs', type=int, default=1000)\n parser.add_argument('--evaluate', type=bool, default=False)\n parser.add_argument('--p_batch_size', type=int, default=40000)\n parser.add_argument('--mini_graph', type=bool, default=False)\n args = parser.parse_args()\n print(args)\n\n torch.manual_seed(12345)\n device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'\n\n dataset = MAG240MDataset(ROOT)\n evaluator = MAG240MEvaluator()\n\n train_idx = np.load(f'{dataset.dir}/mini_graph/train_idx.npy')\n valid_idx = np.load(f'{dataset.dir}/mini_graph/valid_idx.npy')\n test_idx = np.load(f'{dataset.dir}/mini_graph/test_idx.npy')\n\n ap_edge = np.load(f'{dataset.dir}/mini_graph/sorted_author_paper_edge.npy')\n idx = np.concatenate([train_idx,valid_idx],0)\n\n year = np.load(f'{dataset.dir}/mini_graph/paper_year.npy')\n label = np.load(f'{dataset.dir}/mini_graph/paper_label.npy')\n\n paper_author_list = []\n author_list = []\n bias = 0\n print('--reading paper author list--')\n for i in tqdm(range(idx.shape[0])):\n i = idx[i]\n for j in range(bias, ap_edge.shape[1]):\n if i == ap_edge[1,j]:\n paper_author_list.append([i,ap_edge[0,j]])\n author_list.append(ap_edge[0,j])\n if i < ap_edge[1,j]:\n bias = j\n break\n author_list = np.unique(author_list)\n\n ap_edge = np.load(f'{dataset.dir}/mini_graph/author_paper_edge.npy')\n\n print('--reading author paper list--')\n bias = 0\n author_paper_list = []\n paper_list = []\n num = 0\n for i in tqdm(range(author_list.shape[0])):\n i = author_list[i]\n sig = 0\n for j in range(bias, ap_edge.shape[1]):\n if i == ap_edge[0, j]:\n if ap_edge[1, j] in idx:\n sig = 1\n author_paper_list.append([i,ap_edge[1, j]])\n paper_list.append(ap_edge[1, j])\n if i < ap_edge[0, j]:\n bias = j\n break\n if sig==1:\n num += 1\n print('known author num:',num)\n # paper_list = np.unique(paper_list)\n #\n # related_year = year[paper_list]\n # related_label = label[paper_list]\n #\n # target1 = pd.DataFrame(author_paper_list,columns=['author','ind'])\n # target2 = pd.DataFrame(np.concatenate([paper_list.reshape(-1,1),np.array(related_year).reshape(-1,1),np.array(related_label).reshape(-1,1)],1),columns=['ind','year','label'])\n # target2 = target2[target2.year<2019]\n #\n # target = pd.merge(target1,target2)\n #\n # result = []\n # print('resulting...')\n #\n # target = target.groupby('author').apply(lambda t: t[t.year == t.year.max()]).reset_index(drop=True)\n # target = target.fillna(-1)\n # target2 = target.groupby('author').label.agg(lambda x: x.value_counts().index[0]).reset_index()\n #\n #\n # target1 = pd.DataFrame(paper_author_list,columns=['ind','author'])\n # # target2 = pd.DataFrame(a_,columns=['author','label'])\n #\n # target = pd.merge(target1,target2)\n # target = pd.DataFrame(target.groupby('ind').label.agg(lambda 
x: x.value_counts().index[0]))\n #\n #\n #\n # y_pred = target.loc[valid_idx].label.values\n # y_true = label[valid_idx]\n #\n # if not isinstance(y_pred, torch.Tensor):\n # y_pred = torch.from_numpy(y_pred)\n # if not isinstance(y_true, torch.Tensor):\n # y_true = torch.from_numpy(y_true)\n #\n # assert (y_true.numel() == y_pred.numel())\n # assert (y_true.dim() == y_pred.dim() == 1)\n #\n # acc = int((y_true == y_pred).sum()) / y_true.numel()\n # print(acc)\n #\n #\n #\n #\n\n\n"
] | [
[
"numpy.unique",
"torch.manual_seed",
"numpy.concatenate",
"torch.cuda.is_available",
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
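The year_voting record above keeps its label-voting logic commented out; the intent is to assign each paper the most frequent label among the papers of its authors. A minimal numpy-only sketch of that majority vote, using numpy.unique from the record's API list and made-up labels:

```python
import numpy as np

# Illustrative labels collected from one author's already-labelled papers.
labels_for_author = np.array([3, 5, 3, 3, 7])

# Most frequent label wins; ties resolve to the smallest label via np.unique's ordering.
values, counts = np.unique(labels_for_author, return_counts=True)
majority_label = values[np.argmax(counts)]
print(majority_label)  # 3
```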
jokva/windrose | [
"99a2f636a6558a29e7ded63d0d233f25dc7986b6"
] | [
"samples/example_pdf_by.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n\n\"\"\"\nExample to create a PDF\nMonthly windrose axe\nOne figure per year\n\"\"\"\n\n\nimport click\n\nimport datetime\n# import time\n\nfrom math import pi\n\n# import matplotlib\nimport matplotlib.pyplot as plt\n# import matplotlib.animation\nfrom matplotlib.backends.backend_pdf import PdfPages\n# import matplotlib.cm as cm\n\nimport numpy as np\nfrom numpy import sin, cos\nimport pandas as pd\n\nfrom windrose import WindroseAxes, WindAxes, plot_windrose, clean # noqa\nfrom windrose import wrscatter, wrcontour, wrcontourf # noqa\n\nFIGSIZE_DEFAULT = (16, 12)\nS_FIGSIZE_DEFAULT = \",\".join(map(str, FIGSIZE_DEFAULT))\n\nDPI_DEFAULT = 40\n\n\ndef by_func_yearly(dt):\n return dt.year\n\n\ndef by_func_monthly(dt):\n return dt.year, dt.month\n\n\ndef by_func_daily(dt):\n return dt.year, dt.month, dt.day\n\n\[email protected]()\[email protected](\"--filename\", default=\"samples/sample_wind_poitiers.csv\", help=\"Input filename\")\[email protected](\"--filename_out\", default=\"windrose.pdf\", help=\"Output filename\")\[email protected](\"--dpi\", default=DPI_DEFAULT, help=\"Dot per inch for plot generation\")\[email protected](\"--figsize\", default=S_FIGSIZE_DEFAULT, help=\"Figure size x,y - default=%s\" % S_FIGSIZE_DEFAULT)\[email protected](\"--bins_min\", default=0.01, help=\"Bins minimum value\")\[email protected](\"--bins_max\", default=20, help=\"Bins maximum value\")\[email protected](\"--bins_step\", default=2, help=\"Bins step value\")\[email protected](\"--fontname\", default=\"Courier New\", help=\"Font name\")\[email protected](\"--show/--no-show\", default=False, help=\"Show figure\")\[email protected](\"--dt_from\", default='', help=\"Datetime from\")\[email protected](\"--dt_to\", default='', help=\"Datetime to\")\[email protected](\"--offset\", default=0, help=\"Axe figure offset\")\[email protected](\"--ncols\", default=4, help=\"Number of columns per figure\")\[email protected](\"--nrows\", default=3, help=\"Number of rows per figure\")\ndef main(filename, dt_from, dt_to, dpi, figsize,\n bins_min, bins_max, bins_step, ncols, nrows,\n fontname, show, filename_out, offset):\n\n # convert figsize (string like \"8,9\" to a list of float [8.0, 9.0]\n figsize = figsize.split(\",\")\n figsize = tuple(map(float, figsize))\n width, height = figsize\n\n # Read CSV file to a Pandas DataFrame\n df_all = pd.read_csv(filename)\n df_all['Timestamp'] = pd.to_datetime(df_all['Timestamp'])\n df_all = df_all.set_index('Timestamp')\n df_all.index = df_all.index.tz_localize('UTC').tz_convert('UTC')\n # df_all = df_all.iloc[-10000:,:]\n # df_all = df_all['2011-07-01':'2012-12-31']\n if dt_from == '':\n dt_from = df_all.index[0]\n if dt_to == '':\n dt_to = df_all.index[-1]\n df_all = df_all[dt_from:dt_to]\n\n # Get Numpy arrays from DataFrame\n direction_all = df_all['direction'].values\n var_all = df_all['speed'].values\n # index_all = df_all.index.to_datetime() # Fixed: .values -> to_datetime()\n by_all = df_all.index.map(by_func_monthly)\n by_unique = np.unique(by_all)\n print(by_unique)\n\n # Define bins\n # bins = np.arange(bins_min, bins_max, bins_step)\n\n with PdfPages(filename_out) as pdf:\n\n for i, by_value in enumerate(by_unique):\n print(\"processing: %s\" % str(by_value))\n\n if (i + offset) % (ncols * nrows) == 0 or i == 0:\n # Create figure and axes\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize, dpi=dpi, facecolor='w', edgecolor='w')\n print(\"%r\\n%r\\n%r\" % (fig, 
fig.axes, axs))\n\n i_sheet, sheet_pos = divmod(i + offset, ncols * nrows)\n i_row, i_col = divmod(sheet_pos, ncols)\n\n # ax = axs[i_row][i_col]\n ax = fig.axes[sheet_pos]\n\n mask = (pd.Series(by_all) == by_value).values\n\n # index = index_all[mask]\n var = var_all[mask]\n direction = direction_all[mask]\n\n # df = pd.DataFrame([var, direction], index=['Speed', 'Direction'], columns=index).transpose()\n # df.index.name = 'DateTime'\n # print(df)\n\n Vx = var * sin(pi / 180 * direction)\n Vy = var * cos(pi / 180 * direction)\n ax.scatter(Vx, Vy, alpha=0.1)\n v = 40\n ax.set_xlim(-v, v)\n ax.set_ylim(-v, v)\n\n # rect = [0.1, 0.1, 0.8, 0.8]\n # ax = WindroseAxes(fig, rect, facecolor='w')\n # wrscatter(direction, var, ax=ax) # ToFix!!!! TypeError: Input must be a 2D array.\n\n # print(direction)\n # print(var)\n # print(ax)\n # wrcontour(direction, var, ax=ax) # ToFix!!!! TypeError: Input must be a 2D array.\n\n # Same as above, but with contours over each filled region...\n # ToFix!!!! TypeError: Input must be a 2D array.\n # ax = WindroseAxes.from_ax(ax)\n # rect = [0.1, 0.1, 0.8, 0.8]\n # #axs[i_row][i_col] = WindroseAxes(fig, rect, facecolor='w')\n # #axs[i_row][i_col] = WindroseAxes.from_ax(fig=fig)\n # ax = WindroseAxes(fig, rect, facecolor='w')\n # fig.axes[i + offset] = ax\n # ax.contourf(direction, var, bins=bins, cmap=cm.hot)\n # ax.contour(direction, var, bins=bins, colors='black')\n\n # dt1 = index[0]\n # dt2 = index[-1]\n # dt1 = df.index[mask][0]\n # dt2 = df.index[mask][-1]\n # td = dt2 - dt1\n\n # title = by_value\n # title = \"From %s\\n to %s\" % (dt1, dt2)\n # title = \"%04d-%02d\" % (by_value[0], by_value[1])\n dt = datetime.date(by_value[0], by_value[1], 1)\n fmt = \"%B\" # \"%Y %B\" # Month\n title = dt.strftime(fmt)\n ax.set_title(title, fontname=fontname)\n\n # ax.set_legend()\n\n fig_title = dt.strftime(\"%Y\") # Year\n fig.suptitle(fig_title)\n\n remaining = (i + offset + 1) % (ncols * nrows)\n if remaining == 0:\n save_figure(fig, pdf, show, fig_title)\n\n if remaining != 0:\n save_figure(fig, pdf, show, fig_title)\n\n # time.sleep(10)\n\n print(\"Save file to '%s'\" % filename_out)\n\n print(\"remaining: %d\" % remaining)\n\n\ndef save_figure(fig, pdf, show, fig_title):\n filename = \"windrose_%s.png\" % fig_title\n print(\"save_figure: %s\" % filename)\n if show:\n plt.show()\n fig.savefig(filename) # Save to image\n pdf.savefig(fig)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.backends.backend_pdf.PdfPages",
"pandas.read_csv",
"pandas.to_datetime",
"pandas.Series",
"numpy.unique",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
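The windrose example above converts wind speed and direction into Cartesian components before scattering one panel per month. A minimal sketch of that decomposition, using numpy.sin/numpy.cos from the record's API list and made-up samples:

```python
import numpy as np

# Illustrative wind samples: speed in m/s, direction in degrees.
speed = np.array([3.0, 7.5, 12.0])
direction = np.array([0.0, 90.0, 225.0])

# Same decomposition the script applies before ax.scatter(Vx, Vy).
Vx = speed * np.sin(np.pi / 180 * direction)
Vy = speed * np.cos(np.pi / 180 * direction)
print(np.round(Vx, 2))  # [ 0.    7.5  -8.49]
print(np.round(Vy, 2))  # [ 3.    0.   -8.49]
```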
cmkumar87/corvid | [
"164113c5e0f0f9c7463f43213f38aeeeb6d5b05e"
] | [
"tests/test_semantic_table/test_semantic_table.py"
] | [
"\"\"\"\n\n\n\"\"\"\n\nimport unittest\n\nfrom numpy.testing import assert_array_equal\n\nfrom corvid.semantic_table.semantic_table import Cell, Table, SemanticTable, \\\n IdentitySemanticTable, LabelCollapseSemanticTable, NormalizationError\n\n\nclass TestSemanticTable(unittest.TestCase):\n def setUp(self):\n self.a = Cell(tokens=[''], index_topleft_row=0,\n index_topleft_col=0, rowspan=2, colspan=2)\n self.b = Cell(tokens=['C'], index_topleft_row=0,\n index_topleft_col=2, rowspan=1, colspan=2)\n self.c = Cell(tokens=['C:1'], index_topleft_row=1,\n index_topleft_col=2, rowspan=1, colspan=1)\n self.d = Cell(tokens=['C:2'], index_topleft_row=1,\n index_topleft_col=3, rowspan=1, colspan=1)\n self.e = Cell(tokens=['R'], index_topleft_row=2,\n index_topleft_col=0, rowspan=3, colspan=1)\n self.f = Cell(tokens=['R:1'], index_topleft_row=2,\n index_topleft_col=1, rowspan=1, colspan=1)\n self.g = Cell(tokens=['R:2'], index_topleft_row=3,\n index_topleft_col=1, rowspan=1, colspan=1)\n self.h = Cell(tokens=['R:3'], index_topleft_row=4,\n index_topleft_col=1, rowspan=1, colspan=1)\n self.i = Cell(tokens=['1'], index_topleft_row=2,\n index_topleft_col=2, rowspan=1, colspan=1)\n self.j = Cell(tokens=['2'], index_topleft_row=2,\n index_topleft_col=3, rowspan=1, colspan=1)\n self.k = Cell(tokens=['3'], index_topleft_row=3,\n index_topleft_col=2, rowspan=1, colspan=1)\n self.l = Cell(tokens=['4'], index_topleft_row=3,\n index_topleft_col=3, rowspan=1, colspan=1)\n self.m = Cell(tokens=['5'], index_topleft_row=4,\n index_topleft_col=2, rowspan=1, colspan=1)\n self.n = Cell(tokens=['6'], index_topleft_row=4,\n index_topleft_col=3, rowspan=1, colspan=1)\n\n self.table = Table(grid=[\n [self.a, self.a, self.b, self.b],\n [self.a, self.a, self.c, self.d],\n [self.e, self.f, self.i, self.j],\n [self.e, self.g, self.k, self.l],\n [self.e, self.h, self.m, self.n]\n ])\n\n self.i_semantic_table = IdentitySemanticTable(self.table)\n self.lc_semantic_table = LabelCollapseSemanticTable(self.table)\n\n def test_normalize_table(self):\n self.assertEqual(self.lc_semantic_table.normalized_table.nrow, 4)\n self.assertEqual(self.lc_semantic_table.normalized_table.ncol, 3)\n self.assertEqual(\n str(self.lc_semantic_table.normalized_table).replace(' ', ''),\n '\\tCC:1\\tCC:2\\nRR:1\\t1\\t2\\nRR:2\\t3\\t4\\nRR:3\\t5\\t6'\n )\n\n def test_insert_rows(self):\n pass\n\n def test_classify_cells(self):\n all_values_table = Table(cells=[\n Cell(tokens=['1'], index_topleft_row=0,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['2'], index_topleft_row=0,\n index_topleft_col=1, rowspan=1, colspan=1),\n Cell(tokens=['3'], index_topleft_row=1,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['4'], index_topleft_row=1,\n index_topleft_col=1, rowspan=1, colspan=1)\n ], nrow=2, ncol=2)\n labels, index_topmost_value_row, index_leftmost_value_col = \\\n self.lc_semantic_table._classify_cells(table=all_values_table)\n assert_array_equal(labels, [['VALUE', 'VALUE'], ['VALUE', 'VALUE']])\n self.assertEqual(index_topmost_value_row, 0)\n self.assertEqual(index_leftmost_value_col, 0)\n\n all_labels_table = Table(cells=[\n Cell(tokens=['a'], index_topleft_row=0,\n index_topleft_col=0, rowspan=1, colspan=1)\n ], nrow=1, ncol=1)\n labels, index_topmost_value_row, index_leftmost_value_col = \\\n self.lc_semantic_table._classify_cells(table=all_labels_table)\n assert_array_equal(labels, [['EMPTY']])\n self.assertEqual(index_topmost_value_row, 1)\n self.assertEqual(index_leftmost_value_col, 1)\n\n def 
test_merge_label_cells(self):\n all_values_table = Table(cells=[\n Cell(tokens=['1'], index_topleft_row=0,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['2'], index_topleft_row=0,\n index_topleft_col=1, rowspan=1, colspan=1),\n Cell(tokens=['3'], index_topleft_row=1,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['4'], index_topleft_row=1,\n index_topleft_col=1, rowspan=1, colspan=1)\n ], nrow=2, ncol=2)\n self.assertListEqual(\n self.lc_semantic_table._merge_label_cells(table=all_values_table,\n index_topmost_value_row=0,\n index_leftmost_value_col=0).cells,\n all_values_table.cells\n )\n\n merge_header_table = Table(cells=[\n Cell(tokens=['a'], index_topleft_row=0,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['b'], index_topleft_row=0,\n index_topleft_col=1, rowspan=1, colspan=1),\n Cell(tokens=['c'], index_topleft_row=1,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['d'], index_topleft_row=1,\n index_topleft_col=1, rowspan=1, colspan=1),\n Cell(tokens=['1'], index_topleft_row=2,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['2'], index_topleft_row=2,\n index_topleft_col=1, rowspan=1, colspan=1)\n ], nrow=3, ncol=2)\n collapsed_merge_header_table = self.lc_semantic_table._merge_label_cells(\n table=merge_header_table,\n index_topmost_value_row=2,\n index_leftmost_value_col=0)\n self.assertEqual(collapsed_merge_header_table.nrow, 2)\n self.assertEqual(collapsed_merge_header_table.ncol, 2)\n self.assertEqual(str(collapsed_merge_header_table).replace(' ', ''),\n 'ac\\tbd\\n1\\t2')\n\n merge_subject_table = Table(cells=[\n Cell(tokens=['a'], index_topleft_row=0,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['b'], index_topleft_row=0,\n index_topleft_col=1, rowspan=1, colspan=1),\n Cell(tokens=['1'], index_topleft_row=0,\n index_topleft_col=2, rowspan=1, colspan=1),\n Cell(tokens=['c'], index_topleft_row=1,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['d'], index_topleft_row=1,\n index_topleft_col=1, rowspan=1, colspan=1),\n Cell(tokens=['2'], index_topleft_row=1,\n index_topleft_col=2, rowspan=1, colspan=1)\n ], nrow=2, ncol=3)\n collapsed_merge_subject_table = self.lc_semantic_table._merge_label_cells(\n table=merge_subject_table,\n index_topmost_value_row=0,\n index_leftmost_value_col=2\n )\n self.assertEqual(collapsed_merge_header_table.nrow, 2)\n self.assertEqual(collapsed_merge_header_table.ncol, 2)\n self.assertEqual(str(collapsed_merge_subject_table).replace(' ', ''),\n 'ab\\t1\\ncd\\t2')\n\n def test_add_empty_header(self):\n table = Table(cells=[\n Cell(tokens=['1'], index_topleft_row=0,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['2'], index_topleft_row=0,\n index_topleft_col=1, rowspan=1, colspan=1),\n Cell(tokens=['3'], index_topleft_row=1,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['4'], index_topleft_row=1,\n index_topleft_col=1, rowspan=1, colspan=1)\n ], nrow=2, ncol=2)\n new_table = self.lc_semantic_table._add_empty_header(table=table)\n self.assertEqual(new_table.nrow, 3)\n self.assertEqual(new_table.ncol, 2)\n self.assertListEqual(new_table.cells[2:], table.cells)\n self.assertListEqual(new_table.cells[0].tokens, [])\n self.assertListEqual(new_table.cells[1].tokens, [])\n\n def test_add_empty_subject(self):\n table = Table(cells=[\n Cell(tokens=['1'], index_topleft_row=0,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['2'], index_topleft_row=0,\n index_topleft_col=1, rowspan=1, colspan=1),\n Cell(tokens=['3'], 
index_topleft_row=1,\n index_topleft_col=0, rowspan=1, colspan=1),\n Cell(tokens=['4'], index_topleft_row=1,\n index_topleft_col=1, rowspan=1, colspan=1)\n ], nrow=2, ncol=2)\n new_table = self.lc_semantic_table._add_empty_subject(table=table)\n self.assertEqual(new_table.nrow, 2)\n self.assertEqual(new_table.ncol, 3)\n assert_array_equal(new_table.grid[:, 1], table.grid[:, 0])\n assert_array_equal(new_table.grid[:, 2], table.grid[:, 1])\n self.assertListEqual(new_table.grid[0, 0].tokens, [])\n self.assertListEqual(new_table.grid[1, 0].tokens, [])\n\n\n\n\n\n #\n # # with self.assertRaises(Exception):\n # # self.semantic_table.insert_row(index=1, row=[x, y, y])\n #\n # def test_insert_column(self):\n # x = Cell(tokens=[Token(text='x')], rowspan=1, colspan=1)\n # y = Cell(tokens=[Token(text='y')], rowspan=1, colspan=1)\n # self.assertEqual(self.table.insert_column(index=1, column=[x, y]),\n # Table.create_from_grid(grid=[\n # [self.a, x, self.b, self.c],\n # [self.d, y, self.e, self.f]\n # ]))\n # with self.assertRaises(Exception):\n # self.table.insert_column(index=1, column=[x, y, y])\n #\n # def test_delete_row(self):\n # self.assertEqual(self.table.delete_row(index=1),\n # Table.create_from_grid(grid=[\n # [self.a, self.b, self.c]\n # ]))\n #\n # def test_delete_column(self):\n # self.assertEqual(self.table.delete_column(index=1),\n # Table.create_from_grid(grid=[\n # [self.a, self.c],\n # [self.d, self.f]\n # ]))\n #\n # def test_append_left(self):\n # self.assertEqual(\n # self.table.append_left(other=Table.create_from_grid(\n # grid=[[self.f, self.b, self.d],\n # [self.c, self.e, self.a]])),\n # Table.create_from_grid(\n # grid=[[self.f, self.b, self.d, self.a, self.b, self.c],\n # [self.c, self.e, self.a, self.d, self.e, self.f]])\n # )\n #\n # def test_append_right(self):\n # self.assertEqual(\n # self.table.append_right(other=Table.create_from_grid(\n # grid=[[self.f, self.b, self.d],\n # [self.c, self.e, self.a]])),\n # Table.create_from_grid(\n # grid=[[self.a, self.b, self.c, self.f, self.b, self.d],\n # [self.d, self.e, self.f, self.c, self.e, self.a]])\n # )\n #\n # def test_append_top(self):\n # self.assertEqual(\n # self.table.append_top(other=Table.create_from_grid(\n # grid=[[self.f, self.b, self.d],\n # [self.c, self.e, self.a]])),\n # Table.create_from_grid(\n # grid=[[self.f, self.b, self.d],\n # [self.c, self.e, self.a],\n # [self.a, self.b, self.c],\n # [self.d, self.e, self.f]])\n # )\n #\n # def test_append_bottom(self):\n # self.assertEqual(\n # self.table.append_bottom(other=Table.create_from_grid(\n # grid=[[self.f, self.b, self.d],\n # [self.c, self.e, self.a]])),\n # Table.create_from_grid(\n # grid=[[self.a, self.b, self.c],\n # [self.d, self.e, self.f],\n # [self.f, self.b, self.d],\n # [self.c, self.e, self.a]])\n # )\n #\n # def test_compute_bounding_box(self):\n # table = Table.create_from_cells(\n # cells=[\n # Cell(tokens=[Token(text='e')], rowspan=1, colspan=1,\n # bounding_box=Box(llx=-1.0, lly=-0.5, urx=1.0, ury=1.0)),\n # Cell(tokens=[Token(text='e')], rowspan=1, colspan=1,\n # bounding_box=Box(llx=1.5, lly=-0.5, urx=2.5, ury=1.5))\n # ],\n # nrow=1, ncol=2, paper_id='abc', page_num=0,\n # caption='hi this is caption')\n # box = table.bounding_box\n # self.assertEqual(box.ll.x, -1.0)\n # self.assertEqual(box.ll.y, -0.5)\n # self.assertEqual(box.ur.x, 2.5)\n # self.assertEqual(box.ur.y, 1.5)\n"
] | [
[
"numpy.testing.assert_array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
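The corvid test record above represents a spanning cell by repeating the same Cell object in every grid position it covers, and compares grids with numpy.testing.assert_array_equal. A minimal sketch of that repetition idea with plain strings standing in for Cell objects (not corvid's actual classes):

```python
import numpy as np
from numpy.testing import assert_array_equal

# A 2x2 grid where cell "a" has colspan=2, so it occupies both slots of row 0,
# just as self.a fills a 2x2 block in the fixture above.
grid = np.array([["a", "a"],
                 ["c", "d"]])

assert_array_equal(grid[0], np.array(["a", "a"]))     # the spanned row
assert_array_equal(grid[:, 1], np.array(["a", "d"]))  # a column crossing the span
print("span-repetition checks passed")
```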
rcooke-ast/VICE | [
"762911eb4192c7206ce2ae36b645d120ed889cb7"
] | [
"migration/src/plots/plots/metallicity_gradient.py"
] | [
"r\"\"\"\nThis script produces a 4x2 panel figure comparing the model-predicted radial\nabundance gradients of four models in [O/H], [Fe/H], and [O/Fe].\n\nIn Johnson et al. (2021), this script produces Fig. 9.\n\"\"\"\n\nfrom ..._globals import ZONE_WIDTH\nfrom .. import env\nfrom ..utils import zheights\nfrom .utils import named_colors, mpl_loc, markers, dummy_background_axes\nimport matplotlib.pyplot as plt\nimport vice\n\n# The maximum radius of the VICE models in kpc\nMAX_RADIUS = 20.0\n\n# The maximum radius of star formation in the VICE models in kpc\n# gas gradients will be plotted only to this radius.\nMAX_SF_RADIUS = 15.5\n\n# y-axis limits for [O/H] and [Fe/H]\nXH_YLIM = [-0.9, 1.2]\n\n# y-axis limits for [O/Fe]\nOFE_YLIM = [-0.2, 0.5]\n\n# x-axis limits for galactocentric radius in kpc\nXLIM = [-2, 22]\n\n\ndef main(farleft, midleft, midright, farright, stem,\n\tlabels = [\"Constant SFR\", \"Inside-Out\", \"Late-Burst\", \"Outer-Burst\"]):\n\tr\"\"\"\n\tProduce a 4x2 panel figure showing the radial metallicity gradients in\n\tboth stars and gas predicted by four VICE models.\n\n\tParameters\n\t----------\n\tstatic : ``str``\n\t\tThe relative or absolute path to the VICE output whose predicted\n\t\tgradient is to be plotted in the far-left panels.\n\tmidleft : ``str``\n\t\tThe relative or absolute path to the VICE output whose predicted\n\t\tgradient is to be plotted in the middle-left panels.\n\tmidright : ``str``\n\t\tThe relative or absolute path to the VICE output whose predicted\n\t\tgradient is to be plotted in the middle-right panels.\n\tfarright: ``str``\n\t\tThe relative or absolute path to the VICE output whose predicted\n\t\tgradient is to be plotted in the far-right panels.\n\tstem : ``str``\n\t\tThe relative or absolute path to the output image, with no extension.\n\t\tThis function will save the figure in both PDF and PNG formats.\n\tlabels : ``list`` [elements of type ``str``]\n\t\t[default : [\"Constant SFR\", \"Inside-Out\", \"Late-Burst\", \"Outer-burst\"]]\n\t\tA list of descriptors of the four models to be visualized.\n\t\"\"\"\n\taxes = setup_axes(labels = labels)\n\toutputs = [farleft, midleft, midright, farright]\n\toutputs = [vice.multioutput(_) for _ in outputs]\n\t# static = vice.multioutput(static)\n\t# insideout = vice.multioutput(insideout)\n\t# lateburst = vice.multioutput(lateburst)\n\t# outerburst = vice.multioutput(outerburst)\n\tfor i in range(4):\n\t\tplot_stellar_metallicities(axes[0][i], axes[1][i], outputs[i],\n\t\t\tsymbols_legend = not i)\n\t\tplot_gas_phase_metallicity(axes[0][i], axes[1][i], outputs[i],\n\t\t\tlabel = not i, symbols_legend = not i)\n\t\tplot_target_gradient(axes[0][i])\n\n\t# plot_stellar_metallicities(axes[0][0], axes[1][0], static)\n\t# plot_stellar_metallicities(axes[0][1], axes[1][1], insideout)\n\t# plot_stellar_metallicities(axes[0][2], axes[1][2], lateburst)\n\t# plot_stellar_metallicities(axes[0][3], axes[1][3], outerburst)\n\t# plot_gas_phase_metallicity(axes[0][0], axes[1][0], static, label = True)\n\t# plot_gas_phase_metallicity(axes[0][1], axes[1][1], insideout)\n\t# plot_gas_phase_metallicity(axes[0][2], axes[1][2], lateburst)\n\t# plot_gas_phase_metallicity(axes[0][3], axes[1][3], outerburst)\n\t# for i in range(4): plot_target_gradient(axes[0][i])\n\n\tleg = axes[0][0].legend(loc = mpl_loc(\"upper right\"), frameon = False,\n\t\tncol = 1, bbox_to_anchor = (0.99, 0.99), handlelength = 0)\n\tfor i in range(2):\n\t\tleg.get_texts()[i].set_color([\"blue\", 
\"red\"][i])\n\t\tleg.legendHandles[i].set_visible(False)\n\n\taxes[1][0].legend(loc = mpl_loc(\"upper center\"), frameon = False,\n\t\tncol = 1, bbox_to_anchor = (0.50, 0.99), fontsize = 18)\n\n\tplt.savefig(\"%s.png\" % (stem))\n\tplt.savefig(\"%s.pdf\" % (stem))\n\n\ndef plot_gas_phase_metallicity(ax1, ax2, out, label = False,\n\tsymbols_legend = False):\n\tr\"\"\"\n\tPlot the present-day gas-phase gradient in [O/H], [Fe/H], and [O/Fe]\n\tpredicted by a given model.\n\n\tParameters\n\t----------\n\tax1 : ``axes``\n\t\tThe matplotlib subplot to plot the [O/H] and [Fe/H] gradients on.\n\tax2 : ``axes``\n\t\tThe matplotlib subplot to plot the [O/Fe] gradients on.\n\tout : ``vice.multioutput``\n\t\tThe output data from VICE's calculations containing the model-predicted\n\t\tabundances.\n\tlabel : ``bool`` [default : False]\n\t\tWhether or not to attach a legend handle to plotted lines.\n\tsymbols_legend : ``bool`` [default : False]\n\t\tWhether or not to label the line in ax2 as corresponding to the\n\t\tpresent-day gas-phase abundances.\n\t\"\"\"\n\tzones = [\"zone%d\" % (i) for i in range(int(MAX_SF_RADIUS / ZONE_WIDTH))]\n\tO = [out.zones[i].history[\"[o/h]\"][-1] for i in zones]\n\tFe = [out.zones[i].history[\"[fe/h]\"][-1] for i in zones]\n\tOFe = [out.zones[i].history[\"[o/fe]\"][-1] for i in zones]\n\tradii = [ZONE_WIDTH * (i + 0.5) for i in range(len(zones))]\n\tif label:\n\t\tax1.plot(radii, Fe, c = named_colors()[\"blue\"], label = \"Fe\")\n\t\tax1.plot(radii, O, c = named_colors()[\"red\"], label = \"O\")\n\telse:\n\t\tax1.plot(radii, Fe, c = named_colors()[\"blue\"])\n\t\tax1.plot(radii, O, c = named_colors()[\"red\"])\n\tif symbols_legend:\n\t\tax2.plot(radii, OFe, c = named_colors()[\"black\"],\n\t\t\tlabel = \"Gas (Present Day)\")\n\telse:\n\t\tax2.plot(radii, OFe, c = named_colors()[\"black\"])\n\n\ndef plot_stellar_metallicities(ax1, ax2, multioutput, symbols_legend = False):\n\tr\"\"\"\n\tPlot the stellar metallicity gradients in [O/H], [Fe/H], and [O/Fe]\n\tpredicted by a ``milkyway`` model.\n\n\tParameters\n\t----------\n\tax1 : ``axes``\n\t\tThe matplotlib subplot to plot the [O/H] and [Fe/H] gradients on.\n\tax2 : ``axes``\n\t\tThe matplotlib subplot to plot the [O/Fe] gradient on.\n\tmultioutput : ``vice.multioutput``\n\t\tThe output data from VICE's calculations containing the model-predicted\n\t\tabundance distributions.\n\tsymbols_legend : ``bool`` [default : False]\t\n\t\tWhether or not to label the points and shaded region in ax2 as\n\t\tcorresponding to the median stellar abundance and dispersion in the\n\t\tstars, respectively.\n\t\"\"\"\n\tzones = [\"zone%d\" % (i) for i in range(int(MAX_RADIUS / ZONE_WIDTH))]\n\tO = [median_stellar_metallicity(multioutput.zones[i],\n\t\t\"dn/d[o/h]\") for i in zones]\n\tO_disp = [stellar_dispersion(multioutput.zones[i],\n\t\t\"dn/d[o/h]\") for i in zones]\n\tFe = [median_stellar_metallicity(multioutput.zones[i],\n\t\t\"dn/d[fe/h]\") for i in zones]\n\tFe_disp = [stellar_dispersion(multioutput.zones[i],\n\t\t\"dn/d[fe/h]\") for i in zones]\n\tOFe = [median_stellar_metallicity(multioutput.zones[i],\n\t\t\"dn/d[o/fe]\") for i in zones]\n\tOFe_disp = [stellar_dispersion(multioutput.zones[i],\n\t\t\"dn/d[o/fe]\") for i in zones]\n\tradii = [ZONE_WIDTH * (i + 0.5) for i in range(\n\t\tlen(multioutput.zones.keys()))]\n\tkwargs = {\n\t\t\"s\": \t\t50,\n\t\t\"zorder\": \t20,\n\t\t\"marker\": \tmarkers()[\"point\"]\n\t}\n\t# ax1.scatter(radii, O, c = named_colors()[\"red\"],\n\t# \tmarker = markers()[\"point\"], s = 20, zorder = 
20)\n\t# ax1.scatter(radii, Fe, c = named_colors()[\"blue\"],\n\t# \tmarker = markers()[\"point\"], s = 20, zorder = 20)\n\t# ax2.scatter(radii, OFe, c = named_colors()[\"black\"],\n\t# \tmarker = markers()[\"point\"], s = 20, zorder = 20)\n\tax1.scatter(radii, O, c = named_colors()[\"red\"], **kwargs)\n\tax1.scatter(radii, Fe, c = named_colors()[\"blue\"], **kwargs)\n\tif symbols_legend: kwargs[\"label\"] = \"Stars (median)\"\n\tax2.scatter(radii, OFe, c = named_colors()[\"black\"], **kwargs)\n\tkwargs = {\n\t\t\"alpha\": \t\t0.2,\n\t\t\"zorder\": \t\t0\n\t}\n\t# ax1.fill_between(radii, [row[0] for row in O_disp],\n\t# \t[row[1] for row in O_disp], alpha = 0.3, zorder = 0,\n\t# \tcolor = named_colors()[\"red\"])\n\t# ax1.fill_between(radii, [row[0] for row in Fe_disp],\n\t# \t[row[1] for row in Fe_disp], alpha = 0.3, zorder = 0,\n\t# \tcolor = named_colors()[\"blue\"])\n\t# ax2.fill_between(radii, [row[0] for row in OFe_disp],\n\t# \t[row[1] for row in OFe_disp], alpha = 0.3, zorder = 0,\n\t# \tcolor = named_colors()[\"black\"])\n\tax1.fill_between(radii, [row[0] for row in O_disp],\n\t\t[row[1] for row in O_disp], color = named_colors()[\"red\"], **kwargs)\n\tax1.fill_between(radii, [row[0] for row in Fe_disp],\n\t\t[row[1] for row in Fe_disp], color = named_colors()[\"blue\"], **kwargs)\n\tif symbols_legend: kwargs[\"label\"] = r\"Stars (16\\% - 84\\%)\"\n\tax2.fill_between(radii, [row[0] for row in OFe_disp],\n\t\t[row[1] for row in OFe_disp], color = named_colors()[\"black\"], **kwargs)\n\n\ndef median_stellar_metallicity(zone, mdf_key):\n\tr\"\"\"\n\tDetermine the median abundance within a given annulus.\n\n\tParameters\n\t----------\n\tzone : ``vice.output``\n\t\tThe VICE output containing the model-predicted data for a given\n\t\tannulus.\n\tmdf_key : ``str``\n\t\tThe key denoting which computed metallicity distribution function to\n\t\tcalculate the 50th percentile of.\n\n\tReturns\n\t-------\n\tmed : ``float``\n\t\tThe median [X/Y] abundance in the given zone predicted by the model.\n\n\tNotes\n\t-----\n\tVICE MDFs are normalized such that the integral over their extent is equal\n\tto 1. This function takes advantage of this by computing the integral by\n\thand and simply stopping when it reaches 0.5.\n\t\"\"\"\n\ts = 0\n\tfor i in range(len(zone.mdf[\"bin_edge_left\"])):\n\t\ts += zone.mdf[mdf_key][i] * (zone.mdf[\"bin_edge_right\"][i] -\n\t\t\tzone.mdf[\"bin_edge_left\"][i])\n\t\tif s >= 0.5: return (zone.mdf[\"bin_edge_left\"][i] +\n\t\t\tzone.mdf[\"bin_edge_right\"][i]) / 2.\n\traise ArithmeticError(\"Median not found.\")\n\n\ndef stellar_dispersion(zone, mdf_key):\n\tr\"\"\"\n\tDetermine the 16th and 84th percentiles of a metallicity distribution\n\tfunction in a given annulus.\n\n\tParameters\n\t----------\n\tzone : ``vice.output``\n\t\tThe VICE output containing the model-predicted data for a given\n\t\tannulus.\n\tmdf_key : ``str``\n\t\tThe key denoting which computed metallicity distribution function to\n\t\tcalculate the 16th and 84th percentiles of.\n\n\tReturns\n\t-------\n\tlow : ``float``\n\t\tThe 16th percentile of the [X/Y] distribution in the given zone.\n\thigh : ``float``\n\t\tThe 84th percentile of the [X/Y] distribution in the given zones.\n\n\tNotes\n\t-----\n\tVICE MDFs are normalized such that the integral over their extent is equal\n\tto 1. 
This function takes advantage of this by computing the integral by\n\thand and simply stopping when it gets to 0.16 and 0.84.\n\t\"\"\"\n\ts = 0\n\tlow = 0\n\thigh = 0\n\tfor i in range(len(zone.mdf[mdf_key])):\n\t\ts += (zone.mdf[\"bin_edge_right\"][i] -\n\t\t\tzone.mdf[\"bin_edge_left\"][i]) * zone.mdf[mdf_key][i]\n\t\tif s >= 0.16 and low == 0:\n\t\t\tlow = (zone.mdf[\"bin_edge_right\"][i] +\n\t\t\t\tzone.mdf[\"bin_edge_left\"][i]) / 2.\n\t\tif s >= 0.84:\n\t\t\thigh = (zone.mdf[\"bin_edge_right\"][i] +\n\t\t\t\tzone.mdf[\"bin_edge_left\"][i]) / 2.\n\t\t\tbreak\n\treturn [low, high]\n\n\ndef plot_target_gradient(ax):\n\tr\"\"\"\n\tPlot the adopted gradient relating mode([:math:`\\alpha`/H]) and\n\tgalactocentric radius as adopted in Johnson et al. (2021).\n\n\tParameters\n\t----------\n\tax : ``axes``\n\t\tThe matplotlib subplot to plot on.\n\t\"\"\"\n\tradii = [0.01 * _ for _ in range(1551)] # 0 to 15.5 in steps of 0.01\n\tgrad = [target_mode_abundance(_) for _ in radii]\n\tax.plot(radii, grad, c = named_colors()[\"black\"], zorder = 100)\n\n\ndef target_mode_abundance(radius):\n\tr\"\"\"\n\tThe adopted relation between mode([:math:`\\alpha`/H]) and galactocentric\n\tradius in Johnson et al. (2021).\n\n\tParameters\n\t----------\n\tradius : ``float``\n\t\tGalactocentric radius in kpc.\n\n\tReturns\n\t-------\n\tmode_alpha : ``float``\n\t\tThe mode alpha abundance at that radius if it followed the Johnson et\n\t\tal. (2021) gradient exactly, defined by:\n\n\t\t.. math:: [$\\alpha$/\\text{H}] = -0.08 * (\\frac{R}{\\text{kpc}} - 4) + 0.3\n\n\t\tfor a given galactocentric radius :math:`R`.\n\t\"\"\"\n\treturn -0.08 * (radius - 4) + 0.3\n\n\ndef setup_axes(\n\tlabels = [\"Constant SFR\", \"Inside-Out\", \"Late-Burst\", \"Outer-Burst\"]):\n\tr\"\"\"\n\tSetup the 4x2 matplotlib axes to plot on.\n\n\tParameters\n\t----------\n\tlabels : ``list``\n\t\tA list of descriptors of each of the four models whose gradients this\n\t\tscript will visualize.\n\n\tReturns\n\t-------\n\taxes : ``list``\n\t\tA 2x4-element ``list`` containing the rows of subplots on the first\n\t\taxis and the columns on the second.\n\t\"\"\"\n\tfig = plt.figure(figsize = (20, 10), facecolor = \"white\")\n\taxes = 2 * [None]\n\tfor i in range(len(axes)):\n\t\taxes[i] = 4 * [None]\n\t\tfor j in range(len(axes[i])):\n\t\t\tif j == 0:\n\t\t\t\taxes[i][j] = fig.add_subplot(241 + 4 * i + j)\n\t\t\telse:\n\t\t\t\taxes[i][j] = fig.add_subplot(241 + 4 * i + j,\n\t\t\t\t\tsharey = axes[i][0])\n\t\t\tif i == 0:\n\t\t\t\tplt.setp(axes[i][j].get_xticklabels(), visible = False)\n\t\t\t\taxes[i][j].text(5, 0.9, labels[j], fontsize = 25)\n\t\t\t\taxes[i][j].set_ylim(XH_YLIM)\n\t\t\telse:\n\t\t\t\taxes[i][j].set_ylim(OFE_YLIM)\n\t\t\taxes[i][j].set_xlim(XLIM)\n\t\t\taxes[i][j].xaxis.set_ticks([0.0, 5.0, 10.0, 15.0, 20.0])\n\t\t\tif j: plt.setp(axes[i][j].get_yticklabels(), visible = False)\n\n\taxes[0][0].set_ylabel(\"[X/H]\")\n\taxes[1][0].set_ylabel(\"[O/Fe]\")\n\tplt.tight_layout()\n\tplt.subplots_adjust(hspace = 0, wspace = 0, bottom = 0.12)\n\tdummy = dummy_background_axes(axes)\n\tdummy.set_xlabel(r\"$R_\\text{gal}$ [kpc]\", labelpad = 30)\n\treturn axes\n\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
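The median_stellar_metallicity and stellar_dispersion helpers in the code above walk a normalized metallicity distribution function bin by bin and stop once the running integral crosses the requested percentile. A minimal sketch of that accumulation, with a made-up uniform distribution instead of a real VICE output (the function name and toy bins are illustrative only):

import numpy as np

def percentile_from_mdf(left_edges, right_edges, densities, target):
    # Accumulate density * bin width until the running integral of the
    # normalized distribution reaches `target`, then return that bin's midpoint.
    s = 0.0
    for lo, hi, dens in zip(left_edges, right_edges, densities):
        s += dens * (hi - lo)
        if s >= target:
            return 0.5 * (lo + hi)
    raise ArithmeticError("target percentile not reached; is the MDF normalized?")

# toy distribution: uniform over [-1, 1), normalized so the integral equals 1
left = np.arange(-1.0, 1.0, 0.1)
right = left + 0.1
dens = np.full_like(left, 0.5)

median = percentile_from_mdf(left, right, dens, 0.50)
low, high = (percentile_from_mdf(left, right, dens, p) for p in (0.16, 0.84))
print(median, (low, high))  # bin midpoints near 0.0 and near (-0.65, 0.65)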
Palash-Vishnani/cupy | [
"45b685319ae24db2bc002a97e072ebe630ade0f2"
] | [
"cupyx/scipy/sparse/linalg/_solve.py"
] | [
"import numpy\n\nimport cupy\nfrom cupy import cusparse\nfrom cupy.cuda import cusolver\nfrom cupy.cuda import device\nfrom cupy.cuda import runtime\nfrom cupy.linalg import _util\nfrom cupyx.scipy import sparse\n\nimport warnings\ntry:\n import scipy.sparse\n import scipy.sparse.linalg\n scipy_available = True\nexcept ImportError:\n scipy_available = False\n\n\ndef lsqr(A, b):\n \"\"\"Solves linear system with QR decomposition.\n\n Find the solution to a large, sparse, linear system of equations.\n The function solves ``Ax = b``. Given two-dimensional matrix ``A`` is\n decomposed into ``Q * R``.\n\n Args:\n A (cupy.ndarray or cupyx.scipy.sparse.csr_matrix): The input matrix\n with dimension ``(N, N)``\n b (cupy.ndarray): Right-hand side vector.\n\n Returns:\n tuple:\n Its length must be ten. It has same type elements\n as SciPy. Only the first element, the solution vector ``x``, is\n available and other elements are expressed as ``None`` because\n the implementation of cuSOLVER is different from the one of SciPy.\n You can easily calculate the fourth element by ``norm(b - Ax)``\n and the ninth element by ``norm(x)``.\n\n .. seealso:: :func:`scipy.sparse.linalg.lsqr`\n \"\"\"\n if runtime.is_hip:\n raise RuntimeError('HIP does not support lsqr')\n if not sparse.isspmatrix_csr(A):\n A = sparse.csr_matrix(A)\n _util._assert_nd_squareness(A)\n _util._assert_cupy_array(b)\n m = A.shape[0]\n if b.ndim != 1 or len(b) != m:\n raise ValueError('b must be 1-d array whose size is same as A')\n\n # Cast to float32 or float64\n if A.dtype == 'f' or A.dtype == 'd':\n dtype = A.dtype\n else:\n dtype = numpy.promote_types(A.dtype, 'f')\n\n handle = device.get_cusolver_sp_handle()\n nnz = A.nnz\n tol = 1.0\n reorder = 1\n x = cupy.empty(m, dtype=dtype)\n singularity = numpy.empty(1, numpy.int32)\n\n if dtype == 'f':\n csrlsvqr = cusolver.scsrlsvqr\n else:\n csrlsvqr = cusolver.dcsrlsvqr\n csrlsvqr(\n handle, m, nnz, A._descr.descriptor, A.data.data.ptr,\n A.indptr.data.ptr, A.indices.data.ptr, b.data.ptr, tol, reorder,\n x.data.ptr, singularity.ctypes.data)\n\n # The return type of SciPy is always float64. Therefore, x must be casted.\n x = x.astype(numpy.float64)\n ret = (x, None, None, None, None, None, None, None, None, None)\n return ret\n\n\ndef spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,\n unit_diagonal=False):\n \"\"\"Solves a sparse triangular system ``A x = b``.\n\n Args:\n A (cupyx.scipy.sparse.spmatrix):\n Sparse matrix with dimension ``(M, M)``.\n b (cupy.ndarray):\n Dense vector or matrix with dimension ``(M)`` or ``(M, K)``.\n lower (bool):\n Whether ``A`` is a lower or upper trinagular matrix.\n If True, it is lower triangular, otherwise, upper triangular.\n overwrite_A (bool):\n (not supported)\n overwrite_b (bool):\n Allows overwriting data in ``b``.\n unit_diagonal (bool):\n If True, diagonal elements of ``A`` are assumed to be 1 and will\n not be referencec.\n\n Returns:\n cupy.ndarray:\n Solution to the system ``A x = b``. 
The shape is the same as ``b``.\n \"\"\"\n if not cusparse.check_availability('csrsm2'):\n raise NotImplementedError\n\n if not sparse.isspmatrix(A):\n raise TypeError('A must be cupyx.scipy.sparse.spmatrix')\n if not isinstance(b, cupy.ndarray):\n raise TypeError('b must be cupy.ndarray')\n if A.shape[0] != A.shape[1]:\n raise ValueError('A must be a square matrix (A.shape: {})'.\n format(A.shape))\n if b.ndim not in [1, 2]:\n raise ValueError('b must be 1D or 2D array (b.shape: {})'.\n format(b.shape))\n if A.shape[0] != b.shape[0]:\n raise ValueError('The size of dimensions of A must be equal to the '\n 'size of the first dimension of b '\n '(A.shape: {}, b.shape: {})'.format(A.shape, b.shape))\n if A.dtype.char not in 'fdFD':\n raise TypeError('unsupported dtype (actual: {})'.format(A.dtype))\n\n if not (sparse.isspmatrix_csr(A) or sparse.isspmatrix_csc(A)):\n warnings.warn('CSR or CSC format is required. Converting to CSR '\n 'format.', sparse.SparseEfficiencyWarning)\n A = A.tocsr()\n A.sum_duplicates()\n\n if (overwrite_b and A.dtype == b.dtype and\n (b._c_contiguous or b._f_contiguous)):\n x = b\n else:\n x = b.astype(A.dtype, copy=True)\n\n cusparse.csrsm2(A, x, lower=lower, unit_diag=unit_diagonal)\n\n if x.dtype.char in 'fF':\n # Note: This is for compatibility with SciPy.\n dtype = numpy.promote_types(x.dtype, 'float64')\n x = x.astype(dtype)\n return x\n\n\ndef spsolve(A, b):\n \"\"\"Solves a sparse linear system ``A x = b``\n\n Args:\n A (cupyx.scipy.sparse.spmatrix):\n Sparse matrix with dimension ``(M, M)``.\n b (cupy.ndarray):\n Dense vector or matrix with dimension ``(M)`` or ``(M, 1)``.\n\n Returns:\n cupy.ndarray:\n Solution to the system ``A x = b``.\n \"\"\"\n if not cupy.cusolver.check_availability('csrlsvqr'):\n raise NotImplementedError\n if not sparse.isspmatrix(A):\n raise TypeError('A must be cupyx.scipy.sparse.spmatrix')\n if not isinstance(b, cupy.ndarray):\n raise TypeError('b must be cupy.ndarray')\n if A.shape[0] != A.shape[1]:\n raise ValueError('A must be a square matrix (A.shape: {})'.\n format(A.shape))\n if not (b.ndim == 1 or (b.ndim == 2 and b.shape[1] == 1)):\n raise ValueError('Invalid b.shape (b.shape: {})'.format(b.shape))\n if A.shape[0] != b.shape[0]:\n raise ValueError('matrix dimension mismatch (A.shape: {}, b.shape: {})'\n .format(A.shape, b.shape))\n\n if not sparse.isspmatrix_csr(A):\n warnings.warn('CSR format is required. 
Converting to CSR format.',\n sparse.SparseEfficiencyWarning)\n A = A.tocsr()\n A.sum_duplicates()\n b = b.astype(A.dtype, copy=False).ravel()\n\n return cupy.cusolver.csrlsvqr(A, b)\n\n\nclass SuperLU():\n\n def __init__(self, obj):\n \"\"\"LU factorization of a sparse matrix.\n\n Args:\n obj (scipy.sparse.linalg.SuperLU): LU factorization of a sparse\n matrix, computed by `scipy.sparse.linalg.splu`, etc.\n \"\"\"\n if not scipy_available:\n raise RuntimeError('scipy is not available')\n if not isinstance(obj, scipy.sparse.linalg.SuperLU):\n raise TypeError('obj must be scipy.sparse.linalg.SuperLU')\n\n self.shape = obj.shape\n self.nnz = obj.nnz\n self.perm_r = cupy.array(obj.perm_r)\n self.perm_c = cupy.array(obj.perm_c)\n self.L = sparse.csr_matrix(obj.L.tocsr())\n self.U = sparse.csr_matrix(obj.U.tocsr())\n\n self._perm_r_rev = cupy.argsort(self.perm_r)\n self._perm_c_rev = cupy.argsort(self.perm_c)\n\n def solve(self, rhs, trans='N'):\n \"\"\"Solves linear system of equations with one or several right-hand sides.\n\n Args:\n rhs (cupy.ndarray): Right-hand side(s) of equation with dimension\n ``(M)`` or ``(M, K)``.\n trans (str): 'N', 'T' or 'H'.\n 'N': Solves ``A * x = rhs``.\n 'T': Solves ``A.T * x = rhs``.\n 'H': Solves ``A.conj().T * x = rhs``.\n\n Returns:\n cupy.ndarray:\n Solution vector(s)\n \"\"\"\n if not isinstance(rhs, cupy.ndarray):\n raise TypeError('ojb must be cupy.ndarray')\n if rhs.ndim not in (1, 2):\n raise ValueError('rhs.ndim must be 1 or 2 (actual: {})'.\n format(rhs.ndim))\n if rhs.shape[0] != self.shape[0]:\n raise ValueError('shape mismatch (self.shape: {}, rhs.shape: {})'\n .format(self.shape, rhs.shape))\n if trans not in ('N', 'T', 'H'):\n raise ValueError('trans must be \\'N\\', \\'T\\', or \\'H\\'')\n if not cusparse.check_availability('csrsm2'):\n raise NotImplementedError\n\n x = rhs.astype(self.L.dtype)\n if trans == 'N':\n if self.perm_r is not None:\n x = x[self._perm_r_rev]\n cusparse.csrsm2(self.L, x, lower=True, transa=trans)\n cusparse.csrsm2(self.U, x, lower=False, transa=trans)\n if self.perm_c is not None:\n x = x[self.perm_c]\n else:\n if self.perm_c is not None:\n x = x[self._perm_c_rev]\n cusparse.csrsm2(self.U, x, lower=False, transa=trans)\n cusparse.csrsm2(self.L, x, lower=True, transa=trans)\n if self.perm_r is not None:\n x = x[self.perm_r]\n\n if not x._f_contiguous:\n # For compatibility with SciPy\n x = x.copy(order='F')\n return x\n\n\nclass CusparseLU(SuperLU):\n\n def __init__(self, a):\n \"\"\"Incomplete LU factorization of a sparse matrix.\n\n Args:\n a (cupyx.scipy.sparse.csr_matrix): Incomplete LU factorization of a\n sparse matrix, computed by `cusparse.csrilu02`.\n \"\"\"\n if not scipy_available:\n raise RuntimeError('scipy is not available')\n if not sparse.isspmatrix_csr(a):\n raise TypeError('a must be cupyx.scipy.sparse.csr_matrix')\n\n self.shape = a.shape\n self.nnz = a.nnz\n self.perm_r = None\n self.perm_c = None\n # TODO(anaruse): Computes tril and triu on GPU\n a = a.get()\n al = scipy.sparse.tril(a)\n al.setdiag(1.0)\n au = scipy.sparse.triu(a)\n self.L = sparse.csr_matrix(al.tocsr())\n self.U = sparse.csr_matrix(au.tocsr())\n\n\ndef factorized(A):\n \"\"\"Return a function for solving a sparse linear system, with A pre-factorized.\n\n Args:\n A (cupyx.scipy.sparse.spmatrix): Sparse matrix to factorize.\n\n Returns:\n callable: a function to solve the linear system of equations given in\n ``A``.\n\n Note:\n This function computes LU decomposition of a sparse matrix on the CPU\n using `scipy.sparse.linalg.splu`. 
Therefore, LU decomposition is not\n accelerated on the GPU. On the other hand, the computation of solving\n linear equations using the method returned by this function is\n performed on the GPU.\n\n .. seealso:: :func:`scipy.sparse.linalg.factorized`\n \"\"\"\n return splu(A).solve\n\n\ndef splu(A, permc_spec=None, diag_pivot_thresh=None, relax=None,\n panel_size=None, options={}):\n \"\"\"Computes the LU decomposition of a sparse square matrix.\n\n Args:\n A (cupyx.scipy.sparse.spmatrix): Sparse matrix to factorize.\n permc_spec (str): (For further augments, see\n :func:`scipy.sparse.linalg.splu`)\n diag_pivot_thresh (float):\n relax (int):\n panel_size (int):\n options (dict):\n\n Returns:\n cupyx.scipy.sparse.linalg.SuperLU:\n Object which has a ``solve`` method.\n\n Note:\n This function LU-decomposes a sparse matrix on the CPU using\n `scipy.sparse.linalg.splu`. Therefore, LU decomposition is not\n accelerated on the GPU. On the other hand, the computation of solving\n linear equations using the ``solve`` method, which this function\n returns, is performed on the GPU.\n\n .. seealso:: :func:`scipy.sparse.linalg.splu`\n \"\"\"\n if not scipy_available:\n raise RuntimeError('scipy is not available')\n if not sparse.isspmatrix(A):\n raise TypeError('A must be cupyx.scipy.sparse.spmatrix')\n if A.shape[0] != A.shape[1]:\n raise ValueError('A must be a square matrix (A.shape: {})'\n .format(A.shape))\n if A.dtype.char not in 'fdFD':\n raise TypeError('Invalid dtype (actual: {})'.format(A.dtype))\n\n a = A.get().tocsc()\n a_inv = scipy.sparse.linalg.splu(\n a, permc_spec=permc_spec, diag_pivot_thresh=diag_pivot_thresh,\n relax=relax, panel_size=panel_size, options=options)\n return SuperLU(a_inv)\n\n\ndef spilu(A, drop_tol=None, fill_factor=None, drop_rule=None,\n permc_spec=None, diag_pivot_thresh=None, relax=None,\n panel_size=None, options={}):\n \"\"\"Computes the incomplete LU decomposition of a sparse square matrix.\n\n Args:\n A (cupyx.scipy.sparse.spmatrix): Sparse matrix to factorize.\n drop_tol (float): (For further augments, see\n :func:`scipy.sparse.linalg.spilu`)\n fill_factor (float):\n drop_rule (str):\n permc_spec (str):\n diag_pivot_thresh (float):\n relax (int):\n panel_size (int):\n options (dict):\n\n Returns:\n cupyx.scipy.sparse.linalg.SuperLU:\n Object which has a ``solve`` method.\n\n Note:\n This function computes incomplete LU decomposition of a sparse matrix\n on the CPU using `scipy.sparse.linalg.spilu` (unless you set\n ``fill_factor`` to ``1``). Therefore, incomplete LU decomposition is\n not accelerated on the GPU. On the other hand, the computation of\n solving linear equations using the ``solve`` method, which this\n function returns, is performed on the GPU.\n\n If you set ``fill_factor`` to ``1``, this function computes incomplete\n LU decomposition on the GPU, but without fill-in or pivoting.\n\n .. 
seealso:: :func:`scipy.sparse.linalg.spilu`\n \"\"\"\n if not scipy_available:\n raise RuntimeError('scipy is not available')\n if not sparse.isspmatrix(A):\n raise TypeError('A must be cupyx.scipy.sparse.spmatrix')\n if A.shape[0] != A.shape[1]:\n raise ValueError('A must be a square matrix (A.shape: {})'\n .format(A.shape))\n if A.dtype.char not in 'fdFD':\n raise TypeError('Invalid dtype (actual: {})'.format(A.dtype))\n\n if fill_factor == 1:\n # Computes ILU(0) on the GPU using cuSparse functions\n if not sparse.isspmatrix_csr(A):\n a = A.tocsr()\n else:\n a = A.copy()\n cusparse.csrilu02(a)\n return CusparseLU(a)\n\n a = A.get().tocsc()\n a_inv = scipy.sparse.linalg.spilu(\n a, fill_factor=fill_factor, drop_tol=drop_tol, drop_rule=drop_rule,\n permc_spec=permc_spec, diag_pivot_thresh=diag_pivot_thresh,\n relax=relax, panel_size=panel_size, options=options)\n return SuperLU(a_inv)\n"
] | [
[
"numpy.promote_types",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
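SuperLU.solve in the code above applies a row permutation, a forward solve with L, a back solve with U, and finally a column permutation. The same sequence can be checked on the CPU with SciPy, which is what the class wraps; the 4x4 matrix below is made up, and the SciPy triangular solves stand in for the cusparse.csrsm2 calls:

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import splu, spsolve_triangular

A = csc_matrix(np.array([[4., 1., 0., 0.],
                         [1., 4., 1., 0.],
                         [0., 1., 4., 1.],
                         [2., 0., 1., 4.]]))
b = np.array([1., 2., 3., 4.])

lu = splu(A)  # factorizes so that Pr @ A @ Pc = L @ U

y = b[np.argsort(lu.perm_r)]                          # apply the row permutation to b
y = spsolve_triangular(lu.L.tocsr(), y, lower=True)   # forward substitution with L
y = spsolve_triangular(lu.U.tocsr(), y, lower=False)  # back substitution with U
x = y[lu.perm_c]                                      # apply the column permutation

assert np.allclose(x, lu.solve(b))  # matches SuperLU's own solve
assert np.allclose(A @ x, b)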
johannah/robotics-rl-srl | [
"d8c5ed9a43e4506bc5962f3f83e13f535140938b"
] | [
"environments/dataset_generator.py"
] | [
"from __future__ import division, absolute_import, print_function\n\nimport argparse\nimport glob\nimport multiprocessing\nimport os\nimport shutil\nimport time\n\nimport numpy as np\nfrom gym.spaces import prng\nfrom stable_baselines import PPO2\nfrom stable_baselines.common.vec_env import DummyVecEnv, VecNormalize\nfrom stable_baselines.common.policies import CnnPolicy\n\nfrom environments import ThreadingType\nfrom environments.registry import registered_env\nfrom srl_zoo.utils import printRed, printYellow\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # used to remove debug info of tensorflow\n\n\ndef convertImagePath(args, path, record_id_start):\n \"\"\"\n Used to convert an image path, from one location, to another\n :param args: (ArgumentParser object)\n :param path: (str)\n :param record_id_start: (int) where does the current part start counting its records\n :return:\n \"\"\"\n image_name = path.split(\"/\")[-1]\n # get record id for output, by adding the current offset with the record_id\n # of the folder\n new_record_id = record_id_start + int(path.split(\"/\")[-2].split(\"_\")[-1])\n return args.name + \"/record_{:03d}\".format(new_record_id) + \"/\" + image_name\n\n\ndef env_thread(args, thread_num, partition=True, use_ppo2=False):\n \"\"\"\n Run a session of an environment\n :param args: (ArgumentParser object)\n :param thread_num: (int) The thread ID of the environment session\n :param partition: (bool) If the output should be in multiple parts (default=True)\n :param use_ppo2: (bool) Use ppo2 to generate the dataset\n \"\"\"\n env_kwargs = {\n \"max_distance\": args.max_distance,\n \"random_target\": args.random_target,\n \"force_down\": True,\n \"is_discrete\": not args.continuous_actions,\n \"renders\": thread_num == 0 and args.display,\n \"record_data\": not args.no_record_data,\n \"multi_view\": args.multi_view,\n \"save_path\": args.save_path,\n \"shape_reward\": args.shape_reward\n }\n\n if partition:\n env_kwargs[\"name\"] = args.name + \"_part-\" + str(thread_num)\n else:\n env_kwargs[\"name\"] = args.name\n\n env_class = registered_env[args.env][0]\n env = env_class(**env_kwargs)\n\n train_env = env_class(**{**env_kwargs, \"record_data\": False})\n train_env = DummyVecEnv([lambda: train_env])\n train_env = VecNormalize(train_env, norm_obs=True, norm_reward=False)\n\n model = None\n if use_ppo2:\n model = PPO2(CnnPolicy, train_env).learn(args.ppo2_timesteps)\n\n frames = 0\n start_time = time.time()\n # divide evenly, then do an extra one for only some of them in order to get the right count\n for i_episode in range(args.num_episode // args.num_cpu + 1 * (args.num_episode % args.num_cpu > thread_num)):\n # seed + position in this slice + size of slice (with reminder if uneven partitions)\n seed = args.seed + i_episode + args.num_episode // args.num_cpu * thread_num + \\\n (thread_num if thread_num <= args.num_episode % args.num_cpu else args.num_episode % args.num_cpu)\n\n env.seed(seed)\n prng.seed(seed) # this is for the sample() function from gym.space\n obs = env.reset()\n done = False\n t = 0\n while not done:\n env.render()\n\n if use_ppo2:\n action, _ = model.predict([obs])\n else:\n action = [env.action_space.sample()]\n\n _, _, done, _ = env.step(action[0])\n frames += 1\n t += 1\n if done:\n print(\"Episode finished after {} timesteps\".format(t + 1))\n\n if thread_num == 0:\n print(\"{:.2f} FPS\".format(frames * args.num_cpu / (time.time() - start_time)))\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Deteministic dataset generator for SRL 
training ' +\n '(can be used for environment testing)')\n parser.add_argument('--num-cpu', type=int, default=1, help='number of cpu to run on')\n parser.add_argument('--num-episode', type=int, default=50, help='number of episode to run')\n parser.add_argument('--save-path', type=str, default='srl_zoo/data/',\n help='Folder where the environments will save the output')\n parser.add_argument('--name', type=str, default='kuka_button', help='Folder name for the output')\n parser.add_argument('--env', type=str, default='KukaButtonGymEnv-v0', help='The environment wanted',\n choices=list(registered_env.keys()))\n parser.add_argument('--display', action='store_true', default=False)\n parser.add_argument('--no-record-data', action='store_true', default=False)\n parser.add_argument('--max-distance', type=float, default=0.28,\n help='Beyond this distance from the goal, the agent gets a negative reward')\n parser.add_argument('-c', '--continuous-actions', action='store_true', default=False)\n parser.add_argument('--seed', type=int, default=0, help='the seed')\n parser.add_argument('-f', '--force', action='store_true', default=False,\n help='Force the save, even if it overrides something else,' +\n ' including partial parts if they exist')\n parser.add_argument('-r', '--random-target', action='store_true', default=False,\n help='Set the button to a random position')\n parser.add_argument('--multi-view', action='store_true', default=False, help='Set a second camera to the scene')\n parser.add_argument('--shape-reward', action='store_true', default=False,\n help='Shape the reward (reward = - distance) instead of a sparse reward')\n parser.add_argument('--reward-dist', action='store_true', default=False,\n help='Prints out the reward distribution when the dataset generation is finished')\n parser.add_argument('--run-ppo2', action='store_true', default=False,\n help='runs a ppo2 agent instead of a random agent')\n parser.add_argument('--ppo2-timesteps', type=int, default=1000,\n help='number of timesteps to run PPO2 on before generating the dataset')\n args = parser.parse_args()\n\n assert (args.num_cpu > 0), \"Error: number of cpu must be positive and non zero\"\n assert (args.max_distance > 0), \"Error: max distance must be positive and non zero\"\n assert (args.num_episode > 0), \"Error: number of episodes must be positive and non zero\"\n assert not args.reward_dist or not args.shape_reward, \\\n \"Error: cannot display the reward distribution for continuous reward\"\n assert not(registered_env[args.env][3] is ThreadingType.NONE and args.num_cpu != 1), \\\n \"Error: cannot have more than 1 CPU for the environment {}\".format(args.env)\n if args.num_cpu > args.num_episode:\n args.num_cpu = args.num_episode\n printYellow(\"num_cpu cannot be greater than num_episode, defaulting to {} cpus.\".format(args.num_cpu))\n\n # this is done so seed 0 and 1 are different and not simply offset of the same datasets.\n args.seed = np.random.RandomState(args.seed).randint(int(1e10))\n\n # File exists, need to deal with it\n if not args.no_record_data and os.path.exists(args.save_path + args.name):\n assert args.force, \"Error: save directory '{}' already exists\".format(args.save_path + args.name)\n\n shutil.rmtree(args.save_path + args.name)\n for part in glob.glob(args.save_path + args.name + \"_part-[0-9]*\"):\n shutil.rmtree(part)\n if not args.no_record_data:\n # create the output\n os.mkdir(args.save_path + args.name)\n\n if args.num_cpu == 1:\n env_thread(args, 0, partition=False, use_ppo2=args.run_ppo2)\n 
else:\n # try and divide into multiple processes, with an environment each\n try:\n jobs = []\n for i in range(args.num_cpu):\n process = multiprocessing.Process(target=env_thread, args=(args, i, True, args.run_ppo2))\n jobs.append(process)\n\n for j in jobs:\n j.start()\n\n try:\n for j in jobs:\n j.join()\n except Exception as e:\n printRed(\"Error: unable to join thread\")\n raise e\n\n except Exception as e:\n printRed(\"Error: unable to start thread\")\n raise e\n\n if not args.no_record_data and args.num_cpu > 1:\n # sleep 1 second, to avoid congruency issues from multiprocess (eg., files still writing)\n time.sleep(1)\n # get all the parts\n file_parts = sorted(glob.glob(args.save_path + args.name + \"_part-[0-9]*\"), key=lambda a: int(a.split(\"-\")[-1]))\n\n # move the config files from any as they are identical\n os.rename(file_parts[0] + \"/dataset_config.json\", args.save_path + args.name + \"/dataset_config.json\")\n os.rename(file_parts[0] + \"/env_globals.json\", args.save_path + args.name + \"/env_globals.json\")\n\n ground_truth = None\n preprocessed_data = None\n\n # used to convert the part record_id to the fused record_id\n record_id = 0\n for part in file_parts:\n # sort the record names alphabetically, then numerically\n records = sorted(glob.glob(part + \"/record_[0-9]*\"), key=lambda a: int(a.split(\"_\")[-1]))\n\n record_id_start = record_id\n for record in records:\n os.renames(record, args.save_path + args.name + \"/record_{:03d}\".format(record_id))\n record_id += 1\n\n # fuse the npz files together, in the right order\n if ground_truth is None:\n # init\n ground_truth = {}\n preprocessed_data = {}\n ground_truth_load = np.load(part + \"/ground_truth.npz\")\n preprocessed_data_load = np.load(part + \"/preprocessed_data.npz\")\n\n for arr in ground_truth_load.files:\n if arr == \"images_path\":\n ground_truth[arr] = np.array(\n [convertImagePath(args, path, record_id_start) for path in ground_truth_load[arr]])\n else:\n ground_truth[arr] = ground_truth_load[arr]\n for arr in preprocessed_data_load.files:\n preprocessed_data[arr] = preprocessed_data_load[arr]\n\n else:\n ground_truth_load = np.load(part + \"/ground_truth.npz\")\n preprocessed_data_load = np.load(part + \"/preprocessed_data.npz\")\n\n for arr in ground_truth_load.files:\n if arr == \"images_path\":\n sanitised_paths = np.array(\n [convertImagePath(args, path, record_id_start) for path in ground_truth_load[arr]])\n ground_truth[arr] = np.concatenate((ground_truth[arr], sanitised_paths))\n else:\n ground_truth[arr] = np.concatenate((ground_truth[arr], ground_truth_load[arr]))\n for arr in preprocessed_data_load.files:\n preprocessed_data[arr] = np.concatenate((preprocessed_data[arr], preprocessed_data_load[arr]))\n\n # remove the current part folder\n shutil.rmtree(part)\n\n # save the fused outputs\n np.savez(args.save_path + args.name + \"/ground_truth.npz\", **ground_truth)\n np.savez(args.save_path + args.name + \"/preprocessed_data.npz\", **preprocessed_data)\n\n if args.reward_dist:\n rewards, counts = np.unique(np.load(args.save_path + args.name + \"/preprocessed_data.npz\")['rewards'],\n return_counts=True)\n counts = [\"{:.2f}%\".format(val * 100) for val in counts / np.sum(counts)]\n print(\"reward distribution:\")\n [print(\" \", reward, count) for reward, count in list(zip(rewards, counts))]\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.savez",
"numpy.concatenate",
"numpy.load",
"numpy.random.RandomState",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
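The merge step in the code above fuses the per-process ground_truth.npz and preprocessed_data.npz parts by concatenating every array across parts before saving a single archive. A minimal sketch of that pattern with two fake parts in a temporary directory (file names and keys here are illustrative, not the exact ones the script writes):

import os
import tempfile
import numpy as np

with tempfile.TemporaryDirectory() as tmp:
    # write two fake "parts", as the per-CPU workers would
    np.savez(os.path.join(tmp, "part-0.npz"),
             rewards=np.array([0., 1.]), episode_starts=np.array([True, False]))
    np.savez(os.path.join(tmp, "part-1.npz"),
             rewards=np.array([2., 3.]), episode_starts=np.array([True, False]))

    fused = None
    for name in ("part-0.npz", "part-1.npz"):
        loaded = np.load(os.path.join(tmp, name))
        if fused is None:
            fused = {key: loaded[key] for key in loaded.files}       # first part: copy
        else:
            fused = {key: np.concatenate((fused[key], loaded[key]))  # later parts: append
                     for key in loaded.files}

    np.savez(os.path.join(tmp, "fused.npz"), **fused)
    print(np.load(os.path.join(tmp, "fused.npz"))["rewards"])  # [0. 1. 2. 3.]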
malibuber/btc_price_prediction | [
"da508a029a14e316c1be9f941515fa80352abf9f"
] | [
"bitcoin_price_updown/denemeler.py"
] | [
"\r\n############## .63 3 input\r\n######################################\r\n\r\nimport pandas as pd\r\nimport talib\r\n#nltk.download('punkt')\r\n#notclean = pd.read_csv(\"../twitterdata/tweet_t.csv\" , usecols=['dt', 'text' , 'vader' , 'polarity' ,'sensitivity'])\r\ndef signal1_tweet_vol():\r\n notclean = pd.read_csv(\"tweet_t.csv\" , usecols=['dt', 'text' , 'vader' , 'polarity' ,'sensitivity'])\r\n notclean = notclean.dropna()\r\n\r\n notclean = notclean.drop_duplicates(subset=['text'])\r\n\r\n\r\n\r\n\r\n\r\n minute = \"60min\"\r\n notclean['dt'] = pd.to_datetime(notclean['dt'] , errors='coerce', format='%Y-%m-%d %H:%M:%S')\r\n notclean['dt'] = notclean['dt'].dt.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n notclean['dt'] = pd.to_datetime(notclean['dt'])\r\n\r\n notclean['DateTime'] = notclean['dt'].dt.floor('min')\r\n print(notclean.head(2))\r\n vdf = notclean.groupby(pd.Grouper(key='dt',freq=minute)).size().reset_index(name='tweet_vol')\r\n\r\n vdf.index = pd.to_datetime(vdf.index)\r\n vdf=vdf.set_index('dt')\r\n\r\n notclean.index = pd.to_datetime(notclean.index)\r\n\r\n vdf['tweet_vol'] =vdf['tweet_vol'].astype(float)\r\n\r\n df = notclean.groupby('DateTime').agg(lambda x: x.mean())\r\n\r\n df['tweet_vol'] = vdf['tweet_vol']\r\n\r\n\r\n df = df.drop(df.index[0])\r\n\r\n df = df.dropna()\r\n\r\n ########################## lstm model start #############################\r\n ########## get bitcoin price from 14 days before to now ##################\r\n\r\n import datetime\r\n\r\n start = vdf.head(1).index[0] - datetime.timedelta( 14 )\r\n end = vdf.tail(1).index[0] + datetime.timedelta()\r\n\r\n #start = (datetime.date.today() - datetime.timedelta( NUM_DAYS ) )\r\n\r\n import talib\r\n import cryptocompare\r\n\r\n # get bitcoin price minu\r\n #pricelimit = cryptocompare.get_historical_price_minute('BTC', 'USD', toTs=datetime.datetime.now())\r\n # get bitcoin price hourly\r\n pricelimit = cryptocompare.get_historical_price_hour('BTC', 'USD' , limit = 341, toTs=datetime.datetime(end.year,end.month,end.day,end.hour))\r\n\r\n simple_list = []\r\n for i in pricelimit: \r\n\r\n simple_list.append([i.get(\"time\"), i.get(\"close\")])\r\n \r\n #df2 = pd.DataFrame({\"Timestamp\": time, \"Close\": close}) \r\n #print(df2)\r\n control=pd.DataFrame(simple_list,columns=['Timestamp','Close'])\r\n #df1.append(df2, ignore_index = True) \r\n control[\"Timestamp\"] = control[\"Timestamp\"] + 10800\r\n control[\"DateTime\"] = pd.to_datetime(control[\"Timestamp\"], unit='s')\r\n\r\n control['DateTime'] = control['DateTime'].dt.floor('min')\r\n control = control.drop([\"Timestamp\"] , axis = 1)\r\n control = control.set_index(\"DateTime\")\r\n control[\"ma\"] = talib.MA(control['Close'], timeperiod=5)\r\n control[\"diff\"] = control['Close'] - control[\"ma\"]\r\n control[\"diff_real\"] = control['Close'] - control['Close'].shift()\r\n control[\"diff_ma\"] = talib.MA(control[\"diff_real\"], timeperiod = 5)\r\n #control[\"diff_ma\"] = control['Close'] - control['Close'].shift()\r\n #control = control.drop([\"diff_real\"] , axis = 1)\r\n def buysell_signal(diff):\r\n \r\n \r\n if diff > 0:\r\n return 1\r\n else:\r\n return 0\r\n \r\n control[\"diff\"] = control[\"diff\"].apply(buysell_signal) \r\n \r\n\r\n Final_df = pd.merge(df,control, how='inner',left_index=True, right_index=True)\r\n\r\n Final_df =Final_df.drop(['dt', 'ma' ], axis=1)\r\n\r\n Final_df.columns = ['vader', 'polarity' ,'sensitivity','tweet_vol', 'Close', 'diff','diff_real' ,\"diff_ma\"]\r\n\r\n Final_df = Final_df[['vader' ,'polarity' 
,'diff_real','diff_ma','sensitivity','tweet_vol', 'Close', 'diff']]\r\n\r\n #### sanki polarity daha iyi çalışıyo\r\n\r\n #x = data.filter(['vader_sent_hourly mean','Close','Daily Weight count',\"movement\"])\r\n data = Final_df\r\n x = data.filter(['tweet_vol','diff_real', 'Close','diff'])\r\n\r\n ### x = data.filter(['tweet_vol','diff_real', 'Close','diff']) .76 çıkıyor ortalama ama shuffle false olma durumundan kaynaklı olabilir\r\n ## tweet vol - diff real - close > diff \r\n ## .64 accuracy\r\n from sklearn import preprocessing\r\n\r\n xx = x.values #returns a numpy array\r\n min_max_scaler = preprocessing.MinMaxScaler()\r\n x_scaled = min_max_scaler.fit_transform(xx)\r\n #df = pd.DataFrame(x_scaled)\r\n print(x_scaled.shape)\r\n\r\n from sklearn.preprocessing import LabelEncoder\r\n\r\n\r\n\r\n # convert series to supervised learning\r\n def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\r\n n_vars = 1 if type(data) is list else data.shape[1]\r\n df = pd.DataFrame(data)\r\n cols, names = list(), list()\r\n # input sequence (t-n, ... t-1)\r\n for i in range(n_in, 0, -1):\r\n cols.append(df.shift(i))\r\n names += [('var%d(t-%d)' % (df.columns[j], i)) for j in range(n_vars)]\r\n # forecast sequence (t, t+1, ... t+n)\r\n for i in range(0, n_out):\r\n cols.append(df.shift(-i))\r\n if i == 0:\r\n names += [('var%d(t)' % (df.columns[j])) for j in range(n_vars)]\r\n else:\r\n names += [('var%d(t+%d)' % (df.columns[j], i)) for j in range(n_vars)]\r\n # put it all together\r\n agg = pd.concat(cols, axis=1)\r\n agg.columns = names\r\n # drop rows with NaN values\r\n if dropnan:\r\n agg.dropna(inplace=True)\r\n return agg\r\n \r\n # integer encode direction\r\n encoder = LabelEncoder()\r\n xx[:,3] = encoder.fit_transform(xx[:,3])\r\n # ensure all data is float\r\n xx = xx.astype('float32')\r\n # normalize features\r\n scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))\r\n scaled = scaler.fit_transform(xx)\r\n # frame as supervised learning\r\n reframed = series_to_supervised(scaled, 2, 1)\r\n # drop columns we don't want to predict\r\n reframed.drop(reframed.columns[[8,9,10]], axis=1, inplace=True)\r\n print(reframed.head(1))\r\n\r\n hours_move = xx\r\n\r\n hours_move = series_to_supervised(hours_move,2,1)\r\n\r\n\r\n\r\n from tensorflow.keras.models import Sequential\r\n from tensorflow.keras.layers import LSTM, Bidirectional, Dropout,Activation\r\n from tensorflow.keras.layers import Dense\r\n\r\n from sklearn.model_selection import train_test_split\r\n from sklearn.metrics import mean_squared_error\r\n from sklearn import metrics\r\n from keras import metrics as met2\r\n import math\r\n\r\n\r\n\r\n ####### colleccted data too small that's why train and test set sizes are small because of this prediction result constant ########\r\n\r\n\r\n labels = reframed.values[:,-1]\r\n #labels = labels.astype('int')\r\n print(reframed.shape)\r\n features = reframed.values[:,:8]\r\n print(features.shape,labels.shape)\r\n ### shuffle false durumunu kontorl edilmesi gerikiyor\r\n ### shuffle false olma durmu tek bir yöne eğlim gösteriyor sürekli\r\n x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2 , shuffle = False)\r\n #x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.33)\r\n x_train = x_train.reshape((x_train.shape[0], 2, 4))\r\n x_test = x_test.reshape((x_test.shape[0],2, 4))\r\n print(x_train.shape)\r\n print(x_test.shape)\r\n\r\n\r\n # design network\r\n #for i in range(1,10):\r\n model = Sequential()\r\n 
#model.add(Conv2D(100, 2, activation='relu'))\r\n #model.add(SimpleRNN(32))\r\n model.add(Bidirectional(LSTM((4), input_shape=(x_train.shape[1], x_train.shape[2]))))\r\n #model.add(ConvLSTM2D(2,kernel_size = (3,3),input_shape =(None,x_train.shape[1], x_train.shape[2],1) ,padding='same', return_sequences=True))\r\n model.add(Dropout(0))\r\n model.add(Dense(1))\r\n model.add(Activation(\"sigmoid\"))\r\n\r\n model.compile(loss='binary_crossentropy', metrics=[met2.binary_accuracy], optimizer='adam')\r\n # fit network\r\n history = model.fit(x_train, y_train, epochs=50, batch_size=10, validation_data=(x_test, y_test), verbose=2)\r\n # plot history\r\n print(model.summary())\r\n\r\n\r\n # save model\r\n model_json = model.to_json()\r\n with open(\"model_signal1.json\", \"w\") as json_file:\r\n json_file.write(model_json)\r\n # serialize weights to HDF5\r\n model.save_weights(\"model_signal2.h5\")\r\n\r\n #x_test = test_X.reshape((x_test.shape[0], x_test.shape[2]))\r\n yhat = model.predict(x_test)\r\n #print(yhat.score)\r\n rmse = math.sqrt(mean_squared_error(y_test, yhat))\r\n print('Test RMSE: %.3f' % rmse)\r\n\r\n\r\n\r\n #print(yhat[0:90])\r\n yhat = yhat.astype(\"float64\")\r\n y2 = yhat\r\n for i in range(yhat.shape[0]):\r\n if y2[i] >0.5:\r\n y2[i] = 1\r\n else:\r\n y2[i] = 0\r\n matrix = metrics.confusion_matrix(y_test,yhat)\r\n #acc = met2.binary_accuracy(y_test,keras.backend.round(yhat),threshold=0.5)\r\n print(matrix)\r\n #print((matrix[0][0]+matrix[1][1])/(matrix[0][0]+matrix[0][1]+matrix[1][0]+matrix[1][1]))\r\n #print(acc)\r\n\r\n score = model.evaluate(x_test, y_test, batch_size=72, verbose=1)\r\n print('Test score:', score[1])\r\n\r\n ########################## real time classfication data result ##############################\r\n print(yhat)\r\n pred_time = x.tail(len(yhat))\r\n pred_time[\"diff_pre\"] = yhat\r\n pred_time['Datetime'] = df.tail(len(yhat)).index\r\n pred_time[\"Future_time\"] = pred_time['Datetime'] + datetime.timedelta(hours = 1)\r\n\r\n pred_time = pred_time.filter([\"diff\",'Datetime', 'Future_time' , 'Close'])\r\n\r\n\r\n pred_time.to_csv(r'signal_tweet_1.csv', index = False, header=True)\r\n\r\n from numpy import concatenate\r\n import numpy as np\r\n next_pre = np.concatenate((scaled[-2,:],scaled[-1,:]))\r\n next_pre = next_pre.reshape(1, next_pre.shape[0])\r\n next_pre = next_pre.reshape((next_pre.shape[0], 2, 4))\r\n tmp = 0 \r\n for i in range(0,3):\r\n new_pre = model.predict(next_pre)\r\n if new_pre[0][0] > 0.5:\r\n new_pre[0][0] = 1\r\n tmp = tmp + new_pre[0][0]\r\n else:\r\n new_pre[0][0] = 0 \r\n tmp = tmp + new_pre[0][0]\r\n print(new_pre[0][0])\r\n if tmp > 1:\r\n return 1 \r\n else:\r\n return 0 \r\n\r\n### .65 gibi bir değer civarların da tıkandı \r\n### .65 cavarlaronda bir dğruluk oranı var\r\n\r\ndef signal2_tweet_vol():\r\n notclean = pd.read_csv(\"tweet_t.csv\" , usecols=['dt', 'text' , 'vader' , 'polarity' ,'sensitivity'])\r\n notclean = notclean.dropna()\r\n print(notclean.head(1))\r\n notclean = notclean.drop_duplicates(subset=['text'])\r\n \r\n \r\n \r\n \r\n \r\n minute = \"60min\"\r\n notclean['dt'] = pd.to_datetime(notclean['dt'] , errors='coerce', format='%Y-%m-%d %H:%M:%S')\r\n notclean['dt'] = notclean['dt'].dt.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n notclean['dt'] = pd.to_datetime(notclean['dt'])\r\n \r\n notclean['DateTime'] = notclean['dt'].dt.floor('min')\r\n print(notclean.head(2))\r\n vdf = notclean.groupby(pd.Grouper(key='dt',freq=minute)).size().reset_index(name='tweet_vol')\r\n \r\n vdf.index = pd.to_datetime(vdf.index)\r\n 
vdf=vdf.set_index('dt')\r\n \r\n notclean.index = pd.to_datetime(notclean.index)\r\n \r\n vdf['tweet_vol'] =vdf['tweet_vol'].astype(float)\r\n \r\n df = notclean.groupby('DateTime').agg(lambda x: x.mean())\r\n \r\n df['tweet_vol'] = vdf['tweet_vol']\r\n \r\n \r\n df = df.drop(df.index[0])\r\n \r\n df = df.dropna()\r\n \r\n \r\n ########################## lstm model start #############################\r\n ########## get bitcoin price from 14 days before to now ##################\r\n \r\n import datetime\r\n \r\n start = vdf.head(1).index[0] - datetime.timedelta( 14 )\r\n end = vdf.tail(1).index[0] + datetime.timedelta()\r\n \r\n #start = (datetime.date.today() - datetime.timedelta( NUM_DAYS ) )\r\n \r\n import talib\r\n import cryptocompare\r\n \r\n # get bitcoin price minu\r\n #pricelimit = cryptocompare.get_historical_price_minute('BTC', 'USD', toTs=datetime.datetime.now())\r\n # get bitcoin price hourly\r\n pricelimit = cryptocompare.get_historical_price_hour('BTC', 'USD' , limit = 341, toTs=datetime.datetime(end.year,end.month,end.day,end.hour))\r\n \r\n simple_list = []\r\n for i in pricelimit: \r\n \r\n simple_list.append([i.get(\"time\"), i.get(\"close\") , i.get(\"high\") , i.get(\"low\")])\r\n \r\n #df2 = pd.DataFrame({\"Timestamp\": time, \"Close\": close}) \r\n #print(df2)\r\n control=pd.DataFrame(simple_list,columns=['Timestamp','Close' , 'High' , 'Low'])\r\n #df1.append(df2, ignore_index = True) \r\n control[\"Timestamp\"] = control[\"Timestamp\"] + 10800\r\n control[\"DateTime\"] = pd.to_datetime(control[\"Timestamp\"], unit='s')\r\n \r\n control['DateTime'] = control['DateTime'].dt.floor('min')\r\n control = control.drop([\"Timestamp\"] , axis = 1)\r\n control = control.set_index(\"DateTime\")\r\n control[\"ma\"] = talib.MA(control['Close'], timeperiod=5)\r\n control[\"adx\"] = talib.ADX(control['High'], control['Low'], control['Close'], timeperiod=5)\r\n control[\"diff\"] = control['Close'] - control[\"ma\"].shift()\r\n control[\"diff_real\"] = control['Close'] - control['Close'].shift()\r\n control[\"diff_ma\"] = talib.MA(control[\"diff_real\"], timeperiod = 5)\r\n #control[\"diff_ma\"] = control['Close'] - control['Close'].shift()\r\n #control = control.drop([\"diff_real\"] , axis = 1)\r\n def buysell_signal(diff):\r\n \r\n \r\n if diff > 0:\r\n return 1\r\n else:\r\n return 0\r\n \r\n control[\"diff\"] = control[\"diff\"].apply(buysell_signal) \r\n \r\n \r\n Final_df = pd.merge(df,control, how='inner',left_index=True, right_index=True)\r\n \r\n Final_df =Final_df.drop(['dt', 'ma' ,'High' , 'Low' ], axis=1)\r\n \r\n Final_df.columns = ['vader', 'polarity' ,'sensitivity','tweet_vol', 'Close','adx', 'diff','diff_real' ,\"diff_ma\"]\r\n \r\n Final_df = Final_df[['vader' ,'polarity' ,'diff_real','diff_ma','sensitivity','tweet_vol', 'Close', 'adx', 'diff']]\r\n\r\n data = Final_df\r\n x = data.filter(['tweet_vol','Close', 'adx','diff'])\r\n\r\n from sklearn import preprocessing\r\n \r\n xx = x.values #returns a numpy array\r\n min_max_scaler = preprocessing.MinMaxScaler()\r\n x_scaled = min_max_scaler.fit_transform(xx)\r\n #df = pd.DataFrame(x_scaled)\r\n print(x_scaled.shape)\r\n \r\n from sklearn.preprocessing import LabelEncoder\r\n \r\n \r\n \r\n # convert series to supervised learning\r\n def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):\r\n n_vars = 1 if type(data) is list else data.shape[1]\r\n df = pd.DataFrame(data)\r\n cols, names = list(), list()\r\n # input sequence (t-n, ... 
t-1)\r\n for i in range(n_in, 0, -1):\r\n cols.append(df.shift(i))\r\n names += [('var%d(t-%d)' % (df.columns[j], i)) for j in range(n_vars)]\r\n # forecast sequence (t, t+1, ... t+n)\r\n for i in range(0, n_out):\r\n cols.append(df.shift(-i))\r\n if i == 0:\r\n names += [('var%d(t)' % (df.columns[j])) for j in range(n_vars)]\r\n else:\r\n names += [('var%d(t+%d)' % (df.columns[j], i)) for j in range(n_vars)]\r\n # put it all together\r\n agg = pd.concat(cols, axis=1)\r\n agg.columns = names\r\n # drop rows with NaN values\r\n if dropnan:\r\n agg.dropna(inplace=True)\r\n return agg\r\n \r\n # integer encode direction\r\n encoder = LabelEncoder()\r\n xx[:,3] = encoder.fit_transform(xx[:,3])\r\n # ensure all data is float\r\n xx = xx.astype('float32')\r\n # normalize features\r\n scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))\r\n scaled = scaler.fit_transform(xx)\r\n # frame as supervised learning\r\n reframed = series_to_supervised(scaled, 2, 1)\r\n # drop columns we don't want to predict\r\n reframed.drop(reframed.columns[[8,9,10]], axis=1, inplace=True)\r\n print(reframed.head(1))\r\n \r\n hours_move = xx\r\n \r\n hours_move = series_to_supervised(hours_move,2,1)\r\n \r\n \r\n \r\n from tensorflow.keras.models import Sequential\r\n from tensorflow.keras.layers import LSTM, Bidirectional, Dropout,Activation\r\n from tensorflow.keras.layers import Dense\r\n \r\n from sklearn.model_selection import train_test_split\r\n from sklearn.metrics import mean_squared_error\r\n from sklearn import metrics\r\n from keras import metrics as met2\r\n import math\r\n \r\n \r\n \r\n ####### colleccted data too small that's why train and test set sizes are small because of this prediction result constant ########\r\n \r\n \r\n labels = reframed.values[:,-1]\r\n #labels = labels.astype('int')\r\n print(reframed.shape)\r\n features = reframed.values[:,:8]\r\n print(features.shape,labels.shape)\r\n x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.33 , shuffle = False)\r\n #x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.33)\r\n x_train = x_train.reshape((x_train.shape[0], 2, 4))\r\n x_test = x_test.reshape((x_test.shape[0],2, 4))\r\n print(x_train.shape)\r\n print(x_test.shape)\r\n \r\n import time\r\n now = time.time()\r\n if(now.strftime(\"%A\") == \"Sunday\") and (now.strftime(\"%H:%M\")== \"00:00\"):\r\n # design network\r\n #for i in range(1,10):\r\n model = Sequential()\r\n #model.add(Conv2D(100, 2, activation='relu'))\r\n #model.add(SimpleRNN(32))\r\n model.add(Bidirectional(LSTM((4), input_shape=(x_train.shape[1], x_train.shape[2]))))\r\n #model.add(ConvLSTM2D(2,kernel_size = (3,3),input_shape =(None,x_train.shape[1], x_train.shape[2],1) ,padding='same', return_sequences=True))\r\n model.add(Dropout(0))\r\n model.add(Dense(1))\r\n model.add(Activation(\"sigmoid\"))\r\n \r\n model.compile(loss='binary_crossentropy', metrics=[met2.binary_accuracy], optimizer='adam')\r\n # fit network\r\n history = model.fit(x_train, y_train, epochs=50, batch_size=10, validation_data=(x_test, y_test), verbose=2)\r\n # plot history\r\n print(model.summary())\r\n \r\n \r\n # save model\r\n \r\n \r\n #x_test = test_X.reshape((x_test.shape[0], x_test.shape[2]))\r\n yhat = model.predict(x_test)\r\n #print(yhat.score)\r\n rmse = math.sqrt(mean_squared_error(y_test, yhat))\r\n print('Test RMSE: %.3f' % rmse)\r\n \r\n \r\n \r\n #print(yhat[0:90])\r\n yhat = yhat.astype(\"float64\")\r\n y2 = yhat\r\n for i in range(yhat.shape[0]):\r\n if y2[i] >0.5:\r\n 
y2[i] = 1\r\n else:\r\n y2[i] = 0\r\n matrix = metrics.confusion_matrix(y_test,yhat)\r\n #acc = met2.binary_accuracy(y_test,keras.backend.round(yhat),threshold=0.5)\r\n print(matrix)\r\n #print((matrix[0][0]+matrix[1][1])/(matrix[0][0]+matrix[0][1]+matrix[1][0]+matrix[1][1]))\r\n #print(acc)\r\n\r\n print(yhat)\r\n pred_time = x.tail(len(yhat))\r\n pred_time[\"diff_pre\"] = yhat\r\n pred_time['Datetime'] = df.tail(len(yhat)).index\r\n pred_time[\"Future_time\"] = pred_time['Datetime'] + datetime.timedelta(hours = 1)\r\n\r\n pred_time = pred_time.filter([\"diff\",'Datetime', 'Future_time' , 'Close'])\r\n\r\n\r\n pred_time.to_csv(r'signal_from_future_price.csv', index = False, header=True)\r\n\r\n\r\n\r\n\r\n\r\n \r\n score = model.evaluate(x_test, y_test, batch_size=72, verbose=1)\r\n print('Test score:', score[1])\r\n \r\n import numpy as np\r\n next_pre = np.concatenate((scaled[-2,:],scaled[-1,:]))\r\n next_pre = next_pre.reshape(1, next_pre.shape[0])\r\n next_pre = next_pre.reshape((next_pre.shape[0], 2, 4))\r\n tmp = 0 \r\n for i in range(0,3):\r\n new_pre = model.predict(next_pre)\r\n if new_pre[0][0] > 0.5:\r\n new_pre[0][0] = 1\r\n tmp = tmp + new_pre[0][0]\r\n else:\r\n new_pre[0][0] = 0 \r\n tmp = tmp + new_pre[0][0]\r\n print(new_pre[0][0])\r\n if tmp > 1:\r\n return 1 \r\n else:\r\n return 0 \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"pandas.merge",
"pandas.read_csv",
"pandas.to_datetime",
"pandas.concat",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"pandas.Grouper",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"numpy.concatenate",
"sklearn.metrics.mean_squared_error",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dropout",
"sklearn.preprocessing.LabelEncoder",
"tensorflow.keras.models.Sequential",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
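series_to_supervised in the code above frames the scaled multivariate series for supervised learning by concatenating lagged copies (t-n, ..., t-1) with the current step (t) and dropping the rows the shifting leaves incomplete. A compact sketch of the same shift-and-concat idea on a toy two-feature series (the helper name and toy values are illustrative):

import numpy as np
import pandas as pd

def to_supervised(values, n_in=2):
    # Stack n_in lagged copies of the series next to the current row,
    # then drop the leading rows that the shifts leave with NaNs.
    df = pd.DataFrame(values)
    frames, names = [], []
    for lag in range(n_in, -1, -1):  # t-n_in, ..., t-1, t
        frames.append(df.shift(lag))
        suffix = f"(t-{lag})" if lag else "(t)"
        names += [f"var{col}{suffix}" for col in df.columns]
    out = pd.concat(frames, axis=1)
    out.columns = names
    return out.dropna()

toy = np.column_stack([np.arange(5.0), 10.0 * np.arange(5.0)])  # two features
print(to_supervised(toy, n_in=2))
# rows 2..4 survive; each row holds var0/var1 at t-2, t-1 and t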
cnktysz/qiskit-machine-learning | [
"de370a614bdebc825eae8b47a107545d0a7ad71d"
] | [
"qiskit_machine_learning/connectors/torch_connector.py"
] | [
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"A connector to use Qiskit (Quantum) Neural Networks as PyTorch modules.\"\"\"\n\nfrom typing import Tuple, Any, Optional, cast, Union\nimport numpy as np\nfrom qiskit.exceptions import MissingOptionalLibraryError\n\ntry:\n from sparse import SparseArray, COO\n\n _HAS_SPARSE = True\nexcept ImportError:\n _HAS_SPARSE = False\n\nfrom ..neural_networks import NeuralNetwork\nfrom ..exceptions import QiskitMachineLearningError\nfrom ..deprecation import deprecate_property\n\ntry:\n from torch import Tensor, sparse_coo_tensor, einsum\n from torch.autograd import Function\n from torch.nn import Module, Parameter as TorchParam\nexcept ImportError:\n\n class Function: # type: ignore\n \"\"\"Empty Function class\n Replacement if torch.autograd.Function is not present.\n \"\"\"\n\n pass\n\n class Tensor: # type: ignore\n \"\"\"Empty Tensor class\n Replacement if torch.Tensor is not present.\n \"\"\"\n\n pass\n\n class Module: # type: ignore\n \"\"\"Empty Module class\n Replacement if torch.nn.Module is not present.\n Always fails to initialize\n \"\"\"\n\n def __init__(self) -> None:\n raise MissingOptionalLibraryError(\n libname=\"Pytorch\",\n name=\"TorchConnector\",\n pip_install=\"pip install 'qiskit-machine-learning[torch]'\",\n )\n\n\nclass TorchConnector(Module):\n \"\"\"Connects a Qiskit (Quantum) Neural Network to PyTorch.\"\"\"\n\n # pylint: disable=abstract-method\n class _TorchNNFunction(Function):\n # pylint: disable=arguments-differ\n @staticmethod\n def forward( # type: ignore\n ctx: Any,\n input_data: Tensor,\n weights: Tensor,\n neural_network: NeuralNetwork,\n sparse: bool,\n ) -> Tensor:\n \"\"\"Forward pass computation.\n Args:\n ctx: The context to be passed to the backward pass.\n input_data: The input data.\n weights: The weights.\n neural_network: The neural network to be connected.\n sparse: Indicates whether to use sparse output or not.\n\n Returns:\n The resulting value of the forward pass.\n\n Raises:\n QiskitMachineLearningError: Invalid input data.\n MissingOptionalLibraryError: sparse not installed.\n \"\"\"\n\n # validate input shape\n if input_data.shape[-1] != neural_network.num_inputs:\n raise QiskitMachineLearningError(\n f\"Invalid input dimension! 
Received {input_data.shape} and \"\n + f\"expected input compatible to {neural_network.num_inputs}\"\n )\n\n ctx.neural_network = neural_network\n ctx.sparse = sparse\n ctx.save_for_backward(input_data, weights)\n result = neural_network.forward(input_data.numpy(), weights.numpy())\n if neural_network.sparse and sparse:\n if not _HAS_SPARSE:\n raise MissingOptionalLibraryError(\n libname=\"sparse\",\n name=\"COO\",\n pip_install=\"pip install 'qiskit-machine-learning[sparse]'\",\n )\n result = cast(COO, cast(SparseArray, result).asformat(\"coo\"))\n result_tensor = sparse_coo_tensor(result.coords, result.data)\n else:\n result_tensor = Tensor(result)\n\n # if the input was not a batch, then remove the batch-dimension from the result,\n # since the neural network will always treat input as a batch and cast to a\n # single-element batch if no batch is given and PyTorch does not follow this\n # convention.\n if len(input_data.shape) == 1:\n result_tensor = result_tensor[0]\n\n return result_tensor\n\n @staticmethod\n def backward(ctx: Any, grad_output: Tensor) -> Tuple: # type: ignore\n \"\"\"Backward pass computation.\n Args:\n ctx: context\n grad_output: previous gradient\n Raises:\n QiskitMachineLearningError: Invalid input data.\n Returns:\n gradients for the first two arguments and None for the others\n \"\"\"\n\n # get context data\n input_data, weights = ctx.saved_tensors\n neural_network = ctx.neural_network\n\n # if sparse output is requested return None, since PyTorch does not support it yet.\n if neural_network.sparse and ctx.sparse:\n return None, None, None, None\n\n # validate input shape\n if input_data.shape[-1] != neural_network.num_inputs:\n raise QiskitMachineLearningError(\n f\"Invalid input dimension! Received {input_data.shape} and \"\n + f\" expected input compatible to {neural_network.num_inputs}\"\n )\n\n # ensure same shape for single observations and batch mode\n if len(grad_output.shape) == 1:\n grad_output = grad_output.view(1, -1)\n\n # evaluate QNN gradient\n input_grad, weights_grad = neural_network.backward(input_data.numpy(), weights.numpy())\n if input_grad is not None:\n if neural_network.sparse:\n input_grad = sparse_coo_tensor(input_grad.coords, input_grad.data)\n\n # cast to dense here, since PyTorch does not support sparse output yet.\n # this should only happen if the network returns sparse output but the\n # connector is configured to return dense output.\n input_grad = input_grad.to_dense() # this should be eventually removed\n input_grad = input_grad.to(grad_output.dtype)\n else:\n input_grad = Tensor(input_grad).to(grad_output.dtype)\n\n # Takes gradients from previous layer in backward pass (i.e. later layer in forward\n # pass) j for each observation i in the batch. Multiplies this with the gradient\n # from this point on backwards with respect to each input k. Sums over all j\n # to get total gradient of output w.r.t. 
each input k and batch index i.\n # This operation should preserve the batch dimension to be able to do back-prop in\n # a batched manner.\n input_grad = einsum(\"ij,ijk->ik\", grad_output, input_grad)\n\n if weights_grad is not None:\n if neural_network.sparse:\n weights_grad = sparse_coo_tensor(weights_grad.coords, weights_grad.data)\n\n # cast to dense here, since PyTorch does not support sparse output yet.\n # this should only happen if the network returns sparse output but the\n # connector is configured to return dense output.\n weights_grad = weights_grad.to_dense() # this should be eventually removed\n weights_grad = weights_grad.to(grad_output.dtype)\n else:\n weights_grad = Tensor(weights_grad).to(grad_output.dtype)\n\n # Takes gradients from previous layer in backward pass (i.e. later layer in forward\n # pass) j for each observation i in the batch. Multiplies this with the gradient\n # from this point on backwards with respect to each parameter k. Sums over all i and\n # j to get total gradient of output w.r.t. each parameter k.\n # The weights' dimension is independent of the batch size.\n weights_grad = einsum(\"ij,ijk->k\", grad_output, weights_grad)\n\n # return gradients for the first two arguments and None for the others (i.e. qnn/sparse)\n return input_grad, weights_grad, None, None\n\n def __init__(\n self,\n neural_network: NeuralNetwork,\n initial_weights: Optional[Union[np.ndarray, Tensor]] = None,\n sparse: Optional[bool] = None,\n ):\n \"\"\"\n Args:\n neural_network: The neural network to be connected to PyTorch. Remember\n that ``input_gradients`` must be set to ``True`` in the neural network\n initialization before passing it to the ``TorchConnector`` for the gradient\n computations to work properly during training.\n initial_weights: The initial weights to start training the network. If this is None,\n the initial weights are chosen uniformly at random from [-1, 1].\n sparse: Whether this connector should return sparse output or not. If sparse is set\n to None, then the setting from the given neural network is used. Note that sparse\n output is only returned if the underlying neural network also returns sparse output,\n otherwise it will be dense independent of the setting. Also note that PyTorch\n currently does not support sparse back propagation, i.e., if sparse is set to True,\n the backward pass of this module will return None.\n \"\"\"\n super().__init__()\n self._neural_network = neural_network\n self._sparse = sparse\n\n weight_param = TorchParam(Tensor(neural_network.num_weights))\n # Register param. in graph following PyTorch naming convention\n self.register_parameter(\"weight\", weight_param)\n # If `weight_param` is assigned to `self._weights` after registration,\n # it will not be re-registered, and we can keep the private var. name\n # \"_weights\" for compatibility. The alternative, doing:\n # `self._weights = TorchParam(Tensor(neural_network.num_weights))`\n # would register the parameter with the name \"_weights\".\n self._weights = weight_param\n\n if initial_weights is None:\n self._weights.data.uniform_(-1, 1)\n else:\n self._weights.data = Tensor(initial_weights)\n\n @property\n def neural_network(self) -> NeuralNetwork:\n \"\"\"Returns the underlying neural network.\"\"\"\n return self._neural_network\n\n # Bug in mypy, if property decorator is used with another one\n # https://github.com/python/mypy/issues/1362\n\n @property # type: ignore\n @deprecate_property(\"0.2.0\", new_name=\"weight\")\n def weights(self) -> Tensor:\n \"\"\"\n .. 
deprecated:: 0.2.0\n Use :meth:`weight` instead.\n\n Returns the weights of the underlying network.\n \"\"\"\n return self.weight\n\n @property\n def weight(self) -> Tensor:\n \"\"\"Returns the weights of the underlying network.\"\"\"\n return self._weights\n\n @property\n def sparse(self) -> Optional[bool]:\n \"\"\"Returns whether this connector returns sparse output or not.\"\"\"\n return self._sparse\n\n def forward(self, input_data: Optional[Tensor] = None) -> Tensor:\n \"\"\"Forward pass.\n\n Args:\n input_data: data to be evaluated.\n\n Returns:\n Result of forward pass of this model.\n \"\"\"\n input_ = input_data if input_data is not None else Tensor([])\n return TorchConnector._TorchNNFunction.apply(\n input_, self._weights, self._neural_network, self._sparse\n )\n"
] | [
[
"torch.einsum",
"torch.Tensor",
"torch.sparse_coo_tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
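The backward pass in the TorchConnector row above contracts the upstream gradient with the per-sample Jacobians returned by the neural network using torch.einsum: "ij,ijk->ik" keeps the batch axis for the input gradient, while "ij,ijk->k" sums it away for the weight gradient. A minimal sketch of those two contractions; the tensor sizes below are made up for illustration and do not come from the row:

import torch

# Illustrative shapes only: 4 samples, 2 network outputs, 3 inputs, 5 weights.
batch, n_out, n_in, n_w = 4, 2, 3, 5
grad_output = torch.randn(batch, n_out)       # upstream dL/dy, one row per sample
input_jac = torch.randn(batch, n_out, n_in)   # per-sample dy/dx from the network
weight_jac = torch.randn(batch, n_out, n_w)   # per-sample dy/dw from the network

# Same contractions as the backward pass above: the input gradient keeps the
# batch dimension, the weight gradient is summed over batch and outputs.
input_grad = torch.einsum("ij,ijk->ik", grad_output, input_jac)    # shape (4, 3)
weights_grad = torch.einsum("ij,ijk->k", grad_output, weight_jac)  # shape (5,)
print(input_grad.shape, weights_grad.shape)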
daghan/jann | [
"1ce6a74a99313038e5823e760395f9c303ea5b5f"
] | [
"Jann/utils.py"
] | [
"import io\nimport csv\nimport pickle\nimport hashlib\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nimport tensorflow as tf\nimport sentencepiece as spm\nimport tensorflow_hub as hub\nfrom annoy import AnnoyIndex\n\n\ndef parse_arguments(arguments=None):\n \"\"\"Parse the command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n default_module = 'data/module/universal-sentence-encoder-lite-2'\n parser.add_argument('--module_path',\n help='Specify the local encoder model path',\n default=default_module)\n parser.add_argument('--use_sentence_piece',\n type=bool,\n default=True)\n parser.add_argument('--infile',\n help=\"Path to input file.\",\n default='data/CMDC/all_lines_50.txt')\n parser.add_argument('--infile_path',\n help=\"Input file path\")\n parser.add_argument('--outfile',\n help=\"Output file.\",\n default=None)\n parser.add_argument('--num_lines',\n type=int,\n help=\"Number of lines to processes\")\n parser.add_argument('--pairs',\n dest='pairs',\n help=\"Flag to use pairs mode or not.\",\n action='store_true',\n default=False)\n parser.add_argument('--delimiter',\n default='\\t',\n help=\"Delimeter between input<>response.\")\n parser.add_argument('--verbose',\n dest='verbose',\n help=\"Flag to add verbose logging detail.\",\n action='store_true',\n default=True)\n parser.add_argument('--num_trees',\n type=int,\n help='Number of trees for to search for neighbors.',\n default=100)\n parser.add_argument('--num_neighbors',\n type=int,\n help='Number of nearest neighbors to return.',\n default=10)\n parser.add_argument('--search_k',\n type=int,\n help='Number of trees to search.',\n default=10)\n args = parser.parse_args(arguments)\n\n # Specify the local module path\n if 'lite' in args.module_path:\n args.use_sentence_piece = True\n\n # Reduce logging output.\n if args.verbose:\n tf.logging.set_verbosity(tf.logging.DEBUG)\n else:\n tf.logging.set_verbosity(tf.logging.WARN)\n return args\n\n\ndef load_data(file_path, dest_type, pairs=False, delimiter='\\t'):\n \"\"\"Load line separated text files into list. 
\"\"\"\n\n dest = None\n dest2 = None\n if dest_type == 'list':\n if not pairs:\n tempfile = io.open(\n file_path, 'r', encoding=\"iso-8859-1\", errors='ignore')\n dest = []\n for line in tempfile:\n clean_string = line.strip()\n # check if blank\n if clean_string:\n dest.append(clean_string)\n tempfile.close()\n else:\n first_lines = []\n second_lines = []\n tf.logging.info('Loading pairs data')\n with open(file_path, 'r', encoding='iso-8859-1') as f:\n reader = csv.reader(f, delimiter=delimiter)\n for row in reader:\n first_lines.append(row[0])\n second_lines.append(row[1])\n dest = first_lines\n dest2 = second_lines\n elif dest_type == 'dict':\n with open(file_path, 'rb') as f:\n dest = pickle.load(f)\n return dest, dest2\n\n\ndef load_lines(fname, fields):\n \"\"\"Load Cornell Movie Dialog Lines.\"\"\"\n lines = {}\n with open(fname, 'r', encoding='iso-8859-1') as f:\n for line in f:\n values = line.split(\" +++$+++ \")\n line_obj = {}\n for i, field in enumerate(fields):\n line_obj[field] = values[i]\n lines[line_obj['lineID']] = line_obj\n return lines\n\n\ndef load_conversations(fname, lines, fields):\n \"\"\"Load Cornell Movie Dialog Conversations.\"\"\"\n convos = []\n with open(fname, 'r', encoding='iso-8859-1') as f:\n for line in f:\n values = line.split(\" +++$+++ \")\n conv_obj = {}\n for i, field in enumerate(fields):\n conv_obj[field] = values[i]\n # Convert string to list\n line_ids = eval(conv_obj[\"utteranceIDs\"])\n conv_obj[\"lines\"] = []\n for line_id in line_ids:\n conv_obj[\"lines\"].append(lines[line_id])\n convos.append(conv_obj)\n return convos\n\n\ndef extract_pairs(conversations):\n \"\"\"Extract pairs from the Cornell Movie Dialog Conversations.\"\"\"\n collected_pairs = []\n for conversation in conversations:\n # if convo is ABC, pairs are AB and BC\n for i in range(len(conversation[\"lines\"]) - 1):\n first_line = conversation[\"lines\"][i][\"text\"].strip()\n second_line = conversation[\"lines\"][i+1][\"text\"].strip()\n if first_line and second_line:\n collected_pairs.append([first_line, second_line])\n return collected_pairs\n\n\ndef extract_pairs_from_lines(lines):\n \"\"\"Extract pairs from raw lines.\"\"\"\n collected_pairs = []\n for i in range(len(lines) - 1):\n first_line = lines[i].strip()\n second_line = lines[i+1].strip()\n if first_line and second_line:\n collected_pairs.append([first_line, second_line])\n return collected_pairs\n\n\ndef process_to_IDs_in_sparse_format(sp, sentences):\n # A utility method that processes sentences\n # with the sentence piece processor\n # 'sp' and returns the results in tf.SparseTensor-similar format:\n # (values, indices, dense_shape)\n ids = [sp.EncodeAsIds(x) for x in sentences]\n max_len = max(len(x) for x in ids)\n dense_shape = (len(ids), max_len)\n values = [item for sublist in ids for item in sublist]\n r_ids = range(len(ids))\n indices = [[row, col] for row in r_ids for col in range(len(ids[row]))]\n return (values, indices, dense_shape)\n\n\ndef get_id_chunks(the_big_list, n_sub_list):\n \"\"\"Yield successive n_sub_list-sized chunks from the_big_list.\"\"\"\n for i in range(0, len(the_big_list), n_sub_list):\n yield the_big_list[i:i + n_sub_list]\n\n\ndef embed_lines(args, unencoded_lines, output_dict,\n unencoded_lines_resps=None):\n \"\"\"Embed a collection of lines to an output dictionary.\"\"\"\n\n # Import the Universal Sentence Encoder's TF Hub module\n module = hub.Module(args.module_path, trainable=False)\n config = tf.ConfigProto(allow_soft_placement=True)\n\n with tf.Session(config=config) 
as session:\n # initialize the variables\n session.run(\n [tf.global_variables_initializer(), tf.tables_initializer()])\n\n if args.use_sentence_piece:\n # spm_path now contains a path to the SentencePiece\n # model stored inside the TF-Hub module\n spm_path = session.run(module(signature=\"spm_path\"))\n sp = spm.SentencePieceProcessor()\n sp.Load(spm_path)\n\n # build an input placeholder\n with tf.device('/gpu:0'):\n input_placeholder = tf.sparse_placeholder(\n tf.int64, shape=[None, None])\n embeddings = module(inputs=dict(\n values=input_placeholder.values,\n indices=input_placeholder.indices,\n dense_shape=input_placeholder.dense_shape\n )\n )\n\n # size of chunk is how many lines will be encoded\n # with each pass of the model\n size_of_chunk = 256\n\n # ensure that every line has a response\n assert len(unencoded_lines) == len(unencoded_lines_resps)\n all_id_chunks = get_id_chunks(\n range(len(unencoded_lines)), size_of_chunk)\n\n max_iter = len(unencoded_lines) // size_of_chunk\n for id_chunk in tqdm(all_id_chunks, total=max_iter):\n # get the chunk of lines and matching responses by list of ids\n chunk_unencoded_lines = [unencoded_lines[x] for x in id_chunk]\n chunck_unenc_resp = [unencoded_lines_resps[x] for x in id_chunk]\n\n if args.use_sentence_piece:\n # process unencoded lines to values and IDs in sparse format\n values, indices, dense_shape = process_to_IDs_in_sparse_format(\n sp=sp, sentences=chunk_unencoded_lines)\n\n # run the session\n with tf.device('/gpu:0'):\n chunk_line_embds = session.run(\n embeddings,\n feed_dict={\n input_placeholder.values: values,\n input_placeholder.indices: indices,\n input_placeholder.dense_shape: dense_shape\n }\n )\n else:\n with tf.device('/gpu:0'):\n chunk_line_embds = session.run(\n module(chunk_unencoded_lines))\n\n # hash the object into the full output dataframe\n for i, line_embedding in enumerate(\n np.array(chunk_line_embds).tolist()):\n if args.verbose:\n tf.logging.info(\n \"Line: {}\".format(chunk_unencoded_lines[i]))\n tf.logging.info(\n \"Embedding size: {}\".format(len(line_embedding)))\n snippet = \", \".join((str(x) for x in line_embedding[:3]))\n tf.logging.info(\n \"Embedding: [{}, ...]\\n\".format(snippet))\n\n # Encode a hash for the string\n hash_object = hashlib.md5(\n chunk_unencoded_lines[i].encode('utf-8'))\n\n # Add a row to the dataframe\n output_dict[hash_object.hexdigest()] = {\n 'line': chunk_unencoded_lines[i],\n 'line_embedding': line_embedding,\n 'response': chunck_unenc_resp[i]\n }\n return output_dict\n\n\nclass GenModelUSE(object):\n def __init__(self, annoy_index_path, unique_strings,\n use_sentence_piece, module_path):\n self.annoy_index_path = annoy_index_path\n self.unique_strings = unique_strings\n\n # load the annoy index for mmap speed\n # Length of item vector that will be indexed\n self.annoy_index = AnnoyIndex(512)\n\n # super fast, will just mmap the file\n self.annoy_index.load(self.annoy_index_path)\n\n g = tf.Graph()\n with g.as_default():\n # define the module\n module = hub.Module(module_path, trainable=False)\n\n if use_sentence_piece:\n # build an input placeholder\n self.input_placeholder = tf.sparse_placeholder(\n tf.int64, shape=[None, None])\n\n # build an input / output from the placeholders\n self.embeddings = module(inputs=dict(\n values=self.input_placeholder.values,\n indices=self.input_placeholder.indices,\n dense_shape=self.input_placeholder.dense_shape\n )\n )\n else:\n # build an input placeholder\n self.input_placeholder = tf.placeholder(\n tf.string, shape=(None))\n 
self.embeddings = module(self.input_placeholder)\n\n init_op = tf.group([tf.global_variables_initializer(),\n tf.tables_initializer()])\n\n # do not finalize the graph if using sentence piece module\n if not use_sentence_piece:\n g.finalize()\n\n # define the configuration\n config = tf.ConfigProto(allow_soft_placement=True)\n self.sess = tf.Session(graph=g, config=config)\n self.sess.run(init_op)\n\n if use_sentence_piece:\n # spm_path now contains a path to the SentencePiece\n # model stored inside the TF-Hub module\n with g.as_default():\n spm_path = self.sess.run(module(signature=\"spm_path\"))\n self.sp = spm.SentencePieceProcessor()\n self.sp.Load(spm_path)\n\n tf.logging.info('Interactive session is initialized...')\n\n def inference(self, input_text, num_neighbors=10, use_sentence_piece=True):\n \"\"\"Inference from nearest neighbor model.\"\"\"\n\n # Handle the short input\n if len(input_text) < 1:\n return 'Say something!'\n\n tf.logging.info('Input text: {}'.format(input_text))\n\n # Build a list of the user input\n user_input = [input_text]\n\n if use_sentence_piece:\n # process unencoded lines to values and IDs in sparse format\n values, indices, dense_shape = process_to_IDs_in_sparse_format(\n sp=self.sp, sentences=user_input)\n\n # run the session\n # Get embedding of the input text\n embeddings = self.sess.run(\n self.embeddings,\n feed_dict={\n self.input_placeholder.values: values,\n self.input_placeholder.indices: indices,\n self.input_placeholder.dense_shape: dense_shape\n }\n )\n else:\n embeddings = self.sess.run(\n self.embeddings,\n feed_dict={\n self.input_placeholder: user_input\n }\n )\n\n tf.logging.info(\n 'Successfully generated {} embeddings of length {}.'.format(\n len(embeddings), len(embeddings[0])))\n\n # Extract the query vector of interest.\n query_vector = embeddings[0]\n\n # Get nearest neighbors\n nns, distances = self.annoy_index.get_nns_by_vector(\n query_vector, num_neighbors, search_k=-1, include_distances=True)\n\n # Log the ids\n tf.logging.info('Nearest neighbor IDS: {}'.format(nns))\n\n return nns, distances\n"
] | [
[
"tensorflow.Graph",
"tensorflow.device",
"tensorflow.sparse_placeholder",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"numpy.array",
"tensorflow.tables_initializer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
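GenModelUSE in the Jann/utils.py row above memory-maps a prebuilt Annoy index of 512-dimensional Universal Sentence Encoder embeddings and queries it with get_nns_by_vector. A minimal sketch of that build-and-query cycle under stated assumptions: the random vectors, corpus size and 'lines.ann' file name are placeholders, and the explicit 'angular' metric simply spells out the default that the row's AnnoyIndex(512) call relies on in older Annoy releases.

import numpy as np
from annoy import AnnoyIndex

dim = 512                           # matches the USE embedding size used above
index = AnnoyIndex(dim, 'angular')  # metric written out explicitly (assumed default)
for i in range(1000):               # toy corpus standing in for encoded lines
    index.add_item(i, np.random.rand(dim).astype('float32'))
index.build(100)                    # num_trees: more trees give better recall, bigger index
index.save('lines.ann')             # placeholder file name

# Query side, mirroring GenModelUSE.inference: load() just mmaps the file.
lookup = AnnoyIndex(dim, 'angular')
lookup.load('lines.ann')
query = np.random.rand(dim).astype('float32')
nns, distances = lookup.get_nns_by_vector(query, 10, search_k=-1, include_distances=True)
print(nns[:3], distances[:3])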
pyaf/severstal-steel-defect-detection | [
"68a0df4164e84803b6cba78597a079d3736b4e00"
] | [
"ensemble.py"
] | [
"import pdb\nimport os\nimport cv2\nimport time\nfrom glob import glob\nimport torch\nimport scipy\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.data import DataLoader\nfrom argparse import ArgumentParser\nimport albumentations\nfrom albumentations import torch as AT\nfrom torchvision.datasets.folder import pil_loader\nimport torch.utils.data as data\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn.metrics import cohen_kappa_score\nfrom models import Model, get_model\nfrom utils import *\nfrom image_utils import *\n\n# from submission import get_best_threshold\n\n\ndef get_parser():\n parser = ArgumentParser()\n parser.add_argument(\n \"-c\",\n \"--model_folder_path\",\n dest=\"model_folder_path\",\n help=\"relative path to the folder where model checkpoints are saved\",\n )\n parser.add_argument(\n \"-p\",\n \"--predict_on\",\n dest=\"predict_on\",\n help=\"predict on train or test set, options: test or train\",\n default=\"resnext101_32x4d\",\n )\n return parser\n\n\nclass Dataset(data.Dataset):\n def __init__(self, root, df, size, mean, std, tta=4):\n self.root = root\n self.size = size\n self.fnames = list(df[\"id_code\"])\n self.num_samples = len(self.fnames)\n self.tta = tta\n self.TTA = albumentations.Compose(\n [\n # albumentations.RandomRotate90(p=1),\n albumentations.Transpose(p=0.5),\n albumentations.Flip(p=0.5),\n albumentations.RandomScale(scale_limit=0.1),\n ]\n )\n self.transform = albumentations.Compose(\n [\n albumentations.Normalize(mean=mean, std=std, p=1),\n albumentations.Resize(size, size),\n AT.ToTensor(),\n ]\n )\n\n def __getitem__(self, idx):\n fname = self.fnames[idx]\n path = os.path.join(self.root, fname + \".png\")\n # image = load_image(path, size)\n # image = load_ben_gray(path)\n image = load_ben_color(path, size=self.size, crop=True)\n\n images = [self.transform(image=image)[\"image\"]]\n for _ in range(self.tta): # perform ttas\n aug_img = self.TTA(image=image)[\"image\"]\n aug_img = self.transform(image=aug_img)[\"image\"]\n images.append(aug_img)\n return torch.stack(images, dim=0)\n\n def __len__(self):\n return self.num_samples\n\n\ndef get_predictions(model, testset, tta):\n \"\"\"return all predictions on testset in a list\"\"\"\n num_images = len(testset)\n predictions = []\n for i, batch in enumerate(tqdm(testset)):\n if tta:\n # images.shape [n, 3, 96, 96] where n is num of 1+tta\n for images in batch:\n preds = model(images.to(device)) # [n, num_classes]\n predictions.append(preds.mean(dim=0).detach().tolist())\n else:\n preds = model(batch[:, 0].to(device))\n preds = preds.detach().tolist() # [1]\n predictions.extend(preds)\n\n return np.array(predictions)\n\n\ndef get_load_model(model_name, ckpt_path, num_classes):\n model = get_model(model_name, num_classes, pretrained=None)\n state = torch.load(ckpt_path, map_location=lambda storage, loc: storage)\n epoch = state[\"epoch\"]\n model.load_state_dict(state[\"state_dict\"])\n\n best_thresholds = state[\"best_thresholds\"]\n model.to(device)\n model.eval()\n return model, best_thresholds\n\n\ndef get_model_name_fold(model_folder_path):\n # example ckpt_path = weights/9-7_{modelname}_fold0_text/\n model_folder = model_folder_path.split(\n \"/\")[1] # 9-7_{modelname}_fold0_text\n model_name = \"_\".join(model_folder.split(\"_\")[1:-2]) # modelname\n fold = model_folder.split(\"_\")[-2] # fold0\n fold = fold.split(\"fold\")[-1] # 0\n return model_name, int(fold)\n\n\nif __name__ == \"__main__\":\n 
\"\"\"\n Uses a list of ckpts, predicts on whole train set, averages the predictions and finds optimized thresholds based on train qwk\n \"\"\"\n model_name = \"efficientnet-b5\"\n ckpt_path_list = [\n # \"weights/19-7_efficientnet-b5_fold0_bgccpold/ckpt20.pth\",\n # \"weights/19-7_efficientnet-b5_fold1_bgccpold/ckpt10.pth\",\n # \"weights/19-7_efficientnet-b5_fold2_bgccpold/ckpt30.pth\",\n # \"weights/19-7_efficientnet-b5_fold3_bgccpold/ckpt15.pth\"\n \"weights/21-7_efficientnet-b5_fold1_bgccpo300/ckpt20.pth\"\n ]\n\n # folds = [0, 1, 2, 3] # for extracting val sets, used for thr optimization\n folds = [1]\n sample_submission_path = \"data/train.csv\"\n\n tta = 4 # number of augs in tta\n total_folds = 7\n\n root = f\"data/train_images/\"\n size = 300\n mean = (0.485, 0.456, 0.406)\n std = (0.229, 0.224, 0.225)\n # mean = (0, 0, 0)\n # std = (1, 1, 1)\n use_cuda = True\n num_classes = 1\n num_workers = 8\n batch_size = 16\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n if use_cuda:\n cudnn.benchmark = True\n torch.set_default_tensor_type(\"torch.cuda.FloatTensor\")\n else:\n torch.set_default_tensor_type(\"torch.FloatTensor\")\n\n df = pd.read_csv(sample_submission_path)\n\n # kfold = StratifiedKFold(total_folds, shuffle=True, random_state=69)\n # index_list = list(kfold.split(df[\"id_code\"], df[\"diagnosis\"]))\n\n # val_idx = []\n # for fold in folds:\n # val_idx.extend(index_list[fold][1])\n\n # df = df.iloc[val_idx]\n\n dataset = DataLoader(\n Dataset(root, df, size, mean, std, tta),\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n pin_memory=True if use_cuda else False,\n )\n print(f\"len dataset: {len(dataset)}\")\n\n # generate predictions using all models\n all_predictions = []\n for idx, ckpt in enumerate(ckpt_path_list):\n print(\"model: %s\" % ckpt)\n model, val_best_th = get_load_model(model_name, ckpt, num_classes)\n predictions = get_predictions(model, dataset, tta)\n all_predictions.append(predictions)\n # break\n\n predictions = np.mean(all_predictions, axis=0).flatten()\n\n # optimize thresholds on training set\n targets = df[\"diagnosis\"].values\n initial_thresholds = [0.5, 1.5, 2.5, 3.5]\n simplex = scipy.optimize.minimize(\n compute_score_inv,\n initial_thresholds,\n args=(predictions, targets),\n method=\"nelder-mead\",\n )\n best_thresholds = simplex[\"x\"]\n print(\"Best thresholds: %s\" % best_thresholds)\n\n # predictions using best_thresholds\n preds = predict(predictions, best_thresholds)\n\n qwk = cohen_kappa_score(preds, targets, weights=\"quadratic\")\n print(f\"Train qwk score: {qwk}\")\n\n cm = ConfusionMatrix(targets, preds)\n print(cm.print_normalized_matrix())\n\n # for further analysis.\n pdb.set_trace()\n\n # now use the best_threshold on test data to generate predictions\n\n df = pd.read_csv(\"data/sample_submission.csv\")\n root = f\"data/test_images/\"\n testset = DataLoader(\n Dataset(root, df, size, mean, std, tta),\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n pin_memory=True if use_cuda else False,\n )\n # generate predictions using all models\n base_thresholds = np.array([0.5, 1.5, 2.5, 3.5])\n all_predictions = []\n for idx, ckpt in enumerate(ckpt_path_list):\n print(\"model: %s\" % ckpt)\n model, val_best_th = get_load_model(model_name, ckpt, num_classes)\n predictions = get_predictions(model, testset, tta)\n preds = predict(predictions, best_thresholds)\n print(np.unique(preds, return_counts=True))\n all_predictions.append(predictions)\n # break\n predictions = 
np.mean(all_predictions, axis=0).flatten()\n preds = predict(predictions, best_thresholds)\n print(np.unique(preds, return_counts=True))\n\n pdb.set_trace()\n\n\"\"\"\nFootnotes\n\n[1] a cuda variable can be converted to python list with .detach() (i.e., grad no longer required) then .tolist(), apart from that a cuda variable can be converted to numpy variable only by copying the tensor to host memory by .cpu() and then .numpy\n\"\"\"\n"
] | [
[
"torch.set_default_tensor_type",
"pandas.read_csv",
"torch.load",
"numpy.unique",
"sklearn.metrics.cohen_kappa_score",
"scipy.optimize.minimize",
"numpy.mean",
"torch.device",
"numpy.array",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
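ensemble.py in the row above averages TTA predictions from several checkpoints and then tunes the four ordinal cut-points with Nelder-Mead so that the quadratic-weighted kappa against the training labels improves. The predict and compute_score_inv helpers it calls live in the repo's utils module and are not shown in the row, so the versions below are hypothetical stand-ins, and the scores and targets are toy data:

import numpy as np
import scipy.optimize
from sklearn.metrics import cohen_kappa_score

def predict(scores, thresholds):
    # Hypothetical stand-in: bucket continuous outputs into ordinal classes 0-4.
    return np.digitize(scores, np.sort(thresholds))

def compute_score_inv(thresholds, scores, targets):
    # Hypothetical stand-in: 1 - QWK, so minimizing it maximizes agreement.
    return 1 - cohen_kappa_score(predict(scores, thresholds), targets, weights="quadratic")

rng = np.random.default_rng(0)                   # toy data in place of averaged model outputs
targets = rng.integers(0, 5, size=200)
scores = targets + rng.normal(0, 0.4, size=200)

simplex = scipy.optimize.minimize(
    compute_score_inv, [0.5, 1.5, 2.5, 3.5],
    args=(scores, targets), method="nelder-mead")
print("best thresholds:", simplex["x"])          # QWK is piecewise constant, so expect small nudges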
WChCh/incubator-superset | [
"f59ed026e4b26c80a50f79d726969a21cb37e1e5"
] | [
"superset/views/core.py"
] | [
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=C,R,W\nfrom datetime import datetime, timedelta\nimport inspect\nimport logging\nimport os\nimport re\nimport time\nimport traceback\nfrom typing import List # noqa: F401\nfrom urllib import parse\n\nfrom flask import (\n abort, flash, g, Markup, redirect, render_template, request, Response, url_for,\n)\nfrom flask_appbuilder import expose, SimpleFormView\nfrom flask_appbuilder.actions import action\nfrom flask_appbuilder.models.sqla.interface import SQLAInterface\nfrom flask_appbuilder.security.decorators import has_access, has_access_api\nfrom flask_babel import gettext as __\nfrom flask_babel import lazy_gettext as _\nimport pandas as pd\nimport simplejson as json\nimport sqlalchemy as sqla\nfrom sqlalchemy import and_, create_engine, MetaData, or_, update\nfrom sqlalchemy.engine.url import make_url\nfrom sqlalchemy.exc import IntegrityError\nfrom werkzeug.routing import BaseConverter\nfrom werkzeug.utils import secure_filename\n\nfrom superset import (\n app, appbuilder, cache, conf, db, get_feature_flags, results_backend,\n security_manager, sql_lab, viz)\nfrom superset.connectors.connector_registry import ConnectorRegistry\nfrom superset.connectors.sqla.models import AnnotationDatasource, SqlaTable\nfrom superset.exceptions import SupersetException\nfrom superset.forms import CsvToDatabaseForm\nfrom superset.jinja_context import get_template_processor\nfrom superset.legacy import update_time_range\nimport superset.models.core as models\nfrom superset.models.sql_lab import Query\nfrom superset.models.user_attributes import UserAttribute\nfrom superset.sql_parse import ParsedQuery\nfrom superset.sql_validators import get_validator_by_name\nfrom superset.utils import core as utils\nfrom superset.utils import dashboard_import_export\nfrom superset.utils.dates import now_as_float\nfrom superset.utils.decorators import etag_cache\nfrom .base import (\n api, BaseSupersetView,\n check_ownership,\n CsvResponse, data_payload_response, DeleteMixin, generate_download_headers,\n get_error_msg, handle_api_exception, json_error_response, json_success,\n SupersetFilter, SupersetModelView, YamlExportMixin,\n)\nfrom .utils import bootstrap_user_data, get_datasource_info, get_form_data, get_viz\n\nconfig = app.config\nCACHE_DEFAULT_TIMEOUT = config.get('CACHE_DEFAULT_TIMEOUT', 0)\nstats_logger = config.get('STATS_LOGGER')\nlog_this = models.Log.log_this\nDAR = models.DatasourceAccessRequest\nQueryStatus = utils.QueryStatus\n\n\nALL_DATASOURCE_ACCESS_ERR = __(\n 'This endpoint requires the `all_datasource_access` permission')\nDATASOURCE_MISSING_ERR = __('The data source seems to have been deleted')\nACCESS_REQUEST_MISSING_ERR = __(\n 'The access requests seem to have been deleted')\nUSER_MISSING_ERR 
= __('The user seems to have been deleted')\n\nFORM_DATA_KEY_BLACKLIST: List[str] = []\nif not config.get('ENABLE_JAVASCRIPT_CONTROLS'):\n FORM_DATA_KEY_BLACKLIST = [\n 'js_tooltip',\n 'js_onclick_href',\n 'js_data_mutator',\n ]\n\n\ndef get_database_access_error_msg(database_name):\n return __('This view requires the database %(name)s or '\n '`all_datasource_access` permission', name=database_name)\n\n\ndef is_owner(obj, user):\n \"\"\" Check if user is owner of the slice \"\"\"\n return obj and user in obj.owners\n\n\ndef check_datasource_perms(self, datasource_type=None, datasource_id=None):\n \"\"\"\n Check if user can access a cached response from explore_json.\n\n This function takes `self` since it must have the same signature as the\n the decorated method.\n\n \"\"\"\n form_data = get_form_data()[0]\n datasource_id, datasource_type = get_datasource_info(\n datasource_id, datasource_type, form_data)\n viz_obj = get_viz(\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data,\n force=False,\n )\n security_manager.assert_datasource_permission(viz_obj.datasource)\n\n\ndef check_slice_perms(self, slice_id):\n \"\"\"\n Check if user can access a cached response from slice_json.\n\n This function takes `self` since it must have the same signature as the\n the decorated method.\n\n \"\"\"\n form_data, slc = get_form_data(slice_id, use_slice_data=True)\n datasource_type = slc.datasource.type\n datasource_id = slc.datasource.id\n viz_obj = get_viz(\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data,\n force=False,\n )\n security_manager.assert_datasource_permission(viz_obj.datasource)\n\n\nclass SliceFilter(SupersetFilter):\n def apply(self, query, func): # noqa\n if security_manager.all_datasource_access():\n return query\n perms = self.get_view_menus('datasource_access')\n # TODO(bogdan): add `schema_access` support here\n return query.filter(self.model.perm.in_(perms))\n\n\nclass DashboardFilter(SupersetFilter):\n\n \"\"\"List dashboards for which users have access to at least one slice or are owners\"\"\"\n\n def apply(self, query, func): # noqa\n if security_manager.all_datasource_access():\n return query\n Slice = models.Slice # noqa\n Dash = models.Dashboard # noqa\n User = security_manager.user_model\n # TODO(bogdan): add `schema_access` support here\n datasource_perms = self.get_view_menus('datasource_access')\n slice_ids_qry = (\n db.session\n .query(Slice.id)\n .filter(Slice.perm.in_(datasource_perms))\n )\n owner_ids_qry = (\n db.session\n .query(Dash.id)\n .join(Dash.owners)\n .filter(User.id == User.get_user_id())\n )\n query = query.filter(\n or_(Dash.id.in_(\n db.session.query(Dash.id)\n .distinct()\n .join(Dash.slices)\n .filter(Slice.id.in_(slice_ids_qry)),\n ), Dash.id.in_(owner_ids_qry)),\n )\n return query\n\n\nclass DatabaseView(SupersetModelView, DeleteMixin, YamlExportMixin): # noqa\n datamodel = SQLAInterface(models.Database)\n\n list_title = _('Databases')\n show_title = _('Show Database')\n add_title = _('Add Database')\n edit_title = _('Edit Database')\n\n list_columns = [\n 'database_name', 'backend', 'allow_run_async',\n 'allow_dml', 'allow_csv_upload', 'expose_in_sqllab', 'creator', 'modified']\n order_columns = [\n 'database_name', 'allow_run_async', 'allow_dml',\n 'modified', 'allow_csv_upload', 'expose_in_sqllab',\n ]\n add_columns = [\n 'database_name', 'sqlalchemy_uri', 'cache_timeout', 'expose_in_sqllab',\n 'allow_run_async', 'allow_csv_upload',\n 'allow_ctas', 'allow_dml', 
'force_ctas_schema', 'impersonate_user',\n 'allow_multi_schema_metadata_fetch', 'extra',\n ]\n search_exclude_columns = (\n 'password', 'tables', 'created_by', 'changed_by', 'queries',\n 'saved_queries')\n edit_columns = add_columns\n show_columns = [\n 'tables',\n 'cache_timeout',\n 'extra',\n 'database_name',\n 'sqlalchemy_uri',\n 'perm',\n 'created_by',\n 'created_on',\n 'changed_by',\n 'changed_on',\n ]\n add_template = 'superset/models/database/add.html'\n edit_template = 'superset/models/database/edit.html'\n base_order = ('changed_on', 'desc')\n description_columns = {\n 'sqlalchemy_uri': utils.markdown(\n 'Refer to the '\n '[SqlAlchemy docs]'\n '(https://docs.sqlalchemy.org/en/rel_1_2/core/engines.html#'\n 'database-urls) '\n 'for more information on how to structure your URI.', True),\n 'expose_in_sqllab': _('Expose this DB in SQL Lab'),\n 'allow_run_async': _(\n 'Operate the database in asynchronous mode, meaning '\n 'that the queries are executed on remote workers as opposed '\n 'to on the web server itself. '\n 'This assumes that you have a Celery worker setup as well '\n 'as a results backend. Refer to the installation docs '\n 'for more information.'),\n 'allow_ctas': _('Allow CREATE TABLE AS option in SQL Lab'),\n 'allow_dml': _(\n 'Allow users to run non-SELECT statements '\n '(UPDATE, DELETE, CREATE, ...) '\n 'in SQL Lab'),\n 'force_ctas_schema': _(\n 'When allowing CREATE TABLE AS option in SQL Lab, '\n 'this option forces the table to be created in this schema'),\n 'extra': utils.markdown(\n 'JSON string containing extra configuration elements.<br/>'\n '1. The ``engine_params`` object gets unpacked into the '\n '[sqlalchemy.create_engine]'\n '(https://docs.sqlalchemy.org/en/latest/core/engines.html#'\n 'sqlalchemy.create_engine) call, while the ``metadata_params`` '\n 'gets unpacked into the [sqlalchemy.MetaData]'\n '(https://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html'\n '#sqlalchemy.schema.MetaData) call.<br/>'\n '2. The ``metadata_cache_timeout`` is a cache timeout setting '\n 'in seconds for metadata fetch of this database. Specify it as '\n '**\"metadata_cache_timeout\": {\"schema_cache_timeout\": 600, '\n '\"table_cache_timeout\": 600}**. '\n 'If unset, cache will not be enabled for the functionality. '\n 'A timeout of 0 indicates that the cache never expires.<br/>'\n '3. The ``schemas_allowed_for_csv_upload`` is a comma separated list '\n 'of schemas that CSVs are allowed to upload to. '\n 'Specify it as **\"schemas_allowed_for_csv_upload\": '\n '[\"public\", \"csv_upload\"]**. '\n 'If database flavor does not support schema or any schema is allowed '\n 'to be accessed, just leave the list empty', True),\n 'impersonate_user': _(\n 'If Presto, all the queries in SQL Lab are going to be executed as the '\n 'currently logged on user who must have permission to run them.<br/>'\n 'If Hive and hive.server2.enable.doAs is enabled, will run the queries as '\n 'service account, but impersonate the currently logged on user '\n 'via hive.server2.proxy.user property.'),\n 'allow_multi_schema_metadata_fetch': _(\n 'Allow SQL Lab to fetch a list of all tables and all views across '\n 'all database schemas. For large data warehouse with thousands of '\n 'tables, this can be expensive and put strain on the system.'),\n 'cache_timeout': _(\n 'Duration (in seconds) of the caching timeout for charts of this database. '\n 'A timeout of 0 indicates that the cache never expires. 
'\n 'Note this defaults to the global timeout if undefined.'),\n 'allow_csv_upload': _(\n 'If selected, please set the schemas allowed for csv upload in Extra.'),\n }\n label_columns = {\n 'expose_in_sqllab': _('Expose in SQL Lab'),\n 'allow_ctas': _('Allow CREATE TABLE AS'),\n 'allow_dml': _('Allow DML'),\n 'force_ctas_schema': _('CTAS Schema'),\n 'database_name': _('Database'),\n 'creator': _('Creator'),\n 'changed_on_': _('Last Changed'),\n 'sqlalchemy_uri': _('SQLAlchemy URI'),\n 'cache_timeout': _('Chart Cache Timeout'),\n 'extra': _('Extra'),\n 'allow_run_async': _('Asynchronous Query Execution'),\n 'impersonate_user': _('Impersonate the logged on user'),\n 'allow_csv_upload': _('Allow Csv Upload'),\n 'modified': _('Modified'),\n 'allow_multi_schema_metadata_fetch': _('Allow Multi Schema Metadata Fetch'),\n 'backend': _('Backend'),\n }\n\n def pre_add(self, db):\n self.check_extra(db)\n db.set_sqlalchemy_uri(db.sqlalchemy_uri)\n security_manager.merge_perm('database_access', db.perm)\n # adding a new database we always want to force refresh schema list\n for schema in db.all_schema_names():\n security_manager.merge_perm(\n 'schema_access', security_manager.get_schema_perm(db, schema))\n\n def pre_update(self, db):\n self.pre_add(db)\n\n def pre_delete(self, obj):\n if obj.tables:\n raise SupersetException(Markup(\n 'Cannot delete a database that has tables attached. '\n \"Here's the list of associated tables: \" +\n ', '.join('{}'.format(o) for o in obj.tables)))\n\n def _delete(self, pk):\n DeleteMixin._delete(self, pk)\n\n def check_extra(self, db):\n # this will check whether json.loads(extra) can succeed\n try:\n extra = db.get_extra()\n except Exception as e:\n raise Exception('Extra field cannot be decoded by JSON. {}'.format(str(e)))\n\n # this will check whether 'metadata_params' is configured correctly\n metadata_signature = inspect.signature(MetaData)\n for key in extra.get('metadata_params', {}):\n if key not in metadata_signature.parameters:\n raise Exception('The metadata_params in Extra field '\n 'is not configured correctly. 
The key '\n '{} is invalid.'.format(key))\n\n\nappbuilder.add_link(\n 'Import Dashboards',\n label=__('Import Dashboards'),\n href='/superset/import_dashboards',\n icon='fa-cloud-upload',\n category='Manage',\n category_label=__('Manage'),\n category_icon='fa-wrench')\n\n\nappbuilder.add_view(\n DatabaseView,\n 'Databases',\n label=__('Databases'),\n icon='fa-database',\n category='Sources',\n category_label=__('Sources'),\n category_icon='fa-database')\n\n\nclass DatabaseAsync(DatabaseView):\n list_columns = [\n 'id', 'database_name',\n 'expose_in_sqllab', 'allow_ctas', 'force_ctas_schema',\n 'allow_run_async', 'allow_dml',\n 'allow_multi_schema_metadata_fetch', 'allow_csv_upload',\n 'allows_subquery', 'backend',\n ]\n\n\nappbuilder.add_view_no_menu(DatabaseAsync)\n\n\nclass CsvToDatabaseView(SimpleFormView):\n form = CsvToDatabaseForm\n form_template = 'superset/form_view/csv_to_database_view/edit.html'\n form_title = _('CSV to Database configuration')\n add_columns = ['database', 'schema', 'table_name']\n\n def form_get(self, form):\n form.sep.data = ','\n form.header.data = 0\n form.mangle_dupe_cols.data = True\n form.skipinitialspace.data = False\n form.skip_blank_lines.data = True\n form.infer_datetime_format.data = True\n form.decimal.data = '.'\n form.if_exists.data = 'fail'\n\n def form_post(self, form):\n database = form.con.data\n schema_name = form.schema.data or ''\n\n if not self.is_schema_allowed(database, schema_name):\n message = _('Database \"{0}\" Schema \"{1}\" is not allowed for csv uploads. '\n 'Please contact Superset Admin'.format(database.database_name,\n schema_name))\n flash(message, 'danger')\n return redirect('/csvtodatabaseview/form')\n\n csv_file = form.csv_file.data\n form.csv_file.data.filename = secure_filename(form.csv_file.data.filename)\n csv_filename = form.csv_file.data.filename\n path = os.path.join(config['UPLOAD_FOLDER'], csv_filename)\n try:\n utils.ensure_path_exists(config['UPLOAD_FOLDER'])\n csv_file.save(path)\n table = SqlaTable(table_name=form.name.data)\n table.database = form.data.get('con')\n table.database_id = table.database.id\n table.database.db_engine_spec.create_table_from_csv(form, table)\n except Exception as e:\n try:\n os.remove(path)\n except OSError:\n pass\n message = 'Table name {} already exists. 
Please pick another'.format(\n form.name.data) if isinstance(e, IntegrityError) else str(e)\n flash(\n message,\n 'danger')\n stats_logger.incr('failed_csv_upload')\n return redirect('/csvtodatabaseview/form')\n\n os.remove(path)\n # Go back to welcome page / splash screen\n db_name = table.database.database_name\n message = _('CSV file \"{0}\" uploaded to table \"{1}\" in '\n 'database \"{2}\"'.format(csv_filename,\n form.name.data,\n db_name))\n flash(message, 'info')\n stats_logger.incr('successful_csv_upload')\n return redirect('/tablemodelview/list/')\n\n def is_schema_allowed(self, database, schema):\n if not database.allow_csv_upload:\n return False\n schemas = database.get_schema_access_for_csv_upload()\n if schemas:\n return schema in schemas\n return (security_manager.database_access(database) or\n security_manager.all_datasource_access())\n\n\nappbuilder.add_view_no_menu(CsvToDatabaseView)\n\n\nclass DatabaseTablesAsync(DatabaseView):\n list_columns = ['id', 'all_table_names_in_database', 'all_schema_names']\n\n\nappbuilder.add_view_no_menu(DatabaseTablesAsync)\n\n\nif config.get('ENABLE_ACCESS_REQUEST'):\n class AccessRequestsModelView(SupersetModelView, DeleteMixin):\n datamodel = SQLAInterface(DAR)\n list_columns = [\n 'username', 'user_roles', 'datasource_link',\n 'roles_with_datasource', 'created_on']\n order_columns = ['created_on']\n base_order = ('changed_on', 'desc')\n label_columns = {\n 'username': _('User'),\n 'user_roles': _('User Roles'),\n 'database': _('Database URL'),\n 'datasource_link': _('Datasource'),\n 'roles_with_datasource': _('Roles to grant'),\n 'created_on': _('Created On'),\n }\n\n appbuilder.add_view(\n AccessRequestsModelView,\n 'Access requests',\n label=__('Access requests'),\n category='Security',\n category_label=__('Security'),\n icon='fa-table')\n\n\nclass SliceModelView(SupersetModelView, DeleteMixin): # noqa\n route_base = '/chart'\n datamodel = SQLAInterface(models.Slice)\n\n list_title = _('Charts')\n show_title = _('Show Chart')\n add_title = _('Add Chart')\n edit_title = _('Edit Chart')\n\n can_add = False\n search_columns = (\n 'slice_name', 'description', 'viz_type', 'datasource_name', 'owners',\n )\n list_columns = [\n 'slice_link', 'viz_type', 'datasource_link', 'creator', 'modified']\n order_columns = ['viz_type', 'datasource_link', 'modified']\n edit_columns = [\n 'slice_name', 'description', 'viz_type', 'owners', 'dashboards',\n 'params', 'cache_timeout']\n base_order = ('changed_on', 'desc')\n description_columns = {\n 'description': Markup(\n 'The content here can be displayed as widget headers in the '\n 'dashboard view. Supports '\n '<a href=\"https://daringfireball.net/projects/markdown/\"\">'\n 'markdown</a>'),\n 'params': _(\n 'These parameters are generated dynamically when clicking '\n 'the save or overwrite button in the explore view. This JSON '\n 'object is exposed here for reference and for power users who may '\n 'want to alter specific parameters.',\n ),\n 'cache_timeout': _(\n 'Duration (in seconds) of the caching timeout for this chart. 
'\n 'Note this defaults to the datasource/table timeout if undefined.'),\n }\n base_filters = [['id', SliceFilter, lambda: []]]\n label_columns = {\n 'cache_timeout': _('Cache Timeout'),\n 'creator': _('Creator'),\n 'dashboards': _('Dashboards'),\n 'datasource_link': _('Datasource'),\n 'description': _('Description'),\n 'modified': _('Last Modified'),\n 'owners': _('Owners'),\n 'params': _('Parameters'),\n 'slice_link': _('Chart'),\n 'slice_name': _('Name'),\n 'table': _('Table'),\n 'viz_type': _('Visualization Type'),\n }\n\n def pre_add(self, obj):\n utils.validate_json(obj.params)\n\n def pre_update(self, obj):\n utils.validate_json(obj.params)\n check_ownership(obj)\n\n def pre_delete(self, obj):\n check_ownership(obj)\n\n @expose('/add', methods=['GET', 'POST'])\n @has_access\n def add(self):\n datasources = ConnectorRegistry.get_all_datasources(db.session)\n datasources = [\n {'value': str(d.id) + '__' + d.type, 'label': repr(d)}\n for d in datasources\n ]\n return self.render_template(\n 'superset/add_slice.html',\n bootstrap_data=json.dumps({\n 'datasources': sorted(datasources, key=lambda d: d['label']),\n }),\n )\n\n\nappbuilder.add_view(\n SliceModelView,\n 'Charts',\n label=__('Charts'),\n icon='fa-bar-chart',\n category='',\n category_icon='')\n\n\nclass SliceAsync(SliceModelView): # noqa\n route_base = '/sliceasync'\n list_columns = [\n 'id', 'slice_link', 'viz_type', 'slice_name',\n 'creator', 'modified', 'icons', 'changed_on_humanized',\n ]\n label_columns = {\n 'icons': ' ',\n 'slice_link': _('Chart'),\n }\n\n\nappbuilder.add_view_no_menu(SliceAsync)\n\n\nclass SliceAddView(SliceModelView): # noqa\n route_base = '/sliceaddview'\n list_columns = [\n 'id', 'slice_name', 'slice_url', 'edit_url', 'viz_type', 'params',\n 'description', 'description_markeddown', 'datasource_id', 'datasource_type',\n 'datasource_name_text', 'datasource_link',\n 'owners', 'modified', 'changed_on', 'changed_on_humanized',\n ]\n\n\nappbuilder.add_view_no_menu(SliceAddView)\n\n\nclass DashboardModelView(SupersetModelView, DeleteMixin): # noqa\n route_base = '/dashboard'\n datamodel = SQLAInterface(models.Dashboard)\n\n list_title = _('Dashboards')\n show_title = _('Show Dashboard')\n add_title = _('Add Dashboard')\n edit_title = _('Edit Dashboard')\n\n list_columns = ['dashboard_link', 'creator', 'modified']\n order_columns = ['modified']\n edit_columns = [\n 'dashboard_title', 'slug', 'owners', 'position_json', 'css',\n 'json_metadata']\n show_columns = edit_columns + ['table_names', 'charts']\n search_columns = ('dashboard_title', 'slug', 'owners')\n add_columns = edit_columns\n base_order = ('changed_on', 'desc')\n description_columns = {\n 'position_json': _(\n 'This json object describes the positioning of the widgets in '\n 'the dashboard. It is dynamically generated when adjusting '\n 'the widgets size and positions by using drag & drop in '\n 'the dashboard view'),\n 'css': _(\n 'The CSS for individual dashboards can be altered here, or '\n 'in the dashboard view where changes are immediately '\n 'visible'),\n 'slug': _('To get a readable URL for your dashboard'),\n 'json_metadata': _(\n 'This JSON object is generated dynamically when clicking '\n 'the save or overwrite button in the dashboard view. 
It '\n 'is exposed here for reference and for power users who may '\n 'want to alter specific parameters.'),\n 'owners': _('Owners is a list of users who can alter the dashboard.'),\n }\n base_filters = [['slice', DashboardFilter, lambda: []]]\n label_columns = {\n 'dashboard_link': _('Dashboard'),\n 'dashboard_title': _('Title'),\n 'slug': _('Slug'),\n 'charts': _('Charts'),\n 'owners': _('Owners'),\n 'creator': _('Creator'),\n 'modified': _('Modified'),\n 'position_json': _('Position JSON'),\n 'css': _('CSS'),\n 'json_metadata': _('JSON Metadata'),\n 'table_names': _('Underlying Tables'),\n }\n\n def pre_add(self, obj):\n obj.slug = obj.slug or None\n if obj.slug:\n obj.slug = obj.slug.strip()\n obj.slug = obj.slug.replace(' ', '-')\n obj.slug = re.sub(r'[^\\w\\-]+', '', obj.slug)\n if g.user not in obj.owners:\n obj.owners.append(g.user)\n utils.validate_json(obj.json_metadata)\n utils.validate_json(obj.position_json)\n owners = [o for o in obj.owners]\n for slc in obj.slices:\n slc.owners = list(set(owners) | set(slc.owners))\n\n def pre_update(self, obj):\n check_ownership(obj)\n self.pre_add(obj)\n\n def pre_delete(self, obj):\n check_ownership(obj)\n\n @action('mulexport', __('Export'), __('Export dashboards?'), 'fa-database')\n def mulexport(self, items):\n if not isinstance(items, list):\n items = [items]\n ids = ''.join('&id={}'.format(d.id) for d in items)\n return redirect(\n '/dashboard/export_dashboards_form?{}'.format(ids[1:]))\n\n @log_this\n @has_access\n @expose('/export_dashboards_form')\n def download_dashboards(self):\n if request.args.get('action') == 'go':\n ids = request.args.getlist('id')\n return Response(\n models.Dashboard.export_dashboards(ids),\n headers=generate_download_headers('json'),\n mimetype='application/text')\n return self.render_template(\n 'superset/export_dashboards.html',\n dashboards_url='/dashboard/list',\n )\n\n\nappbuilder.add_view(\n DashboardModelView,\n 'Dashboards',\n label=__('Dashboards'),\n icon='fa-dashboard',\n category='',\n category_icon='')\n\n\nclass DashboardModelViewAsync(DashboardModelView): # noqa\n route_base = '/dashboardasync'\n list_columns = [\n 'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',\n 'changed_on', 'url', 'changed_by_name',\n ]\n label_columns = {\n 'dashboard_link': _('Dashboard'),\n 'dashboard_title': _('Title'),\n 'creator': _('Creator'),\n 'modified': _('Modified'),\n }\n\n\nappbuilder.add_view_no_menu(DashboardModelViewAsync)\n\n\nclass DashboardAddView(DashboardModelView): # noqa\n route_base = '/dashboardaddview'\n list_columns = [\n 'id', 'dashboard_link', 'creator', 'modified', 'dashboard_title',\n 'changed_on', 'url', 'changed_by_name',\n ]\n show_columns = list(set(DashboardModelView.edit_columns + list_columns))\n\n\nappbuilder.add_view_no_menu(DashboardAddView)\n\n\nclass LogModelView(SupersetModelView):\n datamodel = SQLAInterface(models.Log)\n\n list_title = _('Logs')\n show_title = _('Show Log')\n add_title = _('Add Log')\n edit_title = _('Edit Log')\n\n list_columns = ('user', 'action', 'dttm')\n edit_columns = ('user', 'action', 'dttm', 'json')\n base_order = ('dttm', 'desc')\n label_columns = {\n 'user': _('User'),\n 'action': _('Action'),\n 'dttm': _('dttm'),\n 'json': _('JSON'),\n }\n\n\nappbuilder.add_view(\n LogModelView,\n 'Action Log',\n label=__('Action Log'),\n category='Security',\n category_label=__('Security'),\n icon='fa-list-ol')\n\n\n@app.route('/health')\ndef health():\n return 'OK'\n\n\n@app.route('/healthcheck')\ndef healthcheck():\n return 
'OK'\n\n\n@app.route('/ping')\ndef ping():\n return 'OK'\n\n\nclass KV(BaseSupersetView):\n\n \"\"\"Used for storing and retrieving key value pairs\"\"\"\n\n @log_this\n @has_access_api\n @expose('/store/', methods=['POST'])\n def store(self):\n try:\n value = request.form.get('data')\n obj = models.KeyValue(value=value)\n db.session.add(obj)\n db.session.commit()\n except Exception as e:\n return json_error_response(e)\n return Response(\n json.dumps({'id': obj.id}),\n status=200)\n\n @log_this\n @has_access_api\n @expose('/<key_id>/', methods=['GET'])\n def get_value(self, key_id):\n kv = None\n try:\n kv = db.session.query(models.KeyValue).filter_by(id=key_id).one()\n except Exception as e:\n return json_error_response(e)\n return Response(kv.value, status=200)\n\n\nappbuilder.add_view_no_menu(KV)\n\n\nclass R(BaseSupersetView):\n\n \"\"\"used for short urls\"\"\"\n\n @log_this\n @expose('/<url_id>')\n def index(self, url_id):\n url = db.session.query(models.Url).filter_by(id=url_id).first()\n if url and url.url:\n explore_url = '//superset/explore/?'\n if url.url.startswith(explore_url):\n explore_url += f'r={url_id}'\n return redirect(explore_url[1:])\n else:\n return redirect(url.url[1:])\n else:\n flash('URL to nowhere...', 'danger')\n return redirect('/')\n\n @log_this\n @has_access_api\n @expose('/shortner/', methods=['POST'])\n def shortner(self):\n url = request.form.get('data')\n obj = models.Url(url=url)\n db.session.add(obj)\n db.session.commit()\n return Response(\n '{scheme}://{request.headers[Host]}/r/{obj.id}'.format(\n scheme=request.scheme, request=request, obj=obj),\n mimetype='text/plain')\n\n\nappbuilder.add_view_no_menu(R)\n\n\nclass Superset(BaseSupersetView):\n \"\"\"The base views for Superset!\"\"\"\n @has_access_api\n @expose('/datasources/')\n def datasources(self):\n datasources = ConnectorRegistry.get_all_datasources(db.session)\n datasources = [o.short_data for o in datasources if o.short_data.get('name')]\n datasources = sorted(datasources, key=lambda o: o['name'])\n return self.json_response(datasources)\n\n @has_access_api\n @expose('/override_role_permissions/', methods=['POST'])\n def override_role_permissions(self):\n \"\"\"Updates the role with the given datasource permissions.\n\n Permissions not in the request will be revoked. This endpoint should\n be available to admins only. 
Expects JSON in the format:\n {\n 'role_name': '{role_name}',\n 'database': [{\n 'datasource_type': '{table|druid}',\n 'name': '{database_name}',\n 'schema': [{\n 'name': '{schema_name}',\n 'datasources': ['{datasource name}, {datasource name}']\n }]\n }]\n }\n \"\"\"\n data = request.get_json(force=True)\n role_name = data['role_name']\n databases = data['database']\n\n db_ds_names = set()\n for dbs in databases:\n for schema in dbs['schema']:\n for ds_name in schema['datasources']:\n fullname = utils.get_datasource_full_name(\n dbs['name'], ds_name, schema=schema['name'])\n db_ds_names.add(fullname)\n\n existing_datasources = ConnectorRegistry.get_all_datasources(db.session)\n datasources = [\n d for d in existing_datasources if d.full_name in db_ds_names]\n role = security_manager.find_role(role_name)\n # remove all permissions\n role.permissions = []\n # grant permissions to the list of datasources\n granted_perms = []\n for datasource in datasources:\n view_menu_perm = security_manager.find_permission_view_menu(\n view_menu_name=datasource.perm,\n permission_name='datasource_access')\n # prevent creating empty permissions\n if view_menu_perm and view_menu_perm.view_menu:\n role.permissions.append(view_menu_perm)\n granted_perms.append(view_menu_perm.view_menu.name)\n db.session.commit()\n return self.json_response({\n 'granted': granted_perms,\n 'requested': list(db_ds_names),\n }, status=201)\n\n @log_this\n @has_access\n @expose('/request_access/')\n def request_access(self):\n datasources = set()\n dashboard_id = request.args.get('dashboard_id')\n if dashboard_id:\n dash = (\n db.session.query(models.Dashboard)\n .filter_by(id=int(dashboard_id))\n .one()\n )\n datasources |= dash.datasources\n datasource_id = request.args.get('datasource_id')\n datasource_type = request.args.get('datasource_type')\n if datasource_id:\n ds_class = ConnectorRegistry.sources.get(datasource_type)\n datasource = (\n db.session.query(ds_class)\n .filter_by(id=int(datasource_id))\n .one()\n )\n datasources.add(datasource)\n\n has_access = all(\n (\n datasource and security_manager.datasource_access(datasource)\n for datasource in datasources\n ))\n if has_access:\n return redirect('/superset/dashboard/{}'.format(dashboard_id))\n\n if request.args.get('action') == 'go':\n for datasource in datasources:\n access_request = DAR(\n datasource_id=datasource.id,\n datasource_type=datasource.type)\n db.session.add(access_request)\n db.session.commit()\n flash(__('Access was requested'), 'info')\n return redirect('/')\n\n return self.render_template(\n 'superset/request_access.html',\n datasources=datasources,\n datasource_names=', '.join([o.name for o in datasources]),\n )\n\n @log_this\n @has_access\n @expose('/approve')\n def approve(self):\n def clean_fulfilled_requests(session):\n for r in session.query(DAR).all():\n datasource = ConnectorRegistry.get_datasource(\n r.datasource_type, r.datasource_id, session)\n if not datasource or \\\n security_manager.datasource_access(datasource):\n # datasource does not exist anymore\n session.delete(r)\n session.commit()\n datasource_type = request.args.get('datasource_type')\n datasource_id = request.args.get('datasource_id')\n created_by_username = request.args.get('created_by')\n role_to_grant = request.args.get('role_to_grant')\n role_to_extend = request.args.get('role_to_extend')\n\n session = db.session\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, session)\n\n if not datasource:\n flash(DATASOURCE_MISSING_ERR, 'alert')\n return 
json_error_response(DATASOURCE_MISSING_ERR)\n\n requested_by = security_manager.find_user(username=created_by_username)\n if not requested_by:\n flash(USER_MISSING_ERR, 'alert')\n return json_error_response(USER_MISSING_ERR)\n\n requests = (\n session.query(DAR)\n .filter(\n DAR.datasource_id == datasource_id,\n DAR.datasource_type == datasource_type,\n DAR.created_by_fk == requested_by.id)\n .all()\n )\n\n if not requests:\n flash(ACCESS_REQUEST_MISSING_ERR, 'alert')\n return json_error_response(ACCESS_REQUEST_MISSING_ERR)\n\n # check if you can approve\n if (\n security_manager.all_datasource_access() or\n check_ownership(datasource, raise_if_false=False)\n ):\n # can by done by admin only\n if role_to_grant:\n role = security_manager.find_role(role_to_grant)\n requested_by.roles.append(role)\n msg = __(\n '%(user)s was granted the role %(role)s that gives access '\n 'to the %(datasource)s',\n user=requested_by.username,\n role=role_to_grant,\n datasource=datasource.full_name)\n utils.notify_user_about_perm_udate(\n g.user, requested_by, role, datasource,\n 'email/role_granted.txt', app.config)\n flash(msg, 'info')\n\n if role_to_extend:\n perm_view = security_manager.find_permission_view_menu(\n 'email/datasource_access', datasource.perm)\n role = security_manager.find_role(role_to_extend)\n security_manager.add_permission_role(role, perm_view)\n msg = __('Role %(r)s was extended to provide the access to '\n 'the datasource %(ds)s', r=role_to_extend,\n ds=datasource.full_name)\n utils.notify_user_about_perm_udate(\n g.user, requested_by, role, datasource,\n 'email/role_extended.txt', app.config)\n flash(msg, 'info')\n clean_fulfilled_requests(session)\n else:\n flash(__('You have no permission to approve this request'),\n 'danger')\n return redirect('/accessrequestsmodelview/list/')\n for r in requests:\n session.delete(r)\n session.commit()\n return redirect('/accessrequestsmodelview/list/')\n\n def get_form_data(self, slice_id=None, use_slice_data=False):\n form_data = {}\n post_data = request.form.get('form_data')\n request_args_data = request.args.get('form_data')\n # Supporting POST\n if post_data:\n form_data.update(json.loads(post_data))\n # request params can overwrite post body\n if request_args_data:\n form_data.update(json.loads(request_args_data))\n\n url_id = request.args.get('r')\n if url_id:\n saved_url = db.session.query(models.Url).filter_by(id=url_id).first()\n if saved_url:\n url_str = parse.unquote_plus(\n saved_url.url.split('?')[1][10:], encoding='utf-8', errors=None)\n url_form_data = json.loads(url_str)\n # allow form_date in request override saved url\n url_form_data.update(form_data)\n form_data = url_form_data\n\n form_data = {\n k: v\n for k, v in form_data.items()\n if k not in FORM_DATA_KEY_BLACKLIST\n }\n\n # When a slice_id is present, load from DB and override\n # the form_data from the DB with the other form_data provided\n slice_id = form_data.get('slice_id') or slice_id\n slc = None\n\n # Check if form data only contains slice_id\n contains_only_slc_id = not any(key != 'slice_id' for key in form_data)\n\n # Include the slice_form_data if request from explore or slice calls\n # or if form_data only contains slice_id\n if slice_id and (use_slice_data or contains_only_slc_id):\n slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none()\n if slc:\n slice_form_data = slc.form_data.copy()\n slice_form_data.update(form_data)\n form_data = slice_form_data\n\n update_time_range(form_data)\n\n return form_data, slc\n\n def get_viz(\n self,\n 
slice_id=None,\n form_data=None,\n datasource_type=None,\n datasource_id=None,\n force=False,\n ):\n if slice_id:\n slc = (\n db.session.query(models.Slice)\n .filter_by(id=slice_id)\n .one()\n )\n return slc.get_viz()\n else:\n viz_type = form_data.get('viz_type', 'table')\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session)\n viz_obj = viz.viz_types[viz_type](\n datasource,\n form_data=form_data,\n force=force,\n )\n return viz_obj\n\n @has_access\n @expose('/slice/<slice_id>/')\n def slice(self, slice_id):\n form_data, slc = get_form_data(slice_id, use_slice_data=True)\n if not slc:\n abort(404)\n endpoint = '/superset/explore/?form_data={}'.format(\n parse.quote(json.dumps({'slice_id': slice_id})),\n )\n if request.args.get('standalone') == 'true':\n endpoint += '&standalone=true'\n return redirect(endpoint)\n\n def get_query_string_response(self, viz_obj):\n query = None\n try:\n query_obj = viz_obj.query_obj()\n if query_obj:\n query = viz_obj.datasource.get_query_str(query_obj)\n except Exception as e:\n logging.exception(e)\n return json_error_response(e)\n\n if query_obj and query_obj['prequeries']:\n query_obj['prequeries'].append(query)\n query = ';\\n\\n'.join(query_obj['prequeries'])\n if query:\n query += ';'\n else:\n query = 'No query.'\n\n return self.json_response({\n 'query': query,\n 'language': viz_obj.datasource.query_language,\n })\n\n def get_raw_results(self, viz_obj):\n return self.json_response({\n 'data': viz_obj.get_df_payload()['df'].to_dict('records'),\n })\n\n def get_samples(self, viz_obj):\n return self.json_response({\n 'data': viz_obj.get_samples(),\n })\n\n def generate_json(\n self, viz_obj, csv=False, query=False, results=False, samples=False):\n if csv:\n return CsvResponse(\n viz_obj.get_csv(),\n status=200,\n headers=generate_download_headers('csv'),\n mimetype='application/csv')\n\n if query:\n return self.get_query_string_response(viz_obj)\n\n if results:\n return self.get_raw_results(viz_obj)\n\n if samples:\n return self.get_samples(viz_obj)\n\n payload = viz_obj.get_payload()\n return data_payload_response(*viz_obj.payload_json_and_has_error(payload))\n\n @log_this\n @api\n @has_access_api\n @expose('/slice_json/<slice_id>')\n @etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_slice_perms)\n def slice_json(self, slice_id):\n form_data, slc = get_form_data(slice_id, use_slice_data=True)\n datasource_type = slc.datasource.type\n datasource_id = slc.datasource.id\n viz_obj = get_viz(\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data,\n force=False,\n )\n return self.generate_json(viz_obj)\n\n @log_this\n @api\n @has_access_api\n @expose('/annotation_json/<layer_id>')\n def annotation_json(self, layer_id):\n form_data = get_form_data()[0]\n form_data['layer_id'] = layer_id\n form_data['filters'] = [{'col': 'layer_id',\n 'op': '==',\n 'val': layer_id}]\n datasource = AnnotationDatasource()\n viz_obj = viz.viz_types['table'](\n datasource,\n form_data=form_data,\n force=False,\n )\n payload = viz_obj.get_payload()\n return data_payload_response(*viz_obj.payload_json_and_has_error(payload))\n\n @log_this\n @api\n @has_access_api\n @handle_api_exception\n @expose('/explore_json/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])\n @expose('/explore_json/', methods=['GET', 'POST'])\n @etag_cache(CACHE_DEFAULT_TIMEOUT, check_perms=check_datasource_perms)\n def explore_json(self, datasource_type=None, datasource_id=None):\n \"\"\"Serves all request that GET or 
POST form_data\n\n This endpoint evolved to be the entry point of many different\n requests that GETs or POSTs a form_data.\n\n `self.generate_json` receives this input and returns different\n payloads based on the request args in the first block\n\n TODO: break into one endpoint for each return shape\"\"\"\n csv = request.args.get('csv') == 'true'\n query = request.args.get('query') == 'true'\n results = request.args.get('results') == 'true'\n samples = request.args.get('samples') == 'true'\n force = request.args.get('force') == 'true'\n\n form_data = get_form_data()[0]\n datasource_id, datasource_type = get_datasource_info(\n datasource_id, datasource_type, form_data)\n viz_obj = get_viz(\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n form_data=form_data,\n force=force,\n )\n\n return self.generate_json(\n viz_obj,\n csv=csv,\n query=query,\n results=results,\n samples=samples,\n )\n\n @log_this\n @has_access\n @expose('/import_dashboards', methods=['GET', 'POST'])\n def import_dashboards(self):\n \"\"\"Overrides the dashboards using json instances from the file.\"\"\"\n f = request.files.get('file')\n if request.method == 'POST' and f:\n dashboard_import_export.import_dashboards(db.session, f.stream)\n return redirect('/dashboard/list/')\n return self.render_template('superset/import_dashboards.html')\n\n @log_this\n @has_access\n @expose('/explorev2/<datasource_type>/<datasource_id>/')\n def explorev2(self, datasource_type, datasource_id):\n \"\"\"Deprecated endpoint, here for backward compatibility of urls\"\"\"\n return redirect(url_for(\n 'Superset.explore',\n datasource_type=datasource_type,\n datasource_id=datasource_id,\n **request.args))\n\n @log_this\n @has_access\n @expose('/explore/<datasource_type>/<datasource_id>/', methods=['GET', 'POST'])\n @expose('/explore/', methods=['GET', 'POST'])\n def explore(self, datasource_type=None, datasource_id=None):\n user_id = g.user.get_id() if g.user else None\n form_data, slc = get_form_data(use_slice_data=True)\n\n datasource_id, datasource_type = get_datasource_info(\n datasource_id, datasource_type, form_data)\n\n error_redirect = '/chart/list/'\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session)\n if not datasource:\n flash(DATASOURCE_MISSING_ERR, 'danger')\n return redirect(error_redirect)\n\n if config.get('ENABLE_ACCESS_REQUEST') and (\n not security_manager.datasource_access(datasource)\n ):\n flash(\n __(security_manager.get_datasource_access_error_msg(datasource)),\n 'danger')\n return redirect(\n 'superset/request_access/?'\n f'datasource_type={datasource_type}&'\n f'datasource_id={datasource_id}&')\n\n viz_type = form_data.get('viz_type')\n if not viz_type and datasource.default_endpoint:\n return redirect(datasource.default_endpoint)\n\n # slc perms\n slice_add_perm = security_manager.can_access('can_add', 'SliceModelView')\n slice_overwrite_perm = is_owner(slc, g.user)\n slice_download_perm = security_manager.can_access(\n 'can_download', 'SliceModelView')\n\n form_data['datasource'] = str(datasource_id) + '__' + datasource_type\n\n # On explore, merge legacy and extra filters into the form data\n utils.convert_legacy_filters_into_adhoc(form_data)\n utils.merge_extra_filters(form_data)\n\n # merge request url params\n if request.method == 'GET':\n utils.merge_request_params(form_data, request.args)\n\n # handle save or overwrite\n action = request.args.get('action')\n\n if action == 'overwrite' and not slice_overwrite_perm:\n return json_error_response(\n 
_('You don\\'t have the rights to ') + _('alter this ') + _('chart'),\n status=400)\n\n if action == 'saveas' and not slice_add_perm:\n return json_error_response(\n _('You don\\'t have the rights to ') + _('create a ') + _('chart'),\n status=400)\n\n if action in ('saveas', 'overwrite'):\n return self.save_or_overwrite_slice(\n request.args,\n slc, slice_add_perm,\n slice_overwrite_perm,\n slice_download_perm,\n datasource_id,\n datasource_type,\n datasource.name)\n\n standalone = request.args.get('standalone') == 'true'\n bootstrap_data = {\n 'can_add': slice_add_perm,\n 'can_download': slice_download_perm,\n 'can_overwrite': slice_overwrite_perm,\n 'datasource': datasource.data,\n 'form_data': form_data,\n 'datasource_id': datasource_id,\n 'datasource_type': datasource_type,\n 'slice': slc.data if slc else None,\n 'standalone': standalone,\n 'user_id': user_id,\n 'forced_height': request.args.get('height'),\n 'common': self.common_bootsrap_payload(),\n }\n table_name = datasource.table_name \\\n if datasource_type == 'table' \\\n else datasource.datasource_name\n if slc:\n title = slc.slice_name\n else:\n title = _('Explore - %(table)s', table=table_name)\n return self.render_template(\n 'superset/basic.html',\n bootstrap_data=json.dumps(bootstrap_data),\n entry='explore',\n title=title,\n standalone_mode=standalone)\n\n @api\n @handle_api_exception\n @has_access_api\n @expose('/filter/<datasource_type>/<datasource_id>/<column>/')\n def filter(self, datasource_type, datasource_id, column):\n \"\"\"\n Endpoint to retrieve values for specified column.\n\n :param datasource_type: Type of datasource e.g. table\n :param datasource_id: Datasource id\n :param column: Column name to retrieve values for\n :return:\n \"\"\"\n # TODO: Cache endpoint by user, datasource and column\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session)\n if not datasource:\n return json_error_response(DATASOURCE_MISSING_ERR)\n security_manager.assert_datasource_permission(datasource)\n payload = json.dumps(\n datasource.values_for_column(\n column,\n config.get('FILTER_SELECT_ROW_LIMIT', 10000),\n ),\n default=utils.json_int_dttm_ser)\n return json_success(payload)\n\n def save_or_overwrite_slice(\n self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm,\n datasource_id, datasource_type, datasource_name):\n \"\"\"Save or overwrite a slice\"\"\"\n slice_name = args.get('slice_name')\n action = args.get('action')\n form_data = get_form_data()[0]\n\n if action in ('saveas'):\n if 'slice_id' in form_data:\n form_data.pop('slice_id') # don't save old slice_id\n slc = models.Slice(owners=[g.user] if g.user else [])\n\n slc.params = json.dumps(form_data, indent=2, sort_keys=True)\n slc.datasource_name = datasource_name\n slc.viz_type = form_data['viz_type']\n slc.datasource_type = datasource_type\n slc.datasource_id = datasource_id\n slc.slice_name = slice_name\n\n if action in ('saveas') and slice_add_perm:\n self.save_slice(slc)\n elif action == 'overwrite' and slice_overwrite_perm:\n self.overwrite_slice(slc)\n\n # Adding slice to a dashboard if requested\n dash = None\n if request.args.get('add_to_dash') == 'existing':\n dash = (\n db.session.query(models.Dashboard)\n .filter_by(id=int(request.args.get('save_to_dashboard_id')))\n .one()\n )\n\n # check edit dashboard permissions\n dash_overwrite_perm = check_ownership(dash, raise_if_false=False)\n if not dash_overwrite_perm:\n return json_error_response(\n _('You don\\'t have the rights to ') + _('alter this 
') +\n _('dashboard'),\n status=400)\n\n flash(\n _('Chart [{}] was added to dashboard [{}]').format(\n slc.slice_name,\n dash.dashboard_title),\n 'info')\n elif request.args.get('add_to_dash') == 'new':\n # check create dashboard permissions\n dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView')\n if not dash_add_perm:\n return json_error_response(\n _('You don\\'t have the rights to ') + _('create a ') + _('dashboard'),\n status=400)\n\n dash = models.Dashboard(\n dashboard_title=request.args.get('new_dashboard_name'),\n owners=[g.user] if g.user else [])\n flash(\n _('Dashboard [{}] just got created and chart [{}] was added '\n 'to it').format(\n dash.dashboard_title,\n slc.slice_name),\n 'info')\n\n if dash and slc not in dash.slices:\n dash.slices.append(slc)\n db.session.commit()\n\n response = {\n 'can_add': slice_add_perm,\n 'can_download': slice_download_perm,\n 'can_overwrite': is_owner(slc, g.user),\n 'form_data': slc.form_data,\n 'slice': slc.data,\n 'dashboard_id': dash.id if dash else None,\n }\n\n if request.args.get('goto_dash') == 'true':\n response.update({'dashboard': dash.url})\n\n return json_success(json.dumps(response))\n\n def save_slice(self, slc):\n session = db.session()\n msg = _('Chart [{}] has been saved').format(slc.slice_name)\n session.add(slc)\n session.commit()\n flash(msg, 'info')\n\n def overwrite_slice(self, slc):\n session = db.session()\n session.merge(slc)\n session.commit()\n msg = _('Chart [{}] has been overwritten').format(slc.slice_name)\n flash(msg, 'info')\n\n @api\n @has_access_api\n @expose('/checkbox/<model_view>/<id_>/<attr>/<value>', methods=['GET'])\n def checkbox(self, model_view, id_, attr, value):\n \"\"\"endpoint for checking/unchecking any boolean in a sqla model\"\"\"\n modelview_to_model = {\n '{}ColumnInlineView'.format(name.capitalize()): source.column_class\n for name, source in ConnectorRegistry.sources.items()\n }\n model = modelview_to_model[model_view]\n col = db.session.query(model).filter_by(id=id_).first()\n checked = value == 'true'\n if col:\n setattr(col, attr, checked)\n if checked:\n metrics = col.get_metrics().values()\n col.datasource.add_missing_metrics(metrics)\n db.session.commit()\n return json_success('OK')\n\n @api\n @has_access_api\n @expose('/schemas/<db_id>/')\n @expose('/schemas/<db_id>/<force_refresh>/')\n def schemas(self, db_id, force_refresh='false'):\n db_id = int(db_id)\n force_refresh = force_refresh.lower() == 'true'\n database = (\n db.session\n .query(models.Database)\n .filter_by(id=db_id)\n .first()\n )\n if database:\n schemas = database.all_schema_names(\n cache=database.schema_cache_enabled,\n cache_timeout=database.schema_cache_timeout,\n force=force_refresh)\n schemas = security_manager.schemas_accessible_by_user(database, schemas)\n else:\n schemas = []\n\n return Response(\n json.dumps({'schemas': schemas}),\n mimetype='application/json')\n\n @api\n @has_access_api\n @expose('/tables/<db_id>/<schema>/<substr>/')\n @expose('/tables/<db_id>/<schema>/<substr>/<force_refresh>/')\n def tables(self, db_id, schema, substr, force_refresh='false'):\n \"\"\"Endpoint to fetch the list of tables for given database\"\"\"\n db_id = int(db_id)\n force_refresh = force_refresh.lower() == 'true'\n schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n substr = utils.parse_js_uri_path_item(substr, eval_undefined=True)\n database = db.session.query(models.Database).filter_by(id=db_id).one()\n\n if schema:\n table_names = database.all_table_names_in_schema(\n 
schema=schema, force=force_refresh,\n cache=database.table_cache_enabled,\n cache_timeout=database.table_cache_timeout)\n view_names = database.all_view_names_in_schema(\n schema=schema, force=force_refresh,\n cache=database.table_cache_enabled,\n cache_timeout=database.table_cache_timeout)\n else:\n table_names = database.all_table_names_in_database(\n cache=True, force=False, cache_timeout=24 * 60 * 60)\n view_names = database.all_view_names_in_database(\n cache=True, force=False, cache_timeout=24 * 60 * 60)\n table_names = security_manager.accessible_by_user(database, table_names, schema)\n view_names = security_manager.accessible_by_user(database, view_names, schema)\n\n if substr:\n table_names = [tn for tn in table_names if substr in tn]\n view_names = [vn for vn in view_names if substr in vn]\n\n if not schema and database.default_schemas:\n def get_schema(tbl_or_view_name):\n return tbl_or_view_name.split('.')[0] if '.' in tbl_or_view_name else None\n\n user_schema = g.user.email.split('@')[0]\n valid_schemas = set(database.default_schemas + [user_schema])\n\n table_names = [tn for tn in table_names if get_schema(tn) in valid_schemas]\n view_names = [vn for vn in view_names if get_schema(vn) in valid_schemas]\n\n max_items = config.get('MAX_TABLE_NAMES') or len(table_names)\n total_items = len(table_names) + len(view_names)\n max_tables = len(table_names)\n max_views = len(view_names)\n if total_items and substr:\n max_tables = max_items * len(table_names) // total_items\n max_views = max_items * len(view_names) // total_items\n\n table_options = [{'value': tn, 'label': tn}\n for tn in table_names[:max_tables]]\n table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)}\n for vn in view_names[:max_views]])\n payload = {\n 'tableLength': len(table_names) + len(view_names),\n 'options': table_options,\n }\n return json_success(json.dumps(payload))\n\n @api\n @has_access_api\n @expose('/copy_dash/<dashboard_id>/', methods=['GET', 'POST'])\n def copy_dash(self, dashboard_id):\n \"\"\"Copy dashboard\"\"\"\n session = db.session()\n data = json.loads(request.form.get('data'))\n dash = models.Dashboard()\n original_dash = (\n session\n .query(models.Dashboard)\n .filter_by(id=dashboard_id).first())\n\n dash.owners = [g.user] if g.user else []\n dash.dashboard_title = data['dashboard_title']\n\n if data['duplicate_slices']:\n # Duplicating slices as well, mapping old ids to new ones\n old_to_new_sliceids = {}\n for slc in original_dash.slices:\n new_slice = slc.clone()\n new_slice.owners = [g.user] if g.user else []\n session.add(new_slice)\n session.flush()\n new_slice.dashboards.append(dash)\n old_to_new_sliceids['{}'.format(slc.id)] = \\\n '{}'.format(new_slice.id)\n\n # update chartId of layout entities\n # in v2_dash positions json data, chartId should be integer,\n # while in older version slice_id is string type\n for value in data['positions'].values():\n if (\n isinstance(value, dict) and value.get('meta') and\n value.get('meta').get('chartId')\n ):\n old_id = '{}'.format(value.get('meta').get('chartId'))\n new_id = int(old_to_new_sliceids[old_id])\n value['meta']['chartId'] = new_id\n else:\n dash.slices = original_dash.slices\n dash.params = original_dash.params\n\n self._set_dash_metadata(dash, data)\n session.add(dash)\n session.commit()\n dash_json = json.dumps(dash.data)\n session.close()\n return json_success(dash_json)\n\n @api\n @has_access_api\n @expose('/save_dash/<dashboard_id>/', methods=['GET', 'POST'])\n def save_dash(self, dashboard_id):\n \"\"\"Save a 
dashboard's metadata\"\"\"\n session = db.session()\n dash = (session\n .query(models.Dashboard)\n .filter_by(id=dashboard_id).first())\n check_ownership(dash, raise_if_false=True)\n data = json.loads(request.form.get('data'))\n self._set_dash_metadata(dash, data)\n session.merge(dash)\n session.commit()\n session.close()\n return json_success(json.dumps({'status': 'SUCCESS'}))\n\n @staticmethod\n def _set_dash_metadata(dashboard, data):\n positions = data['positions']\n # find slices in the position data\n slice_ids = []\n slice_id_to_name = {}\n for value in positions.values():\n if (\n isinstance(value, dict) and value.get('meta') and\n value.get('meta').get('chartId')\n ):\n slice_id = value.get('meta').get('chartId')\n slice_ids.append(slice_id)\n slice_id_to_name[slice_id] = value.get('meta').get('sliceName')\n\n session = db.session()\n Slice = models.Slice # noqa\n current_slices = session.query(Slice).filter(\n Slice.id.in_(slice_ids)).all()\n\n dashboard.slices = current_slices\n\n # update slice names. this assumes user has permissions to update the slice\n for slc in dashboard.slices:\n new_name = slice_id_to_name[slc.id]\n if slc.slice_name != new_name:\n slc.slice_name = new_name\n session.merge(slc)\n session.flush()\n\n # remove leading and trailing white spaces in the dumped json\n dashboard.position_json = json.dumps(\n positions, indent=None, separators=(',', ':'), sort_keys=True)\n md = dashboard.params_dict\n dashboard.css = data.get('css')\n dashboard.dashboard_title = data['dashboard_title']\n\n if 'filter_immune_slices' not in md:\n md['filter_immune_slices'] = []\n if 'timed_refresh_immune_slices' not in md:\n md['timed_refresh_immune_slices'] = []\n if 'filter_immune_slice_fields' not in md:\n md['filter_immune_slice_fields'] = {}\n md['expanded_slices'] = data['expanded_slices']\n md['refresh_frequency'] = data.get('refresh_frequency', 0)\n default_filters_data = json.loads(data.get('default_filters', '{}'))\n applicable_filters = \\\n {key: v for key, v in default_filters_data.items()\n if int(key) in slice_ids}\n md['default_filters'] = json.dumps(applicable_filters)\n if data.get('color_namespace'):\n md['color_namespace'] = data.get('color_namespace')\n if data.get('color_scheme'):\n md['color_scheme'] = data.get('color_scheme')\n if data.get('label_colors'):\n md['label_colors'] = data.get('label_colors')\n dashboard.json_metadata = json.dumps(md)\n\n @api\n @has_access_api\n @expose('/add_slices/<dashboard_id>/', methods=['POST'])\n def add_slices(self, dashboard_id):\n \"\"\"Add and save slices to a dashboard\"\"\"\n data = json.loads(request.form.get('data'))\n session = db.session()\n Slice = models.Slice # noqa\n dash = (\n session.query(models.Dashboard).filter_by(id=dashboard_id).first())\n check_ownership(dash, raise_if_false=True)\n new_slices = session.query(Slice).filter(\n Slice.id.in_(data['slice_ids']))\n dash.slices += new_slices\n session.merge(dash)\n session.commit()\n session.close()\n return 'SLICES ADDED'\n\n @api\n @has_access_api\n @expose('/testconn', methods=['POST', 'GET'])\n def testconn(self):\n \"\"\"Tests a sqla connection\"\"\"\n try:\n username = g.user.username if g.user is not None else None\n uri = request.json.get('uri')\n db_name = request.json.get('name')\n impersonate_user = request.json.get('impersonate_user')\n database = None\n if db_name:\n database = (\n db.session\n .query(models.Database)\n .filter_by(database_name=db_name)\n .first()\n )\n if database and uri == database.safe_sqlalchemy_uri():\n # the 
password-masked uri was passed\n # use the URI associated with this database\n uri = database.sqlalchemy_uri_decrypted\n\n configuration = {}\n\n if database and uri:\n url = make_url(uri)\n db_engine = models.Database.get_db_engine_spec_for_backend(\n url.get_backend_name())\n db_engine.patch()\n\n masked_url = database.get_password_masked_url_from_uri(uri)\n logging.info('Superset.testconn(). Masked URL: {0}'.format(masked_url))\n\n configuration.update(\n db_engine.get_configuration_for_impersonation(uri,\n impersonate_user,\n username),\n )\n\n engine_params = (\n request.json\n .get('extras', {})\n .get('engine_params', {}))\n connect_args = engine_params.get('connect_args')\n\n if configuration and connect_args is not None:\n connect_args['configuration'] = configuration\n\n engine = create_engine(uri, **engine_params)\n engine.connect()\n return json_success(json.dumps(engine.table_names(), indent=4))\n except Exception as e:\n logging.exception(e)\n return json_error_response((\n 'Connection failed!\\n\\n'\n 'The error message returned was:\\n{}').format(e))\n\n @api\n @has_access_api\n @expose('/recent_activity/<user_id>/', methods=['GET'])\n def recent_activity(self, user_id):\n \"\"\"Recent activity (actions) for a given user\"\"\"\n M = models # noqa\n\n if request.args.get('limit'):\n limit = int(request.args.get('limit'))\n else:\n limit = 1000\n\n qry = (\n db.session.query(M.Log, M.Dashboard, M.Slice)\n .outerjoin(\n M.Dashboard,\n M.Dashboard.id == M.Log.dashboard_id,\n )\n .outerjoin(\n M.Slice,\n M.Slice.id == M.Log.slice_id,\n )\n .filter(\n sqla.and_(\n ~M.Log.action.in_(('queries', 'shortner', 'sql_json')),\n M.Log.user_id == user_id,\n ),\n )\n .order_by(M.Log.dttm.desc())\n .limit(limit)\n )\n payload = []\n for log in qry.all():\n item_url = None\n item_title = None\n if log.Dashboard:\n item_url = log.Dashboard.url\n item_title = log.Dashboard.dashboard_title\n elif log.Slice:\n item_url = log.Slice.slice_url\n item_title = log.Slice.slice_name\n\n payload.append({\n 'action': log.Log.action,\n 'item_url': item_url,\n 'item_title': item_title,\n 'time': log.Log.dttm,\n })\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/csrf_token/', methods=['GET'])\n def csrf_token(self):\n return Response(\n self.render_template('superset/csrf_token.json'),\n mimetype='text/json',\n )\n\n @api\n @has_access_api\n @expose('/available_domains/', methods=['GET'])\n def available_domains(self):\n \"\"\"\n Returns the list of available Superset Webserver domains (if any)\n defined in config. 
This enables charts embedded in other apps to\n leverage domain sharding if appropriately configured.\n \"\"\"\n return Response(\n json.dumps(conf.get('SUPERSET_WEBSERVER_DOMAINS')),\n mimetype='text/json',\n )\n\n @api\n @has_access_api\n @expose('/fave_dashboards_by_username/<username>/', methods=['GET'])\n def fave_dashboards_by_username(self, username):\n \"\"\"This lets us use a user's username to pull favourite dashboards\"\"\"\n user = security_manager.find_user(username=username)\n return self.fave_dashboards(user.get_id())\n\n @api\n @has_access_api\n @expose('/fave_dashboards/<user_id>/', methods=['GET'])\n def fave_dashboards(self, user_id):\n qry = (\n db.session.query(\n models.Dashboard,\n models.FavStar.dttm,\n )\n .join(\n models.FavStar,\n sqla.and_(\n models.FavStar.user_id == int(user_id),\n models.FavStar.class_name == 'Dashboard',\n models.Dashboard.id == models.FavStar.obj_id,\n ),\n )\n .order_by(\n models.FavStar.dttm.desc(),\n )\n )\n payload = []\n for o in qry.all():\n d = {\n 'id': o.Dashboard.id,\n 'dashboard': o.Dashboard.dashboard_link(),\n 'title': o.Dashboard.dashboard_title,\n 'url': o.Dashboard.url,\n 'dttm': o.dttm,\n }\n if o.Dashboard.created_by:\n user = o.Dashboard.created_by\n d['creator'] = str(user)\n d['creator_url'] = '/superset/profile/{}/'.format(\n user.username)\n payload.append(d)\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/created_dashboards/<user_id>/', methods=['GET'])\n def created_dashboards(self, user_id):\n Dash = models.Dashboard # noqa\n qry = (\n db.session.query(\n Dash,\n )\n .filter(\n sqla.or_(\n Dash.created_by_fk == user_id,\n Dash.changed_by_fk == user_id,\n ),\n )\n .order_by(\n Dash.changed_on.desc(),\n )\n )\n payload = [{\n 'id': o.id,\n 'dashboard': o.dashboard_link(),\n 'title': o.dashboard_title,\n 'url': o.url,\n 'dttm': o.changed_on,\n } for o in qry.all()]\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/user_slices', methods=['GET'])\n @expose('/user_slices/<user_id>/', methods=['GET'])\n def user_slices(self, user_id=None):\n \"\"\"List of slices a user created, or faved\"\"\"\n if not user_id:\n user_id = g.user.id\n Slice = models.Slice # noqa\n FavStar = models.FavStar # noqa\n qry = (\n db.session.query(Slice,\n FavStar.dttm).join(\n models.FavStar,\n sqla.and_(\n models.FavStar.user_id == int(user_id),\n models.FavStar.class_name == 'slice',\n models.Slice.id == models.FavStar.obj_id,\n ),\n isouter=True).filter(\n sqla.or_(\n Slice.created_by_fk == user_id,\n Slice.changed_by_fk == user_id,\n FavStar.user_id == user_id,\n ),\n )\n .order_by(Slice.slice_name.asc())\n )\n payload = [{\n 'id': o.Slice.id,\n 'title': o.Slice.slice_name,\n 'url': o.Slice.slice_url,\n 'data': o.Slice.form_data,\n 'dttm': o.dttm if o.dttm else o.Slice.changed_on,\n 'viz_type': o.Slice.viz_type,\n } for o in qry.all()]\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/created_slices', methods=['GET'])\n @expose('/created_slices/<user_id>/', methods=['GET'])\n def created_slices(self, user_id=None):\n \"\"\"List of slices created by this user\"\"\"\n if not user_id:\n user_id = g.user.id\n Slice = models.Slice # noqa\n qry = (\n db.session.query(Slice)\n .filter(\n sqla.or_(\n Slice.created_by_fk == user_id,\n Slice.changed_by_fk == user_id,\n ),\n )\n .order_by(Slice.changed_on.desc())\n )\n payload = [{\n 'id': o.id,\n 
'title': o.slice_name,\n 'url': o.slice_url,\n 'dttm': o.changed_on,\n 'viz_type': o.viz_type,\n } for o in qry.all()]\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/fave_slices', methods=['GET'])\n @expose('/fave_slices/<user_id>/', methods=['GET'])\n def fave_slices(self, user_id=None):\n \"\"\"Favorite slices for a user\"\"\"\n if not user_id:\n user_id = g.user.id\n qry = (\n db.session.query(\n models.Slice,\n models.FavStar.dttm,\n )\n .join(\n models.FavStar,\n sqla.and_(\n models.FavStar.user_id == int(user_id),\n models.FavStar.class_name == 'slice',\n models.Slice.id == models.FavStar.obj_id,\n ),\n )\n .order_by(\n models.FavStar.dttm.desc(),\n )\n )\n payload = []\n for o in qry.all():\n d = {\n 'id': o.Slice.id,\n 'title': o.Slice.slice_name,\n 'url': o.Slice.slice_url,\n 'dttm': o.dttm,\n 'viz_type': o.Slice.viz_type,\n }\n if o.Slice.created_by:\n user = o.Slice.created_by\n d['creator'] = str(user)\n d['creator_url'] = '/superset/profile/{}/'.format(\n user.username)\n payload.append(d)\n return json_success(\n json.dumps(payload, default=utils.json_int_dttm_ser))\n\n @api\n @has_access_api\n @expose('/warm_up_cache/', methods=['GET'])\n def warm_up_cache(self):\n \"\"\"Warms up the cache for the slice or table.\n\n Note for slices a force refresh occurs.\n \"\"\"\n slices = None\n session = db.session()\n slice_id = request.args.get('slice_id')\n table_name = request.args.get('table_name')\n db_name = request.args.get('db_name')\n\n if not slice_id and not (table_name and db_name):\n return json_error_response(__(\n 'Malformed request. slice_id or table_name and db_name '\n 'arguments are expected'), status=400)\n if slice_id:\n slices = session.query(models.Slice).filter_by(id=slice_id).all()\n if not slices:\n return json_error_response(__(\n 'Chart %(id)s not found', id=slice_id), status=404)\n elif table_name and db_name:\n SqlaTable = ConnectorRegistry.sources['table']\n table = (\n session.query(SqlaTable)\n .join(models.Database)\n .filter(\n models.Database.database_name == db_name or\n SqlaTable.table_name == table_name)\n ).first()\n if not table:\n return json_error_response(__(\n \"Table %(t)s wasn't found in the database %(d)s\",\n t=table_name, s=db_name), status=404)\n slices = session.query(models.Slice).filter_by(\n datasource_id=table.id,\n datasource_type=table.type).all()\n\n for slc in slices:\n try:\n form_data = get_form_data(slc.id, use_slice_data=True)[0]\n obj = get_viz(\n datasource_type=slc.datasource.type,\n datasource_id=slc.datasource.id,\n form_data=form_data,\n force=True,\n )\n obj.get_json()\n except Exception as e:\n return json_error_response(utils.error_msg_from_exception(e))\n return json_success(json.dumps(\n [{'slice_id': slc.id, 'slice_name': slc.slice_name}\n for slc in slices]))\n\n @has_access_api\n @expose('/favstar/<class_name>/<obj_id>/<action>/')\n def favstar(self, class_name, obj_id, action):\n \"\"\"Toggle favorite stars on Slices and Dashboard\"\"\"\n session = db.session()\n FavStar = models.FavStar # noqa\n count = 0\n favs = session.query(FavStar).filter_by(\n class_name=class_name, obj_id=obj_id,\n user_id=g.user.get_id()).all()\n if action == 'select':\n if not favs:\n session.add(\n FavStar(\n class_name=class_name,\n obj_id=obj_id,\n user_id=g.user.get_id(),\n dttm=datetime.now(),\n ),\n )\n count = 1\n elif action == 'unselect':\n for fav in favs:\n session.delete(fav)\n else:\n count = len(favs)\n session.commit()\n return 
json_success(json.dumps({'count': count}))\n\n @has_access\n @expose('/dashboard/<dashboard_id>/')\n def dashboard(self, dashboard_id):\n \"\"\"Server side rendering for a dashboard\"\"\"\n session = db.session()\n qry = session.query(models.Dashboard)\n if dashboard_id.isdigit():\n qry = qry.filter_by(id=int(dashboard_id))\n else:\n qry = qry.filter_by(slug=dashboard_id)\n\n dash = qry.one_or_none()\n if not dash:\n abort(404)\n datasources = set()\n for slc in dash.slices:\n datasource = slc.datasource\n if datasource:\n datasources.add(datasource)\n\n if config.get('ENABLE_ACCESS_REQUEST'):\n for datasource in datasources:\n if datasource and not security_manager.datasource_access(datasource):\n flash(\n __(security_manager.get_datasource_access_error_msg(datasource)),\n 'danger')\n return redirect(\n 'superset/request_access/?'\n f'dashboard_id={dash.id}&')\n\n dash_edit_perm = check_ownership(dash, raise_if_false=False) and \\\n security_manager.can_access('can_save_dash', 'Superset')\n dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')\n superset_can_explore = security_manager.can_access('can_explore', 'Superset')\n superset_can_csv = security_manager.can_access('can_csv', 'Superset')\n slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')\n\n standalone_mode = request.args.get('standalone') == 'true'\n edit_mode = request.args.get('edit') == 'true'\n\n # Hack to log the dashboard_id properly, even when getting a slug\n @log_this\n def dashboard(**kwargs): # noqa\n pass\n dashboard(\n dashboard_id=dash.id,\n dashboard_version='v2',\n dash_edit_perm=dash_edit_perm,\n edit_mode=edit_mode)\n\n dashboard_data = dash.data\n dashboard_data.update({\n 'standalone_mode': standalone_mode,\n 'dash_save_perm': dash_save_perm,\n 'dash_edit_perm': dash_edit_perm,\n 'superset_can_explore': superset_can_explore,\n 'superset_can_csv': superset_can_csv,\n 'slice_can_edit': slice_can_edit,\n })\n\n bootstrap_data = {\n 'user_id': g.user.get_id(),\n 'dashboard_data': dashboard_data,\n 'datasources': {ds.uid: ds.data for ds in datasources},\n 'common': self.common_bootsrap_payload(),\n 'editMode': edit_mode,\n }\n\n if request.args.get('json') == 'true':\n return json_success(json.dumps(bootstrap_data))\n\n return self.render_template(\n 'superset/dashboard.html',\n entry='dashboard',\n standalone_mode=standalone_mode,\n title=dash.dashboard_title,\n bootstrap_data=json.dumps(bootstrap_data),\n )\n\n @api\n @log_this\n @expose('/log/', methods=['POST'])\n def log(self):\n return Response(status=200)\n\n @has_access\n @expose('/sync_druid/', methods=['POST'])\n @log_this\n def sync_druid_source(self):\n \"\"\"Syncs the druid datasource in main db with the provided config.\n\n The endpoint takes 3 arguments:\n user - user name to perform the operation as\n cluster - name of the druid cluster\n config - configuration stored in json that contains:\n name: druid datasource name\n dimensions: list of the dimensions, they become druid columns\n with the type STRING\n metrics_spec: list of metrics (dictionary). Metric consists of\n 2 attributes: type and name. Type can be count,\n etc. 
`count` type is stored internally as longSum\n other fields will be ignored.\n\n Example: {\n 'name': 'test_click',\n 'metrics_spec': [{'type': 'count', 'name': 'count'}],\n 'dimensions': ['affiliate_id', 'campaign', 'first_seen']\n }\n \"\"\"\n payload = request.get_json(force=True)\n druid_config = payload['config']\n user_name = payload['user']\n cluster_name = payload['cluster']\n\n user = security_manager.find_user(username=user_name)\n DruidDatasource = ConnectorRegistry.sources['druid']\n DruidCluster = DruidDatasource.cluster_class\n if not user:\n err_msg = __(\"Can't find User '%(name)s', please ask your admin \"\n 'to create one.', name=user_name)\n logging.error(err_msg)\n return json_error_response(err_msg)\n cluster = db.session.query(DruidCluster).filter_by(\n cluster_name=cluster_name).first()\n if not cluster:\n err_msg = __(\"Can't find DruidCluster with cluster_name = \"\n \"'%(name)s'\", name=cluster_name)\n logging.error(err_msg)\n return json_error_response(err_msg)\n try:\n DruidDatasource.sync_to_db_from_config(\n druid_config, user, cluster)\n except Exception as e:\n logging.exception(utils.error_msg_from_exception(e))\n return json_error_response(utils.error_msg_from_exception(e))\n return Response(status=201)\n\n @has_access\n @expose('/sqllab_viz/', methods=['POST'])\n @log_this\n def sqllab_viz(self):\n SqlaTable = ConnectorRegistry.sources['table']\n data = json.loads(request.form.get('data'))\n table_name = data.get('datasourceName')\n table = (\n db.session.query(SqlaTable)\n .filter_by(table_name=table_name)\n .first()\n )\n if not table:\n table = SqlaTable(table_name=table_name)\n table.database_id = data.get('dbId')\n table.schema = data.get('schema')\n table.template_params = data.get('templateParams')\n table.is_sqllab_view = True\n q = ParsedQuery(data.get('sql'))\n table.sql = q.stripped()\n db.session.add(table)\n cols = []\n for config in data.get('columns'):\n column_name = config.get('name')\n SqlaTable = ConnectorRegistry.sources['table']\n TableColumn = SqlaTable.column_class\n SqlMetric = SqlaTable.metric_class\n col = TableColumn(\n column_name=column_name,\n filterable=True,\n groupby=True,\n is_dttm=config.get('is_date', False),\n type=config.get('type', False),\n )\n cols.append(col)\n\n table.columns = cols\n table.metrics = [\n SqlMetric(metric_name='count', expression='count(*)'),\n ]\n db.session.commit()\n return self.json_response(json.dumps({\n 'table_id': table.id,\n }))\n\n @has_access\n @expose('/table/<database_id>/<table_name>/<schema>/')\n @log_this\n def table(self, database_id, table_name, schema):\n schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n table_name = utils.parse_js_uri_path_item(table_name)\n mydb = db.session.query(models.Database).filter_by(id=database_id).one()\n payload_columns = []\n indexes = []\n primary_key = []\n foreign_keys = []\n try:\n columns = mydb.get_columns(table_name, schema)\n indexes = mydb.get_indexes(table_name, schema)\n primary_key = mydb.get_pk_constraint(table_name, schema)\n foreign_keys = mydb.get_foreign_keys(table_name, schema)\n except Exception as e:\n return json_error_response(utils.error_msg_from_exception(e))\n keys = []\n if primary_key and primary_key.get('constrained_columns'):\n primary_key['column_names'] = primary_key.pop('constrained_columns')\n primary_key['type'] = 'pk'\n keys += [primary_key]\n for fk in foreign_keys:\n fk['column_names'] = fk.pop('constrained_columns')\n fk['type'] = 'fk'\n keys += foreign_keys\n for idx in indexes:\n idx['type'] 
= 'index'\n keys += indexes\n\n for col in columns:\n dtype = ''\n try:\n dtype = '{}'.format(col['type'])\n except Exception:\n # sqla.types.JSON __str__ has a bug, so using __class__.\n dtype = col['type'].__class__.__name__\n pass\n payload_columns.append({\n 'name': col['name'],\n 'type': dtype.split('(')[0] if '(' in dtype else dtype,\n 'longType': dtype,\n 'keys': [\n k for k in keys\n if col['name'] in k.get('column_names')\n ],\n })\n tbl = {\n 'name': table_name,\n 'columns': payload_columns,\n 'selectStar': mydb.select_star(\n table_name, schema=schema, show_cols=True, indent=True,\n cols=columns, latest_partition=True),\n 'primaryKey': primary_key,\n 'foreignKeys': foreign_keys,\n 'indexes': keys,\n }\n return json_success(json.dumps(tbl))\n\n @has_access\n @expose('/extra_table_metadata/<database_id>/<table_name>/<schema>/')\n @log_this\n def extra_table_metadata(self, database_id, table_name, schema):\n schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n table_name = utils.parse_js_uri_path_item(table_name)\n mydb = db.session.query(models.Database).filter_by(id=database_id).one()\n payload = mydb.db_engine_spec.extra_table_metadata(\n mydb, table_name, schema)\n return json_success(json.dumps(payload))\n\n @has_access\n @expose('/select_star/<database_id>/<table_name>')\n @expose('/select_star/<database_id>/<table_name>/<schema>')\n @log_this\n def select_star(self, database_id, table_name, schema=None):\n mydb = db.session.query(\n models.Database).filter_by(id=database_id).first()\n schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)\n table_name = utils.parse_js_uri_path_item(table_name)\n return json_success(\n mydb.select_star(\n table_name,\n schema,\n latest_partition=True,\n show_cols=True,\n ),\n )\n\n @expose('/theme/')\n def theme(self):\n return self.render_template('superset/theme.html')\n\n @has_access_api\n @expose('/cached_key/<key>/')\n @log_this\n def cached_key(self, key):\n \"\"\"Returns a key from the cache\"\"\"\n resp = cache.get(key)\n if resp:\n return resp\n return 'nope'\n\n @has_access_api\n @expose('/cache_key_exist/<key>/')\n @log_this\n def cache_key_exist(self, key):\n \"\"\"Returns if a key from cache exist\"\"\"\n key_exist = True if cache.get(key) else False\n status = 200 if key_exist else 404\n return json_success(json.dumps({'key_exist': key_exist}),\n status=status)\n\n @has_access_api\n @expose('/results/<key>/')\n @log_this\n def results(self, key):\n \"\"\"Serves a key off of the results backend\"\"\"\n if not results_backend:\n return json_error_response(\"Results backend isn't configured\")\n\n read_from_results_backend_start = now_as_float()\n blob = results_backend.get(key)\n stats_logger.timing(\n 'sqllab.query.results_backend_read',\n now_as_float() - read_from_results_backend_start,\n )\n if not blob:\n return json_error_response(\n 'Data could not be retrieved. 
'\n 'You may want to re-run the query.',\n status=410,\n )\n\n query = db.session.query(Query).filter_by(results_key=key).one()\n rejected_tables = security_manager.rejected_datasources(\n query.sql, query.database, query.schema)\n if rejected_tables:\n return json_error_response(security_manager.get_table_access_error_msg(\n '{}'.format(rejected_tables)), status=403)\n\n payload = utils.zlib_decompress_to_string(blob)\n display_limit = app.config.get('DEFAULT_SQLLAB_LIMIT', None)\n if display_limit:\n payload_json = json.loads(payload)\n payload_json['data'] = payload_json['data'][:display_limit]\n return json_success(\n json.dumps(\n payload_json,\n default=utils.json_iso_dttm_ser,\n ignore_nan=True,\n ),\n )\n\n @has_access_api\n @expose('/stop_query/', methods=['POST'])\n @log_this\n def stop_query(self):\n client_id = request.form.get('client_id')\n try:\n query = (\n db.session.query(Query)\n .filter_by(client_id=client_id).one()\n )\n query.status = QueryStatus.STOPPED\n db.session.commit()\n except Exception:\n pass\n return self.json_response('OK')\n\n @has_access_api\n @expose('/validate_sql_json/', methods=['POST', 'GET'])\n @log_this\n def validate_sql_json(self):\n \"\"\"Validates that arbitrary sql is acceptable for the given database.\n Returns a list of error/warning annotations as json.\n \"\"\"\n sql = request.form.get('sql')\n database_id = request.form.get('database_id')\n schema = request.form.get('schema') or None\n template_params = json.loads(\n request.form.get('templateParams') or '{}')\n\n if len(template_params) > 0:\n # TODO: factor the Database object out of template rendering\n # or provide it as mydb so we can render template params\n # without having to also persist a Query ORM object.\n return json_error_response(\n 'SQL validation does not support template parameters',\n status=400)\n\n session = db.session()\n mydb = session.query(models.Database).filter_by(id=database_id).first()\n if not mydb:\n json_error_response(\n 'Database with id {} is missing.'.format(database_id),\n status=400,\n )\n\n spec = mydb.db_engine_spec\n validators_by_engine = get_feature_flags().get(\n 'SQL_VALIDATORS_BY_ENGINE')\n if not validators_by_engine or spec.engine not in validators_by_engine:\n return json_error_response(\n 'no SQL validator is configured for {}'.format(spec.engine),\n status=400)\n validator_name = validators_by_engine[spec.engine]\n validator = get_validator_by_name(validator_name)\n if not validator:\n return json_error_response(\n 'No validator named {} found (configured for the {} engine)'\n .format(validator_name, spec.engine))\n\n try:\n timeout = config.get('SQLLAB_VALIDATION_TIMEOUT')\n timeout_msg = (\n f'The query exceeded the {timeout} seconds timeout.')\n with utils.timeout(seconds=timeout,\n error_message=timeout_msg):\n errors = validator.validate(sql, schema, mydb)\n payload = json.dumps(\n [err.to_dict() for err in errors],\n default=utils.pessimistic_json_iso_dttm_ser,\n ignore_nan=True,\n encoding=None,\n )\n return json_success(payload)\n except Exception as e:\n logging.exception(e)\n msg = _(\n 'Failed to validate your SQL query text. Please check that '\n f'you have configured the {validator.name} validator '\n 'correctly and that any services it depends on are up. 
'\n f'Exception: {e}')\n return json_error_response(f'{msg}')\n\n @has_access_api\n @expose('/sql_json/', methods=['POST', 'GET'])\n @log_this\n def sql_json(self):\n \"\"\"Runs arbitrary sql and returns and json\"\"\"\n async_ = request.form.get('runAsync') == 'true'\n sql = request.form.get('sql')\n database_id = request.form.get('database_id')\n schema = request.form.get('schema') or None\n template_params = json.loads(\n request.form.get('templateParams') or '{}')\n limit = int(request.form.get('queryLimit', 0))\n if limit < 0:\n logging.warning(\n 'Invalid limit of {} specified. Defaulting to max limit.'.format(limit))\n limit = 0\n limit = limit or app.config.get('SQL_MAX_ROW')\n\n session = db.session()\n mydb = session.query(models.Database).filter_by(id=database_id).first()\n\n if not mydb:\n json_error_response(\n 'Database with id {} is missing.'.format(database_id))\n\n rejected_tables = security_manager.rejected_datasources(sql, mydb, schema)\n if rejected_tables:\n return json_error_response(\n security_manager.get_table_access_error_msg(rejected_tables),\n link=security_manager.get_table_access_link(rejected_tables),\n status=403)\n session.commit()\n\n select_as_cta = request.form.get('select_as_cta') == 'true'\n tmp_table_name = request.form.get('tmp_table_name')\n if select_as_cta and mydb.force_ctas_schema:\n tmp_table_name = '{}.{}'.format(\n mydb.force_ctas_schema,\n tmp_table_name,\n )\n\n client_id = request.form.get('client_id') or utils.shortid()[:10]\n query = Query(\n database_id=int(database_id),\n sql=sql,\n schema=schema,\n select_as_cta=select_as_cta,\n start_time=now_as_float(),\n tab_name=request.form.get('tab'),\n status=QueryStatus.PENDING if async_ else QueryStatus.RUNNING,\n sql_editor_id=request.form.get('sql_editor_id'),\n tmp_table_name=tmp_table_name,\n user_id=g.user.get_id() if g.user else None,\n client_id=client_id,\n )\n session.add(query)\n session.flush()\n query_id = query.id\n session.commit() # shouldn't be necessary\n if not query_id:\n raise Exception(_('Query record was not created as expected.'))\n logging.info('Triggering query_id: {}'.format(query_id))\n\n try:\n template_processor = get_template_processor(\n database=query.database, query=query)\n rendered_query = template_processor.process_template(\n query.sql,\n **template_params)\n except Exception as e:\n return json_error_response(\n 'Template rendering failed: {}'.format(utils.error_msg_from_exception(e)))\n\n # set LIMIT after template processing\n limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit]\n query.limit = min(lim for lim in limits if lim is not None)\n\n # Async request.\n if async_:\n logging.info('Running query on a Celery worker')\n # Ignore the celery future object and the request may time out.\n try:\n sql_lab.get_sql_results.delay(\n query_id,\n rendered_query,\n return_results=False,\n store_results=not query.select_as_cta,\n user_name=g.user.username if g.user else None,\n start_time=now_as_float())\n except Exception as e:\n logging.exception(e)\n msg = _(\n 'Failed to start remote query on a worker. 
'\n 'Tell your administrator to verify the availability of '\n 'the message queue.')\n query.status = QueryStatus.FAILED\n query.error_message = msg\n session.commit()\n return json_error_response('{}'.format(msg))\n\n resp = json_success(json.dumps(\n {'query': query.to_dict()}, default=utils.json_int_dttm_ser,\n ignore_nan=True), status=202)\n session.commit()\n return resp\n\n # Sync request.\n try:\n timeout = config.get('SQLLAB_TIMEOUT')\n timeout_msg = (\n f'The query exceeded the {timeout} seconds timeout.')\n with utils.timeout(seconds=timeout,\n error_message=timeout_msg):\n # pylint: disable=no-value-for-parameter\n data = sql_lab.get_sql_results(\n query_id,\n rendered_query,\n return_results=True,\n user_name=g.user.username if g.user else None)\n payload = json.dumps(\n data,\n default=utils.pessimistic_json_iso_dttm_ser,\n ignore_nan=True,\n encoding=None,\n )\n except Exception as e:\n logging.exception(e)\n return json_error_response('{}'.format(e))\n if data.get('status') == QueryStatus.FAILED:\n return json_error_response(payload=data)\n return json_success(payload)\n\n @has_access\n @expose('/csv/<client_id>')\n @log_this\n def csv(self, client_id):\n \"\"\"Download the query results as csv.\"\"\"\n logging.info('Exporting CSV file [{}]'.format(client_id))\n query = (\n db.session.query(Query)\n .filter_by(client_id=client_id)\n .one()\n )\n\n rejected_tables = security_manager.rejected_datasources(\n query.sql, query.database, query.schema)\n if rejected_tables:\n flash(\n security_manager.get_table_access_error_msg('{}'.format(rejected_tables)))\n return redirect('/')\n blob = None\n if results_backend and query.results_key:\n logging.info(\n 'Fetching CSV from results backend '\n '[{}]'.format(query.results_key))\n blob = results_backend.get(query.results_key)\n if blob:\n logging.info('Decompressing')\n json_payload = utils.zlib_decompress_to_string(blob)\n obj = json.loads(json_payload)\n columns = [c['name'] for c in obj['columns']]\n df = pd.DataFrame.from_records(obj['data'], columns=columns)\n logging.info('Using pandas to convert to CSV')\n csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))\n else:\n logging.info('Running a query to turn into CSV')\n sql = query.select_sql or query.executed_sql\n df = query.database.get_df(sql, query.schema)\n # TODO(bkyryliuk): add compression=gzip for big files.\n csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))\n response = Response(csv, mimetype='text/csv')\n response.headers['Content-Disposition'] = f'attachment; filename={query.name}.csv'\n logging.info('Ready to return response')\n return response\n\n @api\n @handle_api_exception\n @has_access\n @expose('/fetch_datasource_metadata')\n @log_this\n def fetch_datasource_metadata(self):\n datasource_id, datasource_type = (\n request.args.get('datasourceKey').split('__'))\n datasource = ConnectorRegistry.get_datasource(\n datasource_type, datasource_id, db.session)\n # Check if datasource exists\n if not datasource:\n return json_error_response(DATASOURCE_MISSING_ERR)\n\n # Check permission for datasource\n security_manager.assert_datasource_permission(datasource)\n return json_success(json.dumps(datasource.data))\n\n @has_access_api\n @expose('/queries/<last_updated_ms>')\n def queries(self, last_updated_ms):\n \"\"\"Get the updated queries.\"\"\"\n stats_logger.incr('queries')\n if not g.user.get_id():\n return json_error_response(\n 'Please login to access the queries.', status=403)\n\n # Unix time, milliseconds.\n last_updated_ms_int = 
int(float(last_updated_ms)) if last_updated_ms else 0\n\n # UTC date time, same that is stored in the DB.\n last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)\n\n sql_queries = (\n db.session.query(Query)\n .filter(\n Query.user_id == g.user.get_id(),\n Query.changed_on >= last_updated_dt,\n )\n .all()\n )\n dict_queries = {q.client_id: q.to_dict() for q in sql_queries}\n\n now = int(round(time.time() * 1000))\n\n unfinished_states = [\n QueryStatus.PENDING,\n QueryStatus.RUNNING,\n ]\n\n queries_to_timeout = [\n client_id for client_id, query_dict in dict_queries.items()\n if (\n query_dict['state'] in unfinished_states and (\n now - query_dict['startDttm'] >\n config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC') * 1000\n )\n )\n ]\n\n if queries_to_timeout:\n update(Query).where(\n and_(\n Query.user_id == g.user.get_id(),\n Query.client_id in queries_to_timeout,\n ),\n ).values(state=QueryStatus.TIMED_OUT)\n\n for client_id in queries_to_timeout:\n dict_queries[client_id]['status'] = QueryStatus.TIMED_OUT\n\n return json_success(\n json.dumps(dict_queries, default=utils.json_int_dttm_ser))\n\n @has_access\n @expose('/search_queries')\n @log_this\n def search_queries(self) -> Response:\n \"\"\"\n Search for previously run sqllab queries. Used for Sqllab Query Search\n page /superset/sqllab#search.\n\n Custom permission can_only_search_queries_owned restricts queries\n to only queries run by current user.\n\n :returns: Response with list of sql query dicts\n \"\"\"\n query = db.session.query(Query)\n if security_manager.can_only_access_owned_queries():\n search_user_id = g.user.get_user_id()\n else:\n search_user_id = request.args.get('user_id')\n database_id = request.args.get('database_id')\n search_text = request.args.get('search_text')\n status = request.args.get('status')\n # From and To time stamp should be Epoch timestamp in seconds\n from_time = request.args.get('from')\n to_time = request.args.get('to')\n\n if search_user_id:\n # Filter on user_id\n query = query.filter(Query.user_id == search_user_id)\n\n if database_id:\n # Filter on db Id\n query = query.filter(Query.database_id == database_id)\n\n if status:\n # Filter on status\n query = query.filter(Query.status == status)\n\n if search_text:\n # Filter on search text\n query = query \\\n .filter(Query.sql.like('%{}%'.format(search_text)))\n\n if from_time:\n query = query.filter(Query.start_time > int(from_time))\n\n if to_time:\n query = query.filter(Query.start_time < int(to_time))\n\n query_limit = config.get('QUERY_SEARCH_LIMIT', 1000)\n sql_queries = (\n query.order_by(Query.start_time.asc())\n .limit(query_limit)\n .all()\n )\n\n dict_queries = [q.to_dict() for q in sql_queries]\n\n return Response(\n json.dumps(dict_queries, default=utils.json_int_dttm_ser),\n status=200,\n mimetype='application/json')\n\n @app.errorhandler(500)\n def show_traceback(self):\n return render_template(\n 'superset/traceback.html',\n error_msg=get_error_msg(),\n ), 500\n\n @expose('/welcome')\n def welcome(self):\n \"\"\"Personalized welcome page\"\"\"\n if not g.user or not g.user.get_id():\n return redirect(appbuilder.get_url_for_login)\n\n welcome_dashboard_id = (\n db.session\n .query(UserAttribute.welcome_dashboard_id)\n .filter_by(user_id=g.user.get_id())\n .scalar()\n )\n if welcome_dashboard_id:\n return self.dashboard(str(welcome_dashboard_id))\n\n payload = {\n 'user': bootstrap_user_data(),\n 'common': self.common_bootsrap_payload(),\n }\n\n return self.render_template(\n 'superset/basic.html',\n 
entry='welcome',\n title='Superset',\n bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),\n )\n\n @has_access\n @expose('/profile/<username>/')\n def profile(self, username):\n \"\"\"User profile page\"\"\"\n if not username and g.user:\n username = g.user.username\n\n payload = {\n 'user': bootstrap_user_data(username, include_perms=True),\n 'common': self.common_bootsrap_payload(),\n }\n\n return self.render_template(\n 'superset/basic.html',\n title=_(\"%(user)s's profile\", user=username),\n entry='profile',\n bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),\n )\n\n @has_access\n @expose('/sqllab')\n def sqllab(self):\n \"\"\"SQL Editor\"\"\"\n d = {\n 'defaultDbId': config.get('SQLLAB_DEFAULT_DBID'),\n 'common': self.common_bootsrap_payload(),\n }\n return self.render_template(\n 'superset/basic.html',\n entry='sqllab',\n bootstrap_data=json.dumps(d, default=utils.json_iso_dttm_ser),\n )\n\n @api\n @handle_api_exception\n @has_access_api\n @expose('/slice_query/<slice_id>/')\n def slice_query(self, slice_id):\n \"\"\"\n This method exposes an API endpoint to\n get the database query string for this slice\n \"\"\"\n viz_obj = get_viz(slice_id)\n security_manager.assert_datasource_permission(viz_obj.datasource)\n return self.get_query_string_response(viz_obj)\n\n @api\n @has_access_api\n @expose('/schemas_access_for_csv_upload')\n def schemas_access_for_csv_upload(self):\n \"\"\"\n This method exposes an API endpoint to\n get the schema access control settings for csv upload in this database\n \"\"\"\n if not request.args.get('db_id'):\n return json_error_response(\n 'No database is allowed for your csv upload')\n\n db_id = int(request.args.get('db_id'))\n database = (\n db.session\n .query(models.Database)\n .filter_by(id=db_id)\n .one()\n )\n try:\n schemas_allowed = database.get_schema_access_for_csv_upload()\n if (security_manager.database_access(database) or\n security_manager.all_datasource_access()):\n return self.json_response(schemas_allowed)\n # the list schemas_allowed should not be empty here\n # and the list schemas_allowed_processed returned from security_manager\n # should not be empty either,\n # otherwise the database should have been filtered out\n # in CsvToDatabaseForm\n schemas_allowed_processed = security_manager.schemas_accessible_by_user(\n database, schemas_allowed, False)\n return self.json_response(schemas_allowed_processed)\n except Exception:\n return json_error_response((\n 'Failed to fetch schemas allowed for csv upload in this database! 
'\n 'Please contact Superset Admin!\\n\\n'\n 'The error message returned was:\\n{}').format(traceback.format_exc()))\n\n\nappbuilder.add_view_no_menu(Superset)\n\n\nclass CssTemplateModelView(SupersetModelView, DeleteMixin):\n datamodel = SQLAInterface(models.CssTemplate)\n\n list_title = _('CSS Templates')\n show_title = _('Show CSS Template')\n add_title = _('Add CSS Template')\n edit_title = _('Edit CSS Template')\n\n list_columns = ['template_name']\n edit_columns = ['template_name', 'css']\n add_columns = edit_columns\n label_columns = {\n 'template_name': _('Template Name'),\n }\n\n\nclass CssTemplateAsyncModelView(CssTemplateModelView):\n list_columns = ['template_name', 'css']\n\n\nappbuilder.add_separator('Sources')\nappbuilder.add_view(\n CssTemplateModelView,\n 'CSS Templates',\n label=__('CSS Templates'),\n icon='fa-css3',\n category='Manage',\n category_label=__('Manage'),\n category_icon='')\n\n\nappbuilder.add_view_no_menu(CssTemplateAsyncModelView)\n\nappbuilder.add_link(\n 'SQL Editor',\n label=_('SQL Editor'),\n href='/superset/sqllab',\n category_icon='fa-flask',\n icon='fa-flask',\n category='SQL Lab',\n category_label=__('SQL Lab'),\n)\n\nappbuilder.add_link(\n 'Query Search',\n label=_('Query Search'),\n href='/superset/sqllab#search',\n icon='fa-search',\n category_icon='fa-flask',\n category='SQL Lab',\n category_label=__('SQL Lab'),\n)\n\nappbuilder.add_link(\n 'Upload a CSV',\n label=__('Upload a CSV'),\n href='/csvtodatabaseview/form',\n icon='fa-upload',\n category='Sources',\n category_label=__('Sources'),\n category_icon='fa-wrench')\nappbuilder.add_separator('Sources')\n\n\[email protected]_request\ndef apply_caching(response):\n \"\"\"Applies the configuration's http headers to all responses\"\"\"\n for k, v in config.get('HTTP_HEADERS').items():\n response.headers[k] = v\n return response\n\n\n# ---------------------------------------------------------------------\n# Redirecting URL from previous names\nclass RegexConverter(BaseConverter):\n def __init__(self, url_map, *items):\n super(RegexConverter, self).__init__(url_map)\n self.regex = items[0]\n\n\napp.url_map.converters['regex'] = RegexConverter\n\n\[email protected]('/<regex(\"panoramix\\/.*\"):url>')\ndef panoramix(url): # noqa\n return redirect(request.full_path.replace('panoramix', 'superset'))\n\n\[email protected]('/<regex(\"caravel\\/.*\"):url>')\ndef caravel(url): # noqa\n return redirect(request.full_path.replace('caravel', 'superset'))\n\n\n# ---------------------------------------------------------------------\n"
] | [
[
"pandas.DataFrame.from_records"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
sd2001/DANCE2LIVE-Hackerearth | [
"8aeb6b1022054b621a6246753b8421e0402cb0a7"
] | [
"directing.py"
] | [
"import os\r\nimport pandas as pd\r\nimport shutil\r\ndata=pd.read_csv(r'DanceForms/train.csv')\r\ntrain_img_dir='DanceForms/train'\r\n\r\n#print(data.head(10))\r\n\r\n\r\nfor i in os.listdir(train_img_dir):\r\n for img in data['Image']:\r\n if str(i)==str(img):\r\n name=data.loc[data['Image']==img]\r\n dance_name=(str(name['Target'].item()))\r\n mov_dir=os.path.join('Forms',dance_name)\r\n shutil.move(os.path.join(train_img_dir,img),mov_dir)\r\n \r\n \r\n \r\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
maituoy/BayesianOpt4dftu | [
"cdd80ab1cfc3bc2f5469202f0bce59d30580bac3"
] | [
"BayesOpt4dftu/core.py"
] | [
"import os\nimport json\nimport bayes_opt\nimport subprocess\nimport numpy as np\nimport pandas as pd\nimport pymatgen as mg\nimport xml.etree.ElementTree as ET\n\nfrom ase import Atoms, Atom\nfrom ase.calculators.vasp.vasp import Vasp\nfrom ase.dft.kpoints import *\n\nfrom pymatgen.io.vasp.inputs import Incar, Kpoints, Potcar, Poscar\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.structure import Structure, Molecule\nfrom pymatgen.io.vasp.outputs import BSVasprun, Vasprun\n\nfrom bayes_opt import UtilityFunction\nfrom bayes_opt import BayesianOptimization\nfrom string import ascii_lowercase\nfrom BayesOpt4dftu.special_kpath import kpath_dict\n\nfrom vaspvis import Band\nfrom vaspvis.utils import get_bandgap\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm, gridspec\n\n\nclass vasp_init(object):\n def __init__(self, input_path):\n with open(input_path, 'r') as f:\n self.input_dict = json.load(f)\n self.struct_info = self.input_dict['structure_info']\n self.general_flags = self.input_dict['general_flags']\n self.atoms = None\n\n def init_atoms(self):\n lattice_param = self.struct_info['lattice_param']\n cell = np.array(self.struct_info['cell'])\n self.atoms = Atoms(cell=cell*lattice_param)\n for atom in self.struct_info['atoms']:\n self.atoms.append(Atom(atom[0], atom[1], magmom=atom[2]))\n\n return self.atoms\n\n def modify_poscar(self, path='./'):\n with open(path + '/POSCAR', 'r') as f:\n poscar = f.readlines()\n poscar[7] = 'Direct\\n'\n f.close()\n\n with open(path + '/POSCAR', 'w') as d:\n d.writelines(poscar)\n d.close()\n\n def kpt4pbeband(self, path, import_kpath):\n if import_kpath:\n special_kpoints = kpath_dict\n else:\n special_kpoints = get_special_points(self.atoms.cell)\n\n num_kpts = self.struct_info['num_kpts']\n labels = self.struct_info['kpath']\n kptset = list()\n lbs = list()\n if labels[0] in special_kpoints.keys():\n kptset.append(special_kpoints[labels[0]])\n lbs.append(labels[0])\n\n for i in range(1, len(labels)-1):\n if labels[i] in special_kpoints.keys():\n kptset.append(special_kpoints[labels[i]])\n lbs.append(labels[i])\n kptset.append(special_kpoints[labels[i]])\n lbs.append(labels[i])\n if labels[-1] in special_kpoints.keys():\n kptset.append(special_kpoints[labels[-1]])\n lbs.append(labels[-1])\n\n # Hardcoded for EuS and EuTe since one of the k-point is not in the special kpoints list.\n if 'EuS' in self.atoms.symbols or 'EuTe' in self.atoms.symbols:\n kptset[0] = np.array([0.5, 0.5, 1])\n\n kpt = Kpoints(comment='band', kpts=kptset, num_kpts=num_kpts,\n style='Line_mode', coord_type=\"Reciprocal\", labels=lbs)\n kpt.write_file(path+'/KPOINTS')\n\n def kpt4hseband(self, path, import_kpath):\n ibz = open(path+'/IBZKPT', 'r')\n num_kpts = self.struct_info['num_kpts']\n labels = self.struct_info['kpath']\n ibzlist = ibz.readlines()\n ibzlist[1] = str(num_kpts*(len(labels)-1) +\n int(ibzlist[1].split('\\n')[0])) + '\\n'\n if import_kpath:\n special_kpoints = kpath_dict\n else:\n special_kpoints = get_special_points(self.atoms.cell)\n for i in range(len(labels)-1):\n k_head = special_kpoints[labels[i]]\n k_tail = special_kpoints[labels[i+1]]\n increment = (k_tail-k_head)/(num_kpts-1)\n ibzlist.append(' '.join(map(str, k_head)) +\n ' 0 ' + labels[i] + '\\n')\n for j in range(1, num_kpts-1):\n k_next = k_head + increment*j\n ibzlist.append(' '.join(map(str, k_next)) + ' 0\\n')\n ibzlist.append(' '.join(map(str, k_tail)) +\n ' 0 ' + labels[i+1] + '\\n')\n with open(path+'/KPOINTS', 'w') as f:\n f.writelines(ibzlist)\n\n def 
generate_input(self, directory, step, xc, import_kpath):\n flags = {}\n flags.update(self.general_flags)\n flags.update(self.input_dict[step])\n if step == 'scf':\n if xc == 'pbe':\n flags.update(self.input_dict[xc])\n calc = Vasp(self.atoms, directory=directory,\n kpts=self.struct_info['kgrid_'+xc], gamma=True, **flags)\n calc.write_input(self.atoms)\n if str(self.atoms.symbols) in ['Ni2O2']:\n mom_list = {'Ni': 2, 'Mn': 5, 'Co': 3, 'Fe': 4}\n s = str(self.atoms.symbols[0])\n incar_scf = Incar.from_file(directory+'/INCAR')\n incar_scf['MAGMOM'] = '%s -%s 0 0' % (mom_list[s], mom_list[s])\n incar_scf.write_file(directory+'/INCAR')\n\n self.modify_poscar(path=directory)\n elif step == 'band':\n flags.update(self.input_dict[xc])\n calc = Vasp(self.atoms, directory=directory, gamma=True, **flags)\n calc.write_input(self.atoms)\n self.modify_poscar(path=directory)\n if xc == 'pbe':\n self.kpt4pbeband(directory, import_kpath)\n elif xc == 'hse':\n print(directory)\n self.kpt4hseband(directory, import_kpath)\n\n\nclass delta_band(object):\n def __init__(self, bandrange=10, path='./', iteration=1, interpolate=False):\n self.path = path\n self.br = bandrange\n self.interpolate = interpolate\n self.vasprun_hse = os.path.join(path, 'hse/band/vasprun.xml')\n self.kpoints_hse = os.path.join(path, 'hse/band/KPOINTS')\n self.vasprun_dftu = os.path.join(path, 'dftu/band/vasprun.xml')\n self.kpoints_dftu = os.path.join(path, 'dftu/band/KPOINTS')\n self.iteration = iteration\n\n def readInfo(self, filepath):\n tree = ET.parse(filepath)\n root = tree.getroot()\n ispin = int(root.findall(\n './parameters/separator/.[@name=\"electronic\"]/separator/.[@name=\"electronic spin\"]/i/.[@name=\"ISPIN\"]')[0].text)\n nbands = int(root.findall(\n './parameters/separator/.[@name=\"electronic\"]/i/.[@name=\"NBANDS\"]')[0].text)\n nkpts = len(root.findall('./kpoints/varray/.[@name=\"kpointlist\"]/v'))\n\n return ispin, nbands, nkpts\n\n def access_eigen(self, b, interpolate=False):\n wave_vectors = b._get_k_distance()\n eigenvalues = b.eigenvalues\n\n if interpolate:\n _, eigenvalues_interp = b._get_interpolated_data(\n wave_vectors=wave_vectors,\n data=eigenvalues\n )\n\n if interpolate:\n return eigenvalues_interp\n else:\n return eigenvalues\n\n def locate_and_shift_bands(self, eigenvalues):\n band_mean = eigenvalues.mean(axis=1)\n\n below_index = np.where(band_mean < 0)[0]\n above_index = np.where(band_mean >= 0)[0]\n\n vbm = np.max(eigenvalues[below_index])\n cbm = np.min(eigenvalues[above_index])\n\n if cbm < vbm:\n vbm = 0.0\n cbm = 0.0\n\n valence_bands = eigenvalues[below_index[-self.br:]]\n conduction_bands = eigenvalues[above_index[:self.br]]\n\n valence_bands -= vbm\n conduction_bands -= cbm\n\n shifted_bands = np.r_[conduction_bands, valence_bands]\n\n return shifted_bands\n\n def deltaBand(self):\n ispin_hse, nbands_hse, nkpts_hse = self.readInfo(self.vasprun_hse)\n ispin_dftu, nbands_dftu, nkpts_dftu = self.readInfo(self.vasprun_dftu)\n\n \n if nbands_hse != nbands_dftu:\n raise Exception('The band number of HSE and GGA+U are not match!')\n\n kpoints = [line for line in open(self.kpoints_hse) if line.strip()]\n kpts_diff = 0\n for ii, line in enumerate(kpoints[3:]):\n if line.split()[3] != '0':\n kpts_diff += 1\n\n if nkpts_hse - kpts_diff != nkpts_dftu:\n raise Exception(\n 'The kpoints number of HSE and GGA+U are not match!')\n\n new_n = 500\n\n if ispin_hse == 1 and ispin_dftu == 1:\n band_hse = Band(\n folder=os.path.join(self.path, 'hse/band'),\n spin='up',\n interpolate=self.interpolate,\n 
new_n=new_n,\n projected=False,\n )\n band_dftu = Band(\n folder=os.path.join(self.path, 'dftu/band'),\n spin='up',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n bandgap=True,\n printbg=False,\n )\n\n eigenvalues_hse = self.access_eigen(band_hse, interpolate=self.interpolate)\n eigenvalues_dftu = self.access_eigen(band_dftu, interpolate=self.interpolate)\n\n shifted_hse = self.locate_and_shift_bands(eigenvalues_hse)\n shifted_dftu = self.locate_and_shift_bands(eigenvalues_dftu)\n\n n = shifted_hse.shape[0] * shifted_hse.shape[1]\n delta_band = sum((1/n)*sum((shifted_hse - shifted_dftu)**2))**(1/2)\n\n bg = get_bandgap(\n folder=os.path.join(self.path, 'dftu/band'),\n printbg=False,\n method=1,\n spin='both',\n )\n\n incar = Incar.from_file('./dftu/band/INCAR')\n u = incar['LDAUU']\n u.append(bg)\n u.append(delta_band)\n output = ' '.join(str(x) for x in u)\n\n with open('u_tmp.txt', 'a') as f:\n f.write(output + '\\n')\n f.close\n\n return delta_band\n\n elif ispin_hse == 2 and ispin_dftu == 2:\n band_hse_up = Band(\n folder=os.path.join(self.path, 'hse/band'),\n spin='up',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n )\n\n band_dftu_up = Band(\n folder=os.path.join(self.path, 'dftu/band'),\n spin='up',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n bandgap=True,\n printbg=False,\n )\n\n band_hse_down = Band(\n folder=os.path.join(self.path, 'hse/band'),\n spin='down',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n )\n\n band_dftu_down = Band(\n folder=os.path.join(self.path, 'dftu/band'),\n spin='down',\n interpolate=self.interpolate,\n new_n=new_n,\n projected=False,\n )\n\n eigenvalues_hse_up = self.access_eigen(band_hse_up, interpolate=self.interpolate)\n eigenvalues_dftu_up = self.access_eigen(band_dftu_up, interpolate=self.interpolate)\n\n shifted_hse_up = self.locate_and_shift_bands(eigenvalues_hse_up)\n shifted_dftu_up = self.locate_and_shift_bands(eigenvalues_dftu_up)\n\n n_up = shifted_hse_up.shape[0] * shifted_hse_up.shape[1]\n delta_band_up = sum((1/n_up)*sum((shifted_hse_up - shifted_dftu_up)**2))**(1/2)\n\n eigenvalues_hse_down = self.access_eigen(band_hse_down, interpolate=self.interpolate)\n eigenvalues_dftu_down = self.access_eigen(band_dftu_down, interpolate=self.interpolate)\n\n shifted_hse_down = self.locate_and_shift_bands(eigenvalues_hse_down)\n shifted_dftu_down = self.locate_and_shift_bands(eigenvalues_dftu_down)\n\n n_down = shifted_hse_down.shape[0] * shifted_hse_down.shape[1]\n delta_band_down = sum((1/n_down)*sum((shifted_hse_down - shifted_dftu_down)**2))**(1/2)\n\n delta_band = np.mean([delta_band_up, delta_band_down])\n\n bg = get_bandgap(\n folder=os.path.join(self.path, 'dftu/band'),\n printbg=False,\n method=1,\n spin='both',\n )\n\n incar = Incar.from_file('./dftu/band/INCAR')\n u = incar['LDAUU']\n\n u.append(bg)\n u.append(delta_band)\n output = ' '.join(str(x) for x in u)\n\n with open('u_tmp.txt', 'a') as f:\n f.write(output + '\\n')\n f.close\n\n return delta_band\n else:\n raise Exception('The spin number of HSE and GGA+U are not match!')\n\nclass get_optimizer:\n def __init__(self, utxt_path, opt_u_index, u_range, gap_hse, a1, a2, kappa):\n data = pd.read_csv(utxt_path, header=0, delimiter=\"\\s\", engine='python')\n self.opt_u_index = opt_u_index\n self.u_range = u_range\n self.gap_hse = gap_hse\n self.a1 = a1\n self.a2 = a2\n self.kappa = kappa\n self.n_obs, _ = data.shape\n self.data = data\n self.utility_function = UtilityFunction(kind=\"ucb\", kappa=kappa, 
xi=0)\n\n def loss(self, y, y_hat, delta_band, alpha_1, alpha_2):\n return -alpha_1 * (y - y_hat) ** 2 - alpha_2 * delta_band ** 2\n \n def set_bounds(self):\n # Set up the number of variables are going to be optimized.\n num_variables = int(sum(self.opt_u_index))\n variables_string = ['u_'+ str(i) for i, o in enumerate(self.opt_u_index) if o]\n\n # Set up the U ranges for each variable.\n pbounds = {}\n for variable in variables_string:\n pbounds[variable] = self.u_range\n return pbounds\n \n def optimizer(self): \n pbounds = self.set_bounds() \n optimizer = BayesianOptimization(\n f=None,\n pbounds=pbounds,\n verbose=2,\n random_state=1,\n )\n\n v_strings = list(pbounds.keys())\n opt_index = [int(v.split('_')[1]) for v in v_strings]\n\n for i in range(self.n_obs):\n values = list()\n for j in range(len(opt_index)):\n values.append(self.data.iloc[i][j])\n params = {}\n for (value, variable) in zip(values, v_strings):\n params[variable] = value\n target = self.loss(self.gap_hse, \n self.data.iloc[i].band_gap, \n self.data.iloc[i].delta_band, \n self.a1, \n self.a2)\n\n optimizer.register(\n params=params,\n target=target,\n )\n return optimizer, target\n \n\nclass plot_bo(get_optimizer):\n def __init__(self, utxt_path, opt_u_index, u_range, gap_hse, a1, a2, kappa, elements):\n super().__init__(utxt_path, opt_u_index, u_range, gap_hse, a1, a2, kappa)\n optimizer, target = self.optimizer()\n self.optimizer = optimizer\n self.target = target\n self.elements = elements\n self.optimal = 0\n \n def get_optimal(self, x, mu):\n best_obj = mu.max()\n best_index = np.where(mu == mu.max())[0][0]\n best_u = x[best_index]\n optimal = (best_u, best_obj)\n return optimal\n \n def predict(self, ratio=1):\n u = list(self.optimizer.res[0][\"params\"].keys())\n dim = len(u)\n plot_size = len(self.optimizer.res)*ratio\n if dim == 1:\n x = np.linspace(self.u_range[0], self.u_range[1], 10000).reshape(-1, 1)\n x_obs = np.array([res[\"params\"][u[0]] for res in self.optimizer.res]).reshape(-1,1)[:plot_size]\n y_obs = np.array([res[\"target\"] for res in self.optimizer.res])[:plot_size]\n \n self.optimizer._gp.fit(x_obs, y_obs)\n mu, sigma = self.optimizer._gp.predict(x, return_std=True)\n self.optimal = self.get_optimal(x, mu)\n\n data4plot = {'mu': mu,\n 'sigma': sigma,\n 'x': x,\n 'x_obs': x_obs,\n 'y_obs': y_obs}\n\n return data4plot\n \n if dim == 2:\n x = y = np.linspace(self.u_range[0], self.u_range[1], 300)\n X, Y = np.meshgrid(x, y)\n x = X.ravel()\n y = Y.ravel()\n X = np.vstack([x, y]).T\n\n x1_obs = np.array([[res[\"params\"][u[0]]] for res in self.optimizer.res])[:plot_size]\n x2_obs = np.array([[res[\"params\"][u[1]]] for res in self.optimizer.res])[:plot_size]\n y_obs = np.array([res[\"target\"] for res in self.optimizer.res])[:plot_size]\n obs = np.column_stack((x1_obs, x2_obs))\n\n self.optimizer._gp.fit(obs, y_obs)\n mu, sigma = self.optimizer._gp.predict(X, eval)\n self.optimal = self.get_optimal(X, mu)\n\n data4plot = {'mu': mu,\n 'sigma': sigma,\n 'obs': obs,\n 'x1_obs': x1_obs,\n 'x2_obs': x2_obs,\n 'x': x,\n 'y': y,\n 'X': X}\n\n return data4plot\n \n if dim == 3:\n x = y = z = np.linspace(self.u_range[0], self.u_range[1], 100)\n X, Y, Z= np.meshgrid(x, y, z)\n x = X.ravel()\n y = Y.ravel()\n z = Z.ravel()\n X = np.vstack([x, y, z]).T\n\n x1_obs = np.array([[res[\"params\"][u[0]]] for res in self.optimizer.res])[:plot_size]\n x2_obs = np.array([[res[\"params\"][u[1]]] for res in self.optimizer.res])[:plot_size]\n x3_obs = np.array([[res[\"params\"][u[2]]] for res in 
self.optimizer.res])[:plot_size]\n y_obs = np.array([res[\"target\"] for res in self.optimizer.res])[:plot_size]\n obs = np.column_stack((x1_obs, x2_obs, x3_obs))\n\n self.optimizer._gp.fit(obs, y_obs)\n mu, sigma = self.optimizer._gp.predict(X, eval)\n self.optimal = self.get_optimal(X, mu)\n\n return mu, sigma\n\n def plot(self, ratio=1):\n u = list(self.optimizer.res[0][\"params\"].keys())\n dim = len(u)\n plot_size = len(self.optimizer.res)*ratio\n opt_eles = [ele for i, ele in enumerate(self.elements) if self.opt_u_index[i]]\n\n if dim == 1:\n d = self.predict()\n fig = plt.figure()\n gs = gridspec.GridSpec(2, 1) \n axis = plt.subplot(gs[0])\n acq = plt.subplot(gs[1])\n axis.plot(d['x_obs'].flatten(), d['y_obs'], 'D', markersize=8, label=u'Observations', color='r')\n axis.plot(d['x'], d['mu'], '--', color='k', label='Prediction')\n axis.fill(np.concatenate([d['x'], d['x'][::-1]]), \n np.concatenate([d['mu'] - 1.9600 * d['sigma'], (d['mu'] + 1.9600 * d['sigma'])[::-1]]),\n alpha=.6, fc='c', ec='None', label='95% confidence interval')\n \n axis.set_xlim(self.u_range)\n axis.set_ylim((None, None))\n axis.set_ylabel('f(x)')\n\n utility = self.utility_function.utility(d['x'], self.optimizer._gp, 0)\n acq.plot(d['x'], utility, label='Acquisition Function', color='purple')\n acq.plot(d['x'][np.argmax(utility)], np.max(utility), '*', markersize=15, \n label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)\n acq.set_xlim(self.u_range)\n acq.set_ylim((np.min(utility)-0.5,np.max(utility)+0.5))\n acq.set_ylabel('Acquisition')\n acq.set_xlabel('U (eV)')\n axis.legend(loc=4, borderaxespad=0.)\n acq.legend(loc=4, borderaxespad=0.)\n\n plt.savefig('1D_kappa_%s_a1_%s_a2_%s.png' %(self.kappa, self.a1, self.a2), dpi = 400)\n\n if dim == 2:\n \n d = self.predict()\n fig, axis = plt.subplots(1, 2, figsize=(15,5))\n plt.subplots_adjust(wspace = 0.2)\n \n axis[0].plot(d['x1_obs'], d['x2_obs'], 'D', markersize=4, color='k', label='Observations')\n axis[0].set_title('Gaussian Process Predicted Mean',pad=10)\n im1 = axis[0].hexbin(d['x'], d['y'], C=d['mu'], cmap=cm.jet, bins=None)\n axis[0].axis([d['x'].min(), d['x'].max(), d['y'].min(), d['y'].max()])\n axis[0].set_xlabel(r'U_%s (eV)' %opt_eles[0],labelpad=5)\n axis[0].set_ylabel(r'U_%s (eV)' %opt_eles[1],labelpad=10,va='center')\n cbar1 = plt.colorbar(im1, ax = axis[0])\n\n utility = self.utility_function.utility(d['X'], self.optimizer._gp, self.optimizer.max)\n axis[1].plot(d['x1_obs'], d['x2_obs'], 'D', markersize=4, color='k', label='Observations')\n axis[1].set_title('Acquisition Function',pad=10)\n axis[1].set_xlabel(r'U_%s (eV)' %opt_eles[0],labelpad=5)\n axis[1].set_ylabel(r'U_%s (eV)' %opt_eles[1],labelpad=10,va='center')\n im2 = axis[1].hexbin(d['x'], d['y'], C=utility, cmap=cm.jet, bins=None)\n axis[1].axis([d['x'].min(), d['x'].max(), d['y'].min(), d['y'].max()])\n cbar2 = plt.colorbar(im2, ax = axis[1])\n\n plt.savefig('2D_kappa_%s_a1_%s_a2_%s.png' %(self.kappa, self.a1, self.a2), dpi = 400)\n\nclass bayesOpt_DFTU(plot_bo):\n def __init__(self, \n path, \n opt_u_index=(1, 1, 0), \n u_range=(0, 10), \n a1=0.25, \n a2=0.75, \n kappa=2.5,\n elements=['ele1','ele2','ele3'],\n plot=False):\n gap_hse = get_bandgap(\n folder=os.path.join(path, 'hse/band'),\n printbg=False,\n method=1,\n spin='both',\n )\n if plot:\n upath = \"./u_kappa_%s_a1_%s_a2_%s.txt\" %(kappa, a1, a2)\n if not plot:\n upath = './u_tmp.txt'\n plot_bo.__init__(self, upath, opt_u_index, u_range, gap_hse, a1, a2, kappa, elements)\n\n\n def bo(self):\n 
next_point_to_probe = self.optimizer.suggest(self.utility_function)\n\n points = list(next_point_to_probe.values())\n points = [round(elem, 6) for elem in points]\n\n U = [str(x) for x in points]\n with open('input.json', 'r') as f:\n data = json.load(f)\n elements = list(data[\"pbe\"][\"ldau_luj\"].keys())\n for i in range(len(self.opt_u_index)):\n if self.opt_u_index[i]:\n try:\n data[\"pbe\"][\"ldau_luj\"][self.elements[i]\n ][\"U\"] = round(float(U[i]), 6)\n except:\n data[\"pbe\"][\"ldau_luj\"][self.elements[i]\n ][\"U\"] = round(float(U[i-1]), 6)\n f.close()\n\n with open('input.json', 'w') as f:\n json.dump(data, f, indent=4)\n f.close()\n \n return self.target\n \n\n\ndef calculate(command, outfilename, method, import_kpath):\n olddir = os.getcwd()\n calc = vasp_init(olddir+'/input.json')\n calc.init_atoms()\n\n if method == 'dftu':\n calc.generate_input(olddir+'/%s/scf' %\n method, 'scf', 'pbe', import_kpath)\n calc.generate_input(olddir+'/%s/band' %\n method, 'band', 'pbe', import_kpath)\n\n if os.path.isfile(f'{olddir}/{method}/band/eigenvalues.npy'):\n os.remove(f'{olddir}/{method}/band/eigenvalues.npy')\n\n elif method == 'hse':\n calc.generate_input(olddir+'/%s/scf' %\n method, 'scf', 'hse', import_kpath)\n if not os.path.exists(olddir+'/%s/band' % method):\n os.mkdir(olddir+'/%s/band' % method)\n\n try:\n os.chdir(olddir+'/%s/scf' % method)\n errorcode_scf = subprocess.call(\n '%s > %s' % (command, outfilename), shell=True)\n os.system('cp CHG* WAVECAR IBZKPT %s/%s/band' % (olddir, method))\n if method == 'hse':\n calc.generate_input(olddir+'/%s/band' %\n method, 'band', 'hse', import_kpath)\n finally:\n os.chdir(olddir+'/%s/band' % method)\n errorcode_band = subprocess.call(\n '%s > %s' % (command, outfilename), shell=True)\n os.chdir(olddir)\n"
] | [
[
"numpy.linspace",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.where",
"pandas.read_csv",
"matplotlib.pyplot.subplot",
"numpy.argmax",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplots_adjust",
"numpy.column_stack",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.savefig",
"numpy.meshgrid",
"numpy.array",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
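The `get_optimizer` and `bayesOpt_DFTU` classes in the BayesOpt4dftu code above drive `bayes_opt` in its ask-and-tell form: previously computed (U, band gap, delta_band) rows are converted to targets and registered on a `BayesianOptimization` object built with `f=None`, and the next Hubbard U to try comes from `suggest()` with a UCB `UtilityFunction`. A minimal sketch of that loop follows, using the same bayes_opt calls as the code above; the bounds, kappa value, and the quadratic stand-in for the DFT+U loss are illustrative assumptions, not values taken from the code.

```python
from bayes_opt import BayesianOptimization, UtilityFunction

# Ask-and-tell optimisation: no objective function is handed to the optimizer;
# observations are registered by hand and new points are suggested on demand.
pbounds = {"u_0": (0.0, 10.0)}                      # assumed U range
optimizer = BayesianOptimization(f=None, pbounds=pbounds, verbose=2, random_state=1)
utility = UtilityFunction(kind="ucb", kappa=2.5, xi=0.0)

def toy_loss(u):
    # Stand-in for -a1*(gap - gap_hse)**2 - a2*delta_band**2; purely illustrative.
    return -(u - 4.2) ** 2

for _ in range(10):
    next_point = optimizer.suggest(utility)                 # "ask": next U to evaluate
    target = toy_loss(next_point["u_0"])                    # run the (here fake) calculation
    optimizer.register(params=next_point, target=target)   # "tell": feed the result back

print(optimizer.max)   # best observed point so far
```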
AdrienCorenflos/PFlow | [
"ec5f43a5e20d1280260e482ee0f9139fb9d1ca2b"
] | [
"pflow/optimal_transport/recentering.py"
] | [
"import numpy as np\nimport torch\n\nfrom pflow.base import BaseReweight, NoResampling\nfrom pflow.optimal_transport.transportation_plan import Transport\nfrom pflow.resampling.systematic import SystematicResampling\nfrom pflow.utils.fix_for_geomloss_losses import SamplesLoss\n\n\ndef _learn(x, logw, loss, optim_kwargs, schedule_kwargs, n_steps, init_x, optim_class_name='Adam',\n scheduler_class_name='StepLR'):\n \"\"\"\n Combine solve_for_state and transport_from_potentials in a \"reweighting scheme\"\n :param x: torch.Tensor[N, D]\n The input\n :param w: torch.Tensor[N]\n The degenerate log weights\n :param loss: geomloss.SamplesLoss\n Needs to be biased for the potentials to correspond to a proper plan\n :param optim_kwargs: dict\n arguments for optimizer\n :param schedule_kwargs: dict\n arguments for lr scheduler\n :param n_steps: int\n number of steps for optimisation\n :param init_x: tensor\n where to start\n :param optim_class_name: str\n Name of the optimizer\n :param scheduler_class_name: str\n Name of the scheduler\n \"\"\"\n\n n = x.shape[0]\n uniform_weights = torch.full_like(logw, np.log(1 / n), requires_grad=False)\n x_i = init_x.clone().detach().requires_grad_(True)\n\n optimizer_class = getattr(torch.optim, optim_class_name)\n optimizer = optimizer_class([x_i], **optim_kwargs)\n scheduler_class = getattr(torch.optim.lr_scheduler, scheduler_class_name)\n\n scheduler = scheduler_class(optimizer, **schedule_kwargs)\n\n def closure():\n optimizer.zero_grad()\n loss_val = loss(uniform_weights, x_i, logw.detach(), x.detach())\n loss_val.backward()\n return loss_val\n\n for _ in range(n_steps):\n # loss_val = loss(uniform_weights, x_i, logw.detach(), x.detach())\n optimizer.zero_grad()\n optimizer.step(closure)\n scheduler.step()\n return x_i, uniform_weights\n\n\ndef _incremental_learning(x, w, loss, adam_kwargs, n_steps=5, inner_steps=5):\n \"\"\"\n Combine solve_for_state and transport_from_potentials in a \"reweighting scheme\"\n :param x: torch.Tensor[N, D]\n The input\n :param logw: torch.Tensor[N]\n The degenerate logweights\n :param loss: geomloss.SamplesLoss\n Needs to be biased for the potentials to correspond to a proper plan\n :param n_steps: int\n number of steps from degenerate to uniform\n :param inner_steps: int\n inner steps for one set of weights to the next\n :param adam_kwargs: dict\n arguments for adam\n \"\"\"\n n = x.shape[0]\n ts = np.linspace(0., 1., n_steps + 1)\n\n x_j = x.clone().detach()\n x_i = x.clone().detach().requires_grad_(True)\n adam = torch.optim.Adam([x_i], **adam_kwargs)\n\n ones = torch.full_like(w, 1 / n, requires_grad=False)\n\n for i in range(n_steps):\n w_i = w.detach() * (1 - ts[i]) + ones * ts[i]\n w_i_1 = w.detach() * (1 - ts[i + 1]) + ones * ts[i + 1]\n for _ in range(inner_steps):\n loss_val = loss(w_i_1.log(), x_i, w_i.log(), x_j)\n adam.zero_grad()\n loss_val.backward()\n adam.step()\n x_j.data.copy_(x_i.data)\n return x_j, ones.log()\n\n\nclass LearnBest(BaseReweight):\n def __init__(self,\n epsilon,\n geomloss_kwargs,\n learning_rate,\n optimizer_kwargs=None,\n schedule_kwargs=None,\n n_steps=10,\n start_from_systematic=False,\n start_from_regularised=False,\n jitter=0.,\n optim_class_name='SGD',\n scheduler_class_name='StepLR'\n ):\n \"\"\"\n Combine solve_for_state and transport_from_potentials in a \"reweighting scheme\"\n :param epsilon: float\n Blur parameter used in loss\n :param geomloss_kwargs: dict\n dict for Sinkhorn\n :param learning_rate: float\n for the optimizer\n :param optimizer_kwargs: dict\n parameters for the 
adam optimizer\n :param n_steps: int\n number of epochs\n :param start_from_systematic: bool\n initialisation from the systematic resampling - not proven to be differentiable\n :param start_from_regularised: bool\n initialise from the regularised ensemble transform\n :param jitter: float\n jitter the initial state\n :param optim_class_name: str\n Name of the optimizer\n :param scheduler_class_name: str\n Name of the scheduler\n \"\"\"\n self.epsilon = epsilon\n geomloss_kwargs.pop('blur', None)\n geomloss_kwargs.pop('potentials', None)\n geomloss_kwargs.pop('debias', None)\n self.learning_rate = learning_rate\n self.optimizer_kwargs = optimizer_kwargs or {}\n self.optim_class_name = optim_class_name\n self.scheduler_class_name = scheduler_class_name\n self.schedule_kwargs = schedule_kwargs\n self.n_steps = n_steps\n self.sample_loss = SamplesLoss(blur=epsilon, is_log=True, debias=True, potentials=False, **geomloss_kwargs)\n if start_from_systematic:\n self._subSample = SystematicResampling()\n elif start_from_regularised:\n self._subSample = Transport(epsilon, **geomloss_kwargs)\n else:\n self._subSample = NoResampling()\n self.jitter = jitter\n\n def apply(self, x, w, logw):\n \"\"\"\n Combine solve_for_state and transport_from_potentials in a \"reweighting scheme\"\n :param x: torch.Tensor[N, D]\n The input\n :param w: torch.Tensor[N]\n The degenerate weights\n \"\"\"\n init_x, _ = self._subSample.apply(x, w, logw)\n optimizer_kwargs = self.optimizer_kwargs.copy()\n optimizer_kwargs['lr'] = self.learning_rate * init_x.shape[0]\n return _learn(x, logw, self.sample_loss, optimizer_kwargs, self.schedule_kwargs, self.n_steps,\n init_x + self.jitter * torch.normal(0., 1., init_x.shape),\n self.optim_class_name, self.scheduler_class_name)\n\n\nclass IncrementalLearning(BaseReweight):\n def __init__(self, epsilon, geomloss_kwargs, adam_kwargs, n_steps=5, inner_steps=5):\n \"\"\"\n Combine solve_for_state and transport_from_potentials in a \"reweighting scheme\"\n :param epsilon: float\n Blur parameter used in loss\n :param geomloss_kwargs: dict\n dict for Sinkhorn loss\n :param adam_kwargs: dict\n parameters for the adam optimizer\n :param n_steps: int\n number of epochs\n \"\"\"\n self.epsilon = epsilon\n self.adam_kwargs = adam_kwargs\n geomloss_kwargs.pop('blur', None)\n geomloss_kwargs.pop('potentials', None)\n geomloss_kwargs.pop('debias', None)\n self.n_steps = n_steps\n self.sample_loss = SamplesLoss(blur=epsilon, is_log=True, debias=True, potentials=False, **geomloss_kwargs)\n self.inner_steps = inner_steps\n\n def apply(self, x, w, _logw):\n \"\"\"\n Combine solve_for_state and transport_from_potentials in a \"reweighting scheme\"\n :param x: torch.Tensor[N, D]\n The input\n :param w: torch.Tensor[N]\n The degenerate weights\n \"\"\"\n return _incremental_learning(x, w, self.sample_loss, self.adam_kwargs, self.n_steps,\n self.inner_steps)\n\n# def recenter_from_proposal(x, w_y, y, lr=1., n_iter=50, **kwargs):\n# uniform = torch.ones_like(w_y)\n# uniform /= uniform.sum()\n# x_new = x.clone().requires_grad_(True)\n# sample_loss = gl.SamplesLoss(\"sinkhorn\", **kwargs)\n# adam = torch.optim.Adam([x_new], lr=lr)\n#\n# for _ in range(n_iter):\n# adam.zero_grad()\n# loss = sample_loss(uniform, x_new, w_y, y)\n# loss.backward()\n# adam.step()\n# return x_new.clone()\n#\n#\n# def recenter_from_target(w_y, y, lr=0.5, n_ts=5, n_iter=5, **kwargs):\n# ts = torch.linspace(1 / n_ts, 1, n_ts, requires_grad=False)\n# sample_loss = gl.SamplesLoss(\"sinkhorn\", **kwargs)\n# uniform = 
torch.ones_like(w_y, requires_grad=False)\n# uniform /= uniform.sum()\n# y_1 = y.clone()\n# w_0 = w_y\n# for t in ts:\n# w_1 = (w_y * (-t + 1.) + t * uniform)\n# y_0 = y_1.clone()\n# y_0_clone = y_0.clone()\n# y_1 = y_0.detach().requires_grad_(True)\n# adam = torch.optim.Adam([y_1], lr=lr)\n# for _ in range(n_iter):\n# adam.zero_grad()\n# loss = sample_loss(w_1, y_1, w_0, y_0_clone)\n# loss.backward()\n# adam.step()\n# w_0 = w_1.detach()\n# return y_1.clone()\n#\n#\n# def main():\n# import time\n# import matplotlib.pyplot as plt\n# torch.random.manual_seed(0)\n# n = 300\n# x = torch.randn(n, 1)\n# y, idx = torch.randn(n, 1).sort(0)\n# w_y = 0.5 + torch.rand(n) * 0.5\n# w_y /= w_y.sum()\n# w_y[:100] = 0.\n# print((w_y - 1 / n).abs().mean())\n# print(y[100])\n# tic = time.time()\n# from_proposal = recenter_from_proposal(x, w_y, y, backend='tensorized').detach().numpy()\n# print(time.time() - tic)\n#\n# tic = time.time()\n# from_target = recenter_from_target(w_y, y, n_ts=3, n_iter=10, lr=0.25, backend='tensorized').detach().numpy()\n# print(time.time() - tic)\n#\n# plt.hist(from_proposal.squeeze(), bins=30, alpha=0.5, label='from_proposal', density=True)\n# plt.hist(from_target.squeeze(), bins=30, alpha=0.5, label='from_target', density=True)\n# plt.hist(y.detach().squeeze().numpy().tolist(), weights=w_y.detach().numpy(), bins=30, alpha=0.5, label='initial',\n# density=True)\n# plt.legend()\n# plt.show()\n#\n#\n# if __name__ == '__main__':\n# main()\n"
] | [
[
"torch.optim.Adam",
"torch.normal",
"numpy.log",
"numpy.linspace",
"torch.full_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
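`_learn` in the recentering code above resolves the optimizer and learning-rate scheduler by name with `getattr` on `torch.optim` and `torch.optim.lr_scheduler`, then minimises the Sinkhorn loss through a closure passed to `optimizer.step`. A minimal sketch of that pattern on a toy quadratic objective; the class names, learning rate, schedule parameters, and iteration count here are illustrative assumptions.

```python
import torch

x = torch.zeros(3, requires_grad=True)

# Resolve the optimizer and scheduler classes by name, as _learn does.
optimizer = getattr(torch.optim, "Adam")([x], lr=0.1)
scheduler = getattr(torch.optim.lr_scheduler, "StepLR")(optimizer, step_size=20, gamma=0.5)

def closure():
    # Re-evaluates the loss and its gradients; stands in for the Sinkhorn loss above.
    optimizer.zero_grad()
    loss = ((x - torch.tensor([1.0, 2.0, 3.0])) ** 2).sum()
    loss.backward()
    return loss

for _ in range(100):
    optimizer.step(closure)   # the closure interface is accepted by torch optimizers
    scheduler.step()

print(x.detach())             # moves toward tensor([1., 2., 3.])
```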
davidwaroquiers/atomate2 | [
"208ace348f8684ea9fde584dd3551d87e380a810"
] | [
"tests/vasp/jobs/test_core.py"
] | [
"from pytest import approx\n\n\ndef test_static_maker(mock_vasp, clean_dir, si_structure):\n from jobflow import run_locally\n\n from atomate2.vasp.jobs.core import StaticMaker\n from atomate2.vasp.schemas.task import TaskDocument\n\n # mapping from job name to directory containing test files\n ref_paths = {\"static\": \"Si_band_structure/static\"}\n\n # settings passed to fake_run_vasp; adjust these to check for certain INCAR settings\n fake_run_vasp_kwargs = {\"static\": {\"incar_settings\": [\"NSW\", \"ISMEAR\"]}}\n\n # automatically use fake VASP and write POTCAR.spec during the test\n mock_vasp(ref_paths, fake_run_vasp_kwargs)\n\n # generate job\n job = StaticMaker().make(si_structure)\n\n # run the flow or job and ensure that it finished running successfully\n responses = run_locally(job, create_folders=True, ensure_success=True)\n\n # validation the outputs of the job\n output1 = responses[job.uuid][1].output\n assert isinstance(output1, TaskDocument)\n assert output1.output.energy == approx(-10.85037078)\n\n\ndef test_relax_maker(mock_vasp, clean_dir, si_structure):\n from jobflow import run_locally\n\n from atomate2.vasp.jobs.core import RelaxMaker\n from atomate2.vasp.schemas.task import TaskDocument\n\n # mapping from job name to directory containing test files\n ref_paths = {\"relax\": \"Si_double_relax/relax_1\"}\n\n # settings passed to fake_run_vasp; adjust these to check for certain INCAR settings\n fake_run_vasp_kwargs = {\"relax\": {\"incar_settings\": [\"NSW\", \"ISMEAR\"]}}\n\n # automatically use fake VASP and write POTCAR.spec during the test\n mock_vasp(ref_paths, fake_run_vasp_kwargs)\n\n # generate job\n job = RelaxMaker().make(si_structure)\n\n # run the flow or job and ensure that it finished running successfully\n responses = run_locally(job, create_folders=True, ensure_success=True)\n\n # validation the outputs of the job\n output1 = responses[job.uuid][1].output\n assert isinstance(output1, TaskDocument)\n assert output1.output.energy == approx(-10.85083141)\n assert len(output1.calcs_reversed[0].output.ionic_steps) == 1\n assert output1.input.parameters[\"NSW\"] > 1\n\n\ndef test_dielectric(mock_vasp, clean_dir, si_structure):\n import numpy as np\n from jobflow import run_locally\n\n from atomate2.vasp.jobs.core import DielectricMaker\n\n # mapping from job name to directory containing test files\n ref_paths = {\"dielectric\": \"Si_dielectric\"}\n\n # settings passed to fake_run_vasp; adjust these to check for certain INCAR settings\n fake_run_vasp_kwargs = {\"dielectric\": {\"incar_settings\": [\"NSW\", \"IBRION\"]}}\n\n # automatically use fake VASP and write POTCAR.spec during the test\n mock_vasp(ref_paths, fake_run_vasp_kwargs)\n\n # Generate dielectric flow\n job = DielectricMaker().make(si_structure)\n job.maker.input_set_generator.user_incar_settings[\"KSPACING\"] = 0.5\n\n # Run the flow or job and ensure that it finished running successfully\n responses = run_locally(job, create_folders=True, ensure_success=True)\n\n # Additional validation on the outputs of the job\n output1 = responses[job.uuid][1].output\n assert np.allclose(\n output1.calcs_reversed[0].output.epsilon_static,\n [[11.41539467, 0, 0], [0, 11.41539963, 0], [0, 0, 11.41539866]],\n atol=0.01,\n )\n assert np.allclose(\n output1.calcs_reversed[0].output.epsilon_ionic,\n [[0, 0, 0], [0, 0, 0], [0, 0, 0]],\n atol=0.01,\n )\n\n\ndef test_hse_relax(mock_vasp, clean_dir, si_structure):\n from jobflow import run_locally\n\n from atomate2.vasp.jobs.core import HSERelaxMaker\n from 
 atomate2.vasp.schemas.task import TaskDocument\n\n    # mapping from job name to directory containing test files\n    ref_paths = {\"hse relax\": \"Si_hse_relax\"}\n\n    # settings passed to fake_run_vasp; adjust these to check for certain INCAR settings\n    fake_run_vasp_kwargs = {\"hse relax\": {\"incar_settings\": [\"NSW\", \"ISMEAR\"]}}\n\n    # automatically use fake VASP and write POTCAR.spec during the test\n    mock_vasp(ref_paths, fake_run_vasp_kwargs)\n\n    # generate job\n    job = HSERelaxMaker().make(si_structure)\n    job.maker.input_set_generator.user_incar_settings[\"KSPACING\"] = 0.4\n\n    # Run the job and ensure that it finished running successfully\n    responses = run_locally(job, create_folders=True, ensure_success=True)\n\n    # validate the output of the job\n    output1 = responses[job.uuid][1].output\n    assert isinstance(output1, TaskDocument)\n    assert output1.output.energy == approx(-12.5326576)\n    assert len(output1.calcs_reversed[0].output.ionic_steps) == 3\n    assert output1.input.parameters[\"NSW\"] > 1\n\n\n# Renamed from test_static_maker: a second definition with that name would shadow the\n# StaticMaker test defined earlier in this module, so only one of the two would ever run.\ndef test_hse_static_maker(mock_vasp, clean_dir, si_structure):\n    from jobflow import run_locally\n\n    from atomate2.vasp.jobs.core import HSEStaticMaker\n    from atomate2.vasp.schemas.task import TaskDocument\n\n    # mapping from job name to directory containing test files\n    ref_paths = {\"hse static\": \"Si_hse_band_structure/hse_static\"}\n\n    # settings passed to fake_run_vasp; adjust these to check for certain INCAR settings\n    fake_run_vasp_kwargs = {\"hse static\": {\"incar_settings\": [\"NSW\", \"ISMEAR\"]}}\n\n    # automatically use fake VASP and write POTCAR.spec during the test\n    mock_vasp(ref_paths, fake_run_vasp_kwargs)\n\n    # generate job\n    job = HSEStaticMaker().make(si_structure)\n    job.maker.input_set_generator.user_incar_settings[\"KSPACING\"] = 0.4\n\n    # run the flow or job and ensure that it finished running successfully\n    responses = run_locally(job, create_folders=True, ensure_success=True)\n\n    # validate the outputs of the job\n    output1 = responses[job.uuid][1].output\n    assert isinstance(output1, TaskDocument)\n    assert output1.output.energy == approx(-12.52887403)\n"
] | [
[
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MaksTarnavskyi/improve-sequence-tagging | [
"faad3ffb79dba75de46b310c5102738602542ef4"
] | [
"utils/preprocess_lines_for_predict.py"
] | [
"import argparse\nimport os\nimport time\nimport datetime\n\nfrom difflib import SequenceMatcher\nfrom multiprocessing import Pool, cpu_count\n\nimport spacy\nnlp = spacy.load(\"en_core_web_sm\", exclude=['tagger', 'parser', 'ner', 'lemmatizer', 'textcat', 'custom'])\nnlp.add_pipe('sentencizer')\n\nimport numpy as np\nfrom tqdm import tqdm\n\ndef read_lines(fn):\n if not os.path.exists(fn):\n return []\n with open(fn, 'r', encoding='utf-8') as f:\n text = f.read()\n lines = text.split(\"\\n\")\n return lines\n\ndef write_lines(fn, lines, mode='w'):\n text_to_write = \"\\n\".join(lines)\n if len(text_to_write) > 0:\n text_to_write + \"\\n\"\n with open(fn, encoding='utf-8', mode=mode) as f:\n f.write(text_to_write)\n\n\ndef generate_text_for_log(processed_lines, \n total_lines, \n prediction_duration):\n return \"Processed lines: \"+str(processed_lines)+\"/\"+str(total_lines)+\" = \"+ str(round(100*processed_lines/total_lines, 2))+\"%\\n\"+ \"Process duration: \"+ str(prediction_duration)+\"\\n\" \n\n\ndef process_chunk(chunk_lines):\n sentences = []\n for line in chunk_lines:\n doc = nlp(str(line))\n sents = [sent for sent in doc.sents]\n new_sents = [\" \".join([tok.text for tok in list(sent)]) for sent in sents]\n sentences.extend(new_sents)\n return {\"sentences\":sentences}\n \n\ndef preprocess_lines(input_f, output_f, output_log, chunk_size,\n start_line, stop_line,\n count_cpu):\n\n write_lines(output_log, [\"Start\"], mode='w')\n write_lines(output_f, [], mode='w')\n \n input_lines = read_lines(input_f)\n \n total_lines = len(input_lines)\n \n count_chunks = int(total_lines/chunk_size)\n if count_chunks*chunk_size < total_lines:\n count_chunks += 1\n \n predicting_start_time = time.time()\n\n processed_lines = 0\n \n for chunk_num in range(count_chunks):\n start = chunk_num*chunk_size\n end = (chunk_num+1)*chunk_size\n chunk = input_lines[start:end]\n \n output_lines = []\n \n all_result_maps = []\n sub_chunks_input_lines = np.array_split(chunk, count_cpu)\n pool = Pool(count_cpu)\n result_map = pool.map(process_chunk, sub_chunks_input_lines)\n pool.close()\n pool.join()\n all_result_maps.extend(result_map)\n \n for res in all_result_maps:\n output_lines.extend(res[\"sentences\"])\n \n predicting_elapsed_time = time.time() - predicting_start_time\n prediction_duration = datetime.timedelta(seconds=predicting_elapsed_time)\n \n output_lines = [sen for sen in output_lines if len(sen) > 5]\n \n processed_lines += len(output_lines)\n \n text_for_log = generate_text_for_log(processed_lines, \n total_lines, prediction_duration)\n \n write_lines(output_log, [text_for_log], mode='w')\n write_lines(output_f, output_lines, mode='a')\n \n \n \ndef main(args):\n preprocess_lines(args.input_f, args.output_f, args.log, args.chunk_size, args.start_line, \n args.stop_line, args.count_cpu)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input_f',\n help='Path to the input source file',\n required=True)\n parser.add_argument('-o', '--output_f',\n help='Path to the output source file',\n required=True)\n parser.add_argument('-l', '--log',\n help='Path to the output log file',\n required=True)\n parser.add_argument('--chunk_size',\n type=int,\n help='Dump each chunk size.',\n default=20000)\n parser.add_argument('--start_line',\n type=int,\n help='From which line to start',\n default=0)\n parser.add_argument('--stop_line',\n type=int,\n help='On which line to end',\n default=-1)\n parser.add_argument('--count_cpu',\n type=int,\n help='how many cpu to 
use',\n default=14)\n \n args = parser.parse_args()\n main(args)\n"
] | [
[
"numpy.array_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
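`preprocess_lines` above fans each chunk out over `count_cpu` workers by slicing it with `np.array_split` and mapping `process_chunk` over the slices with `multiprocessing.Pool`. A minimal sketch of that fan-out pattern with a trivial worker standing in for the spaCy sentencizer; the worker body, input lines, and CPU count are illustrative assumptions.

```python
import numpy as np
from multiprocessing import Pool

def process_chunk(chunk_lines):
    # Stand-in worker: the real code runs spaCy sentence splitting over each line.
    return {"sentences": [str(line).strip().upper() for line in chunk_lines]}

if __name__ == "__main__":
    lines = ["line %d" % i for i in range(100)]
    count_cpu = 4                                   # assumed worker count
    sub_chunks = np.array_split(lines, count_cpu)   # ~equal-sized arrays of lines
    with Pool(count_cpu) as pool:
        results = pool.map(process_chunk, sub_chunks)
    sentences = [s for res in results for s in res["sentences"]]
    print(len(sentences))                           # 100
```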
abc123yuanrui/introtodeeplearning | [
"97d51ef3de77ed74abd6bab4973664d0cc06771e"
] | [
"mitdeeplearning/lab1.py"
] | [
"import os\nimport regex as re\nimport subprocess\nimport urllib\nimport numpy as np\nimport tensorflow as tf\n\nfrom IPython.display import Audio\n\n\ncwd = os.path.dirname(__file__)\n\ndef load_training_data():\n with open(os.path.join(cwd, \"data\", \"irish.abc\"), \"r\") as f:\n text = f.read()\n songs = extract_song_snippet(text)\n return songs\n\ndef extract_song_snippet(text):\n pattern = 'X:(.*?)\\n\\n'\n search_results = re.findall(pattern, text, overlapped=True, flags=re.DOTALL)\n songs = [song for song in search_results]\n print(\"Found {} songs in text\".format(len(songs)))\n return songs\n\ndef save_song_to_abc(song, filename=\"tmp\"):\n save_name = \"{}.abc\".format(filename)\n with open(save_name, \"w\") as f:\n f.write(song)\n return filename\n\ndef abc2wav(abc_file):\n path_to_tool = os.path.join(cwd, 'bin', 'abc2wav')\n cmd = \"{} {}\".format(path_to_tool, abc_file)\n return os.system(cmd)\n\ndef play_wav(wav_file):\n return Audio(wav_file)\n\ndef play_song(song):\n basename = save_song_to_abc(song)\n ret = abc2wav(basename+'.abc')\n if ret == 0: #did not suceed\n return play_wav(basename+'.wav')\n return None\n\ndef play_generated_song(generated_text):\n songs = extract_song_snippet(generated_text)\n if len(songs) == 0:\n print(\"No valid songs found in generated text. Try training the \\\n model longer or increasing the amount of generated music to \\\n ensure complete songs are generated!\")\n\n for song in songs:\n play_song(song)\n print(\"None of the songs were valid, try training longer to improve \\\n syntax.\")\n\ndef test_batch_func_types(func, args):\n ret = func(*args)\n assert len(ret) == 2, \"[FAIL] get_batch must return two arguments (input and label)\"\n assert type(ret[0]) == np.ndarray, \"[FAIL] test_batch_func_types: x is not np.array\"\n assert type(ret[1]) == np.ndarray, \"[FAIL] test_batch_func_types: y is not np.array\"\n print(\"[PASS] test_batch_func_types\")\n return True\n\ndef test_batch_func_shapes(func, args):\n dataset, seq_length, batch_size = args\n x, y = func(*args)\n correct = (batch_size, seq_length)\n assert x.shape == correct, \"[FAIL] test_batch_func_shapes: x {} is not correct shape {}\".format(x.shape, correct)\n assert y.shape == correct, \"[FAIL] test_batch_func_shapes: y {} is not correct shape {}\".format(y.shape, correct)\n print(\"[PASS] test_batch_func_shapes\")\n return True\n\ndef test_batch_func_next_step(func, args):\n x, y = func(*args)\n assert (x[:,1:] == y[:,:-1]).all(), \"[FAIL] test_batch_func_next_step: x_{t} must equal y_{t-1} for all t\"\n print(\"[PASS] test_batch_func_next_step\")\n return True\n\ndef test_custom_dense_layer_output(y):\n true_y = np.array([[0.2697859, 0.45750418, 0.66536945]],dtype='float32')\n assert tf.shape(y).numpy().tolist() == list(true_y.shape), \"[FAIL] output is of incorrect shape. expected {} but got {}\".format(true_y.shape, y.numpy().shape)\n np.testing.assert_almost_equal(y.numpy(), true_y, decimal=7, err_msg=\"[FAIL] output is of incorrect value. expected {} but got {}\".format(y.numpy(), true_y), verbose=True)\n print(\"[PASS] test_custom_dense_layer_output\")\n return True\n"
] | [
[
"numpy.array",
"tensorflow.shape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
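The `test_batch_func_*` helpers in `mitdeeplearning/lab1.py` above pin down the contract of the lab's `get_batch` function: it must return two `np.ndarray`s of shape `(batch_size, seq_length)` with `x[:, 1:] == y[:, :-1]`, i.e. the labels are the inputs shifted one step ahead. A sketch of a `get_batch` that satisfies those checks follows; the implementation details are an assumption for illustration, not the course's reference solution.

```python
import numpy as np

def get_batch(vectorized_songs, seq_length, batch_size):
    # Random start indices chosen so that both the window and its shifted label fit.
    n = vectorized_songs.shape[0] - 1
    idx = np.random.choice(n - seq_length, batch_size)

    # Input is a window of length seq_length; the label is the same window shifted by one.
    x_batch = np.array([vectorized_songs[i:i + seq_length] for i in idx])
    y_batch = np.array([vectorized_songs[i + 1:i + seq_length + 1] for i in idx])
    return x_batch, y_batch

# Check against the expectations encoded in the tests above.
data = np.arange(1000)
x, y = get_batch(data, seq_length=50, batch_size=8)
assert x.shape == (8, 50) and y.shape == (8, 50)
assert (x[:, 1:] == y[:, :-1]).all()
```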
kouohhashi/PySyft | [
"7415961b459f1d25f762467b346b7b94c1d6943f"
] | [
"syft/frameworks/torch/tensors/interpreters/precision.py"
] | [
"import torch\nimport warnings\n\nimport syft\nfrom syft.frameworks.torch.nn import nn\nfrom syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor\nfrom syft.generic.frameworks.hook import hook_args\nfrom syft.generic.frameworks.overload import overloaded\nfrom syft.generic.pointers.multi_pointer import MultiPointerTensor\nfrom syft.generic.abstract.tensor import AbstractTensor\nfrom syft.workers.abstract import AbstractWorker\n\nfrom syft_proto.frameworks.torch.tensors.interpreters.v1.precision_pb2 import (\n FixedPrecisionTensor as FixedPrecisionTensorPB,\n)\n\n\nclass FixedPrecisionTensor(AbstractTensor):\n def __init__(\n self,\n owner=None,\n id=None,\n field: int = None,\n dtype: str = \"long\",\n base: int = 10,\n precision_fractional: int = 3,\n kappa: int = 1,\n tags: set = None,\n description: str = None,\n ):\n \"\"\"Initializes a Fixed Precision tensor, which encodes all decimal point\n values using an underlying integer value.\n\n The FixedPrecision enables to manipulate floats over an interface which\n supports only integers, Such as _SPDZTensor.\n\n This is done by specifying a precision p and given a float x,\n multiply it with 10**p before rounding to an integer (hence you keep\n p decimals)\n\n Args:\n owner: An optional BaseWorker object to specify the worker on which\n the tensor is located.\n id: An optional string or integer id of the FixedPrecisionTensor.\n \"\"\"\n super().__init__(id=id, owner=owner, tags=tags, description=description)\n\n self.base = base\n self.precision_fractional = precision_fractional\n self.kappa = kappa\n self.dtype = dtype\n if dtype == \"long\":\n self.field = 2 ** 64\n self.torch_dtype = torch.int64\n elif dtype == \"int\":\n self.field = 2 ** 32\n self.torch_dtype = torch.int32\n else:\n # Since n mod 0 is not defined\n warnings.warn(\"Prefer to use dtype instead of field\")\n if isinstance(field, int) and field > 0:\n if field <= 2 ** 32:\n self.dtype = \"int\"\n self.field = 2 ** 32\n self.torch_dtype = torch.int32\n else:\n self.dtype = \"long\"\n self.field = 2 ** 64\n self.torch_dtype = torch.int64\n else:\n # Invalid args dtype and field\n raise ValueError(\n \"Unsupported arg value for dtype. 
Use dtype='long' or dtype='int'.\"\n )\n\n def get_class_attributes(self):\n \"\"\"\n Specify all the attributes need to build a wrapper correctly when returning a response,\n for example precision_fractional is important when wrapping the result of a method\n on a self which is a fixed precision tensor with a non default precision_fractional.\n \"\"\"\n return {\n \"field\": self.field,\n \"base\": self.base,\n \"precision_fractional\": self.precision_fractional,\n \"kappa\": self.kappa,\n \"dtype\": self.dtype,\n }\n\n @property\n def data(self):\n return self\n\n @data.setter\n def data(self, new_data):\n self.child = new_data.child\n return self\n\n @property\n def grad(self):\n \"\"\"\n Gradient makes no sense for Fixed Precision Tensor, so we make it clear\n that if someone query .grad on a Fixed Precision Tensor it doesn't error\n but returns grad and can't be set\n \"\"\"\n return None\n\n def backward(self, *args, **kwargs):\n \"\"\"Calling backward on Precision Tensor doesn't make sense, but sometimes a call\n can be propagated downward the chain to an Precision Tensor (for example in\n create_grad_objects), so we just ignore the call.\"\"\"\n pass\n\n def attr(self, attr_name):\n return self.__getattribute__(attr_name)\n\n def fix_precision(self, check_range=True):\n \"\"\"This method encodes the .child object using fixed precision\"\"\"\n\n rational = self.child\n upscaled = (rational * self.base ** self.precision_fractional).long()\n if check_range:\n assert (\n upscaled.abs() < (self.field / 2)\n ).all(), (\n f\"{rational} cannot be correctly embedded: choose bigger field or a lower precision\"\n )\n\n field_element = upscaled\n field_element.owner = rational.owner\n self.child = field_element.type(self.torch_dtype)\n return self\n\n def float_precision(self):\n \"\"\"this method returns a new tensor which has the same values as this\n one, encoded with floating point precision\"\"\"\n value = self.child.type(self.torch_dtype)\n gate = value.native_lt(0).type(self.torch_dtype)\n\n neg_nums = value * gate\n pos_nums = value * (1 - gate)\n result = (neg_nums + pos_nums).float() / (self.base ** self.precision_fractional)\n\n return result\n\n def truncate(self, precision_fractional, check_sign=True):\n truncation = self.base ** precision_fractional\n\n # We need to make sure that values are truncated \"towards 0\"\n # i.e. 
for a field of 100, 70 (equivalent to -30), should be truncated\n # at 97 (equivalent to -3), not 7\n if isinstance(self.child, AdditiveSharingTensor) or not check_sign: # Handle FPT>(wrap)>AST\n self.child = self.child / truncation\n return self\n else:\n gate = self.child.native_lt(0).type(self.torch_dtype)\n neg_nums = self.child / truncation\n pos_nums = self.child / truncation\n self.child = neg_nums * gate + pos_nums * (1 - gate)\n return self\n\n @overloaded.method\n def mod(self, _self, divisor):\n \"\"\"\n Define the modulo operation over object instances.\n \"\"\"\n if isinstance(divisor, (int, float)):\n scaled_divisor = int(divisor * self.base ** self.precision_fractional)\n if isinstance(_self, AdditiveSharingTensor):\n return getattr(_self, \"mod\")(scaled_divisor)\n else:\n return getattr(_self, \"fmod\")(scaled_divisor)\n\n response = getattr(_self, \"fmod\")(divisor)\n\n return response\n\n __mod__ = mod\n\n @overloaded.method\n def add(self, _self, other):\n \"\"\"Add two fixed precision tensors together.\n \"\"\"\n if isinstance(other, (int, float)):\n scaled_int = int(other * self.base ** self.precision_fractional)\n return getattr(_self, \"add\")(scaled_int)\n\n if isinstance(_self, AdditiveSharingTensor) and isinstance(other, torch.Tensor):\n # If we try to add a FPT>(wrap)>AST and a FPT>torch.tensor,\n # we want to perform AST + torch.tensor\n other = other.wrap()\n elif isinstance(other, AdditiveSharingTensor) and isinstance(_self, torch.Tensor):\n # If we try to add a FPT>torch.tensor and a FPT>(wrap)>AST,\n # we swap operators so that we do the same operation as above\n _self, other = other, _self.wrap()\n\n response = getattr(_self, \"add\")(other)\n\n return response\n\n __add__ = add\n __radd__ = add\n\n def add_(self, value_or_tensor, tensor=None):\n if tensor is None:\n result = self.add(value_or_tensor)\n else:\n result = self.add(value_or_tensor * tensor)\n\n self.child = result.child\n return self\n\n def __iadd__(self, other):\n \"\"\"Add two fixed precision tensors together.\n \"\"\"\n self.child = self.add(other).child\n\n return self\n\n @overloaded.method\n def sub(self, _self, other):\n \"\"\"Subtracts a fixed precision tensor from another one.\n \"\"\"\n if isinstance(other, (int, float)):\n scaled_int = int(other * self.base ** self.precision_fractional)\n return getattr(_self, \"sub\")(scaled_int)\n\n if isinstance(_self, AdditiveSharingTensor) and isinstance(other, torch.Tensor):\n # If we try to subtract a FPT>(wrap)>AST and a FPT>torch.tensor,\n # we want to perform AST - torch.tensor\n other = other.wrap()\n elif isinstance(other, AdditiveSharingTensor) and isinstance(_self, torch.Tensor):\n # If we try to subtract a FPT>torch.tensor and a FPT>(wrap)>AST,\n # we swap operators so that we do the same operation as above\n _self, other = -other, -_self.wrap()\n\n response = getattr(_self, \"sub\")(other)\n\n return response\n\n __sub__ = sub\n\n def __rsub__(self, other):\n return (self - other) * -1\n\n def sub_(self, value_or_tensor, tensor=None):\n if tensor is None:\n result = self.sub(value_or_tensor)\n else:\n result = self.sub(value_or_tensor * tensor)\n\n self.child = result.child\n return self\n\n def __isub__(self, other):\n self.child = self.sub(other).child\n\n return self\n\n @overloaded.method\n def t(self, _self, *args, **kwargs):\n \"\"\"Transpose a tensor. 
Hooked is handled by the decorator\"\"\"\n response = getattr(_self, \"t\")(*args, **kwargs)\n\n return response\n\n def mul_and_div(self, other, cmd):\n \"\"\"\n Hook manually mul and div to add the truncation/rescaling part\n which is inherent to these operations in the fixed precision setting\n \"\"\"\n changed_sign = False\n if isinstance(other, FixedPrecisionTensor):\n assert (\n self.precision_fractional == other.precision_fractional\n ), \"In mul and div, all args should have the same precision_fractional\"\n assert self.base == other.base, \"In mul and div, all args should have the same base\"\n\n if isinstance(other, (int, torch.Tensor, AdditiveSharingTensor)):\n new_self = self.child\n new_other = other\n elif isinstance(other, float):\n raise NotImplementedError(\n \"Can't multiply or divide a FixedPrecisionTensor with a float value\"\n )\n\n elif isinstance(self.child, (AdditiveSharingTensor, MultiPointerTensor)) and isinstance(\n other.child, torch.Tensor\n ):\n # If operands are FPT>AST and FPT>torch.tensor,\n # we want to perform the operation on AST and torch.tensor\n if cmd == \"mul\":\n new_self = self.child\n elif cmd == \"div\":\n new_self = self.child * self.base ** self.precision_fractional\n new_other = other\n\n elif isinstance(other.child, (AdditiveSharingTensor, MultiPointerTensor)) and isinstance(\n self.child, torch.Tensor\n ):\n # If operands are FPT>torch.tensor and FPT>AST,\n # we swap operators so that we do the same operation as above\n if cmd == \"mul\":\n new_self = other.child\n new_other = self\n elif cmd == \"div\":\n # TODO how to divide by AST?\n raise NotImplementedError(\n \"Division of a FixedPrecisionTensor by an AdditiveSharingTensor not implemented\"\n )\n\n elif (\n cmd == \"mul\"\n and isinstance(self.child, (AdditiveSharingTensor, MultiPointerTensor))\n and isinstance(other.child, (AdditiveSharingTensor, MultiPointerTensor))\n ):\n # If we try to multiply a FPT>torch.tensor with a FPT>AST,\n # we swap operators so that we do the same operation as above\n new_self, new_other, _ = hook_args.unwrap_args_from_method(\"mul\", self, other, None)\n\n else:\n # Replace all syft tensor with their child attribute\n new_self, new_other, _ = hook_args.unwrap_args_from_method(cmd, self, other, None)\n\n # To avoid problems with negative numbers\n # we take absolute value of the operands\n # The problems could be 1) bad truncation for multiplication\n # 2) overflow when scaling self in division\n\n # sgn_self is 1 when new_self is positive else it's 0\n # The comparison is different if new_self is a torch tensor or an AST\n sgn_self = (new_self > 0).type(self.torch_dtype)\n pos_self = new_self * sgn_self\n neg_self = new_self * (sgn_self - 1)\n new_self = neg_self + pos_self\n\n # sgn_other is 1 when new_other is positive else it's 0\n # The comparison is different if new_other is a torch tensor or an AST\n sgn_other = (new_other > 0).type(self.torch_dtype)\n pos_other = new_other * sgn_other\n neg_other = new_other * (sgn_other - 1)\n new_other = neg_other + pos_other\n\n # If both have the same sign, sgn is 1 else it's 0\n # To be able to write sgn = 1 - (sgn_self - sgn_other) ** 2,\n # we would need to overload the __add__ for operators int and AST.\n sgn = -((sgn_self - sgn_other) ** 2) + 1\n changed_sign = True\n\n if cmd == \"div\":\n new_self *= self.base ** self.precision_fractional\n # Send it to the appropriate class and get the response\n response = getattr(new_self, cmd)(new_other)\n # Put back SyftTensor on the tensors found in the response\n 
response = hook_args.hook_response(\n cmd, response, wrap_type=type(self), wrap_args=self.get_class_attributes()\n )\n if not isinstance(other, (int, torch.Tensor, AdditiveSharingTensor)):\n if cmd == \"mul\":\n # If operation is mul, we need to truncate\n response = response.truncate(self.precision_fractional, check_sign=False)\n\n if changed_sign:\n # Give back its sign to response\n pos_res = response * sgn\n neg_res = response * (sgn - 1)\n response = neg_res + pos_res\n\n return response\n\n def mul(self, other):\n return self.mul_and_div(other, \"mul\")\n\n __mul__ = mul\n\n def __imul__(self, other):\n self.child = self.mul_and_div(other, \"mul\").child\n return self\n\n mul_ = __imul__\n\n def div(self, other):\n return self.mul_and_div(other, \"div\")\n\n __truediv__ = div\n\n def __itruediv__(self, other):\n self.child = self.mul_and_div(other, \"div\").child\n return self\n\n def pow(self, power):\n \"\"\"\n Compute integer power of a number by recursion using mul\n\n This uses the following trick:\n - Divide power by 2 and multiply base to itself (if the power is even)\n - Decrement power by 1 to make it even and then follow the first step\n\n Args:\n power (int): the exponent supposed to be an integer > 0\n \"\"\"\n base = self\n\n result = None\n while power > 0:\n # If power is odd\n if power % 2 == 1:\n result = result * base if result is not None else base\n\n # Divide the power by 2\n power = power // 2\n # Multiply base to itself\n base = base * base\n\n return result\n\n __pow__ = pow\n\n def matmul(self, *args, **kwargs):\n \"\"\"\n Hook manually matmul to add the truncation part which is inherent to multiplication\n in the fixed precision setting\n \"\"\"\n\n other = args[0]\n\n if isinstance(other, FixedPrecisionTensor):\n assert (\n self.precision_fractional == other.precision_fractional\n ), \"In matmul, all args should have the same precision_fractional\"\n\n if isinstance(self.child, AdditiveSharingTensor) and isinstance(other.child, torch.Tensor):\n # If we try to matmul a FPT>AST with a FPT>torch.tensor,\n # we want to perform AST @ torch.tensor\n new_self = self.child\n new_args = (other,)\n new_kwargs = kwargs\n\n elif isinstance(other.child, AdditiveSharingTensor) and isinstance(\n self.child, torch.Tensor\n ):\n # If we try to matmul a FPT>torch.tensor with a FPT>AST,\n # we swap operators so that we do the same operation as above\n new_self = other.child\n new_args = (self,)\n new_kwargs = kwargs\n else:\n # Replace all syft tensor with their child attribute\n new_self, new_args, new_kwargs = hook_args.unwrap_args_from_method(\n \"matmul\", self, args, kwargs\n )\n\n # Send it to the appropriate class and get the response\n response = getattr(new_self, \"matmul\")(*new_args, **new_kwargs)\n\n # Put back SyftTensor on the tensors found in the response\n response = hook_args.hook_response(\n \"matmul\", response, wrap_type=type(self), wrap_args=self.get_class_attributes()\n )\n\n response = response.truncate(other.precision_fractional)\n\n return response\n\n __matmul__ = matmul\n mm = matmul\n\n def reciprocal(self):\n ones = self * 0 + 1\n return ones / self\n\n # Approximations:\n def inverse(self, iterations=8):\n \"\"\"\n Computes an approximation of the matrix inversion using Newton-Schulz\n iterations\n \"\"\"\n # TODO: should we add non-approximate version if self.child is a pure tensor?\n\n assert len(self.shape) >= 2, \"Can't compute inverse on non-matrix\"\n assert self.shape[-1] == self.shape[-2], \"Must be batches of square matrices\"\n\n 
inverse = (0.1 * torch.eye(self.shape[-1])).fix_prec(**self.get_class_attributes()).child\n\n for _ in range(iterations):\n inverse = 2 * inverse - inverse @ self @ inverse\n\n return inverse\n\n def exp(self, iterations=8):\n r\"\"\"\n Approximates the exponential function using a limit approximation:\n exp(x) = \\lim_{n -> infty} (1 + x / n) ^ n\n\n Here we compute exp by choosing n = 2 ** d for some large d equal to\n iterations. We then compute (1 + x / n) once and square `d` times.\n\n Args:\n iterations (int): number of iterations for limit approximation\n\n Ref: https://github.com/LaRiffle/approximate-models\n \"\"\"\n return (1 + self / 2 ** iterations) ** (2 ** iterations)\n\n def sign(self):\n return (self > 0) + (self < 0) * (-1)\n\n @staticmethod\n def _sigmoid_exp(tensor):\n \"\"\"\n Implementation taken from FacebookResearch - CrypTen project\n\n Compute the sigmoid using the exp approximation\n sigmoid(x) = 1 / (1 + exp(-x))\n\n For stability:\n sigmoid(x) = (sigmoid(|x|) - 0.5) * sign(x) + 0.5\n\n Ref: https://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/#numerically_stable_sigmoid_function # noqa: E501\n\n Args:\n tensor (tensor): values where sigmoid should be approximated\n \"\"\"\n\n sign = tensor.sign()\n\n # Make sure the elements are all positive\n x = tensor * sign\n ones = tensor * 0 + 1\n half = ones.div(2)\n result = (ones + (-ones * x).exp()).reciprocal()\n return (result - half) * sign + half\n\n @staticmethod\n def _sigmoid_maclaurin(tensor):\n \"\"\"\n Approximates the sigmoid function using Maclaurin, with polynomial\n interpolation of degree 5 over [-8,8]\n NOTE: This method is faster but not as precise as \"exp\"\n Ref: https://mortendahl.github.io/2017/04/17/private-deep-learning-with-mpc/#approximating-sigmoid # noqa: E501\n\n Args:\n tensor (tensor): values where sigmoid should be approximated\n \"\"\"\n\n weights = (\n torch.tensor([0.5, 1.91204779e-01, -4.58667307e-03, 4.20690803e-05])\n .fix_precision(**tensor.get_class_attributes())\n .child\n )\n degrees = [0, 1, 3, 5]\n\n # initiate with term of degree 0 to avoid errors with tensor ** 0\n one = tensor * 0 + 1\n result = one * weights[0]\n for i, d in enumerate(degrees[1:]):\n result += (tensor ** d) * weights[i + 1]\n\n return result\n\n @staticmethod\n def _sigmoid_chebyshev(tensor, maxval: int = 6, terms: int = 32):\n \"\"\"\n Implementation taken from FacebookResearch - CrypTen project\n Computes the sigmoid function as\n sigmoid(x) = (tanh(x /2) + 1) / 2\n\n Tanh is approximated using chebyshev polynomials\n Args:\n maxval (int): interval width used for tanh chebyshev polynomials\n terms (int): highest degree of Chebyshev polynomials for tanh.\n Must be even and at least 6.\n \"\"\"\n tanh_approx = tensor._tanh_chebyshev(tensor.div(2), maxval, terms)\n\n return tanh_approx.div(2) + 0.5\n\n def sigmoid(tensor, method=\"exp\"):\n \"\"\"\n Approximates the sigmoid function using a given method\n\n Args:\n tensor: the fixed precision tensor\n method (str): (default = \"chebyshev\")\n Possible values: \"exp\", \"maclaurin\", \"chebyshev\"\n \"\"\"\n\n sigmoid_f = getattr(tensor, f\"_sigmoid_{method}\")\n\n return sigmoid_f(tensor)\n\n def log(self, iterations=2, exp_iterations=8):\n \"\"\"Approximates the natural logarithm using 8th order modified Householder iterations.\n Recall that Householder method is an algorithm to solve a non linear equation f(x) = 0.\n Here f: x -> 1 - C * exp(-x) with C = self\n\n Iterations are computed by:\n y_0 = some constant\n h = 1 - self * 
exp(-y_n)\n y_{n+1} = y_n - h * (1 + h / 2 + h^2 / 3 + h^3 / 6 + h^4 / 5 + h^5 / 7)\n\n Args:\n iterations (int): number of iterations for 6th order modified\n Householder approximation.\n exp_iterations (int): number of iterations for limit approximation of exp\n\n Ref: https://github.com/LaRiffle/approximate-models\n \"\"\"\n\n y = self / 31 + 1.59 - 20 * (-2 * self - 1.4).exp(iterations=exp_iterations)\n\n # 6th order Householder iterations\n for i in range(iterations):\n h = [1 - self * (-y).refresh().exp(iterations=exp_iterations)]\n for i in range(1, 5):\n h.append(h[-1] * h[0])\n\n y -= h[0] * (1 + h[0] / 2 + h[1] / 3 + h[2] / 4 + h[3] / 5 + h[4] / 6)\n\n return y\n\n @staticmethod\n def _tanh_chebyshev(tensor, maxval: int = 6, terms: int = 32):\n r\"\"\"\n Implementation taken from FacebookResearch - CrypTen project\n Computes tanh via Chebyshev approximation with truncation.\n tanh(x) = \\sum_{j=1}^terms c_{2j - 1} P_{2j - 1} (x / maxval)\n where c_i is the ith Chebyshev series coefficient and P_i is ith polynomial.\n The approximation is truncated to +/-1 outside [-maxval, maxval].\n\n Args:\n tensor (tensor): values where the tanh needs to be approximated\n maxval (int): interval width used for computing chebyshev polynomials\n terms (int): highest degree of Chebyshev polynomials.\n Must be even and at least 6.\n\n More details can be found in the paper:\n Guo, Chuan and Hannun, Awni and Knott, Brian and van der Maaten,\n Laurens and Tygert, Mark and Zhu, Ruiyu,\n \"Secure multiparty computations in floating-point arithmetic\", Jan-2020\n Link: http://tygert.com/realcrypt.pdf\n\n \"\"\"\n\n coeffs = syft.common.util.chebyshev_series(torch.tanh, maxval, terms)[1::2]\n coeffs = coeffs.fix_precision(**tensor.get_class_attributes())\n coeffs = coeffs.unsqueeze(1)\n\n value = torch.tensor(maxval).fix_precision(**tensor.get_class_attributes())\n tanh_polys = syft.common.util.chebyshev_polynomials(tensor.div(value.child), terms)\n tanh_polys_flipped = tanh_polys.unsqueeze(dim=-1).transpose(0, -1).squeeze(dim=0)\n\n out = tanh_polys_flipped.matmul(coeffs.child)\n\n # truncate outside [-maxval, maxval]\n gate_up = tensor > value\n gate_down = -tensor > value\n res = gate_up - gate_down\n out = out.squeeze(1) * (1 - gate_up - gate_down)\n out = res + out\n\n return out\n\n @staticmethod\n def _tanh_sigmoid(tensor):\n \"\"\"\n Compute the tanh using the sigmoid approximation\n\n Args:\n tensor (tensor): values where tanh should be approximated\n \"\"\"\n\n return 2 * torch.sigmoid(2 * tensor) - 1\n\n def tanh(tensor, method=\"chebyshev\"):\n tanh_f = getattr(tensor, f\"_tanh_{method}\")\n\n return tanh_f(tensor)\n\n # Binary ops\n @overloaded.method\n def __gt__(self, _self, other):\n result = _self.__gt__(other)\n return result.type(self.torch_dtype) * self.base ** self.precision_fractional\n\n @overloaded.method\n def __ge__(self, _self, other):\n result = _self.__ge__(other)\n return result.type(self.torch_dtype) * self.base ** self.precision_fractional\n\n @overloaded.method\n def __lt__(self, _self, other):\n result = _self.__lt__(other)\n return result.type(self.torch_dtype) * self.base ** self.precision_fractional\n\n @overloaded.method\n def __le__(self, _self, other):\n result = _self.__le__(other)\n return result.type(self.torch_dtype) * self.base ** self.precision_fractional\n\n @overloaded.method\n def eq(self, _self, other):\n result = _self.eq(other)\n return result.type(self.torch_dtype) * self.base ** self.precision_fractional\n\n __eq__ = eq\n\n @overloaded.method\n def 
argmax(self, _self, **kwargs):\n result = _self.argmax(**kwargs)\n return result.long() * self.base ** self.precision_fractional\n\n @overloaded.method\n def argmin(self, _self, **kwargs):\n result = _self.argmin(**kwargs)\n return result.long() * self.base ** self.precision_fractional\n\n def var(self, unbiased=False, **kwargs):\n mu = self.mean(**kwargs)\n unbiased_self = self - mu\n mean = (unbiased_self * unbiased_self).mean(**kwargs)\n if unbiased:\n if kwargs.get(\"dim\"):\n dim = kwargs[\"dim\"]\n numel = self.shape[dim]\n else:\n numel = self.numel()\n return mean * numel / (numel - 1)\n else:\n return mean\n\n @staticmethod\n @overloaded.module\n def torch(module):\n def fmod(self, other):\n return self.__mod__(other)\n\n module.fmod = fmod\n\n def add(self, other):\n return self.__add__(other)\n\n module.add = add\n\n def sub(self, other):\n return self.__sub__(other)\n\n module.sub = sub\n\n def mul(self, other):\n return self.__mul__(other)\n\n module.mul = mul\n\n def div(self, other):\n return self.__truediv__(other)\n\n module.div = div\n\n def matmul(self, other):\n return self.matmul(other)\n\n module.matmul = matmul\n module.mm = matmul\n\n def addmm(bias, input_tensor, weight):\n matmul = input_tensor.matmul(weight)\n result = bias.add(matmul)\n return result\n\n module.addmm = addmm\n\n def inverse(self):\n return self.inverse()\n\n module.inverse = inverse\n\n def exp(tensor):\n return tensor.exp()\n\n module.exp = exp\n\n def sigmoid(tensor):\n return tensor.sigmoid()\n\n module.sigmoid = sigmoid\n\n def log(tensor):\n return tensor.log()\n\n module.log = log\n\n def tanh(tensor):\n return tensor.tanh()\n\n module.tanh = tanh\n\n def dot(self, other):\n return self.__mul__(other).sum()\n\n module.dot = dot\n\n # You can also overload functions in submodules!\n # Modules should be registered just like functions\n module.nn = nn # Handles all the overloading properly\n\n @classmethod\n def handle_func_command(cls, command):\n \"\"\"\n Receive an instruction for a function to be applied on a FixedPrecision Tensor,\n Perform some specific action (like logging) which depends of the\n instruction content, replace in the args all the FPTensors with\n their child attribute, forward the command instruction to the\n handle_function_command of the type of the child attributes, get the\n response and replace a FixedPrecision on top of all tensors found in\n the response.\n :param command: instruction of a function command: (command name,\n <no self>, arguments[, kwargs_])\n :return: the response of the function command\n \"\"\"\n cmd_name, _, args_, kwargs_ = command\n\n tensor = args_[0] if not isinstance(args_[0], (tuple, list)) else args_[0][0]\n\n # Check that the function has not been overwritten\n cmd = None\n try:\n # Try to get recursively the attributes in cmd = \"<attr1>.<attr2>.<attr3>...\"\n cmd = cls.rgetattr(cls, cmd_name)\n except AttributeError:\n pass\n\n if cmd is not None:\n return cmd(*args_, **kwargs_)\n\n # Replace all FixedPrecisionTensor with their child attribute\n new_args, new_kwargs, new_type = hook_args.unwrap_args_from_function(\n cmd_name, args_, kwargs_\n )\n\n # build the new command\n new_command = (cmd_name, None, new_args, new_kwargs)\n\n # Send it to the appropriate class and get the response\n response = new_type.handle_func_command(new_command)\n\n # Put back FixedPrecisionTensor on the tensors found in the response\n response = hook_args.hook_response(\n cmd_name, response, wrap_type=cls, wrap_args=tensor.get_class_attributes()\n )\n\n 
return response\n\n def share(self, *owners, protocol=None, field=None, dtype=None, crypto_provider=None):\n \"\"\"\n Forward the .share() command to the child tensor, and reconstruct a new\n FixedPrecisionTensor since the command is not inplace and should return\n a new chain\n\n Args:\n *owners: the owners of the shares of the resulting AdditiveSharingTensor\n protocol: the crypto protocol used to perform the computations ('snn' or 'fss')\n field: the field size in which the share values live\n dtype: the dtype in which the share values live\n crypto_provider: the worker used to provide the crypto primitives used\n to perform some computations on AdditiveSharingTensors\n\n Returns:\n A FixedPrecisionTensor whose child has been shared\n \"\"\"\n if dtype is None:\n dtype = self.dtype\n else:\n assert (\n dtype == self.dtype\n ), \"When sharing a FixedPrecisionTensor, the dtype of the resulting AdditiveSharingTensor \\\n must be the same as the one of the original tensor\"\n\n tensor = FixedPrecisionTensor(owner=self.owner, **self.get_class_attributes())\n\n tensor.child = self.child.share(\n *owners, protocol=protocol, dtype=dtype, crypto_provider=crypto_provider, no_wrap=True\n )\n return tensor\n\n def share_(self, *args, **kwargs):\n \"\"\"\n Performs an inplace call to share. The FixedPrecisionTensor returned is therefore the same,\n contrary to the classic share version\n \"\"\"\n dtype = kwargs.get(\"dtype\")\n if dtype is None:\n kwargs[\"dtype\"] = self.dtype\n else:\n assert (\n dtype == self.dtype\n ), \"When sharing a FixedPrecisionTensor, the dtype of the resulting AdditiveSharingTensor \\\n must be the same as the one of the original tensor\"\n self.child = self.child.share_(*args, no_wrap=True, **kwargs)\n return self\n\n @staticmethod\n def simplify(worker: AbstractWorker, tensor: \"FixedPrecisionTensor\") -> tuple:\n \"\"\"Takes the attributes of a FixedPrecisionTensor and saves them in a tuple.\n\n Args:\n worker: the worker doing the serialization\n tensor: a FixedPrecisionTensor.\n\n Returns:\n tuple: a tuple holding the unique attributes of the fixed precision tensor.\n \"\"\"\n chain = None\n if hasattr(tensor, \"child\"):\n chain = syft.serde.msgpack.serde._simplify(worker, tensor.child)\n\n return (\n syft.serde.msgpack.serde._simplify(worker, tensor.id),\n syft.serde.msgpack.serde._simplify(worker, tensor.field),\n tensor.dtype,\n tensor.base,\n tensor.precision_fractional,\n tensor.kappa,\n syft.serde.msgpack.serde._simplify(worker, tensor.tags),\n syft.serde.msgpack.serde._simplify(worker, tensor.description),\n chain,\n )\n\n @staticmethod\n def detail(worker: AbstractWorker, tensor_tuple: tuple) -> \"FixedPrecisionTensor\":\n \"\"\"This function reconstructs a FixedPrecisionTensor given it's attributes in form\n of a tuple.\n\n Args:\n worker: the worker doing the deserialization\n tensor_tuple: a tuple holding the attributes of the FixedPrecisionTensor\n Returns:\n FixedPrecisionTensor: a FixedPrecisionTensor\n Examples:\n shared_tensor = detail(data)\n \"\"\"\n\n (\n tensor_id,\n field,\n dtype,\n base,\n precision_fractional,\n kappa,\n tags,\n description,\n chain,\n ) = tensor_tuple\n\n tensor = FixedPrecisionTensor(\n owner=worker,\n id=syft.serde.msgpack.serde._detail(worker, tensor_id),\n field=syft.serde.msgpack.serde._detail(worker, field),\n dtype=dtype,\n base=base,\n precision_fractional=precision_fractional,\n kappa=kappa,\n tags=syft.serde.msgpack.serde._detail(worker, tags),\n description=syft.serde.msgpack.serde._detail(worker, description),\n 
)\n\n if chain is not None:\n chain = syft.serde.msgpack.serde._detail(worker, chain)\n tensor.child = chain\n\n return tensor\n\n @staticmethod\n def bufferize(worker, prec_tensor):\n \"\"\"\n This method serializes FixedPrecisionTensor into FixedPrecisionTensorPB.\n\n Args:\n prec_tensor (FixedPrecisionTensor): input FixedPrecisionTensor to be serialized.\n\n Returns:\n proto_prec_tensor (FixedPrecisionTensorPB): serialized FixedPrecisionTensor\n \"\"\"\n proto_prec_tensor = FixedPrecisionTensorPB()\n syft.serde.protobuf.proto.set_protobuf_id(proto_prec_tensor.id, prec_tensor.id)\n proto_prec_tensor.field = str(prec_tensor.field)\n proto_prec_tensor.dtype = prec_tensor.dtype\n proto_prec_tensor.base = prec_tensor.base\n proto_prec_tensor.kappa = prec_tensor.kappa\n proto_prec_tensor.precision_fractional = prec_tensor.precision_fractional\n for tag in prec_tensor.tags:\n proto_prec_tensor.tags.append(tag)\n proto_prec_tensor.description = prec_tensor.description\n if hasattr(prec_tensor, \"child\"):\n proto_prec_tensor.child.CopyFrom(\n syft.serde.protobuf.serde._bufferize(worker, prec_tensor.child)\n )\n\n return proto_prec_tensor\n\n @staticmethod\n def unbufferize(worker, proto_prec_tensor):\n \"\"\"\n This method deserializes FixedPrecisionTensorPB into FixedPrecisionTensor.\n\n Args:\n proto_prec_tensor (FixedPrecisionTensorPB): input FixedPrecisionTensor to be\n deserialized.\n\n Returns:\n tensor (FixedPrecisionTensor): deserialized FixedPrecisionTensorPB\n \"\"\"\n proto_id = syft.serde.protobuf.proto.get_protobuf_id(proto_prec_tensor.id)\n\n child = None\n if proto_prec_tensor.HasField(\"child\"):\n child = syft.serde.protobuf.serde._unbufferize(worker, proto_prec_tensor.child)\n\n tensor = FixedPrecisionTensor(\n owner=worker,\n id=proto_id,\n field=proto_prec_tensor.field,\n dtype=proto_prec_tensor.dtype,\n base=proto_prec_tensor.base,\n precision_fractional=proto_prec_tensor.precision_fractional,\n kappa=proto_prec_tensor.kappa,\n tags=set(proto_prec_tensor.tags),\n description=proto_prec_tensor.description,\n )\n\n tensor.child = child\n return tensor\n\n @staticmethod\n def get_protobuf_schema():\n \"\"\"\n Returns the protobuf schema used for FixedPrecisionTensor.\n\n Returns:\n Protobuf schema for FixedPrecisionTensor.\n \"\"\"\n return FixedPrecisionTensorPB\n\n\n### Register the tensor with hook_args.py ###\nhook_args.default_register_tensor(FixedPrecisionTensor)\n"
] | [
[
"torch.sigmoid",
"torch.eye",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pigtamer/SNIPER | [
"714e7399bee1e85854b3895ecaef7b143562eab4",
"95a4c773a6e855290504932fb43adeb39f029239"
] | [
"configs/faster/default_configs.py",
"lib/nms/setup_linux.py"
] | [
"# --------------------------------------------------------------\n# SNIPER: Efficient Multi-Scale Training\n# Licensed under The Apache-2.0 License [see LICENSE for details]\n# by Mahyar Najibi\n# --------------------------------------------------------------\n\nimport yaml\nimport numpy as np\nfrom easydict import EasyDict as edict\n\nconfig = edict()\nconfig.proposal_path = 'data/proposals'\nconfig.MXNET_VERSION = ''\n\nconfig.output_path = ''\nconfig.symbol = ''\nconfig.gpus = ''\nconfig.CLASS_AGNOSTIC = True\n# default training\nconfig.default = edict()\nconfig.default.kvstore = 'device'\n\n# network related params\nconfig.network = edict()\nconfig.network.pretrained = ''\nconfig.network.pretrained_epoch = 0\nconfig.network.PIXEL_MEANS = np.array([0, 0, 0])\nconfig.network.RPN_FEAT_STRIDE = 16\nconfig.network.FIXED_PARAMS = ['gamma', 'beta']\nconfig.network.ANCHOR_SCALES = (8, 16, 32)\nconfig.network.ANCHOR_RATIOS = (0.5, 1, 2)\nconfig.network.NUM_ANCHORS = len(config.network.ANCHOR_SCALES) * len(config.network.ANCHOR_RATIOS)\n\n# dataset related params\nconfig.dataset = edict()\nconfig.dataset.dataset = 'PascalVOC'\nconfig.dataset.image_set = '2007_trainval'\nconfig.dataset.test_image_set = '2007_test'\nconfig.dataset.root_path = './data'\nconfig.dataset.dataset_path = './data/VOCdevkit'\nconfig.dataset.NUM_CLASSES = 21\n\n\nconfig.TRAIN = edict()\nconfig.TRAIN.ONLY_PROPOSAL = False\nconfig.TRAIN.CPP_CHIPS = False\nconfig.TRAIN.USE_NEG_CHIPS = True\nconfig.TRAIN.CHIPS_DB_PARTS = 20\nconfig.TRAIN.WITH_MASK = False\nconfig.TRAIN.lr = 0\nconfig.TRAIN.VALID_RANGES = ((-1, 80), (32, 150), (120, -1))\nconfig.TRAIN.SCALES = (3.0, 1.667, 512.0)\n\nconfig.TRAIN.lr_step = ''\nconfig.TRAIN.scale = 1.0\nconfig.TRAIN.lr_factor = 0.1\nconfig.TRAIN.warmup = False\nconfig.TRAIN.warmup_lr = 0\nconfig.TRAIN.warmup_step = 0\nconfig.TRAIN.momentum = 0.9\nconfig.TRAIN.wd = 0.0005\nconfig.TRAIN.fp16 = False\nconfig.TRAIN.begin_epoch = 0\nconfig.TRAIN.end_epoch = 0\nconfig.TRAIN.model_prefix = ''\n\nconfig.TRAIN.ALTERNATE = edict()\nconfig.TRAIN.ALTERNATE.RPN_BATCH_IMAGES = 0\nconfig.TRAIN.ALTERNATE.RCNN_BATCH_IMAGES = 0\nconfig.TRAIN.ALTERNATE.rpn1_lr = 0\nconfig.TRAIN.ALTERNATE.rpn1_lr_step = '' # recommend '2'\nconfig.TRAIN.ALTERNATE.rpn1_epoch = 0 # recommend 3\nconfig.TRAIN.ALTERNATE.rfcn1_lr = 0\nconfig.TRAIN.ALTERNATE.rfcn1_lr_step = '' # recommend '5'\nconfig.TRAIN.ALTERNATE.rfcn1_epoch = 0 # recommend 8\nconfig.TRAIN.ALTERNATE.rpn2_lr = 0\nconfig.TRAIN.ALTERNATE.rpn2_lr_step = '' # recommend '2'\nconfig.TRAIN.ALTERNATE.rpn2_epoch = 0 # recommend 3\nconfig.TRAIN.ALTERNATE.rfcn2_lr = 0\nconfig.TRAIN.ALTERNATE.rfcn2_lr_step = '' # recommend '5'\nconfig.TRAIN.ALTERNATE.rfcn2_epoch = 0 # recommend 8\n# optional\nconfig.TRAIN.ALTERNATE.rpn3_lr = 0\nconfig.TRAIN.ALTERNATE.rpn3_lr_step = '' # recommend '2'\nconfig.TRAIN.ALTERNATE.rpn3_epoch = 0 # recommend 3\n\n# whether flip image\nconfig.TRAIN.FLIP = True\n# whether shuffle image\nconfig.TRAIN.SHUFFLE = True\n# whether use OHEM\nconfig.TRAIN.ENABLE_OHEM = False\n# size of images for each device, 2 for rcnn, 1 for rpn and e2e\nconfig.TRAIN.BATCH_IMAGES = 2\n# e2e changes behavior of anchor loader and metric\nconfig.TRAIN.END2END = False\n\n\n# R-CNN\n# rcnn rois batch size\nconfig.TRAIN.BATCH_ROIS = 128\nconfig.TRAIN.BATCH_ROIS_OHEM = 128\n# rcnn rois sampling params\nconfig.TRAIN.FG_FRACTION = 0.25\nconfig.TRAIN.FG_THRESH = 0.5\nconfig.TRAIN.BG_THRESH_HI = 0.5\nconfig.TRAIN.BG_THRESH_LO = 0.0\n# rcnn bounding box regression 
params\nconfig.TRAIN.BBOX_REGRESSION_THRESH = 0.5\nconfig.TRAIN.BBOX_WEIGHTS = np.array([1.0, 1.0, 1.0, 1.0])\nconfig.TRAIN.visualization_path = 'debug/visualization'\nconfig.TRAIN.visualization_freq= 100\n# RPN anchor loader\n# rpn anchors batch size\nconfig.TRAIN.RPN_BATCH_SIZE = 256\n# rpn anchors sampling params\nconfig.TRAIN.RPN_FG_FRACTION = 0.5\nconfig.TRAIN.RPN_POSITIVE_OVERLAP = 0.7\nconfig.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3\nconfig.TRAIN.RPN_CLOBBER_POSITIVES = False\n# rpn bounding box regression params\nconfig.TRAIN.RPN_BBOX_WEIGHTS = (1.0, 1.0, 1.0, 1.0)\nconfig.TRAIN.RPN_POSITIVE_WEIGHT = -1.0\n\n# used for end2end training\n# RPN proposal\nconfig.TRAIN.CXX_PROPOSAL = True\nconfig.TRAIN.RPN_NMS_THRESH = 0.7\nconfig.TRAIN.RPN_PRE_NMS_TOP_N = 12000\nconfig.TRAIN.RPN_POST_NMS_TOP_N = 2000\nconfig.TRAIN.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE\n# approximate bounding box regression\nconfig.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED = False\nconfig.TRAIN.BBOX_MEANS = (0.0, 0.0, 0.0, 0.0)\nconfig.TRAIN.BBOX_STDS = (0.1, 0.1, 0.2, 0.2)\n\nconfig.TEST = edict()\nconfig.TEST.NMS_SIGMA = 0.6\nconfig.TEST.TEST_FLAG = False\n# R-CNN testing\n# use rpn to generate proposal\nconfig.TEST.HAS_RPN = False\n# size of images for each device\nconfig.TEST.BATCH_IMAGES = 1\n\n# RPN proposal\nconfig.TEST.CXX_PROPOSAL = True\nconfig.TEST.RPN_NMS_THRESH = 0.7\nconfig.TEST.RPN_PRE_NMS_TOP_N = 6000\nconfig.TEST.RPN_POST_NMS_TOP_N = 300\nconfig.TEST.RPN_MIN_SIZE = config.network.RPN_FEAT_STRIDE\n\n# RPN generate proposal\nconfig.TEST.PROPOSAL_NMS_THRESH = 0.7\nconfig.TEST.PROPOSAL_PRE_NMS_TOP_N = 20000\nconfig.TEST.PROPOSAL_POST_NMS_TOP_N = 2000\nconfig.TEST.PROPOSAL_MIN_SIZE = config.network.RPN_FEAT_STRIDE\n\n# RCNN nms\nconfig.TEST.NMS = 0.3\n\nconfig.TEST.max_per_image = 300\n\n# Test Model Epoch\nconfig.TEST.test_epoch = 0\n\n\ndef update_config(config_file):\n exp_config = None\n with open(config_file) as f:\n exp_config = edict(yaml.load(f))\n for k, v in exp_config.items():\n if k in config:\n if isinstance(v, dict):\n if k == 'TRAIN':\n if 'BBOX_WEIGHTS' in v:\n v['BBOX_WEIGHTS'] = np.array(v['BBOX_WEIGHTS'])\n elif k == 'network':\n if 'PIXEL_MEANS' in v:\n v['PIXEL_MEANS'] = np.array(v['PIXEL_MEANS'])\n for vk, vv in v.items():\n config[k][vk] = vv\n else:\n if k == 'SCALES':\n if type(v)!=list:\n config[k][0] = (tuple(v))\n else:\n config[k] = v\n else:\n config[k] = v\n else:\n raise ValueError(\"key must exist in config.py\")\n\n\ndef update_config_from_list(cfg_list):\n \"\"\"Set config keys via list (e.g., from command line).\"\"\"\n from ast import literal_eval\n assert len(cfg_list) % 2 == 0\n for k, v in zip(cfg_list[0::2], cfg_list[1::2]):\n key_list = k.split('.')\n d = config\n for subkey in key_list[:-1]:\n assert d.has_key(subkey)\n d = d[subkey]\n subkey = key_list[-1]\n assert d.has_key(subkey)\n try:\n value = literal_eval(v)\n except:\n # handle the case when v is a string literal\n value = v\n assert type(value) == type(d[subkey]), \\\n 'type {} does not match original type {}'.format(\n type(value), type(d[subkey]))\n d[subkey] = value\n",
"# --------------------------------------------------------\n# Deformable Convolutional Networks\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)\n# --------------------------------------------------------\n\nimport os\nfrom os.path import join as pjoin\nfrom setuptools import setup\nfrom distutils.extension import Extension\nfrom Cython.Distutils import build_ext\nimport numpy as np\n\n\ndef find_in_path(name, path):\n \"Find a file in a search path\"\n # Adapted fom\n # http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/\n for dir in path.split(os.pathsep):\n binpath = pjoin(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None\n\n\ndef locate_cuda():\n \"\"\"Locate the CUDA environment on the system\n Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'\n and values giving the absolute path to each directory.\n Starts by looking for the CUDAHOME env variable. If not found, everything\n is based on finding 'nvcc' in the PATH.\n \"\"\"\n\n # first check if the CUDAHOME env variable is in use\n if 'CUDAHOME' in os.environ:\n home = os.environ['CUDAHOME']\n nvcc = pjoin(home, 'bin', 'nvcc')\n else:\n # otherwise, search the PATH for NVCC\n default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')\n nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)\n if nvcc is None:\n raise EnvironmentError('The nvcc binary could not be '\n 'located in your $PATH. Either add it to your path, or set $CUDAHOME')\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {'home':home, 'nvcc':nvcc,\n 'include': pjoin(home, 'include'),\n 'lib64': pjoin(home, 'lib64')}\n for k, v in cudaconfig.items():\n if not os.path.exists(v):\n raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))\n\n return cudaconfig\nCUDA = locate_cuda()\n\n\n# Obtain the numpy include directory. This logic works across numpy versions.\ntry:\n numpy_include = np.get_include()\nexcept AttributeError:\n numpy_include = np.get_numpy_include()\n\n\ndef customize_compiler_for_nvcc(self):\n \"\"\"inject deep into distutils to customize how the dispatch\n to gcc/nvcc works.\n If you subclass UnixCCompiler, it's not trivial to get your subclass\n injected in, and still have the right customizations (i.e.\n distutils.sysconfig.customize_compiler) run on it. So instead of going\n the OO route, I have this. Note, it's kindof like a wierd functional\n subclassing going on.\"\"\"\n\n # tell the compiler it can processes .cu\n self.src_extensions.append('.cu')\n\n # save references to the default compiler_so and _comple methods\n default_compiler_so = self.compiler_so\n super = self._compile\n\n # now redefine the _compile method. 
This gets executed for each\n # object but distutils doesn't have the ability to change compilers\n # based on source extension: we add it.\n def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):\n if os.path.splitext(src)[1] == '.cu':\n # use the cuda for .cu files\n self.set_executable('compiler_so', CUDA['nvcc'])\n # use only a subset of the extra_postargs, which are 1-1 translated\n # from the extra_compile_args in the Extension class\n postargs = extra_postargs['nvcc']\n else:\n postargs = extra_postargs['gcc']\n\n super(obj, src, ext, cc_args, postargs, pp_opts)\n # reset the default compiler_so, which we might have changed for cuda\n self.compiler_so = default_compiler_so\n\n # inject our redefined _compile method into the class\n self._compile = _compile\n\n\n# run the customize_compiler\nclass custom_build_ext(build_ext):\n def build_extensions(self):\n customize_compiler_for_nvcc(self.compiler)\n build_ext.build_extensions(self)\n\n\next_modules = [\n Extension(\n \"cpu_nms\",\n [\"cpu_nms.pyx\"],\n extra_compile_args={'gcc': [\"-Wno-cpp\", \"-Wno-unused-function\"]},\n include_dirs = [numpy_include]\n ),\n Extension('gpu_nms',\n ['nms_kernel.cu', 'gpu_nms.pyx'],\n library_dirs=[CUDA['lib64']],\n libraries=['cudart'],\n language='c++',\n runtime_library_dirs=[CUDA['lib64']],\n # this syntax is specific to this build system\n # we're only going to use certain compiler args with nvcc and not with\n # gcc the implementation of this trick is in customize_compiler() below\n extra_compile_args={'gcc': [\"-Wno-unused-function\"],\n 'nvcc': ['-arch=sm_35',\n '--ptxas-options=-v',\n '-c',\n '--compiler-options',\n \"'-fPIC'\"]},\n include_dirs = [numpy_include, CUDA['include']]\n ),\n]\n\nsetup(\n name='nms',\n ext_modules=ext_modules,\n # inject our custom trigger\n cmdclass={'build_ext': custom_build_ext},\n)\n"
] | [
[
"numpy.array"
],
[
"numpy.get_numpy_include",
"numpy.get_include"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ankona/Learning-OpenCV-4-Computer-Vision-with-Python-Third-Edition | [
"ee29cfefb4f21ba5acf6222aa69ef1c05c8fc05d"
] | [
"chapter07/non_max_suppression.py"
] | [
"# import the necessary packages\nimport numpy as np\n\n# Malisiewicz et al.\n# Python port by Adrian Rosebrock\n# https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/\ndef non_max_suppression_fast(boxes, overlapThresh):\n # if there are no boxes, return an empty list\n if len(boxes) == 0:\n return []\n\n # initialize the list of picked indexes \n pick = []\n\n # grab the coordinates of the bounding boxes\n x1 = boxes[:,0]\n y1 = boxes[:,1]\n x2 = boxes[:,2]\n y2 = boxes[:,3]\n scores = boxes[:,4]\n # compute the area of the bounding boxes and sort the bounding\n # boxes by the score/probability of the bounding box\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n idxs = np.argsort(scores)[::-1]\n\n # keep looping while some indexes still remain in the indexes\n # list\n while len(idxs) > 0:\n # grab the last index in the indexes list and add the\n # index value to the list of picked indexes\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n # find the largest (x, y) coordinates for the start of\n # the bounding box and the smallest (x, y) coordinates\n # for the end of the bounding box\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n # compute the width and height of the bounding box\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n # compute the ratio of overlap\n overlap = (w * h) / area[idxs[:last]]\n\n # delete all indexes from the index list that have\n idxs = np.delete(idxs, np.concatenate(([last],\n np.where(overlap > overlapThresh)[0])))\n\n # return only the bounding boxes that were picked\n return boxes[pick]\n"
] | [
[
"numpy.argsort",
"numpy.where",
"numpy.maximum",
"numpy.minimum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lukerohrerUCSD/pytorch-wavenet | [
"14e6bd0fb453cc5bb760cfa14aa6256d14b2a40b"
] | [
"wavenet_training.py"
] | [
"import torch\nimport torch.optim as optim\nimport torch.utils.data\nimport time\nfrom datetime import datetime\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom model_logging import Logger\nfrom wavenet_modules import *\n\n\ndef print_last_loss(opt):\n print(\"loss: \", opt.losses[-1])\n\n\ndef print_last_validation_result(opt):\n print(\"validation loss: \", opt.validation_results[-1])\n\n\nclass WavenetTrainer:\n def __init__(self,\n model,\n dataset,\n optimizer=optim.Adam,\n lr=0.001,\n weight_decay=0,\n gradient_clipping=None,\n logger=Logger(),\n snapshot_path=None,\n snapshot_name='snapshot',\n snapshot_interval=1000,\n dtype=torch.FloatTensor,\n ltype=torch.LongTensor):\n self.model = model\n self.dataset = dataset\n self.dataloader = None\n self.lr = lr\n self.weight_decay = weight_decay\n self.clip = gradient_clipping\n self.optimizer_type = optimizer\n self.optimizer = self.optimizer_type(params=self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)\n self.logger = logger\n self.logger.trainer = self\n self.snapshot_path = snapshot_path\n self.snapshot_name = snapshot_name\n self.snapshot_interval = snapshot_interval\n self.dtype = dtype\n self.ltype = ltype\n\n def train(self,\n batch_size=32,\n epochs=10,\n continue_training_at_step=0):\n self.model.train()\n self.dataloader = torch.utils.data.DataLoader(self.dataset,\n batch_size=batch_size,\n shuffle=True,\n num_workers=8,\n pin_memory=False)\n step = continue_training_at_step\n for current_epoch in range(epochs):\n print(\"epoch\", current_epoch)\n tic = time.time()\n for (x, target) in iter(self.dataloader):\n x = Variable(x.type(self.dtype))\n target = Variable(target.view(-1).type(self.ltype))\n\n output = self.model(x)\n loss = F.cross_entropy(output.squeeze(), target.squeeze())\n self.optimizer.zero_grad()\n loss.backward()\n loss = loss.item()#.data[0]\n\n if self.clip is not None:\n torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clip)\n self.optimizer.step()\n step += 1\n\n # time step duration:\n if step == 100:\n toc = time.time()\n print(\"one training step does take approximately \" + str((toc - tic) * 0.01) + \" seconds)\")\n\n if step % self.snapshot_interval == 0:\n if self.snapshot_path is None:\n continue\n time_string = time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.gmtime())\n torch.save(self.model, self.snapshot_path + '/' + self.snapshot_name + '_' + time_string)\n\n self.logger.log(step, loss)\n\n def validate(self):\n self.model.eval()\n self.dataset.train = False\n total_loss = 0\n accurate_classifications = 0\n for (x, target) in iter(self.dataloader):\n x = Variable(x.type(self.dtype))\n target = Variable(target.view(-1).type(self.ltype))\n\n output = self.model(x)\n loss = F.cross_entropy(output.squeeze(), target.squeeze())\n total_loss += loss.item()#.data[0]\n\n predictions = torch.max(output, 1)[1].view(-1)\n correct_pred = torch.eq(target, predictions)\n accurate_classifications += torch.sum(correct_pred).data[0]\n # print(\"validate model with \" + str(len(self.dataloader.dataset)) + \" samples\")\n # print(\"average loss: \", total_loss / len(self.dataloader))\n avg_loss = total_loss / len(self.dataloader)\n avg_accuracy = accurate_classifications / (len(self.dataset)*self.dataset.target_length)\n self.dataset.train = True\n self.model.train()\n return avg_loss, avg_accuracy\n\n\ndef generate_audio(model,\n length=8000,\n temperatures=[0., 1.]):\n samples = []\n for temp in temperatures:\n samples.append(model.generate_fast(length, 
temperature=temp))\n samples = np.stack(samples, axis=0)\n return samples\n\n"
] | [
[
"torch.max",
"torch.eq",
"torch.sum",
"torch.utils.data.DataLoader",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
erikgeiser/law | [
"2c8a0d5161ea2b44063a79860f2bebdf66ff67d4"
] | [
"law/contrib/keras/formatter.py"
] | [
"# -*- coding: utf-8 -*-\n\n\"\"\"\nKeras target formatters.\n\"\"\"\n\nfrom law.target.formatter import Formatter\nfrom law.target.file import get_path\n\n\nclass ModelFormatter(Formatter):\n\n @classmethod\n def accepts(cls, path):\n return get_path(path).endswith(\".h5\")\n\n @classmethod\n def dump(cls, path, model, *args, **kwargs):\n model.save(path, *args, **kwargs)\n\n\nclass KerasModelFormatter(ModelFormatter):\n\n name = \"keras\"\n\n @classmethod\n def load(cls, path, *args, **kwargs):\n from keras.models import load_model\n return load_model(path, *args, **kwargs)\n\n\nclass TFKerasModelFormatter(ModelFormatter):\n\n name = \"tf_keras\"\n\n @classmethod\n def load(cls, path, *args, **kwargs):\n from tensorflow import keras\n return keras.models.load_model(path, *args, **kwargs)\n"
] | [
[
"tensorflow.keras.models.load_model"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
thanhhocse96/hcmut-simple-qna-nlp | [
"695578ea41e52a29499b69a82397e39a35b87ed9"
] | [
"utils/corpus_loader.py"
] | [
"import pandas as pd\n\nfrom utils.no_accent_vietnamese import no_accent_vietnamese\n\nfrom baseNLP.corpus import Corpus\nfrom baseNLP.word import Word\n\nimport constants as const\n\ndef category_define(cat_string):\n if cat_string == \"N\":\n return const.WORD_CAT.NOUN\n if cat_string == \"V\":\n return const.WORD_CAT.VERB\n if cat_string == \"PREP\":\n return const.WORD_CAT.PREP\n if cat_string == \"NUM\":\n return const.WORD_CAT.NUM\n if cat_string == \"ADV\":\n return const.WORD_CAT.ADV\n if cat_string == \"PRO\":\n return const.WORD_CAT.PRO\n if cat_string == \"NAME\":\n return const.WORD_CAT.NAME\n return const.WORD_CAT.OTHER\n\ndef corpus_loader(corpus_file, name_entity_file):\n corpus_df = pd.read_csv(corpus_file, header = 0, names = const.CORPUS_HEADER_LIST).sort_values(const.CORPUS_HEADER_LIST[1])\n name_df = pd.read_csv(name_entity_file, header = 0, names = const.STUDENTS_HEADER_LIST)\n\n corpus_list = list([\n Corpus(list([]), const.N_GRAM.ONE_GRAM),\n Corpus(list([]), const.N_GRAM.TWO_GRAM),\n Corpus(list([]), const.N_GRAM.MORE_GRAM)\n ])\n\n # Add word from corpus.csv file\n for row in corpus_df.to_dict('records'):\n \n corpus_word = Word(\n no_accent_vietnamese(row[const.CORPUS_HEADER_LIST[0]]).upper() + str(row[const.CORPUS_HEADER_LIST[3]]),\n row[const.CORPUS_HEADER_LIST[0]].lower(),\n category_define(row[const.CORPUS_HEADER_LIST[2]])\n )\n\n if row[const.CORPUS_HEADER_LIST[1]] == 1:\n corpus_list[0].word_list.append(corpus_word)\n if row[const.CORPUS_HEADER_LIST[1]] == 2:\n corpus_list[1].word_list.append(corpus_word)\n\n # Add word from students.csv file\n name_list = []\n for ele in name_df[const.STUDENTS_HEADER_LIST[0:5]].values.flatten():\n if ele not in name_list:\n name_list.append(ele)\n \n for ele in name_list:\n if ele not in corpus_list[2].word_list:\n corpus_word = Word(\n \"NAME\",\n ele.lower(),\n const.WORD_CAT.NAME\n )\n corpus_list[2].word_list.append(corpus_word)\n\n return corpus_list"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
zhang-free/S_Test | [
"1e8bf7d9227e8371349c7d4009d8778b65875c99"
] | [
"run.py"
] | [
"# -*- coding: utf-8 -*-\n\n# * Copyright (c) 2009-2018. Authors: see NOTICE file.\n# *\n# * Licensed under the Apache License, Version 2.0 (the \"License\");\n# * you may not use this file except in compliance with the License.\n# * You may obtain a copy of the License at\n# *\n# * http://www.apache.org/licenses/LICENSE-2.0\n# *\n# * Unless required by applicable law or agreed to in writing, software\n# * distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom operator import attrgetter\n\nimport cv2\nimport numpy as np\nfrom cytomine import CytomineJob\nfrom cytomine.models import ImageInstanceCollection, AnnotationCollection, Annotation\nfrom shapely.geometry import Polygon\n\n__author__ = \"Rubens Ulysse <[email protected]>\"\n__contributors__ = [\"Marée Raphaël <[email protected]>\", \"Stévens Benjamin\"]\n__copyright__ = \"Copyright 2010-2018 University of Liège, Belgium, http://www.cytomine.be/\"\n\n\ndef find_components(image):\n contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n\n components = []\n if len(contours) > 0:\n top_index = 0\n tops_remaining = True\n while tops_remaining:\n exterior = contours[top_index][:, 0, :].tolist()\n\n interiors = []\n # check if there are children and process if necessary\n if hierarchy[0][top_index][2] != -1:\n sub_index = hierarchy[0][top_index][2]\n subs_remaining = True\n while subs_remaining:\n interiors.append(contours[sub_index][:, 0, :].tolist())\n\n # check if there is another sub contour\n if hierarchy[0][sub_index][0] != -1:\n sub_index = hierarchy[0][sub_index][0]\n else:\n subs_remaining = False\n\n # add component tuple to components only if exterior is a polygon\n if len(exterior) > 3:\n components.append((exterior, interiors))\n\n # check if there is another top contour\n if hierarchy[0][top_index][0] != -1:\n top_index = hierarchy[0][top_index][0]\n else:\n tops_remaining = False\n return components\n\n\ndef main(argv):\n print(argv)\n with CytomineJob.from_cli(argv) as cj:\n\n images = ImageInstanceCollection().fetch_with_filter(\"project\", cj.parameters.cytomine_id_project)\n for image in cj.monitor(images, prefix=\"Running detection on image\", period=0.1):\n # Resize image if needed\n resize_ratio = max(image.width, image.height) / cj.parameters.max_image_size\n if resize_ratio < 1:\n resize_ratio = 1\n\n resized_width = int(image.width / resize_ratio)\n resized_height = int(image.height / resize_ratio)\n\n image.dump(dest_pattern=\"/tmp/{id}.jpg\", max_size=max(resized_width, resized_height), bits=image.bitDepth)\n img = cv2.imread(image.filename, cv2.IMREAD_GRAYSCALE)\n\n thresholded_img = cv2.adaptiveThreshold(img, 2**image.bitDepth, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n cv2.THRESH_BINARY, cj.parameters.threshold_blocksize,\n cj.parameters.threshold_constant)\n\n kernel = np.ones((5, 5), np.uint8)\n eroded_img = cv2.erode(thresholded_img, kernel, iterations=cj.parameters.erode_iterations)\n dilated_img = cv2.dilate(eroded_img, kernel, iterations=cj.parameters.dilate_iterations)\n\n extension = 10\n extended_img = cv2.copyMakeBorder(dilated_img, extension, extension, extension, extension,\n cv2.BORDER_CONSTANT, 
value=2**image.bitDepth)\n\n components = find_components(extended_img)\n zoom_factor = image.width / float(resized_width)\n for i, component in enumerate(components):\n converted = []\n for point in component[0]:\n x = int((point[0] - extension) * zoom_factor)\n y = int(image.height - ((point[1] - extension) * zoom_factor))\n converted.append((x, y))\n\n components[i] = Polygon(converted)\n\n # Find largest component (whole image)\n largest = max(components, key=attrgetter('area'))\n components.remove(largest)\n\n # Only keep components greater than 5% of whole image\n min_area = int(0.05 * image.width * image.height)\n\n annotations = AnnotationCollection()\n for component in components:\n if component.area > min_area:\n annotations.append(Annotation(location=component.wkt, id_image=image.id,\n id_terms=[cj.parameters.cytomine_id_predicted_term],\n id_project=cj.parameters.cytomine_id_project))\n\n if len(annotations) % 100 == 0:\n annotations.save()\n annotations = AnnotationCollection()\n\n annotations.save()\n\n cj.job.update(statusComment=\"Finished.\")\n\n\nif __name__ == \"__main__\":\n import sys\n\n main(sys.argv[1:])\n\n"
] | [
[
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MattTurnock/PlanetarySciencesMatt | [
"81954d2182d9577bd7327a98c45963ae42968df4"
] | [
"Ass4/Q1.py"
] | [
"from astropy import units as u\nfrom json_to_dict import constants\nimport numpy as np\npi = np.pi\n\n######################################################################################\n\ndef get_K(Ms, Mp, P, i, e=0*u.one, G=constants[\"G\"]):\n top = (2*pi*G)**(1/3) * Mp * np.sin(i)\n bottom = P**(1/3) * (Ms + Mp)**(2/3) * np.sqrt(1-e**2)\n K = top/bottom\n K = K.decompose()\n return K\n\ndef get_Mp(Ms, K, P, e=0*u.one, G=constants[\"G\"], i=0*u.deg, units=True):\n if units:\n Ms = (Ms.to(u.kg)).value\n K = (K.to(u.m/u.s)).value\n P = (P.to(u.s)).value\n e = e.value\n G = (G.to(u.Unit(\"m**3/(kg*s*s)\"))).value\n i = (i.to(u.rad)).value\n\n A = ( K * P**(1/3)/(2*pi*G)**(1/3))**1.5 * (np.sqrt(1-e**2))**1.5\n Mp = A*Ms/(np.sin(i)-A)\n\n if units:\n Mp = Mp*u.kg\n\n return Mp\n\ndef get_Mp_2(Ms, K_true, P, minMp=0, maxMp=3, iters=100, i=90*u.deg):\n\n Mps = np.linspace(minMp,maxMp,iters) * constants[\"MJupiter\"]\n Kdiffs = []\n for Mp in Mps:\n K = get_K(Ms, Mp, P, i)\n Kdiffs.append((K-K_true).value)\n Kdiffs = np.abs(np.array(Kdiffs))\n mindex = np.argmin((Kdiffs))\n\n return Kdiffs[mindex], Mps[mindex]/ constants[\"MJupiter\"]\n\ndef get_a(mu, P, units=True, unit_return=u.AU):\n inner = mu*(P/(2*pi))**2\n a = inner**(1/3)\n if units:\n a = a.decompose()\n a = a.to(unit_return)\n\n return a\n\nMs = constants[\"MSun\"]\nK1 = 32*u.m/u.s\nP1 = 150 * constants[\"TEarth\"]\nK2 = 88*u.m/u.s\nP2 = 1200 * constants[\"TEarth\"]\ni=90*u.deg\n\n#######################################################################################################################\n# Part D\nprint(\"=====================================Part D============================================\")\ncalc1=True\nif calc1:\n Kdiff1, Mp1 = get_Mp_2(Ms, K1, P1, minMp=0, maxMp=1, iters=10000, i=i)\n print(\"(Minimum) mass of planet 1 (in Jupiter masses), Mp1: %s \\nWith K error: %s m/s \\n\" %(Mp1, Kdiff1))\n\n Kdiff2, Mp2 = get_Mp_2(Ms, K2, P2, minMp=4, maxMp=5, iters=10000, i=i)\n print(\"(Minimum) mass of planet 2 (in Jupiter masses), Mp2: %s \\nWith K error: %s m/s \\n\" %(Mp2, Kdiff2))\n\n\n########################################################################################################################\n# Part G\nprint(\"=====================================Part G============================================\")\niG = 30*u.deg\ncalc2=True\nif calc2:\n Kdiff1G, Mp1G = get_Mp_2(Ms, K1, P1, minMp=1, maxMp=2, iters=10000, i=iG)\n print(\"Mass of planet 1 (in Jupiter masses), for i=30 deg Mp1G: %s \\nWith K error: %s m/s \\n\" %(Mp1G, Kdiff1G))\n\n Kdiff2G, Mp2G = get_Mp_2(Ms, K2, P2, minMp=0, maxMp=100, iters=10000, i=iG)\n print(\"Mass of planet 2 (in Jupiter masses), for i=30 deg Mp2G: %s \\nWith K error: %s m/s \\n\" %(Mp2G, Kdiff2G))\n\n########################################################################################################################\n# Part H\nprint(\"=====================================Part H============================================\")\nmu = constants[\"G\"]*constants[\"MSun\"]\na1 = get_a(mu, P1)\nprint(\"Semi-major axis of planet 1, a1 (ie D1): %s \\n\" %a1)\n\na2 = get_a(mu, P2)\nprint(\"Semi-major axis of planet 2, a2 (ie D2): %s \\n\" %a2)\n\n########################################################################################################################\n# Part I\nprint(\"=====================================Part I============================================\")\n\nMi = 0.8*Ms\nmu_i = Mi*constants[\"G\"]\n\ncalc3=True\nif calc3:\n Kdiff1_I, Mp1_I = get_Mp_2(Mi, K1, P1, minMp=0, 
maxMp=1, iters=10000, i=i)\n print(\"(Minimum) mass of planet 1 (in Jupiter masses), for 0.8 solar mass star, Mp1_I: %s \\nWith K error: %s m/s \\n\" %(Mp1_I, Kdiff1_I))\n\n Kdiff2_I, Mp2_I = get_Mp_2(Mi, K2, P2, minMp=3, maxMp=5, iters=10000, i=i)\n print(\"(Minimum) mass of planet 2 (in Jupiter masses), for 0.8 solar mass star, Mp2_I: %s \\nWith K error: %s m/s \\n\" %(Mp2_I, Kdiff2_I))\n\n\na1_I = get_a(mu_i, P1)\nprint(\"Semi-major axis of planet 1, for a 0.8 solar mass star, a1_I (ie D1_I): %s \\n\" %a1_I)\n\na2_I = get_a(mu_i, P2)\nprint(\"Semi-major axis of planet 2, for a 0.8 solar mass star, a2_I (ie D2_I): %s \\n\" %a2_I)"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"numpy.sin",
"numpy.argmin",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cwerner/dataflow | [
"15195d020649da82783113b6c24bc529b91af5f3"
] | [
"flow.py"
] | [
"import html\nimport io\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport pandas as pd\nimport numpy as np\n\nimport pendulum\nimport prefect\nfrom prefect import Flow, task, unmapped\nfrom prefect.core.parameter import Parameter\nfrom prefect.engine.results import S3Result\nfrom prefect.engine.serializers import PickleSerializer\nfrom prefect.engine.state import Success\nfrom prefect.environments.storage import Docker\nfrom prefect.executors import DaskExecutor\nfrom prefect.run_configs import UniversalRun\nfrom prefect.schedules import Schedule\nfrom prefect.schedules.clocks import CronClock\nfrom prefect.tasks.aws.s3 import S3Upload\nfrom prefect.tasks.great_expectations.checkpoints import RunGreatExpectationsValidation\nfrom prefect.tasks.notifications.email_task import EmailTask\nfrom prefect.triggers import any_failed, all_finished\n\nfrom src.helpers import s3_kwargs\nfrom src.tasks import S3UploadDir\n\n\n# TODO: match this with GE parsing\ndef parse_dat_file(target: Union[Path, str], header: Union[Path, str]) -> pd.DataFrame:\n \"\"\"Parse .dat data file using header file for colnames\"\"\"\n colnames = pd.read_csv(header).columns.values\n return pd.read_csv(target, names=colnames, parse_dates=True)\n\n\nupload_to_s3 = S3Upload(boto_kwargs=s3_kwargs)\nupload_dir_to_s3 = S3UploadDir(boto_kwargs=s3_kwargs, trigger=all_finished)\n\n\n@task(trigger=any_failed)\ndef email_on_failure(notification_email):\n \"\"\"Send email on FAIL state\"\"\"\n\n flow_name = prefect.context.flow_name\n\n # TODO: Check how to create a custom prefect email/ email as a service\n # (use: [email protected])\n task = EmailTask(\n subject=f\"Prefect alert: {flow_name}\",\n msg=html.escape(f\"{flow_name} GreatExpectation Validation failed.\"),\n email_from=\"[email protected]\",\n email_to=notification_email,\n smtp_server=\"smtp.kit.edu\", \n smtp_port=25,\n smtp_type=\"STARTTLS\",\n ).run()\n return task\n\n\ndef flip_fail_to_success(task, old_state, new_state):\n \"\"\"A cheaky state_handler that flips a fail outcome to success\"\"\"\n if new_state.is_failed():\n return_state = Success(result=new_state.result)\n else:\n return_state = new_state\n return return_state\n\n\ndef create_filename(sitename: str, date: str) -> str:\n \"\"\"Create filename from site and date\"\"\"\n assert 2010 < date.year, \"jday must be in range >= 2010\"\n\n sitename_short = sitename.capitalize()[:3]\n year_short = str(date.year)[2:4]\n jday = date.timetuple().tm_yday\n return f\"{sitename_short}_M_{year_short}_{jday}.dat\"\n\n\n@task\ndef create_flags(df: pd.DataFrame, validation) -> pd.DataFrame:\n \"\"\"Use validation results and create a flag dataframe\"\"\"\n df_flags = df[[\"TIMESTAMP\"]]\n rows, cols = df.shape\n flags = np.ones((rows, cols - 1), dtype=int)\n\n df_flags = pd.concat(\n [\n df_flags,\n pd.DataFrame(data=flags, index=df.index, columns=df.columns.values[1:]),\n ],\n axis=1,\n )\n\n return df_flags\n\n\n# Define checkpoint task\nvalidation_task = RunGreatExpectationsValidation(\n # state_handlers=[flip_fail_to_success]\n)\n\n\n@task\ndef get_batch_kwargs(datasource_name, dataset):\n \"\"\"Retrieve batch kwargs including csv dataset\"\"\"\n dataset = pd.read_csv(Path(\"/home/data\") / dataset)\n return {\"dataset\": dataset, \"datasource\": datasource_name}\n\n\n@task\ndef retrieve_and_parse_target_file(\n location: str, site: str, current_time: Optional[str], offset: int = 0\n):\n \"\"\"Retrieve and parse target file\"\"\"\n current_time = current_time or pendulum.now(\"utc\")\n if 
isinstance(current_time, str):\n current_time = pendulum.parse(current_time)\n\n target_date = current_time.subtract(days=offset)\n\n filename = create_filename(site, target_date)\n\n basepath = Path(location) / site / \"micromet\" / \"raw\" / \"slow_response\"\n target = basepath / str(target_date.year) / filename\n\n site_short = site.capitalize()[:3]\n header = basepath / f\"{site_short}_M_header.csv\"\n\n outpath = Path(\"/home\") / \"data\"\n outpath.mkdir(exist_ok=True)\n\n outfile = outpath / target.name.replace(\".dat\", \".csv\")\n\n df = parse_dat_file(target, header)\n df.to_csv(outfile, index=False)\n\n return str(outfile.name)\n\n\n@task\ndef derive_flags_filename(filename: str):\n return filename.replace(\".csv\", \".csv.flags\")\n\n\n@task\ndef prepare_df_for_s3(df: pd.DataFrame) -> str:\n \"\"\"Convert dataframe for s3 upload\"\"\"\n csv_str = io.StringIO()\n df.to_csv(csv_str, index=False)\n return csv_str.getvalue()\n\n\n@task(log_stdout=True, trigger=all_finished)\ndef show_validation(results):\n logger = prefect.context.get(\"logger\")\n logger.info(f\"{type(results)}\")\n logger.info(f\"{dir(results)}\")\n logger.info(f\"{[res for res in results['run_results']]}\")\n\n key = list(results[\"run_results\"].keys())[0]\n\n valresult = results[\"run_results\"][key][\"validation_result\"]\n for results in valresult.results:\n # only check column exceptions\n if \"column\" in results.expectation_config.kwargs:\n col = results.expectation_config.kwargs[\"column\"]\n exp = results.expectation_config[\"expectation_type\"]\n\n if \"unexpected_index_list\" in results.result:\n logger.warning(f\"{col} {exp} :: {results.result}\")\n\n logger.info(f\"{col} {exp} :: {results.result}\")\n\n return results\n\n\nresult = S3Result(\n bucket=\"dataflow-ge-dailydata\",\n location=\"{task_name}.pickle\",\n boto3_kwargs=s3_kwargs,\n serializer=PickleSerializer(),\n)\n\n\nstorage = Docker(\n registry_url=\"cwerner\",\n image_name=\"dataflow\",\n base_image=\"cwerner/dataflow:latest\",\n env_vars={\n \"PREFECT__LOGGING__LEVEL\": \"INFO\",\n \"PREFECT__LOGGING__EXTRA_LOGGERS\": \"['great_expectations']\",\n },\n)\n\n\nwith Flow(\n \"TERENO Test Flow\",\n executor=DaskExecutor(),\n result=result,\n run_config=UniversalRun(labels=[\"dev\"]),\n schedule=Schedule(clocks=[CronClock(\"0 6 * * *\")]),\n storage=storage,\n) as flow:\n\n # parameters\n current_time = Parameter(\"current_time\", default=None)\n offset = Parameter(\"offset\", default=10)\n sitename = Parameter(\"sitename\", default=\"fendt\")\n datalocation = Parameter(\"datalocation\", default=\"/rawdata\")\n expectation_suite_name = Parameter(\"expectation_suite_name\", default=\"fendt.demo\")\n notification_email = Parameter(\n \"notification_email\", default=\"[email protected]\"\n )\n\n targetfile = retrieve_and_parse_target_file(\n datalocation, sitename, current_time, offset\n )\n\n targetfile_flags = derive_flags_filename(targetfile)\n\n batch_kwargs = get_batch_kwargs(\"data__dir\", targetfile)\n\n # validate based on ge expectations\n results = validation_task(\n batch_kwargs=batch_kwargs,\n expectation_suite_name=expectation_suite_name,\n context_root_dir=\"/home/great_expectations\",\n )\n state = email_on_failure(notification_email, upstream_tasks=[results])\n\n results = show_validation(results)\n\n uploaded = upload_dir_to_s3(\n \"/home/great_expectations/uncommitted/data_docs/local_site\",\n bucket=\"dataflow-ge-docs\",\n upstream_tasks=[results],\n )\n\n df_flags = create_flags(batch_kwargs[\"dataset\"], results)\n\n 
data_strs = prepare_df_for_s3.map([batch_kwargs[\"dataset\"], df_flags])\n\n uploaded = upload_to_s3.map(\n data_strs, [targetfile, targetfile_flags], bucket=unmapped(\"dataflow-lvl1\")\n )\n\n\nif __name__ == \"__main__\":\n # flow.run(run_on_schedule=False)\n # built_storage = flow.storage.build(push=False)\n flow.register(project_name=\"DataFlow\")\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
rdrussotto/xarray | [
"92e4a43544c8e330543fd6e1bc527e4c504a53f5"
] | [
"xarray/tests/test_missing.py"
] | [
"import itertools\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nimport xarray as xr\nfrom xarray.core.missing import (\n NumpyInterpolator,\n ScipyInterpolator,\n SplineInterpolator,\n _get_nan_block_lengths,\n get_clean_interp_index,\n)\nfrom xarray.core.pycompat import dask_array_type\nfrom xarray.tests import (\n assert_array_equal,\n assert_equal,\n raises_regex,\n requires_bottleneck,\n requires_dask,\n requires_scipy,\n)\n\n\[email protected]\ndef da():\n return xr.DataArray([0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims=\"time\")\n\n\[email protected]\ndef ds():\n ds = xr.Dataset()\n ds[\"var1\"] = xr.DataArray(\n [0, np.nan, 1, 2, np.nan, 3, 4, 5, np.nan, 6, 7], dims=\"time\"\n )\n ds[\"var2\"] = xr.DataArray(\n [10, np.nan, 11, 12, np.nan, 13, 14, 15, np.nan, 16, 17], dims=\"x\"\n )\n return ds\n\n\ndef make_interpolate_example_data(shape, frac_nan, seed=12345, non_uniform=False):\n rs = np.random.RandomState(seed)\n vals = rs.normal(size=shape)\n if frac_nan == 1:\n vals[:] = np.nan\n elif frac_nan == 0:\n pass\n else:\n n_missing = int(vals.size * frac_nan)\n\n ys = np.arange(shape[0])\n xs = np.arange(shape[1])\n if n_missing:\n np.random.shuffle(ys)\n ys = ys[:n_missing]\n\n np.random.shuffle(xs)\n xs = xs[:n_missing]\n\n vals[ys, xs] = np.nan\n\n if non_uniform:\n # construct a datetime index that has irregular spacing\n deltas = pd.TimedeltaIndex(unit=\"d\", data=rs.normal(size=shape[0], scale=10))\n coords = {\"time\": (pd.Timestamp(\"2000-01-01\") + deltas).sort_values()}\n else:\n coords = {\"time\": pd.date_range(\"2000-01-01\", freq=\"D\", periods=shape[0])}\n da = xr.DataArray(vals, dims=(\"time\", \"x\"), coords=coords)\n df = da.to_pandas()\n\n return da, df\n\n\n@requires_scipy\ndef test_interpolate_pd_compat():\n shapes = [(8, 8), (1, 20), (20, 1), (100, 100)]\n frac_nans = [0, 0.5, 1]\n methods = [\"linear\", \"nearest\", \"zero\", \"slinear\", \"quadratic\", \"cubic\"]\n\n for (shape, frac_nan, method) in itertools.product(shapes, frac_nans, methods):\n\n da, df = make_interpolate_example_data(shape, frac_nan)\n\n for dim in [\"time\", \"x\"]:\n actual = da.interpolate_na(method=method, dim=dim, fill_value=np.nan)\n expected = df.interpolate(\n method=method, axis=da.get_axis_num(dim), fill_value=(np.nan, np.nan)\n )\n # Note, Pandas does some odd things with the left/right fill_value\n # for the linear methods. This next line inforces the xarray\n # fill_value convention on the pandas output. 
Therefore, this test\n # only checks that interpolated values are the same (not nans)\n expected.values[pd.isnull(actual.values)] = np.nan\n\n np.testing.assert_allclose(actual.values, expected.values)\n\n\n@requires_scipy\[email protected](\"method\", [\"barycentric\", \"krog\", \"pchip\", \"spline\", \"akima\"])\ndef test_scipy_methods_function(method):\n # Note: Pandas does some wacky things with these methods and the full\n # integration tests wont work.\n da, _ = make_interpolate_example_data((25, 25), 0.4, non_uniform=True)\n actual = da.interpolate_na(method=method, dim=\"time\")\n assert (da.count(\"time\") <= actual.count(\"time\")).all()\n\n\n@requires_scipy\ndef test_interpolate_pd_compat_non_uniform_index():\n shapes = [(8, 8), (1, 20), (20, 1), (100, 100)]\n frac_nans = [0, 0.5, 1]\n methods = [\"time\", \"index\", \"values\"]\n\n for (shape, frac_nan, method) in itertools.product(shapes, frac_nans, methods):\n\n da, df = make_interpolate_example_data(shape, frac_nan, non_uniform=True)\n for dim in [\"time\", \"x\"]:\n if method == \"time\" and dim != \"time\":\n continue\n actual = da.interpolate_na(\n method=\"linear\", dim=dim, use_coordinate=True, fill_value=np.nan\n )\n expected = df.interpolate(\n method=method, axis=da.get_axis_num(dim), fill_value=np.nan\n )\n\n # Note, Pandas does some odd things with the left/right fill_value\n # for the linear methods. This next line inforces the xarray\n # fill_value convention on the pandas output. Therefore, this test\n # only checks that interpolated values are the same (not nans)\n expected.values[pd.isnull(actual.values)] = np.nan\n\n np.testing.assert_allclose(actual.values, expected.values)\n\n\n@requires_scipy\ndef test_interpolate_pd_compat_polynomial():\n shapes = [(8, 8), (1, 20), (20, 1), (100, 100)]\n frac_nans = [0, 0.5, 1]\n orders = [1, 2, 3]\n\n for (shape, frac_nan, order) in itertools.product(shapes, frac_nans, orders):\n\n da, df = make_interpolate_example_data(shape, frac_nan)\n\n for dim in [\"time\", \"x\"]:\n actual = da.interpolate_na(\n method=\"polynomial\", order=order, dim=dim, use_coordinate=False\n )\n expected = df.interpolate(\n method=\"polynomial\", order=order, axis=da.get_axis_num(dim)\n )\n np.testing.assert_allclose(actual.values, expected.values)\n\n\n@requires_scipy\ndef test_interpolate_unsorted_index_raises():\n vals = np.array([1, 2, 3], dtype=np.float64)\n expected = xr.DataArray(vals, dims=\"x\", coords={\"x\": [2, 1, 3]})\n with raises_regex(ValueError, \"Index 'x' must be monotonically increasing\"):\n expected.interpolate_na(dim=\"x\", method=\"index\")\n\n\ndef test_interpolate_no_dim_raises():\n da = xr.DataArray(np.array([1, 2, np.nan, 5], dtype=np.float64), dims=\"x\")\n with raises_regex(NotImplementedError, \"dim is a required argument\"):\n da.interpolate_na(method=\"linear\")\n\n\ndef test_interpolate_invalid_interpolator_raises():\n da = xr.DataArray(np.array([1, 2, np.nan, 5], dtype=np.float64), dims=\"x\")\n with raises_regex(ValueError, \"not a valid\"):\n da.interpolate_na(dim=\"x\", method=\"foo\")\n\n\ndef test_interpolate_duplicate_values_raises():\n data = np.random.randn(2, 3)\n da = xr.DataArray(data, coords=[(\"x\", [\"a\", \"a\"]), (\"y\", [0, 1, 2])])\n with raises_regex(ValueError, \"Index 'x' has duplicate values\"):\n da.interpolate_na(dim=\"x\", method=\"foo\")\n\n\ndef test_interpolate_multiindex_raises():\n data = np.random.randn(2, 3)\n data[1, 1] = np.nan\n da = xr.DataArray(data, coords=[(\"x\", [\"a\", \"b\"]), (\"y\", [0, 1, 2])])\n das = 
da.stack(z=(\"x\", \"y\"))\n with raises_regex(TypeError, \"Index 'z' must be castable to float64\"):\n das.interpolate_na(dim=\"z\")\n\n\ndef test_interpolate_2d_coord_raises():\n coords = {\n \"x\": xr.Variable((\"a\", \"b\"), np.arange(6).reshape(2, 3)),\n \"y\": xr.Variable((\"a\", \"b\"), np.arange(6).reshape(2, 3)) * 2,\n }\n\n data = np.random.randn(2, 3)\n data[1, 1] = np.nan\n da = xr.DataArray(data, dims=(\"a\", \"b\"), coords=coords)\n with raises_regex(ValueError, \"interpolation must be 1D\"):\n da.interpolate_na(dim=\"a\", use_coordinate=\"x\")\n\n\n@requires_scipy\ndef test_interpolate_kwargs():\n da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims=\"x\")\n expected = xr.DataArray(np.array([4, 5, 6], dtype=np.float64), dims=\"x\")\n actual = da.interpolate_na(dim=\"x\", fill_value=\"extrapolate\")\n assert_equal(actual, expected)\n\n expected = xr.DataArray(np.array([4, 5, -999], dtype=np.float64), dims=\"x\")\n actual = da.interpolate_na(dim=\"x\", fill_value=-999)\n assert_equal(actual, expected)\n\n\ndef test_interpolate():\n\n vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)\n expected = xr.DataArray(vals, dims=\"x\")\n mvals = vals.copy()\n mvals[2] = np.nan\n missing = xr.DataArray(mvals, dims=\"x\")\n\n actual = missing.interpolate_na(dim=\"x\")\n\n assert_equal(actual, expected)\n\n\ndef test_interpolate_nonans():\n\n vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)\n expected = xr.DataArray(vals, dims=\"x\")\n actual = expected.interpolate_na(dim=\"x\")\n assert_equal(actual, expected)\n\n\n@requires_scipy\ndef test_interpolate_allnans():\n vals = np.full(6, np.nan, dtype=np.float64)\n expected = xr.DataArray(vals, dims=\"x\")\n actual = expected.interpolate_na(dim=\"x\")\n\n assert_equal(actual, expected)\n\n\n@requires_bottleneck\ndef test_interpolate_limits():\n da = xr.DataArray(\n np.array([1, 2, np.nan, np.nan, np.nan, 6], dtype=np.float64), dims=\"x\"\n )\n\n actual = da.interpolate_na(dim=\"x\", limit=None)\n assert actual.isnull().sum() == 0\n\n actual = da.interpolate_na(dim=\"x\", limit=2)\n expected = xr.DataArray(\n np.array([1, 2, 3, 4, np.nan, 6], dtype=np.float64), dims=\"x\"\n )\n\n assert_equal(actual, expected)\n\n\n@requires_scipy\ndef test_interpolate_methods():\n for method in [\"linear\", \"nearest\", \"zero\", \"slinear\", \"quadratic\", \"cubic\"]:\n kwargs = {}\n da = xr.DataArray(\n np.array([0, 1, 2, np.nan, np.nan, np.nan, 6, 7, 8], dtype=np.float64),\n dims=\"x\",\n )\n actual = da.interpolate_na(\"x\", method=method, **kwargs)\n assert actual.isnull().sum() == 0\n\n actual = da.interpolate_na(\"x\", method=method, limit=2, **kwargs)\n assert actual.isnull().sum() == 1\n\n\n@requires_scipy\ndef test_interpolators():\n for method, interpolator in [\n (\"linear\", NumpyInterpolator),\n (\"linear\", ScipyInterpolator),\n (\"spline\", SplineInterpolator),\n ]:\n xi = np.array([-1, 0, 1, 2, 5], dtype=np.float64)\n yi = np.array([-10, 0, 10, 20, 50], dtype=np.float64)\n x = np.array([3, 4], dtype=np.float64)\n\n f = interpolator(xi, yi, method=method)\n out = f(x)\n assert pd.isnull(out).sum() == 0\n\n\ndef test_interpolate_use_coordinate():\n xc = xr.Variable(\"x\", [100, 200, 300, 400, 500, 600])\n da = xr.DataArray(\n np.array([1, 2, np.nan, np.nan, np.nan, 6], dtype=np.float64),\n dims=\"x\",\n coords={\"xc\": xc},\n )\n\n # use_coordinate == False is same as using the default index\n actual = da.interpolate_na(dim=\"x\", use_coordinate=False)\n expected = da.interpolate_na(dim=\"x\")\n assert_equal(actual, 
expected)\n\n # possible to specify non index coordinate\n actual = da.interpolate_na(dim=\"x\", use_coordinate=\"xc\")\n expected = da.interpolate_na(dim=\"x\")\n assert_equal(actual, expected)\n\n # possible to specify index coordinate by name\n actual = da.interpolate_na(dim=\"x\", use_coordinate=\"x\")\n expected = da.interpolate_na(dim=\"x\")\n assert_equal(actual, expected)\n\n\n@requires_dask\ndef test_interpolate_dask():\n da, _ = make_interpolate_example_data((40, 40), 0.5)\n da = da.chunk({\"x\": 5})\n actual = da.interpolate_na(\"time\")\n expected = da.load().interpolate_na(\"time\")\n assert isinstance(actual.data, dask_array_type)\n assert_equal(actual.compute(), expected)\n\n # with limit\n da = da.chunk({\"x\": 5})\n actual = da.interpolate_na(\"time\", limit=3)\n expected = da.load().interpolate_na(\"time\", limit=3)\n assert isinstance(actual.data, dask_array_type)\n assert_equal(actual, expected)\n\n\n@requires_dask\ndef test_interpolate_dask_raises_for_invalid_chunk_dim():\n da, _ = make_interpolate_example_data((40, 40), 0.5)\n da = da.chunk({\"time\": 5})\n with raises_regex(ValueError, \"dask='parallelized' consists of multiple\"):\n da.interpolate_na(\"time\")\n\n\n@requires_bottleneck\ndef test_ffill():\n da = xr.DataArray(np.array([4, 5, np.nan], dtype=np.float64), dims=\"x\")\n expected = xr.DataArray(np.array([4, 5, 5], dtype=np.float64), dims=\"x\")\n actual = da.ffill(\"x\")\n assert_equal(actual, expected)\n\n\n@requires_bottleneck\n@requires_dask\ndef test_ffill_dask():\n da, _ = make_interpolate_example_data((40, 40), 0.5)\n da = da.chunk({\"x\": 5})\n actual = da.ffill(\"time\")\n expected = da.load().ffill(\"time\")\n assert isinstance(actual.data, dask_array_type)\n assert_equal(actual, expected)\n\n # with limit\n da = da.chunk({\"x\": 5})\n actual = da.ffill(\"time\", limit=3)\n expected = da.load().ffill(\"time\", limit=3)\n assert isinstance(actual.data, dask_array_type)\n assert_equal(actual, expected)\n\n\n@requires_bottleneck\n@requires_dask\ndef test_bfill_dask():\n da, _ = make_interpolate_example_data((40, 40), 0.5)\n da = da.chunk({\"x\": 5})\n actual = da.bfill(\"time\")\n expected = da.load().bfill(\"time\")\n assert isinstance(actual.data, dask_array_type)\n assert_equal(actual, expected)\n\n # with limit\n da = da.chunk({\"x\": 5})\n actual = da.bfill(\"time\", limit=3)\n expected = da.load().bfill(\"time\", limit=3)\n assert isinstance(actual.data, dask_array_type)\n assert_equal(actual, expected)\n\n\n@requires_bottleneck\ndef test_ffill_bfill_nonans():\n\n vals = np.array([1, 2, 3, 4, 5, 6], dtype=np.float64)\n expected = xr.DataArray(vals, dims=\"x\")\n\n actual = expected.ffill(dim=\"x\")\n assert_equal(actual, expected)\n\n actual = expected.bfill(dim=\"x\")\n assert_equal(actual, expected)\n\n\n@requires_bottleneck\ndef test_ffill_bfill_allnans():\n\n vals = np.full(6, np.nan, dtype=np.float64)\n expected = xr.DataArray(vals, dims=\"x\")\n\n actual = expected.ffill(dim=\"x\")\n assert_equal(actual, expected)\n\n actual = expected.bfill(dim=\"x\")\n assert_equal(actual, expected)\n\n\n@requires_bottleneck\ndef test_ffill_functions(da):\n result = da.ffill(\"time\")\n assert result.isnull().sum() == 0\n\n\n@requires_bottleneck\ndef test_ffill_limit():\n da = xr.DataArray(\n [0, np.nan, np.nan, np.nan, np.nan, 3, 4, 5, np.nan, 6, 7], dims=\"time\"\n )\n result = da.ffill(\"time\")\n expected = xr.DataArray([0, 0, 0, 0, 0, 3, 4, 5, 5, 6, 7], dims=\"time\")\n assert_array_equal(result, expected)\n\n result = da.ffill(\"time\", limit=1)\n 
expected = xr.DataArray(\n [0, 0, np.nan, np.nan, np.nan, 3, 4, 5, 5, 6, 7], dims=\"time\"\n )\n assert_array_equal(result, expected)\n\n\ndef test_interpolate_dataset(ds):\n actual = ds.interpolate_na(dim=\"time\")\n # no missing values in var1\n assert actual[\"var1\"].count(\"time\") == actual.dims[\"time\"]\n\n # var2 should be the same as it was\n assert_array_equal(actual[\"var2\"], ds[\"var2\"])\n\n\n@requires_bottleneck\ndef test_ffill_dataset(ds):\n ds.ffill(dim=\"time\")\n\n\n@requires_bottleneck\ndef test_bfill_dataset(ds):\n ds.ffill(dim=\"time\")\n\n\n@requires_bottleneck\[email protected](\n \"y, lengths\",\n [\n [np.arange(9), [[3, 3, 3, 0, 3, 3, 0, 2, 2]]],\n [np.arange(9) * 3, [[9, 9, 9, 0, 9, 9, 0, 6, 6]]],\n [[0, 2, 5, 6, 7, 8, 10, 12, 14], [[6, 6, 6, 0, 4, 4, 0, 4, 4]]],\n ],\n)\ndef test_interpolate_na_nan_block_lengths(y, lengths):\n arr = [[np.nan, np.nan, np.nan, 1, np.nan, np.nan, 4, np.nan, np.nan]]\n da = xr.DataArray(arr * 2, dims=[\"x\", \"y\"], coords={\"x\": [0, 1], \"y\": y})\n index = get_clean_interp_index(da, dim=\"y\", use_coordinate=True)\n actual = _get_nan_block_lengths(da, dim=\"y\", index=index)\n expected = da.copy(data=lengths * 2)\n assert_equal(actual, expected)\n\n\[email protected]\ndef da_time():\n return xr.DataArray(\n [np.nan, 1, 2, np.nan, np.nan, 5, np.nan, np.nan, np.nan, np.nan, 10],\n dims=[\"t\"],\n )\n\n\ndef test_interpolate_na_max_gap_errors(da_time):\n with raises_regex(\n NotImplementedError, \"max_gap not implemented for unlabeled coordinates\"\n ):\n da_time.interpolate_na(\"t\", max_gap=1)\n\n with raises_regex(ValueError, \"max_gap must be a scalar.\"):\n da_time.interpolate_na(\"t\", max_gap=(1,))\n\n da_time[\"t\"] = pd.date_range(\"2001-01-01\", freq=\"H\", periods=11)\n with raises_regex(TypeError, \"Underlying index is\"):\n da_time.interpolate_na(\"t\", max_gap=1)\n\n with raises_regex(TypeError, \"Expected integer or floating point\"):\n da_time.interpolate_na(\"t\", max_gap=\"1H\", use_coordinate=False)\n\n with raises_regex(ValueError, \"Could not convert 'huh' to timedelta64\"):\n da_time.interpolate_na(\"t\", max_gap=\"huh\")\n\n\n@requires_bottleneck\[email protected](\n \"time_range_func\",\n [pd.date_range, pytest.param(xr.cftime_range, marks=pytest.mark.xfail)],\n)\[email protected](\"transform\", [lambda x: x, lambda x: x.to_dataset(name=\"a\")])\[email protected](\n \"max_gap\", [\"3H\", np.timedelta64(3, \"h\"), pd.to_timedelta(\"3H\")]\n)\ndef test_interpolate_na_max_gap_time_specifier(\n da_time, max_gap, transform, time_range_func\n):\n da_time[\"t\"] = time_range_func(\"2001-01-01\", freq=\"H\", periods=11)\n expected = transform(\n da_time.copy(data=[np.nan, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan, 10])\n )\n actual = transform(da_time).interpolate_na(\"t\", max_gap=max_gap)\n assert_equal(actual, expected)\n\n\n@requires_bottleneck\[email protected](\n \"coords\",\n [\n pytest.param(None, marks=pytest.mark.xfail()),\n {\"x\": np.arange(4), \"y\": np.arange(11)},\n ],\n)\ndef test_interpolate_na_2d(coords):\n da = xr.DataArray(\n [\n [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],\n [1, 2, 3, np.nan, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],\n [1, 2, 3, np.nan, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],\n [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],\n ],\n dims=[\"x\", \"y\"],\n coords=coords,\n )\n\n actual = da.interpolate_na(\"y\", max_gap=2)\n expected_y = da.copy(\n data=[\n [1, 2, 3, 4, 5, 6, 7, np.nan, np.nan, np.nan, 11],\n [1, 2, 3, np.nan, np.nan, 6, 7, np.nan, 
np.nan, np.nan, 11],\n [1, 2, 3, np.nan, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],\n [1, 2, 3, 4, 5, 6, 7, np.nan, np.nan, np.nan, 11],\n ]\n )\n assert_equal(actual, expected_y)\n\n actual = da.interpolate_na(\"x\", max_gap=3)\n expected_x = xr.DataArray(\n [\n [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],\n [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],\n [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],\n [1, 2, 3, 4, np.nan, 6, 7, np.nan, np.nan, np.nan, 11],\n ],\n dims=[\"x\", \"y\"],\n coords=coords,\n )\n assert_equal(actual, expected_x)\n"
] | [
[
"pandas.isnull",
"pandas.Timestamp",
"numpy.arange",
"numpy.random.shuffle",
"numpy.full",
"numpy.timedelta64",
"numpy.random.randn",
"numpy.testing.assert_allclose",
"pandas.date_range",
"pandas.to_timedelta",
"numpy.array",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ldelzott/ByteTrack | [
"5f8ab49a913a551d041918607a0bd2473602ad39"
] | [
"exps/example/mot/yolox_x_mix_det.py"
] | [
"# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\n\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n def __init__(self):\n super(Exp, self).__init__()\n self.num_classes = 1\n self.depth = 1.33\n self.width = 1.25\n self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n self.train_ann = \"train.json\"\n self.val_ann = \"train.json\" # change to validation.json when validation set is available\n self.input_size = (1280, 1280)\n self.test_size = (1280, 1280)\n self.random_size = (18, 32)\n self.max_epoch = 15\n self.print_interval = 20\n self.eval_interval = 3\n self.test_conf = 0.001\n self.nmsthre = 0.7\n self.no_aug_epochs = 5\n self.basic_lr_per_img = 0.001 / 64.0\n self.warmup_epochs = 1\n\n def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n from yolox.data import (\n MOTDataset,\n TrainTransform,\n YoloBatchSampler,\n DataLoader,\n InfiniteSampler,\n MosaicDetection,\n )\n\n dataset = MOTDataset(\n data_dir=os.path.join(get_yolox_datadir(), \"epucks_dataset_300\"),\n json_file=self.train_ann,\n name='',\n img_size=self.input_size,\n preproc=TrainTransform(\n rgb_means=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n max_labels=500,\n ),\n )\n\n dataset = MosaicDetection(\n dataset,\n mosaic=not no_aug,\n img_size=self.input_size,\n preproc=TrainTransform(\n rgb_means=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n max_labels=1000,\n ),\n degrees=self.degrees,\n translate=self.translate,\n scale=self.scale,\n shear=self.shear,\n perspective=self.perspective,\n enable_mixup=self.enable_mixup,\n )\n\n self.dataset = dataset\n\n if is_distributed:\n batch_size = batch_size // dist.get_world_size()\n\n sampler = InfiniteSampler(\n len(self.dataset), seed=self.seed if self.seed else 0\n )\n\n batch_sampler = YoloBatchSampler(\n sampler=sampler,\n batch_size=batch_size,\n drop_last=False,\n input_dimension=self.input_size,\n mosaic=not no_aug,\n )\n\n dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n return train_loader\n\n def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n from yolox.data import MOTDataset, ValTransform\n\n valdataset = MOTDataset(\n data_dir=os.path.join(get_yolox_datadir(), \"epucks_dataset_300\"),\n json_file=self.val_ann,\n img_size=self.test_size,\n name='', # change to train when running on training set\n preproc=ValTransform(\n rgb_means=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225),\n ),\n )\n\n if is_distributed:\n batch_size = batch_size // dist.get_world_size()\n sampler = torch.utils.data.distributed.DistributedSampler(\n valdataset, shuffle=False\n )\n else:\n sampler = torch.utils.data.SequentialSampler(valdataset)\n\n dataloader_kwargs = {\n \"num_workers\": self.data_num_workers,\n \"pin_memory\": True,\n \"sampler\": sampler,\n }\n dataloader_kwargs[\"batch_size\"] = batch_size\n val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n return val_loader\n\n def get_evaluator(self, batch_size, is_distributed, testdev=False):\n from yolox.evaluators import COCOEvaluator\n\n val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n evaluator = COCOEvaluator(\n dataloader=val_loader,\n img_size=self.test_size,\n confthre=self.test_conf,\n nmsthre=self.nmsthre,\n 
num_classes=self.num_classes,\n testdev=testdev,\n )\n return evaluator\n"
] | [
[
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.distributed.get_world_size",
"torch.utils.data.distributed.DistributedSampler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
danbirks/PredictCode | [
"b4d7010d13706c771ba57437e9c7589e5c94329b"
] | [
"sandbox/debugPHS.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 14 14:37:28 2019\n\n@author: Dustin\n\"\"\"\n\n\n\n# Some fairly standard modules\nimport os, csv, lzma\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport descartes\nfrom itertools import product\nfrom collections import Counter\nimport datetime\nimport random\nimport time\n\n# The geopandas module does not come standard with anaconda,\n# so you'll need to run the anaconda prompt as an administrator\n# and install it via \"conda install -c conda-forge geopandas\".\n# That installation will include pyproj and shapely automatically.\n# These are useful modules for plotting geospatial data.\nimport geopandas as gpd\nimport pyproj\nimport shapely.geometry\n\n# These modules are useful for tracking where modules are\n# imported from, e.g., to check we're using our local edited\n# versions of open_cp scripts.\nimport sys\nimport inspect\nimport importlib\n\n# In order to use our local edited versions of open_cp\n# scripts, we insert the parent directory of the current\n# file (\"..\") at the start of our sys.path here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\n# Elements from PredictCode's custom \"open_cp\" package\nimport open_cp\nimport open_cp.geometry\nimport open_cp.plot\nimport open_cp.sources.chicago as chicago\nimport open_cp.retrohotspot as retro\nimport open_cp.prohotspot as phs\nimport open_cp.knox\n\n# Load custom functions that make dealing with datetime and timedelta easier\nfrom crimeRiskTimeTools import generateDateRange, \\\n generateLaterDate, \\\n generateEarlierDate, \\\n getTimedPointsInTimeRange, \\\n getSixDigitDate, \\\n _day\n\n# Useful line for python console\n# sys.path.insert(0,os.path.join(os.path.abspath(\".\"),\"Documents\",\"GitHub\",\"PredictCode\",\"sandbox\"))\n\n\n\n#Constants for figures\n\n# Heat-like color mapping\n_cdict = {'red': [(0.0, 1.0, 1.0),\n (1.0, 1.0, 1.0)],\n 'green': [(0.0, 1.0, 1.0),\n (1.0, 0.0, 0.0)],\n 'blue': [(0.0, 0.2, 0.2),\n (1.0, 0.2, 0.2)]}\n\nyellow_to_red = matplotlib.colors.LinearSegmentedColormap(\"yellow_to_red\", _cdict)\n\n\n\n\n#Functions\n\n\n# Functions that help process the data\n\n\n# Given a TimedPoints object and a Grid (MaskedGrid?) 
object,\n# return a Counter object that is a mapping from the grid cell\n# coordinates to the number of points within the cell.\n# Note that \"grid cell coordinates\" refers to which row of cells\n# and which column of cells it's located at, NOT spatial coords.\ndef countPointsPerCell(points, grid):\n # Get xy coords from TimedPoints\n xcoords, ycoords = points.xcoords, points.ycoords\n # Convert coords to cellcoords\n xgridinds = np.floor((xcoords - grid.xoffset) / grid.xsize).astype(np.int)\n ygridinds = np.floor((ycoords - grid.yoffset) / grid.ysize).astype(np.int)\n # Count the number of crimes per cell\n # NOTE: We do (y,x) instead of (x,y) because cells are (row,col)!!!\n return Counter(zip(ygridinds, xgridinds))\n\n\n# Given a sorted list of cells, and a mapping from cells to number of events\n# in those cells, return a list of numbers, of length equal to the given\n# list of cells, representing the running total of number of events in all\n# cells up to that point in the list.\ndef getHitRateList(sorted_cells, cell_hit_map):\n running_total = 0\n hit_rate_list = []\n for cell in sorted_cells:\n running_total += cell_hit_map[cell]\n hit_rate_list.append(running_total)\n return hit_rate_list\n\n\ndef sortCellsByRiskMatrix(cells, risk_matrix):\n # For each cellcoord, get its risk from the risk matrix\n cellcoord_risk_dict = dict()\n for cc in cells:\n cellcoord_risk_dict[cc] = risk_matrix[cc[0]][cc[1]]\n \n # Sort cellcoords by risk, highest risk first\n cells_risksort = sorted(cells, \\\n key=lambda x:cellcoord_risk_dict[x], \\\n reverse=True)\n return cells_risksort\n\n\n\ndef getRegionCells(grid):\n # Make sure to do yextent then xextent, because cellcoords\n # correspond to (row,col) in grid\n all_cells = product(range(grid.yextent), range(grid.xextent))\n return tuple(cc for cc in all_cells \n if not grid.mask[cc[0]][cc[1]])\n\n\n\n\n\n# Functions that run models\n\n\n# Construct a \"random\" model by simply randomly sorting the cells\ndef RandomlySortCells(cells, seed=None):\n if seed != None:\n random.seed(seed)\n cell_list = list(cells)\n random.shuffle(cell_list)\n return cell_list\n\n\n\n# Most naive model is to just count the number of events occurring in each\n# cell in the training data, and favor the cells with the most events\ndef runNaiveCount_model(data_points, grid):\n crime_ctr = countPointsPerCell(data_points, grid)\n grid_cells = getRegionCells(grid)\n print(\"Naive Count results:\")\n print(sorted(grid_cells)[0])\n print(sorted(grid_cells)[-1])\n print(max([x[0] for x in grid_cells]))\n print(max([x[1] for x in grid_cells]))\n print(\"(^^^End of Naive Count results)\\n\")\n return sorted(grid_cells, key=lambda x:crime_ctr[x], reverse=True)\n\n\n\ndef runRhsModel(training_data, grid, bandwidth=250, rand_seed=None):\n # Set RNG seed if given\n if rand_seed != None:\n np.random.seed(rand_seed)\n \n grid_cells = getRegionCells(grid)\n \n # Obtain model and prediction on grid cells\n rhs_pred = retro.RetroHotSpot()\n rhs_pred.data = training_data\n rhs_pred.weight = retro.Quartic(bandwidth = bandwidth)\n rhs_risk = rhs_pred.predict()\n rhs_grid_risk = open_cp.predictors.grid_prediction(rhs_risk, grid)\n rhs_grid_risk_matrix = rhs_grid_risk.intensity_matrix\n \n # Sort cellcoords by risk in intensity matrix, highest risk first\n return sortCellsByRiskMatrix(grid_cells, rhs_grid_risk_matrix)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef knox_ratio(statistic, distribution):\n # From \"Examples/Chicago Case Study/Knox Statistics\" notebook\n # Compute the ratio of the statistic to 
the \n # median of the values in the distribution\n d = np.array(distribution)\n d.sort()\n return statistic / d[len(d)//2]\n\n\n\ndef all_knox_ratios(result):\n for i, space_bin in enumerate(result.space_bins):\n for j, time_bin in enumerate(result.time_bins):\n yield knox_ratio(result.statistic(i,j), result.distribution(i,j))\n\n\n\n\n\n\ndef printPhsRiskMatrixInfo(phs_grid_risk_matrix):\n \n print(f\"Type of phs_grid_risk_matrix: {type(phs_grid_risk_matrix)}\")\n print(f\"Shape of phs_grid_risk_matrix: {phs_grid_risk_matrix.shape}\")\n valctr = Counter()\n for somerow in phs_grid_risk_matrix:\n valctr.update(somerow)\n print(\"Frequency of values in grid:\")\n print(valctr)\n print(\"Integer grid:\")\n indexrowh = \"\"\n indexrowt = \"\"\n indexrowo = \"\"\n for i in range(105):\n indexrowh += str(int(i/100))\n indexrowt += str(int((i%100)/10))\n indexrowo += str(i%10)\n print(indexrowh)\n print(indexrowt)\n print(indexrowo)\n srctr = 0\n scoremap = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n for somerow in (8*phs_grid_risk_matrix[:,:]).astype(int).tolist():\n print(\"\".join([scoremap[x] for x in somerow]) + \" \" + str(srctr))\n srctr += 1\n print(f\"Num rows printed: {srctr}\")\n sys.exit(0)\n\n\n\n\n\ndef runPhsModel(training_data, grid, cutoff_time, time_unit, dist_unit, time_bandwidth, dist_bandwidth, weight=\"linear\"):\n \n grid_cells = getRegionCells(grid=grid)\n \n # Obtain model and prediction on grid cells\n phs_predictor = phs.ProspectiveHotSpot(grid=grid)\n phs_predictor.data = training_data\n \n dist_band_in_units = dist_bandwidth/dist_unit\n time_band_in_units = time_bandwidth/time_unit\n \n if weight==\"linear\":\n phs_predictor.weight = phs.LinearWeightNormalised(space_bandwidth=dist_band_in_units, time_bandwidth=time_band_in_units)\n elif weight==\"classic\":\n phs_predictor.weight = phs.ClassicWeightNormalised(space_bandwidth=dist_band_in_units, time_bandwidth=time_band_in_units)\n \n phs_predictor.grid = dist_unit\n phs_predictor.time_unit = time_unit\n \n # Only include this method of establishing cutoff_time if we want a\n # prediction for the day after the latest event in training data. 
If so,\n # this will ignore any event-less period of time between training and\n # test data, which means time decay may be less pronounced.\n #cutoff_time = sorted(training_data.timestamps)[-1] + _day\n \n phs_grid_risk = phs_predictor.predict(cutoff_time, cutoff_time)\n \n #phs_grid_risk = open_cp.predictors.grid_prediction(phs_risk, grid)\n phs_grid_risk_matrix = phs_grid_risk.intensity_matrix\n \n \n \n \n # PHS info:\n # Type of grid_cells: <class 'tuple'>\n # Type of element of grid_cells: <class 'tuple'>\n # Length of grid_cells: 5587\n # Type of phs_predictor: <class 'open_cp.prohotspot.ProspectiveHotSpot'>\n # dist_band_in_units: 4.0\n # time_band_in_units: 4.0\n # time_bandwidth: 4 weeks\n # time_unit: 1 weeks\n # dist_unit: 100\n # time_unit: 1 weeks\n # Type of phs_grid_risk: <class 'open_cp.predictors.GridPredictionArray'>\n \n \n #print(f\"Timestamps:\")\n #print(f\"{training_data.timestamps}\")\n #print(f\"Lat/long coords:\")\n #print(f\"{training_data.coords}\")\n #printPhsRiskMatrixInfo(phs_grid_risk_matrix)\n \n \n # Sort cellcoords by risk in intensity matrix, highest risk first\n return sortCellsByRiskMatrix(grid_cells, phs_grid_risk_matrix)\n \n\n\n\n\n\n\n\n\n# Given a model name and relevant arguments,\n# return a sorted list of cells\ndef runModelAndSortCells(model_name, model_args):\n \n # We declare our recognised possible models here\n rec_models = [\"ideal\",\"random\",\"naivecount\",\"rhs\",\"phs\"]\n if model_name not in rec_models:\n print(\"Unrecognized model name: {}\".format(model_name))\n sys.exit(1)\n \n \n \n if model_name==\"ideal\":\n # We need these variables:\n # cellcoordlist_region\n # cells_testcrime_ctr\n \n cellcoordlist_region, cells_testcrime_ctr = model_args\n \n return sorted(cellcoordlist_region, \n key=lambda x:cells_testcrime_ctr[x], \n reverse=True)\n \n if model_name==\"random\":\n # We need these variables:\n # cellcoordlist_region\n # plot_random_seed\n \n cellcoordlist_region, plot_random_seed = model_args\n \n return RandomlySortCells(cellcoordlist_region, seed=plot_random_seed)\n \n \n \n # If the model isn't ideal or random,\n # then it's naivecount or rhs or phs,\n # so the first two args should be the data and the region\n \n points_crime_region_train = model_args[0]\n masked_grid_region = model_args[1]\n other_model_args = model_args[2:]\n \n if model_name==\"naivecount\":\n # We need these variables:\n # points_crime_region_train\n # masked_grid_region\n \n return runNaiveCount_model(points_crime_region_train, masked_grid_region)\n \n \n \n if model_name==\"rhs\":\n # We need these variables:\n # points_crime_region_train\n # masked_grid_region\n # rhs_bandwidth\n # rhs_random_seed\n \n rhs_random_seed, rhs_bandwidth = other_model_args\n \n return runRhsModel(points_crime_region_train,\n masked_grid_region, \n bandwidth = rhs_bandwidth, \n rand_seed=rhs_random_seed)\n \n if model_name==\"phs\":\n # We need these variables:\n # points_crime_region_train\n # masked_grid_region\n # time_unit (ex: np.timedelta64(1, \"W\") )\n # time_bandwidth (ex: np.timedelta64(4, \"W\") )\n # dist_unit (ex: 100 )\n # dist_bandwidth (ex: 500 )\n # weight (ex: \"linear\", \"classic\" )\n \n cutoff_time, time_unit, time_bandwidth, dist_unit, dist_bandwidth, weight = other_model_args\n return runPhsModel(\n training_data = points_crime_region_train, \n grid = masked_grid_region, \n cutoff_time = cutoff_time, \n time_unit = time_unit, \n dist_unit = dist_unit, \n time_bandwidth = time_bandwidth, \n dist_bandwidth = dist_bandwidth, \n weight = 
weight)\n \n\n\n\n\n\n# Start of code\n\n\n\n# START TIMER\ninit_start_time = time.time()\n\n\n\n# PARAMETERS\n\n\n\n\n# PARAMETERS TO SWEEP\n# Model-invariant parameters:\n# > DATA\n# - cell width (assume height is same)\n# - overall data set (currently hard-coded as chicago)\n# - crime type(s), just doing BURGLARY for now\n# - spatial offset? (not doing this now, but maybe worth investigating)\n# > DATE\n# - Length of training window\n# - Length of testing window\n# - Date (of, say, testing; other dates calculated from lengths)\n# > MODEL\n# - model type(s)\n# > EVAL\n# - Coverage (1%, 2%, 5%, 10%)\n# - Hit count or hit % as metric for evaluation\n# Model-specific parameters:\n# > Random\n# - seed\n# > RHS\n# - rhs_bandwidth\n# - seed\n# > PHS\n# - time_unit\n# - dist_unit (==cell width, probably?)\n# - time_bandwidth\n# - dist_bandwidth\n# - choice of weight? (linear vs classic)\n# - knox stuff? (some relates to the above)\n# - \n\n\n# What do I want to see in output?\n# ~~~INPUT DATA\n# Data set (e.g. \"Chicago South Side\")\n# Crime type(s) (e.g. \"BURGLARY\", or \"BURGLARY,THEFT\")\n# Cell width\n# Test start date (=train end date)\n# Train len\n# ~~~EVAL RESULTS\n# Test len\n# Coverage rate\n# Num crimes in test data\n# Num crimes in test data captured by model within coverage rate\n# % crimes in test data captured by model within coverage rate\n# ~~~MODEL\n# Model type\n# (various model-specific parameters: random seeds, bandwidths, etc, etc)\n\n\n\n#models_to_run = [\"random\", \"naivecount\", \"rhs\", \"phs\", \"ideal\"]\nmodels_to_run = [\"random\", \"naivecount\", \"phs\", \"ideal\"]\n#models_to_run = [\"naivecount\",\"phs\"]\n#models_to_run = [\"phs\"]\n\nmodel_param_dict = dict()\n\nmodel_param_dict[\"ideal\"] = [()]\n\nmodel_param_dict[\"naivecount\"] = [()]\n\nnum_random = 1\nrand_seeds = range(num_random)\nmodel_param_dict[\"random\"] = list(product(rand_seeds))\n\nnum_rhs = 1\nrhs_seeds = range(num_rhs)\nrhs_bandwidth_sweep = [300]\nmodel_param_dict[\"rhs\"] = list(product(rhs_seeds, rhs_bandwidth_sweep))\n\nphs_time_units = [np.timedelta64(1, \"W\")]\n#phs_time_bands = [np.timedelta64(4, \"W\")]\n#phs_time_bands = [np.timedelta64(x, \"W\") for x in range(1,7)]\nphs_time_bands = [np.timedelta64(x, \"W\") for x in range(1,9)]\nphs_dist_units = [100]\n#phs_dist_bands = [500]\n#phs_dist_bands = [x*100 for x in range(1,11)]\nphs_dist_bands = [x*100 for x in range(1,11)]\nphs_weights = [\"linear\"]\nmodel_param_dict[\"phs\"] = list(product(\n phs_time_units, \n phs_time_bands, \n phs_dist_units, \n phs_dist_bands, \n phs_weights))\n\n\n\n\n\n\n# Parameters for overall data set\ndataset_name = \"Chicago\"\ncrime_type_set_sweep = [{\"BURGLARY\"}]\ncell_width_sweep = [100]\n# If we did spatial offsets, that would belong here too\n# Also if there's a convenient way to specify Chicago vs other data set, do that here\n\n# Parameters for time range\n\n\n\n\n\n\n\n# Data parameters\nprint(\"Declaring parameters...\")\ndatadir = os.path.join(\"..\", \"..\", \"Data\")\n#chicago_file_name = \"chicago_all_old.csv\"\n#chicago_file_name = \"chi_all_s_BURGLARY_010101_190101.csv\"\nchicago_file_name = \"chi_all_s_BURGLARY_RES_010101_190101.csv\"\nchicago_side = \"South\"\nchicago_load_type = \"snapshot\"\nif \"all\" in chicago_file_name:\n chicago_load_type = \"all\"\nchicago_file_path = os.path.join(datadir, chicago_file_name)\n# Chicago module requires this line to access some data\nchicago.set_data_directory(datadir)\n\n\n\n# Time parameters\n# The best single date to define an experiment 
by is the start of the test\n# data. The training data will be from a given time range fully up to but\n# not including that date, while the test data will be from a given time\n# range starting on that date. If we wish to compare different sizes of\n# training or test data, then the best comparison would be against the\n# other experiments with this same date as the cutoff between the training\n# and testing data, regardless of the sizes of those data sets.\n\n# Of all planned experiments, earliest start of a test data set\nearliest_test_date = \"2013-01-01\"\nearliest_test_date_str = \"\".join(earliest_test_date.split(\"-\"))[2:]\n# Time between earliest experiment and latest experiment\ntest_date_range = \"5Y\"\n# Latest start of a test data set, calculated from above 2 variables\nlatest_test_date = generateLaterDate(earliest_test_date, test_date_range)\n\n# Length of training data\ntrain_len = \"8W\"\n#train_len_sweep = [\"4W\"] #multi-option not fully implemented\n# Length of testing data\ntest_len = \"1D\"\n#test_len_sweep = [\"1D\",\"3D\",\"7D\"] #multi-option not fully implemented\n\n# Time step between different experiments\n#test_date_step = \"1D\"\n# We have currently decided to step forward the experiment so that test sets\n# do not overlap, the reasoning being roughly: why would we bother evaluating\n# a model on 7 days of data if we're about to retrain the model 1 day later?\n# In the future it is possible this may change if we find a compelling reason\n# otherwise, or may add an option to override this choice.\ntest_date_step = test_len\n\n# List of all experiment dates\nstart_test_list = generateDateRange(earliest_test_date, latest_test_date, test_date_step)\n# Number of different experiment dates\ntotal_num_exp_dates = len(start_test_list)\n\n\n\ncoverage_rate_sweep = [0.01, 0.02, 0.05, 0.10]\n\n\ncell_sampling = 15 #!!! 
need to find where to use this, for rhs\n\n\n# Knox statistic parameters\n#knox_space_bin_size = 100\n#knox_space_bin_count = 5\n#knox_space_bins = [(i*knox_space_bin_size,(i+1)*knox_space_bin_size) \\\n# for i in range(knox_space_bin_count)]\n#print(knox_space_bins)\n#knox_time_bin_size = 3\n#knox_time_bin_count = 7\n#knox_time_bins = [(i*knox_time_bin_size,(i+1)*knox_time_bin_size) \\\n# for i in range(knox_time_bin_count)]\n#print(knox_time_bins)\n\n\n\nresult_info_header = [\n \"dataset\", \n \"event_types\",\n \"cell_width\", \n \"eval_date\", \n \"train_len\", \n \"test_len\", \n \"coverage_rate\", \n \"test_events\", \n \"hit_count\", \n \"hit_pct\", \n \"model\", \n \"rand_seed\", \n \"rhs_bandwidth\", \n \"phs_time_unit\", \n \"phs_time_band\", \n \"phs_dist_unit\", \n \"phs_dist_band\", \n \"phs_weight\", \n ]\n\n\n\nprint(\"...declared parameters.\")\n\n\n\ndate_today = datetime.date.today()\ndate_today_str = getSixDigitDate(date_today)\n\n# Create csv file\nout_csv_fname = \"results_{}_{}_{}_{}_{}.csv\".format(date_today_str, dataset_name, earliest_test_date_str, test_date_range, test_date_step)\nout_csv_full_path = os.path.join(datadir, out_csv_fname)\n\n\n\nwith open(out_csv_full_path, \"w\") as csvf:\n writer = csv.writer(csvf, delimiter=\",\", lineterminator=\"\\n\")\n writer.writerow(result_info_header)\n \n \n \n \n # PARAM:crime type\n for crime_type_set in crime_type_set_sweep:\n \n crime_types_printable = \"_\".join(sorted(crime_type_set))\n \n ### OBTAIN FULL DATA\n print(\"Obtaining full data set and region...\")\n obtain_data_start_time = time.time()\n points_crime = chicago.load(chicago_file_path, crime_type_set, \n type=chicago_load_type)\n \n \n ### OBTAIN GRIDDED REGION\n \n # Obtain polygon shapely object for region of interest\n region_polygon = chicago.get_side(chicago_side)\n \n # Obtain data set within relevant region\n points_crime_region = open_cp.geometry.intersect_timed_points(points_crime, region_polygon)\n \n \n obtain_data_end_time = time.time()\n print(\"...obtained full data set and region.\")\n print(\"Time: {}\".format(obtain_data_end_time - obtain_data_start_time))\n \n \n \n \n # PARAM:cell width\n for cell_width in cell_width_sweep:\n \n \n print(\"Obtaining grid for region...\")\n obtain_reg_start_time = time.time()\n \n # Obtain grid with cells only overlaid on relevant region\n masked_grid_region = open_cp.geometry.mask_grid_by_intersection(region_polygon, open_cp.data.Grid(xsize=cell_width, ysize=cell_width, xoffset=0, yoffset=0))\n \n # Get a list/tuple of all cellcoords in the region\n cellcoordlist_region = getRegionCells(masked_grid_region)\n \n # Obtain number of cells in the grid that contain relevant geometry\n # (i.e., not the full rectangular grid, only relevant cells)\n num_cells_region = len(cellcoordlist_region)\n coverage_cell_index_map = dict([(c, int(num_cells_region * c)-1) for c in coverage_rate_sweep])\n \n obtain_reg_end_time = time.time()\n print(\"...obtained grid for region.\")\n print(\"Time: {}\".format(obtain_reg_end_time - obtain_reg_start_time))\n \n \n \n # Log of how long an experiment takes to run\n exp_times = []\n \n \n \n # PARAM: Start date\n for exp_date_index, start_test in enumerate(start_test_list):\n \n exp_start_time = time.time()\n \n if exp_date_index % 5 == 0:\n print(\"Running experiment {}/{}...\".format(exp_date_index, total_num_exp_dates))\n \n # Declare time ranges of training and testing data\n end_train = start_test\n start_train = generateEarlierDate(end_train, train_len)\n end_test = 
generateLaterDate(start_test, test_len)\n \n #multi-option for train and test data time ranges is not\n # implemented, but would be implemented here\n #start_train_sweep = [generateEarlierDate(end_train, train_len) for train_len in train_len_sweep]\n #end_test_sweep = [generateLaterDate(start_test, test_len) for test_len in test_len_sweep]\n \n \n ### SELECT TRAINING DATA\n \n # Get subset of data for training\n points_crime_region_train = getTimedPointsInTimeRange(points_crime_region, \n start_train, \n end_train)\n \n \n \n ### TESTING DATA, USED FOR EVALUATION\n # (Also used for Ideal model, which is why we create it here)\n \n # Obtain selection of data for testing\n points_crime_region_test = getTimedPointsInTimeRange(points_crime_region, \n start_test, \n end_test)\n # Count how many crimes there were in this test data set\n num_crimes_test = len(points_crime_region_test.timestamps)\n \n # Count the number of crimes per cell\n # This is used for evaluation and also for the \"ideal\" model\n cells_testcrime_ctr = countPointsPerCell(points_crime_region_test, \n masked_grid_region)\n \n \n \n \n # PARAM: model\n \n for model_name in models_to_run:\n \n args_to_use = []\n if model_name in [\"ideal\", \"random\"]:\n args_to_use.append(cellcoordlist_region)\n if model_name == \"ideal\":\n args_to_use.append(cells_testcrime_ctr)\n elif model_name in [\"naivecount\", \"rhs\", \"phs\"]:\n args_to_use.append(points_crime_region_train)\n args_to_use.append(masked_grid_region)\n if model_name==\"phs\":\n args_to_use.append(start_test)\n \n \n # PARAM: param sweep for specific model\n \n for param_combo_index, param_combo in enumerate(model_param_dict[model_name]):\n \n sorted_cells = runModelAndSortCells(model_name, args_to_use + list(param_combo))\n \n hit_rate_list = getHitRateList(sorted_cells, cells_testcrime_ctr)\n \n \n # PARAM: coverage\n \n for coverage_rate in coverage_rate_sweep:\n \n num_hits = hit_rate_list[coverage_cell_index_map[coverage_rate]]\n pct_hits = 0\n if num_crimes_test>0:\n pct_hits = num_hits / num_crimes_test\n \n \n \n result_info = [\n dataset_name, \n crime_types_printable, \n cell_width, \n start_test, \n train_len, \n test_len, \n coverage_rate, \n num_crimes_test, \n num_hits, \n pct_hits, \n model_name]\n \n if model_name == \"phs\":\n result_info += [\"\",\"\"]\n \n result_info += list(param_combo)\n \n while len(result_info)<18:\n result_info.append(\"\")\n \n writer.writerow(result_info)\n print(f\"Wrote results for date {start_test} model {model_name} paramset {param_combo_index}\")\n \n \n \n exp_times.append(time.time() - exp_start_time)\n \n \n exp_times_sorted = sorted(exp_times)\n exp_times_total = sum(exp_times)\n print(\"Total experiment time: {}\".format(exp_times_total))\n print(\"Average experiment time: {}\".format(exp_times_total/len(exp_times)))\n print(\"Min experiment time: {}\".format(exp_times_sorted[0]))\n print(\"Max experiment time: {}\".format(exp_times_sorted[-1]))\n\n\nprint(\"Total time: {}\".format(time.time() - init_start_time))\n\nsys.exit(0)\n"
] | [
[
"numpy.random.seed",
"matplotlib.colors.LinearSegmentedColormap",
"numpy.timedelta64",
"numpy.floor",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
miguelsousa/robothon | [
"f2ac88884e04a6e77f79c91e1709ab8c84f46043"
] | [
"GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/testing/noseclasses.py"
] | [
"# These classes implement a doctest runner plugin for nose.\n# Because this module imports nose directly, it should not\n# be used except by nosetester.py to avoid a general NumPy\n# dependency on nose.\n\nimport os\nimport doctest\n\nfrom nose.plugins import doctests as npd\nfrom nose.plugins.errorclass import ErrorClass, ErrorClassPlugin\nfrom nose.plugins.base import Plugin\nfrom nose.util import src, tolist\nimport numpy\nfrom nosetester import get_package_name\nimport inspect\n\n_doctest_ignore = ['generate_numpy_api.py', 'scons_support.py',\n 'setupscons.py', 'setup.py']\n\n# Some of the classes in this module begin with 'Numpy' to clearly distinguish\n# them from the plethora of very similar names from nose/unittest/doctest\n\n\n#-----------------------------------------------------------------------------\n# Modified version of the one in the stdlib, that fixes a python bug (doctests\n# not found in extension modules, http://bugs.python.org/issue3158)\nclass NumpyDocTestFinder(doctest.DocTestFinder):\n\n def _from_module(self, module, object):\n \"\"\"\n Return true if the given object is defined in the given\n module.\n \"\"\"\n if module is None:\n #print '_fm C1' # dbg\n return True\n elif inspect.isfunction(object):\n #print '_fm C2' # dbg\n return module.__dict__ is object.func_globals\n elif inspect.isbuiltin(object):\n #print '_fm C2-1' # dbg\n return module.__name__ == object.__module__\n elif inspect.isclass(object):\n #print '_fm C3' # dbg\n return module.__name__ == object.__module__\n elif inspect.ismethod(object):\n # This one may be a bug in cython that fails to correctly set the\n # __module__ attribute of methods, but since the same error is easy\n # to make by extension code writers, having this safety in place\n # isn't such a bad idea\n #print '_fm C3-1' # dbg\n return module.__name__ == object.im_class.__module__\n elif inspect.getmodule(object) is not None:\n #print '_fm C4' # dbg\n #print 'C4 mod',module,'obj',object # dbg\n return module is inspect.getmodule(object)\n elif hasattr(object, '__module__'):\n #print '_fm C5' # dbg\n return module.__name__ == object.__module__\n elif isinstance(object, property):\n #print '_fm C6' # dbg\n return True # [XX] no way not be sure.\n else:\n raise ValueError(\"object must be a class or function\")\n\n\n\n def _find(self, tests, obj, name, module, source_lines, globs, seen):\n \"\"\"\n Find tests for the given object and any contained objects, and\n add them to `tests`.\n \"\"\"\n\n doctest.DocTestFinder._find(self,tests, obj, name, module,\n source_lines, globs, seen)\n\n # Below we re-run pieces of the above method with manual modifications,\n # because the original code is buggy and fails to correctly identify\n # doctests in extension modules.\n\n # Local shorthands\n from inspect import isroutine, isclass, ismodule\n\n # Look for tests in a module's contained objects.\n if inspect.ismodule(obj) and self._recurse:\n for valname, val in obj.__dict__.items():\n valname1 = '%s.%s' % (name, valname)\n if ( (isroutine(val) or isclass(val))\n and self._from_module(module, val) ):\n\n self._find(tests, val, valname1, module, source_lines,\n globs, seen)\n\n\n # Look for tests in a class's contained objects.\n if inspect.isclass(obj) and self._recurse:\n #print 'RECURSE into class:',obj # dbg\n for valname, val in obj.__dict__.items():\n #valname1 = '%s.%s' % (name, valname) # dbg\n #print 'N',name,'VN:',valname,'val:',str(val)[:77] # dbg\n # Special handling for staticmethod/classmethod.\n if isinstance(val, 
staticmethod):\n val = getattr(obj, valname)\n if isinstance(val, classmethod):\n val = getattr(obj, valname).im_func\n\n # Recurse to methods, properties, and nested classes.\n if ((inspect.isfunction(val) or inspect.isclass(val) or\n inspect.ismethod(val) or\n isinstance(val, property)) and\n self._from_module(module, val)):\n valname = '%s.%s' % (name, valname)\n self._find(tests, val, valname, module, source_lines,\n globs, seen)\n\n\nclass NumpyDocTestCase(npd.DocTestCase):\n \"\"\"Proxy for DocTestCase: provides an address() method that\n returns the correct address for the doctest case. Otherwise\n acts as a proxy to the test case. To provide hints for address(),\n an obj may also be passed -- this will be used as the test object\n for purposes of determining the test address, if it is provided.\n \"\"\"\n\n # doctests loaded via find(obj) omit the module name\n # so we need to override id, __repr__ and shortDescription\n # bonus: this will squash a 2.3 vs 2.4 incompatiblity\n def id(self):\n name = self._dt_test.name\n filename = self._dt_test.filename\n if filename is not None:\n pk = getpackage(filename)\n if pk is not None and not name.startswith(pk):\n name = \"%s.%s\" % (pk, name)\n return name\n\n\n# second-chance checker; if the default comparison doesn't\n# pass, then see if the expected output string contains flags that\n# tell us to ignore the output\nclass NumpyOutputChecker(doctest.OutputChecker):\n def check_output(self, want, got, optionflags):\n ret = doctest.OutputChecker.check_output(self, want, got,\n optionflags)\n if not ret:\n if \"#random\" in want:\n return True\n\n return ret\n\n\n# Subclass nose.plugins.doctests.DocTestCase to work around a bug in\n# its constructor that blocks non-default arguments from being passed\n# down into doctest.DocTestCase\nclass NumpyDocTestCase(npd.DocTestCase):\n def __init__(self, test, optionflags=0, setUp=None, tearDown=None,\n checker=None, obj=None, result_var='_'):\n self._result_var = result_var\n self._nose_obj = obj\n doctest.DocTestCase.__init__(self, test,\n optionflags=optionflags,\n setUp=setUp, tearDown=tearDown,\n checker=checker)\n\n\nprint_state = numpy.get_printoptions()\n\nclass NumpyDoctest(npd.Doctest):\n name = 'numpydoctest' # call nosetests with --with-numpydoctest\n enabled = True\n\n def options(self, parser, env=os.environ):\n Plugin.options(self, parser, env)\n\n def configure(self, options, config):\n Plugin.configure(self, options, config)\n self.doctest_tests = True\n# self.extension = tolist(options.doctestExtension)\n self.finder = NumpyDocTestFinder()\n self.parser = doctest.DocTestParser()\n\n # Turn on whitespace normalization, set a minimal execution context\n # for doctests, implement a \"#random\" directive to allow executing a\n # command while ignoring its output.\n def loadTestsFromModule(self, module):\n if not self.matches(module.__name__):\n npd.log.debug(\"Doctest doesn't want module %s\", module)\n return\n try:\n tests = self.finder.find(module)\n except AttributeError:\n # nose allows module.__test__ = False; doctest does not and\n # throws AttributeError\n return\n if not tests:\n return\n tests.sort()\n module_file = src(module.__file__)\n for test in tests:\n if not test.examples:\n continue\n if not test.filename:\n test.filename = module_file\n\n pkg_name = get_package_name(os.path.dirname(test.filename))\n\n # Each doctest should execute in an environment equivalent to\n # starting Python and executing \"import numpy as np\", and,\n # for SciPy packages, an additional 
import of the local\n # package (so that scipy.linalg.basic.py's doctests have an\n # implicit \"from scipy import linalg\" as well.\n #\n # Note: __file__ allows the doctest in NoseTester to run\n # without producing an error\n test.globs = {'__builtins__':__builtins__,\n '__file__':'__main__',\n '__name__':'__main__',\n 'np':numpy}\n\n # add appropriate scipy import for SciPy tests\n if 'scipy' in pkg_name:\n p = pkg_name.split('.')\n p1 = '.'.join(p[:-1])\n p2 = p[-1]\n test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])\n\n # always use whitespace and ellipsis options\n optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS\n\n yield NumpyDocTestCase(test,\n optionflags=optionflags,\n checker=NumpyOutputChecker())\n\n\n # Add an afterContext method to nose.plugins.doctests.Doctest in order\n # to restore print options to the original state after each doctest\n def afterContext(self):\n numpy.set_printoptions(**print_state)\n\n\n # Ignore NumPy-specific build files that shouldn't be searched for tests\n def wantFile(self, file):\n bn = os.path.basename(file)\n if bn in _doctest_ignore:\n return False\n return npd.Doctest.wantFile(self, file)\n\n\nclass KnownFailureTest(Exception):\n '''Raise this exception to mark a test as a known failing test.'''\n pass\n\n\nclass KnownFailure(ErrorClassPlugin):\n '''Plugin that installs a KNOWNFAIL error class for the \n KnownFailureClass exception. When KnownFailureTest is raised,\n the exception will be logged in the knownfail attribute of the\n result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the\n exception will not be counted as an error or failure.'''\n enabled = True\n knownfail = ErrorClass(KnownFailureTest,\n label='KNOWNFAIL',\n isfailure=False)\n\n def options(self, parser, env=os.environ):\n env_opt = 'NOSE_WITHOUT_KNOWNFAIL'\n parser.add_option('--no-knownfail', action='store_true',\n dest='noKnownFail', default=env.get(env_opt, False),\n help='Disable special handling of KnownFailureTest '\n 'exceptions')\n\n def configure(self, options, conf):\n if not self.can_configure:\n return\n self.conf = conf\n disable = getattr(options, 'noKnownFail', False)\n if disable:\n self.enabled = False\n"
] | [
[
"numpy.set_printoptions",
"numpy.get_printoptions"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jz1248/fpn.pytorch | [
"5fa7a97d9ff37caf2d37f16faf25b21db11be546"
] | [
"lib/model/rpn/proposal_layer_fpn.py"
] | [
"# --------------------------------------------------------\n# Faster R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick and Sean Bell\n# --------------------------------------------------------\n# --------------------------------------------------------\n# Reorganized and modified by Jianwei Yang and Jiasen Lu\n# --------------------------------------------------------\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport math\nimport yaml\nfrom model.utils.config import cfg\nfrom generate_anchors import generate_anchors, generate_anchors_all_pyramids\nfrom bbox_transform import bbox_transform_inv, clip_boxes, clip_boxes_batch\nfrom model.nms.nms_wrapper import nms\n\nimport pdb\n\nDEBUG = False\n\nclass _ProposalLayer_FPN(nn.Module):\n \"\"\"\n Outputs object detection proposals by applying estimated bounding-box\n transformations to a set of regular boxes (called \"anchors\").\n \"\"\"\n\n def __init__(self, feat_stride, scales, ratios):\n super(_ProposalLayer_FPN, self).__init__()\n self._anchor_ratios = ratios\n self._feat_stride = feat_stride\n self._fpn_scales = np.array(cfg.FPN_ANCHOR_SCALES)\n self._fpn_feature_strides = np.array(cfg.FPN_FEAT_STRIDES)\n self._fpn_anchor_stride = cfg.FPN_ANCHOR_STRIDE\n # self._anchors = torch.from_numpy(generate_anchors_all_pyramids(self._fpn_scales, ratios, self._fpn_feature_strides, fpn_anchor_stride))\n # self._num_anchors = self._anchors.size(0)\n\n def forward(self, input):\n\n # Algorithm:\n #\n # for each (H, W) location i\n # generate A anchor boxes centered on cell i\n # apply predicted bbox deltas at cell i to each of the A anchors\n # clip predicted boxes to image\n # remove predicted boxes with either height or width < threshold\n # sort all (proposal, score) pairs by score from highest to lowest\n # take top pre_nms_topN proposals before NMS\n # apply NMS with threshold 0.7 to remaining proposals\n # take after_nms_topN proposals after NMS\n # return the top proposals (-> RoIs top, scores top)\n\n\n # the first set of _num_anchors channels are bg probs\n # the second set are the fg probs\n scores = input[0][:, :, 1] # batch_size x num_rois x 1\n bbox_deltas = input[1] # batch_size x num_rois x 4\n im_info = input[2]\n cfg_key = input[3]\n feat_shapes = input[4] \n\n pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N\n post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N\n nms_thresh = cfg[cfg_key].RPN_NMS_THRESH\n min_size = cfg[cfg_key].RPN_MIN_SIZE\n\n batch_size = bbox_deltas.size(0)\n\n anchors = torch.from_numpy(generate_anchors_all_pyramids(self._fpn_scales, self._anchor_ratios, \n feat_shapes, self._fpn_feature_strides, self._fpn_anchor_stride)).type_as(scores)\n num_anchors = anchors.size(0)\n\n anchors = anchors.view(1, num_anchors, 4).expand(batch_size, num_anchors, 4)\n\n # Convert anchors into proposals via bbox transformations\n proposals = bbox_transform_inv(anchors, bbox_deltas, batch_size)\n\n # 2. clip predicted boxes to image\n proposals = clip_boxes(proposals, im_info, batch_size)\n # keep_idx = self._filter_boxes(proposals, min_size).squeeze().long().nonzero().squeeze()\n \n scores_keep = scores\n proposals_keep = proposals\n\n _, order = torch.sort(scores_keep, 1, True)\n\n output = scores.new(batch_size, post_nms_topN, 5).zero_()\n for i in range(batch_size):\n # # 3. 
remove predicted boxes with either height or width < threshold\n # # (NOTE: convert min_size to input image scale stored in im_info[2])\n proposals_single = proposals_keep[i]\n scores_single = scores_keep[i]\n\n # # 4. sort all (proposal, score) pairs by score from highest to lowest\n # # 5. take top pre_nms_topN (e.g. 6000)\n order_single = order[i]\n\n if pre_nms_topN > 0 and pre_nms_topN < scores_keep.numel():\n order_single = order_single[:pre_nms_topN]\n\n proposals_single = proposals_single[order_single, :]\n scores_single = scores_single[order_single].view(-1,1)\n\n # 6. apply nms (e.g. threshold = 0.7)\n # 7. take after_nms_topN (e.g. 300)\n # 8. return the top proposals (-> RoIs top)\n\n keep_idx_i = nms(torch.cat((proposals_single, scores_single), 1), nms_thresh)\n keep_idx_i = keep_idx_i.long().view(-1)\n\n if post_nms_topN > 0:\n keep_idx_i = keep_idx_i[:post_nms_topN]\n proposals_single = proposals_single[keep_idx_i, :]\n scores_single = scores_single[keep_idx_i, :]\n\n # padding 0 at the end.\n num_proposal = proposals_single.size(0)\n output[i,:,0] = i\n # output[i,:num_proposal,1:] = proposals_single\n if num_proposal == 0:\n num_proposal = num_proposal + 1\n proposals_single = torch.zeros((1, 4))\n output[i, :num_proposal, 1:] = proposals_single\n\n return output\n\n def backward(self, top, propagate_down, bottom):\n \"\"\"This layer does not propagate gradients.\"\"\"\n pass\n\n def reshape(self, bottom, top):\n \"\"\"Reshaping happens during the call to forward.\"\"\"\n pass\n\n def _filter_boxes(self, boxes, min_size):\n \"\"\"Remove all boxes with any side smaller than min_size.\"\"\"\n ws = boxes[:, :, 2] - boxes[:, :, 0] + 1\n hs = boxes[:, :, 3] - boxes[:, :, 1] + 1\n keep = ((ws >= min_size) & (hs >= min_size))\n return keep\n"
] | [
[
"numpy.array",
"torch.zeros",
"torch.sort",
"torch.cat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
trisongz/tpubar | [
"adf44909a9a1afb30aeb38664200f372a1d7f34d"
] | [
"tpubar/network.py"
] | [
"\nimport os\nimport sys\nimport re\nimport calendar\nimport collections\nimport simdjson as json\nimport time\n\nimport google.auth\n\nfrom datetime import datetime\nfrom google.cloud import monitoring_v3\nfrom google.protobuf.json_format import MessageToJson\nfrom tpubar import env\n\nif env['profiler']:\n from tensorflow.python.framework import errors\n\nparser = json.Parser()\n\ndef flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef pb_to_json(pb):\n \"\"\"Converts arbitrary protobuf messages into JSON\"\"\"\n return MessageToJson(pb)\n\n\ndef pb_to_dict(pb):\n \"\"\"Converts arbitrary protobuf messages into python dicts\"\"\"\n return parser.parse(pb_to_json(pb)).as_dict()\n\ndef utc():\n d = datetime.utcnow()\n return calendar.timegm(d.utctimetuple())\n\n\n\nmetrics = {\n 'vm_cpu': \"compute.googleapis.com/instance/cpu/utilization\",\n 'vm_net_sent': \"compute.googleapis.com/instance/network/sent_bytes_count\",\n 'vm_net_recv': \"compute.googleapis.com/instance/network/received_bytes_count\",\n 'vm_disk_write': \"compute.googleapis.com/instance/disk/write_bytes_count\",\n 'vm_disk_read': \"compute.googleapis.com/instance/disk/read_bytes_count\",\n 'tpu_core_mxu': \"tpu.googleapis.com/tpu/mxu/utilization\",\n 'tpu_container_cpu': \"tpu.googleapis.com/container/cpu/utilization\",\n 'tpu_container_mem': \"tpu.googleapis.com/container/memory/usage\",\n 'tpu_host_cpu': \"tpu.googleapis.com/cpu/utilization\",\n 'tpu_host_mem': \"tpu.googleapis.com/memory/usage\",\n 'tpu_host_net_sent': \"tpu.googleapis.com/network/sent_bytes_count\",\n 'tpu_host_net_recv': \"tpu.googleapis.com/network/received_bytes_count\",\n}\n\ndef gce_series_info(series):\n h = {k: pb_to_dict(getattr(series, k)) for k in \"metric resource metadata\".split()}\n h = {k: v for k, v in h.items() if len(v) > 0}\n return flatten(h)\n\n\ndef gce_instance_labeler(series, **options):\n if options.get('short'):\n return series.metric.labels['instance_name']\n r = []\n r += [k+'/'+series.resource.labels[k] for k in 'project_id zone'.split()]\n r += [k+'/'+series.metric.labels[k] for k in 'instance_name'.split()]\n return '/'.join(r)\n\n\ndef gce_instance_disk_labeler(series, **options):\n if options.get('short'):\n return '/'.join([series.metric.labels[k] for k in 'instance_name device_name'.split()])\n r = []\n r += [k+'/'+series.resource.labels[k] for k in 'project_id zone'.split()]\n r += [k+'/'+series.metric.labels[k] for k in 'instance_name device_name'.split()]\n return '/'.join(r)\n\n\ndef gce_series_getattrs(series, attrs, *, short=False):\n if isinstance(attrs, str):\n attrs = attrs.split()\n if short:\n r = [series.resource.labels[k] for k in attrs if len(series.resource.labels[k]) > 0]\n r += [series.metric.labels[k] for k in attrs if len(series.metric.labels[k]) > 0]\n else:\n r = [k+'/'+series.resource.labels[k] for k in attrs if len(series.resource.labels[k]) > 0]\n r += [k+'/'+series.metric.labels[k] for k in attrs if len(series.metric.labels[k]) > 0]\n return '/'.join(r)\n\n\ndef gce_tpu_labeler(series, **options):\n if options.get('short'):\n return gce_series_getattrs(series, 'node_id worker_id core container_name', short=True)\n return gce_series_getattrs(series, 'project_id zone node_id worker_id core container_name')\n\n\nlabelers = {\n 
\"compute.googleapis.com/instance/network/sent_bytes_count\":\n gce_instance_labeler,\n \"compute.googleapis.com/instance/network/received_bytes_count\":\n gce_instance_labeler,\n \"compute.googleapis.com/instance/cpu/utilization\":\n gce_instance_labeler,\n \"compute.googleapis.com/instance/disk/write_bytes_count\":\n gce_instance_disk_labeler,\n \"compute.googleapis.com/instance/disk/read_bytes_count\":\n gce_instance_disk_labeler,\n \"tpu.googleapis.com/tpu/mxu/utilization\":\n gce_tpu_labeler,\n \"tpu.googleapis.com/container/memory/usage\":\n gce_tpu_labeler,\n \"tpu.googleapis.com/cpu/utilization\":\n gce_tpu_labeler,\n \"tpu.googleapis.com/memory/usage\":\n gce_tpu_labeler,\n \"tpu.googleapis.com/network/sent_bytes_count\":\n gce_tpu_labeler,\n \"tpu.googleapis.com/network/received_bytes_count\":\n gce_tpu_labeler,\n}\n\n\ndef get_time_series_label(ts, **options):\n return labelers[ts.metric.type](ts, **options)\n\ndef get_default_project_id():\n _, project_id = google.auth.default()\n return project_id\n\nclass TimeSeriesMonitor:\n def __init__(self, project_id=None, client=None):\n if project_id is None:\n project_id = get_default_project_id()\n elif project_id in ['tfork', 'tensorfork']:\n project_id = 'gpt-2-15b-poetry'\n self.project_id = project_id\n if client is None:\n client = monitoring_v3.MetricServiceClient()\n self.client = client\n\n def __call__(self, *args, **kwargs):\n return self.get(*args, **kwargs)\n\n def get(self, metric=\"tpu_mxu\", node_id=None, interval=None, filters=None, raw=False, when=None, full_names=False):\n if when is None:\n when = utc()\n\n if '/' not in metric:\n metric = metrics[metric]\n\n if interval is None:\n now = time.time()\n seconds = int(now)\n nanos = int((now - seconds) * 10 ** 9)\n interval = monitoring_v3.TimeInterval(\n {\n \"end_time\": {\"seconds\": seconds, \"nanos\": nanos},\n \"start_time\": {\"seconds\": (seconds - 1200), \"nanos\": nanos},\n }\n )\n\n if filters is None:\n filters = []\n filters = filters[:]\n if node_id is not None:\n filters += [['resource.labels.node_id', node_id]]\n filters += [['metric.type', metric]]\n filters = ' AND '.join(['{} = {}'.format(k, json.dumps(v)) for k, v in filters])\n\n results = self.client.list_time_series(\n request={\n \"name\": \"projects/{project_id}\".format(project_id=self.project_id),\n \"filter\": filters,\n \"interval\": interval,\n \"view\": monitoring_v3.ListTimeSeriesRequest.TimeSeriesView.FULL,\n }\n )\n if raw:\n return results\n points = collections.defaultdict(lambda: [])\n for timeSeries in results:\n key = get_time_series_label(timeSeries, short=not full_names)\n for point in timeSeries.points:\n point_utc = point.interval.start_time.timestamp()\n seconds_ago = int(when - point_utc)\n if timeSeries.value_type == 2: # what's the correct way to get INT64 here?\n value = point.value.int64_value\n else:\n value = point.value.double_value\n points[key].append([seconds_ago, value])\n points = dict(points)\n return points\n\n\ndef get_workers_list(cluster_resolver):\n worker_job_name = 'worker'\n cluster_spec = cluster_resolver.cluster_spec()\n if not cluster_spec:\n raise errors.UnavailableError(\n 'None', 'None',\n 'Cluster spec not found, your client must run in GCE environment.')\n task_indices = cluster_spec.task_indices(worker_job_name)\n workers_list = [\n cluster_spec.task_address(worker_job_name, i).replace(':8470', ':8466')\n for i in task_indices\n ]\n return ','.join(workers_list)\n\n\ndef parse_tpu_data(tpu):\n data = tpu['name'].split('/')\n tpu_name, tpu_zone 
= data[-1], data[-3]\n tpu_config = {\n 'name': tpu_name,\n 'mesh': tpu['acceleratorType'],\n 'region': tpu_zone,\n 'master': tpu['ipAddress'] if 'ipAddress' in tpu else None,\n }\n return tpu_config\n\ndef tpunicorn_query(project):\n if project in ['tfork', 'tensorfork']:\n project = 'gpt-2-15b-poetry'\n config = {'project': project}\n if not env['colab']:\n import tpunicorn\n tpu_data = None\n for zone in ['europe-west4-a', 'us-central1-f', 'us-central1-a', 'us-central1-b', 'us-central1-c', 'asia-east1-c']:\n try:\n tpu_data = tpunicorn.tpu.get_tpus(zone=zone, project=project)\n if tpu_data:\n break\n\n except:\n continue\n \n selected_tpu = None\n tpu_name = os.environ.get('TPU_NAME', None)\n if not tpu_data:\n print('Failed to find a TPU - Ensure you have the correct GOOGLE_APPLICATION_CREDENTIALS set for your project')\n sys.exit()\n if len(tpu_data) > 1:\n if tpu_name:\n for x, tpu in enumerate(tpu_data):\n if tpu_name in tpu['name']:\n selected_tpu = tpu_data[x]\n else:\n for x, tpu in enumerate(tpu_data):\n print(f'[{x}] - {tpu}')\n \n tpu_idx = input('Select TPU')\n selected_tpu = tpu_data[tpu_idx]\n \n else:\n selected_tpu = tpu_data[0]\n\n tpu_config = parse_tpu_data(selected_tpu)\n config.update(tpu_config)\n \n else:\n config['master'] = os.environ['TPU_NAME']\n config['name'] = os.environ['TPU_NAME']\n config['region'] = 'us'\n config['mesh'] = 'v2-8'\n return config"
] | [
[
"tensorflow.python.framework.errors.UnavailableError"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
ss-sathishsampath/Supervised-Learning-Algorithms | [
"e61df170a9baadbe72e5f268f2033445bc716a4d"
] | [
"executables/Boosting.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n@author : Sathish Sampath([email protected])\n\n\"\"\"\n\n\nimport sklearn.model_selection as ms\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom helpers import dtclf_pruned\nimport pandas as pd\nfrom helpers import basicResults,makeTimingCurve,iterationLC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\n\n\n\nadult = pd.read_hdf('datasets.hdf','adult') \nadultX = adult.drop('income',1).copy().values\nadultY = adult['income'].copy().values\n\nalphas = [-1,-1e-3,-(1e-3)*10**-0.5, -1e-2, -(1e-2)*10**-0.5,-1e-1,-(1e-1)*10**-0.5, 0, (1e-1)*10**-0.5,1e-1,(1e-2)*10**-0.5,1e-2,(1e-3)*10**-0.5,1e-3]\n\n\nadult_trgX, adult_tstX, adult_trgY, adult_tstY = ms.train_test_split(adultX, adultY, test_size=0.3, random_state=0,stratify=adultY) \n#madelon_trgX, madelon_tstX, madelon_trgY, madelon_tstY = ms.train_test_split(madelonX, madelonY, test_size=0.3, random_state=0,stratify=madelonY) \n\n\n\n#madelon_base = dtclf_pruned(criterion='gini',class_weight='balanced',random_state=55) \nadult_base = dtclf_pruned(criterion='entropy',class_weight='balanced',random_state=55)\nOF_base = dtclf_pruned(criterion='gini',class_weight='balanced',random_state=55) \n#paramsA= {'Boost__n_estimators':[1,2,5,10,20,30,40,50],'Boost__learning_rate':[(2**x)/100 for x in range(8)]+[1]}\nparamsA= {'Boost__n_estimators':[1,2,5,10,20,30,45,60,80,100],\n 'Boost__base_estimator__alpha':alphas}\n#paramsM = {'Boost__n_estimators':[1,2,5,10,20,30,40,50,60,70,80,90,100],\n# 'Boost__learning_rate':[(2**x)/100 for x in range(8)]+[1]}\n\n#paramsM = {'Boost__n_estimators':[1,2,5,10,20,30,45,60,80,100],\n# 'Boost__base_estimator__alpha':alphas}\n \n \n#madelon_booster = AdaBoostClassifier(algorithm='SAMME',learning_rate=1,base_estimator=madelon_base,random_state=55)\nadult_booster = AdaBoostClassifier(algorithm='SAMME',learning_rate=1,base_estimator=adult_base,random_state=55)\nOF_booster = AdaBoostClassifier(algorithm='SAMME',learning_rate=1,base_estimator=OF_base,random_state=55)\n\n#pipeM = Pipeline([('Scale',StandardScaler()),\n# ('Cull1',SelectFromModel(RandomForestClassifier(random_state=1),threshold='median')),\n# ('Cull2',SelectFromModel(RandomForestClassifier(random_state=2),threshold='median')),\n# ('Cull3',SelectFromModel(RandomForestClassifier(random_state=3),threshold='median')),\n# ('Cull4',SelectFromModel(RandomForestClassifier(random_state=4),threshold='median')),\n# ('Boost',madelon_booster)])\n\npipeA = Pipeline([('Scale',StandardScaler()), \n ('Boost',adult_booster)])\n\n#\n#madelon_clf = basicResults(pipeM,madelon_trgX,madelon_trgY,madelon_tstX,madelon_tstY,paramsM,'Boost','madelon') \nadult_clf = basicResults(pipeA,adult_trgX,adult_trgY,adult_tstX,adult_tstY,paramsA,'Boost','adult') \n\n#\n#\n#madelon_final_params = {'n_estimators': 20, 'learning_rate': 0.02}\n#adult_final_params = {'n_estimators': 10, 'learning_rate': 1}\n#OF_params = {'learning_rate':1}\n\n#madelon_final_params = madelon_clf.best_params_\nadult_final_params = adult_clf.best_params_\nOF_params = {'Boost__base_estimator__alpha':-1, 
'Boost__n_estimators':50}\n\n##\n#pipeM.set_params(**madelon_final_params)\npipeA.set_params(**adult_final_params)\n#makeTimingCurve(madelonX,madelonY,pipeM,'Boost','madelon')\nmakeTimingCurve(adultX,adultY,pipeA,'Boost','adult')\n#\n#pipeM.set_params(**madelon_final_params)\n#iterationLC(pipeM,madelon_trgX,madelon_trgY,madelon_tstX,madelon_tstY,{'Boost__n_estimators':[1,2,5,10,20,30,40,50,60,70,80,90,100]},'Boost','madelon') \npipeA.set_params(**adult_final_params)\niterationLC(pipeA,adult_trgX,adult_trgY,adult_tstX,adult_tstY,{'Boost__n_estimators':[1,2,5,10,20,30,40,50]},'Boost','adult') \n#pipeM.set_params(**OF_params)\n#iterationLC(pipeM,madelon_trgX,madelon_trgY,madelon_tstX,madelon_tstY,{'Boost__n_estimators':[1,2,5,10,20,30,40,50,60,70,80,90,100]},'Boost_OF','madelon') \npipeA.set_params(**OF_params)\niterationLC(pipeA,adult_trgX,adult_trgY,adult_tstX,adult_tstY,{'Boost__n_estimators':[1,2,5,10,20,30,40,50]},'Boost_OF','adult') \n\n \n\n\nbiodeg = pd.read_hdf('datasets.hdf','biodeg') \nbiodegX = biodeg.drop('clas',1).copy().values\nbiodegY = biodeg['clas'].copy().values\n\nalphas = [-1,-1e-3,-(1e-3)*10**-0.5, -1e-2, -(1e-2)*10**-0.5,-1e-1,-(1e-1)*10**-0.5, 0, (1e-1)*10**-0.5,1e-1,(1e-2)*10**-0.5,1e-2,(1e-3)*10**-0.5,1e-3]\n\n\nbiodeg_trgX, biodeg_tstX, biodeg_trgY, biodeg_tstY = ms.train_test_split(biodegX, biodegY, test_size=0.3, random_state=0,stratify=biodegY) \nbiodeg_base = dtclf_pruned(criterion='entropy',class_weight='balanced',random_state=55)\nOF_base = dtclf_pruned(criterion='gini',class_weight='balanced',random_state=55) \nparamsB= {'Boost__n_estimators':[1,2,5,10,20,30,45,60,80,100],\n 'Boost__base_estimator__alpha':alphas}\n\nbiodeg_booster = AdaBoostClassifier(algorithm='SAMME',learning_rate=1,base_estimator=biodeg_base,random_state=55)\nOF_booster = AdaBoostClassifier(algorithm='SAMME',learning_rate=1,base_estimator=OF_base,random_state=55)\n\npipeB = Pipeline([('Scale',StandardScaler()), \n ('Boost',biodeg_booster)])\n\nbiodeg_clf = basicResults(pipeB,biodeg_trgX,biodeg_trgY,biodeg_tstX,biodeg_tstY,paramsB,'Boost','biodeg') \n\nbiodeg_final_params = biodeg_clf.best_params_\nOF_params = {'Boost__base_estimator__alpha':-1, 'Boost__n_estimators':50}\n\npipeB.set_params(**biodeg_final_params)\n\nmakeTimingCurve(biodegX,biodegY,pipeB,'Boost','biodeg')\npipeB.set_params(**biodeg_final_params)\niterationLC(pipeB,biodeg_trgX,biodeg_trgY,biodeg_tstX,biodeg_tstY,{'Boost__n_estimators':[1,2,5,10,20,30,40,50]},'Boost','biodeg') \n\npipeB.set_params(**OF_params)\niterationLC(pipeB,biodeg_trgX,biodeg_trgY,biodeg_tstX,biodeg_tstY,{'Boost__n_estimators':[1,2,5,10,20,30,40,50]},'Boost_OF','biodeg') \n"
] | [
[
"sklearn.preprocessing.StandardScaler",
"pandas.read_hdf",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Grade-pan/python-base | [
"bcb69052a6e9299dee45b91468e30b1305d82db3"
] | [
"Numpy/test5.py"
] | [
"import numpy as np\n\nprint(np.log(np.PZERO))\n# 负零是有限数。\nprint(np.NZERO)\nprint(np.PZERO)\n# 欧拉常数\nprint(np.e)\nprint(6 / 5)\n"
] | [
[
"numpy.log"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
oucxlw/ConferencingSpeech2021 | [
"617df8116c0510b2addadb1de374d7b50eea4f2b"
] | [
"simulation/mix_wav.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\" simulate train and dev set, use multiprocessing.Pool to accelerate the pipline;\nit's not totally random\n@author: [email protected]\n [email protected]\n\"\"\"\n\n\nimport numpy as np\nimport math\nimport soundfile as sf\nimport scipy.signal as sps\nimport librosa\nimport os\nimport warnings\nimport sys\neps = np.finfo(np.float32).eps\nimport argparse\nimport multiprocessing as mp\nimport traceback\ndef audioread(path, fs=16000):\n '''\n args\n path: wav path\n fs: sample rate\n return\n wave_data: L x C or L\n '''\n wave_data, sr = sf.read(path)\n if sr != fs:\n if len(wave_data.shape) != 1:\n wave_data = wave_data.transpose((1, 0))\n wave_data = librosa.resample(wave_data, sr, fs)\n if len(wave_data.shape) != 1:\n wave_data = wave_data.transpose((1, 0))\n return wave_data\n\ndef get_firstchannel_read(path, fs=16000):\n '''\n args\n path: wav path\n fs: sample rate\n return\n wave_data: L\n '''\n wave_data, sr = sf.read(path)\n if sr != fs:\n if len(wave_data.shape) != 1:\n wave_data = wave_data.transpose((1, 0))\n wave_data = librosa.resample(wave_data, sr, fs)\n if len(wave_data.shape) != 1:\n wave_data = wave_data.transpose((1, 0))\n if len(wave_data.shape) > 1:\n wave_data = wave_data[:, 0]\n return wave_data\n\ndef clip_data(data, start, segment_length):\n '''\n according the start point and segment_length to split the data\n args:\n data: numpy.array\n start: -2, -1, [0,...., L - 1]\n segment_length: int\n return:\n tgt: numpy.array\n '''\n tgt = np.zeros(segment_length)\n data_len = data.shape[0]\n if start == -2:\n \"\"\"\n this means segment_length // 4 < data_len < segment_length // 2\n padding to A_A_A\n \"\"\"\n if data_len < segment_length//3:\n data = np.pad(data, [0, segment_length//3 - data_len], 'constant')\n tgt[:segment_length//3] += data\n st = segment_length//3\n tgt[st:st+data.shape[0]] += data\n st = segment_length//3 * 2\n tgt[st:st+data.shape[0]] += data\n \n else:\n \"\"\"\n padding to A_A\n \"\"\"\n # st = (segment_length//2 - data_len) % 101\n # tgt[st:st+data_len] += data\n # st = segment_length//2 + (segment_length - data_len) % 173\n # tgt[st:st+data_len] += data\n data = np.pad(data, [0, segment_length//2 - data_len], 'constant')\n tgt[:segment_length//2] += data\n st = segment_length//2\n tgt[st:st+data.shape[0]] += data\n \n elif start == -1:\n '''\n this means segment_length < data_len*2\n padding to A_A\n '''\n if data_len % 4 == 0:\n tgt[:data_len] += data\n tgt[data_len:] += data[:segment_length-data_len]\n elif data_len % 4 == 1:\n tgt[:data_len] += data\n elif data_len % 4 == 2:\n tgt[-data_len:] += data\n elif data_len % 4 == 3:\n tgt[(segment_length-data_len)//2:(segment_length-data_len)//2+data_len] += data\n \n else:\n tgt += data[start:start+segment_length]\n \n return tgt\n\ndef rms(data):\n \"\"\"\n calc rms of wav\n \"\"\"\n energy = data ** 2\n max_e = np.max(energy)\n low_thres = max_e*(10**(-50/10)) # to filter lower than 50dB \n rms = np.mean(energy[energy>=low_thres])\n #rms = np.mean(energy)\n return rms\n\ndef snr_mix(clean, noise, snr):\n '''\n mix clean and noise according to snr\n '''\n clean_rms = rms(clean)\n clean_rms = np.maximum(clean_rms, eps)\n noise_rms = rms(noise)\n noise_rms = np.maximum(noise_rms, eps)\n k = math.sqrt(clean_rms / (10**(snr/10) * noise_rms))\n new_noise = noise * k\n return new_noise\n\ndef mix_noise(clean, noise, snr, channels=8):\n '''\n split/pad the noise data and then mix them according to snr\n '''\n clean_length = clean.shape[0]\n 
noise_length = noise.shape[0]\n st = 0 # choose the first point\n # padding the noise\n if clean_length > noise_length:\n # st = numpy.random.randint(clean_length + 1 - noise_length)\n noise_t = np.zeros([clean_length, channels])\n noise_t[st:st+noise_length] = noise\n noise = noise_t\n # split the noise\n elif clean_length < noise_length:\n # st = numpy.random.randint(noise_length + 1 - clean_length)\n noise = noise[st:st+clean_length]\n \n snr_noise = snr_mix(clean, noise, snr)\n return snr_noise\n\ndef add_reverb(cln_wav, rir_wav, channels=8, predelay=50,sample_rate=16000):\n \"\"\"\n add reverberation\n args:\n cln_wav: L\n rir_wav: L x C\n rir_wav is always [Lr, C] \n predelay is ms\n return:\n wav_tgt: L x C\n \"\"\"\n rir_len = rir_wav.shape[0]\n wav_tgt = np.zeros([channels, cln_wav.shape[0] + rir_len-1])\n dt = np.argmax(rir_wav, 0).min()\n et = dt+(predelay*sample_rate)//1000 \n et_rir = rir_wav[:et]\n wav_early_tgt = np.zeros([channels, cln_wav.shape[0] + et_rir.shape[0]-1])\n for i in range(channels):\n wav_tgt[i] = sps.oaconvolve(cln_wav, rir_wav[:, i]) \n wav_early_tgt[i] = sps.oaconvolve(cln_wav, et_rir[:, i]) \n # L x C\n wav_tgt = np.transpose(wav_tgt)\n wav_tgt = wav_tgt[:cln_wav.shape[0]] \n wav_early_tgt = np.transpose(wav_early_tgt)\n wav_early_tgt = wav_early_tgt[:cln_wav.shape[0]]\n return wav_tgt, wav_early_tgt\n\ndef get_one_spk_noise(clean, noise, snr, scale):\n \"\"\"\n mix clean and noise according to the snr and scale\n args:\n clean: numpy.array, L x C L is always segment_length\n noise: numpy.array, L' x C\n snr: float\n scale: float\n \"\"\"\n gen_noise = mix_noise(clean, noise, snr)\n noisy = clean + gen_noise\n\n max_amp = np.max(np.abs(noisy))\n max_amp = np.maximum(max_amp, eps)\n noisy_scale = 1. / max_amp * scale\n clean = clean * noisy_scale\n noisy = noisy * noisy_scale\n return noisy, clean, noisy_scale\n\ndef generate_data(clean_path, strat_time, noise_path, rir_path, snr, scale, segment_length=16000*4, channels=8):\n clean = get_firstchannel_read(clean_path)\n # chunk the clean wav\n clean = clip_data(clean, strat_time, segment_length)\n noise = get_firstchannel_read(noise_path)\n \n # add linear/circle rir\n rir = audioread(rir_path) \n\n L, C = rir.shape\n\n # linear array rir is [Lr, 16]\n if C%channels == 0 and C==2*channels:\n clean_rir = rir[:, :channels]\n noise_rir = rir[:, channels:]\n elif C==channels:\n warnings.warn(\"the clean'rir and noise's rir will be same\")\n clean_rir = rir \n noise_rir = rir\n # circle array rir is [Lr, 32]\n #elif C%channels == 0 and C%channels == 0:\n elif C%(channels*2) == 0:\n skip = C//channels//2\n clean_rir = rir[:, :C//2:skip] #every C//channels channels\n noise_rir = rir[:, C//2::skip] #every C//channels channels \n else:\n raise RuntimeError(\"Can not generate target channels data, please check data or parameters\")\n clean, clean_early = add_reverb(clean, clean_rir, channels=channels)\n noise,_ = add_reverb(noise, noise_rir, channels=channels)\n\n inputs, labels, noisy_scale = get_one_spk_noise(clean, noise, snr, scale)\n return inputs, labels, clean_early*noisy_scale\n\ndef preprocess_func(line, segment_length, result):\n try:\n path = line.strip()\n data = get_firstchannel_read(path)\n length = data.shape[0]\n\n if length < segment_length:\n if length * 2 < segment_length and length * 4 > segment_length:\n result.append('{} -2\\n'.format(path))\n elif length * 2 > segment_length:\n result.append('{} -1\\n'.format(path))\n else:\n sample_index = 0\n while sample_index + segment_length <= length:\n 
result.append('{} {}\\n'.format(path, sample_index))\n sample_index += segment_length\n if sample_index < length:\n result.append('{} {}\\n'.format(path, length - segment_length))\n\n except :\n traceback.print_exc()\n\ndef get_clean_chunk(clean_path, clean_chunk_path, sample_rate=16000, chunk=4, num_process=12):\n \n '''\n split the clean_wav every chunk second\n args:\n clean_path: \n format is /xxx/..../yyy.wav\n /xxy/..../zzz.wav\n /xxy/..../aaa.wav\n clean_chunk_path: \n format is /xxx/..../yyy.wav -2\n /xxy/..../zzz.wav -1\n /xxy/..../aaa.wav [0,1...L-1]\n '''\n lines = open(clean_path, 'r').readlines()\n\n pool = mp.Pool(num_process)\n mgr = mp.Manager()\n result = mgr.list()\n segment_length = int(sample_rate * chunk)\n\n for line in lines:\n pool.apply_async(\n preprocess_func,\n args=(line, segment_length, result)\n )\n pool.close()\n pool.join()\n wid = open(clean_chunk_path, 'w')\n for item in result:\n wid.write(item)\n wid.close()\n\ndef get_mix_config(clean_chunk_path, noise_path, rir_path, config_path, snr_range=[0,30], scale_range=[0.2,0.9]):\n '''\n generate config file\n format is: clean_path start_time noise_path rir_path snr scale\n '''\n \n clean_lines = open(clean_chunk_path, 'r').readlines()\n noise_lines = open(noise_path, 'r').readlines()\n rir_lines = open(rir_path, 'r').readlines()\n\n wid = open(config_path, 'w')\n noise_len = len(noise_lines)\n rir_len = len(rir_lines)\n\n idx = 0\n for line in clean_lines:\n clean_path = line.strip()\n noise_path = noise_lines[idx % noise_len].strip()\n rir_path = rir_lines[idx % rir_len].strip()\n snr = np.random.uniform(*snr_range) #snr range is [0, 30)\n scale = np.random.uniform(*scale_range) #scale range is [0.2, 0.9)\n wid.write(\"{} {} {} {} {}\\n\".format(clean_path, noise_path, rir_path, snr, scale))\n idx = idx + 1\n wid.close() \n\ndef mix_func(line, save_dir, chunk, sample_rate, result):\n try:\n segment_length = int(chunk * sample_rate)\n clean_path, start_time, noise_path, rir_path, snr, scale = line.split(' ')\n # L x C\n inputs, labels, noreverb_ref = generate_data(clean_path, int(start_time), noise_path, rir_path, \n float(snr), float(scale), segment_length) \n clean = os.path.basename(clean_path).replace('.wav', '')\n noise = os.path.basename(noise_path).replace('.wav', '')\n rir = os.path.basename(rir_path).replace('.wav', '')\n seg = '#'\n utt_id = clean + seg + noise + seg + rir + seg + start_time + seg + snr + seg + scale + '.wav'\n sf.write(os.path.join(save_dir, 'mix', utt_id), inputs, sample_rate)\n sf.write(os.path.join(save_dir, 'reverb_ref', utt_id), labels, sample_rate)\n sf.write(os.path.join(save_dir, 'noreverb_ref', utt_id), noreverb_ref, sample_rate)\n result.append(utt_id.replace('.wav', ''))\n except :\n traceback.print_exc()\n\ndef get_data(config_path, save_dir, chunk=4, sample_rate=16000, num_process=12):\n '''\n according to the config file to generate data and then save in save_dir\n '''\n lines = open(config_path, 'r')\n \n pool = mp.Pool(num_process)\n mgr = mp.Manager()\n result = mgr.list()\n\n for line in lines:\n line = line.strip()\n # multiprocessing\n pool.apply_async(\n mix_func,\n args=(line, save_dir, chunk, sample_rate, result)\n )\n pool.close()\n pool.join()\n scp_path = os.path.basename(config_path)\n wid = open(os.path.join(save_dir, scp_path), 'w')\n for item in result:\n wid.write(item + \"\\n\")\n wid.close()\n lines.close()\n\ndef main(args):\n clean_path = args.clean_wav_list \n clean_chunk_path = args.clean_wav_list+'.{}.duration'.format(args.chunk_len)\n\n 
noise_path = args.noise_wav_list \n rir_path = args.rir_wav_list \n config_path = args.mix_config_path\n \n save_dir = args.save_dir \n \n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n if not os.path.isdir(os.path.join(save_dir, 'mix')):\n os.mkdir(os.path.join(save_dir, 'mix'))\n if not os.path.isdir(os.path.join(save_dir, 'reverb_ref')):\n os.mkdir(os.path.join(save_dir, 'reverb_ref'))\n if not os.path.isdir(os.path.join(save_dir, 'noreverb_ref')):\n os.mkdir(os.path.join(save_dir, 'noreverb_ref'))\n if args.generate_config: \n #if not os.path.exists(clean_chunk_path):\n print('LOG: preparing clean start time')\n get_clean_chunk(clean_path, clean_chunk_path, chunk=args.chunk_len)\n\n print('LOG: preparing mix config')\n get_mix_config(clean_chunk_path, noise_path, rir_path, config_path)\n \n print('LOG: generating')\n get_data(config_path, save_dir, chunk=args.chunk_len)\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--clean_wav_list',\n type=str,\n default='clean.lst',\n help='the list of clean wav to read'\n ) \n \n parser.add_argument(\n '--noise_wav_list',\n type=str,\n default='noise.lst',\n help='the list of noise wav to read'\n ) \n\n parser.add_argument(\n '--rir_wav_list',\n type=str,\n default='rir.lst',\n help='the list of rir wav to read'\n ) \n \n parser.add_argument(\n '--mix_config_path',\n type=str,\n default='mix.config',\n help='the save path of config path to save'\n ) \n \n parser.add_argument(\n '--save_dir',\n type=str,\n default='generated_data',\n help='the dir to save generated_data'\n ) \n parser.add_argument(\n '--chunk_len',\n type=float,\n default=6,\n help='the length of one chunk sample'\n ) \n parser.add_argument(\n '--generate_config',\n type=str,\n default='True',\n help='generate mix config file or not '\n ) \n args = parser.parse_args()\n if args.generate_config == 'True' \\\n or args.generate_config == 'true' \\\n or args.generate_config == 't' \\\n or args.generate_config == 'T':\n args.generate_config = True\n else:\n args.generate_config = False\n main(args)\n"
] | [
[
"numpy.maximum",
"numpy.abs",
"numpy.pad",
"numpy.finfo",
"numpy.max",
"numpy.argmax",
"numpy.mean",
"numpy.transpose",
"numpy.random.uniform",
"numpy.zeros",
"scipy.signal.oaconvolve"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.9",
"1.5",
"1.7",
"1.8"
],
"tensorflow": []
}
] |
tonygrey/klsh | [
"77dbcd2bdd3f04e4d9add136201afda31c964580"
] | [
"klsh/kernels.py"
] | [
"import numpy as np\nfrom scipy import fftpack, signal\n\n__all__ = [\"crosscorr_kernel\", \"crosscorr_similarity\"]\n\n\ndef pairwise_correlate_slow(X, Y, mode='full'):\n X, Y = map(np.atleast_2d, (X, Y))\n assert X.ndim == 2\n assert Y.ndim == 2\n\n Y = Y[:, ::-1]\n\n first_result = signal.fftconvolve(X[0], Y[0], mode)\n M = np.zeros((X.shape[0], Y.shape[0], len(first_result)),\n dtype=first_result.dtype)\n\n for i in range(X.shape[0]):\n for j in range(Y.shape[0]):\n M[i, j] = signal.fftconvolve(X[i], Y[j], mode)\n return M\n\n\ndef precompute_fft(X, Y):\n \"\"\"Pre-compute the FFT of X and Y for use in pairwise_correlate\"\"\"\n X, Y = map(np.atleast_2d, (X, Y))\n assert X.ndim == 2\n assert Y.ndim == 2\n\n Y = Y[:, ::-1]\n\n s1 = X.shape[1]\n s2 = Y.shape[1]\n size = s1 + s2 - 1\n complex_result = (np.issubdtype(X.dtype, np.complex) or\n np.issubdtype(Y.dtype, np.complex))\n\n # Always use 2**n-sized FFT\n fsize = [int(2 ** np.ceil(np.log2(size)))]\n\n if not complex_result:\n X_fft = np.fft.rfftn(X, fsize)\n Y_fft = np.fft.rfftn(Y, fsize)\n else:\n X_fft = np.fft.fftn(X, fsize)\n Y_fft = np.fft.fftn(Y, fsize)\n\n return X_fft, Y_fft, (complex_result, size, fsize)\n\n\ndef pairwise_correlate(X, Y, mode='full', fast=True,\n fft_precomputed=False, fft_info=None):\n \"\"\"\n Parameters\n ----------\n X, Y: array_like\n Two-dimensional arrays to convolve\n mode: string\n [\"full\"|\"valid\"|\"same\"]\n\n Other Parameters\n ----------------\n fast : bool\n if True (default) use a fast broadcasting-based algorithm.\n This is mainly for unit-testing purposes.\n fft_precomputed : bool\n if True, then X and Y actually contain the pre-computed FFT\n of X and Y. Default is False. Cannot be used with fast=False.\n FFTs can be precomputed with the precompute_fft() function.\n If True, then complex_result must be specified.\n fft_info : bool\n Required if fft_precomputed==True.\n\n Returns\n -------\n out: array\n Three-dimensional array. out[i, j] contains the\n convolution of X[i] and Y[j]\n \"\"\"\n if not fast:\n if fft_precomputed:\n raise ValueError(\"Cannot have fft_precomputed and not fast\")\n return pairwise_correlate_slow(X, Y, mode)\n\n if mode != 'full':\n raise NotImplementedError()\n\n if fft_precomputed:\n if fft_info is None:\n raise ValueError(\"must specify complex_result=[True/False] \"\n \"if fft_precomputed is True\")\n X_fft, Y_fft = np.asarray(X), np.asarray(Y)\n else:\n X_fft, Y_fft, fft_info = precompute_fft(X, Y)\n\n complex_result, size, fsize = fft_info\n\n assert X_fft.ndim == 2\n assert Y_fft.ndim == 2\n assert X_fft.shape[-1] == Y_fft.shape[-1]\n\n # prepare broadcasting\n X_fft = X_fft[:, np.newaxis, :]\n Y_fft = Y_fft[np.newaxis, :, :]\n\n if not complex_result:\n M = np.fft.irfftn(X_fft * Y_fft, fsize)[:, :, :size].real\n else:\n M = np.fft.ifftn(X_fft * Y_fft, fsize)[:, :, :size]\n\n #if mode == \"full\":\n #pass\n #elif mode == \"same\":\n #return _centered(ret, s1)\n #elif mode == \"valid\":\n #return _centered(ret, s1 - s2 + 1)\n\n return M\n\n\ndef _batch_crosscorr(X, Y, batch_size, reduce_func,\n fft_precomputed=False, fft_info=None):\n \"\"\"Helper routine for batch fft-based cross-correlation.\n\n Parameters\n ----------\n X : array_like\n shape = [Nx, n_features]\n Y : array_like\n shape = [Ny, n_features]\n batch_size : integer\n perform computation in batches of this size.\n reduce_func: function\n a function which will reduce the input along its last axis.\n Input is the result of pairwise_correlate() on the batch, and is\n of shape (n, m, p). 
reduce_func should take this as input and return\n a suitable array of shape (n, m).\n \"\"\"\n X = np.asarray(X)\n Y = np.asarray(Y)\n\n assert X.ndim == 2\n assert Y.ndim == 2\n\n # precompute fft if necessary\n if fft_precomputed:\n Xfft, Yfft = X, Y\n assert fft_info is not None\n else:\n Xfft, Yfft, fft_info = precompute_fft(X, Y)\n\n # if batches are unnecessary, do the calculation in one step\n if batch_size is None or X.shape[0] * Y.shape[0] <= batch_size:\n M = pairwise_correlate(Xfft, Yfft, fft_info=fft_info,\n fft_precomputed=True)\n return reduce_func(M)\n\n # otherwise, we divide the computation into batches\n result = np.zeros((X.shape[0], Y.shape[0]))\n\n if Y.shape[0] < batch_size:\n batchsize = [batch_size // Y.shape[0], Y.shape[0]]\n elif X.shape[0] < batch_size:\n batchsize = [X.shape[0], batch_size // X.shape[0]]\n else:\n batchsize = 2 * [int(np.sqrt(batch_size))]\n\n nbatches = [1 + (X.shape[0] - 1) // batchsize[0],\n 1 + (Y.shape[0] - 1) // batchsize[1]]\n\n for i in range(nbatches[0]):\n sliceX = slice(i * batchsize[0], (i + 1) * batchsize[0])\n for j in range(nbatches[1]):\n sliceY = slice(j * batchsize[1], (j + 1) * batchsize[1])\n corr = pairwise_correlate(Xfft[sliceX], Yfft[sliceY],\n fft_precomputed=True, fft_info=fft_info)\n result[sliceX, sliceY] = reduce_func(corr)\n return result\n\n\ndef crosscorr_similarity(X, Y, batch_size=10000):\n \"\"\"Cross-correlation similarity between X and Y\n\n Parameters\n ----------\n X : array_like\n shape = [Nx, n_features]\n Y : array_like\n shape = [Ny, n_features]\n batch_size : integer (default=10000)\n perform computation in batches of this size.\n\n Returns\n -------\n M : np.ndarray\n the pairwise cross-correlation kernel between X and Y, shape [Nx, Ny]\n \"\"\"\n reduce_func = lambda corr: corr.max(-1)\n return _batch_crosscorr(X, Y, batch_size, reduce_func)\n\n\ndef crosscorr_kernel(X, Y, lambda_=10, batch_size=10000):\n \"\"\"Cross-correlation kernel between X and Y\n\n Parameters\n ----------\n X : array_like\n shape = [Nx, n_features]\n Y : array_like\n shape = [Ny, n_features]\n lambda_ : float (default=10)\n the exponential free parameter in the kernel.\n batch_size : integer (default=10000)\n perform computation in batches of this size.\n\n Returns\n -------\n M : np.ndarray\n the pairwise cross-correlation kernel between X and Y, shape [Nx, Ny]\n \"\"\"\n reduce_func = lambda corr, lambda_=lambda_: np.exp(lambda_ * corr).sum(-1)\n return _batch_crosscorr(X, Y, batch_size, reduce_func)\n\n\ndef crosscorr_metric(X, Y, batch_size=10000):\n # This could be done WAY more efficiently, especially the XX & YY terms\n XX = crosscorr_similarity(X, X, batch_size)\n YY = crosscorr_similarity(Y, Y, batch_size)\n XY = crosscorr_similarity(X, Y, batch_size)\n return XX.diagonal()[:, None] + YY.diagonal() - 2 * XY\n"
] | [
[
"numpy.log2",
"numpy.sqrt",
"scipy.signal.fftconvolve",
"numpy.asarray",
"numpy.issubdtype",
"numpy.fft.fftn",
"numpy.fft.ifftn",
"numpy.fft.irfftn",
"numpy.exp",
"numpy.zeros",
"numpy.fft.rfftn"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.13",
"1.16",
"1.9",
"1.18",
"1.21",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
hotpxl/nebuchadnezzar | [
"b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409"
] | [
"stats/data.py"
] | [
"import json\nimport os.path\nimport numpy as np\n\np = os.path.dirname(os.path.realpath(__file__))\n\ndef sse_indices():\n with open(os.path.join(p, '../data/sse_50.json')) as f:\n sse_indices = json.load(f)\n return np.asarray(sse_indices)\n\ndef get_merged(index, *fields):\n with open(os.path.join(p,\n '../data/merged/mobile_website/{}.json'.format(index))) as f:\n data = json.load(f)\n return np.asarray([[x[j] for j in fields] for x in data])\n\ndef get_merged_old(index, *fields):\n with open(os.path.join(p,\n '../data/merged/desktop_website/{}.json'.format(index))) as f:\n data = json.load(f)\n return np.asarray([[x[j] for j in fields] for x in data])\n"
] | [
[
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
THasthika/pytorch-lightning | [
"c502e47abf115fb8c7b82bc537f72481441ed8bb"
] | [
"pytorch_lightning/utilities/imports.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"General utilities\"\"\"\nimport importlib\nimport operator\nimport platform\nimport sys\nfrom importlib.util import find_spec\n\nimport torch\nfrom packaging.version import Version\nfrom pkg_resources import DistributionNotFound\n\n\ndef _module_available(module_path: str) -> bool:\n \"\"\"\n Check if a path is available in your environment\n\n >>> _module_available('os')\n True\n >>> _module_available('bla.bla')\n False\n \"\"\"\n try:\n return find_spec(module_path) is not None\n except AttributeError:\n # Python 3.6\n return False\n except ModuleNotFoundError:\n # Python 3.7+\n return False\n\n\ndef _compare_version(package: str, op, version) -> bool:\n \"\"\"\n Compare package version with some requirements\n\n >>> _compare_version(\"torch\", operator.ge, \"0.1\")\n True\n \"\"\"\n try:\n pkg = importlib.import_module(package)\n except (ModuleNotFoundError, DistributionNotFound):\n return False\n try:\n pkg_version = Version(pkg.__version__)\n except TypeError:\n # this is mock by sphinx, so it shall return True ro generate all summaries\n return True\n return op(pkg_version, Version(version))\n\n\n_IS_WINDOWS = platform.system() == \"Windows\"\n_IS_INTERACTIVE = hasattr(sys, \"ps1\") # https://stackoverflow.com/a/64523765\n_TORCH_LOWER_EQUAL_1_4 = _compare_version(\"torch\", operator.le, \"1.5.0\")\n_TORCH_GREATER_EQUAL_1_6 = _compare_version(\"torch\", operator.ge, \"1.6.0\")\n_TORCH_GREATER_EQUAL_1_7 = _compare_version(\"torch\", operator.ge, \"1.7.0\")\n_TORCH_GREATER_EQUAL_1_8 = _compare_version(\"torch\", operator.ge, \"1.8.0\")\n_TORCH_GREATER_EQUAL_1_8_1 = _compare_version(\"torch\", operator.ge, \"1.8.1\")\n_TORCH_GREATER_EQUAL_1_9 = _compare_version(\"torch\", operator.ge, \"1.9.0\")\n\n_APEX_AVAILABLE = _module_available(\"apex.amp\")\n_BOLTS_AVAILABLE = _module_available('pl_bolts')\n_DEEPSPEED_AVAILABLE = not _IS_WINDOWS and _module_available('deepspeed')\n_FAIRSCALE_AVAILABLE = not _IS_WINDOWS and _module_available('fairscale.nn.data_parallel')\n_FAIRSCALE_PIPE_AVAILABLE = _TORCH_GREATER_EQUAL_1_6 and _compare_version(\"fairscale\", operator.le, \"0.1.3\")\n_GROUP_AVAILABLE = not _IS_WINDOWS and _module_available('torch.distributed.group')\n_HOROVOD_AVAILABLE = _module_available(\"horovod.torch\")\n_HYDRA_AVAILABLE = _module_available(\"hydra\")\n_HYDRA_EXPERIMENTAL_AVAILABLE = _module_available(\"hydra.experimental\")\n_KINETO_AVAILABLE = _TORCH_GREATER_EQUAL_1_8_1 and torch.profiler.kineto_available()\n_NATIVE_AMP_AVAILABLE = _module_available(\"torch.cuda.amp\") and hasattr(torch.cuda.amp, \"autocast\")\n_OMEGACONF_AVAILABLE = _module_available(\"omegaconf\")\n_RPC_AVAILABLE = not _IS_WINDOWS and _module_available('torch.distributed.rpc')\n_TORCH_QUANTIZE_AVAILABLE = bool([eg for eg in torch.backends.quantized.supported_engines if eg != 'none'])\n_TORCHTEXT_AVAILABLE = _module_available(\"torchtext\")\n_TORCHVISION_AVAILABLE = 
_module_available('torchvision')\n_TORCHMETRICS_LOWER_THAN_0_3 = _compare_version(\"torchmetrics\", operator.lt, \"0.3.0\")\n_TORCHMETRICS_GREATER_EQUAL_0_3 = _compare_version(\"torchmetrics\", operator.ge, \"0.3.0\")\n_XLA_AVAILABLE = _module_available(\"torch_xla\")\n\nfrom pytorch_lightning.utilities.xla_device import XLADeviceUtils # noqa: E402\n\n_TPU_AVAILABLE = XLADeviceUtils.tpu_device_exists()\n"
] | [
[
"torch.profiler.kineto_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WozzyaQ/tkinter-budget-app | [
"ec62ff9de03bc7d85958b2d9a3b3934c8b13c44a"
] | [
"main.py"
] | [
"import datetime\nimport tkinter as tk\nfrom tkinter import messagebox\nfrom tkinter import ttk\nfrom tkinter.font import Font\n\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport numpy as np\nimport budget\n\nmatplotlib.use(\"TkAgg\")\n\npassword = \"\"\n\n\nclass GUI:\n def __init__(self, terminal):\n\n self.terminal = terminal\n self.filter = budget.Filter(terminal)\n root = self.root = tk.Tk()\n root.title(\"Budget\")\n root.geometry(\"1200x800+100+100\")\n root.resizable(False, False)\n root.withdraw()\n root.protocol(\"WM_DELETE_WINDOW\", self.save_all)\n\n self.main_font = main_font = Font(family='Courier New', size=24, weight='normal')\n\n # canvas for buttons, bd=0 and highlightthickness=0 is for no border\n self.left_canvas = left_canvas = tk.Canvas(root,\n width=300,\n height=800,\n bg=\"#e9e9e9\",\n bd=0,\n highlightthickness=0)\n\n add_income_btn = tk.Button(left_canvas, text=\"ADD INCOME\",\n font=main_font,\n command=self.add_income_btn_clicked,\n bd=0,\n highlightthickness=0)\n\n add_expense_btn = tk.Button(left_canvas, text=\"ADD EXPENSE\",\n font=main_font,\n command=self.add_expense_btn_clicked,\n bd=0,\n highlightthickness=0)\n\n put_deposit_btn = tk.Button(left_canvas, text=\"PUT DEPOSIT\",\n font=main_font,\n command=self.put_deposit_btn_clicked,\n bd=0,\n highlightthickness=0)\n\n get_credit_btn = tk.Button(left_canvas, text=\"GET CREDIT\",\n font=main_font,\n command=self.get_credit_btn_clicked,\n bd=0,\n highlightthickness=0)\n app_titple = tk.Label(left_canvas, text=\"MyBudget\",\n font=Font(family=\"Courier New\", size=35),\n bd=0,\n highlightthickness=0,\n bg=\"#D9CECE\")\n\n filter_button = tk.Button(left_canvas, text=\"FILTER\",\n font=main_font,\n command=self.filter_btn_clicked,\n bd=0,\n highlightthickness=0)\n\n credits_button = tk.Button(left_canvas, text=\"Credits\",\n font=Font(family=\"Courier New\", size=20, weight=\"normal\"),\n command=self.credits_btn_clicked,\n bd=0,\n highlightthickness=0)\n add_expense_btn.pack()\n add_income_btn.pack()\n put_deposit_btn.pack()\n get_credit_btn.pack()\n app_titple.pack()\n filter_button.pack()\n credits_button.pack()\n\n # Left styling line\n left_canvas.create_line(0, 0, 0, 800, fill=\"grey\")\n\n # Buttons\n left_canvas.create_window((30, 110), anchor=\"nw\", window=add_income_btn, width=240, height=60)\n left_canvas.create_window((30, 200), anchor=\"nw\", window=add_expense_btn, width=240, height=60)\n left_canvas.create_window((30, 300), anchor=\"nw\", window=put_deposit_btn, width=240, height=60)\n left_canvas.create_window((30, 400), anchor=\"nw\", window=get_credit_btn, width=240, height=60)\n left_canvas.create_window((20, 20), anchor=\"nw\", window=app_titple, width=260, height=60)\n left_canvas.create_window((30, 500), anchor=\"nw\", window=filter_button, width=240, height=60)\n left_canvas.create_window((100, 768), anchor=\"nw\", window=credits_button, width=100, height=30)\n left_canvas.place(x=900, y=0)\n\n self.information_canvas = information_canvas = tk.Canvas(root,\n width=900,\n height=600,\n bg=\"#f8f8f8\",\n bd=0,\n highlightthickness=0)\n\n # Line below information canvas\n information_canvas.create_line(0, 599, 900, 599, fill=\"black\")\n\n # Pie chart setup\n self.pie_chart, self.a = plt.subplots(figsize=(5.5,5.5))\n self.a.set_title(\"BALANCE PIE CHART\")\n self.pie_chart.set_facecolor(\"#f8f8f8\")\n self.a.pie([1], colors=[\"Brown\"])\n canvas = FigureCanvasTkAgg(self.pie_chart, information_canvas)\n 
canvas.get_tk_widget().place(x=0, y=0)\n\n # Total, income, expense\n total_label = self.total_label = tk.Label(text=\"Total: \",\n font=Font(family='Courier New', size=30, weight='normal'),\n bg=\"#f8f8f8\")\n\n income_label = self.income_label = tk.Label(text=\"Income: \",\n font=self.main_font,\n bg=\"#f8f8f8\")\n expense_label = self.expense_label = tk.Label(text=\"Expense: \",\n font=self.main_font,\n bg=\"#f8f8f8\")\n total_label.place(x=550, y=180)\n income_label.place(x=570, y=250)\n expense_label.place(x=570, y=300)\n\n information_canvas.place(x=0, y=0)\n\n self.last_transaction_canvas = last_transaction_canvas = tk.Canvas(root,\n width=900,\n height=200,\n bg=\"#f8f8f8\",\n bd=0,\n highlightthickness=0)\n\n last_transaction_canvas.create_line(0, 20, 900, 20, fill=\"black\")\n\n # transaction lines\n for i in range(1, 5):\n last_transaction_canvas.create_line(0, 20+i*36, 900, 20+i*36)\n\n last_transaction_canvas.create_text(360, 0, text=\"LAST TRANSACTIONS\",\n anchor=\"nw\",\n font=Font(family=\"Courier New\", size=20, weight=\"normal\"),\n fill=\"black\")\n # last transaction text, each field has tag tr$, where $ -> 1-5\n for i in range(5):\n last_transaction_canvas.create_text(0, 20+i*36,\n anchor=\"nw\",\n text=\"NONE\",\n font=Font(family=\"Courier New\", size=20, weight=\"normal\"),\n justify=\"center\",\n width=900,\n tag=\"tr\"+str(i+1))\n\n last_transaction_canvas.place(x=0, y=600)\n\n self.digit_validation = (self.root.register(self.do_digits_validation), \"%P\")\n self.dot_and_digit_validation = (self.root.register(self.do_dot_digit_validation), \"%P\")\n\n # Костыль который помогает очищать и работать с новыми созданными окнами\n # radio_btn_state, date, total, percent, category, purpose\n self.dynamic_widgets = dict()\n self.fill_zeros_dynamic_widgets(self.dynamic_widgets)\n self.create_start_window()\n\n # костыль, который помогает не столкнуться с KeyErroro'м когда пытаешся достать значение из словаря по\n # несуществующему ключу\n @staticmethod\n def fill_zeros_dynamic_widgets(dynamic_widgets):\n dynamic_widgets.update(current_popup=None,\n total=None,\n date=None,\n purpose=None,\n percent=None,\n radio_btn_state=None,\n periodic_type=None,\n percent_label_popup=None)\n\n # обнуляет костыль\n def reset_dynamic_widgets(self):\n for key in self.dynamic_widgets.keys():\n self.dynamic_widgets[key] = None\n\n # кривая валидация на точки и цифры\n def do_dot_digit_validation(self, value):\n if value:\n try:\n if value[-1] == \".\" and value[-2] != \".\" or value[-1].isdigit():\n return True\n else:\n return self.do_digits_validation(value)\n except IndexError:\n if value == \".\":\n return False\n return True\n\n @staticmethod\n def do_digits_validation(value):\n if value:\n try:\n int(value)\n return True\n except ValueError:\n return False\n else:\n return True\n\n @staticmethod\n def do_date_validation(date):\n try:\n datetime.datetime.strptime(date, '%d.%m.%Y')\n return True\n except ValueError:\n return False\n\n # создает и возвращает новое окно с заданым размером\n def create_basic_popup(self, width, height):\n popup = tk.Toplevel(self.root)\n popup.geometry(F\"{width}x{height}+500+200\")\n popup.grab_set()\n popup.resizable(False, False)\n\n self.dynamic_widgets[\"current_popup\"] = popup\n return popup\n\n # настраивает нужные виджеты для окон кредитов и депозитов\n def configure_credit_deposit_popup(self, top):\n top_frame = tk.Frame(top)\n frame_for_button = tk.Frame(top)\n\n date_label = tk.Label(top_frame, text=\"Date\",\n font=self.main_font)\n\n 
date_input = tk.Entry(top_frame,\n font=self.main_font,\n width=10,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation)\n\n total_label = tk.Label(top_frame,\n text=\"Total\",\n font=self.main_font)\n\n total_input = tk.Entry(top_frame,\n font=self.main_font,\n width=10,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation)\n\n percent_label = tk.Label(top_frame,\n text=\"Percent\",\n font=self.main_font)\n\n percent_input = tk.Entry(top_frame,\n font=self.main_font,\n width=10,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation)\n\n periodic_type = ttk.Combobox(top_frame,\n values=[\n \"Day\",\n \"Week\",\n \"Month\",\n \"Quarter\",\n \"Year\"\n ],\n font=Font(family=\"Courier New\", size=15),\n width=10,\n state=\"readonly\")\n\n submit_button = tk.Button(frame_for_button,\n font=self.main_font,\n text=\"Submit\",\n command=self.load_transaction)\n\n purpose_label = tk.Label(top_frame,\n text=\"Purpose\",\n font=self.main_font)\n\n purpose_input = tk.Entry(top_frame,\n font=self.main_font,\n width=10)\n\n date_label.grid(row=0, column=0)\n date_input.grid(row=0, column=1)\n total_label.grid(row=1, column=0)\n total_input.grid(row=1, column=1)\n percent_label.grid(row=2, column=0)\n percent_input.grid(row=2, column=1)\n periodic_type.grid(row=3, column=1)\n purpose_label.grid(row=4, column=0)\n purpose_input.grid(row=4, column=1)\n\n submit_button.pack()\n\n top_frame.pack()\n frame_for_button.pack()\n self.dynamic_widgets[\"total\"] = total_input\n self.dynamic_widgets[\"date\"] = date_input\n self.dynamic_widgets[\"purpose\"] = purpose_input\n self.dynamic_widgets[\"percent\"] = percent_input\n self.dynamic_widgets[\"periodic_type\"] = periodic_type\n\n # настраивает нужные виджеты окнам растрат и доходов\n def configure_expense_income_popup(self, top):\n top_frame = tk.Frame(top)\n frame_for_button = tk.Frame(top)\n\n total_label = tk.Label(top_frame,\n text=\"Total\",\n font=self.main_font)\n\n total_input = tk.Entry(top_frame,\n font=self.main_font,\n width=10,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation)\n\n date_label = tk.Label(top_frame, text=\"Date\",\n font=self.main_font)\n\n date_input = tk.Entry(top_frame,\n font=self.main_font,\n width=10,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation)\n\n periodic_label = tk.Label(top_frame, text=\"Periodic\", font=self.main_font)\n\n r_var = tk.BooleanVar()\n r_var.set(0)\n self.dynamic_widgets[\"radio_btn_state\"] = r_var.get()\n\n periodic_radio_btn_yes = tk.Radiobutton(top_frame,\n text=\"Yes\",\n font=self.main_font,\n variable=r_var,\n value=True,\n command=lambda: self.show_periodic_settings(top_frame, r_var))\n\n periodic_radio_btn_no = tk.Radiobutton(top_frame,\n text=\"No\",\n font=self.main_font,\n variable=r_var,\n value=False,\n command=lambda: self.show_periodic_settings(top_frame, r_var))\n\n purpose_label = tk.Label(top_frame,\n text=\"Purpose\",\n font=self.main_font)\n\n purpose_input = tk.Entry(top_frame,\n font=self.main_font,\n width=10)\n\n submit_button = tk.Button(frame_for_button,\n font=self.main_font,\n text=\"Submit\",\n command=self.load_transaction)\n\n date_label.grid(row=0, column=0)\n date_input.grid(row=0, column=1)\n\n total_label.grid(row=1, column=0)\n total_input.grid(row=1, column=1)\n\n periodic_label.grid(row=2, column=0)\n periodic_radio_btn_yes.grid(row=2, column=1)\n periodic_radio_btn_no.grid(row=2, column=2)\n\n purpose_label.grid(row=5, column=0)\n purpose_input.grid(row=5, column=1)\n\n 
submit_button.pack()\n\n top_frame.pack()\n frame_for_button.pack()\n\n self.dynamic_widgets[\"total\"] = total_input\n self.dynamic_widgets[\"date\"] = date_input\n self.dynamic_widgets[\"purpose\"] = purpose_input\n\n # еще один костыль\n def show_periodic_settings(self, frame, state):\n if state.get():\n periodic_type = ttk.Combobox(frame,\n values=[\n \"Day\",\n \"Week\",\n \"Month\",\n \"Quarter\",\n \"Year\"\n ],\n font=Font(family=\"Courier New\", size=15),\n width=10,\n state=\"readonly\")\n\n percent_label = tk.Label(frame,\n text=\"Percent\",\n font=self.main_font,\n width=10)\n\n percent_entry = tk.Entry(frame,\n font=self.main_font,\n width=10,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation)\n\n periodic_type.grid(row=3, column=1)\n percent_label.grid(row=4, column=0)\n percent_entry.grid(row=4, column=1)\n self.dynamic_widgets.update(periodic_type=periodic_type,\n radio_btn_state=state.get(),\n percent_label_popup=percent_label,\n percent=percent_entry)\n else:\n self.dynamic_widgets[\"periodic_type\"].destroy()\n self.dynamic_widgets[\"radio_btn_state\"] = state.get()\n self.dynamic_widgets[\"percent_label_popup\"].destroy()\n self.dynamic_widgets[\"percent\"].destroy()\n\n self.dynamic_widgets[\"periodic_type\"] = None\n self.dynamic_widgets[\"radio_btn_state\"] = None\n self.dynamic_widgets[\"percent_label_popup\"] = None\n self.dynamic_widgets[\"percent\"] = None\n\n # закрывает текущее всплывающее меню\n def close_popup(self, popup=None, update_widgets=False):\n if popup:\n popup.destroy()\n else:\n self.dynamic_widgets[\"current_popup\"].destroy()\n if update_widgets:\n self.update_total()\n self.update_pie_chart()\n self.update_expense()\n self.update_income()\n self.reset_dynamic_widgets()\n\n # обновляет полоску последних транзакций\n def update_last_transactions_box(self, last_transaction):\n for i in range(5, 1, -1):\n id = self.last_transaction_canvas.find_withtag(F\"tr{i-1}\")\n text = self.last_transaction_canvas.itemcget(*id, 'text')\n self.last_transaction_canvas.itemconfigure(F\"tr{i}\", text=text)\n\n self.last_transaction_canvas.itemconfigure(F\"tr1\", text=last_transaction)\n\n # загружает параметры в логику\n def load_transaction(self):\n\n date = self.dynamic_widgets[\"date\"].get()\n total = self.dynamic_widgets[\"total\"].get()\n purpose = self.dynamic_widgets[\"purpose\"].get()\n type_of_transaction = self.dynamic_widgets[\"transaction\"]\n if self.do_date_validation(date) and total:\n if self.dynamic_widgets[\"radio_btn_state\"]:\n period = self.dynamic_widgets[\"periodic_type\"].get()\n percent = self.dynamic_widgets[\"percent\"].get()\n print(period)\n if not percent or not period:\n messagebox.showinfo(\"Warning\",\n \"The percentage and period fields must be filled\")\n return\n # transaction_type date 0 / total 1 / is periodic 2/ period 3 / percent 4/ / purpose 5\n print(period)\n self.terminal.create_and_load_transaction(_transaction_type=type_of_transaction,\n _date=date,\n _total=total,\n _periodic=True,\n _period=period,\n _percent=percent,\n _purpose=purpose)\n\n self.update_last_transactions_box(F\"@{self.dynamic_widgets['transaction'].title()}|\"\n F\"@Date:{date}|@Total:{total}|@Period:{period}|\"\n F\"@Percent:{percent}\")\n\n self.close_popup(update_widgets=False)\n return\n elif type_of_transaction == \"deposit\" or type_of_transaction == \"credit\":\n percent = self.dynamic_widgets[\"percent\"].get()\n\n period = self.dynamic_widgets[\"periodic_type\"].get()\n if not percent or not period:\n 
messagebox.showinfo(\"Warning\",\n \"The percentage and period fields must be filled\")\n return\n self.terminal.create_and_load_transaction(_transaction_type=type_of_transaction,\n _date=date,\n _total=total,\n _percent=percent,\n _purpose=purpose,\n _period=period)\n\n self.update_last_transactions_box(F\"@{self.dynamic_widgets['transaction'].title()}|\"\n F\"@Date:{date}|@Total:{total}|@Period:{period}|\"\n F\"@Percent:{percent}\")\n self.close_popup(update_widgets=True)\n else:\n self.terminal.create_and_load_transaction(_transaction_type=type_of_transaction,\n _date=date,\n _total=total,\n _purpose=purpose)\n\n self.update_last_transactions_box(F\"@{self.dynamic_widgets['transaction'].title()}|\"\n F\"@Date:{date}|@Total:{total}\")\n self.close_popup(update_widgets=True)\n else:\n messagebox.showinfo(\"Warning\", \"THE DATE SHOULD BE IN DD.MM.YYYY FORMAT AND ALL FIELDS MUST BE FILLED\")\n\n def check_password(self, pas, top):\n if pas == password:\n self.root.deiconify()\n self.close_popup(top)\n self.update_on_start()\n else:\n messagebox.showinfo(\"Warning\", \"Incorrect password\")\n top.winfo_children()[1].delete(0, \"end\")\n\n def create_start_window(self):\n top = tk.Toplevel(self.root)\n top.geometry(\"300x300+600+200\")\n top.title(\"Enter a password\")\n\n password_label = tk.Label(top,\n text=\"Enter a password\",\n font=Font(family='Courier New', size=20, weight='normal'))\n\n password_entry = tk.Entry(top,\n show=\"*\",\n justify=\"center\")\n\n check_btn = tk.Button(top,\n text=\"Enter\",\n font=Font(family='Courier New', size=20, weight='normal'),\n command=lambda: self.check_password(password_entry.get(), top))\n\n password_label.place(relx=0.5, rely=0.25, anchor=tk.CENTER)\n password_entry.place(relx=0.5, rely=0.35, anchor=tk.CENTER)\n check_btn.place(relx=0.5, rely=0.70, anchor=tk.CENTER)\n\n def add_income_btn_clicked(self):\n top = self.create_basic_popup(width=400, height=260)\n top.title(\"Add income\")\n self.configure_expense_income_popup(top)\n\n self.dynamic_widgets[\"transaction\"] = \"income\"\n\n top.mainloop()\n\n def add_expense_btn_clicked(self):\n top = self.create_basic_popup(width=400, height=260)\n top.title(\"Add expense\")\n self.configure_expense_income_popup(top)\n\n self.dynamic_widgets[\"transaction\"] = \"expense\"\n top.mainloop()\n\n def put_deposit_btn_clicked(self):\n top = self.create_basic_popup(width=400, height=220)\n top.title(\"Deposit\")\n self.configure_credit_deposit_popup(top)\n\n self.dynamic_widgets[\"transaction\"] = \"deposit\"\n top.mainloop()\n\n def get_credit_btn_clicked(self):\n top = self.create_basic_popup(width=400, height=220)\n top.title(\"Credit\")\n self.configure_credit_deposit_popup(top)\n\n self.dynamic_widgets[\"transaction\"] = \"credit\"\n top.mainloop()\n\n # todo credits and filter and MAIN CANVAS WITH GRAPHICS\n def credits_btn_clicked(self):\n top = self.create_basic_popup(width=400, height=260)\n\n top.mainloop()\n\n def filter_btn_clicked(self):\n top = self.create_basic_popup(width=400, height=400)\n top.title(\"Filter\")\n\n # для корректного задания размеров кнопки, вообще я считаю это костыль\n # но увы другие способы очень долгие (по фрейму на каждую кнопку, нет спасибо)\n pixel = tk.PhotoImage(width=300, height=50)\n custom_font = Font(family='Courier New', size=20, weight='normal')\n\n incomes_for_period_btn = tk.Button(top, text=\"Show income for period\",\n image=pixel,\n command=lambda: self.incomes_for_period_clicked(top),\n bd=0,\n highlightthickness=0,\n compound=\"center\",\n 
font=custom_font)\n\n expenses_for_period_btn = tk.Button(top, text=\"Show expense for period\",\n image=pixel,\n command=lambda: self.expenses_for_period_clicked(top),\n bd=0,\n highlightthickness=0,\n compound=\"center\",\n font=custom_font)\n\n term_forecast_btn = tk.Button(top, text=\"Term forecast\",\n image=pixel,\n command=lambda: self.term_forecast_clicked_handler(top),\n bd=0,\n highlightthickness=0,\n compound=\"center\",\n font=custom_font)\n\n filter_by_sum_diapason_btn = tk.Button(top, text=\"Filter by sum diapason\",\n image=pixel,\n command=lambda: self.filter_by_sum_diapason_clicked(top),\n bd=0,\n highlightthickness=0,\n compound=\"center\",\n font=custom_font)\n\n show_all_transactions_btn = tk.Button(top, text=\"Show all transactions\",\n image=pixel,\n command=lambda: self.show_all_transaction_clicked(top),\n bd=0,\n highlightthickness=0,\n compound=\"center\",\n font=custom_font)\n\n incomes_for_period_btn.place(x=50, y=25)\n expenses_for_period_btn.place(x=50, y=95)\n term_forecast_btn.place(x=50, y=165)\n filter_by_sum_diapason_btn.place(x=50, y=235)\n show_all_transactions_btn.place(x=50, y=305)\n\n top.mainloop()\n\n @staticmethod\n def create_tree_view_popup(parent):\n tree_view_popup = tk.Toplevel(parent)\n tree_view_popup.resizable(False, False)\n\n tree_view_popup.geometry(\"870x600+200+200\")\n tree_view_frame = tk.Frame(tree_view_popup,\n height=600,\n width=870)\n tree = ttk.Treeview(tree_view_frame, height=600)\n # date / total / is_periodic / period / percent / purpose\n tree[\"columns\"] = (\"total\", \"is_periodic\", \"period\", \"percent\", \"purpose\")\n\n tree.heading(\"#0\", text=\"Date\")\n tree.heading(\"total\", text=\"Total\")\n tree.heading(\"is_periodic\", text=\"Is periodic?\")\n tree.heading(\"period\", text=\"Period\")\n tree.heading(\"percent\", text=\"Percent\")\n tree.heading(\"purpose\", text=\"Purpose\")\n\n tree.column(\"#0\",\n width=\"120\",\n minwidth=\"120\",\n anchor=\"center\")\n for item in tree[\"columns\"]:\n tree.column(F\"{item}\",\n width=\"120\",\n minwidth=\"120\",\n anchor=\"center\")\n tree.column(\"purpose\",\n width=\"250\",\n minwidth=\"200\",\n anchor=\"center\")\n\n tree_view_frame.pack(expand=False)\n tree.place(x=0, y=0, width=850, height=600)\n\n scrollbar = ttk.Scrollbar(tree_view_frame, orient=\"vertical\", command=tree.yview)\n scrollbar.place(x=850, y=0, height=600)\n tree.configure(yscrollcommand=scrollbar.set)\n\n return tree_view_popup\n\n def get_data_by_date(self, type_, begin, end, parent):\n fetch_by_type = {\n \"income\": self.filter.get_incomes_for_period,\n \"expense\": self.filter.get_expenses_for_period\n }\n if self.do_date_validation(begin) and self.do_date_validation(end):\n\n data = fetch_by_type[type_](begin, end)\n if data:\n tree_view_popup = self.create_tree_view_popup(parent)\n tree = tree_view_popup.winfo_children()[0].winfo_children()[0]\n\n for date in data:\n for params in date[1]:\n payload = (\n params[\"total\"],\n params[\"is_periodic\"],\n params[\"period\"],\n params[\"percent\"],\n params[\"purpose\"]\n )\n tree.insert(\"\",\n \"end\",\n text=date[0],\n values=payload)\n tree_view_popup.mainloop()\n else:\n messagebox.showinfo(\"Nothing to show\", \"No transactions found\")\n\n else:\n messagebox.showinfo(\"Incorrect date format\",\n \"THE DATE SHOULD BE IN DD.MM.YYYY FORMAT AND ALL FIELDS MUST BE FILLED\")\n\n def get_data_by_sum(self, lower_bound, upper_bound, parent):\n print(type(lower_bound))\n if self.do_digits_validation(lower_bound) and 
self.do_digits_validation(upper_bound):\n data = self.filter.filter_by_sum_diapason(lower_bound, upper_bound)\n if data:\n tree_view_popup = self.create_tree_view_popup(parent)\n tree = tree_view_popup.winfo_children()[0].winfo_children()[0]\n print(data)\n for date in data:\n for params in date[1]:\n payload = (\n params[\"total\"],\n params[\"is_periodic\"],\n params[\"period\"],\n params[\"percent\"],\n params[\"purpose\"]\n )\n tree.insert(\"\",\n \"end\",\n text=F\"{params['type']} | {date[0]}\",\n values=payload)\n tree_view_popup.mainloop()\n else:\n messagebox.showinfo(\"Nothing to show\", \"No transactions found\")\n else:\n messagebox.showinfo(\"Incorrect sum format\", \"The amount must be in float format, e.g. '175.35'\")\n\n def create_select_date_window(self, parent, type_):\n top = tk.Toplevel(parent)\n top.geometry(\"337x150+500+200\")\n custom_font = Font(family='Courier New', size=20, weight='normal')\n\n begin_label = tk.Label(top,\n text=\"START DATE\",\n font=custom_font)\n begin_entry = tk.Entry(top,\n font=custom_font,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation,\n width=12,\n justify=\"center\")\n\n end_label = tk.Label(top,\n text=\"END DATE\",\n font=custom_font)\n\n end_entry = tk.Entry(top,\n font=custom_font,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation,\n width=12,\n justify=\"center\")\n\n show_btn = tk.Button(top,\n text=\"Show\",\n font=custom_font,\n command=lambda: self.get_data_by_date(F\"{type_}\", begin_entry.get(), end_entry.get(), top),\n width=10,\n height=2)\n\n begin_label.place(relx=0.05, rely=0.05)\n begin_entry.place(relx=0, rely=0.2)\n\n end_label.place(relx=0.6, rely=0.05)\n end_entry.place(relx=0.5, rely=0.2)\n\n show_btn.place(relx=0.3, rely=0.5)\n return top\n\n def create_select_sum_window(self, parent):\n top = tk.Toplevel(parent)\n top.geometry(\"337x150+500+200\")\n custom_font = Font(family='Courier New', size=20, weight='normal')\n\n begin_label = tk.Label(top,\n text=\"LOWER BOUND\",\n font=custom_font)\n begin_entry = tk.Entry(top,\n font=custom_font,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation,\n width=12,\n justify=\"center\")\n\n end_label = tk.Label(top,\n text=\"UPPER BOUND\",\n font=custom_font)\n\n end_entry = tk.Entry(top,\n font=custom_font,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation,\n width=12,\n justify=\"center\")\n\n show_btn = tk.Button(top,\n text=\"Show\",\n font=custom_font,\n command=lambda: self.get_data_by_sum(float(begin_entry.get()), float(end_entry.get()), top),\n width=10,\n height=2)\n\n begin_label.place(relx=0.05, rely=0.05)\n begin_entry.place(relx=0, rely=0.2)\n\n end_label.place(relx=0.55, rely=0.05)\n end_entry.place(relx=0.5, rely=0.2)\n\n show_btn.place(relx=0.3, rely=0.5)\n return top\n\n def create_forecast_window(self, parent, forecast_date):\n if self.do_date_validation(forecast_date):\n top = tk.Toplevel(parent)\n top.resizable(False, False)\n top.title(\"Forecast\")\n top.geometry(\"1200x600+50+200\")\n # get forecast data\n forecast = list(self.filter.term_forecast(forecast_date))\n\n # bar graph setup\n labels = [\"Income\", \"Expense\"]\n width = 0.75\n x = np.arange(len(labels))\n bar, ax = plt.subplots(figsize=(6, 6))\n ax.set_title(F\"Forecast for {forecast_date}\")\n ax.set_ylabel(\"Total\")\n\n rect_income = ax.bar(0, forecast[0], width, label=\"Income\")\n rect_expense = ax.bar(1, forecast[1], width, label=\"Expense\")\n\n\n\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n canvas = 
FigureCanvasTkAgg(bar, top)\n\n total_label = tk.Label(top,\n text=F\"Expected total: {forecast[0]-forecast[1]}\",\n font=self.main_font)\n\n current_total_label = tk.Label(top,\n text=F\"Current total: {self.terminal.get_total_income()}\",\n font=self.main_font)\n\n income_label = tk.Label(top,\n text=F\"Expected income: {forecast[0]}\",\n font=self.main_font)\n\n expence_label = tk.Label(top,\n text=F\"Expected expense: {forecast[1]}\",\n font=self.main_font)\n\n canvas.get_tk_widget().place(x=0, y=0)\n total_label.place(x=580, y=150)\n current_total_label.place(x=580, y=200)\n income_label.place(x=580, y=250)\n expence_label.place(x=580, y=300)\n\n else:\n messagebox.showerror(\"Incorrect date\", \"The date should be entered in dd.mm.YYYY format\")\n\n def incomes_for_period_clicked(self, parent):\n top = self.create_select_date_window(parent, \"income\")\n top.title(\"Expense for period\")\n\n top.mainloop()\n\n def expenses_for_period_clicked(self, parent):\n top = self.create_select_date_window(parent, \"expense\")\n top.title(\"Expense for period\")\n\n top.mainloop()\n\n def term_forecast_clicked_handler(self, parent):\n top = tk.Toplevel(parent)\n top.resizable(False, False)\n top.geometry(\"200x100+600+300\")\n top.title(\"Forecast\")\n\n date_label = tk.Label(top,\n text=\"Forecast date\",\n font=self.main_font)\n\n date_entry = tk.Entry(top,\n font=self.main_font,\n validate=\"key\",\n validatecommand=self.dot_and_digit_validation,\n width=12,\n justify=\"center\")\n\n forecast_btn = tk.Button(top,\n text=\"Forecast\",\n font=self.main_font,\n width=12,\n justify=\"center\",\n command=lambda: self.create_forecast_window(top, date_entry.get()))\n\n date_label.pack(anchor=\"center\")\n date_entry.pack(anchor=\"center\")\n forecast_btn.pack(anchor=\"center\")\n top.mainloop()\n\n def filter_by_sum_diapason_clicked(self, parent):\n top = self.create_select_sum_window(parent)\n top.title(\"Get by sum diapason\")\n\n top.mainloop()\n\n def show_all_transaction_clicked(self, parent):\n tree_view_popup = self.create_tree_view_popup(parent)\n tree_view_popup.title(\"All transactions\")\n tree = tree_view_popup.winfo_children()[0].winfo_children()[0]\n data = self.filter.get_all_transactions()\n for date in data:\n print(date[1])\n for params in date[1]:\n payload = (\n params[\"total\"],\n params[\"is_periodic\"],\n params[\"period\"],\n params[\"percent\"],\n params[\"purpose\"]\n )\n tree.insert(\"\",\n \"end\",\n text=F\"{params['type']} | {date[0]}\",\n values=payload)\n\n tree_view_popup.mainloop()\n\n def update_on_start(self):\n self.update_pie_chart()\n self.update_income()\n self.update_expense()\n self.update_total()\n\n def update_pie_chart(self):\n self.a.clear()\n self.a.pie([self.terminal.get_total_income(), self.terminal.get_total_expense()],\n labels=[\"Income\", \"Expenses\"],\n autopct='%1.1f%%',\n colors=[\"#d6f5d6\", \"#ffcccc\"])\n self.pie_chart.canvas.draw_idle()\n\n def update_total(self):\n self.total_label[\"text\"] = F\"Total:{self.terminal.get_total_overall()}$\"\n\n def update_income(self):\n self.income_label[\"text\"] = F\"Income:{self.terminal.get_total_income()}$\"\n\n def update_expense(self):\n self.expense_label[\"text\"] = F\"Expense:{self.terminal.get_total_expense()}$\"\n\n def save_all(self):\n self.terminal.save_all()\n self.root.quit()\n\n\ngui = GUI(budget.Terminal())\ngui.root.mainloop()\n\n\n\n"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.subplots",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mleila/surname_predictor | [
"76ad33bd8c5a0bf3e10c6f7808d5deb2b7e75558"
] | [
"surnames/data.py"
] | [
"import torch\nimport pandas as pd\n\nfrom surnames.vectorizers import SurnameClassificationVectorizer\nfrom surnames.constants import (\n TRAIN,\n VALID,\n TEST,\n SURNAME,\n ORIGIN,\n X_DATA,\n Y_TARGET\n )\n\n\nclass SurnameClassificationDataset(torch.utils.data.Dataset):\n\n def __init__(\n self,\n df: pd.DataFrame,\n surname_vectorizer: SurnameClassificationVectorizer,\n max_surname_length: int=10\n ):\n self.df = df\n self.surname_vectorizer = surname_vectorizer\n self.max_surname_length = max_surname_length\n self.set_split()\n\n @classmethod\n def from_dataframe(cls, df):\n surname_vectorizer = SurnameClassificationVectorizer.from_dataframe(df)\n return cls(df, surname_vectorizer)\n\n def set_split(self, split: str=TRAIN):\n assert split in [TRAIN, VALID, TEST], f'Split must be either {TRAIN}, {VALID}, or {TEST}'\n self._target_df = self.df.query(f'split==\"{split}\"')\n\n def __getitem__(self, index):\n row = self._target_df.iloc[index]\n surname, origin = row[SURNAME], row[ORIGIN]\n surname_vector = self.surname_vectorizer.vectorize_surname(surname, self.max_surname_length)\n origin_vector = self.surname_vectorizer.vectorize_origin(origin)\n return {\n X_DATA: surname_vector,\n Y_TARGET: origin_vector\n }\n\n def __len__(self):\n return len(self._target_df)\n\n\ndef generate_batches(\n dataset,\n batch_size,\n shuffle=True,\n drop_last=True,\n device='cpu'\n ):\n \"\"\"\n This generator wraps the DataLoader class to build tensors out of the\n raw data and send them to the desired device\n \"\"\"\n data_loader = torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n drop_last=drop_last)\n\n for data_dict in data_loader:\n out_data_dict = {}\n for name, tensor in data_dict.items():\n out_data_dict[name] = tensor.to(device)\n yield out_data_dict\n"
] | [
[
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MAADDS/Skyless-YOLOP | [
"44b948522d93273c988f6fa92a5c29fa14461336"
] | [
"skydetection/sky_detector/detector.py"
] | [
"import cv2\nfrom statistics import median\nfrom scipy.signal import medfilt\nfrom scipy import ndimage\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\ndef cal_skyline(mask):\n h, w = mask.shape\n greatest = 0\n zerolist = []\n zerolist2 = []\n print(w) \n for i in range(w):\n raw = mask[:, i]\n after_median = medfilt(raw, 19)\n try:\n \n first_zero_index = np.where(after_median == 0)[0][0]\n first_one_index = np.where(after_median == 1)[0][0]\n if greatest < first_zero_index :\n greatest = first_zero_index\n if first_zero_index != 0:\n zerolist2.append(first_zero_index)\n zerolist.append(first_zero_index)\n \n #if first_zero_index > 20:\n mask[first_one_index:first_zero_index, i] = 0\n mask[first_zero_index:, i] = 1\n mask[:first_one_index, i] = 1\n except:\n continue\n medline = int(median(zerolist2))\n meanline = int(np.mean(zerolist2))\n\n return mask, medline, meanline, greatest\n\n\ndef get_sky_region_gradient(img):\n\n h, w, _ = img.shape\n print(w)\n\n blurshape = (5,5)\n\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n plt.imshow(img_gray)\n plt.show()\n\n img_gray = cv2.blur(img_gray, blurshape)\n plt.imshow(img_gray)\n plt.show()\n cv2.medianBlur(img_gray, 15)\n lap = cv2.Laplacian(img_gray, cv2.CV_8U)\n gradient_mask = (lap < 6).astype(np.uint8)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, blurshape)\n mask = cv2.morphologyEx(gradient_mask, cv2.MORPH_ERODE, kernel)\n #plt.imshow(mask)\n #plt.show()\n\n mask, medline, meanline, greatest = cal_skyline(mask)\n #plt.imshow(mask)\n #plt.show()\n temp = 130\n after_img = cv2.bitwise_and(img, img, mask=mask)\n after_img[meanline-5:meanline+5,:] = (255,0,255)\n #after_img[int(np.mean([medline,temp]))-5 : int(np.mean([medline,temp]))+5,:] = (255,255,0)\n #after_img[temp-5:temp+5,:] = (0,255,0)\n\n return after_img"
] | [
[
"matplotlib.pyplot.imshow",
"scipy.signal.medfilt",
"numpy.mean",
"matplotlib.pyplot.show",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
mbeacom/superset | [
"8dfe2b70b2ee85c8cfe79a7a37eefaa790158bf1"
] | [
"superset/connectors/sqla/models.py"
] | [
"from datetime import datetime\nimport logging\nimport sqlparse\nfrom past.builtins import basestring\n\nimport pandas as pd\n\nfrom sqlalchemy import (\n Column, Integer, String, ForeignKey, Text, Boolean,\n DateTime,\n)\nimport sqlalchemy as sa\nfrom sqlalchemy import asc, and_, desc, select\nfrom sqlalchemy.ext.compiler import compiles\nfrom sqlalchemy.sql.expression import ColumnClause, TextAsFrom\nfrom sqlalchemy.orm import backref, relationship\nfrom sqlalchemy.sql import table, literal_column, text, column\n\nfrom flask import escape, Markup\nfrom flask_appbuilder import Model\nfrom flask_babel import lazy_gettext as _\n\nfrom superset import db, utils, import_util, sm\nfrom superset.connectors.base import BaseDatasource, BaseColumn, BaseMetric\nfrom superset.utils import DTTM_ALIAS, QueryStatus\nfrom superset.models.helpers import QueryResult\nfrom superset.models.core import Database\nfrom superset.jinja_context import get_template_processor\nfrom superset.models.helpers import set_perm\n\n\nclass TableColumn(Model, BaseColumn):\n\n \"\"\"ORM object for table columns, each table can have multiple columns\"\"\"\n\n __tablename__ = 'table_columns'\n table_id = Column(Integer, ForeignKey('tables.id'))\n table = relationship(\n 'SqlaTable',\n backref=backref('columns', cascade='all, delete-orphan'),\n foreign_keys=[table_id])\n is_dttm = Column(Boolean, default=False)\n expression = Column(Text, default='')\n python_date_format = Column(String(255))\n database_expression = Column(String(255))\n\n export_fields = (\n 'table_id', 'column_name', 'verbose_name', 'is_dttm', 'is_active',\n 'type', 'groupby', 'count_distinct', 'sum', 'avg', 'max', 'min',\n 'filterable', 'expression', 'description', 'python_date_format',\n 'database_expression'\n )\n\n @property\n def sqla_col(self):\n name = self.column_name\n if not self.expression:\n col = column(self.column_name).label(name)\n else:\n col = literal_column(self.expression).label(name)\n return col\n\n def get_time_filter(self, start_dttm, end_dttm):\n col = self.sqla_col.label('__time')\n return and_(\n col >= text(self.dttm_sql_literal(start_dttm)),\n col <= text(self.dttm_sql_literal(end_dttm)),\n )\n\n def get_timestamp_expression(self, time_grain):\n \"\"\"Getting the time component of the query\"\"\"\n expr = self.expression or self.column_name\n if not self.expression and not time_grain:\n return column(expr, type_=DateTime).label(DTTM_ALIAS)\n if time_grain:\n pdf = self.python_date_format\n if pdf in ('epoch_s', 'epoch_ms'):\n # if epoch, translate to DATE using db specific conf\n db_spec = self.table.database.db_engine_spec\n if pdf == 'epoch_s':\n expr = db_spec.epoch_to_dttm().format(col=expr)\n elif pdf == 'epoch_ms':\n expr = db_spec.epoch_ms_to_dttm().format(col=expr)\n grain = self.table.database.grains_dict().get(time_grain, '{col}')\n expr = grain.function.format(col=expr)\n return literal_column(expr, type_=DateTime).label(DTTM_ALIAS)\n\n @classmethod\n def import_obj(cls, i_column):\n def lookup_obj(lookup_column):\n return db.session.query(TableColumn).filter(\n TableColumn.table_id == lookup_column.table_id,\n TableColumn.column_name == lookup_column.column_name).first()\n return import_util.import_simple_obj(db.session, i_column, lookup_obj)\n\n def dttm_sql_literal(self, dttm):\n \"\"\"Convert datetime object to a SQL expression string\n\n If database_expression is empty, the internal dttm\n will be parsed as the string with the pattern that\n the user inputted (python_date_format)\n If database_expression is not 
empty, the internal dttm\n will be parsed as the sql sentence for the database to convert\n \"\"\"\n\n tf = self.python_date_format or '%Y-%m-%d %H:%M:%S.%f'\n if self.database_expression:\n return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))\n elif tf == 'epoch_s':\n return str((dttm - datetime(1970, 1, 1)).total_seconds())\n elif tf == 'epoch_ms':\n return str((dttm - datetime(1970, 1, 1)).total_seconds() * 1000.0)\n else:\n s = self.table.database.db_engine_spec.convert_dttm(\n self.type or '', dttm)\n return s or \"'{}'\".format(dttm.strftime(tf))\n\n\nclass SqlMetric(Model, BaseMetric):\n\n \"\"\"ORM object for metrics, each table can have multiple metrics\"\"\"\n\n __tablename__ = 'sql_metrics'\n table_id = Column(Integer, ForeignKey('tables.id'))\n table = relationship(\n 'SqlaTable',\n backref=backref('metrics', cascade='all, delete-orphan'),\n foreign_keys=[table_id])\n expression = Column(Text)\n\n export_fields = (\n 'metric_name', 'verbose_name', 'metric_type', 'table_id', 'expression',\n 'description', 'is_restricted', 'd3format')\n\n @property\n def sqla_col(self):\n name = self.metric_name\n return literal_column(self.expression).label(name)\n\n @property\n def perm(self):\n return (\n \"{parent_name}.[{obj.metric_name}](id:{obj.id})\"\n ).format(obj=self,\n parent_name=self.table.full_name) if self.table else None\n\n @classmethod\n def import_obj(cls, i_metric):\n def lookup_obj(lookup_metric):\n return db.session.query(SqlMetric).filter(\n SqlMetric.table_id == lookup_metric.table_id,\n SqlMetric.metric_name == lookup_metric.metric_name).first()\n return import_util.import_simple_obj(db.session, i_metric, lookup_obj)\n\n\nclass SqlaTable(Model, BaseDatasource):\n\n \"\"\"An ORM object for SqlAlchemy table references\"\"\"\n\n type = \"table\"\n query_language = 'sql'\n metric_class = SqlMetric\n column_class = TableColumn\n\n __tablename__ = 'tables'\n table_name = Column(String(250))\n main_dttm_col = Column(String(250))\n database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)\n fetch_values_predicate = Column(String(1000))\n user_id = Column(Integer, ForeignKey('ab_user.id'))\n owner = relationship(\n sm.user_model,\n backref='tables',\n foreign_keys=[user_id])\n database = relationship(\n 'Database',\n backref=backref('tables', cascade='all, delete-orphan'),\n foreign_keys=[database_id])\n schema = Column(String(255))\n sql = Column(Text)\n\n baselink = \"tablemodelview\"\n export_fields = (\n 'table_name', 'main_dttm_col', 'description', 'default_endpoint',\n 'database_id', 'offset', 'cache_timeout', 'schema',\n 'sql', 'params')\n\n __table_args__ = (\n sa.UniqueConstraint(\n 'database_id', 'schema', 'table_name',\n name='_customer_location_uc'),)\n\n def __repr__(self):\n return self.name\n\n @property\n def description_markeddown(self):\n return utils.markdown(self.description)\n\n @property\n def link(self):\n name = escape(self.name)\n return Markup(\n '<a href=\"{self.explore_url}\">{name}</a>'.format(**locals()))\n\n @property\n def schema_perm(self):\n \"\"\"Returns schema permission if present, database one otherwise.\"\"\"\n return utils.get_schema_perm(self.database, self.schema)\n\n def get_perm(self):\n return (\n \"[{obj.database}].[{obj.table_name}]\"\n \"(id:{obj.id})\").format(obj=self)\n\n @property\n def name(self):\n if not self.schema:\n return self.table_name\n return \"{}.{}\".format(self.schema, self.table_name)\n\n @property\n def full_name(self):\n return utils.get_datasource_full_name(\n self.database, 
self.table_name, schema=self.schema)\n\n @property\n def dttm_cols(self):\n l = [c.column_name for c in self.columns if c.is_dttm]\n if self.main_dttm_col and self.main_dttm_col not in l:\n l.append(self.main_dttm_col)\n return l\n\n @property\n def num_cols(self):\n return [c.column_name for c in self.columns if c.is_num]\n\n @property\n def any_dttm_col(self):\n cols = self.dttm_cols\n if cols:\n return cols[0]\n\n @property\n def html(self):\n t = ((c.column_name, c.type) for c in self.columns)\n df = pd.DataFrame(t)\n df.columns = ['field', 'type']\n return df.to_html(\n index=False,\n classes=(\n \"dataframe table table-striped table-bordered \"\n \"table-condensed\"))\n\n @property\n def sql_url(self):\n return self.database.sql_url + \"?table_name=\" + str(self.table_name)\n\n @property\n def time_column_grains(self):\n return {\n \"time_columns\": self.dttm_cols,\n \"time_grains\": [grain.name for grain in self.database.grains()]\n }\n\n def get_col(self, col_name):\n columns = self.columns\n for col in columns:\n if col_name == col.column_name:\n return col\n\n @property\n def data(self):\n d = super(SqlaTable, self).data\n if self.type == 'table':\n grains = self.database.grains() or []\n if grains:\n grains = [(g.name, g.name) for g in grains]\n d['granularity_sqla'] = utils.choicify(self.dttm_cols)\n d['time_grain_sqla'] = grains\n return d\n\n def values_for_column(self, column_name, limit=10000):\n \"\"\"Runs query against sqla to retrieve some\n sample values for the given column.\n \"\"\"\n cols = {col.column_name: col for col in self.columns}\n target_col = cols[column_name]\n\n qry = (\n select([target_col.sqla_col])\n .select_from(self.get_from_clause())\n .distinct(column_name)\n )\n if limit:\n qry = qry.limit(limit)\n\n if self.fetch_values_predicate:\n tp = self.get_template_processor()\n qry = qry.where(tp.process_template(self.fetch_values_predicate))\n\n engine = self.database.get_sqla_engine()\n sql = \"{}\".format(\n qry.compile(\n engine, compile_kwargs={\"literal_binds\": True}, ),\n )\n\n df = pd.read_sql_query(sql=sql, con=engine)\n return [row[0] for row in df.to_records(index=False)]\n\n def get_template_processor(self, **kwargs):\n return get_template_processor(\n table=self, database=self.database, **kwargs)\n\n def get_query_str(self, query_obj):\n engine = self.database.get_sqla_engine()\n qry = self.get_sqla_query(**query_obj)\n sql = str(\n qry.compile(\n engine,\n compile_kwargs={\"literal_binds\": True}\n )\n )\n logging.info(sql)\n sql = sqlparse.format(sql, reindent=True)\n sql = self.database.db_engine_spec.sql_preprocessor(sql)\n return sql\n\n def get_sqla_table(self):\n tbl = table(self.table_name)\n if self.schema:\n tbl.schema = self.schema\n return tbl\n\n def get_from_clause(self, template_processor=None):\n # Supporting arbitrary SQL statements in place of tables\n if self.sql:\n from_sql = self.sql\n if template_processor:\n from_sql = template_processor.process_template(from_sql)\n return TextAsFrom(sa.text(from_sql), []).alias('expr_qry')\n return self.get_sqla_table()\n\n def get_sqla_query( # sqla\n self,\n groupby, metrics,\n granularity,\n from_dttm, to_dttm,\n filter=None, # noqa\n is_timeseries=True,\n timeseries_limit=15,\n timeseries_limit_metric=None,\n row_limit=None,\n inner_from_dttm=None,\n inner_to_dttm=None,\n orderby=None,\n extras=None,\n columns=None,\n form_data=None):\n \"\"\"Querying any sqla table from this common interface\"\"\"\n\n template_kwargs = {\n 'from_dttm': from_dttm,\n 'groupby': groupby,\n 'metrics': 
metrics,\n 'row_limit': row_limit,\n 'to_dttm': to_dttm,\n 'form_data': form_data,\n }\n template_processor = self.get_template_processor(**template_kwargs)\n\n # For backward compatibility\n if granularity not in self.dttm_cols:\n granularity = self.main_dttm_col\n\n # Database spec supports join-free timeslot grouping\n time_groupby_inline = self.database.db_engine_spec.time_groupby_inline\n\n cols = {col.column_name: col for col in self.columns}\n metrics_dict = {m.metric_name: m for m in self.metrics}\n\n if not granularity and is_timeseries:\n raise Exception(_(\n \"Datetime column not provided as part table configuration \"\n \"and is required by this type of chart\"))\n for m in metrics:\n if m not in metrics_dict:\n raise Exception(_(\"Metric '{}' is not valid\".format(m)))\n metrics_exprs = [metrics_dict.get(m).sqla_col for m in metrics]\n timeseries_limit_metric = metrics_dict.get(timeseries_limit_metric)\n timeseries_limit_metric_expr = None\n if timeseries_limit_metric:\n timeseries_limit_metric_expr = \\\n timeseries_limit_metric.sqla_col\n if metrics_exprs:\n main_metric_expr = metrics_exprs[0]\n else:\n main_metric_expr = literal_column(\"COUNT(*)\").label(\"ccount\")\n\n select_exprs = []\n groupby_exprs = []\n\n if groupby:\n select_exprs = []\n inner_select_exprs = []\n inner_groupby_exprs = []\n for s in groupby:\n col = cols[s]\n outer = col.sqla_col\n inner = col.sqla_col.label(col.column_name + '__')\n\n groupby_exprs.append(outer)\n select_exprs.append(outer)\n inner_groupby_exprs.append(inner)\n inner_select_exprs.append(inner)\n elif columns:\n for s in columns:\n select_exprs.append(cols[s].sqla_col)\n metrics_exprs = []\n\n if granularity:\n dttm_col = cols[granularity]\n time_grain = extras.get('time_grain_sqla')\n time_filters = []\n\n if is_timeseries:\n timestamp = dttm_col.get_timestamp_expression(time_grain)\n select_exprs += [timestamp]\n groupby_exprs += [timestamp]\n\n # Use main dttm column to support index with secondary dttm columns\n if self.database.db_engine_spec.time_secondary_columns and \\\n self.main_dttm_col in self.dttm_cols and \\\n self.main_dttm_col != dttm_col.column_name:\n time_filters.append(cols[self.main_dttm_col].\n get_time_filter(from_dttm, to_dttm))\n time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))\n\n select_exprs += metrics_exprs\n qry = sa.select(select_exprs)\n\n tbl = self.get_from_clause(template_processor)\n\n if not columns:\n qry = qry.group_by(*groupby_exprs)\n\n where_clause_and = []\n having_clause_and = []\n for flt in filter:\n if not all([flt.get(s) for s in ['col', 'op', 'val']]):\n continue\n col = flt['col']\n op = flt['op']\n eq = flt['val']\n col_obj = cols.get(col)\n if col_obj:\n if op in ('in', 'not in'):\n values = []\n for v in eq:\n # For backwards compatibility and edge cases\n # where a column data type might have changed\n if isinstance(v, basestring):\n v = v.strip(\"'\").strip('\"')\n if col_obj.is_num:\n v = utils.string_to_num(v)\n\n # Removing empty strings and non numeric values\n # targeting numeric columns\n if v is not None:\n values.append(v)\n cond = col_obj.sqla_col.in_(values)\n if op == 'not in':\n cond = ~cond\n where_clause_and.append(cond)\n else:\n if col_obj.is_num:\n eq = utils.string_to_num(flt['val'])\n if op == '==':\n where_clause_and.append(col_obj.sqla_col == eq)\n elif op == '!=':\n where_clause_and.append(col_obj.sqla_col != eq)\n elif op == '>':\n where_clause_and.append(col_obj.sqla_col > eq)\n elif op == '<':\n where_clause_and.append(col_obj.sqla_col < 
eq)\n elif op == '>=':\n where_clause_and.append(col_obj.sqla_col >= eq)\n elif op == '<=':\n where_clause_and.append(col_obj.sqla_col <= eq)\n elif op == 'LIKE':\n where_clause_and.append(col_obj.sqla_col.like(eq))\n if extras:\n where = extras.get('where')\n if where:\n where = template_processor.process_template(where)\n where_clause_and += [sa.text('({})'.format(where))]\n having = extras.get('having')\n if having:\n having = template_processor.process_template(having)\n having_clause_and += [sa.text('({})'.format(having))]\n if granularity:\n qry = qry.where(and_(*(time_filters + where_clause_and)))\n else:\n qry = qry.where(and_(*where_clause_and))\n qry = qry.having(and_(*having_clause_and))\n if groupby:\n qry = qry.order_by(desc(main_metric_expr))\n elif orderby:\n for col, ascending in orderby:\n direction = asc if ascending else desc\n qry = qry.order_by(direction(col))\n\n if row_limit:\n qry = qry.limit(row_limit)\n\n if is_timeseries and \\\n timeseries_limit and groupby and not time_groupby_inline:\n # some sql dialects require for order by expressions\n # to also be in the select clause -- others, e.g. vertica,\n # require a unique inner alias\n inner_main_metric_expr = main_metric_expr.label('mme_inner__')\n inner_select_exprs += [inner_main_metric_expr]\n subq = select(inner_select_exprs)\n subq = subq.select_from(tbl)\n inner_time_filter = dttm_col.get_time_filter(\n inner_from_dttm or from_dttm,\n inner_to_dttm or to_dttm,\n )\n subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))\n subq = subq.group_by(*inner_groupby_exprs)\n ob = inner_main_metric_expr\n if timeseries_limit_metric_expr is not None:\n ob = timeseries_limit_metric_expr\n subq = subq.order_by(desc(ob))\n subq = subq.limit(timeseries_limit)\n on_clause = []\n for i, gb in enumerate(groupby):\n on_clause.append(\n groupby_exprs[i] == column(gb + '__'))\n\n tbl = tbl.join(subq.alias(), and_(*on_clause))\n\n return qry.select_from(tbl)\n\n def query(self, query_obj):\n qry_start_dttm = datetime.now()\n sql = self.get_query_str(query_obj)\n status = QueryStatus.SUCCESS\n error_message = None\n df = None\n try:\n df = self.database.get_df(sql, self.schema)\n except Exception as e:\n status = QueryStatus.FAILED\n logging.exception(e)\n error_message = (\n self.database.db_engine_spec.extract_error_message(e))\n\n return QueryResult(\n status=status,\n df=df,\n duration=datetime.now() - qry_start_dttm,\n query=sql,\n error_message=error_message)\n\n def get_sqla_table_object(self):\n return self.database.get_table(self.table_name, schema=self.schema)\n\n def fetch_metadata(self):\n \"\"\"Fetches the metadata for the table and merges it in\"\"\"\n try:\n table = self.get_sqla_table_object()\n except Exception:\n raise Exception(\n \"Table doesn't seem to exist in the specified database, \"\n \"couldn't fetch column information\")\n\n TC = TableColumn # noqa shortcut to class\n M = SqlMetric # noqa\n metrics = []\n any_date_col = None\n db_dialect = self.database.get_sqla_engine().dialect\n for col in table.columns:\n try:\n datatype = \"{}\".format(col.type).upper()\n except Exception as e:\n datatype = \"UNKNOWN\"\n logging.error(\n \"Unrecognized data type in {}.{}\".format(table, col.name))\n logging.exception(e)\n dbcol = (\n db.session\n .query(TC)\n .filter(TC.table == self)\n .filter(TC.column_name == col.name)\n .first()\n )\n db.session.flush()\n if not dbcol:\n dbcol = TableColumn(column_name=col.name, type=datatype)\n dbcol.groupby = dbcol.is_string\n dbcol.filterable = dbcol.is_string\n 
dbcol.sum = dbcol.is_num\n dbcol.avg = dbcol.is_num\n dbcol.is_dttm = dbcol.is_time\n\n db.session.merge(self)\n self.columns.append(dbcol)\n\n if not any_date_col and dbcol.is_time:\n any_date_col = col.name\n\n quoted = \"{}\".format(col.compile(dialect=db_dialect))\n if dbcol.sum:\n metrics.append(M(\n metric_name='sum__' + dbcol.column_name,\n verbose_name='sum__' + dbcol.column_name,\n metric_type='sum',\n expression=\"SUM({})\".format(quoted)\n ))\n if dbcol.avg:\n metrics.append(M(\n metric_name='avg__' + dbcol.column_name,\n verbose_name='avg__' + dbcol.column_name,\n metric_type='avg',\n expression=\"AVG({})\".format(quoted)\n ))\n if dbcol.max:\n metrics.append(M(\n metric_name='max__' + dbcol.column_name,\n verbose_name='max__' + dbcol.column_name,\n metric_type='max',\n expression=\"MAX({})\".format(quoted)\n ))\n if dbcol.min:\n metrics.append(M(\n metric_name='min__' + dbcol.column_name,\n verbose_name='min__' + dbcol.column_name,\n metric_type='min',\n expression=\"MIN({})\".format(quoted)\n ))\n if dbcol.count_distinct:\n metrics.append(M(\n metric_name='count_distinct__' + dbcol.column_name,\n verbose_name='count_distinct__' + dbcol.column_name,\n metric_type='count_distinct',\n expression=\"COUNT(DISTINCT {})\".format(quoted)\n ))\n dbcol.type = datatype\n db.session.merge(self)\n db.session.commit()\n\n metrics.append(M(\n metric_name='count',\n verbose_name='COUNT(*)',\n metric_type='count',\n expression=\"COUNT(*)\"\n ))\n for metric in metrics:\n m = (\n db.session.query(M)\n .filter(M.metric_name == metric.metric_name)\n .filter(M.table_id == self.id)\n .first()\n )\n metric.table_id = self.id\n if not m:\n db.session.add(metric)\n db.session.commit()\n if not self.main_dttm_col:\n self.main_dttm_col = any_date_col\n\n @classmethod\n def import_obj(cls, i_datasource, import_time=None):\n \"\"\"Imports the datasource from the object to the database.\n\n Metrics and columns and datasource will be overrided if exists.\n This function can be used to import/export dashboards between multiple\n superset instances. Audit metadata isn't copies over.\n \"\"\"\n def lookup_sqlatable(table):\n return db.session.query(SqlaTable).join(Database).filter(\n SqlaTable.table_name == table.table_name,\n SqlaTable.schema == table.schema,\n Database.id == table.database_id,\n ).first()\n\n def lookup_database(table):\n return db.session.query(Database).filter_by(\n database_name=table.params_dict['database_name']).one()\n return import_util.import_datasource(\n db.session, i_datasource, lookup_database, lookup_sqlatable,\n import_time)\n\n @classmethod\n def query_datasources_by_name(\n cls, session, database, datasource_name, schema=None):\n query = (\n session.query(cls)\n .filter_by(database_id=database.id)\n .filter_by(table_name=datasource_name)\n )\n if schema:\n query = query.filter_by(schema=schema)\n return query.all()\n\nsa.event.listen(SqlaTable, 'after_insert', set_perm)\nsa.event.listen(SqlaTable, 'after_update', set_perm)\n"
] | [
[
"pandas.read_sql_query",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
JarvisDevon/Restricted_Boltzmann_Machine | [
"b712778bb4dc7a9858746bac32b418315f053b56"
] | [
"RBM_movie.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport pickle\nimport gzip\n\nnp.set_printoptions(threshold=np.inf)\n\nf = open('data/ACML_Movies.csv', 'r')\nmovie_strngs = f.read()\nmovie_strngs = movie_strngs.split('\\n')\nmovie_strngs = movie_strngs[1:]\nmovie_strngs = movie_strngs[:-1]\nratings = []\nfor strng in movie_strngs:\n split_strng = strng.split(',')\n rate = np.array([int(d) for d in split_strng])\n ratings.append(rate)\n\nratings = np.array(ratings)\nratings = ratings[:, 1:]\n\ntest_ratings = np.copy(ratings[-11:])\nratings = ratings[:-11]\n\nweights = np.random.uniform(-0.3, 0.3, (20,35*5))\n\nlearn_rate = 0.01\nepochs = 400\n\ndef sigmoid(x):\n\tout = np.zeros(x.shape)\n\tfor i in range(out.shape[0]):\n\t\tif x[i] >= 0:\n\t\t\tout[i] = 1/(1+np.exp(-x[i]))\n\t\telse:\n\t\t\tout[i] = np.exp(x[i])/(1+np.exp(x[i]))\n\treturn out\n\t#return np.where(x >= 0, 1/(1+np.exp(-x)), np.exp(x)/(1+np.exp(x)))\n\ndef softmax(x):\n\treturn np.exp(x-np.max(x))/np.sum(np.exp(x-np.max(x)), axis=0)\t\t#NOTE If we using batches we will need axis=1\n\n\nfor k in range(epochs):\n print(\"Starting epoch: \", k)\n for v in ratings:\n rate_matrix = np.zeros((v.shape[0], 5))\n for i in range(v.shape[0]):\n if v[i] != -1:\n rate_matrix[i, v[i]-1] = 1\n v_in = rate_matrix.reshape(35*5,)\n h = np.random.binomial(1,sigmoid(np.dot(weights, v_in)))\n pos_grad = np.dot(h.reshape(20,1), v_in.reshape(1,175))\n v_prime = np.zeros((v.shape[0], 5))\n vis_active = np.dot(h, weights)\n vis_active_matrix = vis_active.reshape(v.shape[0], 5)\n for movie_index in range(len(vis_active_matrix)):\n v_prime[movie_index] = np.random.binomial(1, softmax(vis_active_matrix[movie_index]))\n #v_prime = np.random.binomial(1, sigmoid(np.dot(h, weights)))\n for i in range(len(v)):\n if v[i] == -1:\n v_prime[i] = np.zeros(5)\n h_prime = np.random.binomial(1, sigmoid(np.dot(weights, v_prime.reshape(35*5,))))\n neg_grad = np.dot(h_prime.reshape(20,1), v_prime.reshape(1, 175))\n delta_w = pos_grad - neg_grad\n weights = weights + (learn_rate * delta_w)\n\nnp.savetxt(\"RBM_movies_weights.txt\", weights)\n\nfor i in range(20):\n\th_set = np.zeros(20)\n\th_set[i] = 1\n\tvis_active = np.dot(h_set, weights)\n\tvis_active_matrix = vis_active.reshape(v.shape[0], 5)\n\tplt.figure()\n\tplt.imshow(vis_active_matrix)\n\tplt.axis('off')\n\tplt.savefig(\"RBM_movie_pc_out_ims/component_\" + str(i+1) + \".png\")\n\tnp.savetxt(\"RBM_movie_pc_out_ims/component_\" + str(i+1) + \".txt\", vis_active_matrix)\n"
] | [
[
"numpy.dot",
"matplotlib.pyplot.imshow",
"numpy.set_printoptions",
"numpy.exp",
"numpy.max",
"numpy.copy",
"matplotlib.pyplot.axis",
"numpy.savetxt",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bouillealpha/gluon-ts | [
"f155dd3ca12894a08dbd06094f0673305551c128"
] | [
"src/gluonts/mx/trainer/_base.py"
] | [
"# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nimport logging\nimport os\nimport tempfile\nimport time\nimport uuid\nfrom typing import Any, List, Optional, Union\n\n# Third-party imports\nimport mxnet as mx\nimport mxnet.autograd as autograd\nimport mxnet.gluon.nn as nn\nimport numpy as np\n\n# First-party imports\nfrom gluonts.core.component import get_mxnet_context, validated\nfrom gluonts.core.exception import GluonTSDataError, GluonTSUserError\nfrom gluonts.dataset.loader import TrainDataLoader, ValidationDataLoader\nfrom gluonts.gluonts_tqdm import tqdm\nfrom gluonts.support.util import HybridContext\n\n# Relative imports\nfrom . import learning_rate_scheduler as lrs\nfrom .model_averaging import (\n AveragingStrategy,\n SelectNBestMean,\n save_epoch_info,\n)\nfrom .model_iteration_averaging import (\n IterationAveragingStrategy,\n NTA,\n Alpha_Suffix,\n)\n\nlogger = logging.getLogger(\"gluonts\").getChild(\"trainer\")\n\n\nMODEL_ARTIFACT_FILE_NAME = \"model\"\nSTATE_ARTIFACT_FILE_NAME = \"state\"\n\n# make the IDE happy: mx.py does not explicitly import autograd\nmx.autograd = autograd\n\n\ndef check_loss_finite(val: float) -> None:\n if not np.isfinite(val):\n raise GluonTSDataError(\n \"Encountered invalid loss value! Try reducing the learning rate \"\n \"or try a different likelihood.\"\n )\n\n\ndef loss_value(loss: mx.metric.Loss) -> float:\n return loss.get_name_value()[0][1]\n\n\nclass Trainer:\n r\"\"\"\n A trainer specifies how a network is going to be trained.\n\n A trainer is mainly defined by two sets of parameters. The first one determines the number of examples\n that the network will be trained on (`epochs`, `num_batches_per_epoch` and `batch_size`), while the\n second one specifies how the gradient updates are performed (`learning_rate`, `learning_rate_decay_factor`,\n `patience`, `minimum_learning_rate`, `clip_gradient` and `weight_decay`).\n\n Parameters\n ----------\n ctx\n epochs\n Number of epochs that the network will train (default: 100).\n batch_size\n Number of examples in each batch (default: 32).\n num_batches_per_epoch\n Number of batches at each epoch (default: 50).\n learning_rate\n Initial learning rate (default: :math:`10^{-3}`).\n learning_rate_decay_factor\n Factor (between 0 and 1) by which to decrease the learning rate (default: 0.5).\n patience\n The patience to observe before reducing the learning rate, nonnegative integer (default: 10).\n minimum_learning_rate\n Lower bound for the learning rate (default: :math:`5\\cdot 10^{-5}`).\n clip_gradient\n Maximum value of gradient. The gradient is clipped if it is too large (default: 10).\n weight_decay\n The weight decay (or L2 regularization) coefficient. 
Modifies objective by adding a penalty for having\n large weights (default :math:`10^{-8}`).\n init\n Initializer of the weights of the network (default: \"xavier\").\n hybridize\n \"\"\"\n\n @validated()\n def __init__(\n self,\n ctx: Optional[mx.Context] = None,\n epochs: int = 100,\n batch_size: int = 32,\n num_batches_per_epoch: int = 50,\n learning_rate: float = 1e-3,\n learning_rate_decay_factor: float = 0.5,\n patience: int = 10,\n minimum_learning_rate: float = 5e-5,\n clip_gradient: float = 10.0,\n weight_decay: float = 1e-8,\n init: Union[str, mx.initializer.Initializer] = \"xavier\",\n hybridize: bool = True,\n avg_strategy: Union[\n AveragingStrategy, IterationAveragingStrategy\n ] = SelectNBestMean(num_models=1),\n ) -> None:\n\n assert (\n 0 <= epochs < float(\"inf\")\n ), \"The value of `epochs` should be >= 0\"\n assert 0 < batch_size, \"The value of `batch_size` should be > 0\"\n assert (\n 0 < num_batches_per_epoch\n ), \"The value of `num_batches_per_epoch` should be > 0\"\n assert (\n 0 < learning_rate < float(\"inf\")\n ), \"The value of `learning_rate` should be > 0\"\n assert (\n 0 <= learning_rate_decay_factor < 1\n ), \"The value of `learning_rate_decay_factor` should be in the [0, 1) range\"\n assert 0 <= patience, \"The value of `patience` should be >= 0\"\n assert (\n 0 <= minimum_learning_rate\n ), \"The value of `minimum_learning_rate` should be >= 0\"\n assert 0 < clip_gradient, \"The value of `clip_gradient` should be > 0\"\n assert 0 <= weight_decay, \"The value of `weight_decay` should be => 0\"\n\n self.epochs = epochs\n self.batch_size = batch_size\n self.num_batches_per_epoch = num_batches_per_epoch\n self.learning_rate = learning_rate\n self.learning_rate_decay_factor = learning_rate_decay_factor\n self.patience = patience\n self.minimum_learning_rate = minimum_learning_rate\n self.clip_gradient = clip_gradient\n self.weight_decay = weight_decay\n self.init = init\n self.hybridize = hybridize\n self.avg_strategy = avg_strategy\n self.ctx = ctx if ctx is not None else get_mxnet_context()\n self.halt = False\n\n def set_halt(self, signum: int, stack_frame: Any) -> None:\n logger.info(\"Received signal: {}\".format(signum))\n self.halt = True\n\n def count_model_params(self, net: nn.HybridBlock) -> int:\n params = net.collect_params()\n num_params = 0\n for p in params:\n v = params[p]\n num_params += np.prod(v.shape)\n return num_params\n\n def __call__(\n self,\n net: nn.HybridBlock,\n input_names: List[str],\n train_iter: TrainDataLoader,\n validation_iter: Optional[ValidationDataLoader] = None,\n ) -> None: # TODO: we may want to return some training information here\n is_validation_available = validation_iter is not None\n self.halt = False\n\n with tempfile.TemporaryDirectory(\n prefix=\"gluonts-trainer-temp-\"\n ) as gluonts_temp:\n\n def base_path() -> str:\n return os.path.join(\n gluonts_temp,\n \"{}_{}\".format(STATE_ARTIFACT_FILE_NAME, uuid.uuid4()),\n )\n\n logger.info(\"Start model training\")\n\n net.initialize(ctx=self.ctx, init=self.init)\n\n with HybridContext(\n net=net,\n hybridize=self.hybridize,\n static_alloc=True,\n static_shape=True,\n ):\n batch_size = train_iter.batch_size\n\n best_epoch_info = {\n \"params_path\": \"%s-%s.params\" % (base_path(), \"init\"),\n \"epoch_no\": -1,\n \"score\": np.Inf,\n }\n\n lr_scheduler = lrs.MetricAttentiveScheduler(\n objective=\"min\",\n patience=self.patience,\n decay_factor=self.learning_rate_decay_factor,\n min_lr=self.minimum_learning_rate,\n )\n\n optimizer = mx.optimizer.Adam(\n 
learning_rate=self.learning_rate,\n lr_scheduler=lr_scheduler,\n wd=self.weight_decay,\n clip_gradient=self.clip_gradient,\n )\n\n trainer = mx.gluon.Trainer(\n net.collect_params(),\n optimizer=optimizer,\n kvstore=\"device\", # FIXME: initialize properly\n )\n\n def loop(\n epoch_no, batch_iter, is_training: bool = True\n ) -> mx.metric.Loss:\n tic = time.time()\n\n epoch_loss = mx.metric.Loss()\n\n # use averaged model for validation\n if not is_training and isinstance(\n self.avg_strategy, IterationAveragingStrategy\n ):\n self.avg_strategy.load_averaged_model(net)\n\n with tqdm(batch_iter) as it:\n for batch_no, data_entry in enumerate(it, start=1):\n if self.halt:\n break\n\n inputs = [data_entry[k] for k in input_names]\n\n with mx.autograd.record():\n output = net(*inputs)\n\n # network can returns several outputs, the first being always the loss\n # when having multiple outputs, the forward returns a list in the case of hybrid and a\n # tuple otherwise\n # we may wrap network outputs in the future to avoid this type check\n if isinstance(output, (list, tuple)):\n loss = output[0]\n else:\n loss = output\n\n if is_training:\n loss.backward()\n trainer.step(batch_size)\n\n # iteration averaging in training\n if isinstance(\n self.avg_strategy,\n IterationAveragingStrategy,\n ):\n self.avg_strategy.apply(net)\n\n epoch_loss.update(None, preds=loss)\n lv = loss_value(epoch_loss)\n\n if not np.isfinite(lv):\n logger.warning(\n \"Epoch[%d] gave nan loss\", epoch_no\n )\n return epoch_loss\n\n it.set_postfix(\n ordered_dict={\n \"epoch\": f\"{epoch_no + 1}/{self.epochs}\",\n (\"\" if is_training else \"validation_\")\n + \"avg_epoch_loss\": lv,\n },\n refresh=False,\n )\n # print out parameters of the network at the first pass\n if batch_no == 1 and epoch_no == 0:\n net_name = type(net).__name__\n num_model_param = self.count_model_params(net)\n logger.info(\n f\"Number of parameters in {net_name}: {num_model_param}\"\n )\n # mark epoch end time and log time cost of current epoch\n toc = time.time()\n logger.info(\n \"Epoch[%d] Elapsed time %.3f seconds\",\n epoch_no,\n (toc - tic),\n )\n\n logger.info(\n \"Epoch[%d] Evaluation metric '%s'=%f\",\n epoch_no,\n (\"\" if is_training else \"validation_\") + \"epoch_loss\",\n lv,\n )\n\n if not is_training and isinstance(\n self.avg_strategy, IterationAveragingStrategy\n ):\n # bring back the cached model\n self.avg_strategy.load_cached_model(net)\n\n return epoch_loss\n\n for epoch_no in range(self.epochs):\n if self.halt:\n logger.info(f\"Epoch[{epoch_no}] Interrupting training\")\n break\n\n curr_lr = trainer.learning_rate\n logger.info(\n f\"Epoch[{epoch_no}] Learning rate is {curr_lr}\"\n )\n\n epoch_loss = loop(epoch_no, train_iter)\n if is_validation_available:\n epoch_loss = loop(\n epoch_no, validation_iter, is_training=False\n )\n\n # update average trigger\n if isinstance(\n self.avg_strategy, IterationAveragingStrategy\n ):\n self.avg_strategy.update_average_trigger(\n metric=loss_value(epoch_loss), epoch=epoch_no + 1\n )\n # once triggered, update the average immediately\n self.avg_strategy.apply(net)\n\n should_continue = lr_scheduler.step(loss_value(epoch_loss))\n if not should_continue:\n logger.info(\"Stopping training\")\n break\n\n # save model and epoch info\n bp = base_path()\n epoch_info = {\n \"params_path\": f\"{bp}-0000.params\",\n \"epoch_no\": epoch_no,\n \"score\": loss_value(epoch_loss),\n }\n\n net.save_parameters(\n epoch_info[\"params_path\"]\n ) # TODO: handle possible exception\n\n save_epoch_info(bp, 
epoch_info)\n\n # update best epoch info - needed for the learning rate scheduler\n if loss_value(epoch_loss) < best_epoch_info[\"score\"]:\n best_epoch_info = epoch_info.copy()\n\n if not trainer.learning_rate == curr_lr:\n if best_epoch_info[\"epoch_no\"] == -1:\n raise GluonTSUserError(\n \"Got NaN in first epoch. Try reducing initial learning rate.\"\n )\n\n logger.info(\n f\"Loading parameters from best epoch \"\n f\"({best_epoch_info['epoch_no']})\"\n )\n net.load_parameters(\n best_epoch_info[\"params_path\"], self.ctx\n )\n\n if isinstance(self.avg_strategy, AveragingStrategy):\n logging.info(\"Computing averaged parameters.\")\n averaged_params_path = self.avg_strategy.apply(\n gluonts_temp\n )\n\n logging.info(\"Loading averaged parameters.\")\n net.load_parameters(averaged_params_path, self.ctx)\n\n if isinstance(self.avg_strategy, IterationAveragingStrategy):\n logging.info(\"Loading averaged parameters.\")\n self.avg_strategy.load_averaged_model(net)\n\n logger.info(\"End model training\")\n"
] | [
[
"numpy.prod",
"numpy.isfinite"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alisonrib17/ACM-Paper | [
"5fb1a8240825e58ed524c880f11152eed7904c3b"
] | [
"Spanish/TaskB/svm.py"
] | [
"# -*- coding: utf-8 -*-\n\n#@author: alison\n\nimport re\nimport string\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom sklearn.svm import LinearSVC\nfrom nltk.stem import PorterStemmer, SnowballStemmer\nfrom nltk.tokenize import TweetTokenizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import f1_score, precision_score, accuracy_score, recall_score\n\n# Etapa de pré-processamento\n\ndef clean_tweets(tweet):\n tweet = re.sub('@(\\\\w{1,15})\\b', '', tweet)\n tweet = tweet.replace(\"via \", \"\")\n tweet = tweet.replace(\"RT \", \"\")\n tweet = tweet.lower()\n return tweet\n \ndef clean_url(tweet):\n tweet = re.sub('http\\\\S+', '', tweet, flags=re.MULTILINE) \n return tweet\n \ndef remove_stop_words(tweet):\n stops = set(stopwords.words(\"spanish\"))\n stops.update(['.',',','\"',\"'\",'?',':',';','(',')','[',']','{','}'])\n toks = [tok for tok in tweet if not tok in stops and len(tok) >= 3]\n return toks\n \ndef stemming_tweets(tweet):\n stemmer = SnowballStemmer('spanish')\n stemmed_words = [stemmer.stem(word) for word in tweet]\n return stemmed_words\n\ndef remove_number(tweet):\n newTweet = re.sub('\\\\d+', '', tweet)\n return newTweet\n\ndef remove_hashtags(tweet):\n result = ''\n\n for word in tweet.split():\n if word.startswith('#') or word.startswith('@'):\n result += word[1:]\n result += ' '\n else:\n result += word\n result += ' '\n\n return result\n\ndef preprocessing(tweet, swords = True, url = True, stemming = True, ctweets = True, number = True, hashtag = True):\n\n if ctweets:\n tweet = clean_tweets(tweet)\n\n if url:\n tweet = clean_url(tweet)\n\n if hashtag:\n tweet = remove_hashtags(tweet)\n \n twtk = TweetTokenizer(strip_handles=True, reduce_len=True)\n\n if number:\n tweet = remove_number(tweet)\n \n tokens = [w.lower() for w in twtk.tokenize(tweet) if w != \"\" and w is not None]\n\n if swords:\n tokens = remove_stop_words(tokens)\n\n if stemming:\n tokens = stemming_tweets(tokens)\n\n text = \" \".join(tokens)\n\n return text\n\ndef bag_of_words(train, test):\n vec = CountVectorizer(analyzer='word', binary=True, ngram_range=(1, 3), min_df=1, max_features=25000)\n train = vec.fit_transform(train).toarray()\n test = vec.transform(test).toarray()\n return train, test\n\ndef save_files(ID, HS, TR, AG):\n with open(\"ClassPred/SVM_BoW.tsv\", \"w\") as file:\n for i in range(len(ID)):\n file.write(str(ID[i]))\n file.write('\\t')\n file.write(str(HS[i]))\n file.write('\\t')\n file.write(str(TR[i]))\n file.write('\\t')\n file.write(str(AG[i]))\n file.write('\\n')\n\n with open(\"input/res/es_b.tsv\", \"w\") as file:\n for i in range(len(ID)):\n file.write(str(ID[i]))\n file.write('\\t')\n file.write(str(HS[i]))\n file.write('\\t')\n file.write(str(TR[i]))\n file.write('\\t')\n file.write(str(AG[i]))\n file.write('\\n')\n\ndef classify(x_train, y_train, x_test):\n\t# Fase de classificação de sentimentos\n\n\tclf = LinearSVC(C=0.1, verbose=1, max_iter=2000, random_state=None, penalty='l2') # Instância do classificador\n\n\tclf.fit(x_train, y_train) # Fase de treinamento\n\n\t# Criando arquivo para salvar modelo treinado\n\tfilename = 'Models/modelSVM.sav'\n\tpickle.dump(clf, open(filename, 'wb'))\n\n\ty_pred = clf.predict(x_test) # Fase de predição, testando dados novos\n\n\treturn y_pred\n\ndef main():\n\n\ttrain = pd.read_csv('Dataset/train_es.tsv', delimiter='\\t',encoding='utf-8')\n\tdev = pd.read_csv('Dataset/dev_es.tsv', delimiter='\\t',encoding='utf-8')\n\ttest = 
pd.read_csv('Dataset/test_es.tsv', delimiter='\\t',encoding='utf-8')\n\n\t###########################################################################################################\n\n\t# Pré-processamento dos tweets\n\n\ttrain_text = train['text'].map(lambda x: preprocessing(x, swords = True, url = True, stemming = True, ctweets = True, number = True, hashtag = True))\n\ths_train = train['HS']\n\tid_train = train['id']\n\ttr_train = train['TR']\n\tag_train = train['AG']\n\n\tdev_text = dev['text'].map(lambda x: preprocessing(x, swords = True, url = True, stemming = True, ctweets = True, number = True, hashtag = True))\n\ths_dev = dev['HS']\n\tid_dev = dev['id']\n\ttr_dev = dev['TR']\n\tag_dev = dev['AG']\n\n\ttest_text = test['text'].map(lambda x: preprocessing(x, swords = True, url = True, stemming = True, ctweets = True, number = True, hashtag = True))\n\ths_test = test['HS']\n\tid_test = test['id']\n\ttr_test = test['TR']\n\tag_test = test['AG']\n\n\t###########################################################################################################\n\n\t# Bag-of-Words\n\n\t#train_text = np.concatenate((train_text, dev_text), axis=0)\n\t#hs_train = np.concatenate((hs_train, hs_dev), axis=0)\n\t#tr_train = np.concatenate((tr_train, tr_dev), axis=0)\n\t#ag_train = np.concatenate((ag_train, ag_dev), axis=0)\n\n\tx_train, x_test = bag_of_words(train_text, test_text)\n\n\t###########################################################################################################\n\n\ths_pred = classify(x_train, hs_train, x_test)\n\ttr_pred = classify(x_train, tr_train, x_test)\n\tag_pred = classify(x_train, ag_train, x_test)\n\n\t# Salvando arquivos para a avalição em evaluation.py\n\tsave_files(id_test, hs_pred, tr_pred, ag_pred)\n\n\tprint(\"Treinamento finalizado! Testando modelo...\")\n\n\t#print(\"F1.........: %f\" %(f1_score(y_test, y_pred, average=\"macro\")))\n\t#print(\"Precision..: %f\" %(precision_score(y_test, y_pred, average=\"macro\")))\n\t#print(\"Recall.....: %f\" %(recall_score(y_test, y_pred, average=\"macro\")))\n\t#print(\"Accuracy...: %f\" %(accuracy_score(y_test, y_pred)))\n\n \nif __name__ == '__main__':\n main()"
] | [
[
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.read_csv",
"sklearn.svm.LinearSVC"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
tamnguyenvan/mobilenetv3-face | [
"7f3d95b500d6523d0077e3f6ea905499e7915417"
] | [
"engine.py"
] | [
"import math\nimport sys\nimport time\n\nimport torch\nimport torchvision.models.detection.mask_rcnn\nimport utils\nfrom coco_eval import CocoEvaluator\nfrom coco_utils import get_coco_api_from_dataset\n\n\ndef train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq, scaler=None):\n model.train()\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter(\"lr\", utils.SmoothedValue(window_size=1, fmt=\"{value:.6f}\"))\n header = f\"Epoch: [{epoch}]\"\n\n lr_scheduler = None\n if epoch == 0:\n warmup_factor = 1.0 / 1000\n warmup_iters = min(1000, len(data_loader) - 1)\n\n lr_scheduler = torch.optim.lr_scheduler.LinearLR(\n optimizer, start_factor=warmup_factor, total_iters=warmup_iters\n )\n\n for images, targets in metric_logger.log_every(data_loader, print_freq, header):\n images = list(image.to(device) for image in images)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n with torch.cuda.amp.autocast(enabled=scaler is not None):\n loss_dict = model(images, targets)\n losses = sum(loss for loss in loss_dict.values())\n\n # reduce losses over all GPUs for logging purposes\n loss_dict_reduced = utils.reduce_dict(loss_dict)\n losses_reduced = sum(loss for loss in loss_dict_reduced.values())\n\n loss_value = losses_reduced.item()\n\n if not math.isfinite(loss_value):\n print(f\"Loss is {loss_value}, stopping training\")\n print(loss_dict_reduced)\n sys.exit(1)\n\n optimizer.zero_grad()\n if scaler is not None:\n scaler.scale(losses).backward()\n scaler.step(optimizer)\n scaler.update()\n else:\n losses.backward()\n optimizer.step()\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n metric_logger.update(loss=losses_reduced, **loss_dict_reduced)\n metric_logger.update(lr=optimizer.param_groups[0][\"lr\"])\n\n return metric_logger\n\n\ndef _get_iou_types(model):\n model_without_ddp = model\n if isinstance(model, torch.nn.parallel.DistributedDataParallel):\n model_without_ddp = model.module\n iou_types = [\"bbox\"]\n if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):\n iou_types.append(\"segm\")\n if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):\n iou_types.append(\"keypoints\")\n return iou_types\n\n\[email protected]_mode()\ndef evaluate(model, data_loader, device):\n n_threads = torch.get_num_threads()\n # FIXME remove this and make paste_masks_in_image run on the GPU\n torch.set_num_threads(1)\n cpu_device = torch.device(\"cpu\")\n model.eval()\n metric_logger = utils.MetricLogger(delimiter=\" \")\n header = \"Test:\"\n\n coco = get_coco_api_from_dataset(data_loader.dataset)\n iou_types = _get_iou_types(model)\n coco_evaluator = CocoEvaluator(coco, iou_types)\n\n for images, targets in metric_logger.log_every(data_loader, 100, header):\n images = list(img.to(device) for img in images)\n\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n model_time = time.time()\n outputs = model(images)\n\n outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]\n model_time = time.time() - model_time\n\n res = {target[\"image_id\"].item(): output for target, output in zip(targets, outputs)}\n evaluator_time = time.time()\n coco_evaluator.update(res)\n evaluator_time = time.time() - evaluator_time\n metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n coco_evaluator.synchronize_between_processes()\n\n # 
accumulate predictions from all images\n coco_evaluator.accumulate()\n coco_evaluator.summarize()\n torch.set_num_threads(n_threads)\n return coco_evaluator"
] | [
[
"torch.cuda.synchronize",
"torch.optim.lr_scheduler.LinearLR",
"torch.inference_mode",
"torch.cuda.amp.autocast",
"torch.set_num_threads",
"torch.cuda.is_available",
"torch.device",
"torch.get_num_threads"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wzy2009520/vnpy | [
"c7049a4ae910b74e1ebd89bdcafc38076951cee5"
] | [
"vn.trader/ctaAlgo/strategyAtrRsi.py"
] | [
"# encoding: UTF-8\n\n\"\"\"\n一个ATR-RSI指标结合的交易策略,适合用在股指的1分钟和5分钟线上。\n\n注意事项:\n1. 作者不对交易盈利做任何保证,策略代码仅供参考\n2. 本策略需要用到talib,没有安装的用户请先参考www.vnpy.org上的教程安装\n3. 将IF0000_1min.csv用ctaHistoryData.py导入MongoDB后,直接运行本文件即可回测策略\n\n\"\"\"\n\n\nfrom ctaBase import *\nfrom ctaTemplate import CtaTemplate\n\nimport talib\nimport numpy as np\n\n\n########################################################################\nclass AtrRsiStrategy(CtaTemplate):\n \"\"\"结合ATR和RSI指标的一个分钟线交易策略\"\"\"\n className = 'AtrRsiStrategy'\n author = u'用Python的交易员'\n\n # 策略参数\n atrLength = 22 # 计算ATR指标的窗口数 \n atrMaLength = 10 # 计算ATR均线的窗口数\n rsiLength = 5 # 计算RSI的窗口数\n rsiEntry = 16 # RSI的开仓信号\n trailingPercent = 0.8 # 百分比移动止损\n initDays = 10 # 初始化数据所用的天数\n fixedSize = 1 # 每次交易的数量\n\n # 策略变量\n bar = None # K线对象\n barMinute = EMPTY_STRING # K线当前的分钟\n\n bufferSize = 100 # 需要缓存的数据的大小\n bufferCount = 0 # 目前已经缓存了的数据的计数\n highArray = np.zeros(bufferSize) # K线最高价的数组\n lowArray = np.zeros(bufferSize) # K线最低价的数组\n closeArray = np.zeros(bufferSize) # K线收盘价的数组\n \n atrCount = 0 # 目前已经缓存了的ATR的计数\n atrArray = np.zeros(bufferSize) # ATR指标的数组\n atrValue = 0 # 最新的ATR指标数值\n atrMa = 0 # ATR移动平均的数值\n\n rsiValue = 0 # RSI指标的数值\n rsiBuy = 0 # RSI买开阈值\n rsiSell = 0 # RSI卖开阈值\n intraTradeHigh = 0 # 移动止损用的持仓期内最高价\n intraTradeLow = 0 # 移动止损用的持仓期内最低价\n\n orderList = [] # 保存委托代码的列表\n\n # 参数列表,保存了参数的名称\n paramList = ['name',\n 'className',\n 'author',\n 'vtSymbol',\n 'atrLength',\n 'atrMaLength',\n 'rsiLength',\n 'rsiEntry',\n 'trailingPercent'] \n\n # 变量列表,保存了变量的名称\n varList = ['inited',\n 'trading',\n 'pos',\n 'atrValue',\n 'atrMa',\n 'rsiValue',\n 'rsiBuy',\n 'rsiSell'] \n\n #----------------------------------------------------------------------\n def __init__(self, ctaEngine, setting):\n \"\"\"Constructor\"\"\"\n super(AtrRsiStrategy, self).__init__(ctaEngine, setting)\n \n # 注意策略类中的可变对象属性(通常是list和dict等),在策略初始化时需要重新创建,\n # 否则会出现多个策略实例之间数据共享的情况,有可能导致潜在的策略逻辑错误风险,\n # 策略类中的这些可变对象属性可以选择不写,全都放在__init__下面,写主要是为了阅读\n # 策略时方便(更多是个编程习惯的选择) \n\n #----------------------------------------------------------------------\n def onInit(self):\n \"\"\"初始化策略(必须由用户继承实现)\"\"\"\n self.writeCtaLog(u'%s策略初始化' %self.name)\n \n # 初始化RSI入场阈值\n self.rsiBuy = 50 + self.rsiEntry\n self.rsiSell = 50 - self.rsiEntry\n\n # 载入历史数据,并采用回放计算的方式初始化策略数值\n initData = self.loadBar(self.initDays)\n for bar in initData:\n self.onBar(bar)\n\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onStart(self):\n \"\"\"启动策略(必须由用户继承实现)\"\"\"\n self.writeCtaLog(u'%s策略启动' %self.name)\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onStop(self):\n \"\"\"停止策略(必须由用户继承实现)\"\"\"\n self.writeCtaLog(u'%s策略停止' %self.name)\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onTick(self, tick):\n \"\"\"收到行情TICK推送(必须由用户继承实现)\"\"\"\n # 计算K线\n tickMinute = tick.datetime.minute\n\n if tickMinute != self.barMinute: \n if self.bar:\n self.onBar(self.bar)\n\n bar = CtaBarData() \n bar.vtSymbol = tick.vtSymbol\n bar.symbol = tick.symbol\n bar.exchange = tick.exchange\n\n bar.open = tick.lastPrice\n bar.high = tick.lastPrice\n bar.low = tick.lastPrice\n bar.close = tick.lastPrice\n\n bar.date = tick.date\n bar.time = tick.time\n bar.datetime = tick.datetime # K线的时间设为第一个Tick的时间\n\n self.bar = bar # 这种写法为了减少一层访问,加快速度\n self.barMinute = tickMinute # 更新当前的分钟\n else: # 否则继续累加新的K线\n bar = self.bar # 写法同样为了加快速度\n\n bar.high = max(bar.high, tick.lastPrice)\n bar.low = min(bar.low, 
tick.lastPrice)\n bar.close = tick.lastPrice\n\n #----------------------------------------------------------------------\n def onBar(self, bar):\n \"\"\"收到Bar推送(必须由用户继承实现)\"\"\"\n # 撤销之前发出的尚未成交的委托(包括限价单和停止单)\n for orderID in self.orderList:\n self.cancelOrder(orderID)\n self.orderList = []\n\n # 保存K线数据\n self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]\n self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]\n self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]\n \n self.closeArray[-1] = bar.close\n self.highArray[-1] = bar.high\n self.lowArray[-1] = bar.low\n \n self.bufferCount += 1\n if self.bufferCount < self.bufferSize:\n return\n\n # 计算指标数值\n self.atrValue = talib.ATR(self.highArray, \n self.lowArray, \n self.closeArray,\n self.atrLength)[-1]\n self.atrArray[0:self.bufferSize-1] = self.atrArray[1:self.bufferSize]\n self.atrArray[-1] = self.atrValue\n\n self.atrCount += 1\n if self.atrCount < self.bufferSize:\n return\n\n self.atrMa = talib.MA(self.atrArray, \n self.atrMaLength)[-1]\n self.rsiValue = talib.RSI(self.closeArray, \n self.rsiLength)[-1]\n\n # 判断是否要进行交易\n \n # 当前无仓位\n if self.pos == 0:\n self.intraTradeHigh = bar.high\n self.intraTradeLow = bar.low\n\n # ATR数值上穿其移动平均线,说明行情短期内波动加大\n # 即处于趋势的概率较大,适合CTA开仓\n if self.atrValue > self.atrMa:\n # 使用RSI指标的趋势行情时,会在超买超卖区钝化特征,作为开仓信号\n if self.rsiValue > self.rsiBuy:\n # 这里为了保证成交,选择超价5个整指数点下单\n self.buy(bar.close+5, self.fixedSize)\n\n elif self.rsiValue < self.rsiSell:\n self.short(bar.close-5, self.fixedSize)\n\n # 持有多头仓位\n elif self.pos > 0:\n # 计算多头持有期内的最高价,以及重置最低价\n self.intraTradeHigh = max(self.intraTradeHigh, bar.high)\n self.intraTradeLow = bar.low\n # 计算多头移动止损\n longStop = self.intraTradeHigh * (1-self.trailingPercent/100)\n # 发出本地止损委托,并且把委托号记录下来,用于后续撤单\n orderID = self.sell(longStop, abs(self.pos), stop=True)\n self.orderList.append(orderID)\n\n # 持有空头仓位\n elif self.pos < 0:\n self.intraTradeLow = min(self.intraTradeLow, bar.low)\n self.intraTradeHigh = bar.high\n\n shortStop = self.intraTradeLow * (1+self.trailingPercent/100)\n orderID = self.cover(shortStop, abs(self.pos), stop=True)\n self.orderList.append(orderID)\n\n # 发出状态更新事件\n self.putEvent()\n\n #----------------------------------------------------------------------\n def onOrder(self, order):\n \"\"\"收到委托变化推送(必须由用户继承实现)\"\"\"\n pass\n\n #----------------------------------------------------------------------\n def onTrade(self, trade):\n # 发出状态更新事件\n self.putEvent()\n\nif __name__ == '__main__':\n # 提供直接双击回测的功能\n # 导入PyQt4的包是为了保证matplotlib使用PyQt4而不是PySide,防止初始化出错\n from ctaBacktesting import *\n from PyQt4 import QtCore, QtGui\n \n # 创建回测引擎\n engine = BacktestingEngine()\n \n # 设置引擎的回测模式为K线\n engine.setBacktestingMode(engine.BAR_MODE)\n\n # 设置回测用的数据起始日期\n engine.setStartDate('20120101')\n \n # 设置产品相关参数\n engine.setSlippage(0.2) # 股指1跳\n engine.setRate(0.3/10000) # 万0.3\n engine.setSize(300) # 股指合约大小 \n \n # 设置使用的历史数据库\n engine.setDatabase(MINUTE_DB_NAME, 'IF0000')\n \n # 在引擎中创建策略对象\n d = {'atrLength': 11}\n engine.initStrategy(AtrRsiStrategy, d)\n \n # 开始跑回测\n engine.runBacktesting()\n \n # 显示回测结果\n engine.showBacktestingResult()\n \n ## 跑优化\n #setting = OptimizationSetting() # 新建一个优化任务设置对象\n #setting.setOptimizeTarget('capital') # 设置优化排序的目标是策略净盈利\n #setting.addParameter('atrLength', 12, 20, 2) # 增加第一个优化参数atrLength,起始11,结束12,步进1\n #setting.addParameter('atrMa', 20, 30, 5) # 增加第二个优化参数atrMa,起始20,结束30,步进1\n #setting.addParameter('rsiLength', 5) # 增加一个固定数值的参数\n \n ## 性能测试环境:I7-3770,主频3.4G, 8核心,内存16G,Windows 7 
专业版\n ## 测试时还跑着一堆其他的程序,性能仅供参考\n #import time \n #start = time.time()\n \n ## 运行单进程优化函数,自动输出结果,耗时:359秒\n #engine.runOptimization(AtrRsiStrategy, setting) \n \n ## 多进程优化,耗时:89秒\n ##engine.runParallelOptimization(AtrRsiStrategy, setting) \n \n #print u'耗时:%s' %(time.time()-start)"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
takuseno/nnabla-ext-cuda | [
"f64c1bcc04a26efcac785ca72dc81120b99329a8"
] | [
"python/test/cuda/test_large_blocks.py"
] | [
"# Copyright (c) 2017 Sony Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport pytest\nimport numpy as np\nimport nnabla as nn\nimport nnabla.functions as F\n\n\[email protected](\"not hasattr(nn.extensions, 'cuda')\")\[email protected](\"m\", [1, 2, 3])\ndef test_cuda_large_blocks(m):\n CUDA_THREAD_PER_BLOCK = 512\n CUDA_MAX_BLOCKS = 65536\n size = CUDA_MAX_BLOCKS * CUDA_THREAD_PER_BLOCK * m + 3\n print(\"Variable size:\", size)\n x = np.zeros((size,), np.float32)\n v = nn.Variable(x.shape)\n v.d = x\n ctx = nn.Context(backend='cuda')\n y = F.relu(v)\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sagarchotalia/astropy | [
"b49ad06b4de9577648a55d499d914e08baeef2c6"
] | [
"astropy/modeling/core.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\"\"\"\nThis module defines base classes for all models. The base class of all\nmodels is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is\nthe base class for all fittable models. Fittable models can be linear or\nnonlinear in a regression analysis sense.\n\nAll models provide a `__call__` method which performs the transformation in\na purely mathematical way, i.e. the models are unitless. Model instances can\nrepresent either a single model, or a \"model set\" representing multiple copies\nof the same type of model, but with potentially different values of the\nparameters in each model making up the set.\n\"\"\"\n# pylint: disable=invalid-name, protected-access, redefined-outer-name\nimport abc\nimport copy\nimport inspect\nimport itertools\nimport functools\nimport operator\nimport types\n\nfrom collections import defaultdict, deque\nfrom inspect import signature\nfrom itertools import chain\n\nimport numpy as np\n\nfrom astropy.utils import indent, metadata\nfrom astropy.table import Table\nfrom astropy.units import Quantity, UnitsError, dimensionless_unscaled\nfrom astropy.units.utils import quantity_asanyarray\nfrom astropy.utils import (sharedmethod, find_current_module,\n check_broadcast, IncompatibleShapeError, isiterable)\nfrom astropy.utils.codegen import make_function_with_signature\nfrom astropy.nddata.utils import add_array, extract_array\nfrom .utils import (combine_labels, make_binary_operator_eval,\n get_inputs_and_params, _combine_equivalency_dict,\n _ConstraintsDict, _SpecialOperatorsDict)\nfrom .bounding_box import ModelBoundingBox, CompoundBoundingBox\nfrom .parameters import (Parameter, InputParameterError,\n param_repr_oneline, _tofloat)\n\n\n__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',\n 'CompoundModel', 'fix_inputs', 'custom_model', 'ModelDefinitionError',\n 'bind_bounding_box', 'bind_compound_bounding_box']\n\n\ndef _model_oper(oper, **kwargs):\n \"\"\"\n Returns a function that evaluates a given Python arithmetic operator\n between two models. The operator should be given as a string, like ``'+'``\n or ``'**'``.\n \"\"\"\n return lambda left, right: CompoundModel(oper, left, right, **kwargs)\n\n\nclass ModelDefinitionError(TypeError):\n \"\"\"Used for incorrect models definitions.\"\"\"\n\n\nclass _ModelMeta(abc.ABCMeta):\n \"\"\"\n Metaclass for Model.\n\n Currently just handles auto-generating the param_names list based on\n Parameter descriptors declared at the class-level of Model subclasses.\n \"\"\"\n\n _is_dynamic = False\n \"\"\"\n This flag signifies whether this class was created in the \"normal\" way,\n with a class statement in the body of a module, as opposed to a call to\n `type` or some other metaclass constructor, such that the resulting class\n does not belong to a specific module. 
This is important for pickling of\n dynamic classes.\n\n This flag is always forced to False for new classes, so code that creates\n dynamic classes should manually set it to True on those classes when\n creating them.\n \"\"\"\n\n # Default empty dict for _parameters_, which will be empty on model\n # classes that don't have any Parameters\n\n def __new__(mcls, name, bases, members, **kwds):\n # See the docstring for _is_dynamic above\n if '_is_dynamic' not in members:\n members['_is_dynamic'] = mcls._is_dynamic\n opermethods = [\n ('__add__', _model_oper('+')),\n ('__sub__', _model_oper('-')),\n ('__mul__', _model_oper('*')),\n ('__truediv__', _model_oper('/')),\n ('__pow__', _model_oper('**')),\n ('__or__', _model_oper('|')),\n ('__and__', _model_oper('&')),\n ('_fix_inputs', _model_oper('fix_inputs'))\n ]\n\n members['_parameters_'] = {k: v for k, v in members.items()\n if isinstance(v, Parameter)}\n\n for opermethod, opercall in opermethods:\n members[opermethod] = opercall\n cls = super().__new__(mcls, name, bases, members, **kwds)\n\n param_names = list(members['_parameters_'])\n\n # Need to walk each base MRO to collect all parameter names\n for base in bases:\n for tbase in base.__mro__:\n if issubclass(tbase, Model):\n # Preserve order of definitions\n param_names = list(tbase._parameters_) + param_names\n # Remove duplicates (arising from redefinition in subclass).\n param_names = list(dict.fromkeys(param_names))\n if cls._parameters_:\n if hasattr(cls, '_param_names'):\n # Slight kludge to support compound models, where\n # cls.param_names is a property; could be improved with a\n # little refactoring but fine for now\n cls._param_names = tuple(param_names)\n else:\n cls.param_names = tuple(param_names)\n\n return cls\n\n def __init__(cls, name, bases, members, **kwds):\n super(_ModelMeta, cls).__init__(name, bases, members, **kwds)\n cls._create_inverse_property(members)\n cls._create_bounding_box_property(members)\n pdict = {}\n for base in bases:\n for tbase in base.__mro__:\n if issubclass(tbase, Model):\n for parname, val in cls._parameters_.items():\n pdict[parname] = val\n cls._handle_special_methods(members, pdict)\n\n def __repr__(cls):\n \"\"\"\n Custom repr for Model subclasses.\n \"\"\"\n\n return cls._format_cls_repr()\n\n def _repr_pretty_(cls, p, cycle):\n \"\"\"\n Repr for IPython's pretty printer.\n\n By default IPython \"pretty prints\" classes, so we need to implement\n this so that IPython displays the custom repr for Models.\n \"\"\"\n\n p.text(repr(cls))\n\n def __reduce__(cls):\n if not cls._is_dynamic:\n # Just return a string specifying where the class can be imported\n # from\n return cls.__name__\n members = dict(cls.__dict__)\n # Delete any ABC-related attributes--these will be restored when\n # the class is reconstructed:\n for key in list(members):\n if key.startswith('_abc_'):\n del members[key]\n\n # Delete custom __init__ and __call__ if they exist:\n for key in ('__init__', '__call__'):\n if key in members:\n del members[key]\n\n return (type(cls), (cls.__name__, cls.__bases__, members))\n\n @property\n def name(cls):\n \"\"\"\n The name of this model class--equivalent to ``cls.__name__``.\n\n This attribute is provided for symmetry with the `Model.name` attribute\n of model instances.\n \"\"\"\n\n return cls.__name__\n\n @property\n def _is_concrete(cls):\n \"\"\"\n A class-level property that determines whether the class is a concrete\n implementation of a Model--i.e. 
it is not some abstract base class or\n internal implementation detail (i.e. begins with '_').\n \"\"\"\n return not (cls.__name__.startswith('_') or inspect.isabstract(cls))\n\n def rename(cls, name=None, inputs=None, outputs=None):\n \"\"\"\n Creates a copy of this model class with a new name, inputs or outputs.\n\n The new class is technically a subclass of the original class, so that\n instance and type checks will still work. For example::\n\n >>> from astropy.modeling.models import Rotation2D\n >>> SkyRotation = Rotation2D.rename('SkyRotation')\n >>> SkyRotation\n <class 'astropy.modeling.core.SkyRotation'>\n Name: SkyRotation (Rotation2D)\n N_inputs: 2\n N_outputs: 2\n Fittable parameters: ('angle',)\n >>> issubclass(SkyRotation, Rotation2D)\n True\n >>> r = SkyRotation(90)\n >>> isinstance(r, Rotation2D)\n True\n \"\"\"\n\n mod = find_current_module(2)\n if mod:\n modname = mod.__name__\n else:\n modname = '__main__'\n\n if name is None:\n name = cls.name\n if inputs is None:\n inputs = cls.inputs\n else:\n if not isinstance(inputs, tuple):\n raise TypeError(\"Expected 'inputs' to be a tuple of strings.\")\n elif len(inputs) != len(cls.inputs):\n raise ValueError(f'{cls.name} expects {len(cls.inputs)} inputs')\n if outputs is None:\n outputs = cls.outputs\n else:\n if not isinstance(outputs, tuple):\n raise TypeError(\"Expected 'outputs' to be a tuple of strings.\")\n elif len(outputs) != len(cls.outputs):\n raise ValueError(f'{cls.name} expects {len(cls.outputs)} outputs')\n new_cls = type(name, (cls,), {\"inputs\": inputs, \"outputs\": outputs})\n new_cls.__module__ = modname\n new_cls.__qualname__ = name\n\n return new_cls\n\n def _create_inverse_property(cls, members):\n inverse = members.get('inverse')\n if inverse is None or cls.__bases__[0] is object:\n # The latter clause is the prevent the below code from running on\n # the Model base class, which implements the default getter and\n # setter for .inverse\n return\n\n if isinstance(inverse, property):\n # We allow the @property decorator to be omitted entirely from\n # the class definition, though its use should be encouraged for\n # clarity\n inverse = inverse.fget\n\n # Store the inverse getter internally, then delete the given .inverse\n # attribute so that cls.inverse resolves to Model.inverse instead\n cls._inverse = inverse\n del cls.inverse\n\n def _create_bounding_box_property(cls, members):\n \"\"\"\n Takes any bounding_box defined on a concrete Model subclass (either\n as a fixed tuple or a property or method) and wraps it in the generic\n getter/setter interface for the bounding_box attribute.\n \"\"\"\n\n # TODO: Much of this is verbatim from _create_inverse_property--I feel\n # like there could be a way to generify properties that work this way,\n # but for the time being that would probably only confuse things more.\n bounding_box = members.get('bounding_box')\n if bounding_box is None or cls.__bases__[0] is object:\n return\n\n if isinstance(bounding_box, property):\n bounding_box = bounding_box.fget\n\n if not callable(bounding_box):\n # See if it's a hard-coded bounding_box (as a sequence) and\n # normalize it\n try:\n bounding_box = ModelBoundingBox.validate(cls, bounding_box, _preserve_ignore=True)\n except ValueError as exc:\n raise ModelDefinitionError(exc.args[0])\n else:\n sig = signature(bounding_box)\n # May be a method that only takes 'self' as an argument (like a\n # property, but the @property decorator was forgotten)\n #\n # However, if the method takes additional arguments then this is a\n # 
parameterized bounding box and should be callable\n if len(sig.parameters) > 1:\n bounding_box = \\\n cls._create_bounding_box_subclass(bounding_box, sig)\n\n # See the Model.bounding_box getter definition for how this attribute\n # is used\n cls._bounding_box = bounding_box\n del cls.bounding_box\n\n def _create_bounding_box_subclass(cls, func, sig):\n \"\"\"\n For Models that take optional arguments for defining their bounding\n box, we create a subclass of ModelBoundingBox with a ``__call__`` method\n that supports those additional arguments.\n\n Takes the function's Signature as an argument since that is already\n computed in _create_bounding_box_property, so no need to duplicate that\n effort.\n \"\"\"\n\n # TODO: Might be convenient if calling the bounding box also\n # automatically sets the _user_bounding_box. So that\n #\n # >>> model.bounding_box(arg=1)\n #\n # in addition to returning the computed bbox, also sets it, so that\n # it's a shortcut for\n #\n # >>> model.bounding_box = model.bounding_box(arg=1)\n #\n # Not sure if that would be non-obvious / confusing though...\n\n def __call__(self, **kwargs):\n return func(self._model, **kwargs)\n\n kwargs = []\n for idx, param in enumerate(sig.parameters.values()):\n if idx == 0:\n # Presumed to be a 'self' argument\n continue\n\n if param.default is param.empty:\n raise ModelDefinitionError(\n 'The bounding_box method for {0} is not correctly '\n 'defined: If defined as a method all arguments to that '\n 'method (besides self) must be keyword arguments with '\n 'default values that can be used to compute a default '\n 'bounding box.'.format(cls.name))\n\n kwargs.append((param.name, param.default))\n\n __call__.__signature__ = sig\n\n return type(f'{cls.name}ModelBoundingBox', (ModelBoundingBox,),\n {'__call__': __call__})\n\n def _handle_special_methods(cls, members, pdict):\n\n # Handle init creation from inputs\n def update_wrapper(wrapper, cls):\n # Set up the new __call__'s metadata attributes as though it were\n # manually defined in the class definition\n # A bit like functools.update_wrapper but uses the class instead of\n # the wrapped function\n wrapper.__module__ = cls.__module__\n wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__\n if hasattr(cls, '__qualname__'):\n wrapper.__qualname__ = f'{cls.__qualname__}.{wrapper.__name__}'\n\n if ('__call__' not in members and 'n_inputs' in members and\n isinstance(members['n_inputs'], int) and members['n_inputs'] > 0):\n\n # Don't create a custom __call__ for classes that already have one\n # explicitly defined (this includes the Model base class, and any\n # other classes that manually override __call__\n\n def __call__(self, *inputs, **kwargs):\n \"\"\"Evaluate this model on the supplied inputs.\"\"\"\n return super(cls, self).__call__(*inputs, **kwargs)\n\n # When called, models can take two optional keyword arguments:\n #\n # * model_set_axis, which indicates (for multi-dimensional input)\n # which axis is used to indicate different models\n #\n # * equivalencies, a dictionary of equivalencies to be applied to\n # the input values, where each key should correspond to one of\n # the inputs.\n #\n # The following code creates the __call__ function with these\n # two keyword arguments.\n\n args = ('self',)\n kwargs = dict([('model_set_axis', None),\n ('with_bounding_box', False),\n ('fill_value', np.nan),\n ('equivalencies', None),\n ('inputs_map', None)])\n\n new_call = make_function_with_signature(\n __call__, args, kwargs, varargs='inputs', varkwargs='new_inputs')\n\n 
# The following makes it look like __call__\n # was defined in the class\n update_wrapper(new_call, cls)\n\n cls.__call__ = new_call\n\n if ('__init__' not in members and not inspect.isabstract(cls) and\n cls._parameters_):\n # Build list of all parameters including inherited ones\n\n # If *all* the parameters have default values we can make them\n # keyword arguments; otherwise they must all be positional\n # arguments\n if all(p.default is not None for p in pdict.values()):\n args = ('self',)\n kwargs = []\n for param_name, param_val in pdict.items():\n default = param_val.default\n unit = param_val.unit\n # If the unit was specified in the parameter but the\n # default is not a Quantity, attach the unit to the\n # default.\n if unit is not None:\n default = Quantity(default, unit, copy=False)\n kwargs.append((param_name, default))\n else:\n args = ('self',) + tuple(pdict.keys())\n kwargs = {}\n\n def __init__(self, *params, **kwargs):\n return super(cls, self).__init__(*params, **kwargs)\n\n new_init = make_function_with_signature(\n __init__, args, kwargs, varkwargs='kwargs')\n update_wrapper(new_init, cls)\n cls.__init__ = new_init\n\n # *** Arithmetic operators for creating compound models ***\n __add__ = _model_oper('+')\n __sub__ = _model_oper('-')\n __mul__ = _model_oper('*')\n __truediv__ = _model_oper('/')\n __pow__ = _model_oper('**')\n __or__ = _model_oper('|')\n __and__ = _model_oper('&')\n _fix_inputs = _model_oper('fix_inputs')\n\n # *** Other utilities ***\n\n def _format_cls_repr(cls, keywords=[]):\n \"\"\"\n Internal implementation of ``__repr__``.\n\n This is separated out for ease of use by subclasses that wish to\n override the default ``__repr__`` while keeping the same basic\n formatting.\n \"\"\"\n\n # For the sake of familiarity start the output with the standard class\n # __repr__\n parts = [super().__repr__()]\n\n if not cls._is_concrete:\n return parts[0]\n\n def format_inheritance(cls):\n bases = []\n for base in cls.mro()[1:]:\n if not issubclass(base, Model):\n continue\n elif (inspect.isabstract(base) or\n base.__name__.startswith('_')):\n break\n bases.append(base.name)\n if bases:\n return f\"{cls.name} ({' -> '.join(bases)})\"\n return cls.name\n\n try:\n default_keywords = [\n ('Name', format_inheritance(cls)),\n ('N_inputs', cls.n_inputs),\n ('N_outputs', cls.n_outputs),\n ]\n\n if cls.param_names:\n default_keywords.append(('Fittable parameters',\n cls.param_names))\n\n for keyword, value in default_keywords + keywords:\n if value is not None:\n parts.append(f'{keyword}: {value}')\n\n return '\\n'.join(parts)\n except Exception:\n # If any of the above formatting fails fall back on the basic repr\n # (this is particularly useful in debugging)\n return parts[0]\n\n\nclass Model(metaclass=_ModelMeta):\n \"\"\"\n Base class for all models.\n\n This is an abstract class and should not be instantiated directly.\n\n The following initialization arguments apply to the majority of Model\n subclasses by default (exceptions include specialized utility models\n like `~astropy.modeling.mappings.Mapping`). 
Parametric models take all\n their parameters as arguments, followed by any of the following optional\n keyword arguments:\n\n Parameters\n ----------\n name : str, optional\n A human-friendly name associated with this model instance\n (particularly useful for identifying the individual components of a\n compound model).\n\n meta : dict, optional\n An optional dict of user-defined metadata to attach to this model.\n How this is used and interpreted is up to the user or individual use\n case.\n\n n_models : int, optional\n If given an integer greater than 1, a *model set* is instantiated\n instead of a single model. This affects how the parameter arguments\n are interpreted. In this case each parameter must be given as a list\n or array--elements of this array are taken along the first axis (or\n ``model_set_axis`` if specified), such that the Nth element is the\n value of that parameter for the Nth model in the set.\n\n See the section on model sets in the documentation for more details.\n\n model_set_axis : int, optional\n This argument only applies when creating a model set (i.e. ``n_models >\n 1``). It changes how parameter values are interpreted. Normally the\n first axis of each input parameter array (properly the 0th axis) is\n taken as the axis corresponding to the model sets. However, any axis\n of an input array may be taken as this \"model set axis\". This accepts\n negative integers as well--for example use ``model_set_axis=-1`` if the\n last (most rapidly changing) axis should be associated with the model\n sets. Also, ``model_set_axis=False`` can be used to tell that a given\n input should be used to evaluate all the models in the model set.\n\n fixed : dict, optional\n Dictionary ``{parameter_name: bool}`` setting the fixed constraint\n for one or more parameters. `True` means the parameter is held fixed\n during fitting and is prevented from updates once an instance of the\n model has been created.\n\n Alternatively the `~astropy.modeling.Parameter.fixed` property of a\n parameter may be used to lock or unlock individual parameters.\n\n tied : dict, optional\n Dictionary ``{parameter_name: callable}`` of parameters which are\n linked to some other parameter. The dictionary values are callables\n providing the linking relationship.\n\n Alternatively the `~astropy.modeling.Parameter.tied` property of a\n parameter may be used to set the ``tied`` constraint on individual\n parameters.\n\n bounds : dict, optional\n A dictionary ``{parameter_name: value}`` of lower and upper bounds of\n parameters. Keys are parameter names. Values are a list or a tuple\n of length 2 giving the desired range for the parameter.\n\n Alternatively the `~astropy.modeling.Parameter.min` and\n `~astropy.modeling.Parameter.max` or\n ~astropy.modeling.Parameter.bounds` properties of a parameter may be\n used to set bounds on individual parameters.\n\n eqcons : list, optional\n List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``\n in a successfully optimized problem.\n\n ineqcons : list, optional\n List of functions of length n such that ``ieqcons[j](x0, *args) >=\n 0.0`` is a successfully optimized problem.\n\n Examples\n --------\n >>> from astropy.modeling import models\n >>> def tie_center(model):\n ... mean = 50 * model.stddev\n ... return mean\n >>> tied_parameters = {'mean': tie_center}\n\n Specify that ``'mean'`` is a tied parameter in one of two ways:\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,\n ... 
tied=tied_parameters)\n\n or\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)\n >>> g1.mean.tied\n False\n >>> g1.mean.tied = tie_center\n >>> g1.mean.tied\n <function tie_center at 0x...>\n\n Fixed parameters:\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,\n ... fixed={'stddev': True})\n >>> g1.stddev.fixed\n True\n\n or\n\n >>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)\n >>> g1.stddev.fixed\n False\n >>> g1.stddev.fixed = True\n >>> g1.stddev.fixed\n True\n \"\"\"\n\n parameter_constraints = Parameter.constraints\n \"\"\"\n Primarily for informational purposes, these are the types of constraints\n that can be set on a model's parameters.\n \"\"\"\n\n model_constraints = ('eqcons', 'ineqcons')\n \"\"\"\n Primarily for informational purposes, these are the types of constraints\n that constrain model evaluation.\n \"\"\"\n\n param_names = ()\n \"\"\"\n Names of the parameters that describe models of this type.\n\n The parameters in this tuple are in the same order they should be passed in\n when initializing a model of a specific type. Some types of models, such\n as polynomial models, have a different number of parameters depending on\n some other property of the model, such as the degree.\n\n When defining a custom model class the value of this attribute is\n automatically set by the `~astropy.modeling.Parameter` attributes defined\n in the class body.\n \"\"\"\n\n n_inputs = 0\n \"\"\"The number of inputs.\"\"\"\n n_outputs = 0\n \"\"\" The number of outputs.\"\"\"\n\n standard_broadcasting = True\n fittable = False\n linear = True\n _separable = None\n \"\"\" A boolean flag to indicate whether a model is separable.\"\"\"\n meta = metadata.MetaData()\n \"\"\"A dict-like object to store optional information.\"\"\"\n\n # By default models either use their own inverse property or have no\n # inverse at all, but users may also assign a custom inverse to a model,\n # optionally; in that case it is of course up to the user to determine\n # whether their inverse is *actually* an inverse to the model they assign\n # it to.\n _inverse = None\n _user_inverse = None\n\n _bounding_box = None\n _user_bounding_box = None\n\n _has_inverse_bounding_box = False\n\n # Default n_models attribute, so that __len__ is still defined even when a\n # model hasn't completed initialization yet\n _n_models = 1\n\n # New classes can set this as a boolean value.\n # It is converted to a dictionary mapping input name to a boolean value.\n _input_units_strict = False\n\n # Allow dimensionless input (and corresponding output). If this is True,\n # input values to evaluate will gain the units specified in input_units. If\n # this is a dictionary then it should map input name to a bool to allow\n # dimensionless numbers for that input.\n # Only has an effect if input_units is defined.\n _input_units_allow_dimensionless = False\n\n # Default equivalencies to apply to input values. If set, this should be a\n # dictionary where each key is a string that corresponds to one of the\n # model inputs. 
Only has an effect if input_units is defined.\n input_units_equivalencies = None\n\n # Covariance matrix can be set by fitter if available.\n # If cov_matrix is available, then std will set as well\n _cov_matrix = None\n _stds = None\n\n def __init_subclass__(cls, **kwargs):\n super().__init_subclass__()\n\n def __init__(self, *args, meta=None, name=None, **kwargs):\n super().__init__()\n self._default_inputs_outputs()\n if meta is not None:\n self.meta = meta\n self._name = name\n # add parameters to instance level by walking MRO list\n mro = self.__class__.__mro__\n for cls in mro:\n if issubclass(cls, Model):\n for parname, val in cls._parameters_.items():\n newpar = copy.deepcopy(val)\n newpar.model = self\n if parname not in self.__dict__:\n self.__dict__[parname] = newpar\n\n self._initialize_constraints(kwargs)\n kwargs = self._initialize_setters(kwargs)\n # Remaining keyword args are either parameter values or invalid\n # Parameter values must be passed in as keyword arguments in order to\n # distinguish them\n self._initialize_parameters(args, kwargs)\n self._initialize_slices()\n self._initialize_unit_support()\n\n def _default_inputs_outputs(self):\n if self.n_inputs == 1 and self.n_outputs == 1:\n self._inputs = (\"x\",)\n self._outputs = (\"y\",)\n elif self.n_inputs == 2 and self.n_outputs == 1:\n self._inputs = (\"x\", \"y\")\n self._outputs = (\"z\",)\n else:\n try:\n self._inputs = tuple(\"x\" + str(idx) for idx in range(self.n_inputs))\n self._outputs = tuple(\"x\" + str(idx) for idx in range(self.n_outputs))\n except TypeError:\n # self.n_inputs and self.n_outputs are properties\n # This is the case when subclasses of Model do not define\n # ``n_inputs``, ``n_outputs``, ``inputs`` or ``outputs``.\n self._inputs = ()\n self._outputs = ()\n\n def _initialize_setters(self, kwargs):\n \"\"\"\n This exists to inject defaults for settable properties for models\n originating from `custom_model`.\n \"\"\"\n if hasattr(self, '_settable_properties'):\n setters = {name: kwargs.pop(name, default)\n for name, default in self._settable_properties.items()}\n for name, value in setters.items():\n setattr(self, name, value)\n\n return kwargs\n\n @property\n def inputs(self):\n return self._inputs\n\n @inputs.setter\n def inputs(self, val):\n if len(val) != self.n_inputs:\n raise ValueError(f\"Expected {self.n_inputs} number of inputs, got {len(val)}.\")\n self._inputs = val\n self._initialize_unit_support()\n\n @property\n def outputs(self):\n return self._outputs\n\n @outputs.setter\n def outputs(self, val):\n if len(val) != self.n_outputs:\n raise ValueError(f\"Expected {self.n_outputs} number of outputs, got {len(val)}.\")\n self._outputs = val\n\n @property\n def n_inputs(self):\n # TODO: remove the code in the ``if`` block when support\n # for models with ``inputs`` as class variables is removed.\n if hasattr(self.__class__, 'n_inputs') and isinstance(self.__class__.n_inputs, property):\n try:\n return len(self.__class__.inputs)\n except TypeError:\n try:\n return len(self.inputs)\n except AttributeError:\n return 0\n\n return self.__class__.n_inputs\n\n @property\n def n_outputs(self):\n # TODO: remove the code in the ``if`` block when support\n # for models with ``outputs`` as class variables is removed.\n if hasattr(self.__class__, 'n_outputs') and isinstance(self.__class__.n_outputs, property):\n try:\n return len(self.__class__.outputs)\n except TypeError:\n try:\n return len(self.outputs)\n except AttributeError:\n return 0\n\n return self.__class__.n_outputs\n\n def 
_initialize_unit_support(self):\n \"\"\"\n Convert self._input_units_strict and\n self.input_units_allow_dimensionless to dictionaries\n mapping input name to a boolean value.\n \"\"\"\n if isinstance(self._input_units_strict, bool):\n self._input_units_strict = {key: self._input_units_strict for\n key in self.inputs}\n\n if isinstance(self._input_units_allow_dimensionless, bool):\n self._input_units_allow_dimensionless = {key: self._input_units_allow_dimensionless\n for key in self.inputs}\n\n @property\n def input_units_strict(self):\n \"\"\"\n Enforce strict units on inputs to evaluate. If this is set to True,\n input values to evaluate will be in the exact units specified by\n input_units. If the input quantities are convertible to input_units,\n they are converted. If this is a dictionary then it should map input\n name to a bool to set strict input units for that parameter.\n \"\"\"\n val = self._input_units_strict\n if isinstance(val, bool):\n return {key: val for key in self.inputs}\n return dict(zip(self.inputs, val.values()))\n\n @property\n def input_units_allow_dimensionless(self):\n \"\"\"\n Allow dimensionless input (and corresponding output). If this is True,\n input values to evaluate will gain the units specified in input_units. If\n this is a dictionary then it should map input name to a bool to allow\n dimensionless numbers for that input.\n Only has an effect if input_units is defined.\n \"\"\"\n\n val = self._input_units_allow_dimensionless\n if isinstance(val, bool):\n return {key: val for key in self.inputs}\n return dict(zip(self.inputs, val.values()))\n\n @property\n def uses_quantity(self):\n \"\"\"\n True if this model has been created with `~astropy.units.Quantity`\n objects or if there are no parameters.\n\n This can be used to determine if this model should be evaluated with\n `~astropy.units.Quantity` or regular floats.\n \"\"\"\n pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]\n return (len(pisq) == 0) or any(pisq)\n\n def __repr__(self):\n return self._format_repr()\n\n def __str__(self):\n return self._format_str()\n\n def __len__(self):\n return self._n_models\n\n @staticmethod\n def _strip_ones(intup):\n return tuple(item for item in intup if item != 1)\n\n def __setattr__(self, attr, value):\n if isinstance(self, CompoundModel):\n param_names = self._param_names\n param_names = self.param_names\n\n if param_names is not None and attr in self.param_names:\n param = self.__dict__[attr]\n value = _tofloat(value)\n if param._validator is not None:\n param._validator(self, value)\n # check consistency with previous shape and size\n eshape = self._param_metrics[attr]['shape']\n if eshape == ():\n eshape = (1,)\n vshape = np.array(value).shape\n if vshape == ():\n vshape = (1,)\n esize = self._param_metrics[attr]['size']\n if (np.size(value) != esize or\n self._strip_ones(vshape) != self._strip_ones(eshape)):\n raise InputParameterError(\n \"Value for parameter {0} does not match shape or size\\n\"\n \"expected by model ({1}, {2}) vs ({3}, {4})\".format(\n attr, vshape, np.size(value), eshape, esize))\n if param.unit is None:\n if isinstance(value, Quantity):\n param._unit = value.unit\n param.value = value.value\n else:\n param.value = value\n else:\n if not isinstance(value, Quantity):\n raise UnitsError(f\"The '{param.name}' parameter should be given as a\"\n \" Quantity because it was originally \"\n \"initialized as a Quantity\")\n param._unit = value.unit\n param.value = value.value\n else:\n if attr in ['fittable', 'linear']:\n 
self.__dict__[attr] = value\n else:\n super().__setattr__(attr, value)\n\n def _pre_evaluate(self, *args, **kwargs):\n \"\"\"\n Model specific input setup that needs to occur prior to model evaluation\n \"\"\"\n\n # Broadcast inputs into common size\n inputs, broadcasted_shapes = self.prepare_inputs(*args, **kwargs)\n\n # Setup actual model evaluation method\n parameters = self._param_sets(raw=True, units=True)\n\n def evaluate(_inputs):\n return self.evaluate(*chain(_inputs, parameters))\n\n return evaluate, inputs, broadcasted_shapes, kwargs\n\n def get_bounding_box(self, with_bbox=True):\n \"\"\"\n Return the ``bounding_box`` of a model if it exists or ``None``\n otherwise.\n\n Parameters\n ----------\n with_bbox :\n The value of the ``with_bounding_box`` keyword argument\n when calling the model. Default is `True` for usage when\n looking up the model's ``bounding_box`` without risk of error.\n \"\"\"\n bbox = None\n\n if not isinstance(with_bbox, bool) or with_bbox:\n try:\n bbox = self.bounding_box\n except NotImplementedError:\n pass\n\n if isinstance(bbox, CompoundBoundingBox) and not isinstance(with_bbox, bool):\n bbox = bbox[with_bbox]\n\n return bbox\n\n @property\n def _argnames(self):\n \"\"\"The inputs used to determine input_shape for bounding_box evaluation\"\"\"\n return self.inputs\n\n def _validate_input_shape(self, _input, idx, argnames, model_set_axis, check_model_set_axis):\n \"\"\"\n Perform basic validation of a single model input's shape\n -- it has the minimum dimensions for the given model_set_axis\n\n Returns the shape of the input if validation succeeds.\n \"\"\"\n input_shape = np.shape(_input)\n # Ensure that the input's model_set_axis matches the model's\n # n_models\n if input_shape and check_model_set_axis:\n # Note: Scalar inputs *only* get a pass on this\n if len(input_shape) < model_set_axis + 1:\n raise ValueError(\n f\"For model_set_axis={model_set_axis}, all inputs must be at \"\n f\"least {model_set_axis + 1}-dimensional.\")\n if input_shape[model_set_axis] != self._n_models:\n try:\n argname = argnames[idx]\n except IndexError:\n # the case of model.inputs = ()\n argname = str(idx)\n\n raise ValueError(\n f\"Input argument '{argname}' does not have the correct \"\n f\"dimensions in model_set_axis={model_set_axis} for a model set with \"\n f\"n_models={self._n_models}.\")\n\n return input_shape\n\n def _validate_input_shapes(self, inputs, argnames, model_set_axis):\n \"\"\"\n Perform basic validation of model inputs\n --that they are mutually broadcastable and that they have\n the minimum dimensions for the given model_set_axis.\n\n If validation succeeds, returns the total shape that will result from\n broadcasting the input arrays with each other.\n \"\"\"\n\n check_model_set_axis = self._n_models > 1 and model_set_axis is not False\n\n all_shapes = []\n for idx, _input in enumerate(inputs):\n all_shapes.append(self._validate_input_shape(_input, idx, argnames,\n model_set_axis, check_model_set_axis))\n\n input_shape = check_broadcast(*all_shapes)\n if input_shape is None:\n raise ValueError(\n \"All inputs must have identical shapes or must be scalars.\")\n\n return input_shape\n\n def input_shape(self, inputs):\n \"\"\"Get input shape for bounding_box evaluation\"\"\"\n return self._validate_input_shapes(inputs, self._argnames, self.model_set_axis)\n\n def _generic_evaluate(self, evaluate, _inputs, fill_value, with_bbox):\n \"\"\"\n Generic model evaluation routine\n Selects and evaluates model with or without bounding_box enforcement\n 
\"\"\"\n\n # Evaluate the model using the prepared evaluation method either\n # enforcing the bounding_box or not.\n bbox = self.get_bounding_box(with_bbox)\n if (not isinstance(with_bbox, bool) or with_bbox) and bbox is not None:\n outputs = bbox.evaluate(evaluate, _inputs, fill_value)\n else:\n outputs = evaluate(_inputs)\n return outputs\n\n def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):\n \"\"\"\n Model specific post evaluation processing of outputs\n \"\"\"\n if self.get_bounding_box(with_bbox) is None and self.n_outputs == 1:\n outputs = (outputs,)\n\n outputs = self.prepare_outputs(broadcasted_shapes, *outputs, **kwargs)\n outputs = self._process_output_units(inputs, outputs)\n\n if self.n_outputs == 1:\n return outputs[0]\n return outputs\n\n @property\n def bbox_with_units(self):\n return (not isinstance(self, CompoundModel))\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Evaluate this model using the given input(s) and the parameter values\n that were specified when the model was instantiated.\n \"\"\"\n # Turn any keyword arguments into positional arguments.\n args, kwargs = self._get_renamed_inputs_as_positional(*args, **kwargs)\n\n # Read model evaluation related parameters\n with_bbox = kwargs.pop('with_bounding_box', False)\n fill_value = kwargs.pop('fill_value', np.nan)\n\n # prepare for model evaluation (overridden in CompoundModel)\n evaluate, inputs, broadcasted_shapes, kwargs = self._pre_evaluate(*args, **kwargs)\n\n outputs = self._generic_evaluate(evaluate, inputs,\n fill_value, with_bbox)\n\n # post-process evaluation results (overridden in CompoundModel)\n return self._post_evaluate(inputs, outputs, broadcasted_shapes, with_bbox, **kwargs)\n\n def _get_renamed_inputs_as_positional(self, *args, **kwargs):\n def _keyword2positional(kwargs):\n # Inputs were passed as keyword (not positional) arguments.\n # Because the signature of the ``__call__`` is defined at\n # the class level, the name of the inputs cannot be changed at\n # the instance level and the old names are always present in the\n # signature of the method. 
In order to use the new names of the\n # inputs, the old names are taken out of ``kwargs``, the input\n # values are sorted in the order of self.inputs and passed as\n # positional arguments to ``__call__``.\n\n # These are the keys that are always present as keyword arguments.\n keys = ['model_set_axis', 'with_bounding_box', 'fill_value',\n 'equivalencies', 'inputs_map']\n\n new_inputs = {}\n # kwargs contain the names of the new inputs + ``keys``\n allkeys = list(kwargs.keys())\n # Remove the names of the new inputs from kwargs and save them\n # to a dict ``new_inputs``.\n for key in allkeys:\n if key not in keys:\n new_inputs[key] = kwargs[key]\n del kwargs[key]\n return new_inputs, kwargs\n n_args = len(args)\n\n new_inputs, kwargs = _keyword2positional(kwargs)\n n_all_args = n_args + len(new_inputs)\n\n if n_all_args < self.n_inputs:\n raise ValueError(f\"Missing input arguments - expected {self.n_inputs}, got {n_all_args}\")\n elif n_all_args > self.n_inputs:\n raise ValueError(f\"Too many input arguments - expected {self.n_inputs}, got {n_all_args}\")\n if n_args == 0:\n # Create positional arguments from the keyword arguments in ``new_inputs``.\n new_args = []\n for k in self.inputs:\n new_args.append(new_inputs[k])\n elif n_args != self.n_inputs:\n # Some inputs are passed as positional, others as keyword arguments.\n args = list(args)\n\n # Create positional arguments from the keyword arguments in ``new_inputs``.\n new_args = []\n for k in self.inputs:\n if k in new_inputs:\n new_args.append(new_inputs[k])\n else:\n new_args.append(args[0])\n del args[0]\n else:\n new_args = args\n return new_args, kwargs\n\n # *** Properties ***\n @property\n def name(self):\n \"\"\"User-provided name for this model instance.\"\"\"\n\n return self._name\n\n @name.setter\n def name(self, val):\n \"\"\"Assign a (new) name to this model.\"\"\"\n\n self._name = val\n\n @property\n def model_set_axis(self):\n \"\"\"\n The index of the model set axis--that is the axis of a parameter array\n that pertains to which model a parameter value pertains to--as\n specified when the model was initialized.\n\n See the documentation on :ref:`astropy:modeling-model-sets`\n for more details.\n \"\"\"\n\n return self._model_set_axis\n\n @property\n def param_sets(self):\n \"\"\"\n Return parameters as a pset.\n\n This is a list with one item per parameter set, which is an array of\n that parameter's values across all parameter sets, with the last axis\n associated with the parameter set.\n \"\"\"\n\n return self._param_sets()\n\n @property\n def parameters(self):\n \"\"\"\n A flattened array of all parameter values in all parameter sets.\n\n Fittable parameters maintain this list and fitters modify it.\n \"\"\"\n\n # Currently the sequence of a model's parameters must be contiguous\n # within the _parameters array (which may be a view of a larger array,\n # for example when taking a sub-expression of a compound model), so\n # the assumption here is reliable:\n if not self.param_names:\n # Trivial, but not unheard of\n return self._parameters\n\n self._parameters_to_array()\n start = self._param_metrics[self.param_names[0]]['slice'].start\n stop = self._param_metrics[self.param_names[-1]]['slice'].stop\n\n return self._parameters[start:stop]\n\n @parameters.setter\n def parameters(self, value):\n \"\"\"\n Assigning to this attribute updates the parameters array rather than\n replacing it.\n \"\"\"\n\n if not self.param_names:\n return\n\n start = self._param_metrics[self.param_names[0]]['slice'].start\n stop = 
self._param_metrics[self.param_names[-1]]['slice'].stop\n\n try:\n value = np.array(value).flatten()\n self._parameters[start:stop] = value\n except ValueError as e:\n raise InputParameterError(\n \"Input parameter values not compatible with the model \"\n \"parameters array: {0}\".format(e))\n self._array_to_parameters()\n\n @property\n def sync_constraints(self):\n '''\n This is a boolean property that indicates whether or not accessing constraints\n automatically check the constituent models current values. It defaults to True\n on creation of a model, but for fitting purposes it should be set to False\n for performance reasons.\n '''\n if not hasattr(self, '_sync_constraints'):\n self._sync_constraints = True\n return self._sync_constraints\n\n @sync_constraints.setter\n def sync_constraints(self, value):\n if not isinstance(value, bool):\n raise ValueError('sync_constraints only accepts True or False as values')\n self._sync_constraints = value\n\n @property\n def fixed(self):\n \"\"\"\n A ``dict`` mapping parameter names to their fixed constraint.\n \"\"\"\n if not hasattr(self, '_fixed') or self.sync_constraints:\n self._fixed = _ConstraintsDict(self, 'fixed')\n return self._fixed\n\n @property\n def bounds(self):\n \"\"\"\n A ``dict`` mapping parameter names to their upper and lower bounds as\n ``(min, max)`` tuples or ``[min, max]`` lists.\n \"\"\"\n if not hasattr(self, '_bounds') or self.sync_constraints:\n self._bounds = _ConstraintsDict(self, 'bounds')\n return self._bounds\n\n @property\n def tied(self):\n \"\"\"\n A ``dict`` mapping parameter names to their tied constraint.\n \"\"\"\n if not hasattr(self, '_tied') or self.sync_constraints:\n self._tied = _ConstraintsDict(self, 'tied')\n return self._tied\n\n @property\n def eqcons(self):\n \"\"\"List of parameter equality constraints.\"\"\"\n\n return self._mconstraints['eqcons']\n\n @property\n def ineqcons(self):\n \"\"\"List of parameter inequality constraints.\"\"\"\n\n return self._mconstraints['ineqcons']\n\n def has_inverse(self):\n \"\"\"\n Returns True if the model has an analytic or user\n inverse defined.\n \"\"\"\n try:\n self.inverse\n except NotImplementedError:\n return False\n\n return True\n\n @property\n def inverse(self):\n \"\"\"\n Returns a new `~astropy.modeling.Model` instance which performs the\n inverse transform, if an analytic inverse is defined for this model.\n\n Even on models that don't have an inverse defined, this property can be\n set with a manually-defined inverse, such a pre-computed or\n experimentally determined inverse (often given as a\n `~astropy.modeling.polynomial.PolynomialModel`, but not by\n requirement).\n\n A custom inverse can be deleted with ``del model.inverse``. In this\n case the model's inverse is reset to its default, if a default exists\n (otherwise the default is to raise `NotImplementedError`).\n\n Note to authors of `~astropy.modeling.Model` subclasses: To define an\n inverse for a model simply override this property to return the\n appropriate model representing the inverse. 
The machinery that will\n make the inverse manually-overridable is added automatically by the\n base class.\n \"\"\"\n if self._user_inverse is not None:\n return self._user_inverse\n elif self._inverse is not None:\n result = self._inverse()\n if result is not NotImplemented:\n if not self._has_inverse_bounding_box:\n result.bounding_box = None\n return result\n\n raise NotImplementedError(\"No analytical or user-supplied inverse transform \"\n \"has been implemented for this model.\")\n\n @inverse.setter\n def inverse(self, value):\n if not isinstance(value, (Model, type(None))):\n raise ValueError(\n \"The ``inverse`` attribute may be assigned a `Model` \"\n \"instance or `None` (where `None` explicitly forces the \"\n \"model to have no inverse.\")\n\n self._user_inverse = value\n\n @inverse.deleter\n def inverse(self):\n \"\"\"\n Resets the model's inverse to its default (if one exists, otherwise\n the model will have no inverse).\n \"\"\"\n\n try:\n del self._user_inverse\n except AttributeError:\n pass\n\n @property\n def has_user_inverse(self):\n \"\"\"\n A flag indicating whether or not a custom inverse model has been\n assigned to this model by a user, via assignment to ``model.inverse``.\n \"\"\"\n return self._user_inverse is not None\n\n @property\n def bounding_box(self):\n r\"\"\"\n A `tuple` of length `n_inputs` defining the bounding box limits, or\n raise `NotImplementedError` for no bounding_box.\n\n The default limits are given by a ``bounding_box`` property or method\n defined in the class body of a specific model. If not defined then\n this property just raises `NotImplementedError` by default (but may be\n assigned a custom value by a user). ``bounding_box`` can be set\n manually to an array-like object of shape ``(model.n_inputs, 2)``. For\n further usage, see :ref:`astropy:bounding-boxes`\n\n The limits are ordered according to the `numpy` ``'C'`` indexing\n convention, and are the reverse of the model input order,\n e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:\n\n * for 1D: ``(x_low, x_high)``\n * for 2D: ``((y_low, y_high), (x_low, x_high))``\n * for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``\n\n Examples\n --------\n\n Setting the ``bounding_box`` limits for a 1D and 2D model:\n\n >>> from astropy.modeling.models import Gaussian1D, Gaussian2D\n >>> model_1d = Gaussian1D()\n >>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)\n >>> model_1d.bounding_box = (-5, 5)\n >>> model_2d.bounding_box = ((-6, 6), (-5, 5))\n\n Setting the bounding_box limits for a user-defined 3D `custom_model`:\n\n >>> from astropy.modeling.models import custom_model\n >>> def const3d(x, y, z, amp=1):\n ... 
return amp\n ...\n >>> Const3D = custom_model(const3d)\n >>> model_3d = Const3D()\n >>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))\n\n To reset ``bounding_box`` to its default limits just delete the\n user-defined value--this will reset it back to the default defined\n on the class:\n\n >>> del model_1d.bounding_box\n\n To disable the bounding box entirely (including the default),\n set ``bounding_box`` to `None`:\n\n >>> model_1d.bounding_box = None\n >>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n NotImplementedError: No bounding box is defined for this model\n (note: the bounding box was explicitly disabled for this model;\n use `del model.bounding_box` to restore the default bounding box,\n if one is defined for this model).\n \"\"\"\n\n if self._user_bounding_box is not None:\n if self._user_bounding_box is NotImplemented:\n raise NotImplementedError(\n \"No bounding box is defined for this model (note: the \"\n \"bounding box was explicitly disabled for this model; \"\n \"use `del model.bounding_box` to restore the default \"\n \"bounding box, if one is defined for this model).\")\n return self._user_bounding_box\n elif self._bounding_box is None:\n raise NotImplementedError(\n \"No bounding box is defined for this model.\")\n elif isinstance(self._bounding_box, ModelBoundingBox):\n # This typically implies a hard-coded bounding box. This will\n # probably be rare, but it is an option\n return self._bounding_box\n elif isinstance(self._bounding_box, types.MethodType):\n return ModelBoundingBox.validate(self, self._bounding_box())\n else:\n # The only other allowed possibility is that it's a ModelBoundingBox\n # subclass, so we call it with its default arguments and return an\n # instance of it (that can be called to recompute the bounding box\n # with any optional parameters)\n # (In other words, in this case self._bounding_box is a *class*)\n bounding_box = self._bounding_box((), model=self)()\n return self._bounding_box(bounding_box, model=self)\n\n @bounding_box.setter\n def bounding_box(self, bounding_box):\n \"\"\"\n Assigns the bounding box limits.\n \"\"\"\n\n if bounding_box is None:\n cls = None\n # We use this to explicitly set an unimplemented bounding box (as\n # opposed to no user bounding box defined)\n bounding_box = NotImplemented\n elif (isinstance(bounding_box, CompoundBoundingBox) or\n isinstance(bounding_box, dict)):\n cls = CompoundBoundingBox\n elif (isinstance(self._bounding_box, type) and\n issubclass(self._bounding_box, ModelBoundingBox)):\n cls = self._bounding_box\n else:\n cls = ModelBoundingBox\n\n if cls is not None:\n try:\n bounding_box = cls.validate(self, bounding_box, _preserve_ignore=True)\n except ValueError as exc:\n raise ValueError(exc.args[0])\n\n self._user_bounding_box = bounding_box\n\n def set_slice_args(self, *args):\n if isinstance(self._user_bounding_box, CompoundBoundingBox):\n self._user_bounding_box.slice_args = args\n else:\n raise RuntimeError('The bounding_box for this model is not compound')\n\n @bounding_box.deleter\n def bounding_box(self):\n self._user_bounding_box = None\n\n @property\n def has_user_bounding_box(self):\n \"\"\"\n A flag indicating whether or not a custom bounding_box has been\n assigned to this model by a user, via assignment to\n ``model.bounding_box``.\n \"\"\"\n\n return self._user_bounding_box is not None\n\n @property\n def cov_matrix(self):\n \"\"\"\n Fitter should set covariance matrix, if available.\n \"\"\"\n return self._cov_matrix\n\n 
@cov_matrix.setter\n def cov_matrix(self, cov):\n\n self._cov_matrix = cov\n\n unfix_untied_params = [p for p in self.param_names if (self.fixed[p] is False)\n and (self.tied[p] is False)]\n if type(cov) == list: # model set\n param_stds = []\n for c in cov:\n param_stds.append([np.sqrt(x) if x > 0 else None for x in np.diag(c.cov_matrix)])\n for p, param_name in enumerate(unfix_untied_params):\n par = getattr(self, param_name)\n par.std = [item[p] for item in param_stds]\n setattr(self, param_name, par)\n else:\n param_stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov.cov_matrix)]\n for param_name in unfix_untied_params:\n par = getattr(self, param_name)\n par.std = param_stds.pop(0)\n setattr(self, param_name, par)\n\n @property\n def stds(self):\n \"\"\"\n Standard deviation of parameters, if covariance matrix is available.\n \"\"\"\n return self._stds\n\n @stds.setter\n def stds(self, stds):\n self._stds = stds\n\n @property\n def separable(self):\n \"\"\" A flag indicating whether a model is separable.\"\"\"\n\n if self._separable is not None:\n return self._separable\n raise NotImplementedError(\n 'The \"separable\" property is not defined for '\n 'model {}'.format(self.__class__.__name__))\n\n # *** Public methods ***\n\n def without_units_for_data(self, **kwargs):\n \"\"\"\n Return an instance of the model for which the parameter values have\n been converted to the right units for the data, then the units have\n been stripped away.\n\n The input and output Quantity objects should be given as keyword\n arguments.\n\n Notes\n -----\n\n This method is needed in order to be able to fit models with units in\n the parameters, since we need to temporarily strip away the units from\n the model during the fitting (which might be done by e.g. scipy\n functions).\n\n The units that the parameters should be converted to are not\n necessarily the units of the input data, but are derived from them.\n Model subclasses that want fitting to work in the presence of\n quantities need to define a ``_parameter_units_for_data_units`` method\n that takes the input and output units (as two dictionaries) and\n returns a dictionary giving the target units for each parameter.\n\n \"\"\"\n model = self.copy()\n\n inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)\n for inp in self.inputs if kwargs[inp] is not None}\n\n outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)\n for out in self.outputs if kwargs[out] is not None}\n parameter_units = self._parameter_units_for_data_units(inputs_unit,\n outputs_unit)\n for name, unit in parameter_units.items():\n parameter = getattr(model, name)\n if parameter.unit is not None:\n parameter.value = parameter.quantity.to(unit).value\n parameter._set_unit(None, force=True)\n\n if isinstance(model, CompoundModel):\n model.strip_units_from_tree()\n\n return model\n\n def output_units(self, **kwargs):\n \"\"\"\n Return a dictionary of output units for this model given a dictionary\n of fitting inputs and outputs\n\n The input and output Quantity objects should be given as keyword\n arguments.\n\n Notes\n -----\n\n This method is needed in order to be able to fit models with units in\n the parameters, since we need to temporarily strip away the units from\n the model during the fitting (which might be done by e.g. scipy\n functions).\n\n This method will force extra model evaluations, which maybe computationally\n expensive. 
To avoid this, one can add a return_units property to the model,\n see :ref:`astropy:models_return_units`.\n \"\"\"\n units = self.return_units\n\n if units is None or units == {}:\n inputs = {inp: kwargs[inp] for inp in self.inputs}\n\n values = self(**inputs)\n if self.n_outputs == 1:\n values = (values,)\n\n units = {out: getattr(values[index], 'unit', dimensionless_unscaled)\n for index, out in enumerate(self.outputs)}\n\n return units\n\n def strip_units_from_tree(self):\n for item in self._leaflist:\n for parname in item.param_names:\n par = getattr(item, parname)\n par._set_unit(None, force=True)\n\n def with_units_from_data(self, **kwargs):\n \"\"\"\n Return an instance of the model which has units for which the parameter\n values are compatible with the data units specified.\n\n The input and output Quantity objects should be given as keyword\n arguments.\n\n Notes\n -----\n\n This method is needed in order to be able to fit models with units in\n the parameters, since we need to temporarily strip away the units from\n the model during the fitting (which might be done by e.g. scipy\n functions).\n\n The units that the parameters will gain are not necessarily the units\n of the input data, but are derived from them. Model subclasses that\n want fitting to work in the presence of quantities need to define a\n ``_parameter_units_for_data_units`` method that takes the input and output\n units (as two dictionaries) and returns a dictionary giving the target\n units for each parameter.\n \"\"\"\n model = self.copy()\n inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)\n for inp in self.inputs if kwargs[inp] is not None}\n\n outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)\n for out in self.outputs if kwargs[out] is not None}\n\n parameter_units = self._parameter_units_for_data_units(inputs_unit,\n outputs_unit)\n\n # We are adding units to parameters that already have a value, but we\n # don't want to convert the parameter, just add the unit directly,\n # hence the call to ``_set_unit``.\n for name, unit in parameter_units.items():\n parameter = getattr(model, name)\n parameter._set_unit(unit, force=True)\n\n return model\n\n @property\n def _has_units(self):\n # Returns True if any of the parameters have units\n for param in self.param_names:\n if getattr(self, param).unit is not None:\n return True\n else:\n return False\n\n @property\n def _supports_unit_fitting(self):\n # If the model has a ``_parameter_units_for_data_units`` method, this\n # indicates that we have enough information to strip the units away\n # and add them back after fitting, when fitting quantities\n return hasattr(self, '_parameter_units_for_data_units')\n\n @abc.abstractmethod\n def evaluate(self, *args, **kwargs):\n \"\"\"Evaluate the model on some input variables.\"\"\"\n\n def sum_of_implicit_terms(self, *args, **kwargs):\n \"\"\"\n Evaluate the sum of any implicit model terms on some input variables.\n This includes any fixed terms used in evaluating a linear model that\n do not have corresponding parameters exposed to the user. The\n prototypical case is `astropy.modeling.functional_models.Shift`, which\n corresponds to a function y = a + bx, where b=1 is intrinsically fixed\n by the type of model, such that sum_of_implicit_terms(x) == x. This\n method is needed by linear fitters to correct the dependent variable\n for the implicit term(s) when solving for the remaining terms\n (ie. 
a = y - bx).\n \"\"\"\n\n def render(self, out=None, coords=None):\n \"\"\"\n Evaluate a model at fixed positions, respecting the ``bounding_box``.\n\n The key difference relative to evaluating the model directly is that\n this method is limited to a bounding box if the `Model.bounding_box`\n attribute is set.\n\n Parameters\n ----------\n out : `numpy.ndarray`, optional\n An array that the evaluated model will be added to. If this is not\n given (or given as ``None``), a new array will be created.\n coords : array-like, optional\n An array to be used to translate from the model's input coordinates\n to the ``out`` array. It should have the property that\n ``self(coords)`` yields the same shape as ``out``. If ``out`` is\n not specified, ``coords`` will be used to determine the shape of\n the returned array. If this is not provided (or None), the model\n will be evaluated on a grid determined by `Model.bounding_box`.\n\n Returns\n -------\n out : `numpy.ndarray`\n The model added to ``out`` if ``out`` is not ``None``, or else a\n new array from evaluating the model over ``coords``.\n If ``out`` and ``coords`` are both `None`, the returned array is\n limited to the `Model.bounding_box` limits. If\n `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be\n passed.\n\n Raises\n ------\n ValueError\n If ``coords`` are not given and the the `Model.bounding_box` of\n this model is not set.\n\n Examples\n --------\n :ref:`astropy:bounding-boxes`\n \"\"\"\n\n try:\n bbox = self.bounding_box\n except NotImplementedError:\n bbox = None\n\n if isinstance(bbox, ModelBoundingBox):\n bbox = bbox.bounding_box()\n\n ndim = self.n_inputs\n\n if (coords is None) and (out is None) and (bbox is None):\n raise ValueError('If no bounding_box is set, '\n 'coords or out must be input.')\n\n # for consistent indexing\n if ndim == 1:\n if coords is not None:\n coords = [coords]\n if bbox is not None:\n bbox = [bbox]\n\n if coords is not None:\n coords = np.asanyarray(coords, dtype=float)\n # Check dimensions match out and model\n assert len(coords) == ndim\n if out is not None:\n if coords[0].shape != out.shape:\n raise ValueError('inconsistent shape of the output.')\n else:\n out = np.zeros(coords[0].shape)\n\n if out is not None:\n out = np.asanyarray(out)\n if out.ndim != ndim:\n raise ValueError('the array and model must have the same '\n 'number of dimensions.')\n\n if bbox is not None:\n # Assures position is at center pixel,\n # important when using add_array.\n pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))\n for bb in bbox]).astype(int).T\n pos, delta = pd\n\n if coords is not None:\n sub_shape = tuple(delta * 2 + 1)\n sub_coords = np.array([extract_array(c, sub_shape, pos)\n for c in coords])\n else:\n limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]\n sub_coords = np.mgrid[limits]\n\n sub_coords = sub_coords[::-1]\n\n if out is None:\n out = self(*sub_coords)\n else:\n try:\n out = add_array(out, self(*sub_coords), pos)\n except ValueError:\n raise ValueError(\n 'The `bounding_box` is larger than the input out in '\n 'one or more dimensions. 
Set '\n '`model.bounding_box = None`.')\n else:\n if coords is None:\n im_shape = out.shape\n limits = [slice(i) for i in im_shape]\n coords = np.mgrid[limits]\n\n coords = coords[::-1]\n\n out += self(*coords)\n\n return out\n\n @property\n def input_units(self):\n \"\"\"\n This property is used to indicate what units or sets of units the\n evaluate method expects, and returns a dictionary mapping inputs to\n units (or `None` if any units are accepted).\n\n Model sub-classes can also use function annotations in evaluate to\n indicate valid input units, in which case this property should\n not be overridden since it will return the input units based on the\n annotations.\n \"\"\"\n if hasattr(self, '_input_units'):\n return self._input_units\n elif hasattr(self.evaluate, '__annotations__'):\n annotations = self.evaluate.__annotations__.copy()\n annotations.pop('return', None)\n if annotations:\n # If there are not annotations for all inputs this will error.\n return dict((name, annotations[name]) for name in self.inputs)\n else:\n # None means any unit is accepted\n return None\n\n @property\n def return_units(self):\n \"\"\"\n This property is used to indicate what units or sets of units the\n output of evaluate should be in, and returns a dictionary mapping\n outputs to units (or `None` if any units are accepted).\n\n Model sub-classes can also use function annotations in evaluate to\n indicate valid output units, in which case this property should not be\n overridden since it will return the return units based on the\n annotations.\n \"\"\"\n if hasattr(self, '_return_units'):\n return self._return_units\n elif hasattr(self.evaluate, '__annotations__'):\n return self.evaluate.__annotations__.get('return', None)\n else:\n # None means any unit is accepted\n return None\n\n def _prepare_inputs_single_model(self, params, inputs, **kwargs):\n broadcasts = []\n for idx, _input in enumerate(inputs):\n input_shape = _input.shape\n\n # Ensure that array scalars are always upgrade to 1-D arrays for the\n # sake of consistency with how parameters work. They will be cast back\n # to scalars at the end\n if not input_shape:\n inputs[idx] = _input.reshape((1,))\n\n if not params:\n max_broadcast = input_shape\n else:\n max_broadcast = ()\n\n for param in params:\n try:\n if self.standard_broadcasting:\n broadcast = check_broadcast(input_shape, param.shape)\n else:\n broadcast = input_shape\n except IncompatibleShapeError:\n raise ValueError(\n \"self input argument {0!r} of shape {1!r} cannot be \"\n \"broadcast with parameter {2!r} of shape \"\n \"{3!r}.\".format(self.inputs[idx], input_shape,\n param.name, param.shape))\n\n if len(broadcast) > len(max_broadcast):\n max_broadcast = broadcast\n elif len(broadcast) == len(max_broadcast):\n max_broadcast = max(max_broadcast, broadcast)\n\n broadcasts.append(max_broadcast)\n\n if self.n_outputs > self.n_inputs:\n extra_outputs = self.n_outputs - self.n_inputs\n if not broadcasts:\n # If there were no inputs then the broadcasts list is empty\n # just add a None since there is no broadcasting of outputs and\n # inputs necessary (see _prepare_outputs_single_self)\n broadcasts.append(None)\n broadcasts.extend([broadcasts[0]] * extra_outputs)\n\n return inputs, (broadcasts,)\n\n @staticmethod\n def _remove_axes_from_shape(shape, axis):\n \"\"\"\n Given a shape tuple as the first input, construct a new one by removing\n that particular axis from the shape and all preceeding axes. 
Negative axis\n numbers are permittted, where the axis is relative to the last axis.\n \"\"\"\n if len(shape) == 0:\n return shape\n if axis < 0:\n axis = len(shape) + axis\n return shape[:axis] + shape[axis+1:]\n if axis >= len(shape):\n axis = len(shape)-1\n shape = shape[axis+1:]\n return shape\n\n def _prepare_inputs_model_set(self, params, inputs, model_set_axis_input,\n **kwargs):\n reshaped = []\n pivots = []\n\n model_set_axis_param = self.model_set_axis # needed to reshape param\n for idx, _input in enumerate(inputs):\n max_param_shape = ()\n if self._n_models > 1 and model_set_axis_input is not False:\n # Use the shape of the input *excluding* the model axis\n input_shape = (_input.shape[:model_set_axis_input] +\n _input.shape[model_set_axis_input + 1:])\n else:\n input_shape = _input.shape\n\n for param in params:\n try:\n check_broadcast(input_shape,\n self._remove_axes_from_shape(param.shape,\n model_set_axis_param))\n except IncompatibleShapeError:\n raise ValueError(\n \"Model input argument {0!r} of shape {1!r} cannot be \"\n \"broadcast with parameter {2!r} of shape \"\n \"{3!r}.\".format(self.inputs[idx], input_shape,\n param.name,\n self._remove_axes_from_shape(param.shape,\n model_set_axis_param)))\n\n if len(param.shape) - 1 > len(max_param_shape):\n max_param_shape = self._remove_axes_from_shape(param.shape,\n model_set_axis_param)\n\n # We've now determined that, excluding the model_set_axis, the\n # input can broadcast with all the parameters\n input_ndim = len(input_shape)\n if model_set_axis_input is False:\n if len(max_param_shape) > input_ndim:\n # Just needs to prepend new axes to the input\n n_new_axes = 1 + len(max_param_shape) - input_ndim\n new_axes = (1,) * n_new_axes\n new_shape = new_axes + _input.shape\n pivot = model_set_axis_param\n else:\n pivot = input_ndim - len(max_param_shape)\n new_shape = (_input.shape[:pivot] + (1,) +\n _input.shape[pivot:])\n new_input = _input.reshape(new_shape)\n else:\n if len(max_param_shape) >= input_ndim:\n n_new_axes = len(max_param_shape) - input_ndim\n pivot = self.model_set_axis\n new_axes = (1,) * n_new_axes\n new_shape = (_input.shape[:pivot + 1] + new_axes +\n _input.shape[pivot + 1:])\n new_input = _input.reshape(new_shape)\n else:\n pivot = _input.ndim - len(max_param_shape) - 1\n new_input = np.rollaxis(_input, model_set_axis_input,\n pivot + 1)\n pivots.append(pivot)\n reshaped.append(new_input)\n\n if self.n_inputs < self.n_outputs:\n pivots.extend([model_set_axis_input] * (self.n_outputs - self.n_inputs))\n\n return reshaped, (pivots,)\n\n def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None,\n **kwargs):\n \"\"\"\n This method is used in `~astropy.modeling.Model.__call__` to ensure\n that all the inputs to the model can be broadcast into compatible\n shapes (if one or both of them are input as arrays), particularly if\n there are more than one parameter sets. 
This also makes sure that (if\n applicable) the units of the input will be compatible with the evaluate\n method.\n \"\"\"\n # When we instantiate the model class, we make sure that __call__ can\n # take the following two keyword arguments: model_set_axis and\n # equivalencies.\n if model_set_axis is None:\n # By default the model_set_axis for the input is assumed to be the\n # same as that for the parameters the model was defined with\n # TODO: Ensure that negative model_set_axis arguments are respected\n model_set_axis = self.model_set_axis\n\n params = [getattr(self, name) for name in self.param_names]\n inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]\n\n self._validate_input_shapes(inputs, self.inputs, model_set_axis)\n\n inputs_map = kwargs.get('inputs_map', None)\n\n inputs = self._validate_input_units(inputs, equivalencies, inputs_map)\n\n # The input formatting required for single models versus a multiple\n # model set are different enough that they've been split into separate\n # subroutines\n if self._n_models == 1:\n return self._prepare_inputs_single_model(params, inputs, **kwargs)\n else:\n return self._prepare_inputs_model_set(params, inputs,\n model_set_axis, **kwargs)\n\n def _validate_input_units(self, inputs, equivalencies=None, inputs_map=None):\n inputs = list(inputs)\n name = self.name or self.__class__.__name__\n # Check that the units are correct, if applicable\n\n if self.input_units is not None:\n # If a leaflist is provided that means this is in the context of\n # a compound model and it is necessary to create the appropriate\n # alias for the input coordinate name for the equivalencies dict\n if inputs_map:\n edict = {}\n for mod, mapping in inputs_map:\n if self is mod:\n edict[mapping[0]] = equivalencies[mapping[1]]\n else:\n edict = equivalencies\n # We combine any instance-level input equivalencies with user\n # specified ones at call-time.\n input_units_equivalencies = _combine_equivalency_dict(self.inputs,\n edict,\n self.input_units_equivalencies)\n\n # We now iterate over the different inputs and make sure that their\n # units are consistent with those specified in input_units.\n for i in range(len(inputs)):\n\n input_name = self.inputs[i]\n input_unit = self.input_units.get(input_name, None)\n\n if input_unit is None:\n continue\n\n if isinstance(inputs[i], Quantity):\n\n # We check for consistency of the units with input_units,\n # taking into account any equivalencies\n\n if inputs[i].unit.is_equivalent(\n input_unit,\n equivalencies=input_units_equivalencies[input_name]):\n\n # If equivalencies have been specified, we need to\n # convert the input to the input units - this is\n # because some equivalencies are non-linear, and\n # we need to be sure that we evaluate the model in\n # its own frame of reference. 
If input_units_strict\n # is set, we also need to convert to the input units.\n if len(input_units_equivalencies) > 0 or self.input_units_strict[input_name]:\n inputs[i] = inputs[i].to(input_unit,\n equivalencies=input_units_equivalencies[input_name])\n\n else:\n\n # We consider the following two cases separately so as\n # to be able to raise more appropriate/nicer exceptions\n\n if input_unit is dimensionless_unscaled:\n raise UnitsError(\"{0}: Units of input '{1}', {2} ({3}),\"\n \"could not be converted to \"\n \"required dimensionless \"\n \"input\".format(name,\n self.inputs[i],\n inputs[i].unit,\n inputs[i].unit.physical_type))\n else:\n raise UnitsError(\"{0}: Units of input '{1}', {2} ({3}),\"\n \" could not be \"\n \"converted to required input\"\n \" units of {4} ({5})\".format(\n name,\n self.inputs[i],\n inputs[i].unit,\n inputs[i].unit.physical_type,\n input_unit,\n input_unit.physical_type))\n else:\n\n # If we allow dimensionless input, we add the units to the\n # input values without conversion, otherwise we raise an\n # exception.\n\n if (not self.input_units_allow_dimensionless[input_name] and\n input_unit is not dimensionless_unscaled and\n input_unit is not None):\n if np.any(inputs[i] != 0):\n raise UnitsError(\"{0}: Units of input '{1}', (dimensionless), could not be \"\n \"converted to required input units of \"\n \"{2} ({3})\".format(name, self.inputs[i], input_unit,\n input_unit.physical_type))\n return inputs\n\n def _process_output_units(self, inputs, outputs):\n inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])\n if self.return_units and inputs_are_quantity:\n # We allow a non-iterable unit only if there is one output\n if self.n_outputs == 1 and not isiterable(self.return_units):\n return_units = {self.outputs[0]: self.return_units}\n else:\n return_units = self.return_units\n\n outputs = tuple([Quantity(out, return_units.get(out_name, None), subok=True)\n for out, out_name in zip(outputs, self.outputs)])\n return outputs\n\n @staticmethod\n def _prepare_output_single_model(output, broadcast_shape):\n if broadcast_shape is not None:\n if not broadcast_shape:\n return output.item()\n else:\n try:\n return output.reshape(broadcast_shape)\n except ValueError:\n try:\n return output.item()\n except ValueError:\n return output\n\n return output\n\n def _prepare_outputs_single_model(self, outputs, broadcasted_shapes):\n outputs = list(outputs)\n for idx, output in enumerate(outputs):\n try:\n broadcast_shape = check_broadcast(*broadcasted_shapes[0])\n except (IndexError, TypeError):\n broadcast_shape = broadcasted_shapes[0][idx]\n\n outputs[idx] = self._prepare_output_single_model(output, broadcast_shape)\n\n return tuple(outputs)\n\n def _prepare_outputs_model_set(self, outputs, broadcasted_shapes, model_set_axis):\n pivots = broadcasted_shapes[0]\n # If model_set_axis = False was passed then use\n # self._model_set_axis to format the output.\n if model_set_axis is None or model_set_axis is False:\n model_set_axis = self.model_set_axis\n outputs = list(outputs)\n for idx, output in enumerate(outputs):\n pivot = pivots[idx]\n if pivot < output.ndim and pivot != model_set_axis:\n outputs[idx] = np.rollaxis(output, pivot,\n model_set_axis)\n return tuple(outputs)\n\n def prepare_outputs(self, broadcasted_shapes, *outputs, **kwargs):\n model_set_axis = kwargs.get('model_set_axis', None)\n\n if len(self) == 1:\n return self._prepare_outputs_single_model(outputs, broadcasted_shapes)\n else:\n return self._prepare_outputs_model_set(outputs, 
broadcasted_shapes, model_set_axis)\n\n def copy(self):\n \"\"\"\n Return a copy of this model.\n\n Uses a deep copy so that all model attributes, including parameter\n values, are copied as well.\n \"\"\"\n\n return copy.deepcopy(self)\n\n def deepcopy(self):\n \"\"\"\n Return a deep copy of this model.\n\n \"\"\"\n\n return self.copy()\n\n @sharedmethod\n def rename(self, name):\n \"\"\"\n Return a copy of this model with a new name.\n \"\"\"\n new_model = self.copy()\n new_model._name = name\n return new_model\n\n def coerce_units(\n self,\n input_units=None,\n return_units=None,\n input_units_equivalencies=None,\n input_units_allow_dimensionless=False\n ):\n \"\"\"\n Attach units to this (unitless) model.\n\n Parameters\n ----------\n input_units : dict or tuple, optional\n Input units to attach. If dict, each key is the name of a model input,\n and the value is the unit to attach. If tuple, the elements are units\n to attach in order corresponding to `Model.inputs`.\n return_units : dict or tuple, optional\n Output units to attach. If dict, each key is the name of a model output,\n and the value is the unit to attach. If tuple, the elements are units\n to attach in order corresponding to `Model.outputs`.\n input_units_equivalencies : dict, optional\n Default equivalencies to apply to input values. If set, this should be a\n dictionary where each key is a string that corresponds to one of the\n model inputs.\n input_units_allow_dimensionless : bool or dict, optional\n Allow dimensionless input. If this is True, input values to evaluate will\n gain the units specified in input_units. If this is a dictionary then it\n should map input name to a bool to allow dimensionless numbers for that\n input.\n\n Returns\n -------\n `CompoundModel`\n A `CompoundModel` composed of the current model plus\n `~astropy.modeling.mappings.UnitsMapping` model(s) that attach the units.\n\n Raises\n ------\n ValueError\n If the current model already has units.\n\n Examples\n --------\n\n Wrapping a unitless model to require and convert units:\n\n >>> from astropy.modeling.models import Polynomial1D\n >>> from astropy import units as u\n >>> poly = Polynomial1D(1, c0=1, c1=2)\n >>> model = poly.coerce_units((u.m,), (u.s,))\n >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP\n <Quantity 21. s>\n >>> model(u.Quantity(1000, u.cm)) # doctest: +FLOAT_CMP\n <Quantity 21. s>\n >>> model(u.Quantity(10, u.cm)) # doctest: +FLOAT_CMP\n <Quantity 1.2 s>\n\n Wrapping a unitless model but still permitting unitless input:\n\n >>> from astropy.modeling.models import Polynomial1D\n >>> from astropy import units as u\n >>> poly = Polynomial1D(1, c0=1, c1=2)\n >>> model = poly.coerce_units((u.m,), (u.s,), input_units_allow_dimensionless=True)\n >>> model(u.Quantity(10, u.m)) # doctest: +FLOAT_CMP\n <Quantity 21. s>\n >>> model(10) # doctest: +FLOAT_CMP\n <Quantity 21. 
s>\n \"\"\"\n from .mappings import UnitsMapping\n\n result = self\n\n if input_units is not None:\n if self.input_units is not None:\n model_units = self.input_units\n else:\n model_units = {}\n\n for unit in [model_units.get(i) for i in self.inputs]:\n if unit is not None and unit != dimensionless_unscaled:\n raise ValueError(\"Cannot specify input_units for model with existing input units\")\n\n if isinstance(input_units, dict):\n if input_units.keys() != set(self.inputs):\n message = (\n f\"\"\"input_units keys ({\", \".join(input_units.keys())}) \"\"\"\n f\"\"\"do not match model inputs ({\", \".join(self.inputs)})\"\"\"\n )\n raise ValueError(message)\n input_units = [input_units[i] for i in self.inputs]\n\n if len(input_units) != self.n_inputs:\n message = (\n \"input_units length does not match n_inputs: \"\n f\"expected {self.n_inputs}, received {len(input_units)}\"\n )\n raise ValueError(message)\n\n mapping = tuple((unit, model_units.get(i)) for i, unit in zip(self.inputs, input_units))\n input_mapping = UnitsMapping(\n mapping,\n input_units_equivalencies=input_units_equivalencies,\n input_units_allow_dimensionless=input_units_allow_dimensionless\n )\n input_mapping.inputs = self.inputs\n input_mapping.outputs = self.inputs\n result = input_mapping | result\n\n if return_units is not None:\n if self.return_units is not None:\n model_units = self.return_units\n else:\n model_units = {}\n\n for unit in [model_units.get(i) for i in self.outputs]:\n if unit is not None and unit != dimensionless_unscaled:\n raise ValueError(\"Cannot specify return_units for model with existing output units\")\n\n if isinstance(return_units, dict):\n if return_units.keys() != set(self.outputs):\n message = (\n f\"\"\"return_units keys ({\", \".join(return_units.keys())}) \"\"\"\n f\"\"\"do not match model outputs ({\", \".join(self.outputs)})\"\"\"\n )\n raise ValueError(message)\n return_units = [return_units[i] for i in self.outputs]\n\n if len(return_units) != self.n_outputs:\n message = (\n \"return_units length does not match n_outputs: \"\n f\"expected {self.n_outputs}, received {len(return_units)}\"\n )\n raise ValueError(message)\n\n mapping = tuple((model_units.get(i), unit) for i, unit in zip(self.outputs, return_units))\n return_mapping = UnitsMapping(mapping)\n return_mapping.inputs = self.outputs\n return_mapping.outputs = self.outputs\n result = result | return_mapping\n\n return result\n\n @property\n def n_submodels(self):\n \"\"\"\n Return the number of components in a single model, which is\n obviously 1.\n \"\"\"\n return 1\n\n def _initialize_constraints(self, kwargs):\n \"\"\"\n Pop parameter constraint values off the keyword arguments passed to\n `Model.__init__` and store them in private instance attributes.\n \"\"\"\n\n # Pop any constraints off the keyword arguments\n for constraint in self.parameter_constraints:\n values = kwargs.pop(constraint, {})\n for ckey, cvalue in values.items():\n param = getattr(self, ckey)\n setattr(param, constraint, cvalue)\n self._mconstraints = {}\n for constraint in self.model_constraints:\n values = kwargs.pop(constraint, [])\n self._mconstraints[constraint] = values\n\n def _initialize_parameters(self, args, kwargs):\n \"\"\"\n Initialize the _parameters array that stores raw parameter values for\n all parameter sets for use with vectorized fitting algorithms; on\n FittableModels the _param_name attributes actually just reference\n slices of this array.\n \"\"\"\n n_models = kwargs.pop('n_models', None)\n\n if not (n_models is None or\n 
(isinstance(n_models, (int, np.integer)) and n_models >= 1)):\n raise ValueError(\n \"n_models must be either None (in which case it is \"\n \"determined from the model_set_axis of the parameter initial \"\n \"values) or it must be a positive integer \"\n \"(got {0!r})\".format(n_models))\n\n model_set_axis = kwargs.pop('model_set_axis', None)\n if model_set_axis is None:\n if n_models is not None and n_models > 1:\n # Default to zero\n model_set_axis = 0\n else:\n # Otherwise disable\n model_set_axis = False\n else:\n if not (model_set_axis is False or\n np.issubdtype(type(model_set_axis), np.integer)):\n raise ValueError(\n \"model_set_axis must be either False or an integer \"\n \"specifying the parameter array axis to map to each \"\n \"model in a set of models (got {0!r}).\".format(\n model_set_axis))\n\n # Process positional arguments by matching them up with the\n # corresponding parameters in self.param_names--if any also appear as\n # keyword arguments this presents a conflict\n params = set()\n if len(args) > len(self.param_names):\n raise TypeError(\n \"{0}.__init__() takes at most {1} positional arguments ({2} \"\n \"given)\".format(self.__class__.__name__, len(self.param_names),\n len(args)))\n\n self._model_set_axis = model_set_axis\n self._param_metrics = defaultdict(dict)\n\n for idx, arg in enumerate(args):\n if arg is None:\n # A value of None implies using the default value, if exists\n continue\n # We use quantity_asanyarray here instead of np.asanyarray because\n # if any of the arguments are quantities, we need to return a\n # Quantity object not a plain Numpy array.\n param_name = self.param_names[idx]\n params.add(param_name)\n if not isinstance(arg, Parameter):\n value = quantity_asanyarray(arg, dtype=float)\n else:\n value = arg\n self._initialize_parameter_value(param_name, value)\n\n # At this point the only remaining keyword arguments should be\n # parameter names; any others are in error.\n for param_name in self.param_names:\n if param_name in kwargs:\n if param_name in params:\n raise TypeError(\n \"{0}.__init__() got multiple values for parameter \"\n \"{1!r}\".format(self.__class__.__name__, param_name))\n value = kwargs.pop(param_name)\n if value is None:\n continue\n # We use quantity_asanyarray here instead of np.asanyarray\n # because if any of the arguments are quantities, we need\n # to return a Quantity object not a plain Numpy array.\n value = quantity_asanyarray(value, dtype=float)\n params.add(param_name)\n self._initialize_parameter_value(param_name, value)\n # Now deal with case where param_name is not supplied by args or kwargs\n for param_name in self.param_names:\n if param_name not in params:\n self._initialize_parameter_value(param_name, None)\n\n if kwargs:\n # If any keyword arguments were left over at this point they are\n # invalid--the base class should only be passed the parameter\n # values, constraints, and param_dim\n for kwarg in kwargs:\n # Just raise an error on the first unrecognized argument\n raise TypeError(\n '{0}.__init__() got an unrecognized parameter '\n '{1!r}'.format(self.__class__.__name__, kwarg))\n\n # Determine the number of model sets: If the model_set_axis is\n # None then there is just one parameter set; otherwise it is determined\n # by the size of that axis on the first parameter--if the other\n # parameters don't have the right number of axes or the sizes of their\n # model_set_axis don't match an error is raised\n if model_set_axis is not False and n_models != 1 and params:\n max_ndim = 0\n if 
model_set_axis < 0:\n min_ndim = abs(model_set_axis)\n else:\n min_ndim = model_set_axis + 1\n\n for name in self.param_names:\n value = getattr(self, name)\n param_ndim = np.ndim(value)\n if param_ndim < min_ndim:\n raise InputParameterError(\n \"All parameter values must be arrays of dimension \"\n \"at least {0} for model_set_axis={1} (the value \"\n \"given for {2!r} is only {3}-dimensional)\".format(\n min_ndim, model_set_axis, name, param_ndim))\n\n max_ndim = max(max_ndim, param_ndim)\n\n if n_models is None:\n # Use the dimensions of the first parameter to determine\n # the number of model sets\n n_models = value.shape[model_set_axis]\n elif value.shape[model_set_axis] != n_models:\n raise InputParameterError(\n \"Inconsistent dimensions for parameter {0!r} for \"\n \"{1} model sets. The length of axis {2} must be the \"\n \"same for all input parameter values\".format(\n name, n_models, model_set_axis))\n\n self._check_param_broadcast(max_ndim)\n else:\n if n_models is None:\n n_models = 1\n\n self._check_param_broadcast(None)\n\n self._n_models = n_models\n # now validate parameters\n for name in params:\n param = getattr(self, name)\n if param._validator is not None:\n param._validator(self, param.value)\n\n def _initialize_parameter_value(self, param_name, value):\n \"\"\"Mostly deals with consistency checks and determining unit issues.\"\"\"\n if isinstance(value, Parameter):\n self.__dict__[param_name] = value\n return\n param = getattr(self, param_name)\n # Use default if value is not provided\n if value is None:\n default = param.default\n if default is None:\n # No value was supplied for the parameter and the\n # parameter does not have a default, therefore the model\n # is underspecified\n raise TypeError(\"{0}.__init__() requires a value for parameter \"\n \"{1!r}\".format(self.__class__.__name__, param_name))\n value = default\n unit = param.unit\n else:\n if isinstance(value, Quantity):\n unit = value.unit\n value = value.value\n else:\n unit = None\n if unit is None and param.unit is not None:\n raise InputParameterError(\n \"{0}.__init__() requires a Quantity for parameter \"\n \"{1!r}\".format(self.__class__.__name__, param_name))\n param._unit = unit\n param.internal_unit = None\n if param._setter is not None:\n if unit is not None:\n _val = param._setter(value * unit)\n else:\n _val = param._setter(value)\n if isinstance(_val, Quantity):\n param.internal_unit = _val.unit\n param._internal_value = np.array(_val.value)\n else:\n param.internal_unit = None\n param._internal_value = np.array(_val)\n else:\n param._value = np.array(value)\n\n def _initialize_slices(self):\n\n param_metrics = self._param_metrics\n total_size = 0\n\n for name in self.param_names:\n param = getattr(self, name)\n value = param.value\n param_size = np.size(value)\n param_shape = np.shape(value)\n param_slice = slice(total_size, total_size + param_size)\n param_metrics[name]['slice'] = param_slice\n param_metrics[name]['shape'] = param_shape\n param_metrics[name]['size'] = param_size\n total_size += param_size\n self._parameters = np.empty(total_size, dtype=np.float64)\n\n def _parameters_to_array(self):\n # Now set the parameter values (this will also fill\n # self._parameters)\n param_metrics = self._param_metrics\n for name in self.param_names:\n param = getattr(self, name)\n value = param.value\n if not isinstance(value, np.ndarray):\n value = np.array([value])\n self._parameters[param_metrics[name]['slice']] = value.ravel()\n\n # Finally validate all the parameters; we do this last so 
that\n # validators that depend on one of the other parameters' values will\n # work\n\n def _array_to_parameters(self):\n param_metrics = self._param_metrics\n for name in self.param_names:\n param = getattr(self, name)\n value = self._parameters[param_metrics[name]['slice']]\n value.shape = param_metrics[name]['shape']\n param.value = value\n\n def _check_param_broadcast(self, max_ndim):\n \"\"\"\n This subroutine checks that all parameter arrays can be broadcast\n against each other, and determines the shapes parameters must have in\n order to broadcast correctly.\n\n If model_set_axis is None this merely checks that the parameters\n broadcast and returns an empty dict if so. This mode is only used for\n single model sets.\n \"\"\"\n all_shapes = []\n model_set_axis = self._model_set_axis\n\n for name in self.param_names:\n param = getattr(self, name)\n value = param.value\n param_shape = np.shape(value)\n param_ndim = len(param_shape)\n if max_ndim is not None and param_ndim < max_ndim:\n # All arrays have the same number of dimensions up to the\n # model_set_axis dimension, but after that they may have a\n # different number of trailing axes. The number of trailing\n # axes must be extended for mutual compatibility. For example\n # if max_ndim = 3 and model_set_axis = 0, an array with the\n # shape (2, 2) must be extended to (2, 1, 2). However, an\n # array with shape (2,) is extended to (2, 1).\n new_axes = (1,) * (max_ndim - param_ndim)\n\n if model_set_axis < 0:\n # Just need to prepend axes to make up the difference\n broadcast_shape = new_axes + param_shape\n else:\n broadcast_shape = (param_shape[:model_set_axis + 1] +\n new_axes +\n param_shape[model_set_axis + 1:])\n self._param_metrics[name]['broadcast_shape'] = broadcast_shape\n all_shapes.append(broadcast_shape)\n else:\n all_shapes.append(param_shape)\n\n # Now check mutual broadcastability of all shapes\n try:\n check_broadcast(*all_shapes)\n except IncompatibleShapeError as exc:\n shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args\n param_a = self.param_names[shape_a_idx]\n param_b = self.param_names[shape_b_idx]\n\n raise InputParameterError(\n \"Parameter {0!r} of shape {1!r} cannot be broadcast with \"\n \"parameter {2!r} of shape {3!r}. All parameter arrays \"\n \"must have shapes that are mutually compatible according \"\n \"to the broadcasting rules.\".format(param_a, shape_a,\n param_b, shape_b))\n\n def _param_sets(self, raw=False, units=False):\n \"\"\"\n Implementation of the Model.param_sets property.\n\n This internal implementation has a ``raw`` argument which controls\n whether or not to return the raw parameter values (i.e. the values that\n are actually stored in the ._parameters array, as opposed to the values\n displayed to users. 
In most cases these are one in the same but there\n are currently a few exceptions.\n\n Note: This is notably an overcomplicated device and may be removed\n entirely in the near future.\n \"\"\"\n\n values = []\n shapes = []\n for name in self.param_names:\n param = getattr(self, name)\n\n if raw and param._setter:\n value = param._internal_value\n else:\n value = param.value\n\n broadcast_shape = self._param_metrics[name].get('broadcast_shape')\n if broadcast_shape is not None:\n value = value.reshape(broadcast_shape)\n\n shapes.append(np.shape(value))\n\n if len(self) == 1:\n # Add a single param set axis to the parameter's value (thus\n # converting scalars to shape (1,) array values) for\n # consistency\n value = np.array([value])\n\n if units:\n if raw and param.internal_unit is not None:\n unit = param.internal_unit\n else:\n unit = param.unit\n if unit is not None:\n value = Quantity(value, unit)\n\n values.append(value)\n\n if len(set(shapes)) != 1 or units:\n # If the parameters are not all the same shape, converting to an\n # array is going to produce an object array\n # However the way Numpy creates object arrays is tricky in that it\n # will recurse into array objects in the list and break them up\n # into separate objects. Doing things this way ensures a 1-D\n # object array the elements of which are the individual parameter\n # arrays. There's not much reason to do this over returning a list\n # except for consistency\n psets = np.empty(len(values), dtype=object)\n psets[:] = values\n return psets\n\n return np.array(values)\n\n def _format_repr(self, args=[], kwargs={}, defaults={}):\n \"\"\"\n Internal implementation of ``__repr__``.\n\n This is separated out for ease of use by subclasses that wish to\n override the default ``__repr__`` while keeping the same basic\n formatting.\n \"\"\"\n\n parts = [repr(a) for a in args]\n\n parts.extend(\n f\"{name}={param_repr_oneline(getattr(self, name))}\"\n for name in self.param_names)\n\n if self.name is not None:\n parts.append(f'name={self.name!r}')\n\n for kwarg, value in kwargs.items():\n if kwarg in defaults and defaults[kwarg] == value:\n continue\n parts.append(f'{kwarg}={value!r}')\n\n if len(self) > 1:\n parts.append(f\"n_models={len(self)}\")\n\n return f\"<{self.__class__.__name__}({', '.join(parts)})>\"\n\n def _format_str(self, keywords=[], defaults={}):\n \"\"\"\n Internal implementation of ``__str__``.\n\n This is separated out for ease of use by subclasses that wish to\n override the default ``__str__`` while keeping the same basic\n formatting.\n \"\"\"\n\n default_keywords = [\n ('Model', self.__class__.__name__),\n ('Name', self.name),\n ('Inputs', self.inputs),\n ('Outputs', self.outputs),\n ('Model set size', len(self))\n ]\n\n parts = [f'{keyword}: {value}'\n for keyword, value in default_keywords\n if value is not None]\n\n for keyword, value in keywords:\n if keyword.lower() in defaults and defaults[keyword.lower()] == value:\n continue\n parts.append(f'{keyword}: {value}')\n parts.append('Parameters:')\n\n if len(self) == 1:\n columns = [[getattr(self, name).value]\n for name in self.param_names]\n else:\n columns = [getattr(self, name).value\n for name in self.param_names]\n\n if columns:\n param_table = Table(columns, names=self.param_names)\n # Set units on the columns\n for name in self.param_names:\n param_table[name].unit = getattr(self, name).unit\n parts.append(indent(str(param_table), width=4))\n\n return '\\n'.join(parts)\n\n\nclass FittableModel(Model):\n \"\"\"\n Base class for models that can 
be fitted using the built-in fitting\n algorithms.\n \"\"\"\n\n linear = False\n # derivative with respect to parameters\n fit_deriv = None\n \"\"\"\n Function (similar to the model's `~Model.evaluate`) to compute the\n derivatives of the model with respect to its parameters, for use by fitting\n algorithms. In other words, this computes the Jacobian matrix with respect\n to the model's parameters.\n \"\"\"\n # Flag that indicates if the model derivatives with respect to parameters\n # are given in columns or rows\n col_fit_deriv = True\n fittable = True\n\n\nclass Fittable1DModel(FittableModel):\n \"\"\"\n Base class for one-dimensional fittable models.\n\n This class provides an easier interface to defining new models.\n Examples can be found in `astropy.modeling.functional_models`.\n \"\"\"\n n_inputs = 1\n n_outputs = 1\n _separable = True\n\n\nclass Fittable2DModel(FittableModel):\n \"\"\"\n Base class for two-dimensional fittable models.\n\n This class provides an easier interface to defining new models.\n Examples can be found in `astropy.modeling.functional_models`.\n \"\"\"\n\n n_inputs = 2\n n_outputs = 1\n\n\ndef _make_arithmetic_operator(oper):\n # We don't bother with tuple unpacking here for efficiency's sake, but for\n # documentation purposes:\n #\n # f_eval, f_n_inputs, f_n_outputs = f\n #\n # and similarly for g\n def op(f, g):\n return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])\n\n return op\n\n\ndef _composition_operator(f, g):\n # We don't bother with tuple unpacking here for efficiency's sake, but for\n # documentation purposes:\n #\n # f_eval, f_n_inputs, f_n_outputs = f\n #\n # and similarly for g\n return (lambda inputs, params: g[0](f[0](inputs, params), params),\n f[1], g[2])\n\n\ndef _join_operator(f, g):\n # We don't bother with tuple unpacking here for efficiency's sake, but for\n # documentation purposes:\n #\n # f_eval, f_n_inputs, f_n_outputs = f\n #\n # and similarly for g\n return (lambda inputs, params: (f[0](inputs[:f[1]], params) +\n g[0](inputs[f[1]:], params)),\n f[1] + g[1], f[2] + g[2])\n\n\nBINARY_OPERATORS = {\n '+': _make_arithmetic_operator(operator.add),\n '-': _make_arithmetic_operator(operator.sub),\n '*': _make_arithmetic_operator(operator.mul),\n '/': _make_arithmetic_operator(operator.truediv),\n '**': _make_arithmetic_operator(operator.pow),\n '|': _composition_operator,\n '&': _join_operator\n}\n\nSPECIAL_OPERATORS = _SpecialOperatorsDict()\n\n\ndef _add_special_operator(sop_name, sop):\n return SPECIAL_OPERATORS.add(sop_name, sop)\n\n\nclass CompoundModel(Model):\n '''\n Base class for compound models.\n\n While it can be used directly, the recommended way\n to combine models is through the model operators.\n '''\n\n def __init__(self, op, left, right, name=None):\n self.__dict__['_param_names'] = None\n self._n_submodels = None\n self.op = op\n self.left = left\n self.right = right\n self._bounding_box = None\n self._user_bounding_box = None\n self._leaflist = None\n self._tdict = None\n self._parameters = None\n self._parameters_ = None\n self._param_metrics = None\n\n if op != 'fix_inputs' and len(left) != len(right):\n raise ValueError(\n 'Both operands must have equal values for n_models')\n self._n_models = len(left)\n\n if op != 'fix_inputs' and ((left.model_set_axis != right.model_set_axis)\n or left.model_set_axis): # not False and not 0\n raise ValueError(\"model_set_axis must be False or 0 and consistent for operands\")\n self._model_set_axis = left.model_set_axis\n\n if op in ['+', '-', '*', '/', '**'] or op 
in SPECIAL_OPERATORS:\n if (left.n_inputs != right.n_inputs) or \\\n (left.n_outputs != right.n_outputs):\n raise ModelDefinitionError(\n 'Both operands must match numbers of inputs and outputs')\n self.n_inputs = left.n_inputs\n self.n_outputs = left.n_outputs\n self.inputs = left.inputs\n self.outputs = left.outputs\n elif op == '&':\n self.n_inputs = left.n_inputs + right.n_inputs\n self.n_outputs = left.n_outputs + right.n_outputs\n self.inputs = combine_labels(left.inputs, right.inputs)\n self.outputs = combine_labels(left.outputs, right.outputs)\n elif op == '|':\n if left.n_outputs != right.n_inputs:\n raise ModelDefinitionError(\n \"Unsupported operands for |: {0} (n_inputs={1}, \"\n \"n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); \"\n \"n_outputs for the left-hand model must match n_inputs \"\n \"for the right-hand model.\".format(\n left.name, left.n_inputs, left.n_outputs, right.name,\n right.n_inputs, right.n_outputs))\n\n self.n_inputs = left.n_inputs\n self.n_outputs = right.n_outputs\n self.inputs = left.inputs\n self.outputs = right.outputs\n elif op == 'fix_inputs':\n if not isinstance(left, Model):\n raise ValueError('First argument to \"fix_inputs\" must be an instance of an astropy Model.')\n if not isinstance(right, dict):\n raise ValueError('Expected a dictionary for second argument of \"fix_inputs\".')\n\n # Dict keys must match either possible indices\n # for model on left side, or names for inputs.\n self.n_inputs = left.n_inputs - len(right)\n # Assign directly to the private attribute (instead of using the setter)\n # to avoid asserting the new number of outputs matches the old one.\n self._outputs = left.outputs\n self.n_outputs = left.n_outputs\n newinputs = list(left.inputs)\n keys = right.keys()\n input_ind = []\n for key in keys:\n if np.issubdtype(type(key), np.integer):\n if key >= left.n_inputs or key < 0:\n raise ValueError(\n 'Substitution key integer value '\n 'not among possible input choices.')\n if key in input_ind:\n raise ValueError(\"Duplicate specification of \"\n \"same input (index/name).\")\n input_ind.append(key)\n elif isinstance(key, str):\n if key not in left.inputs:\n raise ValueError(\n 'Substitution key string not among possible '\n 'input choices.')\n # Check to see it doesn't match positional\n # specification.\n ind = left.inputs.index(key)\n if ind in input_ind:\n raise ValueError(\"Duplicate specification of \"\n \"same input (index/name).\")\n input_ind.append(ind)\n # Remove substituted inputs\n input_ind.sort()\n input_ind.reverse()\n for ind in input_ind:\n del newinputs[ind]\n self.inputs = tuple(newinputs)\n # Now check to see if the input model has bounding_box defined.\n # If so, remove the appropriate dimensions and set it for this\n # instance.\n try:\n self.bounding_box = \\\n self.left.bounding_box.fix_inputs(self, right)\n except NotImplementedError:\n pass\n\n else:\n raise ModelDefinitionError('Illegal operator: ', self.op)\n self.name = name\n self._fittable = None\n self.fit_deriv = None\n self.col_fit_deriv = None\n if op in ('|', '+', '-'):\n self.linear = left.linear and right.linear\n else:\n self.linear = False\n self.eqcons = []\n self.ineqcons = []\n self.n_left_params = len(self.left.parameters)\n self._map_parameters()\n\n def _get_left_inputs_from_args(self, args):\n return args[:self.left.n_inputs]\n\n def _get_right_inputs_from_args(self, args):\n op = self.op\n if op == '&':\n # Args expected to look like (*left inputs, *right inputs, *left params, *right params)\n return 
args[self.left.n_inputs: self.left.n_inputs + self.right.n_inputs]\n elif op == '|' or op == 'fix_inputs':\n return None\n else:\n return args[:self.left.n_inputs]\n\n def _get_left_params_from_args(self, args):\n op = self.op\n if op == '&':\n # Args expected to look like (*left inputs, *right inputs, *left params, *right params)\n n_inputs = self.left.n_inputs + self.right.n_inputs\n return args[n_inputs: n_inputs + self.n_left_params]\n else:\n return args[self.left.n_inputs: self.left.n_inputs + self.n_left_params]\n\n def _get_right_params_from_args(self, args):\n op = self.op\n if op == 'fix_inputs':\n return None\n if op == '&':\n # Args expected to look like (*left inputs, *right inputs, *left params, *right params)\n return args[self.left.n_inputs + self.right.n_inputs + self.n_left_params:]\n else:\n return args[self.left.n_inputs + self.n_left_params:]\n\n def _get_kwarg_model_parameters_as_positional(self, args, kwargs):\n # could do it with inserts but rebuilding seems like the simplest way\n\n # TODO: Check if any param names are in kwargs, maybe as an intersection of sets?\n if self.op == \"&\":\n new_args = list(args[:self.left.n_inputs + self.right.n_inputs])\n args_pos = self.left.n_inputs + self.right.n_inputs\n else:\n new_args = list(args[:self.left.n_inputs])\n args_pos = self.left.n_inputs\n\n for param_name in self.param_names:\n kw_value = kwargs.pop(param_name, None)\n if kw_value is not None:\n value = kw_value\n else:\n try:\n value = args[args_pos]\n except IndexError:\n raise IndexError(\"Missing parameter or input\")\n\n args_pos += 1\n new_args.append(value)\n\n return new_args, kwargs\n\n def _apply_operators_to_value_lists(self, leftval, rightval, **kw):\n op = self.op\n if op == '+':\n return binary_operation(operator.add, leftval, rightval)\n elif op == '-':\n return binary_operation(operator.sub, leftval, rightval)\n elif op == '*':\n return binary_operation(operator.mul, leftval, rightval)\n elif op == '/':\n return binary_operation(operator.truediv, leftval, rightval)\n elif op == '**':\n return binary_operation(operator.pow, leftval, rightval)\n elif op == '&':\n if not isinstance(leftval, tuple):\n leftval = (leftval,)\n if not isinstance(rightval, tuple):\n rightval = (rightval,)\n return leftval + rightval\n elif op in SPECIAL_OPERATORS:\n return binary_operation(SPECIAL_OPERATORS[op], leftval, rightval)\n else:\n raise ModelDefinitionError(f'Unrecognized operator {op}')\n\n def evaluate(self, *args, **kw):\n op = self.op\n args, kw = self._get_kwarg_model_parameters_as_positional(args, kw)\n left_inputs = self._get_left_inputs_from_args(args)\n left_params = self._get_left_params_from_args(args)\n\n if op == 'fix_inputs':\n pos_index = dict(zip(self.left.inputs, range(self.left.n_inputs)))\n fixed_inputs = {\n key if np.issubdtype(type(key), np.integer) else pos_index[key]: value\n for key, value in self.right.items()\n }\n left_inputs = [\n fixed_inputs[ind] if ind in fixed_inputs.keys() else inp\n for ind, inp in enumerate(left_inputs)\n ]\n\n leftval = self.left.evaluate(*itertools.chain(left_inputs, left_params))\n\n if op == 'fix_inputs':\n return leftval\n\n right_inputs = self._get_right_inputs_from_args(args)\n right_params = self._get_right_params_from_args(args)\n\n if op == \"|\":\n if isinstance(leftval, tuple):\n return self.right.evaluate(*itertools.chain(leftval, right_params))\n else:\n return self.right.evaluate(leftval, *right_params)\n else:\n rightval = self.right.evaluate(*itertools.chain(right_inputs, right_params))\n\n return 
self._apply_operators_to_value_lists(leftval, rightval, **kw)\n\n @property\n def n_submodels(self):\n if self._leaflist is None:\n self._make_leaflist()\n return len(self._leaflist)\n\n @property\n def submodel_names(self):\n \"\"\" Return the names of submodels in a ``CompoundModel``.\"\"\"\n if self._leaflist is None:\n self._make_leaflist()\n names = [item.name for item in self._leaflist]\n nonecount = 0\n newnames = []\n for item in names:\n if item is None:\n newnames.append(f'None_{nonecount}')\n nonecount += 1\n else:\n newnames.append(item)\n return tuple(newnames)\n\n def both_inverses_exist(self):\n '''\n if both members of this compound model have inverses return True\n '''\n warnings.warn(\n \"CompoundModel.both_inverses_exist is deprecated. \"\n \"Use has_inverse instead.\",\n AstropyDeprecationWarning\n )\n\n try:\n linv = self.left.inverse\n rinv = self.right.inverse\n except NotImplementedError:\n return False\n\n return True\n\n def _pre_evaluate(self, *args, **kwargs):\n \"\"\"\n CompoundModel specific input setup that needs to occur prior to\n model evaluation.\n\n Note\n ----\n All of the _pre_evaluate for each component model will be\n performed at the time that the individual model is evaluated.\n \"\"\"\n\n # If equivalencies are provided, necessary to map parameters and pass\n # the leaflist as a keyword input for use by model evaluation so that\n # the compound model input names can be matched to the model input\n # names.\n if 'equivalencies' in kwargs:\n # Restructure to be useful for the individual model lookup\n kwargs['inputs_map'] = [(value[0], (value[1], key)) for\n key, value in self.inputs_map().items()]\n\n # Setup actual model evaluation method\n def evaluate(_inputs):\n return self._evaluate(*_inputs, **kwargs)\n\n return evaluate, args, None, kwargs\n\n @property\n def _argnames(self):\n \"\"\"No inputs should be used to determine input_shape when handling compound models\"\"\"\n return ()\n\n def _post_evaluate(self, inputs, outputs, broadcasted_shapes, with_bbox, **kwargs):\n \"\"\"\n CompoundModel specific post evaluation processing of outputs\n\n Note\n ----\n All of the _post_evaluate for each component model will be\n performed at the time that the individual model is evaluated.\n \"\"\"\n if self.get_bounding_box(with_bbox) is not None and self.n_outputs == 1:\n return outputs[0]\n return outputs\n\n def _evaluate(self, *args, **kw):\n op = self.op\n if op != 'fix_inputs':\n if op != '&':\n leftval = self.left(*args, **kw)\n if op != '|':\n rightval = self.right(*args, **kw)\n else:\n rightval = None\n\n else:\n leftval = self.left(*(args[:self.left.n_inputs]), **kw)\n rightval = self.right(*(args[self.left.n_inputs:]), **kw)\n\n if op != \"|\":\n return self._apply_operators_to_value_lists(leftval, rightval, **kw)\n\n elif op == '|':\n if isinstance(leftval, tuple):\n return self.right(*leftval, **kw)\n else:\n return self.right(leftval, **kw)\n\n else:\n subs = self.right\n newargs = list(args)\n subinds = []\n subvals = []\n for key in subs.keys():\n if np.issubdtype(type(key), np.integer):\n subinds.append(key)\n elif isinstance(key, str):\n ind = self.left.inputs.index(key)\n subinds.append(ind)\n subvals.append(subs[key])\n # Turn inputs specified in kw into positional indices.\n # Names for compound inputs do not propagate to sub models.\n kwind = []\n kwval = []\n for kwkey in list(kw.keys()):\n if kwkey in self.inputs:\n ind = self.inputs.index(kwkey)\n if ind < len(args):\n raise ValueError(\"Keyword argument duplicates \"\n 
\"positional value supplied.\")\n kwind.append(ind)\n kwval.append(kw[kwkey])\n del kw[kwkey]\n # Build new argument list\n # Append keyword specified args first\n if kwind:\n kwargs = list(zip(kwind, kwval))\n kwargs.sort()\n kwindsorted, kwvalsorted = list(zip(*kwargs))\n newargs = newargs + list(kwvalsorted)\n if subinds:\n subargs = list(zip(subinds, subvals))\n subargs.sort()\n # subindsorted, subvalsorted = list(zip(*subargs))\n # The substitutions must be inserted in order\n for ind, val in subargs:\n newargs.insert(ind, val)\n return self.left(*newargs, **kw)\n\n @property\n def param_names(self):\n \"\"\" An ordered list of parameter names.\"\"\"\n return self._param_names\n\n def _make_leaflist(self):\n tdict = {}\n leaflist = []\n make_subtree_dict(self, '', tdict, leaflist)\n self._leaflist = leaflist\n self._tdict = tdict\n\n def __getattr__(self, name):\n \"\"\"\n If someone accesses an attribute not already defined, map the\n parameters, and then see if the requested attribute is one of\n the parameters\n \"\"\"\n # The following test is needed to avoid infinite recursion\n # caused by deepcopy. There may be other such cases discovered.\n if name == '__setstate__':\n raise AttributeError\n if name in self._param_names:\n return self.__dict__[name]\n else:\n raise AttributeError(f'Attribute \"{name}\" not found')\n\n def __getitem__(self, index):\n if self._leaflist is None:\n self._make_leaflist()\n leaflist = self._leaflist\n tdict = self._tdict\n if isinstance(index, slice):\n if index.step:\n raise ValueError('Steps in slices not supported '\n 'for compound models')\n if index.start is not None:\n if isinstance(index.start, str):\n start = self._str_index_to_int(index.start)\n else:\n start = index.start\n else:\n start = 0\n if index.stop is not None:\n if isinstance(index.stop, str):\n stop = self._str_index_to_int(index.stop)\n else:\n stop = index.stop - 1\n else:\n stop = len(leaflist) - 1\n if index.stop == 0:\n raise ValueError(\"Slice endpoint cannot be 0\")\n if start < 0:\n start = len(leaflist) + start\n if stop < 0:\n stop = len(leaflist) + stop\n # now search for matching node:\n if stop == start: # only single value, get leaf instead in code below\n index = start\n else:\n for key in tdict:\n node, leftind, rightind = tdict[key]\n if leftind == start and rightind == stop:\n return node\n raise IndexError(\"No appropriate subtree matches slice\")\n if isinstance(index, type(0)):\n return leaflist[index]\n elif isinstance(index, type('')):\n return leaflist[self._str_index_to_int(index)]\n else:\n raise TypeError('index must be integer, slice, or model name string')\n\n def _str_index_to_int(self, str_index):\n # Search through leaflist for item with that name\n found = []\n for nleaf, leaf in enumerate(self._leaflist):\n if getattr(leaf, 'name', None) == str_index:\n found.append(nleaf)\n if len(found) == 0:\n raise IndexError(f\"No component with name '{str_index}' found\")\n if len(found) > 1:\n raise IndexError(\"Multiple components found using '{}' as name\\n\"\n \"at indices {}\".format(str_index, found))\n return found[0]\n\n @property\n def n_inputs(self):\n \"\"\" The number of inputs of a model.\"\"\"\n return self._n_inputs\n\n @n_inputs.setter\n def n_inputs(self, value):\n self._n_inputs = value\n\n @property\n def n_outputs(self):\n \"\"\" The number of outputs of a model.\"\"\"\n return self._n_outputs\n\n @n_outputs.setter\n def n_outputs(self, value):\n self._n_outputs = value\n\n @property\n def eqcons(self):\n return self._eqcons\n\n 
@eqcons.setter\n def eqcons(self, value):\n self._eqcons = value\n\n @property\n def ineqcons(self):\n return self._eqcons\n\n @ineqcons.setter\n def ineqcons(self, value):\n self._eqcons = value\n\n def traverse_postorder(self, include_operator=False):\n \"\"\" Postorder traversal of the CompoundModel tree.\"\"\"\n res = []\n if isinstance(self.left, CompoundModel):\n res = res + self.left.traverse_postorder(include_operator)\n else:\n res = res + [self.left]\n if isinstance(self.right, CompoundModel):\n res = res + self.right.traverse_postorder(include_operator)\n else:\n res = res + [self.right]\n if include_operator:\n res.append(self.op)\n else:\n res.append(self)\n return res\n\n def _format_expression(self, format_leaf=None):\n leaf_idx = 0\n operands = deque()\n\n if format_leaf is None:\n format_leaf = lambda i, l: f'[{i}]'\n\n for node in self.traverse_postorder():\n if not isinstance(node, CompoundModel):\n operands.append(format_leaf(leaf_idx, node))\n leaf_idx += 1\n continue\n\n right = operands.pop()\n left = operands.pop()\n if node.op in OPERATOR_PRECEDENCE:\n oper_order = OPERATOR_PRECEDENCE[node.op]\n\n if isinstance(node, CompoundModel):\n if (isinstance(node.left, CompoundModel) and\n OPERATOR_PRECEDENCE[node.left.op] < oper_order):\n left = f'({left})'\n if (isinstance(node.right, CompoundModel) and\n OPERATOR_PRECEDENCE[node.right.op] < oper_order):\n right = f'({right})'\n\n operands.append(' '.join((left, node.op, right)))\n else:\n left = f'(({left}),'\n right = f'({right}))'\n operands.append(' '.join((node.op[0], left, right)))\n\n return ''.join(operands)\n\n def _format_components(self):\n if self._parameters_ is None:\n self._map_parameters()\n return '\\n\\n'.join('[{0}]: {1!r}'.format(idx, m)\n for idx, m in enumerate(self._leaflist))\n\n def __str__(self):\n expression = self._format_expression()\n components = self._format_components()\n keywords = [\n ('Expression', expression),\n ('Components', '\\n' + indent(components))\n ]\n return super()._format_str(keywords=keywords)\n\n def rename(self, name):\n self.name = name\n return self\n\n @property\n def isleaf(self):\n return False\n\n @property\n def inverse(self):\n if self.op == '|':\n return self.right.inverse | self.left.inverse\n elif self.op == '&':\n return self.left.inverse & self.right.inverse\n else:\n return NotImplemented\n\n @property\n def fittable(self):\n \"\"\" Set the fittable attribute on a compound model.\"\"\"\n if self._fittable is None:\n if self._leaflist is None:\n self._map_parameters()\n self._fittable = all(m.fittable for m in self._leaflist)\n return self._fittable\n\n __add__ = _model_oper('+')\n __sub__ = _model_oper('-')\n __mul__ = _model_oper('*')\n __truediv__ = _model_oper('/')\n __pow__ = _model_oper('**')\n __or__ = _model_oper('|')\n __and__ = _model_oper('&')\n\n def _map_parameters(self):\n \"\"\"\n Map all the constituent model parameters to the compound object,\n renaming as necessary by appending a suffix number.\n\n This can be an expensive operation, particularly for a complex\n expression tree.\n\n All the corresponding parameter attributes are created that one\n expects for the Model class.\n\n The parameter objects that the attributes point to are the same\n objects as in the constiutent models. Changes made to parameter\n values to either are seen by both.\n\n Prior to calling this, none of the associated attributes will\n exist. 
This method must be called to make the model usable by\n fitting engines.\n\n If oldnames=True, then parameters are named as in the original\n implementation of compound models.\n \"\"\"\n if self._parameters is not None:\n # do nothing\n return\n if self._leaflist is None:\n self._make_leaflist()\n self._parameters_ = {}\n param_map = {}\n self._param_names = []\n for lindex, leaf in enumerate(self._leaflist):\n if not isinstance(leaf, dict):\n for param_name in leaf.param_names:\n param = getattr(leaf, param_name)\n new_param_name = f\"{param_name}_{lindex}\"\n self.__dict__[new_param_name] = param\n self._parameters_[new_param_name] = param\n self._param_names.append(new_param_name)\n param_map[new_param_name] = (lindex, param_name)\n self._param_metrics = {}\n self._param_map = param_map\n self._param_map_inverse = dict((v, k) for k, v in param_map.items())\n self._initialize_slices()\n self._param_names = tuple(self._param_names)\n\n def _initialize_slices(self):\n param_metrics = self._param_metrics\n total_size = 0\n\n for name in self.param_names:\n param = getattr(self, name)\n value = param.value\n param_size = np.size(value)\n param_shape = np.shape(value)\n param_slice = slice(total_size, total_size + param_size)\n param_metrics[name] = {}\n param_metrics[name]['slice'] = param_slice\n param_metrics[name]['shape'] = param_shape\n param_metrics[name]['size'] = param_size\n total_size += param_size\n self._parameters = np.empty(total_size, dtype=np.float64)\n\n @staticmethod\n def _recursive_lookup(branch, adict, key):\n if isinstance(branch, CompoundModel):\n return adict[key]\n return branch, key\n\n def inputs_map(self):\n \"\"\"\n Map the names of the inputs to this ExpressionTree to the inputs to the leaf models.\n \"\"\"\n inputs_map = {}\n if not isinstance(self.op, str): # If we don't have an operator the mapping is trivial\n return {inp: (self, inp) for inp in self.inputs}\n\n elif self.op == '|':\n if isinstance(self.left, CompoundModel):\n l_inputs_map = self.left.inputs_map()\n for inp in self.inputs:\n if isinstance(self.left, CompoundModel):\n inputs_map[inp] = l_inputs_map[inp]\n else:\n inputs_map[inp] = self.left, inp\n elif self.op == '&':\n if isinstance(self.left, CompoundModel):\n l_inputs_map = self.left.inputs_map()\n if isinstance(self.right, CompoundModel):\n r_inputs_map = self.right.inputs_map()\n for i, inp in enumerate(self.inputs):\n if i < len(self.left.inputs): # Get from left\n if isinstance(self.left, CompoundModel):\n inputs_map[inp] = l_inputs_map[self.left.inputs[i]]\n else:\n inputs_map[inp] = self.left, self.left.inputs[i]\n else: # Get from right\n if isinstance(self.right, CompoundModel):\n inputs_map[inp] = r_inputs_map[self.right.inputs[i - len(self.left.inputs)]]\n else:\n inputs_map[inp] = self.right, self.right.inputs[i - len(self.left.inputs)]\n elif self.op == 'fix_inputs':\n fixed_ind = list(self.right.keys())\n ind = [list(self.left.inputs).index(i) if isinstance(i, str) else i for i in fixed_ind]\n inp_ind = list(range(self.left.n_inputs))\n for i in ind:\n inp_ind.remove(i)\n for i in inp_ind:\n inputs_map[self.left.inputs[i]] = self.left, self.left.inputs[i]\n else:\n if isinstance(self.left, CompoundModel):\n l_inputs_map = self.left.inputs_map()\n for inp in self.left.inputs:\n if isinstance(self.left, CompoundModel):\n inputs_map[inp] = l_inputs_map[inp]\n else:\n inputs_map[inp] = self.left, inp\n return inputs_map\n\n def _parameter_units_for_data_units(self, input_units, output_units):\n if self._leaflist is None:\n 
self._map_parameters()\n units_for_data = {}\n for imodel, model in enumerate(self._leaflist):\n units_for_data_leaf = model._parameter_units_for_data_units(input_units, output_units)\n for param_leaf in units_for_data_leaf:\n param = self._param_map_inverse[(imodel, param_leaf)]\n units_for_data[param] = units_for_data_leaf[param_leaf]\n return units_for_data\n\n @property\n def input_units(self):\n inputs_map = self.inputs_map()\n input_units_dict = {key: inputs_map[key][0].input_units[orig_key]\n for key, (mod, orig_key) in inputs_map.items()\n if inputs_map[key][0].input_units is not None}\n if input_units_dict:\n return input_units_dict\n return None\n\n @property\n def input_units_equivalencies(self):\n inputs_map = self.inputs_map()\n input_units_equivalencies_dict = {\n key: inputs_map[key][0].input_units_equivalencies[orig_key]\n for key, (mod, orig_key) in inputs_map.items()\n if inputs_map[key][0].input_units_equivalencies is not None\n }\n if not input_units_equivalencies_dict:\n return None\n\n return input_units_equivalencies_dict\n\n @property\n def input_units_allow_dimensionless(self):\n inputs_map = self.inputs_map()\n return {key: inputs_map[key][0].input_units_allow_dimensionless[orig_key]\n for key, (mod, orig_key) in inputs_map.items()}\n\n @property\n def input_units_strict(self):\n inputs_map = self.inputs_map()\n return {key: inputs_map[key][0].input_units_strict[orig_key]\n for key, (mod, orig_key) in inputs_map.items()}\n\n @property\n def return_units(self):\n outputs_map = self.outputs_map()\n return {key: outputs_map[key][0].return_units[orig_key]\n for key, (mod, orig_key) in outputs_map.items()\n if outputs_map[key][0].return_units is not None}\n\n def outputs_map(self):\n \"\"\"\n Map the names of the outputs to this ExpressionTree to the outputs to the leaf models.\n \"\"\"\n outputs_map = {}\n if not isinstance(self.op, str): # If we don't have an operator the mapping is trivial\n return {out: (self, out) for out in self.outputs}\n\n elif self.op == '|':\n if isinstance(self.right, CompoundModel):\n r_outputs_map = self.right.outputs_map()\n for out in self.outputs:\n if isinstance(self.right, CompoundModel):\n outputs_map[out] = r_outputs_map[out]\n else:\n outputs_map[out] = self.right, out\n\n elif self.op == '&':\n if isinstance(self.left, CompoundModel):\n l_outputs_map = self.left.outputs_map()\n if isinstance(self.right, CompoundModel):\n r_outputs_map = self.right.outputs_map()\n for i, out in enumerate(self.outputs):\n if i < len(self.left.outputs): # Get from left\n if isinstance(self.left, CompoundModel):\n outputs_map[out] = l_outputs_map[self.left.outputs[i]]\n else:\n outputs_map[out] = self.left, self.left.outputs[i]\n else: # Get from right\n if isinstance(self.right, CompoundModel):\n outputs_map[out] = r_outputs_map[self.right.outputs[i - len(self.left.outputs)]]\n else:\n outputs_map[out] = self.right, self.right.outputs[i - len(self.left.outputs)]\n elif self.op == 'fix_inputs':\n return self.left.outputs_map()\n else:\n if isinstance(self.left, CompoundModel):\n l_outputs_map = self.left.outputs_map()\n for out in self.left.outputs:\n if isinstance(self.left, CompoundModel):\n outputs_map[out] = l_outputs_map()[out]\n else:\n outputs_map[out] = self.left, out\n return outputs_map\n\n @property\n def has_user_bounding_box(self):\n \"\"\"\n A flag indicating whether or not a custom bounding_box has been\n assigned to this model by a user, via assignment to\n ``model.bounding_box``.\n \"\"\"\n\n return self._user_bounding_box is not 
None\n\n def render(self, out=None, coords=None):\n \"\"\"\n Evaluate a model at fixed positions, respecting the ``bounding_box``.\n\n The key difference relative to evaluating the model directly is that\n this method is limited to a bounding box if the `Model.bounding_box`\n attribute is set.\n\n Parameters\n ----------\n out : `numpy.ndarray`, optional\n An array that the evaluated model will be added to. If this is not\n given (or given as ``None``), a new array will be created.\n coords : array-like, optional\n An array to be used to translate from the model's input coordinates\n to the ``out`` array. It should have the property that\n ``self(coords)`` yields the same shape as ``out``. If ``out`` is\n not specified, ``coords`` will be used to determine the shape of\n the returned array. If this is not provided (or None), the model\n will be evaluated on a grid determined by `Model.bounding_box`.\n\n Returns\n -------\n out : `numpy.ndarray`\n The model added to ``out`` if ``out`` is not ``None``, or else a\n new array from evaluating the model over ``coords``.\n If ``out`` and ``coords`` are both `None`, the returned array is\n limited to the `Model.bounding_box` limits. If\n `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be\n passed.\n\n Raises\n ------\n ValueError\n If ``coords`` are not given and the the `Model.bounding_box` of\n this model is not set.\n\n Examples\n --------\n :ref:`astropy:bounding-boxes`\n \"\"\"\n\n bbox = self.get_bounding_box()\n\n ndim = self.n_inputs\n\n if (coords is None) and (out is None) and (bbox is None):\n raise ValueError('If no bounding_box is set, '\n 'coords or out must be input.')\n\n # for consistent indexing\n if ndim == 1:\n if coords is not None:\n coords = [coords]\n if bbox is not None:\n bbox = [bbox]\n\n if coords is not None:\n coords = np.asanyarray(coords, dtype=float)\n # Check dimensions match out and model\n assert len(coords) == ndim\n if out is not None:\n if coords[0].shape != out.shape:\n raise ValueError('inconsistent shape of the output.')\n else:\n out = np.zeros(coords[0].shape)\n\n if out is not None:\n out = np.asanyarray(out)\n if out.ndim != ndim:\n raise ValueError('the array and model must have the same '\n 'number of dimensions.')\n\n if bbox is not None:\n # Assures position is at center pixel, important when using\n # add_array.\n pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))\n for bb in bbox]).astype(int).T\n pos, delta = pd\n\n if coords is not None:\n sub_shape = tuple(delta * 2 + 1)\n sub_coords = np.array([extract_array(c, sub_shape, pos)\n for c in coords])\n else:\n limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]\n sub_coords = np.mgrid[limits]\n\n sub_coords = sub_coords[::-1]\n\n if out is None:\n out = self(*sub_coords)\n else:\n try:\n out = add_array(out, self(*sub_coords), pos)\n except ValueError:\n raise ValueError(\n 'The `bounding_box` is larger than the input out in '\n 'one or more dimensions. 
Set '\n '`model.bounding_box = None`.')\n else:\n if coords is None:\n im_shape = out.shape\n limits = [slice(i) for i in im_shape]\n coords = np.mgrid[limits]\n\n coords = coords[::-1]\n\n out += self(*coords)\n\n return out\n\n def replace_submodel(self, name, model):\n \"\"\"\n Construct a new `~astropy.modeling.CompoundModel` instance from an\n existing CompoundModel, replacing the named submodel with a new model.\n\n In order to ensure that inverses and names are kept/reconstructed, it's\n necessary to rebuild the CompoundModel from the replaced node all the\n way back to the base. The original CompoundModel is left untouched.\n\n Parameters\n ----------\n name : str\n name of submodel to be replaced\n model : `~astropy.modeling.Model`\n replacement model\n \"\"\"\n submodels = [m for m in self.traverse_postorder()\n if getattr(m, 'name', None) == name]\n if submodels:\n if len(submodels) > 1:\n raise ValueError(f\"More than one submodel named {name}\")\n\n old_model = submodels.pop()\n if len(old_model) != len(model):\n raise ValueError(\"New and old models must have equal values \"\n \"for n_models\")\n\n # Do this check first in order to raise a more helpful Exception,\n # although it would fail trying to construct the new CompoundModel\n if (old_model.n_inputs != model.n_inputs or\n old_model.n_outputs != model.n_outputs):\n raise ValueError(\"New model must match numbers of inputs and \"\n \"outputs of existing model\")\n\n tree = _get_submodel_path(self, name)\n while tree:\n branch = self.copy()\n for node in tree[:-1]:\n branch = getattr(branch, node)\n setattr(branch, tree[-1], model)\n model = CompoundModel(branch.op, branch.left, branch.right,\n name=branch.name)\n tree = tree[:-1]\n return model\n\n else:\n raise ValueError(f\"No submodels found named {name}\")\n\n def _set_sub_models_and_parameter_units(self, left, right):\n \"\"\"\n Provides a work-around to properly set the sub models and respective\n parameters's units/values when using ``without_units_for_data``\n or ``without_units_for_data`` methods.\n \"\"\"\n model = CompoundModel(self.op, left, right)\n\n self.left = left\n self.right = right\n\n for name in model.param_names:\n model_parameter = getattr(model, name)\n parameter = getattr(self, name)\n\n parameter.value = model_parameter.value\n parameter._set_unit(model_parameter.unit, force=True)\n\n def without_units_for_data(self, **kwargs):\n \"\"\"\n See `~astropy.modeling.Model.without_units_for_data` for overview\n of this method.\n\n Notes\n -----\n This modifies the behavior of the base method to account for the\n case where the sub-models of a compound model have different output\n units. This is only valid for compound * and / compound models as\n in that case it is reasonable to mix the output units. 
It does this\n by modifying the output units of each sub model by using the output\n units of the other sub model so that we can apply the original function\n and get the desired result.\n\n Additional data has to be output in the mixed output unit case\n so that the units can be properly rebuilt by\n `~astropy.modeling.CompoundModel.with_units_from_data`.\n\n Outside the mixed output units, this method is identical to the\n base method.\n \"\"\"\n if self.op in ['*', '/']:\n model = self.copy()\n inputs = {inp: kwargs[inp] for inp in self.inputs}\n\n left_units = self.left.output_units(**kwargs)\n right_units = self.right.output_units(**kwargs)\n\n if self.op == '*':\n left_kwargs = {out: kwargs[out] / right_units[out]\n for out in self.left.outputs if kwargs[out] is not None}\n right_kwargs = {out: kwargs[out] / left_units[out]\n for out in self.right.outputs if kwargs[out] is not None}\n else:\n left_kwargs = {out: kwargs[out] * right_units[out]\n for out in self.left.outputs if kwargs[out] is not None}\n right_kwargs = {out: 1 / kwargs[out] * left_units[out]\n for out in self.right.outputs if kwargs[out] is not None}\n\n left_kwargs.update(inputs.copy())\n right_kwargs.update(inputs.copy())\n\n left = self.left.without_units_for_data(**left_kwargs)\n if isinstance(left, tuple):\n left_kwargs['_left_kwargs'] = left[1]\n left_kwargs['_right_kwargs'] = left[2]\n left = left[0]\n\n right = self.right.without_units_for_data(**right_kwargs)\n if isinstance(right, tuple):\n right_kwargs['_left_kwargs'] = right[1]\n right_kwargs['_right_kwargs'] = right[2]\n right = right[0]\n\n model._set_sub_models_and_parameter_units(left, right)\n\n return model, left_kwargs, right_kwargs\n else:\n return super().without_units_for_data(**kwargs)\n\n def with_units_from_data(self, **kwargs):\n \"\"\"\n See `~astropy.modeling.Model.with_units_from_data` for overview\n of this method.\n\n Notes\n -----\n This modifies the behavior of the base method to account for the\n case where the sub-models of a compound model have different output\n units. This is only valid for compound * and / compound models as\n in that case it is reasonable to mix the output units. In order to\n do this it requires some additional information output by\n `~astropy.modeling.CompoundModel.without_units_for_data` passed as\n keyword arguments under the keywords ``_left_kwargs`` and ``_right_kwargs``.\n\n Outside the mixed output units, this method is identical to the\n base method.\n \"\"\"\n\n if self.op in ['*', '/']:\n left_kwargs = kwargs.pop('_left_kwargs')\n right_kwargs = kwargs.pop('_right_kwargs')\n\n left = self.left.with_units_from_data(**left_kwargs)\n right = self.right.with_units_from_data(**right_kwargs)\n\n model = self.copy()\n model._set_sub_models_and_parameter_units(left, right)\n\n return model\n else:\n return super().with_units_from_data(**kwargs)\n\n\ndef _get_submodel_path(model, name):\n \"\"\"Find the route down a CompoundModel's tree to the model with the\n specified name (whether it's a leaf or not)\"\"\"\n if getattr(model, 'name', None) == name:\n return []\n try:\n return ['left'] + _get_submodel_path(model.left, name)\n except (AttributeError, TypeError):\n pass\n try:\n return ['right'] + _get_submodel_path(model.right, name)\n except (AttributeError, TypeError):\n pass\n\n\ndef binary_operation(binoperator, left, right):\n '''\n Perform binary operation. 
Operands may be matching tuples of operands.\n '''\n if isinstance(left, tuple) and isinstance(right, tuple):\n return tuple([binoperator(item[0], item[1])\n for item in zip(left, right)])\n return binoperator(left, right)\n\n\ndef get_ops(tree, opset):\n \"\"\"\n Recursive function to collect operators used.\n \"\"\"\n if isinstance(tree, CompoundModel):\n opset.add(tree.op)\n get_ops(tree.left, opset)\n get_ops(tree.right, opset)\n else:\n return\n\n\ndef make_subtree_dict(tree, nodepath, tdict, leaflist):\n '''\n Traverse a tree noting each node by a key that indicates all the\n left/right choices necessary to reach that node. Each key will\n reference a tuple that contains:\n\n - reference to the compound model for that node.\n - left most index contained within that subtree\n (relative to all indices for the whole tree)\n - right most index contained within that subtree\n '''\n # if this is a leaf, just append it to the leaflist\n if not hasattr(tree, 'isleaf'):\n leaflist.append(tree)\n else:\n leftmostind = len(leaflist)\n make_subtree_dict(tree.left, nodepath+'l', tdict, leaflist)\n make_subtree_dict(tree.right, nodepath+'r', tdict, leaflist)\n rightmostind = len(leaflist)-1\n tdict[nodepath] = (tree, leftmostind, rightmostind)\n\n\n_ORDER_OF_OPERATORS = [('fix_inputs',), ('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]\nOPERATOR_PRECEDENCE = {}\nfor idx, ops in enumerate(_ORDER_OF_OPERATORS):\n for op in ops:\n OPERATOR_PRECEDENCE[op] = idx\ndel idx, op, ops\n\n\ndef fix_inputs(modelinstance, values, bounding_boxes=None, selector_args=None):\n \"\"\"\n This function creates a compound model with one or more of the input\n values of the input model assigned fixed values (scalar or array).\n\n Parameters\n ----------\n modelinstance : `~astropy.modeling.Model` instance\n This is the model that one or more of the\n model input values will be fixed to some constant value.\n values : dict\n A dictionary where the key identifies which input to fix\n and its value is the value to fix it at. 
The key may either be the\n name of the input or a number reflecting its order in the inputs.\n\n Examples\n --------\n\n >>> from astropy.modeling.models import Gaussian2D\n >>> g = Gaussian2D(1, 2, 3, 4, 5)\n >>> gv = fix_inputs(g, {0: 2.5})\n\n Results in a 1D function equivalent to Gaussian2D(1, 2, 3, 4, 5)(x=2.5, y)\n \"\"\"\n model = CompoundModel('fix_inputs', modelinstance, values)\n if bounding_boxes is not None:\n if selector_args is None:\n selector_args = tuple([(key, True) for key in values.keys()])\n bbox = CompoundBoundingBox.validate(modelinstance, bounding_boxes, selector_args)\n _selector = bbox.selector_args.get_fixed_values(modelinstance, values)\n\n new_bbox = bbox[_selector]\n new_bbox = new_bbox.__class__.validate(model, new_bbox)\n\n model.bounding_box = new_bbox\n return model\n\n\ndef bind_bounding_box(modelinstance, bounding_box, ignored=None, order='C'):\n \"\"\"\n Set a validated bounding box to a model instance.\n\n Parameters\n ----------\n modelinstance : `~astropy.modeling.Model` instance\n This is the model that the validated bounding box will be set on.\n bounding_box : tuple\n A bounding box tuple, see :ref:`astropy:bounding-boxes` for details\n ignored : list\n List of the inputs to be ignored by the bounding box.\n order : str, optional\n The ordering of the bounding box tuple, can be either ``'C'`` or\n ``'F'``.\n \"\"\"\n modelinstance.bounding_box = ModelBoundingBox.validate(modelinstance,\n bounding_box,\n ignored=ignored,\n order=order)\n\n\ndef bind_compound_bounding_box(modelinstance, bounding_boxes, selector_args,\n create_selector=None, ignored=None, order='C'):\n \"\"\"\n Add a validated compound bounding box to a model instance.\n\n Parameters\n ----------\n modelinstance : `~astropy.modeling.Model` instance\n This is the model that the validated compound bounding box will be set on.\n bounding_boxes : dict\n A dictionary of bounding box tuples, see :ref:`astropy:bounding-boxes`\n for details.\n selector_args : list\n List of selector argument tuples to define selection for compound\n bounding box, see :ref:`astropy:bounding-boxes` for details.\n create_selector : callable, optional\n An optional callable with interface (selector_value, model) which\n can generate a bounding box based on a selector value and model if\n there is no bounding box in the compound bounding box listed under\n that selector value. Default is ``None``, meaning new bounding\n box entries will not be automatically generated.\n ignored : list\n List of the inputs to be ignored by the bounding box.\n order : str, optional\n The ordering of the bounding box tuple, can be either ``'C'`` or\n ``'F'``.\n \"\"\"\n modelinstance.bounding_box = CompoundBoundingBox.validate(modelinstance,\n bounding_boxes, selector_args,\n create_selector=create_selector,\n ignored=ignored,\n order=order)\n\n\ndef custom_model(*args, fit_deriv=None):\n \"\"\"\n Create a model from a user defined function. The inputs and parameters of\n the model will be inferred from the arguments of the function.\n\n This can be used either as a function or as a decorator. See below for\n examples of both usages.\n\n The model is separable only if there is a single input.\n\n .. note::\n\n All model parameters have to be defined as keyword arguments with\n default values in the model function. 
Use `None` as a default argument\n value if you do not want to have a default value for that parameter.\n\n The standard settable model properties can be configured by default\n using keyword arguments matching the name of the property; however,\n these values are not set as model \"parameters\". Moreover, users\n cannot use keyword arguments matching non-settable model properties,\n with the exception of ``n_outputs`` which should be set to the number of\n outputs of your function.\n\n Parameters\n ----------\n func : function\n Function which defines the model. It should take N positional\n arguments where ``N`` is dimensions of the model (the number of\n independent variable in the model), and any number of keyword arguments\n (the parameters). It must return the value of the model (typically as\n an array, but can also be a scalar for scalar inputs). This\n corresponds to the `~astropy.modeling.Model.evaluate` method.\n fit_deriv : function, optional\n Function which defines the Jacobian derivative of the model. I.e., the\n derivative with respect to the *parameters* of the model. It should\n have the same argument signature as ``func``, but should return a\n sequence where each element of the sequence is the derivative\n with respect to the corresponding argument. This corresponds to the\n :meth:`~astropy.modeling.FittableModel.fit_deriv` method.\n\n Examples\n --------\n Define a sinusoidal model function as a custom 1D model::\n\n >>> from astropy.modeling.models import custom_model\n >>> import numpy as np\n >>> def sine_model(x, amplitude=1., frequency=1.):\n ... return amplitude * np.sin(2 * np.pi * frequency * x)\n >>> def sine_deriv(x, amplitude=1., frequency=1.):\n ... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)\n >>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)\n\n Create an instance of the custom model and evaluate it::\n\n >>> model = SineModel()\n >>> model(0.25)\n 1.0\n\n This model instance can now be used like a usual astropy model.\n\n The next example demonstrates a 2D Moffat function model, and also\n demonstrates the support for docstrings (this example could also include\n a derivative, but it has been omitted for simplicity)::\n\n >>> @custom_model\n ... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,\n ... alpha=1.0):\n ... \\\"\\\"\\\"Two dimensional Moffat function.\\\"\\\"\\\"\n ... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2\n ... return amplitude * (1 + rr_gg) ** (-alpha)\n ...\n >>> print(Moffat2D.__doc__)\n Two dimensional Moffat function.\n >>> model = Moffat2D()\n >>> model(1, 1) # doctest: +FLOAT_CMP\n 0.3333333333333333\n \"\"\"\n\n if len(args) == 1 and callable(args[0]):\n return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)\n elif not args:\n return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)\n else:\n raise TypeError(\n \"{0} takes at most one positional argument (the callable/\"\n \"function to be turned into a model. 
When used as a decorator \"\n \"it should be passed keyword arguments only (if \"\n \"any).\".format(__name__))\n\n\ndef _custom_model_inputs(func):\n \"\"\"\n Processes the inputs to the `custom_model`'s function into the appropriate\n categories.\n\n Parameters\n ----------\n func : callable\n\n Returns\n -------\n inputs : list\n list of evaluation inputs\n special_params : dict\n dictionary of model properties which require special treatment\n settable_params : dict\n dictionary of defaults for settable model properties\n params : dict\n dictionary of model parameters set by `custom_model`'s function\n \"\"\"\n inputs, parameters = get_inputs_and_params(func)\n\n special = ['n_outputs']\n settable = [attr for attr, value in vars(Model).items()\n if isinstance(value, property) and value.fset is not None]\n properties = [attr for attr, value in vars(Model).items()\n if isinstance(value, property) and value.fset is None and attr not in special]\n\n special_params = {}\n settable_params = {}\n params = {}\n for param in parameters:\n if param.name in special:\n special_params[param.name] = param.default\n elif param.name in settable:\n settable_params[param.name] = param.default\n elif param.name in properties:\n raise ValueError(f\"Parameter '{param.name}' cannot be a model property: {properties}.\")\n else:\n params[param.name] = param.default\n\n return inputs, special_params, settable_params, params\n\n\ndef _custom_model_wrapper(func, fit_deriv=None):\n \"\"\"\n Internal implementation `custom_model`.\n\n When `custom_model` is called as a function its arguments are passed to\n this function, and the result of this function is returned.\n\n When `custom_model` is used as a decorator a partial evaluation of this\n function is returned by `custom_model`.\n \"\"\"\n\n if not callable(func):\n raise ModelDefinitionError(\n \"func is not callable; it must be a function or other callable \"\n \"object\")\n\n if fit_deriv is not None and not callable(fit_deriv):\n raise ModelDefinitionError(\n \"fit_deriv not callable; it must be a function or other \"\n \"callable object\")\n\n model_name = func.__name__\n\n inputs, special_params, settable_params, params = _custom_model_inputs(func)\n\n if (fit_deriv is not None and\n len(fit_deriv.__defaults__) != len(params)):\n raise ModelDefinitionError(\"derivative function should accept \"\n \"same number of parameters as func.\")\n\n params = {param: Parameter(param, default=default)\n for param, default in params.items()}\n\n mod = find_current_module(2)\n if mod:\n modname = mod.__name__\n else:\n modname = '__main__'\n\n members = {\n '__module__': str(modname),\n '__doc__': func.__doc__,\n 'n_inputs': len(inputs),\n 'n_outputs': special_params.pop('n_outputs', 1),\n 'evaluate': staticmethod(func),\n '_settable_properties': settable_params\n }\n\n if fit_deriv is not None:\n members['fit_deriv'] = staticmethod(fit_deriv)\n\n members.update(params)\n\n cls = type(model_name, (FittableModel,), members)\n cls._separable = True if (len(inputs) == 1) else False\n return cls\n\n\ndef render_model(model, arr=None, coords=None):\n \"\"\"\n Evaluates a model on an input array. 
Evaluation is limited to\n a bounding box if the `Model.bounding_box` attribute is set.\n\n Parameters\n ----------\n model : `Model`\n Model to be evaluated.\n arr : `numpy.ndarray`, optional\n Array on which the model is evaluated.\n coords : array-like, optional\n Coordinate arrays mapping to ``arr``, such that\n ``arr[coords] == arr``.\n\n Returns\n -------\n array : `numpy.ndarray`\n The model evaluated on the input ``arr`` or a new array from\n ``coords``.\n If ``arr`` and ``coords`` are both `None`, the returned array is\n limited to the `Model.bounding_box` limits. If\n `Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.\n\n Examples\n --------\n :ref:`astropy:bounding-boxes`\n \"\"\"\n\n bbox = model.bounding_box\n\n if (coords is None) & (arr is None) & (bbox is None):\n raise ValueError('If no bounding_box is set,'\n 'coords or arr must be input.')\n\n # for consistent indexing\n if model.n_inputs == 1:\n if coords is not None:\n coords = [coords]\n if bbox is not None:\n bbox = [bbox]\n\n if arr is not None:\n arr = arr.copy()\n # Check dimensions match model\n if arr.ndim != model.n_inputs:\n raise ValueError('number of array dimensions inconsistent with '\n 'number of model inputs.')\n if coords is not None:\n # Check dimensions match arr and model\n coords = np.array(coords)\n if len(coords) != model.n_inputs:\n raise ValueError('coordinate length inconsistent with the number '\n 'of model inputs.')\n if arr is not None:\n if coords[0].shape != arr.shape:\n raise ValueError('coordinate shape inconsistent with the '\n 'array shape.')\n else:\n arr = np.zeros(coords[0].shape)\n\n if bbox is not None:\n # assures position is at center pixel, important when using add_array\n pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))\n for bb in bbox]).astype(int).T\n\n if coords is not None:\n sub_shape = tuple(delta * 2 + 1)\n sub_coords = np.array([extract_array(c, sub_shape, pos)\n for c in coords])\n else:\n limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]\n sub_coords = np.mgrid[limits]\n\n sub_coords = sub_coords[::-1]\n\n if arr is None:\n arr = model(*sub_coords)\n else:\n try:\n arr = add_array(arr, model(*sub_coords), pos)\n except ValueError:\n raise ValueError('The `bounding_box` is larger than the input'\n ' arr in one or more dimensions. Set '\n '`model.bounding_box = None`.')\n else:\n\n if coords is None:\n im_shape = arr.shape\n limits = [slice(i) for i in im_shape]\n coords = np.mgrid[limits]\n\n arr += model(*coords[::-1])\n\n return arr\n\n\ndef hide_inverse(model):\n \"\"\"\n This is a convenience function intended to disable automatic generation\n of the inverse in compound models by disabling one of the constituent\n model's inverse. This is to handle cases where user provided inverse\n functions are not compatible within an expression.\n\n Example:\n compound_model.inverse = hide_inverse(m1) + m2 + m3\n\n This will insure that the defined inverse itself won't attempt to\n build its own inverse, which would otherwise fail in this example\n (e.g., m = m1 + m2 + m3 happens to raises an exception for this\n reason.)\n\n Note that this permanently disables it. To prevent that either copy\n the model or restore the inverse later.\n \"\"\"\n del model.inverse\n return model\n"
] | [
[
"numpy.rollaxis",
"numpy.diag",
"numpy.sqrt",
"numpy.ndim",
"numpy.ceil",
"numpy.size",
"numpy.asanyarray",
"numpy.shape",
"numpy.any",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
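The code cell above documents `fix_inputs` and `custom_model` through doctest examples and implements the `CompoundModel` operators they rely on. As a hedged illustration of how those pieces fit together (assuming only a standard astropy installation; the final composition step is my own addition, not taken from the record):

# Illustrative sketch only: combines the fix_inputs and custom_model usages
# documented in the record above; assumes astropy is installed.
import numpy as np
from astropy.modeling import custom_model, fix_inputs
from astropy.modeling.models import Gaussian2D

@custom_model
def SineModel(x, amplitude=1., frequency=1.):
    return amplitude * np.sin(2 * np.pi * frequency * x)

g = Gaussian2D(1, 2, 3, 4, 5)
g_slice = fix_inputs(g, {0: 2.5})   # 1D model: y -> Gaussian2D(x=2.5, y)
pipeline = g_slice | SineModel()    # '|' builds a CompoundModel, as in the record
print(pipeline(3.0))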
AlexRookie/neurocluster | [
"685e4c2930e7af68b09a5ae8ed7008936d6e49d4"
] | [
"MATLAB_path_generation/network_lvq.py"
] | [
"import sys\nsys.path.append(\"./libraries/Modified-SOM\")\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom detection.competitive_learning import SOM, CombineSomLvq\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import confusion_matrix, accuracy_score\n\nclass Network:\n def __init__(self):\n self.model = []\n self.batch_size = 32\n # Label encoder\n self.encoder = LabelEncoder()\n\n def define_model(self, map_size=(10,10)):\n map_size = np.asarray(map_size).astype(int)\n # Setting the random state\n random_state = 17\n self.model = SOM(n_rows=map_size[0], n_cols=map_size[1], random_state=random_state) # CombineSomLvq(n_rows=map_size[0], n_cols=map_size[1], random_state=random_state)\n return self.model\n\n def prepare_data(self, X, y, training_percentage=70, batch=32, randomize=True):\n X = np.asarray(X)\n y = np.asarray(y)\n self.batch_size = int(batch)\n\n if randomize:\n perm = np.random.permutation(X.shape[0])\n if len(X.shape)==2:\n X = X[perm,:]\n if len(X.shape)==3:\n X = X[perm,:,:]\n y = y[perm]\n\n num_of_samples = X.shape[0]\n train = int(training_percentage*num_of_samples/100)\n valid = num_of_samples-train\n\n if train < self.batch_size or valid < self.batch_size:\n self.batch_size = 1\n else:\n # Samples must be multiplier of batch\n train = int(train/self.batch_size) * self.batch_size\n valid = num_of_samples-train\n valid = int(valid/self.batch_size) * self.batch_size\n\n if len(X.shape)==2:\n X_train = X[0:train,:]\n X_valid = X[train:train+valid,:]\n if len(X.shape)==3:\n X_train = X[0:train,:,:]\n X_valid = X[train:train+valid,:,:]\n y_train = y[0:train]\n y_valid = y[train:train+valid]\n\n y_train = self.encoder.fit_transform(y_train)\n\n self.X_train = np.ascontiguousarray(X_train)\n self.y_train = np.ascontiguousarray(y_train)\n self.X_valid = np.ascontiguousarray(X_valid)\n self.y_valid = np.ascontiguousarray(y_valid)\n return self.X_train, self.y_train, self.X_valid, self.y_valid\n\n def train_model(self, X_train, y_train, epochs_unsup=50, epochs_sup=50, learn_rate=0.01):\n X_train = np.asarray(X_train)\n y_train = np.asarray(y_train).astype(np.int8)\n epochs_unsup = int(epochs_unsup)\n epochs_sup = int(epochs_sup)\n\n # Train the SOM\n self.model.fit(X_train, None, weights_init= \"pca\",\n num_iters = epochs_unsup, batch_size = self.batch_size, \n neighborhood = \"gaussian\",\n learning_rate = 0.75, learning_decay_rate = 1, learning_rate_decay_function = None,\n sigma = 1, sigma_decay_rate = 1, sigma_decay_function = None, num_clusters = 3, \n conscience = False, verbose = 1)\n\n # Train the LVQ\n #self.model.fit(X_train, y_train, weights_init = \"pca\", labels_init = None,\n # unsup_num_iters = epochs_unsup, unsup_batch_size = self.batch_size,\n # sup_num_iters = epochs_sup, sup_batch_size = self.batch_size,\n # neighborhood = \"gaussian\",\n # learning_rate = learn_rate, learning_decay_rate = 1, learning_rate_decay_function = None,\n # sigma = 1, sigma_decay_rate = 1, sigma_decay_function = None,\n # conscience = False, verbose = 1)\n return self.model\n\n def predict(self, X_valid, y_valid=None):\n X_valid = np.asarray(X_valid)\n\n # Predict the result\n y_pred = self.model.predict(X_valid)\n #y_pred = self.encoder.inverse_transform(y_pred)\n\n #if y_valid is not None:\n # y_valid = np.asarray(y_valid)\n # # Make confusion matrix\n # cm = confusion_matrix(y_valid, y_pred)\n # # Print the confusion matrix\n # print(cm)\n # print('Accuracy:', accuracy_score(y_valid, y_pred))\n\n y_pred = np.asarray(y_pred)\n return y_pred\n\n 
def predict_som(self, X_valid, y_valid=None):\n X_valid = np.asarray(X_valid)\n # Predict the result\n y_pred = self.model.predict_som(X_valid)\n y_pred = np.asarray(y_pred)\n return y_pred\n\n def get_data(self):\n weights, labels, bias, clus_labels = [], [], [], []\n weights = self.model._competitive_layer_weights\n #labels = self.model._nodes_label\n bias = self.model._bias\n clus_labels = self.model.cluster_label\n\n weights = np.asarray(weights)\n labels = np.asarray(labels)\n bias = np.asarray(bias)\n clus_labels = np.asarray(clus_labels)\n return weights, labels, bias, clus_labels\n\n def load_data(self, weights=None, labels=None, bias=None, clus_labels=None):\n weights = np.asarray(weights)\n labels = np.asarray(labels)\n bias = np.asarray(bias)\n clus_labels = np.asarray(clus_labels)\n\n self.model._competitive_layer_weights = weights\n self.model._nodes_label = labels\n self.model._bias = bias\n self.model.clus_labels = clus_labels\n return\n\n #def plot(self):\n # # Plot\n # som_weights = self.model.get_layer(name='SOM').get_weights()[0]\n # \n # fig1, axes = plt.subplots(nrows=self.map_size[0], ncols=self.map_size[1], figsize=(10, 10))\n # for k in range(self.map_size[0] * self.map_size[1]):\n # axes[k // self.map_size[1]][k % self.map_size[1]].imshow(som_weights[k].reshape(2, self.units), cmap='gray')\n # axes[k // self.map_size[1]][k % self.map_size[1]].axis('off')\n # plt.subplots_adjust(hspace=0.05, wspace=0.05)\n # \n # plt.draw() # non-blocking plot\n # plt.pause(0.1)\n\n#=======================================================================================#\n"
] | [
[
"numpy.asarray",
"sklearn.preprocessing.LabelEncoder",
"numpy.random.permutation",
"numpy.ascontiguousarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
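`Network.prepare_data` in the record above shuffles the data, then rounds both the training and validation splits down to multiples of the batch size, falling back to a batch of 1 when either split is too small. A standalone numpy sketch of that same rule; the helper name is hypothetical and not part of the repository:

# Standalone sketch of the split-and-trim rule used by prepare_data above.
import numpy as np

def split_to_batch_multiples(X, y, training_percentage=70, batch=32):
    perm = np.random.permutation(X.shape[0])          # same shuffle as prepare_data
    X, y = X[perm], y[perm]

    train = int(training_percentage * X.shape[0] / 100)
    valid = X.shape[0] - train
    if train < batch or valid < batch:
        batch = 1                                     # fallback used by the original
    train = (train // batch) * batch                  # round splits down to batch multiples
    valid = ((X.shape[0] - train) // batch) * batch
    return X[:train], y[:train], X[train:train + valid], y[train:train + valid]

X = np.random.rand(128, 8)
y = np.random.randint(0, 3, size=128)
X_tr, y_tr, X_va, y_va = split_to_batch_multiples(X, y)
print(X_tr.shape, X_va.shape)                         # (64, 8) (64, 8)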
Femi123p/Cluster-py | [
"7af0ef5a4c06a2641cec9f0d916987eca678c619"
] | [
"cluster_gmm.py"
] | [
"from __future__ import division, print_function\r\nfrom elasticsearch import Elasticsearch\r\nimport numpy as np\r\nimport json\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.mixture import GaussianMixture\r\n\r\n\r\n\r\n\r\n\r\nes=Elasticsearch() #elasticsearch object created\r\ni=0\r\nelement = []\r\ncluster_list=[]\r\ncluster_list1=[]\r\ncluster_list2=[]\r\ncluster_list3=[]\r\ncluster_list4=[]\r\ncluster_list5=[]\r\nwith open('wiki_references_dummy.json') as json_data:\r\n\tfor each_line in json_data: \r\n\t\tif i>20:\r\n\t\t\tbreak\r\n\t\td=json.loads(each_line) #converts json to python dict\r\n\t\tgetsource=es.get(index='ref_dummy',doc_type='wiki_references',id=i)\r\n\t\ts=\"\"\r\n\t\tfor word in getsource[\"_source\"][\"combined_topicsequence\"]:\r\n\t\t\ts=s+\" \"+word\r\n\t\telement.append(s)\r\n\t\ti=i+1\r\n\r\n\t\t\r\nlength_element=len(element)\t\r\nvectorize=CountVectorizer()\r\nx=vectorize.fit_transform(element)\r\nmatrix=x.toarray()\r\n\r\n\r\n\r\ngmm=GaussianMixture(covariance_type='full', max_iter=100, n_components=6)\r\ngmm.fit(matrix)\r\nindex=0\r\nfor ary in gmm.predict_proba(matrix).tolist():\r\n\t#print(str(index)+\" \"+str(ary))\r\n\tindex=index+1\r\n\r\ncount=0\r\ncount1=0\r\ncount2=0\r\ncount3=0\r\ncount4=0\r\ncount5=0\r\nindex1=0\r\nindex2=[]\r\nfor arr in gmm.predict_proba(matrix).tolist():\r\n\tif arr[0]!=0:\r\n\t\tcount=count+1\r\n\t\tcluster_list.append(arr)\r\n\t\tindex2.append(index1)\r\n\tindex1=index1+1\r\nprint (\"cluster no 1\"+\"- \"+\"total no of clusters:\"+str(count)+\"- \"+\"cluster index:\"+str(index2))\r\nprint (cluster_list)\r\n\r\n\r\nindex2=0\r\nindex3=[]\r\nfor arr in gmm.predict_proba(matrix).tolist():\r\n\tif arr[1]!=0:\r\n\t\tcount1=count1+1\r\n\t\tcluster_list1.append(arr)\r\n\t\tindex3.append(index2)\r\n\tindex2=index2+1\r\nprint(\"cluster no 2\"+\"- \"+\"total no of clusters:\"+str(count1)+\"- \"+\"cluster index:\"+str(index3))\r\nprint (cluster_list1)\r\n\r\nindex3=0\r\nindex4=[]\r\nfor arr in gmm.predict_proba(matrix).tolist():\t\t\r\n\tif arr[2]!=0:\r\n\t\tcount2=count2+1\r\n\t\tcluster_list2.append(arr)\r\n\t\tindex4.append(index3)\r\n\tindex3=index3+1\r\nprint(\"cluster no 3\"+\"- \"+\"total no of clusters:\"+str(count2)+\"- \"+\"cluster index:\"+str(index4))\r\nprint (cluster_list2)\r\n\r\nindex4=0\r\nindex5=[]\r\nfor arr in gmm.predict_proba(matrix).tolist():\t\r\n\tif arr[3]!=0:\r\n\t\tcount3=count3+1\r\n\t\tcluster_list3.append(arr)\r\n\t\tindex5.append(index4)\r\n\tindex4=index4+1\r\nprint(\"cluster no 4\"+\"- \"+\"total no of clusters:\"+str(count3)+\"- \"+\"cluster index:\"+str(index5))\r\nprint (cluster_list3)\r\n\r\nindex5=0\r\nindex6=[]\r\nfor arr in gmm.predict_proba(matrix).tolist():\t\t\r\n\tif arr[4]!=0:\r\n\t\tcount4=count4+1\r\n\t\tcluster_list4.append(arr)\r\n\t\tindex6.append(index5)\r\n\tindex5=index5+1\r\nprint(\"cluster no 5\"+\"- \"+\"total no of clusters:\"+str(count4)+\"- \"+\"cluster index:\"+str(index6))\r\nprint (cluster_list4)\r\n\r\nindex6=0\r\nindex7=[]\r\nfor arr in gmm.predict_proba(matrix).tolist():\r\n\tif arr[5]!=0:\r\n\t\tcount5=count5+1\r\n\t\tcluster_list5.append(arr)\r\n\t\tindex7.append(index6)\r\n\tindex6=index6+1\r\nprint(\"cluster no 6\"+\"- \"+\"total no of clusters:\"+str(count5)+\"- \"+\"cluster index:\"+str(index7))\r\nprint(cluster_list5)\r\n\t\t\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
] | [
[
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.mixture.GaussianMixture"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
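The script above repeats an identical membership loop for each of its six mixture components. A hedged sketch of the same grouping done once over `GaussianMixture.predict_proba`, using toy documents in place of the `ref_dummy` Elasticsearch index (assumed unavailable here):

# Compact version of the per-component grouping done in the record above,
# with synthetic documents instead of the Elasticsearch 'ref_dummy' index.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.mixture import GaussianMixture

docs = ["alpha beta", "beta gamma", "alpha alpha", "gamma delta",
        "delta beta", "alpha gamma", "beta beta", "delta delta"]
matrix = CountVectorizer().fit_transform(docs).toarray()

gmm = GaussianMixture(n_components=3, covariance_type='full', max_iter=100, random_state=0)
gmm.fit(matrix)

proba = gmm.predict_proba(matrix)
for k in range(gmm.n_components):
    members = [i for i, row in enumerate(proba) if row[k] > 0]
    print("cluster no", k + 1, "- total:", len(members), "- indices:", members)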
viniciusguigo/FastMOT | [
"9f544c89e5d3e6dca711abee90ac604ed661397f"
] | [
"fastmot/models/calibrator.py"
] | [
"import os\nimport numpy as np\nimport pycuda.driver as cuda\nimport pycuda.autoinit\nimport tensorrt as trt\nimport cv2\n\n\nclass SSDEntropyCalibrator(trt.IInt8EntropyCalibrator2):\n def __init__(self, model_shape, data_dir, cache_file):\n # Whenever you specify a custom constructor for a TensorRT class,\n # you MUST call the constructor of the parent explicitly.\n trt.IInt8EntropyCalibrator2.__init__(self)\n\n self.model_shape = model_shape\n self.num_calib_imgs = 100 # the number of images from the dataset to use for calibration\n self.batch_size = 10\n self.batch_shape = (self.batch_size, *self.model_shape)\n self.cache_file = cache_file\n\n calib_imgs = [os.path.join(data_dir, f) for f in os.listdir(data_dir)]\n self.calib_imgs = np.random.choice(calib_imgs, self.num_calib_imgs)\n self.counter = 0 # for keeping track of how many files we have read\n\n self.device_input = cuda.mem_alloc(trt.volume(self.batch_shape) * trt.float32.itemsize)\n\n def get_batch_size(self):\n return self.batch_size\n\n # TensorRT passes along the names of the engine bindings to the get_batch function.\n # You don't necessarily have to use them, but they can be useful to understand the order of\n # the inputs. The bindings list is expected to have the same ordering as 'names'.\n def get_batch(self, names):\n\n # if there are not enough calibration images to form a batch,\n # we have reached the end of our data set\n if self.counter == self.num_calib_imgs:\n return None\n\n batch_imgs = np.zeros((self.batch_size, trt.volume(self.model_shape)))\n for i in range(self.batch_size):\n img = cv2.imread(self.calib_imgs[self.counter + i])\n img = cv2.resize(img, (self.model_shape[2], self.model_shape[1]))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # HWC -> CHW\n img = img.transpose((2, 0, 1))\n # Normalize to [-1.0, 1.0] interval (expected by model)\n img = (2.0 / 255.0) * img - 1.0\n # add this image to the batch array\n batch_imgs[i, :] = img.ravel()\n\n # increase the counter for this batch\n self.counter += self.batch_size\n\n # Copy to device, then return a list containing pointers to input device buffers.\n cuda.memcpy_htod(self.device_input, batch_imgs.astype(np.float32))\n return [int(self.device_input)]\n\n def read_calibration_cache(self):\n # If there is a cache, use it instead of calibrating again.\n if os.path.exists(self.cache_file):\n with open(self.cache_file, \"rb\") as f:\n return f.read()\n\n def write_calibration_cache(self, cache):\n with open(self.cache_file, \"wb\") as f:\n f.write(cache)\n"
] | [
[
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
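`SSDEntropyCalibrator.get_batch` above preprocesses each calibration image inline: resize, BGR to RGB, HWC to CHW, scaling to [-1, 1], then flattening into the batch buffer. A self-contained sketch of just that transform, run on a synthetic frame so no `data_dir` or GPU is needed:

# Mirrors the per-image preprocessing inside get_batch above, applied to a
# random frame so it runs without a calibration image directory.
import cv2
import numpy as np

model_shape = (3, 300, 300)                                      # CHW, as in the calibrator
img = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)   # fake BGR frame

img = cv2.resize(img, (model_shape[2], model_shape[1]))          # cv2 takes (W, H)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.transpose((2, 0, 1))                                   # HWC -> CHW
img = (2.0 / 255.0) * img - 1.0                                  # normalize to [-1, 1]

flat = img.ravel()                                               # one row of the batch buffer
print(flat.shape, flat.min(), flat.max())                        # (270000,) values in [-1, 1]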
julio-cmdr/rljax | [
"cbca4638deb6d4e960e71a862573129ba4c5ea79"
] | [
"rljax/algorithm/base_class/q_learning.py"
] | [
"import os\nfrom abc import abstractmethod\n\nimport numpy as np\n\nfrom rljax.algorithm.base_class.base_algoirithm import OffPolicyAlgorithm\nfrom rljax.util import fake_state, load_params, save_params\n\n\nclass QLearning(OffPolicyAlgorithm):\n \"\"\"\n Base class for discrete Q-learning algorithms.\n \"\"\"\n\n def __init__(\n self,\n num_agent_steps,\n state_space,\n action_space,\n seed,\n max_grad_norm,\n gamma,\n nstep,\n buffer_size,\n use_per,\n batch_size,\n start_steps,\n update_interval,\n update_interval_target,\n eps,\n eps_eval,\n eps_decay_steps,\n loss_type,\n dueling_net,\n double_q,\n ):\n assert loss_type in [\"huber\", \"l2\"]\n super(QLearning, self).__init__(\n num_agent_steps=num_agent_steps,\n state_space=state_space,\n action_space=action_space,\n seed=seed,\n max_grad_norm=max_grad_norm,\n gamma=gamma,\n nstep=nstep,\n buffer_size=buffer_size,\n use_per=use_per,\n batch_size=batch_size,\n start_steps=start_steps,\n update_interval=update_interval,\n update_interval_target=update_interval_target,\n )\n self.eps = eps\n self.eps_eval = eps_eval\n self.eps_decay_steps = eps_decay_steps\n self.loss_type = loss_type\n self.dueling_net = dueling_net\n self.double_q = double_q\n # Define fake input.\n if not hasattr(self, \"fake_args\"):\n self.fake_args = (fake_state(state_space),)\n # If _forward() method uses random key or not.\n if not hasattr(self, \"use_key_forward\"):\n self.use_key_forward = False\n # Number of random keys for _loss() method.\n if not hasattr(self, \"num_keys_loss\"):\n self.num_keys_loss = 0\n\n def select_action(self, state):\n if np.random.rand() < self.eps_eval:\n action = self.action_space.sample()\n else:\n action = self.forward(state[None, ...])\n action = np.array(action[0])\n return action\n\n def explore(self, state):\n if np.random.rand() < self.eps_train:\n action = self.action_space.sample()\n else:\n action = self.forward(state[None, ...])\n action = np.array(action[0])\n return action\n\n def forward(self, state):\n return self._forward(self.params, state, **self.kwargs_forward)\n\n @abstractmethod\n def _forward(self, params, state, *args, **kwargs):\n pass\n\n @abstractmethod\n def _calculate_value(self, params, state, action, *args, **kwargs):\n pass\n\n @abstractmethod\n def _calculate_target(self, params, params_target, reward, done, next_state, *args, **kwargs):\n pass\n\n @abstractmethod\n def _calculate_loss_and_abs_td(self, value, target, weight, *args, **kwargs):\n pass\n\n @property\n def eps_train(self):\n \"\"\"Returns the current epsilon for the agent's epsilon-greedy policy.\n This follows the Nature DQN schedule of a linearly decaying epsilon (Mnih et\n al., 2015). The schedule is as follows:\n Begin at 1. until start_steps steps have been taken; then\n Linearly decay epsilon from 1. to eps in eps_decay_steps steps; and then\n Use eps from there on.\n \"\"\"\n steps_left = self.eps_decay_steps + self.start_steps - self.agent_step\n bonus = (1.0 - self.eps) * steps_left / self.eps_decay_steps\n bonus = np.clip(bonus, 0., 1. 
- self.eps)\n return self.eps + bonus\n\n @property\n def kwargs_forward(self):\n return {\"key\": next(self.rng)} if self.use_key_forward else {}\n\n @property\n def kwargs_update(self):\n return {\"key_list\": self.get_key_list(self.num_keys_loss)} if self.num_keys_loss else {}\n\n def save_params(self, save_dir):\n save_params(self.params, os.path.join(save_dir, \"params.npz\"))\n\n def load_params(self, save_dir):\n self.params = self.params_target = load_params(os.path.join(save_dir, \"params.npz\"))\n"
] | [
[
"numpy.array",
"numpy.random.rand",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
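The `eps_train` property above implements the Nature-DQN linear epsilon schedule: hold epsilon at 1 for `start_steps`, decay linearly over `eps_decay_steps`, then hold at `eps`. A standalone worked example of that formula (the parameter values are illustrative, not taken from any config in the repository):

# Reimplements the eps_train schedule from the record above as a plain function.
import numpy as np

def eps_train(agent_step, eps=0.01, start_steps=1000, eps_decay_steps=10000):
    steps_left = eps_decay_steps + start_steps - agent_step
    bonus = (1.0 - eps) * steps_left / eps_decay_steps
    bonus = np.clip(bonus, 0.0, 1.0 - eps)
    return eps + bonus

for step in (0, 1000, 6000, 11000, 20000):
    print(step, round(float(eps_train(step)), 3))
# 0 -> 1.0 (warm-up), 1000 -> 1.0, 6000 -> 0.505, 11000 -> 0.01, 20000 -> 0.01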
ngchc/deepBoosting | [
"13b3515c16f0d9a0a92b990dfb5eef09ec1a7298"
] | [
"train/model.py"
] | [
"\"\"\"\nThe core Boosting Network Model\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nfrom ops import *\n\n\nclass Net(object):\n\tdef __init__(self, data, label, wl):\n\t\t\"\"\"\n\t\tArgs: [0, 1]\n\t\t data : [batch_size, height, width, channels] float32\n\t\t label: [batch_size, height, width, channels] float32\n\t\t\"\"\"\n\t\t# training dataset\n\t\tself.data = data\n\t\tself.label = label\n\t\tself.batchsize = self.data.get_shape().as_list()[0]\n\t\t\n\t\t# parameter lists for weights and biases\n\t\tself.W_params = []\n\t\tself.b_params = []\n\t\t\n\t\t# coefficient of weight decay\n\t\tself.wl = wl\n\t\n\t\n\tdef _check_shape(self, a, b, scale=1):\n\t\tN1, C1, H1, W1 = a.get_shape().as_list()\n\t\tN2, C2, H2, W2 = b.get_shape().as_list()\n\t\t\n\t\tassert N1 == N2, 'Inequality of batchs!'\n\t\tassert C1 == C2, 'Inequality of channels!'\n\t\tassert H1 == H2 / scale, 'Inequality of heights!'\n\t\tassert W1 == W2 / scale, 'Inequality of widths!'\n\t\n\t\n\tdef dfus_block(self, bottom, i):\n\t\tact = tf.nn.relu\n\t\t\n\t\twith tf.name_scope('dfus_block' + str(i)):\n\t\t\tconv1 = act(conv2d(bottom, 24, [1, 1], wl=None, scope='conv' + str(i) + '_i'), name='relu' + str(i) + '_i')\n\t\t\t\n\t\t\tfeat1 = act(conv2d(conv1, 6, [3, 3], wl=self.wl, scope='conv' + str(i) + '_1'), name='relu' + str(i) + '_1')\n\t\t\tfeat15 = act(conv2d(feat1, 3, [3, 3], dilated=2, wl=self.wl, scope='conv' + str(i) + '_15'), name='relu' + str(i) + '_15')\n\t\t\t\n\t\t\tfeat2 = act(conv2d(conv1, 6, [3, 3], dilated=2, wl=self.wl, scope='conv' + str(i) + '_2'), name='relu' + str(i) + '_2')\n\t\t\tfeat23 = act(conv2d(feat2, 3, [3, 3], wl=self.wl, scope='conv' + str(i) + '_23'), name='relu' + str(i) + '_23')\n\t\t\t\n\t\t\tfeat = tf.concat([feat1, feat15, feat2, feat23], 1, name='conv' + str(i) + '_c1')\n\t\t\tfeat = act(conv2d(feat, 8, [1, 1], wl=None, scope='conv' + str(i) + '_r'), name='relu' + str(i) + '_r')\n\t\t\t\n\t\t\ttop = tf.concat([bottom, feat], 1, name='conv' + str(i) + '_c2')\n\t\t\n\t\treturn top\n\t\n\t\n\tdef ddfn(self, bottom, step):\n\t\tact = tf.nn.relu\n\t\t\n\t\twith tf.variable_scope('ddfn_' + str(step)):\n\t\t\twith tf.name_scope('msfeat'):\n\t\t\t\tconv13 = act(conv2d(bottom, 8, [3, 3], wl=self.wl, scope='conv1_3'), name='relu1_3')\n\t\t\t\tconv15 = act(conv2d(bottom, 8, [3, 3], dilated=2, wl=self.wl, scope='conv1_5'), name='relu1_5')\n\t\t\t\t\n\t\t\t\tconv133 = act(conv2d(conv13, 6, [3, 3], dilated=2, wl=self.wl, scope='conv1_3_3'), name='relu1_3_3')\n\t\t\t\tconv153 = act(conv2d(conv15, 6, [3, 3], wl=self.wl, scope='conv1_5_3'), name='relu1_5_3')\n\t\t\t\t\n\t\t\t\tconv1 = tf.concat([conv13, conv15, conv133, conv153], 1, name='conv1_c')\n\t\t\t\n\t\t\tfeat = self.dfus_block(conv1, 2)\n\t\t\t\n\t\t\tfor i in range(3, 10, 1):\n\t\t\t\tfeat = self.dfus_block(feat, i)\n\t\t\t\n\t\t\ttop = conv2d(feat, 1, [1, 1], W_init=tf.truncated_normal_initializer(mean=0.0, stddev=0.001),\n\t\t\t add_biases=False, wl=None, scope='convr')\n\t\t\t\n\t\t\treturn top\n\t\n\t\n\tdef build_net(self, summary=False):\n\t\twith tf.name_scope('net'):\n\t\t\toutputs = self.ddfn(self.data, 1)\n\t\t\t\n\t\t\t# crop the boundary\n\t\t\toutputs = tf.image.crop_to_bounding_box(tf.transpose(outputs, [0, 2, 3, 1]), 1, 1, 48, 48)\n\t\t\tlabels = tf.image.crop_to_bounding_box(tf.transpose(self.label, [0, 2, 3, 1]), 1, 1, 48, 48)\n\t\t\t\n\t\t\t# mean square error\n\t\t\tself.l2_loss = (1.0 / self.batchsize) * tf.nn.l2_loss(outputs - labels)\n\t\t\ttf.add_to_collection('losses', self.l2_loss)\n\t\t\t\n\t\t\t# total 
loss collected from 'losses'\n\t\t\tself.total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')\n\t\t\t\n\t\t\t# summary data and label images\n\t\t\tif summary:\n\t\t\t\ttf.summary.image('data', self.data, max_outputs=1)\n\t\t\t\ttf.summary.image('label', labels, max_outputs=1)\n\t\t\t\ttf.summary.image('output', outputs, max_outputs=1)\n"
] | [
[
"tensorflow.transpose",
"tensorflow.concat",
"tensorflow.get_collection",
"tensorflow.summary.image",
"tensorflow.truncated_normal_initializer",
"tensorflow.nn.l2_loss",
"tensorflow.name_scope",
"tensorflow.add_to_collection"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
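`Net.build_net` above crops a 1-pixel border from both prediction and label before taking a per-batch L2 loss. A minimal sketch of that crop-then-L2 step written for eager TensorFlow 2 (the record itself pins TensorFlow 1.10 and runs in graph mode), with random tensors standing in for network outputs:

# Hedged sketch of the crop-then-L2 loss from build_net above, eager TF2 style.
import tensorflow as tf

batch = 4
outputs = tf.random.uniform((batch, 50, 50, 1))   # NHWC, i.e. after the transpose in build_net
labels = tf.random.uniform((batch, 50, 50, 1))

outputs_c = tf.image.crop_to_bounding_box(outputs, 1, 1, 48, 48)   # drop a 1-pixel border
labels_c = tf.image.crop_to_bounding_box(labels, 1, 1, 48, 48)

l2_loss = (1.0 / batch) * tf.nn.l2_loss(outputs_c - labels_c)
print(float(l2_loss))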
ojaswa-privado/ReAgent | [
"e990e66f69369cbe89212e334191180716c9bf4e"
] | [
"reagent/model_utils/seq2slate_utils.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\nimport copy\nimport logging\nimport math\nfrom enum import Enum\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nlogger = logging.getLogger(__name__)\n\nPADDING_SYMBOL = 0\nDECODER_START_SYMBOL = 1\n\n\nclass Seq2SlateMode(Enum):\n RANK_MODE = \"rank\"\n PER_SEQ_LOG_PROB_MODE = \"per_sequence_log_prob\"\n PER_SYMBOL_LOG_PROB_DIST_MODE = \"per_symbol_log_prob_dist\"\n DECODE_ONE_STEP_MODE = \"decode_one_step\"\n ENCODER_SCORE_MODE = \"encoder_score_mode\"\n\n\nclass Seq2SlateOutputArch(Enum):\n # Only output encoder scores\n ENCODER_SCORE = \"encoder_score\"\n\n # A decoder outputs a sequence in an autoregressive way\n AUTOREGRESSIVE = \"autoregressive\"\n\n # Using encoder scores, a decoder outputs a sequence using\n # frechet sort (equivalent to iterative softmax)\n FRECHET_SORT = \"frechet_sort\"\n\n\ndef print_model_info(seq2slate):\n def _num_of_params(model):\n return len(torch.cat([p.flatten() for p in model.parameters()]))\n\n logger.info(f\"Num of total params: {_num_of_params(seq2slate)}\")\n logger.info(f\"Num of Encoder params: {_num_of_params(seq2slate.encoder)}\")\n logger.info(\n f\"Num of Candidate Embedder params: {_num_of_params(seq2slate.candidate_embedder)}\"\n )\n logger.info(\n f\"Num of State Embedder params: {_num_of_params(seq2slate.state_embedder)}\"\n )\n if seq2slate.output_arch == Seq2SlateOutputArch.FRECHET_SORT:\n logger.info(\n f\"Num of Encoder_Scorer params: {_num_of_params(seq2slate.encoder_scorer)}\"\n )\n elif seq2slate.output_arch == Seq2SlateOutputArch.AUTOREGRESSIVE:\n logger.info(\n f\"Num of Positional Encoding params: {_num_of_params(seq2slate.positional_encoding_decoder)}\"\n )\n logger.info(f\"Num of Decoder params: {_num_of_params(seq2slate.decoder)}\")\n elif seq2slate.output_arch == Seq2SlateOutputArch.ENCODER_SCORE:\n logger.info(\n f\"Num of Encoder_Scorer params: {_num_of_params(seq2slate.encoder_scorer)}\"\n )\n\n\ndef mask_logits_by_idx(logits, tgt_in_idx):\n # logits shape: batch_size, seq_len, candidate_size\n # tgt_in_idx shape: batch_size, seq_len\n\n # the first two symbols are reserved for padding and decoder-starting symbols\n # so they should never be a possible output label\n logits[:, :, :2] = float(\"-inf\")\n\n batch_size, seq_len = tgt_in_idx.shape\n mask_indices = torch.tril(\n tgt_in_idx.repeat(1, seq_len).reshape(batch_size, seq_len, seq_len), diagonal=0\n )\n logits = logits.scatter(2, mask_indices, float(\"-inf\"))\n return logits\n\n\ndef subsequent_mask(size: int, device: torch.device):\n \"\"\"\n Mask out subsequent positions. 
Mainly used in the decoding process,\n in which an item should not attend subsequent items.\n\n mask_ijk = 0 if the item should be ignored; 1 if the item should be paid attention\n \"\"\"\n subsequent_mask = ~torch.triu(\n torch.ones(1, size, size, device=device, dtype=torch.bool), diagonal=1\n )\n return subsequent_mask\n\n\n# TODO (@czxttkl): use when we introduce padding\ndef subsequent_and_padding_mask(tgt_in_idx):\n \"\"\" Create a mask to hide padding and future items \"\"\"\n # tgt_in_idx shape: batch_size, seq_len\n\n # tgt_tgt_mask shape: batch_size, 1, seq_len\n tgt_tgt_mask = (tgt_in_idx != PADDING_SYMBOL).unsqueeze(-2).type(torch.int8)\n # subseq_mask shape: 1, seq_len, seq_len\n subseq_mask = subsequent_mask(tgt_in_idx.size(-1), tgt_in_idx.device)\n # tgt_tgt_mask shape: batch_size, seq_len, seq_len\n tgt_tgt_mask = tgt_tgt_mask & subseq_mask\n return tgt_tgt_mask\n\n\ndef clones(module, N):\n \"\"\"\n Produce N identical layers.\n\n :param module: nn.Module class\n :param N: number of copies\n \"\"\"\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\ndef attention(query, key, value, mask, d_k):\n \"\"\" Scaled Dot Product Attention \"\"\"\n # mask shape: batch_size x 1 x seq_len x seq_len\n\n # scores shape: batch_size x num_heads x seq_len x seq_len\n scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)\n scores = scores.masked_fill(mask == 0, float(\"-inf\"))\n # p_attn shape: batch_size x num_heads x seq_len x seq_len\n p_attn = F.softmax(scores, dim=3)\n # attn shape: batch_size x num_heads x seq_len x d_k\n attn = torch.matmul(p_attn, value)\n return attn, p_attn\n\n\ndef per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx):\n \"\"\" Gather per-symbol log probabilities into per-seq log probabilities \"\"\"\n # per_symbol_log_probs shape: batch_size, seq_len, candidate_size\n # tgt_out_idx shape: batch_size, seq_len\n # per_symbol_log_probs is log probability of each symbol in the tgt_out_idx\n # shape: batch_size, seq_len\n log_probs = torch.gather(per_symbol_log_probs, 2, tgt_out_idx.unsqueeze(2)).squeeze(\n 2\n )\n # shape: batch_size, 1\n return log_probs.sum(dim=1, keepdim=True)\n\n\ndef per_symbol_to_per_seq_probs(per_symbol_probs, tgt_out_idx):\n \"\"\" Gather per-symbol probabilities into per-seq probabilities \"\"\"\n # per_symbol_probs shape: batch_size, seq_len, candidate_size\n # tgt_out_idx shape: batch_size, seq_len\n # output shape: batch_size, 1\n return torch.clamp(\n torch.prod(\n torch.gather(per_symbol_probs, 2, tgt_out_idx.unsqueeze(2)).squeeze(2),\n dim=1,\n keepdim=True,\n ),\n # prevent zero probabilities, which cause torch.log return -inf\n min=1e-40,\n )\n\n\ndef pytorch_decoder_mask(\n memory: torch.Tensor, tgt_in_idx: torch.Tensor, num_heads: int\n):\n \"\"\"\n Compute the masks used in the PyTorch Transformer-based decoder for\n self-attention and attention over encoder outputs\n\n mask_ijk = 1 if the item should be ignored; 0 if the item should be paid attention\n\n Input:\n memory shape: batch_size, src_seq_len, dim_model\n tgt_in_idx (+2 offseted) shape: batch_size, tgt_seq_len\n\n Return:\n tgt_tgt_mask shape: batch_size * num_heads, tgt_seq_len, tgt_seq_len\n tgt_src_mask shape: batch_size * num_heads, tgt_seq_len, src_seq_len\n \"\"\"\n batch_size, src_seq_len, _ = memory.shape\n tgt_seq_len = tgt_in_idx.shape[1]\n device = memory.device\n mask_indices = torch.tril(\n tgt_in_idx.repeat(1, tgt_seq_len).reshape(batch_size, tgt_seq_len, tgt_seq_len),\n diagonal=0,\n ).to(device)\n 
tgt_src_mask_augmented = torch.zeros(\n batch_size, tgt_seq_len, src_seq_len + 2, dtype=torch.bool, device=device\n ).scatter(2, mask_indices, 1)\n tgt_src_mask = tgt_src_mask_augmented[:, :, 2:].repeat_interleave(num_heads, dim=0)\n tgt_tgt_mask = (subsequent_mask(tgt_seq_len, device) == 0).repeat(\n batch_size * num_heads, 1, 1\n )\n return tgt_tgt_mask, tgt_src_mask\n"
] | [
[
"torch.matmul",
"torch.nn.functional.softmax",
"torch.ones",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
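The `seq2slate_utils.py` record above defines several small tensor helpers whose expected shapes are easy to get wrong. The sketch below exercises two of them with arbitrary sizes; only the import path (taken from the record's `file_path`) and the toy dimensions are assumptions.

```python
# Shape check for subsequent_mask and per_symbol_to_per_seq_log_probs above.
import torch
import torch.nn.functional as F

from reagent.model_utils.seq2slate_utils import (
    per_symbol_to_per_seq_log_probs,
    subsequent_mask,
)

batch_size, seq_len, candidate_size = 2, 4, 10

# Lower-triangular attention mask: position i may attend to positions <= i.
mask = subsequent_mask(seq_len, torch.device("cpu"))
print(mask.shape)  # torch.Size([1, 4, 4])

# Per-symbol log-probabilities over candidates -> one log-probability per slate.
per_symbol_log_probs = F.log_softmax(
    torch.randn(batch_size, seq_len, candidate_size), dim=-1
)
# Indices 0 and 1 are reserved for padding / decoder-start in this codebase.
tgt_out_idx = torch.randint(2, candidate_size, (batch_size, seq_len))
seq_log_probs = per_symbol_to_per_seq_log_probs(per_symbol_log_probs, tgt_out_idx)
print(seq_log_probs.shape)  # torch.Size([2, 1])
```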
tzongshiuan/tensorflow_introduction_demo | [
"ef31ac2752f09db727c4ca6de76d216f6ca23504"
] | [
"get_file_2.py"
] | [
"import tensorflow as tf\nimport random as r\nimport cv2\nimport numpy as np\nimport skimage.io as io\nfrom matplotlib import pyplot as plt\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n\ndef get_file(file_dir):\n # The images in each subfolder\n global fileNames\n images = []\n # The subfolders\n sub_folders = []\n\n # Using \"os.walk\" function to grab all the files in each folder\n for dirPath, dirNames, fileNames in os.walk(file_dir):\n names = []\n for name in fileNames:\n names.append(os.path.join(dirPath, name))\n\n for name in dirNames:\n sub_folders.append(os.path.join(dirPath, name))\n\n # 隨機打亂各個資料夾內的數據\n r.shuffle(names)\n if names != []:\n images.append(names)\n\n # 計算最小檔案數量的資料夾\n mincount = float(\"Inf\")\n for num_folder in sub_folders:\n n_img = len(os.listdir(num_folder))\n\n if n_img < mincount:\n mincount = n_img\n\n # 只保留最小檔案數量\n for i in range(len(images)):\n images[i] = images[i][0:mincount]\n\n images = np.reshape(images, [mincount * len(sub_folders), ])\n\n # To record the labels of the image dataset\n labels = []\n for count in range(len(sub_folders)):\n labels = np.append(labels, mincount * [count])\n\n # 打亂最後輸出的順序,去除每個類別間的隔閡\n sub_folders = np.array([images, labels])\n sub_folders = sub_folders[:, np.random.permutation(sub_folders.shape[1])].T\n\n image_list = list(sub_folders[:, 0])\n label_list = list(sub_folders[:, 1])\n label_list = [int(float(i)) for i in label_list]\n return image_list, label_list\n\n\n# 轉Int64資料為 tf.train.Feature 格式\ndef int64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\n# 轉Bytes資料為 tf.train.Feature 格式\ndef bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef convert_to_TFRecord(images, labels, filename):\n global image_raw\n n_samples = len(labels)\n TFWriter = tf.python_io.TFRecordWriter(filename)\n\n print('\\nTransform start...')\n for i in np.arange(0, n_samples):\n try:\n # image = cv2.imread(images[i])\n image = cv2.imread(images[i], 0)\n # image = Image.open(images[i])\n\n # color image\n # if i == 0:\n # img = Image.open(images[i])\n # plt.imshow(img)\n # plt.show()\n\n # gray scale image\n # if i == 1:\n # img = Image.open(images[i]).convert('L')\n # array = np.asarray(img)\n # plt.imshow(array, cmap='gray', vmin=0, vmax=255)\n # plt.show()\n\n if image is None:\n print('Error image:' + images[i])\n else:\n # image_raw = image.tobytes()\n image_raw = image.tostring()\n # print(len(image_raw))\n\n label = int(labels[i])\n\n # 將 tf.train.Feature 合併成 tf.train.Features\n ftrs = tf.train.Features(\n feature={'label': int64_feature(label),\n 'image_raw': bytes_feature(image_raw)}\n )\n\n # 將 tf.train.Features 轉成 tf.train.Example\n example = tf.train.Example(features=ftrs)\n\n # 將 tf.train.Example 寫成 tfRecord 格式\n TFWriter.write(example.SerializeToString())\n except IOError as e:\n print('Skip!\\n')\n\n TFWriter.close()\n print('Transform done!')\n"
] | [
[
"tensorflow.train.Example",
"numpy.arange",
"tensorflow.python_io.TFRecordWriter",
"numpy.append",
"numpy.random.permutation",
"tensorflow.train.BytesList",
"numpy.array",
"tensorflow.train.Int64List"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
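The `get_file_2.py` record above pairs a folder scanner with a TFRecord writer but contains no entry point. A minimal driver might look like the following; the dataset directory and output file name are placeholders, and `get_file` expects one sub-folder per class under the given directory (each class is truncated to the size of the smallest one before the global shuffle).

```python
# Minimal driver for the helpers above (TensorFlow 1.x API).
from get_file_2 import get_file, convert_to_TFRecord

# './dataset/' is a placeholder: it should contain one sub-folder per class.
image_list, label_list = get_file('./dataset/')

# Writes grayscale image bytes and integer labels into a single TFRecord file.
convert_to_TFRecord(image_list, label_list, 'train.tfrecords')
```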
knok/transformers | [
"23a2cea8cb95864ddb7e7e80e126e4f083640882"
] | [
"src/transformers/modeling_distilbert.py"
] | [
"# coding=utf-8\n# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" PyTorch DistilBERT model\n adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)\n and in part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert)\n\"\"\"\n\n\nimport copy\nimport logging\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss\n\nfrom .configuration_distilbert import DistilBertConfig\nfrom .file_utils import add_start_docstrings\nfrom .modeling_utils import PreTrainedModel, prune_linear_layer\n\n\nlogger = logging.getLogger(__name__)\n\n\nDISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = {\n \"distilbert-base-uncased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-pytorch_model.bin\",\n \"distilbert-base-uncased-distilled-squad\": \"https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-pytorch_model.bin\",\n \"distilbert-base-german-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-pytorch_model.bin\",\n \"distilbert-base-multilingual-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-pytorch_model.bin\",\n \"distilbert-base-uncased-finetuned-sst-2-english\": \"https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-finetuned-sst-2-english-pytorch_model.bin\",\n}\n\n\n# UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE #\ndef gelu(x):\n return 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\n\ndef create_sinusoidal_embeddings(n_pos, dim, out):\n position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])\n out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))\n out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))\n out.detach_()\n out.requires_grad = False\n\n\nclass Embeddings(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim)\n if config.sinusoidal_pos_embds:\n create_sinusoidal_embeddings(\n n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight\n )\n\n self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12)\n self.dropout = nn.Dropout(config.dropout)\n\n def forward(self, input_ids):\n \"\"\"\n Parameters\n ----------\n input_ids: torch.tensor(bs, max_seq_length)\n The token ids to embed.\n\n Outputs\n -------\n embeddings: torch.tensor(bs, max_seq_length, dim)\n The embedded tokens (plus position embeddings, no token_type embeddings)\n \"\"\"\n seq_length = input_ids.size(1)\n position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length)\n position_ids = 
position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length)\n\n word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim)\n position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim)\n\n embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim)\n embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim)\n embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim)\n return embeddings\n\n\nclass MultiHeadSelfAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.n_heads = config.n_heads\n self.dim = config.dim\n self.dropout = nn.Dropout(p=config.attention_dropout)\n self.output_attentions = config.output_attentions\n\n assert self.dim % self.n_heads == 0\n\n self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim)\n self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim)\n self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim)\n self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim)\n\n self.pruned_heads = set()\n\n def prune_heads(self, heads):\n attention_head_size = self.dim // self.n_heads\n if len(heads) == 0:\n return\n mask = torch.ones(self.n_heads, attention_head_size)\n heads = set(heads) - self.pruned_heads\n for head in heads:\n head -= sum(1 if h < head else 0 for h in self.pruned_heads)\n mask[head] = 0\n mask = mask.view(-1).contiguous().eq(1)\n index = torch.arange(len(mask))[mask].long()\n # Prune linear layers\n self.q_lin = prune_linear_layer(self.q_lin, index)\n self.k_lin = prune_linear_layer(self.k_lin, index)\n self.v_lin = prune_linear_layer(self.v_lin, index)\n self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)\n # Update hyper params\n self.n_heads = self.n_heads - len(heads)\n self.dim = attention_head_size * self.n_heads\n self.pruned_heads = self.pruned_heads.union(heads)\n\n def forward(self, query, key, value, mask, head_mask=None):\n \"\"\"\n Parameters\n ----------\n query: torch.tensor(bs, seq_length, dim)\n key: torch.tensor(bs, seq_length, dim)\n value: torch.tensor(bs, seq_length, dim)\n mask: torch.tensor(bs, seq_length)\n\n Outputs\n -------\n weights: torch.tensor(bs, n_heads, seq_length, seq_length)\n Attention weights\n context: torch.tensor(bs, seq_length, dim)\n Contextualized layer. 
Optional: only if `output_attentions=True`\n \"\"\"\n bs, q_length, dim = query.size()\n k_length = key.size(1)\n # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim)\n # assert key.size() == value.size()\n\n dim_per_head = self.dim // self.n_heads\n\n mask_reshp = (bs, 1, 1, k_length)\n\n def shape(x):\n \"\"\" separate heads \"\"\"\n return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)\n\n def unshape(x):\n \"\"\" group heads \"\"\"\n return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)\n\n q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head)\n k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head)\n v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head)\n\n q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head)\n scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length)\n mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length)\n scores.masked_fill_(mask, -float(\"inf\")) # (bs, n_heads, q_length, k_length)\n\n weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length)\n weights = self.dropout(weights) # (bs, n_heads, q_length, k_length)\n\n # Mask heads if we want to\n if head_mask is not None:\n weights = weights * head_mask\n\n context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head)\n context = unshape(context) # (bs, q_length, dim)\n context = self.out_lin(context) # (bs, q_length, dim)\n\n if self.output_attentions:\n return (context, weights)\n else:\n return (context,)\n\n\nclass FFN(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dropout = nn.Dropout(p=config.dropout)\n self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim)\n self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim)\n assert config.activation in [\"relu\", \"gelu\"], \"activation ({}) must be in ['relu', 'gelu']\".format(\n config.activation\n )\n self.activation = gelu if config.activation == \"gelu\" else nn.ReLU()\n\n def forward(self, input):\n x = self.lin1(input)\n x = self.activation(x)\n x = self.lin2(x)\n x = self.dropout(x)\n return x\n\n\nclass TransformerBlock(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.n_heads = config.n_heads\n self.dim = config.dim\n self.hidden_dim = config.hidden_dim\n self.dropout = nn.Dropout(p=config.dropout)\n self.activation = config.activation\n self.output_attentions = config.output_attentions\n\n assert config.dim % config.n_heads == 0\n\n self.attention = MultiHeadSelfAttention(config)\n self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)\n\n self.ffn = FFN(config)\n self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)\n\n def forward(self, x, attn_mask=None, head_mask=None):\n \"\"\"\n Parameters\n ----------\n x: torch.tensor(bs, seq_length, dim)\n attn_mask: torch.tensor(bs, seq_length)\n\n Outputs\n -------\n sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length)\n The attention weights\n ffn_output: torch.tensor(bs, seq_length, dim)\n The output of the transformer block contextualization.\n \"\"\"\n # Self-Attention\n sa_output = self.attention(query=x, key=x, value=x, mask=attn_mask, head_mask=head_mask)\n if self.output_attentions:\n sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length)\n else: # To handle these `output_attention` or 
`output_hidden_states` cases returning tuples\n assert type(sa_output) == tuple\n sa_output = sa_output[0]\n sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim)\n\n # Feed Forward Network\n ffn_output = self.ffn(sa_output) # (bs, seq_length, dim)\n ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim)\n\n output = (ffn_output,)\n if self.output_attentions:\n output = (sa_weights,) + output\n return output\n\n\nclass Transformer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.n_layers = config.n_layers\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n\n layer = TransformerBlock(config)\n self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.n_layers)])\n\n def forward(self, x, attn_mask=None, head_mask=None):\n \"\"\"\n Parameters\n ----------\n x: torch.tensor(bs, seq_length, dim)\n Input sequence embedded.\n attn_mask: torch.tensor(bs, seq_length)\n Attention mask on the sequence.\n\n Outputs\n -------\n hidden_state: torch.tensor(bs, seq_length, dim)\n Sequence of hiddens states in the last (top) layer\n all_hidden_states: Tuple[torch.tensor(bs, seq_length, dim)]\n Tuple of length n_layers with the hidden states from each layer.\n Optional: only if output_hidden_states=True\n all_attentions: Tuple[torch.tensor(bs, n_heads, seq_length, seq_length)]\n Tuple of length n_layers with the attention weights from each layer\n Optional: only if output_attentions=True\n \"\"\"\n all_hidden_states = ()\n all_attentions = ()\n\n hidden_state = x\n for i, layer_module in enumerate(self.layer):\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_state,)\n\n layer_outputs = layer_module(x=hidden_state, attn_mask=attn_mask, head_mask=head_mask[i])\n hidden_state = layer_outputs[-1]\n\n if self.output_attentions:\n assert len(layer_outputs) == 2\n attentions = layer_outputs[0]\n all_attentions = all_attentions + (attentions,)\n else:\n assert len(layer_outputs) == 1\n\n # Add last layer\n if self.output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_state,)\n\n outputs = (hidden_state,)\n if self.output_hidden_states:\n outputs = outputs + (all_hidden_states,)\n if self.output_attentions:\n outputs = outputs + (all_attentions,)\n return outputs # last-layer hidden state, (all hidden states), (all attentions)\n\n\n# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #\nclass DistilBertPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = DistilBertConfig\n pretrained_model_archive_map = DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP\n load_tf_weights = None\n base_model_prefix = \"distilbert\"\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, nn.Embedding):\n if module.weight.requires_grad:\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nDISTILBERT_START_DOCSTRING = r\"\"\"\n DistilBERT is a small, fast, cheap and light Transformer model\n trained by distilling Bert base. 
It has 40% less parameters than\n `bert-base-uncased`, runs 60% faster while preserving over 95% of\n Bert's performances as measured on the GLUE language understanding benchmark.\n\n Here are the differences between the interface of Bert and DistilBert:\n\n - DistilBert doesn't have `token_type_ids`, you don't need to indicate which token belongs to which segment. Just separate your segments with the separation token `tokenizer.sep_token` (or `[SEP]`)\n - DistilBert doesn't have options to select the input positions (`position_ids` input). This could be added if necessary though, just let's us know if you need this option.\n\n For more information on DistilBERT, please refer to our\n `detailed blog post`_\n\n .. _`detailed blog post`:\n https://medium.com/huggingface/distilbert-8cf3380435b5\n\n Parameters:\n config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nDISTILBERT_INPUTS_DOCSTRING = r\"\"\"\n Inputs:\n **input_ids** ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Indices of input sequence tokens in the vocabulary.\n The input sequences should start with `[CLS]` and end with `[SEP]` tokens.\n\n For now, ONLY BertTokenizer(`bert-base-uncased`) is supported and you should use this tokenizer when using DistilBERT.\n **attention_mask**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n **head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.\n **inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:\n Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.\",\n DISTILBERT_START_DOCSTRING,\n DISTILBERT_INPUTS_DOCSTRING,\n)\nclass DistilBertModel(DistilBertPreTrainedModel):\n r\"\"\"\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``\n Sequence of hidden-states at the output of the last layer of the model.\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, 
sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n model = DistilBertModel.from_pretrained('distilbert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = Embeddings(config) # Embeddings\n self.transformer = Transformer(config) # Encoder\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.word_embeddings = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.transformer.layer[layer].attention.prune_heads(heads)\n\n def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None):\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = (\n head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n ) # We can specify head_mask for each layer\n head_mask = head_mask.to(\n dtype=next(self.parameters()).dtype\n ) # switch to fload if need + fp16 compatibility\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids) # (bs, seq_length, dim)\n tfmr_output = self.transformer(x=inputs_embeds, attn_mask=attention_mask, head_mask=head_mask)\n hidden_state = tfmr_output[0]\n output = (hidden_state,) + tfmr_output[1:]\n\n return output # last-layer hidden-state, (all hidden_states), (all attentions)\n\n\n@add_start_docstrings(\n \"\"\"DistilBert Model with a `masked language modeling` head on top. 
\"\"\",\n DISTILBERT_START_DOCSTRING,\n DISTILBERT_INPUTS_DOCSTRING,\n)\nclass DistilBertForMaskedLM(DistilBertPreTrainedModel):\n r\"\"\"\n **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n in ``[0, ..., config.vocab_size]``\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Masked language modeling loss.\n **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``\n Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n model = DistilBertForMaskedLM.from_pretrained('distilbert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, masked_lm_labels=input_ids)\n loss, prediction_scores = outputs[:2]\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.output_attentions = config.output_attentions\n self.output_hidden_states = config.output_hidden_states\n\n self.distilbert = DistilBertModel(config)\n self.vocab_transform = nn.Linear(config.dim, config.dim)\n self.vocab_layer_norm = nn.LayerNorm(config.dim, eps=1e-12)\n self.vocab_projector = nn.Linear(config.dim, config.vocab_size)\n\n self.init_weights()\n\n self.mlm_loss_fct = nn.CrossEntropyLoss()\n\n def get_output_embeddings(self):\n return self.vocab_projector\n\n def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None):\n dlbrt_output = self.distilbert(\n input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds\n )\n hidden_states = dlbrt_output[0] # (bs, seq_length, dim)\n prediction_logits = self.vocab_transform(hidden_states) # (bs, seq_length, dim)\n prediction_logits = gelu(prediction_logits) # (bs, seq_length, dim)\n prediction_logits = self.vocab_layer_norm(prediction_logits) # (bs, seq_length, dim)\n prediction_logits = self.vocab_projector(prediction_logits) # (bs, seq_length, vocab_size)\n\n outputs = (prediction_logits,) + dlbrt_output[1:]\n if masked_lm_labels is not None:\n mlm_loss = self.mlm_loss_fct(\n prediction_logits.view(-1, prediction_logits.size(-1)), masked_lm_labels.view(-1)\n )\n outputs = (mlm_loss,) + outputs\n\n return outputs # (mlm_loss), prediction_logits, (all 
hidden_states), (all attentions)\n\n\n@add_start_docstrings(\n \"\"\"DistilBert Model transformer with a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. \"\"\",\n DISTILBERT_START_DOCSTRING,\n DISTILBERT_INPUTS_DOCSTRING,\n)\nclass DistilBertForSequenceClassification(DistilBertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for computing the sequence classification/regression loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),\n If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification (or regression if config.num_labels==1) loss.\n **logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, logits = outputs[:2]\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.distilbert = DistilBertModel(config)\n self.pre_classifier = nn.Linear(config.dim, config.dim)\n self.classifier = nn.Linear(config.dim, config.num_labels)\n self.dropout = nn.Dropout(config.seq_classif_dropout)\n\n self.init_weights()\n\n def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, labels=None):\n distilbert_output = self.distilbert(\n input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds\n )\n hidden_state = distilbert_output[0] # (bs, seq_len, dim)\n pooled_output = hidden_state[:, 0] # (bs, dim)\n pooled_output = self.pre_classifier(pooled_output) # (bs, dim)\n pooled_output = nn.ReLU()(pooled_output) # (bs, dim)\n pooled_output = self.dropout(pooled_output) # (bs, dim)\n logits = self.classifier(pooled_output) # (bs, dim)\n\n outputs = (logits,) + distilbert_output[1:]\n if labels is not None:\n if self.num_labels == 1:\n loss_fct = nn.MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs 
# (loss), logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"DistilBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of\n the hidden-states output to compute `span start logits` and `span end logits`). \"\"\",\n DISTILBERT_START_DOCSTRING,\n DISTILBERT_INPUTS_DOCSTRING,\n)\nclass DistilBertForQuestionAnswering(DistilBertPreTrainedModel):\n r\"\"\"\n **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``\n Span-start scores (before SoftMax).\n **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``\n Span-end scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n start_positions = torch.tensor([1])\n end_positions = torch.tensor([3])\n outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)\n loss, start_scores, end_scores = outputs[:3]\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n\n self.distilbert = DistilBertModel(config)\n self.qa_outputs = nn.Linear(config.dim, config.num_labels)\n assert config.num_labels == 2\n self.dropout = nn.Dropout(config.qa_dropout)\n\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n start_positions=None,\n end_positions=None,\n ):\n distilbert_output = self.distilbert(\n input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds\n )\n hidden_states = distilbert_output[0] # (bs, max_query_len, dim)\n\n hidden_states = 
self.dropout(hidden_states) # (bs, max_query_len, dim)\n logits = self.qa_outputs(hidden_states) # (bs, max_query_len, 2)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1) # (bs, max_query_len)\n end_logits = end_logits.squeeze(-1) # (bs, max_query_len)\n\n outputs = (start_logits, end_logits,) + distilbert_output[1:]\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n outputs = (total_loss,) + outputs\n\n return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)\n\n\n@add_start_docstrings(\n \"\"\"DistilBert Model with a token classification head on top (a linear layer on top of\n the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. \"\"\",\n DISTILBERT_START_DOCSTRING,\n DISTILBERT_INPUTS_DOCSTRING,\n)\nclass DistilBertForTokenClassification(DistilBertPreTrainedModel):\n r\"\"\"\n **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:\n Labels for computing the token classification loss.\n Indices should be in ``[0, ..., config.num_labels - 1]``.\n\n Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:\n **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:\n Classification loss.\n **scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``\n Classification scores (before SoftMax).\n **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)\n list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)\n of shape ``(batch_size, sequence_length, hidden_size)``:\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n **attentions**: (`optional`, returned when ``config.output_attentions=True``)\n list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.\n\n Examples::\n\n tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n model = DistilBertForTokenClassification.from_pretrained('distilbert-base-uncased')\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\")).unsqueeze(0) # Batch size 1\n labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids, labels=labels)\n loss, scores = outputs[:2]\n\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.distilbert = DistilBertModel(config)\n self.dropout = nn.Dropout(config.dropout)\n self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def forward(self, input_ids=None, attention_mask=None, 
head_mask=None, inputs_embeds=None, labels=None):\n\n outputs = self.distilbert(\n input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds\n )\n\n sequence_output = outputs[0]\n\n sequence_output = self.dropout(sequence_output)\n logits = self.classifier(sequence_output)\n\n outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n # Only keep active parts of the loss\n if attention_mask is not None:\n active_loss = attention_mask.view(-1) == 1\n active_logits = logits.view(-1, self.num_labels)[active_loss]\n active_labels = labels.view(-1)[active_loss]\n loss = loss_fct(active_logits, active_labels)\n else:\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n outputs = (loss,) + outputs\n\n return outputs # (loss), scores, (hidden_states), (attentions)\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"numpy.power",
"numpy.cos",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"numpy.sin",
"torch.nn.Linear",
"torch.matmul",
"torch.arange",
"torch.nn.ReLU",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
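The DistilBERT record above already carries usage examples in its docstrings; the short script below simply adapts the `DistilBertModel` one into a runnable form. It assumes the `transformers` package is installed and that the pretrained `distilbert-base-uncased` weights can be downloaded; the sample sentence is arbitrary.

```python
# Forward pass through DistilBertModel, adapted from the record's docstring.
import torch
from transformers import DistilBertModel, DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = DistilBertModel.from_pretrained("distilbert-base-uncased")

input_ids = torch.tensor(
    tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)
).unsqueeze(0)  # batch size 1

with torch.no_grad():
    last_hidden_state = model(input_ids)[0]

print(last_hidden_state.shape)  # (1, sequence_length, 768)
```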
rouzbeh-afrasiabi/BertFineTuning | [
"848f24920850d6f0be4c4aee7de96e2332404e8b"
] | [
"BertFineTuning/duplicate_detection.py"
] | [
"from BertFineTuning.utils import *\nfrom BertFineTuning.model_config import*\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom collections import OrderedDict\nfrom pycm import *\n\nimport torch\nif(torch.cuda.is_available()):\n torch.cuda.current_device()\nfrom torch import nn\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nimport torch.nn.functional as F\n\nimport copy\nimport gc\n\nfrom transformers import BertModel\n\ncwd = os.getcwd()\nsys.path.append(cwd)\nsys.path.insert(0, cwd)\n\n\n\n\nrandom_state=123\ntorch.manual_seed(random_state)\nif torch.cuda.is_available():\n torch.cuda.manual_seed_all(random_state)\nnp.random.seed(random_state)\n\n\n\n\nclass BertFineTuning():\n def __init__(self,):\n \n class Network(nn.Module):\n def __init__(self, pre_trained_model,config):\n super().__init__()\n self.pre_trained_model=pre_trained_model.to(config['device'])\n self.pre_trained_out_features=list(list(self.pre_trained_model.children())[-1].children())[0].out_features\n self.classifier=nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(self.pre_trained_out_features, self.pre_trained_out_features)),\n ('bn_1',nn.BatchNorm1d(self.pre_trained_out_features)),\n ('prelu1', nn.PReLU()),\n ('fc2', nn.Linear(self.pre_trained_out_features, config['num_classes']))]))\n\n def forward(self, tokens_tensor, segments_tensors):\n last_hidden_state, pooled_output = self.pre_trained_model(tokens_tensor, segments_tensors)\n logits = self.classifier(pooled_output)\n return logits \n \n def __device():\n return torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.check_point_loaded=False\n self.device = __device()\n self.criterion_config={}\n self.optimizer_config={}\n self.scheduler_config={}\n self.config=model_config\n self.config['device']=self.device\n self.pre_trained_model=BertModel.from_pretrained('bert-base-uncased')\n self.model=Network(self.pre_trained_model,self.config).to(self.device)\n self.parameters_main=[\n {\"params\": self.model.pre_trained_model.parameters(),\n \"lr\": self.config['learning_rate_PT'],'weight_decay': self.config['weight_decay']},\n {\"params\": self.model.classifier.parameters(),\n \"lr\": self.config['learning_rate_CLS'],'weight_decay': self.config['weight_decay']},\n ]\n self.no_decay = ['bias', 'LayerNorm.weight']\n self.__PT_n_param=self.model.pre_trained_model.named_parameters()\n self.__CLS_n_param=self.model.classifier.named_parameters()\n self.parameters_noDecay=[\n {'params': [p for n, p in self.__PT_n_param if not any(nd in n for nd in self.no_decay) and p.requires_grad],\n \"lr\": self.config['learning_rate_PT'], 'weight_decay': self.config['weight_decay']},\n {'params': [p for n, p in self.__PT_n_param if any(nd in n for nd in self.no_decay) and p.requires_grad],\n \"lr\": self.config['learning_rate_PT'], 'weight_decay': 0.0},\n {'params': [p for n, p in self.__CLS_n_param if p.requires_grad],\n \"lr\": self.config['learning_rate_PT'], 'weight_decay': self.config['weight_decay']},\n ]\n self.criterion=None\n self.optimizer=None\n self.scheduler=None\n self.validate_at_epoch=0\n self.checkpoint=None\n self.loss_history=[]\n self.test_loss_history=[]\n self.learning_rate=[]\n self.cm_test=[]\n self.cm_train=[]\n self.last_epoch=0\n self.epochs=100\n self.validate_at_epoch=0\n self.print_every=100\n self.e=0\n self.target_folder=cwd\n self.save_folder=os.path.join(cwd,'checkpoints')\n \n @staticmethod\n def _update_dict_strict(target,**kwargs):\n if(all([key in 
target.keys() for key in kwargs.keys()])):\n target.update(kwargs)\n else:\n raise Exception('Following keys not in dictionary',[key for key in kwargs.keys() if(key not in target.keys())]) \n \n @staticmethod \n def _update_dict(target,**kwargs):\n target.update(kwargs) \n \n def update_config(self,**kwargs):\n self._update_dict_strict(self.config,**kwargs) \n \n @staticmethod\n def print_results(cm):\n print(cm.AUCI)\n print(\"MCC: \",cm.MCC)\n print(\"Accuracy: \",cm.ACC)\n print({\"F1 Macro \":cm.F1_Macro},{'F1 Micro':cm.F1_Micro})\n print({\"F1 \":cm.F1})\n print(\"Precision: \",cm.PPV)\n print(\"recall: \",cm.TPR)\n cm.print_matrix() \n\n def save_it(self,target_folder):\n self.model.eval()\n print(\"Saving Model ...\")\n checkpoint = {'state_dict': self.model.state_dict(),\n 'optimizer':self.optimizer.state_dict(),\n 'optimizer_type':type(self.optimizer),\n 'criterion':self.criterion,\n 'criterion_type':type(self.criterion),\n 'scheduler':self.scheduler.state_dict(),\n 'scheduler_type':type(self.scheduler),\n 'last_epoch':self.e+1,\n 'train_loss_history':self.loss_history,\n 'test_loss_history':self.test_loss_history,\n 'learning_rate_history':self.learning_rate,\n 'cm_train':self.cm_train,\n 'cm_test':self.cm_test,\n 'config':self.config,\n 'train_loops':self.train_loops\n }\n try:\n torch.save(checkpoint,target_folder+'/'+'checkpoint'+str(self.e+1)+'.pth' )\n print(\"Model Saved.\\n\")\n self.model.train()\n except:\n print(\"Failed to Save Model!!\")\n \n def load_checkpoint(self,path):\n if(check_file(path)):\n self.checkpoint = torch.load(path,map_location=self.device)\n self.model.load_state_dict(self.checkpoint[\"state_dict\"])\n self.optimizer.load_state_dict(self.checkpoint[\"optimizer\"])\n self.scheduler.load_state_dict(self.checkpoint[\"scheduler\"])\n \n self.loss_history=self.checkpoint['train_loss_history']\n self.test_loss_history=self.checkpoint['test_loss_history']\n self.learning_rate=self.checkpoint['learning_rate_history']\n self.cm_test=self.checkpoint['cm_test']\n self.cm_train=self.checkpoint['cm_train']\n self.last_epoch=self.checkpoint['last_epoch']\n self.check_point_loaded=True\n self.model.eval()\n return \n\n def predict(self,target_loader):\n self.model.eval()\n with torch.no_grad():\n criterion=self.criterion\n predictions=np.array([])\n loss_history=[0]\n labels=np.array([])\n for i, (_ids,_list_of_indices,_segments_ids,_labels) in enumerate(target_loader):\n _labels=_labels.to(self.device).long()\n _list_of_indices,_segments_ids = _list_of_indices.to(self.device), _segments_ids.to(self.device)\n _output = self.model(_list_of_indices,_segments_ids)\n _loss=self.criterion(_output,_labels)\n loss_history.append(_loss.detach().item())\n _,_prediction= torch.max(_output, 1)\n predictions=np.append(predictions,_prediction.data.to('cpu'))\n labels=np.append(labels,_labels.cpu())\n torch.cuda.empty_cache()\n gc.collect()\n cm=ConfusionMatrix(labels,predictions)\n torch.cuda.empty_cache()\n gc.collect()\n return cm,np.mean(loss_history)\n\n def train(self,model_config,train_loader,valid_loader,epochs=100,print_every=100,validate_at_epoch=0):\n model=self.model\n train_res=np.array([])\n train_lbl=np.array([])\n if(not self.check_point_loaded):\n self.loss_history=[]\n self.test_loss_history=[]\n self.learning_rate=[]\n self.cm_test=[]\n self.cm_train=[]\n self.last_epoch=0\n elif(self.check_point_loaded):\n self.loss_history=self.checkpoint['train_loss_history']\n self.test_loss_history=self.checkpoint['test_loss_history']\n 
self.learning_rate=self.checkpoint['learning_rate_history']\n self.cm_test=self.checkpoint['cm_test']\n self.cm_train=self.checkpoint['cm_train']\n self.last_epoch=self.checkpoint['last_epoch']\n self.train_loops=len(train_loader)//print_every\n for e in range(self.last_epoch,self.epochs,1):\n self.e=e\n \n for i,(ids,list_of_indices,segments_ids,labels) in enumerate(train_loader):\n model.train()\n list_of_indices,segments_ids,labels=list_of_indices.to(self.device),segments_ids.to(self.device),labels.to(self.device)\n output=model(list_of_indices,segments_ids)\n loss=self.criterion(output,labels)\n self.loss_history.append(loss.data.item())\n self.learning_rate.append(self.scheduler.get_lr())\n loss.backward()\n self.optimizer.step()\n self.optimizer.zero_grad()\n _,prediction= torch.max(output, 1) \n train_res=np.append(train_res,(prediction.data.to('cpu')))\n train_lbl=np.append(train_lbl,labels.data.cpu().numpy())\n if((i+1)%print_every==0):\n cm=ConfusionMatrix(train_lbl,train_res)\n self.cm_train.append(cm)\n print(\"epoch: \",e+1,\" step: \",(i+1)//print_every,\"/\",self.train_loops)\n print(\"Batch Loss: \",np.mean(self.loss_history[len(self.loss_history)-print_every:len(self.loss_history)-1]))\n print('train results: \\n')\n self.print_results(cm)\n train_res=np.array([])\n train_lbl=np.array([])\n torch.cuda.empty_cache()\n gc.collect()\n\n print(\"epoch: \",e+1,\"Train Loss: \",np.mean(self.loss_history[-1*(len(train_loader)-1):]),\"\\n\")\n\n if(((e+1)>=validate_at_epoch)):\n print(\"************************\")\n print(\"validation started ...\",\"\\n\")\n _cm,_loss=self.predict(valid_loader)\n self.test_loss_history.append(_loss)\n print('test loss: ', _loss)\n self.print_results(_cm)\n print(\"************************\",\"\\n\")\n self.cm_test.append(_cm)\n self.save_it(self.save_folder)\n self.scheduler.step() \n \n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.max",
"numpy.random.seed",
"torch.cuda.current_device",
"torch.load",
"torch.manual_seed",
"torch.nn.PReLU",
"torch.cuda.empty_cache",
"torch.nn.Linear",
"torch.no_grad",
"numpy.mean",
"torch.cuda.manual_seed_all",
"torch.cuda.is_available",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
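The `BertFineTuning` wrapper above leaves `criterion`, `optimizer`, and `scheduler` set to `None`, so they must be attached before `train()` is called. The sketch below shows one plausible setup; the loss, optimizer, and scheduler choices and their hyperparameters are assumptions, and the data loaders (yielding `(ids, token_indices, segment_ids, labels)` tuples, as consumed by `train()` and `predict()`) must be built elsewhere.

```python
# Hypothetical setup for the BertFineTuning wrapper above.
from torch import nn, optim

from BertFineTuning.duplicate_detection import BertFineTuning

bft = BertFineTuning()  # builds the BERT + classifier network on the available device

# These three attributes are None until assigned; the choices below are assumptions.
bft.criterion = nn.CrossEntropyLoss()
bft.optimizer = optim.AdamW(bft.parameters_main)  # per-group lr/weight_decay already set
bft.scheduler = optim.lr_scheduler.StepLR(bft.optimizer, step_size=1, gamma=0.9)

# train_loader / valid_loader: DataLoaders yielding (ids, indices, segment_ids, labels).
# bft.train(bft.config, train_loader, valid_loader, print_every=50)
```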
muradtuk/UnifiedFramework | [
"07dd7cf50552fa87fd875818eead03a2fe9e5073"
] | [
"RegressionProblems.py"
] | [
"import numpy as np\nimport scipy as sp\nimport cvxpy as cp\nimport time\nfrom MainProgram import Utils\nimport PointSet\n\n\nclass RegressionProblem(object):\n def __init__(self, p_norm=2):\n assert(p_norm < 0, 'p_norm must be a positive scalar!')\n self.p_norm = p_norm\n self.coef_ = None\n # self.time_taken = None\n self.intercept_ = None\n\n def fit(self, X, Y, weights):\n d = X.shape[1]\n start_time = time.time()\n if self.p_norm < 1 or 'res' in Utils.PROBLEM_TYPE:\n func = lambda x: np.sum(np.multiply(weights, np.abs(np.dot(X, x) - Y) ** self.p_norm)) if self.p_norm < 1 \\\n else np.sum(np.multiply(weights, np.min(np.abs(np.dot(X, x) - Y), np.linalg.norm(x, Utils.Z))))\n grad = lambda x: sp.optimize.approx_fprime(x, func, Utils.EPSILON)\n optimal_x = None\n optimal_val = np.Inf\n for i in range(Utils.OPTIMIZATION_NUM_INIT):\n x0 = Utils.createRandomInitialVector(d)\n res = sp.optimize.minimize(fun=func, x0=x0, jac=grad, method='L-BFGS-B')\n temp = Utils.OBJECTIVE_COST(PointSet.PointSet(np.hstack((X,Y)), weights), res.x)\n if temp < optimal_val:\n optimal_val = temp\n optimal_x = res.x\n self.coef_ = optimal_x\n else:\n w = cp.Variable(d, )\n\n loss = cp.sum(cp.multiply(weights, cp.power(cp.abs(cp.matmul(X, w)- Y), self.p_norm)))\n constraints = []\n\n prob = cp.Problem(cp.Minimize(loss), constraints)\n prob.solve()\n self.coef_ = w.value()\n # self.time_taken = time.time() - start_time\n"
] | [
[
"numpy.hstack",
"numpy.dot",
"numpy.linalg.norm",
"scipy.optimize.minimize",
"scipy.optimize.approx_fprime"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
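Because the `RegressionProblems.py` record above never shows a call site, here is a tiny synthetic least-squares check. With `p_norm=2` and a `Utils.PROBLEM_TYPE` that does not contain `'res'`, `fit()` takes the cvxpy branch and should recover the generating weights. The data, noise level, and weights are arbitrary, and `MainProgram.Utils` / `PointSet` must be importable exactly as in the repo.

```python
# Synthetic weighted least-squares check for RegressionProblem (p_norm=2).
import numpy as np

from RegressionProblems import RegressionProblem

np.random.seed(0)
X = np.random.randn(100, 3)
true_w = np.array([1.0, -2.0, 0.5])
Y = X.dot(true_w) + 0.01 * np.random.randn(100)
weights = np.ones(100)  # uniform weights for this toy example

reg = RegressionProblem(p_norm=2)
reg.fit(X, Y, weights)
print(reg.coef_)  # expected to be close to [1.0, -2.0, 0.5]
```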
hao-qiang/pix2pix_keras | [
"70a008e1d7da181c536012e6f005bfb3d8d62084"
] | [
"data_utils.py"
] | [
"import numpy as np\nimport cv2\nimport scipy.misc\n\n\ndef normalization(img):\n # rescale input img within [-1,1]\n return img / 127.5 - 1\n\n\ndef inverse_normalization(img):\n # rescale output img within [0,1], then saving by 'scipy.misc.imsave'\n return (img + 1.) / 2.\n\n\ndef read_one_img(img_dir):\n img = cv2.imread(img_dir)[:, :, ::-1]\n img = normalization(img)\n img_HR = img[:, 256:, :]\n img_LR = img[:, :256, :]\n return img_HR, img_LR\n\n\ndef gen_batch(X_list, batch_size=32):\n idx = np.random.choice(X_list.shape[0], batch_size, replace=False)\n X_HR_batch = np.zeros((batch_size, 256, 256, 3), dtype=np.float32)\n X_LR_batch = np.zeros((batch_size, 256, 256, 3), dtype=np.float32)\n\n for i in range(batch_size):\n X_HR_batch[i], X_LR_batch[i] = read_one_img(X_list[idx[i]])\n return X_HR_batch, X_LR_batch\n\n\ndef get_disc_batch(X_HR_batch, X_LR_batch, G_model, batch_counter):\n # Create X_disc: alternatively only generated or real images\n if batch_counter % 2 == 0:\n # Produce an output\n X_disc = G_model.predict(X_LR_batch)\n y_disc = np.zeros((X_disc.shape[0], 1), dtype=np.uint8)\n y_disc[:, 0] = 0\n else:\n X_disc = X_HR_batch\n y_disc = np.zeros((X_disc.shape[0], 1), dtype=np.uint8)\n y_disc[:, 0] = 1\n return X_disc, y_disc\n\n\ndef plot_generated_batch(X_HR, X_LR, G_model, epoch):\n # Generate images\n X_SR = G_model.predict(X_LR[:4])\n X_SR = inverse_normalization(X_SR)\n X_LR = inverse_normalization(X_LR[:4])\n X_HR = inverse_normalization(X_HR[:4])\n X = np.concatenate((X_LR, X_SR, X_HR), axis=0)\n\n list_rows = []\n for i in range(int(X.shape[0] // 4)):\n Xr = np.concatenate([X[k] for k in range(4 * i, 4 * (i + 1))], axis=1)\n list_rows.append(Xr)\n\n Xr = np.concatenate(list_rows, axis=0)\n scipy.misc.imsave(\"./figures/val_epoch%s.png\" % epoch, Xr)\n"
] | [
[
"numpy.concatenate",
"numpy.zeros",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
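The pix2pix `data_utils.py` record above assumes 256x512 paired images (input half on the left, target half on the right). The sketch below shows how one paired batch and, optionally, a discriminator batch would be assembled from it; the glob pattern is a placeholder and `G_model` stands for a Keras generator with a `predict()` method built elsewhere in the project.

```python
# One batch of paired data, and optionally a discriminator batch, using the
# helpers above. The image directory is a placeholder.
import glob

import numpy as np

from data_utils import gen_batch, get_disc_batch

X_list = np.array(sorted(glob.glob('./datasets/facades/train/*.jpg')))

# Each element of the pair is a (4, 256, 256, 3) array rescaled to [-1, 1].
X_HR_batch, X_LR_batch = gen_batch(X_list, batch_size=4)

# G_model: a Keras generator defined elsewhere; even batch counters yield
# generated (fake) samples with label 0, odd counters yield real ones with label 1.
# X_disc, y_disc = get_disc_batch(X_HR_batch, X_LR_batch, G_model, batch_counter=0)
```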
AlexanderSouthan/pyAnalytics | [
"18038b2cda75a99280d3cdd68d61e601eefa0fe0"
] | [
"src/pyAnalytics/raman_data.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nFor inspection of confocal LSM and Raman datasets. \n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport glob\nimport imageio\nimport spc\nfrom tqdm import tqdm\n\n# import own modules#############################\nfrom .spectroscopy_data import spectroscopy_data\nfrom .confocal_data import confocal_data as confocal_data\n#################################################\n\nclass raman_image(spectroscopy_data, confocal_data):\n def __init__(self, measurement_type=None, file_extension='txt',\n data_source='import', directory=None, spectral_data=None,\n decimals_coordinates=1, file_names=None, **kwargs):\n self.data_source = data_source\n self.kwargs = kwargs\n\n self.directory = directory\n self.measurement_type = measurement_type\n self.file_extension = file_extension\n self.decimals_coordinates = decimals_coordinates\n self.coord_conversion_factor = int(10**self.decimals_coordinates)\n self.file_names = file_names\n\n if data_source == 'import':\n # imports images into self.spectral_data\n self.__import_data()\n elif data_source == 'DataFrame':\n self.spectral_data = spectral_data\n index_frame = self.spectral_data.index.to_frame()\n index_frame.columns = ['x_coded', 'y_coded', 'z_coded']\n new_index = pd.MultiIndex.from_frame(\n (index_frame*self.coord_conversion_factor).astype(np.int64))\n self.spectral_data.index = new_index\n\n if self.spectral_data.columns[1] < self.spectral_data.columns[0]:\n self.spectral_data = self.spectral_data.iloc[:, ::-1]\n\n self.reset_processed_data()\n self.wavenumbers = self.spectral_data.columns.to_numpy()\n self.baseline_data = {}\n self.monochrome_data = {}\n\n ###############################\n##### basic methods #############################################\n ###############################\n\n def __import_data(self):\n if self.file_names is None:\n self.file_names = glob.glob(\n self.directory + '*.' 
+ self.file_extension)\n\n self.file_list = pd.DataFrame(self.file_names, columns=['file_name'],\n index=np.arange(len(self.file_names)))\n self.file_list['x_coded'] = np.zeros(len(self.file_names), dtype=int)\n self.file_list['y_coded'] = np.zeros(len(self.file_names), dtype=int)\n self.file_list['z_coded'] = np.zeros(len(self.file_names), dtype=int)\n\n if self.measurement_type in ['Raman_volume', 'Raman_x_scan',\n 'Raman_y_scan', 'Raman_z_scan',\n 'Raman_xy_scan', 'Raman_single_spectrum']:\n if self.measurement_type in ['Raman_volume', 'Raman_x_scan',\n 'Raman_xy_scan']:\n self.file_list.iloc[:, 1] = (\n pd.to_numeric(\n self.file_list.iloc[:, 0].str.extract(\n r'__X_([-*\\d*.*\\d*]*)\\__Y_', expand=False)) *\n self.coord_conversion_factor).astype(int)\n if self.measurement_type in ['Raman_volume', 'Raman_y_scan']:\n self.file_list.iloc[:, 2] = (\n pd.to_numeric(self.file_list.iloc[:, 0].str.extract(\n r'__Y_([-*\\d*.*\\d*]*)\\__Z_', expand=False)) *\n self.coord_conversion_factor).astype(int)\n if self.measurement_type in ['Raman_xy_scan']:\n self.file_list.iloc[:, 2] = (\n pd.to_numeric(self.file_list.iloc[:, 0].str.extract(\n r'__Y_([-*\\d*.*\\d*]*)\\__', expand=False)) *\n self.coord_conversion_factor).astype(int)\n if self.measurement_type in ['Raman_volume', 'Raman_z_scan']:\n self.file_list.iloc[:, 3] = (\n pd.to_numeric(self.file_list.iloc[:, 0].str.extract(\n r'__Z_([-*\\d*.*\\d*]*)\\__', expand=False)) *\n self.coord_conversion_factor).astype(int)\n\n self.file_list = self.file_list.sort_values(\n by=['z_coded', 'y_coded', 'x_coded'])\n self.file_list.index = pd.RangeIndex(len(self.file_list.index))\n\n wavenumbers = np.fromfile(\n self.file_list['file_name'][0], sep=' ')[::2]\n intensities = np.zeros((len(self.file_list.index),\n wavenumbers.size))\n\n for index, curr_index in enumerate(tqdm(self.file_list.index)):\n intensities[index] = np.fromfile(\n self.file_list.iloc[index, 0], sep=' ')[1::2]\n\n # Inline_IR and LSM still have to get their own classes\n elif self.measurement_type == 'Inline_IR':\n spectrum_data = spc.File(self.file_list.iloc[0, 0])\n number_of_spectra = len(spectrum_data.sub)\n wavenumbers = spectrum_data.x\n intensities = np.zeros((number_of_spectra, len(spectrum_data.x)))\n time_data = np.zeros(number_of_spectra)\n\n for index, curr_spec in enumerate(tqdm(spectrum_data.sub)):\n intensities[index, :] = curr_spec.y\n time_data[index] = curr_spec.subtime\n\n self.file_list = self.file_list.loc[\n self.file_list.index.repeat(number_of_spectra)].reset_index(\n drop=True)\n self.file_list.iloc[:, 1] = (pd.Series(time_data) *\n self.coord_conversion_factor).astype(\n int)\n\n # Is still experimental, especially correct coordinates are missing and\n # possibly not working for not square images\n elif self.measurement_type == 'LSM':\n # read first image to get image dimensions\n first_image = imageio.imread(self.file_list.iloc[0, 0])\n pixels_per_image = np.shape(first_image)[0]*np.shape(\n first_image)[1]\n number_of_images = len(self.file_list.index)\n\n intensities = np.zeros((number_of_images*pixels_per_image,\n np.shape(first_image)[2]), dtype='uint8')\n z_coords = np.repeat(np.arange(number_of_images), pixels_per_image)\n x_coords = np.tile(np.repeat(np.arange(np.shape(first_image)[0]),\n np.shape(first_image)[1]),\n number_of_images)\n y_coords = np.tile(np.tile(np.arange(np.shape(first_image)[1]),\n np.shape(first_image)[0]),\n number_of_images)\n\n wavenumbers = np.arange(np.shape(first_image)[2])\n\n for index, curr_file in 
enumerate(tqdm(self.file_list.iloc[:, 0])):\n intensities[index*pixels_per_image:(index+1)*pixels_per_image, :] = np.reshape(imageio.imread(curr_file), (-1, 3))\n\n self.file_list = pd.DataFrame(np.stack(\n [np.repeat(self.file_list.iloc[:, 0], pixels_per_image),\n x_coords, y_coords, z_coords]).T,\n columns=self.file_list.columns)\n\n hyperspectral_image_index = pd.MultiIndex.from_frame(\n self.file_list.iloc[:, 1:4])\n self.spectral_data = pd.DataFrame(\n intensities, index=hyperspectral_image_index,\n columns=np.around(wavenumbers, 2))\n"
] | [
[
"numpy.fromfile",
"pandas.MultiIndex.from_frame",
"pandas.Series",
"numpy.arange",
"numpy.around",
"numpy.shape",
"numpy.repeat",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
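
raman_image above sidesteps floating-point index keys by multiplying each spatial coordinate by 10**decimals_coordinates and storing the result as an integer MultiIndex ('x_coded', 'y_coded', 'z_coded'). A small sketch of that coding step under the same one-decimal assumption; the coordinates and intensities below are made up for illustration:

import numpy as np
import pandas as pd

decimals_coordinates = 1
coord_conversion_factor = int(10**decimals_coordinates)

# made-up stage coordinates with one decimal of precision
coords = pd.DataFrame({'x_coded': [0.0, 0.5, 1.0],
                       'y_coded': [0.0, 0.0, 0.5],
                       'z_coded': [0.0, 0.0, 0.0]})

# integer coding: 0.5 -> 5, 1.0 -> 10, so index lookups never hit float rounding
index = pd.MultiIndex.from_frame((coords * coord_conversion_factor).astype(np.int64))
spectral_data = pd.DataFrame(np.random.rand(3, 4), index=index,
                             columns=np.around(np.linspace(200.0, 500.0, 4), 2))
print(spectral_data.loc[(5, 0, 0)])  # row at x=0.5, y=0.0, z=0.0
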
tcq1/programmingfordatascience | [
"20a6a251bd2309b0b07d9779fd4b5b1f1c4ee540"
] | [
"04_regression/regression.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom scipy.optimize import minimize\nfrom timeit import default_timer as timer\n\n\nlam1 = None\nlam2 = None\nlam3 = None\n\n\ndef get_random_start_params(min_val, max_val):\n \"\"\" Generates random start values for x and y\n \"\"\"\n return np.array([np.random.randint(min_val, max_val), np.random.randint(min_val, max_val)])\n\n\ndef ols(function, x, y):\n \"\"\" OLS algorithm using the scipy.optimize.minimize function\n \"\"\"\n result = minimize(function, x0=np.array(get_random_start_params(0, len(x))), args=(x, y))\n\n return result.x, result.fun\n\n\ndef sse(a, x, y):\n \"\"\" Calculates sum of squared errors of a function a\n \"\"\"\n result = np.sum((y - x @ a)**2)\n # print('SSE = {}'.format(result))\n\n return result\n\n\ndef sse_ridge(a, x, y):\n \"\"\" Calculates the solution using the formula for Ridge Regression\n \"\"\"\n result = sse(a, x, y) + lam1 * np.sum(a**2)\n # print('SSE Ridge: {}'.format(result))\n\n return result\n\n\ndef sse_lasso(a, x, y):\n \"\"\" Calculates the solution using the formula for Lasso Regression\n \"\"\"\n result = sse(a, x, y) + lam1 * np.sum(abs(a))\n # print('SSE Lasso: {}'.format(result))\n\n return result\n\n\ndef sse_net(a, x, y):\n \"\"\" Calculates the solution using the formula for Elastic Net\n \"\"\"\n result = sse(a, x, y) + lam2 * np.sum(a**2) + lam3 * (np.sum(abs(a)))\n # print('SSE Net: {}'.format(result))\n return result\n\n\ndef matrix_solution(x, y):\n \"\"\" Calculates the solution using matrix calculations\n \"\"\"\n return np.linalg.inv(x.transpose() @ x) @ x.transpose() @ y\n\n\ndef main():\n # prepare data\n file_path = 'data-OLS.csv'\n x, y = np.loadtxt(file_path, delimiter=',', dtype=float, skiprows=1, unpack=True)\n x = np.column_stack((x, np.ones(len(x))))\n\n # set lambda values\n global lam1, lam2, lam3\n lam1 = 10\n lam2 = 5\n lam3 = 5\n\n # perform calculations\n # benchmark ols\n ols_start = timer()\n m0, n0 = ols(sse, x, y)[0]\n ols_end = timer()\n # ridge regression\n m1, n1 = ols(sse_ridge, x, y)[0]\n # lasso regression\n m2, n2 = ols(sse_lasso, x, y)[0]\n # elastic net\n m3, n3 = ols(sse_net, x, y)[0]\n # benchmark matrix\n matrix_start = timer()\n m4, n4 = matrix_solution(x, y)\n matrix_end = timer()\n\n # print out times and functions\n print('Performance of OLS algorithm: {}'.format(ols_end-ols_start))\n print('Performance of matrix algorithm: {}'.format(matrix_end-matrix_start))\n\n solutions = {'sse': '{}x + {}'.format(m0, n0), 'ridge': '{}x + {}'.format(m1, n1),\n 'lasso': '{}x + {}'.format(m2, n2), 'net': '{}x + {}'.format(m3, n3),\n 'matrix': '{}x + {}'.format(m4, n4)}\n\n for key in solutions.keys():\n print('Function of {}: {}'.format(key, solutions[key]))\n\n # plot\n x = x[:, 0]\n\n fig, ax = plt.subplots(figsize=(16, 8))\n ax.scatter(x, y, s=8)\n ax.plot(x, m0 * x + n0, c='C0', label='sse')\n ax.plot(x, m1 * x + n1, c='C1', label='ridge')\n ax.plot(x, m2 * x + n2, c='C2', label='lasso')\n ax.plot(x, m3 * x + n3, c='C3', label='net')\n ax.plot(x, m4 * x + n4, c='C4', label='matrix')\n ax.legend(fontsize=12)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.loadtxt",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
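
regression.py above minimises sse(a) + lam1 * sum(a**2) (the ridge objective) with scipy.optimize.minimize and compares it against the closed-form matrix solution. A compact standalone version of that comparison on synthetic data; the lambda value, seed, and sample size are arbitrary choices, not taken from the file:

import numpy as np
from scipy.optimize import minimize

np.random.seed(0)
x = np.column_stack((np.linspace(0, 10, 50), np.ones(50)))      # design matrix with intercept column
y = 3.0 * x[:, 0] + 1.0 + np.random.normal(scale=0.5, size=50)  # synthetic line plus noise

def sse_ridge(a, x, y, lam=10.0):
    # sum of squared errors plus an L2 penalty on the coefficients
    return np.sum((y - x @ a) ** 2) + lam * np.sum(a ** 2)

ridge_fit = minimize(sse_ridge, x0=np.zeros(2), args=(x, y)).x
ols_fit = np.linalg.inv(x.T @ x) @ x.T @ y  # closed-form OLS solution

print('ridge:', ridge_fit)  # coefficients shrunk slightly towards zero relative to OLS
print('ols:  ', ols_fit)
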
JJuOn/Few-shot_Class_Incremental_Learning | [
"a2178051a6fefcd73b60f5e4236116bf828a801c"
] | [
"util.py"
] | [
"import torch\nimport torch.nn as nn\nimport numpy as np\nimport pickle\nimport os\nimport argparse\n\n\n\nclass LabelSmoothing(nn.Module):\n \"\"\"\n NLL loss with label smoothing.\n \"\"\"\n def __init__(self, smoothing=0.0):\n \"\"\"\n Constructor for the LabelSmoothing module.\n :param smoothing: label smoothing factor\n \"\"\"\n super(LabelSmoothing, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n\n def forward(self, x, target):\n logprobs = torch.nn.functional.log_softmax(x, dim=-1)\n\n nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))\n nll_loss = nll_loss.squeeze(1)\n smooth_loss = -logprobs.mean(dim=-1)\n loss = self.confidence * nll_loss + self.smoothing * smooth_loss\n return loss.mean()\n\nclass BCEWithLogitsLoss(nn.Module):\n def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None, num_classes=64):\n super(BCEWithLogitsLoss, self).__init__()\n self.num_classes = num_classes\n self.criterion = nn.BCEWithLogitsLoss(weight=weight,\n size_average=size_average,\n reduce=reduce,\n reduction=reduction,\n pos_weight=pos_weight)\n def forward(self, input, target):\n target_onehot = F.one_hot(target, num_classes=self.num_classes)\n return self.criterion(input, target_onehot)\n\ndef adjust_learning_rate(epoch, opt, optimizer):\n \"\"\"Sets the learning rate to the initial LR decayed by decay rate every steep step\"\"\"\n steps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))\n if steps > 0:\n new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n\ndef create_and_save_embeds(opt, vocab):\n\n word_embeds = opt.word_embed_path\n dim = opt.word_embed_size\n embed_pth = \"{0}_dim{1}.pickle\".format(opt.dataset, dim)\n\n if not os.path.isdir(word_embeds):\n os.makedirs(word_embeds)\n\n words = []\n for token in vocab:\n words = words + token.split(' ')\n\n embed_pth = os.path.join(word_embeds, embed_pth)\n if os.path.exists(embed_pth):\n print(\"Found {}.\".format(embed_pth))\n return\n else:\n print(\"Loading dictionary...\")\n from torchnlp.word_to_vector import Vico\n pretrained_embedding = Vico(name='linear',\n dim=dim,\n is_include=lambda w: w in set(words))\n\n embeds = []\n keys = pretrained_embedding.token_to_index.keys()\n for w in keys:\n embeds.append(pretrained_embedding[w].numpy())\n d = dict(zip(keys, embeds))\n\n # Pickle the dictionary for later load\n print(\"Pickling word embeddings...\")\n with open(embed_pth, 'wb') as f:\n pickle.dump(d, f)\n print(\"Pickled.\")\n\n\ndef create_and_save_descriptions(opt, vocab):\n\n if not os.path.isdir(opt.description_embed_path):\n os.makedirs(opt.description_embed_path)\n\n embed_pth = os.path.join(opt.description_embed_path,\n \"{0}_{1}_layer{2}_prefix_{3}.pickle\".format(opt.dataset,\n opt.desc_embed_model,\n opt.transformer_layer,\n opt.prefix_label))\n\n if os.path.exists(embed_pth):\n return\n else:\n print(\"Path {} not found.\".format(embed_pth))\n with torch.no_grad():\n print(\"Creating tokenizer...\")\n from transformers import AutoTokenizer, AutoModelForMaskedLM\n tokenizer = AutoTokenizer.from_pretrained(opt.desc_embed_model)\n print(\"Initializing {}...\".format(opt.desc_embed_model))\n model = AutoModelForMaskedLM.from_pretrained(opt.desc_embed_model, output_hidden_states=True)\n\n # Create wordnet\n from nltk.corpus import wordnet\n defs = [wordnet.synsets(v.replace(\" \", \"_\"))[0].definition() for v in vocab]\n # defs = torch.cat(defs, 0)\n embeds = 
[]\n for i,d in enumerate(defs):\n inp = vocab[i]+\" \"+d if opt.prefix_label else d\n inp = tokenizer(inp, return_tensors=\"pt\")\n outputs = model(**inp)\n hidden_states = outputs[1]\n embed = torch.mean(hidden_states[opt.transformer_layer], dim=(0,1))\n embeds.append(embed)\n\n d = dict(zip(vocab, embeds))\n # Pickle the dictionary for later load\n print(\"Pickling description embeddings from {}...\".format(opt.desc_embed_model))\n with open(embed_pth, 'wb') as f:\n pickle.dump(d, f)\n print(\"Pickled.\")\n\ndef restricted_float(x):\n try:\n x = float(x)\n except ValueError:\n raise argparse.ArgumentTypeError(\"%r not a floating-point literal\" % (x,))\n\n if x < 0.0 or x > 1.0:\n raise argparse.ArgumentTypeError(\"%r not in range [0.0, 1.0]\"%(x,))\n return x\n"
] | [
[
"torch.mean",
"torch.nn.functional.log_softmax",
"numpy.asarray",
"torch.nn.BCEWithLogitsLoss",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
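
LabelSmoothing above combines the usual NLL term with the mean log-probability: loss = (1 - smoothing) * nll + smoothing * smooth. A short self-contained check of that forward pass, assuming PyTorch is available; the logits and smoothing factor are arbitrary:

import torch
import torch.nn.functional as F

smoothing = 0.1
confidence = 1.0 - smoothing

x = torch.tensor([[2.0, 0.5, -1.0]])   # arbitrary logits: one sample, three classes
target = torch.tensor([0])

logprobs = F.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)  # standard NLL term
smooth_loss = -logprobs.mean(dim=-1)                                       # uniform-target term
loss = (confidence * nll_loss + smoothing * smooth_loss).mean()
print(loss)  # same value LabelSmoothing(smoothing=0.1)(x, target) would return
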
mayi140611/mayiexamples | [
"221cf9e8916d81198df7355894ec59dc334ae0af"
] | [
"models_deep_learning/AutodiffEngine/autodiff_test.py"
] | [
"import autodiff as ad\nimport numpy as np\n\n\ndef test_identity():\n x2 = ad.Variable(name=\"x2\")\n y = x2\n\n grad_x2, = ad.gradients(y, [x2])\n\n executor = ad.Executor([y, grad_x2])\n x2_val = 2 * np.ones(3)\n y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, x2_val)\n assert np.array_equal(grad_x2_val, np.ones_like(x2_val))\n\n\ndef test_add_by_const():\n x2 = ad.Variable(name=\"x2\")\n y = 5 + x2\n\n grad_x2, = ad.gradients(y, [x2])\n\n executor = ad.Executor([y, grad_x2])\n x2_val = 2 * np.ones(3)\n y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, x2_val + 5)\n assert np.array_equal(grad_x2_val, np.ones_like(x2_val))\n\n\ndef test_sub_by_const():\n x2 = ad.Variable(name='x2')\n y = 3 - x2\n grad_x2, = ad.gradients(y, [x2])\n executor = ad.Executor([y, grad_x2])\n x2_val = 2 * np.ones(3)\n y_val, grad_x2_val= executor.run(feed_dict = {x2 : x2_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, 3 - x2_val)\n assert np.array_equal(grad_x2_val, -np.ones_like(x2_val))\n\n\ndef test_neg():\n x1 = ad.Variable(name='x1')\n x2 = ad.Variable(name='x2')\n\n y = -x2 + x1\n \n grad_x1, grad_x2 = ad.gradients(y, [x1, x2])\n executor = ad.Executor([y, grad_x1, grad_x2])\n x2_val = 2 * np.ones(3)\n x1_val = 3 * np.ones(3)\n y_val, grad_x1_val, grad_x2_val = executor.run(feed_dict = {x1: x1_val, x2 : x2_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, -x2_val + x1_val)\n assert np.array_equal(grad_x2_val, -np.ones_like(x2_val))\n assert np.array_equal(grad_x1_val, np.ones_like(x1_val))\n\n\ndef test_mul_by_const():\n x2 = ad.Variable(name = \"x2\")\n y = 5 * x2\n\n grad_x2, = ad.gradients(y, [x2])\n\n executor = ad.Executor([y, grad_x2])\n x2_val = 2 * np.ones(3)\n y_val, grad_x2_val= executor.run(feed_dict = {x2 : x2_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, x2_val * 5)\n assert np.array_equal(grad_x2_val, np.ones_like(x2_val) * 5)\n\n\ndef test_div_two_vars():\n x1 = ad.Variable(name = 'x1')\n x2 = ad.Variable(name = 'x2')\n \n y = x1 / x2\n\n grad_x1, grad_x2 = ad.gradients(y, [x1, x2])\n\n executor = ad.Executor([y, grad_x1, grad_x2])\n x1_val = 2 * np.ones(3)\n x2_val = 5 * np.ones(3)\n y_val, grad_x1_val, grad_x2_val= executor.run(feed_dict = {x1: x1_val, x2 : x2_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, x1_val / x2_val)\n assert np.array_equal(grad_x1_val, np.ones_like(x1_val) / x2_val)\n assert np.array_equal(grad_x2_val, -x1_val / (x2_val * x2_val))\n\n\ndef test_div_by_const():\n x2 = ad.Variable(name = \"x2\")\n y = 5 / x2\n\n grad_x2, = ad.gradients(y, [x2])\n\n executor = ad.Executor([y, grad_x2])\n x2_val = 2 * np.ones(3)\n y_val, grad_x2_val= executor.run(feed_dict = {x2 : x2_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, 5 / x2_val)\n print(grad_x2_val)\n print(-5 / (x2_val * x2_val))\n assert np.array_equal(grad_x2_val, -5 / (x2_val * x2_val))\n\n\ndef test_add_two_vars():\n x2 = ad.Variable(name = \"x2\")\n x3 = ad.Variable(name = \"x3\")\n y = x2 + x3\n\n grad_x2, grad_x3 = ad.gradients(y, [x2, x3])\n \n executor = ad.Executor([y, grad_x2, grad_x3])\n x2_val = 2 * np.ones(3)\n x3_val = 3 * np.ones(3)\n y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x2: x2_val, x3: x3_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, x2_val + x3_val)\n assert np.array_equal(grad_x2_val, 
np.ones_like(x2_val))\n assert np.array_equal(grad_x3_val, np.ones_like(x3_val))\n\n\ndef test_mul_two_vars():\n x2 = ad.Variable(name = \"x2\")\n x3 = ad.Variable(name = \"x3\")\n y = x2 * x3\n \n grad_x2, grad_x3 = ad.gradients(y, [x2, x3])\n\n executor = ad.Executor([y, grad_x2, grad_x3])\n x2_val = 2 * np.ones(3)\n x3_val = 3 * np.ones(3)\n y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x2: x2_val, x3: x3_val})\n \n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, x2_val * x3_val)\n assert np.array_equal(grad_x2_val, x3_val)\n assert np.array_equal(grad_x3_val, x2_val)\n\n\ndef test_add_mul_mix_1():\n x1 = ad.Variable(name = \"x1\")\n x2 = ad.Variable(name = \"x2\")\n x3 = ad.Variable(name = \"x3\")\n y = x1 + x2 * x3 * x1\n \n grad_x1, grad_x2, grad_x3 = ad.gradients(y, [x1, x2, x3])\n \n executor = ad.Executor([y, grad_x1, grad_x2, grad_x3])\n x1_val = 1 * np.ones(3)\n x2_val = 2 * np.ones(3)\n x3_val = 3 * np.ones(3)\n y_val, grad_x1_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x1 : x1_val, x2: x2_val, x3 : x3_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, x1_val + x2_val * x3_val)\n assert np.array_equal(grad_x1_val, np.ones_like(x1_val) + x2_val * x3_val)\n assert np.array_equal(grad_x2_val, x3_val * x1_val)\n assert np.array_equal(grad_x3_val, x2_val * x1_val)\n\n\ndef test_add_mul_mix_2():\n x1 = ad.Variable(name = \"x1\")\n x2 = ad.Variable(name = \"x2\")\n x3 = ad.Variable(name = \"x3\")\n x4 = ad.Variable(name = \"x4\")\n y = x1 + x2 * x3 * x4\n \n grad_x1, grad_x2, grad_x3, grad_x4 = ad.gradients(y, [x1, x2, x3, x4])\n \n executor = ad.Executor([y, grad_x1, grad_x2, grad_x3, grad_x4])\n x1_val = 1 * np.ones(3)\n x2_val = 2 * np.ones(3)\n x3_val = 3 * np.ones(3)\n x4_val = 4 * np.ones(3)\n y_val, grad_x1_val, grad_x2_val, grad_x3_val, grad_x4_val = executor.run(feed_dict = {x1 : x1_val, x2: x2_val, x3 : x3_val, x4 : x4_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, x1_val + x2_val * x3_val * x4_val)\n assert np.array_equal(grad_x1_val, np.ones_like(x1_val))\n assert np.array_equal(grad_x2_val, x3_val * x4_val)\n assert np.array_equal(grad_x3_val, x2_val * x4_val)\n assert np.array_equal(grad_x4_val, x2_val * x3_val)\n\n\ndef test_add_mul_mix_3():\n x2 = ad.Variable(name = \"x2\")\n x3 = ad.Variable(name = \"x3\")\n z = x2 * x2 + x2 + x3 + 3\n y = z * z + x3\n \n grad_x2, grad_x3 = ad.gradients(y, [x2, x3])\n\n executor = ad.Executor([y, grad_x2, grad_x3])\n x2_val = 2 * np.ones(3)\n x3_val = 3 * np.ones(3)\n y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x2: x2_val, x3: x3_val})\n\n z_val = x2_val * x2_val + x2_val + x3_val + 3\n expected_yval = z_val * z_val + x3_val\n expected_grad_x2_val = 2 * (x2_val * x2_val + x2_val + x3_val + 3) * (2 * x2_val + 1)\n expected_grad_x3_val = 2 * (x2_val * x2_val + x2_val + x3_val + 3) + 1\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, expected_yval)\n assert np.array_equal(grad_x2_val, expected_grad_x2_val)\n assert np.array_equal(grad_x3_val, expected_grad_x3_val)\n\n\ndef test_grad_of_grad():\n x2 = ad.Variable(name = \"x2\")\n x3 = ad.Variable(name = \"x3\")\n y = x2 * x2 + x2 * x3\n \n grad_x2, grad_x3 = ad.gradients(y, [x2, x3])\n grad_x2_x2, grad_x2_x3 = ad.gradients(grad_x2, [x2, x3])\n\n executor = ad.Executor([y, grad_x2, grad_x3, grad_x2_x2, grad_x2_x3])\n x2_val = 2 * np.ones(3)\n x3_val = 3 * np.ones(3)\n y_val, grad_x2_val, grad_x3_val, grad_x2_x2_val, grad_x2_x3_val = executor.run(feed_dict = {x2: x2_val, x3: 
x3_val})\n\n expected_yval = x2_val * x2_val + x2_val * x3_val\n expected_grad_x2_val = 2 * x2_val + x3_val \n expected_grad_x3_val = x2_val\n expected_grad_x2_x2_val = 2 * np.ones_like(x2_val)\n expected_grad_x2_x3_val = 1 * np.ones_like(x2_val)\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, expected_yval)\n assert np.array_equal(grad_x2_val, expected_grad_x2_val)\n assert np.array_equal(grad_x3_val, expected_grad_x3_val)\n assert np.array_equal(grad_x2_x2_val, expected_grad_x2_x2_val)\n assert np.array_equal(grad_x2_x3_val, expected_grad_x2_x3_val)\n\n\ndef test_matmul_two_vars():\n x2 = ad.Variable(name = \"x2\")\n x3 = ad.Variable(name = \"x3\")\n y = ad.matmul_op(x2, x3)\n\n grad_x2, grad_x3 = ad.gradients(y, [x2, x3])\n \n executor = ad.Executor([y, grad_x2, grad_x3])\n x2_val = np.array([[1, 2], [3, 4], [5, 6]]) # 3x2\n x3_val = np.array([[7, 8, 9], [10, 11, 12]]) # 2x3\n\n y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict = {x2: x2_val, x3: x3_val})\n\n expected_yval = np.matmul(x2_val, x3_val)\n expected_grad_x2_val = np.matmul(np.ones_like(expected_yval), np.transpose(x3_val))\n expected_grad_x3_val = np.matmul(np.transpose(x2_val), np.ones_like(expected_yval))\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, expected_yval)\n assert np.array_equal(grad_x2_val, expected_grad_x2_val)\n assert np.array_equal(grad_x3_val, expected_grad_x3_val)\n\n\ndef test_log_op():\n x1 = ad.Variable(name = \"x1\")\n y = ad.log(x1)\n\n grad_x1, = ad.gradients(y, [x1])\n\n executor = ad.Executor([y, grad_x1])\n x1_val = 2 * np.ones(3)\n y_val, grad_x1_val= executor.run(feed_dict = {x1 : x1_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, np.log(x1_val))\n assert np.array_equal(grad_x1_val, 1 / x1_val)\n\n\ndef test_log_two_vars():\n x1 = ad.Variable(name = \"x1\")\n x2 = ad.Variable(name = \"x2\")\n y = ad.log(x1 * x2)\n\n grad_x1, grad_x2 = ad.gradients(y, [x1, x2])\n\n executor = ad.Executor([y, grad_x1, grad_x2])\n x1_val = 2 * np.ones(3)\n x2_val = 4 * np.ones(3)\n y_val, grad_x1_val, grad_x2_val = executor.run(feed_dict = {x1 : x1_val, x2: x2_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, np.log(x1_val * x2_val))\n assert np.array_equal(grad_x1_val, x2_val / (x1_val * x2_val))\n assert np.array_equal(grad_x2_val, x1_val / (x1_val * x2_val))\n\n\ndef test_exp_op():\n x1 = ad.Variable(name = \"x1\")\n y = ad.exp(x1)\n\n grad_x1, = ad.gradients(y, [x1])\n\n executor = ad.Executor([y, grad_x1])\n x1_val = 2 * np.ones(3)\n y_val, grad_x1_val= executor.run(feed_dict = {x1 : x1_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, np.exp(x1_val))\n assert np.array_equal(grad_x1_val, np.exp(x1_val))\n\n\ndef test_exp_mix_op():\n x1 = ad.Variable(name=\"x1\")\n x2 = ad.Variable(name=\"x2\")\n y = ad.exp(ad.log(x1 * x2) + 1)\n\n grad_x1, grad_x2 = ad.gradients(y, [x1, x2])\n\n executor = ad.Executor([y, grad_x1, grad_x2])\n x1_val = 2 * np.ones(3)\n x2_val = 4 * np.ones(3)\n y_val, grad_x1_val, grad_x2_val = executor.run(feed_dict = {x1 : x1_val, x2: x2_val})\n\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, np.exp(np.log(x1_val * x2_val) + 1))\n assert np.array_equal(grad_x1_val, y_val * x2_val / (x1_val * x2_val))\n assert np.array_equal(grad_x2_val, y_val * x1_val / (x1_val * x2_val))\n\n\ndef test_reduce_sum():\n x1 = ad.Variable(name = \"x1\")\n y = ad.reduce_sum(x1)\n\n grad_x1, = ad.gradients(y, [x1])\n\n executor = ad.Executor([y, grad_x1])\n x1_val = 2 * np.ones(3)\n y_val, 
grad_x1_val= executor.run(feed_dict = {x1 : x1_val})\n \n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, np.sum(x1_val))\n assert np.array_equal(grad_x1_val, np.ones_like(x1_val))\n\n\ndef test_reduce_sum_mix():\n x1 = ad.Variable(name = \"x1\")\n y = ad.exp(ad.reduce_sum(x1))\n\n grad_x1, = ad.gradients(y, [x1])\n\n executor = ad.Executor([y, grad_x1])\n x1_val = 2 * np.ones(3)\n y_val, grad_x1_val= executor.run(feed_dict = {x1 : x1_val})\n expected_y_val = np.exp(np.sum(x1_val))\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, expected_y_val)\n assert np.array_equal(grad_x1_val, expected_y_val * np.ones_like(x1_val))\n\n y2 = ad.log(ad.reduce_sum(x1))\n grad_x2, = ad.gradients(y2, [x1])\n executor2 = ad.Executor([y2, grad_x2])\n y2_val, grad_x2_val = executor2.run(feed_dict={x1: x1_val})\n expected_y2_val = np.log(np.sum(x1_val))\n assert isinstance(y2, ad.Node)\n assert np.array_equal(y2_val, expected_y2_val)\n assert np.array_equal(grad_x2_val, (1/np.sum(x1_val)) * np.ones_like(x1_val))\n\n\ndef test_mix_all():\n x1 = ad.Variable(name=\"x1\")\n y = 1/(1+ad.exp(-ad.reduce_sum(x1)))\n\n grad_x1, = ad.gradients(y, [x1])\n\n executor = ad.Executor([y, grad_x1])\n x1_val = 2 * np.ones(3)\n y_val, grad_x1_val= executor.run(feed_dict = {x1 : x1_val})\n expected_y_val = 1/(1+np.exp(-np.sum(x1_val)))\n expected_y_grad = expected_y_val * (1 - expected_y_val) * np.ones_like(x1_val)\n\n print(expected_y_grad)\n print(grad_x1_val)\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, expected_y_val)\n assert np.sum(np.abs(grad_x1_val - expected_y_grad)) < 1E-10\n\n\ndef test_logistic():\n x1 = ad.Variable(name=\"x1\")\n w = ad.Variable(name='w')\n y = 1/(1+ad.exp(-ad.reduce_sum(w * x1)))\n\n grad_w, = ad.gradients(y, [w])\n\n executor = ad.Executor([y, grad_w])\n x1_val = 3 * np.ones(3)\n w_val = 3 * np.zeros(3)\n y_val, grad_w_val = executor.run(feed_dict={x1: x1_val, w: w_val})\n expected_y_val = 1/(1 + np.exp(-np.sum(w_val * x1_val)))\n expected_y_grad = expected_y_val * (1 - expected_y_val) * x1_val\n\n print(expected_y_grad)\n print(grad_w_val)\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, expected_y_val)\n assert np.sum(np.abs(grad_w_val - expected_y_grad)) < 1E-7\n\n\ndef test_log_logistic():\n x1 = ad.Variable(name=\"x1\")\n w = ad.Variable(name='w')\n y = ad.log(1/(1+ad.exp(-ad.reduce_sum(w * x1))))\n\n grad_w, = ad.gradients(y, [w])\n\n executor = ad.Executor([y, grad_w])\n x1_val = 3 * np.ones(3)\n w_val = 3 * np.zeros(3)\n y_val, grad_w_val = executor.run(feed_dict={x1: x1_val, w: w_val})\n logistic = 1/(1+np.exp(-np.sum(w_val * x1_val)))\n expected_y_val = np.log(logistic)\n expected_y_grad = (1 - logistic) * x1_val\n\n print(expected_y_grad)\n print(grad_w_val)\n assert isinstance(y, ad.Node)\n assert np.array_equal(y_val, expected_y_val)\n assert np.sum(np.abs(grad_w_val - expected_y_grad)) < 1E-7\n\n\ndef test_logistic_loss():\n x = ad.Variable(name='x')\n w = ad.Variable(name='w')\n y = ad.Variable(name='y')\n\n h = 1 / (1 + ad.exp(-ad.reduce_sum(w * x)))\n L = y * ad.log(h) + (1 - y) * ad.log(1 - h)\n w_grad, = ad.gradients(L, [w])\n executor = ad.Executor([L, w_grad])\n\n y_val = 0\n x_val = np.array([2, 3, 4])\n w_val = np.random.random(3)\n\n L_val, w_grad_val = executor.run(feed_dict={x: x_val, y: y_val, w: w_val})\n\n logistic = 1 / (1 + np.exp(-np.sum(w_val * x_val)))\n expected_L_val = y_val * np.log(logistic) + (1 - y_val) * np.log(1 - logistic)\n expected_w_grad = (y_val - logistic) * x_val\n\n print(L_val)\n 
print(expected_L_val)\n print(expected_w_grad)\n print(w_grad_val)\n\n assert expected_L_val == L_val\n assert np.sum(np.abs(expected_w_grad - w_grad_val)) < 1E-9\n"
] | [
[
"numpy.log",
"numpy.random.random",
"numpy.ones_like",
"numpy.array_equal",
"numpy.abs",
"numpy.matmul",
"numpy.ones",
"numpy.transpose",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
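
The tests above assert closed-form gradients such as d(x2*x3)/dx2 = x3 and d(x2*x2 + x2*x3)/dx2 = 2*x2 + x3. The same identities can be spot-checked numerically with a central finite difference over the summed output, using NumPy only (the step size and tolerance are loose, arbitrary choices):

import numpy as np

def numeric_grad(f, x, eps=1e-6):
    # central finite-difference gradient of a scalar-valued function of one array
    g = np.zeros_like(x)
    for i in range(x.size):
        d = np.zeros_like(x)
        d.flat[i] = eps
        g.flat[i] = (f(x + d) - f(x - d)) / (2 * eps)
    return g

x2 = 2 * np.ones(3)
x3 = 3 * np.ones(3)

# y = x2 * x3 (summed to a scalar): gradient w.r.t. x2 should equal x3
assert np.allclose(numeric_grad(lambda v: np.sum(v * x3), x2), x3, atol=1e-4)

# y = x2*x2 + x2*x3: gradient w.r.t. x2 should equal 2*x2 + x3
assert np.allclose(numeric_grad(lambda v: np.sum(v * v + v * x3), x2), 2 * x2 + x3, atol=1e-4)
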
ashaazami/keras-onnx | [
"4a220193a3e2d6eb3bcb76dcf3be39a4b1f84f09"
] | [
"keras2onnx/_builtin.py"
] | [
"###############################################################################\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n###############################################################################\nimport sys\nimport numbers\nimport tensorflow\nimport numpy as np\nfrom typing import Union\nfrom onnx import numpy_helper, mapping\nfrom .common.utils import count_dynamic_dim\nfrom .common.onnx_ops import apply_identity, apply_reshape, OnnxOperatorBuilder\nfrom .funcbook import converter_func, set_converters\nfrom .proto import keras\nfrom .proto.tfcompat import is_tf2\n\n\nclass TYPES:\n # tf-node types:\n Identity = 'Identity'\n Const = 'Const'\n Any = 'Any'\n All = 'All'\n BatchMatMul = 'BatchMatMul'\n BatchMatMulV2 = 'BatchMatMulV2'\n BatchToSpaceND = 'BatchToSpaceND'\n BiasAdd = 'BiasAdd'\n BiasAddV1 = 'BiasAddV1'\n Cast = 'Cast'\n ConcatV2 = 'ConcatV2'\n Conv1D = 'Conv1D'\n Conv2D = 'Conv2D'\n DepthwiseConv2dNative = 'DepthwiseConv2dNative'\n ExpandDims = 'ExpandDims'\n Fill = 'Fill'\n FusedBatchNorm = 'FusedBatchNorm'\n FusedBatchNormV2 = 'FusedBatchNormV2'\n FusedBatchNormV3 = 'FusedBatchNormV3'\n GatherNd = 'GatherNd'\n GatherV2 = 'GatherV2'\n GreaterEqual = 'GreaterEqual'\n LessEqual = 'LessEqual'\n LogSoftmax = 'LogSoftmax'\n MatMul = 'MatMul'\n Max = 'Max'\n Maximum = 'Maximum'\n Mean = 'Mean'\n Min = 'Min'\n Minimum = 'Minimum'\n NonMaxSuppressionV2 = 'NonMaxSuppressionV2'\n NonMaxSuppressionV3 = 'NonMaxSuppressionV3'\n NotEqual = 'NotEqual'\n Pack = 'Pack'\n Pad = 'Pad'\n PadV2 = 'PadV2'\n Prod = 'Prod'\n Range = 'Range'\n ReadVariableOp = 'ReadVariableOp'\n Reshape = 'Reshape'\n ResizeBilinear = 'ResizeBilinear'\n ResizeNearestNeighbor = 'ResizeNearestNeighbor'\n Round = 'Round'\n Rsqrt = 'Rsqrt'\n ScatterNd = 'ScatterNd'\n Select = 'Select'\n Shape = 'Shape'\n Size = 'Size'\n Slice = 'Slice'\n Softmax = 'Softmax'\n SpaceToBatchND = 'SpaceToBatchND'\n Split = 'Split'\n SplitV = 'SplitV'\n Square = 'Square'\n SquaredDifference = 'SquaredDifference'\n Squeeze = 'Squeeze'\n StridedSlice = 'StridedSlice'\n Sum = 'Sum'\n Tile = 'Tile'\n TopKV2 = 'TopKV2'\n Transpose = 'Transpose'\n Unpack = 'Unpack'\n VarHandleOp = 'VarHandleOp'\n VariableV2 = 'VariableV2'\n Where = 'Where'\n\n # converter internal types:\n TD_Reshape = '_reshape_timedistributed'\n\n\ndef is_placeholder_node(node):\n return len(node.inputs) == 0 and node.type in ['Placeholder', \"PlaceholderV2\", 'PlaceholderWithDefault'] and \\\n node.outputs[0].dtype.name != 'resource'\n\n\ndef tsname_to_node(name):\n return name.split(':')[0]\n\n\nNCHW_TO_NHWC = [0, 2, 3, 1]\nNHWC_TO_NCHW = [0, 3, 1, 2]\nHWCN_TO_NCHW = [3, 2, 0, 1]\nNCHW_TO_HWCN = [2, 3, 1, 0]\n\n\ndef _is_nhwc(node):\n return node.get_attr('data_format') == b'NHWC'\n\n\n_MAX_FOLDING_NODE_NUMBER = 15\n\n\ndef _count_input_nodes(tensor): # type: (tensorflow.Tensor)->int\n nodes_to_keep = set()\n node_inputs = [tensor.op]\n while node_inputs:\n nd_ = node_inputs[0]\n del node_inputs[0]\n if nd_ in nodes_to_keep:\n continue\n\n if is_placeholder_node(nd_):\n return -1\n nodes_to_keep.add(nd_)\n if len(nodes_to_keep) >= _MAX_FOLDING_NODE_NUMBER:\n return -1\n\n node_inputs.extend(in_.op for in_ in nd_.inputs)\n\n return len(nodes_to_keep)\n\n\ndef _cal_tensor_value(tensor): # type: (tensorflow.Tensor)->Union[np.ndarray, None]\n if _count_input_nodes(tensor) < 0:\n return None\n\n node = tensor.op\n if node.type in [\"Const\", \"ConstV2\"]:\n 
make_ndarray = tensorflow.make_ndarray\n np_arr = make_ndarray(node.get_attr(\"value\"))\n return np_arr\n else:\n try:\n cls_sess = tensorflow.Session if hasattr(tensorflow, 'Session') else tensorflow.compat.v1.Session\n with cls_sess(graph=node.graph) as sess:\n np_arr = sess.run(tensor)\n return np_arr\n except (ValueError, tensorflow.errors.InvalidArgumentError, tensorflow.errors.OpError):\n return None\n\n\ndef _cal_tensor_shape(tensor):\n if len(tensor.shape) > 0 and hasattr(tensor.shape[0], 'value'):\n return [x.value for x in tensor.shape]\n else:\n return list(tensor.shape)\n\n\ndef _to_onnx_type(dt_type):\n # TensorFlow data types integrate seamlessly with numpy\n return mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dt_type.as_numpy_dtype)]\n\n\ndef default_convert(scope, operator, container):\n apply_identity(scope, operator.inputs[0].full_name, operator.outputs[0].full_name, container)\n\n\n@converter_func(TYPES.Identity)\ndef convert_tf_identity(scope, operator, container):\n default_convert(scope, operator, container)\n\n\n@converter_func(TYPES.BatchToSpaceND)\ndef convert_tf_batch_to_space(scope, operator, container):\n node = operator.raw_operator\n oopb = OnnxOperatorBuilder(container, scope)\n blocksize = _cal_tensor_value(node.inputs[1])\n crops = _cal_tensor_value(node.inputs[2])\n if operator.target_opset <= 10 or (blocksize is not None and crops is not None):\n input_shape = _cal_tensor_shape(node.outputs[0])\n assert len(input_shape) in (4, 3)\n assert len(blocksize) == 2 and blocksize[0] == blocksize[1]\n\n if len(input_shape) == 3:\n unsqueeze_node_1 = oopb.apply_unsqueeze(operator.inputs[0].full_name,\n name=operator.full_name + '_unsqueeze_0',\n axes=[3])\n transpose_node_1 = oopb.apply_transpose(unsqueeze_node_1,\n name=operator.full_name + '_transpose_1',\n perm=[3, 0, 1, 2])\n else:\n transpose_node_1 = oopb.apply_transpose(operator.inputs[0].full_name,\n name=operator.full_name + '_transpose_1',\n perm=[3, 0, 1, 2])\n depth_to_space_node = oopb.add_node('DepthToSpace',\n transpose_node_1,\n operator.inputs[0].full_name + '_depth_to_space',\n blocksize=blocksize[0])\n transpose_node_2 = oopb.apply_transpose(depth_to_space_node,\n name=operator.full_name + '_transpose_2',\n perm=[1, 2, 3, 0])\n\n if np.count_nonzero(crops) == 0:\n oopb.apply_op_with_output(\"apply_identity\",\n transpose_node_2,\n operator.output_full_names,\n name=operator.full_name + '_slice')\n return\n\n slice_axis = [1, 2]\n top, bottom = crops[0]\n left, right = crops[1]\n starts = [top, left]\n ends = []\n for end in [bottom, right]:\n if end != 0:\n ends.append(-end)\n else:\n ends.append(np.iinfo(np.int32).max)\n\n if len(input_shape) == 3:\n slice_node = oopb.apply_slice(transpose_node_2,\n name=operator.full_name + '_slice',\n starts=starts, ends=ends, axes=slice_axis)\n oopb.apply_op_with_output(\"apply_squeeze\",\n slice_node,\n operator.output_full_names,\n name=operator.full_name + '_squeeze_output',\n axes=[3])\n else:\n oopb.apply_op_with_output(\"apply_slice\",\n transpose_node_2,\n operator.output_full_names,\n name=operator.full_name + '_slice',\n starts=starts, ends=ends, axes=slice_axis)\n\n else:\n shape_x = oopb.add_node('Shape', [operator.inputs[0].full_name],\n operator.full_name + '_input_0_shape')\n block_shape = oopb.apply_cast(operator.inputs[1].full_name,\n to=oopb.int64,\n name=operator.full_name + '_input_1_cast')\n crop = oopb.apply_cast(operator.inputs[2].full_name,\n to=oopb.int64,\n name=operator.full_name + '_input_2_cast')\n block_size = 
oopb.apply_slice(block_shape,\n name=operator.full_name + '_slice_0',\n starts=[0], ends=[1])\n block_prod = oopb.apply_mul(block_size + block_size,\n name=operator.full_name + '_mul_0')\n padded_block_prod = oopb.apply_pad([block_prod,\n ('const_zero_three', oopb.int64, np.array([0, 3], dtype='int64')),\n ('one', oopb.int64, np.array([1], dtype='int64'))],\n name=operator.full_name + '_pad_0')\n new_shape_x = oopb.apply_div([shape_x] + padded_block_prod,\n name=operator.full_name + '_div')\n concat_new_shape_x = oopb.apply_concat(block_shape + new_shape_x,\n name=operator.full_name + '_concat',\n axis=0)\n reshaped_x = oopb.apply_reshape([operator.inputs[0].full_name] + concat_new_shape_x,\n name=operator.full_name + '_reshape_0')\n transposed_x = oopb.apply_transpose(reshaped_x,\n name=operator.full_name + '_transpose_0',\n perm=[2, 3, 0, 4, 1, 5])\n padded_block_shape = oopb.apply_pad([block_shape[0],\n ('const_one_one', oopb.int64, np.array([1, 1], dtype='int64')),\n ('one', oopb.int64, np.array([1], dtype='int64'))],\n name=operator.full_name + '_pad_1')\n new_shape_x_v2 = oopb.apply_mul(new_shape_x + padded_block_shape,\n name=operator.full_name + '_mul_1')\n reshaped_x_v2 = oopb.apply_reshape(transposed_x + new_shape_x_v2,\n name=operator.full_name + '_reshape_1')\n transposed_crop = oopb.apply_transpose(crop,\n name=operator.full_name + '_transpose_1',\n perm=[1, 0])\n slice_crop_starts = oopb.apply_slice(transposed_crop,\n name=operator.full_name + '_slice_starts',\n starts=[0, 0], ends=[1, 2])\n reshaped_slice_crop_starts = oopb.apply_reshape(slice_crop_starts +\n [('const_one_one', oopb.int64, np.array([2], dtype='int64'))],\n name=operator.full_name + '_reshape_starts')\n slice_crop_ends = oopb.apply_slice(transposed_crop,\n name=operator.full_name + '_slice_ends',\n starts=[1, 0], ends=[2, 2])\n reshaped_slice_crop_ends = oopb.apply_reshape(slice_crop_ends +\n [('const_two', oopb.int64, np.array([2], dtype='int64'))],\n name=operator.full_name + '_reshape_ends')\n sliced_new_shape_x_v2 = oopb.apply_slice(new_shape_x_v2,\n name=operator.full_name + '_slice_3',\n starts=[1], ends=[3])\n neged_reshaped_slice_crop_ends = oopb.apply_sub(sliced_new_shape_x_v2 + reshaped_slice_crop_ends,\n name=operator.full_name + '_sub')\n oopb.apply_op_with_output(\"apply_slice\",\n reshaped_x_v2,\n operator.output_full_names,\n name=operator.full_name + '_slice_final',\n starts=reshaped_slice_crop_starts[0],\n ends=neged_reshaped_slice_crop_ends[0],\n axes=[1, 2])\n\n\n@converter_func(TYPES.SpaceToBatchND)\ndef convert_tf_space_to_batch(scope, operator, container):\n node = operator.raw_operator\n oopb = OnnxOperatorBuilder(container, scope)\n blocksize = _cal_tensor_value(node.inputs[1])\n paddings = _cal_tensor_value(node.inputs[2])\n if operator.target_opset <= 10 or (blocksize is not None and paddings is not None):\n input_shape = _cal_tensor_shape(node.outputs[0])\n assert len(input_shape) == 4\n assert len(blocksize) == 2 and blocksize[0] == blocksize[1]\n\n top, bottom = paddings[0]\n left, right = paddings[1]\n pads = [0, top, left, 0,\n 0, bottom, right, 0]\n\n if np.count_nonzero(pads) > 0:\n pad_op = oopb.apply_pad(operator.inputs[0].full_name,\n name=operator.full_name + '_pad_1',\n pads=pads)\n else:\n pad_op = operator.inputs[0].full_name\n\n transpose_node_1 = oopb.apply_transpose(pad_op,\n name=operator.full_name + '_transpose_1',\n perm=[3, 0, 1, 2])\n space_to_depth_node = oopb.add_node('SpaceToDepth',\n transpose_node_1,\n operator.inputs[0].full_name + '_space_to_depth',\n 
blocksize=blocksize[0])\n oopb.apply_op_with_output(\"apply_transpose\",\n space_to_depth_node,\n operator.output_full_names,\n name=operator.full_name + '_transpose_2',\n perm=[1, 2, 3, 0])\n else:\n shape_x = oopb.add_node('Shape', [operator.inputs[0].full_name],\n operator.full_name + '_input_0_shape')\n block_shape = oopb.apply_cast(operator.inputs[1].full_name,\n to=oopb.int64,\n name=operator.full_name + '_input_1_cast')\n pad_x = oopb.apply_cast(operator.inputs[2].full_name,\n to=oopb.int64,\n name=operator.full_name + '_input_2_cast')\n concated_pad_x = oopb.apply_concat(\n [('_const_zero_zero', oopb.int64, np.array([[0, 0]], dtype='int64'))] + pad_x,\n name=operator.full_name + '_concat_1',\n axis=0)\n concated_pad_x_v2 = oopb.apply_concat(\n concated_pad_x + [('_const_zero_zero', oopb.int64, np.array([[0, 0]], dtype='int64'))],\n name=operator.full_name + '_concat_2',\n axis=0)\n transposed_concated_pad_x_v2 = oopb.apply_transpose(concated_pad_x_v2,\n name=operator.full_name + '_transpose_0',\n perm=[1, 0])\n reshaped_transposed_pad_x = oopb.apply_reshape(transposed_concated_pad_x_v2 +\n [('_const_eight', oopb.int64, np.array([8], dtype='int64'))],\n name=operator.full_name + '_reshape_0')\n padded_input_x = oopb.apply_pad(operator.inputs[0].full_name,\n name=operator.full_name + '_pad_1',\n pads=reshaped_transposed_pad_x)\n padded_block_shape = oopb.apply_pad(block_shape,\n name=operator.full_name + '_pad_2',\n pads=[1, 1], value=1)\n new_shape_x = oopb.apply_div([shape_x] + padded_block_shape,\n name=operator.full_name + '_div')\n first_row_new_shape_x = oopb.apply_slice(new_shape_x,\n name=operator.full_name + '_slice_0',\n starts=[0], ends=[2])\n block_size = oopb.apply_slice(block_shape,\n name=operator.full_name + '_slice_1',\n starts=[0], ends=[1])\n new_first_row_new_shape_x = oopb.apply_concat(first_row_new_shape_x + block_size,\n name=operator.full_name + '_concat_3',\n axis=0)\n second_row_new_shape_x_first_half = oopb.apply_slice(new_shape_x,\n name=operator.full_name + '_slice_second_first',\n starts=[2], ends=[3])\n second_row_new_shape_x_second_half = oopb.apply_slice(new_shape_x,\n name=operator.full_name + '_slice_second_second',\n starts=[3], ends=[4])\n new_second_row_new_shape_x_first_half = oopb.apply_concat(second_row_new_shape_x_first_half + block_size,\n name=operator.full_name + '_concat_second_first',\n axis=0)\n new_second_row_new_shape_x = oopb.apply_concat(\n new_second_row_new_shape_x_first_half + second_row_new_shape_x_second_half,\n name=operator.full_name + '_concat_second_shape',\n axis=0)\n new_shape_x_v2 = oopb.apply_concat(new_first_row_new_shape_x + new_second_row_new_shape_x,\n name=operator.full_name + '_concat_shape',\n axis=0)\n new_x = oopb.apply_reshape(padded_input_x + new_shape_x_v2,\n name=operator.full_name + '_reshape_new_x')\n transposed_new_x = oopb.apply_transpose(new_x,\n name=operator.full_name + '_transpose_new',\n perm=[2, 4, 0, 1, 3, 5])\n block_size_prod = oopb.apply_mul(block_size + block_size,\n name=operator.full_name + '_mul_0')\n padded_block_size_prod = oopb.apply_pad(block_size_prod,\n name=operator.full_name + '_pad_block_size',\n pads=[0, 3], value=1)\n new_shape_x_v3 = oopb.apply_mul(new_shape_x + padded_block_size_prod,\n name=operator.full_name + '_mul_shape_v3')\n oopb.apply_op_with_output(\"apply_reshape\",\n transposed_new_x,\n operator.output_full_names,\n name=operator.full_name + '_transpose_2',\n desired_shape=new_shape_x_v3)\n\n\n@converter_func(TYPES.BiasAdd, TYPES.BiasAddV1)\ndef 
convert_tf_bias_add(scope, operator, container):\n node = operator.raw_operator\n oopb = OnnxOperatorBuilder(container, scope)\n if not _is_nhwc(node):\n shape0 = _cal_tensor_shape(node.inputs[0])\n shape1 = _cal_tensor_shape(node.inputs[1])\n if node.inputs[1].op.type == 'Const':\n new_broadcast_shape = [shape1[0]] + [1] * (len(shape0) - 2)\n reshape_node = oopb.apply_reshape(operator.inputs[1].full_name,\n name=operator.full_name + '_reshape',\n desired_shape=new_broadcast_shape)\n oopb.apply_op_with_output(\"apply_add\",\n [node.inputs[0].name, reshape_node[0]],\n operator.output_full_names,\n name=operator.full_name + '_add')\n return\n\n oopb.apply_op_with_output(\"apply_add\",\n operator.input_full_names,\n operator.output_full_names,\n name=operator.full_name + '_add')\n\n\ndef _calc_explicit_padding(input_size, output_shape, output_padding, kernel_shape, stride, dilation,\n perm):\n to_nchw = lambda x, perm: [x[perm[n_]] for n_ in range(len(x))]\n input_size = to_nchw(input_size, perm)[2:]\n\n spatial = len(kernel_shape)\n total_padding = []\n pads = [None] * 2 * spatial\n for i in range(spatial):\n total_padding[i:] = [stride[i] * ((input_size[i] - 1) // stride[i]) + 1 +\n output_padding[i] + (kernel_shape[i] - 1) * dilation[i] - input_size[i]]\n total_padding[i] = max(total_padding[i], 0)\n pads[i] = total_padding[i] // 2\n pads[i + spatial] = total_padding[i] - (total_padding[i] // 2)\n\n return pads\n\n\n@converter_func(TYPES.DepthwiseConv2dNative)\ndef convert_tf_depthwise_conv2d(scope, operator, container):\n node = operator.raw_operator\n oopb = OnnxOperatorBuilder(container, scope)\n\n channels_first = node.get_attr('data_format') == b'NCHW'\n\n if channels_first:\n adjusted_input_name = [operator.inputs[0].full_name]\n else:\n adjusted_input_name = oopb.apply_transpose(operator.inputs[0].full_name,\n name=operator.full_name + '_transpose_0',\n perm=[0, 3, 1, 2])\n\n weight_perm_axes = [3, 2, 0, 1]\n weight_shape = _cal_tensor_shape(node.inputs[1])\n new_shape = weight_shape[:2] + [1, weight_shape[2] * weight_shape[3]]\n weight_reshape = oopb.apply_reshape(operator.inputs[1].full_name,\n name=operator.full_name + '_reshape_ends',\n desired_shape=new_shape)\n transposed_weight = oopb.apply_transpose(weight_reshape,\n name=operator.full_name + '_transpose_new',\n perm=weight_perm_axes)\n\n attrs = {}\n dilation_rate = node.get_attr('dilations')\n dilation_rate = dilation_rate[2:] if channels_first else dilation_rate[1:3]\n attrs['dilations'] = dilation_rate\n strides = node.get_attr('strides')\n strides = strides[2:] if channels_first else strides[1:3]\n attrs['strides'] = strides\n kernel_size = weight_shape[:2]\n input_channels, output_channels = weight_shape[-2:]\n group = input_channels\n attrs['group'] = group\n\n input_shape = _cal_tensor_shape(node.inputs[0])\n output_shape = _cal_tensor_shape(node.outputs[0])\n\n if node.get_attr('padding') == b'VALID':\n attrs['auto_pad'] = 'VALID'\n elif node.get_attr('padding') == b'SAME':\n if count_dynamic_dim(input_shape) > 1:\n attrs['auto_pad'] = 'SAME_UPPER'\n else:\n attrs['auto_pad'] = 'NOTSET'\n output_padding = [0] * len(kernel_size)\n attrs['pads'] = _calc_explicit_padding(input_shape,\n output_shape,\n output_padding,\n kernel_size,\n strides,\n dilation_rate,\n list(range(\n len(input_shape))) if channels_first else [0, 2, 3, 1])\n\n intermediate_output_name = oopb.apply_conv(adjusted_input_name + transposed_weight,\n name=operator.full_name + '_conv',\n **attrs)\n\n if not channels_first:\n 
oopb.apply_op_with_output(\"apply_transpose\",\n intermediate_output_name,\n operator.output_full_names,\n name=operator.full_name + '_transpose_final',\n perm=[0, 2, 3, 1])\n else:\n oopb.apply_op_with_output(\"apply_identity\",\n intermediate_output_name,\n operator.output_full_names,\n name=operator.full_name + '_identity_final')\n\n\n@converter_func(TYPES.MatMul, TYPES.BatchMatMul, TYPES.BatchMatMulV2)\ndef convert_tf_batchmatmul(scope, operator, container):\n node = operator.raw_operator # type: tensorflow.Operation\n oopb = OnnxOperatorBuilder(container, scope)\n\n tranpose_a = node.get_attr('transpose_a') if node.type == TYPES.MatMul else node.get_attr('adj_x')\n tranpose_b = node.get_attr('transpose_b') if node.type == TYPES.MatMul else node.get_attr('adj_y')\n\n input_names = operator.input_full_names\n for idx_, flag in enumerate([tranpose_a, tranpose_b]):\n if flag:\n shape_len = len(node.inputs[idx_].shape)\n perm = list(range(0, shape_len))[:-2] + [shape_len - 1, shape_len - 2]\n input_names[idx_] = oopb.apply_transpose(input_names[idx_],\n name=operator.full_name + '_transpose_%d' % idx_,\n perm=perm)[0]\n\n oopb.apply_op_with_output(\"apply_matmul\",\n input_names,\n operator.output_full_names,\n name=operator.full_name + '_add')\n\n\n@converter_func(TYPES.SquaredDifference)\ndef convert_tf_squared_difference(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n sub_node = oopb.apply_sub(operator.input_full_names, name=operator.full_name + '_sub')\n oopb.apply_op_with_output('apply_mul', sub_node + sub_node, operator.output_full_names, name=operator.full_name)\n\n\n@converter_func(TYPES.ConcatV2)\ndef convert_tf_concat_v2(scope, operator, container):\n node = operator.raw_operator\n input_name_idx = []\n original_input_number = len(operator.input_full_names) - 1\n for idx in range(original_input_number):\n val = _cal_tensor_value(node.inputs[idx])\n if not (val is not None and len(val) == 0):\n input_name_idx.append(idx)\n\n input_full_names = [operator.input_full_names[idx] for idx in input_name_idx]\n\n axis_val = _cal_tensor_value(node.inputs[-1]).item(0)\n if axis_val < 0 and operator.target_opset < 11:\n input_shape = _cal_tensor_shape(node.inputs[0])\n axis_val = len(input_shape) + axis_val\n\n oopb = OnnxOperatorBuilder(container, scope)\n need_casting = False\n if operator.target_opset < 8:\n supported_types = [oopb.float, oopb.float16]\n dtype = _to_onnx_type(node.outputs[0].dtype)\n need_casting = dtype not in supported_types\n\n if need_casting:\n concat_node = oopb.apply_concat(input_full_names,\n name=operator.full_name + '_concat',\n axis=axis_val)\n oopb.apply_op_with_output(\"apply_cast\",\n concat_node,\n operator.output_full_names,\n name=operator.full_name + '_cast',\n to=oopb.float)\n else:\n oopb.apply_op_with_output(\"apply_concat\",\n input_full_names,\n operator.output_full_names,\n name=operator.full_name + '_concat',\n axis=axis_val)\n\n\n@converter_func(TYPES.Const)\ndef convert_tf_const(scope, operator, container):\n node = operator.raw_operator\n np_arr = _cal_tensor_value(node.outputs[0])\n onnx_tensor = numpy_helper.from_array(np_arr, operator.outputs[0].onnx_name)\n container.add_initializer_from_tensor(onnx_tensor)\n\n\ndef _spatial_map(shape, perm):\n new_shape = shape[:]\n for i in perm:\n new_shape[i] = shape[perm[i]]\n return new_shape\n\n\ndef _conv_convert_inputs(oopb, operator, node, attrs, with_kernel=False, new_kernel_shape=None,\n output_indices=None):\n if output_indices is None:\n output_indices = [0]\n\n 
if _is_nhwc(node):\n # transpose input if needed, no need to record shapes on input\n transpose_node_1 = oopb.apply_transpose(node.inputs[0].name,\n name=operator.full_name + '_transpose_1',\n perm=NHWC_TO_NCHW)\n else:\n transpose_node_1 = [node.inputs[0].name]\n\n # kernel must to be transposed\n if with_kernel:\n val = _cal_tensor_value(node.inputs[1])\n if val is not None:\n val = val.transpose(HWCN_TO_NCHW)\n onnx_type = _to_onnx_type(node.inputs[1].dtype)\n transpose_node_kernel = oopb.apply_identity([('_start', onnx_type, val)],\n name=operator.full_name + '_transpose_kernel')\n else:\n transpose_node_kernel = oopb.apply_transpose(node.inputs[1].name,\n name=operator.full_name + '_transpose_kernel',\n perm=HWCN_TO_NCHW)\n # TODO, some onnx conv ops require the reshape the kernel (ie. depthwise_conv2d)\n else:\n transpose_node_kernel = [node.inputs[1].name]\n\n conv_node = oopb.apply_conv(transpose_node_1 + transpose_node_kernel,\n name=operator.full_name + '_conv',\n **attrs)\n\n # transpose outputs if needed\n if _is_nhwc(node):\n for idx in output_indices:\n oopb.add_node_with_output(\"Transpose\",\n conv_node,\n operator.outputs[idx].full_name,\n name=operator.full_name + '_transpose_2_' + str(idx),\n perm=NCHW_TO_NHWC)\n else:\n for idx in output_indices:\n oopb.apply_op_with_output(\"apply_identity\",\n conv_node,\n operator.outputs[idx].full_name,\n name=operator.full_name + '_identity_' + str(idx))\n\n\ndef _conv_dims_attr(node, dims):\n if _is_nhwc(node):\n if len(dims) == 2:\n h, w = dims\n else:\n n, h, w, c = dims\n else:\n n, c, h, w = dims\n dims = [h, w]\n return dims\n\n\ndef _convert_tf_conv2d(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n kernel_shape = _cal_tensor_shape(node.inputs[1])[0:2]\n strides = _conv_dims_attr(node, node.get_attr('strides'))\n dilations = _conv_dims_attr(node, node.get_attr('dilations'))\n padding = node.get_attr('padding')\n spatial = 2\n attrs = {'strides': strides, 'dilations': dilations, 'kernel_shape': kernel_shape}\n if padding:\n if dilations is None:\n dilations = [1] * spatial * 2\n if padding == b'SAME':\n pads = [0] * spatial * 2\n input_shape = _cal_tensor_shape(node.inputs[0])\n output_shape = _cal_tensor_shape(node.outputs[0])\n # transpose shape to nchw\n if _is_nhwc(node):\n input_shape = _spatial_map(input_shape, NHWC_TO_NCHW)\n output_shape = _spatial_map(output_shape, NHWC_TO_NCHW)\n # calculate pads\n if any(input_shape[i + 2] == None or output_shape[i + 2] == None for i in range(spatial)):\n attrs[\"auto_pad\"] = \"SAME_UPPER\"\n else:\n for i in range(spatial):\n pad = (output_shape[i + 2] - 1) * strides[i] + dilations[i] * kernel_shape[i] - input_shape[i + 2]\n pad = max(pad, 0)\n pads[i] = pad // 2\n pads[i + spatial] = pad - pad // 2\n attrs[\"pads\"] = pads\n\n _conv_convert_inputs(oopb, operator, node, attrs, with_kernel=True)\n\n\n@converter_func(TYPES.Conv1D)\ndef convert_tf_conv1d(scope, operator, container):\n _convert_tf_conv2d(scope, operator, container)\n\n\n@converter_func(TYPES.Conv2D)\ndef convert_tf_conv2d(scope, operator, container):\n _convert_tf_conv2d(scope, operator, container)\n\n\n@converter_func(TYPES.ExpandDims)\ndef convert_tf_expand_dims(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n axis = _cal_tensor_value(node.inputs[1]).tolist()\n rank = len(_cal_tensor_shape(node.inputs[0]))\n oopb.apply_op_with_output(\"apply_unsqueeze\",\n [operator.inputs[0].full_name],\n 
operator.output_full_names,\n name=operator.full_name,\n axes=[axis],\n rank=rank)\n\n\ndef _convert_tf_fused_batch_norm_core(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n input_dim = len(_cal_tensor_shape(node.inputs[0]))\n epsilon = node.get_attr('epsilon')\n attrs = {'epsilon': epsilon, 'momentum': 0.9, 'spatial': 1}\n outputs_num = min(5, len(node.outputs))\n\n if _is_nhwc(node):\n input_perm = [0, input_dim - 1] + list(range(1, input_dim - 1))\n transpose_node_1 = oopb.apply_transpose(operator.inputs[0].full_name, name=operator.full_name + '_transpose_1',\n perm=input_perm)\n for idx in range(1, 5):\n transpose_node_1.append(operator.inputs[idx].full_name)\n batch_norm = oopb.apply_batch_norm(transpose_node_1, name=operator.full_name + '_batch_norm',\n outputs_num=outputs_num, **attrs)\n output_perm = [0] + list(range(2, input_dim)) + [1]\n final_node = oopb.apply_transpose(batch_norm[0], name=operator.full_name + '_transpose_2',\n perm=output_perm)\n else:\n transpose_node_1 = []\n for idx in range(5):\n transpose_node_1.append(operator.inputs[idx].full_name)\n batch_norm = oopb.apply_batch_norm(transpose_node_1, name=operator.full_name + '_batch_norm',\n outputs_num=outputs_num, **attrs)\n final_node = batch_norm[0]\n\n oopb.apply_op_with_output(\"apply_identity\",\n final_node,\n operator.outputs[0].full_name,\n name=operator.full_name)\n\n\n@converter_func(TYPES.Fill)\ndef convert_tf_fill(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n if operator.target_opset < 9:\n fill_shape = _cal_tensor_shape(node.inputs[0])\n fill_shape_dims = fill_shape[0]\n val_dtype = _to_onnx_type(node.inputs[1].dtype)\n need_cast = val_dtype != oopb.float and operator.target_opset < 9\n if need_cast:\n cast_input_val = oopb.apply_cast(operator.inputs[1].full_name,\n to=oopb.float,\n name=operator.full_name + '_input_value_cast')\n else:\n cast_input_val = [operator.inputs[1].full_name]\n idx = 0\n for _ in range(fill_shape_dims):\n cast_input_val = oopb.apply_unsqueeze(cast_input_val,\n name=operator.full_name + '_unsqueeze_' + str(idx),\n axes=[0])\n idx += 1\n cast_input_dim = oopb.apply_cast(operator.inputs[0].full_name,\n to=oopb.int64,\n name=operator.full_name + '_input_dim_cast')\n if need_cast:\n tile_node = oopb.apply_tile(cast_input_val + cast_input_dim,\n name=operator.full_name + '_tile')\n oopb.apply_op_with_output(\"apply_cast\",\n tile_node,\n operator.output_full_names,\n name=operator.full_name)\n else:\n oopb.apply_op_with_output(\"apply_tile\",\n cast_input_val,\n operator.output_full_names,\n name=operator.full_name,\n repeats=cast_input_dim[0])\n else:\n val_dtype = _to_onnx_type(node.inputs[0].dtype)\n if val_dtype != oopb.int64:\n cast_input_dim = oopb.apply_cast(operator.inputs[0].full_name,\n to=oopb.int64,\n name=operator.full_name + '_input_dim_cast')\n else:\n cast_input_dim = [operator.inputs[0].full_name]\n\n val = _cal_tensor_value(node.inputs[1])\n value = np.array([val])\n attrs = {\"value\": numpy_helper.from_array(value)}\n oopb.add_node_with_output('ConstantOfShape',\n cast_input_dim,\n operator.outputs[0].full_name,\n name=operator.full_name,\n **attrs)\n\n\n@converter_func(TYPES.FusedBatchNorm)\ndef convert_tf_fused_batch_norm(scope, operator, container):\n _convert_tf_fused_batch_norm_core(scope, operator, container)\n\n\n@converter_func(TYPES.FusedBatchNormV2)\ndef convert_tf_fused_batch_norm_v2(scope, operator, container):\n 
_convert_tf_fused_batch_norm_core(scope, operator, container)\n\n\n@converter_func(TYPES.FusedBatchNormV3)\ndef convert_tf_fused_batch_norm_v3(scope, operator, container):\n _convert_tf_fused_batch_norm_core(scope, operator, container)\n\n\n@converter_func(TYPES.GatherV2)\ndef convert_tf_gather_v2(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n axis = _cal_tensor_value(node.inputs[2]).tolist()\n oopb.apply_op_with_output(\"apply_gather\",\n [operator.inputs[0].full_name, operator.inputs[1].full_name],\n operator.output_full_names,\n name=operator.full_name,\n axis=axis)\n\n\n@converter_func(TYPES.GatherNd)\ndef convert_tf_gather_nd(scope, operator, container):\n if operator.target_opset < 11:\n raise ValueError(\"GatherND op is not supported for opset < 11\")\n node = operator.raw_operator\n oopb = OnnxOperatorBuilder(container, scope)\n indices_dtype = _to_onnx_type(node.inputs[1].dtype)\n if indices_dtype != oopb.int64:\n cast_node = oopb.apply_cast(operator.inputs[1].full_name,\n to=oopb.int64,\n name=operator.full_name + '_cast')[0]\n else:\n cast_node = operator.inputs[1].full_name\n oopb.add_node_with_output('GatherND',\n [operator.inputs[0].full_name, cast_node],\n operator.outputs[0].full_name,\n name=operator.full_name)\n\n\ndef _convert_tf_compare_equal(scope, operator, container, tf_op_string, onnx_op_string):\n if operator.target_opset < 7:\n raise ValueError(tf_op_string + \" op is not supported for opset < 7\")\n oopb = OnnxOperatorBuilder(container, scope)\n if operator.target_opset >= 9:\n compare_node = oopb.add_node(onnx_op_string,\n operator.input_full_names,\n operator.full_name + '_' + onnx_op_string.lower())\n oopb.add_node_with_output('Not',\n [compare_node],\n operator.outputs[0].full_name,\n name=operator.full_name)\n else:\n compare_input_0 = oopb.add_node('Cast', [operator.inputs[0].full_name],\n operator.full_name + '_input_0_cast', to=oopb.float)\n compare_input_1 = oopb.add_node('Cast', [operator.inputs[1].full_name],\n operator.full_name + '_input_1_cast', to=oopb.float)\n less_out = oopb.add_node(onnx_op_string, [compare_input_0, compare_input_1],\n operator.full_name + '_' + onnx_op_string.lower())\n oopb.add_node_with_output('Not', less_out,\n operator.output_full_names,\n name=operator.full_name + '_not')\n\n\n@converter_func(TYPES.GreaterEqual)\ndef convert_tf_greater_equal(scope, operator, container):\n _convert_tf_compare_equal(scope, operator, container, 'GreaterEqual', 'Less')\n\n\n@converter_func(TYPES.LessEqual)\ndef convert_tf_less_equal(scope, operator, container):\n _convert_tf_compare_equal(scope, operator, container, 'LessEqual', 'Greater')\n\n\n@converter_func(TYPES.LogSoftmax)\ndef convert_tf_logsoftmax(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n logits_rank = len(_cal_tensor_shape(node.inputs[0]))\n axis = node.get_attr('axis') if hasattr(node, 'axis') else -1\n if operator.target_opset < 11 and axis < 0:\n axis += logits_rank\n\n oopb.add_node_with_output('LogSoftmax',\n operator.input_full_names,\n operator.output_full_names,\n name=operator.full_name,\n axis=axis)\n\n\ndef _convert_tf_maximum_minimum(scope, operator, container, oopb, apply_func):\n node = operator.raw_operator\n supported_types = [oopb.double, oopb.float, oopb.float16]\n output_type = _to_onnx_type(node.outputs[0].dtype)\n need_cast = False\n cast_inputs = []\n\n for idx, inp in enumerate(node.inputs):\n inp_type = _to_onnx_type(inp.dtype)\n if 
inp_type not in supported_types:\n diff_output = oopb.apply_cast(inp.name,\n to=oopb.float,\n name=operator.full_name + '_input_' + str(idx))\n cast_inputs.extend(diff_output)\n need_cast = True\n else:\n cast_inputs.append(inp.name)\n\n # tensorflow minimum/maximum does support broadcast, onnx < opset 8 does not.\n # handle this by doing something like:\n # y = min(x1, add(x2, sub(x1, x1))), where x1, x2 are the inputs and x2 is a scalar\n # this will create a tensor of zeros of the shape of x1, adds x2 to it (which broadcasts) and use that for min.\n broadcast_inputs = []\n needs_broadcast_op = []\n if operator.target_opset < 8:\n output_shape = _cal_tensor_shape(node.outputs[0])\n has_correct_shape = []\n for i, input_name in enumerate(node.inputs):\n input_shape = _cal_tensor_shape(node.inputs[i])\n if input_shape != output_shape:\n needs_broadcast_op.append(i)\n else:\n has_correct_shape.append(cast_inputs[i])\n\n if needs_broadcast_op:\n has_correct_shape = has_correct_shape[0]\n for i in range(len(cast_inputs)):\n if i in needs_broadcast_op:\n # get a tensor with zeros (since there is no Fill op as of opset8)\n sub_node = oopb.apply_sub([has_correct_shape, has_correct_shape],\n name=operator.full_name + '_diff_' + str(i))\n # use add as 'broadcast' op\n add_node = oopb.apply_add([cast_inputs[i]] + sub_node,\n name=operator.full_name + '_add_' + str(i))\n broadcast_inputs.extend(add_node)\n else:\n broadcast_inputs.append(cast_inputs[i])\n else:\n broadcast_inputs = cast_inputs\n\n op_postfix = '_max' if apply_func == oopb.apply_max else '_min'\n max_node = apply_func(broadcast_inputs,\n name=operator.full_name + op_postfix)\n\n if need_cast:\n oopb.apply_op_with_output(\"apply_cast\",\n max_node,\n operator.output_full_names,\n name=operator.full_name + '_castback',\n to=output_type)\n else:\n oopb.apply_op_with_output(\"apply_identity\",\n max_node,\n operator.output_full_names,\n name=operator.full_name + '_identity')\n\n\n@converter_func(TYPES.Maximum)\ndef convert_tf_maximum(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n _convert_tf_maximum_minimum(scope, operator, container, oopb, oopb.apply_max)\n\n\n@converter_func(TYPES.Minimum)\ndef convert_tf_minimum(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n _convert_tf_maximum_minimum(scope, operator, container, oopb, oopb.apply_min)\n\n\n@converter_func(TYPES.NonMaxSuppressionV2, TYPES.NonMaxSuppressionV3)\ndef convert_tf_nonmaxsuppression(scope, operator, container):\n if operator.target_opset < 10:\n raise ValueError(\"NonMaxSuppression op is not supported for opset < 10\")\n else:\n oopb = OnnxOperatorBuilder(container, scope)\n input_0 = oopb.apply_unsqueeze(operator.inputs[0].full_name,\n name=operator.full_name + '_unsqueeze_0',\n axes=[0])\n input_1 = oopb.apply_unsqueeze(operator.inputs[1].full_name,\n name=operator.full_name + '_unsqueeze_1',\n axes=[0, 1])\n input_2 = oopb.apply_cast(operator.inputs[2].full_name,\n to=oopb.int64,\n name=operator.full_name + '_cast_0')\n non_max_v = 10 if operator.target_opset < 11 else 11\n nonmaxsuppress = oopb.add_node('NonMaxSuppression',\n input_0 + input_1 + input_2 + operator.input_full_names[3:],\n operator.full_name + '_nonmax',\n op_version=non_max_v)\n slice_node = oopb.apply_slice(nonmaxsuppress,\n name=operator.full_name + '_slice',\n starts=[2], ends=[3], axes=[1])\n squeeze_node = oopb.apply_squeeze(slice_node,\n name=operator.full_name + '_squeeze',\n axes=[1])\n oopb.apply_op_with_output(\"apply_cast\",\n 
squeeze_node,\n operator.output_full_names,\n name=operator.full_name + '_castback',\n to=oopb.int32)\n\n\ndef _make_range_const(scope, operator, container, start, limit, delta, onnx_type):\n start = _cal_tensor_value(start).tolist()\n limit = _cal_tensor_value(limit).tolist()\n delta = _cal_tensor_value(delta).tolist()\n val = np.arange(start, limit, delta)\n oopb = OnnxOperatorBuilder(container, scope)\n oopb.add_node_with_output('Identity',\n [('_start', onnx_type, val)],\n operator.outputs[0].full_name,\n name=operator.full_name + '_range')\n\n\ndef _make_range_non_const(scope, operator, container, start, limit, delta, onnx_type):\n oopb = OnnxOperatorBuilder(container, scope)\n diff_node = oopb.apply_sub([limit.name, start.name],\n name=operator.full_name + '_diff')\n delta_cast = delta.name\n if onnx_type in [oopb.int32, oopb.int64]:\n diff_output = oopb.apply_cast(diff_node,\n to=oopb.float,\n name=operator.full_name + '_cast_diff')\n delta_cast = oopb.apply_cast(delta.name,\n to=oopb.float,\n name=operator.full_name + '_cast_delta')\n\n div_node = oopb.apply_div(diff_output + delta_cast,\n name=operator.full_name + '_div')\n ceil_node = oopb.add_node(\"Ceil\",\n div_node,\n name=operator.full_name + '_ceil')\n trip_count_node = oopb.apply_cast(ceil_node,\n to=oopb.int64,\n name=operator.full_name + '_trip_cnt')\n loop_inputs = [trip_count_node[0],\n # TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE maps BOOL to INT32\n # so we need change np.array(True, dtype='bool') to int32 here\n ('_cond', oopb.bool, np.array(1, dtype='int32')),\n start.name]\n from onnx import helper\n n1 = helper.make_node(\"Identity\", [\"cond\"], [\"cond_out\"], name=\"n1\")\n n2 = helper.make_node(\"Add\", [\"prev\", delta.name], [\"current\"], name=\"n2\")\n n3 = helper.make_node(\"Identity\", [\"prev\"], [\"range\"], name=\"n3\")\n\n graph_proto = helper.make_graph(\n nodes=[n1, n2, n3],\n name=\"test\",\n inputs=[helper.make_tensor_value_info(\"i\", oopb.int64, []),\n helper.make_tensor_value_info(\"cond\", oopb.bool, []),\n helper.make_tensor_value_info(\"prev\", onnx_type, [])],\n outputs=[helper.make_tensor_value_info(\"cond_out\", oopb.bool, []),\n helper.make_tensor_value_info(\"current\", onnx_type, []),\n helper.make_tensor_value_info(\"range\", onnx_type, [])],\n initializer=[]\n )\n loop_node = oopb.add_node_all(\"Loop\",\n loop_inputs,\n name=operator.full_name + '_loop',\n outputs_num=2,\n body=graph_proto)\n oopb.apply_op_with_output(\"apply_identity\",\n loop_node[1],\n operator.output_full_names,\n name=operator.full_name + '_identity')\n\n\ndef _make_range(scope, operator, container, start, limit, delta, onnx_type):\n if all(_cal_tensor_value(n) is not None for n in [start, limit, delta]) is True:\n _make_range_const(scope, operator, container, start, limit, delta, onnx_type)\n else:\n _make_range_non_const(scope, operator, container, start, limit, delta, onnx_type)\n\n\n@converter_func(TYPES.Range)\ndef convert_tf_range(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n if operator.target_opset < 11:\n onnx_type = _to_onnx_type(node.outputs[0].dtype)\n _make_range(scope, operator, container, node.inputs[0], node.inputs[1], node.inputs[2], onnx_type)\n else:\n oopb.add_node_with_output(\"Range\",\n operator.input_full_names,\n operator.outputs[0].full_name,\n name=operator.full_name + '_range',\n op_version=11)\n\n\n@converter_func(TYPES.TD_Reshape)\ndef convert_reshape_timedistributed(scope, operator, container):\n target_shape = 
operator.get_attr('target_shape')\n apply_reshape(scope, operator.inputs[0].full_name, operator.outputs[0].full_name, container,\n operator_name=operator.full_name, desired_shape=target_shape)\n\n\n@converter_func(TYPES.All, TYPES.Any)\ndef convert_tf_any_all(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n axis = _cal_tensor_value(node.inputs[1]).tolist()\n axis = [axis] if np.isscalar(axis) else axis\n\n # It is fine to have nagative reduce_dim.\n cast_op = oopb.apply_cast(operator.input_full_names[0],\n to=oopb.float,\n name=operator.full_name + '_cast')\n keepdims = node.get_attr(\"keep_dims\")\n op_type = \"ReduceMin\" if node.type == \"All\" else \"ReduceSum\"\n reduce_op = oopb.add_node(op_type, cast_op,\n axes=axis,\n keepdims=keepdims,\n name=operator.full_name + '_reduce')\n oopb.apply_op_with_output('apply_greater',\n [reduce_op, np.array(0, dtype=np.float32)],\n operator.output_full_names,\n name=operator.full_name)\n\n\n@converter_func(TYPES.Pack)\ndef convert_tf_pack(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n axis = node.get_attr('axis')\n if axis < 0 and operator.target_opset < 11:\n axis += len(_cal_tensor_shape(node.inputs[0])) + 1\n\n inputs = []\n for i in range(len(node.inputs)):\n unsqueeze = oopb.add_node('Unsqueeze',\n operator.inputs[i].full_name,\n operator.full_name + '_unsqueeze' + str(i), axes=[axis])\n inputs.append(unsqueeze)\n\n oopb.apply_op_with_output(\"apply_concat\",\n inputs,\n operator.outputs[0].full_name,\n name=operator.full_name + '_concat',\n axis=axis)\n\n\ndef _convert_tf_pad(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n paddings_value = _cal_tensor_value(node.inputs[1])\n if paddings_value is None:\n padding_dtype = _to_onnx_type(node.inputs[1].dtype)\n if padding_dtype != oopb.int64:\n cast_node = oopb.apply_cast(operator.input_full_names[1],\n to=oopb.int64,\n name=operator.full_name + '_paddings_cast')\n else:\n cast_node = operator.input_full_names[1]\n transpose_node_1 = oopb.apply_transpose(cast_node,\n name=operator.full_name + '_transpose_1',\n perm=[1, 0])\n paddings = oopb.apply_reshape(transpose_node_1,\n name=operator.full_name + '_reshape',\n desired_shape=[-1])[0]\n else:\n paddings = np.array(_cal_tensor_value(node.inputs[1])).transpose().flatten()\n mode = node.get_attr(\"mode\") if hasattr(node, 'mode') else None\n\n if mode:\n mode = mode.s.decode(\"utf-8\").lower()\n if mode not in [None, \"constant\"]:\n raise ValueError(mode + \" pad mode is not supported\")\n\n origin_dtype = _to_onnx_type(node.outputs[0].dtype)\n if origin_dtype not in [oopb.float16, oopb.float,\n oopb.double]:\n cast_op = oopb.apply_cast(operator.input_full_names[0],\n to=oopb.float,\n name=operator.full_name + '_cast')\n else:\n cast_op = operator.input_full_names[0]\n\n if mode in [None, \"constant\"] and len(node.inputs) == 3:\n const_val = _cal_tensor_value(node.inputs[2]).tolist()\n else:\n const_val = None\n\n if operator.target_opset < 11:\n pad_node = oopb.apply_pad(cast_op,\n name=operator.full_name + '_pad',\n mode=mode,\n pads=paddings,\n value=const_val)\n else:\n pad_node = oopb.apply_pad(cast_op,\n name=operator.full_name + '_pad',\n mode=mode,\n pads=paddings,\n value=const_val,\n onnx_type=_to_onnx_type(node.inputs[0].dtype))\n\n if origin_dtype not in [oopb.float16, oopb.float,\n oopb.double]:\n oopb.apply_op_with_output(\"apply_cast\",\n pad_node,\n 
operator.output_full_names,\n name=operator.full_name + '_castback',\n to=origin_dtype)\n else:\n oopb.apply_op_with_output(\"apply_identity\",\n pad_node,\n operator.output_full_names,\n name=operator.full_name + '_identity')\n\n\n@converter_func(TYPES.Pad)\ndef convert_tf_pad(scope, operator, container):\n _convert_tf_pad(scope, operator, container)\n\n\n@converter_func(TYPES.PadV2)\ndef convert_tf_pad_v2(scope, operator, container):\n _convert_tf_pad(scope, operator, container)\n\n\ndef _convert_tf_reduce_op(scope, operator, container, onnx_op):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n axes = _cal_tensor_value(node.inputs[1]).tolist()\n axes = [axes] if np.isscalar(axes) else axes\n\n if operator.target_opset < 11:\n input_shape = _cal_tensor_shape(node.inputs[0])\n if input_shape is None:\n if any([val < 0 for val in axes]):\n raise ValueError(\"reduce_op: cannot have negative axis because we don't know input rank\")\n else:\n input_rank = len(input_shape)\n axes = [val + input_rank if val < 0 else val for val in axes]\n\n keepdims = node.get_attr(\"keep_dims\")\n oopb.add_node_with_output(onnx_op,\n operator.inputs[0].full_name,\n operator.outputs[0].full_name,\n name=operator.full_name + '_reduce_min',\n axes=axes, keepdims=keepdims)\n\n\n@converter_func(TYPES.Max)\ndef convert_tf_min(scope, operator, container):\n _convert_tf_reduce_op(scope, operator, container, 'ReduceMax')\n\n\n@converter_func(TYPES.Min)\ndef convert_tf_min(scope, operator, container):\n _convert_tf_reduce_op(scope, operator, container, 'ReduceMin')\n\n\n@converter_func(TYPES.Mean)\ndef convert_tf_mean(scope, operator, container):\n _convert_tf_reduce_op(scope, operator, container, 'ReduceMean')\n\n\n@converter_func(TYPES.Sum)\ndef convert_tf_sum(scope, operator, container):\n _convert_tf_reduce_op(scope, operator, container, 'ReduceSum')\n\n\n@converter_func(TYPES.Prod)\ndef convert_tf_prod(scope, operator, container):\n _convert_tf_reduce_op(scope, operator, container, 'ReduceProd')\n\n\n@converter_func(TYPES.Reshape)\ndef convert_tf_reshape(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n if _cal_tensor_value(node.inputs[1]) is None:\n temp_shape_value = node.inputs[1].name\n shape_value = temp_shape_value\n shape_dtype = _to_onnx_type(node.inputs[0].dtype)\n if shape_dtype != oopb.int64:\n shape_value = oopb.apply_cast(temp_shape_value,\n to=oopb.int64,\n name=operator.full_name + '_cast')[0]\n else:\n shape_value = _cal_tensor_value(node.inputs[1]).tolist()\n\n oopb.apply_op_with_output(\"apply_reshape\",\n operator.inputs[0].full_name,\n operator.outputs[0].full_name,\n name=operator.full_name + '_reshape',\n desired_shape=shape_value)\n\n\n@converter_func(TYPES.ScatterNd)\ndef convert_tf_scatter_nd(scope, operator, container):\n if operator.target_opset < 11:\n raise ValueError(\"ScatterNd op is not supported for opset = \" + str(operator.target_opset))\n else:\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n\n const_shape_dtype = _to_onnx_type(node.inputs[2].dtype)\n if const_shape_dtype != oopb.int64:\n const_of_shape_input = oopb.apply_cast(operator.inputs[2].full_name,\n to=oopb.int64,\n name=operator.full_name + '_const_of_shape_input')\n else:\n const_of_shape_input = [operator.inputs[2].full_name]\n\n np_val = np.array([0], dtype=np.int64)\n onnx_tensor = numpy_helper.from_array(np_val, operator.inputs[2].full_name + '_value')\n const_of_shape = 
oopb.add_node('ConstantOfShape',\n const_of_shape_input,\n operator.inputs[2].full_name + '_const_of_shape',\n value=onnx_tensor)\n\n node_input_0_dtype = _to_onnx_type(node.inputs[0].dtype)\n if node_input_0_dtype != oopb.int64:\n node_input_0_cast = oopb.apply_cast(operator.inputs[0].full_name,\n to=oopb.int64,\n name=operator.full_name + '_input_0')\n else:\n node_input_0_cast = [operator.inputs[0].full_name]\n\n oopb.add_node_with_output('ScatterND',\n [const_of_shape] + node_input_0_cast + [operator.inputs[1].full_name],\n operator.outputs[0].full_name,\n name=operator.full_name + '_scatter_nd')\n\n\n@converter_func(TYPES.Select)\ndef convert_tf_select(scope, operator, container):\n if operator.target_opset < 9:\n raise ValueError(\"Select op is not supported for opset = \" + str(operator.target_opset))\n else:\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n cond_shape = _cal_tensor_shape(node.inputs[0])\n input_shape = _cal_tensor_shape(node.inputs[1])\n if input_shape is None:\n input_shape = _cal_tensor_shape(node.inputs[2])\n input_rank = len(input_shape)\n if len(cond_shape) == 1 and input_rank > 1:\n broadcast_shape = [cond_shape[0]] + [1] * (input_rank - 1)\n reshape_node = oopb.apply_reshape(operator.inputs[0].full_name,\n name=operator.full_name + '_reshape',\n desired_shape=broadcast_shape)\n input_nodes = reshape_node + operator.input_full_names[1:]\n else:\n input_nodes = operator.input_full_names\n\n oopb.add_node_with_output('Where',\n input_nodes,\n operator.outputs[0].full_name,\n name=operator.full_name + '_where',\n op_version=9)\n\n\n@converter_func(TYPES.Size)\ndef convert_tf_size(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n dtype = _to_onnx_type(node.outputs[0].dtype)\n if dtype != oopb.int64:\n size_node = oopb.add_node('Size',\n operator.inputs[0].full_name,\n operator.inputs[0].full_name + '_size')\n oopb.apply_op_with_output(\"apply_cast\",\n size_node,\n operator.outputs[0].full_name,\n name=operator.full_name + '_size_cast',\n to=dtype)\n else:\n oopb.add_node_with_output('Size',\n operator.inputs[0].full_name,\n operator.output_full_names,\n name=operator.inputs[0].full_name + '_size')\n\n\ndef _convert_tf_resize(scope, operator, container, mode):\n node = operator.raw_operator\n oopb = OnnxOperatorBuilder(container, scope)\n shape = _cal_tensor_shape(node.inputs[0])\n target_shape = _cal_tensor_value(node.inputs[1])\n\n if shape and shape[1] is not None and shape[2] is not None and target_shape is not None:\n n, h, w, c = shape\n nh, nw = target_shape\n scale_val = np.array([1.0, 1.0, float(nh) / h, float(nw) / w]).astype(np.float32)\n scales = ('_scale', oopb.float, scale_val)\n else:\n if operator.target_opset < 10:\n raise ValueError(\"dynamic shape is not supported for Upsample when opset = \" + str(operator.target_opset))\n input_shape = oopb.add_node('Shape',\n operator.inputs[0].full_name,\n operator.inputs[0].full_name + '_input_shape')\n sliced_score = oopb.add_node('Slice',\n [input_shape,\n ('_start', oopb.int64, np.array([1], dtype='int64')),\n ('_end', oopb.int64, np.array([3], dtype='int64')),\n ('_axes', oopb.int64, np.array([0], dtype='int64'))\n ],\n operator.inputs[0].full_name + '_sliced')\n ori_cast = oopb.add_node('Cast',\n sliced_score,\n operator.inputs[0].full_name + '_ori_cast', to=oopb.float)\n target_cast = oopb.add_node('Cast',\n operator.inputs[1].full_name,\n operator.inputs[1].full_name + '_target_cast', to=oopb.float)\n scales_hw 
= oopb.add_node('Div',\n [target_cast, ori_cast],\n operator.inputs[1].full_name + '_scales_hw')\n scales = oopb.add_node('Concat',\n [('_concat', oopb.float, np.array([1.0, 1.0], dtype='float32')),\n scales_hw\n ],\n operator.inputs[0].full_name + '_concat',\n axis=0)\n\n input_nchw = oopb.add_node('Transpose',\n operator.inputs[0].full_name,\n operator.inputs[0].full_name + '_transpose',\n perm=[0, 3, 1, 2])\n attrs = {\"mode\": mode}\n attrs['coordinate_transformation_mode'] = 'asymmetric'\n if attrs['mode'] == 'nearest':\n attrs['nearest_mode'] = 'floor'\n if operator.target_opset < 10:\n op_type = 'Upsample'\n else:\n op_type = 'Resize'\n\n if operator.target_opset < 8:\n attrs = {\"mode\": mode, \"scales\": [1.0, 1.0, float(nh) / h, float(nw) / w]}\n upsample = oopb.add_node(op_type,\n input_nchw,\n operator.inputs[0].full_name + '_upsample',\n **attrs)\n elif operator.target_opset < 11:\n upsample = oopb.add_node(op_type,\n [input_nchw,\n scales],\n operator.inputs[0].full_name + '_upsample',\n mode=mode)\n else:\n upsample = oopb.add_node(op_type,\n [input_nchw,\n ('_rois', oopb.float, np.array([0.0, 0.0, 1.0, 1.0], dtype='float32')),\n scales],\n operator.inputs[0].full_name + '_upsample',\n **attrs)\n oopb.add_node_with_output('Transpose',\n upsample,\n operator.output_full_names,\n name=operator.inputs[0].full_name + '_transpose_2',\n perm=[0, 2, 3, 1])\n\n\n@converter_func(TYPES.ResizeBilinear)\ndef convert_tf_resize_bilinear(scope, operator, container):\n _convert_tf_resize(scope, operator, container, \"linear\")\n\n\n@converter_func(TYPES.ResizeNearestNeighbor)\ndef convert_tf_resize_nearest_neighbor(scope, operator, container):\n _convert_tf_resize(scope, operator, container, \"nearest\")\n\n\n@converter_func(TYPES.Round)\ndef convert_tf_round(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n if operator.target_opset < 11:\n add_output_name = oopb.add_node('Add',\n [operator.inputs[0].full_name,\n ('_add', oopb.float, np.array(-0.5, dtype=np.float32))\n ],\n operator.inputs[0].full_name + '_add')\n cast_0 = oopb.add_node('Cast',\n add_output_name,\n operator.inputs[0].full_name + '_0_cast', to=oopb.float)\n oopb.add_node_with_output(\"Ceil\",\n cast_0,\n operator.output_full_names,\n name=operator.full_name)\n else:\n oopb.add_node_with_output(\"Round\",\n operator.input_full_names,\n operator.output_full_names,\n name=operator.full_name)\n\n\n@converter_func(TYPES.Rsqrt)\ndef convert_tf_rsqrt(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n sqrt_node = oopb.add_node('Sqrt',\n operator.inputs[0].full_name,\n operator.inputs[0].full_name + '_sqrt')\n oopb.apply_op_with_output(\"apply_reciprocal\",\n sqrt_node,\n operator.output_full_names,\n name=operator.full_name + '_cast')\n\n\n@converter_func(TYPES.Shape)\ndef convert_tf_shape(scope, operator, container):\n node = operator.raw_operator\n dtype = _to_onnx_type(node.outputs[0].dtype)\n oopb = OnnxOperatorBuilder(container, scope)\n shape_node = oopb.add_node('Shape',\n operator.input_full_names[0],\n operator.input_full_names[0] + '_shape')\n if dtype == oopb.int64:\n oopb.add_node_with_output('Identity',\n shape_node,\n operator.output_full_names,\n operator.inputs[0].full_name + '_identity')\n else:\n oopb.apply_op_with_output(\"apply_cast\",\n shape_node,\n operator.output_full_names,\n name=operator.full_name + '_cast',\n to=dtype)\n\n\n@converter_func(TYPES.Split)\ndef convert_tf_split(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n 
node = operator.raw_operator\n split_dims = _cal_tensor_value(node.inputs[0]).tolist()\n oopb.apply_op_with_output('apply_split',\n operator.input_full_names[1:],\n operator.output_full_names,\n operator.inputs[0].full_name + '_split',\n axis=split_dims)\n\n\n@converter_func(TYPES.SplitV)\ndef convert_tf_splitv(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n split = _cal_tensor_value(node.inputs[1]).tolist()\n split_dims = _cal_tensor_value(node.inputs[2]).tolist()\n oopb.apply_op_with_output('apply_split',\n operator.input_full_names[0],\n operator.output_full_names,\n operator.inputs[0].full_name + '_split',\n split=split,\n axis=split_dims)\n\n\n@converter_func(TYPES.Squeeze)\ndef convert_tf_squeeze(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n shape = _cal_tensor_shape(node.inputs[0])\n axis = node.get_attr('squeeze_dims')\n\n if axis:\n neg_axis = any([val < 0 for val in axis])\n if neg_axis and operator.target_opset < 11:\n shape_len = len(shape)\n axis = [a + shape_len if a < 0 else a for a in axis]\n else:\n axis = [i for i, j in enumerate(shape) if j == 1]\n\n if shape is None:\n raise ValueError(\"Squeeze input shape cannot be None for node {}\".format(node.name))\n\n oopb.add_node_with_output('Squeeze',\n operator.input_full_names[0],\n operator.output_full_names,\n operator.inputs[0].full_name + '_squeeze',\n axes=axis)\n\n\n@converter_func(TYPES.Tile)\ndef convert_tf_tile(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n cast_1 = oopb.add_node('Cast',\n operator.inputs[1].full_name,\n operator.inputs[1].full_name + '_1_cast', to=oopb.int64)\n oopb.add_node_with_output('Tile',\n [operator.input_full_names[0], cast_1],\n operator.output_full_names,\n operator.inputs[0].full_name + '_tile')\n\n\n@converter_func(TYPES.TopKV2)\ndef convert_tf_topkv2(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n cast_0 = oopb.add_node('Cast',\n operator.inputs[0].full_name,\n operator.inputs[0].full_name + '_0_cast', to=oopb.float)\n cast_1 = oopb.add_node('Cast',\n operator.inputs[1].full_name,\n operator.inputs[1].full_name + '_1_cast', to=oopb.int64)\n unsqueeze = oopb.add_node('Unsqueeze',\n cast_1,\n operator.inputs[1].full_name + '_unsqueeze', axes=[0])\n oopb.add_node_with_output(\"TopK\",\n [cast_0, unsqueeze],\n operator.output_full_names,\n name=operator.full_name)\n\n\n@converter_func(TYPES.Transpose)\ndef convert_tf_transpose(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n perm = _cal_tensor_value(node.inputs[1])\n oopb.apply_op_with_output(\"apply_transpose\",\n operator.inputs[0].full_name,\n operator.output_full_names,\n name=operator.full_name,\n perm=perm)\n\n\n@converter_func(TYPES.Cast)\ndef convert_tf_cast(scope, operator, container):\n node = operator.raw_operator\n to = _to_onnx_type(node.get_attr(\"DstT\"))\n oopb = OnnxOperatorBuilder(container, scope)\n oopb.apply_op_with_output(\"apply_cast\",\n operator.input_full_names,\n operator.output_full_names,\n name=operator.full_name,\n to=to)\n\n\n@converter_func(TYPES.NotEqual)\ndef convert_tf_not_equal(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n if operator.target_opset >= 11:\n equal_out = oopb.add_node('Equal', [operator.inputs[0].full_name, operator.inputs[1].full_name],\n operator.full_name + '_mask')\n oopb.add_node_with_output('Not', equal_out,\n 
operator.output_full_names,\n name=operator.full_name + '_not')\n else:\n equal_input_0 = oopb.add_node('Cast', [operator.inputs[0].full_name],\n operator.full_name + '_input_0_cast', to=6)\n equal_input_1 = oopb.add_node('Cast', [operator.inputs[1].full_name],\n operator.full_name + '_input_1_cast', to=6)\n equal_out = oopb.add_node('Equal', [equal_input_0, equal_input_1],\n operator.full_name + '_mask')\n oopb.add_node_with_output('Not', equal_out,\n operator.output_full_names,\n name=operator.full_name + '_not')\n\n\n@converter_func(TYPES.ReadVariableOp)\ndef convert_tf_read_variable_op(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n if len(node.inputs) == 1 and len(node.outputs) == 1:\n oopb.apply_op_with_output(\"apply_identity\",\n operator.input_full_names,\n operator.output_full_names,\n name=operator.full_name)\n\n\n@converter_func(TYPES.Slice)\ndef convert_tf_slice(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n begin = _cal_tensor_value(node.inputs[1])\n size = _cal_tensor_value(node.inputs[2])\n\n if begin is not None and size is not None:\n begin_value = begin.tolist()\n size_value = size.tolist()\n end_value = []\n for begin_, size_ in zip(begin_value, size_value):\n if size_ == -1:\n end_value.append(np.iinfo(np.int64).max)\n else:\n end_value.append(begin_ + size_)\n else:\n if operator.target_opset < 10:\n raise ValueError(\"Dynamic inputs for tf.slice is not supported until opset 10\")\n\n dtype = _to_onnx_type(node.inputs[1].dtype)\n if dtype != oopb.int64:\n cast_begin = oopb.apply_cast(operator.inputs[1].full_name,\n to=oopb.int64,\n name=operator.full_name + '_begin_cast')\n else:\n cast_begin = [operator.inputs[1].full_name]\n\n dtype = _to_onnx_type(node.inputs[2].dtype)\n if dtype != oopb.int64:\n cast_size = oopb.apply_cast(operator.inputs[2].full_name,\n to=oopb.int64,\n name=operator.full_name + '_size_cast')\n else:\n cast_size = [operator.inputs[2].full_name]\n\n neg_one_size = oopb.add_node('Equal',\n cast_size + [('_neg_one', oopb.int64, np.array(-1, dtype=np.int64))],\n operator.full_name + '_equal_neg_one')\n cast_equal = oopb.apply_cast(neg_one_size,\n to=oopb.int64,\n name=operator.full_name + '_equal_cast')\n value_offset = oopb.apply_mul(\n cast_equal + [('_max_int', oopb.int64, np.array(np.iinfo(np.int64).max, dtype=np.int64))],\n name=operator.full_name + '_mul_max')\n size_adjust = oopb.apply_add(cast_size + value_offset,\n name=operator.full_name + '_size_adjust')\n begin_value = cast_begin[0]\n end_value = oopb.apply_add(cast_begin + size_adjust,\n name=operator.full_name + '_ends')[0]\n\n oopb.apply_op_with_output(\"apply_slice\",\n operator.inputs[0].full_name,\n operator.output_full_names,\n name=operator.full_name,\n starts=begin_value,\n ends=end_value)\n\n\n@converter_func(TYPES.Softmax)\ndef convert_tf_softmax(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n logits_rank = len(_cal_tensor_shape(node.inputs[0]))\n axis = node.get_attr('axis') if hasattr(node, 'axis') else -1\n if operator.target_opset < 11 and axis < 0:\n axis += logits_rank\n\n oopb.apply_op_with_output(\"apply_softmax\",\n operator.input_full_names,\n operator.output_full_names,\n name=operator.full_name,\n axis=axis)\n\n\n@converter_func(TYPES.Square)\ndef convert_tf_square(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n oopb.apply_op_with_output('apply_mul',\n 
operator.input_full_names + operator.input_full_names,\n operator.output_full_names,\n name=operator.full_name)\n\n\ndef _process_begin_end(new_begin, new_end, stride):\n if stride >= 0:\n new_begin.append(0)\n new_end.append(sys.maxsize)\n else:\n new_begin.append(-1)\n new_end.append(-sys.maxsize)\n\n\ndef _prepare_StridedSlice(node, target_opset):\n max_size = sys.maxsize\n begin = _cal_tensor_value(node.inputs[1])\n if begin is None:\n begin = [0] * node.inputs[1].shape[0]\n end = _cal_tensor_value(node.inputs[2])\n if end is None:\n end = [max_size] * node.inputs[2].shape[0]\n strides = _cal_tensor_value(node.inputs[3])\n if strides is None:\n strides = [1] * node.inputs[3].shape[0]\n begin_mask = node.get_attr(\"begin_mask\")\n begin_mask = begin_mask if begin_mask is not None else 0\n end_mask = node.get_attr(\"end_mask\")\n end_mask = end_mask if end_mask is not None else 0\n new_axis_mask = node.get_attr(\"new_axis_mask\")\n new_axis_mask = new_axis_mask if new_axis_mask is not None else 0\n shrink_axis_mask = node.get_attr(\"shrink_axis_mask\")\n shrink_axis_mask = shrink_axis_mask if shrink_axis_mask is not None else 0\n ellipsis_mask = node.get_attr(\"ellipsis_mask\")\n ellipsis_mask = ellipsis_mask if ellipsis_mask is not None else 0\n extra_mask = new_axis_mask or shrink_axis_mask or ellipsis_mask\n new_begin = []\n new_end = []\n axes = []\n steps = []\n # onnx slice op can't remove a axis, track axis and add a squeeze op if needed\n needs_squeeze = []\n ellipsis_gap = 0\n data_input = node.inputs[0]\n\n new_axis_len = 0\n cur_new_axis_mask = new_axis_mask\n while cur_new_axis_mask > 0:\n if cur_new_axis_mask & 1:\n new_axis_len += 1\n cur_new_axis_mask = cur_new_axis_mask >> 1\n new_axis_axes = []\n\n for idx, begin_item in enumerate(begin):\n if target_opset < 10 and strides[idx] != 1:\n raise ValueError(\"StridedSlice: only strides=1 are supported, current stride =\" + str(strides[idx]))\n\n if (ellipsis_mask >> idx) & 1:\n input_shape = node.inputs[0].shape # ctx.get_shape(node.input[0])\n if input_shape is None:\n raise ValueError(\"StridedSlice op {} requires the shape of input\".format(node.name))\n ellipsis_gap = len(input_shape) + new_axis_len - len(begin)\n for ellipsis_start_idx in range(idx, idx + ellipsis_gap + 1):\n new_begin.append(0)\n new_end.append(max_size)\n axes.append(ellipsis_start_idx)\n steps.append(1)\n continue\n\n if (new_axis_mask >> idx) & 1:\n new_axis_axes.append(idx + ellipsis_gap)\n continue\n\n end_item = end[idx]\n axes.append(idx + ellipsis_gap)\n steps.append(strides[idx])\n\n if (begin_mask >> idx) & 1 != 0 and (end_mask >> idx) & 1 != 0:\n _process_begin_end(new_begin, new_end, strides[idx])\n continue\n\n if begin_item == 0 and end_item == 0:\n _process_begin_end(new_begin, new_end, strides[idx])\n continue\n\n shrink_mask = (shrink_axis_mask >> idx) & 1\n if shrink_mask != 0:\n shrink_begin = begin_item + _cal_tensor_shape(data_input)[idx] if begin_item < 0 else begin_item\n new_begin.append(shrink_begin)\n new_end.append(shrink_begin + 1)\n needs_squeeze.append(idx + ellipsis_gap)\n continue\n\n if (begin_mask >> idx) & 1 != 0:\n new_begin.append(0) if strides[idx] >= 0 else new_begin.append(-1)\n new_end.append(end_item)\n continue\n\n if (end_mask >> idx) & 1 != 0:\n new_begin.append(begin_item)\n new_end.append(max_size) if strides[idx] >= 0 else new_begin.append(-max_size)\n continue\n\n new_begin.append(begin_item)\n new_end.append(end_item)\n\n return new_begin, new_end, axes, steps, needs_squeeze, begin_mask, end_mask, 
extra_mask, new_axis_axes\n\n\n@converter_func(TYPES.StridedSlice)\ndef convert_tf_strided_slice(scope, operator, container):\n node = operator.raw_operator\n new_begin, new_end, axes, steps, needs_squeeze, begin_mask, end_mask, extra_mask, new_axis_axes = _prepare_StridedSlice(\n node, operator.target_opset)\n oopb = OnnxOperatorBuilder(container, scope)\n\n if len(new_axis_axes) > 0:\n new_axis_unsqueeze = oopb.add_node('Unsqueeze',\n operator.inputs[0].full_name,\n operator.inputs[0].full_name + '_unsqueeze',\n axes=new_axis_axes)\n else:\n new_axis_unsqueeze = operator.inputs[0].full_name\n\n if operator.target_opset < 10:\n # for now we implement common cases. Things like strides!=1 are not mappable to onnx.\n cropped_tensor_name = oopb.add_node('Slice',\n new_axis_unsqueeze,\n operator.inputs[0].full_name + '_cropping',\n starts=new_begin, ends=new_end, axes=axes)\n else:\n if extra_mask or begin_mask:\n cast_node_begin = True\n else:\n start_cast = oopb.add_node('Cast',\n operator.inputs[1].full_name,\n operator.inputs[1].full_name + '_start_cast', to=7)\n cast_node_begin = False\n\n if extra_mask or end_mask:\n cast_node_end = True\n else:\n end_cast = oopb.add_node('Cast',\n operator.inputs[2].full_name,\n operator.inputs[2].full_name + '_end_cast', to=7)\n cast_node_end = False\n\n cropped_tensor_name = oopb.add_node('Slice',\n [new_axis_unsqueeze,\n ('_start', oopb.int64,\n np.array(new_begin, dtype=np.int64)) if cast_node_begin else start_cast,\n ('_end', oopb.int64,\n np.array(new_end, dtype=np.int64)) if cast_node_end else end_cast,\n ('_axes', oopb.int64, np.array(axes, dtype=np.int64)),\n ('_steps', oopb.int64, np.array(steps, dtype=np.int64))\n ],\n operator.inputs[0].full_name + '_cropping')\n\n if needs_squeeze:\n oopb.add_node_with_output('Squeeze',\n cropped_tensor_name,\n operator.output_full_names,\n operator.inputs[0].full_name + '_squeeze',\n axes=needs_squeeze)\n else:\n oopb.add_node_with_output('Identity',\n cropped_tensor_name,\n operator.output_full_names,\n operator.inputs[0].full_name + '_identity')\n\n\n@converter_func(TYPES.Unpack)\ndef convert_tf_unpack(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n axis_val = node.get_attr('axis')\n input_shape = _cal_tensor_shape(node.inputs[0])\n if axis_val < 0 and operator.target_opset < 11:\n axis_val = len(input_shape) + axis_val\n\n split_node = oopb.add_node_all('Split',\n operator.inputs[0].full_name,\n operator.full_name + '_split',\n outputs_num=input_shape[axis_val],\n axis=axis_val)\n\n for i in range(len(split_node)):\n oopb.apply_op_with_output(\"apply_squeeze\",\n split_node[i],\n operator.outputs[i].full_name,\n name=operator.full_name + '_squeeze_' + str(i),\n axes=[axis_val])\n\n\ndef _convert_tf_var_handle_helper(scope, operator, container, var_handle_name, graph_op_type):\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n\n if is_tf2:\n v_output = node.outputs[0].name\n get_assign_value = False\n for graph_node_name in node.graph._nodes_by_name:\n graph_op = node.graph._nodes_by_name[graph_node_name]\n if graph_op.type == graph_op_type and len(graph_op.inputs) > 1 and v_output == graph_op.inputs[0].name:\n cur_i = graph_op.inputs[1].op\n if cur_i.type == 'Const':\n val_type = cur_i.get_attr('dtype')\n val_shape = [dim.size for dim in cur_i.get_attr('value').tensor_shape.dim]\n if cur_i.get_attr('value').tensor_content != b'':\n val_arr = np.frombuffer(cur_i.get_attr('value').tensor_content,\n 
val_type.as_numpy_dtype).reshape(*val_shape)\n else:\n val = cur_i.get_attr('value').float_val[0]\n val_arr = np.full(tuple(val_shape), val)\n node_input = [('_identity', _to_onnx_type(val_type), val_arr)]\n get_assign_value = True\n break\n else:\n sess = keras.backend.get_session()\n if node.type == 'VarHandleOp':\n val_arr = sess.run([node.name + \"/Read/ReadVariableOp:0\"])[0]\n graph_op = node.graph._nodes_by_name[node.name + \"/Read/ReadVariableOp\"]\n else:\n val_arr = sess.run([node.name + \":0\"])[0]\n graph_op = node.graph._nodes_by_name[node.name]\n val_type = graph_op.get_attr('dtype')\n node_input = [('_identity', _to_onnx_type(val_type), val_arr)]\n get_assign_value = True\n\n if get_assign_value:\n oopb.add_node_with_output('Identity',\n node_input,\n operator.output_full_names,\n operator.outputs[0].full_name + '_identity')\n else:\n raise ValueError(var_handle_name + \" op \" + node.name + \" is not properly processed\")\n\n\n@converter_func(TYPES.VarHandleOp)\ndef convert_tf_var_handle_op(scope, operator, container):\n _convert_tf_var_handle_helper(scope, operator, container, \"VarHandleOp\", \"AssignVariableOp\")\n\n\n@converter_func(TYPES.VariableV2)\ndef convert_tf_variable_v2(scope, operator, container):\n _convert_tf_var_handle_helper(scope, operator, container, \"VariableV2\", \"Assign\")\n\n\n@converter_func(TYPES.Where)\ndef convert_tf_where(scope, operator, container):\n if operator.target_opset < 9:\n raise ValueError(\"Where op is not supported for opset < 9\")\n else:\n oopb = OnnxOperatorBuilder(container, scope)\n node = operator.raw_operator\n where_node = oopb.add_node('NonZero',\n operator.inputs[0].full_name,\n operator.inputs[0].full_name + '_non_zero')\n oopb.apply_op_with_output(\"apply_transpose\",\n where_node,\n operator.output_full_names,\n name=operator.full_name + '_transpose',\n perm=list(reversed(range(len(node.outputs[0].shape)))))\n\n\ndirect_ops = {\"Abs\": (\"apply_abs\",),\n \"Acos\": 7,\n \"Acosh\": 9,\n \"Add\": (\"apply_add\",),\n \"AddV2\": (\"apply_add\",),\n \"Asin\": 7,\n \"Asinh\": 9,\n \"Atan\": 7,\n \"Atanh\": 9,\n \"Ceil\": (\"apply_ceil\",),\n \"Cos\": 7,\n \"Cosh\": 9,\n \"Div\": (\"apply_div\",),\n \"Elu\": (\"apply_elu\",),\n \"Equal\": 7,\n \"Erf\": 9,\n \"Exp\": (\"apply_exp\",),\n \"Floor\": (\"apply_floor\",),\n \"Log\": (\"apply_log\",),\n \"Mul\": (\"apply_mul\",),\n \"Neg\": (\"apply_neg\",),\n \"Pow\": (\"apply_pow\",),\n \"RealDiv\": (\"apply_div\",),\n \"Reciprocal\": (\"apply_reciprocal\",),\n \"Relu\": (\"apply_relu\",),\n \"Sigmoid\": (\"apply_sigmoid\",),\n \"Sin\": 7,\n \"Sinh\": 9,\n \"Softplus\": 1,\n \"Softsign\": 1,\n \"Sqrt\": (\"apply_sqrt\",),\n \"StopGradient\": (\"apply_identity\",),\n \"Sub\": (\"apply_sub\",),\n \"Tan\": 7,\n \"Tanh\": (\"apply_tanh\",)\n }\n\n\ndef tf_op_convert(scope, operator, container):\n oopb = OnnxOperatorBuilder(container, scope)\n type = operator.raw_operator.type\n item = direct_ops[type]\n assert item is not None, \"Can't find the tf op item.\"\n if isinstance(item, numbers.Integral):\n oopb.add_node_with_output(type,\n [var_.full_name for var_ in operator.inputs],\n [var_.full_name for var_ in operator.outputs],\n name=operator.raw_operator.name,\n op_version=item\n )\n else:\n apply_func_name = item[0]\n oopb.apply_op_with_output(apply_func_name,\n [var_.full_name for var_ in operator.inputs],\n [var_.full_name for var_ in operator.outputs],\n name=operator.raw_operator.name,\n )\n\n\nset_converters({k: tf_op_convert for k in direct_ops.keys()})\n"
] | [
[
"numpy.arange",
"numpy.dtype",
"numpy.iinfo",
"numpy.count_nonzero",
"numpy.isscalar",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
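The row ends here; after the code column the remaining two columns are apis (the numpy calls detected in the source) and possible_versions (candidate library versions). As a minimal sketch of how such a row could be inspected offline — assuming it has been exported to a local JSON file, here called row.json, which is a hypothetical name not taken from this page — the following compares the listed numpy APIs against the np.* references that actually appear in the code column:

    import json
    import re

    # "row.json" is a hypothetical file name; assume one row of this dataset
    # was exported as JSON with the columns shown above: repo_name, code,
    # apis, possible_versions.
    with open("row.json", "r", encoding="utf-8") as f:
        row = json.load(f)

    # The code column is shown above as a one-element sequence holding the
    # full Python source of the converter file.
    code_field = row["code"]
    source = code_field[0] if isinstance(code_field, list) else code_field

    # The apis column lists fully qualified numpy calls (e.g. numpy.arange,
    # numpy.iinfo, numpy.array). Cross-check them against np.<name>
    # references found in the source text.
    found = {"numpy." + name for name in re.findall(r"\bnp\.([A-Za-z_]\w*)", source)}
    listed = set(row["apis"][0]) if row.get("apis") else set()

    print("listed but not found in source:", sorted(listed - found))
    print("found in source but not listed:", sorted(found - listed))

Any mismatch this reports is only as reliable as the simple regex it uses; it is a sanity check on the row, not a reconstruction of the pipeline that produced the apis column.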